From 9d2c028d9f77c33fe937c3af47c2b4377f001038 Mon Sep 17 00:00:00 2001
From: "Sascha (Oleksandr) Fedorenko"
Date: Tue, 29 Jan 2019 13:43:21 +0100
Subject: [PATCH 0001/1212] remark on existing local cache for README

---
 README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/README.md b/README.md
index 28dfaee8682..6907217328e 100644
--- a/README.md
+++ b/README.md
@@ -34,6 +34,8 @@ Using the provider
----------------------
 If you're building the provider, follow the instructions to [install it as a plugin.](https://www.terraform.io/docs/plugins/basics.html#installing-a-plugin) After placing it into your plugins directory, run `terraform init` to initialize it. Documentation about the provider specific configuration options can be found on the [provider's website](https://www.terraform.io/docs/providers/aws/index.html).
 
+*Note:* If you previously used the standard version of the plugin in your project, you will need to remove the previously downloaded and cached version of the plugin from the `.terraform/plugins/` directory.
+
 Developing the Provider
---------------------------

From 5c2c90ea9ab494092444d9eef76720b56898d3f3 Mon Sep 17 00:00:00 2001
From: "Sascha (Oleksandr) Fedorenko"
Date: Tue, 29 Jan 2019 13:49:43 +0100
Subject: [PATCH 0002/1212] OpenAPI body basePath import options

---
 aws/resource_aws_api_gateway_rest_api.go      |  20 +
 aws/resource_aws_api_gateway_rest_api_test.go | 344 +++++++++++++++++-
 2 files changed, 363 insertions(+), 1 deletion(-)

diff --git a/aws/resource_aws_api_gateway_rest_api.go b/aws/resource_aws_api_gateway_rest_api.go
index 7f304c8d7ff..d5d74e5c463 100644
--- a/aws/resource_aws_api_gateway_rest_api.go
+++ b/aws/resource_aws_api_gateway_rest_api.go
@@ -60,6 +60,13 @@ func resourceAwsApiGatewayRestApi() *schema.Resource {
 				Optional: true,
 			},
 
+			"body_base_path": {
+				Type:         schema.TypeString,
+				Default:      "ignore",
+				Optional:     true,
+				ValidateFunc: validation.StringInSlice([]string{"ignore", "prepend", "split"}, true),
+			},
+
 			"minimum_compression_size": {
 				Type:     schema.TypeInt,
 				Optional: true,
@@ -153,12 +160,20 @@ func resourceAwsApiGatewayRestApiCreate(d *schema.ResourceData, meta interface{}
 
 	d.SetId(*gateway.Id)
 
+	bodyBasePathMode := d.Get("body_base_path").(string)
+
 	if body, ok := d.GetOk("body"); ok {
 		log.Printf("[DEBUG] Initializing API Gateway from OpenAPI spec %s", d.Id())
 		_, err := conn.PutRestApi(&apigateway.PutRestApiInput{
 			RestApiId: gateway.Id,
 			Mode:      aws.String(apigateway.PutModeOverwrite),
 			Body:      []byte(body.(string)),
+			Parameters: map[string]*string{
+				// See https://docs.aws.amazon.com/cli/latest/reference/apigateway/import-rest-api.html
+				// At the time of writing, according to AWS Support, the docs are incorrect
+				// and the parameter must be called 'basepath', not 'basePath'.
+				"basepath": &bodyBasePathMode,
+			},
 		})
 		if err != nil {
 			return fmt.Errorf("error creating API Gateway specification: %s", err)
@@ -344,12 +359,17 @@ func resourceAwsApiGatewayRestApiUpdate(d *schema.ResourceData, meta interface{}
 	log.Printf("[DEBUG] Updating API Gateway %s", d.Id())
 
 	if d.HasChange("body") {
+		bodyBasePathMode := d.Get("body_base_path").(string)
+
 		if body, ok := d.GetOk("body"); ok {
 			log.Printf("[DEBUG] Updating API Gateway from OpenAPI spec: %s", d.Id())
 			_, err := conn.PutRestApi(&apigateway.PutRestApiInput{
 				RestApiId: aws.String(d.Id()),
 				Mode:      aws.String(apigateway.PutModeOverwrite),
 				Body:      []byte(body.(string)),
+				Parameters: map[string]*string{
+					"basepath": &bodyBasePathMode,
+				},
 			})
 			if err != nil {
 				return fmt.Errorf("error updating API Gateway 
specification: %s", err) diff --git a/aws/resource_aws_api_gateway_rest_api_test.go b/aws/resource_aws_api_gateway_rest_api_test.go index e169a3b4023..1af758fdf7a 100644 --- a/aws/resource_aws_api_gateway_rest_api_test.go +++ b/aws/resource_aws_api_gateway_rest_api_test.go @@ -379,6 +379,105 @@ func TestAccAWSAPIGatewayRestApi_openapi(t *testing.T) { }) } +func TestAccAWSAPIGatewayRestApi_openapi_body_base_path_ignore(t *testing.T) { + var conf apigateway.RestApi + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayRestAPIConfigOpenAPIBasePathIgnore, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists("aws_api_gateway_rest_api.test", &conf), + testAccCheckAWSAPIGatewayRestAPIRoutes(&conf, []string{"/", "/test"}), + ), + }, + { + ResourceName: "aws_api_gateway_rest_api.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"body"}, + }, + { + Config: testAccAWSAPIGatewayRestAPIUpdateConfigOpenAPIBasePathIgnore, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists("aws_api_gateway_rest_api.test", &conf), + testAccCheckAWSAPIGatewayRestAPINameAttribute(&conf, "test"), + testAccCheckAWSAPIGatewayRestAPIRoutes(&conf, []string{"/", "/update"}), + ), + }, + }, + }) +} + +func TestAccAWSAPIGatewayRestApi_openapi_body_base_path_prepend(t *testing.T) { + var conf apigateway.RestApi + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayRestAPIConfigOpenAPIBasePathPrepend, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists("aws_api_gateway_rest_api.test", &conf), + testAccCheckAWSAPIGatewayRestAPIRoutes(&conf, []string{"/", "/foo/bar/baz/test"}), + ), + }, + { + ResourceName: "aws_api_gateway_rest_api.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"body"}, + }, + { + Config: testAccAWSAPIGatewayRestAPIUpdateConfigOpenAPIBasePathPrepend, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists("aws_api_gateway_rest_api.test", &conf), + testAccCheckAWSAPIGatewayRestAPINameAttribute(&conf, "test"), + testAccCheckAWSAPIGatewayRestAPIRoutes(&conf, []string{"/", "/foo/bar/baz/update"}), + ), + }, + }, + }) +} + +func TestAccAWSAPIGatewayRestApi_openapi_body_base_path_split(t *testing.T) { + var conf apigateway.RestApi + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayRestAPIConfigOpenAPIBasePathSplit, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists("aws_api_gateway_rest_api.test", &conf), + testAccCheckAWSAPIGatewayRestAPIRoutes(&conf, []string{"/", "/bar/baz/test"}), + ), + }, + { + ResourceName: "aws_api_gateway_rest_api.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"body"}, + }, + { + Config: testAccAWSAPIGatewayRestAPIUpdateConfigOpenAPIBasePathSplit, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists("aws_api_gateway_rest_api.test", &conf), + 
testAccCheckAWSAPIGatewayRestAPINameAttribute(&conf, "test"), + testAccCheckAWSAPIGatewayRestAPIRoutes(&conf, []string{"/", "/bar/baz/update"}), + ), + }, + }, + }) +} + func testAccCheckAWSAPIGatewayRestAPINameAttribute(conf *apigateway.RestApi, name string) resource.TestCheckFunc { return func(s *terraform.State) error { if *conf.Name != name { @@ -624,7 +723,7 @@ resource "aws_api_gateway_rest_api" "test" { "info": { "title": "test", "version": "2017-04-20T04:08:08Z" - }, + }, "schemes": [ "https" ], @@ -692,3 +791,246 @@ resource "aws_api_gateway_rest_api" "test" { EOF } ` + +const testAccAWSAPIGatewayRestAPIConfigOpenAPIBasePathIgnore = ` +resource "aws_api_gateway_rest_api" "test" { + name = "test" + body_base_path = "ignore" + body = < Date: Tue, 29 Jan 2019 14:01:30 +0100 Subject: [PATCH 0003/1212] docs --- website/docs/r/api_gateway_rest_api.html.markdown | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/website/docs/r/api_gateway_rest_api.html.markdown b/website/docs/r/api_gateway_rest_api.html.markdown index 03ea9701669..66a935cda73 100644 --- a/website/docs/r/api_gateway_rest_api.html.markdown +++ b/website/docs/r/api_gateway_rest_api.html.markdown @@ -43,6 +43,7 @@ The following arguments are supported: * `binary_media_types` - (Optional) The list of binary media types supported by the RestApi. By default, the RestApi supports only UTF-8-encoded text payloads. * `minimum_compression_size` - (Optional) Minimum response size to compress for the REST API. Integer between -1 and 10485760 (10MB). Setting a value greater than -1 will enable compression, -1 disables compression (default). * `body` - (Optional) An OpenAPI specification that defines the set of routes and integrations to create as part of the REST API. +* `body_base_path` - (Optional) Together with OpenAPI specification in `body`, instructs how to interpret the `basePath` field. Defined below. * `policy` - (Optional) JSON formatted policy document that controls access to the API Gateway. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](/docs/providers/aws/guides/iam-policy-documents.html) * `api_key_source` - (Optional) The source of the API key for requests. Valid values are HEADER (default) and AUTHORIZER. @@ -61,6 +62,10 @@ __Note__: If the `body` argument is provided, the OpenAPI specification will be * `types` - (Required) A list of endpoint types. This resource currently only supports managing a single value. Valid values: `EDGE`, `REGIONAL` or `PRIVATE`. If unspecified, defaults to `EDGE`. Must be declared as `REGIONAL` in non-Commercial partitions. Refer to the [documentation](https://docs.aws.amazon.com/apigateway/latest/developerguide/create-regional-api.html) for more information on the difference between edge-optimized and regional APIs. +### body_base_path + +* `types` - (Required) One of `ignore` (default), `prepend` or `split`. 
+
 ## Attributes Reference
 
 In addition to all arguments above, the following attributes are exported:

From 62bdba4fd77e83534fa0fc31fcde43ea89a1f72a Mon Sep 17 00:00:00 2001
From: 1newsr
Date: Tue, 3 Sep 2019 20:33:01 +0900
Subject: [PATCH 0004/1212] Add reader_endpoint_address to aws_elasticache_replication_group

---
 aws/data_source_aws_elasticache_replication_group.go        | 5 +++++
 aws/data_source_aws_elasticache_replication_group_test.go   | 1 +
 aws/resource_aws_elasticache_replication_group.go           | 5 +++++
 aws/resource_aws_elasticache_replication_group_test.go      | 4 ++++
 website/docs/d/elasticache_replication_group.html.markdown  | 1 +
 website/docs/r/elasticache_replication_group.html.markdown  | 1 +
 6 files changed, 17 insertions(+)

diff --git a/aws/data_source_aws_elasticache_replication_group.go b/aws/data_source_aws_elasticache_replication_group.go
index 7c3ee5e2ca5..ab129fbf4f0 100644
--- a/aws/data_source_aws_elasticache_replication_group.go
+++ b/aws/data_source_aws_elasticache_replication_group.go
@@ -41,6 +41,10 @@ func dataSourceAwsElasticacheReplicationGroup() *schema.Resource {
 				Type:     schema.TypeString,
 				Computed: true,
 			},
+			"reader_endpoint_address": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
 			"number_cache_clusters": {
 				Type:     schema.TypeInt,
 				Computed: true,
@@ -110,6 +114,7 @@ func dataSourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta i
 		}
 		d.Set("port", rg.NodeGroups[0].PrimaryEndpoint.Port)
 		d.Set("primary_endpoint_address", rg.NodeGroups[0].PrimaryEndpoint.Address)
+		d.Set("reader_endpoint_address", rg.NodeGroups[0].ReaderEndpoint.Address)
 	}
 	d.Set("number_cache_clusters", len(rg.MemberClusters))
 	if err := d.Set("member_clusters", flattenStringList(rg.MemberClusters)); err != nil {
diff --git a/aws/data_source_aws_elasticache_replication_group_test.go b/aws/data_source_aws_elasticache_replication_group_test.go
index d948bded24a..db0354279f7 100644
--- a/aws/data_source_aws_elasticache_replication_group_test.go
+++ b/aws/data_source_aws_elasticache_replication_group_test.go
@@ -23,6 +23,7 @@ func TestAccDataSourceAwsElasticacheReplicationGroup_basic(t *testing.T) {
 					resource.TestCheckResourceAttr("data.aws_elasticache_replication_group.bar", "automatic_failover_enabled", "true"),
 					resource.TestCheckResourceAttr("data.aws_elasticache_replication_group.bar", "port", "6379"),
 					resource.TestCheckResourceAttrSet("data.aws_elasticache_replication_group.bar", "primary_endpoint_address"),
+					resource.TestCheckResourceAttrSet("data.aws_elasticache_replication_group.bar", "reader_endpoint_address"),
 					resource.TestCheckResourceAttr("data.aws_elasticache_replication_group.bar", "number_cache_clusters", "2"),
 					resource.TestCheckResourceAttr("data.aws_elasticache_replication_group.bar", "member_clusters.#", "2"),
 					resource.TestCheckResourceAttr("data.aws_elasticache_replication_group.bar", "node_type", "cache.m1.small"),
diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go
index 44d96fdf382..d1ad13ce25b 100644
--- a/aws/resource_aws_elasticache_replication_group.go
+++ b/aws/resource_aws_elasticache_replication_group.go
@@ -153,6 +153,10 @@ func resourceAwsElasticacheReplicationGroup() *schema.Resource {
 				Type:     schema.TypeString,
 				Computed: true,
 			},
+			"reader_endpoint_address": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
 			"replication_group_description": {
 				Type:     schema.TypeString,
 				Required: true,
@@ -472,6 
+476,7 @@ func resourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta int } else { d.Set("port", rgp.NodeGroups[0].PrimaryEndpoint.Port) d.Set("primary_endpoint_address", rgp.NodeGroups[0].PrimaryEndpoint.Address) + d.Set("reader_endpoint_address", rgp.NodeGroups[0].ReaderEndpoint.Address) } d.Set("auto_minor_version_upgrade", c.AutoMinorVersionUpgrade) diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go index 98aabf2ed44..7cd85d6a721 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -304,6 +304,8 @@ func TestAccAWSElasticacheReplicationGroup_multiAzInVpc(t *testing.T) { "aws_elasticache_replication_group.bar", "snapshot_retention_limit", "7"), resource.TestCheckResourceAttrSet( "aws_elasticache_replication_group.bar", "primary_endpoint_address"), + resource.TestCheckResourceAttrSet( + "aws_elasticache_replication_group.bar", "reader_endpoint_address"), ), }, }, @@ -331,6 +333,8 @@ func TestAccAWSElasticacheReplicationGroup_redisClusterInVpc2(t *testing.T) { "aws_elasticache_replication_group.bar", "snapshot_retention_limit", "7"), resource.TestCheckResourceAttrSet( "aws_elasticache_replication_group.bar", "primary_endpoint_address"), + resource.TestCheckResourceAttrSet( + "aws_elasticache_replication_group.bar", "reader_endpoint_address"), ), }, }, diff --git a/website/docs/d/elasticache_replication_group.html.markdown b/website/docs/d/elasticache_replication_group.html.markdown index ec0f9d71c6c..4c043469322 100644 --- a/website/docs/d/elasticache_replication_group.html.markdown +++ b/website/docs/d/elasticache_replication_group.html.markdown @@ -40,3 +40,4 @@ In addition to all arguments above, the following attributes are exported: * `port` – The port number on which the configuration endpoint will accept connections. * `configuration_endpoint_address` - The configuration endpoint address to allow host discovery. * `primary_endpoint_address` - The endpoint of the primary node in this node group (shard). +* `reader_endpoint_address` - The endpoint of the reader node in this node group (shard). diff --git a/website/docs/r/elasticache_replication_group.html.markdown b/website/docs/r/elasticache_replication_group.html.markdown index 0cc0e42bbbd..cf1ce1a5b14 100644 --- a/website/docs/r/elasticache_replication_group.html.markdown +++ b/website/docs/r/elasticache_replication_group.html.markdown @@ -148,6 +148,7 @@ In addition to all arguments above, the following attributes are exported: * `id` - The ID of the ElastiCache Replication Group. * `configuration_endpoint_address` - The address of the replication group configuration endpoint when cluster mode is enabled. * `primary_endpoint_address` - (Redis only) The address of the endpoint for the primary node in the replication group, if the cluster mode is disabled. +* `reader_endpoint_address` - (Redis only) The address of the endpoint for the reader node in the replication group, if the cluster mode is disabled. * `member_clusters` - The identifiers of all the nodes that are part of this replication group. 
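As a usage sketch (the zone and resource names here are assumed, not part of the patch), the new attribute can be consumed like the primary endpoint, for example to publish a read-only DNS name for the replicas:

```hcl
resource "aws_route53_record" "redis_readonly" {
  zone_id = "${aws_route53_zone.example.zone_id}"
  name    = "redis-ro.example.com"
  type    = "CNAME"
  ttl     = 300
  records = ["${aws_elasticache_replication_group.example.reader_endpoint_address}"]
}
```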
## Timeouts From d7b8088c3e963addb9bebc76947247ad056c4795 Mon Sep 17 00:00:00 2001 From: "Yi-Wen.Chang" Date: Thu, 12 Sep 2019 18:09:17 +0200 Subject: [PATCH 0005/1212] Add mutex and retry for usage plan key --- aws/resource_aws_api_gateway_usage_plan_key.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_api_gateway_usage_plan_key.go b/aws/resource_aws_api_gateway_usage_plan_key.go index edc2416d9b6..cdabd7697b2 100644 --- a/aws/resource_aws_api_gateway_usage_plan_key.go +++ b/aws/resource_aws_api_gateway_usage_plan_key.go @@ -3,6 +3,7 @@ package aws import ( "fmt" "log" + "sync" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -10,6 +11,8 @@ import ( "github.com/hashicorp/terraform/helper/schema" ) +var resourceAwsApiGatewayUsagePlanKeyMutex = &sync.Mutex{} + func resourceAwsApiGatewayUsagePlanKey() *schema.Resource { return &schema.Resource{ Create: resourceAwsApiGatewayUsagePlanKeyCreate, @@ -58,11 +61,19 @@ func resourceAwsApiGatewayUsagePlanKeyCreate(d *schema.ResourceData, meta interf UsagePlanId: aws.String(d.Get("usage_plan_id").(string)), } - up, err := conn.CreateUsagePlanKey(params) + resourceAwsApiGatewayUsagePlanKeyMutex.Lock() + defer resourceAwsApiGatewayUsagePlanKeyMutex.Unlock() + + o, err := retryOnAwsCode(apigateway.ErrCodeConflictException, func() (interface{}, error) { + return conn.CreateUsagePlanKey(params) + }) + if err != nil { return fmt.Errorf("Error creating API Gateway Usage Plan Key: %s", err) } + up := o.(*apigateway.UsagePlanKey) + d.SetId(*up.Id) return resourceAwsApiGatewayUsagePlanKeyRead(d, meta) From 65365bcf7239b59dbf7fd9bf354470827f2979bb Mon Sep 17 00:00:00 2001 From: 1newsr <35871065+1newsr@users.noreply.github.com> Date: Tue, 26 Nov 2019 08:30:04 +0900 Subject: [PATCH 0006/1212] Fix format --- aws/resource_aws_elasticache_replication_group_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go index 7d253b4aecb..ab60dd3c2a2 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -382,7 +382,7 @@ func TestAccAWSElasticacheReplicationGroup_redisClusterInVpc2(t *testing.T) { resourceName, "snapshot_retention_limit", "7"), resource.TestCheckResourceAttrSet( resourceName, "primary_endpoint_address"), - resource.TestCheckResourceAttrSet( + resource.TestCheckResourceAttrSet( resourceName, "reader_endpoint_address"), ), }, From 10d2deb951ac449d9a49387bfdb5a29fc1c88170 Mon Sep 17 00:00:00 2001 From: 1newsr <35871065+1newsr@users.noreply.github.com> Date: Tue, 26 Nov 2019 08:31:43 +0900 Subject: [PATCH 0007/1212] Fix format --- aws/resource_aws_elasticache_replication_group_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go index ab60dd3c2a2..6f7d32694b3 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -345,7 +345,7 @@ func TestAccAWSElasticacheReplicationGroup_multiAzInVpc(t *testing.T) { resourceName, "snapshot_retention_limit", "7"), resource.TestCheckResourceAttrSet( resourceName, "primary_endpoint_address"), - resource.TestCheckResourceAttrSet( + resource.TestCheckResourceAttrSet( resourceName, "reader_endpoint_address"), ), }, From 
31319f18cc3e55ed1a5a6b8b2ba174ec0d11e842 Mon Sep 17 00:00:00 2001 From: "Sascha (Oleksandr) Fedorenko" Date: Mon, 20 Jan 2020 14:47:34 +0100 Subject: [PATCH 0008/1212] fixing merge problem --- website/docs/r/api_gateway_rest_api.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/api_gateway_rest_api.html.markdown b/website/docs/r/api_gateway_rest_api.html.markdown index b82602fb22a..ae5180d32ab 100644 --- a/website/docs/r/api_gateway_rest_api.html.markdown +++ b/website/docs/r/api_gateway_rest_api.html.markdown @@ -44,7 +44,7 @@ The following arguments are supported: * `minimum_compression_size` - (Optional) Minimum response size to compress for the REST API. Integer between -1 and 10485760 (10MB). Setting a value greater than -1 will enable compression, -1 disables compression (default). * `body` - (Optional) An OpenAPI specification that defines the set of routes and integrations to create as part of the REST API. * `body_base_path` - (Optional) Together with OpenAPI specification in `body`, instructs how to interpret the `basePath` field. Defined below. -* `policy` - (Optional) JSON formatted policy document that controls access to the API Gateway. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](/docs/providers/aws/guides/iam-policy-documents.html) +* `policy` - (Optional) JSON formatted policy document that controls access to the API Gateway. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy) * `api_key_source` - (Optional) The source of the API key for requests. Valid values are HEADER (default) and AUTHORIZER. * `tags` - (Optional) Key-value mapping of resource tags From deb6be88bcd4b1b19f0d82f660956cfb42e0257b Mon Sep 17 00:00:00 2001 From: Joe Atzberger Date: Mon, 27 Jan 2020 15:06:49 -0500 Subject: [PATCH 0009/1212] SNS:Receive is not a recognized IAM action Refer to https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazonsns.html --- website/docs/r/sns_topic_subscription.html.markdown | 1 - 1 file changed, 1 deletion(-) diff --git a/website/docs/r/sns_topic_subscription.html.markdown b/website/docs/r/sns_topic_subscription.html.markdown index 424864c17bf..a7799d771f1 100644 --- a/website/docs/r/sns_topic_subscription.html.markdown +++ b/website/docs/r/sns_topic_subscription.html.markdown @@ -86,7 +86,6 @@ data "aws_iam_policy_document" "sns-topic-policy" { "SNS:Subscribe", "SNS:SetTopicAttributes", "SNS:RemovePermission", - "SNS:Receive", "SNS:Publish", "SNS:ListSubscriptionsByTopic", "SNS:GetTopicAttributes", From 1a99dce89d0fde32f138f4a14c5eaf7680bc87fd Mon Sep 17 00:00:00 2001 From: Joseph Heyburn <34041368+jdheyburn@users.noreply.github.com> Date: Tue, 4 Feb 2020 08:21:08 +0000 Subject: [PATCH 0010/1212] Add patch_source block to resource_aws_ssm_patch_baseline --- aws/resource_aws_ssm_patch_baseline.go | 83 +++++++++++++++++ aws/resource_aws_ssm_patch_baseline_test.go | 92 +++++++++++++++++++ .../docs/r/ssm_patch_baseline.html.markdown | 26 ++++++ 3 files changed, 201 insertions(+) diff --git a/aws/resource_aws_ssm_patch_baseline.go b/aws/resource_aws_ssm_patch_baseline.go index d1853aa69d7..476455bdc72 100644 --- a/aws/resource_aws_ssm_patch_baseline.go +++ b/aws/resource_aws_ssm_patch_baseline.go @@ -3,6 +3,7 @@ package aws import ( "fmt" "log" + "regexp" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ssm" @@ -143,6 
+144,38 @@ func resourceAwsSsmPatchBaseline() *schema.Resource { Default: ssm.PatchComplianceLevelUnspecified, ValidateFunc: validation.StringInSlice(ssmPatchComplianceLevels, false), }, + + "patch_source": { + Type: schema.TypeList, + Optional: true, + MaxItems: 20, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9_\-.]{3,50}$`), "see https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_PatchSource.html"), + }, + + "configuration": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 1024), + }, + + "products": { + Type: schema.TypeList, + Required: true, + MaxItems: 20, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringLenBetween(1, 128), + }, + }, + }, + }, + }, + "tags": tagsSchema(), }, } @@ -181,6 +214,10 @@ func resourceAwsSsmPatchBaselineCreate(d *schema.ResourceData, meta interface{}) params.ApprovalRules = expandAwsSsmPatchRuleGroup(d) } + if _, ok := d.GetOk("patch_source"); ok { + params.Sources = expandAwsSsmPatchSource(d) + } + resp, err := ssmconn.CreatePatchBaseline(params) if err != nil { return err @@ -225,6 +262,10 @@ func resourceAwsSsmPatchBaselineUpdate(d *schema.ResourceData, meta interface{}) params.GlobalFilters = expandAwsSsmPatchFilterGroup(d) } + if d.HasChange("patch_source") { + params.Sources = expandAwsSsmPatchSource(d) + } + _, err := ssmconn.UpdatePatchBaseline(params) if err != nil { if isAWSErr(err, ssm.ErrCodeDoesNotExistException, "") { @@ -277,6 +318,10 @@ func resourceAwsSsmPatchBaselineRead(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error setting approval rules error: %#v", err) } + if err := d.Set("patch_source", flattenAwsSsmPatchSource(resp.Sources)); err != nil { + return fmt.Errorf("Error setting patch sources error: %#v", err) + } + tags, err := keyvaluetags.SsmListTags(ssmconn, d.Id(), ssm.ResourceTypeForTaggingPatchBaseline) if err != nil { @@ -405,3 +450,41 @@ func flattenAwsSsmPatchRuleGroup(group *ssm.PatchRuleGroup) []map[string]interfa return result } + +func expandAwsSsmPatchSource(d *schema.ResourceData) []*ssm.PatchSource { + var sources []*ssm.PatchSource + + sourceConfigs := d.Get("patch_source").([]interface{}) + + for _, sConfig := range sourceConfigs { + config := sConfig.(map[string]interface{}) + + source := &ssm.PatchSource{ + Name: aws.String(config["name"].(string)), + Configuration: aws.String(config["configuration"].(string)), + Products: expandStringList(config["products"].([]interface{})), + } + + sources = append(sources, source) + } + + return sources +} + +func flattenAwsSsmPatchSource(sources []*ssm.PatchSource) []map[string]interface{} { + if len(sources) == 0 { + return nil + } + + result := make([]map[string]interface{}, 0, len(sources)) + + for _, source := range sources { + s := make(map[string]interface{}) + s["name"] = *source.Name + s["configuration"] = *source.Configuration + s["products"] = flattenStringList(source.Products) + result = append(result, s) + } + + return result +} diff --git a/aws/resource_aws_ssm_patch_baseline_test.go b/aws/resource_aws_ssm_patch_baseline_test.go index f9bdc34cf9b..8af4c12fc90 100644 --- a/aws/resource_aws_ssm_patch_baseline_test.go +++ b/aws/resource_aws_ssm_patch_baseline_test.go @@ -167,6 +167,56 @@ func TestAccAWSSSMPatchBaseline_OperatingSystem(t *testing.T) { }) } +func TestAccAWSSSMPatchBaseline_PatchSources(t 
*testing.T) { + var before, after ssm.PatchBaselineIdentity + name := acctest.RandString(10) + resourceName := "aws_ssm_patch_baseline.foo" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSMPatchBaselineDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSSMPatchBaselineConfigWithPatchSource(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSMPatchBaselineExists(resourceName, &before), + resource.TestCheckResourceAttr(resourceName, "patch_source.#", "1"), + resource.TestCheckResourceAttr(resourceName, "patch_source.0.name", "My-AL2017.09"), + resource.TestCheckResourceAttr(resourceName, "patch_source.0.configuration", "[amzn-main] \nname=amzn-main-Base\nmirrorlist=http://repo./$awsregion./$awsdomain//$releasever/main/mirror.list //nmirrorlist_expire=300//nmetadata_expire=300 \npriority=10 \nfailovermethod=priority \nfastestmirror_enabled=0 \ngpgcheck=1 \ngpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-amazon-ga \nenabled=1 \nretries=3 \ntimeout=5\nreport_instanceid=yes"), + resource.TestCheckResourceAttr(resourceName, "patch_source.0.products.#", "1"), + resource.TestCheckResourceAttr(resourceName, "patch_source.0.products.0", "AmazonLinux2017.09"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSSSMPatchBaselineConfigWithPatchSourceUpdated(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSMPatchBaselineExists(resourceName, &after), + resource.TestCheckResourceAttr(resourceName, "patch_source.#", "2"), + resource.TestCheckResourceAttr(resourceName, "patch_source.0.name", "My-AL2017.09"), + resource.TestCheckResourceAttr(resourceName, "patch_source.0.configuration", "[amzn-main] \nname=amzn-main-Base\nmirrorlist=http://repo./$awsregion./$awsdomain//$releasever/main/mirror.list //nmirrorlist_expire=300//nmetadata_expire=300 \npriority=10 \nfailovermethod=priority \nfastestmirror_enabled=0 \ngpgcheck=1 \ngpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-amazon-ga \nenabled=1 \nretries=3 \ntimeout=5\nreport_instanceid=yes"), + resource.TestCheckResourceAttr(resourceName, "patch_source.0.products.#", "1"), + resource.TestCheckResourceAttr(resourceName, "patch_source.0.products.0", "AmazonLinux2017.09"), + resource.TestCheckResourceAttr(resourceName, "patch_source.1.name", "My-AL2018.03"), + resource.TestCheckResourceAttr(resourceName, "patch_source.1.configuration", "[amzn-main] \nname=amzn-main-Base\nmirrorlist=http://repo./$awsregion./$awsdomain//$releasever/main/mirror.list //nmirrorlist_expire=300//nmetadata_expire=300 \npriority=10 \nfailovermethod=priority \nfastestmirror_enabled=0 \ngpgcheck=1 \ngpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-amazon-ga \nenabled=1 \nretries=3 \ntimeout=5\nreport_instanceid=yes"), + resource.TestCheckResourceAttr(resourceName, "patch_source.1.products.#", "1"), + resource.TestCheckResourceAttr(resourceName, "patch_source.1.products.0", "AmazonLinux2018.03"), + func(*terraform.State) error { + if *before.BaselineId != *after.BaselineId { + t.Fatal("Baseline IDs changed unexpectedly") + } + return nil + }, + ), + }, + }, + }) +} + func testAccCheckAwsSsmPatchBaselineRecreated(t *testing.T, before, after *ssm.PatchBaselineIdentity) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -373,3 +423,45 @@ resource "aws_ssm_patch_baseline" "foo" { } `, rName) } + +func testAccAWSSSMPatchBaselineConfigWithPatchSource(rName string) string { + return 
fmt.Sprintf(` +resource "aws_ssm_patch_baseline" "foo" { + name = %[1]q + description = "Baseline containing all updates approved for production systems" + approved_patches_compliance_level = "CRITICAL" + approved_patches = ["test123"] + operating_system = "AMAZON_LINUX" + + patch_source { + name = "My-AL2017.09" + configuration = "[amzn-main] \nname=amzn-main-Base\nmirrorlist=http://repo./$awsregion./$awsdomain//$releasever/main/mirror.list //nmirrorlist_expire=300//nmetadata_expire=300 \npriority=10 \nfailovermethod=priority \nfastestmirror_enabled=0 \ngpgcheck=1 \ngpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-amazon-ga \nenabled=1 \nretries=3 \ntimeout=5\nreport_instanceid=yes" + products = ["AmazonLinux2017.09"] + } +} +`, rName) +} + +func testAccAWSSSMPatchBaselineConfigWithPatchSourceUpdated(rName string) string { + return fmt.Sprintf(` +resource "aws_ssm_patch_baseline" "foo" { + name = %[1]q + description = "Baseline containing all updates approved for production systems" + approved_patches_compliance_level = "CRITICAL" + approved_patches = ["test123"] + operating_system = "AMAZON_LINUX" + + patch_source { + name = "My-AL2017.09" + configuration = "[amzn-main] \nname=amzn-main-Base\nmirrorlist=http://repo./$awsregion./$awsdomain//$releasever/main/mirror.list //nmirrorlist_expire=300//nmetadata_expire=300 \npriority=10 \nfailovermethod=priority \nfastestmirror_enabled=0 \ngpgcheck=1 \ngpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-amazon-ga \nenabled=1 \nretries=3 \ntimeout=5\nreport_instanceid=yes" + products = ["AmazonLinux2017.09"] + } + + patch_source { + name = "My-AL2018.03" + configuration = "[amzn-main] \nname=amzn-main-Base\nmirrorlist=http://repo./$awsregion./$awsdomain//$releasever/main/mirror.list //nmirrorlist_expire=300//nmetadata_expire=300 \npriority=10 \nfailovermethod=priority \nfastestmirror_enabled=0 \ngpgcheck=1 \ngpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-amazon-ga \nenabled=1 \nretries=3 \ntimeout=5\nreport_instanceid=yes" + products = ["AmazonLinux2018.03"] + } +} +`, rName) +} diff --git a/website/docs/r/ssm_patch_baseline.html.markdown b/website/docs/r/ssm_patch_baseline.html.markdown index 19abea2713f..f383db67e4d 100644 --- a/website/docs/r/ssm_patch_baseline.html.markdown +++ b/website/docs/r/ssm_patch_baseline.html.markdown @@ -119,6 +119,25 @@ resource "aws_ssm_patch_baseline" "windows_os_apps" { } ``` +Advanced usage, specifying alternate patch source repository + +```hcl +resource "aws_ssm_patch_baseline" "al_2017_09" { + name = "Amazon-Linux-2017.09" + description = "My patch repository for Amazon Linux 2017.09" + operating_system = "AMAZON_LINUX" + + approval_rule { + ... + } + + patch_source { + name = "My-AL2017.09" + configuration = "[amzn-main] \nname=amzn-main-Base\nmirrorlist=http://repo./$awsregion./$awsdomain//$releasever/main/mirror.list //nmirrorlist_expire=300//nmetadata_expire=300 \npriority=10 \nfailovermethod=priority \nfastestmirror_enabled=0 \ngpgcheck=1 \ngpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-amazon-ga \nenabled=1 \nretries=3 \ntimeout=5\nreport_instanceid=yes" + products = ["AmazonLinux2017.09"] + } +} +``` ## Argument Reference @@ -132,6 +151,7 @@ The following arguments are supported: * `rejected_patches` - (Optional) A list of rejected patches. * `global_filter` - (Optional) A set of global filters used to exclude patches from the baseline. Up to 4 global filters can be specified using Key/Value pairs. Valid Keys are `PRODUCT | CLASSIFICATION | MSRC_SEVERITY | PATCH_ID`. 
* `approval_rule` - (Optional) A set of rules used to include patches in the baseline. Up to 10 approval rules can be specified. Each approval_rule block requires the fields documented below.
+* `patch_source` - (Optional) A list of alternate source repositories to retrieve patches from. Each patch_source block requires the fields documented below. Applies to Linux instances only.
 
 The `approval_rule` block supports:
 
@@ -142,6 +162,12 @@ The `approval_rule` block supports:
 * `enable_non_security` - (Optional) Boolean enabling the application of non-security updates. The default value is 'false'. Valid for Linux instances only.
 * `tags` - (Optional) A mapping of tags to assign to the resource.
 
+The `patch_source` block supports:
+
+* `name` - (Required) The name specified to identify the patch source.
+* `configuration` - (Required) The value of the yum repo configuration.
+* `products` - (Required) The specific operating system versions a patch repository applies to, such as `"Ubuntu16.04"`, `"AmazonLinux2016.09"`, `"RedhatEnterpriseLinux7.2"` or `"Suse12.7"`. For lists of supported product values, see [PatchFilter](https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_PatchFilter.html).
+
 ## Attributes Reference
 
 In addition to all arguments above, the following attributes are exported:

From 8f40b1c0e78c65086cba5ff43097c91a24e0d8a8 Mon Sep 17 00:00:00 2001
From: Roman Stepanchuk
Date: Sat, 8 Feb 2020 12:30:06 +0200
Subject: [PATCH 0011/1212] Update dlm_lifecycle_policy.markdown

The list of actions needs an update, because the current one doesn't work.
The list provided here is based on the default aws_iam_role_policy that AWS
recommends during Data Lifecycle Manager configuration.
---
 website/docs/r/dlm_lifecycle_policy.markdown | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/website/docs/r/dlm_lifecycle_policy.markdown b/website/docs/r/dlm_lifecycle_policy.markdown
index e5157107c68..4b028fca9a4 100644
--- a/website/docs/r/dlm_lifecycle_policy.markdown
+++ b/website/docs/r/dlm_lifecycle_policy.markdown
@@ -45,7 +45,9 @@ resource "aws_iam_role_policy" "dlm_lifecycle" {
          "Effect": "Allow",
          "Action": [
             "ec2:CreateSnapshot",
+            "ec2:CreateSnapshots",
             "ec2:DeleteSnapshot",
+            "ec2:DescribeInstances",
             "ec2:DescribeVolumes",
             "ec2:DescribeSnapshots"
          ],

From 85fdcd5160d6c7a35a57d401a599c63f59c6ccd1 Mon Sep 17 00:00:00 2001
From: Charlie Stocker
Date: Thu, 27 Feb 2020 18:55:32 +0000
Subject: [PATCH 0012/1212] aws_ssm_maintenance_window_task: allow service_role_arn to be optional

---
 ...esource_aws_ssm_maintenance_window_task.go |  12 +-
 ...ce_aws_ssm_maintenance_window_task_test.go | 174 +++++++++---------
 .../ssm_maintenance_window_task.html.markdown |   6 +-
 3 files changed, 96 insertions(+), 96 deletions(-)

diff --git a/aws/resource_aws_ssm_maintenance_window_task.go b/aws/resource_aws_ssm_maintenance_window_task.go
index 72d7148027d..d814657c25e 100644
--- a/aws/resource_aws_ssm_maintenance_window_task.go
+++ b/aws/resource_aws_ssm_maintenance_window_task.go
@@ -54,7 +54,7 @@ func resourceAwsSsmMaintenanceWindowTask() *schema.Resource {
 
 			"service_role_arn": {
 				Type:     schema.TypeString,
-				Required: true,
+				Optional: true,
 			},
 
 			"targets": {
@@ -680,11 +680,14 @@ func resourceAwsSsmMaintenanceWindowTaskCreate(d *schema.ResourceData, meta inte
 		MaxConcurrency: aws.String(d.Get("max_concurrency").(string)),
 		MaxErrors:      aws.String(d.Get("max_errors").(string)),
 		TaskType:       aws.String(d.Get("task_type").(string)),
-		ServiceRoleArn: aws.String(d.Get("service_role_arn").(string)),
 		TaskArn:        aws.String(d.Get("task_arn").(string)),
 		Targets:        expandAwsSsmTargets(d.Get("targets").([]interface{})),
 	}
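+	// With service_role_arn now optional, it is only included in the request
+	// when actually configured; when omitted, the parameter is simply not sent,
+	// so SSM can fall back to its default service role (assumed from the intent
+	// of this change, not stated in the patch)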
aws.String(d.Get("task_arn").(string)), Targets: expandAwsSsmTargets(d.Get("targets").([]interface{})), } + if v, ok := d.GetOk("service_role_arn"); ok { + params.ServiceRoleArn = aws.String(v.(string)) + } + if v, ok := d.GetOk("name"); ok { params.Name = aws.String(v.(string)) } @@ -781,12 +784,15 @@ func resourceAwsSsmMaintenanceWindowTaskUpdate(d *schema.ResourceData, meta inte WindowTaskId: aws.String(d.Id()), MaxConcurrency: aws.String(d.Get("max_concurrency").(string)), MaxErrors: aws.String(d.Get("max_errors").(string)), - ServiceRoleArn: aws.String(d.Get("service_role_arn").(string)), TaskArn: aws.String(d.Get("task_arn").(string)), Targets: expandAwsSsmTargets(d.Get("targets").([]interface{})), Replace: aws.Bool(true), } + if v, ok := d.GetOk("service_role_arn"); ok { + params.ServiceRoleArn = aws.String(v.(string)) + } + if v, ok := d.GetOk("name"); ok { params.Name = aws.String(v.(string)) } diff --git a/aws/resource_aws_ssm_maintenance_window_task_test.go b/aws/resource_aws_ssm_maintenance_window_task_test.go index 73292c1a48f..c2d27648cf8 100644 --- a/aws/resource_aws_ssm_maintenance_window_task_test.go +++ b/aws/resource_aws_ssm_maintenance_window_task_test.go @@ -52,6 +52,27 @@ func TestAccAWSSSMMaintenanceWindowTask_basic(t *testing.T) { }) } +func TestAccAWSSSMMaintenanceWindowTask_noRole(t *testing.T) { + var task ssm.MaintenanceWindowTask + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_ssm_maintenance_window_task.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSMMaintenanceWindowTaskDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSSMMaintenanceWindowTaskNoRoleConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSMMaintenanceWindowTaskExists(resourceName, &task), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func TestAccAWSSSMMaintenanceWindowTask_updateForcesNewResource(t *testing.T) { var before, after ssm.MaintenanceWindowTask name := acctest.RandString(10) @@ -378,45 +399,49 @@ resource "aws_ssm_maintenance_window_target" "test" { values = ["tf-acc-test"] } } +`, rName) +} +func testAccAWSSSMMaintenanceWindowTaskConfigBaseIAM(rName string) string { + return fmt.Sprintf(testAccAWSSSMMaintenanceWindowTaskConfigBase(rName)+` resource "aws_iam_role" "test" { - name = %[1]q - assume_role_policy = < Date: Thu, 5 Mar 2020 16:15:49 +0900 Subject: [PATCH 0013/1212] Fixed MalformedPolicyDocument: JSON strings must not have leading spaces MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ```bash ❯ terraform --version Terraform v0.12.21 ❯ terraform apply Error: Error Updating IAM Role (role) Assume Role Policy: MalformedPolicyDocument: JSON strings must not have leading spaces status code: 400 ``` --- .../r/iam_role_policy_attachment.markdown | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/docs/r/iam_role_policy_attachment.markdown b/website/docs/r/iam_role_policy_attachment.markdown index fe8173a221d..040c752c2c3 100644 --- a/website/docs/r/iam_role_policy_attachment.markdown +++ b/website/docs/r/iam_role_policy_attachment.markdown @@ -19,19 +19,19 @@ resource "aws_iam_role" "role" { name = "test-role" assume_role_policy = < Date: Fri, 20 Mar 2020 23:42:38 +0900 Subject: [PATCH 0014/1212] d/aws_api_gateway_domain_name: Add API Gateway Custom Domain lookup --- ...data_source_aws_api_gateway_domain_name.go | 133 
++++++++++++++++++ ...source_aws_api_gateway_domain_name_test.go | 119 ++++++++++++++++ aws/provider.go | 1 + website/aws.erb | 3 + .../d/api_gateway_domain_name.html.markdown | 51 +++++++ 5 files changed, 307 insertions(+) create mode 100644 aws/data_source_aws_api_gateway_domain_name.go create mode 100644 aws/data_source_aws_api_gateway_domain_name_test.go create mode 100644 website/docs/d/api_gateway_domain_name.html.markdown diff --git a/aws/data_source_aws_api_gateway_domain_name.go b/aws/data_source_aws_api_gateway_domain_name.go new file mode 100644 index 00000000000..6cc7e97f5fa --- /dev/null +++ b/aws/data_source_aws_api_gateway_domain_name.go @@ -0,0 +1,133 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" +) + +func dataSourceAwsApiGatewayDomainName() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsApiGatewayDomainNameRead, + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "certificate_arn": { + Type: schema.TypeString, + Computed: true, + }, + "certificate_name": { + Type: schema.TypeString, + Computed: true, + }, + "certificate_upload_date": { + Type: schema.TypeString, + Computed: true, + }, + "cloudfront_domain_name": { + Type: schema.TypeString, + Computed: true, + }, + "cloudfront_zone_id": { + Type: schema.TypeString, + Computed: true, + }, + "domain_name": { + Type: schema.TypeString, + Required: true, + }, + "endpoint_configuration": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "regional_certificate_arn": { + Type: schema.TypeString, + Computed: true, + }, + "regional_certificate_name": { + Type: schema.TypeString, + Computed: true, + }, + "regional_domain_name": { + Type: schema.TypeString, + Computed: true, + }, + "regional_zone_id": { + Type: schema.TypeString, + Computed: true, + }, + "security_policy": { + Type: schema.TypeString, + Computed: true, + }, + "tags": tagsSchema(), + }, + } +} + +func dataSourceAwsApiGatewayDomainNameRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).apigatewayconn + targetDomainName := d.Get("domain_name").(string) + log.Printf("[DEBUG] Reading API Gateway Domain Name %s", targetDomainName) + domainName, err := conn.GetDomainName(&apigateway.GetDomainNameInput{ + DomainName: aws.String(targetDomainName), + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == apigateway.ErrCodeNotFoundException { + return fmt.Errorf("API Gateway Domain Name (%s) not found", targetDomainName) + } + + return err + } + + d.SetId(*domainName.DomainName) + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "apigateway", + Region: meta.(*AWSClient).region, + Resource: fmt.Sprintf("/domainnames/%s", d.Id()), + }.String() + d.Set("arn", arn) + d.Set("certificate_arn", domainName.CertificateArn) + d.Set("certificate_name", domainName.CertificateName) + if err := d.Set("certificate_upload_date", domainName.CertificateUploadDate.Format(time.RFC3339)); err != nil { + log.Printf("[DEBUG] Error setting certificate_upload_date: 
%s", err) + } + d.Set("cloudfront_domain_name", domainName.DistributionDomainName) + d.Set("cloudfront_zone_id", cloudFrontRoute53ZoneID) + d.Set("domain_name", domainName.DomainName) + d.Set("security_policy", domainName.SecurityPolicy) + + if err := d.Set("endpoint_configuration", flattenApiGatewayEndpointConfiguration(domainName.EndpointConfiguration)); err != nil { + return fmt.Errorf("error setting endpoint_configuration: %s", err) + } + + d.Set("regional_certificate_arn", domainName.RegionalCertificateArn) + d.Set("regional_certificate_name", domainName.RegionalCertificateName) + d.Set("regional_domain_name", domainName.RegionalDomainName) + d.Set("regional_zone_id", domainName.RegionalHostedZoneId) + + if err := d.Set("tags", keyvaluetags.ApigatewayKeyValueTags(domainName.Tags).IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + + return nil +} diff --git a/aws/data_source_aws_api_gateway_domain_name_test.go b/aws/data_source_aws_api_gateway_domain_name_test.go new file mode 100644 index 00000000000..bea732b5f01 --- /dev/null +++ b/aws/data_source_aws_api_gateway_domain_name_test.go @@ -0,0 +1,119 @@ +package aws + +import ( + "fmt" + "os" + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" +) + +func TestAccDataSourceAwsApiGatewayDomainName_CertificateArn(t *testing.T) { + certificateArn := os.Getenv("AWS_API_GATEWAY_DOMAIN_NAME_CERTIFICATE_ARN") + if certificateArn == "" { + t.Skip( + "Environment variable AWS_API_GATEWAY_DOMAIN_NAME_CERTIFICATE_ARN is not set. " + + "This environment variable must be set to the ARN of " + + "an ISSUED ACM certificate in us-east-1 to enable this test.") + } + + // This test must always run in us-east-1 + // BadRequestException: Invalid certificate ARN: arn:aws:acm:us-west-2:123456789012:certificate/xxxxx. Certificate must be in 'us-east-1'. 
+ oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + + resourceName := "aws_api_gateway_domain_name.test" + dataSourceName := "data.aws_api_gateway_domain_name.test" + rName := fmt.Sprintf("tf-acc-%s.terraformtest.com", acctest.RandString(8)) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayDomainNameDestroy, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsApiGatewayDomainNameConfig_CertificateArn(rName, certificateArn), + Check: resource.ComposeTestCheckFunc( + testAccMatchResourceAttrRegionalARNNoAccount(dataSourceName, "arn", "apigateway", regexp.MustCompile(`/domainnames/+.`)), + resource.TestCheckResourceAttr(dataSourceName, "domain_name", rName), + resource.TestCheckResourceAttr(dataSourceName, "cloudfront_zone_id", "Z2FDTNDATAQYW2"), + resource.TestCheckResourceAttrPair(resourceName, "arn", dataSourceName, "arn"), + resource.TestCheckResourceAttrPair(resourceName, "domain_name", dataSourceName, "domain_name"), + resource.TestCheckResourceAttrPair(resourceName, "cloudfront_domain_name", dataSourceName, "cloudfront_domain_name"), + resource.TestCheckResourceAttrPair(resourceName, "cloudfront_zone_id", dataSourceName, "cloudfront_zone_id"), + resource.TestCheckResourceAttrPair(resourceName, "certificate_upload_date", dataSourceName, "certificate_upload_date"), + ), + }, + }, + }) +} + +func TestAccDataSourceAwsApiGatewayDomainName_RegionalCertificateArn(t *testing.T) { + resourceName := "aws_api_gateway_domain_name.test" + dataSourceName := "data.aws_api_gateway_domain_name.test" + rName := fmt.Sprintf("tf-acc-%s.terraformtest.com", acctest.RandString(8)) + + key := tlsRsaPrivateKeyPem(2048) + certificate := tlsRsaX509SelfSignedCertificatePem(key, rName) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayDomainNameDestroy, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsApiGatewayDomainNameConfig_RegionalCertificateArn(rName, key, certificate), + Check: resource.ComposeTestCheckFunc( + testAccMatchResourceAttrRegionalARNNoAccount(dataSourceName, "arn", "apigateway", regexp.MustCompile(`/domainnames/+.`)), + resource.TestCheckResourceAttrPair(resourceName, "arn", dataSourceName, "arn"), + resource.TestCheckResourceAttrPair(resourceName, "domain_name", dataSourceName, "domain_name"), + resource.TestCheckResourceAttrPair(resourceName, "regional_domain_name", dataSourceName, "regional_domain_name"), + resource.TestCheckResourceAttrPair(resourceName, "regional_zone_id", dataSourceName, "regional_zone_id"), + resource.TestCheckResourceAttrPair(resourceName, "certificate_upload_date", dataSourceName, "certificate_upload_date"), + ), + }, + }, + }) +} + +func testAccDataSourceAwsApiGatewayDomainNameConfig_CertificateArn(domainName, certificateArn string) string { + return fmt.Sprintf(` +resource "aws_api_gateway_domain_name" "test" { + domain_name = "%s" + certificate_arn = "%s" + + endpoint_configuration { + types = ["EDGE"] + } +} + +data "aws_api_gateway_domain_name" "test" { + domain_name = "${aws_api_gateway_domain_name.test.domain_name}" +} +`, domainName, certificateArn) +} + +func testAccDataSourceAwsApiGatewayDomainNameConfig_RegionalCertificateArn(domainName, key, certificate string) string { + return fmt.Sprintf(` +resource 
"aws_acm_certificate" "test" { + certificate_body = "%[2]s" + private_key = "%[3]s" +} + +resource "aws_api_gateway_domain_name" "test" { + domain_name = %[1]q + regional_certificate_arn = "${aws_acm_certificate.test.arn}" + + endpoint_configuration { + types = ["REGIONAL"] + } +} + +data "aws_api_gateway_domain_name" "test" { + domain_name = "${aws_api_gateway_domain_name.test.domain_name}" +} +`, domainName, tlsPemEscapeNewlines(certificate), tlsPemEscapeNewlines(key)) +} diff --git a/aws/provider.go b/aws/provider.go index 3758c3ae6e2..875358c24de 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -162,6 +162,7 @@ func Provider() terraform.ResourceProvider { "aws_ami": dataSourceAwsAmi(), "aws_ami_ids": dataSourceAwsAmiIds(), "aws_api_gateway_api_key": dataSourceAwsApiGatewayApiKey(), + "aws_api_gateway_domain_name": dataSourceAwsApiGatewayDomainName(), "aws_api_gateway_resource": dataSourceAwsApiGatewayResource(), "aws_api_gateway_rest_api": dataSourceAwsApiGatewayRestApi(), "aws_api_gateway_vpc_link": dataSourceAwsApiGatewayVpcLink(), diff --git a/website/aws.erb b/website/aws.erb index b338883522c..1617294fbed 100644 --- a/website/aws.erb +++ b/website/aws.erb @@ -122,6 +122,9 @@
  • aws_api_gateway_api_key
  • +
  • + aws_api_gateway_domain_name +
  • aws_api_gateway_resource
  • diff --git a/website/docs/d/api_gateway_domain_name.html.markdown b/website/docs/d/api_gateway_domain_name.html.markdown new file mode 100644 index 00000000000..78b63c202ab --- /dev/null +++ b/website/docs/d/api_gateway_domain_name.html.markdown @@ -0,0 +1,51 @@ +--- +subcategory: "API Gateway (REST APIs)" +layout: "aws" +page_title: "AWS: aws_api_gateway_domain_name" +description: |- + Get information on a custom domain name for use with AWS API Gateway. +--- + +# Data Source: aws_api_gateway_domain_name + +Use this data source to get the custom domain name for use with AWS API Gateway. + +## Example Usage + +```hcl +resource "aws_api_gateway_domain_name" "example" { + domain_name = "api.example.com" +} +``` + +## Argument Reference + + * `domain_name` - (Required) The fully-qualified domain name to look up. + If no domain name is found, an error will be returned. + +## Attributes Reference + +In addition to the arguments, the following attributes are exported: + + * `arn` - The ARN of the found custom domain name. + * `certificate_arn` - The ARN for an AWS-managed certificate + that is used by edge-optimized endpoint for this domain name. + * `certificate_name` - The name of the certificate that is used by + edge-optimized endpoint for this domain name. + * `certificate_upload_date` - The upload date associated with + the domain certificate. + * `cloudfront_domain_name` - The hostname created by Cloudfront to represent + the distribution that implements this domain name mapping. + * `cloudfront_zone_id` - For convenience, the hosted zone ID (`Z2FDTNDATAQYW2`) + that can be used to create a Route53 alias record for the distribution. + * `endpoint_configuration` - The endpoint configuration of this domain name + showing the endpoint types of the domain name. + * `regional_certificate_arn` - The ARN for an AWS-managed certificate + that is used for validating the regional domain name. + * `regional_certificate_name` - The user-friendly name of the certificate + that is used by regional endpoint for this domain name. + * `regional_domain_name` - The hostname for the custom domain's + regional endpoint. + * `regional_zone_id` - The hosted zone ID that can be used to create + a Route53 alias record for the regional endpoint. + * `tags` - A mapping of tags for the resource. \ No newline at end of file From a58737eeee6b9eae74c3c78de95ea2893a5ca378 Mon Sep 17 00:00:00 2001 From: Bumsoo Kim Date: Sat, 21 Mar 2020 00:10:55 +0900 Subject: [PATCH 0015/1212] fix website formatting --- website/docs/d/api_gateway_domain_name.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/d/api_gateway_domain_name.html.markdown b/website/docs/d/api_gateway_domain_name.html.markdown index 78b63c202ab..c782ab76f42 100644 --- a/website/docs/d/api_gateway_domain_name.html.markdown +++ b/website/docs/d/api_gateway_domain_name.html.markdown @@ -14,7 +14,7 @@ Use this data source to get the custom domain name for use with AWS API Gateway. ```hcl resource "aws_api_gateway_domain_name" "example" { - domain_name = "api.example.com" + domain_name = "api.example.com" } ``` @@ -48,4 +48,4 @@ In addition to the arguments, the following attributes are exported: regional endpoint. * `regional_zone_id` - The hosted zone ID that can be used to create a Route53 alias record for the regional endpoint. - * `tags` - A mapping of tags for the resource. \ No newline at end of file + * `tags` - A mapping of tags for the resource. 
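A usage sketch for the new data source (the Route53 zone and record names are assumed, not part of the patch; the attributes referenced are the ones exported above). For an edge-optimized domain, the CloudFront attributes can feed a Route53 alias record:

```hcl
data "aws_api_gateway_domain_name" "example" {
  domain_name = "api.example.com"
}

resource "aws_route53_record" "example" {
  zone_id = "${aws_route53_zone.example.zone_id}"
  name    = "${data.aws_api_gateway_domain_name.example.domain_name}"
  type    = "A"

  alias {
    evaluate_target_health = false
    name                   = "${data.aws_api_gateway_domain_name.example.cloudfront_domain_name}"
    zone_id                = "${data.aws_api_gateway_domain_name.example.cloudfront_zone_id}"
  }
}
```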
From 3da7baab40f0f2251403b5f4c0f3cc7ff84d8fee Mon Sep 17 00:00:00 2001 From: sanzalb Date: Wed, 20 May 2020 09:18:41 +0200 Subject: [PATCH 0016/1212] Added CIDR block set to datasource VPC peering --- aws/data_source_aws_vpc_peering_connection.go | 46 +++++++++++++++++++ ..._source_aws_vpc_peering_connection_test.go | 38 ++++++++++++++- 2 files changed, 83 insertions(+), 1 deletion(-) diff --git a/aws/data_source_aws_vpc_peering_connection.go b/aws/data_source_aws_vpc_peering_connection.go index 4e58eab1e8c..871dd5c74d1 100644 --- a/aws/data_source_aws_vpc_peering_connection.go +++ b/aws/data_source_aws_vpc_peering_connection.go @@ -40,6 +40,19 @@ func dataSourceAwsVpcPeeringConnection() *schema.Resource { Optional: true, Computed: true, }, + "cidr_block_set": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cidr_block": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, "region": { Type: schema.TypeString, Optional: true, @@ -60,6 +73,19 @@ func dataSourceAwsVpcPeeringConnection() *schema.Resource { Optional: true, Computed: true, }, + "peer_cidr_block_set": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cidr_block": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, "peer_region": { Type: schema.TypeString, Optional: true, @@ -138,10 +164,30 @@ func dataSourceAwsVpcPeeringConnectionRead(d *schema.ResourceData, meta interfac d.Set("vpc_id", pcx.RequesterVpcInfo.VpcId) d.Set("owner_id", pcx.RequesterVpcInfo.OwnerId) d.Set("cidr_block", pcx.RequesterVpcInfo.CidrBlock) + cidrBlockSet := []interface{}{} + for _, associationSet := range pcx.RequesterVpcInfo.CidrBlockSet { + association := map[string]interface{}{ + "cidr_block": aws.StringValue(associationSet.CidrBlock), + } + cidrBlockSet = append(cidrBlockSet, association) + } + if err := d.Set("cidr_block_set", cidrBlockSet); err != nil { + return fmt.Errorf("error setting cidr_block_set: %s", err) + } d.Set("region", pcx.RequesterVpcInfo.Region) d.Set("peer_vpc_id", pcx.AccepterVpcInfo.VpcId) d.Set("peer_owner_id", pcx.AccepterVpcInfo.OwnerId) d.Set("peer_cidr_block", pcx.AccepterVpcInfo.CidrBlock) + peerCidrBlockSet := []interface{}{} + for _, associationSet := range pcx.AccepterVpcInfo.CidrBlockSet { + association := map[string]interface{}{ + "cidr_block": aws.StringValue(associationSet.CidrBlock), + } + peerCidrBlockSet = append(peerCidrBlockSet, association) + } + if err := d.Set("peer_cidr_block_set", peerCidrBlockSet); err != nil { + return fmt.Errorf("error setting peer_cidr_block_set: %s", err) + } d.Set("peer_region", pcx.AccepterVpcInfo.Region) if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(pcx.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { return fmt.Errorf("error setting tags: %s", err) diff --git a/aws/data_source_aws_vpc_peering_connection_test.go b/aws/data_source_aws_vpc_peering_connection_test.go index 56911c680eb..7596824e6e1 100644 --- a/aws/data_source_aws_vpc_peering_connection_test.go +++ b/aws/data_source_aws_vpc_peering_connection_test.go @@ -19,21 +19,45 @@ func TestAccDataSourceAwsVpcPeeringConnection_basic(t *testing.T) { testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_id"), resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "id"), resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "cidr_block"), + 
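+					// the new checks below assert both the requester side (cidr_block_set)
+					// and the accepter side (peer_cidr_block_set); indexes 0 and 1 are the
+					// primary and the secondary CIDR associated in the config below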
resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "cidr_block_set.0.cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "cidr_block_set.1.cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "cidr_block_set.0.cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "peer_cidr_block_set.1.cidr_block"), testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_requester_vpc_id"), resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_vpc_id", "id"), resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_vpc_id", "cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_vpc_id", "cidr_block_set.0.cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_vpc_id", "cidr_block_set.1.cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_vpc_id", "cidr_block_set.0.cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_vpc_id", "peer_cidr_block_set.1.cidr_block"), testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_accepter_vpc_id"), resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_vpc_id", "id"), resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_vpc_id", "cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_vpc_id", "cidr_block_set.0.cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_vpc_id", "cidr_block_set.1.cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_vpc_id", "cidr_block_set.0.cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_vpc_id", "peer_cidr_block_set.1.cidr_block"), testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_requester_cidr_block"), resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_cidr_block", "id"), resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_cidr_block", "cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_cidr_block", "cidr_block_set.0.cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_cidr_block", "cidr_block_set.1.cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_cidr_block", "cidr_block_set.0.cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_cidr_block", "peer_cidr_block_set.1.cidr_block"), testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_accepter_cidr_block"), resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_cidr_block", "id"), resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_cidr_block", "cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_cidr_block", "cidr_block_set.0.cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_cidr_block", 
"cidr_block_set.1.cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_cidr_block", "cidr_block_set.0.cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_cidr_block", "peer_cidr_block_set.1.cidr_block"), testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_owner_ids"), resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_owner_ids", "id"), resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_owner_ids", "cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_owner_ids", "cidr_block_set.0.cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_owner_ids", "cidr_block_set.1.cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_owner_ids", "cidr_block_set.0.cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_owner_ids", "peer_cidr_block_set.1.cidr_block"), ), ExpectNonEmptyPlan: true, }, @@ -76,6 +100,11 @@ resource "aws_vpc" "foo" { } } +resource "aws_vpc_ipv4_cidr_block_association" "foo_secondary_cidr" { + vpc_id = "${aws_vpc.foo.id}" + cidr_block = "10.4.0.0/16" +} + resource "aws_vpc" "bar" { cidr_block = "10.2.0.0/16" @@ -84,6 +113,11 @@ resource "aws_vpc" "bar" { } } +resource "aws_vpc_ipv4_cidr_block_association" "bar_secondary_cidr" { + vpc_id = "${aws_vpc.bar.id}" + cidr_block = "10.8.0.0/16" +} + resource "aws_vpc_peering_connection" "test" { vpc_id = "${aws_vpc.foo.id}" peer_vpc_id = "${aws_vpc.bar.id}" @@ -91,7 +125,9 @@ resource "aws_vpc_peering_connection" "test" { tags = { Name = "terraform-testacc-vpc-peering-connection-data-source-foo-to-bar" - } + } + + depends_on = ["aws_vpc_ipv4_cidr_block_association.foo_secondary_cidr", "aws_vpc_ipv4_cidr_block_association.bar_secondary_cidr"] } data "aws_caller_identity" "current" {} From 46c6b0e3a99eca7540f794ebba339dab4505e1ef Mon Sep 17 00:00:00 2001 From: sanzalb Date: Wed, 20 May 2020 15:53:10 +0200 Subject: [PATCH 0017/1212] Moved CIDR block set checks to another test --- ..._source_aws_vpc_peering_connection_test.go | 101 +++++++++++------- 1 file changed, 64 insertions(+), 37 deletions(-) diff --git a/aws/data_source_aws_vpc_peering_connection_test.go b/aws/data_source_aws_vpc_peering_connection_test.go index 7596824e6e1..0d3360acda2 100644 --- a/aws/data_source_aws_vpc_peering_connection_test.go +++ b/aws/data_source_aws_vpc_peering_connection_test.go @@ -19,45 +19,21 @@ func TestAccDataSourceAwsVpcPeeringConnection_basic(t *testing.T) { testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_id"), resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "id"), resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "cidr_block_set.0.cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "cidr_block_set.1.cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "cidr_block_set.0.cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "peer_cidr_block_set.1.cidr_block"), testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_requester_vpc_id"), 
resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_vpc_id", "id"), resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_vpc_id", "cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_vpc_id", "cidr_block_set.0.cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_vpc_id", "cidr_block_set.1.cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_vpc_id", "cidr_block_set.0.cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_vpc_id", "peer_cidr_block_set.1.cidr_block"), testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_accepter_vpc_id"), resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_vpc_id", "id"), resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_vpc_id", "cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_vpc_id", "cidr_block_set.0.cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_vpc_id", "cidr_block_set.1.cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_vpc_id", "cidr_block_set.0.cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_vpc_id", "peer_cidr_block_set.1.cidr_block"), testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_requester_cidr_block"), resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_cidr_block", "id"), resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_cidr_block", "cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_cidr_block", "cidr_block_set.0.cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_cidr_block", "cidr_block_set.1.cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_cidr_block", "cidr_block_set.0.cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_cidr_block", "peer_cidr_block_set.1.cidr_block"), testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_accepter_cidr_block"), resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_cidr_block", "id"), resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_cidr_block", "cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_cidr_block", "cidr_block_set.0.cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_cidr_block", "cidr_block_set.1.cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_cidr_block", "cidr_block_set.0.cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_cidr_block", "peer_cidr_block_set.1.cidr_block"), testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_owner_ids"), resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_owner_ids", "id"), resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_owner_ids", 
"cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_owner_ids", "cidr_block_set.0.cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_owner_ids", "cidr_block_set.1.cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_owner_ids", "cidr_block_set.0.cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_owner_ids", "peer_cidr_block_set.1.cidr_block"), ), ExpectNonEmptyPlan: true, }, @@ -65,6 +41,25 @@ func TestAccDataSourceAwsVpcPeeringConnection_basic(t *testing.T) { }) } +func TestAccDataSourceAwsVpcPeeringConnection_cidBlockSets(t *testing.T) { + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsVpcPeeringConnectionCidrBlockSetConfig, + Check: resource.ComposeTestCheckFunc( + testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_id"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "cidr_block_set.0.cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "cidr_block_set.1.cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "peer_cidr_block_set.0.cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "peer_cidr_block_set.1.cidr_block"), + ), + }, + }, + }) +} + func testAccDataSourceAwsVpcPeeringConnectionCheck(name string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[name] @@ -100,11 +95,6 @@ resource "aws_vpc" "foo" { } } -resource "aws_vpc_ipv4_cidr_block_association" "foo_secondary_cidr" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "10.4.0.0/16" -} - resource "aws_vpc" "bar" { cidr_block = "10.2.0.0/16" @@ -113,11 +103,6 @@ resource "aws_vpc" "bar" { } } -resource "aws_vpc_ipv4_cidr_block_association" "bar_secondary_cidr" { - vpc_id = "${aws_vpc.bar.id}" - cidr_block = "10.8.0.0/16" -} - resource "aws_vpc_peering_connection" "test" { vpc_id = "${aws_vpc.foo.id}" peer_vpc_id = "${aws_vpc.bar.id}" @@ -125,9 +110,7 @@ resource "aws_vpc_peering_connection" "test" { tags = { Name = "terraform-testacc-vpc-peering-connection-data-source-foo-to-bar" - } - - depends_on = ["aws_vpc_ipv4_cidr_block_association.foo_secondary_cidr", "aws_vpc_ipv4_cidr_block_association.bar_secondary_cidr"] + } } data "aws_caller_identity" "current" {} @@ -170,3 +153,47 @@ data "aws_vpc_peering_connection" "test_by_owner_ids" { depends_on = ["aws_vpc_peering_connection.test"] } ` + +const testAccDataSourceAwsVpcPeeringConnectionCidrBlockSetConfig = ` +resource "aws_vpc" "foo" { + cidr_block = "10.4.0.0/16" + + tags = { + Name = "terraform-testacc-vpc-peering-connection-data-source-foo-cidr-block-set" + } +} + +resource "aws_vpc_ipv4_cidr_block_association" "foo_secondary_cidr" { + vpc_id = "${aws_vpc.foo.id}" + cidr_block = "10.5.0.0/16" +} + +resource "aws_vpc" "bar" { + cidr_block = "10.6.0.0/16" + + tags = { + Name = "terraform-testacc-vpc-peering-connection-data-source-bar-cidr-block-set" + } +} + +resource "aws_vpc_ipv4_cidr_block_association" "bar_secondary_cidr" { + vpc_id = "${aws_vpc.bar.id}" + cidr_block = "10.7.0.0/16" +} + +resource "aws_vpc_peering_connection" "test" { + vpc_id = "${aws_vpc.foo.id}" + peer_vpc_id = "${aws_vpc.bar.id}" + auto_accept = true + + tags = { + Name = 
"terraform-testacc-vpc-peering-connection-data-source-foo-to-bar-cidr-block-set" + } + + depends_on = ["aws_vpc_ipv4_cidr_block_association.foo_secondary_cidr", "aws_vpc_ipv4_cidr_block_association.bar_secondary_cidr"] +} + +data "aws_vpc_peering_connection" "test_by_id" { + id = "${aws_vpc_peering_connection.test.id}" +} +` From 9cdcec2dd2e9536c1603f7860a2eb659c70f7bb8 Mon Sep 17 00:00:00 2001 From: sanzalb Date: Wed, 20 May 2020 15:53:18 +0200 Subject: [PATCH 0018/1212] Updated docs --- website/docs/d/vpc_peering_connection.html.markdown | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/website/docs/d/vpc_peering_connection.html.markdown b/website/docs/d/vpc_peering_connection.html.markdown index 324f1ab570f..c92abb590a7 100644 --- a/website/docs/d/vpc_peering_connection.html.markdown +++ b/website/docs/d/vpc_peering_connection.html.markdown @@ -46,7 +46,7 @@ The given filters must match exactly one VPC peering connection whose data will * `owner_id` - (Optional) The AWS account ID of the owner of the requester VPC of the specific VPC Peering Connection to retrieve. -* `cidr_block` - (Optional) The CIDR block of the requester VPC of the specific VPC Peering Connection to retrieve. +* `cidr_block` - (Optional) The primary CIDR block of the requester VPC of the specific VPC Peering Connection to retrieve. * `region` - (Optional) The region of the requester VPC of the specific VPC Peering Connection to retrieve. @@ -54,7 +54,7 @@ The given filters must match exactly one VPC peering connection whose data will * `peer_owner_id` - (Optional) The AWS account ID of the owner of the accepter VPC of the specific VPC Peering Connection to retrieve. -* `peer_cidr_block` - (Optional) The CIDR block of the accepter VPC of the specific VPC Peering Connection to retrieve. +* `peer_cidr_block` - (Optional) The primary CIDR block of the accepter VPC of the specific VPC Peering Connection to retrieve. * `peer_region` - (Optional) The region of the accepter VPC of the specific VPC Peering Connection to retrieve. @@ -82,6 +82,10 @@ All of the argument attributes except `filter` are also exported as result attri * `requester` - A configuration block that describes [VPC Peering Connection] (http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide) options set for the requester VPC. +* `cidr_block_set` - (Optional) The list of all CIDR blocks of the requester VPC of the specific VPC Peering Connection to retrieve. + +* `peer_cidr_block_set` - (Optional) The list of all CIDR blocks of the accepter VPC of the specific VPC Peering Connection to retrieve. + #### Accepter and Requester Attributes Reference * `allow_remote_vpc_dns_resolution` - Indicates whether a local VPC can resolve public DNS hostnames to @@ -92,3 +96,7 @@ with the peer VPC over the VPC peering connection. * `allow_vpc_to_remote_classic_link` - Indicates whether a local VPC can communicate with a ClassicLink connection in the peer VPC over the VPC peering connection. + +#### CIDR block set Attributes Reference + +* `cidr_block` - A CIDR block associated to the VPC of the specific VPC Peering Connection. 
From b6f983bdb5206ee3baec70dc61e778bf71291a44 Mon Sep 17 00:00:00 2001 From: sanzalb Date: Fri, 22 May 2020 08:18:58 +0200 Subject: [PATCH 0019/1212] Fix typo --- aws/data_source_aws_vpc_peering_connection_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/data_source_aws_vpc_peering_connection_test.go b/aws/data_source_aws_vpc_peering_connection_test.go index 0d3360acda2..7a43cf9bea9 100644 --- a/aws/data_source_aws_vpc_peering_connection_test.go +++ b/aws/data_source_aws_vpc_peering_connection_test.go @@ -41,7 +41,7 @@ func TestAccDataSourceAwsVpcPeeringConnection_basic(t *testing.T) { }) } -func TestAccDataSourceAwsVpcPeeringConnection_cidBlockSets(t *testing.T) { +func TestAccDataSourceAwsVpcPeeringConnection_cidrBlockSets(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, From fa8de7725c8299b7820d5773a79f0b4984c2f539 Mon Sep 17 00:00:00 2001 From: Saurabh Hirani Date: Tue, 26 May 2020 19:47:31 +0530 Subject: [PATCH 0020/1212] Add feature to concurrently update/del AWS API Gateway method settings --- ...esource_aws_api_gateway_method_settings.go | 23 +++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_api_gateway_method_settings.go b/aws/resource_aws_api_gateway_method_settings.go index eeba69d1b3e..50b349366b2 100644 --- a/aws/resource_aws_api_gateway_method_settings.go +++ b/aws/resource_aws_api_gateway_method_settings.go @@ -4,12 +4,17 @@ import ( "fmt" "log" + "sync" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/apigateway" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" ) +var resourceAwsApiGatewayMethodSettingsUpdateMutex = &sync.Mutex{} +var resourceAwsApiGatewayMethodSettingsDeleteMutex = &sync.Mutex{} + func resourceAwsApiGatewayMethodSettings() *schema.Resource { return &schema.Resource{ Create: resourceAwsApiGatewayMethodSettingsUpdate, @@ -214,7 +219,14 @@ func resourceAwsApiGatewayMethodSettingsUpdate(d *schema.ResourceData, meta inte PatchOperations: ops, } log.Printf("[DEBUG] Updating API Gateway Stage: %s", input) - _, err := conn.UpdateStage(&input) + + resourceAwsApiGatewayMethodSettingsUpdateMutex.Lock() + defer resourceAwsApiGatewayMethodSettingsUpdateMutex.Unlock() + + _, err := retryOnAwsCode(apigateway.ErrCodeConflictException, func() (interface{}, error) { + return conn.UpdateStage(&input) + }) + if err != nil { return fmt.Errorf("Updating API Gateway Stage failed: %s", err) } @@ -239,7 +251,14 @@ func resourceAwsApiGatewayMethodSettingsDelete(d *schema.ResourceData, meta inte }, } log.Printf("[DEBUG] Updating API Gateway Stage: %s", input) - _, err := conn.UpdateStage(&input) + + resourceAwsApiGatewayMethodSettingsDeleteMutex.Lock() + defer resourceAwsApiGatewayMethodSettingsDeleteMutex.Unlock() + + _, err := retryOnAwsCode(apigateway.ErrCodeConflictException, func() (interface{}, error) { + return conn.UpdateStage(&input) + }) + if err != nil { return fmt.Errorf("Updating API Gateway Stage failed: %s", err) } From 50c348f0cedc53745782d947d4bc230b4f203e45 Mon Sep 17 00:00:00 2001 From: 1newsr <35871065+1newsr@users.noreply.github.com> Date: Wed, 3 Jun 2020 18:08:04 +0900 Subject: [PATCH 0021/1212] fix fmt --- aws/resource_aws_elasticache_replication_group_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go 
index d3afcdc1c42..fc19b982530 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -365,7 +365,7 @@ func TestAccAWSElasticacheReplicationGroup_redisClusterInVpc2(t *testing.T) { resourceName, "snapshot_retention_limit", "7"), resource.TestCheckResourceAttrSet( resourceName, "primary_endpoint_address"), - resource.TestCheckResourceAttrSet( + resource.TestCheckResourceAttrSet( resourceName, "reader_endpoint_address"), ), }, From 88a55ee9a10fdfea5e5550135c221e531677948c Mon Sep 17 00:00:00 2001 From: Paul Cantea Date: Tue, 23 Jun 2020 10:39:19 -0700 Subject: [PATCH 0022/1212] Add `multi_az_enabled` parameter to `aws_elasticache_replication_group` --- ...ource_aws_elasticache_replication_group.go | 26 ++++++++++++++++++- ..._aws_elasticache_replication_group_test.go | 16 +++++++++--- 2 files changed, 37 insertions(+), 5 deletions(-) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index e1a3f9a4037..98d445fcd7b 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -119,6 +119,11 @@ func resourceAwsElasticacheReplicationGroup() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, + "multi_az_enabled": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, "node_type": { Type: schema.TypeString, Optional: true, @@ -308,6 +313,10 @@ func resourceAwsElasticacheReplicationGroupCreate(d *schema.ResourceData, meta i params.PreferredMaintenanceWindow = aws.String(v.(string)) } + if _, ok := d.GetOk("multi_az_enabled"); ok { + params.MultiAZEnabled = aws.Bool(d.Get("multi_az_enabled").(bool)) + } + if v, ok := d.GetOk("notification_topic_arn"); ok { params.NotificationTopicArn = aws.String(v.(string)) } @@ -437,8 +446,18 @@ func resourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta int } } - d.Set("kms_key_id", rgp.KmsKeyId) + if rgp.MultiAZ != nil { + switch strings.ToLower(*rgp.MultiAZ) { + case "enabled": + d.Set("multi_az_enabled", true) + case "disabled": + d.Set("multi_az_enabled", false) + default: + log.Printf("Unknown MultiAZ state %s", *rgp.MultiAZ) + } + } + d.Set("kms_key_id", rgp.KmsKeyId) d.Set("replication_group_description", rgp.Description) d.Set("number_cache_clusters", len(rgp.MemberClusters)) if err := d.Set("member_clusters", flattenStringList(rgp.MemberClusters)); err != nil { @@ -741,6 +760,11 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i requestUpdate = true } + if d.HasChange("multi_az_enabled") { + params.MultiAZEnabled = aws.Bool(d.Get("multi_az_enabled").(bool)) + requestUpdate = true + } + if d.HasChange("notification_topic_arn") { params.NotificationTopicArn = aws.String(d.Get("notification_topic_arn").(string)) requestUpdate = true diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go index c434043cac6..0030ef694c4 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -322,6 +322,8 @@ func TestAccAWSElasticacheReplicationGroup_multiAzInVpc(t *testing.T) { resourceName, "number_cache_clusters", "2"), resource.TestCheckResourceAttr( resourceName, "automatic_failover_enabled", "true"), + resource.TestCheckResourceAttr( + resourceName, "multi_az_enabled", "true"), resource.TestCheckResourceAttr( resourceName, 
"snapshot_window", "02:00-03:00"), resource.TestCheckResourceAttr( @@ -692,6 +694,7 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFail Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &replicationGroup), resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "multi_az_enabled", "true"), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "3"), ), }, @@ -704,6 +707,7 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFail var input *elasticache.ModifyReplicationGroupInput = &elasticache.ModifyReplicationGroupInput{ ApplyImmediately: aws.Bool(true), AutomaticFailoverEnabled: aws.Bool(false), + MultiAZEnabled: aws.Bool(false), ReplicationGroupId: aws.String(rName), } if _, err := conn.ModifyReplicationGroup(input); err != nil { @@ -730,6 +734,7 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFail input = &elasticache.ModifyReplicationGroupInput{ ApplyImmediately: aws.Bool(true), AutomaticFailoverEnabled: aws.Bool(true), + MultiAZEnabled: aws.Bool(true), ReplicationGroupId: aws.String(rName), } if _, err := conn.ModifyReplicationGroup(input); err != nil { @@ -743,6 +748,7 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFail Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &replicationGroup), resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "multi_az_enabled", "true"), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), ), }, @@ -1049,15 +1055,15 @@ data "aws_availability_zones" "available" { } } resource "aws_vpc" "test" { - cidr_block = "192.168.0.0/16" + cidr_block = "192.168.0.0/16" tags = { Name = "terraform-testacc-elasticache-replication-group-multi-az-in-vpc" } } resource "aws_subnet" "test" { - vpc_id = "${aws_vpc.test.id}" - cidr_block = "192.168.0.0/20" - availability_zone = "${data.aws_availability_zones.available.names[0]}" + vpc_id = "${aws_vpc.test.id}" + cidr_block = "192.168.0.0/20" + availability_zone = "${data.aws_availability_zones.available.names[0]}" tags = { Name = "tf-acc-elasticache-replication-group-multi-az-in-vpc-foo" } @@ -1099,6 +1105,7 @@ resource "aws_elasticache_replication_group" "test" { security_group_ids = ["${aws_security_group.test.id}"] availability_zones = ["${data.aws_availability_zones.available.names[0]}","${data.aws_availability_zones.available.names[1]}"] automatic_failover_enabled = true + multi_az_enabled = true snapshot_window = "02:00-03:00" snapshot_retention_limit = 7 } @@ -1581,6 +1588,7 @@ resource "aws_elasticache_subnet_group" "test" { resource "aws_elasticache_replication_group" "test" { # InvalidParameterCombination: Automatic failover is not supported for T1 and T2 cache node types. 
   automatic_failover_enabled = %[2]t
+  multi_az_enabled           = %[2]t
   node_type                  = "cache.m3.medium"
   number_cache_clusters      = %[3]d
   replication_group_id       = "%[1]s"

From 45876d03d74ac031404ebe69a27ec3b86f64e3ca Mon Sep 17 00:00:00 2001
From: Paul Cantea
Date: Tue, 23 Jun 2020 11:09:47 -0700
Subject: [PATCH 0023/1212] Update website docs

---
 website/docs/r/elasticache_replication_group.html.markdown | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/website/docs/r/elasticache_replication_group.html.markdown b/website/docs/r/elasticache_replication_group.html.markdown
index b6c9c5aa0d8..b961c4d1b1b 100644
--- a/website/docs/r/elasticache_replication_group.html.markdown
+++ b/website/docs/r/elasticache_replication_group.html.markdown
@@ -102,7 +102,8 @@ The following arguments are supported:
 * `replication_group_description` – (Required) A user-created description for the replication group.
 * `number_cache_clusters` - (Required for Cluster Mode Disabled) The number of cache clusters (primary and replicas) this replication group will have. If Multi-AZ is enabled, the value of this parameter must be at least 2. Updates will occur before other modifications.
 * `node_type` - (Required) The compute and memory capacity of the nodes in the node group.
-* `automatic_failover_enabled` - (Optional) Specifies whether a read-only replica will be automatically promoted to read/write primary if the existing primary fails. If true, Multi-AZ is enabled for this replication group. If false, Multi-AZ is disabled for this replication group. Must be enabled for Redis (cluster mode enabled) replication groups. Defaults to `false`.
+* `automatic_failover_enabled` - (Optional) Specifies whether a read-only replica will be automatically promoted to read/write primary if the existing primary fails. Must be enabled for Redis (cluster mode enabled) replication groups. Defaults to `false`.
+* `multi_az_enabled` - (Optional) Specifies whether to enable Multi-AZ support for the replication group. Defaults to `false`.
 * `auto_minor_version_upgrade` - (Optional) Specifies whether a minor engine upgrades will be applied automatically to the underlying Cache Cluster instances during the maintenance window. Defaults to `true`.
 * `availability_zones` - (Optional) A list of EC2 availability zones in which the replication group's cache clusters will be created. The order of the availability zones in the list is not important.
 * `engine` - (Optional) The name of the cache engine to be used for the clusters in this replication group. e.g. `redis`

From d283ae50e880594342eff1de4a2dc563459ff051 Mon Sep 17 00:00:00 2001
From: William Perron <37561740+wperron@users.noreply.github.com>
Date: Mon, 11 May 2020 21:37:45 -0400
Subject: [PATCH 0024/1212] added `operation_name` attribute to API Gateway
 method

This commit adds the `operation_name` attribute to the
`aws_api_gateway_method` resource. This attribute allows users to
specify a custom function name to use when API Gateway generates an
SDK. The attribute is already supported by the AWS API and SDKs so
there was really no reason not to include it. It also made it a
pretty straightforward change.
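As a sketch of the intended usage (the REST API and resource references here are illustrative):

```hcl
resource "aws_api_gateway_method" "example" {
  rest_api_id   = "${aws_api_gateway_rest_api.example.id}"
  resource_id   = "${aws_api_gateway_resource.example.id}"
  http_method   = "GET"
  authorization = "NONE"

  # Generated SDKs expose this method as getExample() rather than a
  # name derived from the resource path and HTTP verb.
  operation_name = "getExample"
}
```

Omitting `operation_name` falls back to a name API Gateway derives from the path and verb, as the documentation change in this patch notes.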
This commit also includes: * test cases for the new attribute * updated documentation for the resource Closes #13232 --- aws/resource_aws_api_gateway_method.go | 25 ++++ aws/resource_aws_api_gateway_method_test.go | 107 ++++++++++++++++++ .../docs/r/api_gateway_method.html.markdown | 1 + 3 files changed, 133 insertions(+) diff --git a/aws/resource_aws_api_gateway_method.go b/aws/resource_aws_api_gateway_method.go index bfbe5af1dcb..040d87fd7dd 100644 --- a/aws/resource_aws_api_gateway_method.go +++ b/aws/resource_aws_api_gateway_method.go @@ -100,6 +100,11 @@ func resourceAwsApiGatewayMethod() *schema.Resource { Type: schema.TypeString, Optional: true, }, + + "operation_name": { + Type: schema.TypeString, + Optional: true, + }, }, } } @@ -147,6 +152,10 @@ func resourceAwsApiGatewayMethodCreate(d *schema.ResourceData, meta interface{}) input.RequestValidatorId = aws.String(v.(string)) } + if v, ok := d.GetOk("operation_name"); ok { + input.OperationName = aws.String(v.(string)) + } + _, err := conn.PutMethod(&input) if err != nil { return fmt.Errorf("Error creating API Gateway Method: %s", err) @@ -196,6 +205,8 @@ func resourceAwsApiGatewayMethodRead(d *schema.ResourceData, meta interface{}) e d.Set("request_validator_id", out.RequestValidatorId) + d.Set("operation_name", out.OperationName) + return nil } @@ -304,6 +315,20 @@ func resourceAwsApiGatewayMethodUpdate(d *schema.ResourceData, meta interface{}) }) } + if d.HasChange("operation_name") { + var operation_name *string + if v, ok := d.GetOk("operation_name"); ok { + if s := v.(string); len(s) > 0 { + operation_name = &s + } + } + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/operationName"), + Value: operation_name, + }) + } + method, err := conn.UpdateMethod(&apigateway.UpdateMethodInput{ HttpMethod: aws.String(d.Get("http_method").(string)), ResourceId: aws.String(d.Get("resource_id").(string)), diff --git a/aws/resource_aws_api_gateway_method_test.go b/aws/resource_aws_api_gateway_method_test.go index 1d7a5923229..e5cf48f2879 100644 --- a/aws/resource_aws_api_gateway_method_test.go +++ b/aws/resource_aws_api_gateway_method_test.go @@ -195,6 +195,50 @@ func TestAccAWSAPIGatewayMethod_customrequestvalidator(t *testing.T) { }) } +func TestAccAWSAPIGatewayMethod_customoperationname(t *testing.T) { + var conf apigateway.Method + rInt := acctest.RandInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayMethodDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayMethodConfigWithCustomOperationName(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayMethodExists("aws_api_gateway_method.test", &conf), + testAccCheckAWSAPIGatewayMethodAttributes(&conf), + resource.TestCheckResourceAttr( + "aws_api_gateway_method.test", "http_method", "GET"), + resource.TestCheckResourceAttr( + "aws_api_gateway_method.test", "authorization", "NONE"), + resource.TestCheckResourceAttr( + "aws_api_gateway_method.test", "request_models.application/json", "Error"), + resource.TestCheckResourceAttr( + "aws_api_gateway_method.test", "operation_name", "getTest"), + ), + }, + { + ResourceName: "aws_api_gateway_method.test", + ImportState: true, + ImportStateIdFunc: testAccAWSAPIGatewayMethodImportStateIdFunc("aws_api_gateway_method.test"), + ImportStateVerify: true, + }, + + { + Config: 
testAccAWSAPIGatewayMethodConfigWithCustomOperationNameUpdate(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayMethodExists("aws_api_gateway_method.test", &conf), + testAccCheckAWSAPIGatewayMethodAttributesUpdate(&conf), + resource.TestCheckResourceAttr( + "aws_api_gateway_method.test", "operation_name", "describeTest"), + ), + }, + }, + }) +} + func testAccCheckAWSAPIGatewayMethodAttributes(conf *apigateway.Method) resource.TestCheckFunc { return func(s *terraform.State) error { if *conf.HttpMethod != "GET" { @@ -722,3 +766,66 @@ resource "aws_api_gateway_method" "test" { } `, rInt) } + +func testAccAWSAPIGatewayMethodConfigWithCustomOperationName(rInt int) string { + return fmt.Sprintf(` +resource "aws_api_gateway_rest_api" "test" { + name = "tf-acc-test-apig-method-custom-op-name-%d" +} + +resource "aws_api_gateway_resource" "test" { + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + parent_id = "${aws_api_gateway_rest_api.test.root_resource_id}" + path_part = "test" +} + +resource "aws_api_gateway_method" "test" { + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + resource_id = "${aws_api_gateway_resource.test.id}" + http_method = "GET" + authorization = "NONE" + + request_models = { + "application/json" = "Error" + } + + request_parameters = { + "method.request.header.Content-Type" = false + "method.request.querystring.page" = true + } + + operation_name = "getTest" +} +`, rInt) +} + +func testAccAWSAPIGatewayMethodConfigWithCustomOperationNameUpdate(rInt int) string { + return fmt.Sprintf(` +resource "aws_api_gateway_rest_api" "test" { + name = "tf-acc-test-apig-method-custom-op-name-%d" +} + +resource "aws_api_gateway_resource" "test" { + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + parent_id = "${aws_api_gateway_rest_api.test.root_resource_id}" + path_part = "test" +} + +resource "aws_api_gateway_method" "test" { + rest_api_id = "${aws_api_gateway_rest_api.test.id}" + resource_id = "${aws_api_gateway_resource.test.id}" + http_method = "GET" + authorization = "NONE" + + request_models = { + "application/json" = "Error" + } + + request_parameters = { + "method.request.querystring.page" = false + } + + operation_name = "describeTest" +} +`, rInt) +} diff --git a/website/docs/r/api_gateway_method.html.markdown b/website/docs/r/api_gateway_method.html.markdown index 5405933f397..f61cd412c9a 100644 --- a/website/docs/r/api_gateway_method.html.markdown +++ b/website/docs/r/api_gateway_method.html.markdown @@ -88,6 +88,7 @@ The following arguments are supported: * `request_validator_id` - (Optional) The ID of a `aws_api_gateway_request_validator` * `request_parameters` - (Optional) A map of request parameters (from the path, query string and headers) that should be passed to the integration. The boolean value indicates whether the parameter is required (`true`) or optional (`false`). For example: `request_parameters = {"method.request.header.X-Some-Header" = true "method.request.querystring.some-query-param" = true}` would define that the header `X-Some-Header` and the query string `some-query-param` must be provided in the request. +* `operation_name` - (Optional) The function name that will be given to the method when generating an SDK through API Gateway. If omitted, API Gateway will generate a function name based on the resource path and HTTP verb. 
## Import From ff67906b381d3ddb0d47c5474dff6549fadf486a Mon Sep 17 00:00:00 2001 From: stp Date: Sun, 12 Jan 2020 20:53:00 +0100 Subject: [PATCH 0025/1212] Enhancement: Step Functions for Express Workflows --- aws/resource_aws_sfn_state_machine.go | 117 +++++- aws/resource_aws_sfn_state_machine_test.go | 386 ++++++++++++++++++ .../docs/r/sfn_state_machine.html.markdown | 66 +++ 3 files changed, 561 insertions(+), 8 deletions(-) diff --git a/aws/resource_aws_sfn_state_machine.go b/aws/resource_aws_sfn_state_machine.go index af8f8d0e6ad..b4dcec19a7f 100644 --- a/aws/resource_aws_sfn_state_machine.go +++ b/aws/resource_aws_sfn_state_machine.go @@ -31,6 +31,37 @@ func resourceAwsSfnStateMachine() *schema.Resource { ValidateFunc: validation.StringLenBetween(0, 1024*1024), // 1048576 }, + "logging_configuration": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "log_destination": { + Type: schema.TypeString, + Optional: true, + }, + "include_execution_data": { + Type: schema.TypeBool, + Optional: true, + // Default: false, + }, + "level": { + Type: schema.TypeString, + Optional: true, + // Default: sfn.LogLevelOff, + ValidateFunc: validation.StringInSlice([]string{ + sfn.LogLevelAll, + sfn.LogLevelError, + sfn.LogLevelFatal, + sfn.LogLevelOff, + }, false), + }, + }, + }, + }, + "name": { Type: schema.TypeString, Required: true, @@ -53,11 +84,21 @@ func resourceAwsSfnStateMachine() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "tags": tagsSchema(), "arn": { Type: schema.TypeString, Computed: true, }, + "type": { + Type: schema.TypeString, + Optional: true, + Default: sfn.StateMachineTypeStandard, + ValidateFunc: validation.StringInSlice([]string{ + sfn.StateMachineTypeStandard, + sfn.StateMachineTypeExpress, + }, false), + }, }, } } @@ -65,12 +106,13 @@ func resourceAwsSfnStateMachine() *schema.Resource { func resourceAwsSfnStateMachineCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).sfnconn log.Print("[DEBUG] Creating Step Function State Machine") - params := &sfn.CreateStateMachineInput{ - Definition: aws.String(d.Get("definition").(string)), - Name: aws.String(d.Get("name").(string)), - RoleArn: aws.String(d.Get("role_arn").(string)), - Tags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().SfnTags(), + Definition: aws.String(d.Get("definition").(string)), + LoggingConfiguration: expandAwsSfnLoggingConfiguration(d.Get("logging_configuration").([]interface{})), + Name: aws.String(d.Get("name").(string)), + RoleArn: aws.String(d.Get("role_arn").(string)), + Tags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().SfnTags(), + Type: aws.String(d.Get("type").(string)), } var stateMachine *sfn.CreateStateMachineOutput @@ -114,7 +156,6 @@ func resourceAwsSfnStateMachineRead(d *schema.ResourceData, meta interface{}) er ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig log.Printf("[DEBUG] Reading Step Function State Machine: %s", d.Id()) - sm, err := conn.DescribeStateMachine(&sfn.DescribeStateMachineInput{ StateMachineArn: aws.String(d.Id()), }) @@ -132,8 +173,18 @@ func resourceAwsSfnStateMachineRead(d *schema.ResourceData, meta interface{}) er d.Set("definition", sm.Definition) d.Set("name", sm.Name) d.Set("role_arn", sm.RoleArn) + d.Set("type", sm.Type) d.Set("status", sm.Status) + loggingConfiguration := flattenAwsSfnLoggingConfiguration(sm.LoggingConfiguration) + + if loggingConfiguration != nil { + err := 
d.Set("logging_configuration", loggingConfiguration) + if err != nil { + log.Printf("[DEBUG] Error setting logging_configuration %s \n", err) + } + } + if err := d.Set("creation_date", sm.CreationDate.Format(time.RFC3339)); err != nil { log.Printf("[DEBUG] Error setting creation_date: %s", err) } @@ -154,13 +205,21 @@ func resourceAwsSfnStateMachineRead(d *schema.ResourceData, meta interface{}) er func resourceAwsSfnStateMachineUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).sfnconn - if d.HasChanges("definition", "role_arn") { + if d.HasChange("logging_configuration") && d.Get("type").(string) == sfn.StateMachineTypeExpress { + params.LoggingConfiguration = expandAwsSfnLoggingConfiguration(d.Get("logging_configuration").([]interface{})) + } + + if d.HasChanges("definition", "role_arn", "logging_configuration") { params := &sfn.UpdateStateMachineInput{ StateMachineArn: aws.String(d.Id()), Definition: aws.String(d.Get("definition").(string)), RoleArn: aws.String(d.Get("role_arn").(string)), } + if d.HasChange("logging_configuration") { + params.LoggingConfiguration = expandAwsSfnLoggingConfiguration(d.Get("logging_configuration").([]interface{})) + } + _, err := conn.UpdateStateMachine(params) log.Printf("[DEBUG] Updating Step Function State Machine: %#v", params) @@ -180,7 +239,7 @@ func resourceAwsSfnStateMachineUpdate(d *schema.ResourceData, meta interface{}) } } - return resourceAwsSfnStateMachineRead(d, meta) + return nil } func resourceAwsSfnStateMachineDelete(d *schema.ResourceData, meta interface{}) error { @@ -206,3 +265,45 @@ func resourceAwsSfnStateMachineDelete(d *schema.ResourceData, meta interface{}) return nil } + +func expandAwsSfnLoggingConfiguration(l []interface{}) *sfn.LoggingConfiguration { + + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + loggingConfiguration := &sfn.LoggingConfiguration{ + Destinations: []*sfn.LogDestination{ + { + CloudWatchLogsLogGroup: &sfn.CloudWatchLogsLogGroup{ + LogGroupArn: aws.String(m["log_destination"].(string)), + }, + }, + }, + IncludeExecutionData: aws.Bool(m["include_execution_data"].(bool)), + Level: aws.String(m["level"].(string)), + } + + return loggingConfiguration +} + +func flattenAwsSfnLoggingConfiguration(loggingConfiguration *sfn.LoggingConfiguration) []interface{} { + + if loggingConfiguration == nil { + return []interface{}{} + } + + m := map[string]interface{}{ + "log_destination": "", + "include_execution_data": aws.BoolValue(loggingConfiguration.IncludeExecutionData), + "level": aws.StringValue(loggingConfiguration.Level), + } + + if len(loggingConfiguration.Destinations) > 0 { + m["log_destination"] = aws.StringValue(loggingConfiguration.Destinations[0].CloudWatchLogsLogGroup.LogGroupArn) + } + + return []interface{}{m} +} diff --git a/aws/resource_aws_sfn_state_machine_test.go b/aws/resource_aws_sfn_state_machine_test.go index 663b6c94bf2..88d48fc3493 100644 --- a/aws/resource_aws_sfn_state_machine_test.go +++ b/aws/resource_aws_sfn_state_machine_test.go @@ -57,6 +57,84 @@ func TestAccAWSSfnStateMachine_createUpdate(t *testing.T) { }) } +func TestAccAWSSfnStateMachine_express_createUpdate(t *testing.T) { + var sm sfn.DescribeStateMachineOutput + name := acctest.RandString(10) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSfnStateMachineDestroy, + Steps: []resource.TestStep{ + { + Config: 
testAccAWSSfnStateMachineTypedConfig(sfn.StateMachineTypeExpress, name, 5), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSfnExists("aws_sfn_state_machine.foo", &sm), + resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "status", sfn.StateMachineStatusActive), + resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "name"), + resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "creation_date"), + resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "definition"), + resource.TestMatchResourceAttr("aws_sfn_state_machine.foo", "definition", regexp.MustCompile(`.*\"MaxAttempts\": 5.*`)), + resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "role_arn"), + // resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "logging_configuration"), + resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "logging_configuration.#", "1"), + resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "logging_configuration.0.level", sfn.LogLevelOff), + ), + }, + { + Config: testAccAWSSfnStateMachineTypedConfig(sfn.StateMachineTypeExpress, name, 10), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSfnExists("aws_sfn_state_machine.foo", &sm), + resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "status", sfn.StateMachineStatusActive), + resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "name"), + resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "creation_date"), + resource.TestMatchResourceAttr("aws_sfn_state_machine.foo", "definition", regexp.MustCompile(`.*\"MaxAttempts\": 10.*`)), + resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "role_arn"), + // resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "logging_configuration"), + resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "logging_configuration.#", "1"), + resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "logging_configuration.0.level", sfn.LogLevelOff), + ), + }, + }, + }) +} + +func TestAccAWSSfnStateMachine_standard_createUpdate(t *testing.T) { + var sm sfn.DescribeStateMachineOutput + name := acctest.RandString(10) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSfnStateMachineDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSfnStateMachineTypedConfig(sfn.StateMachineTypeStandard, name, 5), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSfnExists("aws_sfn_state_machine.foo", &sm), + resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "status", sfn.StateMachineStatusActive), + resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "name"), + resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "creation_date"), + resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "definition"), + resource.TestMatchResourceAttr("aws_sfn_state_machine.foo", "definition", regexp.MustCompile(`.*\"MaxAttempts\": 5.*`)), + resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "role_arn"), + ), + }, + { + Config: testAccAWSSfnStateMachineTypedConfig(sfn.StateMachineTypeStandard, name, 10), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSfnExists("aws_sfn_state_machine.foo", &sm), + resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "status", sfn.StateMachineStatusActive), + resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "name"), + resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", 
"creation_date"), + resource.TestMatchResourceAttr("aws_sfn_state_machine.foo", "definition", regexp.MustCompile(`.*\"MaxAttempts\": 10.*`)), + resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "role_arn"), + ), + }, + }, + }) +} + func TestAccAWSSfnStateMachine_tags(t *testing.T) { var sm sfn.DescribeStateMachineOutput resourceName := "aws_sfn_state_machine.test" @@ -123,6 +201,46 @@ func TestAccAWSSfnStateMachine_disappears(t *testing.T) { }) } +func TestAccAWSSfnStateMachine_express_LoggingConfiguration(t *testing.T) { + var sm sfn.DescribeStateMachineOutput + name := acctest.RandString(10) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSfnStateMachineDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSfnStateMachineExpressConfigLogConfiguration1(sfn.StateMachineTypeExpress, name, sfn.LogLevelError), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSfnExists("aws_sfn_state_machine.foo", &sm), + resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "status", sfn.StateMachineStatusActive), + resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "name"), + resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "creation_date"), + resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "definition"), + resource.TestMatchResourceAttr("aws_sfn_state_machine.foo", "definition", regexp.MustCompile(`.*\"MaxAttempts\": 5.*`)), + resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "role_arn"), + resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "logging_configuration.#", "1"), + resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "logging_configuration.0.level", sfn.LogLevelError), + ), + }, + { + Config: testAccAWSSfnStateMachineExpressConfigLogConfiguration1(sfn.StateMachineTypeExpress, name, sfn.LogLevelAll), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSfnExists("aws_sfn_state_machine.foo"), + resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "status", sfn.StateMachineStatusActive), + resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "name"), + resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "creation_date"), + resource.TestMatchResourceAttr("aws_sfn_state_machine.foo", "definition", regexp.MustCompile(`.*\"MaxAttempts\": 5.*`)), + resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "role_arn"), + resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "logging_configuration.#", "1"), + resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "logging_configuration.0.level", sfn.LogLevelAll), + ), + }, + }, + }) +} + func testAccCheckAWSSfnExists(n string, sm *sfn.DescribeStateMachineOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -351,3 +469,271 @@ EOF } `, rName, tag1Key, tag1Value, tag2Key, tag2Value) } + +func testAccAWSSfnStateMachineTypedConfig(rType string, rName string, rMaxAttempts int) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + +data "aws_region" "current" {} + +resource "aws_iam_role_policy" "iam_policy_for_lambda" { + name = "iam_policy_for_lambda_%s" + role = "${aws_iam_role.iam_for_lambda.id}" + + policy = < *NOTE:* Logging is only accepted for EXPRESS Workflows. 
See the [AWS Step Functions Developer Guide](https://docs.aws.amazon.com/step-functions/latest/dg/welcome.html) for more information about enabling Step Function logging. ```hcl # ... @@ -18,6 +70,7 @@ Provides a Step Function State Machine resource resource "aws_sfn_state_machine" "sfn_state_machine" { name = "my-state-machine" role_arn = "${aws_iam_role.iam_for_sfn.arn}" + type = "EXPRESS" definition = < Date: Wed, 4 Mar 2020 06:49:59 -0800 Subject: [PATCH 0026/1212] removed express logging restrictions --- aws/resource_aws_sfn_state_machine.go | 10 +++++++++- website/docs/r/sfn_state_machine.html.markdown | 3 +-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_sfn_state_machine.go b/aws/resource_aws_sfn_state_machine.go index b4dcec19a7f..769e0a3662b 100644 --- a/aws/resource_aws_sfn_state_machine.go +++ b/aws/resource_aws_sfn_state_machine.go @@ -205,7 +205,15 @@ func resourceAwsSfnStateMachineRead(d *schema.ResourceData, meta interface{}) er func resourceAwsSfnStateMachineUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).sfnconn - if d.HasChange("logging_configuration") && d.Get("type").(string) == sfn.StateMachineTypeExpress { + params := &sfn.UpdateStateMachineInput{ + StateMachineArn: aws.String(d.Id()), + Definition: aws.String(d.Get("definition").(string)), + RoleArn: aws.String(d.Get("role_arn").(string)), + } + + log.Printf("[DEBUG] Updating Step Function State Machine: %#v", params) + + if d.HasChange("logging_configuration") { params.LoggingConfiguration = expandAwsSfnLoggingConfiguration(d.Get("logging_configuration").([]interface{})) } diff --git a/website/docs/r/sfn_state_machine.html.markdown b/website/docs/r/sfn_state_machine.html.markdown index aa2c019bff9..6af037cdf30 100644 --- a/website/docs/r/sfn_state_machine.html.markdown +++ b/website/docs/r/sfn_state_machine.html.markdown @@ -62,7 +62,7 @@ EOF ### Logging -~> *NOTE:* Logging is only accepted for EXPRESS Workflows. See the [AWS Step Functions Developer Guide](https://docs.aws.amazon.com/step-functions/latest/dg/welcome.html) for more information about enabling Step Function logging. +~> *NOTE:* See the [AWS Step Functions Developer Guide](https://docs.aws.amazon.com/step-functions/latest/dg/welcome.html) for more information about enabling Step Function logging. ```hcl # ... @@ -70,7 +70,6 @@ EOF resource "aws_sfn_state_machine" "sfn_state_machine" { name = "my-state-machine" role_arn = "${aws_iam_role.iam_for_sfn.arn}" - type = "EXPRESS" definition = < Date: Wed, 4 Mar 2020 07:22:54 -0800 Subject: [PATCH 0027/1212] fenced lines fix --- website/docs/r/sfn_state_machine.html.markdown | 1 + 1 file changed, 1 insertion(+) diff --git a/website/docs/r/sfn_state_machine.html.markdown b/website/docs/r/sfn_state_machine.html.markdown index 6af037cdf30..5d0efaf6abb 100644 --- a/website/docs/r/sfn_state_machine.html.markdown +++ b/website/docs/r/sfn_state_machine.html.markdown @@ -12,6 +12,7 @@ Provides a Step Function State Machine resource ## Example Usage ### Basic (Standard Workflow) + ```hcl # ... 
From 863f4073e417956cf5471f7da60e65618e00e482 Mon Sep 17 00:00:00 2001 From: graham jenson Date: Wed, 4 Mar 2020 09:39:30 -0800 Subject: [PATCH 0028/1212] fixing hcl styling --- website/docs/r/sfn_state_machine.html.markdown | 1 + 1 file changed, 1 insertion(+) diff --git a/website/docs/r/sfn_state_machine.html.markdown b/website/docs/r/sfn_state_machine.html.markdown index 5d0efaf6abb..f22773ac417 100644 --- a/website/docs/r/sfn_state_machine.html.markdown +++ b/website/docs/r/sfn_state_machine.html.markdown @@ -37,6 +37,7 @@ EOF ``` ### Basic (Express Workflow) + ```hcl # ... From c0f10b23fd46a92a4e9302f6358a77966d514d40 Mon Sep 17 00:00:00 2001 From: graham jenson Date: Wed, 4 Mar 2020 09:53:06 -0800 Subject: [PATCH 0029/1212] fix styling --- website/docs/r/sfn_state_machine.html.markdown | 1 + 1 file changed, 1 insertion(+) diff --git a/website/docs/r/sfn_state_machine.html.markdown b/website/docs/r/sfn_state_machine.html.markdown index f22773ac417..ca81efa3fc7 100644 --- a/website/docs/r/sfn_state_machine.html.markdown +++ b/website/docs/r/sfn_state_machine.html.markdown @@ -107,6 +107,7 @@ The following arguments are supported: * `type` - (Optional) Determines whether a Standard or Express state machine is created. The default is STANDARD. You cannot update the type of a state machine once it has been created. Valid Values: STANDARD | EXPRESS ### `logging_configuration` Configuration Block + * `log_destination` - (Optional) Amazon Resource Name (ARN) of CloudWatch log group. Make sure the State Machine does have the right IAM Policies for Logging. * `include_execution_data` - (Optional) Determines whether execution data is included in your log. When set to FALSE, data is excluded. * `level` - (Optional) Defines which category of execution history events are logged. 
Valid Values: ALL | ERROR | FATAL | OFF From 7bcc4d0c8cd0461998657cb9238e398af2f9e3b6 Mon Sep 17 00:00:00 2001 From: graham jenson Date: Thu, 11 Jun 2020 09:49:34 -0700 Subject: [PATCH 0030/1212] force new --- aws/resource_aws_sfn_state_machine.go | 1 + 1 file changed, 1 insertion(+) diff --git a/aws/resource_aws_sfn_state_machine.go b/aws/resource_aws_sfn_state_machine.go index 769e0a3662b..f10d32a2a47 100644 --- a/aws/resource_aws_sfn_state_machine.go +++ b/aws/resource_aws_sfn_state_machine.go @@ -93,6 +93,7 @@ func resourceAwsSfnStateMachine() *schema.Resource { "type": { Type: schema.TypeString, Optional: true, + ForceNew: true, Default: sfn.StateMachineTypeStandard, ValidateFunc: validation.StringInSlice([]string{ sfn.StateMachineTypeStandard, From c40cff7e4041f50a1b0d88b11124de4ce7ff506e Mon Sep 17 00:00:00 2001 From: Graham Jenson Date: Mon, 22 Jun 2020 10:18:05 -0700 Subject: [PATCH 0031/1212] align docs --- website/docs/r/sfn_state_machine.html.markdown | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/docs/r/sfn_state_machine.html.markdown b/website/docs/r/sfn_state_machine.html.markdown index ca81efa3fc7..c34ebaf1c88 100644 --- a/website/docs/r/sfn_state_machine.html.markdown +++ b/website/docs/r/sfn_state_machine.html.markdown @@ -88,10 +88,10 @@ resource "aws_sfn_state_machine" "sfn_state_machine" { EOF logging_configuration { - log_destination = "${aws_cloudwatch_log_group.log_group_for_sfn.arn}" - include_execution_data = true - level = "ERROR" - } + log_destination = "${aws_cloudwatch_log_group.log_group_for_sfn.arn}" + include_execution_data = true + level = "ERROR" + } } ``` From 4db146671e08c4714161733e99a6c353da3c2295 Mon Sep 17 00:00:00 2001 From: Graham Jenson Date: Sun, 28 Jun 2020 22:12:03 -0700 Subject: [PATCH 0032/1212] Update aws/resource_aws_sfn_state_machine_test.go Co-authored-by: Dexter Miguel --- aws/resource_aws_sfn_state_machine_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/aws/resource_aws_sfn_state_machine_test.go b/aws/resource_aws_sfn_state_machine_test.go index 88d48fc3493..c13bd8e7005 100644 --- a/aws/resource_aws_sfn_state_machine_test.go +++ b/aws/resource_aws_sfn_state_machine_test.go @@ -76,7 +76,6 @@ func TestAccAWSSfnStateMachine_express_createUpdate(t *testing.T) { resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "definition"), resource.TestMatchResourceAttr("aws_sfn_state_machine.foo", "definition", regexp.MustCompile(`.*\"MaxAttempts\": 5.*`)), resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "role_arn"), - // resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "logging_configuration"), resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "logging_configuration.#", "1"), resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "logging_configuration.0.level", sfn.LogLevelOff), ), From 13df67915f2ae371afc5099b8b3447ebccb41e93 Mon Sep 17 00:00:00 2001 From: Graham Jenson Date: Sun, 28 Jun 2020 22:12:12 -0700 Subject: [PATCH 0033/1212] Update aws/resource_aws_sfn_state_machine_test.go Co-authored-by: Dexter Miguel --- aws/resource_aws_sfn_state_machine_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/aws/resource_aws_sfn_state_machine_test.go b/aws/resource_aws_sfn_state_machine_test.go index c13bd8e7005..d8a086dcfa4 100644 --- a/aws/resource_aws_sfn_state_machine_test.go +++ b/aws/resource_aws_sfn_state_machine_test.go @@ -89,7 +89,6 @@ func TestAccAWSSfnStateMachine_express_createUpdate(t *testing.T) { 
resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "creation_date"), resource.TestMatchResourceAttr("aws_sfn_state_machine.foo", "definition", regexp.MustCompile(`.*\"MaxAttempts\": 10.*`)), resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "role_arn"), - // resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "logging_configuration"), resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "logging_configuration.#", "1"), resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "logging_configuration.0.level", sfn.LogLevelOff), ), From 5f3f198d75292963b481f62c85510c9cfc7d0a0f Mon Sep 17 00:00:00 2001 From: Graham Jenson Date: Sun, 28 Jun 2020 22:12:36 -0700 Subject: [PATCH 0034/1212] Update website/docs/r/sfn_state_machine.html.markdown Co-authored-by: Dexter Miguel --- website/docs/r/sfn_state_machine.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/sfn_state_machine.html.markdown b/website/docs/r/sfn_state_machine.html.markdown index c34ebaf1c88..0509c807623 100644 --- a/website/docs/r/sfn_state_machine.html.markdown +++ b/website/docs/r/sfn_state_machine.html.markdown @@ -103,7 +103,7 @@ The following arguments are supported: * `definition` - (Required) The Amazon States Language definition of the state machine. * `role_arn` - (Required) The Amazon Resource Name (ARN) of the IAM role to use for this state machine. * `tags` - (Optional) Key-value map of resource tags -* `logging_configuration` - (Optional) Defines what execution history events are logged and where they are logged. The logging_configuration parameter is only valid when type is set to EXPRESS. By default, the level is set to OFF. For more information see [Logging Express Workflows](https://docs.aws.amazon.com/step-functions/latest/dg/cw-logs.html) and [Log Levels](https://docs.aws.amazon.com/step-functions/latest/dg/cloudwatch-log-level.html) in the AWS Step Functions User Guide. +* `logging_configuration` - (Optional) Defines what execution history events are logged and where they are logged. The `logging_configuration` parameter is only valid when `type` is set to `EXPRESS`. Defaults to `OFF`. For more information see [Logging Express Workflows](https://docs.aws.amazon.com/step-functions/latest/dg/cw-logs.html) and [Log Levels](https://docs.aws.amazon.com/step-functions/latest/dg/cloudwatch-log-level.html) in the AWS Step Functions User Guide. * `type` - (Optional) Determines whether a Standard or Express state machine is created. The default is STANDARD. You cannot update the type of a state machine once it has been created. 
Valid Values: STANDARD | EXPRESS ### `logging_configuration` Configuration Block From 817a992641ecd49d572868812d2582b841df514e Mon Sep 17 00:00:00 2001 From: Graham Jenson Date: Wed, 1 Jul 2020 17:38:03 -0700 Subject: [PATCH 0035/1212] new test format --- aws/resource_aws_sfn_state_machine_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_sfn_state_machine_test.go b/aws/resource_aws_sfn_state_machine_test.go index d8a086dcfa4..4701b0c9c7b 100644 --- a/aws/resource_aws_sfn_state_machine_test.go +++ b/aws/resource_aws_sfn_state_machine_test.go @@ -225,7 +225,7 @@ func TestAccAWSSfnStateMachine_express_LoggingConfiguration(t *testing.T) { { Config: testAccAWSSfnStateMachineExpressConfigLogConfiguration1(sfn.StateMachineTypeExpress, name, sfn.LogLevelAll), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSfnExists("aws_sfn_state_machine.foo"), + testAccCheckAWSSfnExists("aws_sfn_state_machine.foo", &sm), resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "status", sfn.StateMachineStatusActive), resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "name"), resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "creation_date"), From b99c8a84051a0463dfa24e01f3fba8d1b0a7573d Mon Sep 17 00:00:00 2001 From: Graham Jenson Date: Wed, 1 Jul 2020 17:44:49 -0700 Subject: [PATCH 0036/1212] fixing linter --- website/docs/r/sfn_state_machine.html.markdown | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/docs/r/sfn_state_machine.html.markdown b/website/docs/r/sfn_state_machine.html.markdown index 0509c807623..0cb2df13867 100644 --- a/website/docs/r/sfn_state_machine.html.markdown +++ b/website/docs/r/sfn_state_machine.html.markdown @@ -86,11 +86,11 @@ resource "aws_sfn_state_machine" "sfn_state_machine" { } } EOF - + logging_configuration { - log_destination = "${aws_cloudwatch_log_group.log_group_for_sfn.arn}" + log_destination = "${aws_cloudwatch_log_group.log_group_for_sfn.arn}" include_execution_data = true - level = "ERROR" + level = "ERROR" } } ``` From 9f53080aae62fefc3948caf0f0050cfe956b7dd1 Mon Sep 17 00:00:00 2001 From: drexler Date: Thu, 25 Jun 2020 07:56:51 -0400 Subject: [PATCH 0037/1212] feat: support ApproveUntilDate attribute --- aws/resource_aws_ssm_patch_baseline.go | 53 ++++++++++++++++++++++---- 1 file changed, 45 insertions(+), 8 deletions(-) diff --git a/aws/resource_aws_ssm_patch_baseline.go b/aws/resource_aws_ssm_patch_baseline.go index aaeac579163..93b9fd195cb 100644 --- a/aws/resource_aws_ssm_patch_baseline.go +++ b/aws/resource_aws_ssm_patch_baseline.go @@ -3,6 +3,7 @@ package aws import ( "fmt" "log" + "regexp" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ssm" @@ -76,8 +77,15 @@ func resourceAwsSsmPatchBaseline() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "approve_after_days": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 100), + }, + + "approve_until_date": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringMatch(regexp.MustCompile(`([12]\d{3}-(0[1-9]|1[0-2])-(0[1-9]|[12]\d|3[01]))`), "must be formatted YYYY-MM-DD"), }, "compliance_level": { @@ -178,7 +186,11 @@ func resourceAwsSsmPatchBaselineCreate(d *schema.ResourceData, meta interface{}) } if _, ok := d.GetOk("approval_rule"); ok { - params.ApprovalRules = expandAwsSsmPatchRuleGroup(d) + rules, err := expandAwsSsmPatchRuleGroup(d) + if err != nil { + return 
err + } + params.ApprovalRules = rules } resp, err := ssmconn.CreatePatchBaseline(params) @@ -218,7 +230,11 @@ func resourceAwsSsmPatchBaselineUpdate(d *schema.ResourceData, meta interface{}) } if d.HasChange("approval_rule") { - params.ApprovalRules = expandAwsSsmPatchRuleGroup(d) + rules, err := expandAwsSsmPatchRuleGroup(d) + if err != nil { + return err + } + params.ApprovalRules = rules } if d.HasChange("global_filter") { @@ -347,7 +363,7 @@ func flattenAwsSsmPatchFilterGroup(group *ssm.PatchFilterGroup) []map[string]int return result } -func expandAwsSsmPatchRuleGroup(d *schema.ResourceData) *ssm.PatchRuleGroup { +func expandAwsSsmPatchRuleGroup(d *schema.ResourceData) (*ssm.PatchRuleGroup, error) { var rules []*ssm.PatchRule ruleConfig := d.Get("approval_rule").([]interface{}) @@ -374,18 +390,31 @@ func expandAwsSsmPatchRuleGroup(d *schema.ResourceData) *ssm.PatchRuleGroup { } rule := &ssm.PatchRule{ - ApproveAfterDays: aws.Int64(int64(rCfg["approve_after_days"].(int))), PatchFilterGroup: filterGroup, ComplianceLevel: aws.String(rCfg["compliance_level"].(string)), EnableNonSecurity: aws.Bool(rCfg["enable_non_security"].(bool)), } + // Verify that at least one of approve_after_days or approve_until_date is set + approveAfterDays, _ := rCfg["approve_after_days"].(int) + approveUntilDate, _ := rCfg["approve_until_date"].(string) + + if approveAfterDays > 0 && len(approveUntilDate) > 0 { + return nil, fmt.Errorf("Only one of approve_after_days or approve_until_date must be configured") + } + + if len(approveUntilDate) > 0 { + rule.ApproveUntilDate = aws.String(approveUntilDate) + } else { + rule.ApproveAfterDays = aws.Int64(int64(approveAfterDays)) + } + rules = append(rules, rule) } return &ssm.PatchRuleGroup{ PatchRules: rules, - } + }, nil } func flattenAwsSsmPatchRuleGroup(group *ssm.PatchRuleGroup) []map[string]interface{} { @@ -397,10 +426,18 @@ func flattenAwsSsmPatchRuleGroup(group *ssm.PatchRuleGroup) []map[string]interfa for _, rule := range group.PatchRules { r := make(map[string]interface{}) - r["approve_after_days"] = *rule.ApproveAfterDays r["compliance_level"] = *rule.ComplianceLevel r["enable_non_security"] = *rule.EnableNonSecurity r["patch_filter"] = flattenAwsSsmPatchFilterGroup(rule.PatchFilterGroup) + + if rule.ApproveAfterDays != nil { + r["approve_after_days"] = *rule.ApproveAfterDays + } + + if rule.ApproveUntilDate != nil { + r["approve_until_date"] = *rule.ApproveUntilDate + } + result = append(result, r) } From 499c49042f5db249fe05981b681e09631f164c4d Mon Sep 17 00:00:00 2001 From: drexler Date: Thu, 25 Jun 2020 07:57:08 -0400 Subject: [PATCH 0038/1212] test: add cover test --- aws/resource_aws_ssm_patch_baseline_test.go | 154 ++++++++++++++++++++ 1 file changed, 154 insertions(+) diff --git a/aws/resource_aws_ssm_patch_baseline_test.go b/aws/resource_aws_ssm_patch_baseline_test.go index 438392a833c..68d54a221bc 100644 --- a/aws/resource_aws_ssm_patch_baseline_test.go +++ b/aws/resource_aws_ssm_patch_baseline_test.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "regexp" "testing" "github.com/aws/aws-sdk-go/aws" @@ -168,6 +169,68 @@ func TestAccAWSSSMPatchBaseline_OperatingSystem(t *testing.T) { }) } +func TestAccAWSSSMPatchBaseline_ApproveUntilDateParam(t *testing.T) { + var before, after ssm.PatchBaselineIdentity + name := acctest.RandString(10) + resourceName := "aws_ssm_patch_baseline.foo" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: 
testAccCheckAWSSSMPatchBaselineDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSSMPatchBaselineConfigWithApproveUntilDate(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSMPatchBaselineExists(resourceName, &before), + resource.TestCheckResourceAttr(resourceName, "approval_rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "approval_rule.0.approve_until_date", "2020-01-01"), + resource.TestCheckResourceAttr(resourceName, "approval_rule.0.patch_filter.#", "2"), + resource.TestCheckResourceAttr(resourceName, "approval_rule.0.compliance_level", ssm.PatchComplianceLevelCritical), + resource.TestCheckResourceAttr(resourceName, "approval_rule.0.enable_non_security", "true"), + resource.TestCheckResourceAttr(resourceName, "operating_system", "AMAZON_LINUX"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSSSMPatchBaselineConfigWithApproveUntilDateUpdated(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSMPatchBaselineExists(resourceName, &after), + resource.TestCheckResourceAttr(resourceName, "approval_rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "approval_rule.0.approve_until_date", "2020-02-02"), + resource.TestCheckResourceAttr(resourceName, "approval_rule.0.patch_filter.#", "2"), + resource.TestCheckResourceAttr(resourceName, "approval_rule.0.compliance_level", ssm.PatchComplianceLevelCritical), + resource.TestCheckResourceAttr(resourceName, "operating_system", "AMAZON_LINUX"), + func(*terraform.State) error { + if *before.BaselineId != *after.BaselineId { + t.Fatal("Baseline IDs changed unexpectedly") + } + return nil + }, + ), + }, + }, + }) +} + +func TestAccAWSSSMPatchBaseline_InvalidConfiguration(t *testing.T) { + name := acctest.RandString(10) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSMPatchBaselineDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSSMPatchBaselineConfigWithBothApprovalTimes(name), + ExpectError: regexp.MustCompile(`Only one of approve_after_days or approve_until_date must be configured`), + }, + }, + }) +} + func testAccCheckAwsSsmPatchBaselineRecreated(t *testing.T, before, after *ssm.PatchBaselineIdentity) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -374,3 +437,94 @@ resource "aws_ssm_patch_baseline" "foo" { } `, rName) } + +func testAccAWSSSMPatchBaselineConfigWithApproveUntilDate(rName string) string { + return fmt.Sprintf(` +resource "aws_ssm_patch_baseline" "foo" { + name = "patch-baseline-%s" + operating_system = "AMAZON_LINUX" + description = "Baseline containing all updates approved for production systems" + + tags = { + Name = "My Patch Baseline" + } + + approval_rule { + approve_until_date = "2020-01-01" + enable_non_security = true + compliance_level = "CRITICAL" + + patch_filter { + key = "PRODUCT" + values = ["AmazonLinux2016.03", "AmazonLinux2016.09", "AmazonLinux2017.03", "AmazonLinux2017.09"] + } + + patch_filter { + key = "SEVERITY" + values = ["Critical", "Important"] + } + } +} +`, rName) +} + +func testAccAWSSSMPatchBaselineConfigWithApproveUntilDateUpdated(rName string) string { + return fmt.Sprintf(` +resource "aws_ssm_patch_baseline" "foo" { + name = "patch-baseline-%s" + operating_system = "AMAZON_LINUX" + description = "Baseline containing all updates approved for production systems" + + tags = { + Name = "My Patch Baseline" + } + + approval_rule { + 
approve_until_date = "2020-02-02" + enable_non_security = true + compliance_level = "CRITICAL" + + patch_filter { + key = "PRODUCT" + values = ["AmazonLinux2016.03", "AmazonLinux2016.09", "AmazonLinux2017.03", "AmazonLinux2017.09"] + } + + patch_filter { + key = "SEVERITY" + values = ["Critical", "Important"] + } + } +} +`, rName) +} + +func testAccAWSSSMPatchBaselineConfigWithBothApprovalTimes(rName string) string { + return fmt.Sprintf(` +resource "aws_ssm_patch_baseline" "foo" { + name = "patch-baseline-%s" + operating_system = "AMAZON_LINUX" + description = "Baseline containing all updates approved for production systems" + + tags = { + Name = "My Patch Baseline" + } + + approval_rule { + approve_after_days = 7 + approve_until_date = "2020-01-03" + enable_non_security = true + compliance_level = "CRITICAL" + + patch_filter { + key = "PRODUCT" + values = ["AmazonLinux2016.03", "AmazonLinux2016.09", "AmazonLinux2017.03", "AmazonLinux2017.09"] + } + + patch_filter { + key = "SEVERITY" + values = ["Critical", "Important"] + } + } +} +`, rName) +} From b59f0436bf3c14a9799a582f17f0d9ae4a50455e Mon Sep 17 00:00:00 2001 From: drexler Date: Thu, 25 Jun 2020 07:57:27 -0400 Subject: [PATCH 0039/1212] docs: update resource documentation --- website/docs/r/ssm_patch_baseline.html.markdown | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/docs/r/ssm_patch_baseline.html.markdown b/website/docs/r/ssm_patch_baseline.html.markdown index 3990d31a1b9..766119c714e 100644 --- a/website/docs/r/ssm_patch_baseline.html.markdown +++ b/website/docs/r/ssm_patch_baseline.html.markdown @@ -135,7 +135,8 @@ The following arguments are supported: The `approval_rule` block supports: -* `approve_after_days` - (Required) The number of days after the release date of each patch matched by the rule the patch is marked as approved in the patch baseline. Valid Range: 0 to 100. +* `approve_after_days` - (Optional) The number of days after the release date of each patch matched by the rule the patch is marked as approved in the patch baseline. Valid Range: 0 to 100. Conflicts with `approve_until_date` +* `approve_until_date` - (Optional) The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically. Date is formatted as `YYYY-MM-DD`. Conflicts with `approve_after_days` * `patch_filter` - (Required) The patch filter group that defines the criteria for the rule. Up to 5 patch filters can be specified per approval rule using Key/Value pairs. Valid Keys are `PATCH_SET | PRODUCT | CLASSIFICATION | MSRC_SEVERITY | PATCH_ID`. * `PATCH_SET` defaults to `OS` if unspecified * `compliance_level` - (Optional) Defines the compliance level for patches approved by this rule. Valid compliance levels include the following: `CRITICAL`, `HIGH`, `MEDIUM`, `LOW`, `INFORMATIONAL`, `UNSPECIFIED`. The default value is `UNSPECIFIED`. 
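Patches 0037 through 0039 above add, test, and document the new `approve_until_date` argument. For quick orientation, here is a minimal configuration sketch distilled from the acceptance tests above; the resource name, date, and filter values are illustrative only, not an authoritative example from the provider docs:

```hcl
resource "aws_ssm_patch_baseline" "example" {
  name             = "example-patch-baseline"
  operating_system = "AMAZON_LINUX"

  approval_rule {
    # Mutually exclusive with approve_after_days; must be formatted YYYY-MM-DD.
    approve_until_date = "2020-01-01"
    compliance_level   = "CRITICAL"

    patch_filter {
      key    = "SEVERITY"
      values = ["Critical", "Important"]
    }
  }
}
```

Configuring both `approve_after_days` and `approve_until_date` in the same rule is rejected with the `Only one of approve_after_days or approve_until_date must be configured` error that the invalid-configuration test expects.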
From e4dc2a48f440e9e31241d090e16d83bd56ee6f74 Mon Sep 17 00:00:00 2001 From: Roberth Kulbin Date: Wed, 17 Jun 2020 10:15:22 +0100 Subject: [PATCH 0040/1212] resource/aws_autoscaling_group: add instance_refresh block --- aws/autoscaling_tags.go | 38 +- aws/resource_aws_autoscaling_group.go | 152 +++++++- aws/resource_aws_autoscaling_group_test.go | 361 ++++++++++++++++++ .../docs/r/autoscaling_group.html.markdown | 60 ++- 4 files changed, 598 insertions(+), 13 deletions(-) diff --git a/aws/autoscaling_tags.go b/aws/autoscaling_tags.go index c0bbbe134d8..bf60e5c0905 100644 --- a/aws/autoscaling_tags.go +++ b/aws/autoscaling_tags.go @@ -51,8 +51,12 @@ func autoscalingTagToHash(v interface{}) int { } // setTags is a helper to set the tags for a resource. It expects the -// tags field to be named "tag" -func setAutoscalingTags(conn *autoscaling.AutoScaling, d *schema.ResourceData) error { +// tags field to be named "tag". +// +// When the return value requiresPropagation is true, instances of the +// ASG should be refreshed in order for the changed or removed tags to +// fully take effect. +func setAutoscalingTags(conn *autoscaling.AutoScaling, d *schema.ResourceData) (requiresPropagation bool, err error) { resourceID := d.Get("name").(string) var createTags, removeTags []*autoscaling.Tag @@ -63,17 +67,17 @@ func setAutoscalingTags(conn *autoscaling.AutoScaling, d *schema.ResourceData) e old, err := autoscalingTagsFromMap(o, resourceID) if err != nil { - return err + return false, err } new, err := autoscalingTagsFromMap(n, resourceID) if err != nil { - return err + return false, err } c, r, err := diffAutoscalingTags(old, new, resourceID) if err != nil { - return err + return false, err } createTags = append(createTags, c...) @@ -82,17 +86,17 @@ func setAutoscalingTags(conn *autoscaling.AutoScaling, d *schema.ResourceData) e oraw, nraw = d.GetChange("tags") old, err = autoscalingTagsFromList(oraw.(*schema.Set).List(), resourceID) if err != nil { - return err + return false, err } new, err = autoscalingTagsFromList(nraw.(*schema.Set).List(), resourceID) if err != nil { - return err + return false, err } c, r, err = diffAutoscalingTags(old, new, resourceID) if err != nil { - return err + return false, err } createTags = append(createTags, c...) 
@@ -108,7 +112,13 @@ func setAutoscalingTags(conn *autoscaling.AutoScaling, d *schema.ResourceData) e } if _, err := conn.DeleteTags(&remove); err != nil { - return err + return false, err + } + + for _, tag := range removeTags { + if aws.BoolValue(tag.PropagateAtLaunch) { + requiresPropagation = true + } } } @@ -120,11 +130,17 @@ func setAutoscalingTags(conn *autoscaling.AutoScaling, d *schema.ResourceData) e } if _, err := conn.CreateOrUpdateTags(&create); err != nil { - return err + return false, err + } + + for _, tag := range createTags { + if aws.BoolValue(tag.PropagateAtLaunch) { + requiresPropagation = true + } } } - return nil + return requiresPropagation, nil } // diffTags takes our tags locally and the ones remotely and returns diff --git a/aws/resource_aws_autoscaling_group.go b/aws/resource_aws_autoscaling_group.go index 0349c4a0305..d648609a397 100644 --- a/aws/resource_aws_autoscaling_group.go +++ b/aws/resource_aws_autoscaling_group.go @@ -442,6 +442,35 @@ func resourceAwsAutoscalingGroup() *schema.Resource { Optional: true, Computed: true, }, + + "instance_refresh": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_warmup_seconds": { + Type: schema.TypeInt, + Optional: true, + Default: -1, // default to health_check_grace_period + ValidateFunc: validation.IntAtLeast(-1), + }, + "min_healthy_percentage": { + Type: schema.TypeInt, + Optional: true, + Default: 90, + ValidateFunc: validation.IntBetween(0, 100), + }, + "strategy": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice( + []string{autoscaling.RefreshStrategyRolling}, + false), + }, + }, + }, + }, }, CustomizeDiff: customdiff.Sequence( @@ -894,6 +923,7 @@ func waitUntilAutoscalingGroupLoadBalancerTargetGroupsAdded(conn *autoscaling.Au func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).autoscalingconn shouldWaitForCapacity := false + shouldRefreshInstances := false opts := autoscaling.UpdateAutoScalingGroupInput{ AutoScalingGroupName: aws.String(d.Id()), @@ -914,16 +944,21 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) if v, ok := d.GetOk("launch_configuration"); ok { opts.LaunchConfigurationName = aws.String(v.(string)) } + + shouldRefreshInstances = true } if d.HasChange("launch_template") { if v, ok := d.GetOk("launch_template"); ok && len(v.([]interface{})) > 0 { opts.LaunchTemplate, _ = expandLaunchTemplateSpecification(v.([]interface{})) } + + shouldRefreshInstances = true } if d.HasChange("mixed_instances_policy") { opts.MixedInstancesPolicy = expandAutoScalingMixedInstancesPolicy(d.Get("mixed_instances_policy").([]interface{})) + shouldRefreshInstances = true } if d.HasChange("min_size") { @@ -950,16 +985,20 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) if d.HasChange("vpc_zone_identifier") { opts.VPCZoneIdentifier = expandVpcZoneIdentifiers(d.Get("vpc_zone_identifier").(*schema.Set).List()) + shouldRefreshInstances = true } if d.HasChange("availability_zones") { if v, ok := d.GetOk("availability_zones"); ok && v.(*schema.Set).Len() > 0 { opts.AvailabilityZones = expandStringList(v.(*schema.Set).List()) } + + shouldRefreshInstances = true } if d.HasChange("placement_group") { opts.PlacementGroup = aws.String(d.Get("placement_group").(string)) + shouldRefreshInstances = true } if d.HasChange("termination_policies") { @@ -977,8 +1016,11 @@ func 
resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) opts.ServiceLinkedRoleARN = aws.String(d.Get("service_linked_role_arn").(string)) } - if err := setAutoscalingTags(conn, d); err != nil { + switch requiresPropagation, err := setAutoscalingTags(conn, d); { + case err != nil: return err + case requiresPropagation: + shouldRefreshInstances = true } log.Printf("[DEBUG] AutoScaling Group update configuration: %#v", opts) @@ -1144,6 +1186,12 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) } } + if shouldRefreshInstances { + if err := startAutoscalingInstanceRefresh(d, conn); err != nil { + return fmt.Errorf("failed to start instance refresh of asg %s: %s", d.Id(), err) + } + } + return resourceAwsAutoscalingGroupRead(d, meta) } @@ -1756,3 +1804,105 @@ func waitUntilAutoscalingGroupLoadBalancersRemoved(conn *autoscaling.AutoScaling return nil } + +// startAutoscalingInstanceRefresh starts a new Instance Refresh in this +// Auto-Scaling Group. If there is already an active refresh, it is cancelled. +func startAutoscalingInstanceRefresh(d *schema.ResourceData, conn *autoscaling.AutoScaling) error { + asgName := d.Id() + input := autoscaling.StartInstanceRefreshInput{ + AutoScalingGroupName: aws.String(asgName), + Preferences: &autoscaling.RefreshPreferences{}, + Strategy: nil, + } + + if block, ok := d.Get("instance_refresh").([]interface{}); ok && len(block) > 0 { + m := block[0].(map[string]interface{}) + + if warmup := m["instance_warmup_seconds"].(int); warmup > -1 { + // -1 would mean defaulting to using the group's health_check_grace_period + input.Preferences.InstanceWarmup = aws.Int64(int64(warmup)) + } + + // validated by schema + input.Preferences.MinHealthyPercentage = aws.Int64(int64(m["min_healthy_percentage"].(int))) + input.Strategy = aws.String(m["strategy"].(string)) + } else { + log.Printf("[DEBUG] Instance refresh not enabled in ASG %s", asgName) + return nil + } + + log.Printf("[DEBUG] Cancelling active refresh in ASG %s, if any...", asgName) + + if err := cancelAutoscalingInstanceRefresh(d, conn); err != nil { + // todo: add comment about subsequent ASG updates not picking up the refresh? + return fmt.Errorf("failed to cancel previous refresh: %s", err) + } + + log.Printf("[DEBUG] Starting instance refresh in ASG %s...", asgName) + + instanceRefreshId := "" + switch output, err := conn.StartInstanceRefresh(&input); { + case err != nil: + return err + default: + instanceRefreshId = aws.StringValue(output.InstanceRefreshId) + } + + log.Printf("[INFO] Started instance refresh %s in ASG %s", instanceRefreshId, asgName) + + return nil +} + +// cancelAutoscalingInstanceRefresh cancels the currently active Instance +// Refresh of this Auto-Scaling Group, and waits until the refresh reaches a +// terminal state (usually Cancelled). If there is no active refresh, the +// function short-circuits without error. 
+func cancelAutoscalingInstanceRefresh(d *schema.ResourceData, conn *autoscaling.AutoScaling) error { + asgName := d.Id() + input := autoscaling.CancelInstanceRefreshInput{ + AutoScalingGroupName: aws.String(asgName), + } + + _, err := conn.CancelInstanceRefresh(&input) + switch { + case isAWSErr(err, autoscaling.ErrCodeActiveInstanceRefreshNotFoundFault, ""): + log.Printf("[DEBUG] No active Instance Refresh in ASG %s", asgName) + return nil + case err != nil: + return err + } + + err = resource.Retry(5*time.Minute, func() *resource.RetryError { + input := autoscaling.DescribeInstanceRefreshesInput{ + AutoScalingGroupName: aws.String(asgName), + MaxRecords: aws.Int64(1), + } + + output, err := conn.DescribeInstanceRefreshes(&input) + switch { + case err != nil: + return resource.NonRetryableError(err) + case len(output.InstanceRefreshes) != 1: + return nil + } + + switch status := aws.StringValue(output.InstanceRefreshes[0].Status); status { + case + autoscaling.InstanceRefreshStatusCancelled, + autoscaling.InstanceRefreshStatusFailed, + autoscaling.InstanceRefreshStatusSuccessful: + + return nil + default: + return resource.RetryableError(fmt.Errorf("refresh status %s is not terminal", status)) + } + }) + + if isResourceTimeoutError(err) { + return fmt.Errorf("timed out before the previous refresh reached a terminal state") + } + + log.Printf("[INFO] Cancelled active instance refresh in ASG %s", asgName) + + return nil +} diff --git a/aws/resource_aws_autoscaling_group_test.go b/aws/resource_aws_autoscaling_group_test.go index 4a9fe50e543..c4710fdcf18 100644 --- a/aws/resource_aws_autoscaling_group_test.go +++ b/aws/resource_aws_autoscaling_group_test.go @@ -99,6 +99,7 @@ func TestAccAWSAutoScalingGroup_basic(t *testing.T) { testAccCheckAWSAutoScalingGroupAttributes(&group, randName), testAccMatchResourceAttrRegionalARN("aws_autoscaling_group.bar", "arn", "autoscaling", regexp.MustCompile(`autoScalingGroup:.+`)), tfawsresource.TestCheckTypeSetElemAttr("aws_autoscaling_group.bar", "availability_zones.*", "us-west-2a"), + testAccCheckAutoscalingLatestInstanceRefreshState(&group, 0, 0, nil), resource.TestCheckResourceAttr("aws_autoscaling_group.bar", "default_cooldown", "300"), resource.TestCheckResourceAttr("aws_autoscaling_group.bar", "desired_capacity", "4"), resource.TestCheckResourceAttr("aws_autoscaling_group.bar", "enabled_metrics.#", "0"), @@ -126,6 +127,7 @@ func TestAccAWSAutoScalingGroup_basic(t *testing.T) { resource.TestCheckResourceAttr("aws_autoscaling_group.bar", "termination_policies.1", "ClosestToNextInstanceHour"), resource.TestCheckResourceAttr("aws_autoscaling_group.bar", "vpc_zone_identifier.#", "0"), resource.TestCheckResourceAttr("aws_autoscaling_group.bar", "max_instance_lifetime", "0"), + resource.TestCheckNoResourceAttr("aws_autoscaling_group.bar", "instance_refresh.#"), ), }, { @@ -147,6 +149,7 @@ func TestAccAWSAutoScalingGroup_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.new", &lc), + testAccCheckAutoscalingLatestInstanceRefreshState(&group, 0, 0, nil), resource.TestCheckResourceAttr( "aws_autoscaling_group.bar", "desired_capacity", "5"), resource.TestCheckResourceAttr( @@ -4250,3 +4253,361 @@ resource "aws_autoscaling_group" "test" { } `, rName) } + +func TestAccAWSAutoScalingGroup_InstanceRefresh_Enabled(t *testing.T) { + var group autoscaling.Group + resourceName := "aws_autoscaling_group.test" + + 
resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, + Steps: []resource.TestStep{ + { + // check that an instance refresh isn't started by a new asg + Config: testAccAwsAutoScalingGroup_InstanceRefresh_Enabled("Alpha", true, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAutoScalingGroupExists(resourceName, &group), + resource.TestCheckResourceAttr(resourceName, "min_size", "1"), + resource.TestCheckResourceAttr(resourceName, "max_size", "2"), + resource.TestCheckResourceAttr(resourceName, "desired_capacity", "1"), + testAccCheckAutoscalingLatestInstanceRefreshState(&group, 0, 0, nil), + resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.instance_warmup_seconds", "-1"), + resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.min_healthy_percentage", "90"), + resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.strategy", "Rolling"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "force_delete", + "wait_for_capacity_timeout", + "instance_refresh", + }, + }, + { + // check that changing asg size doesn't trigger a refresh + Config: testAccAwsAutoScalingGroup_InstanceRefresh_Enabled("Alpha", false, 2), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAutoScalingGroupExists(resourceName, &group), + resource.TestCheckResourceAttr(resourceName, "min_size", "2"), + resource.TestCheckResourceAttr(resourceName, "max_size", "4"), + resource.TestCheckResourceAttr(resourceName, "desired_capacity", "2"), + testAccCheckAutoscalingLatestInstanceRefreshState(&group, 0, 0, nil), + ), + }, + { + // check that changing propagated tags triggers a refresh + Config: testAccAwsAutoScalingGroup_InstanceRefresh_Enabled("Bravo", false, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAutoScalingGroupExists(resourceName, &group), + resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.instance_warmup_seconds", "10"), + resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.min_healthy_percentage", "50"), + resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.strategy", "Rolling"), + testAccCheckAutoscalingLatestInstanceRefreshState(&group, 1, 0, []string{ + autoscaling.InstanceRefreshStatusPending, + autoscaling.InstanceRefreshStatusInProgress, + }), + ), + }, + { + // check that an active refresh is cancelled in favour of a new one + Config: testAccAwsAutoScalingGroup_InstanceRefresh_Enabled("Charlie", false, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAutoScalingGroupExists(resourceName, &group), + testAccCheckAutoscalingLatestInstanceRefreshState(&group, 2, 1, []string{ + autoscaling.InstanceRefreshStatusCancelled, + }), + testAccCheckAutoscalingLatestInstanceRefreshState(&group, 2, 0, []string{ + autoscaling.InstanceRefreshStatusPending, + autoscaling.InstanceRefreshStatusInProgress, + }), + ), + }, + }, + }) +} + +func testAccAwsAutoScalingGroup_InstanceRefresh_Enabled( + tagValue string, + defaults bool, + sizeFactor int, +) string { + preference := `` + if !defaults { + preference = ` + min_healthy_percentage = 50 + instance_warmup_seconds = 10` + } + + return fmt.Sprintf(` +locals { + size_factor = %[3]d +} + +data "aws_ami" "test" { + most_recent = true + owners = ["amazon"] + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + +data "aws_availability_zones" 
"current" { + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +resource "aws_launch_configuration" "test" { + image_id = "${data.aws_ami.test.id}" + instance_type = "t3.nano" +} + +resource "aws_autoscaling_group" "test" { + availability_zones = [data.aws_availability_zones.current.names[0]] + max_size = 2 * local.size_factor + min_size = 1 * local.size_factor + desired_capacity = 1 * local.size_factor + launch_configuration = "${aws_launch_configuration.test.name}" + + tag { + key = "Test" + value = %[1]q + propagate_at_launch = true + } + + instance_refresh { + strategy = "Rolling" +%[2]s + } +} +`, + tagValue, + preference, + sizeFactor) +} + +func TestAccAWSAutoScalingGroup_InstanceRefresh_Triggers(t *testing.T) { + // note: propagated tags have been implicitly checked + // by TestAccAWSAutoScalingGroup_InstanceRefresh_Enabled + + matrix := []struct { + AvailabilityZoneCount int + SubnetCount int + InstanceType string + UseLaunchConfiguration bool + UseLaunchTemplate bool + UseMixedInstancesPolicy bool + UsePlacementGroup bool + ExpectRefreshCount int + }{ + {2, 0, "t3.nano", true, false, false, false, 0}, // create asg + {1, 0, "t3.nano", true, false, false, false, 1}, // drop 1 subnet + {0, 2, "t3.nano", true, false, false, false, 2}, // add 2 vpcs, drop subnets + {0, 1, "t3.nano", true, false, false, false, 3}, // drop 1 vpc + {0, 1, "t3.nano", false, true, false, false, 4}, // drop launch config, add template + {0, 1, "t3.micro", false, true, false, false, 5}, // update template + {0, 1, "t3.micro", false, false, true, false, 6}, // drop template, add mixed policy + {0, 1, "t3.nano", false, false, true, false, 7}, // update mixed policy + {0, 1, "t3.nano", false, false, true, true, 8}, // use placement group + } + + var group autoscaling.Group + resourceName := "aws_autoscaling_group.test" + placementGroupName := fmt.Sprintf("tf-test-%s", acctest.RandString(8)) + + steps := make([]resource.TestStep, len(matrix)) + for i, test := range matrix { + steps[i] = resource.TestStep{ + Config: testAccAwsAutoScalingGroup_InstanceRefresh_Triggers( + test.AvailabilityZoneCount, + test.SubnetCount, + test.InstanceType, + test.UseLaunchConfiguration, + test.UseLaunchTemplate, + test.UseMixedInstancesPolicy, + test.UsePlacementGroup, + placementGroupName, + ), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAutoScalingGroupExists(resourceName, &group), + testAccCheckAutoscalingLatestInstanceRefreshState(&group, test.ExpectRefreshCount, 0, nil), + ), + } + } + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, + Steps: steps, + }) +} + +func testAccAwsAutoScalingGroup_InstanceRefresh_Triggers( + availabilityZoneCount int, + subnetCount int, + instanceType string, + useLaunchConfiguration bool, + useLaunchTemplate bool, + useMixedInstancesPolicy bool, + usePlacementGroup bool, + placementGroupName string, +) string { + return fmt.Sprintf(` +locals { + availability_zone_count = %[1]d + subnet_count = %[2]d + instance_type = %[3]q + use_launch_configuration = %[4]v + use_launch_template = %[5]v + use_mixed_instances_policy = %[6]v + use_placement_group = %[7]v + placement_group_name = %[8]q +} + +data "aws_ami" "test" { + most_recent = true + owners = ["amazon"] + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + +data "aws_availability_zones" "current" { + state = "available" + + filter { 
+ name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +resource "aws_default_subnet" "current" { + count = length(data.aws_availability_zones.current.names) + availability_zone = data.aws_availability_zones.current.names[count.index] +} + +resource "aws_launch_configuration" "test" { + image_id = "${data.aws_ami.test.id}" + instance_type = local.instance_type +} + +resource "aws_launch_template" "test" { + image_id = data.aws_ami.test.image_id + instance_type = local.instance_type +} + +resource "aws_placement_group" "test" { + name = local.placement_group_name + strategy = "cluster" +} + +resource "aws_autoscaling_group" "test" { + availability_zones = local.availability_zone_count > 0 ? slice(data.aws_availability_zones.current.names, 0, local.availability_zone_count) : null + max_size = 1 + min_size = 1 + desired_capacity = 1 + launch_configuration = local.use_launch_configuration ? aws_launch_configuration.test.name : null + vpc_zone_identifier = local.subnet_count > 0 ? slice(aws_default_subnet.current.*.id, 0, local.subnet_count) : null + placement_group = local.use_placement_group ? aws_placement_group.test.name : null + + dynamic "launch_template" { + for_each = local.use_launch_template ? [1] : [] + content { + id = aws_launch_template.test.id + version = aws_launch_template.test.latest_version + } + } + + dynamic "mixed_instances_policy" { + for_each = local.use_mixed_instances_policy ? [1] : [] + content { + launch_template { + launch_template_specification { + launch_template_id = aws_launch_template.test.id + version = aws_launch_template.test.latest_version + } + } + } + } + + instance_refresh { + strategy = "Rolling" + } +} +`, + availabilityZoneCount, + subnetCount, + instanceType, + useLaunchConfiguration, + useLaunchTemplate, + useMixedInstancesPolicy, + usePlacementGroup, + placementGroupName, + ) +} + +// testAccCheckAutoscalingLatestInstanceRefreshState checks the Instance Refreshes +// of an Auto-Scaling Group. +// +// Use length to set the number of refreshes (of any state) that are expected. +// +// Use the offset parameter to choose the instance refresh to check. Offset 0 +// is the latest refresh, with higher offsets yielding successively older refreshes. +// When length is 0, this argument has no effect. +// +// When length is greater than 0 and acceptedStatuses is non-nil, expect the +// refresh at the given offset to have one of the given accepted statuses. 
+func testAccCheckAutoscalingLatestInstanceRefreshState( + group *autoscaling.Group, + length int, + offset int, + acceptedStatuses []string, +) resource.TestCheckFunc { + return func(state *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).autoscalingconn + name := aws.StringValue(group.AutoScalingGroupName) + input := autoscaling.DescribeInstanceRefreshesInput{ + AutoScalingGroupName: aws.String(name), + } + + output, err := conn.DescribeInstanceRefreshes(&input) + switch { + case err != nil: + return err + case len(output.InstanceRefreshes) != length: + return fmt.Errorf("expected %d instance refreshes, but found %d", length, len(output.InstanceRefreshes)) + } + + switch { + case length == 0: + return nil + case len(acceptedStatuses) == 0: + return nil + } + + status := aws.StringValue(output.InstanceRefreshes[offset].Status) + for _, acceptedStatus := range acceptedStatuses { + if status == acceptedStatus { + return nil + } + } + + return fmt.Errorf( + "expected status of refresh at offset %d to be one of %s, got %s", + offset, + strings.Join(acceptedStatuses, " or "), + status) + } +} diff --git a/website/docs/r/autoscaling_group.html.markdown b/website/docs/r/autoscaling_group.html.markdown index a2c582ad2f9..fd679932a54 100644 --- a/website/docs/r/autoscaling_group.html.markdown +++ b/website/docs/r/autoscaling_group.html.markdown @@ -123,7 +123,7 @@ resource "aws_autoscaling_group" "example" { } ``` -## Interpolated tags +### Interpolated tags ```hcl variable "extra_tags" { @@ -158,6 +158,42 @@ resource "aws_autoscaling_group" "bar" { } ``` +### Automatically refresh all instances after the group is updated + +```hcl +data "aws_ami" "example" { + most_recent = true + owners = ["amazon"] + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + +resource "aws_launch_template" "example" { + image_id = "${data.aws_ami.example.id}" + instance_type = "t3.nano" +} + +resource "aws_autoscaling_group" "example" { + availability_zones = ["us-east-1a"] + desired_capacity = 1 + max_size = 2 + min_size = 1 + + launch_template { + id = "${aws_launch_template.example.id}" + version = "${aws_launch_template.example.latest_version}" + } + + instance_refresh { + strategy = "Rolling" + min_healthy_percentage = 50 + } +} +``` + ## Argument Reference The following arguments are supported: @@ -221,6 +257,9 @@ Note that if you suspend either the `Launch` or `Terminate` process types, it ca during scale in events. * `service_linked_role_arn` (Optional) The ARN of the service-linked role that the ASG will use to call other AWS services * `max_instance_lifetime` (Optional) The maximum amount of time, in seconds, that an instance can be in service, values must be either equal to 0 or between 604800 and 31536000 seconds. +* `instance_refresh` - (Optional) If this block is configured, start an + [Instance Refresh](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html) + when this autoscaling group is updated. Defined below. ### launch_template @@ -288,6 +327,25 @@ This allows the construction of dynamic lists of tags which is not possible usin ~> **NOTE:** Other AWS APIs may automatically add special tags to their associated Auto Scaling Group for management purposes, such as ECS Capacity Providers adding the `AmazonECSManaged` tag. 
To ignore the removal of these automatic tags, see the [`ignore_tags` provider configuration](https://www.terraform.io/docs/providers/aws/index.html#ignore_tags) or the [`ignore_changes` lifecycle argument for Terraform resources](https://www.terraform.io/docs/configuration/resources.html#ignore_changes). +### instance_refresh + +This configuration block supports the following: + +* `instance_warmup_seconds` - (Optional) The number of seconds until a newly launched + instance is configured and ready to use. Default behavior (set with `-1` or `null`) + is to match the autoscaling group's health check grace period. +* `min_healthy_percentage` - (Optional) The amount of capacity in the Auto Scaling group + that must remain healthy during an instance refresh to allow the operation to continue, + as a percentage of the desired capacity of the Auto Scaling group. Defaults to `90`. +* `strategy` - (Required) The strategy to use for instance refresh. The only allowed + value is `"Rolling"`. See [StartInstanceRefresh Action](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_StartInstanceRefresh.html#API_StartInstanceRefresh_RequestParameters) for more information. + +~> **NOTE:** A refresh is only started when any of the following autoscaling group properties change: `launch_configuration`, `launch_template`, `mixed_instances_policy`, `vpc_zone_identifier`, `availability_zones`, `placement_group`, or any `tag` or `tags` configured to propagate at launch. + +~> **NOTE:** Autoscaling groups support up to one active instance refresh at a time. When this resource is updated, any existing refresh is cancelled. + +~> **NOTE:** Depending on health check settings and group size, an instance refresh may take a long time or fail. This resource does not wait for the instance refresh to complete. 
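The three arguments work together: `strategy` selects the refresh type, `min_healthy_percentage` bounds how much capacity may be out of service at once, and `instance_warmup_seconds` controls how long a newly launched instance is given to warm up before the refresh moves on. A compact sketch of the block with every argument set (the values are illustrative, not recommendations):

```hcl
instance_refresh {
  strategy                = "Rolling" # currently the only allowed strategy
  min_healthy_percentage  = 50        # allow up to half of desired capacity to be replaced at a time
  instance_warmup_seconds = 300       # -1 (the default) falls back to health_check_grace_period
}
```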
+ ## Attributes Reference In addition to all arguments above, the following attributes are exported: From 908c553eb96ea387d16f0f32abbe75dc34a41c10 Mon Sep 17 00:00:00 2001 From: Roberth Kulbin Date: Fri, 26 Jun 2020 13:55:21 +0100 Subject: [PATCH 0041/1212] resource/aws_autoscaling_group: fix typos in test comments --- aws/resource_aws_autoscaling_group_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_autoscaling_group_test.go b/aws/resource_aws_autoscaling_group_test.go index c4710fdcf18..d3be987dd33 100644 --- a/aws/resource_aws_autoscaling_group_test.go +++ b/aws/resource_aws_autoscaling_group_test.go @@ -4409,10 +4409,10 @@ func TestAccAWSAutoScalingGroup_InstanceRefresh_Triggers(t *testing.T) { UsePlacementGroup bool ExpectRefreshCount int }{ - {2, 0, "t3.nano", true, false, false, false, 0}, // create asg - {1, 0, "t3.nano", true, false, false, false, 1}, // drop 1 subnet - {0, 2, "t3.nano", true, false, false, false, 2}, // add 2 vpcs, drop subnets - {0, 1, "t3.nano", true, false, false, false, 3}, // drop 1 vpc + {2, 0, "t3.nano", true, false, false, false, 0}, // create asg with 2 az-s + {1, 0, "t3.nano", true, false, false, false, 1}, // drop 1 az + {0, 2, "t3.nano", true, false, false, false, 2}, // add 2 subnets, drop az-s + {0, 1, "t3.nano", true, false, false, false, 3}, // drop 1 subnet {0, 1, "t3.nano", false, true, false, false, 4}, // drop launch config, add template {0, 1, "t3.micro", false, true, false, false, 5}, // update template {0, 1, "t3.micro", false, false, true, false, 6}, // drop template, add mixed policy From e9aa3f259a84bf7fe7c15fabc0185b469ffabc07 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adam=20=C5=BDurek?= Date: Thu, 9 Jul 2020 17:23:50 +0200 Subject: [PATCH 0042/1212] Added ec2 url to LocalStack example configuration --- website/docs/guides/custom-service-endpoints.html.md | 1 + 1 file changed, 1 insertion(+) diff --git a/website/docs/guides/custom-service-endpoints.html.md b/website/docs/guides/custom-service-endpoints.html.md index f830a489c37..8071df9659c 100644 --- a/website/docs/guides/custom-service-endpoints.html.md +++ b/website/docs/guides/custom-service-endpoints.html.md @@ -246,6 +246,7 @@ provider "aws" { cloudformation = "http://localhost:4581" cloudwatch = "http://localhost:4582" dynamodb = "http://localhost:4569" + ec2 = "http://localhost:4597" es = "http://localhost:4578" firehose = "http://localhost:4573" iam = "http://localhost:4593" From 24f26d99c7cb09e9bcb1d312c2e58d0bb593cdf6 Mon Sep 17 00:00:00 2001 From: drexler Date: Thu, 9 Jul 2020 09:31:23 -0400 Subject: [PATCH 0043/1212] feat: support version_name property --- aws/resource_aws_ssm_document.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/aws/resource_aws_ssm_document.go b/aws/resource_aws_ssm_document.go index 635588c2272..c1cab0d86d7 100644 --- a/aws/resource_aws_ssm_document.go +++ b/aws/resource_aws_ssm_document.go @@ -172,6 +172,12 @@ func resourceAwsSsmDocument() *schema.Resource { Type: schema.TypeString, Optional: true, }, + "version_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, }, } } @@ -207,6 +213,9 @@ func resourceAwsSsmDocumentCreate(d *schema.ResourceData, meta interface{}) erro if v, ok := d.GetOk("target_type"); ok { docInput.TargetType = aws.String(v.(string)) } + if v, ok := d.GetOk("version_name"); ok { + docInput.VersionName = aws.String(v.(string)) + } resp, err := ssmconn.CreateDocument(docInput) @@ -282,6 +291,7 @@ func 
resourceAwsSsmDocumentRead(d *schema.ResourceData, meta interface{}) error d.Set("hash", doc.Hash) d.Set("hash_type", doc.HashType) d.Set("latest_version", doc.LatestVersion) + d.Set("version_name", doc.VersionName) d.Set("name", doc.Name) d.Set("owner", doc.Owner) d.Set("platform_types", flattenStringList(doc.PlatformTypes)) From e4890835f7bdedf3a425263756680935cf213fcc Mon Sep 17 00:00:00 2001 From: drexler Date: Thu, 9 Jul 2020 09:31:51 -0400 Subject: [PATCH 0044/1212] test: add cover test --- aws/resource_aws_ssm_document_test.go | 62 +++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/aws/resource_aws_ssm_document_test.go b/aws/resource_aws_ssm_document_test.go index 42f1619f24d..501747cfa19 100644 --- a/aws/resource_aws_ssm_document_test.go +++ b/aws/resource_aws_ssm_document_test.go @@ -72,6 +72,37 @@ func TestAccAWSSSMDocument_target_type(t *testing.T) { }) } +func TestAccAWSSSMDocument_version_name(t *testing.T) { + name := acctest.RandString(10) + resourceName := "aws_ssm_document.test" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSMDocumentDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSSMDocumentBasicConfigVersionName(name, "release-1.0.0"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSMDocumentExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "version_name", "release-1.0.0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSSSMDocumentBasicConfigVersionName(name, "release-1.0.1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSMDocumentExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "version_name", "release-1.0.1"), + ), + }, + }, + }) +} + func TestAccAWSSSMDocument_update(t *testing.T) { name := acctest.RandString(10) resourceName := "aws_ssm_document.test" @@ -624,6 +655,37 @@ DOC `, rName, typ) } +func testAccAWSSSMDocumentBasicConfigVersionName(rName, version string) string { + return fmt.Sprintf(` +resource "aws_ssm_document" "test" { + name = "%s" + document_type = "Command" + version_name = "%s" + + content = < Date: Thu, 9 Jul 2020 09:35:32 -0400 Subject: [PATCH 0045/1212] docs: update resource documentation --- website/docs/r/ssm_document.html.markdown | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/docs/r/ssm_document.html.markdown b/website/docs/r/ssm_document.html.markdown index a34f65ddf48..887384dba1c 100644 --- a/website/docs/r/ssm_document.html.markdown +++ b/website/docs/r/ssm_document.html.markdown @@ -53,7 +53,8 @@ The following arguments are supported: * `document_format` - (Optional, defaults to JSON) The format of the document. Valid document types include: `JSON` and `YAML` * `document_type` - (Required) The type of the document. Valid document types include: `Automation`, `Command`, `Package`, `Policy`, and `Session` * `permissions` - (Optional) Additional Permissions to attach to the document. See [Permissions](#permissions) below for details. -* `target_type` - (Optional) The target type which defines the kinds of resources the document can run on. For example, /AWS::EC2::Instance. 
For a list of valid resource types, see AWS Resource Types Reference (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) +* `target_type` - (Optional) The target type which defines the kinds of resources the document can run on. For example, /AWS::EC2::Instance. For a list of valid resource types, see AWS Resource Types Reference (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) +* `version_name` - (Optional) A field specifying the version of the artifact you are creating with the document. For example, "Release 12, Update 6" * `tags` - (Optional) A map of tags to assign to the object. ## attachments_source From 550b9714341952915b36a952d327b8f9b2a5f015 Mon Sep 17 00:00:00 2001 From: sanzalb Date: Wed, 12 Aug 2020 10:53:39 +0200 Subject: [PATCH 0046/1212] Updated to Terraform 0.12 syntax --- ..._source_aws_vpc_peering_connection_test.go | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/aws/data_source_aws_vpc_peering_connection_test.go b/aws/data_source_aws_vpc_peering_connection_test.go index 7a43cf9bea9..68fef0e4626 100644 --- a/aws/data_source_aws_vpc_peering_connection_test.go +++ b/aws/data_source_aws_vpc_peering_connection_test.go @@ -104,8 +104,8 @@ resource "aws_vpc" "bar" { } resource "aws_vpc_peering_connection" "test" { - vpc_id = "${aws_vpc.foo.id}" - peer_vpc_id = "${aws_vpc.bar.id}" + vpc_id = aws_vpc.foo.id + peer_vpc_id = aws_vpc.bar.id auto_accept = true tags = { @@ -116,17 +116,17 @@ resource "aws_vpc_peering_connection" "test" { data "aws_caller_identity" "current" {} data "aws_vpc_peering_connection" "test_by_id" { - id = "${aws_vpc_peering_connection.test.id}" + id = aws_vpc_peering_connection.test.id } data "aws_vpc_peering_connection" "test_by_requester_vpc_id" { - vpc_id = "${aws_vpc.foo.id}" + vpc_id = aws_vpc.foo.id depends_on = ["aws_vpc_peering_connection.test"] } data "aws_vpc_peering_connection" "test_by_accepter_vpc_id" { - peer_vpc_id = "${aws_vpc.bar.id}" + peer_vpc_id = aws_vpc.bar.id depends_on = ["aws_vpc_peering_connection.test"] } @@ -146,8 +146,8 @@ data "aws_vpc_peering_connection" "test_by_accepter_cidr_block" { } data "aws_vpc_peering_connection" "test_by_owner_ids" { - owner_id = "${data.aws_caller_identity.current.account_id}" - peer_owner_id = "${data.aws_caller_identity.current.account_id}" + owner_id = data.aws_caller_identity.current.account_id + peer_owner_id = data.aws_caller_identity.current.account_id status = "active" depends_on = ["aws_vpc_peering_connection.test"] @@ -164,7 +164,7 @@ resource "aws_vpc" "foo" { } resource "aws_vpc_ipv4_cidr_block_association" "foo_secondary_cidr" { - vpc_id = "${aws_vpc.foo.id}" + vpc_id = aws_vpc.foo.id cidr_block = "10.5.0.0/16" } @@ -177,13 +177,13 @@ resource "aws_vpc" "bar" { } resource "aws_vpc_ipv4_cidr_block_association" "bar_secondary_cidr" { - vpc_id = "${aws_vpc.bar.id}" + vpc_id = aws_vpc.bar.id cidr_block = "10.7.0.0/16" } resource "aws_vpc_peering_connection" "test" { - vpc_id = "${aws_vpc.foo.id}" - peer_vpc_id = "${aws_vpc.bar.id}" + vpc_id = aws_vpc.foo.id + peer_vpc_id = aws_vpc.bar.id auto_accept = true tags = { @@ -194,6 +194,6 @@ } data "aws_vpc_peering_connection" "test_by_id" { - id = "${aws_vpc_peering_connection.test.id}" + id = aws_vpc_peering_connection.test.id } ` From 84a1e586283360a43a5ef1baad2e3097b9c21894 Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Mon, 17 Aug 2020 06:16:53 +0900 Subject: [PATCH 0047/1212] r/aws_cloudwatch_log_group: Add validation for retention_in_days --- aws/resource_aws_cloudwatch_log_group.go | 8 +++++--- 1 file 
changed, 5 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_cloudwatch_log_group.go b/aws/resource_aws_cloudwatch_log_group.go index 0e937447748..1d0fdd9f760 100644 --- a/aws/resource_aws_cloudwatch_log_group.go +++ b/aws/resource_aws_cloudwatch_log_group.go @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -41,9 +42,10 @@ func resourceAwsCloudWatchLogGroup() *schema.Resource { }, "retention_in_days": { - Type: schema.TypeInt, - Optional: true, - Default: 0, + Type: schema.TypeInt, + Optional: true, + Default: 0, + ValidateFunc: validation.IntInSlice([]int{1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653}), }, "kms_key_id": { From 614c5a0fa4a7f843cab51537a2eeb5cf7a83f51f Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Fri, 21 Aug 2020 08:04:19 +0900 Subject: [PATCH 0048/1212] r/aws_lambda_event_source_mapping: Fix creation wait for proper state update --- ...esource_aws_lambda_event_source_mapping.go | 50 +++++++++++++++++-- 1 file changed, 47 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_lambda_event_source_mapping.go b/aws/resource_aws_lambda_event_source_mapping.go index 786b358c135..4ae343b3370 100644 --- a/aws/resource_aws_lambda_event_source_mapping.go +++ b/aws/resource_aws_lambda_event_source_mapping.go @@ -18,6 +18,15 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) +const ( + LambdaEventSourceMappingCreating = "Creating" + LambdaEventSourceMappingEnabling = "Enabling" + LambdaEventSourceMappingUpdating = "Updating" + LambdaEventSourceMappingDisabling = "Disabling" + LambdaEventSourceMappingEnabled = "Enabled" + LambdaEventSourceMappingDisabled = "Disabled" +) + func resourceAwsLambdaEventSourceMapping() *schema.Resource { return &schema.Resource{ Create: resourceAwsLambdaEventSourceMappingCreate, @@ -257,6 +266,11 @@ func resourceAwsLambdaEventSourceMappingCreate(d *schema.ResourceData, meta inte // No error d.Set("uuid", eventSourceMappingConfiguration.UUID) d.SetId(*eventSourceMappingConfiguration.UUID) + + if err := waitForLambdaEventSourceMapping(conn, *eventSourceMappingConfiguration.UUID); err != nil { + return err + } + return resourceAwsLambdaEventSourceMappingRead(d, meta) } @@ -303,12 +317,12 @@ func resourceAwsLambdaEventSourceMappingRead(d *schema.ResourceData, meta interf state := aws.StringValue(eventSourceMappingConfiguration.State) switch state { - case "Enabled", "Enabling": + case LambdaEventSourceMappingEnabled: d.Set("enabled", true) - case "Disabled", "Disabling": + case LambdaEventSourceMappingDisabled: d.Set("enabled", false) default: - log.Printf("[DEBUG] Lambda event source mapping is neither enabled nor disabled but %s", *eventSourceMappingConfiguration.State) + return fmt.Errorf("state is neither enabled nor disabled but %s", *eventSourceMappingConfiguration.State) } return nil @@ -406,5 +420,35 @@ func resourceAwsLambdaEventSourceMappingUpdate(d *schema.ResourceData, meta inte return fmt.Errorf("Error updating Lambda event source mapping: %s", err) } + if err := waitForLambdaEventSourceMapping(conn, d.Id()); err != nil { + return err + } + return resourceAwsLambdaEventSourceMappingRead(d, meta) } + +func waitForLambdaEventSourceMapping(conn *lambda.Lambda, id string) error { + stateConf := &resource.StateChangeConf{ + Pending: 
[]string{LambdaEventSourceMappingCreating, LambdaEventSourceMappingEnabling, LambdaEventSourceMappingUpdating, LambdaEventSourceMappingDisabling}, + Target: []string{LambdaEventSourceMappingEnabled, LambdaEventSourceMappingDisabled}, + Refresh: func() (interface{}, string, error) { + params := &lambda.GetEventSourceMappingInput{ + UUID: aws.String(id), + } + + res, err := conn.GetEventSourceMapping(params) + if err != nil { + return nil, "", err + } + + return res, aws.StringValue(res.State), err + }, + Timeout: 10 * time.Minute, + Delay: 5 * time.Second, + } + + log.Printf("[DEBUG] Waiting for LambdaEventSourceMapping state update: %s", id) + _, err := stateConf.WaitForState() + + return err +} From 9ce0caa3040bdbba54ca25a5aa598323a8c14d61 Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Sat, 22 Aug 2020 13:42:55 +0900 Subject: [PATCH 0049/1212] r/aws_customer_gateway: Add device_name attribute --- aws/resource_aws_customer_gateway.go | 50 +++++++++++++------ aws/resource_aws_customer_gateway_test.go | 41 +++++++++++++++ website/docs/r/customer_gateway.html.markdown | 2 + 3 files changed, 79 insertions(+), 14 deletions(-) diff --git a/aws/resource_aws_customer_gateway.go b/aws/resource_aws_customer_gateway.go index 8fc7bcdb941..ed140bd6b1a 100644 --- a/aws/resource_aws_customer_gateway.go +++ b/aws/resource_aws_customer_gateway.go @@ -33,6 +33,13 @@ func resourceAwsCustomerGateway() *schema.Resource { ValidateFunc: validate4ByteAsn, }, + "device_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 255), + }, + "ip_address": { Type: schema.TypeString, Required: true, @@ -53,6 +60,7 @@ func resourceAwsCustomerGateway() *schema.Resource { }, "tags": tagsSchema(), + "arn": { Type: schema.TypeString, Computed: true, @@ -67,8 +75,9 @@ func resourceAwsCustomerGatewayCreate(d *schema.ResourceData, meta interface{}) ipAddress := d.Get("ip_address").(string) vpnType := d.Get("type").(string) bgpAsn := d.Get("bgp_asn").(string) + deviceName := d.Get("device_name").(string) - alreadyExists, err := resourceAwsCustomerGatewayExists(vpnType, ipAddress, bgpAsn, conn) + alreadyExists, err := resourceAwsCustomerGatewayExists(vpnType, ipAddress, bgpAsn, deviceName, conn) if err != nil { return err } @@ -89,6 +98,10 @@ func resourceAwsCustomerGatewayCreate(d *schema.ResourceData, meta interface{}) TagSpecifications: ec2TagSpecificationsFromMap(d.Get("tags").(map[string]interface{}), ec2.ResourceTypeCustomerGateway), } + if len(deviceName) != 0 { + createOpts.DeviceName = aws.String(deviceName) + } + // Create the Customer Gateway. 
log.Printf("[DEBUG] Creating customer gateway") resp, err := conn.CreateCustomerGateway(createOpts) @@ -113,6 +126,7 @@ func resourceAwsCustomerGatewayCreate(d *schema.ResourceData, meta interface{}) } _, stateErr := stateConf.WaitForState() + if stateErr != nil { return fmt.Errorf( "Error waiting for customer gateway (%s) to become ready: %s", cgId, err) @@ -150,24 +164,31 @@ func customerGatewayRefreshFunc(conn *ec2.EC2, gatewayId string) resource.StateR } } -func resourceAwsCustomerGatewayExists(vpnType, ipAddress, bgpAsn string, conn *ec2.EC2) (bool, error) { - ipAddressFilter := &ec2.Filter{ - Name: aws.String("ip-address"), - Values: []*string{aws.String(ipAddress)}, - } - - typeFilter := &ec2.Filter{ - Name: aws.String("type"), - Values: []*string{aws.String(vpnType)}, +func resourceAwsCustomerGatewayExists(vpnType, ipAddress, bgpAsn, deviceName string, conn *ec2.EC2) (bool, error) { + filters := []*ec2.Filter{ + { + Name: aws.String("ip-address"), + Values: []*string{aws.String(ipAddress)}, + }, + { + Name: aws.String("type"), + Values: []*string{aws.String(vpnType)}, + }, + { + Name: aws.String("bgp-asn"), + Values: []*string{aws.String(bgpAsn)}, + }, } - bgpAsnFilter := &ec2.Filter{ - Name: aws.String("bgp-asn"), - Values: []*string{aws.String(bgpAsn)}, + if len(deviceName) != 0 { + filters = append(filters, &ec2.Filter{ + Name: aws.String("device-name"), + Values: []*string{aws.String(deviceName)}, + }) } resp, err := conn.DescribeCustomerGateways(&ec2.DescribeCustomerGatewaysInput{ - Filters: []*ec2.Filter{ipAddressFilter, typeFilter, bgpAsnFilter}, + Filters: filters, }) if err != nil { return false, err @@ -217,6 +238,7 @@ func resourceAwsCustomerGatewayRead(d *schema.ResourceData, meta interface{}) er d.Set("bgp_asn", customerGateway.BgpAsn) d.Set("ip_address", customerGateway.IpAddress) d.Set("type", customerGateway.Type) + d.Set("device_name", customerGateway.DeviceName) if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(customerGateway.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { return fmt.Errorf("error setting tags: %s", err) diff --git a/aws/resource_aws_customer_gateway_test.go b/aws/resource_aws_customer_gateway_test.go index da6220f4625..23c70307018 100644 --- a/aws/resource_aws_customer_gateway_test.go +++ b/aws/resource_aws_customer_gateway_test.go @@ -120,6 +120,36 @@ func TestAccAWSCustomerGateway_similarAlreadyExists(t *testing.T) { }) } +func TestAccAWSCustomerGateway_deviceName(t *testing.T) { + var gateway ec2.CustomerGateway + rBgpAsn := acctest.RandIntRange(64512, 65534) + resourceName := "aws_customer_gateway.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: resourceName, + Providers: testAccProviders, + CheckDestroy: testAccCheckCustomerGatewayDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCustomerGatewayConfigDeviceName(rBgpAsn), + Check: resource.ComposeTestCheckFunc( + testAccCheckCustomerGateway(resourceName, &gateway), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "ec2", regexp.MustCompile(`customer-gateway/cgw-.+`)), + resource.TestCheckResourceAttr(resourceName, "bgp_asn", strconv.Itoa(rBgpAsn)), + resource.TestCheckResourceAttr(resourceName, "device_name", "test"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAWSCustomerGateway_disappears(t *testing.T) { rBgpAsn := acctest.RandIntRange(64512, 
65534) var gateway ec2.CustomerGateway @@ -300,6 +330,17 @@ resource "aws_customer_gateway" "identical" { `, rBgpAsn) } +func testAccCustomerGatewayConfigDeviceName(rBgpAsn int) string { + return fmt.Sprintf(` +resource "aws_customer_gateway" "test" { + bgp_asn = %[1]d + ip_address = "172.0.0.1" + type = "ipsec.1" + device_name = "test" +} +`, rBgpAsn) +} + // Change the ip_address. func testAccCustomerGatewayConfigForceReplace(rBgpAsn int) string { return fmt.Sprintf(` diff --git a/website/docs/r/customer_gateway.html.markdown b/website/docs/r/customer_gateway.html.markdown index c4d434bad44..b641a0aef45 100644 --- a/website/docs/r/customer_gateway.html.markdown +++ b/website/docs/r/customer_gateway.html.markdown @@ -31,6 +31,7 @@ resource "aws_customer_gateway" "main" { The following arguments are supported: * `bgp_asn` - (Required) The gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN). +* `device_name` - (Optional) A name for the customer gateway device. * `ip_address` - (Required) The IP address of the gateway's Internet-routable external interface. * `type` - (Required) The type of customer gateway. The only type AWS supports at this time is "ipsec.1". @@ -43,6 +44,7 @@ In addition to all arguments above, the following attributes are exported: * `id` - The amazon-assigned ID of the gateway. * `arn` - The ARN of the customer gateway. * `bgp_asn` - The gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN). +* `device_name` - A name for the customer gateway device. * `ip_address` - The IP address of the gateway's Internet-routable external interface. * `type` - The type of customer gateway. * `tags` - Tags applied to the gateway. From c96484651c52f0e4ffb2f120d23bc40ddd639cbe Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Sat, 22 Aug 2020 13:44:11 +0900 Subject: [PATCH 0050/1212] d/aws_customer_gateway: Add device_name attribute --- aws/data_source_aws_customer_gateway.go | 6 +++++- aws/data_source_aws_customer_gateway_test.go | 8 +++++--- website/docs/d/customer_gateway.html.markdown | 1 + 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/aws/data_source_aws_customer_gateway.go b/aws/data_source_aws_customer_gateway.go index ee61a93669f..24dadd367f7 100644 --- a/aws/data_source_aws_customer_gateway.go +++ b/aws/data_source_aws_customer_gateway.go @@ -22,11 +22,14 @@ func dataSourceAwsCustomerGateway() *schema.Resource { Type: schema.TypeString, Optional: true, }, - "bgp_asn": { Type: schema.TypeInt, Computed: true, }, + "device_name": { + Type: schema.TypeString, + Computed: true, + }, "ip_address": { Type: schema.TypeString, Computed: true, @@ -80,6 +83,7 @@ func dataSourceAwsCustomerGatewayRead(d *schema.ResourceData, meta interface{}) d.Set("ip_address", cg.IpAddress) d.Set("type", cg.Type) + d.Set("device_name", cg.DeviceName) d.SetId(aws.StringValue(cg.CustomerGatewayId)) if v := aws.StringValue(cg.BgpAsn); v != "" { diff --git a/aws/data_source_aws_customer_gateway_test.go b/aws/data_source_aws_customer_gateway_test.go index 7a4b99b46b0..3acf5c49c11 100644 --- a/aws/data_source_aws_customer_gateway_test.go +++ b/aws/data_source_aws_customer_gateway_test.go @@ -57,6 +57,7 @@ func TestAccAWSCustomerGatewayDataSource_ID(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "ip_address", dataSourceName, "ip_address"), resource.TestCheckResourceAttrPair(resourceName, "tags.%", dataSourceName, "tags.%"), resource.TestCheckResourceAttrPair(resourceName, "type", dataSourceName, "type"), + 
resource.TestCheckResourceAttrPair(resourceName, "device_name", dataSourceName, "device_name"), ), }, }, @@ -88,9 +89,10 @@ data "aws_customer_gateway" "test" { func testAccAWSCustomerGatewayDataSourceConfigID(asn, hostOctet int) string { return fmt.Sprintf(` resource "aws_customer_gateway" "test" { - bgp_asn = %d - ip_address = "50.0.0.%d" - type = "ipsec.1" + bgp_asn = %d + ip_address = "50.0.0.%d" + device_name = "test" + type = "ipsec.1" } data "aws_customer_gateway" "test" { diff --git a/website/docs/d/customer_gateway.html.markdown b/website/docs/d/customer_gateway.html.markdown index 9678384a812..5b7cc5c08f5 100644 --- a/website/docs/d/customer_gateway.html.markdown +++ b/website/docs/d/customer_gateway.html.markdown @@ -48,6 +48,7 @@ In addition to the arguments above, the following attributes are exported: * `arn` - The ARN of the customer gateway. * `bgp_asn` - (Optional) The gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN). +* `device_name` - (Optional) A name for the customer gateway device. * `ip_address` - (Optional) The IP address of the gateway's Internet-routable external interface. * `tags` - Map of key-value pairs assigned to the gateway. * `type` - (Optional) The type of customer gateway. The only type AWS supports at this time is "ipsec.1". From f2f21affa25caaf764cce5432f9754ac7ffdb949 Mon Sep 17 00:00:00 2001 From: Ay0 Date: Mon, 24 Aug 2020 17:53:48 +0100 Subject: [PATCH 0051/1212] improved StepScaling docs --- website/docs/r/autoscaling_policy.html.markdown | 2 ++ 1 file changed, 2 insertions(+) diff --git a/website/docs/r/autoscaling_policy.html.markdown b/website/docs/r/autoscaling_policy.html.markdown index aa913b2d608..c4e309666ae 100644 --- a/website/docs/r/autoscaling_policy.html.markdown +++ b/website/docs/r/autoscaling_policy.html.markdown @@ -91,6 +91,8 @@ difference between the alarm threshold and the CloudWatch metric. Without a value, AWS will treat this bound as infinity. The upper bound must be greater than the lower bound. +Notice the bounds are **relative** to the alarm threshold, meaning that the starting point is not 0%, but the alarm threshold. Check the official [docs](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-simple-step.html#as-scaling-steps) for a detailed example. + The following arguments are only available to "TargetTrackingScaling" type policies: * `target_tracking_configuration` - (Optional) A target tracking policy. 
These have the following structure: From 38bee62bd081ccb435586dba9e7315f817f3ffdf Mon Sep 17 00:00:00 2001 From: nikhil-goenka <70277861+nikhil-goenka@users.noreply.github.com> Date: Sat, 5 Sep 2020 03:03:05 +0530 Subject: [PATCH 0052/1212] Support ApplyOnlyAtCronInterval for aws_ssm_association --- aws/resource_aws_ssm_association.go | 14 ++++++++++++++ website/docs/r/ssm_association.html.markdown | 1 + 2 files changed, 15 insertions(+) diff --git a/aws/resource_aws_ssm_association.go b/aws/resource_aws_ssm_association.go index dd0ed4b27ad..500e0f321fe 100644 --- a/aws/resource_aws_ssm_association.go +++ b/aws/resource_aws_ssm_association.go @@ -26,6 +26,11 @@ func resourceAwsSsmAssociation() *schema.Resource { SchemaVersion: 1, Schema: map[string]*schema.Schema{ + "apply_only_at_cron_interval": { + Type: schema.TypeBool, + Default: false, + Optional: true, + }, "association_name": { Type: schema.TypeString, Optional: true, @@ -133,6 +138,10 @@ func resourceAwsSsmAssociationCreate(d *schema.ResourceData, meta interface{}) e Name: aws.String(d.Get("name").(string)), } + if v, ok := d.GetOk("apply_only_at_cron_interval"); ok { + associationInput.ApplyOnlyAtCronInterval = aws.Bool(v.(bool)) + } + if v, ok := d.GetOk("association_name"); ok { associationInput.AssociationName = aws.String(v.(string)) } @@ -215,6 +224,7 @@ func resourceAwsSsmAssociationRead(d *schema.ResourceData, meta interface{}) err } association := resp.AssociationDescription + d.Set("apply_only_at_cron_interval", association.ApplyOnlyAtCronInterval) d.Set("association_name", association.AssociationName) d.Set("instance_id", association.InstanceId) d.Set("name", association.Name) @@ -250,6 +260,10 @@ func resourceAwsSsmAssociationUpdate(d *schema.ResourceData, meta interface{}) e AssociationId: aws.String(d.Get("association_id").(string)), } + if v, ok := d.GetOk("apply_only_at_cron_interval"); ok { + associationInput.ApplyOnlyAtCronInterval = aws.Bool(v.(bool)) + } + // AWS creates a new version every time the association is updated, so everything should be passed in the update. if v, ok := d.GetOk("association_name"); ok { associationInput.AssociationName = aws.String(v.(string)) } diff --git a/website/docs/r/ssm_association.html.markdown b/website/docs/r/ssm_association.html.markdown index 1205cafc923..0fca56a5c01 100644 --- a/website/docs/r/ssm_association.html.markdown +++ b/website/docs/r/ssm_association.html.markdown @@ -28,6 +28,7 @@ resource "aws_ssm_association" "example" { The following arguments are supported: * `name` - (Required) The name of the SSM document to apply. +* `apply_only_at_cron_interval` - (Optional) By default, the system runs the new association immediately after it is created and then according to the schedule you specified. Set this parameter to `true` if you don't want the association to run immediately after you create it (Default: `false`). * `association_name` - (Optional) The descriptive name for the association. * `document_version` - (Optional) The document version you want to associate with the target(s). Can be a specific version or the default version. * `instance_id` - (Optional) The instance ID to apply an SSM document to. Use `targets` with key `InstanceIds` for document schema versions 2.0 and above.
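For readers following the patch above, a minimal usage sketch of the new argument looks like this; the document name, schedule and target tag are illustrative placeholders, not taken from the patch:

resource "aws_ssm_association" "example" {
  name = "AWS-UpdateSSMAgent"

  # Skip the immediate first run; only run on the cron schedule below.
  apply_only_at_cron_interval = true
  schedule_expression         = "cron(0 2 ? * SUN *)"

  targets {
    key    = "tag:Environment"
    values = ["production"]
  }
}

Because AWS creates a new association version on every update, the patch passes the flag through on both the create and update paths, as its code comment notes.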
From 5ac14737376b66006ae4c176b72833dca0476306 Mon Sep 17 00:00:00 2001 From: nikhil-goenka <70277861+nikhil-goenka@users.noreply.github.com> Date: Sat, 5 Sep 2020 13:53:52 +0530 Subject: [PATCH 0053/1212] Support ApplyOnlyAtCronInterval for aws_ssm_association - test case --- aws/resource_aws_ssm_association_test.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/aws/resource_aws_ssm_association_test.go b/aws/resource_aws_ssm_association_test.go index 293291f472f..8d80f6d280a 100644 --- a/aws/resource_aws_ssm_association_test.go +++ b/aws/resource_aws_ssm_association_test.go @@ -367,7 +367,7 @@ func TestAccAWSSSMAssociation_withAutomationTargetParamName(t *testing.T) { }) } -func TestAccAWSSSMAssociation_withScheduleExpression(t *testing.T) { +func TestAccAWSSSMAssociation_withScheduleExpressionAndCronInterval(t *testing.T) { name := acctest.RandString(10) resourceName := "aws_ssm_association.test" @@ -380,8 +380,8 @@ func TestAccAWSSSMAssociation_withScheduleExpression(t *testing.T) { Config: testAccAWSSSMAssociationBasicConfigWithScheduleExpression(name), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSSMAssociationExists(resourceName), - resource.TestCheckResourceAttr( - resourceName, "schedule_expression", "cron(0 16 ? * TUE *)"), + resource.TestCheckResourceAttr(resourceName, "schedule_expression", "cron(0 16 ? * TUE *)"), + resource.TestCheckResourceAttr(resourceName, "apply_only_at_cron_interval", "true"), ), }, { @@ -393,8 +393,8 @@ func TestAccAWSSSMAssociation_withScheduleExpression(t *testing.T) { Config: testAccAWSSSMAssociationBasicConfigWithScheduleExpressionUpdated(name), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSSMAssociationExists(resourceName), - resource.TestCheckResourceAttr( - resourceName, "schedule_expression", "cron(0 16 ? * WED *)"), + resource.TestCheckResourceAttr(resourceName, "schedule_expression", "cron(0 16 ? * WED *)"), + resource.TestCheckResourceAttr(resourceName, "apply_only_at_cron_interval", "false"), ), }, }, @@ -958,6 +958,7 @@ DOC resource "aws_ssm_association" "test" { name = aws_ssm_document.test.name schedule_expression = "cron(0 16 ? * TUE *)" + apply_only_at_cron_interval = true targets { key = "tag:Name" @@ -998,6 +999,7 @@ DOC resource "aws_ssm_association" "test" { name = aws_ssm_document.test.name schedule_expression = "cron(0 16 ? 
* WED *)" + apply_only_at_cron_interval = false targets { key = "tag:Name" From 820f72d86a3f6b8b09af6affad8f1df48a1443b2 Mon Sep 17 00:00:00 2001 From: Angel Abad Date: Thu, 24 Sep 2020 17:39:04 +0200 Subject: [PATCH 0054/1212] resource_aws_elasticache_replication_group: Add arn attribute --- aws/resource_aws_elasticache_replication_group.go | 14 ++++++++++++++ website/docs/r/elasticache_cluster.html.markdown | 2 +- .../r/elasticache_replication_group.html.markdown | 1 + 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index a72737d5e3c..5d3732fc113 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -34,6 +34,10 @@ func resourceAwsElasticacheReplicationGroup() *schema.Resource { Optional: true, Computed: true, }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, "at_rest_encryption_enabled": { Type: schema.TypeBool, Optional: true, @@ -521,6 +525,16 @@ func resourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta int } } + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "elasticache", + Region: meta.(*AWSClient).region, + AccountID: meta.(*AWSClient).accountid, + Resource: fmt.Sprintf("replicationgroup:%s", d.Id()), + }.String() + + d.Set("arn", arn) + return nil } diff --git a/website/docs/r/elasticache_cluster.html.markdown b/website/docs/r/elasticache_cluster.html.markdown index f68a2d5c79b..cb1e8810f0e 100644 --- a/website/docs/r/elasticache_cluster.html.markdown +++ b/website/docs/r/elasticache_cluster.html.markdown @@ -138,9 +138,9 @@ SNS topic to send ElastiCache notifications to. Example: In addition to all arguments above, the following attributes are exported: +* `arn` - The ARN of the created ElastiCache Cluster. * `cache_nodes` - List of node objects including `id`, `address`, `port` and `availability_zone`. Referenceable e.g. as `${aws_elasticache_cluster.bar.cache_nodes.0.address}` - * `configuration_endpoint` - (Memcached only) The configuration endpoint to allow host discovery. * `cluster_address` - (Memcached only) The DNS name of the cache cluster without the port appended. diff --git a/website/docs/r/elasticache_replication_group.html.markdown b/website/docs/r/elasticache_replication_group.html.markdown index b1ff694a792..5a8b83cd541 100644 --- a/website/docs/r/elasticache_replication_group.html.markdown +++ b/website/docs/r/elasticache_replication_group.html.markdown @@ -146,6 +146,7 @@ Cluster Mode (`cluster_mode`) supports the following: In addition to all arguments above, the following attributes are exported: +* `arn` - The ARN of the created ElastiCache Replication Group. * `id` - The ID of the ElastiCache Replication Group. * `configuration_endpoint_address` - The address of the replication group configuration endpoint when cluster mode is enabled. * `primary_endpoint_address` - (Redis only) The address of the endpoint for the primary node in the replication group, if the cluster mode is disabled. 
From d54270c0bd3b14d8637749e8c7f32aa88e7ecf2e Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Sun, 3 May 2020 17:22:49 +0300 Subject: [PATCH 0055/1212] add plan time validations: `budget_type`, `time_unit`, `subscriber_sns_topic_arns` add arn attribute --- aws/resource_aws_budgets_budget.go | 36 ++++++- aws/resource_aws_budgets_budget_test.go | 120 ++++++++++++------------ 2 files changed, 95 insertions(+), 61 deletions(-) diff --git a/aws/resource_aws_budgets_budget.go b/aws/resource_aws_budgets_budget.go index 1f65e6d4678..99ab6c402cc 100644 --- a/aws/resource_aws_budgets_budget.go +++ b/aws/resource_aws_budgets_budget.go @@ -7,6 +7,7 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/budgets" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -16,6 +17,10 @@ import ( func resourceAwsBudgetsBudget() *schema.Resource { return &schema.Resource{ Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, "account_id": { Type: schema.TypeString, Computed: true, @@ -39,6 +44,14 @@ func resourceAwsBudgetsBudget() *schema.Resource { "budget_type": { Type: schema.TypeString, Required: true, + ValidateFunc: validation.StringInSlice([]string{ + budgets.BudgetTypeCost, + budgets.BudgetTypeRiCoverage, + budgets.BudgetTypeRiUtilization, + budgets.BudgetTypeSavingsPlansCoverage, + budgets.BudgetTypeSavingsPlansUtilization, + budgets.BudgetTypeUsage, + }, false), }, "limit_amount": { Type: schema.TypeString, @@ -125,6 +138,12 @@ func resourceAwsBudgetsBudget() *schema.Resource { "time_unit": { Type: schema.TypeString, Required: true, + ValidateFunc: validation.StringInSlice([]string{ + budgets.TimeUnitAnnually, + budgets.TimeUnitDaily, + budgets.TimeUnitMonthly, + budgets.TimeUnitQuarterly, + }, false), }, "cost_filters": { Type: schema.TypeMap, @@ -174,7 +193,10 @@ func resourceAwsBudgetsBudget() *schema.Resource { "subscriber_sns_topic_arns": { Type: schema.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateArn, + }, }, }, }, @@ -350,6 +372,14 @@ func resourceAwsBudgetsBudgetRead(d *schema.ResourceData, meta interface{}) erro d.Set("time_unit", budget.TimeUnit) + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "budgetservice", + AccountID: meta.(*AWSClient).accountid, + Resource: fmt.Sprintf("budget/%s", aws.StringValue(budget.BudgetName)), + } + d.Set("arn", arn.String()) + return resourceAwsBudgetsBudgetNotificationRead(d, meta) } @@ -402,9 +432,9 @@ func resourceAwsBudgetsBudgetNotificationRead(d *schema.ResourceData, meta inter emailSubscribers := make([]interface{}, 0) for _, subscriberOutput := range subscribersOutput.Subscribers { - if *subscriberOutput.SubscriptionType == budgets.SubscriptionTypeSns { + if aws.StringValue(subscriberOutput.SubscriptionType) == budgets.SubscriptionTypeSns { snsSubscribers = append(snsSubscribers, *subscriberOutput.Address) - } else if *subscriberOutput.SubscriptionType == budgets.SubscriptionTypeEmail { + } else if aws.StringValue(subscriberOutput.SubscriptionType) == budgets.SubscriptionTypeEmail { emailSubscribers = append(emailSubscribers, *subscriberOutput.Address) } } diff --git a/aws/resource_aws_budgets_budget_test.go b/aws/resource_aws_budgets_budget_test.go index 80e0a13f41b..782eee6e3d9 100644 --- a/aws/resource_aws_budgets_budget_test.go +++ 
b/aws/resource_aws_budgets_budget_test.go @@ -78,10 +78,11 @@ func testSweepBudgetsBudgets(region string) error { func TestAccAWSBudgetsBudget_basic(t *testing.T) { costFilterKey := "AZ" - name := fmt.Sprintf("test-budget-%d", acctest.RandInt()) - configBasicDefaults := testAccAWSBudgetsBudgetConfigDefaults(name) + rName := acctest.RandomWithPrefix("tf-acc-test") + configBasicDefaults := testAccAWSBudgetsBudgetConfigDefaults(rName) accountID := "012345678910" - configBasicUpdate := testAccAWSBudgetsBudgetConfigUpdate(name) + configBasicUpdate := testAccAWSBudgetsBudgetConfigUpdate(rName) + resourceName := "aws_budgets_budget.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBudgets(t) }, @@ -91,14 +92,15 @@ func TestAccAWSBudgetsBudget_basic(t *testing.T) { { Config: testAccAWSBudgetsBudgetConfig_BasicDefaults(configBasicDefaults, costFilterKey), Check: resource.ComposeTestCheckFunc( - testAccAWSBudgetsBudgetExists("aws_budgets_budget.foo", configBasicDefaults), - resource.TestMatchResourceAttr("aws_budgets_budget.foo", "name", regexp.MustCompile(*configBasicDefaults.BudgetName)), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "budget_type", *configBasicDefaults.BudgetType), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "limit_amount", *configBasicDefaults.BudgetLimit.Amount), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "limit_unit", *configBasicDefaults.BudgetLimit.Unit), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "time_period_start", configBasicDefaults.TimePeriod.Start.Format("2006-01-02_15:04")), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "time_period_end", configBasicDefaults.TimePeriod.End.Format("2006-01-02_15:04")), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "time_unit", *configBasicDefaults.TimeUnit), + testAccAWSBudgetsBudgetExists(resourceName, configBasicDefaults), + testAccMatchResourceAttrGlobalARN(resourceName, "arn", "budgetservice", regexp.MustCompile(`budget/.+`)), + resource.TestMatchResourceAttr(resourceName, "name", regexp.MustCompile(*configBasicDefaults.BudgetName)), + resource.TestCheckResourceAttr(resourceName, "budget_type", *configBasicDefaults.BudgetType), + resource.TestCheckResourceAttr(resourceName, "limit_amount", *configBasicDefaults.BudgetLimit.Amount), + resource.TestCheckResourceAttr(resourceName, "limit_unit", *configBasicDefaults.BudgetLimit.Unit), + resource.TestCheckResourceAttr(resourceName, "time_period_start", configBasicDefaults.TimePeriod.Start.Format("2006-01-02_15:04")), + resource.TestCheckResourceAttr(resourceName, "time_period_end", configBasicDefaults.TimePeriod.End.Format("2006-01-02_15:04")), + resource.TestCheckResourceAttr(resourceName, "time_unit", *configBasicDefaults.TimeUnit), ), }, { @@ -109,18 +111,18 @@ func TestAccAWSBudgetsBudget_basic(t *testing.T) { { Config: testAccAWSBudgetsBudgetConfig_Basic(configBasicUpdate, costFilterKey), Check: resource.ComposeTestCheckFunc( - testAccAWSBudgetsBudgetExists("aws_budgets_budget.foo", configBasicUpdate), - resource.TestMatchResourceAttr("aws_budgets_budget.foo", "name", regexp.MustCompile(*configBasicUpdate.BudgetName)), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "budget_type", *configBasicUpdate.BudgetType), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "limit_amount", *configBasicUpdate.BudgetLimit.Amount), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "limit_unit", 
*configBasicUpdate.BudgetLimit.Unit), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "time_period_start", configBasicUpdate.TimePeriod.Start.Format("2006-01-02_15:04")), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "time_period_end", configBasicUpdate.TimePeriod.End.Format("2006-01-02_15:04")), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "time_unit", *configBasicUpdate.TimeUnit), + testAccAWSBudgetsBudgetExists(resourceName, configBasicUpdate), + resource.TestMatchResourceAttr(resourceName, "name", regexp.MustCompile(*configBasicUpdate.BudgetName)), + resource.TestCheckResourceAttr(resourceName, "budget_type", *configBasicUpdate.BudgetType), + resource.TestCheckResourceAttr(resourceName, "limit_amount", *configBasicUpdate.BudgetLimit.Amount), + resource.TestCheckResourceAttr(resourceName, "limit_unit", *configBasicUpdate.BudgetLimit.Unit), + resource.TestCheckResourceAttr(resourceName, "time_period_start", configBasicUpdate.TimePeriod.Start.Format("2006-01-02_15:04")), + resource.TestCheckResourceAttr(resourceName, "time_period_end", configBasicUpdate.TimePeriod.End.Format("2006-01-02_15:04")), + resource.TestCheckResourceAttr(resourceName, "time_unit", *configBasicUpdate.TimeUnit), ), }, { - ResourceName: "aws_budgets_budget.foo", + ResourceName: resourceName, ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"name_prefix"}, @@ -131,9 +133,10 @@ func TestAccAWSBudgetsBudget_basic(t *testing.T) { func TestAccAWSBudgetsBudget_prefix(t *testing.T) { costFilterKey := "AZ" - name := "test-budget-" - configBasicDefaults := testAccAWSBudgetsBudgetConfigDefaults(name) - configBasicUpdate := testAccAWSBudgetsBudgetConfigUpdate(name) + rName := acctest.RandomWithPrefix("tf-acc-test") + configBasicDefaults := testAccAWSBudgetsBudgetConfigDefaults(rName) + configBasicUpdate := testAccAWSBudgetsBudgetConfigUpdate(rName) + resourceName := "aws_budgets_budget.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBudgets(t) }, @@ -143,33 +146,33 @@ func TestAccAWSBudgetsBudget_prefix(t *testing.T) { { Config: testAccAWSBudgetsBudgetConfig_PrefixDefaults(configBasicDefaults, costFilterKey), Check: resource.ComposeTestCheckFunc( - testAccAWSBudgetsBudgetExists("aws_budgets_budget.foo", configBasicDefaults), - resource.TestMatchResourceAttr("aws_budgets_budget.foo", "name_prefix", regexp.MustCompile(*configBasicDefaults.BudgetName)), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "budget_type", *configBasicDefaults.BudgetType), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "limit_amount", *configBasicDefaults.BudgetLimit.Amount), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "limit_unit", *configBasicDefaults.BudgetLimit.Unit), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "time_period_start", configBasicDefaults.TimePeriod.Start.Format("2006-01-02_15:04")), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "time_period_end", configBasicDefaults.TimePeriod.End.Format("2006-01-02_15:04")), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "time_unit", *configBasicDefaults.TimeUnit), + testAccAWSBudgetsBudgetExists(resourceName, configBasicDefaults), + resource.TestMatchResourceAttr(resourceName, "name_prefix", regexp.MustCompile(*configBasicDefaults.BudgetName)), + resource.TestCheckResourceAttr(resourceName, "budget_type", *configBasicDefaults.BudgetType), + resource.TestCheckResourceAttr(resourceName, "limit_amount", 
*configBasicDefaults.BudgetLimit.Amount), + resource.TestCheckResourceAttr(resourceName, "limit_unit", *configBasicDefaults.BudgetLimit.Unit), + resource.TestCheckResourceAttr(resourceName, "time_period_start", configBasicDefaults.TimePeriod.Start.Format("2006-01-02_15:04")), + resource.TestCheckResourceAttr(resourceName, "time_period_end", configBasicDefaults.TimePeriod.End.Format("2006-01-02_15:04")), + resource.TestCheckResourceAttr(resourceName, "time_unit", *configBasicDefaults.TimeUnit), ), }, { Config: testAccAWSBudgetsBudgetConfig_Prefix(configBasicUpdate, costFilterKey), Check: resource.ComposeTestCheckFunc( - testAccAWSBudgetsBudgetExists("aws_budgets_budget.foo", configBasicUpdate), - resource.TestMatchResourceAttr("aws_budgets_budget.foo", "name_prefix", regexp.MustCompile(*configBasicUpdate.BudgetName)), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "budget_type", *configBasicUpdate.BudgetType), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "limit_amount", *configBasicUpdate.BudgetLimit.Amount), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "limit_unit", *configBasicUpdate.BudgetLimit.Unit), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "time_period_start", configBasicUpdate.TimePeriod.Start.Format("2006-01-02_15:04")), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "time_period_end", configBasicUpdate.TimePeriod.End.Format("2006-01-02_15:04")), - resource.TestCheckResourceAttr("aws_budgets_budget.foo", "time_unit", *configBasicUpdate.TimeUnit), + testAccAWSBudgetsBudgetExists(resourceName, configBasicUpdate), + resource.TestMatchResourceAttr(resourceName, "name_prefix", regexp.MustCompile(*configBasicUpdate.BudgetName)), + resource.TestCheckResourceAttr(resourceName, "budget_type", *configBasicUpdate.BudgetType), + resource.TestCheckResourceAttr(resourceName, "limit_amount", *configBasicUpdate.BudgetLimit.Amount), + resource.TestCheckResourceAttr(resourceName, "limit_unit", *configBasicUpdate.BudgetLimit.Unit), + resource.TestCheckResourceAttr(resourceName, "time_period_start", configBasicUpdate.TimePeriod.Start.Format("2006-01-02_15:04")), + resource.TestCheckResourceAttr(resourceName, "time_period_end", configBasicUpdate.TimePeriod.End.Format("2006-01-02_15:04")), + resource.TestCheckResourceAttr(resourceName, "time_unit", *configBasicUpdate.TimeUnit), ), }, { - ResourceName: "aws_budgets_budget.foo", + ResourceName: resourceName, ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"name_prefix"}, @@ -179,9 +182,10 @@ func TestAccAWSBudgetsBudget_prefix(t *testing.T) { } func TestAccAWSBudgetsBudget_notification(t *testing.T) { - name := fmt.Sprintf("test-budget-%d", acctest.RandInt()) - configBasicDefaults := testAccAWSBudgetsBudgetConfigDefaults(name) + rName := acctest.RandomWithPrefix("tf-acc-test") + configBasicDefaults := testAccAWSBudgetsBudgetConfigDefaults(rName) configBasicDefaults.CostFilters = map[string][]*string{} + resourceName := "aws_budgets_budget.test" notificationConfigDefaults := []budgets.Notification{testAccAWSBudgetsBudgetNotificationConfigDefaults()} notificationConfigUpdated := []budgets.Notification{testAccAWSBudgetsBudgetNotificationConfigUpdate()} @@ -191,7 +195,7 @@ func TestAccAWSBudgetsBudget_notification(t *testing.T) { } noEmails := []string{} - oneEmail := []string{"foo@example.com"} + oneEmail := []string{"test@example.com"} oneOtherEmail := []string{"bar@example.com"} twoEmails := []string{"bar@example.com", "baz@example.com"} noTopics := []string{} 
@@ -207,42 +211,42 @@ func TestAccAWSBudgetsBudget_notification(t *testing.T) { Config: testAccAWSBudgetsBudgetConfigWithNotification_Basic(configBasicDefaults, notificationConfigDefaults, noEmails, noTopics), ExpectError: regexp.MustCompile(`Notification must have at least one subscriber`), Check: resource.ComposeTestCheckFunc( - testAccAWSBudgetsBudgetExists("aws_budgets_budget.foo", configBasicDefaults), + testAccAWSBudgetsBudgetExists(resourceName, configBasicDefaults), ), }, // Basic Notification with only email { Config: testAccAWSBudgetsBudgetConfigWithNotification_Basic(configBasicDefaults, notificationConfigDefaults, oneEmail, noTopics), Check: resource.ComposeTestCheckFunc( - testAccAWSBudgetsBudgetExists("aws_budgets_budget.foo", configBasicDefaults), + testAccAWSBudgetsBudgetExists(resourceName, configBasicDefaults), ), }, // Change only subscriber to a different e-mail { Config: testAccAWSBudgetsBudgetConfigWithNotification_Basic(configBasicDefaults, notificationConfigDefaults, oneOtherEmail, noTopics), Check: resource.ComposeTestCheckFunc( - testAccAWSBudgetsBudgetExists("aws_budgets_budget.foo", configBasicDefaults), + testAccAWSBudgetsBudgetExists(resourceName, configBasicDefaults), ), }, // Add a second e-mail and a topic { Config: testAccAWSBudgetsBudgetConfigWithNotification_Basic(configBasicDefaults, notificationConfigDefaults, twoEmails, oneTopic), Check: resource.ComposeTestCheckFunc( - testAccAWSBudgetsBudgetExists("aws_budgets_budget.foo", configBasicDefaults), + testAccAWSBudgetsBudgetExists(resourceName, configBasicDefaults), ), }, // Delete both E-Mails { Config: testAccAWSBudgetsBudgetConfigWithNotification_Basic(configBasicDefaults, notificationConfigDefaults, noEmails, oneTopic), Check: resource.ComposeTestCheckFunc( - testAccAWSBudgetsBudgetExists("aws_budgets_budget.foo", configBasicDefaults), + testAccAWSBudgetsBudgetExists(resourceName, configBasicDefaults), ), }, // Swap one Topic fo one E-Mail { Config: testAccAWSBudgetsBudgetConfigWithNotification_Basic(configBasicDefaults, notificationConfigDefaults, oneEmail, noTopics), Check: resource.ComposeTestCheckFunc( - testAccAWSBudgetsBudgetExists("aws_budgets_budget.foo", configBasicDefaults), + testAccAWSBudgetsBudgetExists(resourceName, configBasicDefaults), ), }, // Can't update without at least one subscriber @@ -250,7 +254,7 @@ func TestAccAWSBudgetsBudget_notification(t *testing.T) { Config: testAccAWSBudgetsBudgetConfigWithNotification_Basic(configBasicDefaults, notificationConfigDefaults, noEmails, noTopics), ExpectError: regexp.MustCompile(`Notification must have at least one subscriber`), Check: resource.ComposeTestCheckFunc( - testAccAWSBudgetsBudgetExists("aws_budgets_budget.foo", configBasicDefaults), + testAccAWSBudgetsBudgetExists(resourceName, configBasicDefaults), ), }, // Update all non-subscription parameters @@ -258,7 +262,7 @@ func TestAccAWSBudgetsBudget_notification(t *testing.T) { Config: testAccAWSBudgetsBudgetConfigWithNotification_Basic(configBasicDefaults, notificationConfigUpdated, noEmails, noTopics), ExpectError: regexp.MustCompile(`Notification must have at least one subscriber`), Check: resource.ComposeTestCheckFunc( - testAccAWSBudgetsBudgetExists("aws_budgets_budget.foo", configBasicDefaults), + testAccAWSBudgetsBudgetExists(resourceName, configBasicDefaults), ), }, // Add a second subscription @@ -266,7 +270,7 @@ func TestAccAWSBudgetsBudget_notification(t *testing.T) { Config: testAccAWSBudgetsBudgetConfigWithNotification_Basic(configBasicDefaults, twoNotificationConfigs, 
noEmails, noTopics), ExpectError: regexp.MustCompile(`Notification must have at least one subscriber`), Check: resource.ComposeTestCheckFunc( - testAccAWSBudgetsBudgetExists("aws_budgets_budget.foo", configBasicDefaults), + testAccAWSBudgetsBudgetExists(resourceName, configBasicDefaults), ), }, }, @@ -506,7 +510,7 @@ func testAccAWSBudgetsBudgetConfig_WithAccountID(budgetConfig budgets.Budget, ac costFilterValue := *budgetConfig.CostFilters[costFilterKey][0] return fmt.Sprintf(` -resource "aws_budgets_budget" "foo" { +resource "aws_budgets_budget" "test" { account_id = "%s" name_prefix = "%s" budget_type = "%s" @@ -527,7 +531,7 @@ func testAccAWSBudgetsBudgetConfig_PrefixDefaults(budgetConfig budgets.Budget, c costFilterValue := *budgetConfig.CostFilters[costFilterKey][0] return fmt.Sprintf(` -resource "aws_budgets_budget" "foo" { +resource "aws_budgets_budget" "test" { name_prefix = "%s" budget_type = "%s" limit_amount = "%s" @@ -548,7 +552,7 @@ func testAccAWSBudgetsBudgetConfig_Prefix(budgetConfig budgets.Budget, costFilte costFilterValue := *budgetConfig.CostFilters[costFilterKey][0] return fmt.Sprintf(` -resource "aws_budgets_budget" "foo" { +resource "aws_budgets_budget" "test" { name_prefix = "%s" budget_type = "%s" limit_amount = "%s" @@ -575,7 +579,7 @@ func testAccAWSBudgetsBudgetConfig_BasicDefaults(budgetConfig budgets.Budget, co costFilterValue := *budgetConfig.CostFilters[costFilterKey][0] return fmt.Sprintf(` -resource "aws_budgets_budget" "foo" { +resource "aws_budgets_budget" "test" { name = "%s" budget_type = "%s" limit_amount = "%s" @@ -596,7 +600,7 @@ func testAccAWSBudgetsBudgetConfig_Basic(budgetConfig budgets.Budget, costFilter costFilterValue := *budgetConfig.CostFilters[costFilterKey][0] return fmt.Sprintf(` -resource "aws_budgets_budget" "foo" { +resource "aws_budgets_budget" "test" { name = "%s" budget_type = "%s" limit_amount = "%s" @@ -633,7 +637,7 @@ resource "aws_sns_topic" "budget_notifications" { name_prefix = "user-updates-topic" } -resource "aws_budgets_budget" "foo" { +resource "aws_budgets_budget" "test" { name = "%s" budget_type = "%s" limit_amount = "%s" From d41b006060c927f637e39fb87c23feb4752d983e Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Sun, 3 May 2020 17:25:07 +0300 Subject: [PATCH 0056/1212] add docs --- website/docs/r/budgets_budget.html.markdown | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/website/docs/r/budgets_budget.html.markdown b/website/docs/r/budgets_budget.html.markdown index 75ffbcb0f0d..ecead1ce709 100644 --- a/website/docs/r/budgets_budget.html.markdown +++ b/website/docs/r/budgets_budget.html.markdown @@ -123,22 +123,24 @@ The following arguments are supported: * `name` - (Optional) The name of a budget. Unique within accounts. * `name_prefix` - (Optional) The prefix of the name of a budget. Unique within accounts. * `budget_type` - (Required) Whether this budget tracks monetary cost or usage. -* `cost_filters` - (Optional) Map of [CostFilters](#CostFilters) key/value pairs to apply to the budget. -* `cost_types` - (Optional) Object containing [CostTypes](#CostTypes) The types of cost included in a budget, such as tax and subscriptions.. +* `cost_filters` - (Optional) Map of [Cost Filters](#Cost-Filters) key/value pairs to apply to the budget. +* `cost_types` - (Optional) Object containing [Cost Types](#Cost-Types) The types of cost included in a budget, such as tax and subscriptions.. * `limit_amount` - (Required) The amount of cost or usage being measured for a budget. 
* `limit_unit` - (Required) The unit of measurement used for the budget forecast, actual spend, or budget threshold, such as dollars or GB. See [Spend](http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/data-type-spend.html) documentation. * `time_period_end` - (Optional) The end of the time period covered by the budget. There are no restrictions on the end date. Format: `2017-01-01_12:00`. * `time_period_start` - (Required) The start of the time period covered by the budget. The start date must come before the end date. Format: `2017-01-01_12:00`. -* `time_unit` - (Required) The length of time until a budget resets the actual and forecasted spend. Valid values: `MONTHLY`, `QUARTERLY`, `ANNUALLY`. -* `notification` - (Optional) Object containing [Budget Notifications](#BudgetNotification). Can be used multiple times to define more than one budget notification +* `time_unit` - (Required) The length of time until a budget resets the actual and forecasted spend. Valid values: `MONTHLY`, `QUARTERLY`, `ANNUALLY`, and `DAILY`. +* `notification` - (Optional) Object containing [Budget Notifications](#Budget-Notification). Can be used multiple times to define more than one budget notification ## Attributes Reference In addition to all arguments above, the following attributes are exported: * `id` - id of resource. +* `arn` - The ARN of the budget. -### CostTypes + +### Cost Types Valid keys for `cost_types` parameter. @@ -156,7 +158,7 @@ Valid keys for `cost_types` parameter. Refer to [AWS CostTypes documentation](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_budgets_CostTypes.html) for further detail. -### CostFilters +### Cost Filters Valid keys for `cost_filters` parameter vary depending on the `budget_type` value. @@ -177,7 +179,7 @@ Valid keys for `cost_filters` parameter vary depending on the `budget_type` valu Refer to [AWS CostFilter documentation](http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/data-type-filter.html) for further detail. -### BudgetNotification +### Budget Notification Valid keys for `notification` parameter. 
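To tie the documented arguments together, here is a hedged configuration sketch exercising the newly validated values; the names, amounts, dates and SNS topic are placeholders, and the quoted strings correspond to the budgets SDK constants used by the plan-time validation added earlier in this series:

resource "aws_sns_topic" "budget_alerts" {
  name = "budget-alerts"
}

resource "aws_budgets_budget" "example" {
  name              = "example-monthly-cost"
  budget_type       = "COST" # validated against the budgets budget type constants
  limit_amount      = "100"
  limit_unit        = "USD"
  time_period_start = "2020-01-01_00:00"
  time_unit         = "MONTHLY" # MONTHLY, QUARTERLY, ANNUALLY or DAILY

  notification {
    comparison_operator = "GREATER_THAN"
    threshold           = 80
    threshold_type      = "PERCENTAGE"
    notification_type   = "FORECASTED"

    # Each entry must now pass the provider's ARN validation.
    subscriber_sns_topic_arns = [aws_sns_topic.budget_alerts.arn]
  }
}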
From 7b125fa8d963f72ea4ce3e6dd50fbd850bff7aba Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Wed, 6 May 2020 13:14:35 +0300 Subject: [PATCH 0057/1212] added disappears test case --- aws/resource_aws_budgets_budget_test.go | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/aws/resource_aws_budgets_budget_test.go b/aws/resource_aws_budgets_budget_test.go index 782eee6e3d9..4ca795c97c9 100644 --- a/aws/resource_aws_budgets_budget_test.go +++ b/aws/resource_aws_budgets_budget_test.go @@ -277,6 +277,29 @@ func TestAccAWSBudgetsBudget_notification(t *testing.T) { }) } +func TestAccAWSBudgetsBudget_disappears(t *testing.T) { + costFilterKey := "AZ" + rName := acctest.RandomWithPrefix("tf-acc-test") + configBasicDefaults := testAccAWSBudgetsBudgetConfigDefaults(rName) + resourceName := "aws_budgets_budget.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBudgets(t) }, + Providers: testAccProviders, + CheckDestroy: testAccAWSBudgetsBudgetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSBudgetsBudgetConfig_BasicDefaults(configBasicDefaults, costFilterKey), + Check: resource.ComposeTestCheckFunc( + testAccAWSBudgetsBudgetExists(resourceName, configBasicDefaults), + testAccCheckResourceDisappears(testAccProvider, resourceAwsBudgetsBudget(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func testAccAWSBudgetsBudgetExists(resourceName string, config budgets.Budget) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] From b315bb110dc3689df0d31384679b21c80e44b65e Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Thu, 18 Jun 2020 12:57:12 +0300 Subject: [PATCH 0058/1212] sdk wrapper --- aws/resource_aws_budgets_budget.go | 2 +- aws/resource_aws_budgets_budget_test.go | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_budgets_budget.go b/aws/resource_aws_budgets_budget.go index 99ab6c402cc..4f424503fad 100644 --- a/aws/resource_aws_budgets_budget.go +++ b/aws/resource_aws_budgets_budget.go @@ -244,7 +244,7 @@ func resourceAwsBudgetsBudgetCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("create budget failed: %v", err) } - d.SetId(fmt.Sprintf("%s:%s", accountID, *budget.BudgetName)) + d.SetId(fmt.Sprintf("%s:%s", accountID, aws.StringValue(budget.BudgetName))) notificationsRaw := d.Get("notification").(*schema.Set).List() notifications, subscribers := expandBudgetNotificationsUnmarshal(notificationsRaw) diff --git a/aws/resource_aws_budgets_budget_test.go b/aws/resource_aws_budgets_budget_test.go index 4ca795c97c9..af1346c17b3 100644 --- a/aws/resource_aws_budgets_budget_test.go +++ b/aws/resource_aws_budgets_budget_test.go @@ -93,7 +93,7 @@ func TestAccAWSBudgetsBudget_basic(t *testing.T) { Config: testAccAWSBudgetsBudgetConfig_BasicDefaults(configBasicDefaults, costFilterKey), Check: resource.ComposeTestCheckFunc( testAccAWSBudgetsBudgetExists(resourceName, configBasicDefaults), - testAccMatchResourceAttrGlobalARN(resourceName, "arn", "budgetservice", regexp.MustCompile(`budget/.+`)), + testAccCheckResourceAttrGlobalARN(resourceName, "arn", "budgetservice", fmt.Sprintf(`budget/%s`, rName)), resource.TestMatchResourceAttr(resourceName, "name", regexp.MustCompile(*configBasicDefaults.BudgetName)), resource.TestCheckResourceAttr(resourceName, "budget_type", *configBasicDefaults.BudgetType), resource.TestCheckResourceAttr(resourceName, "limit_amount", 
*configBasicDefaults.BudgetLimit.Amount), @@ -326,8 +326,9 @@ func testAccAWSBudgetsBudgetExists(resourceName string, config budgets.Budget) r return fmt.Errorf("No budget returned %v in %v", b.Budget, b) } - if *b.Budget.BudgetLimit.Amount != *config.BudgetLimit.Amount { - return fmt.Errorf("budget limit incorrectly set %v != %v", *config.BudgetLimit.Amount, *b.Budget.BudgetLimit.Amount) + if aws.StringValue(b.Budget.BudgetLimit.Amount) != aws.StringValue(config.BudgetLimit.Amount) { + return fmt.Errorf("budget limit incorrectly set %v != %v", aws.StringValue(config.BudgetLimit.Amount), + aws.StringValue(b.Budget.BudgetLimit.Amount)) } if err := testAccAWSBudgetsBudgetCheckCostTypes(config, *b.Budget.CostTypes); err != nil { From 67cb1602000a1fc3b63c8beed4ee585bd90ffd4a Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Fri, 4 Sep 2020 14:35:09 +0300 Subject: [PATCH 0059/1212] Revert "resource/aws_storagegateway_cached_iscsi_volume: Add kms_encrypted and kms_key arguments (#12066)" This reverts commit eb63cfb2 --- ..._aws_storagegateway_cached_iscsi_volume.go | 28 +------------------ ...egateway_cached_iscsi_volume.html.markdown | 2 -- 2 files changed, 1 insertion(+), 29 deletions(-) diff --git a/aws/resource_aws_storagegateway_cached_iscsi_volume.go b/aws/resource_aws_storagegateway_cached_iscsi_volume.go index c56811ea6d5..c35ed2901f9 100644 --- a/aws/resource_aws_storagegateway_cached_iscsi_volume.go +++ b/aws/resource_aws_storagegateway_cached_iscsi_volume.go @@ -87,18 +87,6 @@ func resourceAwsStorageGatewayCachedIscsiVolume() *schema.Resource { ForceNew: true, }, "tags": tagsSchema(), - "kms_encrypted": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - "kms_key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateArn, - RequiredWith: []string{"kms_encrypted"}, - }, }, } } @@ -123,14 +111,6 @@ func resourceAwsStorageGatewayCachedIscsiVolumeCreate(d *schema.ResourceData, me input.SourceVolumeARN = aws.String(v.(string)) } - if v, ok := d.GetOk("kms_key"); ok { - input.KMSKey = aws.String(v.(string)) - } - - if v, ok := d.GetOk("kms_encrypted"); ok { - input.KMSEncrypted = aws.Bool(v.(bool)) - } - log.Printf("[DEBUG] Creating Storage Gateway cached iSCSI volume: %s", input) output, err := conn.CreateCachediSCSIVolume(input) if err != nil { @@ -167,7 +147,7 @@ func resourceAwsStorageGatewayCachedIscsiVolumeRead(d *schema.ResourceData, meta output, err := conn.DescribeCachediSCSIVolumes(input) if err != nil { - if isAWSErr(err, storagegateway.ErrorCodeVolumeNotFound, "") || isAWSErr(err, storagegateway.ErrCodeInvalidGatewayRequestException, "The specified volume was not found") { + if isAWSErr(err, storagegateway.ErrorCodeVolumeNotFound, "") { log.Printf("[WARN] Storage Gateway cached iSCSI volume %q not found, removing from state", d.Id()) d.SetId("") return nil @@ -189,12 +169,6 @@ func resourceAwsStorageGatewayCachedIscsiVolumeRead(d *schema.ResourceData, meta d.Set("volume_arn", arn) d.Set("volume_id", aws.StringValue(volume.VolumeId)) d.Set("volume_size_in_bytes", int(aws.Int64Value(volume.VolumeSizeInBytes))) - d.Set("kms_key", volume.KMSKey) - if volume.KMSKey != nil { - d.Set("kms_encrypted", true) - } else { - d.Set("kms_encrypted", false) - } tags, err := keyvaluetags.StoragegatewayListTags(conn, arn) if err != nil { diff --git a/website/docs/r/storagegateway_cached_iscsi_volume.html.markdown b/website/docs/r/storagegateway_cached_iscsi_volume.html.markdown index da21068a415..031f1970149 100644 --- 
a/website/docs/r/storagegateway_cached_iscsi_volume.html.markdown +++ b/website/docs/r/storagegateway_cached_iscsi_volume.html.markdown @@ -63,8 +63,6 @@ The following arguments are supported: * `volume_size_in_bytes` - (Required) The size of the volume in bytes. * `snapshot_id` - (Optional) The snapshot ID of the snapshot to restore as the new cached volume. e.g. `snap-1122aabb`. * `source_volume_arn` - (Optional) The ARN for an existing volume. Specifying this ARN makes the new volume into an exact copy of the specified existing volume's latest recovery point. The `volume_size_in_bytes` value for this new volume must be equal to or larger than the size of the existing volume, in bytes. -* `kms_encrypted` - (Optional) Set to `true` to use Amazon S3 server side encryption with your own AWS KMS key, or `false` to use a key managed by Amazon S3. -* `kms_key` - (Optional) The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. Is required when `kms_encrypted` is set. * `tags` - (Optional) Key-value map of resource tags ## Attribute Reference From 654ea3181457cd9638582b2f9e8f10874f05aca9 Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Fri, 4 Sep 2020 14:36:29 +0300 Subject: [PATCH 0060/1212] Revert "Revert "resource/aws_storagegateway_cached_iscsi_volume: Add kms_encrypted and kms_key arguments (#12066)"" This reverts commit 5ae123f4 --- ..._aws_storagegateway_cached_iscsi_volume.go | 28 ++++++++++++++++++- ...egateway_cached_iscsi_volume.html.markdown | 2 ++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_storagegateway_cached_iscsi_volume.go b/aws/resource_aws_storagegateway_cached_iscsi_volume.go index c35ed2901f9..c56811ea6d5 100644 --- a/aws/resource_aws_storagegateway_cached_iscsi_volume.go +++ b/aws/resource_aws_storagegateway_cached_iscsi_volume.go @@ -87,6 +87,18 @@ func resourceAwsStorageGatewayCachedIscsiVolume() *schema.Resource { ForceNew: true, }, "tags": tagsSchema(), + "kms_encrypted": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + "kms_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateArn, + RequiredWith: []string{"kms_encrypted"}, + }, }, } } @@ -111,6 +123,14 @@ func resourceAwsStorageGatewayCachedIscsiVolumeCreate(d *schema.ResourceData, me input.SourceVolumeARN = aws.String(v.(string)) } + if v, ok := d.GetOk("kms_key"); ok { + input.KMSKey = aws.String(v.(string)) + } + + if v, ok := d.GetOk("kms_encrypted"); ok { + input.KMSEncrypted = aws.Bool(v.(bool)) + } + log.Printf("[DEBUG] Creating Storage Gateway cached iSCSI volume: %s", input) output, err := conn.CreateCachediSCSIVolume(input) if err != nil { @@ -147,7 +167,7 @@ func resourceAwsStorageGatewayCachedIscsiVolumeRead(d *schema.ResourceData, meta output, err := conn.DescribeCachediSCSIVolumes(input) if err != nil { - if isAWSErr(err, storagegateway.ErrorCodeVolumeNotFound, "") { + if isAWSErr(err, storagegateway.ErrorCodeVolumeNotFound, "") || isAWSErr(err, storagegateway.ErrCodeInvalidGatewayRequestException, "The specified volume was not found") { log.Printf("[WARN] Storage Gateway cached iSCSI volume %q not found, removing from state", d.Id()) d.SetId("") return nil @@ -169,6 +189,12 @@ func resourceAwsStorageGatewayCachedIscsiVolumeRead(d *schema.ResourceData, meta d.Set("volume_arn", arn) d.Set("volume_id", aws.StringValue(volume.VolumeId)) d.Set("volume_size_in_bytes", int(aws.Int64Value(volume.VolumeSizeInBytes))) + d.Set("kms_key", volume.KMSKey) + if volume.KMSKey != nil { + 
d.Set("kms_encrypted", true) + } else { + d.Set("kms_encrypted", false) + } tags, err := keyvaluetags.StoragegatewayListTags(conn, arn) if err != nil { diff --git a/website/docs/r/storagegateway_cached_iscsi_volume.html.markdown b/website/docs/r/storagegateway_cached_iscsi_volume.html.markdown index 031f1970149..da21068a415 100644 --- a/website/docs/r/storagegateway_cached_iscsi_volume.html.markdown +++ b/website/docs/r/storagegateway_cached_iscsi_volume.html.markdown @@ -63,6 +63,8 @@ The following arguments are supported: * `volume_size_in_bytes` - (Required) The size of the volume in bytes. * `snapshot_id` - (Optional) The snapshot ID of the snapshot to restore as the new cached volume. e.g. `snap-1122aabb`. * `source_volume_arn` - (Optional) The ARN for an existing volume. Specifying this ARN makes the new volume into an exact copy of the specified existing volume's latest recovery point. The `volume_size_in_bytes` value for this new volume must be equal to or larger than the size of the existing volume, in bytes. +* `kms_encrypted` - (Optional) Set to `true` to use Amazon S3 server side encryption with your own AWS KMS key, or `false` to use a key managed by Amazon S3. +* `kms_key` - (Optional) The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. Is required when `kms_encrypted` is set. * `tags` - (Optional) Key-value map of resource tags ## Attribute Reference From 575df9f3e54cda45da8a0eefceb8bfd68f601a5d Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Fri, 4 Sep 2020 14:38:58 +0300 Subject: [PATCH 0061/1212] use enum slices --- aws/resource_aws_budgets_budget.go | 52 +++++++++--------------------- 1 file changed, 15 insertions(+), 37 deletions(-) diff --git a/aws/resource_aws_budgets_budget.go b/aws/resource_aws_budgets_budget.go index 4f424503fad..7979bae02c1 100644 --- a/aws/resource_aws_budgets_budget.go +++ b/aws/resource_aws_budgets_budget.go @@ -42,16 +42,9 @@ func resourceAwsBudgetsBudget() *schema.Resource { ForceNew: true, }, "budget_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - budgets.BudgetTypeCost, - budgets.BudgetTypeRiCoverage, - budgets.BudgetTypeRiUtilization, - budgets.BudgetTypeSavingsPlansCoverage, - budgets.BudgetTypeSavingsPlansUtilization, - budgets.BudgetTypeUsage, - }, false), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(budgets.BudgetType_Values(), false), }, "limit_amount": { Type: schema.TypeString, @@ -136,14 +129,9 @@ func resourceAwsBudgetsBudget() *schema.Resource { Default: "2087-06-15_00:00", }, "time_unit": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - budgets.TimeUnitAnnually, - budgets.TimeUnitDaily, - budgets.TimeUnitMonthly, - budgets.TimeUnitQuarterly, - }, false), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(budgets.TimeUnit_Values(), false), }, "cost_filters": { Type: schema.TypeMap, @@ -157,33 +145,23 @@ func resourceAwsBudgetsBudget() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "comparison_operator": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - budgets.ComparisonOperatorEqualTo, - budgets.ComparisonOperatorGreaterThan, - budgets.ComparisonOperatorLessThan, - }, false), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(budgets.ComparisonOperator_Values(), false), }, "threshold": { Type: 
schema.TypeFloat, Required: true, }, "threshold_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - budgets.ThresholdTypeAbsoluteValue, - budgets.ThresholdTypePercentage, - }, false), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(budgets.ThresholdType_Values(), false), }, "notification_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - budgets.NotificationTypeActual, - budgets.NotificationTypeForecasted, - }, false), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(budgets.NotificationType_Values(), false), }, "subscriber_email_addresses": { Type: schema.TypeSet, From b547329e7f27124759401bb1285d0ab23c417b2a Mon Sep 17 00:00:00 2001 From: Tadhg McGahern Date: Mon, 5 Oct 2020 20:02:07 +0100 Subject: [PATCH 0062/1212] Add TLS config to apigw integration Co-authored-by: jake-mcdermott --- aws/resource_aws_api_gateway_integration.go | 63 +++++++++++++++++++ ...source_aws_api_gateway_integration_test.go | 23 ++++++- .../r/api_gateway_integration.html.markdown | 5 ++ 3 files changed, 88 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_api_gateway_integration.go b/aws/resource_aws_api_gateway_integration.go index 8ec56b5a794..8e5f1faf04e 100644 --- a/aws/resource_aws_api_gateway_integration.go +++ b/aws/resource_aws_api_gateway_integration.go @@ -150,6 +150,21 @@ func resourceAwsApiGatewayIntegration() *schema.Resource { ValidateFunc: validation.IntBetween(50, 29000), Default: 29000, }, + + "tls_config": { + Type: schema.TypeList, + Optional: true, + MinItems: 0, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "insecure_skip_verification": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, }, } } @@ -224,6 +239,11 @@ func resourceAwsApiGatewayIntegrationCreate(d *schema.ResourceData, meta interfa timeoutInMillis = aws.Int64(int64(v.(int))) } + var tlsConfig *apigateway.TlsConfig + if v, ok := d.GetOk("tls_config"); ok { + tlsConfig = expandApiGatewayTlsConfig(v.([]interface{})) + } + _, err := conn.PutIntegration(&apigateway.PutIntegrationInput{ HttpMethod: aws.String(d.Get("http_method").(string)), ResourceId: aws.String(d.Get("resource_id").(string)), @@ -241,6 +261,7 @@ func resourceAwsApiGatewayIntegrationCreate(d *schema.ResourceData, meta interfa ConnectionType: connectionType, ConnectionId: connectionId, TimeoutInMillis: timeoutInMillis, + TlsConfig: tlsConfig, }) if err != nil { return fmt.Errorf("Error creating API Gateway Integration: %s", err) @@ -301,6 +322,10 @@ func resourceAwsApiGatewayIntegrationRead(d *schema.ResourceData, meta interface d.Set("type", integration.Type) d.Set("uri", integration.Uri) + if err := d.Set("tls_config", flattenApiGatewayTlsConfig(integration.TlsConfig)); err != nil { + return fmt.Errorf("error setting tls_config: %s", err) + } + return nil } @@ -464,6 +489,20 @@ func resourceAwsApiGatewayIntegrationUpdate(d *schema.ResourceData, meta interfa }) } + if d.HasChange("tls_config.0.insecure_skip_verification") { + // If the tls_config block was removed entirely, do nothing: a patch + // operation is only appended while the block is still present.
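+
+		// For reference: API Gateway applies updates as JSON-Patch-style
+		// operations, so the PatchOperation appended below corresponds
+		// roughly to the following document (illustrative sketch only):
+		//
+		//   {"op": "replace", "path": "/tlsConfig/insecureSkipVerification", "value": "true"}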
+ if v, ok := d.GetOk("tls_config"); ok && len(v.([]interface{})) > 0 { + m := v.([]interface{})[0].(map[string]interface{}) + + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpReplace), + Path: aws.String("/tlsConfig/insecureSkipVerification"), + Value: aws.String(strconv.FormatBool(m["insecure_skip_verification"].(bool))), + }) + } + } + params := &apigateway.UpdateIntegrationInput{ HttpMethod: aws.String(d.Get("http_method").(string)), ResourceId: aws.String(d.Get("resource_id").(string)), @@ -501,3 +540,27 @@ func resourceAwsApiGatewayIntegrationDelete(d *schema.ResourceData, meta interfa return nil } + +func expandApiGatewayTlsConfig(vConfig []interface{}) *apigateway.TlsConfig { + config := &apigateway.TlsConfig{} + + if len(vConfig) == 0 || vConfig[0] == nil { + return config + } + mConfig := vConfig[0].(map[string]interface{}) + + if insecureSkipVerification, ok := mConfig["insecure_skip_verification"].(bool); ok { + config.InsecureSkipVerification = aws.Bool(insecureSkipVerification) + } + return config +} + +func flattenApiGatewayTlsConfig(config *apigateway.TlsConfig) []interface{} { + if config == nil { + return []interface{}{} + } + + return []interface{}{map[string]interface{}{ + "insecure_skip_verification": aws.BoolValue(config.InsecureSkipVerification), + }} +} diff --git a/aws/resource_aws_api_gateway_integration_test.go b/aws/resource_aws_api_gateway_integration_test.go index 5c0c0202f78..1c11e0f5740 100644 --- a/aws/resource_aws_api_gateway_integration_test.go +++ b/aws/resource_aws_api_gateway_integration_test.go @@ -272,11 +272,10 @@ func TestAccAWSAPIGatewayIntegration_integrationType(t *testing.T) { ), }, { - Config: testAccAWSAPIGatewayIntegrationConfig_IntegrationTypeInternet(rName), + Config: testAccAWSAPIGatewayIntegrationConfig_IntegrationTLSConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSAPIGatewayIntegrationExists(resourceName, &conf), - resource.TestCheckResourceAttr(resourceName, "connection_type", "INTERNET"), - resource.TestCheckResourceAttr(resourceName, "connection_id", ""), + resource.TestCheckResourceAttr(resourceName, "tls_config.0.insecure_skip_verification", "true"), ), }, { @@ -814,3 +813,21 @@ resource "aws_api_gateway_integration" "test" { } ` } + +func testAccAWSAPIGatewayIntegrationConfig_IntegrationTLSConfig(rName string) string { + return testAccAWSAPIGatewayIntegrationConfig_IntegrationTypeBase(rName) + ` +resource "aws_api_gateway_integration" "test" { + rest_api_id = aws_api_gateway_rest_api.test.id + resource_id = aws_api_gateway_resource.test.id + http_method = aws_api_gateway_method.test.http_method + type = "HTTP" + uri = "https://www.google.de" + integration_http_method = "GET" + passthrough_behavior = "WHEN_NO_MATCH" + content_handling = "CONVERT_TO_TEXT" + tls_config { + insecure_skip_verification = true + } +} +` +} diff --git a/website/docs/r/api_gateway_integration.html.markdown b/website/docs/r/api_gateway_integration.html.markdown index aa6376a4e3a..61a9586f285 100644 --- a/website/docs/r/api_gateway_integration.html.markdown +++ b/website/docs/r/api_gateway_integration.html.markdown @@ -40,6 +40,10 @@ resource "aws_api_gateway_integration" "MyDemoIntegration" { cache_namespace = "foobar" timeout_milliseconds = 29000 + tls_config { + insecure_skip_verification = true + } + request_parameters = { "integration.request.header.X-Authorization" = "'static'" } @@ -229,6 +233,7 @@ The following arguments are supported: * `cache_namespace` - (Optional) The integration's 
cache namespace. * `content_handling` - (Optional) Specifies how to handle request payload content type conversions. Supported values are `CONVERT_TO_BINARY` and `CONVERT_TO_TEXT`. If this property is not defined, the request payload will be passed through from the method request to the integration request without modification, provided that the `passthrough_behavior` is configured to support payload pass-through. * `timeout_milliseconds` - (Optional) Custom timeout between 50 and 29,000 milliseconds. The default value is 29,000 milliseconds. +* `tls_config` - (Optional) Configuration block specifying the TLS settings for the integration. Supports an `insecure_skip_verification` toggle. ## Import From 9b5a0d2af8684321e60161ac72df3fa2a59ff6a5 Mon Sep 17 00:00:00 2001 From: Tadhg McGahern Date: Mon, 5 Oct 2020 20:21:34 +0100 Subject: [PATCH 0063/1212] Add TLS config to apigw integration Co-authored-by: jake-mcdermott --- aws/resource_aws_api_gateway_integration_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/aws/resource_aws_api_gateway_integration_test.go b/aws/resource_aws_api_gateway_integration_test.go index 1c11e0f5740..bd8c9c855cd 100644 --- a/aws/resource_aws_api_gateway_integration_test.go +++ b/aws/resource_aws_api_gateway_integration_test.go @@ -820,11 +820,13 @@ resource "aws_api_gateway_integration" "test" { rest_api_id = aws_api_gateway_rest_api.test.id resource_id = aws_api_gateway_resource.test.id http_method = aws_api_gateway_method.test.http_method + type = "HTTP" uri = "https://www.google.de" integration_http_method = "GET" passthrough_behavior = "WHEN_NO_MATCH" content_handling = "CONVERT_TO_TEXT" + tls_config { insecure_skip_verification = true } From 102bbafd20b885cad366ca503cf5240786370b12 Mon Sep 17 00:00:00 2001 From: Hazmeister Date: Fri, 9 Oct 2020 16:09:41 +0100 Subject: [PATCH 0064/1212] By omitting .example.com the correct domain will be used for the Route53 Domain --- website/docs/r/ses_domain_dkim.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/ses_domain_dkim.html.markdown b/website/docs/r/ses_domain_dkim.html.markdown index cdc217016c4..5dd856438dd 100644 --- a/website/docs/r/ses_domain_dkim.html.markdown +++ b/website/docs/r/ses_domain_dkim.html.markdown @@ -43,7 +43,7 @@ resource "aws_ses_domain_dkim" "example" { resource "aws_route53_record" "example_amazonses_dkim_record" { count = 3 zone_id = "ABCDEFGHIJ123" - name = "${element(aws_ses_domain_dkim.example.dkim_tokens, count.index)}._domainkey.example.com" + name = "${element(aws_ses_domain_dkim.example.dkim_tokens, count.index)}._domainkey" type = "CNAME" ttl = "600" records = ["${element(aws_ses_domain_dkim.example.dkim_tokens, count.index)}.dkim.amazonses.com"] From 8882599265297eb2b4c7afe169415bb97b670810 Mon Sep 17 00:00:00 2001 From: nikhil Date: Sat, 10 Oct 2020 13:05:55 +0530 Subject: [PATCH 0065/1212] f/aws_elasticache_cluster: Support final snapshot --- aws/resource_aws_elasticache_cluster.go | 9 +++++++++ aws/resource_aws_elasticache_replication_group.go | 8 ++++++++ 2 files changed, 17 insertions(+) diff --git a/aws/resource_aws_elasticache_cluster.go b/aws/resource_aws_elasticache_cluster.go index a1b94ba0177..38ee02f29a0 100644 --- a/aws/resource_aws_elasticache_cluster.go +++ b/aws/resource_aws_elasticache_cluster.go @@ -239,6 +239,10 @@ func resourceAwsElasticacheCluster() *schema.Resource { Computed: true, ForceNew: true, }, + "final_snapshot_identifier": { + Type: schema.TypeString, + Optional: true, + }, "tags": tagsSchema(), }, @@
-788,6 +792,11 @@ func deleteElasticacheCacheCluster(conn *elasticache.ElastiCache, cacheClusterID input := &elasticache.DeleteCacheClusterInput{ CacheClusterId: aws.String(cacheClusterID), } + + if v, ok := d.GetOk("final_snapshot_identifier"); ok { + input.FinalSnapshotIdentifier = aws.String(v.(string)) + } + log.Printf("[DEBUG] Deleting Elasticache Cache Cluster: %s", input) err := resource.Retry(5*time.Minute, func() *resource.RetryError { _, err := conn.DeleteCacheCluster(input) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index a72737d5e3c..cf7d4ee3c07 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -240,6 +240,10 @@ func resourceAwsElasticacheReplicationGroup() *schema.Resource { ForceNew: true, Optional: true, }, + "final_snapshot_identifier": { + Type: schema.TypeString, + Optional: true, + }, }, SchemaVersion: 1, @@ -896,6 +900,10 @@ func deleteElasticacheReplicationGroup(replicationGroupID string, conn *elastica ReplicationGroupId: aws.String(replicationGroupID), } + if v, ok := d.GetOk("final_snapshot_identifier"); ok { + input.FinalSnapshotIdentifier = aws.String(v.(string)) + } + // 10 minutes should give any creating/deleting cache clusters or snapshots time to complete err := resource.Retry(10*time.Minute, func() *resource.RetryError { _, err := conn.DeleteReplicationGroup(input) From 3cbe52923a8e7bdb29835fea21822697b7dea5a8 Mon Sep 17 00:00:00 2001 From: nikhil Date: Sat, 10 Oct 2020 16:55:51 +0530 Subject: [PATCH 0066/1212] f/aws_elasticache_cluster: Support final snapshot --- aws/resource_aws_elasticache_cluster.go | 10 ++++------ ...resource_aws_elasticache_replication_group.go | 16 +++++++++------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/aws/resource_aws_elasticache_cluster.go b/aws/resource_aws_elasticache_cluster.go index 38ee02f29a0..f9e3a38f8b3 100644 --- a/aws/resource_aws_elasticache_cluster.go +++ b/aws/resource_aws_elasticache_cluster.go @@ -671,7 +671,8 @@ func (b byCacheNodeId) Less(i, j int) bool { func resourceAwsElasticacheClusterDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).elasticacheconn - err := deleteElasticacheCacheCluster(conn, d.Id()) + var finalSnapshotID = d.Get("final_snapshot_identifier").(string) + err := deleteElasticacheCacheCluster(conn, d.Id(), finalSnapshotID) if err != nil { if isAWSErr(err, elasticache.ErrCodeCacheClusterNotFoundFault, "") { return nil @@ -788,14 +789,11 @@ func waitForCreateElasticacheCacheCluster(conn *elasticache.ElastiCache, cacheCl return err } -func deleteElasticacheCacheCluster(conn *elasticache.ElastiCache, cacheClusterID string) error { +func deleteElasticacheCacheCluster(conn *elasticache.ElastiCache, cacheClusterID string, finalSnapshotID string) error { input := &elasticache.DeleteCacheClusterInput{ CacheClusterId: aws.String(cacheClusterID), } - - if v, ok := d.GetOk("final_snapshot_identifier"); ok { - input.FinalSnapshotIdentifier = aws.String(v.(string)) - } + input.FinalSnapshotIdentifier = aws.String(finalSnapshotID) log.Printf("[DEBUG] Deleting Elasticache Cache Cluster: %s", input) err := resource.Retry(5*time.Minute, func() *resource.RetryError { diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index cf7d4ee3c07..66ef6ac0315 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ 
b/aws/resource_aws_elasticache_replication_group.go @@ -610,7 +610,8 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i // Kick off all the Cache Cluster deletions for _, cacheClusterID := range removeClusterIDs { - err := deleteElasticacheCacheCluster(conn, cacheClusterID) + var finalSnapshotID = d.Get("final_snapshot_identifier").(string) + err := deleteElasticacheCacheCluster(conn, cacheClusterID, finalSnapshotID) if err != nil { // Future enhancement: we could retry deletion with random existing ID on missing name // if isAWSErr(err, elasticache.ErrCodeCacheClusterNotFoundFault, "") { ... } @@ -694,7 +695,8 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i } // Finally retry deleting the cache cluster - err = deleteElasticacheCacheCluster(conn, cacheClusterID) + var finalSnapshotID = d.Get("final_snapshot_identifier").(string) + err = deleteElasticacheCacheCluster(conn, cacheClusterID, finalSnapshotID) if err != nil { return fmt.Errorf("error deleting Elasticache Cache Cluster (%s) (removing replica after setting new primary): %w", cacheClusterID, err) } @@ -839,7 +841,7 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i func resourceAwsElasticacheReplicationGroupDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).elasticacheconn - err := deleteElasticacheReplicationGroup(d.Id(), conn) + err := deleteElasticacheReplicationGroup(d, conn) if err != nil { return fmt.Errorf("error deleting Elasticache Replication Group (%s): %w", d.Id(), err) } @@ -895,9 +897,9 @@ func cacheReplicationGroupStateRefreshFunc(conn *elasticache.ElastiCache, replic } } -func deleteElasticacheReplicationGroup(replicationGroupID string, conn *elasticache.ElastiCache) error { +func deleteElasticacheReplicationGroup(d *schema.ResourceData, conn *elasticache.ElastiCache) error { input := &elasticache.DeleteReplicationGroupInput{ - ReplicationGroupId: aws.String(replicationGroupID), + ReplicationGroupId: aws.String(d.Id()), } if v, ok := d.GetOk("final_snapshot_identifier"); ok { @@ -932,11 +934,11 @@ func deleteElasticacheReplicationGroup(replicationGroupID string, conn *elastica return fmt.Errorf("error deleting Elasticache Replication Group: %w", err) } - log.Printf("[DEBUG] Waiting for deletion: %s", replicationGroupID) + log.Printf("[DEBUG] Waiting for deletion: %s", d.Id()) stateConf := &resource.StateChangeConf{ Pending: []string{"creating", "available", "deleting"}, Target: []string{}, - Refresh: cacheReplicationGroupStateRefreshFunc(conn, replicationGroupID, []string{}), + Refresh: cacheReplicationGroupStateRefreshFunc(conn, d.Id(), []string{}), Timeout: 40 * time.Minute, MinTimeout: 10 * time.Second, Delay: 30 * time.Second, From c558cdf4ba16ad6d4e79a199f17b121c475c0835 Mon Sep 17 00:00:00 2001 From: nikhil Date: Sat, 10 Oct 2020 17:23:58 +0530 Subject: [PATCH 0067/1212] f/aws_elasticache_cluster: Support final snapshot --- aws/resource_aws_elasticache_cluster_test.go | 2 +- aws/resource_aws_elasticache_replication_group.go | 12 +++++------- ...esource_aws_elasticache_replication_group_test.go | 2 +- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/aws/resource_aws_elasticache_cluster_test.go b/aws/resource_aws_elasticache_cluster_test.go index aad64a1813e..556897d106a 100644 --- a/aws/resource_aws_elasticache_cluster_test.go +++ b/aws/resource_aws_elasticache_cluster_test.go @@ -46,7 +46,7 @@ func testSweepElasticacheClusters(region string) error { id := 
aws.StringValue(cluster.CacheClusterId) log.Printf("[INFO] Deleting Elasticache Cluster: %s", id) - err := deleteElasticacheCacheCluster(conn, id) + err := deleteElasticacheCacheCluster(conn, id, "") if err != nil { log.Printf("[ERROR] Failed to delete Elasticache Cache Cluster (%s): %s", id, err) } diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index 66ef6ac0315..d4db2a7f474 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -841,7 +841,8 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i func resourceAwsElasticacheReplicationGroupDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).elasticacheconn - err := deleteElasticacheReplicationGroup(d, conn) + var finalSnapshotID = d.Get("final_snapshot_identifier").(string) + err := deleteElasticacheReplicationGroup(d.Id(), conn, finalSnapshotID) if err != nil { return fmt.Errorf("error deleting Elasticache Replication Group (%s): %w", d.Id(), err) } @@ -897,14 +898,11 @@ func cacheReplicationGroupStateRefreshFunc(conn *elasticache.ElastiCache, replic } } -func deleteElasticacheReplicationGroup(d *schema.ResourceData, conn *elasticache.ElastiCache) error { +func deleteElasticacheReplicationGroup(replicationGroupID string, conn *elasticache.ElastiCache, finalSnapshotID string) error { input := &elasticache.DeleteReplicationGroupInput{ - ReplicationGroupId: aws.String(d.Id()), - } - - if v, ok := d.GetOk("final_snapshot_identifier"); ok { - input.FinalSnapshotIdentifier = aws.String(v.(string)) + ReplicationGroupId: aws.String(replicationGroupID), } + input.FinalSnapshotIdentifier = aws.String(finalSnapshotID) // 10 minutes should give any creating/deleting cache clusters or snapshots time to complete err := resource.Retry(10*time.Minute, func() *resource.RetryError { diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go index ed54080f3c6..b9b7c3c6344 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -40,7 +40,7 @@ func testSweepElasticacheReplicationGroups(region string) error { id := aws.StringValue(replicationGroup.ReplicationGroupId) log.Printf("[INFO] Deleting Elasticache Replication Group: %s", id) - err := deleteElasticacheReplicationGroup(id, conn) + err := deleteElasticacheReplicationGroup(id, conn, "") if err != nil { log.Printf("[ERROR] Failed to delete Elasticache Replication Group (%s): %s", id, err) } From ebaf8a91751bde2131f4216b09a3a26e703a0aed Mon Sep 17 00:00:00 2001 From: nikhil Date: Sat, 10 Oct 2020 17:33:18 +0530 Subject: [PATCH 0068/1212] f/aws_elasticache_cluster: Support final snapshot --- aws/resource_aws_elasticache_replication_group.go | 4 ++-- website/docs/r/elasticache_cluster.html.markdown | 2 ++ website/docs/r/elasticache_replication_group.html.markdown | 1 + 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index d4db2a7f474..f1ee2ef6b65 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -841,7 +841,7 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i func resourceAwsElasticacheReplicationGroupDelete(d *schema.ResourceData, meta interface{}) error { conn 
:= meta.(*AWSClient).elasticacheconn - var finalSnapshotID = d.Get("final_snapshot_identifier").(string) + var finalSnapshotID = d.Get("final_snapshot_identifier").(string) err := deleteElasticacheReplicationGroup(d.Id(), conn, finalSnapshotID) if err != nil { return fmt.Errorf("error deleting Elasticache Replication Group (%s): %w", d.Id(), err) @@ -902,7 +902,7 @@ func deleteElasticacheReplicationGroup(replicationGroupID string, conn *elastica input := &elasticache.DeleteReplicationGroupInput{ ReplicationGroupId: aws.String(replicationGroupID), } - input.FinalSnapshotIdentifier = aws.String(finalSnapshotID) + input.FinalSnapshotIdentifier = aws.String(finalSnapshotID) // 10 minutes should give any creating/deleting cache clusters or snapshots time to complete err := resource.Retry(10*time.Minute, func() *resource.RetryError { diff --git a/website/docs/r/elasticache_cluster.html.markdown b/website/docs/r/elasticache_cluster.html.markdown index f68a2d5c79b..e77a683f54c 100644 --- a/website/docs/r/elasticache_cluster.html.markdown +++ b/website/docs/r/elasticache_cluster.html.markdown @@ -132,6 +132,8 @@ SNS topic to send ElastiCache notifications to. Example: * `preferred_availability_zones` - (Optional, Memcached only) A list of the Availability Zones in which cache nodes are created. If you are creating your cluster in an Amazon VPC you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group. The number of Availability Zones listed must equal the value of `num_cache_nodes`. If you want all the nodes in the same Availability Zone, use `availability_zone` instead, or repeat the Availability Zone multiple times in the list. Default: System chosen Availability Zones. Detecting drift of existing node availability zone is not currently supported. Updating this argument by itself to migrate existing node availability zones is not currently supported and will show a perpetual difference. +* `final_snapshot_identifier` - (Optional) The name of your final cluster snapshot. If omitted, no final snapshot will be made. + * `tags` - (Optional) A map of tags to assign to the resource ## Attributes Reference diff --git a/website/docs/r/elasticache_replication_group.html.markdown b/website/docs/r/elasticache_replication_group.html.markdown index b1ff694a792..d39adaefab7 100644 --- a/website/docs/r/elasticache_replication_group.html.markdown +++ b/website/docs/r/elasticache_replication_group.html.markdown @@ -135,6 +135,7 @@ before being deleted. If the value of SnapshotRetentionLimit is set to zero (0), Please note that setting a `snapshot_retention_limit` is not supported on cache.t1.micro or cache.t2.* cache nodes * `apply_immediately` - (Optional) Specifies whether any modifications are applied immediately, or during the next maintenance window. Default is `false`. * `tags` - (Optional) A map of tags to assign to the resource. Adding tags to this resource will add or overwrite any existing tags on the clusters in the replication group and not to the group itself. +* `final_snapshot_identifier` - (Optional) The name of your final node group (shard) snapshot. ElastiCache creates the snapshot from the primary node in the cluster. If omitted, no final snapshot will be made. * `cluster_mode` - (Optional) Create a native redis cluster. `automatic_failover_enabled` must be set to true. Cluster Mode documented below. Only 1 `cluster_mode` block is allowed. 
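For background on the `final_snapshot_identifier` argument above: the provider forwards the value directly to the ElastiCache API when the group is deleted. The following is a minimal SDK sketch of that underlying call (illustrative only; `deleteWithFinalSnapshot` is a hypothetical helper, not provider code, and an initialized `elasticache.ElastiCache` client is assumed). The `cluster_mode` sub-arguments are documented right after it.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

// deleteWithFinalSnapshot deletes a replication group and asks ElastiCache
// to take a final snapshot under the given name before the group is removed.
func deleteWithFinalSnapshot(conn *elasticache.ElastiCache, groupID, snapshotID string) error {
	_, err := conn.DeleteReplicationGroup(&elasticache.DeleteReplicationGroupInput{
		ReplicationGroupId:      aws.String(groupID),
		FinalSnapshotIdentifier: aws.String(snapshotID),
	})
	return err
}
```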
Cluster Mode (`cluster_mode`) supports the following: From 3013c72547447a29a9cf86dcf9b441d412b692c0 Mon Sep 17 00:00:00 2001 From: nikhil Date: Sat, 10 Oct 2020 18:00:41 +0530 Subject: [PATCH 0069/1212] f/aws_elasticache_cluster: Support final snapshot --- aws/resource_aws_elasticache_replication_group.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index f1ee2ef6b65..596b09caa31 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -932,11 +932,11 @@ func deleteElasticacheReplicationGroup(replicationGroupID string, conn *elastica return fmt.Errorf("error deleting Elasticache Replication Group: %w", err) } - log.Printf("[DEBUG] Waiting for deletion: %s", d.Id()) + log.Printf("[DEBUG] Waiting for deletion: %s", replicationGroupID) stateConf := &resource.StateChangeConf{ Pending: []string{"creating", "available", "deleting"}, Target: []string{}, - Refresh: cacheReplicationGroupStateRefreshFunc(conn, d.Id(), []string{}), + Refresh: cacheReplicationGroupStateRefreshFunc(conn, replicationGroupID, []string{}), Timeout: 40 * time.Minute, MinTimeout: 10 * time.Second, Delay: 30 * time.Second, From 222c479d4d4120974d98230d8c1b800c0f7ddb64 Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Tue, 3 Nov 2020 09:07:45 +0900 Subject: [PATCH 0070/1212] Add aws_codestarconnections_connection resource MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Kévin Sénéchal --- aws/provider.go | 1 + ...urce_aws_codestarconnections_connection.go | 112 ++++++++++++ ...aws_codestarconnections_connection_test.go | 70 +++++++ .../r/codestarconnections_connection.markdown | 171 ++++++++++++++++++ 4 files changed, 354 insertions(+) create mode 100644 aws/resource_aws_codestarconnections_connection.go create mode 100644 aws/resource_aws_codestarconnections_connection_test.go create mode 100644 website/docs/r/codestarconnections_connection.markdown diff --git a/aws/provider.go b/aws/provider.go index 8075ec7d6a6..fe199021a6a 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -512,6 +512,7 @@ func Provider() *schema.Provider { "aws_codebuild_webhook": resourceAwsCodeBuildWebhook(), "aws_codepipeline": resourceAwsCodePipeline(), "aws_codepipeline_webhook": resourceAwsCodePipelineWebhook(), + "aws_codestarconnections_connection": resourceAwsCodeStarConnectionsConnection(), "aws_codestarnotifications_notification_rule": resourceAwsCodeStarNotificationsNotificationRule(), "aws_cur_report_definition": resourceAwsCurReportDefinition(), "aws_customer_gateway": resourceAwsCustomerGateway(), diff --git a/aws/resource_aws_codestarconnections_connection.go b/aws/resource_aws_codestarconnections_connection.go new file mode 100644 index 00000000000..a96f3b581e7 --- /dev/null +++ b/aws/resource_aws_codestarconnections_connection.go @@ -0,0 +1,112 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/codestarconnections" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func resourceAwsCodeStarConnectionsConnection() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCodeStarConnectionsConnectionCreate, + Read: resourceAwsCodeStarConnectionsConnectionRead, + Delete: resourceAwsCodeStarConnectionsConnectionDelete, + Importer: 
&schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "connection_arn": { + Type: schema.TypeString, + Computed: true, + }, + + "connection_status": { + Type: schema.TypeString, + Computed: true, + }, + + "connection_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "provider_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + codestarconnections.ProviderTypeBitbucket, + }, false), + }, + }, + } +} + +func resourceAwsCodeStarConnectionsConnectionCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codestarconnectionsconn + + params := &codestarconnections.CreateConnectionInput{ + ConnectionName: aws.String(d.Get("connection_name").(string)), + ProviderType: aws.String(d.Get("provider_type").(string)), + } + + res, err := conn.CreateConnection(params) + if err != nil { + return fmt.Errorf("error creating codestar connection: %s", err) + } + + d.SetId(aws.StringValue(res.ConnectionArn)) + + return resourceAwsCodeStarConnectionsConnectionRead(d, meta) +} + +func resourceAwsCodeStarConnectionsConnectionRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codestarconnectionsconn + + rule, err := conn.GetConnection(&codestarconnections.GetConnectionInput{ + ConnectionArn: aws.String(d.Id()), + }) + + if err != nil { + if isAWSErr(err, codestarconnections.ErrCodeResourceNotFoundException, "") { + log.Printf("[WARN] codestar connection (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("error reading codestar connection: %s", err) + } + + d.SetId(aws.StringValue(rule.Connection.ConnectionArn)) + d.Set("arn", rule.Connection.ConnectionArn) + d.Set("connection_arn", rule.Connection.ConnectionArn) + d.Set("connection_name", rule.Connection.ConnectionName) + d.Set("connection_status", rule.Connection.ConnectionStatus) + d.Set("provider_type", rule.Connection.ProviderType) + + return nil +} + +func resourceAwsCodeStarConnectionsConnectionDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codestarconnectionsconn + + _, err := conn.DeleteConnection(&codestarconnections.DeleteConnectionInput{ + ConnectionArn: aws.String(d.Id()), + }) + + if err != nil { + return fmt.Errorf("error deleting codestar connection: %s", err) + } + + return nil +} diff --git a/aws/resource_aws_codestarconnections_connection_test.go b/aws/resource_aws_codestarconnections_connection_test.go new file mode 100644 index 00000000000..6a090ac8fc4 --- /dev/null +++ b/aws/resource_aws_codestarconnections_connection_test.go @@ -0,0 +1,70 @@ +package aws + +import ( + "fmt" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/codestarconnections" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccAWSCodeStarConnectionsConnection_Basic(t *testing.T) { + resourceName := "aws_codestarconnections_connection.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCodeStarConnectionsConnectionDestroy, + Steps: []resource.TestStep{ + { + 
Config: testAccAWSCodeStarConnectionsConnectionConfigBasic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccMatchResourceAttrRegionalARN(resourceName, "id", "codestar-connections", regexp.MustCompile("connection/.+")), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "codestar-connections", regexp.MustCompile("connection/.+")), + testAccMatchResourceAttrRegionalARN(resourceName, "connection_arn", "codestar-connections", regexp.MustCompile("connection/.+")), + resource.TestCheckResourceAttr(resourceName, "provider_type", codestarconnections.ProviderTypeBitbucket), + resource.TestCheckResourceAttr(resourceName, "connection_name", rName), + resource.TestCheckResourceAttr(resourceName, "connection_status", codestarconnections.ConnectionStatusPending), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckAWSCodeStarConnectionsConnectionDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).codestarconnectionsconn + + for _, rs := range s.RootModule().Resources { + switch rs.Type { + case "aws_codestarconnections_connection": + _, err := conn.GetConnection(&codestarconnections.GetConnectionInput{ + ConnectionArn: aws.String(rs.Primary.ID), + }) + + if err != nil && !isAWSErr(err, codestarconnections.ErrCodeResourceNotFoundException, "") { + return err + } + } + } + + return nil +} + +func testAccAWSCodeStarConnectionsConnectionConfigBasic(rName string) string { + return fmt.Sprintf(` +resource "aws_codestarconnections_connection" "test" { + connection_name = %[1]q + provider_type = "Bitbucket" +} +`, rName) +} diff --git a/website/docs/r/codestarconnections_connection.markdown b/website/docs/r/codestarconnections_connection.markdown new file mode 100644 index 00000000000..3d01a0acb9b --- /dev/null +++ b/website/docs/r/codestarconnections_connection.markdown @@ -0,0 +1,171 @@ +--- +subcategory: "CodeStar Connections" +layout: "aws" +page_title: "AWS: aws_codestarconnections_connection" +description: |- + Provides a CodeStar Connection +--- + +# Resource: aws_codestarconnections_connection + +Provides a CodeStar Connection. 
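+
+~> **NOTE:** The connection is created in `PENDING` status; authentication with the provider (for example Bitbucket) must then be completed in the AWS Console before the connection can be used. This is why the acceptance test above asserts `ConnectionStatusPending`.
+
For orientation, a minimal SDK sketch of the call this resource wraps (illustrative only; assumes default AWS credentials and region):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codestarconnections"
)

func main() {
	conn := codestarconnections.New(session.Must(session.NewSession()))

	// Create a Bitbucket connection; the returned connection starts out in
	// PENDING status until the handshake is completed in the console.
	out, err := conn.CreateConnection(&codestarconnections.CreateConnectionInput{
		ConnectionName: aws.String("example-connection"),
		ProviderType:   aws.String(codestarconnections.ProviderTypeBitbucket),
	})
	if err != nil {
		panic(err)
	}

	fmt.Println(aws.StringValue(out.ConnectionArn))
}
```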
+ +## Example Usage + +```hcl +resource "aws_s3_bucket" "codepipeline_bucket" { + bucket = "tf-codestarconnections-codepipeline-bucket" + acl = "private" +} + +resource "aws_codestarconnections_connection" "example" { + connection_name = "example-connection" + provider_type = "Bitbucket" +} + +resource "aws_iam_role" "codepipeline_role" { + name = "test-role" + assume_role_policy = < Date: Thu, 12 Nov 2020 14:57:05 +0100 Subject: [PATCH 0071/1212] aws_lambda_permission: Add example with cloudwatch logs --- .../docs/r/lambda_permission.html.markdown | 51 +++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/website/docs/r/lambda_permission.html.markdown b/website/docs/r/lambda_permission.html.markdown index c49fa51e18c..bba934e8214 100644 --- a/website/docs/r/lambda_permission.html.markdown +++ b/website/docs/r/lambda_permission.html.markdown @@ -128,6 +128,57 @@ resource "aws_lambda_permission" "lambda_permission" { } ``` +## Usage with CloudWatch log group + +```hcl +resource "aws_lambda_permission" "logging" { + action = "lambda:InvokeFunction" + function_name = aws_lambda_function.logging.function_name + principal = "logs.eu-west-1.amazonaws.com" + source_arn = "${aws_cloudwatch_log_group.default.arn}:*" +} + +resource "aws_cloudwatch_log_group" "default" { + name = "/default" +} + +resource "aws_cloudwatch_log_subscription_filter" "logging" { + depends_on = [aws_lambda_permission.logging] + destination_arn = aws_lambda_function.logging.arn + filter_pattern = "" + log_group_name = aws_cloudwatch_log_group.default.name + name = "logging_default" +} + +resource "aws_lambda_function" "logging" { + filename = "lamba_logging.zip" + function_name = "lambda_called_from_cloudwatch_logs" + handler = "exports.handler" + role = aws_iam_role.default.arn + runtime = "python2.7" +} + +resource "aws_iam_role" "default" { + name = "iam_for_lambda_called_from_cloudwatch_logs" + + assume_role_policy = < Date: Wed, 25 Nov 2020 17:20:44 -0800 Subject: [PATCH 0072/1212] Reformats tests --- aws/resource_aws_autoscaling_group_test.go | 429 ++++++++++----------- 1 file changed, 207 insertions(+), 222 deletions(-) diff --git a/aws/resource_aws_autoscaling_group_test.go b/aws/resource_aws_autoscaling_group_test.go index 87b7d3aa6ac..aa7e2f54efd 100644 --- a/aws/resource_aws_autoscaling_group_test.go +++ b/aws/resource_aws_autoscaling_group_test.go @@ -553,8 +553,7 @@ func TestAccAWSAutoScalingGroup_withPlacementGroup(t *testing.T) { Config: testAccAWSAutoScalingGroupConfig_withPlacementGroup(randName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "placement_group", randName), + resource.TestCheckResourceAttr("aws_autoscaling_group.bar", "placement_group", randName), ), }, { @@ -996,6 +995,153 @@ func TestAccAWSAutoScalingGroup_ALB_TargetGroups_ELBCapacity(t *testing.T) { }) } +func TestAccAWSAutoScalingGroup_InstanceRefresh_Enabled(t *testing.T) { + var group autoscaling.Group + resourceName := "aws_autoscaling_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, + Steps: []resource.TestStep{ + { + // check that an instance refresh isn't started by a new asg + Config: testAccAwsAutoScalingGroup_InstanceRefresh_Enabled("Alpha", true, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAutoScalingGroupExists(resourceName, 
&group), + resource.TestCheckResourceAttr(resourceName, "min_size", "1"), + resource.TestCheckResourceAttr(resourceName, "max_size", "2"), + resource.TestCheckResourceAttr(resourceName, "desired_capacity", "1"), + testAccCheckAutoscalingLatestInstanceRefreshState(&group, 0, 0, nil), + resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.instance_warmup_seconds", "-1"), + resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.min_healthy_percentage", "90"), + resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.strategy", "Rolling"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "force_delete", + "wait_for_capacity_timeout", + "instance_refresh", + }, + }, + { + // check that changing asg size doesn't trigger a refresh + Config: testAccAwsAutoScalingGroup_InstanceRefresh_Enabled("Alpha", false, 2), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAutoScalingGroupExists(resourceName, &group), + resource.TestCheckResourceAttr(resourceName, "min_size", "2"), + resource.TestCheckResourceAttr(resourceName, "max_size", "4"), + resource.TestCheckResourceAttr(resourceName, "desired_capacity", "2"), + testAccCheckAutoscalingLatestInstanceRefreshState(&group, 0, 0, nil), + ), + }, + { + // check that changing tags doesn't trigger a refresh + Config: testAccAwsAutoScalingGroup_InstanceRefresh_Enabled("Bravo", false, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAutoScalingGroupExists(resourceName, &group), + resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.instance_warmup_seconds", "10"), + resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.min_healthy_percentage", "50"), + resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.strategy", "Rolling"), + testAccCheckAutoscalingLatestInstanceRefreshState(&group, 0, 0, nil), + ), + }, + // TODO: check that an active refresh is cancelled in favour of a new one + }, + }) +} + +func TestAccAWSAutoScalingGroup_InstanceRefresh_Triggers(t *testing.T) { + matrix := []struct { + AvailabilityZoneCount int + SubnetCount int + InstanceType string + UseLaunchConfiguration bool + UseLaunchTemplate bool + UseMixedInstancesPolicy bool + UsePlacementGroup bool + ExpectRefreshCount int + }{ + {2, 0, "t3.nano", true, false, false, false, 0}, // create asg with 2 az-s + {1, 0, "t3.nano", true, false, false, false, 1}, // drop 1 az + {0, 2, "t3.nano", true, false, false, false, 2}, // add 2 subnets, drop az-s + {0, 1, "t3.nano", true, false, false, false, 3}, // drop 1 subnet + {0, 1, "t3.nano", false, true, false, false, 4}, // drop launch config, add template + {0, 1, "t3.micro", false, true, false, false, 5}, // update template + {0, 1, "t3.micro", false, false, true, false, 6}, // drop template, add mixed policy + {0, 1, "t3.nano", false, false, true, false, 7}, // update mixed policy + {0, 1, "t3.nano", false, false, true, true, 8}, // use placement group + } + + var group autoscaling.Group + resourceName := "aws_autoscaling_group.test" + placementGroupName := fmt.Sprintf("tf-test-%s", acctest.RandString(8)) + + steps := make([]resource.TestStep, len(matrix)) + for i, test := range matrix { + steps[i] = resource.TestStep{ + Config: testAccAwsAutoScalingGroup_InstanceRefresh_Triggers( + test.AvailabilityZoneCount, + test.SubnetCount, + test.InstanceType, + test.UseLaunchConfiguration, + test.UseLaunchTemplate, + test.UseMixedInstancesPolicy, + test.UsePlacementGroup, + placementGroupName, + 
), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAutoScalingGroupExists(resourceName, &group), + testAccCheckAutoscalingLatestInstanceRefreshState(&group, test.ExpectRefreshCount, 0, nil), + ), + } + } + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, + Steps: steps, + }) +} + +func testAccCheckAWSAutoScalingGroupExists(n string, group *autoscaling.Group) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No AutoScaling Group ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).autoscalingconn + + describeGroups, err := conn.DescribeAutoScalingGroups( + &autoscaling.DescribeAutoScalingGroupsInput{ + AutoScalingGroupNames: []*string{aws.String(rs.Primary.ID)}, + }) + + if err != nil { + return err + } + + if len(describeGroups.AutoScalingGroups) != 1 || + *describeGroups.AutoScalingGroups[0].AutoScalingGroupName != rs.Primary.ID { + return fmt.Errorf("AutoScaling Group not found") + } + + *group = *describeGroups.AutoScalingGroups[0] + + return nil + } +} + func testAccCheckAWSAutoScalingGroupDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).autoscalingconn @@ -1089,39 +1235,6 @@ func testAccCheckAWSAutoScalingGroupAttributesLoadBalancer(group *autoscaling.Gr } } -func testAccCheckAWSAutoScalingGroupExists(n string, group *autoscaling.Group) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No AutoScaling Group ID is set") - } - - conn := testAccProvider.Meta().(*AWSClient).autoscalingconn - - describeGroups, err := conn.DescribeAutoScalingGroups( - &autoscaling.DescribeAutoScalingGroupsInput{ - AutoScalingGroupNames: []*string{aws.String(rs.Primary.ID)}, - }) - - if err != nil { - return err - } - - if len(describeGroups.AutoScalingGroups) != 1 || - *describeGroups.AutoScalingGroups[0].AutoScalingGroupName != rs.Primary.ID { - return fmt.Errorf("AutoScaling Group not found") - } - - *group = *describeGroups.AutoScalingGroups[0] - - return nil - } -} - func testLaunchConfigurationName(n string, lc *autoscaling.LaunchConfiguration) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -4156,66 +4269,6 @@ resource "aws_autoscaling_group" "test" { `, rName) } -func TestAccAWSAutoScalingGroup_InstanceRefresh_Enabled(t *testing.T) { - var group autoscaling.Group - resourceName := "aws_autoscaling_group.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, - Steps: []resource.TestStep{ - { - // check that an instance refresh isn't started by a new asg - Config: testAccAwsAutoScalingGroup_InstanceRefresh_Enabled("Alpha", true, 1), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists(resourceName, &group), - resource.TestCheckResourceAttr(resourceName, "min_size", "1"), - resource.TestCheckResourceAttr(resourceName, "max_size", "2"), - resource.TestCheckResourceAttr(resourceName, "desired_capacity", "1"), - testAccCheckAutoscalingLatestInstanceRefreshState(&group, 0, 0, nil), - 
resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.instance_warmup_seconds", "-1"), - resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.min_healthy_percentage", "90"), - resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.strategy", "Rolling"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "force_delete", - "wait_for_capacity_timeout", - "instance_refresh", - }, - }, - { - // check that changing asg size doesn't trigger a refresh - Config: testAccAwsAutoScalingGroup_InstanceRefresh_Enabled("Alpha", false, 2), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists(resourceName, &group), - resource.TestCheckResourceAttr(resourceName, "min_size", "2"), - resource.TestCheckResourceAttr(resourceName, "max_size", "4"), - resource.TestCheckResourceAttr(resourceName, "desired_capacity", "2"), - testAccCheckAutoscalingLatestInstanceRefreshState(&group, 0, 0, nil), - ), - }, - { - // check that changing propagated tags doesn't trigger a refresh - Config: testAccAwsAutoScalingGroup_InstanceRefresh_Enabled("Bravo", false, 1), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists(resourceName, &group), - resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.instance_warmup_seconds", "10"), - resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.min_healthy_percentage", "50"), - resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.strategy", "Rolling"), - testAccCheckAutoscalingLatestInstanceRefreshState(&group, 0, 0, nil), - ), - }, - // TODO: check that an active refresh is cancelled in favour of a new one - }, - }) -} - func testAccAwsAutoScalingGroup_InstanceRefresh_Enabled( tagValue string, defaults bool, @@ -4229,6 +4282,25 @@ func testAccAwsAutoScalingGroup_InstanceRefresh_Enabled( } return fmt.Sprintf(` +resource "aws_autoscaling_group" "test" { + availability_zones = [data.aws_availability_zones.current.names[0]] + max_size = 2 * local.size_factor + min_size = 1 * local.size_factor + desired_capacity = 1 * local.size_factor + launch_configuration = aws_launch_configuration.test.name + + tag { + key = "Test" + value = %[1]q + propagate_at_launch = true + } + + instance_refresh { + strategy = "Rolling" +%[2]s + } +} + locals { size_factor = %[3]d } @@ -4256,86 +4328,7 @@ resource "aws_launch_configuration" "test" { image_id = data.aws_ami.test.id instance_type = "t3.nano" } - -resource "aws_autoscaling_group" "test" { - availability_zones = [data.aws_availability_zones.current.names[0]] - max_size = 2 * local.size_factor - min_size = 1 * local.size_factor - desired_capacity = 1 * local.size_factor - launch_configuration = aws_launch_configuration.test.name - - tag { - key = "Test" - value = %[1]q - propagate_at_launch = true - } - - instance_refresh { - strategy = "Rolling" -%[2]s - } -} -`, - tagValue, - preference, - sizeFactor) -} - -func TestAccAWSAutoScalingGroup_InstanceRefresh_Triggers(t *testing.T) { - // note: propagated tags have been implicitly checked - // by TestAccAWSAutoScalingGroup_InstanceRefresh_Enabled - - matrix := []struct { - AvailabilityZoneCount int - SubnetCount int - InstanceType string - UseLaunchConfiguration bool - UseLaunchTemplate bool - UseMixedInstancesPolicy bool - UsePlacementGroup bool - ExpectRefreshCount int - }{ - {2, 0, "t3.nano", true, false, false, false, 0}, // create asg with 2 az-s - {1, 0, "t3.nano", true, false, false, false, 1}, // drop 1 az 
- {0, 2, "t3.nano", true, false, false, false, 2}, // add 2 subnets, drop az-s - {0, 1, "t3.nano", true, false, false, false, 3}, // drop 1 subnet - {0, 1, "t3.nano", false, true, false, false, 4}, // drop launch config, add template - {0, 1, "t3.micro", false, true, false, false, 5}, // update template - {0, 1, "t3.micro", false, false, true, false, 6}, // drop template, add mixed policy - {0, 1, "t3.nano", false, false, true, false, 7}, // update mixed policy - {0, 1, "t3.nano", false, false, true, true, 8}, // use placement group - } - - var group autoscaling.Group - resourceName := "aws_autoscaling_group.test" - placementGroupName := fmt.Sprintf("tf-test-%s", acctest.RandString(8)) - - steps := make([]resource.TestStep, len(matrix)) - for i, test := range matrix { - steps[i] = resource.TestStep{ - Config: testAccAwsAutoScalingGroup_InstanceRefresh_Triggers( - test.AvailabilityZoneCount, - test.SubnetCount, - test.InstanceType, - test.UseLaunchConfiguration, - test.UseLaunchTemplate, - test.UseMixedInstancesPolicy, - test.UsePlacementGroup, - placementGroupName, - ), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists(resourceName, &group), - testAccCheckAutoscalingLatestInstanceRefreshState(&group, test.ExpectRefreshCount, 0, nil), - ), - } - } - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, - Steps: steps, - }) +`, tagValue, preference, sizeFactor) } func testAccAwsAutoScalingGroup_InstanceRefresh_Triggers( @@ -4349,6 +4342,40 @@ func testAccAwsAutoScalingGroup_InstanceRefresh_Triggers( placementGroupName string, ) string { return fmt.Sprintf(` +resource "aws_autoscaling_group" "test" { + availability_zones = local.availability_zone_count > 0 ? slice(data.aws_availability_zones.current.names, 0, local.availability_zone_count) : null + max_size = 1 + min_size = 1 + desired_capacity = 1 + launch_configuration = local.use_launch_configuration ? aws_launch_configuration.test.name : null + vpc_zone_identifier = local.subnet_count > 0 ? slice(aws_subnet.test.*.id, 0, local.subnet_count) : null + placement_group = local.use_placement_group ? aws_placement_group.test.name : null + + dynamic "launch_template" { + for_each = local.use_launch_template ? [1] : [] + content { + id = aws_launch_template.test.id + version = aws_launch_template.test.latest_version + } + } + + dynamic "mixed_instances_policy" { + for_each = local.use_mixed_instances_policy ? [1] : [] + content { + launch_template { + launch_template_specification { + launch_template_id = aws_launch_template.test.id + version = aws_launch_template.test.latest_version + } + } + } + } + + instance_refresh { + strategy = "Rolling" + } +} + locals { availability_zone_count = %[1]d subnet_count = %[2]d @@ -4379,6 +4406,10 @@ data "aws_availability_zones" "current" { } } +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + resource "aws_subnet" "test" { count = length(data.aws_availability_zones.current.names) availability_zone = data.aws_availability_zones.current.names[count.index] @@ -4400,53 +4431,7 @@ resource "aws_placement_group" "test" { name = local.placement_group_name strategy = "cluster" } - -resource "aws_autoscaling_group" "test" { - availability_zones = local.availability_zone_count > 0 ? 
slice(data.aws_availability_zones.current.names, 0, local.availability_zone_count) : null - max_size = 1 - min_size = 1 - desired_capacity = 1 - launch_configuration = local.use_launch_configuration ? aws_launch_configuration.test.name : null - vpc_zone_identifier = local.subnet_count > 0 ? slice(aws_subnet.test.*.id, 0, local.subnet_count) : null - placement_group = local.use_placement_group ? aws_placement_group.test.name : null - - dynamic "launch_template" { - for_each = local.use_launch_template ? [1] : [] - content { - id = aws_launch_template.test.id - version = aws_launch_template.test.latest_version - } - } - - dynamic "mixed_instances_policy" { - for_each = local.use_mixed_instances_policy ? [1] : [] - content { - launch_template { - launch_template_specification { - launch_template_id = aws_launch_template.test.id - version = aws_launch_template.test.latest_version - } - } - } - } - - instance_refresh { - strategy = "Rolling" - } -} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" -} -`, availabilityZoneCount, - subnetCount, - instanceType, - useLaunchConfiguration, - useLaunchTemplate, - useMixedInstancesPolicy, - usePlacementGroup, - placementGroupName, - ) +`, availabilityZoneCount, subnetCount, instanceType, useLaunchConfiguration, useLaunchTemplate, useMixedInstancesPolicy, usePlacementGroup, placementGroupName) } // testAccCheckAutoscalingLatestInstanceRefreshState checks the Instance Refreshes From 468a499f215c146518b3463f82db9fd9973e5994 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 26 Nov 2020 17:54:40 -0800 Subject: [PATCH 0073/1212] Fixes instance refresh error --- .../service/autoscaling/waiter/status.go | 28 +++++ .../service/autoscaling/waiter/waiter.go | 50 +++++++++ aws/resource_aws_autoscaling_group.go | 101 ++++++++---------- 3 files changed, 121 insertions(+), 58 deletions(-) create mode 100644 aws/internal/service/autoscaling/waiter/status.go create mode 100644 aws/internal/service/autoscaling/waiter/waiter.go diff --git a/aws/internal/service/autoscaling/waiter/status.go b/aws/internal/service/autoscaling/waiter/status.go new file mode 100644 index 00000000000..a7bd32d56e0 --- /dev/null +++ b/aws/internal/service/autoscaling/waiter/status.go @@ -0,0 +1,28 @@ +package waiter + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func InstanceRefreshStatus(conn *autoscaling.AutoScaling, asgName, instanceRefreshId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + input := autoscaling.DescribeInstanceRefreshesInput{ + AutoScalingGroupName: aws.String(asgName), + InstanceRefreshIds: []*string{aws.String(instanceRefreshId)}, + } + output, err := conn.DescribeInstanceRefreshes(&input) + if err != nil { + return nil, "", err + } + + if output == nil || len(output.InstanceRefreshes) == 0 || output.InstanceRefreshes[0] == nil { + return nil, "", nil + } + + instanceRefresh := output.InstanceRefreshes[0] + + return instanceRefresh, aws.StringValue(instanceRefresh.Status), nil + } +} diff --git a/aws/internal/service/autoscaling/waiter/waiter.go b/aws/internal/service/autoscaling/waiter/waiter.go new file mode 100644 index 00000000000..b4bf99543d8 --- /dev/null +++ b/aws/internal/service/autoscaling/waiter/waiter.go @@ -0,0 +1,50 @@ +package waiter + +import ( + "time" + + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const ( + // 
Maximum amount of time to wait for an InstanceRefresh to be Successful + InstanceRefreshSuccessfulTimeout = 5 * time.Minute + + // Maximum amount of time to wait for an InstanceRefresh to be Cancelled + InstanceRefreshCancelledTimeout = 5 * time.Minute +) + +func InstanceRefreshSuccessful(conn *autoscaling.AutoScaling, asgName, instanceRefreshId string) (*autoscaling.InstanceRefresh, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{autoscaling.InstanceRefreshStatusPending, autoscaling.InstanceRefreshStatusInProgress}, + Target: []string{autoscaling.InstanceRefreshStatusSuccessful}, + Refresh: InstanceRefreshStatus(conn, asgName, instanceRefreshId), + Timeout: InstanceRefreshSuccessfulTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if v, ok := outputRaw.(*autoscaling.InstanceRefresh); ok { + return v, err + } + + return nil, err +} + +func InstanceRefreshCancelled(conn *autoscaling.AutoScaling, asgName, instanceRefreshId string) (*autoscaling.InstanceRefresh, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{autoscaling.InstanceRefreshStatusPending, autoscaling.InstanceRefreshStatusInProgress, autoscaling.InstanceRefreshStatusCancelling}, + Target: []string{autoscaling.InstanceRefreshStatusCancelled}, + Refresh: InstanceRefreshStatus(conn, asgName, instanceRefreshId), + Timeout: InstanceRefreshCancelledTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if v, ok := outputRaw.(*autoscaling.InstanceRefresh); ok { + return v, err + } + + return nil, err +} diff --git a/aws/resource_aws_autoscaling_group.go b/aws/resource_aws_autoscaling_group.go index 856e5463349..186b329072d 100644 --- a/aws/resource_aws_autoscaling_group.go +++ b/aws/resource_aws_autoscaling_group.go @@ -15,12 +15,14 @@ import ( "github.com/aws/aws-sdk-go/service/autoscaling" "github.com/aws/aws-sdk-go/service/elb" "github.com/aws/aws-sdk-go/service/elbv2" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/hashcode" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/autoscaling/waiter" ) const ( @@ -492,11 +494,9 @@ func resourceAwsAutoscalingGroup() *schema.Resource { ValidateFunc: validation.IntBetween(0, 100), }, "strategy": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice( - []string{autoscaling.RefreshStrategyRolling}, - false), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(autoscaling.RefreshStrategy_Values(), false), }, }, }, @@ -854,6 +854,7 @@ func resourceAwsAutoscalingGroupRead(d *schema.ResourceData, meta interface{}) e return nil } +// TODO: make this a waiter function func waitUntilAutoscalingGroupLoadBalancerTargetGroupsRemoved(conn *autoscaling.AutoScaling, asgName string) error { input := &autoscaling.DescribeLoadBalancerTargetGroupsInput{ AutoScalingGroupName: aws.String(asgName), @@ -861,6 +862,7 @@ func waitUntilAutoscalingGroupLoadBalancerTargetGroupsRemoved(conn *autoscaling. 
var tgRemoving bool for { + // TODO: generate Pages function output, err := conn.DescribeLoadBalancerTargetGroups(input) if err != nil { @@ -890,6 +892,7 @@ func waitUntilAutoscalingGroupLoadBalancerTargetGroupsRemoved(conn *autoscaling. return nil } +// TODO: make this a waiter function func waitUntilAutoscalingGroupLoadBalancerTargetGroupsAdded(conn *autoscaling.AutoScaling, asgName string) error { input := &autoscaling.DescribeLoadBalancerTargetGroupsInput{ AutoScalingGroupName: aws.String(asgName), @@ -897,6 +900,7 @@ func waitUntilAutoscalingGroupLoadBalancerTargetGroupsAdded(conn *autoscaling.Au var tgAdding bool for { + // TODO: generate Pages function output, err := conn.DescribeLoadBalancerTargetGroups(input) if err != nil { @@ -962,7 +966,7 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) if d.HasChange("mixed_instances_policy") { opts.MixedInstancesPolicy = expandAutoScalingMixedInstancesPolicy(d.Get("mixed_instances_policy").([]interface{})) - // TODO: probably not + // TODO: optional trigger shouldRefreshInstances = true } @@ -991,7 +995,7 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) // TODO: this probably needs a wait for capacity if d.HasChange("vpc_zone_identifier") { opts.VPCZoneIdentifier = expandVpcZoneIdentifiers(d.Get("vpc_zone_identifier").(*schema.Set).List()) - // TODO: probably not + // TODO: no shouldRefreshInstances = true } @@ -1000,14 +1004,14 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) if v, ok := d.GetOk("availability_zones"); ok && v.(*schema.Set).Len() > 0 { opts.AvailabilityZones = expandStringList(v.(*schema.Set).List()) } - // TODO: probably not + // TODO: no shouldRefreshInstances = true } // TODO: does this need a wait for capacity? 
if d.HasChange("placement_group") { opts.PlacementGroup = aws.String(d.Get("placement_group").(string)) - // TODO: probably not + // TODO: optional trigger shouldRefreshInstances = true } @@ -1293,6 +1297,8 @@ func resourceAwsAutoscalingGroupDelete(d *schema.ResourceData, meta interface{}) return nil } +// TODO: make this a finder +// TODO: this should return a NotFoundError if not found func getAwsAutoscalingGroup(asgName string, conn *autoscaling.AutoScaling) (*autoscaling.Group, error) { describeOpts := autoscaling.DescribeAutoScalingGroupsInput{ AutoScalingGroupNames: []*string{aws.String(asgName)}, @@ -1753,6 +1759,7 @@ func flattenAutoScalingMixedInstancesPolicy(mixedInstancesPolicy *autoscaling.Mi return []interface{}{m} } +// TODO: make this a waiter function func waitUntilAutoscalingGroupLoadBalancersAdded(conn *autoscaling.AutoScaling, asgName string) error { input := &autoscaling.DescribeLoadBalancersInput{ AutoScalingGroupName: aws.String(asgName), @@ -1789,6 +1796,7 @@ func waitUntilAutoscalingGroupLoadBalancersAdded(conn *autoscaling.AutoScaling, return nil } +// TODO: make this a waiter function func waitUntilAutoscalingGroupLoadBalancersRemoved(conn *autoscaling.AutoScaling, asgName string) error { input := &autoscaling.DescribeLoadBalancersInput{ AutoScalingGroupName: aws.String(asgName), @@ -1796,6 +1804,7 @@ func waitUntilAutoscalingGroupLoadBalancersRemoved(conn *autoscaling.AutoScaling var lbRemoving bool for { + // TODO: generate Pages function output, err := conn.DescribeLoadBalancers(input) if err != nil { @@ -1853,76 +1862,52 @@ func startAutoscalingInstanceRefresh(d *schema.ResourceData, conn *autoscaling.A log.Printf("[DEBUG] Cancelling active refresh in ASG %s, if any...", asgName) - if err := cancelAutoscalingInstanceRefresh(d, conn); err != nil { + if err := cancelAutoscalingInstanceRefresh(conn, asgName); err != nil { // todo: add comment about subsequent ASG updates not picking up the refresh? - return fmt.Errorf("failed to cancel previous refresh: %s", err) + return fmt.Errorf("failed to cancel previous refresh: %w", err) } log.Printf("[DEBUG] Starting instance refresh in ASG %s...", asgName) - instanceRefreshId := "" - switch output, err := conn.StartInstanceRefresh(&input); { - case err != nil: + output, err := conn.StartInstanceRefresh(&input) + if err != nil { return err - default: - instanceRefreshId = aws.StringValue(output.InstanceRefreshId) } + instanceRefreshID := aws.StringValue(output.InstanceRefreshId) - log.Printf("[INFO] Started instance refresh %s in ASG %s", instanceRefreshId, asgName) + log.Printf("[INFO] Started instance refresh %s in ASG %s", instanceRefreshID, asgName) return nil } -// cancelAutoscalingInstanceRefresh cancels the currently active Instance -// Refresh of this Auto-Scaling Group, and waits until the refresh reaches a -// terminal state (usually Cancelled). If there is no active refresh, the -// function short-circuits without error. -func cancelAutoscalingInstanceRefresh(d *schema.ResourceData, conn *autoscaling.AutoScaling) error { - asgName := d.Id() +// cancelAutoscalingInstanceRefresh cancels the currently active Instance Refresh +// of this Auto-Scaling Group, if any, and waits until the refresh is Cancelled. 
+func cancelAutoscalingInstanceRefresh(conn *autoscaling.AutoScaling, asgName string) error { input := autoscaling.CancelInstanceRefreshInput{ AutoScalingGroupName: aws.String(asgName), } - - _, err := conn.CancelInstanceRefresh(&input) - switch { - case isAWSErr(err, autoscaling.ErrCodeActiveInstanceRefreshNotFoundFault, ""): - log.Printf("[DEBUG] No active Instance Refresh in ASG %s", asgName) + log.Printf("[DEBUG] Attempting to cancel Instance Refresh on ASG (%s): %s", asgName, input) + output, err := conn.CancelInstanceRefresh(&input) + if tfawserr.ErrCodeEquals(err, autoscaling.ErrCodeActiveInstanceRefreshNotFoundFault) { + log.Printf("[DEBUG] No active Instance Refresh on ASG (%s)", asgName) return nil - case err != nil: - return err + } + if err != nil { + return fmt.Errorf("error cancelling Instance Refresh on ASG (%s): %w", asgName, err) + } + if output == nil { + return fmt.Errorf("error cancelling Instance Refresh on ASG (%s): empty result", asgName) } - err = resource.Retry(5*time.Minute, func() *resource.RetryError { - input := autoscaling.DescribeInstanceRefreshesInput{ - AutoScalingGroupName: aws.String(asgName), - MaxRecords: aws.Int64(1), - } - - output, err := conn.DescribeInstanceRefreshes(&input) - switch { - case err != nil: - return resource.NonRetryableError(err) - case len(output.InstanceRefreshes) != 1: - return nil - } - - switch status := aws.StringValue(output.InstanceRefreshes[0].Status); status { - case - autoscaling.InstanceRefreshStatusCancelled, - autoscaling.InstanceRefreshStatusFailed, - autoscaling.InstanceRefreshStatusSuccessful: + instanceRefreshID := aws.StringValue(output.InstanceRefreshId) - return nil - default: - return resource.RetryableError(fmt.Errorf("refresh status %s is not terminal", status)) - } - }) - - if isResourceTimeoutError(err) { - return fmt.Errorf("timed out before the previous refresh reached a terminal state") + log.Printf("[DEBUG] Waiting for cancellation of Instance Refresh (%s) on ASG (%s)", instanceRefreshID, asgName) + _, err = waiter.InstanceRefreshCancelled(conn, asgName, instanceRefreshID) + if err != nil { + return fmt.Errorf("error waiting for cancellation of Instance Refresh (%s) on ASG (%s): %w", instanceRefreshID, asgName, err) } - log.Printf("[INFO] Cancelled active instance refresh in ASG %s", asgName) + log.Printf("[INFO] Cancelled Instance Refresh (%s) on ASG (%s)", instanceRefreshID, asgName) return nil } From cdfd2dc2f8da8b1bbe8f4ccdc9ae85c6aa7f92dd Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 28 Nov 2020 12:46:27 +0200 Subject: [PATCH 0074/1212] add global settings resource --- aws/provider.go | 1 + aws/resource_aws_backup_global_settings.go | 59 ++++++++++++++ ...esource_aws_backup_global_settings_test.go | 80 +++++++++++++++++++ .../r/backup_global_settings.html.markdown | 41 ++++++++++ 4 files changed, 181 insertions(+) create mode 100644 aws/resource_aws_backup_global_settings.go create mode 100644 aws/resource_aws_backup_global_settings_test.go create mode 100644 website/docs/r/backup_global_settings.html.markdown diff --git a/aws/provider.go b/aws/provider.go index b29cd32f036..a00be724df5 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -462,6 +462,7 @@ func Provider() *schema.Provider { "aws_autoscaling_policy": resourceAwsAutoscalingPolicy(), "aws_autoscaling_schedule": resourceAwsAutoscalingSchedule(), "aws_autoscalingplans_scaling_plan": resourceAwsAutoScalingPlansScalingPlan(), + "aws_backup_global_settings": resourceAwsBackupGlobalSettings(), "aws_backup_plan": 
resourceAwsBackupPlan(), "aws_backup_region_settings": resourceAwsBackupRegionSettings(), "aws_backup_selection": resourceAwsBackupSelection(), diff --git a/aws/resource_aws_backup_global_settings.go b/aws/resource_aws_backup_global_settings.go new file mode 100644 index 00000000000..e7c17889cdd --- /dev/null +++ b/aws/resource_aws_backup_global_settings.go @@ -0,0 +1,59 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/backup" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceAwsBackupGlobalSettings() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsBackupGlobalSettingsUpdate, + Update: resourceAwsBackupGlobalSettingsUpdate, + Read: resourceAwsBackupGlobalSettingsRead, + Delete: schema.Noop, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "global_settings": { + Type: schema.TypeMap, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func resourceAwsBackupGlobalSettingsUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).backupconn + + input := &backup.UpdateGlobalSettingsInput{ + GlobalSettings: stringMapToPointers(d.Get("global_settings").(map[string]interface{})), + } + + _, err := conn.UpdateGlobalSettings(input) + if err != nil { + return fmt.Errorf("error setting Backup Global Settings (%s): %w", d.Id(), err) + } + + d.SetId(meta.(*AWSClient).accountid) + + return resourceAwsBackupGlobalSettingsRead(d, meta) +} + +func resourceAwsBackupGlobalSettingsRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).backupconn + + resp, err := conn.DescribeGlobalSettings(&backup.DescribeGlobalSettingsInput{}) + if err != nil { + return fmt.Errorf("error reading Backup Global Settings (%s): %w", d.Id(), err) + } + + d.Set("global_settings", aws.StringValueMap(resp.GlobalSettings)) + + return nil +} diff --git a/aws/resource_aws_backup_global_settings_test.go b/aws/resource_aws_backup_global_settings_test.go new file mode 100644 index 00000000000..bca27e84086 --- /dev/null +++ b/aws/resource_aws_backup_global_settings_test.go @@ -0,0 +1,80 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/service/backup" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccAwsBackupGlobalSettings_basic(t *testing.T) { + var settings backup.DescribeGlobalSettingsOutput + + resourceName := "aws_backup_global_settings.test" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckAWSBackup(t) + }, + Providers: testAccProviders, + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: testAccBackupGlobalSettingsConfig("true"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsBackupGlobalSettingsExists(&settings), + resource.TestCheckResourceAttr(resourceName, "global_settings.%", "1"), + resource.TestCheckResourceAttr(resourceName, "global_settings.isCrossAccountBackupEnabled", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccBackupGlobalSettingsConfig("false"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsBackupGlobalSettingsExists(&settings), + resource.TestCheckResourceAttr(resourceName, "global_settings.%", "1"), + resource.TestCheckResourceAttr(resourceName, 
"global_settings.isCrossAccountBackupEnabled", "false"), + ), + }, + { + Config: testAccBackupGlobalSettingsConfig("true"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsBackupGlobalSettingsExists(&settings), + resource.TestCheckResourceAttr(resourceName, "global_settings.%", "1"), + resource.TestCheckResourceAttr(resourceName, "global_settings.isCrossAccountBackupEnabled", "true"), + ), + }, + }, + }) +} + +func testAccCheckAwsBackupGlobalSettingsExists(settings *backup.DescribeGlobalSettingsOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + + conn := testAccProvider.Meta().(*AWSClient).backupconn + resp, err := conn.DescribeGlobalSettings(&backup.DescribeGlobalSettingsInput{}) + if err != nil { + return err + } + + *settings = *resp + + return nil + } +} + +func testAccBackupGlobalSettingsConfig(setting string) string { + return fmt.Sprintf(` +resource "aws_backup_global_settings" "test" { + global_settings = { + "isCrossAccountBackupEnabled" = %[1]q + } +} +`, setting) +} diff --git a/website/docs/r/backup_global_settings.html.markdown b/website/docs/r/backup_global_settings.html.markdown new file mode 100644 index 00000000000..ceaad8b599f --- /dev/null +++ b/website/docs/r/backup_global_settings.html.markdown @@ -0,0 +1,41 @@ +--- +subcategory: "Backup" +layout: "aws" +page_title: "AWS: aws_backup_global_settings" +description: |- + Provides an AWS Backup Global Settings resource. +--- + +# Resource: aws_backup_global_settings + +Provides an AWS Backup Global Settings resource. + +## Example Usage + +```hcl +resource "aws_backup_global_settings" "test" { + global_settings = { + "isCrossAccountBackupEnabled" = "true" + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `resource_type_opt_in_preference` - (Required) A list of resources along with the opt-in preferences for the account. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - The AWS Account ID. + +## Import + +Backup Global Settings can be imported using the `id`, e.g. + +``` +$ terraform import aws_backup_global_settings.example 123456789012 +``` From 25adca65100a3b60c44239e95a8c02fff9931bd0 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 28 Nov 2020 12:48:08 +0200 Subject: [PATCH 0075/1212] fix doc --- website/docs/r/backup_global_settings.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/backup_global_settings.html.markdown b/website/docs/r/backup_global_settings.html.markdown index ceaad8b599f..aaac9911e7b 100644 --- a/website/docs/r/backup_global_settings.html.markdown +++ b/website/docs/r/backup_global_settings.html.markdown @@ -24,7 +24,7 @@ resource "aws_backup_global_settings" "test" { The following arguments are supported: -* `resource_type_opt_in_preference` - (Required) A list of resources along with the opt-in preferences for the account. +* `global_settings` - (Required) A list of resources along with the opt-in preferences for the account. 
## Attributes Reference From 1c28ee556e0bb87abc184e7e135aca3f18a54590 Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Mon, 30 Nov 2020 21:05:58 +0200 Subject: [PATCH 0076/1212] Update aws/resource_aws_backup_global_settings.go Co-authored-by: Kit Ewbank --- aws/resource_aws_backup_global_settings.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_backup_global_settings.go b/aws/resource_aws_backup_global_settings.go index e7c17889cdd..ce62cf6152c 100644 --- a/aws/resource_aws_backup_global_settings.go +++ b/aws/resource_aws_backup_global_settings.go @@ -53,7 +53,9 @@ func resourceAwsBackupGlobalSettingsRead(d *schema.ResourceData, meta interface{ return fmt.Errorf("error reading Backup Global Settings (%s): %w", d.Id(), err) } - d.Set("global_settings", aws.StringValueMap(resp.GlobalSettings)) + if err := d.Set("global_settings", aws.StringValueMap(resp.GlobalSettings)); err != nil { + return fmt.Errorf("error setting global_settings: %w", err) + } return nil } From 4e39c144af4b4df398027f9f2f48b8d625d38dc8 Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Mon, 30 Nov 2020 21:06:43 +0200 Subject: [PATCH 0077/1212] Update resource_aws_backup_global_settings.go --- aws/resource_aws_backup_global_settings.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_backup_global_settings.go b/aws/resource_aws_backup_global_settings.go index ce62cf6152c..58ffd4e7fbb 100644 --- a/aws/resource_aws_backup_global_settings.go +++ b/aws/resource_aws_backup_global_settings.go @@ -37,7 +37,7 @@ func resourceAwsBackupGlobalSettingsUpdate(d *schema.ResourceData, meta interfac _, err := conn.UpdateGlobalSettings(input) if err != nil { - return fmt.Errorf("error setting Backup Global Settings (%s): %w", d.Id(), err) + return fmt.Errorf("error setting Backup Global Settings (%s): %w", meta.(*AWSClient).accountid, err) } d.SetId(meta.(*AWSClient).accountid) From 0614a00f0b6d940692565f87028d619602cf26ed Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 30 Nov 2020 13:57:16 -0800 Subject: [PATCH 0078/1212] Updates instance_refresh structure to match API --- aws/resource_aws_autoscaling_group.go | 97 +++++++----- aws/resource_aws_autoscaling_group_test.go | 148 +++++++++++++++--- .../docs/r/autoscaling_group.html.markdown | 58 +++---- 3 files changed, 212 insertions(+), 91 deletions(-) diff --git a/aws/resource_aws_autoscaling_group.go b/aws/resource_aws_autoscaling_group.go index 186b329072d..0bff7005276 100644 --- a/aws/resource_aws_autoscaling_group.go +++ b/aws/resource_aws_autoscaling_group.go @@ -481,23 +481,32 @@ func resourceAwsAutoscalingGroup() *schema.Resource { Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "instance_warmup_seconds": { - Type: schema.TypeInt, - Optional: true, - Default: -1, // default to health_check_grace_period - ValidateFunc: validation.IntAtLeast(-1), - }, - "min_healthy_percentage": { - Type: schema.TypeInt, - Optional: true, - Default: 90, - ValidateFunc: validation.IntBetween(0, 100), - }, "strategy": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice(autoscaling.RefreshStrategy_Values(), false), }, + "preferences": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_warmup": { + Type: schema.TypeInt, + Optional: true, + Default: -1, // default to health_check_grace_period + ValidateFunc: validation.IntAtLeast(-1), + }, + "min_healthy_percentage": { + Type: 
schema.TypeInt, + Optional: true, + Default: 90, + ValidateFunc: validation.IntBetween(0, 100), + }, + }, + }, + }, }, }, }, @@ -740,6 +749,7 @@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{}) return resourceAwsAutoscalingGroupRead(d, meta) } +// TODO: wrap all top-level error returns func resourceAwsAutoscalingGroupRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).autoscalingconn ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig @@ -1008,7 +1018,6 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) shouldRefreshInstances = true } - // TODO: does this need a wait for capacity? if d.HasChange("placement_group") { opts.PlacementGroup = aws.String(d.Get("placement_group").(string)) // TODO: optional trigger @@ -1210,9 +1219,9 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) } } - if shouldRefreshInstances { - if err := startAutoscalingInstanceRefresh(d, conn); err != nil { - return fmt.Errorf("failed to start instance refresh of asg %s: %s", d.Id(), err) + if instanceRefreshRaw, ok := d.GetOk("instance_refresh"); ok && shouldRefreshInstances { + if err := autoScalingGroupRefreshInstances(conn, d.Id(), instanceRefreshRaw.([]interface{})); err != nil { + return fmt.Errorf("failed to start instance refresh of asg %s: %w", d.Id(), err) } } @@ -1834,32 +1843,46 @@ func waitUntilAutoscalingGroupLoadBalancersRemoved(conn *autoscaling.AutoScaling return nil } -// startAutoscalingInstanceRefresh starts a new Instance Refresh in this -// Auto-Scaling Group. If there is already an active refresh, it is cancelled. -func startAutoscalingInstanceRefresh(d *schema.ResourceData, conn *autoscaling.AutoScaling) error { - asgName := d.Id() - input := autoscaling.StartInstanceRefreshInput{ - AutoScalingGroupName: aws.String(asgName), - Preferences: &autoscaling.RefreshPreferences{}, - Strategy: nil, +// TODO: rename +func expandAutoScalingGroupInstanceRefresh(asgName string, l []interface{}) *autoscaling.StartInstanceRefreshInput { + if len(l) == 0 || l[0] == nil { + return nil } - if block, ok := d.Get("instance_refresh").([]interface{}); ok && len(block) > 0 { - m := block[0].(map[string]interface{}) + m := l[0].(map[string]interface{}) - if warmup := m["instance_warmup_seconds"].(int); warmup > -1 { - // -1 would mean defaulting to using the group's health_check_grace_period - input.Preferences.InstanceWarmup = aws.Int64(int64(warmup)) - } + return &autoscaling.StartInstanceRefreshInput{ + AutoScalingGroupName: aws.String(asgName), + Strategy: aws.String(m["strategy"].(string)), + Preferences: expandAutoScalingGroupInstanceRefreshPreferences(m["preferences"].([]interface{})), + } +} - // validated by schema - input.Preferences.MinHealthyPercentage = aws.Int64(int64(m["min_healthy_percentage"].(int))) - input.Strategy = aws.String(m["strategy"].(string)) - } else { - log.Printf("[DEBUG] Instance refresh not enabled in ASG %s", asgName) +func expandAutoScalingGroupInstanceRefreshPreferences(l []interface{}) *autoscaling.RefreshPreferences { + if len(l) == 0 || l[0] == nil { return nil } + m := l[0].(map[string]interface{}) + + refreshPreferences := &autoscaling.RefreshPreferences{} + + if v, ok := m["instance_warmup"]; ok { + refreshPreferences.InstanceWarmup = aws.Int64(int64(v.(int))) + } + + if v, ok := m["min_healthy_percentage"]; ok { + refreshPreferences.MinHealthyPercentage = aws.Int64(int64(v.(int))) + } + + return refreshPreferences +} + +// 
autoScalingGroupRefreshInstances starts a new Instance Refresh in this +// Auto Scaling Group. If there is already an active refresh, it is cancelled. +func autoScalingGroupRefreshInstances(conn *autoscaling.AutoScaling, asgName string, d []interface{}) error { + input := expandAutoScalingGroupInstanceRefresh(asgName, d) + log.Printf("[DEBUG] Cancelling active refresh in ASG %s, if any...", asgName) if err := cancelAutoscalingInstanceRefresh(conn, asgName); err != nil { @@ -1869,7 +1892,7 @@ func startAutoscalingInstanceRefresh(d *schema.ResourceData, conn *autoscaling.A log.Printf("[DEBUG] Starting instance refresh in ASG %s...", asgName) - output, err := conn.StartInstanceRefresh(&input) + output, err := conn.StartInstanceRefresh(input) if err != nil { return err } @@ -1900,8 +1923,8 @@ func cancelAutoscalingInstanceRefresh(conn *autoscaling.AutoScaling, asgName str } instanceRefreshID := aws.StringValue(output.InstanceRefreshId) + log.Printf("[INFO] Requested cancellation of Instance Refresh (%s) on ASG (%s)", instanceRefreshID, asgName) - log.Printf("[DEBUG] Waiting for cancellation of Instance Refresh (%s) on ASG (%s)", instanceRefreshID, asgName) _, err = waiter.InstanceRefreshCancelled(conn, asgName, instanceRefreshID) if err != nil { return fmt.Errorf("error waiting for cancellation of Instance Refresh (%s) on ASG (%s): %w", instanceRefreshID, asgName, err) diff --git a/aws/resource_aws_autoscaling_group_test.go b/aws/resource_aws_autoscaling_group_test.go index aa7e2f54efd..d29c98502a1 100644 --- a/aws/resource_aws_autoscaling_group_test.go +++ b/aws/resource_aws_autoscaling_group_test.go @@ -1006,16 +1006,16 @@ func TestAccAWSAutoScalingGroup_InstanceRefresh_Enabled(t *testing.T) { Steps: []resource.TestStep{ { // check that an instance refresh isn't started by a new asg - Config: testAccAwsAutoScalingGroup_InstanceRefresh_Enabled("Alpha", true, 1), + Config: testAccAwsAutoScalingGroupConfig_InstanceRefresh_Basic("Alpha", 1), Check: resource.ComposeTestCheckFunc( testAccCheckAWSAutoScalingGroupExists(resourceName, &group), resource.TestCheckResourceAttr(resourceName, "min_size", "1"), resource.TestCheckResourceAttr(resourceName, "max_size", "2"), resource.TestCheckResourceAttr(resourceName, "desired_capacity", "1"), testAccCheckAutoscalingLatestInstanceRefreshState(&group, 0, 0, nil), - resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.instance_warmup_seconds", "-1"), - resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.min_healthy_percentage", "90"), + resource.TestCheckResourceAttr(resourceName, "instance_refresh.#", "1"), resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.strategy", "Rolling"), + resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.preferences.#", "0"), ), }, { @@ -1030,31 +1030,41 @@ func TestAccAWSAutoScalingGroup_InstanceRefresh_Enabled(t *testing.T) { }, { // check that changing asg size doesn't trigger a refresh - Config: testAccAwsAutoScalingGroup_InstanceRefresh_Enabled("Alpha", false, 2), + Config: testAccAwsAutoScalingGroupConfig_InstanceRefresh_Enabled("Alpha", 2), Check: resource.ComposeTestCheckFunc( testAccCheckAWSAutoScalingGroupExists(resourceName, &group), resource.TestCheckResourceAttr(resourceName, "min_size", "2"), resource.TestCheckResourceAttr(resourceName, "max_size", "4"), resource.TestCheckResourceAttr(resourceName, "desired_capacity", "2"), testAccCheckAutoscalingLatestInstanceRefreshState(&group, 0, 0, nil), + resource.TestCheckResourceAttr(resourceName, "instance_refresh.#", 
"1"), + resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.strategy", "Rolling"), + resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.preferences.#", "1"), + resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.preferences.0.instance_warmup", "10"), + resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.preferences.0.min_healthy_percentage", "50"), ), }, { // check that changing tags doesn't trigger a refresh - Config: testAccAwsAutoScalingGroup_InstanceRefresh_Enabled("Bravo", false, 1), + Config: testAccAwsAutoScalingGroupConfig_InstanceRefresh_Enabled("Bravo", 1), Check: resource.ComposeTestCheckFunc( testAccCheckAWSAutoScalingGroupExists(resourceName, &group), - resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.instance_warmup_seconds", "10"), - resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.min_healthy_percentage", "50"), - resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.strategy", "Rolling"), testAccCheckAutoscalingLatestInstanceRefreshState(&group, 0, 0, nil), ), }, - // TODO: check that an active refresh is cancelled in favour of a new one + { + Config: testAccAwsAutoScalingGroupConfig_InstanceRefresh_Disabled("Bravo", 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAutoScalingGroupExists(resourceName, &group), + resource.TestCheckNoResourceAttr(resourceName, "instance_refresh.#"), + ), + }, }, }) } +// TODO: check that an active refresh is cancelled in favour of a new one + func TestAccAWSAutoScalingGroup_InstanceRefresh_Triggers(t *testing.T) { matrix := []struct { AvailabilityZoneCount int @@ -1084,7 +1094,7 @@ func TestAccAWSAutoScalingGroup_InstanceRefresh_Triggers(t *testing.T) { steps := make([]resource.TestStep, len(matrix)) for i, test := range matrix { steps[i] = resource.TestStep{ - Config: testAccAwsAutoScalingGroup_InstanceRefresh_Triggers( + Config: testAccAwsAutoScalingGroupConfig_InstanceRefresh_Triggers( test.AvailabilityZoneCount, test.SubnetCount, test.InstanceType, @@ -4269,18 +4279,57 @@ resource "aws_autoscaling_group" "test" { `, rName) } -func testAccAwsAutoScalingGroup_InstanceRefresh_Enabled( - tagValue string, - defaults bool, - sizeFactor int, -) string { - preference := `` - if !defaults { - preference = ` - min_healthy_percentage = 50 - instance_warmup_seconds = 10` - } +func testAccAwsAutoScalingGroupConfig_InstanceRefresh_Basic(tagValue string, sizeFactor int) string { + return fmt.Sprintf(` +resource "aws_autoscaling_group" "test" { + availability_zones = [data.aws_availability_zones.current.names[0]] + max_size = 2 * local.size_factor + min_size = 1 * local.size_factor + desired_capacity = 1 * local.size_factor + launch_configuration = aws_launch_configuration.test.name + + tag { + key = "Test" + value = %[1]q + propagate_at_launch = true + } + + instance_refresh { + strategy = "Rolling" + } +} + +locals { + size_factor = %[2]d +} + +data "aws_ami" "test" { + most_recent = true + owners = ["amazon"] + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + +data "aws_availability_zones" "current" { + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} +resource "aws_launch_configuration" "test" { + image_id = data.aws_ami.test.id + instance_type = "t3.nano" +} +`, tagValue, sizeFactor) +} + +func testAccAwsAutoScalingGroupConfig_InstanceRefresh_Enabled(tagValue string, sizeFactor int) string { return fmt.Sprintf(` resource "aws_autoscaling_group" "test" { 
availability_zones = [data.aws_availability_zones.current.names[0]] @@ -4297,12 +4346,61 @@ resource "aws_autoscaling_group" "test" { instance_refresh { strategy = "Rolling" -%[2]s + preferences { + instance_warmup = 10 + min_healthy_percentage = 50 + } + } +} + +locals { + size_factor = %[2]d +} + +data "aws_ami" "test" { + most_recent = true + owners = ["amazon"] + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + +data "aws_availability_zones" "current" { + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +resource "aws_launch_configuration" "test" { + image_id = data.aws_ami.test.id + instance_type = "t3.nano" +} +`, tagValue, sizeFactor) +} + +func testAccAwsAutoScalingGroupConfig_InstanceRefresh_Disabled(tagValue string, sizeFactor int) string { + return fmt.Sprintf(` +resource "aws_autoscaling_group" "test" { + availability_zones = [data.aws_availability_zones.current.names[0]] + max_size = 2 * local.size_factor + min_size = 1 * local.size_factor + desired_capacity = 1 * local.size_factor + launch_configuration = aws_launch_configuration.test.name + + tag { + key = "Test" + value = %[1]q + propagate_at_launch = true } } locals { - size_factor = %[3]d + size_factor = %[2]d } data "aws_ami" "test" { @@ -4328,10 +4426,10 @@ resource "aws_launch_configuration" "test" { image_id = data.aws_ami.test.id instance_type = "t3.nano" } -`, tagValue, preference, sizeFactor) +`, tagValue, sizeFactor) } -func testAccAwsAutoScalingGroup_InstanceRefresh_Triggers( +func testAccAwsAutoScalingGroupConfig_InstanceRefresh_Triggers( availabilityZoneCount int, subnetCount int, instanceType string, diff --git a/website/docs/r/autoscaling_group.html.markdown b/website/docs/r/autoscaling_group.html.markdown index 73caf88010c..5ad6477eb9b 100644 --- a/website/docs/r/autoscaling_group.html.markdown +++ b/website/docs/r/autoscaling_group.html.markdown @@ -3,7 +3,7 @@ subcategory: "Autoscaling" layout: "aws" page_title: "AWS: aws_autoscaling_group" description: |- - Provides an AutoScaling Group resource. + Provides an Auto Scaling Group resource. --- # Resource: aws_autoscaling_group @@ -205,8 +205,10 @@ resource "aws_autoscaling_group" "example" { } instance_refresh { - strategy = "Rolling" - min_healthy_percentage = 50 + strategy = "Rolling" + preferences { + min_healthy_percentage = 50 + } } } ``` @@ -215,11 +217,11 @@ resource "aws_autoscaling_group" "example" { The following arguments are supported: -* `name` - (Optional) The name of the auto scaling group. By default generated by Terraform. +* `name` - (Optional) The name of the Auto Scaling Group. By default generated by Terraform. * `name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. -* `max_size` - (Required) The maximum size of the auto scale group. -* `min_size` - (Required) The minimum size of the auto scale group. +* `max_size` - (Required) The maximum size of the Auto Scaling Group. +* `min_size` - (Required) The minimum size of the Auto Scaling Group. (See also [Waiting for Capacity](#waiting-for-capacity) below.) * `availability_zones` - (Optional) A list of one or more availability zones for the group. Used for EC2-Classic and default subnets when not specified with `vpc_zone_identifier` argument. Conflicts with `vpc_zone_identifier`. * `default_cooldown` - (Optional) The amount of time, in seconds, after a scaling activity completes before another scaling activity can start. 
@@ -228,18 +230,18 @@ The following arguments are supported: * `mixed_instances_policy` (Optional) Configuration block containing settings to define launch targets for Auto Scaling groups. Defined below. * `initial_lifecycle_hook` - (Optional) One or more [Lifecycle Hooks](http://docs.aws.amazon.com/autoscaling/latest/userguide/lifecycle-hooks.html) - to attach to the autoscaling group **before** instances are launched. The + to attach to the Auto Scaling Group **before** instances are launched. The syntax is exactly the same as the separate [`aws_autoscaling_lifecycle_hook`](/docs/providers/aws/r/autoscaling_lifecycle_hook.html) resource, without the `autoscaling_group_name` attribute. Please note that this will only work when creating - a new autoscaling group. For all other use-cases, please use `aws_autoscaling_lifecycle_hook` resource. + a new Auto Scaling Group. For all other use-cases, please use `aws_autoscaling_lifecycle_hook` resource. * `health_check_grace_period` - (Optional, Default: 300) Time (in seconds) after instance comes into service before checking health. * `health_check_type` - (Optional) "EC2" or "ELB". Controls how health checking is done. * `desired_capacity` - (Optional) The number of Amazon EC2 instances that should be running in the group. (See also [Waiting for Capacity](#waiting-for-capacity) below.) -* `force_delete` - (Optional) Allows deleting the autoscaling group without waiting - for all instances in the pool to terminate. You can force an autoscaling group to delete +* `force_delete` - (Optional) Allows deleting the Auto Scaling Group without waiting + for all instances in the pool to terminate. You can force an Auto Scaling Group to delete even if it's in the process of scaling a resource. Normally, Terraform drains all the instances before deleting the group. This bypasses that behavior and potentially leaves resources dangling. @@ -247,9 +249,9 @@ The following arguments are supported: group names. Only valid for classic load balancers. For ALBs, use `target_group_arns` instead. * `vpc_zone_identifier` (Optional) A list of subnet IDs to launch resources in. Subnets automatically determine which availability zones the group will reside. Conflicts with `availability_zones`. * `target_group_arns` (Optional) A set of `aws_alb_target_group` ARNs, for use with Application or Network Load Balancing. -* `termination_policies` (Optional) A list of policies to decide how the instances in the auto scale group should be terminated. The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `OldestLaunchTemplate`, `AllocationStrategy`, `Default`. -* `suspended_processes` - (Optional) A list of processes to suspend for the AutoScaling Group. The allowed values are `Launch`, `Terminate`, `HealthCheck`, `ReplaceUnhealthy`, `AZRebalance`, `AlarmNotification`, `ScheduledActions`, `AddToLoadBalancer`. -Note that if you suspend either the `Launch` or `Terminate` process types, it can prevent your autoscaling group from functioning properly. +* `termination_policies` (Optional) A list of policies to decide how the instances in the Auto Scaling Group should be terminated. The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `OldestLaunchTemplate`, `AllocationStrategy`, `Default`. +* `suspended_processes` - (Optional) A list of processes to suspend for the Auto Scaling Group. 
The allowed values are `Launch`, `Terminate`, `HealthCheck`, `ReplaceUnhealthy`, `AZRebalance`, `AlarmNotification`, `ScheduledActions`, `AddToLoadBalancer`.
+Note that if you suspend either the `Launch` or `Terminate` process types, it can prevent your Auto Scaling Group from functioning properly.
 * `tag` (Optional) Configuration block(s) containing resource tags. Conflicts with `tags`. Documented below.
 * `tags` (Optional) Set of maps containing resource tags. Conflicts with `tag`. Documented below.
 * `placement_group` (Optional) The name of the placement group into which you'll launch your instances, if any.
@@ -261,22 +263,22 @@ Note that if you suspend either the `Launch` or `Terminate` process types, it ca
   for Capacity](#waiting-for-capacity) below.) Setting this to "0" causes
   Terraform to skip all Capacity Waiting behavior.
 * `min_elb_capacity` - (Optional) Setting this causes Terraform to wait for
-   this number of instances from this autoscaling group to show up healthy in the
+   this number of instances from this Auto Scaling Group to show up healthy in the
   ELB only on creation. Updates will not wait on ELB instance number changes.
   (See also [Waiting for Capacity](#waiting-for-capacity) below.)
 * `wait_for_elb_capacity` - (Optional) Setting this will cause Terraform to wait
-   for exactly this number of healthy instances from this autoscaling group in
+   for exactly this number of healthy instances from this Auto Scaling Group in
   all attached load balancers on both create and update operations. (Takes
   precedence over `min_elb_capacity` behavior.)
   (See also [Waiting for Capacity](#waiting-for-capacity) below.)
 * `protect_from_scale_in` (Optional) Allows setting instance protection. The
-   autoscaling group will not select instances with this setting for termination
+   Auto Scaling Group will not select instances with this setting for termination
   during scale in events.
 * `service_linked_role_arn` (Optional) The ARN of the service-linked role that the ASG will use to call other AWS services
 * `max_instance_lifetime` (Optional) The maximum amount of time, in seconds, that an instance can be in service, values must be either equal to 0 or between 604800 and 31536000 seconds.
 * `instance_refresh` - (Optional) If this block is configured, start an
   [Instance Refresh](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html)
-   when this autoscaling group is updated. Defined below.
+   when this Auto Scaling Group is updated. Defined [below](#instance_refresh).

 ### launch_template

@@ -348,18 +350,16 @@ This allows the construction of dynamic lists of tags which is not possible usin

 This configuration block supports the following:

-* `instance_warmup_seconds` - (Optional) The number of seconds until a newly launched
-   instance is configured and ready to use. Default behavior (set with `-1` or `null`)
-   is to match the autoscaling group's health check grace period.
-* `min_healthy_percentage` - (Optional) The amount of capacity in the Auto Scaling group
+* `strategy` - (Required) The strategy to use for instance refresh. The only allowed value is `Rolling`. See [StartInstanceRefresh Action](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_StartInstanceRefresh.html#API_StartInstanceRefresh_RequestParameters) for more information.
+* `preferences` - (Optional) Override default parameters for Instance Refresh.
+  * `instance_warmup` - (Optional) The number of seconds until a newly launched instance is configured and ready to use.
Default behavior (set with `-1` or `null`) is to match the Auto Scaling Group's health check grace period. + * `min_healthy_percentage` - (Optional) The amount of capacity in the Auto Scaling group that must remain healthy during an instance refresh to allow the operation to continue, as a percentage of the desired capacity of the Auto Scaling group. Defaults to `90`. -* `strategy` - (Required) The strategy to use for instance refresh. The only allowed - value is `"Rolling"`. See [StartInstanceRefresh Action](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_StartInstanceRefresh.html#API_StartInstanceRefresh_RequestParameters) for more information. -~> **NOTE:** A refresh is only started when any of the following autoscaling group properties change: `launch_configuration`, `launch_template`, `mixed_instances_policy`, `vpc_zone_identifier`, `availability_zones`, `placement_group`, or any `tag` or `tags` configured to propagate at launch. +~> **NOTE:** A refresh is only started when any of the following Auto Scaling Group properties change: `launch_configuration`, `launch_template`, `mixed_instances_policy`, `vpc_zone_identifier`, `availability_zones`, `placement_group`, or any `tag` or `tags` configured to propagate at launch. -~> **NOTE:** Autoscaling groups support up to one active instance refresh at a time. When this resource is updated, any existing refresh is cancelled. +~> **NOTE:** Auto Scaling Groups support up to one active instance refresh at a time. When this resource is updated, any existing refresh is cancelled. ~> **NOTE:** Depending on health check settings and group size, an instance refresh may take a long time or fail. This resource does not wait for the instance refresh to complete. @@ -367,8 +367,8 @@ This configuration block supports the following: In addition to all arguments above, the following attributes are exported: -* `id` - The autoscaling group id. -* `arn` - The ARN for this AutoScaling Group +* `id` - The Auto Scaling Group id. +* `arn` - The ARN for this Auto Scaling Group * `availability_zones` - The availability zones of the autoscale group. * `min_size` - The minimum size of the autoscale group * `max_size` - The maximum size of the autoscale group @@ -387,7 +387,7 @@ the `initial_lifecycle_hook` attribute from this resource, or via the separate [`aws_autoscaling_lifecycle_hook`](/docs/providers/aws/r/autoscaling_lifecycle_hook.html) resource. `initial_lifecycle_hook` exists here because any lifecycle hooks added with `aws_autoscaling_lifecycle_hook` will not be added until the -autoscaling group has been created, and depending on your +Auto Scaling Group has been created, and depending on your [capacity](#waiting-for-capacity) settings, after the initial instances have been launched, creating unintended behavior. If you need hooks to run on all instances, add them with `initial_lifecycle_hook` here, but take @@ -466,7 +466,7 @@ for more information. ## Import -AutoScaling Groups can be imported using the `name`, e.g. +Auto Scaling Groups can be imported using the `name`, e.g. ``` $ terraform import aws_autoscaling_group.web web-asg From da1ee9d0f8be99623370285fc55c33777b681c15 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 12 Nov 2020 08:30:20 -0500 Subject: [PATCH 0079/1212] r/aws_appmesh_virtual_node: Add ExactlyOneOf constraint on 'spec.listener.tls.certificate'. 
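ExactlyOneOf makes schema validation reject a configuration in which neither
or both of the listed attribute paths are set, so the mistake is caught at
plan time rather than by the App Mesh API. A condensed sketch of the pattern
this change applies (only the fields relevant to the constraint are shown;
the full schema is in the diff below):

    "acm": {
        Type:     schema.TypeList,
        Optional: true,
        // Exactly one of the ACM and file certificate sources may be set.
        ExactlyOneOf: []string{
            "spec.0.listener.0.tls.0.certificate.0.acm",
            "spec.0.listener.0.tls.0.certificate.0.file",
        },
    },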
--- aws/resource_aws_appmesh_virtual_node.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/aws/resource_aws_appmesh_virtual_node.go b/aws/resource_aws_appmesh_virtual_node.go index db86d4a1a5f..81a6e2930e3 100644 --- a/aws/resource_aws_appmesh_virtual_node.go +++ b/aws/resource_aws_appmesh_virtual_node.go @@ -402,6 +402,7 @@ func resourceAwsAppmeshVirtualNode() *schema.Resource { }, }, }, + ExactlyOneOf: []string{"spec.0.listener.0.tls.0.certificate.0.acm", "spec.0.listener.0.tls.0.certificate.0.file"}, }, "file": { @@ -424,6 +425,7 @@ func resourceAwsAppmeshVirtualNode() *schema.Resource { }, }, }, + ExactlyOneOf: []string{"spec.0.listener.0.tls.0.certificate.0.acm", "spec.0.listener.0.tls.0.certificate.0.file"}, }, }, }, From 851215075fd3bba4ba3a014710d20744f1cee0d5 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 12 Nov 2020 08:34:44 -0500 Subject: [PATCH 0080/1212] r/aws_appmesh_virtual_node: Add ExactlyOneOf constraint on 'spec.listener.timeout'. --- aws/resource_aws_appmesh_virtual_node.go | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/aws/resource_aws_appmesh_virtual_node.go b/aws/resource_aws_appmesh_virtual_node.go index 81a6e2930e3..2156bf98b09 100644 --- a/aws/resource_aws_appmesh_virtual_node.go +++ b/aws/resource_aws_appmesh_virtual_node.go @@ -234,6 +234,12 @@ func resourceAwsAppmeshVirtualNode() *schema.Resource { }, }, }, + ExactlyOneOf: []string{ + "spec.0.listener.0.timeout.0.grpc", + "spec.0.listener.0.timeout.0.http", + "spec.0.listener.0.timeout.0.http2", + "spec.0.listener.0.timeout.0.tcp", + }, }, "http": { @@ -286,6 +292,12 @@ func resourceAwsAppmeshVirtualNode() *schema.Resource { }, }, }, + ExactlyOneOf: []string{ + "spec.0.listener.0.timeout.0.grpc", + "spec.0.listener.0.timeout.0.http", + "spec.0.listener.0.timeout.0.http2", + "spec.0.listener.0.timeout.0.tcp", + }, }, "http2": { @@ -338,6 +350,12 @@ func resourceAwsAppmeshVirtualNode() *schema.Resource { }, }, }, + ExactlyOneOf: []string{ + "spec.0.listener.0.timeout.0.grpc", + "spec.0.listener.0.timeout.0.http", + "spec.0.listener.0.timeout.0.http2", + "spec.0.listener.0.timeout.0.tcp", + }, }, "tcp": { @@ -369,6 +387,12 @@ func resourceAwsAppmeshVirtualNode() *schema.Resource { }, }, }, + ExactlyOneOf: []string{ + "spec.0.listener.0.timeout.0.grpc", + "spec.0.listener.0.timeout.0.http", + "spec.0.listener.0.timeout.0.http2", + "spec.0.listener.0.timeout.0.tcp", + }, }, }, }, From 065d845039fa0ca0edbdb590ab1c8b1905a2a99b Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 12 Nov 2020 10:22:26 -0500 Subject: [PATCH 0081/1212] r/aws_appmesh_virtual_node: Add 'listener.connection_pool' attribute. Acceptance test output: $ make testacc TEST=./aws TESTARGS='-run=TestAccAWSAppmesh/VirtualNode/listenerConnectionPool' ==> Checking that code complies with gofmt requirements... 
TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccAWSAppmesh/VirtualNode/listenerConnectionPool -timeout 120m === RUN TestAccAWSAppmesh_serial === RUN TestAccAWSAppmesh_serial/VirtualNode === RUN TestAccAWSAppmesh_serial/VirtualNode/listenerConnectionPool --- PASS: TestAccAWSAppmesh_serial (28.73s) --- PASS: TestAccAWSAppmesh_serial/VirtualNode (28.73s) --- PASS: TestAccAWSAppmesh_serial/VirtualNode/listenerConnectionPool (28.73s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 28.802s --- aws/resource_aws_appmesh_test.go | 9 +- aws/resource_aws_appmesh_virtual_node.go | 131 +++- aws/resource_aws_appmesh_virtual_node_test.go | 721 +++++++++++------- aws/structure.go | 142 +++- 4 files changed, 689 insertions(+), 314 deletions(-) diff --git a/aws/resource_aws_appmesh_test.go b/aws/resource_aws_appmesh_test.go index 321717163d5..1dc1723d242 100644 --- a/aws/resource_aws_appmesh_test.go +++ b/aws/resource_aws_appmesh_test.go @@ -34,9 +34,9 @@ func TestAccAWSAppmesh_serial(t *testing.T) { "tags": testAccAwsAppmeshRoute_tags, }, "VirtualGateway": { - "backendDefaults": testAccAwsAppmeshVirtualGateway_BackendDefaults, "basic": testAccAwsAppmeshVirtualGateway_basic, "disappears": testAccAwsAppmeshVirtualGateway_disappears, + "backendDefaults": testAccAwsAppmeshVirtualGateway_BackendDefaults, "listenerConnectionPool": testAccAwsAppmeshVirtualGateway_ListenerConnectionPool, "listenerHealthChecks": testAccAwsAppmeshVirtualGateway_ListenerHealthChecks, "listenerTls": testAccAwsAppmeshVirtualGateway_ListenerTls, @@ -45,14 +45,15 @@ func TestAccAWSAppmesh_serial(t *testing.T) { }, "VirtualNode": { "basic": testAccAwsAppmeshVirtualNode_basic, + "backendClientPolicyAcm": testAccAwsAppmeshVirtualNode_backendClientPolicyAcm, + "backendClientPolicyFile": testAccAwsAppmeshVirtualNode_backendClientPolicyFile, "backendDefaults": testAccAwsAppmeshVirtualNode_backendDefaults, - "clientPolicyAcm": testAccAwsAppmeshVirtualNode_clientPolicyAcm, - "clientPolicyFile": testAccAwsAppmeshVirtualNode_clientPolicyFile, "cloudMapServiceDiscovery": testAccAwsAppmeshVirtualNode_cloudMapServiceDiscovery, + "listenerConnectionPool": testAccAwsAppmeshVirtualNode_listenerConnectionPool, "listenerHealthChecks": testAccAwsAppmeshVirtualNode_listenerHealthChecks, "listenerTimeout": testAccAwsAppmeshVirtualNode_listenerTimeout, + "listenerTls": testAccAwsAppmeshVirtualNode_listenerTls, "logging": testAccAwsAppmeshVirtualNode_logging, - "tls": testAccAwsAppmeshVirtualNode_tls, "tags": testAccAwsAppmeshVirtualNode_tags, }, "VirtualRouter": { diff --git a/aws/resource_aws_appmesh_virtual_node.go b/aws/resource_aws_appmesh_virtual_node.go index 2156bf98b09..e6ae6ef9489 100644 --- a/aws/resource_aws_appmesh_virtual_node.go +++ b/aws/resource_aws_appmesh_virtual_node.go @@ -103,6 +103,110 @@ func resourceAwsAppmeshVirtualNode() *schema.Resource { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "connection_pool": { + Type: schema.TypeList, + Optional: true, + MinItems: 0, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "grpc": { + Type: schema.TypeList, + Optional: true, + MinItems: 0, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_requests": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(1), + }, + }, + }, + ExactlyOneOf: []string{ + "spec.0.listener.0.connection_pool.0.grpc", + "spec.0.listener.0.connection_pool.0.http", + "spec.0.listener.0.connection_pool.0.http2", + 
"spec.0.listener.0.connection_pool.0.tcp", + }, + }, + + "http": { + Type: schema.TypeList, + Optional: true, + MinItems: 0, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_connections": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(1), + }, + + "max_pending_requests": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(1), + }, + }, + }, + ExactlyOneOf: []string{ + "spec.0.listener.0.connection_pool.0.grpc", + "spec.0.listener.0.connection_pool.0.http", + "spec.0.listener.0.connection_pool.0.http2", + "spec.0.listener.0.connection_pool.0.tcp", + }, + }, + + "http2": { + Type: schema.TypeList, + Optional: true, + MinItems: 0, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_requests": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(1), + }, + }, + }, + ExactlyOneOf: []string{ + "spec.0.listener.0.connection_pool.0.grpc", + "spec.0.listener.0.connection_pool.0.http", + "spec.0.listener.0.connection_pool.0.http2", + "spec.0.listener.0.connection_pool.0.tcp", + }, + }, + + "tcp": { + Type: schema.TypeList, + Optional: true, + MinItems: 0, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_connections": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(1), + }, + }, + }, + ExactlyOneOf: []string{ + "spec.0.listener.0.connection_pool.0.grpc", + "spec.0.listener.0.connection_pool.0.http", + "spec.0.listener.0.connection_pool.0.http2", + "spec.0.listener.0.connection_pool.0.tcp", + }, + }, + }, + }, + }, + "health_check": { Type: schema.TypeList, Optional: true, @@ -689,10 +793,11 @@ func resourceAwsAppmeshVirtualNodeCreate(d *schema.ResourceData, meta interface{ req.MeshOwner = aws.String(v.(string)) } - log.Printf("[DEBUG] Creating App Mesh virtual node: %#v", req) + log.Printf("[DEBUG] Creating App Mesh virtual node: %s", req) resp, err := conn.CreateVirtualNode(req) + if err != nil { - return fmt.Errorf("error creating App Mesh virtual node: %s", err) + return fmt.Errorf("error creating App Mesh virtual node: %w", err) } d.SetId(aws.StringValue(resp.VirtualNode.Metadata.Uid)) @@ -713,14 +818,17 @@ func resourceAwsAppmeshVirtualNodeRead(d *schema.ResourceData, meta interface{}) } resp, err := conn.DescribeVirtualNode(req) + if isAWSErr(err, appmesh.ErrCodeNotFoundException, "") { log.Printf("[WARN] App Mesh virtual node (%s) not found, removing from state", d.Id()) d.SetId("") return nil } + if err != nil { - return fmt.Errorf("error reading App Mesh virtual node: %s", err) + return fmt.Errorf("error reading App Mesh virtual node (%s): %w", d.Id(), err) } + if aws.StringValue(resp.VirtualNode.Status.Status) == appmesh.VirtualNodeStatusCodeDeleted { log.Printf("[WARN] App Mesh virtual node (%s) not found, removing from state", d.Id()) d.SetId("") @@ -737,17 +845,17 @@ func resourceAwsAppmeshVirtualNodeRead(d *schema.ResourceData, meta interface{}) d.Set("resource_owner", resp.VirtualNode.Metadata.ResourceOwner) err = d.Set("spec", flattenAppmeshVirtualNodeSpec(resp.VirtualNode.Spec)) if err != nil { - return fmt.Errorf("error setting spec: %s", err) + return fmt.Errorf("error setting spec: %w", err) } tags, err := keyvaluetags.AppmeshListTags(conn, arn) if err != nil { - return fmt.Errorf("error listing tags for App Mesh virtual node (%s): %s", arn, err) + return fmt.Errorf("error listing tags for App Mesh virtual node (%s): %w", arn, err) } if err := 
d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil @@ -767,10 +875,11 @@ func resourceAwsAppmeshVirtualNodeUpdate(d *schema.ResourceData, meta interface{ req.MeshOwner = aws.String(v.(string)) } - log.Printf("[DEBUG] Updating App Mesh virtual node: %#v", req) + log.Printf("[DEBUG] Updating App Mesh virtual node: %s", req) _, err := conn.UpdateVirtualNode(req) + if err != nil { - return fmt.Errorf("error updating App Mesh virtual node: %s", err) + return fmt.Errorf("error updating App Mesh virtual node (%s): %w", d.Id(), err) } } @@ -779,7 +888,7 @@ func resourceAwsAppmeshVirtualNodeUpdate(d *schema.ResourceData, meta interface{ o, n := d.GetChange("tags") if err := keyvaluetags.AppmeshUpdateTags(conn, arn, o, n); err != nil { - return fmt.Errorf("error updating App Mesh virtual node (%s) tags: %s", arn, err) + return fmt.Errorf("error updating App Mesh virtual node (%s) tags: %w", arn, err) } } @@ -794,11 +903,13 @@ func resourceAwsAppmeshVirtualNodeDelete(d *schema.ResourceData, meta interface{ MeshName: aws.String(d.Get("mesh_name").(string)), VirtualNodeName: aws.String(d.Get("name").(string)), }) + if isAWSErr(err, appmesh.ErrCodeNotFoundException, "") { return nil } + if err != nil { - return fmt.Errorf("error deleting App Mesh virtual node: %s", err) + return fmt.Errorf("error deleting App Mesh virtual node (%s): %w", d.Id(), err) } return nil diff --git a/aws/resource_aws_appmesh_virtual_node_test.go b/aws/resource_aws_appmesh_virtual_node_test.go index 8b5c5a43b4c..82b1c2186ec 100644 --- a/aws/resource_aws_appmesh_virtual_node_test.go +++ b/aws/resource_aws_appmesh_virtual_node_test.go @@ -125,48 +125,66 @@ func testAccAwsAppmeshVirtualNode_basic(t *testing.T) { }) } -func testAccAwsAppmeshVirtualNode_cloudMapServiceDiscovery(t *testing.T) { +func testAccAwsAppmeshVirtualNode_backendClientPolicyAcm(t *testing.T) { var vn appmesh.VirtualNodeData + var ca acmpca.CertificateAuthority resourceName := "aws_appmesh_virtual_node.test" - nsResourceName := "aws_service_discovery_http_namespace.test" + acmCAResourceName := "aws_acmpca_certificate_authority.test" meshName := acctest.RandomWithPrefix("tf-acc-test") vnName := acctest.RandomWithPrefix("tf-acc-test") - // Avoid 'config is invalid: last character of "name" must be a letter' for aws_service_discovery_http_namespace. - rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(20, acctest.CharSetAlpha)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(appmesh.EndpointsID, t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAppmeshVirtualNodeDestroy, Steps: []resource.TestStep{ + // We need to create and activate the CA before issuing a certificate. 
{ - Config: testAccAppmeshVirtualNodeConfig_cloudMapServiceDiscovery(meshName, vnName, rName, "Key1", "Value1"), + Config: testAccAppmeshVirtualNodeConfigRootCA(vnName), Check: resource.ComposeTestCheckFunc( - testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), - resource.TestCheckResourceAttr(resourceName, "name", vnName), - resource.TestCheckResourceAttr(resourceName, "mesh_name", meshName), - resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.aws_cloud_map.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.aws_cloud_map.0.attributes.%", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.aws_cloud_map.0.attributes.Key1", "Value1"), - resource.TestCheckResourceAttrPair(resourceName, "spec.0.service_discovery.0.aws_cloud_map.0.namespace_name", nsResourceName, "name"), - resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.aws_cloud_map.0.service_name", rName), + testAccCheckAwsAcmpcaCertificateAuthorityExists(acmCAResourceName, &ca), + testAccCheckAwsAcmpcaCertificateAuthorityActivateCA(&ca), ), }, { - Config: testAccAppmeshVirtualNodeConfig_cloudMapServiceDiscovery(meshName, vnName, rName, "Key1", "Value2"), + Config: testAccAppmeshVirtualNodeConfig_clientPolicyAcm(meshName, vnName), Check: resource.ComposeTestCheckFunc( testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), resource.TestCheckResourceAttr(resourceName, "name", vnName), resource.TestCheckResourceAttr(resourceName, "mesh_name", meshName), + testAccCheckResourceAttrAccountID(resourceName, "mesh_owner"), resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "spec.0.backend.*", map[string]string{ + "virtual_service.#": "1", + "virtual_service.0.client_policy.#": "1", + "virtual_service.0.client_policy.0.tls.#": "1", + "virtual_service.0.client_policy.0.tls.0.enforce": "true", + "virtual_service.0.client_policy.0.tls.0.ports.#": "1", + "virtual_service.0.client_policy.0.tls.0.validation.#": "1", + "virtual_service.0.client_policy.0.tls.0.validation.0.trust.#": "1", + "virtual_service.0.client_policy.0.tls.0.validation.0.trust.0.acm.#": "1", + "virtual_service.0.client_policy.0.tls.0.validation.0.trust.0.file.#": "0", + "virtual_service.0.virtual_service_name": "servicea.simpleapp.local", + }), + resource.TestCheckTypeSetElemAttr(resourceName, "spec.0.backend.*.virtual_service.0.client_policy.0.tls.0.ports.*", "8443"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "spec.0.backend.*.virtual_service.0.client_policy.0.tls.0.acm.certificate_authority_arns.*", acmCAResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.port", "8080"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "http"), + 
resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.logging.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.aws_cloud_map.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.aws_cloud_map.0.attributes.%", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.aws_cloud_map.0.attributes.Key1", "Value2"), - resource.TestCheckResourceAttrPair(resourceName, "spec.0.service_discovery.0.aws_cloud_map.0.namespace_name", nsResourceName, "name"), - resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.aws_cloud_map.0.service_name", rName), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.0.hostname", "serviceb.simpleapp.local"), + resource.TestCheckResourceAttrSet(resourceName, "created_date"), + resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), + testAccCheckResourceAttrAccountID(resourceName, "resource_owner"), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "appmesh", fmt.Sprintf("mesh/%s/virtualNode/%s", meshName, vnName)), ), }, { @@ -175,11 +193,19 @@ func testAccAwsAppmeshVirtualNode_cloudMapServiceDiscovery(t *testing.T) { ImportState: true, ImportStateVerify: true, }, + { + Config: testAccAppmeshVirtualNodeConfig_clientPolicyAcm(meshName, vnName), + Check: resource.ComposeTestCheckFunc( + // CA must be DISABLED for deletion. + testAccCheckAwsAcmpcaCertificateAuthorityDisableCA(&ca), + ), + ExpectNonEmptyPlan: true, + }, }, }) } -func testAccAwsAppmeshVirtualNode_listenerHealthChecks(t *testing.T) { +func testAccAwsAppmeshVirtualNode_backendClientPolicyFile(t *testing.T) { var vn appmesh.VirtualNodeData resourceName := "aws_appmesh_virtual_node.test" meshName := acctest.RandomWithPrefix("tf-acc-test") @@ -191,7 +217,7 @@ func testAccAwsAppmeshVirtualNode_listenerHealthChecks(t *testing.T) { CheckDestroy: testAccCheckAppmeshVirtualNodeDestroy, Steps: []resource.TestStep{ { - Config: testAccAppmeshVirtualNodeConfig_listenerHealthChecks(meshName, vnName), + Config: testAccAppmeshVirtualNodeConfig_clientPolicyFile(meshName, vnName), Check: resource.ComposeTestCheckFunc( testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), resource.TestCheckResourceAttr(resourceName, "name", vnName), @@ -200,24 +226,26 @@ func testAccAwsAppmeshVirtualNode_listenerHealthChecks(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.backend.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "spec.0.backend.*", map[string]string{ - "virtual_service.#": "1", - "virtual_service.0.client_policy.#": "0", - "virtual_service.0.virtual_service_name": "servicea.simpleapp.local", + "virtual_service.#": "1", + "virtual_service.0.client_policy.#": "1", + "virtual_service.0.client_policy.0.tls.#": "1", + "virtual_service.0.client_policy.0.tls.0.enforce": "true", + "virtual_service.0.client_policy.0.tls.0.ports.#": "1", + "virtual_service.0.client_policy.0.tls.0.validation.#": "1", + "virtual_service.0.client_policy.0.tls.0.validation.0.trust.#": "1", + "virtual_service.0.client_policy.0.tls.0.validation.0.trust.0.acm.#": "0", + "virtual_service.0.client_policy.0.tls.0.validation.0.trust.0.file.#": "1", + 
"virtual_service.0.client_policy.0.tls.0.validation.0.trust.0.file.0.certificate_chain": "/cert_chain.pem", + "virtual_service.0.virtual_service_name": "servicea.simpleapp.local", }), + resource.TestCheckTypeSetElemAttr(resourceName, "spec.0.backend.*.virtual_service.0.client_policy.0.tls.0.ports.*", "8443"), resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.healthy_threshold", "3"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.interval_millis", "5000"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.path", "/ping"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.port", "8080"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.protocol", "http2"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.timeout_millis", "2000"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.unhealthy_threshold", "5"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.port", "8080"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "grpc"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "http"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.logging.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.#", "1"), @@ -230,42 +258,41 @@ func testAccAwsAppmeshVirtualNode_listenerHealthChecks(t *testing.T) { ), }, { - Config: testAccAppmeshVirtualNodeConfig_listenerHealthChecksUpdated(meshName, vnName), + Config: testAccAppmeshVirtualNodeConfig_clientPolicyFileUpdated(meshName, vnName), Check: resource.ComposeTestCheckFunc( testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), resource.TestCheckResourceAttr(resourceName, "name", vnName), resource.TestCheckResourceAttr(resourceName, "mesh_name", meshName), testAccCheckResourceAttrAccountID(resourceName, "mesh_owner"), resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend.#", "2"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "spec.0.backend.*", map[string]string{ - "virtual_service.#": "1", - "virtual_service.0.client_policy.#": "0", - "virtual_service.0.virtual_service_name": "servicec.simpleapp.local", - }), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "spec.0.backend.*", map[string]string{ - "virtual_service.#": "1", - "virtual_service.0.client_policy.#": "0", - "virtual_service.0.virtual_service_name": "serviced.simpleapp.local", + "virtual_service.#": "1", + "virtual_service.0.client_policy.#": "1", + "virtual_service.0.client_policy.0.tls.#": "1", + 
"virtual_service.0.client_policy.0.tls.0.enforce": "true", + "virtual_service.0.client_policy.0.tls.0.ports.#": "2", + "virtual_service.0.client_policy.0.tls.0.validation.#": "1", + "virtual_service.0.client_policy.0.tls.0.validation.0.trust.#": "1", + "virtual_service.0.client_policy.0.tls.0.validation.0.trust.0.acm.#": "0", + "virtual_service.0.client_policy.0.tls.0.validation.0.trust.0.file.#": "1", + "virtual_service.0.client_policy.0.tls.0.validation.0.trust.0.file.0.certificate_chain": "/etc/ssl/certs/cert_chain.pem", + "virtual_service.0.virtual_service_name": "servicea.simpleapp.local", }), + resource.TestCheckTypeSetElemAttr(resourceName, "spec.0.backend.*.virtual_service.0.client_policy.0.tls.0.ports.*", "443"), + resource.TestCheckTypeSetElemAttr(resourceName, "spec.0.backend.*.virtual_service.0.client_policy.0.tls.0.ports.*", "8443"), resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.healthy_threshold", "4"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.interval_millis", "7000"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.port", "8081"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.protocol", "tcp"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.timeout_millis", "3000"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.unhealthy_threshold", "9"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.port", "8081"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.port", "8080"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "http"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.logging.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.0.hostname", "serviceb1.simpleapp.local"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.0.hostname", "serviceb.simpleapp.local"), resource.TestCheckResourceAttrSet(resourceName, "created_date"), resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), testAccCheckResourceAttrAccountID(resourceName, "resource_owner"), @@ -282,7 +309,7 @@ func testAccAwsAppmeshVirtualNode_listenerHealthChecks(t *testing.T) { }) } -func testAccAwsAppmeshVirtualNode_listenerTimeout(t *testing.T) { +func testAccAwsAppmeshVirtualNode_backendDefaults(t *testing.T) { var vn appmesh.VirtualNodeData resourceName := "aws_appmesh_virtual_node.test" meshName := acctest.RandomWithPrefix("tf-acc-test") @@ -294,34 +321,28 @@ func 
testAccAwsAppmeshVirtualNode_listenerTimeout(t *testing.T) { CheckDestroy: testAccCheckAppmeshVirtualNodeDestroy, Steps: []resource.TestStep{ { - Config: testAccAppmeshVirtualNodeConfig_listenerTimeout(meshName, vnName), + Config: testAccAppmeshVirtualNodeConfig_backendDefaults(meshName, vnName), Check: resource.ComposeTestCheckFunc( testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), resource.TestCheckResourceAttr(resourceName, "name", vnName), resource.TestCheckResourceAttr(resourceName, "mesh_name", meshName), testAccCheckResourceAttrAccountID(resourceName, "mesh_owner"), resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend.#", "1"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "spec.0.backend.*", map[string]string{ - "virtual_service.#": "1", - "virtual_service.0.virtual_service_name": "servicea.simpleapp.local", - }), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.port", "8080"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "tcp"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.grpc.#", "0"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.http.#", "0"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.http2.#", "0"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.tcp.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.tcp.0.idle.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.tcp.0.idle.0.unit", "ms"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.tcp.0.idle.0.value", "250000"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.enforce", "true"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.ports.#", "1"), + resource.TestCheckTypeSetElemAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.ports.*", "8443"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.validation.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.validation.0.trust.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.validation.0.trust.0.acm.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.validation.0.trust.0.file.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.validation.0.trust.0.file.0.certificate_chain", "/cert_chain.pem"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.logging.#", "0"), 
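// Sketch of the configuration shape the backend_defaults assertions above
// imply (the real testAccAppmeshVirtualNodeConfig_backendDefaults helper
// lives with the other config builders in this file); attribute values
// mirror the checks only.
const backendDefaultsSketch = `
resource "aws_appmesh_virtual_node" "test" {
  name      = "example"
  mesh_name = aws_appmesh_mesh.test.id

  spec {
    backend_defaults {
      client_policy {
        tls {
          enforce = true
          ports   = [8443]

          validation {
            trust {
              file {
                certificate_chain = "/cert_chain.pem"
              }
            }
          }
        }
      }
    }
  }
}
`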
- resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.0.hostname", "serviceb.simpleapp.local"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.#", "0"), resource.TestCheckResourceAttrSet(resourceName, "created_date"), resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), testAccCheckResourceAttrAccountID(resourceName, "resource_owner"), @@ -329,37 +350,29 @@ func testAccAwsAppmeshVirtualNode_listenerTimeout(t *testing.T) { ), }, { - Config: testAccAppmeshVirtualNodeConfig_listenerTimeoutUpdated(meshName, vnName), + Config: testAccAppmeshVirtualNodeConfig_backendDefaultsUpdated(meshName, vnName), Check: resource.ComposeTestCheckFunc( testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), resource.TestCheckResourceAttr(resourceName, "name", vnName), resource.TestCheckResourceAttr(resourceName, "mesh_name", meshName), testAccCheckResourceAttrAccountID(resourceName, "mesh_owner"), resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend.#", "1"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "spec.0.backend.*", map[string]string{ - "virtual_service.#": "1", - "virtual_service.0.virtual_service_name": "servicea.simpleapp.local", - }), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.port", "8080"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "http"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.grpc.#", "0"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.http.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.http.0.idle.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.http.0.idle.0.unit", "s"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.http.0.idle.0.value", "10"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.http.0.per_request.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.http.0.per_request.0.unit", "s"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.http.0.per_request.0.value", "5"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.http2.#", "0"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.tcp.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.enforce", "true"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.ports.#", "2"), + resource.TestCheckTypeSetElemAttr(resourceName, 
"spec.0.backend_defaults.0.client_policy.0.tls.0.ports.*", "443"), + resource.TestCheckTypeSetElemAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.ports.*", "8443"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.validation.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.validation.0.trust.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.validation.0.trust.0.acm.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.validation.0.trust.0.file.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.validation.0.trust.0.file.0.certificate_chain", "/etc/ssl/certs/cert_chain.pem"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.logging.#", "0"), - resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.0.hostname", "serviceb.simpleapp.local"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.#", "0"), resource.TestCheckResourceAttrSet(resourceName, "created_date"), resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), testAccCheckResourceAttrAccountID(resourceName, "resource_owner"), @@ -376,11 +389,14 @@ func testAccAwsAppmeshVirtualNode_listenerTimeout(t *testing.T) { }) } -func testAccAwsAppmeshVirtualNode_logging(t *testing.T) { +func testAccAwsAppmeshVirtualNode_cloudMapServiceDiscovery(t *testing.T) { var vn appmesh.VirtualNodeData resourceName := "aws_appmesh_virtual_node.test" + nsResourceName := "aws_service_discovery_http_namespace.test" meshName := acctest.RandomWithPrefix("tf-acc-test") vnName := acctest.RandomWithPrefix("tf-acc-test") + // Avoid 'config is invalid: last character of "name" must be a letter' for aws_service_discovery_http_namespace. 
+ rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(20, acctest.CharSetAlpha)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(appmesh.EndpointsID, t) }, @@ -388,30 +404,33 @@ func testAccAwsAppmeshVirtualNode_logging(t *testing.T) { CheckDestroy: testAccCheckAppmeshVirtualNodeDestroy, Steps: []resource.TestStep{ { - Config: testAccAppmeshVirtualNodeConfig_logging(meshName, vnName, "/dev/stdout"), + Config: testAccAppmeshVirtualNodeConfig_cloudMapServiceDiscovery(meshName, vnName, rName, "Key1", "Value1"), Check: resource.ComposeTestCheckFunc( testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), resource.TestCheckResourceAttr(resourceName, "name", vnName), resource.TestCheckResourceAttr(resourceName, "mesh_name", meshName), - testAccCheckResourceAttrAccountID(resourceName, "mesh_owner"), resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.logging.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.logging.0.access_log.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.logging.0.access_log.0.file.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.logging.0.access_log.0.file.0.path", "/dev/stdout"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.aws_cloud_map.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.aws_cloud_map.0.attributes.%", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.aws_cloud_map.0.attributes.Key1", "Value1"), + resource.TestCheckResourceAttrPair(resourceName, "spec.0.service_discovery.0.aws_cloud_map.0.namespace_name", nsResourceName, "name"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.aws_cloud_map.0.service_name", rName), ), }, { - Config: testAccAppmeshVirtualNodeConfig_logging(meshName, vnName, "/tmp/access.log"), + Config: testAccAppmeshVirtualNodeConfig_cloudMapServiceDiscovery(meshName, vnName, rName, "Key1", "Value2"), Check: resource.ComposeTestCheckFunc( testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), resource.TestCheckResourceAttr(resourceName, "name", vnName), resource.TestCheckResourceAttr(resourceName, "mesh_name", meshName), - testAccCheckResourceAttrAccountID(resourceName, "mesh_owner"), - resource.TestCheckResourceAttr(resourceName, "spec.0.logging.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.logging.0.access_log.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.logging.0.access_log.0.file.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.logging.0.access_log.0.file.0.path", "/tmp/access.log"), + resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.aws_cloud_map.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.aws_cloud_map.0.attributes.%", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.aws_cloud_map.0.attributes.Key1", "Value2"), + resource.TestCheckResourceAttrPair(resourceName, "spec.0.service_discovery.0.aws_cloud_map.0.namespace_name", nsResourceName, "name"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.aws_cloud_map.0.service_name", 
rName), ), }, { @@ -424,7 +443,7 @@ func testAccAwsAppmeshVirtualNode_logging(t *testing.T) { }) } -func testAccAwsAppmeshVirtualNode_tags(t *testing.T) { +func testAccAwsAppmeshVirtualNode_listenerConnectionPool(t *testing.T) { var vn appmesh.VirtualNodeData resourceName := "aws_appmesh_virtual_node.test" meshName := acctest.RandomWithPrefix("tf-acc-test") @@ -436,28 +455,70 @@ func testAccAwsAppmeshVirtualNode_tags(t *testing.T) { CheckDestroy: testAccCheckAppmeshVirtualNodeDestroy, Steps: []resource.TestStep{ { - Config: testAccAppmeshVirtualNodeConfig_tags(meshName, vnName, "foo", "bar", "good", "bad"), - Check: resource.ComposeTestCheckFunc( - testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), - resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), - resource.TestCheckResourceAttr(resourceName, "tags.foo", "bar"), - resource.TestCheckResourceAttr(resourceName, "tags.good", "bad"), - ), - }, - { - Config: testAccAppmeshVirtualNodeConfig_tags(meshName, vnName, "foo2", "bar", "good", "bad2"), + Config: testAccAppmeshVirtualNodeConfig_listenerConnectionPool(meshName, vnName), Check: resource.ComposeTestCheckFunc( testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), - resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), - resource.TestCheckResourceAttr(resourceName, "tags.foo2", "bar"), - resource.TestCheckResourceAttr(resourceName, "tags.good", "bad2"), + resource.TestCheckResourceAttr(resourceName, "name", vnName), + resource.TestCheckResourceAttr(resourceName, "mesh_name", meshName), + testAccCheckResourceAttrAccountID(resourceName, "mesh_owner"), + resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "spec.0.backend.*", map[string]string{ + "virtual_service.#": "1", + "virtual_service.0.virtual_service_name": "servicea.simpleapp.local", + }), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.0.grpc.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.0.http.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.0.http2.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.0.tcp.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.0.tcp.0.max_connections", "4"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.port", "8080"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "tcp"), + resource.TestCheckResourceAttr(resourceName, "spec.0.logging.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.0.hostname", "serviceb.simpleapp.local"), + resource.TestCheckResourceAttrSet(resourceName, "created_date"), + resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), + testAccCheckResourceAttrAccountID(resourceName, "resource_owner"), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "appmesh", 
fmt.Sprintf("mesh/%s/virtualNode/%s", meshName, vnName)), ), }, { - Config: testAccAppmeshVirtualNodeConfig_basic(meshName, vnName), + Config: testAccAppmeshVirtualNodeConfig_listenerConnectionPoolUpdated(meshName, vnName), Check: resource.ComposeTestCheckFunc( testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "name", vnName), + resource.TestCheckResourceAttr(resourceName, "mesh_name", meshName), + testAccCheckResourceAttrAccountID(resourceName, "mesh_owner"), + resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "spec.0.backend.*", map[string]string{ + "virtual_service.#": "1", + "virtual_service.0.virtual_service_name": "servicea.simpleapp.local", + }), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.0.grpc.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.0.http.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.0.http.0.max_connections", "8"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.0.http.0.max_pending_requests", "16"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.0.http2.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.0.tcp.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.port", "8080"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "http"), + resource.TestCheckResourceAttr(resourceName, "spec.0.logging.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.0.hostname", "serviceb.simpleapp.local"), + resource.TestCheckResourceAttrSet(resourceName, "created_date"), + resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), + testAccCheckResourceAttrAccountID(resourceName, "resource_owner"), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "appmesh", fmt.Sprintf("mesh/%s/virtualNode/%s", meshName, vnName)), ), }, { @@ -470,12 +531,9 @@ func testAccAwsAppmeshVirtualNode_tags(t *testing.T) { }) } -func testAccAwsAppmeshVirtualNode_tls(t *testing.T) { +func testAccAwsAppmeshVirtualNode_listenerHealthChecks(t *testing.T) { var vn appmesh.VirtualNodeData - var ca acmpca.CertificateAuthority resourceName := "aws_appmesh_virtual_node.test" - acmCAResourceName := "aws_acmpca_certificate_authority.test" - acmCertificateResourceName := "aws_acm_certificate.test" meshName := acctest.RandomWithPrefix("tf-acc-test") vnName := acctest.RandomWithPrefix("tf-acc-test") @@ -485,7 +543,7 @@ func testAccAwsAppmeshVirtualNode_tls(t *testing.T) { CheckDestroy: testAccCheckAppmeshVirtualNodeDestroy, Steps: []resource.TestStep{ { - Config: testAccAppmeshVirtualNodeConfig_tlsFile(meshName, vnName), + Config: 
testAccAppmeshVirtualNodeConfig_listenerHealthChecks(meshName, vnName), Check: resource.ComposeTestCheckFunc( testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), resource.TestCheckResourceAttr(resourceName, "name", vnName), @@ -500,17 +558,20 @@ func testAccAwsAppmeshVirtualNode_tls(t *testing.T) { }), resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.healthy_threshold", "3"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.interval_millis", "5000"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.path", "/ping"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.port", "8080"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.protocol", "http2"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.timeout_millis", "2000"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.unhealthy_threshold", "5"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.port", "8080"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "http"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.0.certificate.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.0.certificate.0.acm.#", "0"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.0.certificate.0.file.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.0.certificate.0.file.0.certificate_chain", "/cert_chain.pem"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.0.certificate.0.file.0.private_key", "/key.pem"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.0.mode", "PERMISSIVE"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "grpc"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.logging.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.#", "1"), @@ -522,40 +583,43 @@ func testAccAwsAppmeshVirtualNode_tls(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportStateId: fmt.Sprintf("%s/%s", meshName, vnName), - ImportState: true, - ImportStateVerify: true, - }, - // We need to create and activate the CA before issuing a certificate. 
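// Sketch of the listener these health-check assertions describe (the real
// testAccAppmeshVirtualNodeConfig_listenerHealthChecks helper lives with the
// other config builders in this file); values mirror the checks only.
const listenerHealthChecksSketch = `
listener {
  port_mapping {
    port     = 8080
    protocol = "grpc"
  }

  health_check {
    healthy_threshold   = 3
    interval_millis     = 5000
    path                = "/ping"
    port                = 8080
    protocol            = "http2"
    timeout_millis      = 2000
    unhealthy_threshold = 5
  }
}
`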
- { - Config: testAccAppmeshVirtualNodeConfigRootCA(vnName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsAcmpcaCertificateAuthorityExists(acmCAResourceName, &ca), - testAccCheckAwsAcmpcaCertificateAuthorityActivateCA(&ca), - ), - }, - { - Config: testAccAppmeshVirtualNodeConfig_tlsAcm(meshName, vnName), + Config: testAccAppmeshVirtualNodeConfig_listenerHealthChecksUpdated(meshName, vnName), Check: resource.ComposeTestCheckFunc( testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), resource.TestCheckResourceAttr(resourceName, "name", vnName), resource.TestCheckResourceAttr(resourceName, "mesh_name", meshName), testAccCheckResourceAttrAccountID(resourceName, "mesh_owner"), resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend.#", "2"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "spec.0.backend.*", map[string]string{ "virtual_service.#": "1", "virtual_service.0.client_policy.#": "0", - "virtual_service.0.virtual_service_name": "servicea.simpleapp.local", + "virtual_service.0.virtual_service_name": "servicec.simpleapp.local", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "spec.0.backend.*", map[string]string{ + "virtual_service.#": "1", + "virtual_service.0.client_policy.#": "0", + "virtual_service.0.virtual_service_name": "serviced.simpleapp.local", }), resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "1"), - testAccCheckAppmeshVirtualNodeTlsAcmCertificateArn(acmCertificateResourceName, "arn", &vn), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.healthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.interval_millis", "7000"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.port", "8081"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.protocol", "tcp"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.timeout_millis", "3000"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.unhealthy_threshold", "9"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.port", "8081"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "http"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.logging.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.0.hostname", "serviceb.simpleapp.local"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.0.hostname", "serviceb1.simpleapp.local"), resource.TestCheckResourceAttrSet(resourceName, "created_date"), 
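// spec.0.backend is a set, so element indices are not stable between runs;
// that is why the two-backend assertions above use
// resource.TestCheckTypeSetElemNestedAttrs, which passes when at least one
// element of the set matches every listed nested attribute. Illustrative
// call (matches the serviced element wherever it hashes in the set):
//
//   resource.TestCheckTypeSetElemNestedAttrs(resourceName, "spec.0.backend.*", map[string]string{
//       "virtual_service.0.virtual_service_name": "serviced.simpleapp.local",
//   }),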
resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), testAccCheckResourceAttrAccountID(resourceName, "resource_owner"), @@ -568,19 +632,11 @@ func testAccAwsAppmeshVirtualNode_tls(t *testing.T) { ImportState: true, ImportStateVerify: true, }, - { - Config: testAccAppmeshVirtualNodeConfig_tlsAcm(meshName, vnName), - Check: resource.ComposeTestCheckFunc( - // CA must be DISABLED for deletion. - testAccCheckAwsAcmpcaCertificateAuthorityDisableCA(&ca), - ), - ExpectNonEmptyPlan: true, - }, }, }) } -func testAccAwsAppmeshVirtualNode_clientPolicyFile(t *testing.T) { +func testAccAwsAppmeshVirtualNode_listenerTimeout(t *testing.T) { var vn appmesh.VirtualNodeData resourceName := "aws_appmesh_virtual_node.test" meshName := acctest.RandomWithPrefix("tf-acc-test") @@ -592,7 +648,7 @@ func testAccAwsAppmeshVirtualNode_clientPolicyFile(t *testing.T) { CheckDestroy: testAccCheckAppmeshVirtualNodeDestroy, Steps: []resource.TestStep{ { - Config: testAccAppmeshVirtualNodeConfig_clientPolicyFile(meshName, vnName), + Config: testAccAppmeshVirtualNodeConfig_listenerTimeout(meshName, vnName), Check: resource.ComposeTestCheckFunc( testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), resource.TestCheckResourceAttr(resourceName, "name", vnName), @@ -601,25 +657,21 @@ func testAccAwsAppmeshVirtualNode_clientPolicyFile(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.backend.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "spec.0.backend.*", map[string]string{ - "virtual_service.#": "1", - "virtual_service.0.client_policy.#": "1", - "virtual_service.0.client_policy.0.tls.#": "1", - "virtual_service.0.client_policy.0.tls.0.enforce": "true", - "virtual_service.0.client_policy.0.tls.0.ports.#": "1", - "virtual_service.0.client_policy.0.tls.0.validation.#": "1", - "virtual_service.0.client_policy.0.tls.0.validation.0.trust.#": "1", - "virtual_service.0.client_policy.0.tls.0.validation.0.trust.0.acm.#": "0", - "virtual_service.0.client_policy.0.tls.0.validation.0.trust.0.file.#": "1", - "virtual_service.0.client_policy.0.tls.0.validation.0.trust.0.file.0.certificate_chain": "/cert_chain.pem", - "virtual_service.0.virtual_service_name": "servicea.simpleapp.local", + "virtual_service.#": "1", + "virtual_service.0.virtual_service_name": "servicea.simpleapp.local", }), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.port", "8080"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "http"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "tcp"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.grpc.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.http.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.http2.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.tcp.#", "1"), + 
resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.tcp.0.idle.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.tcp.0.idle.0.unit", "ms"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.tcp.0.idle.0.value", "250000"), resource.TestCheckResourceAttr(resourceName, "spec.0.logging.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.#", "1"), @@ -631,7 +683,7 @@ func testAccAwsAppmeshVirtualNode_clientPolicyFile(t *testing.T) { ), }, { - Config: testAccAppmeshVirtualNodeConfig_clientPolicyFileUpdated(meshName, vnName), + Config: testAccAppmeshVirtualNodeConfig_listenerTimeoutUpdated(meshName, vnName), Check: resource.ComposeTestCheckFunc( testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), resource.TestCheckResourceAttr(resourceName, "name", vnName), @@ -640,25 +692,24 @@ func testAccAwsAppmeshVirtualNode_clientPolicyFile(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.backend.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "spec.0.backend.*", map[string]string{ - "virtual_service.#": "1", - "virtual_service.0.client_policy.#": "1", - "virtual_service.0.client_policy.0.tls.#": "1", - "virtual_service.0.client_policy.0.tls.0.enforce": "true", - "virtual_service.0.client_policy.0.tls.0.ports.#": "2", - "virtual_service.0.client_policy.0.tls.0.validation.#": "1", - "virtual_service.0.client_policy.0.tls.0.validation.0.trust.#": "1", - "virtual_service.0.client_policy.0.tls.0.validation.0.trust.0.acm.#": "0", - "virtual_service.0.client_policy.0.tls.0.validation.0.trust.0.file.#": "1", - "virtual_service.0.client_policy.0.tls.0.validation.0.trust.0.file.0.certificate_chain": "/etc/ssl/certs/cert_chain.pem", - "virtual_service.0.virtual_service_name": "servicea.simpleapp.local", + "virtual_service.#": "1", + "virtual_service.0.virtual_service_name": "servicea.simpleapp.local", }), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.port", "8080"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "http"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.grpc.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.http.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.http.0.idle.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.http.0.idle.0.unit", "s"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.http.0.idle.0.value", "10"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.http.0.per_request.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.http.0.per_request.0.unit", "s"), + resource.TestCheckResourceAttr(resourceName, 
"spec.0.listener.0.timeout.0.http.0.per_request.0.value", "5"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.http2.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.timeout.0.tcp.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.logging.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.#", "1"), @@ -679,11 +730,12 @@ func testAccAwsAppmeshVirtualNode_clientPolicyFile(t *testing.T) { }) } -func testAccAwsAppmeshVirtualNode_clientPolicyAcm(t *testing.T) { +func testAccAwsAppmeshVirtualNode_listenerTls(t *testing.T) { var vn appmesh.VirtualNodeData var ca acmpca.CertificateAuthority resourceName := "aws_appmesh_virtual_node.test" acmCAResourceName := "aws_acmpca_certificate_authority.test" + acmCertificateResourceName := "aws_acm_certificate.test" meshName := acctest.RandomWithPrefix("tf-acc-test") vnName := acctest.RandomWithPrefix("tf-acc-test") @@ -692,6 +744,50 @@ func testAccAwsAppmeshVirtualNode_clientPolicyAcm(t *testing.T) { Providers: testAccProviders, CheckDestroy: testAccCheckAppmeshVirtualNodeDestroy, Steps: []resource.TestStep{ + { + Config: testAccAppmeshVirtualNodeConfig_tlsFile(meshName, vnName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), + resource.TestCheckResourceAttr(resourceName, "name", vnName), + resource.TestCheckResourceAttr(resourceName, "mesh_name", meshName), + testAccCheckResourceAttrAccountID(resourceName, "mesh_owner"), + resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "spec.0.backend.*", map[string]string{ + "virtual_service.#": "1", + "virtual_service.0.client_policy.#": "0", + "virtual_service.0.virtual_service_name": "servicea.simpleapp.local", + }), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.port", "8080"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "http"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.0.certificate.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.0.certificate.0.acm.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.0.certificate.0.file.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.0.certificate.0.file.0.certificate_chain", "/cert_chain.pem"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.0.certificate.0.file.0.private_key", "/key.pem"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.0.mode", "PERMISSIVE"), + resource.TestCheckResourceAttr(resourceName, "spec.0.logging.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.#", "1"), + resource.TestCheckResourceAttr(resourceName, 
"spec.0.service_discovery.0.dns.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.0.hostname", "serviceb.simpleapp.local"), + resource.TestCheckResourceAttrSet(resourceName, "created_date"), + resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), + testAccCheckResourceAttrAccountID(resourceName, "resource_owner"), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "appmesh", fmt.Sprintf("mesh/%s/virtualNode/%s", meshName, vnName)), + ), + }, + { + ResourceName: resourceName, + ImportStateId: fmt.Sprintf("%s/%s", meshName, vnName), + ImportState: true, + ImportStateVerify: true, + }, // We need to create and activate the CA before issuing a certificate. { Config: testAccAppmeshVirtualNodeConfigRootCA(vnName), @@ -701,7 +797,7 @@ func testAccAwsAppmeshVirtualNode_clientPolicyAcm(t *testing.T) { ), }, { - Config: testAccAppmeshVirtualNodeConfig_clientPolicyAcm(meshName, vnName), + Config: testAccAppmeshVirtualNodeConfig_tlsAcm(meshName, vnName), Check: resource.ComposeTestCheckFunc( testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), resource.TestCheckResourceAttr(resourceName, "name", vnName), @@ -709,14 +805,24 @@ func testAccAwsAppmeshVirtualNode_clientPolicyAcm(t *testing.T) { testAccCheckResourceAttrAccountID(resourceName, "mesh_owner"), resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.backend.#", "1"), - testAccCheckAppmeshVirtualNodeClientPolicyAcmCertificateAuthorityArn(acmCAResourceName, "arn", &vn), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "spec.0.backend.*", map[string]string{ + "virtual_service.#": "1", + "virtual_service.0.client_policy.#": "0", + "virtual_service.0.virtual_service_name": "servicea.simpleapp.local", + }), resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.port", "8080"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "http"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.0.certificate.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.0.certificate.0.acm.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "spec.0.listener.0.tls.0.certificate.0.acm.0.certificate_arn", acmCertificateResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.0.certificate.0.file.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.tls.0.mode", "STRICT"), resource.TestCheckResourceAttr(resourceName, "spec.0.logging.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.#", "1"), @@ -734,7 +840,7 @@ func testAccAwsAppmeshVirtualNode_clientPolicyAcm(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAppmeshVirtualNodeConfig_clientPolicyAcm(meshName, vnName), + 
Config: testAccAppmeshVirtualNodeConfig_tlsAcm(meshName, vnName), Check: resource.ComposeTestCheckFunc( // CA must be DISABLED for deletion. testAccCheckAwsAcmpcaCertificateAuthorityDisableCA(&ca), @@ -745,7 +851,7 @@ func testAccAwsAppmeshVirtualNode_clientPolicyAcm(t *testing.T) { }) } -func testAccAwsAppmeshVirtualNode_backendDefaults(t *testing.T) { +func testAccAwsAppmeshVirtualNode_logging(t *testing.T) { var vn appmesh.VirtualNodeData resourceName := "aws_appmesh_virtual_node.test" meshName := acctest.RandomWithPrefix("tf-acc-test") @@ -757,62 +863,76 @@ func testAccAwsAppmeshVirtualNode_backendDefaults(t *testing.T) { CheckDestroy: testAccCheckAppmeshVirtualNodeDestroy, Steps: []resource.TestStep{ { - Config: testAccAppmeshVirtualNodeConfig_backendDefaults(meshName, vnName), + Config: testAccAppmeshVirtualNodeConfig_logging(meshName, vnName, "/dev/stdout"), Check: resource.ComposeTestCheckFunc( testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), resource.TestCheckResourceAttr(resourceName, "name", vnName), resource.TestCheckResourceAttr(resourceName, "mesh_name", meshName), testAccCheckResourceAttrAccountID(resourceName, "mesh_owner"), resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend.#", "0"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.enforce", "true"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.ports.#", "1"), - resource.TestCheckTypeSetElemAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.ports.*", "8443"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.validation.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.validation.0.trust.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.validation.0.trust.0.acm.#", "0"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.validation.0.trust.0.file.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.validation.0.trust.0.file.0.certificate_chain", "/cert_chain.pem"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "0"), - resource.TestCheckResourceAttr(resourceName, "spec.0.logging.#", "0"), - resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.#", "0"), - resource.TestCheckResourceAttrSet(resourceName, "created_date"), - resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), - testAccCheckResourceAttrAccountID(resourceName, "resource_owner"), - testAccCheckResourceAttrRegionalARN(resourceName, "arn", "appmesh", fmt.Sprintf("mesh/%s/virtualNode/%s", meshName, vnName)), + resource.TestCheckResourceAttr(resourceName, "spec.0.logging.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.logging.0.access_log.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.logging.0.access_log.0.file.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.logging.0.access_log.0.file.0.path", "/dev/stdout"), ), }, { - Config: 
testAccAppmeshVirtualNodeConfig_backendDefaultsUpdated(meshName, vnName), + Config: testAccAppmeshVirtualNodeConfig_logging(meshName, vnName, "/tmp/access.log"), Check: resource.ComposeTestCheckFunc( testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), resource.TestCheckResourceAttr(resourceName, "name", vnName), resource.TestCheckResourceAttr(resourceName, "mesh_name", meshName), testAccCheckResourceAttrAccountID(resourceName, "mesh_owner"), - resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend.#", "0"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.enforce", "true"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.ports.#", "2"), - resource.TestCheckTypeSetElemAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.ports.*", "443"), - resource.TestCheckTypeSetElemAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.ports.*", "8443"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.validation.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.validation.0.trust.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.validation.0.trust.0.acm.#", "0"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.validation.0.trust.0.file.#", "1"), - resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.0.client_policy.0.tls.0.validation.0.trust.0.file.0.certificate_chain", "/etc/ssl/certs/cert_chain.pem"), - resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "0"), - resource.TestCheckResourceAttr(resourceName, "spec.0.logging.#", "0"), - resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.#", "0"), - resource.TestCheckResourceAttrSet(resourceName, "created_date"), - resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), - testAccCheckResourceAttrAccountID(resourceName, "resource_owner"), - testAccCheckResourceAttrRegionalARN(resourceName, "arn", "appmesh", fmt.Sprintf("mesh/%s/virtualNode/%s", meshName, vnName)), + resource.TestCheckResourceAttr(resourceName, "spec.0.logging.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.logging.0.access_log.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.logging.0.access_log.0.file.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.logging.0.access_log.0.file.0.path", "/tmp/access.log"), + ), + }, + { + ResourceName: resourceName, + ImportStateId: fmt.Sprintf("%s/%s", meshName, vnName), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccAwsAppmeshVirtualNode_tags(t *testing.T) { + var vn appmesh.VirtualNodeData + resourceName := "aws_appmesh_virtual_node.test" + meshName := acctest.RandomWithPrefix("tf-acc-test") + vnName := acctest.RandomWithPrefix("tf-acc-test") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(appmesh.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: 
testAccCheckAppmeshVirtualNodeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAppmeshVirtualNodeConfig_tags(meshName, vnName, "foo", "bar", "good", "bad"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.foo", "bar"), + resource.TestCheckResourceAttr(resourceName, "tags.good", "bad"), + ), + }, + { + Config: testAccAppmeshVirtualNodeConfig_tags(meshName, vnName, "foo2", "bar", "good", "bad2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.foo2", "bar"), + resource.TestCheckResourceAttr(resourceName, "tags.good", "bad2"), + ), + }, + { + Config: testAccAppmeshVirtualNodeConfig_basic(meshName, vnName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, { @@ -902,30 +1022,6 @@ func testAccCheckAppmeshVirtualNodeClientPolicyAcmCertificateAuthorityArn(name, } } -func testAccCheckAppmeshVirtualNodeTlsAcmCertificateArn(name, key string, v *appmesh.VirtualNodeData) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - expected, ok := rs.Primary.Attributes[key] - if !ok { - return fmt.Errorf("Key not found: %s", key) - } - if v.Spec == nil || v.Spec.Listeners == nil || len(v.Spec.Listeners) != 1 || v.Spec.Listeners[0].Tls == nil || - v.Spec.Listeners[0].Tls.Certificate == nil || v.Spec.Listeners[0].Tls.Certificate.Acm == nil { - return fmt.Errorf("Not found: v.Spec.Listeners[0].Tls.Certificate.Acm") - } - got := aws.StringValue(v.Spec.Listeners[0].Tls.Certificate.Acm.CertificateArn) - if got != expected { - return fmt.Errorf("Expected ACM certificate ARN %q, got %q", expected, got) - } - - return nil - } -} - func testAccAppmeshVirtualNodeConfig_mesh(rName string) string { return fmt.Sprintf(` resource "aws_appmesh_mesh" "test" { @@ -1011,6 +1107,79 @@ resource "aws_appmesh_virtual_node" "test" { `, vnName, rName, attrKey, attrValue)) } +func testAccAppmeshVirtualNodeConfig_listenerConnectionPool(meshName, vnName string) string { + return composeConfig(testAccAppmeshVirtualNodeConfig_mesh(meshName), fmt.Sprintf(` +resource "aws_appmesh_virtual_node" "test" { + name = %[1]q + mesh_name = aws_appmesh_mesh.test.id + + spec { + backend { + virtual_service { + virtual_service_name = "servicea.simpleapp.local" + } + } + + listener { + port_mapping { + port = 8080 + protocol = "tcp" + } + + connection_pool { + tcp { + max_connections = 4 + } + } + } + + service_discovery { + dns { + hostname = "serviceb.simpleapp.local" + } + } + } +} +`, vnName)) +} + +func testAccAppmeshVirtualNodeConfig_listenerConnectionPoolUpdated(meshName, vnName string) string { + return composeConfig(testAccAppmeshVirtualNodeConfig_mesh(meshName), fmt.Sprintf(` +resource "aws_appmesh_virtual_node" "test" { + name = %[1]q + mesh_name = aws_appmesh_mesh.test.id + + spec { + backend { + virtual_service { + virtual_service_name = "servicea.simpleapp.local" + } + } + + listener { + port_mapping { + port = 8080 + protocol = "http" + } + + connection_pool { + http { + max_connections = 8 + max_pending_requests = 16 + } + } + } + + service_discovery { + dns { + 
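# Placeholder backend hostname shared by these acceptance-test configs; it is stored in the node spec and not resolved by the test.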
hostname = "serviceb.simpleapp.local" + } + } + } +} +`, vnName)) +} + func testAccAppmeshVirtualNodeConfig_listenerHealthChecks(meshName, vnName string) string { return composeConfig(testAccAppmeshVirtualNodeConfig_mesh(meshName), fmt.Sprintf(` resource "aws_appmesh_virtual_node" "test" { diff --git a/aws/structure.go b/aws/structure.go index 20acfb8adb0..af2ea5f68d6 100644 --- a/aws/structure.go +++ b/aws/structure.go @@ -4748,6 +4748,65 @@ func expandAppmeshVirtualNodeSpec(vSpec []interface{}) *appmesh.VirtualNodeSpec mListener := vListener.(map[string]interface{}) + if vConnectionPool, ok := mListener["connection_pool"].([]interface{}); ok && len(vConnectionPool) > 0 && vConnectionPool[0] != nil { + mConnectionPool := vConnectionPool[0].(map[string]interface{}) + + connectionPool := &appmesh.VirtualNodeConnectionPool{} + + if vGrpcConnectionPool, ok := mConnectionPool["grpc"].([]interface{}); ok && len(vGrpcConnectionPool) > 0 && vGrpcConnectionPool[0] != nil { + mGrpcConnectionPool := vGrpcConnectionPool[0].(map[string]interface{}) + + grpcConnectionPool := &appmesh.VirtualNodeGrpcConnectionPool{} + + if vMaxRequests, ok := mGrpcConnectionPool["max_requests"].(int); ok && vMaxRequests > 0 { + grpcConnectionPool.MaxRequests = aws.Int64(int64(vMaxRequests)) + } + + connectionPool.Grpc = grpcConnectionPool + } + + if vHttpConnectionPool, ok := mConnectionPool["http"].([]interface{}); ok && len(vHttpConnectionPool) > 0 && vHttpConnectionPool[0] != nil { + mHttpConnectionPool := vHttpConnectionPool[0].(map[string]interface{}) + + httpConnectionPool := &appmesh.VirtualNodeHttpConnectionPool{} + + if vMaxConnections, ok := mHttpConnectionPool["max_connections"].(int); ok && vMaxConnections > 0 { + httpConnectionPool.MaxConnections = aws.Int64(int64(vMaxConnections)) + } + if vMaxPendingRequests, ok := mHttpConnectionPool["max_pending_requests"].(int); ok && vMaxPendingRequests > 0 { + httpConnectionPool.MaxPendingRequests = aws.Int64(int64(vMaxPendingRequests)) + } + + connectionPool.Http = httpConnectionPool + } + + if vHttp2ConnectionPool, ok := mConnectionPool["http2"].([]interface{}); ok && len(vHttp2ConnectionPool) > 0 && vHttp2ConnectionPool[0] != nil { + mHttp2ConnectionPool := vHttp2ConnectionPool[0].(map[string]interface{}) + + http2ConnectionPool := &appmesh.VirtualNodeHttp2ConnectionPool{} + + if vMaxRequests, ok := mHttp2ConnectionPool["max_requests"].(int); ok && vMaxRequests > 0 { + http2ConnectionPool.MaxRequests = aws.Int64(int64(vMaxRequests)) + } + + connectionPool.Http2 = http2ConnectionPool + } + + if vTcpConnectionPool, ok := mConnectionPool["tcp"].([]interface{}); ok && len(vTcpConnectionPool) > 0 && vTcpConnectionPool[0] != nil { + mTcpConnectionPool := vTcpConnectionPool[0].(map[string]interface{}) + + tcpConnectionPool := &appmesh.VirtualNodeTcpConnectionPool{} + + if vMaxConnections, ok := mTcpConnectionPool["max_connections"].(int); ok && vMaxConnections > 0 { + tcpConnectionPool.MaxConnections = aws.Int64(int64(vMaxConnections)) + } + + connectionPool.Tcp = tcpConnectionPool + } + + listener.ConnectionPool = connectionPool + } + if vHealthCheck, ok := mListener["health_check"].([]interface{}); ok && len(vHealthCheck) > 0 && vHealthCheck[0] != nil { healthCheck := &appmesh.HealthCheckPolicy{} @@ -4793,6 +4852,30 @@ func expandAppmeshVirtualNodeSpec(vSpec []interface{}) *appmesh.VirtualNodeSpec listener.PortMapping = portMapping } + if vTimeout, ok := mListener["timeout"].([]interface{}); ok && len(vTimeout) > 0 && vTimeout[0] != nil { + mTimeout := 
vTimeout[0].(map[string]interface{}) + + listenerTimeout := &appmesh.ListenerTimeout{} + + if vGrpcTimeout, ok := mTimeout["grpc"].([]interface{}); ok { + listenerTimeout.Grpc = expandAppmeshGrpcTimeout(vGrpcTimeout) + } + + if vHttpTimeout, ok := mTimeout["http"].([]interface{}); ok { + listenerTimeout.Http = expandAppmeshHttpTimeout(vHttpTimeout) + } + + if vHttp2Timeout, ok := mTimeout["http2"].([]interface{}); ok { + listenerTimeout.Http2 = expandAppmeshHttpTimeout(vHttp2Timeout) + } + + if vTcpTimeout, ok := mTimeout["tcp"].([]interface{}); ok { + listenerTimeout.Tcp = expandAppmeshTcpTimeout(vTcpTimeout) + } + + listener.Timeout = listenerTimeout + } + if vTls, ok := mListener["tls"].([]interface{}); ok && len(vTls) > 0 && vTls[0] != nil { tls := &appmesh.ListenerTls{} @@ -4840,30 +4923,6 @@ func expandAppmeshVirtualNodeSpec(vSpec []interface{}) *appmesh.VirtualNodeSpec listener.Tls = tls } - if vTimeout, ok := mListener["timeout"].([]interface{}); ok && len(vTimeout) > 0 && vTimeout[0] != nil { - mTimeout := vTimeout[0].(map[string]interface{}) - - listenerTimeout := &appmesh.ListenerTimeout{} - - if vGrpcTimeout, ok := mTimeout["grpc"].([]interface{}); ok { - listenerTimeout.Grpc = expandAppmeshGrpcTimeout(vGrpcTimeout) - } - - if vHttpTimeout, ok := mTimeout["http"].([]interface{}); ok { - listenerTimeout.Http = expandAppmeshHttpTimeout(vHttpTimeout) - } - - if vHttp2Timeout, ok := mTimeout["http2"].([]interface{}); ok { - listenerTimeout.Http2 = expandAppmeshHttpTimeout(vHttp2Timeout) - } - - if vTcpTimeout, ok := mTimeout["tcp"].([]interface{}); ok { - listenerTimeout.Tcp = expandAppmeshTcpTimeout(vTcpTimeout) - } - - listener.Timeout = listenerTimeout - } - listeners = append(listeners, listener) } @@ -4989,6 +5048,41 @@ func flattenAppmeshVirtualNodeSpec(spec *appmesh.VirtualNodeSpec) []interface{} listener := spec.Listeners[0] mListener := map[string]interface{}{} + if connectionPool := listener.ConnectionPool; connectionPool != nil { + mConnectionPool := map[string]interface{}{} + + if grpcConnectionPool := connectionPool.Grpc; grpcConnectionPool != nil { + mGrpcConnectionPool := map[string]interface{}{ + "max_requests": int(aws.Int64Value(grpcConnectionPool.MaxRequests)), + } + mConnectionPool["grpc"] = []interface{}{mGrpcConnectionPool} + } + + if httpConnectionPool := connectionPool.Http; httpConnectionPool != nil { + mHttpConnectionPool := map[string]interface{}{ + "max_connections": int(aws.Int64Value(httpConnectionPool.MaxConnections)), + "max_pending_requests": int(aws.Int64Value(httpConnectionPool.MaxPendingRequests)), + } + mConnectionPool["http"] = []interface{}{mHttpConnectionPool} + } + + if http2ConnectionPool := connectionPool.Http2; http2ConnectionPool != nil { + mHttp2ConnectionPool := map[string]interface{}{ + "max_requests": int(aws.Int64Value(http2ConnectionPool.MaxRequests)), + } + mConnectionPool["http2"] = []interface{}{mHttp2ConnectionPool} + } + + if tcpConnectionPool := connectionPool.Tcp; tcpConnectionPool != nil { + mTcpConnectionPool := map[string]interface{}{ + "max_connections": int(aws.Int64Value(tcpConnectionPool.MaxConnections)), + } + mConnectionPool["tcp"] = []interface{}{mTcpConnectionPool} + } + + mListener["connection_pool"] = []interface{}{mConnectionPool} + } + if healthCheck := listener.HealthCheck; healthCheck != nil { mHealthCheck := map[string]interface{}{ "healthy_threshold": int(aws.Int64Value(healthCheck.HealthyThreshold)), From 47210b80a32322721ff3e4bc1bbd664cfc84f83c Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 12 Nov 
2020 10:29:47 -0500 Subject: [PATCH 0082/1212] r/aws_appmesh_virtual_node: Add '_disappears' test (#13826). Acceptance test output: $ make testacc TEST=./aws TESTARGS='-run=TestAccAWSAppmesh/VirtualNode/disappears' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccAWSAppmesh/VirtualNode/disappears -timeout 120m === RUN TestAccAWSAppmesh_serial === RUN TestAccAWSAppmesh_serial/VirtualNode === RUN TestAccAWSAppmesh_serial/VirtualNode/disappears --- PASS: TestAccAWSAppmesh_serial (13.06s) --- PASS: TestAccAWSAppmesh_serial/VirtualNode (13.06s) --- PASS: TestAccAWSAppmesh_serial/VirtualNode/disappears (13.06s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 13.121s --- aws/resource_aws_appmesh_test.go | 1 + aws/resource_aws_appmesh_virtual_node_test.go | 23 +++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/aws/resource_aws_appmesh_test.go b/aws/resource_aws_appmesh_test.go index 1dc1723d242..1e626501da9 100644 --- a/aws/resource_aws_appmesh_test.go +++ b/aws/resource_aws_appmesh_test.go @@ -45,6 +45,7 @@ func TestAccAWSAppmesh_serial(t *testing.T) { }, "VirtualNode": { "basic": testAccAwsAppmeshVirtualNode_basic, + "disappears": testAccAwsAppmeshVirtualNode_disappears, "backendClientPolicyAcm": testAccAwsAppmeshVirtualNode_backendClientPolicyAcm, "backendClientPolicyFile": testAccAwsAppmeshVirtualNode_backendClientPolicyFile, "backendDefaults": testAccAwsAppmeshVirtualNode_backendDefaults, diff --git a/aws/resource_aws_appmesh_virtual_node_test.go b/aws/resource_aws_appmesh_virtual_node_test.go index 82b1c2186ec..96319264782 100644 --- a/aws/resource_aws_appmesh_virtual_node_test.go +++ b/aws/resource_aws_appmesh_virtual_node_test.go @@ -125,6 +125,29 @@ func testAccAwsAppmeshVirtualNode_basic(t *testing.T) { }) } +func testAccAwsAppmeshVirtualNode_disappears(t *testing.T) { + var vn appmesh.VirtualNodeData + resourceName := "aws_appmesh_virtual_node.test" + meshName := acctest.RandomWithPrefix("tf-acc-test") + vnName := acctest.RandomWithPrefix("tf-acc-test") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(appmesh.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAppmeshVirtualNodeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAppmeshVirtualNodeConfig_basic(meshName, vnName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), + testAccCheckResourceDisappears(testAccProvider, resourceAwsAppmeshVirtualNode(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func testAccAwsAppmeshVirtualNode_backendClientPolicyAcm(t *testing.T) { var vn appmesh.VirtualNodeData var ca acmpca.CertificateAuthority From 2451a8540ecd83d487829ef7a763975955a4922c Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 12 Nov 2020 10:55:09 -0500 Subject: [PATCH 0083/1212] Add 'expandAppmeshDuration' and 'flattenAppmeshDuration'. 
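Consolidates the previously repeated inline expansion and flattening of 'appmesh.Duration' blocks (retry policy 'per_retry_timeout' and the gRPC/HTTP/TCP 'idle' and 'per_request' timeouts) into a single shared pair of helpers. The typical call-site change, taken from the diff below:

    // Before: ~10 lines unpacking "unit" and "value" at each call site.
    // After: a single call to the shared helper.
    if vIdleTimeout, ok := mTcpTimeout["idle"].([]interface{}); ok {
        tcpTimeout.Idle = expandAppmeshDuration(vIdleTimeout)
    }

'expandAppmeshDuration' returns nil for an empty or nil block and 'flattenAppmeshDuration' returns an empty slice for a nil 'Duration', so the nil handling previously done at each call site is preserved.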
--- aws/structure.go | 204 ++++++++++++----------------------------------- 1 file changed, 53 insertions(+), 151 deletions(-) diff --git a/aws/structure.go b/aws/structure.go index af2ea5f68d6..bf327f5f03a 100644 --- a/aws/structure.go +++ b/aws/structure.go @@ -5591,19 +5591,8 @@ func expandAppmeshGrpcRoute(vGrpcRoute []interface{}) *appmesh.GrpcRoute { grpcRetryPolicy.HttpRetryEvents = expandStringSet(vHttpRetryEvents) } - if vPerRetryTimeout, ok := mGrpcRetryPolicy["per_retry_timeout"].([]interface{}); ok && len(vPerRetryTimeout) > 0 && vPerRetryTimeout[0] != nil { - perRetryTimeout := &appmesh.Duration{} - - mPerRetryTimeout := vPerRetryTimeout[0].(map[string]interface{}) - - if vUnit, ok := mPerRetryTimeout["unit"].(string); ok && vUnit != "" { - perRetryTimeout.Unit = aws.String(vUnit) - } - if vValue, ok := mPerRetryTimeout["value"].(int); ok && vValue > 0 { - perRetryTimeout.Value = aws.Int64(int64(vValue)) - } - - grpcRetryPolicy.PerRetryTimeout = perRetryTimeout + if vPerRetryTimeout, ok := mGrpcRetryPolicy["per_retry_timeout"].([]interface{}); ok { + grpcRetryPolicy.PerRetryTimeout = expandAppmeshDuration(vPerRetryTimeout) } if vTcpRetryEvents, ok := mGrpcRetryPolicy["tcp_retry_events"].(*schema.Set); ok && vTcpRetryEvents.Len() > 0 { @@ -5629,34 +5618,12 @@ func expandAppmeshGrpcTimeout(vGrpcTimeout []interface{}) *appmesh.GrpcTimeout { mGrpcTimeout := vGrpcTimeout[0].(map[string]interface{}) - if vIdleTimeout, ok := mGrpcTimeout["idle"].([]interface{}); ok && len(vIdleTimeout) > 0 && vIdleTimeout[0] != nil { - idleTimeout := &appmesh.Duration{} - - mIdleTimeout := vIdleTimeout[0].(map[string]interface{}) - - if vUnit, ok := mIdleTimeout["unit"].(string); ok && vUnit != "" { - idleTimeout.Unit = aws.String(vUnit) - } - if vValue, ok := mIdleTimeout["value"].(int); ok && vValue > 0 { - idleTimeout.Value = aws.Int64(int64(vValue)) - } - - grpcTimeout.Idle = idleTimeout + if vIdleTimeout, ok := mGrpcTimeout["idle"].([]interface{}); ok { + grpcTimeout.Idle = expandAppmeshDuration(vIdleTimeout) } - if vPerRequestTimeout, ok := mGrpcTimeout["per_request"].([]interface{}); ok && len(vPerRequestTimeout) > 0 && vPerRequestTimeout[0] != nil { - perRequestTimeout := &appmesh.Duration{} - - mPerRequestTimeout := vPerRequestTimeout[0].(map[string]interface{}) - - if vUnit, ok := mPerRequestTimeout["unit"].(string); ok && vUnit != "" { - perRequestTimeout.Unit = aws.String(vUnit) - } - if vValue, ok := mPerRequestTimeout["value"].(int); ok && vValue > 0 { - perRequestTimeout.Value = aws.Int64(int64(vValue)) - } - - grpcTimeout.PerRequest = perRequestTimeout + if vPerRequestTimeout, ok := mGrpcTimeout["per_request"].([]interface{}); ok { + grpcTimeout.PerRequest = expandAppmeshDuration(vPerRequestTimeout) } return grpcTimeout @@ -5782,19 +5749,8 @@ func expandAppmeshHttpRoute(vHttpRoute []interface{}) *appmesh.HttpRoute { httpRetryPolicy.HttpRetryEvents = expandStringSet(vHttpRetryEvents) } - if vPerRetryTimeout, ok := mHttpRetryPolicy["per_retry_timeout"].([]interface{}); ok && len(vPerRetryTimeout) > 0 && vPerRetryTimeout[0] != nil { - perRetryTimeout := &appmesh.Duration{} - - mPerRetryTimeout := vPerRetryTimeout[0].(map[string]interface{}) - - if vUnit, ok := mPerRetryTimeout["unit"].(string); ok && vUnit != "" { - perRetryTimeout.Unit = aws.String(vUnit) - } - if vValue, ok := mPerRetryTimeout["value"].(int); ok && vValue > 0 { - perRetryTimeout.Value = aws.Int64(int64(vValue)) - } - - httpRetryPolicy.PerRetryTimeout = perRetryTimeout + if vPerRetryTimeout, ok := 
mHttpRetryPolicy["per_retry_timeout"].([]interface{}); ok { + httpRetryPolicy.PerRetryTimeout = expandAppmeshDuration(vPerRetryTimeout) } if vTcpRetryEvents, ok := mHttpRetryPolicy["tcp_retry_events"].(*schema.Set); ok && vTcpRetryEvents.Len() > 0 { @@ -5820,34 +5776,12 @@ func expandAppmeshHttpTimeout(vHttpTimeout []interface{}) *appmesh.HttpTimeout { mHttpTimeout := vHttpTimeout[0].(map[string]interface{}) - if vIdleTimeout, ok := mHttpTimeout["idle"].([]interface{}); ok && len(vIdleTimeout) > 0 && vIdleTimeout[0] != nil { - idleTimeout := &appmesh.Duration{} - - mIdleTimeout := vIdleTimeout[0].(map[string]interface{}) - - if vUnit, ok := mIdleTimeout["unit"].(string); ok && vUnit != "" { - idleTimeout.Unit = aws.String(vUnit) - } - if vValue, ok := mIdleTimeout["value"].(int); ok && vValue > 0 { - idleTimeout.Value = aws.Int64(int64(vValue)) - } - - httpTimeout.Idle = idleTimeout + if vIdleTimeout, ok := mHttpTimeout["idle"].([]interface{}); ok { + httpTimeout.Idle = expandAppmeshDuration(vIdleTimeout) } - if vPerRequestTimeout, ok := mHttpTimeout["per_request"].([]interface{}); ok && len(vPerRequestTimeout) > 0 && vPerRequestTimeout[0] != nil { - perRequestTimeout := &appmesh.Duration{} - - mPerRequestTimeout := vPerRequestTimeout[0].(map[string]interface{}) - - if vUnit, ok := mPerRequestTimeout["unit"].(string); ok && vUnit != "" { - perRequestTimeout.Unit = aws.String(vUnit) - } - if vValue, ok := mPerRequestTimeout["value"].(int); ok && vValue > 0 { - perRequestTimeout.Value = aws.Int64(int64(vValue)) - } - - httpTimeout.PerRequest = perRequestTimeout + if vPerRequestTimeout, ok := mHttpTimeout["per_request"].([]interface{}); ok { + httpTimeout.PerRequest = expandAppmeshDuration(vPerRequestTimeout) } return httpTimeout @@ -5905,22 +5839,30 @@ func expandAppmeshTcpTimeout(vTcpTimeout []interface{}) *appmesh.TcpTimeout { mTcpTimeout := vTcpTimeout[0].(map[string]interface{}) - if vIdleTimeout, ok := mTcpTimeout["idle"].([]interface{}); ok && len(vIdleTimeout) > 0 && vIdleTimeout[0] != nil { - idleTimeout := &appmesh.Duration{} + if vIdleTimeout, ok := mTcpTimeout["idle"].([]interface{}); ok { + tcpTimeout.Idle = expandAppmeshDuration(vIdleTimeout) + } - mIdleTimeout := vIdleTimeout[0].(map[string]interface{}) + return tcpTimeout +} - if vUnit, ok := mIdleTimeout["unit"].(string); ok && vUnit != "" { - idleTimeout.Unit = aws.String(vUnit) - } - if vValue, ok := mIdleTimeout["value"].(int); ok && vValue > 0 { - idleTimeout.Value = aws.Int64(int64(vValue)) - } +func expandAppmeshDuration(vDuration []interface{}) *appmesh.Duration { + if len(vDuration) == 0 || vDuration[0] == nil { + return nil + } + + duration := &appmesh.Duration{} + + mDuration := vDuration[0].(map[string]interface{}) - tcpTimeout.Idle = idleTimeout + if vUnit, ok := mDuration["unit"].(string); ok && vUnit != "" { + duration.Unit = aws.String(vUnit) + } + if vValue, ok := mDuration["value"].(int); ok && vValue > 0 { + duration.Value = aws.Int64(int64(vValue)) } - return tcpTimeout + return duration } func flattenAppmeshRouteSpec(spec *appmesh.RouteSpec) []interface{} { @@ -6013,18 +5955,10 @@ func flattenAppmeshGrpcRoute(grpcRoute *appmesh.GrpcRoute) []interface{} { "grpc_retry_events": flattenStringSet(grpcRetryPolicy.GrpcRetryEvents), "http_retry_events": flattenStringSet(grpcRetryPolicy.HttpRetryEvents), "max_retries": int(aws.Int64Value(grpcRetryPolicy.MaxRetries)), + "per_retry_timeout": flattenAppmeshDuration(grpcRetryPolicy.PerRetryTimeout), "tcp_retry_events": flattenStringSet(grpcRetryPolicy.TcpRetryEvents), 
} - if perRetryTimeout := grpcRetryPolicy.PerRetryTimeout; perRetryTimeout != nil { - mPerRetryTimeout := map[string]interface{}{ - "unit": aws.StringValue(perRetryTimeout.Unit), - "value": int(aws.Int64Value(perRetryTimeout.Value)), - } - - mGrpcRetryPolicy["per_retry_timeout"] = []interface{}{mPerRetryTimeout} - } - mGrpcRoute["retry_policy"] = []interface{}{mGrpcRetryPolicy} } @@ -6038,24 +5972,9 @@ func flattenAppmeshGrpcTimeout(grpcTimeout *appmesh.GrpcTimeout) []interface{} { return []interface{}{} } - mGrpcTimeout := map[string]interface{}{} - - if idleTimeout := grpcTimeout.Idle; idleTimeout != nil { - mIdleTimeout := map[string]interface{}{ - "unit": aws.StringValue(idleTimeout.Unit), - "value": int(aws.Int64Value(idleTimeout.Value)), - } - - mGrpcTimeout["idle"] = []interface{}{mIdleTimeout} - } - - if perRequestTimeout := grpcTimeout.PerRequest; perRequestTimeout != nil { - mPerRequestTimeout := map[string]interface{}{ - "unit": aws.StringValue(perRequestTimeout.Unit), - "value": int(aws.Int64Value(perRequestTimeout.Value)), - } - - mGrpcTimeout["per_request"] = []interface{}{mPerRequestTimeout} + mGrpcTimeout := map[string]interface{}{ + "idle": flattenAppmeshDuration(grpcTimeout.Idle), + "per_request": flattenAppmeshDuration(grpcTimeout.PerRequest), } return []interface{}{mGrpcTimeout} @@ -6135,18 +6054,10 @@ func flattenAppmeshHttpRoute(httpRoute *appmesh.HttpRoute) []interface{} { mHttpRetryPolicy := map[string]interface{}{ "http_retry_events": flattenStringSet(httpRetryPolicy.HttpRetryEvents), "max_retries": int(aws.Int64Value(httpRetryPolicy.MaxRetries)), + "per_retry_timeout": flattenAppmeshDuration(httpRetryPolicy.PerRetryTimeout), "tcp_retry_events": flattenStringSet(httpRetryPolicy.TcpRetryEvents), } - if perRetryTimeout := httpRetryPolicy.PerRetryTimeout; perRetryTimeout != nil { - mPerRetryTimeout := map[string]interface{}{ - "unit": aws.StringValue(perRetryTimeout.Unit), - "value": int(aws.Int64Value(perRetryTimeout.Value)), - } - - mHttpRetryPolicy["per_retry_timeout"] = []interface{}{mPerRetryTimeout} - } - mHttpRoute["retry_policy"] = []interface{}{mHttpRetryPolicy} } @@ -6160,24 +6071,9 @@ func flattenAppmeshHttpTimeout(httpTimeout *appmesh.HttpTimeout) []interface{} { return []interface{}{} } - mHttpTimeout := map[string]interface{}{} - - if idleTimeout := httpTimeout.Idle; idleTimeout != nil { - mIdleTimeout := map[string]interface{}{ - "unit": aws.StringValue(idleTimeout.Unit), - "value": int(aws.Int64Value(idleTimeout.Value)), - } - - mHttpTimeout["idle"] = []interface{}{mIdleTimeout} - } - - if perRequestTimeout := httpTimeout.PerRequest; perRequestTimeout != nil { - mPerRequestTimeout := map[string]interface{}{ - "unit": aws.StringValue(perRequestTimeout.Unit), - "value": int(aws.Int64Value(perRequestTimeout.Value)), - } - - mHttpTimeout["per_request"] = []interface{}{mPerRequestTimeout} + mHttpTimeout := map[string]interface{}{ + "idle": flattenAppmeshDuration(httpTimeout.Idle), + "per_request": flattenAppmeshDuration(httpTimeout.PerRequest), } return []interface{}{mHttpTimeout} @@ -6221,18 +6117,24 @@ func flattenAppmeshTcpTimeout(tcpTimeout *appmesh.TcpTimeout) []interface{} { return []interface{}{} } - mTcpTimeout := map[string]interface{}{} + mTcpTimeout := map[string]interface{}{ + "idle": flattenAppmeshDuration(tcpTimeout.Idle), + } - if idleTimeout := tcpTimeout.Idle; idleTimeout != nil { - mIdleTimeout := map[string]interface{}{ - "unit": aws.StringValue(idleTimeout.Unit), - "value": int(aws.Int64Value(idleTimeout.Value)), - } + return 
[]interface{}{mTcpTimeout} +} - mTcpTimeout["idle"] = []interface{}{mIdleTimeout} +func flattenAppmeshDuration(duration *appmesh.Duration) []interface{} { + if duration == nil { + return []interface{}{} } - return []interface{}{mTcpTimeout} + mDuration := map[string]interface{}{ + "unit": aws.StringValue(duration.Unit), + "value": int(aws.Int64Value(duration.Value)), + } + + return []interface{}{mDuration} } func expandRoute53ResolverEndpointIpAddresses(vIpAddresses *schema.Set) []*route53resolver.IpAddressRequest { From b9bd4767272d2f5d47205fadbc9e0d2a38a796ab Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 12 Nov 2020 12:37:39 -0500 Subject: [PATCH 0084/1212] r/aws_appmesh_virtual_node: Add 'listener.outlier_detection' attribute. Acceptance test output: $ make testacc TEST=./aws TESTARGS='-run=TestAccAWSAppmesh/VirtualNode/listenerOutlierDetection' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccAWSAppmesh/VirtualNode/listenerOutlierDetection -timeout 120m === RUN TestAccAWSAppmesh_serial === RUN TestAccAWSAppmesh_serial/VirtualNode === RUN TestAccAWSAppmesh_serial/VirtualNode/listenerOutlierDetection --- PASS: TestAccAWSAppmesh_serial (29.16s) --- PASS: TestAccAWSAppmesh_serial/VirtualNode (29.16s) --- PASS: TestAccAWSAppmesh_serial/VirtualNode/listenerOutlierDetection (29.16s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 29.218s --- aws/resource_aws_appmesh_test.go | 1 + aws/resource_aws_appmesh_virtual_node.go | 64 ++++++ aws/resource_aws_appmesh_virtual_node_test.go | 190 ++++++++++++++++++ aws/structure.go | 33 +++ 4 files changed, 288 insertions(+) diff --git a/aws/resource_aws_appmesh_test.go b/aws/resource_aws_appmesh_test.go index 1e626501da9..db9afbaa07f 100644 --- a/aws/resource_aws_appmesh_test.go +++ b/aws/resource_aws_appmesh_test.go @@ -51,6 +51,7 @@ func TestAccAWSAppmesh_serial(t *testing.T) { "backendDefaults": testAccAwsAppmeshVirtualNode_backendDefaults, "cloudMapServiceDiscovery": testAccAwsAppmeshVirtualNode_cloudMapServiceDiscovery, "listenerConnectionPool": testAccAwsAppmeshVirtualNode_listenerConnectionPool, + "listenerOutlierDetection": testAccAwsAppmeshVirtualNode_listenerOutlierDetection, "listenerHealthChecks": testAccAwsAppmeshVirtualNode_listenerHealthChecks, "listenerTimeout": testAccAwsAppmeshVirtualNode_listenerTimeout, "listenerTls": testAccAwsAppmeshVirtualNode_listenerTls, diff --git a/aws/resource_aws_appmesh_virtual_node.go b/aws/resource_aws_appmesh_virtual_node.go index e6ae6ef9489..8f0bbaf2d15 100644 --- a/aws/resource_aws_appmesh_virtual_node.go +++ b/aws/resource_aws_appmesh_virtual_node.go @@ -259,6 +259,70 @@ func resourceAwsAppmeshVirtualNode() *schema.Resource { }, }, + "outlier_detection": { + Type: schema.TypeList, + Optional: true, + MinItems: 0, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "base_ejection_duration": { + Type: schema.TypeList, + Required: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "unit": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(appmesh.DurationUnit_Values(), false), + }, + + "value": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + + "interval": { + Type: schema.TypeList, + Required: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "unit": { + Type: schema.TypeString, + Required: true, + ValidateFunc: 
validation.StringInSlice(appmesh.DurationUnit_Values(), false), + }, + + "value": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + + "max_ejection_percent": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(0, 100), + }, + + "max_server_errors": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(1), + }, + }, + }, + }, + "port_mapping": { Type: schema.TypeList, Required: true, diff --git a/aws/resource_aws_appmesh_virtual_node_test.go b/aws/resource_aws_appmesh_virtual_node_test.go index 96319264782..03e63f4a183 100644 --- a/aws/resource_aws_appmesh_virtual_node_test.go +++ b/aws/resource_aws_appmesh_virtual_node_test.go @@ -196,6 +196,7 @@ func testAccAwsAppmeshVirtualNode_backendClientPolicyAcm(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.port", "8080"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "http"), @@ -266,6 +267,7 @@ func testAccAwsAppmeshVirtualNode_backendClientPolicyFile(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.port", "8080"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "http"), @@ -308,6 +310,7 @@ func testAccAwsAppmeshVirtualNode_backendClientPolicyFile(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.port", "8080"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "http"), @@ -590,6 +593,7 @@ func testAccAwsAppmeshVirtualNode_listenerHealthChecks(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.protocol", "http2"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.timeout_millis", "2000"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.unhealthy_threshold", "5"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.port", "8080"), 
resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "grpc"), @@ -634,6 +638,7 @@ func testAccAwsAppmeshVirtualNode_listenerHealthChecks(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.protocol", "tcp"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.timeout_millis", "3000"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.0.unhealthy_threshold", "9"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.port", "8081"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "http"), @@ -659,6 +664,99 @@ func testAccAwsAppmeshVirtualNode_listenerHealthChecks(t *testing.T) { }) } +func testAccAwsAppmeshVirtualNode_listenerOutlierDetection(t *testing.T) { + var vn appmesh.VirtualNodeData + resourceName := "aws_appmesh_virtual_node.test" + meshName := acctest.RandomWithPrefix("tf-acc-test") + vnName := acctest.RandomWithPrefix("tf-acc-test") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(appmesh.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAppmeshVirtualNodeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAppmeshVirtualNodeConfig_listenerOutlierDetection(meshName, vnName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), + resource.TestCheckResourceAttr(resourceName, "name", vnName), + resource.TestCheckResourceAttr(resourceName, "mesh_name", meshName), + testAccCheckResourceAttrAccountID(resourceName, "mesh_owner"), + resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend.#", "1"), + tfawsresource.TestCheckTypeSetElemNestedAttrs(resourceName, "spec.0.backend.*", map[string]string{ + "virtual_service.#": "1", + "virtual_service.0.virtual_service_name": "servicea.simpleapp.local", + }), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.0.base_ejection_duration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.0.base_ejection_duration.0.unit", "ms"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.0.base_ejection_duration.0.value", "250000"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.0.interval.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.0.interval.0.unit", "s"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.0.interval.0.value", "10"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.0.max_ejection_percent", "50"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.0.max_server_errors", "5"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.port", "8080"), + 
resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "tcp"), + resource.TestCheckResourceAttr(resourceName, "spec.0.logging.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.0.hostname", "serviceb.simpleapp.local"), + resource.TestCheckResourceAttrSet(resourceName, "created_date"), + resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), + testAccCheckResourceAttrAccountID(resourceName, "resource_owner"), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "appmesh", fmt.Sprintf("mesh/%s/virtualNode/%s", meshName, vnName)), + ), + }, + { + Config: testAccAppmeshVirtualNodeConfig_listenerOutlierDetectionUpdated(meshName, vnName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), + resource.TestCheckResourceAttr(resourceName, "name", vnName), + resource.TestCheckResourceAttr(resourceName, "mesh_name", meshName), + testAccCheckResourceAttrAccountID(resourceName, "mesh_owner"), + resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.backend.#", "1"), + tfawsresource.TestCheckTypeSetElemNestedAttrs(resourceName, "spec.0.backend.*", map[string]string{ + "virtual_service.#": "1", + "virtual_service.0.virtual_service_name": "servicea.simpleapp.local", + }), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.0.base_ejection_duration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.0.base_ejection_duration.0.unit", "s"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.0.base_ejection_duration.0.value", "6"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.0.interval.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.0.interval.0.unit", "ms"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.0.interval.0.value", "10000"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.0.max_ejection_percent", "60"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.0.max_server_errors", "6"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.port", "8080"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "http"), + resource.TestCheckResourceAttr(resourceName, "spec.0.logging.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.service_discovery.0.dns.0.hostname", "serviceb.simpleapp.local"), + resource.TestCheckResourceAttrSet(resourceName, "created_date"), + resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), + testAccCheckResourceAttrAccountID(resourceName, "resource_owner"), + 
testAccCheckResourceAttrRegionalARN(resourceName, "arn", "appmesh", fmt.Sprintf("mesh/%s/virtualNode/%s", meshName, vnName)), + ), + }, + { + ResourceName: resourceName, + ImportStateId: fmt.Sprintf("%s/%s", meshName, vnName), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccAwsAppmeshVirtualNode_listenerTimeout(t *testing.T) { var vn appmesh.VirtualNodeData resourceName := "aws_appmesh_virtual_node.test" @@ -785,6 +883,7 @@ func testAccAwsAppmeshVirtualNode_listenerTls(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.port", "8080"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "http"), @@ -837,6 +936,7 @@ func testAccAwsAppmeshVirtualNode_listenerTls(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.health_check.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.outlier_detection.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.port", "8080"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.port_mapping.0.protocol", "http"), @@ -1287,6 +1387,96 @@ resource "aws_appmesh_virtual_node" "test" { `, vnName)) } +func testAccAppmeshVirtualNodeConfig_listenerOutlierDetection(meshName, vnName string) string { + return composeConfig(testAccAppmeshVirtualNodeConfig_mesh(meshName), fmt.Sprintf(` +resource "aws_appmesh_virtual_node" "test" { + name = %[1]q + mesh_name = aws_appmesh_mesh.test.id + + spec { + backend { + virtual_service { + virtual_service_name = "servicea.simpleapp.local" + } + } + + listener { + port_mapping { + port = 8080 + protocol = "tcp" + } + + outlier_detection { + base_ejection_duration { + unit = "ms" + value = 250000 + } + + interval { + unit = "s" + value = 10 + } + + max_ejection_percent = 50 + max_server_errors = 5 + } + } + + service_discovery { + dns { + hostname = "serviceb.simpleapp.local" + } + } + } +} +`, vnName)) +} + +func testAccAppmeshVirtualNodeConfig_listenerOutlierDetectionUpdated(meshName, vnName string) string { + return composeConfig(testAccAppmeshVirtualNodeConfig_mesh(meshName), fmt.Sprintf(` +resource "aws_appmesh_virtual_node" "test" { + name = %[1]q + mesh_name = aws_appmesh_mesh.test.id + + spec { + backend { + virtual_service { + virtual_service_name = "servicea.simpleapp.local" + } + } + + listener { + port_mapping { + port = 8080 + protocol = "http" + } + + outlier_detection { + base_ejection_duration { + unit = "s" + value = 6 + } + + interval { + unit = "ms" + value = 10000 + } + + max_ejection_percent = 60 + max_server_errors = 6 + } + } + + service_discovery { + dns { + hostname = "serviceb.simpleapp.local" + } + } + } +} +`, vnName)) +} + func testAccAppmeshVirtualNodeConfig_listenerTimeout(meshName, vnName string) string { return 
composeConfig(testAccAppmeshVirtualNodeConfig_mesh(meshName), fmt.Sprintf(` resource "aws_appmesh_virtual_node" "test" { diff --git a/aws/structure.go b/aws/structure.go index bf327f5f03a..e9507b6031d 100644 --- a/aws/structure.go +++ b/aws/structure.go @@ -4837,6 +4837,29 @@ func expandAppmeshVirtualNodeSpec(vSpec []interface{}) *appmesh.VirtualNodeSpec listener.HealthCheck = healthCheck } + if vOutlierDetection, ok := mListener["outlier_detection"].([]interface{}); ok && len(vOutlierDetection) > 0 && vOutlierDetection[0] != nil { + outlierDetection := &appmesh.OutlierDetection{} + + mOutlierDetection := vOutlierDetection[0].(map[string]interface{}) + + if vMaxEjectionPercent, ok := mOutlierDetection["max_ejection_percent"].(int); ok && vMaxEjectionPercent > 0 { + outlierDetection.MaxEjectionPercent = aws.Int64(int64(vMaxEjectionPercent)) + } + if vMaxServerErrors, ok := mOutlierDetection["max_server_errors"].(int); ok && vMaxServerErrors > 0 { + outlierDetection.MaxServerErrors = aws.Int64(int64(vMaxServerErrors)) + } + + if vBaseEjectionDuration, ok := mOutlierDetection["base_ejection_duration"].([]interface{}); ok { + outlierDetection.BaseEjectionDuration = expandAppmeshDuration(vBaseEjectionDuration) + } + + if vInterval, ok := mOutlierDetection["interval"].([]interface{}); ok { + outlierDetection.Interval = expandAppmeshDuration(vInterval) + } + + listener.OutlierDetection = outlierDetection + } + if vPortMapping, ok := mListener["port_mapping"].([]interface{}); ok && len(vPortMapping) > 0 && vPortMapping[0] != nil { portMapping := &appmesh.PortMapping{} @@ -5096,6 +5119,16 @@ func flattenAppmeshVirtualNodeSpec(spec *appmesh.VirtualNodeSpec) []interface{} mListener["health_check"] = []interface{}{mHealthCheck} } + if outlierDetection := listener.OutlierDetection; outlierDetection != nil { + mOutlierDetection := map[string]interface{}{ + "base_ejection_duration": flattenAppmeshDuration(outlierDetection.BaseEjectionDuration), + "interval": flattenAppmeshDuration(outlierDetection.Interval), + "max_ejection_percent": int(aws.Int64Value(outlierDetection.MaxEjectionPercent)), + "max_server_errors": int(aws.Int64Value(outlierDetection.MaxServerErrors)), + } + mListener["outlier_detection"] = []interface{}{mOutlierDetection} + } + if portMapping := listener.PortMapping; portMapping != nil { mPortMapping := map[string]interface{}{ "port": int(aws.Int64Value(portMapping.Port)), From bad22fbcabfbecc2b1d2bf373b47e47df20d891c Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 12 Nov 2020 12:38:33 -0500 Subject: [PATCH 0085/1212] r/aws_appmesh_virtual_node: Document 'listener.connection_pool' and 'listener.outlier_detection' attributes. --- .../docs/r/appmesh_virtual_node.html.markdown | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/website/docs/r/appmesh_virtual_node.html.markdown b/website/docs/r/appmesh_virtual_node.html.markdown index 2ce48e38f96..4b1ba5dd8c5 100644 --- a/website/docs/r/appmesh_virtual_node.html.markdown +++ b/website/docs/r/appmesh_virtual_node.html.markdown @@ -230,7 +230,9 @@ The `backend_defaults` object supports the following: The `listener` object supports the following: * `port_mapping` - (Required) The port mapping information for the listener. +* `connection_pool` - (Optional) The connection pool information for the listener. * `health_check` - (Optional) The health check information for the listener. +* `outlier_detection` - (Optional) The outlier detection information for the listener. 
 * `timeout` - (Optional) Timeouts for different protocols.
 * `tls` - (Optional) The Transport Layer Security (TLS) properties for the listener
@@ -267,6 +269,30 @@ The `port_mapping` object supports the following:
 * `port` - (Required) The port used for the port mapping.
 * `protocol` - (Required) The protocol used for the port mapping. Valid values are `http`, `http2`, `tcp` and `grpc`.
 
+The `connection_pool` object supports the following:
+
+* `grpc` - (Optional) Connection pool information for gRPC listeners.
+* `http` - (Optional) Connection pool information for HTTP listeners.
+* `http2` - (Optional) Connection pool information for HTTP2 listeners.
+* `tcp` - (Optional) Connection pool information for TCP listeners.
+
+The `grpc` connection pool object supports the following:
+
+* `max_requests` - (Required) Maximum number of inflight requests Envoy can concurrently support across hosts in the upstream cluster. Minimum value of `1`.
+
+The `http` connection pool object supports the following:
+
+* `max_connections` - (Required) Maximum number of outbound TCP connections Envoy can establish concurrently with all hosts in the upstream cluster. Minimum value of `1`.
+* `max_pending_requests` - (Optional) Number of overflowing requests after `max_connections` that Envoy will queue to the upstream cluster. Minimum value of `1`.
+
+The `http2` connection pool object supports the following:
+
+* `max_requests` - (Required) Maximum number of inflight requests Envoy can concurrently support across hosts in the upstream cluster. Minimum value of `1`.
+
+The `tcp` connection pool object supports the following:
+
+* `max_connections` - (Required) Maximum number of outbound TCP connections Envoy can establish concurrently with all hosts in the upstream cluster. Minimum value of `1`.
+
 The `health_check` object supports the following:
 
 * `healthy_threshold` - (Required) The number of consecutive successful health checks that must occur before declaring listener healthy.
@@ -277,6 +303,19 @@ The `health_check` object supports the following:
 * `path` - (Optional) The destination path for the health check request. This is only required if the specified protocol is `http` or `http2`.
 * `port` - (Optional) The destination port for the health check request. This port must match the port defined in the `port_mapping` for the listener.
 
+The `outlier_detection` object supports the following:
+
+* `base_ejection_duration` - (Required) The base amount of time for which a host is ejected.
+* `interval` - (Required) The time interval between ejection sweep analyses.
+* `max_ejection_percent` - (Required) Maximum percentage of hosts in the load balancing pool for the upstream service that can be ejected. At least one host will be ejected, regardless of the value.
+Minimum value of `0`. Maximum value of `100`.
+* `max_server_errors` - (Required) Number of consecutive `5xx` errors required for ejection. Minimum value of `1`.
+
+The `base_ejection_duration` and `interval` objects support the following:
+
+* `unit` - (Required) The unit of time. Valid values: `ms`, `s`.
+* `value` - (Required) The number of time units. Minimum value of `0`.
+
 The `timeout` object supports the following:
 
 * `grpc` - (Optional) Timeouts for gRPC listeners.

From e3a381b9bf3d0e1e4a6c86cbaa3e0defc5314dba Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Thu, 12 Nov 2020 16:51:13 -0500
Subject: [PATCH 0086/1212] r/aws_appmesh_virtual_node: Test corrections.
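Renames the virtual node test configuration functions to match the test cases that use them ('clientPolicyAcm' becomes 'backendClientPolicyAcm', 'tlsFile' becomes 'listenerTlsFile', and so on), switches from 'tfawsresource.TestCheckTypeSetElemNestedAttrs' to the SDK's 'resource.TestCheckTypeSetElemNestedAttrs', and replaces the custom ACM certificate authority ARN check function with 'resource.TestCheckTypeSetElemAttrPair' against the fully-qualified 'certificate_authority_arns' attribute path.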
Acceptance test output: $ make testacc TEST=./aws TESTARGS='-run=TestAccAWSAppmesh/VirtualNode' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccAWSAppmesh/VirtualNode -timeout 120m === RUN TestAccAWSAppmesh_serial === RUN TestAccAWSAppmesh_serial/VirtualNode === RUN TestAccAWSAppmesh_serial/VirtualNode/listenerOutlierDetection === RUN TestAccAWSAppmesh_serial/VirtualNode/listenerTls === RUN TestAccAWSAppmesh_serial/VirtualNode/logging === RUN TestAccAWSAppmesh_serial/VirtualNode/tags === RUN TestAccAWSAppmesh_serial/VirtualNode/disappears === RUN TestAccAWSAppmesh_serial/VirtualNode/backendClientPolicyFile === RUN TestAccAWSAppmesh_serial/VirtualNode/cloudMapServiceDiscovery === RUN TestAccAWSAppmesh_serial/VirtualNode/listenerConnectionPool === RUN TestAccAWSAppmesh_serial/VirtualNode/listenerTimeout === RUN TestAccAWSAppmesh_serial/VirtualNode/basic === RUN TestAccAWSAppmesh_serial/VirtualNode/backendClientPolicyAcm === RUN TestAccAWSAppmesh_serial/VirtualNode/backendDefaults === RUN TestAccAWSAppmesh_serial/VirtualNode/listenerHealthChecks --- PASS: TestAccAWSAppmesh_serial (468.07s) --- PASS: TestAccAWSAppmesh_serial/VirtualNode (468.07s) --- PASS: TestAccAWSAppmesh_serial/VirtualNode/listenerOutlierDetection (28.23s) --- PASS: TestAccAWSAppmesh_serial/VirtualNode/listenerTls (71.32s) --- PASS: TestAccAWSAppmesh_serial/VirtualNode/logging (27.86s) --- PASS: TestAccAWSAppmesh_serial/VirtualNode/tags (40.02s) --- PASS: TestAccAWSAppmesh_serial/VirtualNode/disappears (12.83s) --- PASS: TestAccAWSAppmesh_serial/VirtualNode/backendClientPolicyFile (28.32s) --- PASS: TestAccAWSAppmesh_serial/VirtualNode/cloudMapServiceDiscovery (100.22s) --- PASS: TestAccAWSAppmesh_serial/VirtualNode/listenerConnectionPool (28.26s) --- PASS: TestAccAWSAppmesh_serial/VirtualNode/listenerTimeout (28.09s) --- PASS: TestAccAWSAppmesh_serial/VirtualNode/basic (16.17s) --- PASS: TestAccAWSAppmesh_serial/VirtualNode/backendClientPolicyAcm (52.11s) --- PASS: TestAccAWSAppmesh_serial/VirtualNode/backendDefaults (28.17s) --- PASS: TestAccAWSAppmesh_serial/VirtualNode/listenerHealthChecks (28.25s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 468.135s --- aws/resource_aws_appmesh_virtual_node_test.go | 455 ++++++++---------- 1 file changed, 214 insertions(+), 241 deletions(-) diff --git a/aws/resource_aws_appmesh_virtual_node_test.go b/aws/resource_aws_appmesh_virtual_node_test.go index 03e63f4a183..0fc1783a87b 100644 --- a/aws/resource_aws_appmesh_virtual_node_test.go +++ b/aws/resource_aws_appmesh_virtual_node_test.go @@ -170,7 +170,7 @@ func testAccAwsAppmeshVirtualNode_backendClientPolicyAcm(t *testing.T) { ), }, { - Config: testAccAppmeshVirtualNodeConfig_clientPolicyAcm(meshName, vnName), + Config: testAccAppmeshVirtualNodeConfig_backendClientPolicyAcm(meshName, vnName), Check: resource.ComposeTestCheckFunc( testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), resource.TestCheckResourceAttr(resourceName, "name", vnName), @@ -191,7 +191,7 @@ func testAccAwsAppmeshVirtualNode_backendClientPolicyAcm(t *testing.T) { "virtual_service.0.virtual_service_name": "servicea.simpleapp.local", }), resource.TestCheckTypeSetElemAttr(resourceName, "spec.0.backend.*.virtual_service.0.client_policy.0.tls.0.ports.*", "8443"), - resource.TestCheckTypeSetElemAttrPair(resourceName, "spec.0.backend.*.virtual_service.0.client_policy.0.tls.0.acm.certificate_authority_arns.*", acmCAResourceName, "arn"), + 
resource.TestCheckTypeSetElemAttrPair(resourceName, "spec.0.backend.*.virtual_service.0.client_policy.0.tls.0.validation.0.trust.0.acm.0.certificate_authority_arns.*", acmCAResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "spec.0.backend_defaults.#", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.listener.0.connection_pool.#", "0"), @@ -218,7 +218,7 @@ func testAccAwsAppmeshVirtualNode_backendClientPolicyAcm(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAppmeshVirtualNodeConfig_clientPolicyAcm(meshName, vnName), + Config: testAccAppmeshVirtualNodeConfig_backendClientPolicyAcm(meshName, vnName), Check: resource.ComposeTestCheckFunc( // CA must be DISABLED for deletion. testAccCheckAwsAcmpcaCertificateAuthorityDisableCA(&ca), @@ -241,7 +241,7 @@ func testAccAwsAppmeshVirtualNode_backendClientPolicyFile(t *testing.T) { CheckDestroy: testAccCheckAppmeshVirtualNodeDestroy, Steps: []resource.TestStep{ { - Config: testAccAppmeshVirtualNodeConfig_clientPolicyFile(meshName, vnName), + Config: testAccAppmeshVirtualNodeConfig_backendClientPolicyFile(meshName, vnName), Check: resource.ComposeTestCheckFunc( testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), resource.TestCheckResourceAttr(resourceName, "name", vnName), @@ -283,7 +283,7 @@ func testAccAwsAppmeshVirtualNode_backendClientPolicyFile(t *testing.T) { ), }, { - Config: testAccAppmeshVirtualNodeConfig_clientPolicyFileUpdated(meshName, vnName), + Config: testAccAppmeshVirtualNodeConfig_backendClientPolicyFileUpdated(meshName, vnName), Check: resource.ComposeTestCheckFunc( testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), resource.TestCheckResourceAttr(resourceName, "name", vnName), @@ -684,7 +684,7 @@ func testAccAwsAppmeshVirtualNode_listenerOutlierDetection(t *testing.T) { testAccCheckResourceAttrAccountID(resourceName, "mesh_owner"), resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.backend.#", "1"), - tfawsresource.TestCheckTypeSetElemNestedAttrs(resourceName, "spec.0.backend.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "spec.0.backend.*", map[string]string{ "virtual_service.#": "1", "virtual_service.0.virtual_service_name": "servicea.simpleapp.local", }), @@ -720,7 +720,7 @@ func testAccAwsAppmeshVirtualNode_listenerOutlierDetection(t *testing.T) { testAccCheckResourceAttrAccountID(resourceName, "mesh_owner"), resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.backend.#", "1"), - tfawsresource.TestCheckTypeSetElemNestedAttrs(resourceName, "spec.0.backend.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "spec.0.backend.*", map[string]string{ "virtual_service.#": "1", "virtual_service.0.virtual_service_name": "servicea.simpleapp.local", }), @@ -866,7 +866,7 @@ func testAccAwsAppmeshVirtualNode_listenerTls(t *testing.T) { CheckDestroy: testAccCheckAppmeshVirtualNodeDestroy, Steps: []resource.TestStep{ { - Config: testAccAppmeshVirtualNodeConfig_tlsFile(meshName, vnName), + Config: testAccAppmeshVirtualNodeConfig_listenerTlsFile(meshName, vnName), Check: resource.ComposeTestCheckFunc( testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), resource.TestCheckResourceAttr(resourceName, "name", vnName), @@ -919,7 +919,7 @@ func testAccAwsAppmeshVirtualNode_listenerTls(t *testing.T) { ), }, { - Config: 
testAccAppmeshVirtualNodeConfig_tlsAcm(meshName, vnName), + Config: testAccAppmeshVirtualNodeConfig_listenerTlsAcm(meshName, vnName), Check: resource.ComposeTestCheckFunc( testAccCheckAppmeshVirtualNodeExists(resourceName, &vn), resource.TestCheckResourceAttr(resourceName, "name", vnName), @@ -963,7 +963,7 @@ func testAccAwsAppmeshVirtualNode_listenerTls(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAppmeshVirtualNodeConfig_tlsAcm(meshName, vnName), + Config: testAccAppmeshVirtualNodeConfig_listenerTlsAcm(meshName, vnName), Check: resource.ComposeTestCheckFunc( // CA must be DISABLED for deletion. testAccCheckAwsAcmpcaCertificateAuthorityDisableCA(&ca), @@ -1118,33 +1118,6 @@ func testAccCheckAppmeshVirtualNodeExists(name string, v *appmesh.VirtualNodeDat } } -func testAccCheckAppmeshVirtualNodeClientPolicyAcmCertificateAuthorityArn(name, key string, v *appmesh.VirtualNodeData) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - expected, ok := rs.Primary.Attributes[key] - if !ok { - return fmt.Errorf("Key not found: %s", key) - } - if v.Spec == nil || len(v.Spec.Backends) != 1 || v.Spec.Backends[0].VirtualService == nil || - v.Spec.Backends[0].VirtualService.ClientPolicy == nil || v.Spec.Backends[0].VirtualService.ClientPolicy.Tls == nil || - v.Spec.Backends[0].VirtualService.ClientPolicy.Tls.Validation == nil || v.Spec.Backends[0].VirtualService.ClientPolicy.Tls.Validation.Trust == nil || - v.Spec.Backends[0].VirtualService.ClientPolicy.Tls.Validation.Trust.Acm == nil || - len(v.Spec.Backends[0].VirtualService.ClientPolicy.Tls.Validation.Trust.Acm.CertificateAuthorityArns) != 1 { - return fmt.Errorf("Not found: .Spec.Backends[0].VirtualService.ClientPolicy.Tls.Validation.Trust.Acm.CertificateAuthorityArns[0]") - } - got := aws.StringValue(v.Spec.Backends[0].VirtualService.ClientPolicy.Tls.Validation.Trust.Acm.CertificateAuthorityArns[0]) - if got != expected { - return fmt.Errorf("Expected ACM CA ARN %q, got %q", expected, got) - } - - return nil - } -} - func testAccAppmeshVirtualNodeConfig_mesh(rName string) string { return fmt.Sprintf(` resource "aws_appmesh_mesh" "test" { @@ -1191,6 +1164,196 @@ resource "aws_appmesh_virtual_node" "test" { `, vnName)) } +func testAccAppmeshVirtualNodeConfig_backendDefaults(meshName, vnName string) string { + return composeConfig(testAccAppmeshVirtualNodeConfig_mesh(meshName), fmt.Sprintf(` +resource "aws_appmesh_virtual_node" "test" { + name = %[1]q + mesh_name = aws_appmesh_mesh.test.id + + spec { + backend_defaults { + client_policy { + tls { + ports = [8443] + + validation { + trust { + file { + certificate_chain = "/cert_chain.pem" + } + } + } + } + } + } + } +} +`, vnName)) +} + +func testAccAppmeshVirtualNodeConfig_backendDefaultsUpdated(meshName, vnName string) string { + return composeConfig(testAccAppmeshVirtualNodeConfig_mesh(meshName), fmt.Sprintf(` +resource "aws_appmesh_virtual_node" "test" { + name = %[1]q + mesh_name = aws_appmesh_mesh.test.id + + spec { + backend_defaults { + client_policy { + tls { + ports = [443, 8443] + + validation { + trust { + file { + certificate_chain = "/etc/ssl/certs/cert_chain.pem" + } + } + } + } + } + } + } +} +`, vnName)) +} + +func testAccAppmeshVirtualNodeConfig_backendClientPolicyAcm(meshName, vnName string) string { + return composeConfig( + testAccAppmeshVirtualNodeConfigRootCA(vnName), + testAccAppmeshVirtualNodeConfigPrivateCert(vnName), + 
testAccAppmeshVirtualNodeConfig_mesh(meshName), + fmt.Sprintf(` +resource "aws_appmesh_virtual_node" "test" { + name = %[1]q + mesh_name = aws_appmesh_mesh.test.id + + spec { + backend { + virtual_service { + virtual_service_name = "servicea.simpleapp.local" + + client_policy { + tls { + ports = [8443] + + validation { + trust { + acm { + certificate_authority_arns = [aws_acmpca_certificate_authority.test.arn] + } + } + } + } + } + } + } + + listener { + port_mapping { + port = 8080 + protocol = "http" + } + } + + service_discovery { + dns { + hostname = "serviceb.simpleapp.local" + } + } + } +} +`, vnName)) +} + +func testAccAppmeshVirtualNodeConfig_backendClientPolicyFile(meshName, vnName string) string { + return composeConfig(testAccAppmeshVirtualNodeConfig_mesh(meshName), fmt.Sprintf(` +resource "aws_appmesh_virtual_node" "test" { + name = %[1]q + mesh_name = aws_appmesh_mesh.test.id + + spec { + backend { + virtual_service { + virtual_service_name = "servicea.simpleapp.local" + + client_policy { + tls { + ports = [8443] + + validation { + trust { + file { + certificate_chain = "/cert_chain.pem" + } + } + } + } + } + } + } + + listener { + port_mapping { + port = 8080 + protocol = "http" + } + } + + service_discovery { + dns { + hostname = "serviceb.simpleapp.local" + } + } + } +} +`, vnName)) +} + +func testAccAppmeshVirtualNodeConfig_backendClientPolicyFileUpdated(meshName, vnName string) string { + return composeConfig(testAccAppmeshVirtualNodeConfig_mesh(meshName), fmt.Sprintf(` +resource "aws_appmesh_virtual_node" "test" { + name = %[1]q + mesh_name = aws_appmesh_mesh.test.id + + spec { + backend { + virtual_service { + virtual_service_name = "servicea.simpleapp.local" + + client_policy { + tls { + ports = [443, 8443] + + validation { + trust { + file { + certificate_chain = "/etc/ssl/certs/cert_chain.pem" + } + } + } + } + } + } + } + + listener { + port_mapping { + port = 8080 + protocol = "http" + } + } + + service_discovery { + dns { + hostname = "serviceb.simpleapp.local" + } + } + } +} +`, vnName)) +} + func testAccAppmeshVirtualNodeConfig_cloudMapServiceDiscovery(meshName, vnName, rName, attrKey, attrValue string) string { return composeConfig(testAccAppmeshVirtualNodeConfig_mesh(meshName), fmt.Sprintf(` resource "aws_service_discovery_http_namespace" "test" { @@ -1560,61 +1723,7 @@ resource "aws_appmesh_virtual_node" "test" { `, vnName)) } -func testAccAppmeshVirtualNodeConfig_logging(meshName, vnName, path string) string { - return composeConfig(testAccAppmeshVirtualNodeConfig_mesh(meshName), fmt.Sprintf(` -resource "aws_appmesh_virtual_node" "test" { - name = %[1]q - mesh_name = aws_appmesh_mesh.test.id - - spec { - backend { - virtual_service { - virtual_service_name = "servicea.simpleapp.local" - } - } - - listener { - port_mapping { - port = 8080 - protocol = "http" - } - } - - logging { - access_log { - file { - path = %[2]q - } - } - } - - service_discovery { - dns { - hostname = "serviceb.simpleapp.local" - } - } - } -} -`, vnName, path)) -} - -func testAccAppmeshVirtualNodeConfig_tags(meshName, vnName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return composeConfig(testAccAppmeshVirtualNodeConfig_mesh(meshName), fmt.Sprintf(` -resource "aws_appmesh_virtual_node" "test" { - name = %[1]q - mesh_name = aws_appmesh_mesh.test.id - - spec {} - - tags = { - %[2]s = %[3]q - %[4]s = %[5]q - } -} -`, vnName, tagKey1, tagValue1, tagKey2, tagValue2)) -} - -func testAccAppmeshVirtualNodeConfig_tlsFile(meshName, vnName string) string { +func 
testAccAppmeshVirtualNodeConfig_listenerTlsFile(meshName, vnName string) string { return composeConfig(testAccAppmeshVirtualNodeConfig_mesh(meshName), fmt.Sprintf(` resource "aws_appmesh_virtual_node" "test" { name = %[1]q @@ -1655,7 +1764,7 @@ resource "aws_appmesh_virtual_node" "test" { `, vnName)) } -func testAccAppmeshVirtualNodeConfig_tlsAcm(meshName, vnName string) string { +func testAccAppmeshVirtualNodeConfig_listenerTlsAcm(meshName, vnName string) string { return composeConfig( testAccAppmeshVirtualNodeConfigRootCA(vnName), testAccAppmeshVirtualNodeConfigPrivateCert(vnName), @@ -1699,7 +1808,7 @@ resource "aws_appmesh_virtual_node" "test" { `, vnName)) } -func testAccAppmeshVirtualNodeConfig_clientPolicyFile(meshName, vnName string) string { +func testAccAppmeshVirtualNodeConfig_logging(meshName, vnName, path string) string { return composeConfig(testAccAppmeshVirtualNodeConfig_mesh(meshName), fmt.Sprintf(` resource "aws_appmesh_virtual_node" "test" { name = %[1]q @@ -1709,20 +1818,6 @@ resource "aws_appmesh_virtual_node" "test" { backend { virtual_service { virtual_service_name = "servicea.simpleapp.local" - - client_policy { - tls { - ports = [8443] - - validation { - trust { - file { - certificate_chain = "/cert_chain.pem" - } - } - } - } - } } } @@ -1733,98 +1828,14 @@ resource "aws_appmesh_virtual_node" "test" { } } - service_discovery { - dns { - hostname = "serviceb.simpleapp.local" - } - } - } -} -`, vnName)) -} - -func testAccAppmeshVirtualNodeConfig_clientPolicyFileUpdated(meshName, vnName string) string { - return composeConfig(testAccAppmeshVirtualNodeConfig_mesh(meshName), fmt.Sprintf(` -resource "aws_appmesh_virtual_node" "test" { - name = %[1]q - mesh_name = aws_appmesh_mesh.test.id - - spec { - backend { - virtual_service { - virtual_service_name = "servicea.simpleapp.local" - - client_policy { - tls { - ports = [443, 8443] - - validation { - trust { - file { - certificate_chain = "/etc/ssl/certs/cert_chain.pem" - } - } - } - } - } - } - } - - listener { - port_mapping { - port = 8080 - protocol = "http" - } - } - - service_discovery { - dns { - hostname = "serviceb.simpleapp.local" - } - } - } -} -`, vnName)) -} - -func testAccAppmeshVirtualNodeConfig_clientPolicyAcm(meshName, vnName string) string { - return composeConfig( - testAccAppmeshVirtualNodeConfigRootCA(vnName), - testAccAppmeshVirtualNodeConfigPrivateCert(vnName), - testAccAppmeshVirtualNodeConfig_mesh(meshName), - fmt.Sprintf(` -resource "aws_appmesh_virtual_node" "test" { - name = %[1]q - mesh_name = aws_appmesh_mesh.test.id - - spec { - backend { - virtual_service { - virtual_service_name = "servicea.simpleapp.local" - - client_policy { - tls { - ports = [8443] - - validation { - trust { - acm { - certificate_authority_arns = [aws_acmpca_certificate_authority.test.arn] - } - } - } - } + logging { + access_log { + file { + path = %[2]q } } } - listener { - port_mapping { - port = 8080 - protocol = "http" - } - } - service_discovery { dns { hostname = "serviceb.simpleapp.local" @@ -1832,59 +1843,21 @@ resource "aws_appmesh_virtual_node" "test" { } } } -`, vnName)) -} - -func testAccAppmeshVirtualNodeConfig_backendDefaults(meshName, vnName string) string { - return composeConfig(testAccAppmeshVirtualNodeConfig_mesh(meshName), fmt.Sprintf(` -resource "aws_appmesh_virtual_node" "test" { - name = %[1]q - mesh_name = aws_appmesh_mesh.test.id - - spec { - backend_defaults { - client_policy { - tls { - ports = [8443] - - validation { - trust { - file { - certificate_chain = "/cert_chain.pem" - } - } - } - } - 
} - } - } -} -`, vnName)) +`, vnName, path)) } -func testAccAppmeshVirtualNodeConfig_backendDefaultsUpdated(meshName, vnName string) string { +func testAccAppmeshVirtualNodeConfig_tags(meshName, vnName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { return composeConfig(testAccAppmeshVirtualNodeConfig_mesh(meshName), fmt.Sprintf(` resource "aws_appmesh_virtual_node" "test" { name = %[1]q mesh_name = aws_appmesh_mesh.test.id - spec { - backend_defaults { - client_policy { - tls { - ports = [443, 8443] + spec {} - validation { - trust { - file { - certificate_chain = "/etc/ssl/certs/cert_chain.pem" - } - } - } - } - } - } + tags = { + %[2]s = %[3]q + %[4]s = %[5]q } } -`, vnName)) +`, vnName, tagKey1, tagValue1, tagKey2, tagValue2)) } From 130fc3ebef3ebac71bc5491d2fba665ab26f64c8 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 1 Dec 2020 17:56:02 -0800 Subject: [PATCH 0087/1212] Uses the TypeStringNullableInt pattern for instance_warmup --- aws/internal/nullable/int.go | 78 +++++++++ aws/internal/nullable/int_test.go | 100 ++++++++++++ aws/internal/nullable/testing.go | 45 ++++++ .../service/autoscaling/waiter/waiter.go | 2 +- aws/resource_aws_autoscaling_group.go | 25 +-- aws/resource_aws_autoscaling_group_test.go | 148 +++++++++++++++++- go.mod | 2 +- go.sum | 30 ---- .../docs/r/autoscaling_group.html.markdown | 6 +- 9 files changed, 382 insertions(+), 54 deletions(-) create mode 100644 aws/internal/nullable/int.go create mode 100644 aws/internal/nullable/int_test.go create mode 100644 aws/internal/nullable/testing.go diff --git a/aws/internal/nullable/int.go b/aws/internal/nullable/int.go new file mode 100644 index 00000000000..53981603485 --- /dev/null +++ b/aws/internal/nullable/int.go @@ -0,0 +1,78 @@ +package nullable + +import ( + "fmt" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +const ( + TypeNullableInt = schema.TypeString +) + +type Int string + +func (i Int) IsNull() bool { + return i == "" +} + +func (i Int) Value() (int64, bool, error) { + if i.IsNull() { + return 0, true, nil + } + + value, err := strconv.ParseInt(string(i), 10, 64) + if err != nil { + return 0, false, err + } + return value, false, nil +} + +// ValidateTypeStringNullableInt provides custom error messaging for TypeString ints +// Some arguments require an int value or unspecified, empty field. +func ValidateTypeStringNullableInt(v interface{}, k string) (ws []string, es []error) { + value, ok := v.(string) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be string", k)) + return + } + + if value == "" { + return + } + + if _, err := strconv.ParseInt(value, 10, 64); err != nil { + es = append(es, fmt.Errorf("%s: cannot parse '%s' as int: %w", k, value, err)) + } + + return +} + +// ValidateTypeStringNullableIntAtLeast provides custom error messaging for TypeString ints +// Some arguments require an int value or unspecified, empty field. 
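// For illustration only (not part of the original patch): from a resource
// file, the nullable pattern is wired up roughly as below. The attribute name
// "example_warmup" and the field input.Warmup are hypothetical placeholders.
//
//	"example_warmup": {
//	    Type:         nullable.TypeNullableInt, // really schema.TypeString
//	    Optional:     true,
//	    ValidateFunc: nullable.ValidateTypeStringNullableIntAtLeast(0),
//	},
//
//	if v, null, err := nullable.Int(d.Get("example_warmup").(string)).Value(); err == nil && !null {
//	    input.Warmup = aws.Int64(v) // an empty string (null) leaves the API field unset
//	}
//
// The function below performs the same parsing as ValidateTypeStringNullableInt
// and additionally enforces the supplied minimum.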
+func ValidateTypeStringNullableIntAtLeast(min int) schema.SchemaValidateFunc { + return func(i interface{}, k string) (ws []string, es []error) { + value, ok := i.(string) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be string", k)) + return + } + + if value == "" { + return + } + + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + es = append(es, fmt.Errorf("%s: cannot parse '%s' as int: %w", k, value, err)) + return + } + + if v < int64(min) { + es = append(es, fmt.Errorf("expected %s to be at least (%d), got %d", k, min, v)) + } + + return + } +} diff --git a/aws/internal/nullable/int_test.go b/aws/internal/nullable/int_test.go new file mode 100644 index 00000000000..cc1c61e554e --- /dev/null +++ b/aws/internal/nullable/int_test.go @@ -0,0 +1,100 @@ +package nullable + +import ( + "errors" + "regexp" + "strconv" + "testing" +) + +func TestNullableInt(t *testing.T) { + cases := []struct { + val string + expectedNull bool + expectedValue int64 + expectedErr error + }{ + { + val: "1", + expectedNull: false, + expectedValue: 1, + }, + { + val: "", + expectedNull: true, + expectedValue: 0, + }, + { + val: "A", + expectedNull: false, + expectedValue: 0, + expectedErr: strconv.ErrSyntax, + }, + } + + for i, tc := range cases { + v := Int(tc.val) + + if null := v.IsNull(); null != tc.expectedNull { + t.Fatalf("expected test case %d IsNull to return %t, got %t", i, null, tc.expectedNull) + } + + value, null, err := v.Value() + if value != tc.expectedValue { + t.Fatalf("expected test case %d Value to be %d, got %d", i, tc.expectedValue, value) + } + if null != tc.expectedNull { + t.Fatalf("expected test case %d Value null flag to be %t, got %t", i, tc.expectedNull, null) + } + if tc.expectedErr == nil && err != nil { + t.Fatalf("expected test case %d to succeed, got error %s", i, err) + } + if tc.expectedErr != nil { + if !errors.Is(err, tc.expectedErr) { + t.Fatalf("expected test case %d to have error matching \"%s\", got %s", i, tc.expectedErr, err) + } + } + } +} + +func TestValidationInt(t *testing.T) { + runTestCases(t, []testCase{ + { + val: "1", + f: ValidateTypeStringNullableInt, + }, + { + val: "A", + f: ValidateTypeStringNullableInt, + expectedErr: regexp.MustCompile(`[\w]+: cannot parse 'A' as int: .*`), + }, + { + val: 1, + f: ValidateTypeStringNullableInt, + expectedErr: regexp.MustCompile(`expected type of [\w]+ to be string`), + }, + }) +} + +func TestValidationIntAtLeast(t *testing.T) { + runTestCases(t, []testCase{ + { + val: "1", + f: ValidateTypeStringNullableIntAtLeast(1), + }, + { + val: "1", + f: ValidateTypeStringNullableIntAtLeast(0), + }, + { + val: "1", + f: ValidateTypeStringNullableIntAtLeast(2), + expectedErr: regexp.MustCompile(`expected [\w]+ to be at least \(2\), got 1`), + }, + { + val: 1, + f: ValidateTypeStringNullableIntAtLeast(2), + expectedErr: regexp.MustCompile(`expected type of [\w]+ to be string`), + }, + }) +} diff --git a/aws/internal/nullable/testing.go b/aws/internal/nullable/testing.go new file mode 100644 index 00000000000..9921ac5375f --- /dev/null +++ b/aws/internal/nullable/testing.go @@ -0,0 +1,45 @@ +package nullable + +import ( + "regexp" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + testing "github.com/mitchellh/go-testing-interface" +) + +type testCase struct { + val interface{} + f schema.SchemaValidateFunc + expectedErr *regexp.Regexp +} + +func runTestCases(t testing.T, cases []testCase) { + t.Helper() + + matchErr := func(errs []error, r *regexp.Regexp) bool { + // err must match one 
provided + for _, err := range errs { + if r.MatchString(err.Error()) { + return true + } + } + + return false + } + + for i, tc := range cases { + _, errs := tc.f(tc.val, "test_property") + + if len(errs) == 0 && tc.expectedErr == nil { + continue + } + + if len(errs) != 0 && tc.expectedErr == nil { + t.Fatalf("expected test case %d to produce no errors, got %v", i, errs) + } + + if !matchErr(errs, tc.expectedErr) { + t.Fatalf("expected test case %d to produce error matching \"%s\", got %v", i, tc.expectedErr, errs) + } + } +} diff --git a/aws/internal/service/autoscaling/waiter/waiter.go b/aws/internal/service/autoscaling/waiter/waiter.go index b4bf99543d8..f278c3f859a 100644 --- a/aws/internal/service/autoscaling/waiter/waiter.go +++ b/aws/internal/service/autoscaling/waiter/waiter.go @@ -12,7 +12,7 @@ const ( InstanceRefreshSuccessfulTimeout = 5 * time.Minute // Maximum amount of time to wait for an InstanceRefresh to be Cancelled - InstanceRefreshCancelledTimeout = 5 * time.Minute + InstanceRefreshCancelledTimeout = 10 * time.Minute ) func InstanceRefreshSuccessful(conn *autoscaling.AutoScaling, asgName, instanceRefreshId string) (*autoscaling.InstanceRefresh, error) { diff --git a/aws/resource_aws_autoscaling_group.go b/aws/resource_aws_autoscaling_group.go index 0bff7005276..6d54e890ca8 100644 --- a/aws/resource_aws_autoscaling_group.go +++ b/aws/resource_aws_autoscaling_group.go @@ -22,6 +22,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/hashcode" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/nullable" "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/autoscaling/waiter" ) @@ -493,10 +494,9 @@ func resourceAwsAutoscalingGroup() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "instance_warmup": { - Type: schema.TypeInt, + Type: nullable.TypeNullableInt, Optional: true, - Default: -1, // default to health_check_grace_period - ValidateFunc: validation.IntAtLeast(-1), + ValidateFunc: nullable.ValidateTypeStringNullableIntAtLeast(0), }, "min_healthy_percentage": { Type: schema.TypeInt, @@ -1843,8 +1843,7 @@ func waitUntilAutoscalingGroupLoadBalancersRemoved(conn *autoscaling.AutoScaling return nil } -// TODO: rename -func expandAutoScalingGroupInstanceRefresh(asgName string, l []interface{}) *autoscaling.StartInstanceRefreshInput { +func createAutoScalingGroupInstanceRefreshInput(asgName string, l []interface{}) *autoscaling.StartInstanceRefreshInput { if len(l) == 0 || l[0] == nil { return nil } @@ -1868,7 +1867,10 @@ func expandAutoScalingGroupInstanceRefreshPreferences(l []interface{}) *autoscal refreshPreferences := &autoscaling.RefreshPreferences{} if v, ok := m["instance_warmup"]; ok { - refreshPreferences.InstanceWarmup = aws.Int64(int64(v.(int))) + i := nullable.Int(v.(string)) + if v, null, _ := i.Value(); !null { + refreshPreferences.InstanceWarmup = aws.Int64(v) + } } if v, ok := m["min_healthy_percentage"]; ok { @@ -1880,25 +1882,24 @@ func expandAutoScalingGroupInstanceRefreshPreferences(l []interface{}) *autoscal // autoScalingGroupRefreshInstances starts a new Instance Refresh in this // Auto Scaling Group. If there is already an active refresh, it is cancelled. 
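// Illustrative summary (not part of the original patch) of how the nullable
// instance_warmup value flows into the StartInstanceRefresh request, matching
// the expander above and the test cases further below:
//
//	"60" -> Preferences.InstanceWarmup = aws.Int64(60)
//	"0"  -> Preferences.InstanceWarmup = aws.Int64(0)  // an explicit zero warmup
//	""   -> Preferences.InstanceWarmup = nil           // API default: the group's
//	                                                   // health check grace period
//
// The cancel-then-start sequence below implements the behaviour described in
// the comment above.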
-func autoScalingGroupRefreshInstances(conn *autoscaling.AutoScaling, asgName string, d []interface{}) error { - input := expandAutoScalingGroupInstanceRefresh(asgName, d) +func autoScalingGroupRefreshInstances(conn *autoscaling.AutoScaling, asgName string, refreshConfig []interface{}) error { - log.Printf("[DEBUG] Cancelling active refresh in ASG %s, if any...", asgName) + log.Printf("[DEBUG] Cancelling active Instance Refresh in ASG %s, if any...", asgName) if err := cancelAutoscalingInstanceRefresh(conn, asgName); err != nil { // todo: add comment about subsequent ASG updates not picking up the refresh? return fmt.Errorf("failed to cancel previous refresh: %w", err) } - log.Printf("[DEBUG] Starting instance refresh in ASG %s...", asgName) - + input := createAutoScalingGroupInstanceRefreshInput(asgName, refreshConfig) + log.Printf("[DEBUG] Starting Instance Refresh on ASG (%s): %s", asgName, input) output, err := conn.StartInstanceRefresh(input) if err != nil { return err } instanceRefreshID := aws.StringValue(output.InstanceRefreshId) - log.Printf("[INFO] Started instance refresh %s in ASG %s", instanceRefreshID, asgName) + log.Printf("[INFO] Started Instance Refresh (%s) on ASG (%s)", instanceRefreshID, asgName) return nil } diff --git a/aws/resource_aws_autoscaling_group_test.go b/aws/resource_aws_autoscaling_group_test.go index d29c98502a1..441c9ae8de8 100644 --- a/aws/resource_aws_autoscaling_group_test.go +++ b/aws/resource_aws_autoscaling_group_test.go @@ -149,12 +149,9 @@ func TestAccAWSAutoScalingGroup_basic(t *testing.T) { testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.new", &lc), testAccCheckAutoscalingLatestInstanceRefreshState(&group, 0, 0, nil), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "desired_capacity", "5"), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "termination_policies.0", "ClosestToNextInstanceHour"), - resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "protect_from_scale_in", "true"), + resource.TestCheckResourceAttr("aws_autoscaling_group.bar", "desired_capacity", "5"), + resource.TestCheckResourceAttr("aws_autoscaling_group.bar", "termination_policies.0", "ClosestToNextInstanceHour"), + resource.TestCheckResourceAttr("aws_autoscaling_group.bar", "protect_from_scale_in", "true"), testLaunchConfigurationName("aws_autoscaling_group.bar", &lc), testAccCheckAutoscalingTags(&group.Tags, "FromTags1Changed", map[string]interface{}{ "value": "value1changed", @@ -4585,3 +4582,142 @@ func testAccCheckAutoscalingLatestInstanceRefreshState( status) } } + +func TestCreateAutoScalingGroupInstanceRefreshInput(t *testing.T) { + const asgName = "test-asg" + testCases := []struct { + name string + input []interface{} + expected *autoscaling.StartInstanceRefreshInput + }{ + { + name: "empty list", + input: []interface{}{}, + expected: nil, + }, + { + name: "nil", + input: []interface{}{nil}, + expected: nil, + }, + { + name: "defaults", + input: []interface{}{map[string]interface{}{ + "strategy": "Rolling", + "preferences": []interface{}{}, + }}, + expected: &autoscaling.StartInstanceRefreshInput{ + AutoScalingGroupName: aws.String(asgName), + Strategy: aws.String("Rolling"), + Preferences: nil, + }, + }, + { + name: "instance_warmup only", + input: []interface{}{map[string]interface{}{ + "strategy": "Rolling", + "preferences": []interface{}{ + map[string]interface{}{ + "instance_warmup": "60", + }, + }, + }}, + expected: 
&autoscaling.StartInstanceRefreshInput{ + AutoScalingGroupName: aws.String(asgName), + Strategy: aws.String("Rolling"), + Preferences: &autoscaling.RefreshPreferences{ + InstanceWarmup: aws.Int64(60), + MinHealthyPercentage: nil, + }, + }, + }, + { + name: "instance_warmup zero", + input: []interface{}{map[string]interface{}{ + "strategy": "Rolling", + "preferences": []interface{}{ + map[string]interface{}{ + "instance_warmup": "0", + }, + }, + }}, + expected: &autoscaling.StartInstanceRefreshInput{ + AutoScalingGroupName: aws.String(asgName), + Strategy: aws.String("Rolling"), + Preferences: &autoscaling.RefreshPreferences{ + InstanceWarmup: aws.Int64(0), + MinHealthyPercentage: nil, + }, + }, + }, + { + name: "instance_warmup empty string", + input: []interface{}{map[string]interface{}{ + "strategy": "Rolling", + "preferences": []interface{}{ + map[string]interface{}{ + "instance_warmup": "", + "min_healthy_percentage": 80, + }, + }, + }}, + expected: &autoscaling.StartInstanceRefreshInput{ + AutoScalingGroupName: aws.String(asgName), + Strategy: aws.String("Rolling"), + Preferences: &autoscaling.RefreshPreferences{ + InstanceWarmup: nil, + MinHealthyPercentage: aws.Int64(80), + }, + }, + }, + { + name: "min_healthy_percentage only", + input: []interface{}{map[string]interface{}{ + "strategy": "Rolling", + "preferences": []interface{}{ + map[string]interface{}{ + "min_healthy_percentage": 80, + }, + }, + }}, + expected: &autoscaling.StartInstanceRefreshInput{ + AutoScalingGroupName: aws.String(asgName), + Strategy: aws.String("Rolling"), + Preferences: &autoscaling.RefreshPreferences{ + InstanceWarmup: nil, + MinHealthyPercentage: aws.Int64(80), + }, + }, + }, + { + name: "preferences", + input: []interface{}{map[string]interface{}{ + "strategy": "Rolling", + "preferences": []interface{}{ + map[string]interface{}{ + "instance_warmup": "60", + "min_healthy_percentage": 80, + }, + }, + }}, + expected: &autoscaling.StartInstanceRefreshInput{ + AutoScalingGroupName: aws.String(asgName), + Strategy: aws.String("Rolling"), + Preferences: &autoscaling.RefreshPreferences{ + InstanceWarmup: aws.Int64(60), + MinHealthyPercentage: aws.Int64(80), + }, + }, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + got := createAutoScalingGroupInstanceRefreshInput(asgName, testCase.input) + + if !reflect.DeepEqual(got, testCase.expected) { + t.Errorf("got %s, expected %s", got, testCase.expected) + } + }) + } +} diff --git a/go.mod b/go.mod index 7f1a06c8ae7..afb8bb7b154 100644 --- a/go.mod +++ b/go.mod @@ -11,13 +11,13 @@ require ( github.com/hashicorp/go-hclog v0.10.0 // indirect github.com/hashicorp/go-multierror v1.1.0 github.com/hashicorp/go-version v1.2.1 - github.com/hashicorp/terraform-plugin-sdk v1.16.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.2.0 github.com/jen20/awspolicyequivalence v1.1.0 github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba github.com/mattn/go-colorable v0.1.7 // indirect github.com/mitchellh/copystructure v1.0.0 github.com/mitchellh/go-homedir v1.1.0 + github.com/mitchellh/go-testing-interface v1.0.4 github.com/pquerna/otp v1.3.0 github.com/stretchr/testify v1.6.1 // indirect gopkg.in/yaml.v2 v2.3.0 diff --git a/go.sum b/go.sum index be1108232cd..546e2fbba57 100644 --- a/go.sum +++ b/go.sum @@ -59,8 +59,6 @@ github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2 github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= 
github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= @@ -164,8 +162,6 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= @@ -181,7 +177,6 @@ github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= -github.com/hashicorp/go-getter v1.4.2-0.20200106182914-9813cbd4eb02/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= github.com/hashicorp/go-getter v1.5.0 h1:ciWJaeZWSMbc5OiLMpKp40MKFPqO44i0h3uyfXPBkkk= github.com/hashicorp/go-getter v1.5.0/go.mod h1:a7z7NPPfNQpJWcn4rSWFtdrSldqLdLPEF3d8nFMsSLM= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= @@ -199,32 +194,22 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f h1:UdxlrJz4JOnY8W+DbLISwf2B8WXEolNRA8BGCwI9jws= -github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= -github.com/hashicorp/hcl/v2 v2.0.0/go.mod 
h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90= github.com/hashicorp/hcl/v2 v2.3.0 h1:iRly8YaMwTBAKhn1Ybk7VSdzbnopghktCD031P8ggUE= github.com/hashicorp/hcl/v2 v2.3.0/go.mod h1:d+FwDBbOLvpAM3Z6J7gPj/VoAGkNe/gm352ZhjJ/Zv8= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/terraform-config-inspect v0.0.0-20191115094559-17f92b0546e8/go.mod h1:p+ivJws3dpqbp1iP84+npOyAmTTOLMgCzrXd3GSdn/A= github.com/hashicorp/terraform-exec v0.10.0 h1:3nh/1e3u9gYRUQGOKWp/8wPR7ABlL2F14sZMZBrp+dM= github.com/hashicorp/terraform-exec v0.10.0/go.mod h1:tOT8j1J8rP05bZBGWXfMyU3HkLi1LWyqL3Bzsc3CJjo= github.com/hashicorp/terraform-json v0.5.0 h1:7TV3/F3y7QVSuN4r9BEXqnWqrAyeOtON8f0wvREtyzs= github.com/hashicorp/terraform-json v0.5.0/go.mod h1:eAbqb4w0pSlRmdvl8fOyHAi/+8jnkVYN28gJkSJrLhU= github.com/hashicorp/terraform-plugin-go v0.1.0 h1:kyXZ0nkHxiRev/q18N40IbRRk4AV0zE/MDJkDM3u8dY= github.com/hashicorp/terraform-plugin-go v0.1.0/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4= -github.com/hashicorp/terraform-plugin-sdk v1.16.0 h1:NrkXMRjHErUPPTHQkZ6JIn6bByiJzGnlJzH1rVdNEuE= -github.com/hashicorp/terraform-plugin-sdk v1.16.0/go.mod h1:5sVxrwW6/xzFhZyql+Q9zXCUEJaGWcBIxBbZFLpVXOI= github.com/hashicorp/terraform-plugin-sdk/v2 v2.2.0 h1:2m4uKA97R8ijHGLwhHdpSJyI8Op1FpS/ozpoF21jK7s= github.com/hashicorp/terraform-plugin-sdk/v2 v2.2.0/go.mod h1:+12dJQebYjuU/yiq94iZUPuC66abfRBrXdpVJia3ojk= -github.com/hashicorp/terraform-plugin-test/v2 v2.1.2/go.mod h1:jerO5mrd+jVNALy8aiq+VZOg/CR8T2T1QR3jd6JKGOI= -github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596 h1:hjyO2JsNZUKT1ym+FAdlBEkGPevazYsmVgIMw7dVELg= -github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= @@ -264,13 +249,11 @@ github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LE github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.7 h1:bQGKb3vps/j0E9GfJQ03JyhRuxsvdAanXlT9BTw3mdw= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= @@ -279,7 +262,6 @@ github.com/mattn/go-isatty 
v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mitchellh/cli v1.1.1 h1:J64v/xD7Clql+JVKSvkYojLOXu1ibnY9ZjGLwSt/89w= github.com/mitchellh/cli v1.1.1/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -311,8 +293,6 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.1 h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI= -github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E= github.com/pquerna/otp v1.3.0 h1:oJV/SkzR33anKXwQU3Of42rL4wbrffP4uvUf1SvS5Xs= github.com/pquerna/otp v1.3.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -320,10 +300,7 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -332,11 +309,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= -github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmihailenco/msgpack v4.0.1+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= 
github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= @@ -344,13 +319,9 @@ github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0B github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= -github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= github.com/zclconf/go-cty v1.2.1 h1:vGMsygfmeCl4Xb6OA5U5XVAaQZ69FvoG7X2jUtQujb8= github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty-yaml v1.0.1 h1:up11wlgAaDvlAGENcFDnZgkn0qUJurso7k6EpURKNF8= -github.com/zclconf/go-cty-yaml v1.0.1/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -412,7 +383,6 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= diff --git a/website/docs/r/autoscaling_group.html.markdown b/website/docs/r/autoscaling_group.html.markdown index 5ad6477eb9b..901a0471ac7 100644 --- a/website/docs/r/autoscaling_group.html.markdown +++ b/website/docs/r/autoscaling_group.html.markdown @@ -352,10 +352,8 @@ This configuration block supports the following: * `strategy` - (Required) The strategy to use for instance refresh. The only allowed value is `Rolling`. See [StartInstanceRefresh Action](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_StartInstanceRefresh.html#API_StartInstanceRefresh_RequestParameters) for more information. * `preferences` - (Optional) Override default parameters for Instance Refresh. - * `instance_warmup_seconds` - (Optional) The number of seconds until a newly launched instance is configured and ready to use. Default behavior (set with `-1` or `null`) is to match the Auto Scaling Group's health check grace period. - * `min_healthy_percentage` - (Optional) The amount of capacity in the Auto Scaling group - that must remain healthy during an instance refresh to allow the operation to continue, - as a percentage of the desired capacity of the Auto Scaling group. Defaults to `90`. + * `instance_warmup_seconds` - (Optional) The number of seconds until a newly launched instance is configured and ready to use. 
Default behavior is to use the Auto Scaling Group's health check grace period. + * `min_healthy_percentage` - (Optional) The amount of capacity in the Auto Scaling group that must remain healthy during an instance refresh to allow the operation to continue, as a percentage of the desired capacity of the Auto Scaling group. Defaults to `90`. ~> **NOTE:** A refresh is only started when any of the following Auto Scaling Group properties change: `launch_configuration`, `launch_template`, `mixed_instances_policy`, `vpc_zone_identifier`, `availability_zones`, `placement_group`, or any `tag` or `tags` configured to propagate at launch. From 6a4fe8e637389a92fd58cc0bc52196d49055b66c Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Wed, 2 Dec 2020 10:42:06 -0500 Subject: [PATCH 0088/1212] update lambda package type check to prevent panic --- aws/resource_aws_lambda_function.go | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/aws/resource_aws_lambda_function.go b/aws/resource_aws_lambda_function.go index ee298898dff..9c216d2508c 100644 --- a/aws/resource_aws_lambda_function.go +++ b/aws/resource_aws_lambda_function.go @@ -808,26 +808,25 @@ func resourceAwsLambdaFunctionRead(d *schema.ResourceData, meta interface{}) err return nil } - codeSigningConfigInput := &lambda.GetFunctionCodeSigningConfigInput{ - FunctionName: aws.String(d.Get("function_name").(string)), - } - // Code Signing is only supported on zip packaged lambda functions. - if *function.PackageType == lambda.PackageTypeZip { + var codeSigningConfigArn string + + if aws.StringValue(function.PackageType) == lambda.PackageTypeZip { + codeSigningConfigInput := &lambda.GetFunctionCodeSigningConfigInput{ + FunctionName: aws.String(d.Id()), + } getCodeSigningConfigOutput, err := conn.GetFunctionCodeSigningConfig(codeSigningConfigInput) if err != nil { return fmt.Errorf("error getting Lambda Function (%s) code signing config %w", d.Id(), err) } - if getCodeSigningConfigOutput == nil || getCodeSigningConfigOutput.CodeSigningConfigArn == nil { - d.Set("code_signing_config_arn", "") - } else { - d.Set("code_signing_config_arn", getCodeSigningConfigOutput.CodeSigningConfigArn) + if getCodeSigningConfigOutput != nil { + codeSigningConfigArn = aws.StringValue(getCodeSigningConfigOutput.CodeSigningConfigArn) } - } else { - d.Set("code_signing_config_arn", "") } + d.Set("code_signing_config_arn", codeSigningConfigArn) + return nil } From efd25e8af5e614e6f8e279e51bf8f895d5cf683c Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 3 Dec 2020 12:45:55 -0800 Subject: [PATCH 0089/1212] Adds resource skipper function for sweepers --- aws/provider_test.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/aws/provider_test.go b/aws/provider_test.go index 79bebab0c85..69f34033d74 100644 --- a/aws/provider_test.go +++ b/aws/provider_test.go @@ -18,6 +18,7 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/organizations" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -1061,6 +1062,14 @@ func testSweepSkipSweepError(err error) bool { return false } +// Check sweeper API call error for reasons to skip a specific resource +// These include AccessDeniedException for individual resources, e.g. 
managed by central IT +func testSweepSkipResourceError(err error) bool { + // Since acceptance test sweepers are best effort, we allow bypassing this error globally + // instead of individual test sweeper fixes. + return tfawserr.ErrCodeEquals(err, "AccessDeniedException") +} + func TestAccAWSProvider_Endpoints(t *testing.T) { var providers []*schema.Provider var endpoints strings.Builder From d105d4c7357987a714828529079cab89a53e86dc Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 3 Dec 2020 12:46:59 -0800 Subject: [PATCH 0090/1212] Factors IAM Role deletion and allows individual resource skipping in sweeper --- aws/resource_aws_iam_role.go | 97 +++++++++++++++---------------- aws/resource_aws_iam_role_test.go | 37 +++++------- 2 files changed, 63 insertions(+), 71 deletions(-) diff --git a/aws/resource_aws_iam_role.go b/aws/resource_aws_iam_role.go index 44f8aaa5699..9c5f149a50e 100644 --- a/aws/resource_aws_iam_role.go +++ b/aws/resource_aws_iam_role.go @@ -9,10 +9,12 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/iam/waiter" ) func resourceAwsIamRole() *schema.Resource { @@ -311,34 +313,42 @@ func resourceAwsIamRoleUpdate(d *schema.ResourceData, meta interface{}) error { } func resourceAwsIamRoleDelete(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn + conn := meta.(*AWSClient).iamconn - // Roles cannot be destroyed when attached to an existing Instance Profile - if err := deleteAwsIamRoleInstanceProfiles(iamconn, d.Id()); err != nil { - return fmt.Errorf("error deleting IAM Role (%s) instance profiles: %s", d.Id(), err) + err := deleteAwsIamRole(conn, d.Id(), d.Get("force_detach_policies").(bool)) + if tfawserr.ErrCodeEquals(err, iam.ErrCodeNoSuchEntityException) { + return nil + } + if err != nil { + return fmt.Errorf("error deleting IAM Role (%s): %w", d.Id(), err) } - if d.Get("force_detach_policies").(bool) { - // For managed policies - if err := deleteAwsIamRolePolicyAttachments(iamconn, d.Id()); err != nil { - return fmt.Errorf("error deleting IAM Role (%s) policy attachments: %s", d.Id(), err) + return nil +} + +func deleteAwsIamRole(conn *iam.IAM, rolename string, forceDetach bool) error { + if err := deleteAwsIamRoleInstanceProfiles(conn, rolename); err != nil { + return fmt.Errorf("unable to detach instance profiles: %w", err) + } + + if forceDetach { + if err := deleteAwsIamRolePolicyAttachments(conn, rolename); err != nil { + return fmt.Errorf("unable to detach policies: %w", err) } - // For inline policies - if err := deleteAwsIamRolePolicies(iamconn, d.Id()); err != nil { - return fmt.Errorf("error deleting IAM Role (%s) policies: %s", d.Id(), err) + if err := deleteAwsIamRolePolicies(conn, rolename); err != nil { + return fmt.Errorf("unable to delete inline policies: %w", err) } } deleteRoleInput := &iam.DeleteRoleInput{ - RoleName: aws.String(d.Id()), + RoleName: aws.String(rolename), } - - // IAM is eventually consistent and deletion of attached policies may take time - err := resource.Retry(30*time.Second, func() *resource.RetryError { - _, err := iamconn.DeleteRole(deleteRoleInput) + 
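	// IAM is eventually consistent: the DeleteRole call below is retried on
	// DeleteConflict errors, bounded by waiter.PropagationTimeout, while policy
	// and instance profile detachments propagate.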
log.Printf("[DEBUG] Deleting IAM Role: %s", deleteRoleInput) + err := resource.Retry(waiter.PropagationTimeout, func() *resource.RetryError { + _, err := conn.DeleteRole(deleteRoleInput) if err != nil { - if isAWSErr(err, iam.ErrCodeDeleteConflictException, "") { + if tfawserr.ErrCodeEquals(err, iam.ErrCodeDeleteConflictException) { return resource.RetryableError(err) } @@ -347,30 +357,21 @@ func resourceAwsIamRoleDelete(d *schema.ResourceData, meta interface{}) error { return nil }) if isResourceTimeoutError(err) { - _, err = iamconn.DeleteRole(deleteRoleInput) - } - - if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") { - return nil + _, err = conn.DeleteRole(deleteRoleInput) } - if err != nil { - return fmt.Errorf("Error deleting IAM Role (%s): %s", d.Id(), err) - } - return nil + return err } func deleteAwsIamRoleInstanceProfiles(conn *iam.IAM, rolename string) error { resp, err := conn.ListInstanceProfilesForRole(&iam.ListInstanceProfilesForRoleInput{ RoleName: aws.String(rolename), }) - - if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") { + if tfawserr.ErrCodeEquals(err, iam.ErrCodeNoSuchEntityException) { return nil } - if err != nil { - return fmt.Errorf("Error listing Profiles for IAM Role (%s) when trying to delete: %s", rolename, err) + return err } // Loop and remove this Role from any Profiles @@ -380,14 +381,13 @@ func deleteAwsIamRoleInstanceProfiles(conn *iam.IAM, rolename string) error { RoleName: aws.String(rolename), } + log.Printf("[DEBUG] Detaching Role from Instance Profile: %s", input) _, err := conn.RemoveRoleFromInstanceProfile(input) - - if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") { + if tfawserr.ErrCodeEquals(err, iam.ErrCodeNoSuchEntityException) { continue } - if err != nil { - return fmt.Errorf("Error deleting IAM Role (%s) Instance Profile (%s): %s", rolename, aws.StringValue(i.InstanceProfileName), err) + return err } } @@ -406,28 +406,26 @@ func deleteAwsIamRolePolicyAttachments(conn *iam.IAM, rolename string) error { } return !lastPage }) - - if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") { + if tfawserr.ErrCodeEquals(err, iam.ErrCodeNoSuchEntityException) { return nil } - if err != nil { - return fmt.Errorf("Error listing Policies for IAM Role (%s) when trying to delete: %s", rolename, err) + return err } + for _, parn := range managedPolicies { input := &iam.DetachRolePolicyInput{ PolicyArn: parn, RoleName: aws.String(rolename), } + log.Printf("[DEBUG] Detaching Policy from Role: %s", input) _, err = conn.DetachRolePolicy(input) - - if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") { + if tfawserr.ErrCodeEquals(err, iam.ErrCodeNoSuchEntityException) { continue } - if err != nil { - return fmt.Errorf("Error deleting IAM Role %s: %s", rolename, err) + return err } } @@ -444,9 +442,11 @@ func deleteAwsIamRolePolicies(conn *iam.IAM, rolename string) error { inlinePolicies = append(inlinePolicies, page.PolicyNames...) 
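		// Returning !lastPage keeps the pagination callback iterating until
		// every page of inline policy names has been accumulated.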
return !lastPage }) - + if tfawserr.ErrCodeEquals(err, iam.ErrCodeNoSuchEntityException) { + return nil + } if err != nil { - return fmt.Errorf("Error listing inline Policies for IAM Role (%s) when trying to delete: %s", rolename, err) + return err } for _, pname := range inlinePolicies { @@ -455,14 +455,13 @@ func deleteAwsIamRolePolicies(conn *iam.IAM, rolename string) error { RoleName: aws.String(rolename), } + log.Printf("[DEBUG] Deleting Inline Policy from Role: %s", input) _, err := conn.DeleteRolePolicy(input) - - if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") { - continue + if tfawserr.ErrCodeEquals(err, iam.ErrCodeNoSuchEntityException) { + return nil } - if err != nil { - return fmt.Errorf("Error deleting inline policy of IAM Role %s: %s", rolename, err) + return err } } diff --git a/aws/resource_aws_iam_role_test.go b/aws/resource_aws_iam_role_test.go index 0a35e254ec6..8a4be8ec92f 100644 --- a/aws/resource_aws_iam_role_test.go +++ b/aws/resource_aws_iam_role_test.go @@ -10,6 +10,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" @@ -115,38 +117,29 @@ func testSweepIamRoles(region string) error { return nil } + var sweeperErrs *multierror.Error + for _, role := range roles { rolename := aws.StringValue(role.RoleName) - log.Printf("[DEBUG] Deleting IAM Role: %s", rolename) - - if err := deleteAwsIamRoleInstanceProfiles(conn, rolename); err != nil { - return fmt.Errorf("error deleting IAM Role (%s) instance profiles: %s", rolename, err) - } - - if err := deleteAwsIamRolePolicyAttachments(conn, rolename); err != nil { - return fmt.Errorf("error deleting IAM Role (%s) policy attachments: %s", rolename, err) - } - - if err := deleteAwsIamRolePolicies(conn, rolename); err != nil { - return fmt.Errorf("error deleting IAM Role (%s) policies: %s", rolename, err) - } + log.Printf("[DEBUG] Deleting IAM Role (%s)", rolename) - input := &iam.DeleteRoleInput{ - RoleName: aws.String(rolename), + err := deleteAwsIamRole(conn, rolename, true) + if tfawserr.ErrCodeEquals(err, iam.ErrCodeNoSuchEntityException) { + continue } - - _, err := conn.DeleteRole(input) - - if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") { + if testSweepSkipResourceError(err) { + log.Printf("[WARN] Skipping IAM Role (%s): %s", rolename, err) continue } - if err != nil { - return fmt.Errorf("Error deleting IAM Role (%s): %s", rolename, err) + sweeperErr := fmt.Errorf("error deleting IAM Role (%s): %w", rolename, err) + log.Printf("[ERROR] %s", sweeperErr) + sweeperErrs = multierror.Append(sweeperErrs, sweeperErr) + continue } } - return nil + return sweeperErrs.ErrorOrNil() } func TestAccAWSIAMRole_basic(t *testing.T) { From a43f90480fd6a718fbdd8bb76f7fd7b92d1000e0 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 3 Dec 2020 13:36:12 -0800 Subject: [PATCH 0091/1212] Uses correct code for IAM --- aws/provider_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/provider_test.go b/aws/provider_test.go index 69f34033d74..0945c25ddd5 100644 --- a/aws/provider_test.go +++ b/aws/provider_test.go @@ -1067,7 +1067,7 @@ func testSweepSkipSweepError(err error) bool { func testSweepSkipResourceError(err error) bool { // Since acceptance test sweepers 
are best effort, we allow bypassing this error globally // instead of individual test sweeper fixes. - return tfawserr.ErrCodeEquals(err, "AccessDeniedException") + return tfawserr.ErrCodeEquals(err, "AccessDenied") } func TestAccAWSProvider_Endpoints(t *testing.T) { From 57ab217812db4e6440625aba46d6bd704df9b9f7 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 3 Dec 2020 14:18:01 -0800 Subject: [PATCH 0092/1212] Removes DEBUG log statements --- aws/resource_aws_iam_role.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/aws/resource_aws_iam_role.go b/aws/resource_aws_iam_role.go index 9c5f149a50e..39660bdd646 100644 --- a/aws/resource_aws_iam_role.go +++ b/aws/resource_aws_iam_role.go @@ -344,7 +344,6 @@ func deleteAwsIamRole(conn *iam.IAM, rolename string, forceDetach bool) error { deleteRoleInput := &iam.DeleteRoleInput{ RoleName: aws.String(rolename), } - log.Printf("[DEBUG] Deleting IAM Role: %s", deleteRoleInput) err := resource.Retry(waiter.PropagationTimeout, func() *resource.RetryError { _, err := conn.DeleteRole(deleteRoleInput) if err != nil { @@ -381,7 +380,6 @@ func deleteAwsIamRoleInstanceProfiles(conn *iam.IAM, rolename string) error { RoleName: aws.String(rolename), } - log.Printf("[DEBUG] Detaching Role from Instance Profile: %s", input) _, err := conn.RemoveRoleFromInstanceProfile(input) if tfawserr.ErrCodeEquals(err, iam.ErrCodeNoSuchEntityException) { continue @@ -419,7 +417,6 @@ func deleteAwsIamRolePolicyAttachments(conn *iam.IAM, rolename string) error { RoleName: aws.String(rolename), } - log.Printf("[DEBUG] Detaching Policy from Role: %s", input) _, err = conn.DetachRolePolicy(input) if tfawserr.ErrCodeEquals(err, iam.ErrCodeNoSuchEntityException) { continue @@ -455,7 +452,6 @@ func deleteAwsIamRolePolicies(conn *iam.IAM, rolename string) error { RoleName: aws.String(rolename), } - log.Printf("[DEBUG] Deleting Inline Policy from Role: %s", input) _, err := conn.DeleteRolePolicy(input) if tfawserr.ErrCodeEquals(err, iam.ErrCodeNoSuchEntityException) { return nil From 2be066f90b3b83bef009beded5103da81599d87a Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 3 Dec 2020 14:25:50 -0800 Subject: [PATCH 0093/1212] Allows skipping GuardDuty Detector in sweepers --- aws/provider_test.go | 9 +++++++++ aws/resource_aws_guardduty_detector_test.go | 9 ++++++--- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/aws/provider_test.go b/aws/provider_test.go index 79bebab0c85..c83a667ca6f 100644 --- a/aws/provider_test.go +++ b/aws/provider_test.go @@ -18,6 +18,7 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/organizations" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -1061,6 +1062,14 @@ func testSweepSkipSweepError(err error) bool { return false } +// Check sweeper API call error for reasons to skip a specific resource +// These include AccessDenied or AccessDeniedException for individual resources, e.g. managed by central IT +func testSweepSkipResourceError(err error) bool { + // Since acceptance test sweepers are best effort, we allow bypassing this error globally + // instead of individual test sweeper fixes. 
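	// Illustrative matches (not exhaustive): ErrCodeContains performs a
	// substring match on the error code, so this single check covers both
	// spellings named in the comment above:
	//
	//	"AccessDenied"          (e.g. IAM)       -> skipped
	//	"AccessDeniedException" (e.g. GuardDuty) -> skipped
	//	"UnauthorizedOperation"                  -> not skipped; surfaced to the sweeper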
+ return tfawserr.ErrCodeContains(err, "AccessDenied") +} + func TestAccAWSProvider_Endpoints(t *testing.T) { var providers []*schema.Provider var endpoints strings.Builder diff --git a/aws/resource_aws_guardduty_detector_test.go b/aws/resource_aws_guardduty_detector_test.go index 4cf851a7a35..e41b211b667 100644 --- a/aws/resource_aws_guardduty_detector_test.go +++ b/aws/resource_aws_guardduty_detector_test.go @@ -25,7 +25,7 @@ func testSweepGuarddutyDetectors(region string) error { client, err := sharedClientForRegion(region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("error getting client: %w", err) } conn := client.(*AWSClient).guarddutyconn @@ -41,7 +41,10 @@ func testSweepGuarddutyDetectors(region string) error { log.Printf("[INFO] Deleting GuardDuty Detector: %s", id) _, err := conn.DeleteDetector(input) - + if testSweepSkipResourceError(err) { + log.Printf("[WARN] Skipping GuardDuty Detector (%s): %s", id, err) + continue + } if err != nil { sweeperErr := fmt.Errorf("error deleting GuardDuty Detector (%s): %w", id, err) log.Printf("[ERROR] %s", sweeperErr) @@ -58,7 +61,7 @@ func testSweepGuarddutyDetectors(region string) error { } if err != nil { - return fmt.Errorf("error retrieving GuardDuty Detectors: %s", err) + return fmt.Errorf("error retrieving GuardDuty Detectors: %w", err) } return sweeperErrs.ErrorOrNil() From 5e22dfc379ba52d0775c53fbcbfe3218ed813afc Mon Sep 17 00:00:00 2001 From: Andrew Babichev Date: Fri, 27 Nov 2020 09:56:53 +0200 Subject: [PATCH 0094/1212] aws_workspaces_workspace: Add error code for the failed requests --- aws/resource_aws_workspaces_workspace.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_workspaces_workspace.go b/aws/resource_aws_workspaces_workspace.go index aff74faabd3..8a8f71d374b 100644 --- a/aws/resource_aws_workspaces_workspace.go +++ b/aws/resource_aws_workspaces_workspace.go @@ -171,7 +171,7 @@ func resourceAwsWorkspacesWorkspaceCreate(d *schema.ResourceData, meta interface wsFail := resp.FailedRequests if len(wsFail) > 0 { - return fmt.Errorf("workspace creation failed: %s", *wsFail[0].ErrorMessage) + return fmt.Errorf("workspace creation failed: %s: %s", aws.StringValue(wsFail[0].ErrorCode), aws.StringValue(wsFail[0].ErrorMessage)) } workspaceID := aws.StringValue(resp.PendingRequests[0].WorkspaceId) @@ -288,7 +288,7 @@ func resourceAwsWorkspacesWorkspaceDelete(d *schema.ResourceData, meta interface func workspaceDelete(conn *workspaces.WorkSpaces, id string, timeout time.Duration) error { log.Printf("[DEBUG] Terminating workspace %q", id) - _, err := conn.TerminateWorkspaces(&workspaces.TerminateWorkspacesInput{ + resp, err := conn.TerminateWorkspaces(&workspaces.TerminateWorkspacesInput{ TerminateWorkspaceRequests: []*workspaces.TerminateRequest{ { WorkspaceId: aws.String(id), @@ -299,6 +299,11 @@ func workspaceDelete(conn *workspaces.WorkSpaces, id string, timeout time.Durati return err } + wsFail := resp.FailedRequests + if len(wsFail) > 0 { + return fmt.Errorf("workspace termination failed: %s: %s", aws.StringValue(wsFail[0].ErrorCode), aws.StringValue(wsFail[0].ErrorMessage)) + } + log.Printf("[DEBUG] Waiting for workspace %q to be terminated", id) _, err = waiter.WorkspaceTerminated(conn, id, timeout) if err != nil { From c5b5d532dbc224feeb07221b9932081c92d80769 Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Fri, 4 Dec 2020 09:44:27 -0800 Subject: [PATCH 0095/1212] specify master as checkout ref to ensure next milestone can be found --- 
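Pinning the checkout to `ref: master` makes the milestone job read CHANGELOG.md
from the main branch instead of the pull request merge ref, so the next
milestone version can be computed. A later patch in this series (0098, below)
generalizes this to `${{ github.event.pull_request.base.ref }}`.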
.github/workflows/milestone.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/milestone.yml b/.github/workflows/milestone.yml index ef065870cd2..70103d7fd93 100644 --- a/.github/workflows/milestone.yml +++ b/.github/workflows/milestone.yml @@ -8,6 +8,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + with: + ref: master - id: get-current-milestone run: | echo ::set-output name=current_milestone::v$(head -1 CHANGELOG.md | cut -d " " -f 2) From 9e7d13d25d24a739ae3ba196fc02772077ca73e2 Mon Sep 17 00:00:00 2001 From: Mark Tranter Date: Sat, 5 Dec 2020 07:58:55 +1100 Subject: [PATCH 0096/1212] Update documentation for aws_lambda_function 'runtime' parameter --- website/docs/r/lambda_function.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/lambda_function.html.markdown b/website/docs/r/lambda_function.html.markdown index 2fd39b66558..36d63545464 100644 --- a/website/docs/r/lambda_function.html.markdown +++ b/website/docs/r/lambda_function.html.markdown @@ -223,7 +223,7 @@ large files efficiently. * `description` - (Optional) Description of what your Lambda Function does. * `layers` - (Optional) List of Lambda Layer Version ARNs (maximum of 5) to attach to your Lambda Function. See [Lambda Layers][10] * `memory_size` - (Optional) Amount of memory in MB your Lambda Function can use at runtime. Defaults to `128`. See [Limits][5] -* `runtime` - (Required) See [Runtimes][6] for valid values. +* `runtime` - (Optional) See [Runtimes][6] for valid values. * `timeout` - (Optional) The amount of time your Lambda Function has to run in seconds. Defaults to `3`. See [Limits][5] * `reserved_concurrent_executions` - (Optional) The amount of reserved concurrent executions for this lambda function. A value of `0` disables lambda from being triggered and `-1` removes any concurrency limitations. Defaults to Unreserved Concurrency Limits `-1`. See [Managing Concurrency][9] * `publish` - (Optional) Whether to publish creation/change as new Lambda Function Version. Defaults to `false`. 
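Context for the `runtime` doc change above: as of provider 3.20, a Lambda function can be packaged as a container image, in which case no runtime (or handler) applies, which is presumably why the argument is documented as Optional rather than Required. A minimal sketch, assuming the `package_type` and `image_uri` arguments available in that release; the role reference and ECR image URI are illustrative placeholders:

resource "aws_lambda_function" "example" {
  function_name = "example"
  role          = aws_iam_role.example.arn # hypothetical role defined elsewhere
  package_type  = "Image"                  # container image: no runtime or handler needed
  image_uri     = "123456789012.dkr.ecr.us-east-1.amazonaws.com/example:latest"
}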
From 0041efca79e1eeb955f6d81c7c82869645eba590 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 4 Dec 2020 23:56:54 +0200 Subject: [PATCH 0097/1212] remove validation --- aws/resource_aws_backup_plan.go | 27 +++++---------------------- 1 file changed, 5 insertions(+), 22 deletions(-) diff --git a/aws/resource_aws_backup_plan.go b/aws/resource_aws_backup_plan.go index d5dd49b6267..34cbe737214 100644 --- a/aws/resource_aws_backup_plan.go +++ b/aws/resource_aws_backup_plan.go @@ -101,9 +101,8 @@ func resourceAwsBackupPlan() *schema.Resource { Optional: true, }, "delete_after": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntAtLeast(90), + Type: schema.TypeInt, + Optional: true, }, }, }, @@ -314,19 +313,8 @@ func expandBackupPlanRules(vRules *schema.Set) []*backup.RuleInput { rule.RecoveryPointTags = keyvaluetags.New(vRecoveryPointTags).IgnoreAws().BackupTags() } - if vLifecycle, ok := mRule["lifecycle"].([]interface{}); ok && len(vLifecycle) > 0 && vLifecycle[0] != nil { - lifecycle := &backup.Lifecycle{} - - mLifecycle := vLifecycle[0].(map[string]interface{}) - - if vDeleteAfter, ok := mLifecycle["delete_after"].(int); ok && vDeleteAfter > 0 { - lifecycle.DeleteAfterDays = aws.Int64(int64(vDeleteAfter)) - } - if vColdStorageAfter, ok := mLifecycle["cold_storage_after"].(int); ok && vColdStorageAfter > 0 { - lifecycle.MoveToColdStorageAfterDays = aws.Int64(int64(vColdStorageAfter)) - } - - rule.Lifecycle = lifecycle + if vLifecycle, ok := mRule["lifecycle"].([]interface{}); ok && len(vLifecycle) > 0 { + rule.Lifecycle = expandBackupPlanLifecycle(vLifecycle) } if vCopyActions := expandBackupPlanCopyActions(mRule["copy_action"].(*schema.Set).List()); len(vCopyActions) > 0 { @@ -409,12 +397,7 @@ func flattenBackupPlanRules(rules []*backup.Rule) *schema.Set { } if lifecycle := rule.Lifecycle; lifecycle != nil { - mRule["lifecycle"] = []interface{}{ - map[string]interface{}{ - "delete_after": int(aws.Int64Value(lifecycle.DeleteAfterDays)), - "cold_storage_after": int(aws.Int64Value(lifecycle.MoveToColdStorageAfterDays)), - }, - } + mRule["lifecycle"] = flattenBackupPlanCopyActionLifecycle(lifecycle) } mRule["copy_action"] = flattenBackupPlanCopyActions(rule.CopyActions) From 74649fcf0ee4dc85954f1e9b682fc0e661e41f01 Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Fri, 4 Dec 2020 15:04:19 -0800 Subject: [PATCH 0098/1212] use base ref from event payload --- .github/workflows/milestone.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/milestone.yml b/.github/workflows/milestone.yml index 70103d7fd93..8e93930efe1 100644 --- a/.github/workflows/milestone.yml +++ b/.github/workflows/milestone.yml @@ -9,7 +9,7 @@ jobs: steps: - uses: actions/checkout@v2 with: - ref: master + ref: ${{ github.event.pull_request.base.ref }} - id: get-current-milestone run: | echo ::set-output name=current_milestone::v$(head -1 CHANGELOG.md | cut -d " " -f 2) From 09ab3277f2f94c3121a50dba42486abdee101373 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Fri, 4 Dec 2020 15:09:28 -0800 Subject: [PATCH 0099/1212] Moves nullable to experimental --- aws/internal/{ => experimental}/nullable/int.go | 0 aws/internal/{ => experimental}/nullable/int_test.go | 0 aws/internal/{ => experimental}/nullable/testing.go | 0 aws/resource_aws_autoscaling_group.go | 5 ++--- 4 files changed, 2 insertions(+), 3 deletions(-) rename aws/internal/{ => experimental}/nullable/int.go (100%) rename aws/internal/{ => experimental}/nullable/int_test.go (100%) rename aws/internal/{ => 
experimental}/nullable/testing.go (100%) diff --git a/aws/internal/nullable/int.go b/aws/internal/experimental/nullable/int.go similarity index 100% rename from aws/internal/nullable/int.go rename to aws/internal/experimental/nullable/int.go diff --git a/aws/internal/nullable/int_test.go b/aws/internal/experimental/nullable/int_test.go similarity index 100% rename from aws/internal/nullable/int_test.go rename to aws/internal/experimental/nullable/int_test.go diff --git a/aws/internal/nullable/testing.go b/aws/internal/experimental/nullable/testing.go similarity index 100% rename from aws/internal/nullable/testing.go rename to aws/internal/experimental/nullable/testing.go diff --git a/aws/resource_aws_autoscaling_group.go b/aws/resource_aws_autoscaling_group.go index 6d54e890ca8..b420d9fc8b7 100644 --- a/aws/resource_aws_autoscaling_group.go +++ b/aws/resource_aws_autoscaling_group.go @@ -20,9 +20,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/experimental/nullable" "github.com/terraform-providers/terraform-provider-aws/aws/internal/hashcode" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/nullable" "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/autoscaling/waiter" ) @@ -1867,8 +1867,7 @@ func expandAutoScalingGroupInstanceRefreshPreferences(l []interface{}) *autoscal refreshPreferences := &autoscaling.RefreshPreferences{} if v, ok := m["instance_warmup"]; ok { - i := nullable.Int(v.(string)) - if v, null, _ := i.Value(); !null { + if v, null, _ := nullable.Int(v.(string)).Value(); !null { refreshPreferences.InstanceWarmup = aws.Int64(v) } } From 7abd257ef10dcc7f1a08c50dec847e2048e5e42d Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Fri, 4 Dec 2020 15:15:20 -0800 Subject: [PATCH 0100/1212] Updates naming to "Auto Scaling Group" --- aws/resource_aws_autoscaling_group.go | 98 +++++++++---------- aws/resource_aws_autoscaling_group_test.go | 14 +-- aws/resource_aws_autoscaling_group_waiting.go | 6 +- 3 files changed, 59 insertions(+), 59 deletions(-) diff --git a/aws/resource_aws_autoscaling_group.go b/aws/resource_aws_autoscaling_group.go index b420d9fc8b7..4b1d1c3c2db 100644 --- a/aws/resource_aws_autoscaling_group.go +++ b/aws/resource_aws_autoscaling_group.go @@ -617,7 +617,7 @@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{}) launchTemplateValue, launchTemplateOk := d.GetOk("launch_template") if createOpts.MixedInstancesPolicy == nil && !launchConfigurationOk && !launchTemplateOk { - return fmt.Errorf("One of `launch_configuration`, `launch_template`, or `mixed_instances_policy` must be set for an autoscaling group") + return fmt.Errorf("One of `launch_configuration`, `launch_template`, or `mixed_instances_policy` must be set for an Auto Scaling Group") } if launchConfigurationOk { @@ -688,7 +688,7 @@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{}) createOpts.MaxInstanceLifetime = aws.Int64(int64(v.(int))) } - log.Printf("[DEBUG] AutoScaling Group create configuration: %#v", createOpts) + log.Printf("[DEBUG] Auto Scaling Group create configuration: %#v", createOpts) // Retry for IAM eventual consistency err := resource.Retry(1*time.Minute, func() *resource.RetryError { 
@@ -709,11 +709,11 @@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{}) _, err = conn.CreateAutoScalingGroup(&createOpts) } if err != nil { - return fmt.Errorf("Error creating AutoScaling Group: %s", err) + return fmt.Errorf("Error creating Auto Scaling Group: %s", err) } d.SetId(d.Get("name").(string)) - log.Printf("[INFO] AutoScaling Group ID: %s", d.Id()) + log.Printf("[INFO] Auto Scaling Group ID: %s", d.Id()) if twoPhases { for _, hook := range generatePutLifecycleHookInputs(asgName, initialLifecycleHooks) { @@ -724,7 +724,7 @@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{}) _, err = conn.UpdateAutoScalingGroup(&updateOpts) if err != nil { - return fmt.Errorf("Error setting AutoScaling Group initial capacity: %s", err) + return fmt.Errorf("Error setting Auto Scaling Group initial capacity: %s", err) } } @@ -759,7 +759,7 @@ func resourceAwsAutoscalingGroupRead(d *schema.ResourceData, meta interface{}) e return err } if g == nil { - log.Printf("[WARN] Autoscaling Group (%s) not found, removing from state", d.Id()) + log.Printf("[WARN] Auto Scaling Group (%s) not found, removing from state", d.Id()) d.SetId("") return nil } @@ -1056,10 +1056,10 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) } } - log.Printf("[DEBUG] AutoScaling Group update configuration: %#v", opts) + log.Printf("[DEBUG] Auto Scaling Group update configuration: %#v", opts) _, err := conn.UpdateAutoScalingGroup(&opts) if err != nil { - return fmt.Errorf("Error updating Autoscaling group: %s", err) + return fmt.Errorf("Error updating Auto Scaling Group: %s", err) } if d.HasChange("load_balancers") { @@ -1095,11 +1095,11 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) }) if err != nil { - return fmt.Errorf("error detaching AutoScaling Group (%s) Load Balancers: %s", d.Id(), err) + return fmt.Errorf("error detaching Auto Scaling Group (%s) Load Balancers: %s", d.Id(), err) } if err := waitUntilAutoscalingGroupLoadBalancersRemoved(conn, d.Id()); err != nil { - return fmt.Errorf("error describing AutoScaling Group (%s) Load Balancers being removed: %s", d.Id(), err) + return fmt.Errorf("error describing Auto Scaling Group (%s) Load Balancers being removed: %s", d.Id(), err) } } } @@ -1122,11 +1122,11 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) }) if err != nil { - return fmt.Errorf("error attaching AutoScaling Group (%s) Load Balancers: %s", d.Id(), err) + return fmt.Errorf("error attaching Auto Scaling Group (%s) Load Balancers: %s", d.Id(), err) } if err := waitUntilAutoscalingGroupLoadBalancersAdded(conn, d.Id()); err != nil { - return fmt.Errorf("error describing AutoScaling Group (%s) Load Balancers being added: %s", d.Id(), err) + return fmt.Errorf("error describing Auto Scaling Group (%s) Load Balancers being added: %s", d.Id(), err) } } } @@ -1164,11 +1164,11 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) TargetGroupARNs: batch, }) if err != nil { - return fmt.Errorf("Error updating Load Balancers Target Groups for AutoScaling Group (%s), error: %s", d.Id(), err) + return fmt.Errorf("Error updating Load Balancers Target Groups for Auto Scaling Group (%s), error: %s", d.Id(), err) } if err := waitUntilAutoscalingGroupLoadBalancerTargetGroupsRemoved(conn, d.Id()); err != nil { - return fmt.Errorf("error describing AutoScaling Group (%s) Load Balancer Target Groups being removed: %s", d.Id(), err) + return 
fmt.Errorf("error describing Auto Scaling Group (%s) Load Balancer Target Groups being removed: %s", d.Id(), err) } } @@ -1191,11 +1191,11 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) }) if err != nil { - return fmt.Errorf("Error updating Load Balancers Target Groups for AutoScaling Group (%s), error: %s", d.Id(), err) + return fmt.Errorf("Error updating Load Balancers Target Groups for Auto Scaling Group (%s), error: %s", d.Id(), err) } if err := waitUntilAutoscalingGroupLoadBalancerTargetGroupsAdded(conn, d.Id()); err != nil { - return fmt.Errorf("error describing AutoScaling Group (%s) Load Balancer Target Groups being added: %s", d.Id(), err) + return fmt.Errorf("error describing Auto Scaling Group (%s) Load Balancer Target Groups being added: %s", d.Id(), err) } } } @@ -1203,25 +1203,25 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) if shouldWaitForCapacity { if err := waitForASGCapacity(d, meta, capacitySatisfiedUpdate); err != nil { - return fmt.Errorf("Error waiting for AutoScaling Group Capacity: %s", err) + return fmt.Errorf("Error waiting for Auto Scaling Group Capacity: %s", err) } } if d.HasChange("enabled_metrics") { if err := updateASGMetricsCollection(d, conn); err != nil { - return fmt.Errorf("Error updating AutoScaling Group Metrics collection: %s", err) + return fmt.Errorf("Error updating Auto Scaling Group Metrics collection: %s", err) } } if d.HasChange("suspended_processes") { if err := updateASGSuspendedProcesses(d, conn); err != nil { - return fmt.Errorf("Error updating AutoScaling Group Suspended Processes: %s", err) + return fmt.Errorf("Error updating Auto Scaling Group Suspended Processes: %s", err) } } if instanceRefreshRaw, ok := d.GetOk("instance_refresh"); ok && shouldRefreshInstances { if err := autoScalingGroupRefreshInstances(conn, d.Id(), instanceRefreshRaw.([]interface{})); err != nil { - return fmt.Errorf("failed to start instance refresh of asg %s: %w", d.Id(), err) + return fmt.Errorf("failed to start instance refresh of Auto Scaling Group %s: %w", d.Id(), err) } } @@ -1231,7 +1231,7 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) func resourceAwsAutoscalingGroupDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).autoscalingconn - // Read the autoscaling group first. If it doesn't exist, we're done. + // Read the Auto Scaling Group first. If it doesn't exist, we're done. // We need the group in order to check if there are instances attached. // If so, we need to remove those first. 
g, err := getAwsAutoscalingGroup(d.Id(), conn) @@ -1239,7 +1239,7 @@ func resourceAwsAutoscalingGroupDelete(d *schema.ResourceData, meta interface{}) return err } if g == nil { - log.Printf("[WARN] Autoscaling Group (%s) not found, removing from state", d.Id()) + log.Printf("[WARN] Auto Scaling Group (%s) not found, removing from state", d.Id()) return nil } if len(g.Instances) > 0 || *g.DesiredCapacity > 0 { @@ -1248,7 +1248,7 @@ func resourceAwsAutoscalingGroupDelete(d *schema.ResourceData, meta interface{}) } } - log.Printf("[DEBUG] AutoScaling Group destroy: %v", d.Id()) + log.Printf("[DEBUG] Auto Scaling Group destroy: %v", d.Id()) deleteopts := autoscaling.DeleteAutoScalingGroupInput{ AutoScalingGroupName: aws.String(d.Id()), ForceDelete: aws.Bool(d.Get("force_delete").(bool)), @@ -1282,7 +1282,7 @@ func resourceAwsAutoscalingGroupDelete(d *schema.ResourceData, meta interface{}) } } if err != nil { - return fmt.Errorf("Error deleting autoscaling group: %s", err) + return fmt.Errorf("Error deleting Auto Scaling Group: %s", err) } var group *autoscaling.Group @@ -1301,7 +1301,7 @@ func resourceAwsAutoscalingGroupDelete(d *schema.ResourceData, meta interface{}) } } if err != nil { - return fmt.Errorf("Error deleting autoscaling group: %s", err) + return fmt.Errorf("Error deleting Auto Scaling Group: %s", err) } return nil } @@ -1313,7 +1313,7 @@ func getAwsAutoscalingGroup(asgName string, conn *autoscaling.AutoScaling) (*aut AutoScalingGroupNames: []*string{aws.String(asgName)}, } - log.Printf("[DEBUG] AutoScaling Group describe configuration: %#v", describeOpts) + log.Printf("[DEBUG] Auto Scaling Group describe configuration: %#v", describeOpts) describeGroups, err := conn.DescribeAutoScalingGroups(&describeOpts) if err != nil { autoscalingerr, ok := err.(awserr.Error) @@ -1321,10 +1321,10 @@ func getAwsAutoscalingGroup(asgName string, conn *autoscaling.AutoScaling) (*aut return nil, nil } - return nil, fmt.Errorf("Error retrieving AutoScaling groups: %s", err) + return nil, fmt.Errorf("Error retrieving Auto Scaling Groups: %s", err) } - // Search for the autoscaling group + // Search for the Auto Scaling Group for idx, asc := range describeGroups.AutoScalingGroups { if *asc.AutoScalingGroupName == asgName { return describeGroups.AutoScalingGroups[idx], nil @@ -1338,12 +1338,12 @@ func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{}) conn := meta.(*AWSClient).autoscalingconn if d.Get("force_delete").(bool) { - log.Printf("[DEBUG] Skipping ASG drain, force_delete was set.") + log.Printf("[DEBUG] Skipping Auto Scaling Group drain, force_delete was set.") return nil } // First, set the capacity to zero so the group will drain - log.Printf("[DEBUG] Reducing autoscaling group capacity to zero") + log.Printf("[DEBUG] Reducing Auto Scaling Group capacity to zero") opts := autoscaling.UpdateAutoScalingGroupInput{ AutoScalingGroupName: aws.String(d.Id()), DesiredCapacity: aws.Int64(0), @@ -1363,7 +1363,7 @@ func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{}) return resource.NonRetryableError(err) } if g == nil { - log.Printf("[WARN] Autoscaling Group (%s) not found, removing from state", d.Id()) + log.Printf("[WARN] Auto Scaling Group (%s) not found, removing from state", d.Id()) d.SetId("") return nil } @@ -1378,14 +1378,14 @@ func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{}) if isResourceTimeoutError(err) { g, err = getAwsAutoscalingGroup(d.Id(), conn) if err != nil { - return fmt.Errorf("Error getting 
autoscaling group info when draining: %s", err) + return fmt.Errorf("Error getting Auto Scaling Group info when draining: %s", err) } if g != nil && len(g.Instances) > 0 { return fmt.Errorf("Group still has %d instances", len(g.Instances)) } } if err != nil { - return fmt.Errorf("Error draining autoscaling group: %s", err) + return fmt.Errorf("Error draining Auto Scaling Group: %s", err) } return nil } @@ -1407,7 +1407,7 @@ func enableASGMetricsCollection(d *schema.ResourceData, conn *autoscaling.AutoSc Metrics: expandStringList(d.Get("enabled_metrics").(*schema.Set).List()), } - log.Printf("[INFO] Enabling metrics collection for the ASG: %s", d.Id()) + log.Printf("[INFO] Enabling metrics collection for the Auto Scaling Group: %s", d.Id()) _, metricsErr := conn.EnableMetricsCollection(props) return metricsErr @@ -1434,7 +1434,7 @@ func updateASGSuspendedProcesses(d *schema.ResourceData, conn *autoscaling.AutoS _, err := conn.ResumeProcesses(props) if err != nil { - return fmt.Errorf("Error Resuming Processes for ASG %q: %s", d.Id(), err) + return fmt.Errorf("Error Resuming Processes for Auto Scaling Group %q: %s", d.Id(), err) } } @@ -1447,7 +1447,7 @@ func updateASGSuspendedProcesses(d *schema.ResourceData, conn *autoscaling.AutoS _, err := conn.SuspendProcesses(props) if err != nil { - return fmt.Errorf("Error Suspending Processes for ASG %q: %s", d.Id(), err) + return fmt.Errorf("Error Suspending Processes for Auto Scaling Group %q: %s", d.Id(), err) } } @@ -1477,7 +1477,7 @@ func updateASGMetricsCollection(d *schema.ResourceData, conn *autoscaling.AutoSc _, err := conn.DisableMetricsCollection(props) if err != nil { - return fmt.Errorf("Failure to Disable metrics collection types for ASG %s: %s", d.Id(), err) + return fmt.Errorf("Failure to Disable metrics collection types for Auto Scaling Group %s: %s", d.Id(), err) } } @@ -1491,7 +1491,7 @@ func updateASGMetricsCollection(d *schema.ResourceData, conn *autoscaling.AutoSc _, err := conn.EnableMetricsCollection(props) if err != nil { - return fmt.Errorf("Failure to Enable metrics collection types for ASG %s: %s", d.Id(), err) + return fmt.Errorf("Failure to Enable metrics collection types for Auto Scaling Group %s: %s", d.Id(), err) } } @@ -1883,22 +1883,22 @@ func expandAutoScalingGroupInstanceRefreshPreferences(l []interface{}) *autoscal // Auto Scaling Group. If there is already an active refresh, it is cancelled. func autoScalingGroupRefreshInstances(conn *autoscaling.AutoScaling, asgName string, refreshConfig []interface{}) error { - log.Printf("[DEBUG] Cancelling active Instance Refresh in ASG %s, if any...", asgName) + log.Printf("[DEBUG] Cancelling active Instance Refresh in Auto Scaling Group %s, if any...", asgName) if err := cancelAutoscalingInstanceRefresh(conn, asgName); err != nil { - // todo: add comment about subsequent ASG updates not picking up the refresh? + // todo: add comment about subsequent Auto Scaling Group updates not picking up the refresh? 
return fmt.Errorf("failed to cancel previous refresh: %w", err) } input := createAutoScalingGroupInstanceRefreshInput(asgName, refreshConfig) - log.Printf("[DEBUG] Starting Instance Refresh on ASG (%s): %s", asgName, input) + log.Printf("[DEBUG] Starting Instance Refresh on Auto Scaling Group (%s): %s", asgName, input) output, err := conn.StartInstanceRefresh(input) if err != nil { return err } instanceRefreshID := aws.StringValue(output.InstanceRefreshId) - log.Printf("[INFO] Started Instance Refresh (%s) on ASG (%s)", instanceRefreshID, asgName) + log.Printf("[INFO] Started Instance Refresh (%s) on Auto Scaling Group (%s)", instanceRefreshID, asgName) return nil } @@ -1909,28 +1909,28 @@ func cancelAutoscalingInstanceRefresh(conn *autoscaling.AutoScaling, asgName str input := autoscaling.CancelInstanceRefreshInput{ AutoScalingGroupName: aws.String(asgName), } - log.Printf("[DEBUG] Attempting to cancel Instance Refresh on ASG (%s): %s", asgName, input) + log.Printf("[DEBUG] Attempting to cancel Instance Refresh on Auto Scaling Group (%s): %s", asgName, input) output, err := conn.CancelInstanceRefresh(&input) if tfawserr.ErrCodeEquals(err, autoscaling.ErrCodeActiveInstanceRefreshNotFoundFault) { - log.Printf("[DEBUG] No active Instance Refresh on ASG (%s)", asgName) + log.Printf("[DEBUG] No active Instance Refresh on Auto Scaling Group (%s)", asgName) return nil } if err != nil { - return fmt.Errorf("error cancelling Instance Refresh on ASG (%s): %w", asgName, err) + return fmt.Errorf("error cancelling Instance Refresh on Auto Scaling Group (%s): %w", asgName, err) } if output == nil { - return fmt.Errorf("error cancelling Instance Refresh on ASG (%s): empty result", asgName) + return fmt.Errorf("error cancelling Instance Refresh on Auto Scaling Group (%s): empty result", asgName) } instanceRefreshID := aws.StringValue(output.InstanceRefreshId) - log.Printf("[INFO] Requested cancellation of Instance Refresh (%s) on ASG (%s)", instanceRefreshID, asgName) + log.Printf("[INFO] Requested cancellation of Instance Refresh (%s) on Auto Scaling Group (%s)", instanceRefreshID, asgName) _, err = waiter.InstanceRefreshCancelled(conn, asgName, instanceRefreshID) if err != nil { - return fmt.Errorf("error waiting for cancellation of Instance Refresh (%s) on ASG (%s): %w", instanceRefreshID, asgName, err) + return fmt.Errorf("error waiting for cancellation of Instance Refresh (%s) on Auto Scaling Group (%s): %w", instanceRefreshID, asgName, err) } - log.Printf("[INFO] Cancelled Instance Refresh (%s) on ASG (%s)", instanceRefreshID, asgName) + log.Printf("[INFO] Cancelled Instance Refresh (%s) on Auto Scaling Group (%s)", instanceRefreshID, asgName) return nil } diff --git a/aws/resource_aws_autoscaling_group_test.go b/aws/resource_aws_autoscaling_group_test.go index 441c9ae8de8..f86eb1ec17c 100644 --- a/aws/resource_aws_autoscaling_group_test.go +++ b/aws/resource_aws_autoscaling_group_test.go @@ -37,14 +37,14 @@ func testSweepAutoscalingGroups(region string) error { resp, err := conn.DescribeAutoScalingGroups(&autoscaling.DescribeAutoScalingGroupsInput{}) if err != nil { if testSweepSkipSweepError(err) { - log.Printf("[WARN] Skipping AutoScaling Group sweep for %s: %s", region, err) + log.Printf("[WARN] Skipping Auto Scaling Group sweep for %s: %s", region, err) return nil } - return fmt.Errorf("Error retrieving AutoScaling Groups in Sweeper: %s", err) + return fmt.Errorf("Error retrieving Auto Scaling Groups in Sweeper: %s", err) } if len(resp.AutoScalingGroups) == 0 { - log.Print("[DEBUG] No aws 
autoscaling groups to sweep") + log.Print("[DEBUG] No Auto Scaling Groups to sweep") return nil } @@ -1124,7 +1124,7 @@ func testAccCheckAWSAutoScalingGroupExists(n string, group *autoscaling.Group) r } if rs.Primary.ID == "" { - return fmt.Errorf("No AutoScaling Group ID is set") + return fmt.Errorf("No Auto Scaling Group ID is set") } conn := testAccProvider.Meta().(*AWSClient).autoscalingconn @@ -1140,7 +1140,7 @@ func testAccCheckAWSAutoScalingGroupExists(n string, group *autoscaling.Group) r if len(describeGroups.AutoScalingGroups) != 1 || *describeGroups.AutoScalingGroups[0].AutoScalingGroupName != rs.Primary.ID { - return fmt.Errorf("AutoScaling Group not found") + return fmt.Errorf("Auto Scaling Group not found") } *group = *describeGroups.AutoScalingGroups[0] @@ -1166,7 +1166,7 @@ func testAccCheckAWSAutoScalingGroupDestroy(s *terraform.State) error { if err == nil { if len(describeGroups.AutoScalingGroups) != 0 && *describeGroups.AutoScalingGroups[0].AutoScalingGroupName == rs.Primary.ID { - return fmt.Errorf("AutoScaling Group still exists") + return fmt.Errorf("Auto Scaling Group still exists") } } @@ -1186,7 +1186,7 @@ func testAccCheckAWSAutoScalingGroupDestroy(s *terraform.State) error { func testAccCheckAWSAutoScalingGroupAttributes(group *autoscaling.Group, name string) resource.TestCheckFunc { return func(s *terraform.State) error { if *group.AutoScalingGroupName != name { - return fmt.Errorf("Bad Autoscaling Group name, expected (%s), got (%s)", name, *group.AutoScalingGroupName) + return fmt.Errorf("Bad Auto Scaling Group name, expected (%s), got (%s)", name, *group.AutoScalingGroupName) } if *group.MaxSize != 5 { diff --git a/aws/resource_aws_autoscaling_group_waiting.go b/aws/resource_aws_autoscaling_group_waiting.go index 1e3279c3d3d..4a432d149a1 100644 --- a/aws/resource_aws_autoscaling_group_waiting.go +++ b/aws/resource_aws_autoscaling_group_waiting.go @@ -41,7 +41,7 @@ func waitForASGCapacity( return resource.NonRetryableError(err) } if g == nil { - log.Printf("[WARN] Autoscaling Group (%s) not found, removing from state", d.Id()) + log.Printf("[WARN] Auto Scaling Group (%s) not found, removing from state", d.Id()) d.SetId("") return nil } @@ -57,11 +57,11 @@ func waitForASGCapacity( g, err := getAwsAutoscalingGroup(d.Id(), meta.(*AWSClient).autoscalingconn) if err != nil { - return fmt.Errorf("Error getting autoscaling group info: %s", err) + return fmt.Errorf("Error getting Auto Scaling Group info: %s", err) } if g == nil { - log.Printf("[WARN] Autoscaling Group (%s) not found, removing from state", d.Id()) + log.Printf("[WARN] Auto Scaling Group (%s) not found, removing from state", d.Id()) d.SetId("") return nil } From 7cbb1e8425716fa95d30816388af4b5694dfa3ac Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Fri, 4 Dec 2020 15:20:48 -0800 Subject: [PATCH 0101/1212] Update CHANGELOG.md for #16167 --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 92d43bc8635..2252d8133c3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,10 @@ ## 3.21.0 (Unreleased) +ENHANCEMENTS + +* resource/aws_appmesh_virtual_node: Add `listener.connection_pool` attribute [GH-16167] +* resource/aws_appmesh_virtual_node: Add `listener.outlier_detection` attribute [GH-16167] + ## 3.20.0 (December 03, 2020) ENHANCEMENTS From 707d2e456b92fe16b7d8dc272707578b794c184d Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Fri, 4 Dec 2020 16:07:24 -0800 Subject: [PATCH 0102/1212] Only try to cancel Instance Refresh when we get an 
"InstanceRefreshInProgress" error --- .../service/autoscaling/waiter/waiter.go | 25 ++-------- aws/resource_aws_autoscaling_group.go | 48 ++++++++----------- 2 files changed, 26 insertions(+), 47 deletions(-) diff --git a/aws/internal/service/autoscaling/waiter/waiter.go b/aws/internal/service/autoscaling/waiter/waiter.go index f278c3f859a..b8985880b55 100644 --- a/aws/internal/service/autoscaling/waiter/waiter.go +++ b/aws/internal/service/autoscaling/waiter/waiter.go @@ -8,30 +8,15 @@ import ( ) const ( - // Maximum amount of time to wait for an InstanceRefresh to be Successful - InstanceRefreshSuccessfulTimeout = 5 * time.Minute + // Maximum amount of time to wait for an InstanceRefresh to be started + // Must be at least as long as InstanceRefreshCancelledTimeout, since we try to cancel any + // existing Instance Refreshes when starting. + InstanceRefreshStartedTimeout = InstanceRefreshCancelledTimeout - // Maximum amount of time to wait for an InstanceRefresh to be Cancelled + // Maximum amount of time to wait for an Instance Refresh to be Cancelled InstanceRefreshCancelledTimeout = 10 * time.Minute ) -func InstanceRefreshSuccessful(conn *autoscaling.AutoScaling, asgName, instanceRefreshId string) (*autoscaling.InstanceRefresh, error) { - stateConf := &resource.StateChangeConf{ - Pending: []string{autoscaling.InstanceRefreshStatusPending, autoscaling.InstanceRefreshStatusInProgress}, - Target: []string{autoscaling.InstanceRefreshStatusSuccessful}, - Refresh: InstanceRefreshStatus(conn, asgName, instanceRefreshId), - Timeout: InstanceRefreshSuccessfulTimeout, - } - - outputRaw, err := stateConf.WaitForState() - - if v, ok := outputRaw.(*autoscaling.InstanceRefresh); ok { - return v, err - } - - return nil, err -} - func InstanceRefreshCancelled(conn *autoscaling.AutoScaling, asgName, instanceRefreshId string) (*autoscaling.InstanceRefresh, error) { stateConf := &resource.StateChangeConf{ Pending: []string{autoscaling.InstanceRefreshStatusPending, autoscaling.InstanceRefreshStatusInProgress, autoscaling.InstanceRefreshStatusCancelling}, diff --git a/aws/resource_aws_autoscaling_group.go b/aws/resource_aws_autoscaling_group.go index 4b1d1c3c2db..4430c0dec7e 100644 --- a/aws/resource_aws_autoscaling_group.go +++ b/aws/resource_aws_autoscaling_group.go @@ -1776,6 +1776,7 @@ func waitUntilAutoscalingGroupLoadBalancersAdded(conn *autoscaling.AutoScaling, var lbAdding bool for { + // TODO: generate Pages function output, err := conn.DescribeLoadBalancers(input) if err != nil { @@ -1879,40 +1880,38 @@ func expandAutoScalingGroupInstanceRefreshPreferences(l []interface{}) *autoscal return refreshPreferences } -// autoScalingGroupRefreshInstances starts a new Instance Refresh in this -// Auto Scaling Group. If there is already an active refresh, it is cancelled. func autoScalingGroupRefreshInstances(conn *autoscaling.AutoScaling, asgName string, refreshConfig []interface{}) error { - - log.Printf("[DEBUG] Cancelling active Instance Refresh in Auto Scaling Group %s, if any...", asgName) - - if err := cancelAutoscalingInstanceRefresh(conn, asgName); err != nil { - // todo: add comment about subsequent Auto Scaling Group updates not picking up the refresh? 
- return fmt.Errorf("failed to cancel previous refresh: %w", err) - } - input := createAutoScalingGroupInstanceRefreshInput(asgName, refreshConfig) - log.Printf("[DEBUG] Starting Instance Refresh on Auto Scaling Group (%s): %s", asgName, input) - output, err := conn.StartInstanceRefresh(input) + err := resource.Retry(waiter.InstanceRefreshStartedTimeout, func() *resource.RetryError { + _, err := conn.StartInstanceRefresh(input) + if tfawserr.ErrCodeEquals(err, autoscaling.ErrCodeInstanceRefreshInProgressFault) { + cancelErr := cancelAutoscalingInstanceRefresh(conn, asgName) + if cancelErr != nil { + return resource.NonRetryableError(cancelErr) + } + return resource.RetryableError(err) + } + if err != nil { + return resource.NonRetryableError(err) + } + return nil + }) + if isResourceTimeoutError(err) { + _, err = conn.StartInstanceRefresh(input) + } if err != nil { - return err + return fmt.Errorf("error starting Instance Refresh: %w", err) } - instanceRefreshID := aws.StringValue(output.InstanceRefreshId) - - log.Printf("[INFO] Started Instance Refresh (%s) on Auto Scaling Group (%s)", instanceRefreshID, asgName) return nil } -// cancelAutoscalingInstanceRefresh cancels the currently active Instance Refresh -// of this Auto-Scaling Group, if any, and waits until the refresh is Cancelled. func cancelAutoscalingInstanceRefresh(conn *autoscaling.AutoScaling, asgName string) error { input := autoscaling.CancelInstanceRefreshInput{ AutoScalingGroupName: aws.String(asgName), } - log.Printf("[DEBUG] Attempting to cancel Instance Refresh on Auto Scaling Group (%s): %s", asgName, input) output, err := conn.CancelInstanceRefresh(&input) if tfawserr.ErrCodeEquals(err, autoscaling.ErrCodeActiveInstanceRefreshNotFoundFault) { - log.Printf("[DEBUG] No active Instance Refresh on Auto Scaling Group (%s)", asgName) return nil } if err != nil { @@ -1922,15 +1921,10 @@ func cancelAutoscalingInstanceRefresh(conn *autoscaling.AutoScaling, asgName str return fmt.Errorf("error cancelling Instance Refresh on Auto Scaling Group (%s): empty result", asgName) } - instanceRefreshID := aws.StringValue(output.InstanceRefreshId) - log.Printf("[INFO] Requested cancellation of Instance Refresh (%s) on Auto Scaling Group (%s)", instanceRefreshID, asgName) - - _, err = waiter.InstanceRefreshCancelled(conn, asgName, instanceRefreshID) + _, err = waiter.InstanceRefreshCancelled(conn, asgName, aws.StringValue(output.InstanceRefreshId)) if err != nil { - return fmt.Errorf("error waiting for cancellation of Instance Refresh (%s) on Auto Scaling Group (%s): %w", instanceRefreshID, asgName, err) + return fmt.Errorf("error waiting for cancellation of Instance Refresh (%s) on Auto Scaling Group (%s): %w", aws.StringValue(output.InstanceRefreshId), asgName, err) } - log.Printf("[INFO] Cancelled Instance Refresh (%s) on Auto Scaling Group (%s)", instanceRefreshID, asgName) - return nil } From 086e4e0ef7b4a6a499f70e1ba1aba16698cc0f16 Mon Sep 17 00:00:00 2001 From: joyarackal Date: Sat, 5 Dec 2020 08:28:04 +0100 Subject: [PATCH 0103/1212] increased max retention period for kinesis stream to 8760 --- aws/resource_aws_kinesis_stream.go | 2 +- aws/resource_aws_kinesis_stream_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_kinesis_stream.go b/aws/resource_aws_kinesis_stream.go index 08c5e6556aa..74ee6e4828c 100644 --- a/aws/resource_aws_kinesis_stream.go +++ b/aws/resource_aws_kinesis_stream.go @@ -60,7 +60,7 @@ func resourceAwsKinesisStream() *schema.Resource { Type: schema.TypeInt, 
Optional: true, Default: 24, - ValidateFunc: validation.IntBetween(24, 168), + ValidateFunc: validation.IntBetween(24, 8760), }, "shard_level_metrics": { diff --git a/aws/resource_aws_kinesis_stream_test.go b/aws/resource_aws_kinesis_stream_test.go index 2bfa8b24748..f080ad624ae 100644 --- a/aws/resource_aws_kinesis_stream_test.go +++ b/aws/resource_aws_kinesis_stream_test.go @@ -293,7 +293,7 @@ func TestAccAWSKinesisStream_retentionPeriod(t *testing.T) { testAccCheckKinesisStreamExists(resourceName, &stream), testAccCheckAWSKinesisStreamAttributes(&stream), resource.TestCheckResourceAttr( - resourceName, "retention_period", "100"), + resourceName, "retention_period", "8760"), ), }, @@ -663,7 +663,7 @@ func testAccKinesisStreamConfigUpdateRetentionPeriod(rInt int) string { resource "aws_kinesis_stream" "test" { name = "terraform-kinesis-test-%d" shard_count = 2 - retention_period = 100 + retention_period = 8760 tags = { Name = "tf-test" From 56e912ffa16bee073758cf02939e5a34792519c5 Mon Sep 17 00:00:00 2001 From: Roberth Kulbin Date: Wed, 1 Jul 2020 15:19:02 +0100 Subject: [PATCH 0104/1212] r/aws_ec2_managed_prefix_list: new resource --- aws/provider.go | 1 + aws/resource_aws_ec2_managed_prefix_list.go | 475 +++++++++++ ...source_aws_ec2_managed_prefix_list_test.go | 750 ++++++++++++++++++ .../r/ec2_managed_prefix_list.html.markdown | 85 ++ website/docs/r/security_group.html.markdown | 11 +- .../docs/r/security_group_rule.html.markdown | 11 +- 6 files changed, 1325 insertions(+), 8 deletions(-) create mode 100644 aws/resource_aws_ec2_managed_prefix_list.go create mode 100644 aws/resource_aws_ec2_managed_prefix_list_test.go create mode 100644 website/docs/r/ec2_managed_prefix_list.html.markdown diff --git a/aws/provider.go b/aws/provider.go index 79d050c6a0a..ab4733b46f7 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -599,6 +599,7 @@ func Provider() *schema.Provider { "aws_ec2_fleet": resourceAwsEc2Fleet(), "aws_ec2_local_gateway_route": resourceAwsEc2LocalGatewayRoute(), "aws_ec2_local_gateway_route_table_vpc_association": resourceAwsEc2LocalGatewayRouteTableVpcAssociation(), + "aws_ec2_managed_prefix_list": resourceAwsEc2ManagedPrefixList(), "aws_ec2_tag": resourceAwsEc2Tag(), "aws_ec2_traffic_mirror_filter": resourceAwsEc2TrafficMirrorFilter(), "aws_ec2_traffic_mirror_filter_rule": resourceAwsEc2TrafficMirrorFilterRule(), diff --git a/aws/resource_aws_ec2_managed_prefix_list.go b/aws/resource_aws_ec2_managed_prefix_list.go new file mode 100644 index 00000000000..aa88a32d50a --- /dev/null +++ b/aws/resource_aws_ec2_managed_prefix_list.go @@ -0,0 +1,475 @@ +package aws + +import ( + "errors" + "fmt" + "log" + "sort" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" +) + +var ( + awsPrefixListEntrySetHashFunc = schema.HashResource(prefixListEntrySchema()) +) + +func resourceAwsEc2ManagedPrefixList() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsEc2ManagedPrefixListCreate, + Read: resourceAwsEc2ManagedPrefixListRead, + Update: resourceAwsEc2ManagedPrefixListUpdate, + Delete: resourceAwsEc2ManagedPrefixListDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "address_family": 
{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+				ValidateFunc: validation.StringInSlice(
+					[]string{"IPv4", "IPv6"},
+					false),
+			},
+			"arn": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"entry": {
+				Type:       schema.TypeSet,
+				Optional:   true,
+				Computed:   true,
+				ConfigMode: schema.SchemaConfigModeAttr,
+				Elem:       prefixListEntrySchema(),
+				Set:        awsPrefixListEntrySetHashFunc,
+			},
+			"max_entries": {
+				Type:         schema.TypeInt,
+				Required:     true,
+				ForceNew:     true,
+				ValidateFunc: validation.IntAtLeast(1),
+			},
+			"name": {
+				Type:         schema.TypeString,
+				Required:     true,
+				ValidateFunc: validation.StringLenBetween(1, 255),
+			},
+			"owner_id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"tags": tagsSchema(),
+		},
+	}
+}
+
+func prefixListEntrySchema() *schema.Resource {
+	return &schema.Resource{
+		Schema: map[string]*schema.Schema{
+			"cidr_block": {
+				Type:         schema.TypeString,
+				Required:     true,
+				ValidateFunc: validation.IsCIDR,
+			},
+			"description": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ValidateFunc: validation.StringLenBetween(0, 255),
+			},
+		},
+	}
+}
+
+func resourceAwsEc2ManagedPrefixListCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	input := ec2.CreateManagedPrefixListInput{}
+
+	input.AddressFamily = aws.String(d.Get("address_family").(string))
+
+	if v, ok := d.GetOk("entry"); ok {
+		input.Entries = expandAddPrefixListEntries(v)
+	}
+
+	input.MaxEntries = aws.Int64(int64(d.Get("max_entries").(int)))
+	input.PrefixListName = aws.String(d.Get("name").(string))
+
+	if v, ok := d.GetOk("tags"); ok {
+		input.TagSpecifications = ec2TagSpecificationsFromMap(
+			v.(map[string]interface{}),
+			"prefix-list") // no ec2.ResourceTypePrefixList as of 01/07/20
+	}
+
+	output, err := conn.CreateManagedPrefixList(&input)
+	if err != nil {
+		return fmt.Errorf("failed to create managed prefix list: %v", err)
+	}
+
+	id := aws.StringValue(output.PrefixList.PrefixListId)
+
+	log.Printf("[INFO] Created Managed Prefix List %s (%s)", d.Get("name").(string), id)
+
+	if err := waitUntilAwsManagedPrefixListSettled(id, conn, d.Timeout(schema.TimeoutCreate)); err != nil {
+		return fmt.Errorf("prefix list %s did not settle after create: %s", id, err)
+	}
+
+	d.SetId(id)
+
+	return resourceAwsEc2ManagedPrefixListRead(d, meta)
+}
+
+func resourceAwsEc2ManagedPrefixListRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+	ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig
+	id := d.Id()
+
+	pl, ok, err := getManagedPrefixList(id, conn)
+	switch {
+	case err != nil:
+		return err
+	case !ok:
+		log.Printf("[WARN] Managed Prefix List %s not found; removing from state.", id)
+		d.SetId("")
+		return nil
+	}
+
+	d.Set("address_family", pl.AddressFamily)
+	d.Set("arn", pl.PrefixListArn)
+
+	entries, err := getPrefixListEntries(id, conn, 0)
+	if err != nil {
+		return err
+	}
+
+	if err := d.Set("entry", flattenPrefixListEntries(entries)); err != nil {
+		return fmt.Errorf("error setting attribute entry of managed prefix list %s: %s", id, err)
+	}
+
+	d.Set("max_entries", pl.MaxEntries)
+	d.Set("name", pl.PrefixListName)
+	d.Set("owner_id", pl.OwnerId)
+
+	if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(pl.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil {
+		return fmt.Errorf("error setting attribute tags of managed prefix list %s: %s", id, err)
+	}
+
+	return nil
+}
+
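+// resourceAwsEc2ManagedPrefixListUpdate applies name, entry, and tag changes.
+// Entry changes are sent in a single ModifyManagedPrefixList call pinned to the
+// list's current version, so a concurrent modification surfaces as a
+// version-mismatch error instead of being overwritten.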
+func resourceAwsEc2ManagedPrefixListUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+	id := d.Id()
+	modifyPrefixList := false
+
+	input := ec2.ModifyManagedPrefixListInput{}
+
+	input.PrefixListId = aws.String(id)
+
+	if d.HasChange("name") {
+		input.PrefixListName = aws.String(d.Get("name").(string))
+		modifyPrefixList = true
+	}
+
+	if d.HasChange("entry") {
+		pl, ok, err := getManagedPrefixList(id, conn)
+		switch {
+		case err != nil:
+			return err
+		case !ok:
+			return &resource.NotFoundError{}
+		}
+
+		currentVersion := aws.Int64Value(pl.Version)
+
+		oldEntries, err := getPrefixListEntries(id, conn, currentVersion)
+		if err != nil {
+			return err
+		}
+
+		newEntries := expandAddPrefixListEntries(d.Get("entry"))
+		adds, removes := computePrefixListEntriesModification(oldEntries, newEntries)
+
+		if len(adds) > 0 || len(removes) > 0 {
+			if len(adds) > 0 {
+				// the Modify API doesn't like empty lists
+				input.AddEntries = adds
+			}
+
+			if len(removes) > 0 {
+				// the Modify API doesn't like empty lists
+				input.RemoveEntries = removes
+			}
+
+			input.CurrentVersion = aws.Int64(currentVersion)
+			modifyPrefixList = true
+		}
+	}
+
+	if modifyPrefixList {
+		log.Printf("[INFO] modifying managed prefix list %s...", id)
+
+		switch _, err := conn.ModifyManagedPrefixList(&input); {
+		case isAWSErr(err, "PrefixListVersionMismatch", "prefix list has the incorrect version number"):
+			return fmt.Errorf("failed to modify managed prefix list %s: conflicting change", id)
+		case err != nil:
+			return fmt.Errorf("failed to modify managed prefix list %s: %s", id, err)
+		}
+
+		if err := waitUntilAwsManagedPrefixListSettled(id, conn, d.Timeout(schema.TimeoutUpdate)); err != nil {
+			return fmt.Errorf("prefix list did not settle after update: %s", err)
+		}
+	}
+
+	if d.HasChange("tags") {
+		before, after := d.GetChange("tags")
+		if err := keyvaluetags.Ec2UpdateTags(conn, id, before, after); err != nil {
+			return fmt.Errorf("failed to update tags of managed prefix list %s: %s", id, err)
+		}
+	}
+
+	return resourceAwsEc2ManagedPrefixListRead(d, meta)
+}
+
+// isManagedPrefixListModificationConflictErr treats version-mismatch and
+// incorrect-state errors as retryable concurrent-modification conflicts;
+// the matched codes are a best-effort assumption.
+func isManagedPrefixListModificationConflictErr(err error) bool {
+	return isAWSErr(err, "IncorrectState", "") ||
+		isAWSErr(err, "PrefixListVersionMismatch", "")
+}
+
+func resourceAwsEc2ManagedPrefixListDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+	id := d.Id()
+
+	input := ec2.DeleteManagedPrefixListInput{
+		PrefixListId: aws.String(id),
+	}
+
+	err := resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError {
+		_, err := conn.DeleteManagedPrefixList(&input)
+		switch {
+		case isManagedPrefixListModificationConflictErr(err):
+			return resource.RetryableError(err)
+		case isAWSErr(err, "InvalidPrefixListID.NotFound", ""):
+			log.Printf("[WARN] managed prefix list %s has already been deleted", id)
+			return nil
+		case err != nil:
+			return resource.NonRetryableError(err)
+		}
+
+		return nil
+	})
+
+	if isResourceTimeoutError(err) {
+		_, err = conn.DeleteManagedPrefixList(&input)
+	}
+
+	if err != nil {
+		return fmt.Errorf("failed to delete managed prefix list %s: %s", id, err)
+	}
+
+	if err := waitUntilAwsManagedPrefixListSettled(id, conn, d.Timeout(schema.TimeoutDelete)); err != nil {
+		return fmt.Errorf("prefix list %s did not settle after delete: %s", id, err)
+	}
+
+	return nil
+}
+
+func expandAddPrefixListEntries(input interface{}) []*ec2.AddPrefixListEntry {
+	if input == nil {
+		return nil
+	}
+
+	list := input.(*schema.Set).List()
+	result := make([]*ec2.AddPrefixListEntry, 0, len(list))
+
+	for _, entry := range list {
+		m := entry.(map[string]interface{})
+
+		output := ec2.AddPrefixListEntry{}
+
+		output.Cidr = aws.String(m["cidr_block"].(string))
+
+		if v, ok := m["description"]; ok {
+			output.Description = aws.String(v.(string))
+		}
+
+		result = append(result, &output)
+	}
+
+	return result
+}
+
+func flattenPrefixListEntries(entries []*ec2.PrefixListEntry) *schema.Set {
+	list := make([]interface{}, 0, len(entries))
+
+	for _, entry := range entries {
+		m := make(map[string]interface{}, 2)
+		m["cidr_block"] = aws.StringValue(entry.Cidr)
+
+		if entry.Description != nil {
+			m["description"] = aws.StringValue(entry.Description)
+		}
+
+		list = append(list, m)
+	}
+
+	return schema.NewSet(awsPrefixListEntrySetHashFunc, list)
+}
+
+func getManagedPrefixList(
+	id string,
+	conn *ec2.EC2,
+) (*ec2.ManagedPrefixList, bool, error) {
+	input := ec2.DescribeManagedPrefixListsInput{
+		PrefixListIds: aws.StringSlice([]string{id}),
+	}
+
+	output, err := conn.DescribeManagedPrefixLists(&input)
+	switch {
+	case isAWSErr(err, "InvalidPrefixListID.NotFound", ""):
+		return nil, false, nil
+	case err != nil:
+		return nil, false, fmt.Errorf("describe managed prefix list %s: %v", id, err)
+	case len(output.PrefixLists) != 1:
+		return nil, false, nil
+	}
+
+	return output.PrefixLists[0], true, nil
+}
+
+func getPrefixListEntries(
+	id string,
+	conn *ec2.EC2,
+	version int64,
+) ([]*ec2.PrefixListEntry, error) {
+	input := ec2.GetManagedPrefixListEntriesInput{
+		PrefixListId: aws.String(id),
+	}
+
+	if version > 0 {
+		input.TargetVersion = aws.Int64(version)
+	}
+
+	result := []*ec2.PrefixListEntry(nil)
+	switch err := conn.GetManagedPrefixListEntriesPages(
+		&input,
+		func(output *ec2.GetManagedPrefixListEntriesOutput, last bool) bool {
+			result = append(result, output.Entries...)
+			return true
+		}); {
+	case err != nil:
+		return nil, fmt.Errorf("failed to get entries in prefix list %s: %v", id, err)
+	}
+
+	return result, nil
+}
+
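+// computePrefixListEntriesModification diffs the old entries against the new
+// ones, keyed by CIDR: a CIDR that is new or whose description changed becomes
+// an add, and a CIDR missing from the new set becomes a remove.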
+func computePrefixListEntriesModification(
+	oldEntries []*ec2.PrefixListEntry,
+	newEntries []*ec2.AddPrefixListEntry,
+) ([]*ec2.AddPrefixListEntry, []*ec2.RemovePrefixListEntry) {
+	adds := map[string]string{} // CIDR => Description
+
+	removes := map[string]struct{}{} // set of CIDR
+	for _, oldEntry := range oldEntries {
+		oldCIDR := aws.StringValue(oldEntry.Cidr)
+		removes[oldCIDR] = struct{}{}
+	}
+
+	for _, newEntry := range newEntries {
+		newCIDR := aws.StringValue(newEntry.Cidr)
+		newDescription := aws.StringValue(newEntry.Description)
+
+		for _, oldEntry := range oldEntries {
+			oldCIDR := aws.StringValue(oldEntry.Cidr)
+			oldDescription := aws.StringValue(oldEntry.Description)
+
+			if oldCIDR == newCIDR {
+				delete(removes, oldCIDR)
+
+				if oldDescription != newDescription {
+					adds[oldCIDR] = newDescription
+				}
+
+				goto nextNewEntry
+			}
+		}
+
+		// reach this point when no matching oldEntry found
+		adds[newCIDR] = newDescription
+
+	nextNewEntry:
+		continue // a label must be followed by a statement
+	}
+
+	addList := make([]*ec2.AddPrefixListEntry, 0, len(adds))
+	for cidr, description := range adds {
+		addList = append(addList, &ec2.AddPrefixListEntry{
+			Cidr:        aws.String(cidr),
+			Description: aws.String(description),
+		})
+	}
+	sort.Slice(addList, func(i, j int) bool {
+		return aws.StringValue(addList[i].Cidr) < aws.StringValue(addList[j].Cidr)
+	})
+
+	removeList := make([]*ec2.RemovePrefixListEntry, 0, len(removes))
+	for cidr := range removes {
+		removeList = append(removeList, &ec2.RemovePrefixListEntry{
+			Cidr: aws.String(cidr),
+		})
+	}
+	sort.Slice(removeList, func(i, j int) bool {
+		return aws.StringValue(removeList[i].Cidr) < aws.StringValue(removeList[j].Cidr)
+	})
+
+	return addList, removeList
+}
+
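+// waitUntilAwsManagedPrefixListSettled polls the prefix list until it reaches
+// a stable state, returning an error if it lands in a failed or unknown state.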
log.Printf("[INFO] Waiting for managed prefix list %s to settle...", id) + + err := resource.Retry(timeout, func() *resource.RetryError { + settled, err := isAwsManagedPrefixListSettled(id, conn) + switch { + case err != nil: + return resource.NonRetryableError(err) + case !settled: + return resource.RetryableError(errors.New("resource not yet settled")) + } + + return nil + }) + + if isResourceTimeoutError(err) { + return fmt.Errorf("timed out: %s", err) + } + + return nil +} + +func isAwsManagedPrefixListSettled(id string, conn *ec2.EC2) (bool, error) { + pl, ok, err := getManagedPrefixList(id, conn) + switch { + case err != nil: + return false, err + case !ok: + return true, nil + } + + switch state := aws.StringValue(pl.State); state { + case ec2.PrefixListStateCreateComplete, ec2.PrefixListStateModifyComplete, ec2.PrefixListStateDeleteComplete: + return true, nil + case ec2.PrefixListStateCreateInProgress, ec2.PrefixListStateModifyInProgress, ec2.PrefixListStateDeleteInProgress: + return false, nil + case ec2.PrefixListStateCreateFailed, ec2.PrefixListStateModifyFailed, ec2.PrefixListStateDeleteFailed: + return false, fmt.Errorf("terminal state %s indicates failure", state) + default: + return false, fmt.Errorf("unexpected state %s", state) + } +} diff --git a/aws/resource_aws_ec2_managed_prefix_list_test.go b/aws/resource_aws_ec2_managed_prefix_list_test.go new file mode 100644 index 00000000000..ec2667036d3 --- /dev/null +++ b/aws/resource_aws_ec2_managed_prefix_list_test.go @@ -0,0 +1,750 @@ +package aws + +import ( + "fmt" + "reflect" + "regexp" + "sort" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccAwsEc2ManagedPrefixList_computePrefixListEntriesModification(t *testing.T) { + type testEntry struct { + CIDR string + Description string + } + + tests := []struct { + name string + oldEntries []testEntry + newEntries []testEntry + expectedAdds []testEntry + expectedRemoves []testEntry + }{ + { + name: "add two", + oldEntries: []testEntry{}, + newEntries: []testEntry{{"1.2.3.4/32", "test1"}, {"2.3.4.5/32", "test2"}}, + expectedAdds: []testEntry{{"1.2.3.4/32", "test1"}, {"2.3.4.5/32", "test2"}}, + expectedRemoves: []testEntry{}, + }, + { + name: "remove one", + oldEntries: []testEntry{{"1.2.3.4/32", "test1"}, {"2.3.4.5/32", "test2"}}, + newEntries: []testEntry{{"1.2.3.4/32", "test1"}}, + expectedAdds: []testEntry{}, + expectedRemoves: []testEntry{{"2.3.4.5/32", "test2"}}, + }, + { + name: "modify description of one", + oldEntries: []testEntry{{"1.2.3.4/32", "test1"}, {"2.3.4.5/32", "test2"}}, + newEntries: []testEntry{{"1.2.3.4/32", "test1"}, {"2.3.4.5/32", "test2-1"}}, + expectedAdds: []testEntry{{"2.3.4.5/32", "test2-1"}}, + expectedRemoves: []testEntry{}, + }, + { + name: "add third", + oldEntries: []testEntry{{"1.2.3.4/32", "test1"}, {"2.3.4.5/32", "test2"}}, + newEntries: []testEntry{{"1.2.3.4/32", "test1"}, {"2.3.4.5/32", "test2"}, {"3.4.5.6/32", "test3"}}, + expectedAdds: []testEntry{{"3.4.5.6/32", "test3"}}, + expectedRemoves: []testEntry{}, + }, + { + name: "add and remove one", + oldEntries: []testEntry{{"1.2.3.4/32", "test1"}, {"2.3.4.5/32", "test2"}}, + newEntries: []testEntry{{"1.2.3.4/32", "test1"}, {"3.4.5.6/32", "test3"}}, + expectedAdds: []testEntry{{"3.4.5.6/32", "test3"}}, + expectedRemoves: []testEntry{{"2.3.4.5/32", "test2"}}, + }, + { + name: "add and remove one with description 
change", + oldEntries: []testEntry{{"1.2.3.4/32", "test1"}, {"2.3.4.5/32", "test2"}}, + newEntries: []testEntry{{"1.2.3.4/32", "test1-1"}, {"3.4.5.6/32", "test3"}}, + expectedAdds: []testEntry{{"1.2.3.4/32", "test1-1"}, {"3.4.5.6/32", "test3"}}, + expectedRemoves: []testEntry{{"2.3.4.5/32", "test2"}}, + }, + { + name: "basic test update", + oldEntries: []testEntry{{"1.0.0.0/8", "Test1"}}, + newEntries: []testEntry{{"1.0.0.0/8", "Test1-1"}, {"2.2.0.0/16", "Test2"}}, + expectedAdds: []testEntry{{"1.0.0.0/8", "Test1-1"}, {"2.2.0.0/16", "Test2"}}, + expectedRemoves: []testEntry{}, + }, + } + + for _, test := range tests { + oldEntryList := []*ec2.PrefixListEntry(nil) + for _, entry := range test.oldEntries { + oldEntryList = append(oldEntryList, &ec2.PrefixListEntry{ + Cidr: aws.String(entry.CIDR), + Description: aws.String(entry.Description), + }) + } + + newEntryList := []*ec2.AddPrefixListEntry(nil) + for _, entry := range test.newEntries { + newEntryList = append(newEntryList, &ec2.AddPrefixListEntry{ + Cidr: aws.String(entry.CIDR), + Description: aws.String(entry.Description), + }) + } + + addList, removeList := computePrefixListEntriesModification(oldEntryList, newEntryList) + + if len(addList) != len(test.expectedAdds) { + t.Errorf("expected %d adds, got %d", len(test.expectedAdds), len(addList)) + } + + for i, added := range addList { + expected := test.expectedAdds[i] + + actualCidr := aws.StringValue(added.Cidr) + expectedCidr := expected.CIDR + if actualCidr != expectedCidr { + t.Errorf("add[%d]: expected cidr %s, got %s", i, expectedCidr, actualCidr) + } + + actualDesc := aws.StringValue(added.Description) + expectedDesc := expected.Description + if actualDesc != expectedDesc { + t.Errorf("add[%d]: expected description '%s', got '%s'", i, expectedDesc, actualDesc) + } + } + + if len(removeList) != len(test.expectedRemoves) { + t.Errorf("expected %d removes, got %d", len(test.expectedRemoves), len(removeList)) + } + + for i, removed := range removeList { + expected := test.expectedRemoves[i] + + actualCidr := aws.StringValue(removed.Cidr) + expectedCidr := expected.CIDR + if actualCidr != expectedCidr { + t.Errorf("add[%d]: expected cidr %s, got %s", i, expectedCidr, actualCidr) + } + } + } +} + +func testAccCheckAwsEc2ManagedPrefixListDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).ec2conn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_ec2_managed_prefix_list" { + continue + } + + id := rs.Primary.ID + + switch _, ok, err := getManagedPrefixList(id, conn); { + case err != nil: + return err + case ok: + return fmt.Errorf("managed prefix list %s still exists", id) + } + } + + return nil +} + +func testAccCheckAwsEc2ManagedPrefixListVersion( + prefixList *ec2.ManagedPrefixList, + version int64, +) resource.TestCheckFunc { + return func(state *terraform.State) error { + if actual := aws.Int64Value(prefixList.Version); actual != version { + return fmt.Errorf("expected prefix list version %d, got %d", version, actual) + } + + return nil + } +} + +func TestAccAwsEc2ManagedPrefixList_basic(t *testing.T) { + resourceName := "aws_ec2_managed_prefix_list.test" + pl, entries := ec2.ManagedPrefixList{}, []*ec2.PrefixListEntry(nil) + + checkAttributes := func(*terraform.State) error { + if actual := aws.StringValue(pl.AddressFamily); actual != "IPv4" { + return fmt.Errorf("bad address family: %s", actual) + } + + if actual := aws.Int64Value(pl.MaxEntries); actual != 5 { + return fmt.Errorf("bad max entries: %d", actual) + } + + if actual 
:= aws.StringValue(pl.OwnerId); actual != testAccGetAccountID() { + return fmt.Errorf("bad owner id: %s", actual) + } + + if actual := aws.StringValue(pl.PrefixListName); actual != "tf-test-basic-create" { + return fmt.Errorf("bad name: %s", actual) + } + + sort.Slice(pl.Tags, func(i, j int) bool { + return aws.StringValue(pl.Tags[i].Key) < aws.StringValue(pl.Tags[j].Key) + }) + + expectTags := []*ec2.Tag{ + {Key: aws.String("Key1"), Value: aws.String("Value1")}, + {Key: aws.String("Key2"), Value: aws.String("Value2")}, + } + + if !reflect.DeepEqual(expectTags, pl.Tags) { + return fmt.Errorf("expected tags %#v, got %#v", expectTags, pl.Tags) + } + + sort.Slice(entries, func(i, j int) bool { + return aws.StringValue(entries[i].Cidr) < aws.StringValue(entries[j].Cidr) + }) + + expectEntries := []*ec2.PrefixListEntry{ + {Cidr: aws.String("1.0.0.0/8"), Description: aws.String("Test1")}, + {Cidr: aws.String("2.0.0.0/8"), Description: aws.String("Test2")}, + } + + if !reflect.DeepEqual(expectEntries, entries) { + return fmt.Errorf("expected entries %#v, got %#v", expectEntries, entries) + } + + return nil + } + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEc2ManagedPrefixListConfig_basic_create, + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, &entries), + checkAttributes, + resource.TestCheckResourceAttr(resourceName, "name", "tf-test-basic-create"), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "ec2", regexp.MustCompile(`prefix-list/pl-[[:xdigit:]]+`)), + resource.TestCheckResourceAttr(resourceName, "address_family", "IPv4"), + resource.TestCheckResourceAttr(resourceName, "max_entries", "5"), + resource.TestCheckResourceAttr(resourceName, "entry.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "entry.*", map[string]string{ + "cidr_block": "1.0.0.0/8", + "description": "Test1", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "entry.*", map[string]string{ + "cidr_block": "2.0.0.0/8", + "description": "Test2", + }), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + testAccCheckAwsEc2ManagedPrefixListVersion(&pl, 1), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.Key1", "Value1"), + resource.TestCheckResourceAttr(resourceName, "tags.Key2", "Value2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsEc2ManagedPrefixListConfig_basic_update, + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, &entries), + resource.TestCheckResourceAttr(resourceName, "name", "tf-test-basic-update"), + resource.TestCheckResourceAttr(resourceName, "entry.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "entry.*", map[string]string{ + "cidr_block": "1.0.0.0/8", + "description": "Test1", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "entry.*", map[string]string{ + "cidr_block": "3.0.0.0/8", + "description": "Test3", + }), + testAccCheckAwsEc2ManagedPrefixListVersion(&pl, 2), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.Key1", "Value1"), + 
resource.TestCheckResourceAttr(resourceName, "tags.Key3", "Value3"), + ), + }, + }, + }) +} + +const testAccAwsEc2ManagedPrefixListConfig_basic_create = ` +resource "aws_ec2_managed_prefix_list" "test" { + name = "tf-test-basic-create" + address_family = "IPv4" + max_entries = 5 + + entry { + cidr_block = "1.0.0.0/8" + description = "Test1" + } + + entry { + cidr_block = "2.0.0.0/8" + description = "Test2" + } + + tags = { + Key1 = "Value1" + Key2 = "Value2" + } +} +` + +const testAccAwsEc2ManagedPrefixListConfig_basic_update = ` +resource "aws_ec2_managed_prefix_list" "test" { + name = "tf-test-basic-update" + address_family = "IPv4" + max_entries = 5 + + entry { + cidr_block = "1.0.0.0/8" + description = "Test1" + } + + entry { + cidr_block = "3.0.0.0/8" + description = "Test3" + } + + tags = { + Key1 = "Value1" + Key3 = "Value3" + } +} +` + +func testAccAwsEc2ManagedPrefixListExists( + name string, + out *ec2.ManagedPrefixList, + entries *[]*ec2.PrefixListEntry, +) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + switch { + case !ok: + return fmt.Errorf("resource %s not found", name) + case rs.Primary.ID == "": + return fmt.Errorf("resource %s has not set its id", name) + } + + conn := testAccProvider.Meta().(*AWSClient).ec2conn + id := rs.Primary.ID + + pl, ok, err := getManagedPrefixList(id, conn) + switch { + case err != nil: + return err + case !ok: + return fmt.Errorf("resource %s (%s) has not been created", name, id) + } + + if out != nil { + *out = *pl + } + + if entries != nil { + entries1, err := getPrefixListEntries(id, conn, *pl.Version) + if err != nil { + return err + } + + *entries = entries1 + } + + return nil + } +} + +func TestAccAwsEc2ManagedPrefixList_disappears(t *testing.T) { + resourceName := "aws_ec2_managed_prefix_list.test" + pl := ec2.ManagedPrefixList{} + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEc2ManagedPrefixListConfig_disappears, + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), + testAccCheckResourceDisappears(testAccProvider, resourceAwsEc2ManagedPrefixList(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +const testAccAwsEc2ManagedPrefixListConfig_disappears = ` +resource "aws_ec2_managed_prefix_list" "test" { + name = "tf-test-disappears" + address_family = "IPv4" + max_entries = 2 + + entry { + cidr_block = "1.0.0.0/8" + } +} +` + +func TestAccAwsEc2ManagedPrefixList_name(t *testing.T) { + resourceName := "aws_ec2_managed_prefix_list.test" + pl := ec2.ManagedPrefixList{} + + checkName := func(name string) resource.TestCheckFunc { + return func(*terraform.State) error { + if actual := aws.StringValue(pl.PrefixListName); actual != name { + return fmt.Errorf("expected name %s, got %s", name, actual) + } + + return nil + } + } + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEc2ManagedPrefixListConfig_name_create, + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), + resource.TestCheckResourceAttr(resourceName, "name", 
"tf-test-name-create"), + checkName("tf-test-name-create"), + testAccCheckAwsEc2ManagedPrefixListVersion(&pl, 1), + ), + }, + { + Config: testAccAwsEc2ManagedPrefixListConfig_name_update, + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), + resource.TestCheckResourceAttr(resourceName, "name", "tf-test-name-update"), + checkName("tf-test-name-update"), + testAccCheckAwsEc2ManagedPrefixListVersion(&pl, 1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +const testAccAwsEc2ManagedPrefixListConfig_name_create = ` +resource "aws_ec2_managed_prefix_list" "test" { + name = "tf-test-name-create" + address_family = "IPv4" + max_entries = 5 +} +` + +const testAccAwsEc2ManagedPrefixListConfig_name_update = ` +resource "aws_ec2_managed_prefix_list" "test" { + name = "tf-test-name-update" + address_family = "IPv4" + max_entries = 5 +} +` + +func TestAccAwsEc2ManagedPrefixList_tags(t *testing.T) { + resourceName := "aws_ec2_managed_prefix_list.test" + pl := ec2.ManagedPrefixList{} + + checkTags := func(m map[string]string) resource.TestCheckFunc { + return func(*terraform.State) error { + sort.Slice(pl.Tags, func(i, j int) bool { + return aws.StringValue(pl.Tags[i].Key) < aws.StringValue(pl.Tags[j].Key) + }) + + expectTags := []*ec2.Tag(nil) + + if m != nil { + for k, v := range m { + expectTags = append(expectTags, &ec2.Tag{ + Key: aws.String(k), + Value: aws.String(v), + }) + } + + sort.Slice(expectTags, func(i, j int) bool { + return aws.StringValue(expectTags[i].Key) < aws.StringValue(expectTags[j].Key) + }) + } + + if !reflect.DeepEqual(expectTags, pl.Tags) { + return fmt.Errorf("expected tags %#v, got %#v", expectTags, pl.Tags) + } + + return nil + } + } + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEc2ManagedPrefixListConfig_tags_none, + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), + checkTags(nil), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsEc2ManagedPrefixListConfig_tags_addSome, + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), + checkTags(map[string]string{"Key1": "Value1", "Key2": "Value2", "Key3": "Value3"}), + resource.TestCheckResourceAttr(resourceName, "tags.%", "3"), + resource.TestCheckResourceAttr(resourceName, "tags.Key1", "Value1"), + resource.TestCheckResourceAttr(resourceName, "tags.Key2", "Value2"), + resource.TestCheckResourceAttr(resourceName, "tags.Key3", "Value3"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsEc2ManagedPrefixListConfig_tags_dropOrModifySome, + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), + checkTags(map[string]string{"Key2": "Value2-1", "Key3": "Value3"}), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.Key2", "Value2-1"), + 
resource.TestCheckResourceAttr(resourceName, "tags.Key3", "Value3"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsEc2ManagedPrefixListConfig_tags_empty, + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), + checkTags(nil), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsEc2ManagedPrefixListConfig_tags_none, + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), + checkTags(nil), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +const testAccAwsEc2ManagedPrefixListConfig_tags_none = ` +resource "aws_ec2_managed_prefix_list" "test" { + name = "tf-test-acc" + address_family = "IPv4" + max_entries = 5 +} +` + +const testAccAwsEc2ManagedPrefixListConfig_tags_addSome = ` +resource "aws_ec2_managed_prefix_list" "test" { + name = "tf-test-acc" + address_family = "IPv4" + max_entries = 5 + + tags = { + Key1 = "Value1" + Key2 = "Value2" + Key3 = "Value3" + } +} +` + +const testAccAwsEc2ManagedPrefixListConfig_tags_dropOrModifySome = ` +resource "aws_ec2_managed_prefix_list" "test" { + name = "tf-test-acc" + address_family = "IPv4" + max_entries = 5 + + tags = { + Key2 = "Value2-1" + Key3 = "Value3" + } +} +` + +const testAccAwsEc2ManagedPrefixListConfig_tags_empty = ` +resource "aws_ec2_managed_prefix_list" "test" { + name = "tf-test-acc" + address_family = "IPv4" + max_entries = 5 + tags = {} +} +` + +func TestAccAwsEc2ManagedPrefixList_entryConfigMode(t *testing.T) { + resourceName := "aws_ec2_managed_prefix_list.test" + prefixList := ec2.ManagedPrefixList{} + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEc2ManagedPrefixListConfig_entryConfigMode_blocks, + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListExists(resourceName, &prefixList, nil), + resource.TestCheckResourceAttr(resourceName, "entry.#", "2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsEc2ManagedPrefixListConfig_entryConfigMode_noBlocks, + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListExists(resourceName, &prefixList, nil), + resource.TestCheckResourceAttr(resourceName, "entry.#", "2"), + ), + }, + { + Config: testAccAwsEc2ManagedPrefixListConfig_entryConfigMode_zeroed, + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListExists(resourceName, &prefixList, nil), + resource.TestCheckResourceAttr(resourceName, "entry.#", "0"), + ), + }, + }, + }) +} + +const testAccAwsEc2ManagedPrefixListConfig_entryConfigMode_blocks = ` +resource "aws_ec2_managed_prefix_list" "test" { + name = "tf-test-acc" + max_entries = 5 + address_family = "IPv4" + + entry { + cidr_block = "1.0.0.0/8" + description = "Entry1" + } + + entry { + cidr_block = "2.0.0.0/8" + description = "Entry2" + } +} +` + +const 
testAccAwsEc2ManagedPrefixListConfig_entryConfigMode_noBlocks = ` +resource "aws_ec2_managed_prefix_list" "test" { + name = "tf-test-acc" + max_entries = 5 + address_family = "IPv4" +} +` + +const testAccAwsEc2ManagedPrefixListConfig_entryConfigMode_zeroed = ` +resource "aws_ec2_managed_prefix_list" "test" { + name = "tf-test-acc" + max_entries = 5 + address_family = "IPv4" + entry = [] +} +` + +func TestAccAwsEc2ManagedPrefixList_exceedLimit(t *testing.T) { + resourceName := "aws_ec2_managed_prefix_list.test" + prefixList := ec2.ManagedPrefixList{} + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEc2ManagedPrefixListConfig_exceedLimit(2), + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListExists(resourceName, &prefixList, nil), + resource.TestCheckResourceAttr(resourceName, "entry.#", "2"), + ), + }, + { + Config: testAccAwsEc2ManagedPrefixListConfig_exceedLimit(3), + ResourceName: resourceName, + ExpectError: regexp.MustCompile(`You've reached the maximum number of entries for the prefix list.`), + }, + }, + }) +} + +func testAccAwsEc2ManagedPrefixListConfig_exceedLimit(count int) string { + entries := `` + for i := 0; i < count; i++ { + entries += fmt.Sprintf(` + entry { + cidr_block = "%[1]d.0.0.0/8" + description = "Test_%[1]d" + } +`, i+1) + } + + return fmt.Sprintf(` +resource "aws_ec2_managed_prefix_list" "test" { + name = "tf-test-acc" + address_family = "IPv4" + max_entries = 2 +%[1]s +} +`, + entries) +} diff --git a/website/docs/r/ec2_managed_prefix_list.html.markdown b/website/docs/r/ec2_managed_prefix_list.html.markdown new file mode 100644 index 00000000000..d372d874d75 --- /dev/null +++ b/website/docs/r/ec2_managed_prefix_list.html.markdown @@ -0,0 +1,85 @@ +--- +subcategory: "VPC" +layout: "aws" +page_title: "AWS: aws_ec2_managed_prefix_list" +description: |- + Provides a managed prefix list resource. +--- + +# Resource: aws_ec2_managed_prefix_list + +Provides a managed prefix list resource. + +~> **NOTE on Prefix Lists and Prefix List Entries:** Terraform currently +provides both a standalone [Managed Prefix List Entry resource](ec2_managed_prefix_list_entry.html), +and a Prefix List resource with an `entry` set defined in-line. At this time you +cannot use a Prefix List with in-line rules in conjunction with any Prefix List Entry +resources. Doing so will cause a conflict of rule settings and will unpredictably +fail or overwrite rules. + +~> **NOTE on `max_entries`:** When you reference a Prefix List in a resource, +the maximum number of entries for the prefix lists counts as the same number of rules +or entries for the resource. For example, if you create a prefix list with a maximum +of 20 entries and you reference that prefix list in a security group rule, this counts +as 20 rules for the security group. + +## Example Usage + +Basic usage + +```hcl +resource "aws_ec2_managed_prefix_list" "example" { + name = "All VPC CIDR-s" + address_family = "IPv4" + max_entries = 5 + + entry { + cidr_block = aws_vpc.example.cidr_block + description = "Primary" + } + + entry { + cidr_block = aws_vpc_ipv4_cidr_block_association.example.cidr_block + description = "Secondary" + } + + tags = { + Env = "live" + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of this resource. 
The name must not start with `com.amazonaws`. +* `address_family` - (Required, Forces new resource) The address family (`IPv4` or `IPv6`) of + this prefix list. +* `entry` - (Optional) Can be specified multiple times for each prefix list entry. + Each entry block supports fields documented below. Different entries may have + overlapping CIDR blocks, but a particular CIDR should not be duplicated. +* `max_entries` - (Required, Forces new resource) The maximum number of entries that + this prefix list can contain. +* `tags` - (Optional) A map of tags to assign to this resource. + +The `entry` block supports: + +* `cidr_block` - (Required) The CIDR block of this entry. +* `description` - (Optional) Description of this entry. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - The ID of the prefix list. +* `arn` - The ARN of the prefix list. +* `owner_id` - The ID of the AWS account that owns this prefix list. + +## Import + +Prefix Lists can be imported using the `id`, e.g. + +``` +$ terraform import aws_ec2_managed_prefix_list.default pl-0570a1d2d725c16be +``` diff --git a/website/docs/r/security_group.html.markdown b/website/docs/r/security_group.html.markdown index 584297739a7..da0f0c21efd 100644 --- a/website/docs/r/security_group.html.markdown +++ b/website/docs/r/security_group.html.markdown @@ -84,7 +84,7 @@ The `ingress` block supports: * `cidr_blocks` - (Optional) List of CIDR blocks. * `ipv6_cidr_blocks` - (Optional) List of IPv6 CIDR blocks. -* `prefix_list_ids` - (Optional) List of prefix list IDs. +* `prefix_list_ids` - (Optional) List of Prefix List IDs. * `from_port` - (Required) The start port (or ICMP type number if protocol is "icmp" or "icmpv6") * `protocol` - (Required) The protocol. If you select a protocol of "-1" (semantically equivalent to `"all"`, which is not a valid value here), you must specify a "from_port" and "to_port" equal to 0. If not icmp, icmpv6, tcp, udp, or "-1" use the [protocol number](https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) @@ -99,7 +99,7 @@ The `egress` block supports: * `cidr_blocks` - (Optional) List of CIDR blocks. * `ipv6_cidr_blocks` - (Optional) List of IPv6 CIDR blocks. -* `prefix_list_ids` - (Optional) List of prefix list IDs (for allowing access to VPC endpoints) +* `prefix_list_ids` - (Optional) List of Prefix List IDs. * `from_port` - (Required) The start port (or ICMP type number if protocol is "icmp") * `protocol` - (Required) The protocol. If you select a protocol of "-1" (semantically equivalent to `"all"`, which is not a valid value here), you must specify a "from_port" and "to_port" equal to 0. If not icmp, tcp, udp, or "-1" use the [protocol number](https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) @@ -132,8 +132,9 @@ resource "aws_security_group" "example" { ## Usage with prefix list IDs -Prefix list IDs are managed by AWS internally. Prefix list IDs -are associated with a prefix list name, or service name, that is linked to a specific region. +Prefix Lists are either managed by AWS internally, or created by the customer using a +[Prefix List resource](ec2_managed_prefix_list.html). Prefix Lists provided by +AWS are associated with a prefix list name, or service name, that is linked to a specific region. 
Prefix list IDs are exported on VPC Endpoints, so you can use this format: ```hcl @@ -153,6 +154,8 @@ resource "aws_vpc_endpoint" "my_endpoint" { } ``` +You can also find a specific Prefix List using the `aws_prefix_list` data source. + ## Attributes Reference In addition to all arguments above, the following attributes are exported: diff --git a/website/docs/r/security_group_rule.html.markdown b/website/docs/r/security_group_rule.html.markdown index dc28d84e627..9b1c0b24915 100644 --- a/website/docs/r/security_group_rule.html.markdown +++ b/website/docs/r/security_group_rule.html.markdown @@ -45,8 +45,7 @@ The following arguments are supported: or `egress` (outbound). * `cidr_blocks` - (Optional) List of CIDR blocks. Cannot be specified with `source_security_group_id`. * `ipv6_cidr_blocks` - (Optional) List of IPv6 CIDR blocks. -* `prefix_list_ids` - (Optional) List of prefix list IDs (for allowing access to VPC endpoints). -Only valid with `egress`. +* `prefix_list_ids` - (Optional) List of Prefix List IDs. * `from_port` - (Required) The start port (or ICMP type number if protocol is "icmp" or "icmpv6"). * `protocol` - (Required) The protocol. If not icmp, icmpv6, tcp, udp, or all use the [protocol number](https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) * `security_group_id` - (Required) The security group to apply this rule to. @@ -59,8 +58,10 @@ Only valid with `egress`. ## Usage with prefix list IDs -Prefix list IDs are managed by AWS internally. Prefix list IDs -are associated with a prefix list name, or service name, that is linked to a specific region. +Prefix Lists are either managed by AWS internally, or created by the customer using a +[Managed Prefix List resource](ec2_managed_prefix_list.html). Prefix Lists provided by +AWS are associated with a prefix list name, or service name, that is linked to a specific region. + Prefix list IDs are exported on VPC Endpoints, so you can use this format: ```hcl @@ -79,6 +80,8 @@ resource "aws_vpc_endpoint" "my_endpoint" { } ``` +You can also find a specific Prefix List using the `aws_prefix_list` data source. 
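+
+For example, a minimal lookup by name might look like the following (the
+region-specific service name shown is illustrative; substitute the name or
+ID of the Prefix List you need):
+
+```hcl
+data "aws_prefix_list" "example" {
+  name = "com.amazonaws.us-east-1.s3"
+}
+```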
+ ## Attributes Reference In addition to all arguments above, the following attributes are exported: From efc5819d1b1fac57a7ac04dbe3609aa943464816 Mon Sep 17 00:00:00 2001 From: Roberth Kulbin Date: Sat, 5 Dec 2020 12:56:07 +0000 Subject: [PATCH 0105/1212] r/aws_ec2_managed_prefix_list_entry: new resource --- aws/provider.go | 1 + ...ource_aws_ec2_managed_prefix_list_entry.go | 289 ++++++++++ ..._aws_ec2_managed_prefix_list_entry_test.go | 498 ++++++++++++++++++ ...c2_managed_prefix_list_entry.html.markdown | 66 +++ 4 files changed, 854 insertions(+) create mode 100644 aws/resource_aws_ec2_managed_prefix_list_entry.go create mode 100644 aws/resource_aws_ec2_managed_prefix_list_entry_test.go create mode 100644 website/docs/r/ec2_managed_prefix_list_entry.html.markdown diff --git a/aws/provider.go b/aws/provider.go index ab4733b46f7..61c70d73e10 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -600,6 +600,7 @@ func Provider() *schema.Provider { "aws_ec2_local_gateway_route": resourceAwsEc2LocalGatewayRoute(), "aws_ec2_local_gateway_route_table_vpc_association": resourceAwsEc2LocalGatewayRouteTableVpcAssociation(), "aws_ec2_managed_prefix_list": resourceAwsEc2ManagedPrefixList(), + "aws_ec2_managed_prefix_list_entry": resourceAwsEc2ManagedPrefixListEntry(), "aws_ec2_tag": resourceAwsEc2Tag(), "aws_ec2_traffic_mirror_filter": resourceAwsEc2TrafficMirrorFilter(), "aws_ec2_traffic_mirror_filter_rule": resourceAwsEc2TrafficMirrorFilterRule(), diff --git a/aws/resource_aws_ec2_managed_prefix_list_entry.go b/aws/resource_aws_ec2_managed_prefix_list_entry.go new file mode 100644 index 00000000000..82a91db7301 --- /dev/null +++ b/aws/resource_aws_ec2_managed_prefix_list_entry.go @@ -0,0 +1,289 @@ +package aws + +import ( + "errors" + "fmt" + "log" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" +) + +func resourceAwsEc2ManagedPrefixListEntry() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsEc2ManagedPrefixListEntryCreate, + Read: resourceAwsEc2ManagedPrefixListEntryRead, + Update: resourceAwsEc2ManagedPrefixListEntryUpdate, + Delete: resourceAwsEc2ManagedPrefixListEntryDelete, + + Importer: &schema.ResourceImporter{ + State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + ss := strings.Split(d.Id(), "_") + if len(ss) != 2 || ss[0] == "" || ss[1] == "" { + return nil, fmt.Errorf("invalid id %s: expected pl-123456_1.0.0.0/8", d.Id()) + } + + d.Set("prefix_list_id", ss[0]) + d.Set("cidr_block", ss[1]) + return []*schema.ResourceData{d}, nil + }, + }, + + Schema: map[string]*schema.Schema{ + "prefix_list_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "cidr_block": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.IsCIDR, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Default: "", + ValidateFunc: validation.StringLenBetween(0, 255), + }, + }, + } +} + +func resourceAwsEc2ManagedPrefixListEntryCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + prefixListId := d.Get("prefix_list_id").(string) + cidrBlock := d.Get("cidr_block").(string) + + log.Printf( + "[INFO] adding entry %s to prefix 
list %s...", + cidrBlock, prefixListId) + + err := modifyAwsManagedPrefixListConcurrently( + prefixListId, conn, d.Timeout(schema.TimeoutUpdate), + ec2.ModifyManagedPrefixListInput{ + PrefixListId: aws.String(prefixListId), + CurrentVersion: nil, // set by modifyAwsManagedPrefixListConcurrently + AddEntries: []*ec2.AddPrefixListEntry{ + { + Cidr: aws.String(cidrBlock), + Description: aws.String(d.Get("description").(string)), + }, + }, + }, + func(pl *ec2.ManagedPrefixList) *resource.RetryError { + currentVersion := int(aws.Int64Value(pl.Version)) + + _, ok, err := getManagedPrefixListEntryByCIDR(prefixListId, conn, currentVersion, cidrBlock) + switch { + case err != nil: + return resource.NonRetryableError(err) + case ok: + return resource.NonRetryableError(errors.New("an entry for this cidr block already exists")) + } + + return nil + }) + + if err != nil { + return fmt.Errorf("failed to add entry %s to prefix list %s: %s", cidrBlock, prefixListId, err) + } + + d.SetId(fmt.Sprintf("%s_%s", prefixListId, cidrBlock)) + + return resourceAwsEc2ManagedPrefixListEntryRead(d, meta) +} + +func resourceAwsEc2ManagedPrefixListEntryRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + prefixListId := d.Get("prefix_list_id").(string) + cidrBlock := d.Get("cidr_block").(string) + + entry, ok, err := getManagedPrefixListEntryByCIDR(prefixListId, conn, 0, cidrBlock) + switch { + case err != nil: + return err + case !ok: + log.Printf( + "[WARN] entry %s of managed prefix list %s not found; removing from state.", + cidrBlock, prefixListId) + d.SetId("") + return nil + } + + d.Set("description", entry.Description) + + return nil +} + +func resourceAwsEc2ManagedPrefixListEntryUpdate(d *schema.ResourceData, meta interface{}) error { + if !d.HasChange("description") { + return fmt.Errorf("all attributes except description should force new resource") + } + + conn := meta.(*AWSClient).ec2conn + prefixListId := d.Get("prefix_list_id").(string) + cidrBlock := d.Get("cidr_block").(string) + + err := modifyAwsManagedPrefixListConcurrently( + prefixListId, conn, d.Timeout(schema.TimeoutUpdate), + ec2.ModifyManagedPrefixListInput{ + PrefixListId: aws.String(prefixListId), + CurrentVersion: nil, // set by modifyAwsManagedPrefixListConcurrently + AddEntries: []*ec2.AddPrefixListEntry{ + { + Cidr: aws.String(cidrBlock), + Description: aws.String(d.Get("description").(string)), + }, + }, + }, + nil) + + if err != nil { + return fmt.Errorf("failed to update entry %s in prefix list %s: %s", cidrBlock, prefixListId, err) + } + + return resourceAwsEc2ManagedPrefixListEntryRead(d, meta) +} + +func resourceAwsEc2ManagedPrefixListEntryDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + prefixListId := d.Get("prefix_list_id").(string) + cidrBlock := d.Get("cidr_block").(string) + + err := modifyAwsManagedPrefixListConcurrently( + prefixListId, conn, d.Timeout(schema.TimeoutUpdate), + ec2.ModifyManagedPrefixListInput{ + PrefixListId: aws.String(prefixListId), + CurrentVersion: nil, // set by modifyAwsManagedPrefixListConcurrently + RemoveEntries: []*ec2.RemovePrefixListEntry{ + { + Cidr: aws.String(cidrBlock), + }, + }, + }, + nil) + + switch { + case isResourceNotFoundError(err): + log.Printf("[WARN] managed prefix list %s not found; removing from state", prefixListId) + return nil + case err != nil: + return fmt.Errorf("failed to remove entry %s from prefix list %s: %s", cidrBlock, prefixListId, err) + } + + return nil +} + +func 
getManagedPrefixListEntryByCIDR( + id string, + conn *ec2.EC2, + version int, + cidr string, +) (*ec2.PrefixListEntry, bool, error) { + input := ec2.GetManagedPrefixListEntriesInput{ + PrefixListId: aws.String(id), + } + + if version > 0 { + input.TargetVersion = aws.Int64(int64(version)) + } + + result := (*ec2.PrefixListEntry)(nil) + + err := conn.GetManagedPrefixListEntriesPages( + &input, + func(output *ec2.GetManagedPrefixListEntriesOutput, last bool) bool { + for _, entry := range output.Entries { + entryCidr := aws.StringValue(entry.Cidr) + if entryCidr == cidr { + result = entry + return false + } + } + + return true + }) + + switch { + case isAWSErr(err, "InvalidPrefixListID.NotFound", ""): + return nil, false, nil + case err != nil: + return nil, false, fmt.Errorf("failed to get entries in prefix list %s: %v", id, err) + case result == nil: + return nil, false, nil + } + + return result, true, nil +} + +func modifyAwsManagedPrefixListConcurrently( + id string, + conn *ec2.EC2, + timeout time.Duration, + input ec2.ModifyManagedPrefixListInput, + check func(pl *ec2.ManagedPrefixList) *resource.RetryError, +) error { + isModified := false + err := resource.Retry(timeout, func() *resource.RetryError { + if !isModified { + pl, ok, err := getManagedPrefixList(id, conn) + switch { + case err != nil: + return resource.NonRetryableError(err) + case !ok: + return resource.NonRetryableError(&resource.NotFoundError{}) + } + + input.CurrentVersion = pl.Version + + if check != nil { + if err := check(pl); err != nil { + return err + } + } + + switch _, err := conn.ModifyManagedPrefixList(&input); { + case isManagedPrefixListModificationConflictErr(err): + return resource.RetryableError(err) + case err != nil: + return resource.NonRetryableError(fmt.Errorf("modify failed: %s", err)) + } + + isModified = true + } + + switch settled, err := isAwsManagedPrefixListSettled(id, conn); { + case err != nil: + return resource.NonRetryableError(fmt.Errorf("resource failed to settle: %s", err)) + case !settled: + return resource.RetryableError(errors.New("resource not yet settled")) + } + + return nil + }) + + if tfresource.TimedOut(err) { + return err + } + + if err != nil { + return err + } + + return nil +} + +func isManagedPrefixListModificationConflictErr(err error) bool { + return isAWSErr(err, "IncorrectState", "in the current state (modify-in-progress)") || + isAWSErr(err, "IncorrectState", "in the current state (create-in-progress)") || + isAWSErr(err, "PrefixListVersionMismatch", "") || + isAWSErr(err, "ConcurrentMutationLimitExceeded", "") +} diff --git a/aws/resource_aws_ec2_managed_prefix_list_entry_test.go b/aws/resource_aws_ec2_managed_prefix_list_entry_test.go new file mode 100644 index 00000000000..4c241a4068e --- /dev/null +++ b/aws/resource_aws_ec2_managed_prefix_list_entry_test.go @@ -0,0 +1,498 @@ +package aws + +import ( + "fmt" + "reflect" + "regexp" + "sort" + "strings" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccAwsEc2ManagedPrefixListEntry_basic(t *testing.T) { + resourceName := "aws_ec2_managed_prefix_list_entry.test" + entry := ec2.PrefixListEntry{} + + checkAttributes := func(*terraform.State) error { + if actual := aws.StringValue(entry.Cidr); actual != "1.0.0.0/8" { + return fmt.Errorf("bad cidr: %s", actual) + } + + if actual := aws.StringValue(entry.Description); actual != "Create" { + 
return fmt.Errorf("bad description: %s", actual) + } + + return nil + } + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEc2ManagedPrefixListEntryConfig_basic_create, + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListEntryExists(resourceName, &entry), + checkAttributes, + resource.TestCheckResourceAttr(resourceName, "cidr_block", "1.0.0.0/8"), + resource.TestCheckResourceAttr(resourceName, "description", "Create"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsEc2ManagedPrefixListEntryConfig_basic_update, + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListEntryExists(resourceName, &entry), + resource.TestCheckResourceAttr(resourceName, "cidr_block", "1.0.0.0/8"), + resource.TestCheckResourceAttr(resourceName, "description", "Update"), + ), + }, + }, + }) +} + +const testAccAwsEc2ManagedPrefixListEntryConfig_basic_create = ` +resource "aws_ec2_managed_prefix_list" "test" { + name = "tf-test-acc" + address_family = "IPv4" + max_entries = 5 +} + +resource "aws_ec2_managed_prefix_list_entry" "test" { + prefix_list_id = aws_ec2_managed_prefix_list.test.id + cidr_block = "1.0.0.0/8" + description = "Create" +} +` + +const testAccAwsEc2ManagedPrefixListEntryConfig_basic_update = ` +resource "aws_ec2_managed_prefix_list" "test" { + name = "tf-test-acc" + address_family = "IPv4" + max_entries = 5 +} + +resource "aws_ec2_managed_prefix_list_entry" "test" { + prefix_list_id = aws_ec2_managed_prefix_list.test.id + cidr_block = "1.0.0.0/8" + description = "Update" +} +` + +func testAccAwsEc2ManagedPrefixListEntryExists( + name string, + out *ec2.PrefixListEntry, +) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + switch { + case !ok: + return fmt.Errorf("resource %s not found", name) + case rs.Primary.ID == "": + return fmt.Errorf("resource %s has not set its id", name) + } + + conn := testAccProvider.Meta().(*AWSClient).ec2conn + ss := strings.Split(rs.Primary.ID, "_") + prefixListId, cidrBlock := ss[0], ss[1] + + entry, ok, err := getManagedPrefixListEntryByCIDR(prefixListId, conn, 0, cidrBlock) + switch { + case err != nil: + return err + case !ok: + return fmt.Errorf("resource %s (%s) has not been created", name, prefixListId) + } + + if out != nil { + *out = *entry + } + + return nil + } +} + +func TestAccAwsEc2ManagedPrefixListEntry_disappears(t *testing.T) { + prefixListResourceName := "aws_ec2_managed_prefix_list.test" + resourceName := "aws_ec2_managed_prefix_list_entry.test" + pl := ec2.ManagedPrefixList{} + entry := ec2.PrefixListEntry{} + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEc2ManagedPrefixListEntryConfig_disappears, + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListEntryExists(resourceName, &entry), + testAccAwsEc2ManagedPrefixListExists(prefixListResourceName, &pl, nil), + testAccCheckResourceDisappears(testAccProvider, resourceAwsEc2ManagedPrefixListEntry(), resourceName), + ), + 
ExpectNonEmptyPlan: true, + }, + }, + }) +} + +const testAccAwsEc2ManagedPrefixListEntryConfig_disappears = ` +resource "aws_ec2_managed_prefix_list" "test" { + name = "tf-test-acc" + address_family = "IPv4" + max_entries = 5 +} + +resource "aws_ec2_managed_prefix_list_entry" "test" { + prefix_list_id = aws_ec2_managed_prefix_list.test.id + cidr_block = "1.0.0.0/8" +} +` + +func TestAccAwsEc2ManagedPrefixListEntry_prefixListDisappears(t *testing.T) { + prefixListResourceName := "aws_ec2_managed_prefix_list.test" + resourceName := "aws_ec2_managed_prefix_list_entry.test" + pl := ec2.ManagedPrefixList{} + entry := ec2.PrefixListEntry{} + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEc2ManagedPrefixListEntryConfig_disappears, + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListEntryExists(resourceName, &entry), + testAccAwsEc2ManagedPrefixListExists(prefixListResourceName, &pl, nil), + testAccCheckResourceDisappears(testAccProvider, resourceAwsEc2ManagedPrefixList(), prefixListResourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccAwsEc2ManagedPrefixListEntry_alreadyExists(t *testing.T) { + resourceName := "aws_ec2_managed_prefix_list_entry.test" + entry := ec2.PrefixListEntry{} + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEc2ManagedPrefixListEntryConfig_alreadyExists, + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListEntryExists(resourceName, &entry), + ), + ExpectError: regexp.MustCompile(`an entry for this cidr block already exists`), + }, + }, + }) +} + +const testAccAwsEc2ManagedPrefixListEntryConfig_alreadyExists = ` +resource "aws_ec2_managed_prefix_list" "test" { + name = "tf-test-acc" + address_family = "IPv4" + max_entries = 5 + + entry { + cidr_block = "1.0.0.0/8" + } +} + +resource "aws_ec2_managed_prefix_list_entry" "test" { + prefix_list_id = aws_ec2_managed_prefix_list.test.id + cidr_block = "1.0.0.0/8" + description = "Test" +} +` + +func TestAccAwsEc2ManagedPrefixListEntry_description(t *testing.T) { + resourceName := "aws_ec2_managed_prefix_list_entry.test" + entry := ec2.PrefixListEntry{} + + checkDescription := func(expect string) resource.TestCheckFunc { + return func(*terraform.State) error { + if actual := aws.StringValue(entry.Description); actual != expect { + return fmt.Errorf("bad description: %s", actual) + } + + return nil + } + } + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEc2ManagedPrefixListEntryConfig_description_none, + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListEntryExists(resourceName, &entry), + checkDescription("Test1"), + resource.TestCheckResourceAttr(resourceName, "cidr_block", "1.0.0.0/8"), + resource.TestCheckResourceAttr(resourceName, "description", "Test1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: 
testAccAwsEc2ManagedPrefixListEntryConfig_description_some, + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListEntryExists(resourceName, &entry), + checkDescription("Test2"), + resource.TestCheckResourceAttr(resourceName, "cidr_block", "1.0.0.0/8"), + resource.TestCheckResourceAttr(resourceName, "description", "Test2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsEc2ManagedPrefixListEntryConfig_description_empty, + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListEntryExists(resourceName, &entry), + checkDescription(""), + resource.TestCheckResourceAttr(resourceName, "cidr_block", "1.0.0.0/8"), + resource.TestCheckResourceAttr(resourceName, "description", ""), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsEc2ManagedPrefixListEntryConfig_description_null, + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListEntryExists(resourceName, &entry), + checkDescription(""), + resource.TestCheckResourceAttr(resourceName, "cidr_block", "1.0.0.0/8"), + resource.TestCheckResourceAttr(resourceName, "description", ""), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +const testAccAwsEc2ManagedPrefixListEntryConfig_description_none = ` +resource "aws_ec2_managed_prefix_list" "test" { + name = "tf-test-acc" + address_family = "IPv4" + max_entries = 5 +} + +resource "aws_ec2_managed_prefix_list_entry" "test" { + prefix_list_id = aws_ec2_managed_prefix_list.test.id + cidr_block = "1.0.0.0/8" + description = "Test1" +} +` + +const testAccAwsEc2ManagedPrefixListEntryConfig_description_some = ` +resource "aws_ec2_managed_prefix_list" "test" { + name = "tf-test-acc" + address_family = "IPv4" + max_entries = 5 +} + +resource "aws_ec2_managed_prefix_list_entry" "test" { + prefix_list_id = aws_ec2_managed_prefix_list.test.id + cidr_block = "1.0.0.0/8" + description = "Test2" +} +` + +const testAccAwsEc2ManagedPrefixListEntryConfig_description_empty = ` +resource "aws_ec2_managed_prefix_list" "test" { + name = "tf-test-acc" + address_family = "IPv4" + max_entries = 5 +} + +resource "aws_ec2_managed_prefix_list_entry" "test" { + prefix_list_id = aws_ec2_managed_prefix_list.test.id + cidr_block = "1.0.0.0/8" + description = "" +} +` + +const testAccAwsEc2ManagedPrefixListEntryConfig_description_null = ` +resource "aws_ec2_managed_prefix_list" "test" { + name = "tf-test-acc" + address_family = "IPv4" + max_entries = 5 +} + +resource "aws_ec2_managed_prefix_list_entry" "test" { + prefix_list_id = aws_ec2_managed_prefix_list.test.id + cidr_block = "1.0.0.0/8" +} +` + +func TestAccAwsEc2ManagedPrefixListEntry_exceedLimit(t *testing.T) { + resourceName := "aws_ec2_managed_prefix_list_entry.test_1" + entry := ec2.PrefixListEntry{} + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEc2ManagedPrefixListEntryConfig_exceedLimit(2), + ResourceName: resourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListEntryExists(resourceName, &entry)), + }, + { + Config: testAccAwsEc2ManagedPrefixListEntryConfig_exceedLimit(3), + 
ResourceName: resourceName, + ExpectError: regexp.MustCompile(`You've reached the maximum number of entries for the prefix list.`), + }, + }, + }) +} + +func testAccAwsEc2ManagedPrefixListEntryConfig_exceedLimit(count int) string { + entries := `` + for i := 0; i < count; i++ { + entries += fmt.Sprintf(` +resource "aws_ec2_managed_prefix_list_entry" "test_%[1]d" { + prefix_list_id = aws_ec2_managed_prefix_list.test.id + cidr_block = "%[1]d.0.0.0/8" + description = "Test_%[1]d" +} +`, + i+1) + } + + return fmt.Sprintf(` +resource "aws_ec2_managed_prefix_list" "test" { + name = "tf-test-acc" + address_family = "IPv4" + max_entries = 2 +} + +%[1]s +`, + entries) +} + +func testAccAwsEc2ManagedPrefixListSortEntries(list []*ec2.PrefixListEntry) { + sort.Slice(list, func(i, j int) bool { + return aws.StringValue(list[i].Cidr) < aws.StringValue(list[j].Cidr) + }) +} + +func TestAccAwsEc2ManagedPrefixListEntry_concurrentModification(t *testing.T) { + prefixListResourceName := "aws_ec2_managed_prefix_list.test" + pl, entries := ec2.ManagedPrefixList{}, []*ec2.PrefixListEntry(nil) + + checkAllEntriesExist := func(prefix string, count int) resource.TestCheckFunc { + return func(state *terraform.State) error { + if len(entries) != count { + return fmt.Errorf("expected %d entries", count) + } + + expectEntries := make([]*ec2.PrefixListEntry, 0, count) + for i := 0; i < count; i++ { + expectEntries = append(expectEntries, &ec2.PrefixListEntry{ + Cidr: aws.String(fmt.Sprintf("%d.0.0.0/8", i+1)), + Description: aws.String(fmt.Sprintf("%s%d", prefix, i+1))}) + } + testAccAwsEc2ManagedPrefixListSortEntries(expectEntries) + + testAccAwsEc2ManagedPrefixListSortEntries(entries) + + if !reflect.DeepEqual(expectEntries, entries) { + return fmt.Errorf("expected entries %#v, got %#v", expectEntries, entries) + } + + return nil + } + } + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEc2ManagedPrefixListEntryConfig_concurrentModification("Step0_", 20), + ResourceName: prefixListResourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListExists(prefixListResourceName, &pl, &entries), + checkAllEntriesExist("Step0_", 20)), + }, + { + // update the first 10 and drop the last 10 + Config: testAccAwsEc2ManagedPrefixListEntryConfig_concurrentModification("Step1_", 10), + ResourceName: prefixListResourceName, + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListExists(prefixListResourceName, &pl, &entries), + checkAllEntriesExist("Step1_", 10)), + }, + }, + }) +} + +func testAccAwsEc2ManagedPrefixListEntryConfig_concurrentModification(prefix string, count int) string { + entries := `` + for i := 0; i < count; i++ { + entries += fmt.Sprintf(` +resource "aws_ec2_managed_prefix_list_entry" "test_%[1]d" { + prefix_list_id = aws_ec2_managed_prefix_list.test.id + cidr_block = "%[1]d.0.0.0/8" + description = "%[2]s%[1]d" +} +`, + i+1, + prefix) + } + + return fmt.Sprintf(` +resource "aws_ec2_managed_prefix_list" "test" { + name = "tf-test-acc" + address_family = "IPv4" + max_entries = 20 +} + +%[1]s +`, + entries) +} diff --git a/website/docs/r/ec2_managed_prefix_list_entry.html.markdown b/website/docs/r/ec2_managed_prefix_list_entry.html.markdown new file mode 100644 index 00000000000..3c2ab6b03bd --- /dev/null +++ b/website/docs/r/ec2_managed_prefix_list_entry.html.markdown @@ -0,0 
+1,66 @@
+---
+subcategory: "VPC"
+layout: "aws"
+page_title: "AWS: aws_ec2_managed_prefix_list_entry"
+description: |-
+  Provides a managed prefix list entry resource.
+---
+
+# Resource: aws_ec2_managed_prefix_list_entry
+
+Provides a managed prefix list entry resource. Represents a single `entry`, which
+can be added to an existing, separately defined Prefix List.
+
+~> **NOTE on Prefix Lists and Prefix List Entries:** Terraform currently
+provides both a standalone Prefix List Entry resource and a [Managed Prefix List resource](ec2_managed_prefix_list.html)
+with an `entry` set defined in-line. At this time you
+cannot use a Prefix List with in-line rules in conjunction with any Prefix List Entry
+resources. Doing so will cause a conflict of rule settings and will unpredictably
+fail or overwrite rules.
+
+~> **NOTE:** A Prefix List has an upper bound (its `max_entries`) on the number
+of entries that it can hold.
+
+~> **NOTE:** Resource creation will fail if the target Prefix List already
+contains an entry for the given CIDR block.
+
+## Example Usage
+
+Basic usage
+
+```hcl
+resource "aws_ec2_managed_prefix_list" "example" {
+  name           = "All VPC CIDR-s"
+  address_family = "IPv4"
+  max_entries    = 5
+}
+
+resource "aws_ec2_managed_prefix_list_entry" "example" {
+  prefix_list_id = aws_ec2_managed_prefix_list.example.id
+  cidr_block     = aws_vpc.example.cidr_block
+  description    = "Primary"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `prefix_list_id` - (Required, Forces new resource) ID of the Prefix List to add this entry to.
+* `cidr_block` - (Required, Forces new resource) The CIDR block to add an entry for. Different entries may have
+  overlapping CIDR blocks, but duplicating a particular block is not allowed.
+* `description` - (Optional, Up to 255 characters) The description of this entry.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - The ID of the prefix list entry.
+
+## Import
+
+Prefix List Entries can be imported using the `prefix_list_id` and `cidr_block`,
+joined by an underscore (`_`). For example:
+
+```console
+$ terraform import aws_ec2_managed_prefix_list_entry.example pl-0570a1d2d725c16be_10.30.0.0/16
+```

From f4eb603feaabdb8053fe4e2ef36e12f23df4e856 Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Wed, 25 Nov 2020 15:30:26 +0200
Subject: [PATCH 0106/1212] add code schema resource

---
 aws/internal/service/glue/finder/finder.go |  14 ++
 aws/internal/service/glue/id.go            |   6 +
 aws/internal/service/glue/waiter/status.go |  17 ++
 aws/internal/service/glue/waiter/waiter.go |  37 ++++
 aws/resource_aws_glue_schema.go            | 236 +++++++++++++++++++++
 5 files changed, 310 insertions(+)
 create mode 100644 aws/resource_aws_glue_schema.go

diff --git a/aws/internal/service/glue/finder/finder.go b/aws/internal/service/glue/finder/finder.go
index 5218f3c531c..9201355a9df 100644
--- a/aws/internal/service/glue/finder/finder.go
+++ b/aws/internal/service/glue/finder/finder.go
@@ -18,3 +18,17 @@ func RegistryByID(conn *glue.Glue, id string) (*glue.GetRegistryOutput, error) {
 
 	return output, nil
 }
+
+// SchemaByID returns the Schema corresponding to the specified ID.
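+// The ID is the schema's ARN, which this resource also stores as its
+// Terraform ID, so callers can pass d.Id() through directly.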
+func SchemaByID(conn *glue.Glue, id string) (*glue.GetSchemaOutput, error) { + input := &glue.GetSchemaInput{ + SchemaId: tfglue.CreateAwsGlueSchemaID(id), + } + + output, err := conn.GetSchema(input) + if err != nil { + return nil, err + } + + return output, nil +} diff --git a/aws/internal/service/glue/id.go b/aws/internal/service/glue/id.go index 3eeada183f3..2bc05fff411 100644 --- a/aws/internal/service/glue/id.go +++ b/aws/internal/service/glue/id.go @@ -38,3 +38,9 @@ func CreateAwsGlueRegistryID(id string) *glue.RegistryId { RegistryArn: aws.String(id), } } + +func CreateAwsGlueSchemaID(id string) *glue.SchemaId { + return &glue.SchemaId{ + SchemaArn: aws.String(id), + } +} diff --git a/aws/internal/service/glue/waiter/status.go b/aws/internal/service/glue/waiter/status.go index 4f653d3fd20..58693961a16 100644 --- a/aws/internal/service/glue/waiter/status.go +++ b/aws/internal/service/glue/waiter/status.go @@ -13,6 +13,7 @@ import ( const ( MLTransformStatusUnknown = "Unknown" RegistryStatusUnknown = "Unknown" + SchemaStatusUnknown = "Unknown" TriggerStatusUnknown = "Unknown" ) @@ -53,6 +54,22 @@ func RegistryStatus(conn *glue.Glue, id string) resource.StateRefreshFunc { } } +// SchemaStatus fetches the Schema and its Status +func SchemaStatus(conn *glue.Glue, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := finder.SchemaByID(conn, id) + if err != nil { + return nil, SchemaStatusUnknown, err + } + + if output == nil { + return output, SchemaStatusUnknown, nil + } + + return output, aws.StringValue(output.SchemaStatus), nil + } +} + // TriggerStatus fetches the Trigger and its Status func TriggerStatus(conn *glue.Glue, triggerName string) resource.StateRefreshFunc { return func() (interface{}, string, error) { diff --git a/aws/internal/service/glue/waiter/waiter.go b/aws/internal/service/glue/waiter/waiter.go index 36772e72261..b9553480f09 100644 --- a/aws/internal/service/glue/waiter/waiter.go +++ b/aws/internal/service/glue/waiter/waiter.go @@ -11,6 +11,7 @@ const ( // Maximum amount of time to wait for an Operation to return Deleted MLTransformDeleteTimeout = 2 * time.Minute RegistryDeleteTimeout = 2 * time.Minute + SchemaDeleteTimeout = 2 * time.Minute TriggerCreateTimeout = 2 * time.Minute TriggerDeleteTimeout = 2 * time.Minute ) @@ -51,6 +52,42 @@ func RegistryDeleted(conn *glue.Glue, registryID string) (*glue.GetRegistryOutpu return nil, err } +// SchemaAvailable waits for a Schema to return Available +func SchemaAvailable(conn *glue.Glue, registryID string) (*glue.GetSchemaOutput, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{glue.SchemaStatusPending}, + Target: []string{glue.SchemaStatusAvailable}, + Refresh: SchemaStatus(conn, registryID), + Timeout: SchemaDeleteTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*glue.GetSchemaOutput); ok { + return output, err + } + + return nil, err +} + +// SchemaDeleted waits for a Schema to return Deleted +func SchemaDeleted(conn *glue.Glue, registryID string) (*glue.GetSchemaOutput, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{glue.SchemaStatusDeleting}, + Target: []string{}, + Refresh: SchemaStatus(conn, registryID), + Timeout: SchemaDeleteTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*glue.GetSchemaOutput); ok { + return output, err + } + + return nil, err +} + // TriggerCreated waits for a Trigger to return Created func TriggerCreated(conn 
*glue.Glue, triggerName string) (*glue.GetTriggerOutput, error) {
 	stateConf := &resource.StateChangeConf{
diff --git a/aws/resource_aws_glue_schema.go b/aws/resource_aws_glue_schema.go
new file mode 100644
index 00000000000..b488aabb543
--- /dev/null
+++ b/aws/resource_aws_glue_schema.go
@@ -0,0 +1,236 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"regexp"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/glue"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+	"github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags"
+	tfglue "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/glue"
+	"github.com/terraform-providers/terraform-provider-aws/aws/internal/service/glue/finder"
+	"github.com/terraform-providers/terraform-provider-aws/aws/internal/service/glue/waiter"
+)
+
+func resourceAwsGlueSchema() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsGlueSchemaCreate,
+		Read:   resourceAwsGlueSchemaRead,
+		Update: resourceAwsGlueSchemaUpdate,
+		Delete: resourceAwsGlueSchemaDelete,
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},
+
+		Schema: map[string]*schema.Schema{
+			"arn": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"description": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ValidateFunc: validation.StringLenBetween(0, 2048),
+			},
+			"registry_arn": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				Computed:     true,
+				ValidateFunc: validateArn,
+			},
+			"registry_name": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"latest_schema_version": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+			"next_schema_version": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+			"compatibility": {
+				Type:         schema.TypeString,
+				Optional:     true,
+				ValidateFunc: validation.StringInSlice(glue.Compatibility_Values(), false),
+			},
+			"data_format": {
+				Type:         schema.TypeString,
+				Required:     true,
+				ValidateFunc: validation.StringInSlice(glue.DataFormat_Values(), false),
+			},
+			"schema_definition": {
+				Type:     schema.TypeString,
+				Required: true,
+				ValidateFunc: validation.All(
+					validation.StringLenBetween(1, 170000),
+					validation.StringMatch(regexp.MustCompile(`.*\S.*`), ""),
+				),
+			},
+			"schema_name": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+				ValidateFunc: validation.All(
+					validation.StringLenBetween(1, 255),
+					validation.StringMatch(regexp.MustCompile(`[a-zA-Z0-9-_$#]+$`), ""),
+				),
+			},
+			"tags": tagsSchema(),
+		},
+	}
+}
+
+func resourceAwsGlueSchemaCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).glueconn
+
+	input := &glue.CreateSchemaInput{
+		SchemaName:       aws.String(d.Get("schema_name").(string)),
+		SchemaDefinition: aws.String(d.Get("schema_definition").(string)),
+		DataFormat:       aws.String(d.Get("data_format").(string)),
+		Tags:             keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().GlueTags(),
+	}
+
+	if v, ok := d.GetOk("registry_arn"); ok {
+		input.RegistryId = tfglue.CreateAwsGlueRegistryID(v.(string))
+	}
+
+	if v, ok := d.GetOk("description"); ok {
+		input.Description = aws.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("compatibility"); ok {
+		input.Compatibility = aws.String(v.(string))
+	}
+
+	log.Printf("[DEBUG] Creating Glue Schema: %s", input)
+	output, err := conn.CreateSchema(input)
+	if err != nil {
+		return fmt.Errorf("error creating Glue Schema: %w", err)
+	}
+	d.SetId(aws.StringValue(output.SchemaArn))
+
+	_,
err = waiter.SchemaAvailable(conn, d.Id()) + if err != nil { + return fmt.Errorf("error waiting for Glue Schema (%s) to be Available: %w", d.Id(), err) + } + + return resourceAwsGlueSchemaRead(d, meta) +} + +func resourceAwsGlueSchemaRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).glueconn + ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig + + output, err := finder.SchemaByID(conn, d.Id()) + if err != nil { + if isAWSErr(err, glue.ErrCodeEntityNotFoundException, "") { + log.Printf("[WARN] Glue Schema (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("error reading Glue Schema (%s): %w", d.Id(), err) + } + + if output == nil { + log.Printf("[WARN] Glue Schema (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + arn := aws.StringValue(output.SchemaArn) + d.Set("arn", arn) + d.Set("description", output.Description) + d.Set("schema_name", output.SchemaName) + d.Set("compatibility", output.Compatibility) + d.Set("data_format", output.DataFormat) + d.Set("latest_schema_version", output.LatestSchemaVersion) + d.Set("next_schema_version", output.NextSchemaVersion) + d.Set("registry_arn", output.RegistryArn) + d.Set("registry_name", output.RegistryName) + + tags, err := keyvaluetags.GlueListTags(conn, arn) + + if err != nil { + return fmt.Errorf("error listing tags for Glue Schema (%s): %w", arn, err) + } + + if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + return fmt.Errorf("error setting tags: %w", err) + } + + return nil +} + +func resourceAwsGlueSchemaUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).glueconn + + if d.HasChanges("description", "") { + input := &glue.UpdateSchemaInput{ + SchemaId: tfglue.CreateAwsGlueSchemaID(d.Id()), + SchemaVersionNumber: &glue.SchemaVersionNumber{ + VersionNumber: aws.Int64(int64(d.Get("next_schema_version").(int))), + LatestVersion: aws.Bool(true), + }, + } + + if v, ok := d.GetOk("description"); ok { + input.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("compatibility"); ok { + input.Compatibility = aws.String(v.(string)) + } + + log.Printf("[DEBUG] Updating Glue Schema: %#v", input) + _, err := conn.UpdateSchema(input) + if err != nil { + return fmt.Errorf("error updating Glue Schema (%s): %w", d.Id(), err) + } + + _, err = waiter.SchemaAvailable(conn, d.Id()) + if err != nil { + return fmt.Errorf("error waiting for Glue Schema (%s) to be Available: %w", d.Id(), err) + } + } + + if d.HasChange("tags") { + o, n := d.GetChange("tags") + if err := keyvaluetags.GlueUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } + } + + return resourceAwsGlueSchemaRead(d, meta) +} + +func resourceAwsGlueSchemaDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).glueconn + + log.Printf("[DEBUG] Deleting Glue Schema: %s", d.Id()) + input := &glue.DeleteSchemaInput{ + SchemaId: tfglue.CreateAwsGlueSchemaID(d.Id()), + } + + _, err := conn.DeleteSchema(input) + if err != nil { + if isAWSErr(err, glue.ErrCodeEntityNotFoundException, "") { + return nil + } + return fmt.Errorf("error deleting Glue Schema (%s): %w", d.Id(), err) + } + + _, err = waiter.SchemaDeleted(conn, d.Id()) + if err != nil { + if isAWSErr(err, glue.ErrCodeEntityNotFoundException, "") { + return nil + } + return fmt.Errorf("error waiting for Glue Schema (%s) to be deleted: %w", d.Id(), err) + } + + 
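// Both the delete call and the SchemaDeleted waiter treat EntityNotFoundException as success,
+	// so reaching this point means the schema and all of its versions are gone.
+	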
return nil
+}

From 111d5b9e7bcad018bce246546a0526a67f6f3378 Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Sun, 6 Dec 2020 10:53:08 +0200
Subject: [PATCH 0107/1212] add schema version stuff

---
 aws/internal/service/glue/finder/finder.go |  18 +
 aws/internal/service/glue/waiter/status.go |  25 +-
 aws/internal/service/glue/waiter/waiter.go |  32 +-
 aws/provider.go                            |   3 +-
 aws/resource_aws_glue_schema.go            |  61 ++-
 aws/resource_aws_glue_schema_test.go       | 419 +++++++++++++++++++++
 6 files changed, 532 insertions(+), 26 deletions(-)
 create mode 100644 aws/resource_aws_glue_schema_test.go

diff --git a/aws/internal/service/glue/finder/finder.go b/aws/internal/service/glue/finder/finder.go
index 9201355a9df..fdceee9166b 100644
--- a/aws/internal/service/glue/finder/finder.go
+++ b/aws/internal/service/glue/finder/finder.go
@@ -1,6 +1,7 @@
 package finder
 
 import (
+	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/glue"
 	tfglue "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/glue"
 )
@@ -32,3 +33,20 @@ func SchemaByID(conn *glue.Glue, id string) (*glue.GetSchemaOutput, error) {
 
 	return output, nil
 }
+
+// SchemaVersionByID returns the latest Schema Version corresponding to the specified Schema ID.
+func SchemaVersionByID(conn *glue.Glue, id string) (*glue.GetSchemaVersionOutput, error) {
+	input := &glue.GetSchemaVersionInput{
+		SchemaId: tfglue.CreateAwsGlueSchemaID(id),
+		SchemaVersionNumber: &glue.SchemaVersionNumber{
+			LatestVersion: aws.Bool(true),
+		},
+	}
+
+	output, err := conn.GetSchemaVersion(input)
+	if err != nil {
+		return nil, err
+	}
+
+	return output, nil
+}
diff --git a/aws/internal/service/glue/waiter/status.go b/aws/internal/service/glue/waiter/status.go
index 58693961a16..ed80b5ed589 100644
--- a/aws/internal/service/glue/waiter/status.go
+++ b/aws/internal/service/glue/waiter/status.go
@@ -11,10 +11,11 @@ import (
 )
 
 const (
-	MLTransformStatusUnknown = "Unknown"
-	RegistryStatusUnknown    = "Unknown"
-	SchemaStatusUnknown      = "Unknown"
-	TriggerStatusUnknown     = "Unknown"
+	MLTransformStatusUnknown   = "Unknown"
+	RegistryStatusUnknown      = "Unknown"
+	SchemaStatusUnknown        = "Unknown"
+	SchemaVersionStatusUnknown = "Unknown"
+	TriggerStatusUnknown       = "Unknown"
 )
 
 // MLTransformStatus fetches the MLTransform and its Status
@@ -53,6 +54,22 @@ func RegistryStatus(conn *glue.Glue, id string) resource.StateRefreshFunc {
 	}
 }
 
+// SchemaVersionStatus fetches the Schema Version and its Status
+func SchemaVersionStatus(conn *glue.Glue, id string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		output, err := finder.SchemaVersionByID(conn, id)
+		if err != nil {
+			return nil, SchemaVersionStatusUnknown, err
+		}
+
+		if output == nil {
+			return output, SchemaVersionStatusUnknown, nil
+		}
+
+		return output, aws.StringValue(output.Status), nil
+	}
+}
+
 // TriggerStatus fetches the Trigger and its Status
 func TriggerStatus(conn *glue.Glue, triggerName string) resource.StateRefreshFunc {
 	return func() (interface{}, string, error) {
diff --git a/aws/internal/service/glue/waiter/waiter.go b/aws/internal/service/glue/waiter/waiter.go
index b9553480f09..aea848fa40e 100644
--- a/aws/internal/service/glue/waiter/waiter.go
+++ b/aws/internal/service/glue/waiter/waiter.go
@@ -9,11 +9,13 @@ import (
 
 const (
 	// Maximum amount of time to wait for an Operation to return Deleted
-	MLTransformDeleteTimeout = 2 * time.Minute
-	RegistryDeleteTimeout    = 2 * time.Minute
-	SchemaDeleteTimeout      = 2 * time.Minute
-	TriggerCreateTimeout     = 2 * time.Minute
-	TriggerDeleteTimeout     = 2 * time.Minute
+	MLTransformDeleteTimeout      = 2 * time.Minute
+	RegistryDeleteTimeout         = 2 * time.Minute
+	SchemaAvailableTimeout        = 2 * time.Minute
+	SchemaDeleteTimeout           = 2 * time.Minute
+	SchemaVersionAvailableTimeout = 2 * time.Minute
+	TriggerCreateTimeout          = 2 * time.Minute
+	TriggerDeleteTimeout          = 2 * time.Minute
 )
 
 // MLTransformDeleted waits for an MLTransform to return Deleted
@@ -58,7 +60,7 @@ func SchemaAvailable(conn *glue.Glue, registryID string) (*glue.GetSchemaOutput,
 		Pending: []string{glue.SchemaStatusPending},
 		Target:  []string{glue.SchemaStatusAvailable},
 		Refresh: SchemaStatus(conn, registryID),
-		Timeout: SchemaDeleteTimeout,
+		Timeout: SchemaAvailableTimeout,
 	}
 
 	outputRaw, err := stateConf.WaitForState()
@@ -88,6 +90,24 @@ func SchemaDeleted(conn *glue.Glue, registryID string) (*glue.GetSchemaOutput, e
 	return nil, err
 }
 
+// SchemaVersionAvailable waits for a Schema Version to return Available
+func SchemaVersionAvailable(conn *glue.Glue, registryID string) (*glue.GetSchemaVersionOutput, error) {
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{glue.SchemaVersionStatusPending},
+		Target:  []string{glue.SchemaVersionStatusAvailable},
+		Refresh: SchemaVersionStatus(conn, registryID),
+		Timeout: SchemaVersionAvailableTimeout,
+	}
+
+	outputRaw, err := stateConf.WaitForState()
+
+	if output, ok := outputRaw.(*glue.GetSchemaVersionOutput); ok {
+		return output, err
+	}
+
+	return nil, err
+}
+
 // TriggerCreated waits for a Trigger to return Created
 func TriggerCreated(conn *glue.Glue, triggerName string) (*glue.GetTriggerOutput, error) {
 	stateConf := &resource.StateChangeConf{
diff --git a/aws/provider.go b/aws/provider.go
index 79d050c6a0a..7f56d5f9c18 100644
--- a/aws/provider.go
+++ b/aws/provider.go
@@ -674,11 +674,12 @@ func Provider() *schema.Provider {
 			"aws_glue_ml_transform":           resourceAwsGlueMLTransform(),
 			"aws_glue_partition":              resourceAwsGluePartition(),
 			"aws_glue_registry":               resourceAwsGlueRegistry(),
+			"aws_glue_resource_policy":        resourceAwsGlueResourcePolicy(),
+			"aws_glue_schema":                 resourceAwsGlueSchema(),
 			"aws_glue_security_configuration": resourceAwsGlueSecurityConfiguration(),
 			"aws_glue_trigger":                resourceAwsGlueTrigger(),
 			"aws_glue_user_defined_function":  resourceAwsGlueUserDefinedFunction(),
 			"aws_glue_workflow":               resourceAwsGlueWorkflow(),
-			"aws_glue_resource_policy":        resourceAwsGlueResourcePolicy(),
 			"aws_guardduty_detector":          resourceAwsGuardDutyDetector(),
 			"aws_guardduty_filter":            resourceAwsGuardDutyFilter(),
 			"aws_guardduty_invite_accepter":   resourceAwsGuardDutyInviteAccepter(),
diff --git a/aws/resource_aws_glue_schema.go b/aws/resource_aws_glue_schema.go
index b488aabb543..ea7fb8c4253 100644
--- a/aws/resource_aws_glue_schema.go
+++ b/aws/resource_aws_glue_schema.go
@@ -53,9 +53,13 @@ func resourceAwsGlueSchema() *schema.Resource {
 				Type:     schema.TypeInt,
 				Computed: true,
 			},
+			"schema_checkpoint": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
 			"compatibility": {
 				Type:         schema.TypeString,
-				Optional:     true,
+				Required:     true,
 				ValidateFunc: validation.StringInSlice(glue.Compatibility_Values(), false),
 			},
 			"data_format": {
@@ -152,6 +156,7 @@ func resourceAwsGlueSchemaRead(d *schema.ResourceData, meta interface{}) error {
 	d.Set("next_schema_version", output.NextSchemaVersion)
 	d.Set("registry_arn", output.RegistryArn)
 	d.Set("registry_name", output.RegistryName)
+	d.Set("schema_checkpoint", output.SchemaCheckpoint)
 
 	tags, err := keyvaluetags.GlueListTags(conn, arn)
 
@@ -163,29 +168,38 @@ func resourceAwsGlueSchemaRead(d *schema.ResourceData, meta interface{}) error {
 	return
fmt.Errorf("error setting tags: %w", err) } + schemeDefOutput, err := finder.SchemaVersionByID(conn, d.Id()) + if err != nil { + return fmt.Errorf("error reading Glue Schema Definition (%s): %w", d.Id(), err) + } + + d.Set("schema_definiton", schemeDefOutput.SchemaDefinition) + return nil } func resourceAwsGlueSchemaUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).glueconn - if d.HasChanges("description", "") { - input := &glue.UpdateSchemaInput{ - SchemaId: tfglue.CreateAwsGlueSchemaID(d.Id()), - SchemaVersionNumber: &glue.SchemaVersionNumber{ - VersionNumber: aws.Int64(int64(d.Get("next_schema_version").(int))), - LatestVersion: aws.Bool(true), - }, - } + input := &glue.UpdateSchemaInput{ + SchemaId: tfglue.CreateAwsGlueSchemaID(d.Id()), + SchemaVersionNumber: &glue.SchemaVersionNumber{ + LatestVersion: aws.Bool(true), + }, + } + update := false - if v, ok := d.GetOk("description"); ok { - input.Description = aws.String(v.(string)) - } + if d.HasChange("description") { + input.Description = aws.String(d.Get("description").(string)) + update = true + } - if v, ok := d.GetOk("compatibility"); ok { - input.Compatibility = aws.String(v.(string)) - } + if d.HasChange("compatibility") { + input.Compatibility = aws.String(d.Get("compatibility").(string)) + update = true + } + if update { log.Printf("[DEBUG] Updating Glue Schema: %#v", input) _, err := conn.UpdateSchema(input) if err != nil { @@ -205,6 +219,23 @@ func resourceAwsGlueSchemaUpdate(d *schema.ResourceData, meta interface{}) error } } + if d.HasChange("schema_definiton") { + defInput := &glue.RegisterSchemaVersionInput{ + SchemaId: tfglue.CreateAwsGlueSchemaID(d.Id()), + SchemaDefinition: aws.String(d.Get("schema_definiton").(string)), + } + + _, err := conn.RegisterSchemaVersion(defInput) + if err != nil { + return fmt.Errorf("error updating Glue Schema Definition (%s): %w", d.Id(), err) + } + + _, err = waiter.SchemaVersionAvailable(conn, d.Id()) + if err != nil { + return fmt.Errorf("error waiting for Glue Schema Version (%s) to be Available: %w", d.Id(), err) + } + } + return resourceAwsGlueSchemaRead(d, meta) } diff --git a/aws/resource_aws_glue_schema_test.go b/aws/resource_aws_glue_schema_test.go new file mode 100644 index 00000000000..a17f272529b --- /dev/null +++ b/aws/resource_aws_glue_schema_test.go @@ -0,0 +1,419 @@ +package aws + +import ( + "fmt" + "log" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/glue" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/glue/finder" +) + +func init() { + resource.AddTestSweepers("aws_glue_schema", &resource.Sweeper{ + Name: "aws_glue_schema", + F: testSweepGlueSchema, + }) +} + +func testSweepGlueSchema(region string) error { + client, err := sharedClientForRegion(region) + if err != nil { + return fmt.Errorf("error getting client: %s", err) + } + conn := client.(*AWSClient).glueconn + + listOutput, err := conn.ListSchemas(&glue.ListSchemasInput{}) + if err != nil { + // Some endpoints that do not support Glue Schemas return InternalFailure + if testSweepSkipSweepError(err) || isAWSErr(err, "InternalFailure", "") { + log.Printf("[WARN] Skipping Glue Schema sweep for %s: %s", region, err) + return nil + } + return fmt.Errorf("Error retrieving Glue Schema: %s", err) + } + for _, schema := range 
listOutput.Schemas { + arn := aws.StringValue(schema.SchemaArn) + r := resourceAwsGlueSchema() + d := r.Data(nil) + d.SetId(arn) + + err := r.Delete(d, client) + if err != nil { + log.Printf("[ERROR] Failed to delete Glue Schema %s: %s", arn, err) + } + } + return nil +} + +func TestAccAWSGlueSchema_basic(t *testing.T) { + var schema glue.GetSchemaOutput + + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_glue_schema.test" + registryResourceName := "aws_glue_registry.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSGlueSchema(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSGlueSchemaDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSGlueSchemaBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSGlueSchemaExists(resourceName, &schema), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "glue", fmt.Sprintf("schema/%s/%s", rName, rName)), + resource.TestCheckResourceAttr(resourceName, "schema_name", rName), + resource.TestCheckResourceAttr(resourceName, "description", ""), + resource.TestCheckResourceAttr(resourceName, "compatibility", "NONE"), + resource.TestCheckResourceAttr(resourceName, "data_format", "AVRO"), + resource.TestCheckResourceAttr(resourceName, "schema_checkpoint", "1"), + resource.TestCheckResourceAttr(resourceName, "latest_schema_version", "1"), + resource.TestCheckResourceAttr(resourceName, "next_schema_version", "2"), + resource.TestCheckResourceAttr(resourceName, "schema_definiton", "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}"), + resource.TestCheckResourceAttrPair(resourceName, "registry_name", registryResourceName, "registry_name"), + resource.TestCheckResourceAttrPair(resourceName, "registry_arn", registryResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSGlueSchema_description(t *testing.T) { + var schema glue.GetSchemaOutput + + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_glue_schema.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSGlueSchema(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSGlueSchemaDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSGlueSchemaDescriptionConfig(rName, "First Description"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSGlueSchemaExists(resourceName, &schema), + resource.TestCheckResourceAttr(resourceName, "description", "First Description"), + ), + }, + { + Config: testAccAWSGlueSchemaDescriptionConfig(rName, "Second Description"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSGlueSchemaExists(resourceName, &schema), + resource.TestCheckResourceAttr(resourceName, "description", "Second Description"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSGlueSchema_compatibility(t *testing.T) { + var schema glue.GetSchemaOutput + + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_glue_schema.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSGlueSchema(t) }, + Providers: testAccProviders, + CheckDestroy: 
testAccCheckAWSGlueSchemaDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSGlueSchemaCompatibillityConfig(rName, "DISABLED"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSGlueSchemaExists(resourceName, &schema), + resource.TestCheckResourceAttr(resourceName, "compatibility", "DISABLED"), + ), + }, + { + Config: testAccAWSGlueSchemaCompatibillityConfig(rName, "FULL"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSGlueSchemaExists(resourceName, &schema), + resource.TestCheckResourceAttr(resourceName, "compatibility", "FULL"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSGlueSchema_tags(t *testing.T) { + var schema glue.GetSchemaOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_glue_schema.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSGlueSchema(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSGlueSchemaDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSGlueSchemaConfigTags1(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSGlueSchemaExists(resourceName, &schema), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSGlueSchemaConfigTags2(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSGlueSchemaExists(resourceName, &schema), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccAWSGlueSchemaConfigTags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSGlueSchemaExists(resourceName, &schema), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + +func TestAccAWSGlueSchema_schemaDefUpdated(t *testing.T) { + var schema glue.GetSchemaOutput + + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_glue_schema.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSGlueSchema(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSGlueSchemaDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSGlueSchemaBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSGlueSchemaExists(resourceName, &schema), + resource.TestCheckResourceAttr(resourceName, "schema_definiton", "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}"), + ), + }, + { + Config: testAccAWSGlueSchemaConfigSchemaDefinitionUpdated(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSGlueSchemaExists(resourceName, &schema), + resource.TestCheckResourceAttr(resourceName, "schema_definiton", "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"string\"}, {\"name\": \"f2\", \"type\": \"int\"} ]}"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
TestAccAWSGlueSchema_disappears(t *testing.T) { + var schema glue.GetSchemaOutput + + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_glue_schema.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSGlueSchema(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSGlueSchemaDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSGlueSchemaBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSGlueSchemaExists(resourceName, &schema), + testAccCheckResourceDisappears(testAccProvider, resourceAwsGlueSchema(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccPreCheckAWSGlueSchema(t *testing.T) { + conn := testAccProvider.Meta().(*AWSClient).glueconn + + _, err := conn.ListRegistries(&glue.ListRegistriesInput{}) + + // Some endpoints that do not support Glue Schemas return InternalFailure + if testAccPreCheckSkipError(err) || isAWSErr(err, "InternalFailure", "") { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +func testAccCheckAWSGlueSchemaExists(resourceName string, schema *glue.GetSchemaOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Glue Schema ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).glueconn + output, err := finder.SchemaByID(conn, rs.Primary.ID) + if err != nil { + return err + } + + if output == nil { + return fmt.Errorf("Glue Schema (%s) not found", rs.Primary.ID) + } + + if aws.StringValue(output.SchemaArn) == rs.Primary.ID { + *schema = *output + return nil + } + + return fmt.Errorf("Glue Schema (%s) not found", rs.Primary.ID) + } +} + +func testAccCheckAWSGlueSchemaDestroy(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_glue_schema" { + continue + } + + conn := testAccProvider.Meta().(*AWSClient).glueconn + output, err := finder.SchemaByID(conn, rs.Primary.ID) + if err != nil { + if isAWSErr(err, glue.ErrCodeEntityNotFoundException, "") { + return nil + } + + } + + if output != nil && aws.StringValue(output.SchemaArn) == rs.Primary.ID { + return fmt.Errorf("Glue Schema %s still exists", rs.Primary.ID) + } + + return err + } + + return nil +} + +func testAccAWSGlueSchemaBase(rName string) string { + return fmt.Sprintf(` +resource "aws_glue_registry" "test" { + registry_name = %[1]q +} +`, rName) +} + +func testAccAWSGlueSchemaDescriptionConfig(rName, description string) string { + return testAccAWSGlueSchemaBase(rName) + fmt.Sprintf(` +resource "aws_glue_schema" "test" { + schema_name = %[1]q + registry_arn = aws_glue_registry.test.arn + data_format = "AVRO" + compatibility = "NONE" + description = %[2]q + schema_definiton = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" +} +`, rName, description) +} + +func testAccAWSGlueSchemaCompatibillityConfig(rName, compat string) string { + return testAccAWSGlueSchemaBase(rName) + fmt.Sprintf(` +resource "aws_glue_schema" "test" { + schema_name = %[1]q + registry_arn = aws_glue_registry.test.arn + data_format = "AVRO" + compatibility = %[2]q + schema_definiton = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": 
\"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" +} +`, rName, compat) +} + +func testAccAWSGlueSchemaBasicConfig(rName string) string { + return testAccAWSGlueSchemaBase(rName) + fmt.Sprintf(` +resource "aws_glue_schema" "test" { + schema_name = %[1]q + registry_arn = aws_glue_registry.test.arn + data_format = "AVRO" + compatibility = "NONE" + schema_definiton = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" +} +`, rName) +} + +func testAccAWSGlueSchemaConfigTags1(rName, tagKey1, tagValue1 string) string { + return testAccAWSGlueSchemaBase(rName) + fmt.Sprintf(` +resource "aws_glue_schema" "test" { + schema_name = %[1]q + registry_arn = aws_glue_registry.test.arn + data_format = "AVRO" + compatibility = "NONE" + schema_definiton = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1) +} + +func testAccAWSGlueSchemaConfigTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return testAccAWSGlueSchemaBase(rName) + fmt.Sprintf(` +resource "aws_glue_schema" "test" { + schema_name = %[1]q + registry_arn = aws_glue_registry.test.arn + data_format = "AVRO" + compatibility = "NONE" + schema_definiton = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2) +} + +func testAccAWSGlueSchemaConfigSchemaDefinitionUpdated(rName string) string { + return testAccAWSGlueSchemaBase(rName) + fmt.Sprintf(` +resource "aws_glue_schema" "test" { + schema_name = %[1]q + registry_arn = aws_glue_registry.test.arn + data_format = "AVRO" + compatibility = "NONE" + schema_definiton = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"string\"}, {\"name\": \"f2\", \"type\": \"int\"} ]}" +} +`, rName) +} From ad1dce72b45b5cfe8af77cbe51d3d6ecb618d5a5 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 6 Dec 2020 12:56:05 +0200 Subject: [PATCH 0108/1212] add parent disappears test --- aws/resource_aws_glue_schema_test.go | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/aws/resource_aws_glue_schema_test.go b/aws/resource_aws_glue_schema_test.go index a17f272529b..46125b8fbaa 100644 --- a/aws/resource_aws_glue_schema_test.go +++ b/aws/resource_aws_glue_schema_test.go @@ -258,6 +258,29 @@ func TestAccAWSGlueSchema_disappears(t *testing.T) { }) } +func TestAccAWSGlueSchema_disappears_registry(t *testing.T) { + var schema glue.GetSchemaOutput + + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_glue_schema.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSGlueSchema(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSGlueSchemaDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSGlueSchemaBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSGlueSchemaExists(resourceName, &schema), + testAccCheckResourceDisappears(testAccProvider, resourceAwsGlueRegistry(), "aws_glue_registry.test"), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func testAccPreCheckAWSGlueSchema(t *testing.T) { conn := testAccProvider.Meta().(*AWSClient).glueconn From 0e90f9998f33468b42c5b1b5c6413159ded6f4e1 
Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 6 Dec 2020 13:56:10 +0200 Subject: [PATCH 0109/1212] misspell --- aws/resource_aws_glue_schema.go | 10 +++++----- aws/resource_aws_glue_schema_test.go | 18 +++++++++--------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/aws/resource_aws_glue_schema.go b/aws/resource_aws_glue_schema.go index ea7fb8c4253..5e3d796289d 100644 --- a/aws/resource_aws_glue_schema.go +++ b/aws/resource_aws_glue_schema.go @@ -67,7 +67,7 @@ func resourceAwsGlueSchema() *schema.Resource { Required: true, ValidateFunc: validation.StringInSlice(glue.DataFormat_Values(), false), }, - "schema_definiton": { + "schema_definition": { Type: schema.TypeString, Required: true, ValidateFunc: validation.All( @@ -94,7 +94,7 @@ func resourceAwsGlueSchemaCreate(d *schema.ResourceData, meta interface{}) error input := &glue.CreateSchemaInput{ SchemaName: aws.String(d.Get("schema_name").(string)), - SchemaDefinition: aws.String(d.Get("schema_definiton").(string)), + SchemaDefinition: aws.String(d.Get("schema_definition").(string)), DataFormat: aws.String(d.Get("data_format").(string)), Tags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().GlueTags(), } @@ -173,7 +173,7 @@ func resourceAwsGlueSchemaRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("error reading Glue Schema Definition (%s): %w", d.Id(), err) } - d.Set("schema_definiton", schemeDefOutput.SchemaDefinition) + d.Set("schema_definition", schemeDefOutput.SchemaDefinition) return nil } @@ -219,10 +219,10 @@ func resourceAwsGlueSchemaUpdate(d *schema.ResourceData, meta interface{}) error } } - if d.HasChange("schema_definiton") { + if d.HasChange("schema_definition") { defInput := &glue.RegisterSchemaVersionInput{ SchemaId: tfglue.CreateAwsGlueSchemaID(d.Id()), - SchemaDefinition: aws.String(d.Get("schema_definiton").(string)), + SchemaDefinition: aws.String(d.Get("schema_definition").(string)), } _, err := conn.RegisterSchemaVersion(defInput) diff --git a/aws/resource_aws_glue_schema_test.go b/aws/resource_aws_glue_schema_test.go index 46125b8fbaa..3e018320618 100644 --- a/aws/resource_aws_glue_schema_test.go +++ b/aws/resource_aws_glue_schema_test.go @@ -74,7 +74,7 @@ func TestAccAWSGlueSchema_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "schema_checkpoint", "1"), resource.TestCheckResourceAttr(resourceName, "latest_schema_version", "1"), resource.TestCheckResourceAttr(resourceName, "next_schema_version", "2"), - resource.TestCheckResourceAttr(resourceName, "schema_definiton", "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}"), + resource.TestCheckResourceAttr(resourceName, "schema_definition", "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}"), resource.TestCheckResourceAttrPair(resourceName, "registry_name", registryResourceName, "registry_name"), resource.TestCheckResourceAttrPair(resourceName, "registry_arn", registryResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), @@ -216,14 +216,14 @@ func TestAccAWSGlueSchema_schemaDefUpdated(t *testing.T) { Config: testAccAWSGlueSchemaBasicConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSGlueSchemaExists(resourceName, &schema), - resource.TestCheckResourceAttr(resourceName, "schema_definiton", "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", 
\"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}"), + resource.TestCheckResourceAttr(resourceName, "schema_definition", "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}"), ), }, { Config: testAccAWSGlueSchemaConfigSchemaDefinitionUpdated(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSGlueSchemaExists(resourceName, &schema), - resource.TestCheckResourceAttr(resourceName, "schema_definiton", "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"string\"}, {\"name\": \"f2\", \"type\": \"int\"} ]}"), + resource.TestCheckResourceAttr(resourceName, "schema_definition", "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"string\"}, {\"name\": \"f2\", \"type\": \"int\"} ]}"), ), }, { @@ -367,7 +367,7 @@ resource "aws_glue_schema" "test" { data_format = "AVRO" compatibility = "NONE" description = %[2]q - schema_definiton = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" + schema_definition = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" } `, rName, description) } @@ -379,7 +379,7 @@ resource "aws_glue_schema" "test" { registry_arn = aws_glue_registry.test.arn data_format = "AVRO" compatibility = %[2]q - schema_definiton = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" + schema_definition = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" } `, rName, compat) } @@ -391,7 +391,7 @@ resource "aws_glue_schema" "test" { registry_arn = aws_glue_registry.test.arn data_format = "AVRO" compatibility = "NONE" - schema_definiton = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" + schema_definition = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" } `, rName) } @@ -403,7 +403,7 @@ resource "aws_glue_schema" "test" { registry_arn = aws_glue_registry.test.arn data_format = "AVRO" compatibility = "NONE" - schema_definiton = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" + schema_definition = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" tags = { %[2]q = %[3]q @@ -419,7 +419,7 @@ resource "aws_glue_schema" "test" { registry_arn = aws_glue_registry.test.arn data_format = "AVRO" compatibility = "NONE" - schema_definiton = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" + schema_definition = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" tags = { %[2]q = %[3]q @@ -436,7 +436,7 @@ resource "aws_glue_schema" "test" { registry_arn = aws_glue_registry.test.arn data_format = "AVRO" compatibility = "NONE" - schema_definiton = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"string\"}, {\"name\": 
\"f2\", \"type\": \"int\"} ]}" + schema_definition = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"string\"}, {\"name\": \"f2\", \"type\": \"int\"} ]}" } `, rName) } From 002136b2f567a02b647d0bdf8410d09dd4146277 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 6 Dec 2020 14:10:59 +0200 Subject: [PATCH 0110/1212] add docs --- website/docs/r/glue_schema.html.markdown | 54 ++++++++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100644 website/docs/r/glue_schema.html.markdown diff --git a/website/docs/r/glue_schema.html.markdown b/website/docs/r/glue_schema.html.markdown new file mode 100644 index 00000000000..f845904e46f --- /dev/null +++ b/website/docs/r/glue_schema.html.markdown @@ -0,0 +1,54 @@ +--- +subcategory: "Glue" +layout: "aws" +page_title: "AWS: aws_glue_schema" +description: |- + Provides a Glue Schema resource. +--- + +# Resource: aws_glue_schema + +Provides a Glue Schema resource. + +## Example Usage + +```hcl +resource "aws_glue_schema" "example" { + schema_name = "example" + registry_arn = aws_glue_registry.test.arn + data_format = "AVRO" + compatibility = "NONE" + schema_definition = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `schema_name` – (Required) The Name of the schema. +* `registry_arn` - (Required) The ARN of the Glue Registry to create the schema in. +* `data_format` - (Required) The data format of the schema definition. Currently only `AVRO` is supported. +* `compatibility` - (Required) The compatibility mode of the schema. Values values are: `NONE`, `DISABLED`, `BACKWARD`, `BACKWARD_ALL`, `FORWARD`, `FORWARD_ALL`, `FULL`, and `FULL_ALL`. +* `schema_definition` - (Required) The schema definition using the `data_format` setting for `schema_name`. +* `description` – (Optional) A description of the schema. +* `tags` - (Optional) Key-value map of resource tags + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `arn` - Amazon Resource Name (ARN) of Glue Schema. +* `id` - Amazon Resource Name (ARN) of Glue Schema. +* `registry_name` - The name of the registry. +* `latest_schema_version` - The latest version of the schema associated with the returned schema definition. +* `next_schema_version` - The next version of the schema associated with the returned schema definition. +* `schema_checkpoint` - The version number of the checkpoint (the last time the compatibility mode was changed). + +## Import + +Glue Registries can be imported using `arn`, e.g. 
+
+```
+$ terraform import aws_glue_schema.example arn:aws:glue:us-west-2:123456789012:schema/example/example
+```

From 008cbbf5e7151e822de908dd59f3ec14018e8552 Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Sun, 6 Dec 2020 14:11:10 +0200
Subject: [PATCH 0111/1212] add checks for version updates

---
 aws/resource_aws_glue_schema_test.go | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/aws/resource_aws_glue_schema_test.go b/aws/resource_aws_glue_schema_test.go
index 3e018320618..fe95c0190b3 100644
--- a/aws/resource_aws_glue_schema_test.go
+++ b/aws/resource_aws_glue_schema_test.go
@@ -139,6 +139,7 @@ func TestAccAWSGlueSchema_compatibility(t *testing.T) {
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSGlueSchemaExists(resourceName, &schema),
 					resource.TestCheckResourceAttr(resourceName, "compatibility", "DISABLED"),
+					resource.TestCheckResourceAttr(resourceName, "schema_checkpoint", "1"),
 				),
 			},
 			{
@@ -146,6 +147,7 @@ func TestAccAWSGlueSchema_compatibility(t *testing.T) {
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSGlueSchemaExists(resourceName, &schema),
 					resource.TestCheckResourceAttr(resourceName, "compatibility", "FULL"),
+					resource.TestCheckResourceAttr(resourceName, "schema_checkpoint", "2"),
 				),
 			},
 			{
@@ -217,6 +219,8 @@ func TestAccAWSGlueSchema_schemaDefUpdated(t *testing.T) {
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSGlueSchemaExists(resourceName, &schema),
 					resource.TestCheckResourceAttr(resourceName, "schema_definition", "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}"),
+					resource.TestCheckResourceAttr(resourceName, "latest_schema_version", "1"),
+					resource.TestCheckResourceAttr(resourceName, "next_schema_version", "2"),
 				),
 			},
 			{
@@ -224,6 +228,8 @@ func TestAccAWSGlueSchema_schemaDefUpdated(t *testing.T) {
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSGlueSchemaExists(resourceName, &schema),
 					resource.TestCheckResourceAttr(resourceName, "schema_definition", "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"string\"}, {\"name\": \"f2\", \"type\": \"int\"} ]}"),
+					resource.TestCheckResourceAttr(resourceName, "latest_schema_version", "2"),
+					resource.TestCheckResourceAttr(resourceName, "next_schema_version", "3"),
 				),
 			},
 			{

From 21db2d19a13d29fc734f73baa82383b1575ede18 Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Sun, 6 Dec 2020 14:17:33 +0200
Subject: [PATCH 0112/1212] remove check

---
 aws/resource_aws_glue_schema_test.go | 2 --
 1 file changed, 2 deletions(-)

diff --git a/aws/resource_aws_glue_schema_test.go b/aws/resource_aws_glue_schema_test.go
index fe95c0190b3..811ffb19428 100644
--- a/aws/resource_aws_glue_schema_test.go
+++ b/aws/resource_aws_glue_schema_test.go
@@ -139,7 +139,6 @@ func TestAccAWSGlueSchema_compatibility(t *testing.T) {
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSGlueSchemaExists(resourceName, &schema),
 					resource.TestCheckResourceAttr(resourceName, "compatibility", "DISABLED"),
-					resource.TestCheckResourceAttr(resourceName, "schema_checkpoint", "1"),
 				),
 			},
 			{
@@ -146,7 +146,6 @@ func TestAccAWSGlueSchema_compatibility(t *testing.T) {
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSGlueSchemaExists(resourceName, &schema),
 					resource.TestCheckResourceAttr(resourceName, "compatibility", "FULL"),
-					resource.TestCheckResourceAttr(resourceName, "schema_checkpoint", "2"),
 				),
 			},
 			{

From 73c0a9de3c7740174958a3a40689d4141202534f Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Sun, 6 Dec 2020 14:19:49
+0200 Subject: [PATCH 0113/1212] fmt --- aws/resource_aws_glue_schema_test.go | 40 ++++++++++++++-------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/aws/resource_aws_glue_schema_test.go b/aws/resource_aws_glue_schema_test.go index 811ffb19428..2d21dfe684d 100644 --- a/aws/resource_aws_glue_schema_test.go +++ b/aws/resource_aws_glue_schema_test.go @@ -379,10 +379,10 @@ resource "aws_glue_schema" "test" { func testAccAWSGlueSchemaCompatibillityConfig(rName, compat string) string { return testAccAWSGlueSchemaBase(rName) + fmt.Sprintf(` resource "aws_glue_schema" "test" { - schema_name = %[1]q - registry_arn = aws_glue_registry.test.arn - data_format = "AVRO" - compatibility = %[2]q + schema_name = %[1]q + registry_arn = aws_glue_registry.test.arn + data_format = "AVRO" + compatibility = %[2]q schema_definition = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" } `, rName, compat) @@ -391,10 +391,10 @@ resource "aws_glue_schema" "test" { func testAccAWSGlueSchemaBasicConfig(rName string) string { return testAccAWSGlueSchemaBase(rName) + fmt.Sprintf(` resource "aws_glue_schema" "test" { - schema_name = %[1]q - registry_arn = aws_glue_registry.test.arn - data_format = "AVRO" - compatibility = "NONE" + schema_name = %[1]q + registry_arn = aws_glue_registry.test.arn + data_format = "AVRO" + compatibility = "NONE" schema_definition = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" } `, rName) @@ -403,10 +403,10 @@ resource "aws_glue_schema" "test" { func testAccAWSGlueSchemaConfigTags1(rName, tagKey1, tagValue1 string) string { return testAccAWSGlueSchemaBase(rName) + fmt.Sprintf(` resource "aws_glue_schema" "test" { - schema_name = %[1]q - registry_arn = aws_glue_registry.test.arn - data_format = "AVRO" - compatibility = "NONE" + schema_name = %[1]q + registry_arn = aws_glue_registry.test.arn + data_format = "AVRO" + compatibility = "NONE" schema_definition = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" tags = { @@ -419,10 +419,10 @@ resource "aws_glue_schema" "test" { func testAccAWSGlueSchemaConfigTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { return testAccAWSGlueSchemaBase(rName) + fmt.Sprintf(` resource "aws_glue_schema" "test" { - schema_name = %[1]q - registry_arn = aws_glue_registry.test.arn - data_format = "AVRO" - compatibility = "NONE" + schema_name = %[1]q + registry_arn = aws_glue_registry.test.arn + data_format = "AVRO" + compatibility = "NONE" schema_definition = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" tags = { @@ -436,10 +436,10 @@ resource "aws_glue_schema" "test" { func testAccAWSGlueSchemaConfigSchemaDefinitionUpdated(rName string) string { return testAccAWSGlueSchemaBase(rName) + fmt.Sprintf(` resource "aws_glue_schema" "test" { - schema_name = %[1]q - registry_arn = aws_glue_registry.test.arn - data_format = "AVRO" - compatibility = "NONE" + schema_name = %[1]q + registry_arn = aws_glue_registry.test.arn + data_format = "AVRO" + compatibility = "NONE" schema_definition = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"string\"}, {\"name\": \"f2\", \"type\": \"int\"} ]}" } `, rName) From 
af01f6d909a4fa5b284e851a4500db93357e3a95 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 6 Dec 2020 14:22:49 +0200 Subject: [PATCH 0114/1212] fmt --- aws/resource_aws_glue_schema_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/aws/resource_aws_glue_schema_test.go b/aws/resource_aws_glue_schema_test.go index 2d21dfe684d..706a5f6d481 100644 --- a/aws/resource_aws_glue_schema_test.go +++ b/aws/resource_aws_glue_schema_test.go @@ -366,11 +366,11 @@ resource "aws_glue_registry" "test" { func testAccAWSGlueSchemaDescriptionConfig(rName, description string) string { return testAccAWSGlueSchemaBase(rName) + fmt.Sprintf(` resource "aws_glue_schema" "test" { - schema_name = %[1]q - registry_arn = aws_glue_registry.test.arn - data_format = "AVRO" - compatibility = "NONE" - description = %[2]q + schema_name = %[1]q + registry_arn = aws_glue_registry.test.arn + data_format = "AVRO" + compatibility = "NONE" + description = %[2]q schema_definition = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" } `, rName, description) From 369785787fa10b5efbab67006590eb0052ff5a3e Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 6 Dec 2020 14:30:46 +0200 Subject: [PATCH 0115/1212] doc fmt --- website/docs/r/glue_schema.html.markdown | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/docs/r/glue_schema.html.markdown b/website/docs/r/glue_schema.html.markdown index f845904e46f..10f303e73f2 100644 --- a/website/docs/r/glue_schema.html.markdown +++ b/website/docs/r/glue_schema.html.markdown @@ -14,10 +14,10 @@ Provides a Glue Schema resource. ```hcl resource "aws_glue_schema" "example" { - schema_name = "example" - registry_arn = aws_glue_registry.test.arn - data_format = "AVRO" - compatibility = "NONE" + schema_name = "example" + registry_arn = aws_glue_registry.test.arn + data_format = "AVRO" + compatibility = "NONE" schema_definition = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" } ``` From e6d74f22df4f54de5f90867d0131068b77bb2644 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Sun, 6 Dec 2020 09:46:05 -0500 Subject: [PATCH 0116/1212] for cognito user pools type, set authcred via PatchOperation --- aws/resource_aws_api_gateway_authorizer.go | 29 +++++++- ...esource_aws_api_gateway_authorizer_test.go | 73 +++++++++++++++++++ 2 files changed, 101 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_api_gateway_authorizer.go b/aws/resource_aws_api_gateway_authorizer.go index aef14d5540c..c48f7d8d36e 100644 --- a/aws/resource_aws_api_gateway_authorizer.go +++ b/aws/resource_aws_api_gateway_authorizer.go @@ -93,6 +93,7 @@ func resourceAwsApiGatewayAuthorizer() *schema.Resource { func resourceAwsApiGatewayAuthorizerCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).apigatewayconn + var postCreateOps []*apigateway.PatchOperation input := apigateway.CreateAuthorizerInput{ IdentitySource: aws.String(d.Get("identity_source").(string)), @@ -109,7 +110,19 @@ func resourceAwsApiGatewayAuthorizerCreate(d *schema.ResourceData, meta interfac input.AuthorizerUri = aws.String(v.(string)) } if v, ok := d.GetOk("authorizer_credentials"); ok { - input.AuthorizerCredentials = aws.String(v.(string)) + // While the CreateAuthorizer method allows one to pass AuthorizerCredentials + // regardless of authorizer Type, the API ignores this setting if 
the authorizer + // is of Type "COGNITO_USER_POOLS"; thus, a PatchOperation is used as an alternative. + // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16613 + if aws.StringValue(input.Type) != apigateway.AuthorizerTypeCognitoUserPools { + input.AuthorizerCredentials = aws.String(v.(string)) + } else { + postCreateOps = append(postCreateOps, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpReplace), + Path: aws.String("/authorizerCredentials"), + Value: aws.String(v.(string)), + }) + } } if v, ok := d.GetOk("identity_validation_expression"); ok { @@ -127,6 +140,20 @@ func resourceAwsApiGatewayAuthorizerCreate(d *schema.ResourceData, meta interfac d.SetId(aws.StringValue(out.Id)) + if postCreateOps != nil { + input := apigateway.UpdateAuthorizerInput{ + AuthorizerId: aws.String(d.Id()), + PatchOperations: postCreateOps, + RestApiId: input.RestApiId, + } + + log.Printf("[INFO] Applying update operations to API Gateway Authorizer: %s", d.Id()) + _, err := conn.UpdateAuthorizer(&input) + if err != nil { + return fmt.Errorf("applying update operations to API Gateway Authorizer (%s) failed: %w", d.Id(), err) + } + } + return resourceAwsApiGatewayAuthorizerRead(d, meta) } diff --git a/aws/resource_aws_api_gateway_authorizer_test.go b/aws/resource_aws_api_gateway_authorizer_test.go index b05bf2c5477..aaf8293087f 100644 --- a/aws/resource_aws_api_gateway_authorizer_test.go +++ b/aws/resource_aws_api_gateway_authorizer_test.go @@ -97,6 +97,36 @@ func TestAccAWSAPIGatewayAuthorizer_cognito(t *testing.T) { }) } +// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16613 +func TestAccAWSAPIGatewayAuthorizer_cognito_authorizerCredentials(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_api_gateway_authorizer.test" + iamRoleResourceName := "aws_iam_role.lambda" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccAPIGatewayTypeEDGEPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayAuthorizerDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayAuthorizerConfig_cognitoAuthorizerCredentials(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "authorizer_credentials", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "type", "COGNITO_USER_POOLS"), + resource.TestCheckResourceAttr(resourceName, "provider_arns.#", "2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccAWSAPIGatewayAuthorizerImportStateIdFunc(resourceName), + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAWSAPIGatewayAuthorizer_switchAuthType(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_api_gateway_authorizer.test" @@ -489,6 +519,49 @@ resource "aws_api_gateway_authorizer" "test" { `, rName) } +func testAccAWSAPIGatewayAuthorizerConfig_cognitoAuthorizerCredentials(rName string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_iam_role" "lambda" { + name = "%[1]s-lambda" + + assume_role_policy = < Date: Sun, 8 Nov 2020 13:05:59 +0200 Subject: [PATCH 0117/1212] sagemaker image --- .../service/sagemaker/finder/finder.go | 19 ++ .../service/sagemaker/waiter/status.go | 27 +++ .../service/sagemaker/waiter/waiter.go | 40 ++++ aws/provider.go | 3 +- aws/resource_aws_sagemaker_image.go | 209 
++++++++++++++++++
 aws/resource_aws_sagemaker_image_test.go      | 183 +++++++++++++++
 6 files changed, 480 insertions(+), 1 deletion(-)
 create mode 100644 aws/resource_aws_sagemaker_image.go
 create mode 100644 aws/resource_aws_sagemaker_image_test.go

diff --git a/aws/internal/service/sagemaker/finder/finder.go b/aws/internal/service/sagemaker/finder/finder.go
index 025fd543c0c..0abb4a7a7e9 100644
--- a/aws/internal/service/sagemaker/finder/finder.go
+++ b/aws/internal/service/sagemaker/finder/finder.go
@@ -23,3 +23,22 @@ func CodeRepositoryByName(conn *sagemaker.SageMaker, name string) (*sagemaker.De
 
 	return output, nil
 }
+
+// ImageByName returns the SageMaker Image corresponding to the specified name.
+// Returns nil if no Image is found.
+func ImageByName(conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeImageOutput, error) {
+	input := &sagemaker.DescribeImageInput{
+		ImageName: aws.String(name),
+	}
+
+	output, err := conn.DescribeImage(input)
+	if err != nil {
+		return nil, err
+	}
+
+	if output == nil {
+		return nil, nil
+	}
+
+	return output, nil
+}
diff --git a/aws/internal/service/sagemaker/waiter/status.go b/aws/internal/service/sagemaker/waiter/status.go
index 6a4943e22ab..8d615f69f93 100644
--- a/aws/internal/service/sagemaker/waiter/status.go
+++ b/aws/internal/service/sagemaker/waiter/status.go
@@ -9,6 +9,8 @@ import (
 
 const (
 	SagemakerNotebookInstanceStatusNotFound = "NotFound"
+	SagemakerImageStatusNotFound            = "NotFound"
+	SagemakerImageStatusFailed              = "Failed"
 )
 
 // NotebookInstanceStatus fetches the NotebookInstance and its Status
@@ -35,3 +37,28 @@ func NotebookInstanceStatus(conn *sagemaker.SageMaker, notebookName string) reso
 		return output, aws.StringValue(output.NotebookInstanceStatus), nil
 	}
 }
+
+// ImageStatus fetches the Image and its Status
+func ImageStatus(conn *sagemaker.SageMaker, name string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		input := &sagemaker.DescribeImageInput{
+			ImageName: aws.String(name),
+		}
+
+		output, err := conn.DescribeImage(input)
+
+		if tfawserr.ErrMessageContains(err, "ValidationException", "RecordNotFound") {
+			return nil, SagemakerImageStatusNotFound, nil
+		}
+
+		if err != nil {
+			return nil, SagemakerImageStatusFailed, err
+		}
+
+		if output == nil {
+			return nil, SagemakerImageStatusNotFound, nil
+		}
+
+		return output, aws.StringValue(output.ImageStatus), nil
+	}
+}
diff --git a/aws/internal/service/sagemaker/waiter/waiter.go b/aws/internal/service/sagemaker/waiter/waiter.go
index 61330bea240..42a901ebefa 100644
--- a/aws/internal/service/sagemaker/waiter/waiter.go
+++ b/aws/internal/service/sagemaker/waiter/waiter.go
@@ -11,6 +11,7 @@ const (
 	NotebookInstanceInServiceTimeout = 10 * time.Minute
 	NotebookInstanceStoppedTimeout   = 10 * time.Minute
 	NotebookInstanceDeletedTimeout   = 10 * time.Minute
+	ImageCreatedTimeout              = 10 * time.Minute
 )
 
 // NotebookInstanceInService waits for a NotebookInstance to return InService
@@ -76,3 +77,42 @@ func NotebookInstanceDeleted(conn *sagemaker.SageMaker, notebookName string) (*s
 
 	return nil, err
 }
+
+// ImageCreated waits for an Image to return Created
+func ImageCreated(conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeImageOutput, error) {
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{
+			sagemaker.ImageStatusCreating,
+			sagemaker.ImageStatusUpdating,
+		},
+		Target:  []string{sagemaker.ImageStatusCreated},
+		Refresh: ImageStatus(conn, name),
+		Timeout: ImageCreatedTimeout,
+	}
+
+	outputRaw, err := stateConf.WaitForState()
+
+	if
output, ok := outputRaw.(*sagemaker.DescribeImageOutput); ok { + return output, err + } + + return nil, err +} + +// ImageDeleted waits for a Image to return Deleted +func ImageDeleted(conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeImageOutput, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{sagemaker.ImageStatusDeleting}, + Target: []string{}, + Refresh: ImageStatus(conn, name), + Timeout: ImageCreatedTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*sagemaker.DescribeImageOutput); ok { + return output, err + } + + return nil, err +} diff --git a/aws/provider.go b/aws/provider.go index 79d050c6a0a..5fda836eed7 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -857,9 +857,10 @@ func Provider() *schema.Provider { "aws_default_route_table": resourceAwsDefaultRouteTable(), "aws_route_table_association": resourceAwsRouteTableAssociation(), "aws_sagemaker_code_repository": resourceAwsSagemakerCodeRepository(), - "aws_sagemaker_model": resourceAwsSagemakerModel(), "aws_sagemaker_endpoint_configuration": resourceAwsSagemakerEndpointConfiguration(), + "aws_sagemaker_image": resourceAwsSagemakerImage(), "aws_sagemaker_endpoint": resourceAwsSagemakerEndpoint(), + "aws_sagemaker_model": resourceAwsSagemakerModel(), "aws_sagemaker_notebook_instance_lifecycle_configuration": resourceAwsSagemakerNotebookInstanceLifeCycleConfiguration(), "aws_sagemaker_notebook_instance": resourceAwsSagemakerNotebookInstance(), "aws_secretsmanager_secret": resourceAwsSecretsManagerSecret(), diff --git a/aws/resource_aws_sagemaker_image.go b/aws/resource_aws_sagemaker_image.go new file mode 100644 index 00000000000..56f6bb512a0 --- /dev/null +++ b/aws/resource_aws_sagemaker_image.go @@ -0,0 +1,209 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/sagemaker" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/finder" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/waiter" +) + +func resourceAwsSagemakerImage() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSagemakerImageCreate, + Read: resourceAwsSagemakerImageRead, + Update: resourceAwsSagemakerImageUpdate, + Delete: resourceAwsSagemakerImageDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "image_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 63), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9](-*[a-zA-Z0-9])*$`), "Valid characters are a-z, A-Z, 0-9, and - (hyphen)."), + ), + }, + "role_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 128), + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 512), + }, + "tags": tagsSchema(), + }, + } +} + +func resourceAwsSagemakerImageCreate(d *schema.ResourceData, meta interface{}) error { + conn := 
meta.(*AWSClient).sagemakerconn + + name := d.Get("image_name").(string) + input := &sagemaker.CreateImageInput{ + ImageName: aws.String(name), + RoleArn: aws.String(d.Get("role_arn").(string)), + } + + if v, ok := d.GetOk("display_name"); ok { + input.DisplayName = aws.String(v.(string)) + } + + if v, ok := d.GetOk("description"); ok { + input.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("tags"); ok { + input.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().SagemakerTags() + } + + log.Printf("[DEBUG] sagemaker Image create config: %#v", *input) + _, err := conn.CreateImage(input) + if err != nil { + return fmt.Errorf("error creating SageMaker Image: %w", err) + } + + d.SetId(name) + + if _, err := waiter.ImageCreated(conn, d.Id()); err != nil { + return fmt.Errorf("error waiting for sagemaker image (%s) to create: %w", d.Id(), err) + } + + return resourceAwsSagemakerImageRead(d, meta) +} + +func resourceAwsSagemakerImageRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sagemakerconn + ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig + + image, err := finder.ImageByName(conn, d.Id()) + if err != nil { + if isAWSErr(err, "ValidationException", "Cannot find Image") { + d.SetId("") + log.Printf("[WARN] Unable to find SageMaker Image (%s); removing from state", d.Id()) + return nil + } + return fmt.Errorf("error reading SageMaker Image (%s): %w", d.Id(), err) + + } + + arn := aws.StringValue(image.ImageArn) + d.Set("image_name", image.ImageName) + d.Set("arn", arn) + d.Set("role_arn", image.RoleArn) + d.Set("display_name", image.DisplayName) + d.Set("description", image.Description) + + tags, err := keyvaluetags.SagemakerListTags(conn, arn) + + if err != nil { + return fmt.Errorf("error listing tags for Sagemaker Image (%s): %w", d.Id(), err) + } + + if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + return fmt.Errorf("error setting tags: %w", err) + } + + return nil +} + +func resourceAwsSagemakerImageUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sagemakerconn + needsUpdate := false + + input := &sagemaker.UpdateImageInput{ + ImageName: aws.String(d.Id()), + } + + var deleteProperties []*string + + if d.HasChange("role_arn") { + input.Description = aws.String(d.Get("role_arn").(string)) + } + + if d.HasChange("description") { + if v, ok := d.GetOk("description"); ok { + input.Description = aws.String(v.(string)) + } else { + deleteProperties = append(deleteProperties, aws.String("Description")) + } + needsUpdate = true + } + + if d.HasChange("display_name") { + if v, ok := d.GetOk("display_name"); ok { + input.DisplayName = aws.String(v.(string)) + } else { + deleteProperties = append(deleteProperties, aws.String("DisplayName")) + } + needsUpdate = true + } + + if needsUpdate { + log.Printf("[DEBUG] sagemaker Image update config: %#v", *input) + _, err := conn.UpdateImage(input) + if err != nil { + return fmt.Errorf("error updating SageMaker Image: %w", err) + } + + if _, err := waiter.ImageCreated(conn, d.Id()); err != nil { + return fmt.Errorf("error waiting for sagemaker image (%s) to update: %w", d.Id(), err) + } + } + + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.SagemakerUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating Sagemaker Image (%s) tags: %s", d.Id(), err) + } + } + + return resourceAwsSagemakerImageRead(d, meta) +} + +func 
resourceAwsSagemakerImageDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sagemakerconn + + input := &sagemaker.DeleteImageInput{ + ImageName: aws.String(d.Id()), + } + + if _, err := conn.DeleteImage(input); err != nil { + if isAWSErr(err, "ValidationException", "Cannot find Image") { + return nil + } + return fmt.Errorf("error deleting SageMaker Image (%s): %w", d.Id(), err) + } + + if _, err := waiter.ImageDeleted(conn, d.Id()); err != nil { + if !isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "") { + return fmt.Errorf("error waiting for sagemaker image (%s) to delete: %w", d.Id(), err) + } + } + + return nil +} diff --git a/aws/resource_aws_sagemaker_image_test.go b/aws/resource_aws_sagemaker_image_test.go new file mode 100644 index 00000000000..71494ed5139 --- /dev/null +++ b/aws/resource_aws_sagemaker_image_test.go @@ -0,0 +1,183 @@ +package aws + +import ( + "fmt" + "log" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/sagemaker" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/finder" +) + +func init() { + resource.AddTestSweepers("aws_sagemaker_image", &resource.Sweeper{ + Name: "aws_sagemaker_image", + F: testSweepSagemakerImages, + }) +} + +func testSweepSagemakerImages(region string) error { + client, err := sharedClientForRegion(region) + if err != nil { + return fmt.Errorf("error getting client: %s", err) + } + conn := client.(*AWSClient).sagemakerconn + + err = conn.ListImagesPages(&sagemaker.ListImagesInput{}, func(page *sagemaker.ListImagesOutput, lastPage bool) bool { + for _, Image := range page.Images { + name := aws.StringValue(Image.ImageName) + + input := &sagemaker.DeleteImageInput{ + ImageName: Image.ImageName, + } + + log.Printf("[INFO] Deleting SageMaker Image: %s", name) + if _, err := conn.DeleteImage(input); err != nil { + log.Printf("[ERROR] Error deleting SageMaker Image (%s): %s", name, err) + continue + } + } + + return !lastPage + }) + + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping SageMaker Image sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("Error retrieving SageMaker Images: %w", err) + } + + return nil +} + +func TestAccAWSSagemakerImage_basic(t *testing.T) { + var notebook sagemaker.DescribeImageOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_image.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerImageDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerImageBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerImageExists(resourceName, ¬ebook), + resource.TestCheckResourceAttr(resourceName, "image_name", rName), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("image/%s", rName)), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSagemakerImage_disappears(t *testing.T) { + var notebook sagemaker.DescribeImageOutput + 
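+	// The image is deleted out of band via testAccCheckResourceDisappears,
+	// so the follow-up plan is expected to be non-empty (ExpectNonEmptyPlan).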
rName := acctest.RandomWithPrefix("tf-acc-test")
+	resourceName := "aws_sagemaker_image.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSSagemakerImageDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAWSSagemakerImageBasicConfig(rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSSagemakerImageExists(resourceName, &notebook),
+					testAccCheckResourceDisappears(testAccProvider, resourceAwsSagemakerImage(), resourceName),
+				),
+				ExpectNonEmptyPlan: true,
+			},
+		},
+	})
+}
+
+func testAccCheckAWSSagemakerImageDestroy(s *terraform.State) error {
+	conn := testAccProvider.Meta().(*AWSClient).sagemakerconn
+
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "aws_sagemaker_image" {
+			continue
+		}
+
+		Image, err := finder.ImageByName(conn, rs.Primary.ID)
+		if err != nil {
+			return nil
+		}
+
+		if aws.StringValue(Image.ImageName) == rs.Primary.ID {
+			return fmt.Errorf("sagemaker Image %q still exists", rs.Primary.ID)
+		}
+	}
+
+	return nil
+}
+
+func testAccCheckAWSSagemakerImageExists(n string, image *sagemaker.DescribeImageOutput) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[n]
+		if !ok {
+			return fmt.Errorf("Not found: %s", n)
+		}
+
+		if rs.Primary.ID == "" {
+			return fmt.Errorf("No SageMaker Image ID is set")
+		}
+
+		conn := testAccProvider.Meta().(*AWSClient).sagemakerconn
+		resp, err := finder.ImageByName(conn, rs.Primary.ID)
+		if err != nil {
+			return err
+		}
+
+		*image = *resp
+
+		return nil
+	}
+}
+
+func testAccAWSSagemakerImageConfigBase(rName string) string {
+	return fmt.Sprintf(`
+resource "aws_iam_role" "test" {
+  name = %[1]q
+  path = "/"
+  assume_role_policy = data.aws_iam_policy_document.test.json
+}
+
+data "aws_iam_policy_document" "test" {
+  statement {
+	actions = ["sts:AssumeRole"]
+
+	principals {
+	  type = "Service"
+	  identifiers = ["sagemaker.amazonaws.com"]
+	}
+  }
+}
+`, rName)
+}
+
+func testAccAWSSagemakerImageBasicConfig(rName string) string {
+	return testAccAWSSagemakerImageConfigBase(rName) + fmt.Sprintf(`
+resource "aws_sagemaker_image" "test" {
+  image_name = %[1]q
+  role_arn   = aws_iam_role.test.arn
+}
+`, rName)
+}

From 7cfce5532e9f852dc99d8b3671b6179251a0d75b Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Sun, 8 Nov 2020 14:25:28 +0200
Subject: [PATCH 0118/1212] basic tests passing

---
 aws/resource_aws_sagemaker_image.go      | 10 ++++++----
 aws/resource_aws_sagemaker_image_test.go |  4 ++--
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/aws/resource_aws_sagemaker_image.go b/aws/resource_aws_sagemaker_image.go
index 56f6bb512a0..b8ef838473e 100644
--- a/aws/resource_aws_sagemaker_image.go
+++ b/aws/resource_aws_sagemaker_image.go
@@ -101,7 +101,7 @@ func resourceAwsSagemakerImageRead(d *schema.ResourceData, meta interface{}) err
 
 	image, err := finder.ImageByName(conn, d.Id())
 	if err != nil {
-		if isAWSErr(err, "ValidationException", "Cannot find Image") {
+		if isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "No Image with the name") {
 			d.SetId("")
 			log.Printf("[WARN] Unable to find SageMaker Image (%s); removing from state", d.Id())
 			return nil
@@ -193,16 +193,18 @@ func resourceAwsSagemakerImageDelete(d *schema.ResourceData, meta interface{}) e
 	}
 
 	if _, err := conn.DeleteImage(input); err != nil {
-		if isAWSErr(err, "ValidationException", "Cannot find Image") {
+		if isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "No Image with the name") {
 			return nil
 		}
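 		// Any error other than an image-not-found error fails the destroy below.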
return fmt.Errorf("error deleting SageMaker Image (%s): %w", d.Id(), err) } if _, err := waiter.ImageDeleted(conn, d.Id()); err != nil { - if !isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "") { - return fmt.Errorf("error waiting for sagemaker image (%s) to delete: %w", d.Id(), err) + if isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "No Image with the name") { + return nil } + return fmt.Errorf("error waiting for sagemaker image (%s) to delete: %w", d.Id(), err) + } return nil diff --git a/aws/resource_aws_sagemaker_image_test.go b/aws/resource_aws_sagemaker_image_test.go index 71494ed5139..5fd0858c467 100644 --- a/aws/resource_aws_sagemaker_image_test.go +++ b/aws/resource_aws_sagemaker_image_test.go @@ -87,7 +87,7 @@ func TestAccAWSSagemakerImage_basic(t *testing.T) { } func TestAccAWSSagemakerImage_disappears(t *testing.T) { - var notebook sagemaker.DescribeImageOutput + var image sagemaker.DescribeImageOutput rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_sagemaker_image.test" @@ -99,7 +99,7 @@ func TestAccAWSSagemakerImage_disappears(t *testing.T) { { Config: testAccAWSSagemakerImageBasicConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerImageExists(resourceName, ¬ebook), + testAccCheckAWSSagemakerImageExists(resourceName, &image), testAccCheckResourceDisappears(testAccProvider, resourceAwsSagemakerImage(), resourceName), ), ExpectNonEmptyPlan: true, From b792b208d4c52e02efc863c37c65a70f6a65e723 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 8 Nov 2020 14:34:39 +0200 Subject: [PATCH 0119/1212] add tags test --- aws/resource_aws_sagemaker_image_test.go | 75 +++++++++++++++++++++++- 1 file changed, 73 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_sagemaker_image_test.go b/aws/resource_aws_sagemaker_image_test.go index 5fd0858c467..23158277a46 100644 --- a/aws/resource_aws_sagemaker_image_test.go +++ b/aws/resource_aws_sagemaker_image_test.go @@ -58,7 +58,7 @@ func testSweepSagemakerImages(region string) error { } func TestAccAWSSagemakerImage_basic(t *testing.T) { - var notebook sagemaker.DescribeImageOutput + var image sagemaker.DescribeImageOutput rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_sagemaker_image.test" @@ -70,7 +70,7 @@ func TestAccAWSSagemakerImage_basic(t *testing.T) { { Config: testAccAWSSagemakerImageBasicConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerImageExists(resourceName, ¬ebook), + testAccCheckAWSSagemakerImageExists(resourceName, &image), resource.TestCheckResourceAttr(resourceName, "image_name", rName), testAccCheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("image/%s", rName)), resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), @@ -86,6 +86,50 @@ func TestAccAWSSagemakerImage_basic(t *testing.T) { }) } +func TestAccAWSSagemakerImage_tags(t *testing.T) { + var image sagemaker.DescribeImageOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_image.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerImageDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerImageConfigTags1(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerImageExists(resourceName, &image), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + 
resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSSagemakerImageConfigTags2(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerImageExists(resourceName, &image), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccAWSSagemakerImageConfigTags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerImageExists(resourceName, &image), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + func TestAccAWSSagemakerImage_disappears(t *testing.T) { var image sagemaker.DescribeImageOutput rName := acctest.RandomWithPrefix("tf-acc-test") @@ -181,3 +225,30 @@ resource "aws_sagemaker_image" "test" { } `, rName) } + +func testAccAWSSagemakerImageConfigTags1(rName, tagKey1, tagValue1 string) string { + return testAccAWSSagemakerImageConfigBase(rName) + fmt.Sprintf(` +resource "aws_sagemaker_image" "test" { + image_name = %[1]q + role_arn = aws_iam_role.test.arn + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1) +} + +func testAccAWSSagemakerImageConfigTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return testAccAWSSagemakerImageConfigBase(rName) + fmt.Sprintf(` +resource "aws_sagemaker_image" "test" { + image_name = %[1]q + role_arn = aws_iam_role.test.arn + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2) +} From f1fa30ef053f01f38ad5bb837c1d38c775bd9d39 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 8 Nov 2020 19:30:54 +0200 Subject: [PATCH 0120/1212] add description and displayname tests --- aws/resource_aws_sagemaker_image.go | 2 + aws/resource_aws_sagemaker_image_test.go | 100 +++++++++++++++++++++++ 2 files changed, 102 insertions(+) diff --git a/aws/resource_aws_sagemaker_image.go b/aws/resource_aws_sagemaker_image.go index b8ef838473e..ff5eba23144 100644 --- a/aws/resource_aws_sagemaker_image.go +++ b/aws/resource_aws_sagemaker_image.go @@ -149,6 +149,7 @@ func resourceAwsSagemakerImageUpdate(d *schema.ResourceData, meta interface{}) e input.Description = aws.String(v.(string)) } else { deleteProperties = append(deleteProperties, aws.String("Description")) + input.DeleteProperties = deleteProperties } needsUpdate = true } @@ -158,6 +159,7 @@ func resourceAwsSagemakerImageUpdate(d *schema.ResourceData, meta interface{}) e input.DisplayName = aws.String(v.(string)) } else { deleteProperties = append(deleteProperties, aws.String("DisplayName")) + input.DeleteProperties = deleteProperties } needsUpdate = true } diff --git a/aws/resource_aws_sagemaker_image_test.go b/aws/resource_aws_sagemaker_image_test.go index 23158277a46..f6d2851c6ba 100644 --- a/aws/resource_aws_sagemaker_image_test.go +++ b/aws/resource_aws_sagemaker_image_test.go @@ -86,6 +86,86 @@ func TestAccAWSSagemakerImage_basic(t *testing.T) { }) } +func TestAccAWSSagemakerImage_description(t *testing.T) { + var image sagemaker.DescribeImageOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_image.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { 
testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerImageDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerImageDescription(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerImageExists(resourceName, &image), + resource.TestCheckResourceAttr(resourceName, "description", rName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSSagemakerImageBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerImageExists(resourceName, &image), + resource.TestCheckResourceAttr(resourceName, "description", ""), + ), + }, + { + Config: testAccAWSSagemakerImageDescription(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerImageExists(resourceName, &image), + resource.TestCheckResourceAttr(resourceName, "description", rName), + ), + }, + }, + }) +} + +func TestAccAWSSagemakerImage_displayName(t *testing.T) { + var image sagemaker.DescribeImageOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_image.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerImageDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerImageDisplayName(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerImageExists(resourceName, &image), + resource.TestCheckResourceAttr(resourceName, "display_name", rName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSSagemakerImageBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerImageExists(resourceName, &image), + resource.TestCheckResourceAttr(resourceName, "display_name", ""), + ), + }, + { + Config: testAccAWSSagemakerImageDisplayName(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerImageExists(resourceName, &image), + resource.TestCheckResourceAttr(resourceName, "display_name", rName), + ), + }, + }, + }) +} + func TestAccAWSSagemakerImage_tags(t *testing.T) { var image sagemaker.DescribeImageOutput rName := acctest.RandomWithPrefix("tf-acc-test") @@ -226,6 +306,26 @@ resource "aws_sagemaker_image" "test" { `, rName) } +func testAccAWSSagemakerImageDescription(rName string) string { + return testAccAWSSagemakerImageConfigBase(rName) + fmt.Sprintf(` +resource "aws_sagemaker_image" "test" { + image_name = %[1]q + role_arn = aws_iam_role.test.arn + description = %[1]q +} +`, rName) +} + +func testAccAWSSagemakerImageDisplayName(rName string) string { + return testAccAWSSagemakerImageConfigBase(rName) + fmt.Sprintf(` +resource "aws_sagemaker_image" "test" { + image_name = %[1]q + role_arn = aws_iam_role.test.arn + display_name = %[1]q +} +`, rName) +} + func testAccAWSSagemakerImageConfigTags1(rName, tagKey1, tagValue1 string) string { return testAccAWSSagemakerImageConfigBase(rName) + fmt.Sprintf(` resource "aws_sagemaker_image" "test" { From 7b059dfc6312cf900431777ac68cbae18cfa8a0f Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 8 Nov 2020 19:35:51 +0200 Subject: [PATCH 0121/1212] docs --- website/docs/r/sagemaker_image.html.markdown | 47 ++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 website/docs/r/sagemaker_image.html.markdown diff --git a/website/docs/r/sagemaker_image.html.markdown b/website/docs/r/sagemaker_image.html.markdown new 
file mode 100644
index 00000000000..ebe5416cee1
--- /dev/null
+++ b/website/docs/r/sagemaker_image.html.markdown
@@ -0,0 +1,47 @@
+---
+subcategory: "Sagemaker"
+layout: "aws"
+page_title: "AWS: aws_sagemaker_image"
+description: |-
+  Provides a Sagemaker Image resource.
+---
+
+# Resource: aws_sagemaker_image
+
+Provides a Sagemaker Image resource.
+
+## Example Usage
+
+### Basic usage
+
+```hcl
+resource "aws_sagemaker_image" "example" {
+  image_name = "example"
+  role_arn   = aws_iam_role.test.arn
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `image_name` - (Required) The name of the image. Must be unique to your account.
+* `role_arn` - (Required) The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf.
+* `display_name` - (Optional) The display name of the image. When the image is added to a domain, it must be unique to the domain.
+* `description` - (Optional) The description of the image.
+* `tags` - (Optional) A map of tags to assign to the resource.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The name of the Image.
+* `arn` - The Amazon Resource Name (ARN) assigned by AWS to this Image.
+
+## Import
+
+SageMaker Images can be imported using the `image_name`, e.g.
+
+```
+$ terraform import aws_sagemaker_image.test_image my-image
+```

From 401900edf7dc00b86612c36b0f802a8a89e26a77 Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Sun, 8 Nov 2020 19:38:23 +0200
Subject: [PATCH 0122/1212] delete timeout

---
 aws/internal/service/sagemaker/waiter/waiter.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/aws/internal/service/sagemaker/waiter/waiter.go b/aws/internal/service/sagemaker/waiter/waiter.go
index 42a901ebefa..e6ff40fb82a 100644
--- a/aws/internal/service/sagemaker/waiter/waiter.go
+++ b/aws/internal/service/sagemaker/waiter/waiter.go
@@ -12,6 +12,7 @@ const (
 	NotebookInstanceStoppedTimeout = 10 * time.Minute
 	NotebookInstanceDeletedTimeout = 10 * time.Minute
 	ImageCreatedTimeout            = 10 * time.Minute
+	ImageDeletedTimeout            = 10 * time.Minute
 )
 
 // NotebookInstanceInService waits for a NotebookInstance to return InService
@@ -105,7 +106,7 @@ func ImageDeleted(conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeIm
 		Pending: []string{sagemaker.ImageStatusDeleting},
 		Target:  []string{},
 		Refresh: ImageStatus(conn, name),
-		Timeout: ImageCreatedTimeout,
+		Timeout: ImageDeletedTimeout,
 	}
 
 	outputRaw, err := stateConf.WaitForState()

From ba3dc6d62eecd5be7dd17367f8af8e595733b5a4 Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Sun, 8 Nov 2020 19:39:36 +0200
Subject: [PATCH 0123/1212] fmt

---
 aws/resource_aws_sagemaker_image_test.go | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/aws/resource_aws_sagemaker_image_test.go b/aws/resource_aws_sagemaker_image_test.go
index f6d2851c6ba..d6876d83a6c 100644
--- a/aws/resource_aws_sagemaker_image_test.go
+++ b/aws/resource_aws_sagemaker_image_test.go
@@ -286,12 +286,12 @@ resource "aws_iam_role" "test" {
 
 data "aws_iam_policy_document" "test" {
   statement {
-	actions = ["sts:AssumeRole"]
+    actions = ["sts:AssumeRole"]
 
-	principals {
-	  type = "Service"
-	  identifiers = ["sagemaker.amazonaws.com"]
-	}
+    principals {
+      type        = "Service"
+      identifiers = ["sagemaker.amazonaws.com"]
+    }
   }
 }
 `, rName)

From 79c57648c051281d864c2918571f93d2ef863fff Mon Sep 17 00:00:00 2001
From: Ilia Lazebnik
Date: Mon, 9 Nov 2020 22:54:20 +0200
Subject: [PATCH 0124/1212] Update aws/resource_aws_sagemaker_image.go
Co-authored-by: Kit Ewbank --- aws/resource_aws_sagemaker_image.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_sagemaker_image.go b/aws/resource_aws_sagemaker_image.go index ff5eba23144..a8079504c2f 100644 --- a/aws/resource_aws_sagemaker_image.go +++ b/aws/resource_aws_sagemaker_image.go @@ -141,7 +141,7 @@ func resourceAwsSagemakerImageUpdate(d *schema.ResourceData, meta interface{}) e var deleteProperties []*string if d.HasChange("role_arn") { - input.Description = aws.String(d.Get("role_arn").(string)) + input.RoleArn= aws.String(d.Get("role_arn").(string)) } if d.HasChange("description") { From cee842aefd374341d9292cf4967dac6c7a882476 Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Tue, 10 Nov 2020 09:04:32 +0200 Subject: [PATCH 0125/1212] Apply suggestions from code review Co-authored-by: Kit Ewbank --- aws/resource_aws_sagemaker_image.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_sagemaker_image.go b/aws/resource_aws_sagemaker_image.go index a8079504c2f..9a603bad8d5 100644 --- a/aws/resource_aws_sagemaker_image.go +++ b/aws/resource_aws_sagemaker_image.go @@ -89,7 +89,7 @@ func resourceAwsSagemakerImageCreate(d *schema.ResourceData, meta interface{}) e d.SetId(name) if _, err := waiter.ImageCreated(conn, d.Id()); err != nil { - return fmt.Errorf("error waiting for sagemaker image (%s) to create: %w", d.Id(), err) + return fmt.Errorf("error waiting for SageMaker Image (%s) to create: %w", d.Id(), err) } return resourceAwsSagemakerImageRead(d, meta) @@ -120,7 +120,7 @@ func resourceAwsSagemakerImageRead(d *schema.ResourceData, meta interface{}) err tags, err := keyvaluetags.SagemakerListTags(conn, arn) if err != nil { - return fmt.Errorf("error listing tags for Sagemaker Image (%s): %w", d.Id(), err) + return fmt.Errorf("error listing tags for SageMaker Image (%s): %w", d.Id(), err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { @@ -172,7 +172,7 @@ func resourceAwsSagemakerImageUpdate(d *schema.ResourceData, meta interface{}) e } if _, err := waiter.ImageCreated(conn, d.Id()); err != nil { - return fmt.Errorf("error waiting for sagemaker image (%s) to update: %w", d.Id(), err) + return fmt.Errorf("error waiting for SageMaker Image (%s) to update: %w", d.Id(), err) } } @@ -180,7 +180,7 @@ func resourceAwsSagemakerImageUpdate(d *schema.ResourceData, meta interface{}) e o, n := d.GetChange("tags") if err := keyvaluetags.SagemakerUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { - return fmt.Errorf("error updating Sagemaker Image (%s) tags: %s", d.Id(), err) + return fmt.Errorf("error updating SageMaker Image (%s) tags: %s", d.Id(), err) } } From 46d47430e3e0342a29fb98884f5d41f8b73dc396 Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Tue, 10 Nov 2020 09:05:07 +0200 Subject: [PATCH 0126/1212] Apply suggestions from code review Co-authored-by: Kit Ewbank --- aws/resource_aws_sagemaker_image.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_sagemaker_image.go b/aws/resource_aws_sagemaker_image.go index 9a603bad8d5..69ad2364c2c 100644 --- a/aws/resource_aws_sagemaker_image.go +++ b/aws/resource_aws_sagemaker_image.go @@ -205,7 +205,7 @@ func resourceAwsSagemakerImageDelete(d *schema.ResourceData, meta interface{}) e if isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "No Image with the name") { return nil } - return fmt.Errorf("error waiting for sagemaker image (%s) to delete: %w", d.Id(), err) + return 
fmt.Errorf("error waiting for SageMaker Image (%s) to delete: %w", d.Id(), err) } From ef0a2a393f7a41aae8937fe3021323d14729af52 Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Tue, 10 Nov 2020 15:56:06 +0200 Subject: [PATCH 0127/1212] Update aws/resource_aws_sagemaker_image.go Co-authored-by: Kit Ewbank --- aws/resource_aws_sagemaker_image.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_sagemaker_image.go b/aws/resource_aws_sagemaker_image.go index 69ad2364c2c..c7556bc47c9 100644 --- a/aws/resource_aws_sagemaker_image.go +++ b/aws/resource_aws_sagemaker_image.go @@ -141,7 +141,7 @@ func resourceAwsSagemakerImageUpdate(d *schema.ResourceData, meta interface{}) e var deleteProperties []*string if d.HasChange("role_arn") { - input.RoleArn= aws.String(d.Get("role_arn").(string)) + input.RoleArn = aws.String(d.Get("role_arn").(string)) } if d.HasChange("description") { From 2cd0552f85a56e30932313489837865807b9b4cc Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Mon, 16 Nov 2020 17:47:35 +0200 Subject: [PATCH 0128/1212] wait before create --- .../service/sagemaker/waiter/status.go | 8 +++- aws/resource_aws_sagemaker_image.go | 40 ++++++++++++++----- aws/resource_aws_sagemaker_image_test.go | 5 ++- 3 files changed, 39 insertions(+), 14 deletions(-) diff --git a/aws/internal/service/sagemaker/waiter/status.go b/aws/internal/service/sagemaker/waiter/status.go index 8d615f69f93..1f8f5d00dbe 100644 --- a/aws/internal/service/sagemaker/waiter/status.go +++ b/aws/internal/service/sagemaker/waiter/status.go @@ -1,6 +1,8 @@ package waiter import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/sagemaker" "github.com/hashicorp/aws-sdk-go-base/tfawserr" @@ -47,7 +49,7 @@ func ImageStatus(conn *sagemaker.SageMaker, name string) resource.StateRefreshFu output, err := conn.DescribeImage(input) - if tfawserr.ErrMessageContains(err, "ValidationException", "RecordNotFound") { + if tfawserr.ErrMessageContains(err, sagemaker.ErrCodeResourceNotFound, "No Image with the name") { return nil, SagemakerImageStatusNotFound, nil } @@ -59,6 +61,10 @@ func ImageStatus(conn *sagemaker.SageMaker, name string) resource.StateRefreshFu return nil, SagemakerImageStatusNotFound, nil } + if aws.StringValue(output.ImageStatus) == sagemaker.ImageStatusCreateFailed { + return output, sagemaker.ImageStatusCreateFailed, fmt.Errorf("%s", aws.StringValue(output.FailureReason)) + } + return output, aws.StringValue(output.ImageStatus), nil } } diff --git a/aws/resource_aws_sagemaker_image.go b/aws/resource_aws_sagemaker_image.go index c7556bc47c9..03db1228ffe 100644 --- a/aws/resource_aws_sagemaker_image.go +++ b/aws/resource_aws_sagemaker_image.go @@ -4,9 +4,12 @@ import ( "fmt" "log" "regexp" + "strings" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/sagemaker" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" @@ -42,6 +45,7 @@ func resourceAwsSagemakerImage() *schema.Resource { "role_arn": { Type: schema.TypeString, Required: true, + ForceNew: true, ValidateFunc: validateArn, }, "display_name": { @@ -80,16 +84,34 @@ func resourceAwsSagemakerImageCreate(d *schema.ResourceData, meta interface{}) e input.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().SagemakerTags() } + // for some reason even if the 
operation is retried the same response is given even though the role is valid. a short sleep before creation solves it. + time.Sleep(1 * time.Minute) log.Printf("[DEBUG] sagemaker Image create config: %#v", *input) - _, err := conn.CreateImage(input) - if err != nil { - return fmt.Errorf("error creating SageMaker Image: %w", err) - } + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + var err error + _, err = conn.CreateImage(input) + if err != nil { + return resource.NonRetryableError(fmt.Errorf("error creating SageMaker Image: %w", err)) + } + + d.SetId(name) - d.SetId(name) + out, err := waiter.ImageCreated(conn, d.Id()) - if _, err := waiter.ImageCreated(conn, d.Id()); err != nil { - return fmt.Errorf("error waiting for SageMaker Image (%s) to create: %w", d.Id(), err) + if strings.Contains(aws.StringValue(out.FailureReason), "Unable to assume role with RoleArn") { + return resource.RetryableError(err) + } + if err != nil { + return resource.NonRetryableError(fmt.Errorf("error waiting for SageMaker Image (%s) to create: %w", d.Id(), err)) + } + return nil + }) + if isResourceTimeoutError(err) { + _, err = conn.CreateImage(input) + _, err = waiter.ImageCreated(conn, d.Id()) + } + if err != nil { + return fmt.Errorf("error creating SageMaker Image %s: %w", name, err) } return resourceAwsSagemakerImageRead(d, meta) @@ -140,10 +162,6 @@ func resourceAwsSagemakerImageUpdate(d *schema.ResourceData, meta interface{}) e var deleteProperties []*string - if d.HasChange("role_arn") { - input.RoleArn = aws.String(d.Get("role_arn").(string)) - } - if d.HasChange("description") { if v, ok := d.GetOk("description"); ok { input.Description = aws.String(v.(string)) diff --git a/aws/resource_aws_sagemaker_image_test.go b/aws/resource_aws_sagemaker_image_test.go index d6876d83a6c..d2f25826f59 100644 --- a/aws/resource_aws_sagemaker_image_test.go +++ b/aws/resource_aws_sagemaker_image_test.go @@ -278,9 +278,10 @@ func testAccCheckAWSSagemakerImageExists(n string, image *sagemaker.DescribeImag func testAccAWSSagemakerImageConfigBase(rName string) string { return fmt.Sprintf(` +data "aws_partition" "current" {} + resource "aws_iam_role" "test" { name = %[1]q - path = "/" assume_role_policy = data.aws_iam_policy_document.test.json } @@ -290,7 +291,7 @@ data "aws_iam_policy_document" "test" { principals { type = "Service" - identifiers = ["sagemaker.amazonaws.com"] + identifiers = ["sagemaker.${data.aws_partition.current.dns_suffix}"] } } } From 43c6942bba423f7e44b2c474647e64bd9bee14cf Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Mon, 16 Nov 2020 17:48:06 +0200 Subject: [PATCH 0129/1212] comment --- aws/resource_aws_sagemaker_image.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_sagemaker_image.go b/aws/resource_aws_sagemaker_image.go index 03db1228ffe..4a434c31588 100644 --- a/aws/resource_aws_sagemaker_image.go +++ b/aws/resource_aws_sagemaker_image.go @@ -84,7 +84,7 @@ func resourceAwsSagemakerImageCreate(d *schema.ResourceData, meta interface{}) e input.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().SagemakerTags() } - // for some reason even if the operation is retried the same response is given even though the role is valid. a short sleep before creation solves it. + // for some reason even if the operation is retried the same error response is given even though the role is valid. a short sleep before creation solves it. 
time.Sleep(1 * time.Minute) log.Printf("[DEBUG] sagemaker Image create config: %#v", *input) err := resource.Retry(1*time.Minute, func() *resource.RetryError { From 5e85d3d8243bd20d66ff6ad5680ee9755ed0a792 Mon Sep 17 00:00:00 2001 From: Andrew Babichev Date: Fri, 4 Dec 2020 14:30:36 +0200 Subject: [PATCH 0130/1212] aws_workspaces_directory: Fix empty custom_security_group_id & default_ou --- aws/resource_aws_workspaces_directory.go | 14 +- aws/resource_aws_workspaces_directory_test.go | 346 ++++++++++++++---- 2 files changed, 279 insertions(+), 81 deletions(-) diff --git a/aws/resource_aws_workspaces_directory.go b/aws/resource_aws_workspaces_directory.go index d00cbfd05f8..a2f90e7e696 100644 --- a/aws/resource_aws_workspaces_directory.go +++ b/aws/resource_aws_workspaces_directory.go @@ -419,13 +419,21 @@ func expandWorkspaceCreationProperties(properties []interface{}) *workspaces.Wor p := properties[0].(map[string]interface{}) - return &workspaces.WorkspaceCreationProperties{ - CustomSecurityGroupId: aws.String(p["custom_security_group_id"].(string)), - DefaultOu: aws.String(p["default_ou"].(string)), + result := &workspaces.WorkspaceCreationProperties{ EnableInternetAccess: aws.Bool(p["enable_internet_access"].(bool)), EnableMaintenanceMode: aws.Bool(p["enable_maintenance_mode"].(bool)), UserEnabledAsLocalAdministrator: aws.Bool(p["user_enabled_as_local_administrator"].(bool)), } + + if p["custom_security_group_id"].(string) != "" { + result.CustomSecurityGroupId = aws.String(p["custom_security_group_id"].(string)) + } + + if p["default_ou"].(string) != "" { + result.DefaultOu = aws.String(p["default_ou"].(string)) + } + + return result } func flattenSelfServicePermissions(permissions *workspaces.SelfservicePermissions) []interface{} { diff --git a/aws/resource_aws_workspaces_directory_test.go b/aws/resource_aws_workspaces_directory_test.go index 0b4f927003a..16f3e1a4863 100644 --- a/aws/resource_aws_workspaces_directory_test.go +++ b/aws/resource_aws_workspaces_directory_test.go @@ -283,6 +283,55 @@ func TestAccAwsWorkspacesDirectory_workspaceCreationProperties(t *testing.T) { }) } +func TestAccAwsWorkspacesDirectory_workspaceCreationProperties_customSecurityGroupId_defaultOu(t *testing.T) { + var v workspaces.WorkspaceDirectory + rName := acctest.RandString(8) + + resourceName := "aws_workspaces_directory.main" + resourceSecurityGroup := "aws_security_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckWorkspacesDirectory(t) + testAccPreCheckAWSDirectoryServiceSimpleDirectory(t) + testAccPreCheckHasIAMRole(t, "workspaces_DefaultRole") + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsWorkspacesDirectoryDestroy, + Steps: []resource.TestStep{ + { + Config: testAccWorkspacesDirectoryConfig_workspaceCreationProperties_customSecurityGroupId_defaultOu_Absent(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAwsWorkspacesDirectoryExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "workspace_creation_properties.#", "1"), + resource.TestCheckResourceAttr(resourceName, "workspace_creation_properties.0.custom_security_group_id", ""), + resource.TestCheckResourceAttr(resourceName, "workspace_creation_properties.0.default_ou", ""), + ), + }, + { + Config: testAccWorkspacesDirectoryConfig_workspaceCreationProperties_customSecurityGroupId_defaultOu_Present(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAwsWorkspacesDirectoryExists(resourceName, &v), + 
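+					// With both optional arguments omitted from the configuration,
+					// they should read back from the API as empty strings.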
resource.TestCheckResourceAttr(resourceName, "workspace_creation_properties.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "workspace_creation_properties.0.custom_security_group_id", resourceSecurityGroup, "id"), + resource.TestCheckResourceAttr(resourceName, "workspace_creation_properties.0.default_ou", "OU=AWS,DC=Workgroup,DC=Example,DC=com"), + ), + }, + { + Config: testAccWorkspacesDirectoryConfig_workspaceCreationProperties_customSecurityGroupId_defaultOu_Absent(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAwsWorkspacesDirectoryExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "workspace_creation_properties.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "workspace_creation_properties.0.custom_security_group_id", resourceSecurityGroup, "id"), + resource.TestCheckResourceAttr(resourceName, "workspace_creation_properties.0.default_ou", "OU=AWS,DC=Workgroup,DC=Example,DC=com"), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func TestAccAwsWorkspacesDirectory_ipGroupIds(t *testing.T) { var v workspaces.WorkspaceDirectory rName := acctest.RandString(8) @@ -325,6 +374,181 @@ func TestAccAwsWorkspacesDirectory_ipGroupIds(t *testing.T) { }) } +func TestExpandSelfServicePermissions(t *testing.T) { + cases := []struct { + input []interface{} + expected *workspaces.SelfservicePermissions + }{ + // Empty + { + input: []interface{}{}, + expected: nil, + }, + // Full + { + input: []interface{}{ + map[string]interface{}{ + "change_compute_type": false, + "increase_volume_size": false, + "rebuild_workspace": true, + "restart_workspace": true, + "switch_running_mode": true, + }, + }, + expected: &workspaces.SelfservicePermissions{ + ChangeComputeType: aws.String(workspaces.ReconnectEnumDisabled), + IncreaseVolumeSize: aws.String(workspaces.ReconnectEnumDisabled), + RebuildWorkspace: aws.String(workspaces.ReconnectEnumEnabled), + RestartWorkspace: aws.String(workspaces.ReconnectEnumEnabled), + SwitchRunningMode: aws.String(workspaces.ReconnectEnumEnabled), + }, + }, + } + + for _, c := range cases { + actual := expandSelfServicePermissions(c.input) + if !reflect.DeepEqual(actual, c.expected) { + t.Fatalf("expected\n\n%#+v\n\ngot\n\n%#+v", c.expected, actual) + } + } +} + +func TestFlattenSelfServicePermissions(t *testing.T) { + cases := []struct { + input *workspaces.SelfservicePermissions + expected []interface{} + }{ + // Empty + { + input: nil, + expected: []interface{}{}, + }, + // Full + { + input: &workspaces.SelfservicePermissions{ + ChangeComputeType: aws.String(workspaces.ReconnectEnumDisabled), + IncreaseVolumeSize: aws.String(workspaces.ReconnectEnumDisabled), + RebuildWorkspace: aws.String(workspaces.ReconnectEnumEnabled), + RestartWorkspace: aws.String(workspaces.ReconnectEnumEnabled), + SwitchRunningMode: aws.String(workspaces.ReconnectEnumEnabled), + }, + expected: []interface{}{ + map[string]interface{}{ + "change_compute_type": false, + "increase_volume_size": false, + "rebuild_workspace": true, + "restart_workspace": true, + "switch_running_mode": true, + }, + }, + }, + } + + for _, c := range cases { + actual := flattenSelfServicePermissions(c.input) + if !reflect.DeepEqual(actual, c.expected) { + t.Fatalf("expected\n\n%#+v\n\ngot\n\n%#+v", c.expected, actual) + } + } +} + +func TestExpandWorkspaceCreationProperties(t *testing.T) { + cases := []struct { + input []interface{} + expected *workspaces.WorkspaceCreationProperties + }{ + // Empty + { + input: []interface{}{}, + expected: nil, + }, + // Full + 
{ + input: []interface{}{ + map[string]interface{}{ + "custom_security_group_id": "sg-123456789012", + "default_ou": "OU=AWS,DC=Workgroup,DC=Example,DC=com", + "enable_internet_access": true, + "enable_maintenance_mode": true, + "user_enabled_as_local_administrator": true, + }, + }, + expected: &workspaces.WorkspaceCreationProperties{ + CustomSecurityGroupId: aws.String("sg-123456789012"), + DefaultOu: aws.String("OU=AWS,DC=Workgroup,DC=Example,DC=com"), + EnableInternetAccess: aws.Bool(true), + EnableMaintenanceMode: aws.Bool(true), + UserEnabledAsLocalAdministrator: aws.Bool(true), + }, + }, + // Without Custom Security Group ID & Default OU + { + input: []interface{}{ + map[string]interface{}{ + "custom_security_group_id": "", + "default_ou": "", + "enable_internet_access": true, + "enable_maintenance_mode": true, + "user_enabled_as_local_administrator": true, + }, + }, + expected: &workspaces.WorkspaceCreationProperties{ + CustomSecurityGroupId: nil, + DefaultOu: nil, + EnableInternetAccess: aws.Bool(true), + EnableMaintenanceMode: aws.Bool(true), + UserEnabledAsLocalAdministrator: aws.Bool(true), + }, + }, + } + + for _, c := range cases { + actual := expandWorkspaceCreationProperties(c.input) + if !reflect.DeepEqual(actual, c.expected) { + t.Fatalf("expected\n\n%#+v\n\ngot\n\n%#+v", c.expected, actual) + } + } +} + +func TestFlattenWorkspaceCreationProperties(t *testing.T) { + cases := []struct { + input *workspaces.DefaultWorkspaceCreationProperties + expected []interface{} + }{ + // Empty + { + input: nil, + expected: []interface{}{}, + }, + // Full + { + input: &workspaces.DefaultWorkspaceCreationProperties{ + CustomSecurityGroupId: aws.String("sg-123456789012"), + DefaultOu: aws.String("OU=AWS,DC=Workgroup,DC=Example,DC=com"), + EnableInternetAccess: aws.Bool(true), + EnableMaintenanceMode: aws.Bool(true), + UserEnabledAsLocalAdministrator: aws.Bool(true), + }, + expected: []interface{}{ + map[string]interface{}{ + "custom_security_group_id": "sg-123456789012", + "default_ou": "OU=AWS,DC=Workgroup,DC=Example,DC=com", + "enable_internet_access": true, + "enable_maintenance_mode": true, + "user_enabled_as_local_administrator": true, + }, + }, + }, + } + + for _, c := range cases { + actual := flattenWorkspaceCreationProperties(c.input) + if !reflect.DeepEqual(actual, c.expected) { + t.Fatalf("expected\n\n%#+v\n\ngot\n\n%#+v", c.expected, actual) + } + } +} + func testAccPreCheckHasIAMRole(t *testing.T, roleName string) { conn := testAccProvider.Meta().(*AWSClient).iamconn @@ -406,84 +630,6 @@ func testAccCheckAwsWorkspacesDirectoryExists(n string, v *workspaces.WorkspaceD } } -func TestExpandSelfServicePermissions(t *testing.T) { - cases := []struct { - input []interface{} - expected *workspaces.SelfservicePermissions - }{ - // Empty - { - input: []interface{}{}, - expected: nil, - }, - // Full - { - input: []interface{}{ - map[string]interface{}{ - "change_compute_type": false, - "increase_volume_size": false, - "rebuild_workspace": true, - "restart_workspace": true, - "switch_running_mode": true, - }, - }, - expected: &workspaces.SelfservicePermissions{ - ChangeComputeType: aws.String(workspaces.ReconnectEnumDisabled), - IncreaseVolumeSize: aws.String(workspaces.ReconnectEnumDisabled), - RebuildWorkspace: aws.String(workspaces.ReconnectEnumEnabled), - RestartWorkspace: aws.String(workspaces.ReconnectEnumEnabled), - SwitchRunningMode: aws.String(workspaces.ReconnectEnumEnabled), - }, - }, - } - - for _, c := range cases { - actual := expandSelfServicePermissions(c.input) - if 
!reflect.DeepEqual(actual, c.expected) { - t.Fatalf("expected\n\n%#+v\n\ngot\n\n%#+v", c.expected, actual) - } - } -} - -func TestFlattenSelfServicePermissions(t *testing.T) { - cases := []struct { - input *workspaces.SelfservicePermissions - expected []interface{} - }{ - // Empty - { - input: nil, - expected: []interface{}{}, - }, - // Full - { - input: &workspaces.SelfservicePermissions{ - ChangeComputeType: aws.String(workspaces.ReconnectEnumDisabled), - IncreaseVolumeSize: aws.String(workspaces.ReconnectEnumDisabled), - RebuildWorkspace: aws.String(workspaces.ReconnectEnumEnabled), - RestartWorkspace: aws.String(workspaces.ReconnectEnumEnabled), - SwitchRunningMode: aws.String(workspaces.ReconnectEnumEnabled), - }, - expected: []interface{}{ - map[string]interface{}{ - "change_compute_type": false, - "increase_volume_size": false, - "rebuild_workspace": true, - "restart_workspace": true, - "switch_running_mode": true, - }, - }, - }, - } - - for _, c := range cases { - actual := flattenSelfServicePermissions(c.input) - if !reflect.DeepEqual(actual, c.expected) { - t.Fatalf("expected\n\n%#+v\n\ngot\n\n%#+v", c.expected, actual) - } - } -} - func testAccPreCheckWorkspacesDirectory(t *testing.T) { conn := testAccProvider.Meta().(*AWSClient).workspacesconn @@ -658,6 +804,50 @@ resource "aws_workspaces_directory" "main" { `, rName)) } +func testAccWorkspacesDirectoryConfig_workspaceCreationProperties_customSecurityGroupId_defaultOu_Absent(rName string) string { + return composeConfig( + testAccAwsWorkspacesDirectoryConfig_Prerequisites(rName), + fmt.Sprintf(` +resource "aws_security_group" "test" { + vpc_id = aws_vpc.main.id + name = "tf-acctest-%[1]s" +} + +resource "aws_workspaces_directory" "main" { + directory_id = aws_directory_service_directory.main.id + + workspace_creation_properties { + enable_internet_access = true + enable_maintenance_mode = false + user_enabled_as_local_administrator = false + } +} +`, rName)) +} + +func testAccWorkspacesDirectoryConfig_workspaceCreationProperties_customSecurityGroupId_defaultOu_Present(rName string) string { + return composeConfig( + testAccAwsWorkspacesDirectoryConfig_Prerequisites(rName), + fmt.Sprintf(` +resource "aws_security_group" "test" { + vpc_id = aws_vpc.main.id + name = "tf-acctest-%[1]s" +} + +resource "aws_workspaces_directory" "main" { + directory_id = aws_directory_service_directory.main.id + + workspace_creation_properties { + custom_security_group_id = aws_security_group.test.id + default_ou = "OU=AWS,DC=Workgroup,DC=Example,DC=com" + enable_internet_access = true + enable_maintenance_mode = false + user_enabled_as_local_administrator = false + } +} +`, rName)) +} + func testAccWorkspacesDirectoryConfig_ipGroupIds_create(rName string) string { return composeConfig( testAccAwsWorkspacesDirectoryConfig_Prerequisites(rName), From 35cbae73ce3208faf1cc16434f841a00276ef7ab Mon Sep 17 00:00:00 2001 From: Andrew Babichev Date: Fri, 4 Dec 2020 18:33:11 +0200 Subject: [PATCH 0131/1212] @review Drop redundant aws_security_group test resource --- aws/resource_aws_workspaces_directory_test.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/aws/resource_aws_workspaces_directory_test.go b/aws/resource_aws_workspaces_directory_test.go index 16f3e1a4863..bb67c9a2f56 100644 --- a/aws/resource_aws_workspaces_directory_test.go +++ b/aws/resource_aws_workspaces_directory_test.go @@ -808,11 +808,6 @@ func testAccWorkspacesDirectoryConfig_workspaceCreationProperties_customSecurity return composeConfig( 
testAccAwsWorkspacesDirectoryConfig_Prerequisites(rName), fmt.Sprintf(` -resource "aws_security_group" "test" { - vpc_id = aws_vpc.main.id - name = "tf-acctest-%[1]s" -} - resource "aws_workspaces_directory" "main" { directory_id = aws_directory_service_directory.main.id From 20304a18eb84bae50d9eb14a68bb7c410e02970d Mon Sep 17 00:00:00 2001 From: Andrew Babichev Date: Mon, 7 Dec 2020 13:35:47 +0200 Subject: [PATCH 0132/1212] Add terraform acctest tags --- aws/resource_aws_workspaces_directory_test.go | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/aws/resource_aws_workspaces_directory_test.go b/aws/resource_aws_workspaces_directory_test.go index bb67c9a2f56..87d78d4270a 100644 --- a/aws/resource_aws_workspaces_directory_test.go +++ b/aws/resource_aws_workspaces_directory_test.go @@ -717,6 +717,10 @@ func testAccWorkspacesDirectoryConfig(rName string) string { testAccAwsWorkspacesDirectoryConfig_Prerequisites(rName), ` resource "aws_workspaces_directory" "main" { directory_id = aws_directory_service_directory.main.id + + tags = { + Name = "tf-testacc-workspaces-directory-%[1]s" + } } data "aws_iam_role" "workspaces-default" { @@ -738,6 +742,10 @@ resource "aws_workspaces_directory" "main" { restart_workspace = false switch_running_mode = true } + + tags = { + Name = "tf-testacc-workspaces-directory-%[1]s" + } } `) } @@ -800,6 +808,10 @@ resource "aws_workspaces_directory" "main" { enable_maintenance_mode = false user_enabled_as_local_administrator = false } + + tags = { + Name = "tf-testacc-workspaces-directory-%[1]s" + } } `, rName)) } @@ -816,6 +828,10 @@ resource "aws_workspaces_directory" "main" { enable_maintenance_mode = false user_enabled_as_local_administrator = false } + + tags = { + Name = "tf-testacc-workspaces-directory-%[1]s" + } } `, rName)) } @@ -839,6 +855,10 @@ resource "aws_workspaces_directory" "main" { enable_maintenance_mode = false user_enabled_as_local_administrator = false } + + tags = { + Name = "tf-testacc-workspaces-directory-%[1]s" + } } `, rName)) } @@ -857,6 +877,10 @@ resource "aws_workspaces_directory" "test" { ip_group_ids = [ aws_workspaces_ip_group.test_alpha.id ] + + tags = { + Name = "tf-testacc-workspaces-directory-%[1]s" + } } `, rName)) } @@ -880,6 +904,10 @@ resource "aws_workspaces_directory" "test" { aws_workspaces_ip_group.test_beta.id, aws_workspaces_ip_group.test_gamma.id ] + + tags = { + Name = "tf-testacc-workspaces-directory-%[1]s" + } } `, rName)) } From f05b945487fa1c92e806dba520710c0be5da5752 Mon Sep 17 00:00:00 2001 From: joyarackal Date: Mon, 7 Dec 2020 13:06:40 +0100 Subject: [PATCH 0133/1212] update documentation --- website/docs/r/kinesis_stream.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/kinesis_stream.html.markdown b/website/docs/r/kinesis_stream.html.markdown index 8f0d786c1b6..5391e4a0236 100644 --- a/website/docs/r/kinesis_stream.html.markdown +++ b/website/docs/r/kinesis_stream.html.markdown @@ -39,7 +39,7 @@ The following arguments are supported: * `name` - (Required) A name to identify the stream. This is unique to the AWS account and region the Stream is created in. * `shard_count` – (Required) The number of shards that the stream will use. Amazon has guidelines for specifying the Stream size that should be referenced when creating a Kinesis stream. See [Amazon Kinesis Streams][2] for more. -* `retention_period` - (Optional) Length of time data records are accessible after they are added to the stream. 
The maximum value of a stream's retention period is 168 hours. Minimum value is 24. Default is 24. +* `retention_period` - (Optional) Length of time data records are accessible after they are added to the stream. The maximum value of a stream's retention period is 8760 hours. Minimum value is 24. Default is 24. * `shard_level_metrics` - (Optional) A list of shard-level CloudWatch metrics which can be enabled for the stream. See [Monitoring with CloudWatch][3] for more. Note that the value ALL should not be used; instead you should provide an explicit list of metrics you wish to enable. * `enforce_consumer_deletion` - (Optional) A boolean that indicates all registered consumers should be deregistered from the stream so that the stream can be destroyed without error. The default value is `false`. * `encryption_type` - (Optional) The encryption type to use. The only acceptable values are `NONE` or `KMS`. The default value is `NONE`. From ce1c1bc8b2872b1376b0fedc838e734617cca49c Mon Sep 17 00:00:00 2001 From: Andrew Babichev Date: Mon, 7 Dec 2020 16:02:44 +0200 Subject: [PATCH 0134/1212] Fix custom security attribute check --- aws/resource_aws_workspaces_directory_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_workspaces_directory_test.go b/aws/resource_aws_workspaces_directory_test.go index 87d78d4270a..48ed67ded79 100644 --- a/aws/resource_aws_workspaces_directory_test.go +++ b/aws/resource_aws_workspaces_directory_test.go @@ -323,8 +323,8 @@ func TestAccAwsWorkspacesDirectory_workspaceCreationProperties_customSecurityGro Check: resource.ComposeAggregateTestCheckFunc( testAccCheckAwsWorkspacesDirectoryExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "workspace_creation_properties.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "workspace_creation_properties.0.custom_security_group_id", resourceSecurityGroup, "id"), - resource.TestCheckResourceAttr(resourceName, "workspace_creation_properties.0.default_ou", "OU=AWS,DC=Workgroup,DC=Example,DC=com"), + resource.TestCheckResourceAttrSet(resourceName, "workspace_creation_properties.0.custom_security_group_id"), + resource.TestCheckResourceAttrSet(resourceName, "workspace_creation_properties.0.default_ou"), ), ExpectNonEmptyPlan: true, }, From 81847da36db03f7e1b111f0c69f3fb2aeb578eea Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Mon, 7 Dec 2020 09:57:32 -0500 Subject: [PATCH 0135/1212] tests/res/security_group_rule: Fix hardcoded region --- aws/resource_aws_security_group_rule_test.go | 23 +++++++++++--------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/aws/resource_aws_security_group_rule_test.go b/aws/resource_aws_security_group_rule_test.go index 963e19447c7..0453961b4db 100644 --- a/aws/resource_aws_security_group_rule_test.go +++ b/aws/resource_aws_security_group_rule_test.go @@ -679,7 +679,7 @@ func TestAccAWSSecurityGroupRule_PrefixListEgress(t *testing.T) { testAccCheckAWSSecurityGroupRuleExists("aws_security_group.egress", &group), // lookup info on the VPC Endpoint created, to populate the expected // IP Perm - testAccCheckVpcEndpointExists("aws_vpc_endpoint.s3-us-west-2", &endpoint), + testAccCheckVpcEndpointExists("aws_vpc_endpoint.s3_endpoint", &endpoint), setupSG, testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.egress_1", &group, &p, "egress"), ), @@ -1075,7 +1075,7 @@ func TestAccAWSSecurityGroupRule_MultiDescription(t *testing.T) { Check: resource.ComposeTestCheckFunc( 
testAccCheckAWSSecurityGroupRuleExists("aws_security_group.worker", &group), testAccCheckAWSSecurityGroupRuleExists("aws_security_group.nat", &nat), - testAccCheckVpcEndpointExists("aws_vpc_endpoint.s3-us-west-2", &endpoint), + testAccCheckVpcEndpointExists("aws_vpc_endpoint.s3_endpoint", &endpoint), testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.rule_1", &group, &rule1, "ingress"), resource.TestCheckResourceAttr("aws_security_group_rule.rule_1", "description", "CIDR Description"), @@ -1111,7 +1111,7 @@ func TestAccAWSSecurityGroupRule_MultiDescription(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAWSSecurityGroupRuleExists("aws_security_group.worker", &group), testAccCheckAWSSecurityGroupRuleExists("aws_security_group.nat", &nat), - testAccCheckVpcEndpointExists("aws_vpc_endpoint.s3-us-west-2", &endpoint), + testAccCheckVpcEndpointExists("aws_vpc_endpoint.s3_endpoint", &endpoint), testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.rule_1", &group, &rule1, "egress"), resource.TestCheckResourceAttr("aws_security_group_rule.rule_1", "description", "CIDR Description"), @@ -1588,9 +1588,11 @@ resource "aws_vpc" "tf_sgrule_description_test" { } } -resource "aws_vpc_endpoint" "s3-us-west-2" { +data "aws_region" "current" {} + +resource "aws_vpc_endpoint" "s3_endpoint" { vpc_id = aws_vpc.tf_sgrule_description_test.id - service_name = "com.amazonaws.us-west-2.s3" + service_name = "com.amazonaws.${data.aws_region.current.name}.s3" } resource "aws_security_group" "worker" { @@ -1649,7 +1651,7 @@ resource "aws_security_group_rule" "rule_4" { protocol = "tcp" from_port = 22 to_port = 22 - prefix_list_ids = [aws_vpc_endpoint.s3-us-west-2.prefix_list_id] + prefix_list_ids = [aws_vpc_endpoint.s3_endpoint.prefix_list_id] } `) } @@ -1798,7 +1800,6 @@ resource "aws_security_group_rule" "other_ingress" { } const testAccAWSSecurityGroupRulePrefixListEgressConfig = ` - resource "aws_vpc" "tf_sg_prefix_list_egress_test" { cidr_block = "10.0.0.0/16" @@ -1811,9 +1812,11 @@ resource "aws_route_table" "default" { vpc_id = aws_vpc.tf_sg_prefix_list_egress_test.id } -resource "aws_vpc_endpoint" "s3-us-west-2" { +data "aws_region" "current" {} + +resource "aws_vpc_endpoint" "s3_endpoint" { vpc_id = aws_vpc.tf_sg_prefix_list_egress_test.id - service_name = "com.amazonaws.us-west-2.s3" + service_name = "com.amazonaws.${data.aws_region.current.name}.s3" route_table_ids = [aws_route_table.default.id] policy = < Date: Mon, 7 Dec 2020 12:07:55 -0500 Subject: [PATCH 0136/1212] tests/resource/aws_default_route_table: Refactor acceptance tests in preparation for future fixes/enhancements (#14012) * r/aws_default_route_table: Rework 'TestAccAWSDefaultRouteTable_basic' and 'TestAccAWSDefaultRouteTable_disappears_Vpc'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSDefaultRouteTable_basic' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSDefaultRouteTable_basic -timeout 120m === RUN TestAccAWSDefaultRouteTable_basic === PAUSE TestAccAWSDefaultRouteTable_basic === CONT TestAccAWSDefaultRouteTable_basic --- PASS: TestAccAWSDefaultRouteTable_basic (53.01s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 53.064s $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSDefaultRouteTable_disappears_Vpc' ==> Checking that code complies with gofmt requirements... 
TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSDefaultRouteTable_disappears_Vpc -timeout 120m === RUN TestAccAWSDefaultRouteTable_disappears_Vpc === PAUSE TestAccAWSDefaultRouteTable_disappears_Vpc === CONT TestAccAWSDefaultRouteTable_disappears_Vpc --- PASS: TestAccAWSDefaultRouteTable_disappears_Vpc (24.54s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 24.591s * r/aws_default_route_table: Rename 'TestAccAWSDefaultRouteTable_Route' to 'TestAccAWSDefaultRouteTable_Route_ConfigMode' to match r/aws_route_table. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSDefaultRouteTable_Route_ConfigMode' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSDefaultRouteTable_Route_ConfigMode -timeout 120m === RUN TestAccAWSDefaultRouteTable_Route_ConfigMode === PAUSE TestAccAWSDefaultRouteTable_Route_ConfigMode === CONT TestAccAWSDefaultRouteTable_Route_ConfigMode --- PASS: TestAccAWSDefaultRouteTable_Route_ConfigMode (102.58s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 102.647s * r/aws_default_route_table: Rework 'TestAccAWSDefaultRouteTable_swap'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSDefaultRouteTable_swap' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSDefaultRouteTable_swap -timeout 120m === RUN TestAccAWSDefaultRouteTable_swap === PAUSE TestAccAWSDefaultRouteTable_swap === CONT TestAccAWSDefaultRouteTable_swap --- PASS: TestAccAWSDefaultRouteTable_swap (109.41s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 109.499s * r/aws_default_route_table: 'TestAccAWSDefaultRouteTable_disappears_Vpc' -> 'TestAccAWSDefaultRouteTable_vpcDisappears'. * r/aws_default_route_table: Rework 'TestAccAWSDefaultRouteTable_ConditionalCidrBlock'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSDefaultRouteTable_ConditionalCidrBlock' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSDefaultRouteTable_ConditionalCidrBlock -timeout 120m === RUN TestAccAWSDefaultRouteTable_ConditionalCidrBlock === PAUSE TestAccAWSDefaultRouteTable_ConditionalCidrBlock === CONT TestAccAWSDefaultRouteTable_ConditionalCidrBlock --- PASS: TestAccAWSDefaultRouteTable_ConditionalCidrBlock (79.63s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 80.192s * r/aws_default_route_table: Rename 'TestAccAWSDefaultRouteTable_vpc_endpoint' to 'TestAccAWSDefaultRouteTable_VpcEndpointAssociation'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSDefaultRouteTable_VpcEndpointAssociation' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSDefaultRouteTable_VpcEndpointAssociation -timeout 120m === RUN TestAccAWSDefaultRouteTable_VpcEndpointAssociation === PAUSE TestAccAWSDefaultRouteTable_VpcEndpointAssociation === CONT TestAccAWSDefaultRouteTable_VpcEndpointAssociation --- PASS: TestAccAWSDefaultRouteTable_VpcEndpointAssociation (57.87s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 57.930s * r/aws_default_route_table: Rename 'TestAccAWSDefaultRouteTable_Route_TransitGatewayID' to 'TestAccAWSDefaultRouteTable_IPv4_To_TransitGateway'. 
Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSDefaultRouteTable_IPv4_To_TransitGateway' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSDefaultRouteTable_IPv4_To_TransitGateway -timeout 120m === RUN TestAccAWSDefaultRouteTable_IPv4_To_TransitGateway === PAUSE TestAccAWSDefaultRouteTable_IPv4_To_TransitGateway === CONT TestAccAWSDefaultRouteTable_IPv4_To_TransitGateway --- PASS: TestAccAWSDefaultRouteTable_IPv4_To_TransitGateway (317.38s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 317.429s * r/aws_default_route_table: Add 'testAccCheckAWSDefaultRouteTableRoute'. * r/aws_default_route_table: Use 'testAccAvailableAZsNoOptInExcludeConfig'. * Use 'testAccAvailableAZsNoOptInDefaultExcludeConfig'. * r/aws_default_route_table: Add acceptance tests from #16131. * Rename 'TestAccAWSDefaultRouteTable_Route_VpcEndpointId' to 'TestAccAWSDefaultRouteTable_IPv4_To_VpcEndpoint'. * r/aws_default_route_table: Missing commit from rebase. * Update aws/resource_aws_default_route_table_test.go Co-authored-by: Brian Flad Co-authored-by: Brian Flad --- aws/resource_aws_default_route_table_test.go | 467 +++++++++++-------- aws/resource_aws_route_table_test.go | 10 + 2 files changed, 283 insertions(+), 194 deletions(-) diff --git a/aws/resource_aws_default_route_table_test.go b/aws/resource_aws_default_route_table_test.go index e83d9ce8fe3..69a2ec8100b 100644 --- a/aws/resource_aws_default_route_table_test.go +++ b/aws/resource_aws_default_route_table_test.go @@ -13,9 +13,10 @@ import ( ) func TestAccAWSDefaultRouteTable_basic(t *testing.T) { - var routeTable1 ec2.RouteTable + var routeTable ec2.RouteTable resourceName := "aws_default_route_table.test" vpcResourceName := "aws_vpc.test" + rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -33,9 +34,9 @@ func TestAccAWSDefaultRouteTable_basic(t *testing.T) { ExpectError: regexp.MustCompile(`EC2 Default Route Table \(vpc-00000000\): not found`), }, { - Config: testAccDefaultRouteTableConfigRequired(), + Config: testAccDefaultRouteTableConfigBasic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists(resourceName, &routeTable1), + testAccCheckRouteTableExists(resourceName, &routeTable), testAccCheckResourceAttrAccountID(resourceName, "owner_id"), resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), resource.TestCheckResourceAttr(resourceName, "route.#", "0"), @@ -54,10 +55,11 @@ func TestAccAWSDefaultRouteTable_basic(t *testing.T) { } func TestAccAWSDefaultRouteTable_disappears_Vpc(t *testing.T) { - var routeTable1 ec2.RouteTable - var vpc1 ec2.Vpc + var routeTable ec2.RouteTable + var vpc ec2.Vpc resourceName := "aws_default_route_table.test" vpcResourceName := "aws_vpc.test" + rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -65,11 +67,11 @@ func TestAccAWSDefaultRouteTable_disappears_Vpc(t *testing.T) { CheckDestroy: testAccCheckRouteTableDestroy, Steps: []resource.TestStep{ { - Config: testAccDefaultRouteTableConfigRequired(), + Config: testAccDefaultRouteTableConfigBasic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists(resourceName, &routeTable1), - testAccCheckVpcExists(vpcResourceName, &vpc1), - testAccCheckVpcDisappears(&vpc1), + testAccCheckRouteTableExists(resourceName, &routeTable), + 
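// Deleting the VPC out of band also removes its default route table, so + // this step expects a non-empty plan on refresh rather than an error. +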
testAccCheckVpcExists(vpcResourceName, &vpc), + testAccCheckResourceDisappears(testAccProvider, resourceAwsVpc(), vpcResourceName), ), ExpectNonEmptyPlan: true, }, @@ -77,9 +79,12 @@ func TestAccAWSDefaultRouteTable_disappears_Vpc(t *testing.T) { }) } -func TestAccAWSDefaultRouteTable_Route(t *testing.T) { - var v ec2.RouteTable - resourceName := "aws_default_route_table.foo" +func TestAccAWSDefaultRouteTable_Route_ConfigMode(t *testing.T) { + var routeTable ec2.RouteTable + resourceName := "aws_default_route_table.test" + igwResourceName := "aws_internet_gateway.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + destinationCidr := "10.2.0.0/16" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -88,10 +93,16 @@ func TestAccAWSDefaultRouteTable_Route(t *testing.T) { CheckDestroy: testAccCheckDefaultRouteTableDestroy, Steps: []resource.TestStep{ { - Config: testAccDefaultRouteTableConfig, + Config: testAccDefaultRouteTableConfigIpv4InternetGateway(rName, destinationCidr), Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists(resourceName, &v), + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 2), testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), + resource.TestCheckResourceAttr(resourceName, "route.#", "1"), + testAccCheckDefaultRouteTableRoute(resourceName, "cidr_block", destinationCidr, "gateway_id", igwResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), ), }, { @@ -101,19 +112,32 @@ func TestAccAWSDefaultRouteTable_Route(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccDefaultRouteTableConfig_noRouteBlock, + Config: testAccDefaultRouteTableConfigNoRouteBlock(rName), Check: resource.ComposeTestCheckFunc( + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 2), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), // The route block from the previous step should still be // present, because no blocks means "ignore existing blocks". resource.TestCheckResourceAttr(resourceName, "route.#", "1"), + testAccCheckDefaultRouteTableRoute(resourceName, "cidr_block", destinationCidr, "gateway_id", igwResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), ), }, { - Config: testAccDefaultRouteTableConfig_routeBlocksExplicitZero, + Config: testAccDefaultRouteTableConfigRouteBlocksExplicitZero(rName), Check: resource.ComposeTestCheckFunc( + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 1), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), // This config uses attribute syntax to set zero routes // explicitly, so should remove the one we created before. 
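// Only the VPC's implicit local route should then remain on the underlying table.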
resource.TestCheckResourceAttr(resourceName, "route.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), ), }, }, @@ -121,8 +145,13 @@ func TestAccAWSDefaultRouteTable_Route(t *testing.T) { } func TestAccAWSDefaultRouteTable_swap(t *testing.T) { - var v ec2.RouteTable - resourceName := "aws_default_route_table.foo" + var routeTable ec2.RouteTable + resourceName := "aws_default_route_table.test" + igwResourceName := "aws_internet_gateway.test" + rtResourceName := "aws_route_table.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + destinationCidr1 := "10.2.0.0/16" + destinationCidr2 := "10.3.0.0/16" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -131,10 +160,16 @@ func TestAccAWSDefaultRouteTable_swap(t *testing.T) { CheckDestroy: testAccCheckDefaultRouteTableDestroy, Steps: []resource.TestStep{ { - Config: testAccDefaultRouteTable_change, + Config: testAccDefaultRouteTableConfigIpv4InternetGateway(rName, destinationCidr1), Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists( - resourceName, &v), + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 2), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), + resource.TestCheckResourceAttr(resourceName, "route.#", "1"), + testAccCheckDefaultRouteTableRoute(resourceName, "cidr_block", destinationCidr1, "gateway_id", igwResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), ), }, { @@ -150,20 +185,38 @@ func TestAccAWSDefaultRouteTable_swap(t *testing.T) { // this case) a diff as the table now needs to be updated to match the // config { - Config: testAccDefaultRouteTable_change_mod, + Config: testAccDefaultRouteTableConfigSwap(rName, destinationCidr1, destinationCidr2), Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists( - resourceName, &v), + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 2), + resource.TestCheckResourceAttr(resourceName, "route.#", "1"), + testAccCheckDefaultRouteTableRoute(resourceName, "cidr_block", destinationCidr1, "gateway_id", igwResourceName, "id"), ), ExpectNonEmptyPlan: true, }, + { + Config: testAccDefaultRouteTableConfigSwap(rName, destinationCidr1, destinationCidr2), + Check: resource.ComposeTestCheckFunc( + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 2), + resource.TestCheckResourceAttr(resourceName, "route.#", "1"), + testAccCheckDefaultRouteTableRoute(resourceName, "cidr_block", destinationCidr1, "gateway_id", igwResourceName, "id"), + resource.TestCheckResourceAttrPair(resourceName, "id", rtResourceName, "id"), + ), + // Follow up plan will now show a diff as the destination CIDR on the aws_route_table + // (now also the aws_default_route_table) will change from destinationCidr1 to destinationCidr2. 
+ ExpectNonEmptyPlan: true, + }, }, }) } -func TestAccAWSDefaultRouteTable_Route_TransitGatewayID(t *testing.T) { - var routeTable1 ec2.RouteTable +func TestAccAWSDefaultRouteTable_IPv4_To_TransitGateway(t *testing.T) { + var routeTable ec2.RouteTable resourceName := "aws_default_route_table.test" + tgwResourceName := "aws_ec2_transit_gateway.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + destinationCidr := "10.2.0.0/16" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -171,9 +224,12 @@ func TestAccAWSDefaultRouteTable_Route_TransitGatewayID(t *testing.T) { CheckDestroy: testAccCheckRouteTableDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSDefaultRouteTableConfigRouteTransitGatewayID(), + Config: testAccDefaultRouteTableConfigIpv4TransitGateway(rName, destinationCidr), Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists(resourceName, &routeTable1), + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 2), + resource.TestCheckResourceAttr(resourceName, "route.#", "1"), + testAccCheckDefaultRouteTableRoute(resourceName, "cidr_block", destinationCidr, "transit_gateway_id", tgwResourceName, "id"), ), }, { @@ -186,10 +242,12 @@ func TestAccAWSDefaultRouteTable_Route_TransitGatewayID(t *testing.T) { }) } -func TestAccAWSDefaultRouteTable_Route_VpcEndpointId(t *testing.T) { - var routeTable1 ec2.RouteTable - rName := acctest.RandomWithPrefix("tf-acc-test") +func TestAccAWSDefaultRouteTable_IPv4_To_VpcEndpoint(t *testing.T) { + var routeTable ec2.RouteTable resourceName := "aws_default_route_table.test" + vpceResourceName := "aws_vpc_endpoint.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + destinationCidr := "0.0.0.0/0" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -197,9 +255,12 @@ func TestAccAWSDefaultRouteTable_Route_VpcEndpointId(t *testing.T) { CheckDestroy: testAccCheckRouteTableDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSDefaultRouteTableConfigRouteVpcEndpointId(rName), + Config: testAccDefaultRouteTableConfigIpv4VpcEndpoint(rName, destinationCidr), Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists(resourceName, &routeTable1), + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 2), + resource.TestCheckResourceAttr(resourceName, "route.#", "1"), + testAccCheckDefaultRouteTableRoute(resourceName, "cidr_block", destinationCidr, "vpc_endpoint_id", vpceResourceName, "id"), ), }, { @@ -212,18 +273,21 @@ func TestAccAWSDefaultRouteTable_Route_VpcEndpointId(t *testing.T) { // VPC Endpoints will not delete unless the route is removed prior, otherwise will error: // InvalidParameter: Endpoint must be removed from route table before deletion { - Config: testAccAWSDefaultRouteTableConfigRouteVpcEndpointIdNoRoute(rName), + Config: testAccDefaultRouteTableConfigIpv4VpcEndpointNoRoute(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists(resourceName, &routeTable1), + testAccCheckRouteTableExists(resourceName, &routeTable), ), }, }, }) } -func TestAccAWSDefaultRouteTable_vpc_endpoint(t *testing.T) { - var v ec2.RouteTable - resourceName := "aws_default_route_table.foo" +func TestAccAWSDefaultRouteTable_VpcEndpointAssociation(t *testing.T) { + var routeTable ec2.RouteTable + resourceName := "aws_default_route_table.test" + igwResourceName := "aws_internet_gateway.test" + rName := 
acctest.RandomWithPrefix("tf-acc-test") + destinationCidr := "10.2.0.0/16" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -232,10 +296,12 @@ func TestAccAWSDefaultRouteTable_vpc_endpoint(t *testing.T) { CheckDestroy: testAccCheckDefaultRouteTableDestroy, Steps: []resource.TestStep{ { - Config: testAccDefaultRouteTable_vpc_endpoint, + Config: testAccDefaultRouteTableConfigVpcEndpointAssociation(rName, destinationCidr), Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists( - resourceName, &v), + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 3), + resource.TestCheckResourceAttr(resourceName, "route.#", "1"), + testAccCheckDefaultRouteTableRoute(resourceName, "cidr_block", destinationCidr, "gateway_id", igwResourceName, "id"), ), }, { @@ -290,7 +356,10 @@ func TestAccAWSDefaultRouteTable_tags(t *testing.T) { func TestAccAWSDefaultRouteTable_ConditionalCidrBlock(t *testing.T) { var routeTable ec2.RouteTable resourceName := "aws_default_route_table.test" + igwResourceName := "aws_internet_gateway.test" rName := acctest.RandomWithPrefix("tf-acc-test") + destinationCidr := "10.2.0.0/16" + destinationIpv6Cidr := "::/0" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -298,23 +367,17 @@ func TestAccAWSDefaultRouteTable_ConditionalCidrBlock(t *testing.T) { CheckDestroy: testAccCheckAWSRouteDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSDefaultRouteTableConfigConditionalIpv4Ipv6(rName, false), + Config: testAccDefaultRouteTableConfigConditionalIpv4Ipv6(rName, destinationCidr, destinationIpv6Cidr, false), Check: resource.ComposeTestCheckFunc( testAccCheckRouteTableExists(resourceName, &routeTable), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "route.*", map[string]string{ - "cidr_block": "0.0.0.0/0", - "ipv6_cidr_block": "", - }), + testAccCheckDefaultRouteTableRoute(resourceName, "cidr_block", destinationCidr, "gateway_id", igwResourceName, "id"), ), }, { - Config: testAccAWSDefaultRouteTableConfigConditionalIpv4Ipv6(rName, true), + Config: testAccDefaultRouteTableConfigConditionalIpv4Ipv6(rName, destinationCidr, destinationIpv6Cidr, true), Check: resource.ComposeTestCheckFunc( testAccCheckRouteTableExists(resourceName, &routeTable), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "route.*", map[string]string{ - "cidr_block": "", - "ipv6_cidr_block": "::/0", - }), + testAccCheckDefaultRouteTableRoute(resourceName, "ipv6_cidr_block", destinationIpv6Cidr, "gateway_id", igwResourceName, "id"), ), }, { @@ -356,6 +419,25 @@ func testAccCheckDefaultRouteTableDestroy(s *terraform.State) error { return nil } +func testAccCheckDefaultRouteTableRoute(resourceName, destinationAttr, destination, targetAttr, targetResourceName, targetResourceAttr string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[targetResourceName] + if !ok { + return fmt.Errorf("Not found: %s", targetResourceName) + } + + target := rs.Primary.Attributes[targetResourceAttr] + if target == "" { + return fmt.Errorf("Not found: %s.%s", targetResourceName, targetResourceAttr) + } + + return resource.TestCheckTypeSetElemNestedAttrs(resourceName, "route.*", map[string]string{ + destinationAttr: destination, + targetAttr: target, + })(s) + } +} + func testAccDefaultRouteTableConfigDefaultRouteTableId(defaultRouteTableId string) string { return fmt.Sprintf(` resource "aws_default_route_table" "test" { 
@@ -364,222 +446,189 @@ resource "aws_default_route_table" "test" { `, defaultRouteTableId) } -func testAccDefaultRouteTableConfigRequired() string { - return ` +func testAccDefaultRouteTableConfigBasic(rName string) string { + return fmt.Sprintf(` resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" + + tags = { + Name = %[1]q + } } resource "aws_default_route_table" "test" { default_route_table_id = aws_vpc.test.default_route_table_id } -` +`, rName) } -const testAccDefaultRouteTableConfig = ` -resource "aws_vpc" "foo" { +func testAccDefaultRouteTableConfigIpv4InternetGateway(rName, destinationCidr string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" enable_dns_hostnames = true tags = { - Name = "terraform-testacc-default-route-table" + Name = %[1]q } } -resource "aws_default_route_table" "foo" { - default_route_table_id = aws_vpc.foo.default_route_table_id +resource "aws_default_route_table" "test" { + default_route_table_id = aws_vpc.test.default_route_table_id route { - cidr_block = "10.0.1.0/32" - gateway_id = aws_internet_gateway.gw.id + cidr_block = %[2]q + gateway_id = aws_internet_gateway.test.id } tags = { - Name = "tf-default-route-table-test" + Name = %[1]q } } -resource "aws_internet_gateway" "gw" { - vpc_id = aws_vpc.foo.id - - tags = { - Name = "tf-default-route-table-test" - } -}` - -const testAccDefaultRouteTableConfig_noRouteBlock = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" - enable_dns_hostnames = true +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id tags = { - Name = "terraform-testacc-default-route-table" + Name = %[1]q } } - -resource "aws_default_route_table" "foo" { - default_route_table_id = aws_vpc.foo.default_route_table_id - - tags = { - Name = "tf-default-route-table-test" - } +`, rName, destinationCidr) } -resource "aws_internet_gateway" "gw" { - vpc_id = aws_vpc.foo.id - - tags = { - Name = "tf-default-route-table-test" - } -}` - -const testAccDefaultRouteTableConfig_routeBlocksExplicitZero = ` -resource "aws_vpc" "foo" { +func testAccDefaultRouteTableConfigNoRouteBlock(rName string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" enable_dns_hostnames = true tags = { - Name = "terraform-testacc-default-route-table" + Name = %[1]q } } -resource "aws_default_route_table" "foo" { - default_route_table_id = aws_vpc.foo.default_route_table_id - - route = [] +resource "aws_default_route_table" "test" { + default_route_table_id = aws_vpc.test.default_route_table_id tags = { - Name = "tf-default-route-table-test" + Name = %[1]q } } -resource "aws_internet_gateway" "gw" { - vpc_id = aws_vpc.foo.id +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id tags = { - Name = "tf-default-route-table-test" + Name = %[1]q } -}` +}`, rName) +} -const testAccDefaultRouteTable_change = ` -resource "aws_vpc" "foo" { +func testAccDefaultRouteTableConfigRouteBlocksExplicitZero(rName string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" enable_dns_hostnames = true tags = { - Name = "terraform-testacc-default-route-table-change" + Name = %[1]q } } -resource "aws_default_route_table" "foo" { - default_route_table_id = aws_vpc.foo.default_route_table_id - - route { - cidr_block = "10.0.1.0/32" - gateway_id = aws_internet_gateway.gw.id - } - - tags = { - Name = "this was the first main" - } -} +resource "aws_default_route_table" "test" { + default_route_table_id = aws_vpc.test.default_route_table_id -resource 
"aws_internet_gateway" "gw" { - vpc_id = aws_vpc.foo.id + route = [] tags = { - Name = "main-igw" + Name = %[1]q } } -# Thing to help testing changes -resource "aws_route_table" "r" { - vpc_id = aws_vpc.foo.id - - route { - cidr_block = "10.0.1.0/24" - gateway_id = aws_internet_gateway.gw.id - } +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id tags = { - Name = "other" + Name = %[1]q } +}`, rName) } -` -const testAccDefaultRouteTable_change_mod = ` -resource "aws_vpc" "foo" { +func testAccDefaultRouteTableConfigSwap(rName, destinationCidr1, destinationCidr2 string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" enable_dns_hostnames = true tags = { - Name = "terraform-testacc-default-route-table-change" + Name = %[1]q } } -resource "aws_default_route_table" "foo" { - default_route_table_id = aws_vpc.foo.default_route_table_id +resource "aws_default_route_table" "test" { + default_route_table_id = aws_vpc.test.default_route_table_id route { - cidr_block = "10.0.1.0/32" - gateway_id = aws_internet_gateway.gw.id + cidr_block = %[2]q + gateway_id = aws_internet_gateway.test.id } tags = { - Name = "this was the first main" + Name = %[1]q } } -resource "aws_internet_gateway" "gw" { - vpc_id = aws_vpc.foo.id +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id tags = { - Name = "main-igw" + Name = %[1]q } } -# Thing to help testing changes -resource "aws_route_table" "r" { - vpc_id = aws_vpc.foo.id +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id route { - cidr_block = "10.0.1.0/24" - gateway_id = aws_internet_gateway.gw.id + cidr_block = %[3]q + gateway_id = aws_internet_gateway.test.id } tags = { - Name = "other" + Name = %[1]q } } -resource "aws_main_route_table_association" "a" { - vpc_id = aws_vpc.foo.id - route_table_id = aws_route_table.r.id +resource "aws_main_route_table_association" "test" { + vpc_id = aws_vpc.test.id + route_table_id = aws_route_table.test.id +} +`, rName, destinationCidr1, destinationCidr2) } -` -func testAccAWSDefaultRouteTableConfigRouteTransitGatewayID() string { - return ` +func testAccDefaultRouteTableConfigIpv4TransitGateway(rName, destinationCidr string) string { + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), fmt.Sprintf(` resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" + cidr_block = "10.1.0.0/16" tags = { - Name = "tf-acc-test-ec2-default-route-table-transit-gateway-id" + Name = %[1]q } } resource "aws_subnet" "test" { - cidr_block = "10.0.0.0/24" - vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[0] + cidr_block = "10.1.1.0/24" + vpc_id = aws_vpc.test.id tags = { - Name = "tf-acc-test-ec2-default-route-table-transit-gateway-id" + Name = %[1]q } } resource "aws_ec2_transit_gateway" "test" { tags = { - Name = "tf-acc-test-ec2-default-route-table-transit-gateway-id" + Name = %[1]q } } @@ -589,7 +638,7 @@ resource "aws_ec2_transit_gateway_vpc_attachment" "test" { vpc_id = aws_vpc.test.id tags = { - Name = "tf-acc-test-ec2-default-route-table-transit-gateway-id" + Name = %[1]q } } @@ -597,14 +646,18 @@ resource "aws_default_route_table" "test" { default_route_table_id = aws_vpc.test.default_route_table_id route { - cidr_block = "0.0.0.0/0" + cidr_block = %[2]q transit_gateway_id = aws_ec2_transit_gateway_vpc_attachment.test.transit_gateway_id } + + tags = { + Name = %[1]q + } } -` +`, rName, destinationCidr)) } -func testAccAWSDefaultRouteTableConfigRouteVpcEndpointId(rName string) string { +func 
testAccDefaultRouteTableConfigIpv4VpcEndpoint(rName, destinationCidr string) string { return composeConfig( testAccAvailableAZsNoOptInConfig(), fmt.Sprintf(` @@ -614,13 +667,17 @@ resource "aws_vpc" "test" { cidr_block = "10.10.10.0/25" tags = { - Name = "tf-acc-test-load-balancer" + Name = %[1]q } } # Another route destination for update resource "aws_internet_gateway" "test" { vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } } resource "aws_subnet" "test" { @@ -629,7 +686,7 @@ resource "aws_subnet" "test" { vpc_id = aws_vpc.test.id tags = { - Name = "tf-acc-test-load-balancer" + Name = %[1]q } } @@ -646,6 +703,10 @@ resource "aws_vpc_endpoint_service" "test" { acceptance_required = false allowed_principals = [data.aws_caller_identity.current.arn] gateway_load_balancer_arns = [aws_lb.test.arn] + + tags = { + Name = %[1]q + } } resource "aws_vpc_endpoint" "test" { @@ -653,20 +714,28 @@ resource "aws_vpc_endpoint" "test" { subnet_ids = [aws_subnet.test.id] vpc_endpoint_type = aws_vpc_endpoint_service.test.service_type vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } } resource "aws_default_route_table" "test" { default_route_table_id = aws_vpc.test.default_route_table_id route { - cidr_block = "0.0.0.0/0" + cidr_block = %[2]q vpc_endpoint_id = aws_vpc_endpoint.test.id } + + tags = { + Name = %[1]q + } } -`, rName)) +`, rName, destinationCidr)) } -func testAccAWSDefaultRouteTableConfigRouteVpcEndpointIdNoRoute(rName string) string { +func testAccDefaultRouteTableConfigIpv4VpcEndpointNoRoute(rName string) string { return composeConfig( testAccAvailableAZsNoOptInConfig(), fmt.Sprintf(` @@ -676,13 +745,17 @@ resource "aws_vpc" "test" { cidr_block = "10.10.10.0/25" tags = { - Name = "tf-acc-test-load-balancer" + Name = %[1]q } } # Another route destination for update resource "aws_internet_gateway" "test" { vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } } resource "aws_subnet" "test" { @@ -691,7 +764,7 @@ resource "aws_subnet" "test" { vpc_id = aws_vpc.test.id tags = { - Name = "tf-acc-test-load-balancer" + Name = %[1]q } } @@ -708,6 +781,10 @@ resource "aws_vpc_endpoint_service" "test" { acceptance_required = false allowed_principals = [data.aws_caller_identity.current.arn] gateway_load_balancer_arns = [aws_lb.test.arn] + + tags = { + Name = %[1]q + } } resource "aws_vpc_endpoint" "test" { @@ -715,6 +792,10 @@ resource "aws_vpc_endpoint" "test" { subnet_ids = [aws_subnet.test.id] vpc_endpoint_type = aws_vpc_endpoint_service.test.service_type vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } } resource "aws_default_route_table" "test" { @@ -724,52 +805,58 @@ resource "aws_default_route_table" "test" { cidr_block = "0.0.0.0/0" gateway_id = aws_internet_gateway.test.id } + + tags = { + Name = %[1]q + } } `, rName)) } -const testAccDefaultRouteTable_vpc_endpoint = ` +func testAccDefaultRouteTableConfigVpcEndpointAssociation(rName, destinationCidr string) string { + return fmt.Sprintf(` data "aws_region" "current" {} resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" tags = { - Name = "terraform-testacc-default-route-table-vpc-endpoint" + Name = %[1]q } } -resource "aws_internet_gateway" "igw" { +resource "aws_internet_gateway" "test" { vpc_id = aws_vpc.test.id tags = { - Name = "terraform-testacc-default-route-table-vpc-endpoint" + Name = %[1]q } } -resource "aws_vpc_endpoint" "s3" { +resource "aws_vpc_endpoint" "test" { vpc_id = aws_vpc.test.id service_name = "com.amazonaws.${data.aws_region.current.name}.s3" route_table_ids = 
[aws_vpc.test.default_route_table_id] tags = { - Name = "terraform-testacc-default-route-table-vpc-endpoint" + Name = %[1]q } } -resource "aws_default_route_table" "foo" { +resource "aws_default_route_table" "test" { default_route_table_id = aws_vpc.test.default_route_table_id tags = { - Name = "test" + Name = %[1]q } route { - cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.igw.id + cidr_block = %[2]q + gateway_id = aws_internet_gateway.test.id } } -` +`, rName, destinationCidr) +} func testAccDefaultRouteTableConfigTags1(rName, tagKey1, tagValue1 string) string { return fmt.Sprintf(` @@ -812,7 +899,7 @@ resource "aws_default_route_table" "test" { `, rName, tagKey1, tagValue1, tagKey2, tagValue2) } -func testAccAWSDefaultRouteTableConfigConditionalIpv4Ipv6(rName string, ipv6Route bool) string { +func testAccDefaultRouteTableConfigConditionalIpv4Ipv6(rName, destinationCidr, destinationIpv6Cidr string, ipv6Route bool) string { return fmt.Sprintf(` resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" @@ -824,14 +911,6 @@ resource "aws_vpc" "test" { } } -resource "aws_egress_only_internet_gateway" "test" { - vpc_id = aws_vpc.test.id - - tags = { - Name = %[1]q - } -} - resource "aws_internet_gateway" "test" { vpc_id = aws_vpc.test.id @@ -841,9 +920,9 @@ resource "aws_internet_gateway" "test" { } locals { - ipv6 = %[2]t - destination = "0.0.0.0/0" - destination_ipv6 = "::/0" + ipv6 = %[4]t + destination = %[2]q + destination_ipv6 = %[3]q } resource "aws_default_route_table" "test" { @@ -859,7 +938,7 @@ resource "aws_default_route_table" "test" { Name = %[1]q } } -`, rName, ipv6Route) +`, rName, destinationCidr, destinationIpv6Cidr, ipv6Route) } func testAccAWSDefaultRouteTableImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { diff --git a/aws/resource_aws_route_table_test.go b/aws/resource_aws_route_table_test.go index 50cabed6603..244de46c8b9 100644 --- a/aws/resource_aws_route_table_test.go +++ b/aws/resource_aws_route_table_test.go @@ -630,6 +630,16 @@ func TestAccAWSRouteTable_ConditionalCidrBlock(t *testing.T) { }) } +func testAccCheckAWSRouteTableNumberOfRoutes(routeTable *ec2.RouteTable, n int) resource.TestCheckFunc { + return func(s *terraform.State) error { + if len := len(routeTable.Routes); len != n { + return fmt.Errorf("Route Table has incorrect number of routes (Expected=%d, Actual=%d)\n", n, len) + } + + return nil + } +} + const testAccRouteTableConfig = ` resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" From 4805bec8eaa00a37f9b7c98a1212fa68b5dca919 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Mon, 7 Dec 2020 11:09:42 -0600 Subject: [PATCH 0137/1212] tests/res/route53: Fix hardcoded regions --- aws/resource_aws_route53_health_check_test.go | 18 ++++++---- aws/resource_aws_route53_record_test.go | 33 ++++++++++--------- 2 files changed, 30 insertions(+), 21 deletions(-) diff --git a/aws/resource_aws_route53_health_check_test.go b/aws/resource_aws_route53_health_check_test.go index a0c3c99e999..84238eacd73 100644 --- a/aws/resource_aws_route53_health_check_test.go +++ b/aws/resource_aws_route53_health_check_test.go @@ -3,8 +3,10 @@ package aws import ( "fmt" "log" + "strings" "testing" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/route53" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" @@ -154,13 +156,13 @@ func TestAccAWSRoute53HealthCheck_withHealthCheckRegions(t *testing.T) { var check route53.HealthCheck resourceName := 
"aws_route53_health_check.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPartitionPreCheck("aws", t) }, // GovCloud has 2 regions, test requires 3 ErrorCheck: testAccErrorCheckSkipRoute53(t), Providers: testAccProviders, CheckDestroy: testAccCheckRoute53HealthCheckDestroy, Steps: []resource.TestStep{ { - Config: testAccRoute53HealthCheckConfig_withHealthCheckRegions, + Config: testAccRoute53HealthCheckConfig_withHealthCheckRegions(endpoints.UsWest2RegionID, endpoints.UsEast1RegionID, endpoints.EuWest1RegionID), Check: resource.ComposeTestCheckFunc( testAccCheckRoute53HealthCheckExists(resourceName, &check), resource.TestCheckResourceAttr(resourceName, "regions.#", "3"), @@ -560,7 +562,8 @@ resource "aws_route53_health_check" "test" { } ` -const testAccRoute53HealthCheckConfig_withHealthCheckRegions = ` +func testAccRoute53HealthCheckConfig_withHealthCheckRegions(regions ...string) string { + return fmt.Sprintf(` resource "aws_route53_health_check" "test" { ip_address = "1.2.3.4" port = 80 @@ -569,13 +572,14 @@ resource "aws_route53_health_check" "test" { failure_threshold = "2" request_interval = "30" - regions = ["us-west-1", "us-east-1", "eu-west-1"] + regions = ["%s"] tags = { Name = "tf-test-check-with-regions" } } -` +`, strings.Join(regions, "\", \"")) +} const testAccRoute53HealthCheckCloudWatchAlarm = ` resource "aws_cloudwatch_metric_alarm" "test" { @@ -590,10 +594,12 @@ resource "aws_cloudwatch_metric_alarm" "test" { alarm_description = "This metric monitors ec2 cpu utilization" } +data "aws_region" "current" {} + resource "aws_route53_health_check" "test" { type = "CLOUDWATCH_METRIC" cloudwatch_alarm_name = aws_cloudwatch_metric_alarm.test.alarm_name - cloudwatch_alarm_region = "us-west-2" + cloudwatch_alarm_region = data.aws_region.current.name insufficient_data_health_status = "Healthy" } ` diff --git a/aws/resource_aws_route53_record_test.go b/aws/resource_aws_route53_record_test.go index 56c4d18d95e..d7428797113 100644 --- a/aws/resource_aws_route53_record_test.go +++ b/aws/resource_aws_route53_record_test.go @@ -8,6 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/route53" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -762,7 +763,7 @@ func TestAccAWSRoute53Record_HealthCheckId_TypeChange(t *testing.T) { func TestAccAWSRoute53Record_latency_basic(t *testing.T) { var record1, record2, record3 route53.ResourceRecordSet - resourceName := "aws_route53_record.us-east-1" + resourceName := "aws_route53_record.first_region" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -771,11 +772,11 @@ func TestAccAWSRoute53Record_latency_basic(t *testing.T) { CheckDestroy: testAccCheckRoute53RecordDestroy, Steps: []resource.TestStep{ { - Config: testAccRoute53LatencyCNAMERecord, + Config: testAccRoute53LatencyCNAMERecord(endpoints.UsEast1RegionID, endpoints.EuWest1RegionID, endpoints.ApNortheast1RegionID), Check: resource.ComposeTestCheckFunc( testAccCheckRoute53RecordExists(resourceName, &record1), - testAccCheckRoute53RecordExists("aws_route53_record.eu-west-1", &record2), - testAccCheckRoute53RecordExists("aws_route53_record.ap-northeast-1", &record3), + testAccCheckRoute53RecordExists("aws_route53_record.second_region", &record2), + 
testAccCheckRoute53RecordExists("aws_route53_record.third_region", &record3), ), }, { @@ -1595,53 +1596,55 @@ resource "aws_route53_record" "denmark" { } ` -const testAccRoute53LatencyCNAMERecord = ` +func testAccRoute53LatencyCNAMERecord(firstRegion, secondRegion, thirdRegion string) string { + return fmt.Sprintf(` resource "aws_route53_zone" "main" { name = "notexample.com" } -resource "aws_route53_record" "us-east-1" { +resource "aws_route53_record" "first_region" { zone_id = aws_route53_zone.main.zone_id name = "www" type = "CNAME" ttl = "5" latency_routing_policy { - region = "us-east-1" + region = %[1]q } - set_identifier = "us-east-1" + set_identifier = %[1]q records = ["dev.notexample.com"] } -resource "aws_route53_record" "eu-west-1" { +resource "aws_route53_record" "second_region" { zone_id = aws_route53_zone.main.zone_id name = "www" type = "CNAME" ttl = "5" latency_routing_policy { - region = "eu-west-1" + region = %[2]q } - set_identifier = "eu-west-1" + set_identifier = %[2]q records = ["dev.notexample.com"] } -resource "aws_route53_record" "ap-northeast-1" { +resource "aws_route53_record" "third_region" { zone_id = aws_route53_zone.main.zone_id name = "www" type = "CNAME" ttl = "5" latency_routing_policy { - region = "ap-northeast-1" + region = %[3]q } - set_identifier = "ap-northeast-1" + set_identifier = %[3]q records = ["dev.notexample.com"] } -` +`, firstRegion, secondRegion, thirdRegion) +} const testAccRoute53RecordConfigAliasElb = ` data "aws_availability_zones" "available" { From 4d98411e226c22faa62d4181ec3286f09e50664d Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Tue, 10 Nov 2020 19:56:05 -0500 Subject: [PATCH 0138/1212] tests/resource/sagemaker_endpoint: Make region agnostic --- ...source_aws_sagemaker_prebuilt_ecr_image.go | 50 ++++++++++++++++--- aws/resource_aws_sagemaker_endpoint_test.go | 11 +++- 2 files changed, 51 insertions(+), 10 deletions(-) diff --git a/aws/data_source_aws_sagemaker_prebuilt_ecr_image.go b/aws/data_source_aws_sagemaker_prebuilt_ecr_image.go index 91b084ab560..fbee2c66f69 100644 --- a/aws/data_source_aws_sagemaker_prebuilt_ecr_image.go +++ b/aws/data_source_aws_sagemaker_prebuilt_ecr_image.go @@ -47,6 +47,10 @@ const ( sageMakerRepositoryScikitLearn = "sagemaker-scikit-learn" // SageMaker Library Spark ML sageMakerRepositorySparkML = "sagemaker-sparkml-serving" + // SageMaker Library TensorFlow Serving + sageMakerRepositoryTensorFlowServing = "sagemaker-tensorflow-serving" + // SageMaker Library TensorFlow Serving EIA + sageMakerRepositoryTensorFlowServingEIA = "sagemaker-tensorflow-serving-eia" // SageMaker Repo MXNet Inference sageMakerRepositoryMXNetInference = "mxnet-inference" // SageMaker Repo MXNet Inference EIA @@ -204,6 +208,7 @@ var sageMakerPrebuiltECRImageIDByRegion_SparkML = map[string]string{ } // https://github.com/aws/deep-learning-containers/blob/master/available_images.md +// https://github.com/aws/sagemaker-tensorflow-serving-container var sageMakerPrebuiltECRImageIDByRegion_DeepLearning = map[string]string{ endpoints.ApEast1RegionID: "871362719292", endpoints.ApNortheast1RegionID: "763104351884", @@ -223,10 +228,35 @@ var sageMakerPrebuiltECRImageIDByRegion_DeepLearning = map[string]string{ endpoints.SaEast1RegionID: "763104351884", endpoints.UsEast1RegionID: "763104351884", endpoints.UsEast2RegionID: "763104351884", + endpoints.UsIsoEast1RegionID: "886529160074", endpoints.UsWest1RegionID: "763104351884", endpoints.UsWest2RegionID: "763104351884", } +// https://github.com/aws/sagemaker-tensorflow-serving-container 
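+// Registry account IDs for the SageMaker TensorFlow Serving images, keyed by +// region; the data source resolves one of these into its registry_path attribute.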
+var sageMakerPrebuiltECRImageIDByRegion_TensorFlowServing = map[string]string{ + endpoints.ApEast1RegionID: "057415533634", + endpoints.ApNortheast1RegionID: "520713654638", + endpoints.ApNortheast2RegionID: "520713654638", + endpoints.ApSouth1RegionID: "520713654638", + endpoints.ApSoutheast1RegionID: "520713654638", + endpoints.ApSoutheast2RegionID: "520713654638", + endpoints.CaCentral1RegionID: "520713654638", + endpoints.CnNorth1RegionID: "520713654638", + endpoints.CnNorthwest1RegionID: "520713654638", + endpoints.EuCentral1RegionID: "520713654638", + endpoints.EuNorth1RegionID: "520713654638", + endpoints.EuWest1RegionID: "520713654638", + endpoints.EuWest2RegionID: "520713654638", + endpoints.EuWest3RegionID: "520713654638", + endpoints.MeSouth1RegionID: "724002660598", + endpoints.SaEast1RegionID: "520713654638", + endpoints.UsEast1RegionID: "520713654638", + endpoints.UsEast2RegionID: "520713654638", + endpoints.UsWest1RegionID: "520713654638", + endpoints.UsWest2RegionID: "520713654638", +} + func dataSourceAwsSageMakerPrebuiltECRImage() *schema.Resource { return &schema.Resource{ Read: dataSourceAwsSageMakerPrebuiltECRImageRead, @@ -244,25 +274,27 @@ func dataSourceAwsSageMakerPrebuiltECRImage() *schema.Resource { sageMakerRepositoryKNearestNeighbor, sageMakerRepositoryLDA, sageMakerRepositoryLinearLearner, + sageMakerRepositoryMXNetInference, + sageMakerRepositoryMXNetInferenceEIA, + sageMakerRepositoryMXNetTraining, sageMakerRepositoryNeuralTopicModel, sageMakerRepositoryObject2Vec, sageMakerRepositoryObjectDetection, sageMakerRepositoryPCA, + sageMakerRepositoryPyTorchInference, + sageMakerRepositoryPyTorchInferenceEIA, + sageMakerRepositoryPyTorchTraining, sageMakerRepositoryRandomCutForest, + sageMakerRepositoryScikitLearn, sageMakerRepositorySemanticSegmentation, sageMakerRepositorySeq2Seq, - sageMakerRepositoryXGBoost, - sageMakerRepositoryScikitLearn, sageMakerRepositorySparkML, - sageMakerRepositoryMXNetInference, - sageMakerRepositoryMXNetInferenceEIA, - sageMakerRepositoryMXNetTraining, - sageMakerRepositoryPyTorchInference, - sageMakerRepositoryPyTorchInferenceEIA, - sageMakerRepositoryPyTorchTraining, sageMakerRepositoryTensorFlowInference, sageMakerRepositoryTensorFlowInferenceEIA, + sageMakerRepositoryTensorFlowServing, + sageMakerRepositoryTensorFlowServingEIA, sageMakerRepositoryTensorFlowTraining, + sageMakerRepositoryXGBoost, }, false), }, @@ -324,6 +356,8 @@ func dataSourceAwsSageMakerPrebuiltECRImageRead(d *schema.ResourceData, meta int id = sageMakerPrebuiltECRImageIDByRegion_XGBoost[region] case sageMakerRepositoryScikitLearn, sageMakerRepositorySparkML: id = sageMakerPrebuiltECRImageIDByRegion_SparkML[region] + case sageMakerRepositoryTensorFlowServing, sageMakerRepositoryTensorFlowServingEIA: + id = sageMakerPrebuiltECRImageIDByRegion_TensorFlowServing[region] case sageMakerRepositoryMXNetInference, sageMakerRepositoryMXNetInferenceEIA, sageMakerRepositoryMXNetTraining, diff --git a/aws/resource_aws_sagemaker_endpoint_test.go b/aws/resource_aws_sagemaker_endpoint_test.go index ff193274622..fc8a76cd745 100644 --- a/aws/resource_aws_sagemaker_endpoint_test.go +++ b/aws/resource_aws_sagemaker_endpoint_test.go @@ -225,13 +225,15 @@ data "aws_iam_policy_document" "access" { } } +data "aws_partition" "current" {} + data "aws_iam_policy_document" "assume_role" { statement { actions = ["sts:AssumeRole"] principals { type = "Service" - identifiers = ["sagemaker.amazonaws.com"] + identifiers = ["sagemaker.${data.aws_partition.current.dns_suffix}"] } } } @@ 
-258,12 +260,17 @@ resource "aws_s3_bucket_object" "test" { source = "test-fixtures/sagemaker-tensorflow-serving-test-model.tar.gz" } +data "aws_sagemaker_prebuilt_ecr_image" "test" { + repository_name = "sagemaker-tensorflow-serving" + image_tag = "1.12-cpu" +} + resource "aws_sagemaker_model" "test" { name = %[1]q execution_role_arn = aws_iam_role.test.arn primary_container { - image = "520713654638.dkr.ecr.us-west-2.amazonaws.com/sagemaker-tensorflow-serving:1.12-cpu" + image = data.aws_sagemaker_prebuilt_ecr_image.test.registry_path model_data_url = "https://${aws_s3_bucket.test.bucket_regional_domain_name}/${aws_s3_bucket_object.test.key}" } From 60b4ad4f300b86cce497756a6fbd46919e7d9c8f Mon Sep 17 00:00:00 2001 From: Matthew Burgess <549318+mattburgess@users.noreply.github.com> Date: Mon, 7 Dec 2020 18:30:01 +0000 Subject: [PATCH 0139/1212] deps: Update github.com/bflad/tfproviderlint@v0.21.0 (#16617) --- GNUmakefile | 2 + awsproviderlint/go.mod | 2 +- awsproviderlint/go.sum | 4 +- .../tfproviderlint/passes/AT009/AT009.go | 2 +- .../bflad/tfproviderlint/passes/R019/R019.go | 62 +++++++++++++++++++ .../tfproviderlint/passes/R019/README.md | 28 +++++++++ .../tfproviderlint/passes/V009/README.md | 23 +++++++ .../bflad/tfproviderlint/passes/V009/V009.go | 56 +++++++++++++++++ .../tfproviderlint/passes/V010/README.md | 23 +++++++ .../bflad/tfproviderlint/passes/V010/V010.go | 56 +++++++++++++++++ .../bflad/tfproviderlint/passes/checks.go | 6 ++ .../resourcedatahaschangescallexpr.go | 14 +++++ .../stringdoesnotmatchcallexpr.go | 13 ++++ .../stringmatchcallexpr.go | 13 ++++ awsproviderlint/vendor/modules.txt | 8 ++- 15 files changed, 307 insertions(+), 5 deletions(-) create mode 100644 awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/R019/R019.go create mode 100644 awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/R019/README.md create mode 100644 awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/V009/README.md create mode 100644 awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/V009/V009.go create mode 100644 awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/V010/README.md create mode 100644 awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/V010/V010.go create mode 100644 awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/helper/schema/resourcedatahaschangescallexpr/resourcedatahaschangescallexpr.go create mode 100644 awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/helper/validation/stringdoesnotmatchcallexpr/stringdoesnotmatchcallexpr.go create mode 100644 awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/helper/validation/stringmatchcallexpr/stringmatchcallexpr.go diff --git a/GNUmakefile b/GNUmakefile index a899ac0112b..5c422bf71f7 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -94,7 +94,9 @@ awsproviderlint: -R001=false \ -R010=false \ -R018=false \ + -R019=false \ -V001=false \ + -V009=false \ -XR001=false \ -XR002=false \ -XR003=false \ diff --git a/awsproviderlint/go.mod b/awsproviderlint/go.mod index e4428066b7d..a83718d80ef 100644 --- a/awsproviderlint/go.mod +++ b/awsproviderlint/go.mod @@ -4,7 +4,7 @@ go 1.15 require ( github.com/aws/aws-sdk-go v1.36.0 - github.com/bflad/tfproviderlint v0.20.0 + github.com/bflad/tfproviderlint v0.21.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0 golang.org/x/tools v0.0.0-20200928201943-a0ef9b62deab ) diff --git a/awsproviderlint/go.sum b/awsproviderlint/go.sum index 2ed3b1bbbb1..61fccec7cad 100644 --- a/awsproviderlint/go.sum +++ 
b/awsproviderlint/go.sum @@ -59,8 +59,8 @@ github.com/aws/aws-sdk-go v1.36.0 h1:CscTrS+szX5iu34zk2bZrChnGO/GMtUYgMK1Xzs2hYo github.com/aws/aws-sdk-go v1.36.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/bflad/gopaniccheck v0.1.0 h1:tJftp+bv42ouERmUMWLoUn/5bi/iQZjHPznM00cP/bU= github.com/bflad/gopaniccheck v0.1.0/go.mod h1:ZCj2vSr7EqVeDaqVsWN4n2MwdROx1YL+LFo47TSWtsA= -github.com/bflad/tfproviderlint v0.20.0 h1:uAy6SsEFQglnkYdz2Dv0VGXld66T3TAHm2r5gOhWpCc= -github.com/bflad/tfproviderlint v0.20.0/go.mod h1:0fdh7JywihC58Io8AZ+gpcmQtJggse0MCOXF2tMmnAQ= +github.com/bflad/tfproviderlint v0.21.0 h1:iSNU4khz+55oYA+5aXXMrz5Max4Mytb0JwPGhOwTIJo= +github.com/bflad/tfproviderlint v0.21.0/go.mod h1:0fdh7JywihC58Io8AZ+gpcmQtJggse0MCOXF2tMmnAQ= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= diff --git a/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/AT009/AT009.go b/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/AT009/AT009.go index 4d351bd96f5..97501b84dfa 100644 --- a/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/AT009/AT009.go +++ b/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/AT009/AT009.go @@ -39,7 +39,7 @@ func run(pass *analysis.Pass) (interface{}, error) { continue } - if !acctest.IsConst(callExpr.Args[1], pass.TypesInfo, acctest.ConstNameCharSetAlpha) { + if !acctest.IsConst(callExpr.Args[1], pass.TypesInfo, acctest.ConstNameCharSetAlphaNum) { continue } diff --git a/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/R019/R019.go b/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/R019/R019.go new file mode 100644 index 00000000000..82f8732fe05 --- /dev/null +++ b/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/R019/R019.go @@ -0,0 +1,62 @@ +package R019 + +import ( + "flag" + "go/ast" + + "golang.org/x/tools/go/analysis" + + "github.com/bflad/tfproviderlint/passes/commentignore" + "github.com/bflad/tfproviderlint/passes/helper/schema/resourcedatahaschangescallexpr" +) + +const Doc = `check for (*schema.ResourceData).HasChanges() calls with many arguments + +The R019 analyzer reports when there are a large number of arguments being +passed to (*schema.ResourceData).HasChanges(), which it may be preferable to +use (*schema.ResourceData).HasChangesExcept() instead. 
+ +Optional parameters: + -threshold=5 Number of arguments for reporting +` + +const analyzerName = "R019" + +var ( + threshold int +) + +var Analyzer = &analysis.Analyzer{ + Name: analyzerName, + Doc: Doc, + Flags: parseFlags(), + Requires: []*analysis.Analyzer{ + commentignore.Analyzer, + resourcedatahaschangescallexpr.Analyzer, + }, + Run: run, +} + +func parseFlags() flag.FlagSet { + var flags = flag.NewFlagSet(analyzerName, flag.ExitOnError) + flags.IntVar(&threshold, "threshold", 5, "Number of arguments for reporting") + return *flags +} + +func run(pass *analysis.Pass) (interface{}, error) { + ignorer := pass.ResultOf[commentignore.Analyzer].(*commentignore.Ignorer) + callExprs := pass.ResultOf[resourcedatahaschangescallexpr.Analyzer].([]*ast.CallExpr) + for _, callExpr := range callExprs { + if ignorer.ShouldIgnore(analyzerName, callExpr) { + continue + } + + if len(callExpr.Args) < threshold { + continue + } + + pass.Reportf(callExpr.Pos(), "%s: d.HasChanges() has many arguments, consider d.HasChangesExcept()", analyzerName) + } + + return nil, nil +} diff --git a/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/R019/README.md b/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/R019/README.md new file mode 100644 index 00000000000..c47870ab5ed --- /dev/null +++ b/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/R019/README.md @@ -0,0 +1,28 @@ +# R019 + +The R019 analyzer reports when there are a large number of arguments being passed to [`(*schema.ResourceData).HasChanges()`](https://pkg.go.dev/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema#ResourceData.HasChanges), in which case it may be preferable to use [`(*schema.ResourceData).HasChangesExcept()`](https://pkg.go.dev/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema#ResourceData.HasChangesExcept) instead. + +## Optional Arguments + +- `-threshold=5` Number of arguments before reporting + +## Flagged Code + +```go +d.HasChanges("attr1", "attr2", "attr3", "attr4", "attr5") +``` + +## Passing Code + +```go +d.HasChangesExcept("metadata_attr") +``` + +## Ignoring Reports + +Singular reports can be ignored by adding a `//lintignore:R019` Go code comment at the end of the offending line or on the line immediately preceding, e.g. + +```go +//lintignore:R019 +d.HasChanges("attr1", "attr2", "attr3", "attr4", "attr5") +``` diff --git a/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/V009/README.md b/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/V009/README.md new file mode 100644 index 00000000000..bd247bd61ce --- /dev/null +++ b/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/V009/README.md @@ -0,0 +1,23 @@ +# V009 + +The V009 analyzer reports when the second argument for a [`validation.StringMatch()`](https://pkg.go.dev/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation#StringMatch) call is an empty string. It is preferred to provide a friendly validation message, rather than allowing the function to return the raw regular expression as the message, since not all practitioners may be familiar with regular expression syntax.
+ +## Flagged Code + +```go +validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9.-]+$`), "") +``` + +## Passing Code + +```go +validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9.-]+$`), "must contain only alphanumeric characters, periods, or hyphens") +``` + +## Ignoring Reports + +Singular reports can be ignored by adding a `//lintignore:V009` Go code comment at the end of the offending line or on the line immediately preceding, e.g. + +```go +validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9.-]+$`), "") //lintignore:V009 +``` diff --git a/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/V009/V009.go b/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/V009/V009.go new file mode 100644 index 00000000000..5f648ebcccd --- /dev/null +++ b/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/V009/V009.go @@ -0,0 +1,56 @@ +package V009 + +import ( + "go/ast" + + "golang.org/x/tools/go/analysis" + + "github.com/bflad/tfproviderlint/helper/astutils" + "github.com/bflad/tfproviderlint/passes/commentignore" + "github.com/bflad/tfproviderlint/passes/helper/validation/stringmatchcallexpr" +) + +const Doc = `check for validation.StringMatch() calls with empty message argument + +The V009 analyzer reports when the second argument for a validation.StringMatch() +call is an empty string. It is preferred to provide a friendly validation +message, rather than allowing the function to return the raw regular expression +as the message, since not all practitioners may be familiar with regular +expression syntax.` + +const analyzerName = "V009" + +var Analyzer = &analysis.Analyzer{ + Name: analyzerName, + Doc: Doc, + Requires: []*analysis.Analyzer{ + commentignore.Analyzer, + stringmatchcallexpr.Analyzer, + }, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + ignorer := pass.ResultOf[commentignore.Analyzer].(*commentignore.Ignorer) + sets := pass.ResultOf[stringmatchcallexpr.Analyzer].([]*ast.CallExpr) + for _, set := range sets { + if ignorer.ShouldIgnore(analyzerName, set) { + continue + } + + if len(set.Args) < 2 { + continue + } + + switch v := set.Args[1].(type) { + default: + continue + case *ast.BasicLit: + if value := astutils.ExprStringValue(v); value != nil && *value == "" { + pass.Reportf(v.Pos(), "%s: validation.StringMatch() message argument should be non-empty", analyzerName) + } + } + } + + return nil, nil +} diff --git a/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/V010/README.md b/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/V010/README.md new file mode 100644 index 00000000000..becee7376a0 --- /dev/null +++ b/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/V010/README.md @@ -0,0 +1,23 @@ +# V010 + +The V010 analyzer reports when the second argument for a [`validation.StringDoesNotMatch()`](https://pkg.go.dev/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation#StringDoesNotMatch) call is an empty string. It is preferred to provide a friendly validation message, rather than allowing the function to return the raw regular expression as the message, since not all practitioners may be familiar with regular expression syntax.
+
+## Flagged Code
+
+```go
+validation.StringDoesNotMatch(regexp.MustCompile(`^[!@#$%^&*()]+$`), "")
+```
+
+## Passing Code
+
+```go
+validation.StringDoesNotMatch(regexp.MustCompile(`^[!@#$%^&*()]+$`), "must not contain exclamation, at, octothorp, US dollar, percentage, carat, ampersand, star, or parenthesis symbols")
+```
+
+## Ignoring Reports
+
+Singular reports can be ignored by adding a `//lintignore:V010` Go code comment at the end of the offending line or on the line immediately preceding, e.g.
+
+```go
+validation.StringDoesNotMatch(regexp.MustCompile(`^[!@#$%^&*()]+$`), "") //lintignore:V010
+```
diff --git a/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/V010/V010.go b/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/V010/V010.go
new file mode 100644
index 00000000000..7cf6beb79c6
--- /dev/null
+++ b/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/V010/V010.go
@@ -0,0 +1,56 @@
+package V010
+
+import (
+	"go/ast"
+
+	"golang.org/x/tools/go/analysis"
+
+	"github.com/bflad/tfproviderlint/helper/astutils"
+	"github.com/bflad/tfproviderlint/passes/commentignore"
+	"github.com/bflad/tfproviderlint/passes/helper/validation/stringdoesnotmatchcallexpr"
+)
+
+const Doc = `check for validation.StringDoesNotMatch() calls with empty message argument
+
+The V010 analyzer reports when the second argument for a validation.StringDoesNotMatch()
+call is an empty string. It is preferred to provide a friendly validation
+message, rather than allowing the function to return the raw regular expression
+as the message, since not all practitioners may be familiar with regular
+expression syntax.`
+
+const analyzerName = "V010"
+
+var Analyzer = &analysis.Analyzer{
+	Name: analyzerName,
+	Doc:  Doc,
+	Requires: []*analysis.Analyzer{
+		commentignore.Analyzer,
+		stringdoesnotmatchcallexpr.Analyzer,
+	},
+	Run: run,
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+	ignorer := pass.ResultOf[commentignore.Analyzer].(*commentignore.Ignorer)
+	sets := pass.ResultOf[stringdoesnotmatchcallexpr.Analyzer].([]*ast.CallExpr)
+	for _, set := range sets {
+		if ignorer.ShouldIgnore(analyzerName, set) {
+			continue
+		}
+
+		if len(set.Args) < 2 {
+			continue
+		}
+
+		switch v := set.Args[1].(type) {
+		default:
+			continue
+		case *ast.BasicLit:
+			if value := astutils.ExprStringValue(v); value != nil && *value == "" {
+				pass.Reportf(v.Pos(), "%s: validation.StringDoesNotMatch() message argument should be non-empty", analyzerName)
+			}
+		}
+	}
+
+	return nil, nil
+}
diff --git a/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/checks.go b/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/checks.go
index e10951e4586..92cd064a782 100644
--- a/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/checks.go
+++ b/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/checks.go
@@ -28,6 +28,7 @@ import (
 	"github.com/bflad/tfproviderlint/passes/R016"
 	"github.com/bflad/tfproviderlint/passes/R017"
 	"github.com/bflad/tfproviderlint/passes/R018"
+	"github.com/bflad/tfproviderlint/passes/R019"
 	"github.com/bflad/tfproviderlint/passes/S001"
 	"github.com/bflad/tfproviderlint/passes/S002"
 	"github.com/bflad/tfproviderlint/passes/S003"
@@ -73,6 +74,8 @@ import (
 	"github.com/bflad/tfproviderlint/passes/V006"
 	"github.com/bflad/tfproviderlint/passes/V007"
 	"github.com/bflad/tfproviderlint/passes/V008"
+	"github.com/bflad/tfproviderlint/passes/V009"
+	"github.com/bflad/tfproviderlint/passes/V010"
 	"golang.org/x/tools/go/analysis"
 )
 
@@ -107,6 +110,7 @@ var
AllChecks = []*analysis.Analyzer{ R016.Analyzer, R017.Analyzer, R018.Analyzer, + R019.Analyzer, S001.Analyzer, S002.Analyzer, S003.Analyzer, @@ -152,4 +156,6 @@ var AllChecks = []*analysis.Analyzer{ V006.Analyzer, V007.Analyzer, V008.Analyzer, + V009.Analyzer, + V010.Analyzer, } diff --git a/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/helper/schema/resourcedatahaschangescallexpr/resourcedatahaschangescallexpr.go b/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/helper/schema/resourcedatahaschangescallexpr/resourcedatahaschangescallexpr.go new file mode 100644 index 00000000000..b31b46d1f46 --- /dev/null +++ b/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/helper/schema/resourcedatahaschangescallexpr/resourcedatahaschangescallexpr.go @@ -0,0 +1,14 @@ +package resourcedatahaschangescallexpr + +import ( + "github.com/bflad/tfproviderlint/helper/analysisutils" + "github.com/bflad/tfproviderlint/helper/terraformtype/helper/schema" +) + +var Analyzer = analysisutils.ReceiverMethodCallExprAnalyzer( + "resourcedatahaschangescallexpr", + schema.IsReceiverMethod, + schema.PackagePath, + schema.TypeNameResourceData, + "HasChanges", +) diff --git a/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/helper/validation/stringdoesnotmatchcallexpr/stringdoesnotmatchcallexpr.go b/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/helper/validation/stringdoesnotmatchcallexpr/stringdoesnotmatchcallexpr.go new file mode 100644 index 00000000000..ba3681e8b74 --- /dev/null +++ b/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/helper/validation/stringdoesnotmatchcallexpr/stringdoesnotmatchcallexpr.go @@ -0,0 +1,13 @@ +package stringdoesnotmatchcallexpr + +import ( + "github.com/bflad/tfproviderlint/helper/analysisutils" + "github.com/bflad/tfproviderlint/helper/terraformtype/helper/validation" +) + +var Analyzer = analysisutils.FunctionCallExprAnalyzer( + "stringdoesnotmatchcallexpr", + validation.IsFunc, + validation.PackagePath, + validation.FuncNameStringDoesNotMatch, +) diff --git a/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/helper/validation/stringmatchcallexpr/stringmatchcallexpr.go b/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/helper/validation/stringmatchcallexpr/stringmatchcallexpr.go new file mode 100644 index 00000000000..77a0d329289 --- /dev/null +++ b/awsproviderlint/vendor/github.com/bflad/tfproviderlint/passes/helper/validation/stringmatchcallexpr/stringmatchcallexpr.go @@ -0,0 +1,13 @@ +package stringmatchcallexpr + +import ( + "github.com/bflad/tfproviderlint/helper/analysisutils" + "github.com/bflad/tfproviderlint/helper/terraformtype/helper/validation" +) + +var Analyzer = analysisutils.FunctionCallExprAnalyzer( + "stringmatchcallexpr", + validation.IsFunc, + validation.PackagePath, + validation.FuncNameStringMatch, +) diff --git a/awsproviderlint/vendor/modules.txt b/awsproviderlint/vendor/modules.txt index d31685e44fb..962d179f461 100644 --- a/awsproviderlint/vendor/modules.txt +++ b/awsproviderlint/vendor/modules.txt @@ -63,7 +63,7 @@ github.com/bflad/gopaniccheck/passes/logpaniccallexpr github.com/bflad/gopaniccheck/passes/logpanicfcallexpr github.com/bflad/gopaniccheck/passes/logpaniclncallexpr github.com/bflad/gopaniccheck/passes/paniccallexpr -# github.com/bflad/tfproviderlint v0.20.0 +# github.com/bflad/tfproviderlint v0.21.0 ## explicit github.com/bflad/tfproviderlint/helper/analysisutils github.com/bflad/tfproviderlint/helper/astutils @@ -100,6 +100,7 @@ 
github.com/bflad/tfproviderlint/passes/R015 github.com/bflad/tfproviderlint/passes/R016 github.com/bflad/tfproviderlint/passes/R017 github.com/bflad/tfproviderlint/passes/R018 +github.com/bflad/tfproviderlint/passes/R019 github.com/bflad/tfproviderlint/passes/S001 github.com/bflad/tfproviderlint/passes/S002 github.com/bflad/tfproviderlint/passes/S003 @@ -145,6 +146,8 @@ github.com/bflad/tfproviderlint/passes/V005 github.com/bflad/tfproviderlint/passes/V006 github.com/bflad/tfproviderlint/passes/V007 github.com/bflad/tfproviderlint/passes/V008 +github.com/bflad/tfproviderlint/passes/V009 +github.com/bflad/tfproviderlint/passes/V010 github.com/bflad/tfproviderlint/passes/commentignore github.com/bflad/tfproviderlint/passes/helper/acctest/randstringfromcharsetcallexpr github.com/bflad/tfproviderlint/passes/helper/resource/retryfuncinfo @@ -154,6 +157,7 @@ github.com/bflad/tfproviderlint/passes/helper/resource/testmatchresourceattrcall github.com/bflad/tfproviderlint/passes/helper/schema/crudfuncinfo github.com/bflad/tfproviderlint/passes/helper/schema/resourcedatagetchangeassignstmt github.com/bflad/tfproviderlint/passes/helper/schema/resourcedatagetokexistscallexpr +github.com/bflad/tfproviderlint/passes/helper/schema/resourcedatahaschangescallexpr github.com/bflad/tfproviderlint/passes/helper/schema/resourcedatapartialcallexpr github.com/bflad/tfproviderlint/passes/helper/schema/resourcedatapartialselectorexpr github.com/bflad/tfproviderlint/passes/helper/schema/resourcedatasetcallexpr @@ -173,7 +177,9 @@ github.com/bflad/tfproviderlint/passes/helper/validation/iprangecallexpr github.com/bflad/tfproviderlint/passes/helper/validation/iprangeselectorexpr github.com/bflad/tfproviderlint/passes/helper/validation/singleipcallexpr github.com/bflad/tfproviderlint/passes/helper/validation/singleipselectorexpr +github.com/bflad/tfproviderlint/passes/helper/validation/stringdoesnotmatchcallexpr github.com/bflad/tfproviderlint/passes/helper/validation/stringinslicecallexpr +github.com/bflad/tfproviderlint/passes/helper/validation/stringmatchcallexpr github.com/bflad/tfproviderlint/passes/helper/validation/validatejsonstringselectorexpr github.com/bflad/tfproviderlint/passes/helper/validation/validatelistuniquestringsselectorexpr github.com/bflad/tfproviderlint/passes/helper/validation/validateregexpselectorexpr From d4b5c283494fe00421110e4d441cf60fa4cea3fb Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 7 Dec 2020 13:58:21 -0500 Subject: [PATCH 0140/1212] tests/resource/aws_route_table: Refactor acceptance tests in preparation for future fixes/enhancements (#14013) * r/aws_route: New 'TestAccAWSRouteTable_basic'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_basic' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_basic -timeout 120m === RUN TestAccAWSRouteTable_basic === PAUSE TestAccAWSRouteTable_basic === CONT TestAccAWSRouteTable_basic --- PASS: TestAccAWSRouteTable_basic (37.78s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 37.839s * r/aws_route: Add 'TestAccAWSRouteTable_disappears'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_disappears' ==> Checking that code complies with gofmt requirements... 
TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_disappears -timeout 120m === RUN TestAccAWSRouteTable_disappears === PAUSE TestAccAWSRouteTable_disappears === CONT TestAccAWSRouteTable_disappears --- PASS: TestAccAWSRouteTable_disappears (35.58s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 35.625s * r/aws_route: Add 'TestAccAWSRouteTable_IPv4_To_InternetGateway'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_IPv4_To_InternetGateway' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_IPv4_To_InternetGateway -timeout 120m === RUN TestAccAWSRouteTable_IPv4_To_InternetGateway === PAUSE TestAccAWSRouteTable_IPv4_To_InternetGateway === CONT TestAccAWSRouteTable_IPv4_To_InternetGateway --- PASS: TestAccAWSRouteTable_IPv4_To_InternetGateway (78.31s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 78.372s * r/aws_route: Update 'TestAccAWSRouteTable_tags'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_tags' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_tags -timeout 120m === RUN TestAccAWSRouteTable_tags === PAUSE TestAccAWSRouteTable_tags === CONT TestAccAWSRouteTable_tags --- PASS: TestAccAWSRouteTable_tags (84.18s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 84.234s * r/aws_route_table: 'TestAccAWSRouteTable_instance' -> 'TestAccAWSRouteTable_IPv4_To_Instance'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_IPv4_To_Instance' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_IPv4_To_Instance -timeout 120m === RUN TestAccAWSRouteTable_IPv4_To_Instance === PAUSE TestAccAWSRouteTable_IPv4_To_Instance === CONT TestAccAWSRouteTable_IPv4_To_Instance --- PASS: TestAccAWSRouteTable_IPv4_To_Instance (109.73s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 109.799s * r/aws_route_table: 'TestAccAWSRouteTable_ipv6' -> 'TestAccAWSRouteTable_IPv6_To_EgressOnlyInternetGateway'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_IPv6_To_EgressOnlyInternetGateway' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_IPv6_To_EgressOnlyInternetGateway -timeout 120m === RUN TestAccAWSRouteTable_IPv6_To_EgressOnlyInternetGateway === PAUSE TestAccAWSRouteTable_IPv6_To_EgressOnlyInternetGateway === CONT TestAccAWSRouteTable_IPv6_To_EgressOnlyInternetGateway --- PASS: TestAccAWSRouteTable_IPv6_To_EgressOnlyInternetGateway (41.79s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 41.826s * r/aws_route_table: Fix 'testAccRouteTableConfigPanicEmptyRoute'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_panicEmptyRoute' ==> Checking that code complies with gofmt requirements... 
TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_panicEmptyRoute -timeout 120m === RUN TestAccAWSRouteTable_panicEmptyRoute === PAUSE TestAccAWSRouteTable_panicEmptyRoute === CONT TestAccAWSRouteTable_panicEmptyRoute --- PASS: TestAccAWSRouteTable_panicEmptyRoute (24.60s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 24.636s * r/aws_route_table: Rework 'TestAccAWSRouteTable_Route_ConfigMode'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_Route_ConfigMode' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_Route_ConfigMode -timeout 120m === RUN TestAccAWSRouteTable_Route_ConfigMode === PAUSE TestAccAWSRouteTable_Route_ConfigMode === CONT TestAccAWSRouteTable_Route_ConfigMode --- PASS: TestAccAWSRouteTable_Route_ConfigMode (102.67s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 102.732s * r/aws_route_table: 'TestAccAWSRouteTable_Route_TransitGatewayID' -> 'TestAccAWSRouteTable_IPv4_To_TransitGateway'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_IPv4_To_TransitGateway' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_IPv4_To_TransitGateway -timeout 120m === RUN TestAccAWSRouteTable_IPv4_To_TransitGateway === PAUSE TestAccAWSRouteTable_IPv4_To_TransitGateway === CONT TestAccAWSRouteTable_IPv4_To_TransitGateway --- PASS: TestAccAWSRouteTable_IPv4_To_TransitGateway (338.85s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 338.907s * r/aws_route_table: 'TestAccAWSRouteTable_vpcPeering' -> 'TestAccAWSRouteTable_IPv4_To_VpcPeeringConnection'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_IPv4_To_VpcPeeringConnection' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_IPv4_To_VpcPeeringConnection -timeout 120m === RUN TestAccAWSRouteTable_IPv4_To_VpcPeeringConnection === PAUSE TestAccAWSRouteTable_IPv4_To_VpcPeeringConnection === CONT TestAccAWSRouteTable_IPv4_To_VpcPeeringConnection --- PASS: TestAccAWSRouteTable_IPv4_To_VpcPeeringConnection (43.87s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 43.905s * r/aws_route_table: Rework 'TestAccAWSRouteTable_vgwRoutePropagation'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_vgwRoutePropagation' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_vgwRoutePropagation -timeout 120m === RUN TestAccAWSRouteTable_vgwRoutePropagation === PAUSE TestAccAWSRouteTable_vgwRoutePropagation === CONT TestAccAWSRouteTable_vgwRoutePropagation --- PASS: TestAccAWSRouteTable_vgwRoutePropagation (112.21s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 112.290s * r/aws_route_table: Add 'TestAccAWSRouteTable_VpcMultipleCidrs_VpcEndpointAssociation'. Currently fails. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_VpcMultipleCidrs_VpcEndpointAssociation' ==> Checking that code complies with gofmt requirements... 
TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_VpcMultipleCidrs_VpcEndpointAssociation -timeout 120m === RUN TestAccAWSRouteTable_VpcMultipleCidrs_VpcEndpointAssociation === PAUSE TestAccAWSRouteTable_VpcMultipleCidrs_VpcEndpointAssociation === CONT TestAccAWSRouteTable_VpcMultipleCidrs_VpcEndpointAssociation --- FAIL: TestAccAWSRouteTable_VpcMultipleCidrs_VpcEndpointAssociation (47.64s) testing.go:684: Step 0 error: Check failed: Check 2/7 error: Route Table has incorrect number of routes (Expected=3, Actual=2) FAIL FAIL github.com/terraform-providers/terraform-provider-aws/aws 47.694s FAIL GNUmakefile:26: recipe for target 'testacc' failed make: *** [testacc] Error 1 * Comment out 'TestAccAWSRouteTable_VpcMultipleCidrs_VpcEndpointAssociation'. * r/aws_route_table: Add 'TestAccAWSRouteTable_IPv4_To_NatGateway'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_IPv4_To_NatGateway' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_IPv4_To_NatGateway -timeout 120m === RUN TestAccAWSRouteTable_IPv4_To_NatGateway === PAUSE TestAccAWSRouteTable_IPv4_To_NatGateway === CONT TestAccAWSRouteTable_IPv4_To_NatGateway --- PASS: TestAccAWSRouteTable_IPv4_To_NatGateway (228.80s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 228.838s * r/aws_route_table: Add 'TestAccAWSRouteTable_IPv6_To_NetworkInterface'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_IPv6_To_NetworkInterface' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_IPv6_To_NetworkInterface -timeout 120m === RUN TestAccAWSRouteTable_IPv6_To_NetworkInterface === PAUSE TestAccAWSRouteTable_IPv6_To_NetworkInterface === CONT TestAccAWSRouteTable_IPv6_To_NetworkInterface --- PASS: TestAccAWSRouteTable_IPv6_To_NetworkInterface (48.88s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 48.917s * r/aws_route_table: Rework 'TestAccAWSRouteTable_ConditionalCidrBlock'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_ConditionalCidrBlock' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_ConditionalCidrBlock -timeout 120m === RUN TestAccAWSRouteTable_ConditionalCidrBlock === PAUSE TestAccAWSRouteTable_ConditionalCidrBlock === CONT TestAccAWSRouteTable_ConditionalCidrBlock --- PASS: TestAccAWSRouteTable_ConditionalCidrBlock (78.76s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 78.837s * r/aws_route_table: Rework 'testAccCheckAWSRouteTablePropagatingVgw'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_vgwRoutePropagation' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_vgwRoutePropagation -timeout 120m === RUN TestAccAWSRouteTable_vgwRoutePropagation === PAUSE TestAccAWSRouteTable_vgwRoutePropagation === CONT TestAccAWSRouteTable_vgwRoutePropagation --- PASS: TestAccAWSRouteTable_vgwRoutePropagation (115.39s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 115.474s * Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_' ==> Checking that code complies with gofmt requirements... 
TF_ACC=1 go test ./aws/ -v -count 1 -parallel 2 -run=TestAccAWSRouteTable_ -timeout 120m === RUN TestAccAWSRouteTable_basic === PAUSE TestAccAWSRouteTable_basic === RUN TestAccAWSRouteTable_disappears === PAUSE TestAccAWSRouteTable_disappears === RUN TestAccAWSRouteTable_IPv4_To_InternetGateway === PAUSE TestAccAWSRouteTable_IPv4_To_InternetGateway === RUN TestAccAWSRouteTable_IPv4_To_Instance === PAUSE TestAccAWSRouteTable_IPv4_To_Instance === RUN TestAccAWSRouteTable_IPv6_To_EgressOnlyInternetGateway === PAUSE TestAccAWSRouteTable_IPv6_To_EgressOnlyInternetGateway === RUN TestAccAWSRouteTable_tags === PAUSE TestAccAWSRouteTable_tags === RUN TestAccAWSRouteTable_panicEmptyRoute === PAUSE TestAccAWSRouteTable_panicEmptyRoute === RUN TestAccAWSRouteTable_Route_ConfigMode === PAUSE TestAccAWSRouteTable_Route_ConfigMode === RUN TestAccAWSRouteTable_IPv4_To_TransitGateway === PAUSE TestAccAWSRouteTable_IPv4_To_TransitGateway === RUN TestAccAWSRouteTable_IPv4_To_VpcPeeringConnection === PAUSE TestAccAWSRouteTable_IPv4_To_VpcPeeringConnection === RUN TestAccAWSRouteTable_vgwRoutePropagation === PAUSE TestAccAWSRouteTable_vgwRoutePropagation === RUN TestAccAWSRouteTable_ConditionalCidrBlock === PAUSE TestAccAWSRouteTable_ConditionalCidrBlock === RUN TestAccAWSRouteTable_IPv4_To_NatGateway === PAUSE TestAccAWSRouteTable_IPv4_To_NatGateway === RUN TestAccAWSRouteTable_IPv6_To_NetworkInterface === PAUSE TestAccAWSRouteTable_IPv6_To_NetworkInterface === CONT TestAccAWSRouteTable_basic === CONT TestAccAWSRouteTable_IPv4_To_TransitGateway --- PASS: TestAccAWSRouteTable_basic (37.06s) === CONT TestAccAWSRouteTable_IPv6_To_NetworkInterface --- PASS: TestAccAWSRouteTable_IPv6_To_NetworkInterface (47.79s) === CONT TestAccAWSRouteTable_IPv4_To_NatGateway --- PASS: TestAccAWSRouteTable_IPv4_To_NatGateway (196.98s) === CONT TestAccAWSRouteTable_ConditionalCidrBlock --- PASS: TestAccAWSRouteTable_IPv4_To_TransitGateway (348.59s) === CONT TestAccAWSRouteTable_vgwRoutePropagation --- PASS: TestAccAWSRouteTable_ConditionalCidrBlock (77.19s) === CONT TestAccAWSRouteTable_IPv4_To_VpcPeeringConnection --- PASS: TestAccAWSRouteTable_IPv4_To_VpcPeeringConnection (44.68s) === CONT TestAccAWSRouteTable_IPv6_To_EgressOnlyInternetGateway --- PASS: TestAccAWSRouteTable_IPv6_To_EgressOnlyInternetGateway (42.42s) === CONT TestAccAWSRouteTable_Route_ConfigMode --- PASS: TestAccAWSRouteTable_vgwRoutePropagation (115.86s) === CONT TestAccAWSRouteTable_panicEmptyRoute --- PASS: TestAccAWSRouteTable_panicEmptyRoute (24.55s) === CONT TestAccAWSRouteTable_tags --- PASS: TestAccAWSRouteTable_Route_ConfigMode (101.57s) === CONT TestAccAWSRouteTable_IPv4_To_InternetGateway --- PASS: TestAccAWSRouteTable_tags (83.12s) === CONT TestAccAWSRouteTable_IPv4_To_Instance --- PASS: TestAccAWSRouteTable_IPv4_To_InternetGateway (78.17s) === CONT TestAccAWSRouteTable_disappears --- PASS: TestAccAWSRouteTable_disappears (34.81s) --- PASS: TestAccAWSRouteTable_IPv4_To_Instance (121.04s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 693.250s * r/aws_route_table: Add (and comment out) 'TestAccAWSRouteTable_IPv4_To_NetworkInterface_Attached' and 'TestAccAWSRouteTable_IPv4_To_NetworkInterface_TwoAttachments'. These two tests fail as expected (#1426 and #5745). * r/aws_route_table: Add 'testAccCheckAWSRouteTableRoute'. * r/aws_route_table: Rename 'TestAccAWSRouteTable_VpcMultipleCidrs_VpcEndpointAssociation' to 'TestAccAWSRouteTable_VpcMultipleCidrs'. Gateway VPC Endpoint routes are added asynchronously so don't attempt to test. 
Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_VpcMultipleCidrs' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_VpcMultipleCidrs -timeout 120m === RUN TestAccAWSRouteTable_VpcMultipleCidrs === PAUSE TestAccAWSRouteTable_VpcMultipleCidrs === CONT TestAccAWSRouteTable_VpcMultipleCidrs --- PASS: TestAccAWSRouteTable_VpcMultipleCidrs (62.16s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 62.213s * r/aws_route_table: Add 'TestAccAWSRouteTable_VpcClassicLink' Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_VpcClassicLink' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_VpcClassicLink -timeout 120m === RUN TestAccAWSRouteTable_VpcClassicLink === PAUSE TestAccAWSRouteTable_VpcClassicLink === CONT TestAccAWSRouteTable_VpcClassicLink --- PASS: TestAccAWSRouteTable_VpcClassicLink (38.84s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 38.888s * r/aws_route_table: Add 'TestAccAWSRouteTable_GatewayVpcEndpoint' Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_GatewayVpcEndpoint' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_GatewayVpcEndpoint -timeout 120m === RUN TestAccAWSRouteTable_GatewayVpcEndpoint === PAUSE TestAccAWSRouteTable_GatewayVpcEndpoint === CONT TestAccAWSRouteTable_GatewayVpcEndpoint --- PASS: TestAccAWSRouteTable_GatewayVpcEndpoint (210.60s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 210.650s * r/aws_route_table: Add 'TestAccAWSRouteTable_disappears_SubnetAssociation'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_disappears_SubnetAssociation' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_disappears_SubnetAssociation -timeout 120m === RUN TestAccAWSRouteTable_disappears_SubnetAssociation === PAUSE TestAccAWSRouteTable_disappears_SubnetAssociation === CONT TestAccAWSRouteTable_disappears_SubnetAssociation --- PASS: TestAccAWSRouteTable_disappears_SubnetAssociation (44.11s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 44.160s * r/aws_route_table: Add 'TestAccAWSRouteTable_MultipleRoutes'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_MultipleRoutes' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_MultipleRoutes -timeout 120m === RUN TestAccAWSRouteTable_MultipleRoutes === PAUSE TestAccAWSRouteTable_MultipleRoutes === CONT TestAccAWSRouteTable_MultipleRoutes --- PASS: TestAccAWSRouteTable_MultipleRoutes (191.08s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 191.139s * r/aws_route_table: Use Amazon NAT instance AMI for instance tests. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_IPv4_To_Instance' ==> Checking that code complies with gofmt requirements... 
TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_IPv4_To_Instance -timeout 120m === RUN TestAccAWSRouteTable_IPv4_To_Instance === PAUSE TestAccAWSRouteTable_IPv4_To_Instance === CONT TestAccAWSRouteTable_IPv4_To_Instance --- PASS: TestAccAWSRouteTable_IPv4_To_Instance (109.91s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 110.478s $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_MultipleRoutes' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_MultipleRoutes -timeout 120m === RUN TestAccAWSRouteTable_MultipleRoutes === PAUSE TestAccAWSRouteTable_MultipleRoutes === CONT TestAccAWSRouteTable_MultipleRoutes --- PASS: TestAccAWSRouteTable_MultipleRoutes (204.15s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 204.201s * r/aws_route_table: Delete 'TestAccAWSRouteTable_IPv4_To_NetworkInterface_Attached' and 'TestAccAWSRouteTable_IPv4_To_NetworkInterface_TwoAttachments'. If we mark `instance_id` and `network_interface_id` as both `Computed: true` in the `route` attribute's schema then we end up having to change the associated set hash function to choose one or other of those attributes to include in the hash and ignore the other. This means that either 'TestAccAWSRouteTable_IPv4_To_Instance' will show continuous diffs or 'TestAccAWSRouteTable_IPv4_To_NetworkInterface_Attached' will. The longer term solution is to remove `instance_id` as a route target as the instance's primary ENI's ID can be used in the `network_interface_id`. This will also simplify the `aws_route` resource. * r/aws_route_table: Use 'testAccAvailableAZsNoOptInExcludeConfig'. * Use 'testAccAvailableAZsNoOptInDefaultExcludeConfig'. * Fix compilation errors after rebase. * Fix compilation errors after rebase. * Exclude 'resource_aws_route_table_test.go' from acceptance test Terraform linting (testAccAWSRouteTableConfigMultipleRoutes). * r/aws_route_table: Ensure no diff when expanded form of IPv6 CIDR block is used. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_IPv6_To_EgressOnlyInternetGateway' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_IPv6_To_EgressOnlyInternetGateway -timeout 120m === RUN TestAccAWSRouteTable_IPv6_To_EgressOnlyInternetGateway === PAUSE TestAccAWSRouteTable_IPv6_To_EgressOnlyInternetGateway === CONT TestAccAWSRouteTable_IPv6_To_EgressOnlyInternetGateway --- PASS: TestAccAWSRouteTable_IPv6_To_EgressOnlyInternetGateway (60.43s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 60.474s * r/aws_vpc_endpoint_route_table_association: Rename 'TestAccAWSRouteTable_Route_VpcEndpointId' to 'TestAccAWSRouteTable_IPv4_To_VpcEndpoint'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_IPv4_To_VpcEndpoint' ACCTEST_PARALLELISM=2 ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 2 -run=TestAccAWSRouteTable_IPv4_To_VpcEndpoint -timeout 120m === RUN TestAccAWSRouteTable_IPv4_To_VpcEndpoint === PAUSE TestAccAWSRouteTable_IPv4_To_VpcEndpoint === CONT TestAccAWSRouteTable_IPv4_To_VpcEndpoint --- PASS: TestAccAWSRouteTable_IPv4_To_VpcEndpoint (410.02s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 410.081s * r/aws_route_table: Missing commit from rebase. 
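For reference, the `canonicalCidrBlock` change below normalizes equivalent CIDR spellings before hashing. A minimal sketch of that kind of canonicalization (our illustrative implementation; the provider's actual helper body is not shown in this patch):

```go
package main

import (
	"fmt"
	"net"
)

// canonicalCidr re-renders a CIDR block so that equivalent spellings,
// e.g. "::0/0" and "::/0", hash to the same set element. Illustrative
// only; the provider's canonicalCidrBlock may differ.
func canonicalCidr(cidr string) string {
	_, ipNet, err := net.ParseCIDR(cidr)
	if err != nil {
		return cidr // leave unparsable input unchanged as a safe fallback
	}
	return ipNet.String()
}

func main() {
	fmt.Println(canonicalCidr("::0/0"))         // ::/0
	fmt.Println(canonicalCidr("2001:DB8::/32")) // 2001:db8::/32
}
```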
* Update aws/resource_aws_route_table_test.go Co-authored-by: Brian Flad * Update aws/resource_aws_route_table_test.go Co-authored-by: Brian Flad * Update aws/resource_aws_route_table_test.go Co-authored-by: Brian Flad * Update aws/resource_aws_route_table_test.go Co-authored-by: Brian Flad * Update aws/resource_aws_route_table_test.go Co-authored-by: Brian Flad * r/aws_route_table: 'make fmt'. * r/aws_route_table: Refactor 'TestAccAWSRouteTable_MultipleRoutes' so as to avoid dynamic attribute names. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_MultipleRoutes' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_MultipleRoutes -timeout 120m === RUN TestAccAWSRouteTable_MultipleRoutes === PAUSE TestAccAWSRouteTable_MultipleRoutes === CONT TestAccAWSRouteTable_MultipleRoutes --- PASS: TestAccAWSRouteTable_MultipleRoutes (165.99s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 166.047s * Fix 'terrafmt' issues. Co-authored-by: Brian Flad --- .github/workflows/acctest-terraform-lint.yml | 2 + aws/resource_aws_route_table.go | 2 +- aws/resource_aws_route_table_test.go | 1785 +++++++++++++----- aws/validators_test.go | 17 + 4 files changed, 1317 insertions(+), 489 deletions(-) diff --git a/.github/workflows/acctest-terraform-lint.yml b/.github/workflows/acctest-terraform-lint.yml index 10fcff35f1e..d6ad542d3a8 100644 --- a/.github/workflows/acctest-terraform-lint.yml +++ b/.github/workflows/acctest-terraform-lint.yml @@ -40,6 +40,7 @@ jobs: | grep -v resource_aws_kinesis_stream_test.go \ | grep -v resource_aws_kms_grant_test.go \ | grep -v resource_aws_quicksight_user_test.go \ + | grep -v resource_aws_route_table_test.go \ | grep -v resource_aws_s3_bucket_object_test.go \ | grep -v resource_aws_sns_platform_application_test.go \ | xargs -I {} terrafmt diff --check --fmtcompat {} @@ -70,6 +71,7 @@ jobs: | grep -v resource_aws_kms_grant_test.go \ | grep -v resource_aws_lambda_permission_test.go \ | grep -v resource_aws_quicksight_user_test.go \ + | grep -v resource_aws_route_table_test.go \ | grep -v resource_aws_s3_bucket_object_test.go \ | grep -v resource_aws_sns_platform_application_test.go \ | ./scripts/validate-terraform.sh diff --git a/aws/resource_aws_route_table.go b/aws/resource_aws_route_table.go index 54526914694..89e1c27cbee 100644 --- a/aws/resource_aws_route_table.go +++ b/aws/resource_aws_route_table.go @@ -548,7 +548,7 @@ func resourceAwsRouteTableHash(v interface{}) int { } if v, ok := m["ipv6_cidr_block"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) + buf.WriteString(fmt.Sprintf("%s-", canonicalCidrBlock(v.(string)))) } if v, ok := m["cidr_block"]; ok { diff --git a/aws/resource_aws_route_table_test.go b/aws/resource_aws_route_table_test.go index 244de46c8b9..95769e51fc3 100644 --- a/aws/resource_aws_route_table_test.go +++ b/aws/resource_aws_route_table_test.go @@ -5,6 +5,7 @@ import ( "log" "regexp" "testing" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" @@ -81,51 +82,87 @@ func testSweepRouteTables(region string) error { } func TestAccAWSRouteTable_basic(t *testing.T) { - var v ec2.RouteTable + var routeTable ec2.RouteTable resourceName := "aws_route_table.test" + rName := acctest.RandomWithPrefix("tf-acc-test") - testCheck := func(*terraform.State) error { - if len(v.Routes) != 2 { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - - routes := make(map[string]*ec2.Route) - for _, r := 
range v.Routes { - routes[*r.DestinationCidrBlock] = r - } - - if _, ok := routes["10.1.0.0/16"]; !ok { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - if _, ok := routes["10.2.0.0/16"]; !ok { - return fmt.Errorf("bad routes: %#v", v.Routes) - } + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSRouteDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSRouteTableConfigBasic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 1), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), + resource.TestCheckResourceAttr(resourceName, "route.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} - return nil - } +func TestAccAWSRouteTable_disappears(t *testing.T) { + var routeTable ec2.RouteTable + resourceName := "aws_route_table.test" + rName := acctest.RandomWithPrefix("tf-acc-test") - testCheckChange := func(*terraform.State) error { - if len(v.Routes) != 3 { - return fmt.Errorf("bad routes: %#v", v.Routes) - } + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSRouteDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSRouteTableConfigBasic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckResourceDisappears(testAccProvider, resourceAwsRouteTable(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} - routes := make(map[string]*ec2.Route) - for _, r := range v.Routes { - routes[*r.DestinationCidrBlock] = r - } +func TestAccAWSRouteTable_disappears_SubnetAssociation(t *testing.T) { + var routeTable ec2.RouteTable + resourceName := "aws_route_table.test" + rName := acctest.RandomWithPrefix("tf-acc-test") - if _, ok := routes["10.1.0.0/16"]; !ok { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - if _, ok := routes["10.3.0.0/16"]; !ok { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - if _, ok := routes["10.4.0.0/16"]; !ok { - return fmt.Errorf("bad routes: %#v", v.Routes) - } + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSRouteDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSRouteTableConfigSubnetAssociation(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckResourceDisappears(testAccProvider, resourceAwsRouteTable(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} - return nil - } +func TestAccAWSRouteTable_IPv4_To_InternetGateway(t *testing.T) { + var routeTable ec2.RouteTable + resourceName := "aws_route_table.test" + igwResourceName := "aws_internet_gateway.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + destinationCidr1 := "10.2.0.0/16" + destinationCidr2 := "10.3.0.0/16" + destinationCidr3 := "10.4.0.0/16" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -134,54 +171,48 @@ func TestAccAWSRouteTable_basic(t *testing.T) { CheckDestroy: testAccCheckRouteTableDestroy, Steps: []resource.TestStep{ { - Config: 
testAccRouteTableConfig, + Config: testAccAWSRouteTableConfigIpv4InternetGateway(rName, destinationCidr1, destinationCidr2), Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists(resourceName, &v), - testCheck, + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 3), testAccCheckResourceAttrAccountID(resourceName, "owner_id"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), + resource.TestCheckResourceAttr(resourceName, "route.#", "2"), + testAccCheckAWSRouteTableRoute(resourceName, "cidr_block", destinationCidr1, "gateway_id", igwResourceName, "id"), + testAccCheckAWSRouteTableRoute(resourceName, "cidr_block", destinationCidr2, "gateway_id", igwResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccRouteTableConfigChange, + Config: testAccAWSRouteTableConfigIpv4InternetGateway(rName, destinationCidr2, destinationCidr3), Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists(resourceName, &v), - testCheckChange, + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 3), testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), + resource.TestCheckResourceAttr(resourceName, "route.#", "2"), + testAccCheckAWSRouteTableRoute(resourceName, "cidr_block", destinationCidr2, "gateway_id", igwResourceName, "id"), + testAccCheckAWSRouteTableRoute(resourceName, "cidr_block", destinationCidr3, "gateway_id", igwResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } -func TestAccAWSRouteTable_instance(t *testing.T) { - var v ec2.RouteTable +func TestAccAWSRouteTable_IPv4_To_Instance(t *testing.T) { + var routeTable ec2.RouteTable resourceName := "aws_route_table.test" - - testCheck := func(*terraform.State) error { - if len(v.Routes) != 2 { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - - routes := make(map[string]*ec2.Route) - for _, r := range v.Routes { - routes[*r.DestinationCidrBlock] = r - } - - if _, ok := routes["10.1.0.0/16"]; !ok { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - if _, ok := routes["10.2.0.0/16"]; !ok { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - - return nil - } + instanceResourceName := "aws_instance.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + destinationCidr := "10.2.0.0/16" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -190,10 +221,16 @@ func TestAccAWSRouteTable_instance(t *testing.T) { CheckDestroy: testAccCheckRouteTableDestroy, Steps: []resource.TestStep{ { - Config: testAccRouteTableConfigInstance(), + Config: testAccAWSRouteTableConfigIpv4Instance(rName, destinationCidr), Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists(resourceName, &v), - testCheck, + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 2), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + 
resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), + resource.TestCheckResourceAttr(resourceName, "route.#", "1"), + testAccCheckAWSRouteTableRoute(resourceName, "cidr_block", destinationCidr, "instance_id", instanceResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), ), }, { @@ -205,18 +242,12 @@ func TestAccAWSRouteTable_instance(t *testing.T) { }) } -func TestAccAWSRouteTable_ipv6(t *testing.T) { - var v ec2.RouteTable +func TestAccAWSRouteTable_IPv6_To_EgressOnlyInternetGateway(t *testing.T) { + var routeTable ec2.RouteTable resourceName := "aws_route_table.test" - - testCheck := func(*terraform.State) error { - // Expect 3: 2 IPv6 (local + all outbound) + 1 IPv4 - if len(v.Routes) != 3 { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - - return nil - } + eoigwResourceName := "aws_egress_only_internet_gateway.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + destinationCidr := "::/0" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -225,10 +256,16 @@ func TestAccAWSRouteTable_ipv6(t *testing.T) { CheckDestroy: testAccCheckRouteTableDestroy, Steps: []resource.TestStep{ { - Config: testAccRouteTableConfigIpv6, + Config: testAccAWSRouteTableConfigIpv6EgressOnlyInternetGateway(rName, destinationCidr), Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists(resourceName, &v), - testCheck, + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 3), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), + resource.TestCheckResourceAttr(resourceName, "route.#", "1"), + testAccCheckAWSRouteTableRoute(resourceName, "ipv6_cidr_block", destinationCidr, "egress_only_gateway_id", eoigwResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), ), }, { @@ -236,6 +273,11 @@ func TestAccAWSRouteTable_ipv6(t *testing.T) { ImportState: true, ImportStateVerify: true, }, + { + // Verify that expanded form of the destination CIDR causes no diff. 
+ Config: testAccAWSRouteTableConfigIpv6EgressOnlyInternetGateway(rName, "::0/0"), + PlanOnly: true, + }, }, }) } @@ -287,6 +329,7 @@ func TestAccAWSRouteTable_tags(t *testing.T) { func TestAccAWSRouteTable_RequireRouteDestination(t *testing.T) { resourceName := "aws_route_table.test" + rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -295,7 +338,7 @@ func TestAccAWSRouteTable_RequireRouteDestination(t *testing.T) { CheckDestroy: testAccCheckRouteTableDestroy, Steps: []resource.TestStep{ { - Config: testAccRouteTableConfigNoDestination(), + Config: testAccRouteTableConfigNoDestination(rName), ExpectError: regexp.MustCompile("error creating route: one of `cidr_block"), }, }, @@ -304,6 +347,7 @@ func TestAccAWSRouteTable_RequireRouteDestination(t *testing.T) { func TestAccAWSRouteTable_RequireRouteTarget(t *testing.T) { resourceName := "aws_route_table.test" + rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -312,7 +356,7 @@ func TestAccAWSRouteTable_RequireRouteTarget(t *testing.T) { CheckDestroy: testAccCheckRouteTableDestroy, Steps: []resource.TestStep{ { - Config: testAccRouteTableConfigNoTarget, + Config: testAccRouteTableConfigNoTarget(rName), ExpectError: regexp.MustCompile("error creating route: one of `egress_only_gateway_id"), }, }, @@ -320,8 +364,12 @@ func TestAccAWSRouteTable_RequireRouteTarget(t *testing.T) { } func TestAccAWSRouteTable_Route_ConfigMode(t *testing.T) { - var routeTable1, routeTable2, routeTable3 ec2.RouteTable + var routeTable ec2.RouteTable resourceName := "aws_route_table.test" + igwResourceName := "aws_internet_gateway.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + destinationCidr1 := "10.2.0.0/16" + destinationCidr2 := "10.3.0.0/16" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -329,10 +377,17 @@ func TestAccAWSRouteTable_Route_ConfigMode(t *testing.T) { CheckDestroy: testAccCheckRouteTableDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSRouteTableConfigRouteConfigModeBlocks(), + Config: testAccAWSRouteTableConfigIpv4InternetGateway(rName, destinationCidr1, destinationCidr2), Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists(resourceName, &routeTable1), + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 3), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), resource.TestCheckResourceAttr(resourceName, "route.#", "2"), + testAccCheckAWSRouteTableRoute(resourceName, "cidr_block", destinationCidr1, "gateway_id", igwResourceName, "id"), + testAccCheckAWSRouteTableRoute(resourceName, "cidr_block", destinationCidr2, "gateway_id", igwResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), ), }, { @@ -341,10 +396,17 @@ func TestAccAWSRouteTable_Route_ConfigMode(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAWSRouteTableConfigRouteConfigModeNoBlocks(), + Config: testAccAWSRouteTableConfigRouteConfigModeNoBlocks(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists(resourceName, &routeTable2), + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 3), + 
testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), resource.TestCheckResourceAttr(resourceName, "route.#", "2"), + testAccCheckAWSRouteTableRoute(resourceName, "cidr_block", destinationCidr1, "gateway_id", igwResourceName, "id"), + testAccCheckAWSRouteTableRoute(resourceName, "cidr_block", destinationCidr2, "gateway_id", igwResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), ), }, { @@ -353,10 +415,15 @@ func TestAccAWSRouteTable_Route_ConfigMode(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAWSRouteTableConfigRouteConfigModeZeroed(), + Config: testAccAWSRouteTableConfigRouteConfigModeZeroed(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists(resourceName, &routeTable3), + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 1), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), resource.TestCheckResourceAttr(resourceName, "route.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), ), }, { @@ -368,9 +435,12 @@ func TestAccAWSRouteTable_Route_ConfigMode(t *testing.T) { }) } -func TestAccAWSRouteTable_Route_TransitGatewayID(t *testing.T) { - var routeTable1 ec2.RouteTable +func TestAccAWSRouteTable_IPv4_To_TransitGateway(t *testing.T) { + var routeTable ec2.RouteTable resourceName := "aws_route_table.test" + tgwResourceName := "aws_ec2_transit_gateway.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + destinationCidr := "10.2.0.0/16" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -378,9 +448,16 @@ func TestAccAWSRouteTable_Route_TransitGatewayID(t *testing.T) { CheckDestroy: testAccCheckRouteTableDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSRouteTableConfigRouteTransitGatewayID(), + Config: testAccAWSRouteTableConfigIpv4TransitGateway(rName, destinationCidr), Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists(resourceName, &routeTable1), + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 2), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), + resource.TestCheckResourceAttr(resourceName, "route.#", "1"), + testAccCheckAWSRouteTableRoute(resourceName, "cidr_block", destinationCidr, "transit_gateway_id", tgwResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), ), }, { @@ -392,10 +469,12 @@ func TestAccAWSRouteTable_Route_TransitGatewayID(t *testing.T) { }) } -func TestAccAWSRouteTable_Route_VpcEndpointId(t *testing.T) { - var routeTable1 ec2.RouteTable - rName := acctest.RandomWithPrefix("tf-acc-test") +func TestAccAWSRouteTable_IPv4_To_VpcEndpoint(t *testing.T) { + var routeTable ec2.RouteTable resourceName := "aws_route_table.test" + vpceResourceName := "aws_vpc_endpoint.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + destinationCidr := "0.0.0.0/0" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckElbv2GatewayLoadBalancer(t) }, @@ -403,9 +482,16 @@ func 
TestAccAWSRouteTable_Route_VpcEndpointId(t *testing.T) { CheckDestroy: testAccCheckRouteTableDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSRouteTableConfigRouteVpcEndpointId(rName), + Config: testAccAWSRouteTableConfigRouteIpv4VpcEndpointId(rName, destinationCidr), Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists(resourceName, &routeTable1), + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 2), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), + resource.TestCheckResourceAttr(resourceName, "route.#", "1"), + testAccCheckAWSRouteTableRoute(resourceName, "cidr_block", destinationCidr, "vpc_endpoint_id", vpceResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), ), }, { @@ -417,9 +503,12 @@ func TestAccAWSRouteTable_Route_VpcEndpointId(t *testing.T) { }) } -func TestAccAWSRouteTable_Route_LocalGatewayID(t *testing.T) { - var routeTable1 ec2.RouteTable +func TestAccAWSRouteTable_IPv4_To_LocalGateway(t *testing.T) { + var routeTable ec2.RouteTable resourceName := "aws_route_table.test" + lgwDataSourceName := "data.aws_ec2_local_gateway.first" + rName := acctest.RandomWithPrefix("tf-acc-test") + destinationCidr := "0.0.0.0/0" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSOutpostsOutposts(t) }, @@ -427,9 +516,16 @@ func TestAccAWSRouteTable_Route_LocalGatewayID(t *testing.T) { CheckDestroy: testAccCheckRouteTableDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSRouteTableConfigRouteLocalGatewayID(), + Config: testAccAWSRouteTableConfigRouteIpv4LocalGateway(rName, destinationCidr), Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists(resourceName, &routeTable1), + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 2), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), + resource.TestCheckResourceAttr(resourceName, "route.#", "1"), + testAccCheckAWSRouteTableRoute(resourceName, "cidr_block", destinationCidr, "local_gateway_id", lgwDataSourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), ), }, { @@ -441,98 +537,29 @@ func TestAccAWSRouteTable_Route_LocalGatewayID(t *testing.T) { }) } -func testAccCheckRouteTableDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_route_table" { - continue - } - - // Try to find the resource - resp, err := conn.DescribeRouteTables(&ec2.DescribeRouteTablesInput{ - RouteTableIds: []*string{aws.String(rs.Primary.ID)}, - }) - if err == nil { - if len(resp.RouteTables) > 0 { - return fmt.Errorf("still exist.") - } - - return nil - } - - // Verify the error is what we want - if !isAWSErr(err, "InvalidRouteTableID.NotFound", "") { - return err - } - } - - return nil -} - -func testAccCheckRouteTableExists(n string, v *ec2.RouteTable) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - conn := 
testAccProvider.Meta().(*AWSClient).ec2conn - resp, err := conn.DescribeRouteTables(&ec2.DescribeRouteTablesInput{ - RouteTableIds: []*string{aws.String(rs.Primary.ID)}, - }) - if err != nil { - return err - } - if len(resp.RouteTables) == 0 { - return fmt.Errorf("RouteTable not found") - } - - *v = *resp.RouteTables[0] - - return nil - } -} - -// VPC Peering connections are prefixed with pcx -// Right now there is no VPC Peering resource -func TestAccAWSRouteTable_vpcPeering(t *testing.T) { - var v ec2.RouteTable +func TestAccAWSRouteTable_IPv4_To_VpcPeeringConnection(t *testing.T) { + var routeTable ec2.RouteTable resourceName := "aws_route_table.test" + pcxResourceName := "aws_vpc_peering_connection.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + destinationCidr := "10.2.0.0/16" - testCheck := func(*terraform.State) error { - if len(v.Routes) != 2 { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - - routes := make(map[string]*ec2.Route) - for _, r := range v.Routes { - routes[*r.DestinationCidrBlock] = r - } - - if _, ok := routes["10.1.0.0/16"]; !ok { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - if _, ok := routes["10.2.0.0/16"]; !ok { - return fmt.Errorf("bad routes: %#v", v.Routes) - } - - return nil - } resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckRouteTableDestroy, Steps: []resource.TestStep{ { - Config: testAccRouteTableVpcPeeringConfig, + Config: testAccAWSRouteTableConfigIpv4VpcPeeringConnection(rName, destinationCidr), Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists(resourceName, &v), - testCheck, + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 2), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), + resource.TestCheckResourceAttr(resourceName, "route.#", "1"), + testAccCheckAWSRouteTableRoute(resourceName, "cidr_block", destinationCidr, "vpc_peering_connection_id", pcxResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), ), }, { @@ -545,41 +572,41 @@ func TestAccAWSRouteTable_vpcPeering(t *testing.T) { } func TestAccAWSRouteTable_vgwRoutePropagation(t *testing.T) { - var v ec2.RouteTable - var vgw ec2.VpnGateway + var routeTable ec2.RouteTable resourceName := "aws_route_table.test" + vgwResourceName1 := "aws_vpn_gateway.test1" + vgwResourceName2 := "aws_vpn_gateway.test2" + rName := acctest.RandomWithPrefix("tf-acc-test") - testCheck := func(*terraform.State) error { - if len(v.PropagatingVgws) != 1 { - return fmt.Errorf("bad propagating vgws: %#v", v.PropagatingVgws) - } - - propagatingVGWs := make(map[string]*ec2.PropagatingVgw) - for _, gw := range v.PropagatingVgws { - propagatingVGWs[*gw.GatewayId] = gw - } - - if _, ok := propagatingVGWs[*vgw.VpnGatewayId]; !ok { - return fmt.Errorf("bad propagating vgws: %#v", v.PropagatingVgws) - } - - return nil - - } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: resource.ComposeTestCheckFunc( - testAccCheckVpnGatewayDestroy, - testAccCheckRouteTableDestroy, - ), + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckRouteTableDestroy, Steps: []resource.TestStep{ { - Config: 
testAccRouteTableVgwRoutePropagationConfig, + Config: testAccAWSRouteTableConfigVgwRoutePropagation(rName, vgwResourceName1), + Check: resource.ComposeTestCheckFunc( + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 1), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "1"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "propagating_vgws.*", vgwResourceName1, "id"), + resource.TestCheckResourceAttr(resourceName, "route.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + ), + }, + { + Config: testAccAWSRouteTableConfigVgwRoutePropagation(rName, vgwResourceName2), Check: resource.ComposeTestCheckFunc( - testAccCheckRouteTableExists(resourceName, &v), - testAccCheckVpnGatewayExists("aws_vpn_gateway.test", &vgw), - testCheck, + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 1), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "1"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "propagating_vgws.*", vgwResourceName2, "id"), + resource.TestCheckResourceAttr(resourceName, "route.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), ), }, { @@ -594,7 +621,10 @@ func TestAccAWSRouteTable_vgwRoutePropagation(t *testing.T) { func TestAccAWSRouteTable_ConditionalCidrBlock(t *testing.T) { var routeTable ec2.RouteTable resourceName := "aws_route_table.test" + igwResourceName := "aws_internet_gateway.test" rName := acctest.RandomWithPrefix("tf-acc-test") + destinationCidr := "10.2.0.0/16" + destinationIpv6Cidr := "::/0" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -602,23 +632,17 @@ func TestAccAWSRouteTable_ConditionalCidrBlock(t *testing.T) { CheckDestroy: testAccCheckAWSRouteDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSRouteTableConfigConditionalIpv4Ipv6(rName, false), + Config: testAccAWSRouteTableConfigConditionalIpv4Ipv6(rName, destinationCidr, destinationIpv6Cidr, false), Check: resource.ComposeTestCheckFunc( testAccCheckRouteTableExists(resourceName, &routeTable), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "route.*", map[string]string{ - "cidr_block": "0.0.0.0/0", - "ipv6_cidr_block": "", - }), + testAccCheckAWSRouteTableRoute(resourceName, "cidr_block", destinationCidr, "gateway_id", igwResourceName, "id"), ), }, { - Config: testAccAWSRouteTableConfigConditionalIpv4Ipv6(rName, true), + Config: testAccAWSRouteTableConfigConditionalIpv4Ipv6(rName, destinationCidr, destinationIpv6Cidr, true), Check: resource.ComposeTestCheckFunc( testAccCheckRouteTableExists(resourceName, &routeTable), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "route.*", map[string]string{ - "cidr_block": "", - "ipv6_cidr_block": "::/0", - }), + testAccCheckAWSRouteTableRoute(resourceName, "ipv6_cidr_block", destinationIpv6Cidr, "gateway_id", igwResourceName, "id"), ), }, { @@ -630,57 +654,626 @@ func TestAccAWSRouteTable_ConditionalCidrBlock(t *testing.T) { }) } -func testAccCheckAWSRouteTableNumberOfRoutes(routeTable *ec2.RouteTable, n int) resource.TestCheckFunc { - return func(s *terraform.State) error { - if len := len(routeTable.Routes); len != n { - return 
fmt.Errorf("Route Table has incorrect number of routes (Expected=%d, Actual=%d)\n", n, len) - } - - return nil - } -} +func TestAccAWSRouteTable_IPv4_To_NatGateway(t *testing.T) { + var routeTable ec2.RouteTable + resourceName := "aws_route_table.test" + ngwResourceName := "aws_nat_gateway.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + destinationCidr := "10.2.0.0/16" -const testAccRouteTableConfig = ` -resource "aws_vpc" "test" { + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: resourceName, + Providers: testAccProviders, + CheckDestroy: testAccCheckRouteTableDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSRouteTableConfigIpv4NatGateway(rName, destinationCidr), + Check: resource.ComposeTestCheckFunc( + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 2), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), + resource.TestCheckResourceAttr(resourceName, "route.#", "1"), + testAccCheckAWSRouteTableRoute(resourceName, "cidr_block", destinationCidr, "nat_gateway_id", ngwResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSRouteTable_IPv6_To_NetworkInterface_Unattached(t *testing.T) { + var routeTable ec2.RouteTable + resourceName := "aws_route_table.test" + eniResourceName := "aws_network_interface.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + destinationCidr := "::/0" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: resourceName, + Providers: testAccProviders, + CheckDestroy: testAccCheckRouteTableDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSRouteTableConfigIpv6NetworkInterfaceUnattached(rName, destinationCidr), + Check: resource.ComposeTestCheckFunc( + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 3), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), + resource.TestCheckResourceAttr(resourceName, "route.#", "1"), + testAccCheckAWSRouteTableRoute(resourceName, "ipv6_cidr_block", destinationCidr, "network_interface_id", eniResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSRouteTable_VpcMultipleCidrs(t *testing.T) { + var routeTable ec2.RouteTable + resourceName := "aws_route_table.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSRouteDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSRouteTableConfigVpcMultipleCidrs(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 2), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", 
"0"), + resource.TestCheckResourceAttr(resourceName, "route.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSRouteTable_VpcClassicLink(t *testing.T) { + var routeTable ec2.RouteTable + resourceName := "aws_route_table.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSRouteDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSRouteTableConfigVpcClassicLink(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 2), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), + resource.TestCheckResourceAttr(resourceName, "route.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSRouteTable_GatewayVpcEndpoint(t *testing.T) { + var routeTable ec2.RouteTable + var vpce ec2.VpcEndpoint + resourceName := "aws_route_table.test" + vpceResourceName := "aws_vpc_endpoint.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSRouteDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSRouteTableConfigGatewayVpcEndpoint(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckVpcEndpointExists(vpceResourceName, &vpce), + testAccCheckAWSRouteTableWaitForVpcEndpointRoute(&routeTable, &vpce), + // Refresh the route table once the VPC endpoint route is present. 
+ testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 2), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), + resource.TestCheckResourceAttr(resourceName, "route.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSRouteTable_MultipleRoutes(t *testing.T) { + var routeTable ec2.RouteTable + resourceName := "aws_route_table.test" + eoigwResourceName := "aws_egress_only_internet_gateway.test" + igwResourceName := "aws_internet_gateway.test" + instanceResourceName := "aws_instance.test" + pcxResourceName := "aws_vpc_peering_connection.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + destinationCidr1 := "10.2.0.0/16" + destinationCidr2 := "10.3.0.0/16" + destinationCidr3 := "10.4.0.0/16" + destinationCidr4 := "2001:db8::/122" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: resourceName, + Providers: testAccProviders, + CheckDestroy: testAccCheckRouteTableDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSRouteTableConfigMultipleRoutes(rName, + "cidr_block", destinationCidr1, "gateway_id", igwResourceName, + "cidr_block", destinationCidr2, "instance_id", instanceResourceName, + "ipv6_cidr_block", destinationCidr4, "egress_only_gateway_id", eoigwResourceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 5), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), + resource.TestCheckResourceAttr(resourceName, "route.#", "3"), + testAccCheckAWSRouteTableRoute(resourceName, "cidr_block", destinationCidr1, "gateway_id", igwResourceName, "id"), + testAccCheckAWSRouteTableRoute(resourceName, "cidr_block", destinationCidr2, "instance_id", instanceResourceName, "id"), + testAccCheckAWSRouteTableRoute(resourceName, "ipv6_cidr_block", destinationCidr4, "egress_only_gateway_id", eoigwResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + ), + }, + { + Config: testAccAWSRouteTableConfigMultipleRoutes(rName, + "cidr_block", destinationCidr1, "vpc_peering_connection_id", pcxResourceName, + "cidr_block", destinationCidr3, "instance_id", instanceResourceName, + "ipv6_cidr_block", destinationCidr4, "egress_only_gateway_id", eoigwResourceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 5), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), + resource.TestCheckResourceAttr(resourceName, "route.#", "3"), + testAccCheckAWSRouteTableRoute(resourceName, "cidr_block", destinationCidr1, "vpc_peering_connection_id", pcxResourceName, "id"), + testAccCheckAWSRouteTableRoute(resourceName, "cidr_block", destinationCidr3, "instance_id", instanceResourceName, "id"), + testAccCheckAWSRouteTableRoute(resourceName, "ipv6_cidr_block", destinationCidr4, "egress_only_gateway_id", eoigwResourceName, "id"), + 
resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + ), + }, + { + Config: testAccAWSRouteTableConfigMultipleRoutes(rName, + "ipv6_cidr_block", destinationCidr4, "vpc_peering_connection_id", pcxResourceName, + "cidr_block", destinationCidr3, "gateway_id", igwResourceName, + "cidr_block", destinationCidr2, "instance_id", instanceResourceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckRouteTableExists(resourceName, &routeTable), + testAccCheckAWSRouteTableNumberOfRoutes(&routeTable, 5), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "propagating_vgws.#", "0"), + resource.TestCheckResourceAttr(resourceName, "route.#", "3"), + testAccCheckAWSRouteTableRoute(resourceName, "ipv6_cidr_block", destinationCidr4, "vpc_peering_connection_id", pcxResourceName, "id"), + testAccCheckAWSRouteTableRoute(resourceName, "cidr_block", destinationCidr3, "gateway_id", igwResourceName, "id"), + testAccCheckAWSRouteTableRoute(resourceName, "cidr_block", destinationCidr2, "instance_id", instanceResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckRouteTableExists(n string, v *ec2.RouteTable) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).ec2conn + resp, err := conn.DescribeRouteTables(&ec2.DescribeRouteTablesInput{ + RouteTableIds: []*string{aws.String(rs.Primary.ID)}, + }) + if err != nil { + return err + } + if len(resp.RouteTables) == 0 { + return fmt.Errorf("RouteTable not found") + } + + *v = *resp.RouteTables[0] + + return nil + } +} + +func testAccCheckRouteTableDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).ec2conn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_route_table" { + continue + } + + // Try to find the resource + resp, err := conn.DescribeRouteTables(&ec2.DescribeRouteTablesInput{ + RouteTableIds: []*string{aws.String(rs.Primary.ID)}, + }) + if err == nil { + if len(resp.RouteTables) > 0 { + return fmt.Errorf("still exist.") + } + + return nil + } + + // Verify the error is what we want + if !isAWSErr(err, "InvalidRouteTableID.NotFound", "") { + return err + } + } + + return nil +} + +func testAccCheckAWSRouteTableNumberOfRoutes(routeTable *ec2.RouteTable, n int) resource.TestCheckFunc { + return func(s *terraform.State) error { + if len := len(routeTable.Routes); len != n { + return fmt.Errorf("Route Table has incorrect number of routes (Expected=%d, Actual=%d)\n", n, len) + } + + return nil + } +} + +func testAccCheckAWSRouteTableRoute(resourceName, destinationAttr, destination, targetAttr, targetResourceName, targetResourceAttr string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[targetResourceName] + if !ok { + return fmt.Errorf("Not found: %s", targetResourceName) + } + + target := rs.Primary.Attributes[targetResourceAttr] + if target == "" { + return fmt.Errorf("Not found: %s.%s", targetResourceName, targetResourceAttr) + } + + return 
resource.TestCheckTypeSetElemNestedAttrs(resourceName, "route.*", map[string]string{ + destinationAttr: destination, + targetAttr: target, + })(s) + } +} + +// testAccCheckAWSRouteTableWaitForVpcEndpointRoute returns a TestCheckFunc which waits for +// a route to the specified VPC endpoint's prefix list to appear in the specified route table. +func testAccCheckAWSRouteTableWaitForVpcEndpointRoute(routeTable *ec2.RouteTable, vpce *ec2.VpcEndpoint) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).ec2conn + + resp, err := conn.DescribePrefixLists(&ec2.DescribePrefixListsInput{ + Filters: buildEC2AttributeFilterList(map[string]string{ + "prefix-list-name": aws.StringValue(vpce.ServiceName), + }), + }) + if err != nil { + return err + } + + if resp == nil || len(resp.PrefixLists) == 0 { + return fmt.Errorf("Prefix List not found") + } + + plId := aws.StringValue(resp.PrefixLists[0].PrefixListId) + + err = resource.Retry(3*time.Minute, func() *resource.RetryError { + resp, err := conn.DescribeRouteTables(&ec2.DescribeRouteTablesInput{ + RouteTableIds: []*string{routeTable.RouteTableId}, + }) + if err != nil { + return resource.NonRetryableError(err) + } + if resp == nil || len(resp.RouteTables) == 0 { + return resource.NonRetryableError(fmt.Errorf("Route Table not found")) + } + + for _, route := range resp.RouteTables[0].Routes { + if aws.StringValue(route.DestinationPrefixListId) == plId { + return nil + } + } + + return resource.RetryableError(fmt.Errorf("Route not found")) + }) + + return err + } +} + +func testAccAWSRouteTableConfigBasic(rName string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.1.0.0/16" + + tags = { + Name = %[1]q + } +} + +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id +} +`, rName) +} + +func testAccAWSRouteTableConfigSubnetAssociation(rName string) string { + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.1.0.0/16" + + tags = { + Name = %[1]q + } +} + +resource "aws_subnet" "test" { + cidr_block = "10.1.1.0/24" + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[0] + + tags = { + Name = %[1]q + } +} + +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } +} + +resource "aws_route_table_association" "test" { + route_table_id = aws_route_table.test.id + subnet_id = aws_subnet.test.id +} +`, rName)) +} + +func testAccAWSRouteTableConfigIpv4InternetGateway(rName, destinationCidr1, destinationCidr2 string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.1.0.0/16" + + tags = { + Name = %[1]q + } +} + +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } +} + +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id + + route { + cidr_block = %[2]q + gateway_id = aws_internet_gateway.test.id + } + + route { + cidr_block = %[3]q + gateway_id = aws_internet_gateway.test.id + } + + tags = { + Name = %[1]q + } +} +`, rName, destinationCidr1, destinationCidr2) +} + +func testAccAWSRouteTableConfigIpv6EgressOnlyInternetGateway(rName, destinationCidr string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.1.0.0/16" + assign_generated_ipv6_cidr_block = true + + tags = { + Name = %[1]q + } +} + +resource "aws_egress_only_internet_gateway" "test" { + vpc_id = aws_vpc.test.id + + tags = { + 
Name = %[1]q + } +} + +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id + + route { + ipv6_cidr_block = %[2]q + egress_only_gateway_id = aws_egress_only_internet_gateway.test.id + } + + tags = { + Name = %[1]q + } +} +`, rName, destinationCidr) +} + +func testAccAWSRouteTableConfigIpv4Instance(rName, destinationCidr string) string { + return composeConfig( + testAccLatestAmazonNatInstanceAmiConfig(), + testAccAvailableAZsNoOptInConfig(), + testAccAvailableEc2InstanceTypeForAvailabilityZone("data.aws_availability_zones.available.names[0]", "t3.micro", "t2.micro"), + fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.1.0.0/16" + + tags = { + Name = %[1]q + } +} + +resource "aws_subnet" "test" { + cidr_block = "10.1.1.0/24" + vpc_id = aws_vpc.test.id + availability_zone = data.aws_availability_zones.available.names[0] + + tags = { + Name = %[1]q + } +} + +resource "aws_instance" "test" { + ami = data.aws_ami.amzn-ami-nat-instance.id + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + subnet_id = aws_subnet.test.id + + tags = { + Name = %[1]q + } +} + +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id + + route { + cidr_block = %[2]q + instance_id = aws_instance.test.id + } + + tags = { + Name = %[1]q + } +} +`, rName, destinationCidr)) +} + +func testAccAWSRouteTableConfigTags1(rName, tagKey1, tagValue1 string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" tags = { - Name = "terraform-testacc-route-table" + Name = %[1]q } } -resource "aws_internet_gateway" "test" { +resource "aws_route_table" "test" { vpc_id = aws_vpc.test.id tags = { - Name = "terraform-testacc-route-table" + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1) +} + +func testAccAWSRouteTableConfigTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.1.0.0/16" + + tags = { + Name = %[1]q } } resource "aws_route_table" "test" { vpc_id = aws_vpc.test.id - route { - cidr_block = "10.2.0.0/16" - gateway_id = aws_internet_gateway.test.id + tags = { + %[2]q = %[3]q + %[4]q = %[5]q } } -` +`, rName, tagKey1, tagValue1, tagKey2, tagValue2) +} -const testAccRouteTableConfigChange = ` +func testAccAWSRouteTableConfigIpv4VpcPeeringConnection(rName, destinationCidr string) string { + return fmt.Sprintf(` resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" tags = { - Name = "terraform-testacc-route-table" + Name = %[1]q } } -resource "aws_internet_gateway" "test" { - vpc_id = aws_vpc.test.id +resource "aws_vpc" "target" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = %[1]q + } +} + +resource "aws_vpc_peering_connection" "test" { + vpc_id = aws_vpc.test.id + peer_vpc_id = aws_vpc.target.id + auto_accept = true tags = { - Name = "terraform-testacc-route-table" + Name = %[1]q } } @@ -688,50 +1281,79 @@ resource "aws_route_table" "test" { vpc_id = aws_vpc.test.id route { - cidr_block = "10.3.0.0/16" - gateway_id = aws_internet_gateway.test.id + cidr_block = %[2]q + vpc_peering_connection_id = aws_vpc_peering_connection.test.id } - route { - cidr_block = "10.4.0.0/16" - gateway_id = aws_internet_gateway.test.id + tags = { + Name = %[1]q } } -` +`, rName, destinationCidr) +} -const testAccRouteTableConfigIpv6 = ` +func testAccAWSRouteTableConfigVgwRoutePropagation(rName, vgwResourceName string) string { + return fmt.Sprintf(` resource "aws_vpc" "test" { - cidr_block = "10.1.0.0/16" - assign_generated_ipv6_cidr_block = true + cidr_block = 
"10.1.0.0/16" tags = { - Name = "terraform-testacc-route-table-ipv6" + Name = %[1]q } } -resource "aws_egress_only_internet_gateway" "test" { - vpc_id = aws_vpc.test.id +resource "aws_vpn_gateway" "test1" { + tags = { + Name = %[1]q + } +} + +resource "aws_vpn_gateway" "test2" { + tags = { + Name = %[1]q + } +} + +resource "aws_vpn_gateway_attachment" "test" { + vpc_id = aws_vpc.test.id + vpn_gateway_id = %[2]s.id } resource "aws_route_table" "test" { vpc_id = aws_vpc.test.id - route { - ipv6_cidr_block = "::/0" - egress_only_gateway_id = aws_egress_only_internet_gateway.test.id + propagating_vgws = [aws_vpn_gateway_attachment.test.vpn_gateway_id] + + tags = { + Name = %[1]q } } -` +`, rName, vgwResourceName) +} -func testAccRouteTableConfigInstance() string { +func testAccRouteTableConfigNoDestination(rName string) string { return composeConfig( + testAccAvailableAZsNoOptInConfig(), + testAccAvailableEc2InstanceTypeForAvailabilityZone("data.aws_availability_zones.available.names[0]", "t3.micro", "t2.micro"), testAccLatestAmazonLinuxHvmEbsAmiConfig(), - testAccAvailableAZsNoOptInDefaultExcludeConfig(), ` + fmt.Sprintf(` +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id + + route { + instance_id = aws_instance.test.id + } + + tags = { + Name = %[1]q + } +} + resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" tags = { - Name = "terraform-testacc-route-table-instance" + Name = %[1]q } } @@ -741,28 +1363,47 @@ resource "aws_subnet" "test" { availability_zone = data.aws_availability_zones.available.names[0] tags = { - Name = "tf-acc-route-table-instance" + Name = %[1]q } } resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id - instance_type = "t2.micro" + instance_type = data.aws_ec2_instance_type_offering.available.instance_type subnet_id = aws_subnet.test.id + + tags = { + Name = %[1]q + } +} +`, rName)) } +func testAccRouteTableConfigNoTarget(rName string) string { + return fmt.Sprintf(` resource "aws_route_table" "test" { vpc_id = aws_vpc.test.id route { - cidr_block = "10.2.0.0/16" - instance_id = aws_instance.test.id + cidr_block = "10.1.0.0/16" + } + + tags = { + Name = %[1]q } } -`) + +resource "aws_vpc" "test" { + cidr_block = "10.2.0.0/16" + + tags = { + Name = %[1]q + } +} +`, rName) } -func testAccAWSRouteTableConfigTags1(rName, tagKey1, tagValue1 string) string { +func testAccAWSRouteTableConfigRouteConfigModeNoBlocks(rName string) string { return fmt.Sprintf(` resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" @@ -772,17 +1413,25 @@ resource "aws_vpc" "test" { } } +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } +} + resource "aws_route_table" "test" { vpc_id = aws_vpc.test.id tags = { - %[2]q = %[3]q + Name = %[1]q } } -`, rName, tagKey1, tagValue1) +`, rName) } -func testAccAWSRouteTableConfigTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { +func testAccAWSRouteTableConfigRouteConfigModeZeroed(rName string) string { return fmt.Sprintf(` resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" @@ -792,57 +1441,183 @@ resource "aws_vpc" "test" { } } +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } +} + resource "aws_route_table" "test" { vpc_id = aws_vpc.test.id + route = [] + tags = { - %[2]q = %[3]q - %[4]q = %[5]q + Name = %[1]q } } -`, rName, tagKey1, tagValue1, tagKey2, tagValue2) +`, rName) } -// VPC Peering connections are prefixed with pcx -const testAccRouteTableVpcPeeringConfig = ` +func 
testAccAWSRouteTableConfigIpv4TransitGateway(rName, destinationCidr string) string { + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), fmt.Sprintf(` resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" tags = { - Name = "terraform-testacc-route-table-vpc-peering-foo" + Name = %[1]q } } -resource "aws_internet_gateway" "test" { +resource "aws_subnet" "test" { + availability_zone = data.aws_availability_zones.available.names[0] + cidr_block = "10.1.1.0/24" + vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } +} + +resource "aws_ec2_transit_gateway" "test" { + tags = { + Name = %[1]q + } +} + +resource "aws_ec2_transit_gateway_vpc_attachment" "test" { + subnet_ids = [aws_subnet.test.id] + transit_gateway_id = aws_ec2_transit_gateway.test.id + vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } +} + +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id + + route { + cidr_block = %[2]q + transit_gateway_id = aws_ec2_transit_gateway_vpc_attachment.test.transit_gateway_id + } + + tags = { + Name = %[1]q + } +} +`, rName, destinationCidr)) +} + +func testAccAWSRouteTableConfigRouteIpv4VpcEndpointId(rName, destinationCidr string) string { + return composeConfig( + testAccAvailableAZsNoOptInConfig(), + fmt.Sprintf(` +data "aws_caller_identity" "current" {} + +resource "aws_vpc" "test" { + cidr_block = "10.10.10.0/25" + + tags = { + Name = %[1]q + } +} + +resource "aws_subnet" "test" { + availability_zone = data.aws_availability_zones.available.names[0] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 2, 0) + vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } +} + +resource "aws_lb" "test" { + load_balancer_type = "gateway" + name = %[1]q + + subnet_mapping { + subnet_id = aws_subnet.test.id + } +} + +resource "aws_vpc_endpoint_service" "test" { + acceptance_required = false + allowed_principals = [data.aws_caller_identity.current.arn] + gateway_load_balancer_arns = [aws_lb.test.arn] + + tags = { + Name = %[1]q + } +} + +resource "aws_vpc_endpoint" "test" { + service_name = aws_vpc_endpoint_service.test.service_name + subnet_ids = [aws_subnet.test.id] + vpc_endpoint_type = aws_vpc_endpoint_service.test.service_type + vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } +} + +resource "aws_route_table" "test" { vpc_id = aws_vpc.test.id + route { + cidr_block = %[2]q + vpc_endpoint_id = aws_vpc_endpoint.test.id + } + tags = { - Name = "terraform-testacc-route-table-vpc-peering-foo" + Name = %[1]q } } +`, rName, destinationCidr)) +} + +func testAccAWSRouteTableConfigRouteIpv4LocalGateway(rName, destinationCidr string) string { + return fmt.Sprintf(` +data "aws_ec2_local_gateways" "all" {} + +data "aws_ec2_local_gateway" "first" { + id = tolist(data.aws_ec2_local_gateways.all.ids)[0] +} + +data "aws_ec2_local_gateway_route_tables" "all" {} + +data "aws_ec2_local_gateway_route_table" "first" { + local_gateway_route_table_id = tolist(data.aws_ec2_local_gateway_route_tables.all.ids)[0] +} -resource "aws_vpc" "bar" { - cidr_block = "10.3.0.0/16" +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" tags = { - Name = "terraform-testacc-route-table-vpc-peering-bar" + Name = %[1]q } } -resource "aws_internet_gateway" "bar" { - vpc_id = aws_vpc.bar.id +resource "aws_subnet" "test" { + cidr_block = "10.0.0.0/24" + vpc_id = aws_vpc.test.id tags = { - Name = "terraform-testacc-route-table-vpc-peering-bar" + Name = %[1]q } } -resource "aws_vpc_peering_connection" "test" { - vpc_id = aws_vpc.test.id - peer_vpc_id = aws_vpc.bar.id +resource 
"aws_ec2_local_gateway_route_table_vpc_association" "example" { + local_gateway_route_table_id = data.aws_ec2_local_gateway_route_table.first.id + vpc_id = aws_vpc.test.id tags = { - foo = "bar" + Name = %[1]q } } @@ -850,315 +1625,272 @@ resource "aws_route_table" "test" { vpc_id = aws_vpc.test.id route { - cidr_block = "10.2.0.0/16" - vpc_peering_connection_id = aws_vpc_peering_connection.test.id + cidr_block = %[2]q + local_gateway_id = data.aws_ec2_local_gateway.first.id + } + + tags = { + Name = %[1]q } + + depends_on = [aws_ec2_local_gateway_route_table_vpc_association.example] +} +`, rName, destinationCidr) } -` -const testAccRouteTableVgwRoutePropagationConfig = ` +func testAccAWSRouteTableConfigConditionalIpv4Ipv6(rName, destinationCidr, destinationIpv6Cidr string, ipv6Route bool) string { + return fmt.Sprintf(` resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" + assign_generated_ipv6_cidr_block = true + tags = { - Name = "terraform-testacc-route-table-vgw-route-propagation" + Name = %[1]q } } -resource "aws_vpn_gateway" "test" { +resource "aws_internet_gateway" "test" { vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } } -resource "aws_route_table" "test" { - vpc_id = aws_vpc.test.id - propagating_vgws = [aws_vpn_gateway.test.id] +locals { + ipv6 = %[4]t + destination = %[2]q + destination_ipv6 = %[3]q } -` -func testAccRouteTableConfigNoDestination() string { - return composeConfig( - testAccLatestAmazonLinuxHvmEbsAmiConfig(), - testAccAvailableAZsNoOptInDefaultExcludeConfig(), ` resource "aws_route_table" "test" { vpc_id = aws_vpc.test.id route { - instance_id = aws_instance.test.id + cidr_block = local.ipv6 ? "" : local.destination + ipv6_cidr_block = local.ipv6 ? local.destination_ipv6 : "" + gateway_id = aws_internet_gateway.test.id + } + + tags = { + Name = %[1]q } } +`, rName, destinationCidr, destinationIpv6Cidr, ipv6Route) +} +func testAccAWSRouteTableConfigIpv4NatGateway(rName, destinationCidr string) string { + return fmt.Sprintf(` resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" tags = { - Name = "tf-acc-route-table-no-destination" + Name = %[1]q } } resource "aws_subnet" "test" { - cidr_block = "10.1.1.0/24" - vpc_id = aws_vpc.test.id - availability_zone = data.aws_availability_zones.available.names[0] + cidr_block = "10.1.1.0/24" + vpc_id = aws_vpc.test.id + + map_public_ip_on_launch = true tags = { - Name = "tf-acc-route-table-no-destination" + Name = %[1]q } } -resource "aws_instance" "test" { - ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id - instance_type = "t2.micro" - subnet_id = aws_subnet.test.id -} -`) -} - -const testAccRouteTableConfigNoTarget = ` -resource "aws_route_table" "test" { +resource "aws_internet_gateway" "test" { vpc_id = aws_vpc.test.id - route { - cidr_block = "10.1.0.0/16" - } -} - -resource "aws_vpc" "test" { - cidr_block = "10.2.0.0/16" - tags = { - Name = "tf-acc-route-table-no-target" + Name = %[1]q } } -` -func testAccAWSRouteTableConfigRouteConfigModeBlocks() string { - return ` -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" +resource "aws_eip" "test" { + vpc = true tags = { - Name = "tf-acc-test-ec2-route-table-config-mode" + Name = %[1]q } } -resource "aws_internet_gateway" "test" { +resource "aws_nat_gateway" "test" { + allocation_id = aws_eip.test.id + subnet_id = aws_subnet.test.id + tags = { - Name = "tf-acc-test-ec2-route-table-config-mode" + Name = %[1]q } - vpc_id = aws_vpc.test.id + depends_on = [aws_internet_gateway.test] } resource "aws_route_table" "test" { vpc_id = aws_vpc.test.id route { - 
cidr_block = "10.1.0.0/16" - gateway_id = aws_internet_gateway.test.id + cidr_block = %[2]q + nat_gateway_id = aws_nat_gateway.test.id } - route { - cidr_block = "10.2.0.0/16" - gateway_id = aws_internet_gateway.test.id + tags = { + Name = %[1]q } } -` +`, rName, destinationCidr) } -func testAccAWSRouteTableConfigRouteConfigModeNoBlocks() string { - return ` +func testAccAWSRouteTableConfigIpv6NetworkInterfaceUnattached(rName, destinationCidr string) string { + return fmt.Sprintf(` resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - - tags = { - Name = "tf-acc-test-ec2-route-table-config-mode" - } -} + cidr_block = "10.1.0.0/16" + assign_generated_ipv6_cidr_block = true -resource "aws_internet_gateway" "test" { tags = { - Name = "tf-acc-test-ec2-route-table-config-mode" + Name = %[1]q } - - vpc_id = aws_vpc.test.id -} - -resource "aws_route_table" "test" { - vpc_id = aws_vpc.test.id -} -` } -func testAccAWSRouteTableConfigRouteConfigModeZeroed() string { - return ` -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" +resource "aws_subnet" "test" { + cidr_block = "10.1.1.0/24" + vpc_id = aws_vpc.test.id + ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, 1) tags = { - Name = "tf-acc-test-ec2-route-table-config-mode" + Name = %[1]q } } -resource "aws_internet_gateway" "test" { +resource "aws_network_interface" "test" { + subnet_id = aws_subnet.test.id + tags = { - Name = "tf-acc-test-ec2-route-table-config-mode" + Name = %[1]q } - - vpc_id = aws_vpc.test.id } resource "aws_route_table" "test" { - route = [] vpc_id = aws_vpc.test.id -} -` -} -func testAccAWSRouteTableConfigRouteTransitGatewayID() string { - return composeConfig( - testAccAvailableAZsNoOptInDefaultExcludeConfig(), ` -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" + route { + ipv6_cidr_block = %[2]q + network_interface_id = aws_network_interface.test.id + } tags = { - Name = "tf-acc-test-ec2-route-table-transit-gateway-id" + Name = %[1]q } } +`, rName, destinationCidr) +} -resource "aws_subnet" "test" { - availability_zone = data.aws_availability_zones.available.names[0] - cidr_block = "10.0.0.0/24" - vpc_id = aws_vpc.test.id +func testAccAWSRouteTableConfigVpcMultipleCidrs(rName string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.1.0.0/16" tags = { - Name = "tf-acc-test-ec2-route-table-transit-gateway-id" + Name = %[1]q } } -resource "aws_ec2_transit_gateway" "test" {} - -resource "aws_ec2_transit_gateway_vpc_attachment" "test" { - subnet_ids = [aws_subnet.test.id] - transit_gateway_id = aws_ec2_transit_gateway.test.id - vpc_id = aws_vpc.test.id +resource "aws_vpc_ipv4_cidr_block_association" "test" { + vpc_id = aws_vpc.test.id + cidr_block = "172.2.0.0/16" } resource "aws_route_table" "test" { - vpc_id = aws_vpc.test.id + vpc_id = aws_vpc_ipv4_cidr_block_association.test.vpc_id - route { - cidr_block = "0.0.0.0/0" - transit_gateway_id = aws_ec2_transit_gateway_vpc_attachment.test.transit_gateway_id + tags = { + Name = %[1]q } } -`) +`, rName) } -func testAccAWSRouteTableConfigRouteVpcEndpointId(rName string) string { - return composeConfig( - testAccAvailableAZsNoOptInConfig(), - fmt.Sprintf(` -data "aws_caller_identity" "current" {} - +func testAccAWSRouteTableConfigVpcClassicLink(rName string) string { + return fmt.Sprintf(` resource "aws_vpc" "test" { - cidr_block = "10.10.10.0/25" + cidr_block = "10.1.0.0/16" + enable_classiclink = true tags = { - Name = "tf-acc-test-load-balancer" + Name = %[1]q } } -resource "aws_subnet" "test" { - availability_zone 
= data.aws_availability_zones.available.names[0] - cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 2, 0) - vpc_id = aws_vpc.test.id +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id tags = { - Name = "tf-acc-test-load-balancer" + Name = %[1]q } } - -resource "aws_lb" "test" { - load_balancer_type = "gateway" - name = %[1]q - - subnet_mapping { - subnet_id = aws_subnet.test.id - } +`, rName) } -resource "aws_vpc_endpoint_service" "test" { - acceptance_required = false - allowed_principals = [data.aws_caller_identity.current.arn] - gateway_load_balancer_arns = [aws_lb.test.arn] -} +func testAccAWSRouteTableConfigGatewayVpcEndpoint(rName string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.1.0.0/16" -resource "aws_vpc_endpoint" "test" { - service_name = aws_vpc_endpoint_service.test.service_name - subnet_ids = [aws_subnet.test.id] - vpc_endpoint_type = aws_vpc_endpoint_service.test.service_type - vpc_id = aws_vpc.test.id + tags = { + Name = %[1]q + } } resource "aws_route_table" "test" { vpc_id = aws_vpc.test.id - route { - cidr_block = "0.0.0.0/0" - vpc_endpoint_id = aws_vpc_endpoint.test.id + tags = { + Name = %[1]q } } -`, rName)) -} -func testAccAWSRouteTableConfigRouteLocalGatewayID() string { - return ` -data "aws_ec2_local_gateways" "all" {} -data "aws_ec2_local_gateway" "first" { - id = tolist(data.aws_ec2_local_gateways.all.ids)[0] -} +data "aws_region" "current" {} -data "aws_ec2_local_gateway_route_tables" "all" {} -data "aws_ec2_local_gateway_route_table" "first" { - local_gateway_route_table_id = tolist(data.aws_ec2_local_gateway_route_tables.all.ids)[0] +resource "aws_vpc_endpoint" "test" { + vpc_id = aws_vpc.test.id + service_name = "com.amazonaws.${data.aws_region.current.name}.s3" + route_table_ids = [aws_route_table.test.id] +} +`, rName) } +func testAccAWSRouteTableConfigMultipleRoutes(rName, + destinationAttr1, destinationValue1, targetAttribute1, targetValue1, + destinationAttr2, destinationValue2, targetAttribute2, targetValue2, + destinationAttr3, destinationValue3, targetAttribute3, targetValue3 string) string { + return composeConfig( + testAccLatestAmazonNatInstanceAmiConfig(), + testAccAvailableAZsNoOptInDefaultExcludeConfig(), + testAccAvailableEc2InstanceTypeForAvailabilityZone("data.aws_availability_zones.available.names[0]", "t3.micro", "t2.micro"), + fmt.Sprintf(` resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" + cidr_block = "10.1.0.0/16" + assign_generated_ipv6_cidr_block = true tags = { - Name = "tf-acc-test-ec2-route-table-transit-gateway-id" + Name = %[1]q } } -resource "aws_subnet" "test" { - cidr_block = "10.0.0.0/24" - vpc_id = aws_vpc.test.id -} - -resource "aws_ec2_local_gateway_route_table_vpc_association" "example" { - local_gateway_route_table_id = data.aws_ec2_local_gateway_route_table.first.id - vpc_id = aws_vpc.test.id -} - -resource "aws_route_table" "test" { - vpc_id = aws_vpc.test.id +resource "aws_vpc" "target" { + cidr_block = "10.0.0.0/16" - route { - cidr_block = "0.0.0.0/0" - local_gateway_id = data.aws_ec2_local_gateway.first.id + tags = { + Name = %[1]q } - depends_on = [aws_ec2_local_gateway_route_table_vpc_association.example] -} -` } -func testAccAWSRouteTableConfigConditionalIpv4Ipv6(rName string, ipv6Route bool) string { - return fmt.Sprintf(` -resource "aws_vpc" "test" { - cidr_block = "10.1.0.0/16" - - assign_generated_ipv6_cidr_block = true +resource "aws_subnet" "test" { + cidr_block = "10.1.1.0/24" + vpc_id = aws_vpc.test.id + availability_zone = 
data.aws_availability_zones.available.names[0] tags = { Name = %[1]q @@ -1181,24 +1913,101 @@ resource "aws_internet_gateway" "test" { } } -locals { - ipv6 = %[2]t - destination = "0.0.0.0/0" - destination_ipv6 = "::/0" +resource "aws_instance" "test" { + ami = data.aws_ami.amzn-ami-nat-instance.id + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + subnet_id = aws_subnet.test.id + + tags = { + Name = %[1]q + } +} + +resource "aws_vpc_peering_connection" "test" { + vpc_id = aws_vpc.test.id + peer_vpc_id = aws_vpc.target.id + auto_accept = true + + tags = { + Name = %[1]q + } } resource "aws_route_table" "test" { vpc_id = aws_vpc.test.id route { - cidr_block = local.ipv6 ? "" : local.destination - ipv6_cidr_block = local.ipv6 ? local.destination_ipv6 : "" - gateway_id = aws_internet_gateway.test.id + # Destination. + cidr_block = (%[2]q == "cidr_block") ? %[3]q : null + ipv6_cidr_block = (%[2]q == "ipv6_cidr_block") ? %[3]q : null + + # Target + egress_only_gateway_id = (%[4]q == "egress_only_gateway_id") ? %[5]s.id : null + gateway_id = (%[4]q == "gateway_id") ? %[5]s.id : null + instance_id = (%[4]q == "instance_id") ? %[5]s.id : null + local_gateway_id = (%[4]q == "local_gateway_id") ? %[5]s.id : null + nat_gateway_id = (%[4]q == "nat_gateway_id") ? %[5]s.id : null + network_interface_id = (%[4]q == "network_interface_id") ? %[5]s.id : null + transit_gateway_id = (%[4]q == "transit_gateway_id") ? %[5]s.id : null + vpc_endpoint_id = (%[4]q == "vpc_endpoint_id") ? %[5]s.id : null + vpc_peering_connection_id = (%[4]q == "vpc_peering_connection_id") ? %[5]s.id : null + } + + route { + # Destination. + cidr_block = (%[6]q == "cidr_block") ? %[7]q : null + ipv6_cidr_block = (%[6]q == "ipv6_cidr_block") ? %[7]q : null + + # Target + egress_only_gateway_id = (%[8]q == "egress_only_gateway_id") ? %[9]s.id : null + gateway_id = (%[8]q == "gateway_id") ? %[9]s.id : null + instance_id = (%[8]q == "instance_id") ? %[9]s.id : null + local_gateway_id = (%[8]q == "local_gateway_id") ? %[9]s.id : null + nat_gateway_id = (%[8]q == "nat_gateway_id") ? %[9]s.id : null + network_interface_id = (%[8]q == "network_interface_id") ? %[9]s.id : null + transit_gateway_id = (%[8]q == "transit_gateway_id") ? %[9]s.id : null + vpc_endpoint_id = (%[8]q == "vpc_endpoint_id") ? %[9]s.id : null + vpc_peering_connection_id = (%[8]q == "vpc_peering_connection_id") ? %[9]s.id : null + } + + route { + # Destination. + cidr_block = (%[10]q == "cidr_block") ? %[11]q : null + ipv6_cidr_block = (%[10]q == "ipv6_cidr_block") ? %[11]q : null + + # Target + egress_only_gateway_id = (%[12]q == "egress_only_gateway_id") ? %[13]s.id : null + gateway_id = (%[12]q == "gateway_id") ? %[13]s.id : null + instance_id = (%[12]q == "instance_id") ? %[13]s.id : null + local_gateway_id = (%[12]q == "local_gateway_id") ? %[13]s.id : null + nat_gateway_id = (%[12]q == "nat_gateway_id") ? %[13]s.id : null + network_interface_id = (%[12]q == "network_interface_id") ? %[13]s.id : null + transit_gateway_id = (%[12]q == "transit_gateway_id") ? %[13]s.id : null + vpc_endpoint_id = (%[12]q == "vpc_endpoint_id") ? %[13]s.id : null + vpc_peering_connection_id = (%[12]q == "vpc_peering_connection_id") ? 
%[13]s.id : null } tags = { Name = %[1]q } } -`, rName, ipv6Route) +`, rName, destinationAttr1, destinationValue1, targetAttribute1, targetValue1, destinationAttr2, destinationValue2, targetAttribute2, targetValue2, destinationAttr3, destinationValue3, targetAttribute3, targetValue3)) +} + +// testAccLatestAmazonNatInstanceAmiConfig returns the configuration for a data source that +// describes the latest Amazon NAT instance AMI. +// See https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html#nat-instance-ami. +// The data source is named 'amzn-ami-nat-instance'. +func testAccLatestAmazonNatInstanceAmiConfig() string { + return fmt.Sprintf(` +data "aws_ami" "amzn-ami-nat-instance" { + most_recent = true + owners = ["amazon"] + + filter { + name = "name" + values = ["amzn-ami-vpc-nat-*"] + } +} +`) } diff --git a/aws/validators_test.go b/aws/validators_test.go index 270a912298c..900c11308f2 100644 --- a/aws/validators_test.go +++ b/aws/validators_test.go @@ -564,6 +564,23 @@ func TestCanonicalCidrBlock(t *testing.T) { } } +func Test_canonicalCidrBlock(t *testing.T) { + for _, ts := range []struct { + cidr string + expected string + }{ + {"10.2.2.0/24", "10.2.2.0/24"}, + {"::/0", "::/0"}, + {"::0/0", "::/0"}, + {"", ""}, + } { + got := canonicalCidrBlock(ts.cidr) + if ts.expected != got { + t.Fatalf("canonicalCidrBlock(%q) should be: %q, got: %q", ts.cidr, ts.expected, got) + } + } +} + func TestValidateLogMetricFilterName(t *testing.T) { validNames := []string{ "YadaHereAndThere", From e11a809f661b31527d932ba6356ab3c6613312c8 Mon Sep 17 00:00:00 2001 From: Matt Newcombe Date: Mon, 7 Dec 2020 20:16:38 +0000 Subject: [PATCH 0141/1212] resource/aws_ssm_maintenance_window: Add schedule_offset argument (#16569) * add schedule_offset parameter to aws_ssm_maintenance_window * add aws_ssm_maintenance_window schedule_offset to docs * change schedule_offset to integer in test Co-authored-by: Brian Flad Co-authored-by: Matt Newcombe Co-authored-by: Brian Flad --- aws/resource_aws_ssm_maintenance_window.go | 16 +++++++ ...esource_aws_ssm_maintenance_window_test.go | 46 +++++++++++++++++++ .../r/ssm_maintenance_window.html.markdown | 1 + 3 files changed, 63 insertions(+) diff --git a/aws/resource_aws_ssm_maintenance_window.go b/aws/resource_aws_ssm_maintenance_window.go index aca2cad64e5..419768c0cbf 100644 --- a/aws/resource_aws_ssm_maintenance_window.go +++ b/aws/resource_aws_ssm_maintenance_window.go @@ -7,6 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ssm" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) @@ -63,6 +64,12 @@ func resourceAwsSsmMaintenanceWindow() *schema.Resource { Optional: true, }, + "schedule_offset": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 6), + }, + "start_date": { Type: schema.TypeString, Optional: true, @@ -100,6 +107,10 @@ func resourceAwsSsmMaintenanceWindowCreate(d *schema.ResourceData, meta interfac params.ScheduleTimezone = aws.String(v.(string)) } + if v, ok := d.GetOk("schedule_offset"); ok { + params.ScheduleOffset = aws.Int64(int64(v.(int))) + } + if v, ok := d.GetOk("start_date"); ok { params.StartDate = aws.String(v.(string)) } @@ -154,6 +165,10 @@ func resourceAwsSsmMaintenanceWindowUpdate(d *schema.ResourceData, meta interfac params.ScheduleTimezone = aws.String(v.(string)) } + if v, ok := 
d.GetOk("schedule_offset"); ok { + params.ScheduleOffset = aws.Int64(int64(v.(int))) + } + if v, ok := d.GetOk("start_date"); ok { params.StartDate = aws.String(v.(string)) } @@ -208,6 +223,7 @@ func resourceAwsSsmMaintenanceWindowRead(d *schema.ResourceData, meta interface{ d.Set("end_date", resp.EndDate) d.Set("name", resp.Name) d.Set("schedule_timezone", resp.ScheduleTimezone) + d.Set("schedule_offset", resp.ScheduleOffset) d.Set("schedule", resp.Schedule) d.Set("start_date", resp.StartDate) d.Set("description", resp.Description) diff --git a/aws/resource_aws_ssm_maintenance_window_test.go b/aws/resource_aws_ssm_maintenance_window_test.go index f954c4dc1aa..489b037b31b 100644 --- a/aws/resource_aws_ssm_maintenance_window_test.go +++ b/aws/resource_aws_ssm_maintenance_window_test.go @@ -96,6 +96,7 @@ func TestAccAWSSSMMaintenanceWindow_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "end_date", ""), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "schedule_timezone", ""), + resource.TestCheckResourceAttr(resourceName, "schedule_offset", "0"), resource.TestCheckResourceAttr(resourceName, "schedule", "cron(0 16 ? * TUE *)"), resource.TestCheckResourceAttr(resourceName, "start_date", ""), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), @@ -462,6 +463,39 @@ func TestAccAWSSSMMaintenanceWindow_ScheduleTimezone(t *testing.T) { }) } +func TestAccAWSSSMMaintenanceWindow_ScheduleOffset(t *testing.T) { + var maintenanceWindow1, maintenanceWindow2 ssm.MaintenanceWindowIdentity + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_ssm_maintenance_window.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSMMaintenanceWindowDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSSMMaintenanceWindowConfigScheduleOffset(rName, 2), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSMMaintenanceWindowExists(resourceName, &maintenanceWindow1), + resource.TestCheckResourceAttr(resourceName, "schedule_offset", "2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSSSMMaintenanceWindowConfigScheduleOffset(rName, 5), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSMMaintenanceWindowExists(resourceName, &maintenanceWindow2), + resource.TestCheckResourceAttr(resourceName, "schedule_offset", "5"), + ), + }, + }, + }) +} + func TestAccAWSSSMMaintenanceWindow_StartDate(t *testing.T) { var maintenanceWindow1, maintenanceWindow2, maintenanceWindow3 ssm.MaintenanceWindowIdentity startDate1 := time.Now().UTC().Add(1 * time.Hour).Format(time.RFC3339) @@ -719,6 +753,18 @@ resource "aws_ssm_maintenance_window" "test" { `, rName, scheduleTimezone) } +func testAccAWSSSMMaintenanceWindowConfigScheduleOffset(rName string, scheduleOffset int) string { + return fmt.Sprintf(` +resource "aws_ssm_maintenance_window" "test" { + cutoff = 1 + duration = 3 + name = %q + schedule = "cron(0 16 ? 
* TUE *)" + schedule_offset = %d +} +`, rName, scheduleOffset) +} + func testAccAWSSSMMaintenanceWindowConfigStartDate(rName, startDate string) string { return fmt.Sprintf(` resource "aws_ssm_maintenance_window" "test" { diff --git a/website/docs/r/ssm_maintenance_window.html.markdown b/website/docs/r/ssm_maintenance_window.html.markdown index af0899bb009..41c0bee3794 100644 --- a/website/docs/r/ssm_maintenance_window.html.markdown +++ b/website/docs/r/ssm_maintenance_window.html.markdown @@ -34,6 +34,7 @@ The following arguments are supported: * `enabled` - (Optional) Whether the maintenance window is enabled. Default: `true`. * `end_date` - (Optional) Timestamp in [ISO-8601 extended format](https://www.iso.org/iso-8601-date-and-time-format.html) when to no longer run the maintenance window. * `schedule_timezone` - (Optional) Timezone for schedule in [Internet Assigned Numbers Authority (IANA) Time Zone Database format](https://www.iana.org/time-zones). For example: `America/Los_Angeles`, `etc/UTC`, or `Asia/Seoul`. +* `schedule_offset` - (Optional) The number of days to wait after the date and time specified by a CRON expression before running the maintenance window. * `start_date` - (Optional) Timestamp in [ISO-8601 extended format](https://www.iso.org/iso-8601-date-and-time-format.html) when to begin the maintenance window. * `tags` - (Optional) A map of tags to assign to the resource. From ff14b33ea3d51cb7015010341582101a11ba9115 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Mon, 7 Dec 2020 15:18:54 -0500 Subject: [PATCH 0142/1212] Update CHANGELOG for #16569 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2252d8133c3..a06cebe142d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ ENHANCEMENTS * resource/aws_appmesh_virtual_node: Add `listener.connection_pool` attribute [GH-16167] * resource/aws_appmesh_virtual_node: Add `listener.outlier_detection` attribute [GH-16167] +* resource/aws_ssm_maintenance_window: Add `schedule_offset` argument [GH-16569] ## 3.20.0 (December 03, 2020) From 64e459ff8fdf016c13bcac3bc0780c3dec1ba0b0 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Mon, 7 Dec 2020 15:19:43 -0500 Subject: [PATCH 0143/1212] tests/resource/aws_ssm_maintenance_window: Fix TestAccAWSSSMMaintenanceWindow_ScheduleOffset configuration Output from acceptance testing: ``` --- PASS: TestAccAWSSSMMaintenanceWindow_ScheduleOffset (22.05s) ``` --- aws/resource_aws_ssm_maintenance_window_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_ssm_maintenance_window_test.go b/aws/resource_aws_ssm_maintenance_window_test.go index 489b037b31b..0f2b01e61a2 100644 --- a/aws/resource_aws_ssm_maintenance_window_test.go +++ b/aws/resource_aws_ssm_maintenance_window_test.go @@ -759,7 +759,7 @@ resource "aws_ssm_maintenance_window" "test" { cutoff = 1 duration = 3 name = %q - schedule = "cron(0 16 ? * TUE *)" + schedule = "cron(0 16 ? 
* TUE#3 *)" schedule_offset = %d } `, rName, scheduleOffset) From 6ca440c4349903b43f3a23861c4b6170265f5fb6 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Mon, 7 Dec 2020 22:30:12 +0200 Subject: [PATCH 0144/1212] remove validation --- aws/resource_aws_backup_plan.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_backup_plan.go b/aws/resource_aws_backup_plan.go index 34cbe737214..89f4515e96a 100644 --- a/aws/resource_aws_backup_plan.go +++ b/aws/resource_aws_backup_plan.go @@ -78,9 +78,8 @@ func resourceAwsBackupPlan() *schema.Resource { Optional: true, }, "delete_after": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntAtLeast(90), + Type: schema.TypeInt, + Optional: true, }, }, }, From cfed90e5a7bbfd84f8a4bd9a9f4f157ac82afdc0 Mon Sep 17 00:00:00 2001 From: pjaudiomv Date: Mon, 7 Dec 2020 15:36:25 -0500 Subject: [PATCH 0145/1212] update ssm maintenance window task resource docs --- website/docs/r/ssm_maintenance_window_task.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/ssm_maintenance_window_task.html.markdown b/website/docs/r/ssm_maintenance_window_task.html.markdown index 2149d8dc500..e41674dc108 100644 --- a/website/docs/r/ssm_maintenance_window_task.html.markdown +++ b/website/docs/r/ssm_maintenance_window_task.html.markdown @@ -140,7 +140,7 @@ The following arguments are supported: * `window_id` - (Required) The Id of the maintenance window to register the task with. * `max_concurrency` - (Required) The maximum number of targets this task can be run for in parallel. * `max_errors` - (Required) The maximum number of errors allowed before this task stops being scheduled. -* `task_type` - (Required) The type of task being registered. The only allowed value is `RUN_COMMAND`. +* `task_type` - (Required) The type of task being registered. Valid options are `AUTOMATION`, `LAMBDA`, `RUN_COMMAND` or `STEP_FUNCTIONS`. * `task_arn` - (Required) The ARN of the task to execute. * `service_role_arn` - (Required) The role that should be assumed when executing the task. * `name` - (Optional) The name of the maintenance window task. 
From abddf85414bbc9a651b1acf1c065b97ffab5d49a Mon Sep 17 00:00:00 2001 From: Gaylord Mazelier Date: Sat, 9 May 2020 17:42:26 +0200 Subject: [PATCH 0146/1212] New resource: aws_lakeformation_datalake_settings --- aws/provider.go | 1 + ...rce_aws_lakeformation_datalake_settings.go | 135 ++++++++++++++++++ ...ws_lakeformation_datalake_settings_test.go | 66 +++++++++ 3 files changed, 202 insertions(+) create mode 100644 aws/resource_aws_lakeformation_datalake_settings.go create mode 100644 aws/resource_aws_lakeformation_datalake_settings_test.go diff --git a/aws/provider.go b/aws/provider.go index 79d050c6a0a..098c03c073b 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -740,6 +740,7 @@ func Provider() *schema.Provider { "aws_kms_grant": resourceAwsKmsGrant(), "aws_kms_key": resourceAwsKmsKey(), "aws_kms_ciphertext": resourceAwsKmsCiphertext(), + "aws_lakeformation_datalake_settings": resourceAwsLakeFormationDataLakeSettings(), "aws_lambda_alias": resourceAwsLambdaAlias(), "aws_lambda_code_signing_config": resourceAwsLambdaCodeSigningConfig(), "aws_lambda_event_source_mapping": resourceAwsLambdaEventSourceMapping(), diff --git a/aws/resource_aws_lakeformation_datalake_settings.go b/aws/resource_aws_lakeformation_datalake_settings.go new file mode 100644 index 00000000000..c708abcef59 --- /dev/null +++ b/aws/resource_aws_lakeformation_datalake_settings.go @@ -0,0 +1,135 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/lakeformation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func resourceAwsLakeFormationDataLakeSettings() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsLakeFormationDataLakeSettingsPut, + Update: resourceAwsLakeFormationDataLakeSettingsPut, + Read: resourceAwsLakeFormationDataLakeSettingsRead, + Delete: resourceAwsLakeFormationDataLakeSettingsReset, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "catalog_id": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + }, + "admins": { + Type: schema.TypeList, + Required: true, + MinItems: 0, + MaxItems: 10, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + }, + }, + } +} + +func resourceAwsLakeFormationDataLakeSettingsPut(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lakeformationconn + catalogId := createAwsDataCatalogId(d, meta.(*AWSClient).accountid) + + input := &lakeformation.PutDataLakeSettingsInput{ + CatalogId: aws.String(catalogId), + DataLakeSettings: &lakeformation.DataLakeSettings{ + DataLakeAdmins: expandAdmins(d), + }, + } + + _, err := conn.PutDataLakeSettings(input) + if err != nil { + return fmt.Errorf("Error updating DataLakeSettings: %s", err) + } + + d.SetId(fmt.Sprintf("lakeformation:settings:%s", catalogId)) + d.Set("catalog_id", catalogId) + + return resourceAwsLakeFormationDataLakeSettingsRead(d, meta) +} + +func resourceAwsLakeFormationDataLakeSettingsRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lakeformationconn + catalogId := d.Get("catalog_id").(string) + + input := &lakeformation.GetDataLakeSettingsInput{ + CatalogId: aws.String(catalogId), + } + + out, err := conn.GetDataLakeSettings(input) + if err != nil { + return fmt.Errorf("Error reading DataLakeSettings: %s", err) + } + + d.Set("catalog_id", catalogId) + if 
err := d.Set("admins", flattenAdmins(out.DataLakeSettings.DataLakeAdmins)); err != nil {
+		return fmt.Errorf("Error setting admins from DataLakeSettings: %s", err)
+	}
+	// TODO: Add CreateDatabaseDefaultPermissions and CreateTableDefaultPermissions
+
+	return nil
+}
+
+func resourceAwsLakeFormationDataLakeSettingsReset(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).lakeformationconn
+	catalogId := d.Get("catalog_id").(string)
+
+	input := &lakeformation.PutDataLakeSettingsInput{
+		CatalogId: aws.String(catalogId),
+		DataLakeSettings: &lakeformation.DataLakeSettings{
+			DataLakeAdmins: make([]*lakeformation.DataLakePrincipal, 0),
+		},
+	}
+
+	_, err := conn.PutDataLakeSettings(input)
+	if err != nil {
+		return fmt.Errorf("Error resetting DataLakeSettings: %s", err)
+	}
+
+	return nil
+}
+
+func createAwsDataCatalogId(d *schema.ResourceData, accountId string) (catalogId string) {
+	if inputCatalogId, ok := d.GetOkExists("catalog_id"); ok {
+		catalogId = inputCatalogId.(string)
+	} else {
+		catalogId = accountId
+	}
+	return
+}
+
+func expandAdmins(d *schema.ResourceData) []*lakeformation.DataLakePrincipal {
+	xs := d.Get("admins")
+	ys := make([]*lakeformation.DataLakePrincipal, len(xs.([]interface{})))
+
+	for i, x := range xs.([]interface{}) {
+		ys[i] = &lakeformation.DataLakePrincipal{
+			DataLakePrincipalIdentifier: aws.String(x.(string)),
+		}
+	}
+
+	return ys
+}
+
+func flattenAdmins(xs []*lakeformation.DataLakePrincipal) []string {
+	admins := make([]string, len(xs))
+	for i, x := range xs {
+		admins[i] = aws.StringValue(x.DataLakePrincipalIdentifier)
+	}
+
+	return admins
+}
diff --git a/aws/resource_aws_lakeformation_datalake_settings_test.go b/aws/resource_aws_lakeformation_datalake_settings_test.go
new file mode 100644
index 00000000000..d2ac3b0c538
--- /dev/null
+++ b/aws/resource_aws_lakeformation_datalake_settings_test.go
@@ -0,0 +1,66 @@
+package aws
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+)
+
+func TestAccAWSLakeFormationDataLakeSettings_basic(t *testing.T) {
+	callerIdentityName := "data.aws_caller_identity.current"
+	resourceName := "aws_lakeformation_datalake_settings.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:  func() { testAccPreCheck(t) },
+		Providers: testAccProviders,
+		// TODO: CheckDestroy: testAccCheckAWSLakeFormationDataLakeSettingsDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAWSLakeFormationDataLakeSettingsConfig_basic,
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"),
+					resource.TestCheckResourceAttr(resourceName, "admins.#", "1"),
+					resource.TestCheckResourceAttrPair(callerIdentityName, "arn", resourceName, "admins.0"),
+				),
+			},
+		},
+	})
+}
+
+const testAccAWSLakeFormationDataLakeSettingsConfig_basic = `
+data "aws_caller_identity" "current" {}
+
+resource "aws_lakeformation_datalake_settings" "test" {
+  admins = ["${data.aws_caller_identity.current.arn}"]
+}
+`
+
+func TestAccAWSLakeFormationDataLakeSettings_withCatalogId(t *testing.T) {
+	callerIdentityName := "data.aws_caller_identity.current"
+	resourceName := "aws_lakeformation_datalake_settings.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:  func() { testAccPreCheck(t) },
+		Providers: testAccProviders,
+		// TODO: CheckDestroy: testAccCheckAWSLakeFormationDataLakeSettingsDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAWSLakeFormationDataLakeSettingsConfig_withCatalogId,
+
Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), + resource.TestCheckResourceAttr(resourceName, "admins.#", "1"), + resource.TestCheckResourceAttrPair(callerIdentityName, "arn", resourceName, "admins.0"), + ), + }, + }, + }) +} + +const testAccAWSLakeFormationDataLakeSettingsConfig_withCatalogId = ` +data "aws_caller_identity" "current" {} + +resource "aws_lakeformation_datalake_settings" "test" { + catalog_id = "${data.aws_caller_identity.current.account_id}" + admins = ["${data.aws_caller_identity.current.arn}"] +} +` From 9d54d249bd8ebb59231c68c8b9e7cfe80b1b451f Mon Sep 17 00:00:00 2001 From: Gaylord Mazelier Date: Sat, 9 May 2020 19:21:03 +0200 Subject: [PATCH 0147/1212] Check resource state on destroy --- ...ws_lakeformation_datalake_settings_test.go | 44 ++++++++++++++++--- 1 file changed, 38 insertions(+), 6 deletions(-) diff --git a/aws/resource_aws_lakeformation_datalake_settings_test.go b/aws/resource_aws_lakeformation_datalake_settings_test.go index d2ac3b0c538..19597dc2571 100644 --- a/aws/resource_aws_lakeformation_datalake_settings_test.go +++ b/aws/resource_aws_lakeformation_datalake_settings_test.go @@ -1,9 +1,14 @@ package aws import ( + "fmt" "testing" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/lakeformation" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func TestAccAWSLakeFormationDataLakeSettings_basic(t *testing.T) { @@ -11,13 +16,14 @@ func TestAccAWSLakeFormationDataLakeSettings_basic(t *testing.T) { resourceName := "aws_lakeformation_datalake_settings.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - // TODO: CheckDestroy: testAccCheckAWSLakeFormationDataLakeSettingsDestroy, + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLakeFormationDataLakeSettingsEmpty, Steps: []resource.TestStep{ { Config: testAccAWSLakeFormationDataLakeSettingsConfig_basic, Check: resource.ComposeTestCheckFunc( + testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), resource.TestCheckResourceAttr(resourceName, "admins.#", "1"), resource.TestCheckResourceAttrPair(callerIdentityName, "arn", resourceName, "admins.0"), @@ -40,13 +46,14 @@ func TestAccAWSLakeFormationDataLakeSettings_withCatalogId(t *testing.T) { resourceName := "aws_lakeformation_datalake_settings.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - // TODO: CheckDestroy: testAccCheckAWSLakeFormationDataLakeSettingsDestroy, + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLakeFormationDataLakeSettingsEmpty, Steps: []resource.TestStep{ { Config: testAccAWSLakeFormationDataLakeSettingsConfig_withCatalogId, Check: resource.ComposeTestCheckFunc( + testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), resource.TestCheckResourceAttr(resourceName, "admins.#", "1"), resource.TestCheckResourceAttrPair(callerIdentityName, "arn", resourceName, "admins.0"), @@ -64,3 +71,28 @@ resource "aws_lakeformation_datalake_settings" "test" { admins = ["${data.aws_caller_identity.current.arn}"] } ` + 
+func testAccCheckAWSLakeFormationDataLakeSettingsEmpty(s *terraform.State) error {
+	conn := testAccProvider.Meta().(*AWSClient).lakeformationconn
+
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "aws_lakeformation_datalake_settings" {
+			continue
+		}
+
+		input := &lakeformation.GetDataLakeSettingsInput{
+			CatalogId: aws.String(testAccProvider.Meta().(*AWSClient).accountid),
+		}
+
+		out, err := conn.GetDataLakeSettings(input)
+		if err != nil {
+			return fmt.Errorf("Error reading DataLakeSettings: %s", err)
+		}
+
+		if len(out.DataLakeSettings.DataLakeAdmins) > 0 {
+			return fmt.Errorf("Error: admins list in DataLakeSettings not empty: %s", out)
+		}
+	}
+
+	return nil
+}
From e7f3f7d05a3938208f62c8b2e2a41cf837d21715 Mon Sep 17 00:00:00 2001
From: Gaylord Mazelier
Date: Sat, 9 May 2020 19:21:36 +0200
Subject: [PATCH 0148/1212] Add documentation

---
 ...eformation_datalake_settings.html.markdown | 46 +++++++++++++++++++
 1 file changed, 46 insertions(+)
 create mode 100644 website/docs/r/lakeformation_datalake_settings.html.markdown

diff --git a/website/docs/r/lakeformation_datalake_settings.html.markdown b/website/docs/r/lakeformation_datalake_settings.html.markdown
new file mode 100644
index 00000000000..9bffac8da53
--- /dev/null
+++ b/website/docs/r/lakeformation_datalake_settings.html.markdown
@@ -0,0 +1,46 @@
+---
+subcategory: "LakeFormation"
+layout: "aws"
+page_title: "AWS: aws_lakeformation_datalake_settings"
+description: |-
+  Manages the data lake settings for the current account
+---
+
+# Resource: aws_lakeformation_datalake_settings
+
+Manages the data lake settings for the current account.
+
+## Example Usage
+
+```hcl
+data "aws_iam_user" "existing_user" {
+  user_name = "an_existing_user_name"
+}
+
+data "aws_iam_role" "existing_role" {
+  name = "an_existing_role_name"
+}
+
+resource "aws_lakeformation_datalake_settings" "example" {
+  admins = [
+    "${data.aws_iam_user.existing_user.arn}",
+    "${data.aws_iam_role.existing_role.arn}",
+  ]
+}
+```
+
+## Argument Reference
+
+The following arguments are required:
+
+* `admins` – (Required) A list of up to 10 AWS Lake Formation principals (users or roles).
+
+The following arguments are optional:
+
+* `catalog_id` – (Optional) The identifier for the Data Catalog. Defaults to the account ID.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - Resource identifier with the pattern `lakeformation:settings:ACCOUNT_ID`.
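Lake Formation exposes no delete API for these settings, so the resource's `Delete` maps to the `Reset` function from the earlier commit: a `PutDataLakeSettings` call that writes back an empty admins list, which is exactly the post-destroy state `testAccCheckAWSLakeFormationDataLakeSettingsEmpty` asserts. A minimal standalone sketch of that reset call against aws-sdk-go (the region, and the catalog ID, which is just the account ID, are placeholders):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lakeformation"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	conn := lakeformation.New(sess)

	// "Deleting" the settings means writing them back with no admins.
	// The catalog ID is the AWS account ID; 123456789012 is a placeholder.
	input := &lakeformation.PutDataLakeSettingsInput{
		CatalogId: aws.String("123456789012"),
		DataLakeSettings: &lakeformation.DataLakeSettings{
			DataLakeAdmins: []*lakeformation.DataLakePrincipal{},
		},
	}

	if _, err := conn.PutDataLakeSettings(input); err != nil {
		log.Fatalf("resetting data lake settings: %s", err)
	}
	fmt.Println("data lake admins cleared")
}
```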
From c4e99923ad0c50c019e6e4f767e2cc8838669eee Mon Sep 17 00:00:00 2001 From: Gaylord Mazelier Date: Sun, 10 May 2020 14:57:08 +0200 Subject: [PATCH 0149/1212] Add AWS region in settings identifier --- aws/resource_aws_lakeformation_datalake_settings.go | 3 ++- website/docs/r/lakeformation_datalake_settings.html.markdown | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_lakeformation_datalake_settings.go b/aws/resource_aws_lakeformation_datalake_settings.go index c708abcef59..ad09ced92e7 100644 --- a/aws/resource_aws_lakeformation_datalake_settings.go +++ b/aws/resource_aws_lakeformation_datalake_settings.go @@ -56,7 +56,8 @@ func resourceAwsLakeFormationDataLakeSettingsPut(d *schema.ResourceData, meta in return fmt.Errorf("Error updating DataLakeSettings: %s", err) } - d.SetId(fmt.Sprintf("lakeformation:settings:%s", catalogId)) + awsRegion := meta.(*AWSClient).region + d.SetId(fmt.Sprintf("lakeformation:%s:%s", awsRegion, catalogId)) d.Set("catalog_id", catalogId) return resourceAwsLakeFormationDataLakeSettingsRead(d, meta) diff --git a/website/docs/r/lakeformation_datalake_settings.html.markdown b/website/docs/r/lakeformation_datalake_settings.html.markdown index 9bffac8da53..a0271448b5e 100644 --- a/website/docs/r/lakeformation_datalake_settings.html.markdown +++ b/website/docs/r/lakeformation_datalake_settings.html.markdown @@ -43,4 +43,4 @@ The following arguments are optional: In addition to all arguments above, the following attributes are exported: -* `id` - Resource identifier with the pattern `lakeformation:settings:ACCOUNT_ID`. +* `id` - Resource identifier with the pattern `lakeformation:AWS_REGION:ACCOUNT_ID`. From 5b52ecff2d35071de4f695cff58649f67eb5eec5 Mon Sep 17 00:00:00 2001 From: Gaylord Mazelier Date: Sun, 10 May 2020 19:19:00 +0200 Subject: [PATCH 0150/1212] Add missing website LakeFormation subcategory --- website/allowed-subcategories.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/website/allowed-subcategories.txt b/website/allowed-subcategories.txt index e80eb37e243..2bf7117b651 100644 --- a/website/allowed-subcategories.txt +++ b/website/allowed-subcategories.txt @@ -71,6 +71,7 @@ Kinesis Data Analytics (SQL Applications) Kinesis Data Analytics v2 (SQL and Flink Applications) Kinesis Firehose Kinesis Video +LakeFormation Lambda Lex License Manager From e62033e77518d367168a0b3110563647eafe96bf Mon Sep 17 00:00:00 2001 From: Gaylord Mazelier Date: Mon, 11 May 2020 08:37:29 +0200 Subject: [PATCH 0151/1212] Rename expand and flatten functions for admins list --- aws/resource_aws_lakeformation_datalake_settings.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_lakeformation_datalake_settings.go b/aws/resource_aws_lakeformation_datalake_settings.go index ad09ced92e7..f6fc1eb0dab 100644 --- a/aws/resource_aws_lakeformation_datalake_settings.go +++ b/aws/resource_aws_lakeformation_datalake_settings.go @@ -47,7 +47,7 @@ func resourceAwsLakeFormationDataLakeSettingsPut(d *schema.ResourceData, meta in input := &lakeformation.PutDataLakeSettingsInput{ CatalogId: aws.String(catalogId), DataLakeSettings: &lakeformation.DataLakeSettings{ - DataLakeAdmins: expandAdmins(d), + DataLakeAdmins: expandLakeFormationDataLakePrincipal(d), }, } @@ -77,7 +77,7 @@ func resourceAwsLakeFormationDataLakeSettingsRead(d *schema.ResourceData, meta i } d.Set("catalog_id", catalogId) - if err := d.Set("admins", flattenAdmins(out.DataLakeSettings.DataLakeAdmins)); err != nil { + if err := d.Set("admins", 
flattenLakeFormationDataLakePrincipal(out.DataLakeSettings.DataLakeAdmins)); err != nil { return fmt.Errorf("Error setting admins from DataLakeSettings: %s", err) } // TODO: Add CreateDatabaseDefaultPermissions and CreateTableDefaultPermissions @@ -113,7 +113,7 @@ func createAwsDataCatalogId(d *schema.ResourceData, accountId string) (catalogId return } -func expandAdmins(d *schema.ResourceData) []*lakeformation.DataLakePrincipal { +func expandLakeFormationDataLakePrincipal(d *schema.ResourceData) []*lakeformation.DataLakePrincipal { xs := d.Get("admins") ys := make([]*lakeformation.DataLakePrincipal, len(xs.([]interface{}))) @@ -126,7 +126,7 @@ func expandAdmins(d *schema.ResourceData) []*lakeformation.DataLakePrincipal { return ys } -func flattenAdmins(xs []*lakeformation.DataLakePrincipal) []string { +func flattenLakeFormationDataLakePrincipal(xs []*lakeformation.DataLakePrincipal) []string { admins := make([]string, len(xs)) for i, x := range xs { admins[i] = aws.StringValue(x.DataLakePrincipalIdentifier) From efbe990ffe7654600011a3ca75a8716b2a93a1d6 Mon Sep 17 00:00:00 2001 From: Gaylord Mazelier Date: Mon, 11 May 2020 09:41:49 +0200 Subject: [PATCH 0152/1212] Make data lake admins list a set --- ...rce_aws_lakeformation_datalake_settings.go | 15 +++++++------ ...ws_lakeformation_datalake_settings_test.go | 22 +++++++++++++++++-- 2 files changed, 28 insertions(+), 9 deletions(-) diff --git a/aws/resource_aws_lakeformation_datalake_settings.go b/aws/resource_aws_lakeformation_datalake_settings.go index f6fc1eb0dab..6fdd016fd53 100644 --- a/aws/resource_aws_lakeformation_datalake_settings.go +++ b/aws/resource_aws_lakeformation_datalake_settings.go @@ -27,7 +27,8 @@ func resourceAwsLakeFormationDataLakeSettings() *schema.Resource { Computed: true, }, "admins": { - Type: schema.TypeList, + Type: schema.TypeSet, + Set: schema.HashString, Required: true, MinItems: 0, MaxItems: 10, @@ -114,10 +115,10 @@ func createAwsDataCatalogId(d *schema.ResourceData, accountId string) (catalogId } func expandLakeFormationDataLakePrincipal(d *schema.ResourceData) []*lakeformation.DataLakePrincipal { - xs := d.Get("admins") - ys := make([]*lakeformation.DataLakePrincipal, len(xs.([]interface{}))) + xs := d.Get("admins").(*schema.Set).List() + ys := make([]*lakeformation.DataLakePrincipal, len(xs)) - for i, x := range xs.([]interface{}) { + for i, x := range xs { ys[i] = &lakeformation.DataLakePrincipal{ DataLakePrincipalIdentifier: aws.String(x.(string)), } @@ -127,10 +128,10 @@ func expandLakeFormationDataLakePrincipal(d *schema.ResourceData) []*lakeformati } func flattenLakeFormationDataLakePrincipal(xs []*lakeformation.DataLakePrincipal) []string { - admins := make([]string, len(xs)) + ys := make([]string, len(xs)) for i, x := range xs { - admins[i] = aws.StringValue(x.DataLakePrincipalIdentifier) + ys[i] = aws.StringValue(x.DataLakePrincipalIdentifier) } - return admins + return ys } diff --git a/aws/resource_aws_lakeformation_datalake_settings_test.go b/aws/resource_aws_lakeformation_datalake_settings_test.go index 19597dc2571..718a8033497 100644 --- a/aws/resource_aws_lakeformation_datalake_settings_test.go +++ b/aws/resource_aws_lakeformation_datalake_settings_test.go @@ -8,6 +8,7 @@ import ( "github.com/aws/aws-sdk-go/service/lakeformation" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/terraform" ) @@ -26,7 +27,7 @@ func TestAccAWSLakeFormationDataLakeSettings_basic(t *testing.T) { 
testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), resource.TestCheckResourceAttr(resourceName, "admins.#", "1"), - resource.TestCheckResourceAttrPair(callerIdentityName, "arn", resourceName, "admins.0"), + testAccCheckAWSLakeFormationDataLakePrincipal(callerIdentityName, "arn", resourceName, "admins"), ), }, }, @@ -56,7 +57,7 @@ func TestAccAWSLakeFormationDataLakeSettings_withCatalogId(t *testing.T) { testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), resource.TestCheckResourceAttr(resourceName, "admins.#", "1"), - resource.TestCheckResourceAttrPair(callerIdentityName, "arn", resourceName, "admins.0"), + testAccCheckAWSLakeFormationDataLakePrincipal(callerIdentityName, "arn", resourceName, "admins"), ), }, }, @@ -96,3 +97,20 @@ func testAccCheckAWSLakeFormationDataLakeSettingsEmpty(s *terraform.State) error return nil } + +func testAccCheckAWSLakeFormationDataLakePrincipal(nameFirst, keyFirst, nameSecond, keySecond string) resource.TestCheckFunc { + return func(s *terraform.State) error { + isFirst, err := primaryInstanceState(s, nameFirst) + if err != nil { + return err + } + + valueFirst, okFirst := isFirst.Attributes[keyFirst] + if !okFirst { + return fmt.Errorf("%s: Attribute %q not set", nameFirst, keyFirst) + } + + expandedKey := fmt.Sprintf("%s.%d", keySecond, schema.HashString(valueFirst)) + return resource.TestCheckResourceAttr(nameSecond, expandedKey, valueFirst)(s) + } +} From 6673cd56ea90acf17eaea467d7fe1411c276dc07 Mon Sep 17 00:00:00 2001 From: Gaylord Mazelier Date: Mon, 11 May 2020 09:48:05 +0200 Subject: [PATCH 0153/1212] Use Lake Formation official spelling --- website/allowed-subcategories.txt | 2 +- website/docs/r/lakeformation_datalake_settings.html.markdown | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/website/allowed-subcategories.txt b/website/allowed-subcategories.txt index 2bf7117b651..d6ab65240cd 100644 --- a/website/allowed-subcategories.txt +++ b/website/allowed-subcategories.txt @@ -71,7 +71,7 @@ Kinesis Data Analytics (SQL Applications) Kinesis Data Analytics v2 (SQL and Flink Applications) Kinesis Firehose Kinesis Video -LakeFormation +Lake Formation Lambda Lex License Manager diff --git a/website/docs/r/lakeformation_datalake_settings.html.markdown b/website/docs/r/lakeformation_datalake_settings.html.markdown index a0271448b5e..0385b9243db 100644 --- a/website/docs/r/lakeformation_datalake_settings.html.markdown +++ b/website/docs/r/lakeformation_datalake_settings.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "LakeFormation" +subcategory: "Lake Formation" layout: "aws" page_title: "AWS: aws_lakeformation_datalake_settings" description: |- From 92cb24ab906475ff4a49d2979dd031c4ea6a3db4 Mon Sep 17 00:00:00 2001 From: Gaylord Mazelier Date: Mon, 11 May 2020 22:39:23 +0200 Subject: [PATCH 0154/1212] Cleanup --- aws/resource_aws_lakeformation_datalake_settings.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/aws/resource_aws_lakeformation_datalake_settings.go b/aws/resource_aws_lakeformation_datalake_settings.go index 6fdd016fd53..0a6f313e376 100644 --- a/aws/resource_aws_lakeformation_datalake_settings.go +++ b/aws/resource_aws_lakeformation_datalake_settings.go @@ -15,9 +15,6 @@ func resourceAwsLakeFormationDataLakeSettings() *schema.Resource { Update: resourceAwsLakeFormationDataLakeSettingsPut, Read: 
resourceAwsLakeFormationDataLakeSettingsRead, Delete: resourceAwsLakeFormationDataLakeSettingsReset, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, Schema: map[string]*schema.Schema{ "catalog_id": { From 5ed60ca738ed185ffe4bf4fd300c67229f1c0745 Mon Sep 17 00:00:00 2001 From: Gaylord Mazelier Date: Sun, 10 May 2020 12:30:38 +0200 Subject: [PATCH 0155/1212] New resource: aws_lakeformation_resource --- aws/provider.go | 1 + aws/resource_aws_lakeformation_resource.go | 105 ++++++++++++ ...esource_aws_lakeformation_resource_test.go | 154 ++++++++++++++++++ 3 files changed, 260 insertions(+) create mode 100644 aws/resource_aws_lakeformation_resource.go create mode 100644 aws/resource_aws_lakeformation_resource_test.go diff --git a/aws/provider.go b/aws/provider.go index 098c03c073b..9be5075741d 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -741,6 +741,7 @@ func Provider() *schema.Provider { "aws_kms_key": resourceAwsKmsKey(), "aws_kms_ciphertext": resourceAwsKmsCiphertext(), "aws_lakeformation_datalake_settings": resourceAwsLakeFormationDataLakeSettings(), + "aws_lakeformation_resource": resourceAwsLakeFormationResource(), "aws_lambda_alias": resourceAwsLambdaAlias(), "aws_lambda_code_signing_config": resourceAwsLambdaCodeSigningConfig(), "aws_lambda_event_source_mapping": resourceAwsLambdaEventSourceMapping(), diff --git a/aws/resource_aws_lakeformation_resource.go b/aws/resource_aws_lakeformation_resource.go new file mode 100644 index 00000000000..55a1503d119 --- /dev/null +++ b/aws/resource_aws_lakeformation_resource.go @@ -0,0 +1,105 @@ +package aws + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/lakeformation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func resourceAwsLakeFormationResource() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsLakeFormationResourceRegister, + Read: resourceAwsLakeFormationResourceDescribe, + Delete: resourceAwsLakeFormationResourceDeregister, + + Schema: map[string]*schema.Schema{ + "resource_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.NoZeroValues, + }, + "role_arn": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.NoZeroValues, + }, + "use_service_linked_role": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + }, + "last_modified": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsLakeFormationResourceRegister(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lakeformationconn + resourceArn := d.Get("resource_arn").(string) + useServiceLinkedRole := d.Get("use_service_linked_role").(bool) + + input := &lakeformation.RegisterResourceInput{ + ResourceArn: aws.String(resourceArn), + UseServiceLinkedRole: aws.Bool(useServiceLinkedRole), + } + if v, ok := d.GetOk("role_arn"); ok { + input.RoleArn = aws.String(v.(string)) + } + + _, err := conn.RegisterResource(input) + if err != nil { + return fmt.Errorf("Error registering LakeFormation Resource: %s", err) + } + + d.SetId(fmt.Sprintf("lakeformation:resource:%s", resourceArn)) + + return resourceAwsLakeFormationResourceDescribe(d, meta) +} + +func resourceAwsLakeFormationResourceDescribe(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lakeformationconn + resourceArn := d.Get("resource_arn").(string) 
+ + input := &lakeformation.DescribeResourceInput{ + ResourceArn: aws.String(resourceArn), + } + + out, err := conn.DescribeResource(input) + if err != nil { + return fmt.Errorf("Error reading LakeFormation Resource: %s", err) + } + + d.Set("resource_arn", resourceArn) + d.Set("role_arn", out.ResourceInfo.RoleArn) + if out.ResourceInfo.LastModified != nil { + d.Set("last_modified", out.ResourceInfo.LastModified.Format(time.RFC3339)) + } + + return nil +} + +func resourceAwsLakeFormationResourceDeregister(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lakeformationconn + resourceArn := d.Get("resource_arn").(string) + + input := &lakeformation.DeregisterResourceInput{ + ResourceArn: aws.String(resourceArn), + } + + _, err := conn.DeregisterResource(input) + if err != nil { + return fmt.Errorf("Error deregistering LakeFormation Resource: %s", err) + } + + return nil +} diff --git a/aws/resource_aws_lakeformation_resource_test.go b/aws/resource_aws_lakeformation_resource_test.go new file mode 100644 index 00000000000..37576f22479 --- /dev/null +++ b/aws/resource_aws_lakeformation_resource_test.go @@ -0,0 +1,154 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/lakeformation" + + "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +func TestAccAWSLakeFormationResource_basic(t *testing.T) { + bName := acctest.RandomWithPrefix("lakeformation-test-bucket") + resourceName := "aws_lakeformation_resource.test" + bucketName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLakeFormationResourceDeregister, + Steps: []resource.TestStep{ + { + Config: testAccAWSLakeFormationResourceConfig_basic(bName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(bucketName, "arn", resourceName, "resource_arn"), + resource.TestCheckResourceAttr(resourceName, "use_service_linked_role", "true"), + resource.TestCheckResourceAttrSet(resourceName, "role_arn"), + ), + }, + }, + }) +} + +func TestAccAWSLakeFormationResource_withRole(t *testing.T) { + bName := acctest.RandomWithPrefix("lakeformation-test-bucket") + resourceName := "aws_lakeformation_resource.test" + bucketName := "aws_s3_bucket.test" + roleName := "data.aws_iam_role.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLakeFormationResourceDeregister, + Steps: []resource.TestStep{ + { + Config: testAccAWSLakeFormationResourceConfig_withRole(bName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(bucketName, "arn", resourceName, "resource_arn"), + resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "role_arn"), + resource.TestCheckResourceAttr(resourceName, "use_service_linked_role", "false"), + ), + }, + }, + }) +} + +func TestAccAWSLakeFormationResource_update(t *testing.T) { + bName := acctest.RandomWithPrefix("lakeformation-test-bucket") + resourceName := "aws_lakeformation_resource.test" + bucketName := "aws_s3_bucket.test" + roleName := "data.aws_iam_role.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: 
testAccCheckAWSLakeFormationResourceDeregister, + Steps: []resource.TestStep{ + { + Config: testAccAWSLakeFormationResourceConfig_basic(bName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(bucketName, "arn", resourceName, "resource_arn"), + resource.TestCheckResourceAttr(resourceName, "use_service_linked_role", "true"), + resource.TestCheckResourceAttrSet(resourceName, "role_arn"), + ), + }, + { + Config: testAccAWSLakeFormationResourceConfig_withRole(bName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(bucketName, "arn", resourceName, "resource_arn"), + resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "role_arn"), + resource.TestCheckResourceAttr(resourceName, "use_service_linked_role", "false"), + ), + }, + }, + }) +} + +func testAccAWSLakeFormationResourceConfig_basic(bName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_lakeformation_resource" "test" { + resource_arn = "${aws_s3_bucket.test.arn}" + use_service_linked_role = true +} +`, bName) +} + +func testAccAWSLakeFormationResourceConfig_withRole(bName string) string { + return fmt.Sprintf(` +data "aws_iam_role" "test" { + name = "AWSServiceRoleForLakeFormationDataAccess" +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_lakeformation_resource" "test" { + resource_arn = "${aws_s3_bucket.test.arn}" + role_arn = "${data.aws_iam_role.test.arn}" + use_service_linked_role = false +} +`, bName) +} + +func testAccCheckAWSLakeFormationResourceDeregister(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).lakeformationconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_lakeformation_resource" { + continue + } + + resourceArn := rs.Primary.Attributes["resource_arn"] + fmt.Printf("resourceArn: %s", resourceArn) + + input := &lakeformation.DescribeResourceInput{ + ResourceArn: aws.String(resourceArn), + } + + _, err := conn.DescribeResource(input) + if err == nil { + return fmt.Errorf("Resource still registered: %s", resourceArn) + } + if !isLakeFormationResourceNotFoundErr(err) { + return err + } + } + + return nil +} + +func isLakeFormationResourceNotFoundErr(err error) bool { + return isAWSErr( + err, + "EntityNotFoundException", + "Entity not found") +} From acc11529345672b3c06d596307fbebccae4850cb Mon Sep 17 00:00:00 2001 From: Gaylord Mazelier Date: Sun, 10 May 2020 14:48:21 +0200 Subject: [PATCH 0156/1212] Add resource documentation --- .../r/lakeformation_resource.html.markdown | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 website/docs/r/lakeformation_resource.html.markdown diff --git a/website/docs/r/lakeformation_resource.html.markdown b/website/docs/r/lakeformation_resource.html.markdown new file mode 100644 index 00000000000..2fd8aee6d11 --- /dev/null +++ b/website/docs/r/lakeformation_resource.html.markdown @@ -0,0 +1,42 @@ +--- +subcategory: "Lake Formation" +layout: "aws" +page_title: "AWS: aws_lakeformation_resource" +description: |- + Manages the data (Amazon S3 buckets and folders) that is being registered with AWS Lake Formation +--- + +# Resource: aws_lakeformation_resource + +Manages the data (Amazon S3 buckets and folders) that is being registered with AWS Lake Formation. 
+
+## Example Usage
+
+```hcl
+data "aws_s3_bucket" "example" {
+  bucket = "an-example-bucket"
+}
+
+resource "aws_lakeformation_resource" "example" {
+  resource_arn            = "${data.aws_s3_bucket.example.arn}"
+  use_service_linked_role = true
+}
+```
+
+## Argument Reference
+
+The following arguments are required:
+
+* `resource_arn` – (Required) The Amazon Resource Name (ARN) of the resource.
+
+* `use_service_linked_role` – (Required) Whether to register the resource using the Lake Formation service-linked role (`AWSServiceRoleForLakeFormationDataAccess`) instead of a custom `role_arn`.
+
+The following arguments are optional:
+
+* `role_arn` – (Optional) The ARN of the IAM role used to register the resource; computed when `use_service_linked_role` is `true`.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `last_modified` - The date and time the resource was last modified.
From 6ba35a392ecc687bde507e7e29d8ff4f30c959fd Mon Sep 17 00:00:00 2001
From: Gaylord Mazelier
Date: Mon, 11 May 2020 12:37:44 +0200
Subject: [PATCH 0157/1212] Cleanup

---
 aws/resource_aws_lakeformation_resource_test.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/aws/resource_aws_lakeformation_resource_test.go b/aws/resource_aws_lakeformation_resource_test.go
index 37576f22479..6210409ea2b 100644
--- a/aws/resource_aws_lakeformation_resource_test.go
+++ b/aws/resource_aws_lakeformation_resource_test.go
@@ -128,7 +128,6 @@ func testAccCheckAWSLakeFormationResourceDeregister(s *terraform.State) error {
 		}
 
 		resourceArn := rs.Primary.Attributes["resource_arn"]
-		fmt.Printf("resourceArn: %s", resourceArn)
 
 		input := &lakeformation.DescribeResourceInput{
 			ResourceArn: aws.String(resourceArn),
 		}
From 75d25e88ac9b9b653df3147594ba2337dd76840d Mon Sep 17 00:00:00 2001
From: Gaylord Mazelier
Date: Tue, 12 May 2020 23:10:21 +0200
Subject: [PATCH 0158/1212] Better validations

---
 aws/resource_aws_lakeformation_resource.go | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/aws/resource_aws_lakeformation_resource.go b/aws/resource_aws_lakeformation_resource.go
index 55a1503d119..76e7c81f813 100644
--- a/aws/resource_aws_lakeformation_resource.go
+++ b/aws/resource_aws_lakeformation_resource.go
@@ -7,7 +7,6 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/lakeformation"
 	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
-	"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
 )
 
 func resourceAwsLakeFormationResource() *schema.Resource {
@@ -21,14 +20,14 @@ func resourceAwsLakeFormationResource() *schema.Resource {
 			Type:         schema.TypeString,
 			Required:     true,
 			ForceNew:     true,
-			ValidateFunc: validation.NoZeroValues,
+			ValidateFunc: validateArn,
 		},
 		"role_arn": {
 			Type:         schema.TypeString,
 			Optional:     true,
 			Computed:     true,
 			ForceNew:     true,
-			ValidateFunc: validation.NoZeroValues,
+			ValidateFunc: validateArn,
 		},
 		"use_service_linked_role": {
 			Type:     schema.TypeBool,
From 7243d6da0666f9f612354b10ac5c15e6871e62d0 Mon Sep 17 00:00:00 2001
From: Ilia Lazebnik
Date: Tue, 8 Dec 2020 18:01:25 +0200
Subject: [PATCH 0159/1212] Apply suggestions from code review

Docs

Co-authored-by: Kit Ewbank
---
 website/docs/r/glue_schema.html.markdown | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/website/docs/r/glue_schema.html.markdown b/website/docs/r/glue_schema.html.markdown
index 10f303e73f2..f2f53fb1ec6 100644
--- a/website/docs/r/glue_schema.html.markdown
+++ b/website/docs/r/glue_schema.html.markdown
@@ -38,9 +38,9 @@ The following arguments are supported:
 
 In addition to all arguments above, the following attributes are exported:
 
-* `arn` - Amazon Resource Name (ARN) of Glue Schema.
-* `id` - Amazon Resource Name (ARN) of Glue Schema.
-* `registry_name` - The name of the registry.
+* `arn` - Amazon Resource Name (ARN) of the schema.
+* `id` - Amazon Resource Name (ARN) of the schema.
+* `registry_name` - The name of the Glue Registry.
 * `latest_schema_version` - The latest version of the schema associated with the returned schema definition.
 * `next_schema_version` - The next version of the schema associated with the returned schema definition.
 * `schema_checkpoint` - The version number of the checkpoint (the last time the compatibility mode was changed).
From dbef45ecc0d324a7e742ba336c1118f42e73f5ca Mon Sep 17 00:00:00 2001
From: Simon Davis
Date: Tue, 8 Dec 2020 08:38:12 -0800
Subject: [PATCH 0160/1212] Update CHANGELOG.md for #16589

---
 CHANGELOG.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a06cebe142d..c7fced44683 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,10 @@ ENHANCEMENTS
 * resource/aws_appmesh_virtual_node: Add `listener.outlier_detection` attribute [GH-16167]
 * resource/aws_ssm_maintenance_window: Add `schedule_offset` argument [GH-16569]
 
+BUG FIXES
+
+* resource/aws_workspaces_directory: Fix empty custom_security_group_id & default_ou [GH-16589]
+
 ## 3.20.0 (December 03, 2020)
 
 ENHANCEMENTS
From af919bfb2cbb4bba86fce8536e04c5873fde0121 Mon Sep 17 00:00:00 2001
From: Simon Davis
Date: Tue, 8 Dec 2020 09:14:01 -0800
Subject: [PATCH 0161/1212] Update CHANGELOG.md for #16459

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c7fced44683..309aec0327a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,7 @@ ENHANCEMENTS
 * resource/aws_appmesh_virtual_node: Add `listener.connection_pool` attribute [GH-16167]
 * resource/aws_appmesh_virtual_node: Add `listener.outlier_detection` attribute [GH-16167]
 * resource/aws_ssm_maintenance_window: Add `schedule_offset` argument [GH-16569]
+* resource/aws_workspaces_workspace: Add failed request error code along with message [GH-16459]
 
 BUG FIXES
 
From cb4c3cf062b55a7b406f0fad862694b3cb3b7984 Mon Sep 17 00:00:00 2001
From: Simon Davis
Date: Tue, 8 Dec 2020 10:13:38 -0800
Subject: [PATCH 0162/1212] Update CHANGELOG.md for #16612

---
 CHANGELOG.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 309aec0327a..a370e78817c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,9 @@
 ## 3.21.0 (Unreleased)
 
+FEATURES
+
+* **New Resource:** `aws_glue_schema` [GH-16612]
+
 ENHANCEMENTS
From 35d8d96efbd9fffc854656985036bdbe51cbe813 Mon Sep 17 00:00:00 2001
From: Dirk Avery
Date: Tue, 8 Dec 2020 12:23:59 -0600
Subject: [PATCH 0163/1212] Rework Lake Formation to design

---
 aws/provider.go                               |   1 -
 ...ws_lakeformation_datalake_settings_test.go |   7 +-
 aws/resource_aws_lakeformation_resource.go    | 100 +++++++----
 ...esource_aws_lakeformation_resource_test.go | 156 +++++++++++++-----
 .../r/lakeformation_resource.html.markdown    |  20 +--
 5 files changed, 189 insertions(+), 95 deletions(-)

diff --git a/aws/provider.go b/aws/provider.go
index 9be5075741d..914682ef195 100644
--- a/aws/provider.go
+++ b/aws/provider.go
@@ -740,7 +740,6 @@ func Provider() *schema.Provider {
 			"aws_kms_grant":                       resourceAwsKmsGrant(),
 			"aws_kms_key":                         resourceAwsKmsKey(),
 			"aws_kms_ciphertext":                  resourceAwsKmsCiphertext(),
-			"aws_lakeformation_datalake_settings":
resourceAwsLakeFormationDataLakeSettings(), "aws_lakeformation_resource": resourceAwsLakeFormationResource(), "aws_lambda_alias": resourceAwsLambdaAlias(), "aws_lambda_code_signing_config": resourceAwsLambdaCodeSigningConfig(), diff --git a/aws/resource_aws_lakeformation_datalake_settings_test.go b/aws/resource_aws_lakeformation_datalake_settings_test.go index 718a8033497..6ec6e1be397 100644 --- a/aws/resource_aws_lakeformation_datalake_settings_test.go +++ b/aws/resource_aws_lakeformation_datalake_settings_test.go @@ -6,10 +6,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lakeformation" - - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccAWSLakeFormationDataLakeSettings_basic(t *testing.T) { diff --git a/aws/resource_aws_lakeformation_resource.go b/aws/resource_aws_lakeformation_resource.go index 76e7c81f813..87d2afb65c3 100644 --- a/aws/resource_aws_lakeformation_resource.go +++ b/aws/resource_aws_lakeformation_resource.go @@ -2,70 +2,69 @@ package aws import ( "fmt" + "log" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lakeformation" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func resourceAwsLakeFormationResource() *schema.Resource { return &schema.Resource{ - Create: resourceAwsLakeFormationResourceRegister, - Read: resourceAwsLakeFormationResourceDescribe, - Delete: resourceAwsLakeFormationResourceDeregister, + Create: resourceAwsLakeFormationResourceCreate, + Read: resourceAwsLakeFormationResourceRead, + Update: resourceAwsLakeFormationResourceUpdate, + Delete: resourceAwsLakeFormationResourceDelete, Schema: map[string]*schema.Schema{ + "last_modified": { + Type: schema.TypeString, + Computed: true, + }, "resource_arn": { Type: schema.TypeString, Required: true, - ForceNew: true, ValidateFunc: validateArn, }, "role_arn": { Type: schema.TypeString, Optional: true, Computed: true, - ForceNew: true, ValidateFunc: validateArn, }, - "use_service_linked_role": { - Type: schema.TypeBool, - Required: true, - ForceNew: true, - }, - "last_modified": { - Type: schema.TypeString, - Computed: true, - }, }, } } -func resourceAwsLakeFormationResourceRegister(d *schema.ResourceData, meta interface{}) error { +func resourceAwsLakeFormationResourceCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).lakeformationconn resourceArn := d.Get("resource_arn").(string) - useServiceLinkedRole := d.Get("use_service_linked_role").(bool) input := &lakeformation.RegisterResourceInput{ - ResourceArn: aws.String(resourceArn), - UseServiceLinkedRole: aws.Bool(useServiceLinkedRole), + ResourceArn: aws.String(resourceArn), } + if v, ok := d.GetOk("role_arn"); ok { input.RoleArn = aws.String(v.(string)) + } else { + input.UseServiceLinkedRole = aws.Bool(true) } _, err := conn.RegisterResource(input) - if err != nil { - return fmt.Errorf("Error registering LakeFormation Resource: %s", err) - } - d.SetId(fmt.Sprintf("lakeformation:resource:%s", resourceArn)) + if tfawserr.ErrCodeEquals(err, lakeformation.ErrCodeAlreadyExistsException) { + log.Printf("[WARN] Lake Formation 
Resource (%s) already exists", resourceArn) + } else if err != nil { + return fmt.Errorf("error registering Lake Formation Resource (%s): %s", resourceArn, err) + } - return resourceAwsLakeFormationResourceDescribe(d, meta) + d.SetId(resourceArn) + return resourceAwsLakeFormationResourceRead(d, meta) } -func resourceAwsLakeFormationResourceDescribe(d *schema.ResourceData, meta interface{}) error { +func resourceAwsLakeFormationResourceRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).lakeformationconn resourceArn := d.Get("resource_arn").(string) @@ -73,21 +72,56 @@ func resourceAwsLakeFormationResourceDescribe(d *schema.ResourceData, meta inter ResourceArn: aws.String(resourceArn), } - out, err := conn.DescribeResource(input) + output, err := conn.DescribeResource(input) + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, lakeformation.ErrCodeEntityNotFoundException) { + log.Printf("[WARN] Lake Formation Resource (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if err != nil { - return fmt.Errorf("Error reading LakeFormation Resource: %s", err) + return fmt.Errorf("error getting Lake Formation Resource (%s): %w", d.Id(), err) } - d.Set("resource_arn", resourceArn) - d.Set("role_arn", out.ResourceInfo.RoleArn) - if out.ResourceInfo.LastModified != nil { - d.Set("last_modified", out.ResourceInfo.LastModified.Format(time.RFC3339)) + if output == nil || output.ResourceInfo == nil { + return fmt.Errorf("error getting Lake Formation Resource (%s): empty response", d.Id()) + } + + if err != nil { + return fmt.Errorf("error reading Lake Formation Resource: %s", err) + } + + d.Set("resource_arn", output.ResourceInfo.ResourceArn) + d.Set("role_arn", output.ResourceInfo.RoleArn) + if output.ResourceInfo.LastModified != nil { + d.Set("last_modified", output.ResourceInfo.LastModified.Format(time.RFC3339)) } return nil } -func resourceAwsLakeFormationResourceDeregister(d *schema.ResourceData, meta interface{}) error { +func resourceAwsLakeFormationResourceUpdate(d *schema.ResourceData, meta interface{}) error { + if _, ok := d.GetOk("role_arn"); !ok { + return resourceAwsLakeFormationResourceCreate(d, meta) + } + + conn := meta.(*AWSClient).lakeformationconn + + input := &lakeformation.UpdateResourceInput{ + ResourceArn: aws.String(d.Get("resource_arn").(string)), + RoleArn: aws.String(d.Get("role_arn").(string)), + } + + _, err := conn.UpdateResource(input) + if err != nil { + return fmt.Errorf("error updating Lake Formation Resource: %s", err) + } + + return resourceAwsLakeFormationResourceRead(d, meta) +} + +func resourceAwsLakeFormationResourceDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).lakeformationconn resourceArn := d.Get("resource_arn").(string) @@ -97,7 +131,7 @@ func resourceAwsLakeFormationResourceDeregister(d *schema.ResourceData, meta int _, err := conn.DeregisterResource(input) if err != nil { - return fmt.Errorf("Error deregistering LakeFormation Resource: %s", err) + return fmt.Errorf("error deregistering Lake Formation Resource: %s", err) } return nil diff --git a/aws/resource_aws_lakeformation_resource_test.go b/aws/resource_aws_lakeformation_resource_test.go index 6210409ea2b..a63504cd449 100644 --- a/aws/resource_aws_lakeformation_resource_test.go +++ b/aws/resource_aws_lakeformation_resource_test.go @@ -6,16 +6,16 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lakeformation" - - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - 
"github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccAWSLakeFormationResource_basic(t *testing.T) { - bName := acctest.RandomWithPrefix("lakeformation-test-bucket") + rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_lakeformation_resource.test" bucketName := "aws_s3_bucket.test" + roleName := "aws_iam_role.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -23,11 +23,11 @@ func TestAccAWSLakeFormationResource_basic(t *testing.T) { CheckDestroy: testAccCheckAWSLakeFormationResourceDeregister, Steps: []resource.TestStep{ { - Config: testAccAWSLakeFormationResourceConfig_basic(bName), + Config: testAccAWSLakeFormationResourceConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair(bucketName, "arn", resourceName, "resource_arn"), - resource.TestCheckResourceAttr(resourceName, "use_service_linked_role", "true"), - resource.TestCheckResourceAttrSet(resourceName, "role_arn"), + resource.TestCheckResourceAttrPair(resourceName, "resource_arn", bucketName, "arn"), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", roleName, "arn"), + resource.TestCheckResourceAttrSet(resourceName, "last_modified"), ), }, }, @@ -35,7 +35,7 @@ func TestAccAWSLakeFormationResource_basic(t *testing.T) { } func TestAccAWSLakeFormationResource_withRole(t *testing.T) { - bName := acctest.RandomWithPrefix("lakeformation-test-bucket") + rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_lakeformation_resource.test" bucketName := "aws_s3_bucket.test" roleName := "data.aws_iam_role.test" @@ -46,7 +46,7 @@ func TestAccAWSLakeFormationResource_withRole(t *testing.T) { CheckDestroy: testAccCheckAWSLakeFormationResourceDeregister, Steps: []resource.TestStep{ { - Config: testAccAWSLakeFormationResourceConfig_withRole(bName), + Config: testAccAWSLakeFormationResourceConfig_withRole(rName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(bucketName, "arn", resourceName, "resource_arn"), resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "role_arn"), @@ -58,7 +58,7 @@ func TestAccAWSLakeFormationResource_withRole(t *testing.T) { } func TestAccAWSLakeFormationResource_update(t *testing.T) { - bName := acctest.RandomWithPrefix("lakeformation-test-bucket") + rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_lakeformation_resource.test" bucketName := "aws_s3_bucket.test" roleName := "data.aws_iam_role.test" @@ -69,7 +69,7 @@ func TestAccAWSLakeFormationResource_update(t *testing.T) { CheckDestroy: testAccCheckAWSLakeFormationResourceDeregister, Steps: []resource.TestStep{ { - Config: testAccAWSLakeFormationResourceConfig_basic(bName), + Config: testAccAWSLakeFormationResourceConfig_basic(rName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(bucketName, "arn", resourceName, "resource_arn"), resource.TestCheckResourceAttr(resourceName, "use_service_linked_role", "true"), @@ -77,7 +77,7 @@ func TestAccAWSLakeFormationResource_update(t *testing.T) { ), }, { - Config: testAccAWSLakeFormationResourceConfig_withRole(bName), + Config: testAccAWSLakeFormationResourceConfig_withRole(rName), Check: resource.ComposeTestCheckFunc( 
resource.TestCheckResourceAttrPair(bucketName, "arn", resourceName, "resource_arn"), resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "role_arn"), @@ -88,37 +88,6 @@ func TestAccAWSLakeFormationResource_update(t *testing.T) { }) } -func testAccAWSLakeFormationResourceConfig_basic(bName string) string { - return fmt.Sprintf(` -resource "aws_s3_bucket" "test" { - bucket = %[1]q -} - -resource "aws_lakeformation_resource" "test" { - resource_arn = "${aws_s3_bucket.test.arn}" - use_service_linked_role = true -} -`, bName) -} - -func testAccAWSLakeFormationResourceConfig_withRole(bName string) string { - return fmt.Sprintf(` -data "aws_iam_role" "test" { - name = "AWSServiceRoleForLakeFormationDataAccess" -} - -resource "aws_s3_bucket" "test" { - bucket = %[1]q -} - -resource "aws_lakeformation_resource" "test" { - resource_arn = "${aws_s3_bucket.test.arn}" - role_arn = "${data.aws_iam_role.test.arn}" - use_service_linked_role = false -} -`, bName) -} - func testAccCheckAWSLakeFormationResourceDeregister(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).lakeformationconn @@ -151,3 +120,100 @@ func isLakeFormationResourceNotFoundErr(err error) bool { "EntityNotFoundException", "Entity not found") } + +func testAccAWSLakeFormationResourceConfig_basic(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_iam_role" "test" { + name = %[1]q + path = "/test/" + + assume_role_policy = < Date: Tue, 8 Dec 2020 11:29:32 -0800 Subject: [PATCH 0164/1212] Removes non-default refresh triggers and adds testing for cancelling in-progress refreshes --- .../service/autoscaling/waiter/waiter.go | 2 +- aws/resource_aws_autoscaling_group.go | 31 +- aws/resource_aws_autoscaling_group_test.go | 264 ++++++++++-------- .../docs/r/autoscaling_group.html.markdown | 2 +- 4 files changed, 156 insertions(+), 143 deletions(-) diff --git a/aws/internal/service/autoscaling/waiter/waiter.go b/aws/internal/service/autoscaling/waiter/waiter.go index b8985880b55..54e11e736c7 100644 --- a/aws/internal/service/autoscaling/waiter/waiter.go +++ b/aws/internal/service/autoscaling/waiter/waiter.go @@ -14,7 +14,7 @@ const ( InstanceRefreshStartedTimeout = InstanceRefreshCancelledTimeout // Maximum amount of time to wait for an Instance Refresh to be Cancelled - InstanceRefreshCancelledTimeout = 10 * time.Minute + InstanceRefreshCancelledTimeout = 15 * time.Minute ) func InstanceRefreshCancelled(conn *autoscaling.AutoScaling, asgName, instanceRefreshId string) (*autoscaling.InstanceRefresh, error) { diff --git a/aws/resource_aws_autoscaling_group.go b/aws/resource_aws_autoscaling_group.go index 4430c0dec7e..0def21ee46c 100644 --- a/aws/resource_aws_autoscaling_group.go +++ b/aws/resource_aws_autoscaling_group.go @@ -864,7 +864,6 @@ func resourceAwsAutoscalingGroupRead(d *schema.ResourceData, meta interface{}) e return nil } -// TODO: make this a waiter function func waitUntilAutoscalingGroupLoadBalancerTargetGroupsRemoved(conn *autoscaling.AutoScaling, asgName string) error { input := &autoscaling.DescribeLoadBalancerTargetGroupsInput{ AutoScalingGroupName: aws.String(asgName), @@ -872,7 +871,6 @@ func waitUntilAutoscalingGroupLoadBalancerTargetGroupsRemoved(conn *autoscaling. var tgRemoving bool for { - // TODO: generate Pages function output, err := conn.DescribeLoadBalancerTargetGroups(input) if err != nil { @@ -902,7 +900,6 @@ func waitUntilAutoscalingGroupLoadBalancerTargetGroupsRemoved(conn *autoscaling. 
return nil } -// TODO: make this a waiter function func waitUntilAutoscalingGroupLoadBalancerTargetGroupsAdded(conn *autoscaling.AutoScaling, asgName string) error { input := &autoscaling.DescribeLoadBalancerTargetGroupsInput{ AutoScalingGroupName: aws.String(asgName), @@ -910,7 +907,6 @@ func waitUntilAutoscalingGroupLoadBalancerTargetGroupsAdded(conn *autoscaling.Au var tgAdding bool for { - // TODO: generate Pages function output, err := conn.DescribeLoadBalancerTargetGroups(input) if err != nil { @@ -976,7 +972,6 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) if d.HasChange("mixed_instances_policy") { opts.MixedInstancesPolicy = expandAutoScalingMixedInstancesPolicy(d.Get("mixed_instances_policy").([]interface{})) - // TODO: optional trigger shouldRefreshInstances = true } @@ -1002,26 +997,18 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) opts.HealthCheckType = aws.String(d.Get("health_check_type").(string)) } - // TODO: this probably needs a wait for capacity if d.HasChange("vpc_zone_identifier") { opts.VPCZoneIdentifier = expandVpcZoneIdentifiers(d.Get("vpc_zone_identifier").(*schema.Set).List()) - // TODO: no - shouldRefreshInstances = true } - // TODO: this probably needs a wait for capacity if d.HasChange("availability_zones") { if v, ok := d.GetOk("availability_zones"); ok && v.(*schema.Set).Len() > 0 { opts.AvailabilityZones = expandStringList(v.(*schema.Set).List()) } - // TODO: no - shouldRefreshInstances = true } if d.HasChange("placement_group") { opts.PlacementGroup = aws.String(d.Get("placement_group").(string)) - // TODO: optional trigger - shouldRefreshInstances = true } if d.HasChange("termination_policies") { @@ -1201,9 +1188,15 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) } } + if instanceRefreshRaw, ok := d.GetOk("instance_refresh"); ok && shouldRefreshInstances { + if err := autoScalingGroupRefreshInstances(conn, d.Id(), instanceRefreshRaw.([]interface{})); err != nil { + return fmt.Errorf("failed to start instance refresh of Auto Scaling Group %s: %w", d.Id(), err) + } + } + if shouldWaitForCapacity { if err := waitForASGCapacity(d, meta, capacitySatisfiedUpdate); err != nil { - return fmt.Errorf("Error waiting for Auto Scaling Group Capacity: %s", err) + return fmt.Errorf("error waiting for Auto Scaling Group Capacity: %w", err) } } @@ -1219,12 +1212,6 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) } } - if instanceRefreshRaw, ok := d.GetOk("instance_refresh"); ok && shouldRefreshInstances { - if err := autoScalingGroupRefreshInstances(conn, d.Id(), instanceRefreshRaw.([]interface{})); err != nil { - return fmt.Errorf("failed to start instance refresh of Auto Scaling Group %s: %w", d.Id(), err) - } - } - return resourceAwsAutoscalingGroupRead(d, meta) } @@ -1768,7 +1755,6 @@ func flattenAutoScalingMixedInstancesPolicy(mixedInstancesPolicy *autoscaling.Mi return []interface{}{m} } -// TODO: make this a waiter function func waitUntilAutoscalingGroupLoadBalancersAdded(conn *autoscaling.AutoScaling, asgName string) error { input := &autoscaling.DescribeLoadBalancersInput{ AutoScalingGroupName: aws.String(asgName), @@ -1776,7 +1762,6 @@ func waitUntilAutoscalingGroupLoadBalancersAdded(conn *autoscaling.AutoScaling, var lbAdding bool for { - // TODO: generate Pages function output, err := conn.DescribeLoadBalancers(input) if err != nil { @@ -1806,7 +1791,6 @@ func waitUntilAutoscalingGroupLoadBalancersAdded(conn 
*autoscaling.AutoScaling, return nil } -// TODO: make this a waiter function func waitUntilAutoscalingGroupLoadBalancersRemoved(conn *autoscaling.AutoScaling, asgName string) error { input := &autoscaling.DescribeLoadBalancersInput{ AutoScalingGroupName: aws.String(asgName), @@ -1814,7 +1798,6 @@ func waitUntilAutoscalingGroupLoadBalancersRemoved(conn *autoscaling.AutoScaling var lbRemoving bool for { - // TODO: generate Pages function output, err := conn.DescribeLoadBalancers(input) if err != nil { diff --git a/aws/resource_aws_autoscaling_group_test.go b/aws/resource_aws_autoscaling_group_test.go index f86eb1ec17c..2c1efaef165 100644 --- a/aws/resource_aws_autoscaling_group_test.go +++ b/aws/resource_aws_autoscaling_group_test.go @@ -98,7 +98,7 @@ func TestAccAWSAutoScalingGroup_basic(t *testing.T) { testAccCheckAWSAutoScalingGroupAttributes(&group, randName), testAccMatchResourceAttrRegionalARN("aws_autoscaling_group.bar", "arn", "autoscaling", regexp.MustCompile(`autoScalingGroup:.+`)), resource.TestCheckTypeSetElemAttrPair("aws_autoscaling_group.bar", "availability_zones.*", "data.aws_availability_zones.available", "names.0"), - testAccCheckAutoscalingLatestInstanceRefreshState(&group, 0, 0, nil), + testAccCheckAutoScalingInstanceRefreshCount(&group, 0), resource.TestCheckResourceAttr("aws_autoscaling_group.bar", "default_cooldown", "300"), resource.TestCheckResourceAttr("aws_autoscaling_group.bar", "desired_capacity", "4"), resource.TestCheckResourceAttr("aws_autoscaling_group.bar", "enabled_metrics.#", "0"), @@ -148,7 +148,7 @@ func TestAccAWSAutoScalingGroup_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.new", &lc), - testAccCheckAutoscalingLatestInstanceRefreshState(&group, 0, 0, nil), + testAccCheckAutoScalingInstanceRefreshCount(&group, 0), resource.TestCheckResourceAttr("aws_autoscaling_group.bar", "desired_capacity", "5"), resource.TestCheckResourceAttr("aws_autoscaling_group.bar", "termination_policies.0", "ClosestToNextInstanceHour"), resource.TestCheckResourceAttr("aws_autoscaling_group.bar", "protect_from_scale_in", "true"), @@ -992,7 +992,7 @@ func TestAccAWSAutoScalingGroup_ALB_TargetGroups_ELBCapacity(t *testing.T) { }) } -func TestAccAWSAutoScalingGroup_InstanceRefresh_Enabled(t *testing.T) { +func TestAccAWSAutoScalingGroup_InstanceRefresh_Basic(t *testing.T) { var group autoscaling.Group resourceName := "aws_autoscaling_group.test" @@ -1002,14 +1002,9 @@ func TestAccAWSAutoScalingGroup_InstanceRefresh_Enabled(t *testing.T) { CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, Steps: []resource.TestStep{ { - // check that an instance refresh isn't started by a new asg - Config: testAccAwsAutoScalingGroupConfig_InstanceRefresh_Basic("Alpha", 1), + Config: testAccAwsAutoScalingGroupConfig_InstanceRefresh_Basic(), Check: resource.ComposeTestCheckFunc( testAccCheckAWSAutoScalingGroupExists(resourceName, &group), - resource.TestCheckResourceAttr(resourceName, "min_size", "1"), - resource.TestCheckResourceAttr(resourceName, "max_size", "2"), - resource.TestCheckResourceAttr(resourceName, "desired_capacity", "1"), - testAccCheckAutoscalingLatestInstanceRefreshState(&group, 0, 0, nil), resource.TestCheckResourceAttr(resourceName, "instance_refresh.#", "1"), resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.strategy", "Rolling"), resource.TestCheckResourceAttr(resourceName, 
"instance_refresh.0.preferences.#", "0"), @@ -1026,14 +1021,9 @@ func TestAccAWSAutoScalingGroup_InstanceRefresh_Enabled(t *testing.T) { }, }, { - // check that changing asg size doesn't trigger a refresh - Config: testAccAwsAutoScalingGroupConfig_InstanceRefresh_Enabled("Alpha", 2), + Config: testAccAwsAutoScalingGroupConfig_InstanceRefresh_Full(), Check: resource.ComposeTestCheckFunc( testAccCheckAWSAutoScalingGroupExists(resourceName, &group), - resource.TestCheckResourceAttr(resourceName, "min_size", "2"), - resource.TestCheckResourceAttr(resourceName, "max_size", "4"), - resource.TestCheckResourceAttr(resourceName, "desired_capacity", "2"), - testAccCheckAutoscalingLatestInstanceRefreshState(&group, 0, 0, nil), resource.TestCheckResourceAttr(resourceName, "instance_refresh.#", "1"), resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.strategy", "Rolling"), resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.preferences.#", "1"), @@ -1042,46 +1032,78 @@ func TestAccAWSAutoScalingGroup_InstanceRefresh_Enabled(t *testing.T) { ), }, { - // check that changing tags doesn't trigger a refresh - Config: testAccAwsAutoScalingGroupConfig_InstanceRefresh_Enabled("Bravo", 1), + Config: testAccAwsAutoScalingGroupConfig_InstanceRefresh_Disabled(), Check: resource.ComposeTestCheckFunc( testAccCheckAWSAutoScalingGroupExists(resourceName, &group), - testAccCheckAutoscalingLatestInstanceRefreshState(&group, 0, 0, nil), + resource.TestCheckNoResourceAttr(resourceName, "instance_refresh.#"), ), }, + }, + }) +} + +func TestAccAWSAutoScalingGroup_InstanceRefresh_Start(t *testing.T) { + var group autoscaling.Group + resourceName := "aws_autoscaling_group.test" + launchConfigurationName := "aws_launch_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, + Steps: []resource.TestStep{ { - Config: testAccAwsAutoScalingGroupConfig_InstanceRefresh_Disabled("Bravo", 1), + Config: testAccAwsAutoScalingGroupConfig_InstanceRefresh_Start("one"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSAutoScalingGroupExists(resourceName, &group), - resource.TestCheckNoResourceAttr(resourceName, "instance_refresh.#"), + resource.TestCheckResourceAttrPair(resourceName, "launch_configuration", launchConfigurationName, "name"), + testAccCheckAutoScalingInstanceRefreshCount(&group, 0), + ), + }, + { + Config: testAccAwsAutoScalingGroupConfig_InstanceRefresh_Start("two"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAutoScalingGroupExists(resourceName, &group), + resource.TestCheckResourceAttrPair(resourceName, "launch_configuration", launchConfigurationName, "name"), + testAccCheckAutoScalingInstanceRefreshCount(&group, 1), + testAccCheckAutoScalingInstanceRefreshStatus(&group, 0, autoscaling.InstanceRefreshStatusPending, autoscaling.InstanceRefreshStatusInProgress), + ), + }, + { + Config: testAccAwsAutoScalingGroupConfig_InstanceRefresh_Start("three"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAutoScalingGroupExists(resourceName, &group), + resource.TestCheckResourceAttrPair(resourceName, "launch_configuration", launchConfigurationName, "name"), + testAccCheckAutoScalingInstanceRefreshCount(&group, 2), + testAccCheckAutoScalingInstanceRefreshStatus(&group, 0, autoscaling.InstanceRefreshStatusPending, autoscaling.InstanceRefreshStatusInProgress), + testAccCheckAutoScalingInstanceRefreshStatus(&group, 1, 
autoscaling.InstanceRefreshStatusCancelled), ), }, }, }) } -// TODO: check that an active refresh is cancelled in favour of a new one - func TestAccAWSAutoScalingGroup_InstanceRefresh_Triggers(t *testing.T) { matrix := []struct { AvailabilityZoneCount int SubnetCount int InstanceType string + Capacity int UseLaunchConfiguration bool UseLaunchTemplate bool UseMixedInstancesPolicy bool UsePlacementGroup bool ExpectRefreshCount int }{ - {2, 0, "t3.nano", true, false, false, false, 0}, // create asg with 2 az-s - {1, 0, "t3.nano", true, false, false, false, 1}, // drop 1 az - {0, 2, "t3.nano", true, false, false, false, 2}, // add 2 subnets, drop az-s - {0, 1, "t3.nano", true, false, false, false, 3}, // drop 1 subnet - {0, 1, "t3.nano", false, true, false, false, 4}, // drop launch config, add template - {0, 1, "t3.micro", false, true, false, false, 5}, // update template - {0, 1, "t3.micro", false, false, true, false, 6}, // drop template, add mixed policy - {0, 1, "t3.nano", false, false, true, false, 7}, // update mixed policy - {0, 1, "t3.nano", false, false, true, true, 8}, // use placement group + {2, 0, "t3.nano", 1, true, false, false, false, 0}, // create asg with 2 az-s + {1, 0, "t3.nano", 1, true, false, false, false, 0}, // drop 1 az + {0, 2, "t3.nano", 4, true, false, false, false, 0}, // add 2 subnets, drop az-s + {0, 1, "t3.nano", 1, true, false, false, false, 0}, // drop 1 subnet + {0, 1, "t3.nano", 1, false, true, false, false, 1}, // drop launch config, add template + {0, 1, "t3.micro", 1, false, true, false, false, 2}, // update template + {0, 1, "t3.micro", 1, false, false, true, false, 3}, // drop template, add mixed policy + {0, 1, "t3.nano", 1, false, false, true, false, 4}, // update mixed policy + {0, 1, "t3.nano", 1, false, false, true, true, 4}, // use placement group } var group autoscaling.Group @@ -1095,6 +1117,7 @@ func TestAccAWSAutoScalingGroup_InstanceRefresh_Triggers(t *testing.T) { test.AvailabilityZoneCount, test.SubnetCount, test.InstanceType, + test.Capacity, test.UseLaunchConfiguration, test.UseLaunchTemplate, test.UseMixedInstancesPolicy, @@ -1103,7 +1126,7 @@ func TestAccAWSAutoScalingGroup_InstanceRefresh_Triggers(t *testing.T) { ), Check: resource.ComposeTestCheckFunc( testAccCheckAWSAutoScalingGroupExists(resourceName, &group), - testAccCheckAutoscalingLatestInstanceRefreshState(&group, test.ExpectRefreshCount, 0, nil), + testAccCheckAutoScalingInstanceRefreshCount(&group, test.ExpectRefreshCount), ), } } @@ -4276,30 +4299,20 @@ resource "aws_autoscaling_group" "test" { `, rName) } -func testAccAwsAutoScalingGroupConfig_InstanceRefresh_Basic(tagValue string, sizeFactor int) string { +func testAccAwsAutoScalingGroupConfig_InstanceRefresh_Basic() string { return fmt.Sprintf(` resource "aws_autoscaling_group" "test" { availability_zones = [data.aws_availability_zones.current.names[0]] - max_size = 2 * local.size_factor - min_size = 1 * local.size_factor - desired_capacity = 1 * local.size_factor + max_size = 2 + min_size = 1 + desired_capacity = 1 launch_configuration = aws_launch_configuration.test.name - tag { - key = "Test" - value = %[1]q - propagate_at_launch = true - } - instance_refresh { strategy = "Rolling" } } -locals { - size_factor = %[2]d -} - data "aws_ami" "test" { most_recent = true owners = ["amazon"] @@ -4323,24 +4336,18 @@ resource "aws_launch_configuration" "test" { image_id = data.aws_ami.test.id instance_type = "t3.nano" } -`, tagValue, sizeFactor) +`) } -func testAccAwsAutoScalingGroupConfig_InstanceRefresh_Enabled(tagValue 
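The third `_Start` step above expects the refresh from step two to end up `Cancelled`: Auto Scaling allows at most one active refresh per group, and, per the documentation note in this patch, an update cancels any refresh already in flight. Below is a hedged sketch of that cancel-before-start behaviour — the provider's actual helper is not shown in this diff, and a production version would also need to wait for the asynchronous cancellation to complete before starting the next refresh.

```go
// Hedged sketch: cancel any in-flight refresh, then start a new one. The
// error-code check uses this repository's isAWSErr helper; the "not found"
// fault simply means there was nothing to cancel.
func cancelThenStartInstanceRefresh(conn *autoscaling.AutoScaling, asgName string) error {
	_, err := conn.CancelInstanceRefresh(&autoscaling.CancelInstanceRefreshInput{
		AutoScalingGroupName: aws.String(asgName),
	})
	if err != nil && !isAWSErr(err, autoscaling.ErrCodeActiveInstanceRefreshNotFoundFault, "") {
		return fmt.Errorf("failed to cancel instance refresh of Auto Scaling Group %s: %w", asgName, err)
	}

	// NOTE: cancellation is asynchronous (Cancelling -> Cancelled); a real
	// implementation would wait here before starting the next refresh.

	_, err = conn.StartInstanceRefresh(&autoscaling.StartInstanceRefreshInput{
		AutoScalingGroupName: aws.String(asgName),
		Strategy:             aws.String(autoscaling.RefreshStrategyRolling),
	})
	if err != nil {
		return fmt.Errorf("failed to start instance refresh of Auto Scaling Group %s: %w", asgName, err)
	}

	return nil
}
```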
string, sizeFactor int) string { +func testAccAwsAutoScalingGroupConfig_InstanceRefresh_Full() string { return fmt.Sprintf(` resource "aws_autoscaling_group" "test" { availability_zones = [data.aws_availability_zones.current.names[0]] - max_size = 2 * local.size_factor - min_size = 1 * local.size_factor - desired_capacity = 1 * local.size_factor + max_size = 2 + min_size = 1 + desired_capacity = 1 launch_configuration = aws_launch_configuration.test.name - tag { - key = "Test" - value = %[1]q - propagate_at_launch = true - } - instance_refresh { strategy = "Rolling" preferences { @@ -4350,10 +4357,6 @@ resource "aws_autoscaling_group" "test" { } } -locals { - size_factor = %[2]d -} - data "aws_ami" "test" { most_recent = true owners = ["amazon"] @@ -4377,27 +4380,57 @@ resource "aws_launch_configuration" "test" { image_id = data.aws_ami.test.id instance_type = "t3.nano" } -`, tagValue, sizeFactor) +`) } -func testAccAwsAutoScalingGroupConfig_InstanceRefresh_Disabled(tagValue string, sizeFactor int) string { +func testAccAwsAutoScalingGroupConfig_InstanceRefresh_Disabled() string { return fmt.Sprintf(` resource "aws_autoscaling_group" "test" { availability_zones = [data.aws_availability_zones.current.names[0]] - max_size = 2 * local.size_factor - min_size = 1 * local.size_factor - desired_capacity = 1 * local.size_factor + max_size = 2 + min_size = 1 + desired_capacity = 1 launch_configuration = aws_launch_configuration.test.name +} - tag { - key = "Test" - value = %[1]q - propagate_at_launch = true +data "aws_ami" "test" { + most_recent = true + owners = ["amazon"] + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] } } -locals { - size_factor = %[2]d +data "aws_availability_zones" "current" { + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +resource "aws_launch_configuration" "test" { + image_id = data.aws_ami.test.id + instance_type = "t3.nano" +} +`) +} + +func testAccAwsAutoScalingGroupConfig_InstanceRefresh_Start(launchConfigurationName string) string { + return fmt.Sprintf(` +resource "aws_autoscaling_group" "test" { + availability_zones = [data.aws_availability_zones.current.names[0]] + max_size = 2 + min_size = 1 + desired_capacity = 1 + launch_configuration = aws_launch_configuration.test.name + + instance_refresh { + strategy = "Rolling" + } } data "aws_ami" "test" { @@ -4420,16 +4453,22 @@ data "aws_availability_zones" "current" { } resource "aws_launch_configuration" "test" { + name_prefix = %[1]q image_id = data.aws_ami.test.id instance_type = "t3.nano" + + lifecycle { + create_before_destroy = true + } } -`, tagValue, sizeFactor) +`, launchConfigurationName) } func testAccAwsAutoScalingGroupConfig_InstanceRefresh_Triggers( availabilityZoneCount int, subnetCount int, instanceType string, + capacity int, useLaunchConfiguration bool, useLaunchTemplate bool, useMixedInstancesPolicy bool, @@ -4439,9 +4478,9 @@ func testAccAwsAutoScalingGroupConfig_InstanceRefresh_Triggers( return fmt.Sprintf(` resource "aws_autoscaling_group" "test" { availability_zones = local.availability_zone_count > 0 ? slice(data.aws_availability_zones.current.names, 0, local.availability_zone_count) : null - max_size = 1 min_size = 1 - desired_capacity = 1 + max_size = %[9]d + desired_capacity = %[9]d launch_configuration = local.use_launch_configuration ? aws_launch_configuration.test.name : null vpc_zone_identifier = local.subnet_count > 0 ? 
slice(aws_subnet.test.*.id, 0, local.subnet_count) : null placement_group = local.use_placement_group ? aws_placement_group.test.name : null @@ -4526,60 +4565,51 @@ resource "aws_placement_group" "test" { name = local.placement_group_name strategy = "cluster" } -`, availabilityZoneCount, subnetCount, instanceType, useLaunchConfiguration, useLaunchTemplate, useMixedInstancesPolicy, usePlacementGroup, placementGroupName) -} - -// testAccCheckAutoscalingLatestInstanceRefreshState checks the Instance Refreshes -// of an Auto-Scaling Group. -// -// Use length to set the number of refreshes (of any state) that are expected. -// -// Use the offset parameter to choose the instance refresh to check. Offset 0 -// is the latest refresh, with negative offsets yielding successive elements. -// When length is 0, this argument has no effect. -// -// When length is greater than 0 and acceptedStatuses is non-nil, expect the -// refresh at the given offset to have one of the given accepted statuses. -func testAccCheckAutoscalingLatestInstanceRefreshState( - group *autoscaling.Group, - length int, - offset int, - acceptedStatuses []string, -) resource.TestCheckFunc { +`, availabilityZoneCount, subnetCount, instanceType, useLaunchConfiguration, useLaunchTemplate, useMixedInstancesPolicy, usePlacementGroup, placementGroupName, capacity) +} + +func testAccCheckAutoScalingInstanceRefreshCount(group *autoscaling.Group, expected int) resource.TestCheckFunc { return func(state *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).autoscalingconn - name := aws.StringValue(group.AutoScalingGroupName) + input := autoscaling.DescribeInstanceRefreshesInput{ - AutoScalingGroupName: aws.String(name), + AutoScalingGroupName: group.AutoScalingGroupName, + } + resp, err := conn.DescribeInstanceRefreshes(&input) + if err != nil { + return fmt.Errorf("error describing Auto Scaling Group (%s) Instance Refreshes: %w", aws.StringValue(group.AutoScalingGroupName), err) } - output, err := conn.DescribeInstanceRefreshes(&input) - switch { - case err != nil: - return err - case len(output.InstanceRefreshes) != length: - return fmt.Errorf("expected %d instance refreshes, but found %d", length, len(output.InstanceRefreshes)) + if len(resp.InstanceRefreshes) != expected { + return fmt.Errorf("expected %d Instance Refreshes, got %d", expected, len(resp.InstanceRefreshes)) + } + return nil + } +} + +func testAccCheckAutoScalingInstanceRefreshStatus(group *autoscaling.Group, offset int, expected ...string) resource.TestCheckFunc { + return func(state *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).autoscalingconn + + input := autoscaling.DescribeInstanceRefreshesInput{ + AutoScalingGroupName: group.AutoScalingGroupName, + } + resp, err := conn.DescribeInstanceRefreshes(&input) + if err != nil { + return fmt.Errorf("error describing Auto Scaling Group (%s) Instance Refreshes: %w", aws.StringValue(group.AutoScalingGroupName), err) } - switch { - case length == 0: - return nil - case len(acceptedStatuses) == 0: - return nil + if len(resp.InstanceRefreshes) < offset { + return fmt.Errorf("expected at least %d Instance Refreshes, got %d", offset+1, len(resp.InstanceRefreshes)) } - status := aws.StringValue(output.InstanceRefreshes[offset].Status) - for _, acceptedStatus := range acceptedStatuses { - if status == acceptedStatus { + actual := aws.StringValue(resp.InstanceRefreshes[offset].Status) + for _, s := range expected { + if actual == s { return nil } } - - return fmt.Errorf( - "expected status of 
refresh at offset %d to be one of %s, got %s", - offset, - strings.Join(acceptedStatuses, " or "), - status) + return fmt.Errorf("expected Instance Refresh at index %d to be in %q, got %q", offset, expected, actual) } } diff --git a/website/docs/r/autoscaling_group.html.markdown b/website/docs/r/autoscaling_group.html.markdown index 901a0471ac7..7d8b1372765 100644 --- a/website/docs/r/autoscaling_group.html.markdown +++ b/website/docs/r/autoscaling_group.html.markdown @@ -355,7 +355,7 @@ This configuration block supports the following: * `instance_warmup_seconds` - (Optional) The number of seconds until a newly launched instance is configured and ready to use. Default behavior is to use the Auto Scaling Group's health check grace period. * `min_healthy_percentage` - (Optional) The amount of capacity in the Auto Scaling group that must remain healthy during an instance refresh to allow the operation to continue, as a percentage of the desired capacity of the Auto Scaling group. Defaults to `90`. -~> **NOTE:** A refresh is only started when any of the following Auto Scaling Group properties change: `launch_configuration`, `launch_template`, `mixed_instances_policy`, `vpc_zone_identifier`, `availability_zones`, `placement_group`, or any `tag` or `tags` configured to propagate at launch. +~> **NOTE:** A refresh is only started when any of the following Auto Scaling Group properties change: `launch_configuration`, `launch_template`, `mixed_instances_policy`. ~> **NOTE:** Auto Scaling Groups support up to one active instance refresh at a time. When this resource is updated, any existing refresh is cancelled. From 3720d6624f35cdfb1fcde600130ecdcad5d4bf2c Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 8 Dec 2020 16:25:36 -0800 Subject: [PATCH 0165/1212] Adds triggers parameter to instance_refresh --- aws/resource_aws_autoscaling_group.go | 34 +++- aws/resource_aws_autoscaling_group_test.go | 151 ++++-------------- .../docs/r/autoscaling_group.html.markdown | 43 ++--- 3 files changed, 87 insertions(+), 141 deletions(-) diff --git a/aws/resource_aws_autoscaling_group.go b/aws/resource_aws_autoscaling_group.go index 0def21ee46c..28ec2208eca 100644 --- a/aws/resource_aws_autoscaling_group.go +++ b/aws/resource_aws_autoscaling_group.go @@ -507,6 +507,12 @@ func resourceAwsAutoscalingGroup() *schema.Resource { }, }, }, + "triggers": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, }, }, }, @@ -1188,9 +1194,29 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) } } - if instanceRefreshRaw, ok := d.GetOk("instance_refresh"); ok && shouldRefreshInstances { - if err := autoScalingGroupRefreshInstances(conn, d.Id(), instanceRefreshRaw.([]interface{})); err != nil { - return fmt.Errorf("failed to start instance refresh of Auto Scaling Group %s: %w", d.Id(), err) + if instanceRefreshRaw, ok := d.GetOk("instance_refresh"); ok { + instanceRefresh := instanceRefreshRaw.([]interface{}) + if !shouldRefreshInstances { + if len(instanceRefresh) > 0 && instanceRefresh[0] != nil { + m := instanceRefresh[0].(map[string]interface{}) + attrsSet := m["triggers"].(*schema.Set) + attrs := attrsSet.List() + strs := make([]string, len(attrs)) + for i, a := range attrs { + strs[i] = a.(string) + } + if attrsSet.Contains("tag") && !attrsSet.Contains("tags") { + strs = append(strs, "tags") + } else if !attrsSet.Contains("tag") && attrsSet.Contains("tags") { + strs = append(strs, "tag") + } + shouldRefreshInstances 
= d.HasChanges(strs...) + } + } + if shouldRefreshInstances { + if err := autoScalingGroupRefreshInstances(conn, d.Id(), instanceRefresh); err != nil { + return fmt.Errorf("failed to start instance refresh of Auto Scaling Group %s: %w", d.Id(), err) + } } } @@ -1341,7 +1367,7 @@ func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error setting capacity to zero to drain: %s", err) } - // Next, wait for the autoscale group to drain + // Next, wait for the Auto Scaling Group to drain log.Printf("[DEBUG] Waiting for group to have zero instances") var g *autoscaling.Group err := resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError { diff --git a/aws/resource_aws_autoscaling_group_test.go b/aws/resource_aws_autoscaling_group_test.go index 2c1efaef165..f1366a6b0d3 100644 --- a/aws/resource_aws_autoscaling_group_test.go +++ b/aws/resource_aws_autoscaling_group_test.go @@ -1084,58 +1084,34 @@ func TestAccAWSAutoScalingGroup_InstanceRefresh_Start(t *testing.T) { } func TestAccAWSAutoScalingGroup_InstanceRefresh_Triggers(t *testing.T) { - matrix := []struct { - AvailabilityZoneCount int - SubnetCount int - InstanceType string - Capacity int - UseLaunchConfiguration bool - UseLaunchTemplate bool - UseMixedInstancesPolicy bool - UsePlacementGroup bool - ExpectRefreshCount int - }{ - {2, 0, "t3.nano", 1, true, false, false, false, 0}, // create asg with 2 az-s - {1, 0, "t3.nano", 1, true, false, false, false, 0}, // drop 1 az - {0, 2, "t3.nano", 4, true, false, false, false, 0}, // add 2 subnets, drop az-s - {0, 1, "t3.nano", 1, true, false, false, false, 0}, // drop 1 subnet - {0, 1, "t3.nano", 1, false, true, false, false, 1}, // drop launch config, add template - {0, 1, "t3.micro", 1, false, true, false, false, 2}, // update template - {0, 1, "t3.micro", 1, false, false, true, false, 3}, // drop template, add mixed policy - {0, 1, "t3.nano", 1, false, false, true, false, 4}, // update mixed policy - {0, 1, "t3.nano", 1, false, false, true, true, 4}, // use placement group - } - var group autoscaling.Group resourceName := "aws_autoscaling_group.test" - placementGroupName := fmt.Sprintf("tf-test-%s", acctest.RandString(8)) - - steps := make([]resource.TestStep, len(matrix)) - for i, test := range matrix { - steps[i] = resource.TestStep{ - Config: testAccAwsAutoScalingGroupConfig_InstanceRefresh_Triggers( - test.AvailabilityZoneCount, - test.SubnetCount, - test.InstanceType, - test.Capacity, - test.UseLaunchConfiguration, - test.UseLaunchTemplate, - test.UseMixedInstancesPolicy, - test.UsePlacementGroup, - placementGroupName, - ), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAutoScalingGroupExists(resourceName, &group), - testAccCheckAutoScalingInstanceRefreshCount(&group, test.ExpectRefreshCount), - ), - } - } resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, - Steps: steps, + Steps: []resource.TestStep{ + { + Config: testAccAwsAutoScalingGroupConfig_InstanceRefresh_Basic(), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAutoScalingGroupExists(resourceName, &group), + resource.TestCheckResourceAttr(resourceName, "instance_refresh.#", "1"), + resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.strategy", "Rolling"), + resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.preferences.#", "0"), + ), + }, + { + Config: 
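The `tag`/`tags` aliasing in the update logic above exists because the resource accepts tags either as repeated `tag` blocks or as a `tags` list, and a refresh trigger on one spelling should fire for the other too. A self-contained restatement of that expansion rule, with a worked example in the comments:

```go
// expandRefreshTriggers restates the trigger-expansion rule implemented
// above: "tag" and "tags" are two spellings of the same Auto Scaling Group
// tag configuration, so listing either one as a trigger covers both.
func expandRefreshTriggers(triggers []string) []string {
	hasTag, hasTags := false, false
	for _, t := range triggers {
		switch t {
		case "tag":
			hasTag = true
		case "tags":
			hasTags = true
		}
	}

	expanded := append([]string{}, triggers...)
	if hasTag && !hasTags {
		expanded = append(expanded, "tags")
	} else if hasTags && !hasTag {
		expanded = append(expanded, "tag")
	}

	// Example: expandRefreshTriggers([]string{"tags"}) returns
	// []string{"tags", "tag"}, which is then handed to d.HasChanges(...),
	// so a change to either attribute starts a refresh.
	return expanded
}
```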
testAccAwsAutoScalingGroupConfig_InstanceRefresh_Triggers(), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAutoScalingGroupExists(resourceName, &group), + resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.triggers.#", "1"), + resource.TestCheckTypeSetElemAttr(resourceName, "instance_refresh.0.triggers.*", "tags"), + testAccCheckAutoScalingInstanceRefreshCount(&group, 1), + testAccCheckAutoScalingInstanceRefreshStatus(&group, 0, autoscaling.InstanceRefreshStatusPending, autoscaling.InstanceRefreshStatusInProgress), + ), + }, + }, }) } @@ -4464,61 +4440,25 @@ resource "aws_launch_configuration" "test" { `, launchConfigurationName) } -func testAccAwsAutoScalingGroupConfig_InstanceRefresh_Triggers( - availabilityZoneCount int, - subnetCount int, - instanceType string, - capacity int, - useLaunchConfiguration bool, - useLaunchTemplate bool, - useMixedInstancesPolicy bool, - usePlacementGroup bool, - placementGroupName string, -) string { +func testAccAwsAutoScalingGroupConfig_InstanceRefresh_Triggers() string { return fmt.Sprintf(` resource "aws_autoscaling_group" "test" { - availability_zones = local.availability_zone_count > 0 ? slice(data.aws_availability_zones.current.names, 0, local.availability_zone_count) : null + availability_zones = [data.aws_availability_zones.current.names[0]] + max_size = 2 min_size = 1 - max_size = %[9]d - desired_capacity = %[9]d - launch_configuration = local.use_launch_configuration ? aws_launch_configuration.test.name : null - vpc_zone_identifier = local.subnet_count > 0 ? slice(aws_subnet.test.*.id, 0, local.subnet_count) : null - placement_group = local.use_placement_group ? aws_placement_group.test.name : null - - dynamic "launch_template" { - for_each = local.use_launch_template ? [1] : [] - content { - id = aws_launch_template.test.id - version = aws_launch_template.test.latest_version - } - } - - dynamic "mixed_instances_policy" { - for_each = local.use_mixed_instances_policy ? 
[1] : [] - content { - launch_template { - launch_template_specification { - launch_template_id = aws_launch_template.test.id - version = aws_launch_template.test.latest_version - } - } - } - } + desired_capacity = 1 + launch_configuration = aws_launch_configuration.test.name instance_refresh { - strategy = "Rolling" + strategy = "Rolling" + triggers = ["tags"] } -} -locals { - availability_zone_count = %[1]d - subnet_count = %[2]d - instance_type = %[3]q - use_launch_configuration = %[4]t - use_launch_template = %[5]t - use_mixed_instances_policy = %[6]t - use_placement_group = %[7]t - placement_group_name = %[8]q + tag { + key = "Key" + value = "Value" + propagate_at_launch = true + } } data "aws_ami" "test" { @@ -4540,32 +4480,11 @@ data "aws_availability_zones" "current" { } } -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" -} - -resource "aws_subnet" "test" { - count = length(data.aws_availability_zones.current.names) - availability_zone = data.aws_availability_zones.current.names[count.index] - cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 2, count.index) - vpc_id = aws_vpc.test.id -} - resource "aws_launch_configuration" "test" { image_id = data.aws_ami.test.id - instance_type = local.instance_type -} - -resource "aws_launch_template" "test" { - image_id = data.aws_ami.test.image_id - instance_type = local.instance_type -} - -resource "aws_placement_group" "test" { - name = local.placement_group_name - strategy = "cluster" + instance_type = "t3.nano" } -`, availabilityZoneCount, subnetCount, instanceType, useLaunchConfiguration, useLaunchTemplate, useMixedInstancesPolicy, usePlacementGroup, placementGroupName, capacity) +`) } func testAccCheckAutoScalingInstanceRefreshCount(group *autoscaling.Group, expected int) resource.TestCheckFunc { diff --git a/website/docs/r/autoscaling_group.html.markdown b/website/docs/r/autoscaling_group.html.markdown index 7d8b1372765..ffcb4d6b0c9 100644 --- a/website/docs/r/autoscaling_group.html.markdown +++ b/website/docs/r/autoscaling_group.html.markdown @@ -178,21 +178,6 @@ resource "aws_autoscaling_group" "bar" { ### Automatically refresh all instances after the group is updated ```hcl -data "aws_ami" "example" { - most_recent = true - owners = ["amazon"] - - filter { - name = "name" - values = ["amzn-ami-hvm-*-x86_64-gp2"] - } -} - -resource "aws_launch_template" "example" { - image_id = data.aws_ami.example.id - instance_type = "t3.nano" -} - resource "aws_autoscaling_group" "example" { availability_zones = ["us-east-1a"] desired_capacity = 1 @@ -211,6 +196,21 @@ resource "aws_autoscaling_group" "example" { } } } + +data "aws_ami" "example" { + most_recent = true + owners = ["amazon"] + + filter { + name = "name" + values = ["amzn-ami-hvm-*-x86_64-gp2"] + } +} + +resource "aws_launch_template" "example" { + image_id = data.aws_ami.example.id + instance_type = "t3.nano" +} ``` ## Argument Reference @@ -354,8 +354,9 @@ This configuration block supports the following: * `preferences` - (Optional) Override default parameters for Instance Refresh. * `instance_warmup_seconds` - (Optional) The number of seconds until a newly launched instance is configured and ready to use. Default behavior is to use the Auto Scaling Group's health check grace period. * `min_healthy_percentage` - (Optional) The amount of capacity in the Auto Scaling group that must remain healthy during an instance refresh to allow the operation to continue, as a percentage of the desired capacity of the Auto Scaling group. Defaults to `90`. 
+* `triggers` - (Optional) Set of additional property names that will trigger an Instance Refresh. A refresh will always be triggered by a change in any of `launch_configuration`, `launch_template`, or `mixed_instances_policy`. -~> **NOTE:** A refresh is only started when any of the following Auto Scaling Group properties change: `launch_configuration`, `launch_template`, `mixed_instances_policy`. +~> **NOTE:** A refresh is started when any of the following Auto Scaling Group properties change: `launch_configuration`, `launch_template`, `mixed_instances_policy`. Additional properties can be specified in the `triggers` property of `instance_refresh`. ~> **NOTE:** Auto Scaling Groups support up to one active instance refresh at a time. When this resource is updated, any existing refresh is cancelled. @@ -367,15 +368,15 @@ In addition to all arguments above, the following attributes are exported: * `id` - The Auto Scaling Group id. * `arn` - The ARN for this Auto Scaling Group -* `availability_zones` - The availability zones of the autoscale group. -* `min_size` - The minimum size of the autoscale group -* `max_size` - The maximum size of the autoscale group +* `availability_zones` - The availability zones of the Auto Scaling Group. +* `min_size` - The minimum size of the Auto Scaling Group +* `max_size` - The maximum size of the Auto Scaling Group * `default_cooldown` - Time between a scaling activity and the succeeding scaling activity. -* `name` - The name of the autoscale group +* `name` - The name of the Auto Scaling Group * `health_check_grace_period` - Time after instance comes into service before checking health. * `health_check_type` - "EC2" or "ELB". Controls how health checking is done. * `desired_capacity` -The number of Amazon EC2 instances that should be running in the group. -* `launch_configuration` - The launch configuration of the autoscale group +* `launch_configuration` - The launch configuration of the Auto Scaling Group * `vpc_zone_identifier` (Optional) - The VPC zone identifier ~> **NOTE:** When using `ELB` as the `health_check_type`, `health_check_grace_period` is required. From f3fbd79da071408d8b0722180e299cdfe7774990 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Wed, 9 Dec 2020 11:15:26 -0500 Subject: [PATCH 0166/1212] add rule to remove extraneous string list to set casting --- .semgrep.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.semgrep.yml b/.semgrep.yml index 3520fd50fad..46f121ee58b 100644 --- a/.semgrep.yml +++ b/.semgrep.yml @@ -36,6 +36,17 @@ rules: regex: '^"github.com/aws/aws-sdk-go/service/[^/]+"$' severity: WARNING + - id: helper-schema-Set-extraneous-NewSet-with-flattenStringList + languages: [go] + message: Prefer `flattenStringSet()` function for casting a list of string pointers to a set + paths: + include: + - aws/ + patterns: + - pattern: schema.NewSet(schema.HashString, flattenStringList($APIOBJECT)) + - pattern-not-inside: func flattenStringSet(list []*string) *schema.Set { ... 
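To make the new `helper-schema-Set-extraneous-NewSet-with-flattenStringList` rule concrete, here is a before/after pair of the kind it is meant to catch. `flattenStringList` and `flattenStringSet` are existing helpers in this repository; `apiObject.Values` is a made-up field used purely for illustration.

```go
// Flagged by the rule: hand-building a set from a flattened string list.
d.Set("values", schema.NewSet(schema.HashString, flattenStringList(apiObject.Values)))

// Preferred: flattenStringSet performs the same conversion in one call.
d.Set("values", flattenStringSet(apiObject.Values))
```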
} + severity: WARNING + - id: helper-schema-ResourceData-GetOk-with-extraneous-conditional languages: [go] message: Zero value conditional check after `d.GetOk()` is extraneous From 09123f6903f3b24e50ee9767f7d9b4d9073d3e86 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 9 Dec 2020 10:49:31 -0600 Subject: [PATCH 0167/1212] tests/resource/lakeformation_resource: Add exists and disappears --- ...rce_aws_lakeformation_datalake_settings.go | 4 +- aws/resource_aws_lakeformation_resource.go | 5 +- ...esource_aws_lakeformation_resource_test.go | 180 +++++++++++------- go.mod | 1 + go.sum | 30 +++ 5 files changed, 145 insertions(+), 75 deletions(-) diff --git a/aws/resource_aws_lakeformation_datalake_settings.go b/aws/resource_aws_lakeformation_datalake_settings.go index 0a6f313e376..5304f2bf135 100644 --- a/aws/resource_aws_lakeformation_datalake_settings.go +++ b/aws/resource_aws_lakeformation_datalake_settings.go @@ -5,8 +5,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lakeformation" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceAwsLakeFormationDataLakeSettings() *schema.Resource { diff --git a/aws/resource_aws_lakeformation_resource.go b/aws/resource_aws_lakeformation_resource.go index 87d2afb65c3..ee715543edb 100644 --- a/aws/resource_aws_lakeformation_resource.go +++ b/aws/resource_aws_lakeformation_resource.go @@ -26,6 +26,7 @@ func resourceAwsLakeFormationResource() *schema.Resource { "resource_arn": { Type: schema.TypeString, Required: true, + ForceNew: true, ValidateFunc: validateArn, }, "role_arn": { @@ -92,9 +93,9 @@ func resourceAwsLakeFormationResourceRead(d *schema.ResourceData, meta interface return fmt.Errorf("error reading Lake Formation Resource: %s", err) } - d.Set("resource_arn", output.ResourceInfo.ResourceArn) + // d.Set("resource_arn", output.ResourceInfo.ResourceArn) // output not including resource arn currently d.Set("role_arn", output.ResourceInfo.RoleArn) - if output.ResourceInfo.LastModified != nil { + if output.ResourceInfo.LastModified != nil { // output not including last modified currently d.Set("last_modified", output.ResourceInfo.LastModified.Format(time.RFC3339)) } diff --git a/aws/resource_aws_lakeformation_resource_test.go b/aws/resource_aws_lakeformation_resource_test.go index a63504cd449..b1b76e49e85 100644 --- a/aws/resource_aws_lakeformation_resource_test.go +++ b/aws/resource_aws_lakeformation_resource_test.go @@ -2,9 +2,11 @@ package aws import ( "fmt" + "regexp" "testing" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/lakeformation" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -12,83 +14,115 @@ import ( ) func TestAccAWSLakeFormationResource_basic(t *testing.T) { - rName := acctest.RandomWithPrefix("tf-acc-test") - resourceName := "aws_lakeformation_resource.test" - bucketName := "aws_s3_bucket.test" - roleName := "aws_iam_role.test" + bucketName := acctest.RandomWithPrefix("tf-acc-test") + roleName := acctest.RandomWithPrefix("tf-acc-test") + resourceAddr := "aws_lakeformation_resource.test" + bucketAddr := "aws_s3_bucket.test" + roleAddr := "aws_iam_role.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: 
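In the lakeformation resource changes above, `d.Set("resource_arn", ...)` is commented out because `DescribeResource` currently omits the ARN from `ResourceInfo`. Since this resource already uses the ARN as its Terraform ID (the read calls `DescribeResource` with `d.Id()`), one hedged workaround — an assumption, not necessarily what the provider ends up doing — would be to populate state from the ID:

```go
// Hedged workaround sketch: the API response omits ResourceInfo.ResourceArn,
// but the ARN is this resource's ID, so state could be filled from d.Id().
d.Set("resource_arn", d.Id())
```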
testAccProviders, - CheckDestroy: testAccCheckAWSLakeFormationResourceDeregister, + CheckDestroy: testAccCheckAWSLakeFormationResourceDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSLakeFormationResourceConfig_basic(rName), + Config: testAccAWSLakeFormationResourceConfig_basic(bucketName, roleName), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair(resourceName, "resource_arn", bucketName, "arn"), - resource.TestCheckResourceAttrPair(resourceName, "role_arn", roleName, "arn"), - resource.TestCheckResourceAttrSet(resourceName, "last_modified"), + testAccCheckAWSLakeFormationResourceExists(resourceAddr), + resource.TestCheckResourceAttrPair(resourceAddr, "role_arn", roleAddr, "arn"), + resource.TestCheckResourceAttrPair(resourceAddr, "resource_arn", bucketAddr, "arn"), ), }, }, }) } -func TestAccAWSLakeFormationResource_withRole(t *testing.T) { +func TestAccAWSLakeFormationResource_disappears(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_lakeformation_resource.test" - bucketName := "aws_s3_bucket.test" - roleName := "data.aws_iam_role.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testAccCheckAWSLakeFormationResourceDeregister, + CheckDestroy: testAccCheckAWSLakeFormationResourceDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSLakeFormationResourceConfig_withRole(rName), + Config: testAccAWSLakeFormationResourceConfig_basic(rName, rName), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair(bucketName, "arn", resourceName, "resource_arn"), - resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "role_arn"), - resource.TestCheckResourceAttr(resourceName, "use_service_linked_role", "false"), + testAccCheckAWSLakeFormationResourceExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsLakeFormationResource(), resourceName), ), + ExpectNonEmptyPlan: true, }, }, }) } -func TestAccAWSLakeFormationResource_update(t *testing.T) { +func TestAccAWSLakeFormationResource_serviceLinkedRole(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") - resourceName := "aws_lakeformation_resource.test" - bucketName := "aws_s3_bucket.test" - roleName := "data.aws_iam_role.test" + resourceAddr := "aws_lakeformation_resource.test" + bucketAddr := "aws_s3_bucket.test" + arn := arn.ARN{ + Partition: testAccGetPartition(), + Service: "iam", + AccountID: ".*", + Resource: "role/aws-service-role/lakeformation.amazonaws.com/AWSServiceRoleForLakeFormationDataAccess", + } + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckIamServiceLinkedRole(t, "/aws-service-role/lakeformation.amazonaws.com") + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLakeFormationResourceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLakeFormationResourceConfig_serviceLinkedRole(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLakeFormationResourceExists(resourceAddr), + resource.TestCheckResourceAttrPair(resourceAddr, "resource_arn", bucketAddr, "arn"), + resource.TestMatchResourceAttr(resourceAddr, "role_arn", regexp.MustCompile(fmt.Sprintf(`^%s$`, arn.String()))), + ), + }, + }, + }) +} + +func TestAccAWSLakeFormationResource_update(t *testing.T) { + bucketName := acctest.RandomWithPrefix("tf-acc-test") + roleName1 := acctest.RandomWithPrefix("tf-acc-test") + roleName2 := 
acctest.RandomWithPrefix("tf-acc-test") + resourceAddr := "aws_lakeformation_resource.test" + bucketAddr := "aws_s3_bucket.test" + roleAddr := "aws_iam_role.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testAccCheckAWSLakeFormationResourceDeregister, + CheckDestroy: testAccCheckAWSLakeFormationResourceDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSLakeFormationResourceConfig_basic(rName), + Config: testAccAWSLakeFormationResourceConfig_basic(bucketName, roleName1), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair(bucketName, "arn", resourceName, "resource_arn"), - resource.TestCheckResourceAttr(resourceName, "use_service_linked_role", "true"), - resource.TestCheckResourceAttrSet(resourceName, "role_arn"), + testAccCheckAWSLakeFormationResourceExists(resourceAddr), + resource.TestCheckResourceAttrPair(resourceAddr, "role_arn", roleAddr, "arn"), + resource.TestCheckResourceAttrPair(resourceAddr, "resource_arn", bucketAddr, "arn"), ), }, { - Config: testAccAWSLakeFormationResourceConfig_withRole(rName), + Config: testAccAWSLakeFormationResourceConfig_basic(bucketName, roleName2), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair(bucketName, "arn", resourceName, "resource_arn"), - resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "role_arn"), - resource.TestCheckResourceAttr(resourceName, "use_service_linked_role", "false"), + testAccCheckAWSLakeFormationResourceExists(resourceAddr), + resource.TestCheckResourceAttrPair(resourceAddr, "role_arn", roleAddr, "arn"), + resource.TestCheckResourceAttrPair(resourceAddr, "resource_arn", bucketAddr, "arn"), ), }, }, }) } -func testAccCheckAWSLakeFormationResourceDeregister(s *terraform.State) error { +func testAccCheckAWSLakeFormationResourceDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).lakeformationconn for _, rs := range s.RootModule().Resources { @@ -104,7 +138,7 @@ func testAccCheckAWSLakeFormationResourceDeregister(s *terraform.State) error { _, err := conn.DescribeResource(input) if err == nil { - return fmt.Errorf("Resource still registered: %s", resourceArn) + return fmt.Errorf("resource still registered: %s", resourceArn) } if !isLakeFormationResourceNotFoundErr(err) { return err @@ -114,6 +148,29 @@ func testAccCheckAWSLakeFormationResourceDeregister(s *terraform.State) error { return nil } +func testAccCheckAWSLakeFormationResourceExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("resource not found: %s", resourceName) + } + + conn := testAccProvider.Meta().(*AWSClient).lakeformationconn + + input := &lakeformation.DescribeResourceInput{ + ResourceArn: aws.String(rs.Primary.ID), + } + + _, err := conn.DescribeResource(input) + + if err != nil { + return fmt.Errorf("error getting Lake Formation resource (%s): %w", rs.Primary.ID, err) + } + + return nil + } +} + func isLakeFormationResourceNotFoundErr(err error) bool { return isAWSErr( err, @@ -121,29 +178,29 @@ func isLakeFormationResourceNotFoundErr(err error) bool { "Entity not found") } -func testAccAWSLakeFormationResourceConfig_basic(rName string) string { +func testAccAWSLakeFormationResourceConfig_basic(bucket, role string) string { return fmt.Sprintf(` resource "aws_s3_bucket" "test" { bucket = %[1]q } resource "aws_iam_role" "test" { - name = %[1]q + name = 
%[2]q path = "/test/" assume_role_policy = < Date: Wed, 9 Dec 2020 10:54:52 -0600 Subject: [PATCH 0168/1212] resource/lakeformation: Remove lakeformation_datalake_settings --- ...rce_aws_lakeformation_datalake_settings.go | 134 ------------------ ...ws_lakeformation_datalake_settings_test.go | 115 --------------- 2 files changed, 249 deletions(-) delete mode 100644 aws/resource_aws_lakeformation_datalake_settings.go delete mode 100644 aws/resource_aws_lakeformation_datalake_settings_test.go diff --git a/aws/resource_aws_lakeformation_datalake_settings.go b/aws/resource_aws_lakeformation_datalake_settings.go deleted file mode 100644 index 5304f2bf135..00000000000 --- a/aws/resource_aws_lakeformation_datalake_settings.go +++ /dev/null @@ -1,134 +0,0 @@ -package aws - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/lakeformation" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func resourceAwsLakeFormationDataLakeSettings() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsLakeFormationDataLakeSettingsPut, - Update: resourceAwsLakeFormationDataLakeSettingsPut, - Read: resourceAwsLakeFormationDataLakeSettingsRead, - Delete: resourceAwsLakeFormationDataLakeSettingsReset, - - Schema: map[string]*schema.Schema{ - "catalog_id": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Computed: true, - }, - "admins": { - Type: schema.TypeSet, - Set: schema.HashString, - Required: true, - MinItems: 0, - MaxItems: 10, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.NoZeroValues, - }, - }, - }, - } -} - -func resourceAwsLakeFormationDataLakeSettingsPut(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).lakeformationconn - catalogId := createAwsDataCatalogId(d, meta.(*AWSClient).accountid) - - input := &lakeformation.PutDataLakeSettingsInput{ - CatalogId: aws.String(catalogId), - DataLakeSettings: &lakeformation.DataLakeSettings{ - DataLakeAdmins: expandLakeFormationDataLakePrincipal(d), - }, - } - - _, err := conn.PutDataLakeSettings(input) - if err != nil { - return fmt.Errorf("Error updating DataLakeSettings: %s", err) - } - - awsRegion := meta.(*AWSClient).region - d.SetId(fmt.Sprintf("lakeformation:%s:%s", awsRegion, catalogId)) - d.Set("catalog_id", catalogId) - - return resourceAwsLakeFormationDataLakeSettingsRead(d, meta) -} - -func resourceAwsLakeFormationDataLakeSettingsRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).lakeformationconn - catalogId := d.Get("catalog_id").(string) - - input := &lakeformation.GetDataLakeSettingsInput{ - CatalogId: aws.String(catalogId), - } - - out, err := conn.GetDataLakeSettings(input) - if err != nil { - return fmt.Errorf("Error reading DataLakeSettings: %s", err) - } - - d.Set("catalog_id", catalogId) - if err := d.Set("admins", flattenLakeFormationDataLakePrincipal(out.DataLakeSettings.DataLakeAdmins)); err != nil { - return fmt.Errorf("Error setting admins from DataLakeSettings: %s", err) - } - // TODO: Add CreateDatabaseDefaultPermissions and CreateTableDefaultPermissions - - return nil -} - -func resourceAwsLakeFormationDataLakeSettingsReset(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).lakeformationconn - catalogId := d.Get("catalog_id").(string) - - input := &lakeformation.PutDataLakeSettingsInput{ - CatalogId: aws.String(catalogId), - DataLakeSettings: 
&lakeformation.DataLakeSettings{ - DataLakeAdmins: make([]*lakeformation.DataLakePrincipal, 0), - }, - } - - _, err := conn.PutDataLakeSettings(input) - if err != nil { - return fmt.Errorf("Error reseting DataLakeSettings: %s", err) - } - - return nil -} - -func createAwsDataCatalogId(d *schema.ResourceData, accountId string) (catalogId string) { - if inputCatalogId, ok := d.GetOkExists("catalog_id"); ok { - catalogId = inputCatalogId.(string) - } else { - catalogId = accountId - } - return -} - -func expandLakeFormationDataLakePrincipal(d *schema.ResourceData) []*lakeformation.DataLakePrincipal { - xs := d.Get("admins").(*schema.Set).List() - ys := make([]*lakeformation.DataLakePrincipal, len(xs)) - - for i, x := range xs { - ys[i] = &lakeformation.DataLakePrincipal{ - DataLakePrincipalIdentifier: aws.String(x.(string)), - } - } - - return ys -} - -func flattenLakeFormationDataLakePrincipal(xs []*lakeformation.DataLakePrincipal) []string { - ys := make([]string, len(xs)) - for i, x := range xs { - ys[i] = aws.StringValue(x.DataLakePrincipalIdentifier) - } - - return ys -} diff --git a/aws/resource_aws_lakeformation_datalake_settings_test.go b/aws/resource_aws_lakeformation_datalake_settings_test.go deleted file mode 100644 index 6ec6e1be397..00000000000 --- a/aws/resource_aws_lakeformation_datalake_settings_test.go +++ /dev/null @@ -1,115 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/lakeformation" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" -) - -func TestAccAWSLakeFormationDataLakeSettings_basic(t *testing.T) { - callerIdentityName := "data.aws_caller_identity.current" - resourceName := "aws_lakeformation_datalake_settings.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSLakeFormationDataLakeSettingsEmpty, - Steps: []resource.TestStep{ - { - Config: testAccAWSLakeFormationDataLakeSettingsConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), - resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), - resource.TestCheckResourceAttr(resourceName, "admins.#", "1"), - testAccCheckAWSLakeFormationDataLakePrincipal(callerIdentityName, "arn", resourceName, "admins"), - ), - }, - }, - }) -} - -const testAccAWSLakeFormationDataLakeSettingsConfig_basic = ` -data "aws_caller_identity" "current" {} - -resource "aws_lakeformation_datalake_settings" "test" { - admins = ["${data.aws_caller_identity.current.arn}"] -} -` - -func TestAccAWSLakeFormationDataLakeSettings_withCatalogId(t *testing.T) { - callerIdentityName := "data.aws_caller_identity.current" - resourceName := "aws_lakeformation_datalake_settings.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSLakeFormationDataLakeSettingsEmpty, - Steps: []resource.TestStep{ - { - Config: testAccAWSLakeFormationDataLakeSettingsConfig_withCatalogId, - Check: resource.ComposeTestCheckFunc( - testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), - resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), - resource.TestCheckResourceAttr(resourceName, "admins.#", "1"), - 
testAccCheckAWSLakeFormationDataLakePrincipal(callerIdentityName, "arn", resourceName, "admins"), - ), - }, - }, - }) -} - -const testAccAWSLakeFormationDataLakeSettingsConfig_withCatalogId = ` -data "aws_caller_identity" "current" {} - -resource "aws_lakeformation_datalake_settings" "test" { - catalog_id = "${data.aws_caller_identity.current.account_id}" - admins = ["${data.aws_caller_identity.current.arn}"] -} -` - -func testAccCheckAWSLakeFormationDataLakeSettingsEmpty(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).lakeformationconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_lakeformation_datalake_settings" { - continue - } - - input := &lakeformation.GetDataLakeSettingsInput{ - CatalogId: aws.String(testAccProvider.Meta().(*AWSClient).accountid), - } - - out, err := conn.GetDataLakeSettings(input) - if err != nil { - return fmt.Errorf("Error reading DataLakeSettings: %s", err) - } - - if len(out.DataLakeSettings.DataLakeAdmins) > 0 { - return fmt.Errorf("Error admins list not empty in DataLakeSettings: %s", out) - } - } - - return nil -} - -func testAccCheckAWSLakeFormationDataLakePrincipal(nameFirst, keyFirst, nameSecond, keySecond string) resource.TestCheckFunc { - return func(s *terraform.State) error { - isFirst, err := primaryInstanceState(s, nameFirst) - if err != nil { - return err - } - - valueFirst, okFirst := isFirst.Attributes[keyFirst] - if !okFirst { - return fmt.Errorf("%s: Attribute %q not set", nameFirst, keyFirst) - } - - expandedKey := fmt.Sprintf("%s.%d", keySecond, schema.HashString(valueFirst)) - return resource.TestCheckResourceAttr(nameSecond, expandedKey, valueFirst)(s) - } -} From 896f026aa9f7db886603b17bcf03d3e27fa48d68 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 9 Dec 2020 11:00:09 -0600 Subject: [PATCH 0169/1212] docs/lakeformation_resource: Fix formatting --- website/docs/r/lakeformation_resource.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/lakeformation_resource.html.markdown b/website/docs/r/lakeformation_resource.html.markdown index dd1cb4e05fb..b5aea161e58 100644 --- a/website/docs/r/lakeformation_resource.html.markdown +++ b/website/docs/r/lakeformation_resource.html.markdown @@ -20,7 +20,7 @@ data "aws_s3_bucket" "example" { } resource "aws_lakeformation_resource" "example" { - resource_arn = "${data.aws_s3_bucket.example.arn}" + resource_arn = data.aws_s3_bucket.example.arn } ``` From 8555491293e40a7334df62019478d6b5258e5d18 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 9 Dec 2020 11:04:43 -0600 Subject: [PATCH 0170/1212] docs/lakeformation_datalake_settings: Remove from branch (part of later PR) --- ...eformation_datalake_settings.html.markdown | 46 ------------------- 1 file changed, 46 deletions(-) delete mode 100644 website/docs/r/lakeformation_datalake_settings.html.markdown diff --git a/website/docs/r/lakeformation_datalake_settings.html.markdown b/website/docs/r/lakeformation_datalake_settings.html.markdown deleted file mode 100644 index 0385b9243db..00000000000 --- a/website/docs/r/lakeformation_datalake_settings.html.markdown +++ /dev/null @@ -1,46 +0,0 @@ ---- -subcategory: "Lake Formation" -layout: "aws" -page_title: "AWS: aws_lakeformation_datalake_settings" -description: |- - Manages the data lake settings for the current account ---- - -# Resource: aws_lakeformation_datalake_settings - -Manages the data lake settings for the current account. 
- -## Example Usage - -```hcl -data "aws_iam_user" "existing_user" { - user_name = "an_existing_user_name" -} - -data "aws_iam_role" "existing_role" { - name = "an_existing_role_name" -} - -resource "aws_lakeformation_datalake_settings" "example" { - admins = [ - "${aws_iam_user.existing_user.arn}", - "${aws_iam_user.existing_role.arn}", - ] -} -``` - -## Argument Reference - -The following arguments are required: - -* `admins` – (Required) A list of up to 10 AWS Lake Formation principals (users or roles). - -The following arguments are optional: - -* `catalog_id` – (Optional) The identifier for the Data Catalog. By default, the account ID. - -## Attributes Reference - -In addition to all arguments above, the following attributes are exported: - -* `id` - Resource identifier with the pattern `lakeformation:AWS_REGION:ACCOUNT_ID`. From 7455563b8b2a856d35161ad405f3dfae5f737c9b Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 9 Dec 2020 11:28:47 -0600 Subject: [PATCH 0171/1212] tests/lakeformation_resource: Fix to use ARN check func --- aws/resource_aws_lakeformation_resource_test.go | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/aws/resource_aws_lakeformation_resource_test.go b/aws/resource_aws_lakeformation_resource_test.go index b1b76e49e85..23d3ea14aef 100644 --- a/aws/resource_aws_lakeformation_resource_test.go +++ b/aws/resource_aws_lakeformation_resource_test.go @@ -2,11 +2,9 @@ package aws import ( "fmt" - "regexp" "testing" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/lakeformation" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -62,12 +60,6 @@ func TestAccAWSLakeFormationResource_serviceLinkedRole(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resourceAddr := "aws_lakeformation_resource.test" bucketAddr := "aws_s3_bucket.test" - arn := arn.ARN{ - Partition: testAccGetPartition(), - Service: "iam", - AccountID: ".*", - Resource: "role/aws-service-role/lakeformation.amazonaws.com/AWSServiceRoleForLakeFormationDataAccess", - } resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { @@ -82,7 +74,7 @@ func TestAccAWSLakeFormationResource_serviceLinkedRole(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAWSLakeFormationResourceExists(resourceAddr), resource.TestCheckResourceAttrPair(resourceAddr, "resource_arn", bucketAddr, "arn"), - resource.TestMatchResourceAttr(resourceAddr, "role_arn", regexp.MustCompile(fmt.Sprintf(`^%s$`, arn.String()))), + testAccCheckResourceAttrGlobalARN(resourceAddr, "role_arn", "iam", "role/aws-service-role/lakeformation.amazonaws.com/AWSServiceRoleForLakeFormationDataAccess"), ), }, }, @@ -251,7 +243,7 @@ resource "aws_s3_bucket" "test" { } resource "aws_lakeformation_resource" "test" { - resource_arn = "${aws_s3_bucket.test.arn}" + resource_arn = aws_s3_bucket.test.arn } `, rName) } From 33f5ce0811e3af9c22957ef96512739b948138a4 Mon Sep 17 00:00:00 2001 From: tbugfinder Date: Wed, 9 Dec 2020 18:44:39 +0100 Subject: [PATCH 0172/1212] provider: adding Computed: true to id of few data sources, as those should be marked as computed even if set to optional (#16667) --- aws/data_source_aws_customer_gateway.go | 1 + aws/data_source_aws_ec2_transit_gateway.go | 1 + aws/data_source_aws_ec2_transit_gateway_peering_attachment.go | 1 + aws/data_source_aws_ec2_transit_gateway_route_table.go | 1 + aws/data_source_aws_ec2_transit_gateway_vpc_attachment.go | 
1 + aws/data_source_aws_guardduty_detector.go | 1 + 6 files changed, 6 insertions(+) diff --git a/aws/data_source_aws_customer_gateway.go b/aws/data_source_aws_customer_gateway.go index ee61a93669f..325e0c4b06b 100644 --- a/aws/data_source_aws_customer_gateway.go +++ b/aws/data_source_aws_customer_gateway.go @@ -21,6 +21,7 @@ func dataSourceAwsCustomerGateway() *schema.Resource { "id": { Type: schema.TypeString, Optional: true, + Computed: true, }, "bgp_asn": { diff --git a/aws/data_source_aws_ec2_transit_gateway.go b/aws/data_source_aws_ec2_transit_gateway.go index 3812c20e565..499934d3739 100644 --- a/aws/data_source_aws_ec2_transit_gateway.go +++ b/aws/data_source_aws_ec2_transit_gateway.go @@ -52,6 +52,7 @@ func dataSourceAwsEc2TransitGateway() *schema.Resource { "id": { Type: schema.TypeString, Optional: true, + Computed: true, }, "owner_id": { Type: schema.TypeString, diff --git a/aws/data_source_aws_ec2_transit_gateway_peering_attachment.go b/aws/data_source_aws_ec2_transit_gateway_peering_attachment.go index d2868d512de..2ddbda6803c 100644 --- a/aws/data_source_aws_ec2_transit_gateway_peering_attachment.go +++ b/aws/data_source_aws_ec2_transit_gateway_peering_attachment.go @@ -20,6 +20,7 @@ func dataSourceAwsEc2TransitGatewayPeeringAttachment() *schema.Resource { "id": { Type: schema.TypeString, Optional: true, + Computed: true, }, "peer_account_id": { Type: schema.TypeString, diff --git a/aws/data_source_aws_ec2_transit_gateway_route_table.go b/aws/data_source_aws_ec2_transit_gateway_route_table.go index 5124b7e559b..6caa944d781 100644 --- a/aws/data_source_aws_ec2_transit_gateway_route_table.go +++ b/aws/data_source_aws_ec2_transit_gateway_route_table.go @@ -33,6 +33,7 @@ func dataSourceAwsEc2TransitGatewayRouteTable() *schema.Resource { "id": { Type: schema.TypeString, Optional: true, + Computed: true, }, "transit_gateway_id": { Type: schema.TypeString, diff --git a/aws/data_source_aws_ec2_transit_gateway_vpc_attachment.go b/aws/data_source_aws_ec2_transit_gateway_vpc_attachment.go index 22695899079..a1aea8c18c7 100644 --- a/aws/data_source_aws_ec2_transit_gateway_vpc_attachment.go +++ b/aws/data_source_aws_ec2_transit_gateway_vpc_attachment.go @@ -28,6 +28,7 @@ func dataSourceAwsEc2TransitGatewayVpcAttachment() *schema.Resource { "id": { Type: schema.TypeString, Optional: true, + Computed: true, }, "ipv6_support": { Type: schema.TypeString, diff --git a/aws/data_source_aws_guardduty_detector.go b/aws/data_source_aws_guardduty_detector.go index 6011da6a0a0..9f7666fcb55 100644 --- a/aws/data_source_aws_guardduty_detector.go +++ b/aws/data_source_aws_guardduty_detector.go @@ -16,6 +16,7 @@ func dataSourceAwsGuarddutyDetector() *schema.Resource { "id": { Type: schema.TypeString, Optional: true, + Computed: true, }, "status": { Type: schema.TypeString, From 3893c4468c5587c5d0ab626f7fc534541bfbf941 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 9 Dec 2020 12:47:42 -0500 Subject: [PATCH 0173/1212] Update CHANGELOG for #16667 --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a370e78817c..2bef033bb04 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,12 @@ ENHANCEMENTS BUG FIXES +* data-source/aws_customer_gateway: Prevent missing `id` attribute when not configured as argument [GH-16667] +* data-source/aws_ec2_transit_gateway: Prevent missing `id` attribute when not configured as argument [GH-16667] +* data-source/aws_ec2_transit_gateway_peering_attachment: Prevent missing `id` attribute when not configured as 
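The pattern patch 0172 applies is worth spelling out: when a data source exposes `id` as an optional lookup argument, it must also be marked `Computed`, or the attribute is left empty in state and plans whenever the user omits it. A minimal, hypothetical schema illustrating the pairing (the function name is invented for this sketch):

```go
// Hypothetical illustration of the Optional + Computed pairing added by this
// patch: users may pass "id" to select a specific object; when omitted, the
// data source computes it after lookup instead of leaving it unset.
func dataSourceExampleSchema() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"id": {
			Type:     schema.TypeString,
			Optional: true,
			Computed: true,
		},
	}
}
```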
argument [GH-16667] +* data-source/aws_ec2_transit_gateway_route_table: Prevent missing `id` attribute when not configured as argument [GH-16667] +* data-source/aws_ec2_transit_gateway_vpc_attachment: Prevent missing `id` attribute when not configured as argument [GH-16667] +* data-source/aws_guardduty_detector: Prevent missing `id` attribute when not configured as argument [GH-16667] * resource/aws_workspaces_directory: Fix empty custom_security_group_id & default_ou [GH-16589] ## 3.20.0 (December 03, 2020) From d8d69f553e6c770dea61f1e711df1396ee21de11 Mon Sep 17 00:00:00 2001 From: angie pinilla Date: Wed, 9 Dec 2020 13:09:40 -0500 Subject: [PATCH 0174/1212] Update CHANGELOG for #16605 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2bef033bb04..f2acb876614 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ BUG FIXES * data-source/aws_ec2_transit_gateway_route_table: Prevent missing `id` attribute when not configured as argument [GH-16667] * data-source/aws_ec2_transit_gateway_vpc_attachment: Prevent missing `id` attribute when not configured as argument [GH-16667] * data-source/aws_guardduty_detector: Prevent missing `id` attribute when not configured as argument [GH-16667] +* resource/aws_backup_plan: Prevent plan-time validation error for pre-existing resources with `lifecycle` `delete_after` and/or `copy_action` `lifecycle` `delete_after` arguments configured [GH-16605] * resource/aws_workspaces_directory: Fix empty custom_security_group_id & default_ou [GH-16589] ## 3.20.0 (December 03, 2020) From c30f5fb77c81f85cf21fc060df32bc6406fc3e22 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 9 Dec 2020 12:14:56 -0600 Subject: [PATCH 0175/1212] resource/lakeformation_resource: Remove go.mod, go.sum --- go.mod | 1 - go.sum | 30 ------------------------------ 2 files changed, 31 deletions(-) diff --git a/go.mod b/go.mod index 7c550e6ef53..64399b168cb 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,6 @@ require ( github.com/hashicorp/go-hclog v0.10.0 // indirect github.com/hashicorp/go-multierror v1.1.0 github.com/hashicorp/go-version v1.2.1 - github.com/hashicorp/terraform-plugin-sdk v1.16.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0 github.com/jen20/awspolicyequivalence v1.1.0 github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba diff --git a/go.sum b/go.sum index aad5854cd6e..4e66d75605f 100644 --- a/go.sum +++ b/go.sum @@ -59,8 +59,6 @@ github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2 github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= @@ -164,8 +162,6 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= @@ -181,7 +177,6 @@ github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= -github.com/hashicorp/go-getter v1.4.2-0.20200106182914-9813cbd4eb02/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= github.com/hashicorp/go-getter v1.5.0 h1:ciWJaeZWSMbc5OiLMpKp40MKFPqO44i0h3uyfXPBkkk= github.com/hashicorp/go-getter v1.5.0/go.mod h1:a7z7NPPfNQpJWcn4rSWFtdrSldqLdLPEF3d8nFMsSLM= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= @@ -199,32 +194,22 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f h1:UdxlrJz4JOnY8W+DbLISwf2B8WXEolNRA8BGCwI9jws= -github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= -github.com/hashicorp/hcl/v2 v2.0.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90= github.com/hashicorp/hcl/v2 v2.3.0 h1:iRly8YaMwTBAKhn1Ybk7VSdzbnopghktCD031P8ggUE= github.com/hashicorp/hcl/v2 v2.3.0/go.mod h1:d+FwDBbOLvpAM3Z6J7gPj/VoAGkNe/gm352ZhjJ/Zv8= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/terraform-config-inspect v0.0.0-20191115094559-17f92b0546e8/go.mod h1:p+ivJws3dpqbp1iP84+npOyAmTTOLMgCzrXd3GSdn/A= github.com/hashicorp/terraform-exec v0.10.0 h1:3nh/1e3u9gYRUQGOKWp/8wPR7ABlL2F14sZMZBrp+dM= github.com/hashicorp/terraform-exec v0.10.0/go.mod h1:tOT8j1J8rP05bZBGWXfMyU3HkLi1LWyqL3Bzsc3CJjo= github.com/hashicorp/terraform-json v0.5.0 h1:7TV3/F3y7QVSuN4r9BEXqnWqrAyeOtON8f0wvREtyzs= github.com/hashicorp/terraform-json v0.5.0/go.mod 
h1:eAbqb4w0pSlRmdvl8fOyHAi/+8jnkVYN28gJkSJrLhU= github.com/hashicorp/terraform-plugin-go v0.1.0 h1:kyXZ0nkHxiRev/q18N40IbRRk4AV0zE/MDJkDM3u8dY= github.com/hashicorp/terraform-plugin-go v0.1.0/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4= -github.com/hashicorp/terraform-plugin-sdk v1.16.0 h1:NrkXMRjHErUPPTHQkZ6JIn6bByiJzGnlJzH1rVdNEuE= -github.com/hashicorp/terraform-plugin-sdk v1.16.0/go.mod h1:5sVxrwW6/xzFhZyql+Q9zXCUEJaGWcBIxBbZFLpVXOI= github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0 h1:Egv+R1tOOjPNz643KBTx3tLT6RdFGGYJcZlyLvrPcEU= github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0/go.mod h1:+12dJQebYjuU/yiq94iZUPuC66abfRBrXdpVJia3ojk= -github.com/hashicorp/terraform-plugin-test/v2 v2.1.2/go.mod h1:jerO5mrd+jVNALy8aiq+VZOg/CR8T2T1QR3jd6JKGOI= -github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596 h1:hjyO2JsNZUKT1ym+FAdlBEkGPevazYsmVgIMw7dVELg= -github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= @@ -264,13 +249,11 @@ github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LE github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.7 h1:bQGKb3vps/j0E9GfJQ03JyhRuxsvdAanXlT9BTw3mdw= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= @@ -279,7 +262,6 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mitchellh/cli v1.1.1 h1:J64v/xD7Clql+JVKSvkYojLOXu1ibnY9ZjGLwSt/89w= github.com/mitchellh/cli v1.1.1/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -311,8 +293,6 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.1 h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI= -github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E= github.com/pquerna/otp v1.3.0 h1:oJV/SkzR33anKXwQU3Of42rL4wbrffP4uvUf1SvS5Xs= github.com/pquerna/otp v1.3.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -320,10 +300,7 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -332,11 +309,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= -github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmihailenco/msgpack v4.0.1+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= @@ -344,13 +319,9 @@ github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0B github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= -github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= github.com/zclconf/go-cty v1.2.1 h1:vGMsygfmeCl4Xb6OA5U5XVAaQZ69FvoG7X2jUtQujb8= github.com/zclconf/go-cty v1.2.1/go.mod 
h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty-yaml v1.0.1 h1:up11wlgAaDvlAGENcFDnZgkn0qUJurso7k6EpURKNF8= -github.com/zclconf/go-cty-yaml v1.0.1/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -412,7 +383,6 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= From 70b915157c8d955682287b593edb997e58a8b092 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 9 Dec 2020 12:45:47 -0600 Subject: [PATCH 0176/1212] tests/lakeformation_resource: Add precheck --- aws/resource_aws_lakeformation_resource_test.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_lakeformation_resource_test.go b/aws/resource_aws_lakeformation_resource_test.go index 23d3ea14aef..4004e30aa7f 100644 --- a/aws/resource_aws_lakeformation_resource_test.go +++ b/aws/resource_aws_lakeformation_resource_test.go @@ -19,7 +19,7 @@ func TestAccAWSLakeFormationResource_basic(t *testing.T) { roleAddr := "aws_iam_role.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSLakeFormationResourceDestroy, Steps: []resource.TestStep{ @@ -40,7 +40,7 @@ func TestAccAWSLakeFormationResource_disappears(t *testing.T) { resourceName := "aws_lakeformation_resource.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSLakeFormationResourceDestroy, Steps: []resource.TestStep{ @@ -64,6 +64,7 @@ func TestAccAWSLakeFormationResource_serviceLinkedRole(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) + testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) testAccPreCheckIamServiceLinkedRole(t, "/aws-service-role/lakeformation.amazonaws.com") }, Providers: testAccProviders, @@ -90,7 +91,7 @@ func TestAccAWSLakeFormationResource_update(t *testing.T) { roleAddr := "aws_iam_role.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSLakeFormationResourceDestroy, Steps: []resource.TestStep{ From 
d9a1617a3195baa8bfc7c75750c819e834dd66c2 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 9 Dec 2020 14:02:23 -0500 Subject: [PATCH 0177/1212] New Resource: aws_ec2_carrier_gateway (#16252) * r/aws_ec2_carrier_gateway: New resource. Acceptance test output: $ make testacc TEST=./aws TESTARGS='-run=TestAccAWSEc2CarrierGateway_' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccAWSEc2CarrierGateway_ -timeout 120m === RUN TestAccAWSEc2CarrierGateway_basic === PAUSE TestAccAWSEc2CarrierGateway_basic === RUN TestAccAWSEc2CarrierGateway_disappears === PAUSE TestAccAWSEc2CarrierGateway_disappears === RUN TestAccAWSEc2CarrierGateway_Tags === PAUSE TestAccAWSEc2CarrierGateway_Tags === CONT TestAccAWSEc2CarrierGateway_basic === CONT TestAccAWSEc2CarrierGateway_Tags === CONT TestAccAWSEc2CarrierGateway_disappears resource_aws_ec2_carrier_gateway_test.go:195: skipping since no Wavelength Zones are available --- SKIP: TestAccAWSEc2CarrierGateway_disappears (1.33s) === CONT TestAccAWSEc2CarrierGateway_basic resource_aws_ec2_carrier_gateway_test.go:195: skipping since no Wavelength Zones are available --- SKIP: TestAccAWSEc2CarrierGateway_basic (1.37s) === CONT TestAccAWSEc2CarrierGateway_Tags resource_aws_ec2_carrier_gateway_test.go:195: skipping since no Wavelength Zones are available --- SKIP: TestAccAWSEc2CarrierGateway_Tags (1.37s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 1.419s * r/aws_ec2_carrier_gateway: Add test sweeper. * r/aws_ec2_carrier_gateway: Better handling of carrier gateway deletion. --- aws/internal/service/ec2/errors.go | 4 + aws/internal/service/ec2/finder/finder.go | 19 ++ aws/internal/service/ec2/waiter/status.go | 30 ++ aws/internal/service/ec2/waiter/waiter.go | 40 +++ aws/provider.go | 1 + aws/resource_aws_ec2_carrier_gateway.go | 153 +++++++++ aws/resource_aws_ec2_carrier_gateway_test.go | 305 ++++++++++++++++++ aws/resource_aws_vpc_test.go | 1 + .../docs/r/ec2_carrier_gateway.html.markdown | 47 +++ 9 files changed, 600 insertions(+) create mode 100644 aws/resource_aws_ec2_carrier_gateway.go create mode 100644 aws/resource_aws_ec2_carrier_gateway_test.go create mode 100644 website/docs/r/ec2_carrier_gateway.html.markdown diff --git a/aws/internal/service/ec2/errors.go b/aws/internal/service/ec2/errors.go index 2429d2a236a..fc253b53def 100644 --- a/aws/internal/service/ec2/errors.go +++ b/aws/internal/service/ec2/errors.go @@ -4,6 +4,10 @@ const ( ErrCodeInvalidParameterValue = "InvalidParameterValue" ) +const ( + ErrCodeInvalidCarrierGatewayIDNotFound = "InvalidCarrierGatewayID.NotFound" +) + const ( ErrCodeClientVpnEndpointIdNotFound = "InvalidClientVpnEndpointId.NotFound" ErrCodeClientVpnAuthorizationRuleNotFound = "InvalidClientVpnEndpointAuthorizationRuleNotFound" diff --git a/aws/internal/service/ec2/finder/finder.go b/aws/internal/service/ec2/finder/finder.go index 6752919b3ec..d1ece5a371b 100644 --- a/aws/internal/service/ec2/finder/finder.go +++ b/aws/internal/service/ec2/finder/finder.go @@ -6,6 +6,25 @@ import ( tfec2 "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2" ) +// CarrierGatewayByID returns the carrier gateway corresponding to the specified identifier. +// Returns nil and potentially an error if no carrier gateway is found. 
+func CarrierGatewayByID(conn *ec2.EC2, id string) (*ec2.CarrierGateway, error) { + input := &ec2.DescribeCarrierGatewaysInput{ + CarrierGatewayIds: aws.StringSlice([]string{id}), + } + + output, err := conn.DescribeCarrierGateways(input) + if err != nil { + return nil, err + } + + if output == nil || len(output.CarrierGateways) == 0 { + return nil, nil + } + + return output.CarrierGateways[0], nil +} + func ClientVpnAuthorizationRule(conn *ec2.EC2, endpointID, targetNetworkCidr, accessGroupID string) (*ec2.DescribeClientVpnAuthorizationRulesOutput, error) { filters := map[string]string{ "destination-cidr": targetNetworkCidr, diff --git a/aws/internal/service/ec2/waiter/status.go b/aws/internal/service/ec2/waiter/status.go index 3bdd28b219f..7dc175db8f0 100644 --- a/aws/internal/service/ec2/waiter/status.go +++ b/aws/internal/service/ec2/waiter/status.go @@ -12,6 +12,36 @@ import ( "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2/finder" ) +const ( + carrierGatewayStateNotFound = "NotFound" + carrierGatewayStateUnknown = "Unknown" +) + +// CarrierGatewayState fetches the CarrierGateway and its State +func CarrierGatewayState(conn *ec2.EC2, carrierGatewayID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + carrierGateway, err := finder.CarrierGatewayByID(conn, carrierGatewayID) + if tfawserr.ErrCodeEquals(err, tfec2.ErrCodeInvalidCarrierGatewayIDNotFound) { + return nil, carrierGatewayStateNotFound, nil + } + if err != nil { + return nil, carrierGatewayStateUnknown, err + } + + if carrierGateway == nil { + return nil, carrierGatewayStateNotFound, nil + } + + state := aws.StringValue(carrierGateway.State) + + if state == ec2.CarrierGatewayStateDeleted { + return nil, carrierGatewayStateNotFound, nil + } + + return carrierGateway, state, nil + } +} + // LocalGatewayRouteTableVpcAssociationState fetches the LocalGatewayRouteTableVpcAssociation and its State func LocalGatewayRouteTableVpcAssociationState(conn *ec2.EC2, localGatewayRouteTableVpcAssociationID string) resource.StateRefreshFunc { return func() (interface{}, string, error) { diff --git a/aws/internal/service/ec2/waiter/waiter.go b/aws/internal/service/ec2/waiter/waiter.go index cb597291ee9..954026e48d5 100644 --- a/aws/internal/service/ec2/waiter/waiter.go +++ b/aws/internal/service/ec2/waiter/waiter.go @@ -12,6 +12,46 @@ const ( InstanceAttributePropagationTimeout = 2 * time.Minute ) +const ( + CarrierGatewayAvailableTimeout = 5 * time.Minute + + CarrierGatewayDeletedTimeout = 5 * time.Minute +) + +func CarrierGatewayAvailable(conn *ec2.EC2, carrierGatewayID string) (*ec2.CarrierGateway, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ec2.CarrierGatewayStatePending}, + Target: []string{ec2.CarrierGatewayStateAvailable}, + Refresh: CarrierGatewayState(conn, carrierGatewayID), + Timeout: CarrierGatewayAvailableTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*ec2.CarrierGateway); ok { + return output, err + } + + return nil, err +} + +func CarrierGatewayDeleted(conn *ec2.EC2, carrierGatewayID string) (*ec2.CarrierGateway, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ec2.CarrierGatewayStateDeleting}, + Target: []string{}, + Refresh: CarrierGatewayState(conn, carrierGatewayID), + Timeout: CarrierGatewayDeletedTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*ec2.CarrierGateway); ok { + return output, err + } + + return nil, err +} + const 
( // Maximum amount of time to wait for a LocalGatewayRouteTableVpcAssociation to return Associated LocalGatewayRouteTableVpcAssociationAssociatedTimeout = 5 * time.Minute diff --git a/aws/provider.go b/aws/provider.go index 7f56d5f9c18..0fa69f14079 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -592,6 +592,7 @@ func Provider() *schema.Provider { "aws_ebs_volume": resourceAwsEbsVolume(), "aws_ec2_availability_zone_group": resourceAwsEc2AvailabilityZoneGroup(), "aws_ec2_capacity_reservation": resourceAwsEc2CapacityReservation(), + "aws_ec2_carrier_gateway": resourceAwsEc2CarrierGateway(), "aws_ec2_client_vpn_authorization_rule": resourceAwsEc2ClientVpnAuthorizationRule(), "aws_ec2_client_vpn_endpoint": resourceAwsEc2ClientVpnEndpoint(), "aws_ec2_client_vpn_network_association": resourceAwsEc2ClientVpnNetworkAssociation(), diff --git a/aws/resource_aws_ec2_carrier_gateway.go b/aws/resource_aws_ec2_carrier_gateway.go new file mode 100644 index 00000000000..02ab26e5265 --- /dev/null +++ b/aws/resource_aws_ec2_carrier_gateway.go @@ -0,0 +1,153 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + tfec2 "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2/finder" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2/waiter" +) + +func resourceAwsEc2CarrierGateway() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsEc2CarrierGatewayCreate, + Read: resourceAwsEc2CarrierGatewayRead, + Update: resourceAwsEc2CarrierGatewayUpdate, + Delete: resourceAwsEc2CarrierGatewayDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + + "tags": tagsSchema(), + + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsEc2CarrierGatewayCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + input := &ec2.CreateCarrierGatewayInput{ + TagSpecifications: ec2TagSpecificationsFromMap(d.Get("tags").(map[string]interface{}), "carrier-gateway"), + VpcId: aws.String(d.Get("vpc_id").(string)), + } + + log.Printf("[DEBUG] Creating EC2 Carrier Gateway: %s", input) + output, err := conn.CreateCarrierGateway(input) + + if err != nil { + return fmt.Errorf("error creating EC2 Carrier Gateway: %w", err) + } + + d.SetId(aws.StringValue(output.CarrierGateway.CarrierGatewayId)) + + _, err = waiter.CarrierGatewayAvailable(conn, d.Id()) + + if err != nil { + return fmt.Errorf("error waiting for EC2 Carrier Gateway (%s) to become available: %w", d.Id(), err) + } + + return resourceAwsEc2CarrierGatewayRead(d, meta) +} + +func resourceAwsEc2CarrierGatewayRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig + + carrierGateway, err := finder.CarrierGatewayByID(conn, d.Id()) + + if tfawserr.ErrCodeEquals(err, tfec2.ErrCodeInvalidCarrierGatewayIDNotFound) { + log.Printf("[WARN] EC2 Carrier Gateway 
(%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error reading EC2 Carrier Gateway (%s): %w", d.Id(), err) + } + + if carrierGateway == nil || aws.StringValue(carrierGateway.State) == ec2.CarrierGatewayStateDeleted { + log.Printf("[WARN] EC2 Carrier Gateway (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "ec2", + Region: meta.(*AWSClient).region, + AccountID: meta.(*AWSClient).accountid, + Resource: fmt.Sprintf("carrier-gateway/%s", d.Id()), + }.String() + d.Set("arn", arn) + d.Set("owner_id", carrierGateway.OwnerId) + d.Set("vpc_id", carrierGateway.VpcId) + + if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(carrierGateway.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + return fmt.Errorf("error setting tags: %w", err) + } + + return nil +} + +func resourceAwsEc2CarrierGatewayUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.Ec2UpdateTags(conn, d.Id(), o, n); err != nil { + return fmt.Errorf("error updating EC2 Carrier Gateway (%s) tags: %w", d.Id(), err) + } + } + + return resourceAwsEc2CarrierGatewayRead(d, meta) +} + +func resourceAwsEc2CarrierGatewayDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + log.Printf("[INFO] Deleting EC2 Carrier Gateway (%s)", d.Id()) + _, err := conn.DeleteCarrierGateway(&ec2.DeleteCarrierGatewayInput{ + CarrierGatewayId: aws.String(d.Id()), + }) + + if tfawserr.ErrCodeEquals(err, tfec2.ErrCodeInvalidCarrierGatewayIDNotFound) { + return nil + } + + if err != nil { + return fmt.Errorf("error deleting EC2 Carrier Gateway (%s): %w", d.Id(), err) + } + + _, err = waiter.CarrierGatewayDeleted(conn, d.Id()) + + if err != nil { + return fmt.Errorf("error waiting for EC2 Carrier Gateway (%s) to be deleted: %w", d.Id(), err) + } + + return nil +} diff --git a/aws/resource_aws_ec2_carrier_gateway_test.go b/aws/resource_aws_ec2_carrier_gateway_test.go new file mode 100644 index 00000000000..56d1e6daab3 --- /dev/null +++ b/aws/resource_aws_ec2_carrier_gateway_test.go @@ -0,0 +1,305 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + tfec2 "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2/finder" +) + +func init() { + resource.AddTestSweepers("aws_ec2_carrier_gateway", &resource.Sweeper{ + Name: "aws_ec2_carrier_gateway", + F: testSweepEc2CarrierGateway, + }) +} + +func testSweepEc2CarrierGateway(region string) error { + client, err := sharedClientForRegion(region) + if err != nil { + return fmt.Errorf("error getting client: %s", err) + } + conn := client.(*AWSClient).ec2conn + input := &ec2.DescribeCarrierGatewaysInput{} + var sweeperErrs *multierror.Error + + err = conn.DescribeCarrierGatewaysPages(input, func(page *ec2.DescribeCarrierGatewaysOutput, isLast bool) bool { + if page == nil { + return !isLast + } + + for _, 
carrierGateway := range page.CarrierGateways { + r := resourceAwsEc2CarrierGateway() + d := r.Data(nil) + d.SetId(aws.StringValue(carrierGateway.CarrierGatewayId)) + err = r.Delete(d, client) + + if err != nil { + log.Printf("[ERROR] %s", err) + sweeperErrs = multierror.Append(sweeperErrs, err) + continue + } + } + + return !isLast + }) + + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping EC2 Carrier Gateway sweep for %s: %s", region, err) + return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors + } + + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EC2 Carrier Gateways: %w", err)) + } + + return sweeperErrs.ErrorOrNil() +} + +func TestAccAWSEc2CarrierGateway_basic(t *testing.T) { + var v ec2.CarrierGateway + resourceName := "aws_ec2_carrier_gateway.test" + vpcResourceName := "aws_vpc.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSWavelengthZoneAvailable(t) }, + IDRefreshName: resourceName, + Providers: testAccProviders, + CheckDestroy: testAccCheckEc2CarrierGatewayDestroy, + Steps: []resource.TestStep{ + { + Config: testAccEc2CarrierGatewayConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckEc2CarrierGatewayExists(resourceName, &v), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "ec2", regexp.MustCompile(`carrier-gateway/cagw-.+`)), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrPair(resourceName, "vpc_id", vpcResourceName, "id"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSEc2CarrierGateway_disappears(t *testing.T) { + var v ec2.CarrierGateway + resourceName := "aws_ec2_carrier_gateway.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSWavelengthZoneAvailable(t) }, + IDRefreshName: resourceName, + Providers: testAccProviders, + CheckDestroy: testAccCheckEc2CarrierGatewayDestroy, + Steps: []resource.TestStep{ + { + Config: testAccEc2CarrierGatewayConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckEc2CarrierGatewayExists(resourceName, &v), + testAccCheckResourceDisappears(testAccProvider, resourceAwsEc2CarrierGateway(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccAWSEc2CarrierGateway_Tags(t *testing.T) { + var v ec2.CarrierGateway + resourceName := "aws_ec2_carrier_gateway.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSWavelengthZoneAvailable(t) }, + IDRefreshName: resourceName, + Providers: testAccProviders, + CheckDestroy: testAccCheckEc2CarrierGatewayDestroy, + Steps: []resource.TestStep{ + { + Config: testAccEc2CarrierGatewayConfigTags1(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckEc2CarrierGatewayExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccEc2CarrierGatewayConfigTags2(rName, "key1", "value1updated", "key2", "value2"), 
+ Check: resource.ComposeTestCheckFunc( + testAccCheckEc2CarrierGatewayExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccEc2CarrierGatewayConfigTags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckEc2CarrierGatewayExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + +func testAccCheckEc2CarrierGatewayDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).ec2conn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_ec2_carrier_gateway" { + continue + } + + out, err := finder.CarrierGatewayByID(conn, rs.Primary.ID) + if tfawserr.ErrCodeEquals(err, tfec2.ErrCodeInvalidCarrierGatewayIDNotFound) { + continue + } + if err != nil { + return err + } + if out == nil { + continue + } + if state := aws.StringValue(out.State); state != ec2.CarrierGatewayStateDeleted { + return fmt.Errorf("EC2 Carrier Gateway in incorrect state. Expected: %s, got: %s", ec2.CarrierGatewayStateDeleted, state) + } + + return err + } + + return nil +} + +func testAccCheckEc2CarrierGatewayExists(n string, v *ec2.CarrierGateway) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).ec2conn + out, err := finder.CarrierGatewayByID(conn, rs.Primary.ID) + if err != nil { + return err + } + if out == nil { + return fmt.Errorf("EC2 Carrier Gateway not found") + } + if state := aws.StringValue(out.State); state != ec2.CarrierGatewayStateAvailable { + return fmt.Errorf("EC2 Carrier Gateway in incorrect state. 
Expected: %s, got: %s", ec2.CarrierGatewayStateAvailable, state) + } + + *v = *out + + return nil + } +} + +func testAccPreCheckAWSWavelengthZoneAvailable(t *testing.T) { + conn := testAccProvider.Meta().(*AWSClient).ec2conn + + input := &ec2.DescribeAvailabilityZonesInput{ + Filters: buildEC2AttributeFilterList(map[string]string{ + "zone-type": "wavelength-zone", + "opt-in-status": "opted-in", + }), + } + + output, err := conn.DescribeAvailabilityZones(input) + + if testAccPreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } + + if output == nil || len(output.AvailabilityZones) == 0 { + t.Skip("skipping since no Wavelength Zones are available") + } +} + +func testAccEc2CarrierGatewayConfig(rName string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = %[1]q + } +} + +resource "aws_ec2_carrier_gateway" "test" { + vpc_id = aws_vpc.test.id +} +`, rName) +} + +func testAccEc2CarrierGatewayConfigTags1(rName, tagKey1, tagValue1 string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = %[1]q + } +} + +resource "aws_ec2_carrier_gateway" "test" { + vpc_id = aws_vpc.test.id + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1) +} + +func testAccEc2CarrierGatewayConfigTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = %[1]q + } +} + +resource "aws_ec2_carrier_gateway" "test" { + vpc_id = aws_vpc.test.id + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2) +} diff --git a/aws/resource_aws_vpc_test.go b/aws/resource_aws_vpc_test.go index bbf915885a9..d02d3480ff0 100644 --- a/aws/resource_aws_vpc_test.go +++ b/aws/resource_aws_vpc_test.go @@ -22,6 +22,7 @@ func init() { resource.AddTestSweepers("aws_vpc", &resource.Sweeper{ Name: "aws_vpc", Dependencies: []string{ + "aws_ec2_carrier_gateway", "aws_egress_only_internet_gateway", "aws_internet_gateway", "aws_nat_gateway", diff --git a/website/docs/r/ec2_carrier_gateway.html.markdown b/website/docs/r/ec2_carrier_gateway.html.markdown new file mode 100644 index 00000000000..805b29f21c3 --- /dev/null +++ b/website/docs/r/ec2_carrier_gateway.html.markdown @@ -0,0 +1,47 @@ +--- +subcategory: "EC2" +layout: "aws" +page_title: "AWS: aws_ec2_carrier_gateway" +description: |- + Manages an EC2 Carrier Gateway. +--- + +# Resource: aws_ec2_carrier_gateway + +Manages an EC2 Carrier Gateway. See the AWS [documentation](https://docs.aws.amazon.com/vpc/latest/userguide/Carrier_Gateway.html) for more information. + +## Example Usage + +```hcl +resource "aws_ec2_carrier_gateway" "example" { + vpc_id = aws_vpc.example.id + + tags = { + Name = "example-carrier-gateway" + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `vpc_id` - (Required) The ID of the VPC to associate with the carrier gateway. +* `tags` - (Optional) A map of tags to assign to the resource. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - The ID of the carrier gateway. +* `arn` - The ARN of the carrier gateway. +* `owner_id` - The AWS account ID of the owner of the carrier gateway. + +## Import + +`aws_ec2_carrier_gateway` can be imported using the carrier gateway's ID, +e.g. 
+
+```
+$ terraform import aws_ec2_carrier_gateway.example cagw-12345
+```

From ac142d74a9606b11e0c122289aa94be78b937c63 Mon Sep 17 00:00:00 2001
From: Brian Flad
Date: Wed, 9 Dec 2020 14:03:17 -0500
Subject: [PATCH 0178/1212] Update CHANGELOG for #16252

---
 CHANGELOG.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f2acb876614..300c9915ee8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,7 +2,8 @@
 
 FEATURES
 
-* **New Resource:** `resource_aws_glue_schema` [GH-16612]
+* **New Resource:** `aws_ec2_carrier_gateway` [GH-16252]
+* **New Resource:** `aws_glue_schema` [GH-16612]
 
 ENHANCEMENTS
 

From 7e920ba9ae69941de490c4cf8cac78ce9323d741 Mon Sep 17 00:00:00 2001
From: Dirk Avery <31492422+YakDriver@users.noreply.github.com>
Date: Wed, 9 Dec 2020 13:26:04 -0600
Subject: [PATCH 0179/1212] resource/lakeformation_resource: Improve error
 messages

Co-authored-by: Kit Ewbank
---
 aws/resource_aws_lakeformation_resource.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/aws/resource_aws_lakeformation_resource.go b/aws/resource_aws_lakeformation_resource.go
index ee715543edb..e861a77fef3 100644
--- a/aws/resource_aws_lakeformation_resource.go
+++ b/aws/resource_aws_lakeformation_resource.go
@@ -90,7 +90,7 @@ func resourceAwsLakeFormationResourceRead(d *schema.ResourceData, meta interface
 	}
 
 	if err != nil {
-		return fmt.Errorf("error reading Lake Formation Resource: %s", err)
+		return fmt.Errorf("error reading Lake Formation Resource (%s): %w", d.Id(), err)
 	}
 
 	// d.Set("resource_arn", output.ResourceInfo.ResourceArn) // output not including resource arn currently
@@ -116,7 +116,7 @@ func resourceAwsLakeFormationResourceUpdate(d *schema.ResourceData, meta interfa
 	_, err := conn.UpdateResource(input)
 
 	if err != nil {
-		return fmt.Errorf("error updating Lake Formation Resource: %s", err)
+		return fmt.Errorf("error updating Lake Formation Resource (%s): %w", d.Id(), err)
 	}
 
 	return resourceAwsLakeFormationResourceRead(d, meta)
@@ -132,7 +132,7 @@ func resourceAwsLakeFormationResourceDelete(d *schema.ResourceData, meta interfa
 	_, err := conn.DeregisterResource(input)
 
 	if err != nil {
-		return fmt.Errorf("error deregistering Lake Formation Resource: %s", err)
+		return fmt.Errorf("error deregistering Lake Formation Resource (%s): %w", d.Id(), err)
 	}
 
 	return nil

From 1529c2a167abf0545e246a0deff050dbc3eefb10 Mon Sep 17 00:00:00 2001
From: Graham Davison
Date: Wed, 9 Dec 2020 11:57:50 -0800
Subject: [PATCH 0180/1212] Adds validation for field names

---
 .../service/autoscaling/waiter/waiter.go   | 13 +++++--
 aws/resource_aws_autoscaling_group.go      | 35 ++++++++++++++++++-
 aws/resource_aws_autoscaling_group_test.go | 10 +++---
 go.mod                                     |  1 +
 .../docs/r/autoscaling_group.html.markdown |  7 ++++
 5 files changed, 58 insertions(+), 8 deletions(-)

diff --git a/aws/internal/service/autoscaling/waiter/waiter.go b/aws/internal/service/autoscaling/waiter/waiter.go
index 54e11e736c7..00aed168296 100644
--- a/aws/internal/service/autoscaling/waiter/waiter.go
+++ b/aws/internal/service/autoscaling/waiter/waiter.go
@@ -19,8 +19,17 @@ const (
 
 func InstanceRefreshCancelled(conn *autoscaling.AutoScaling, asgName, instanceRefreshId string) (*autoscaling.InstanceRefresh, error) {
 	stateConf := &resource.StateChangeConf{
-		Pending: []string{autoscaling.InstanceRefreshStatusPending, autoscaling.InstanceRefreshStatusInProgress, autoscaling.InstanceRefreshStatusCancelling},
-		Target:  []string{autoscaling.InstanceRefreshStatusCancelled},
+		Pending: []string{
+
autoscaling.InstanceRefreshStatusPending, + autoscaling.InstanceRefreshStatusInProgress, + autoscaling.InstanceRefreshStatusCancelling, + }, + Target: []string{ + autoscaling.InstanceRefreshStatusCancelled, + // Failed and Successful are also acceptable end-states + autoscaling.InstanceRefreshStatusFailed, + autoscaling.InstanceRefreshStatusSuccessful, + }, Refresh: InstanceRefreshStatus(conn, asgName, instanceRefreshId), Timeout: InstanceRefreshCancelledTimeout, } diff --git a/aws/resource_aws_autoscaling_group.go b/aws/resource_aws_autoscaling_group.go index 28ec2208eca..4efe81f95ba 100644 --- a/aws/resource_aws_autoscaling_group.go +++ b/aws/resource_aws_autoscaling_group.go @@ -16,6 +16,8 @@ import ( "github.com/aws/aws-sdk-go/service/elb" "github.com/aws/aws-sdk-go/service/elbv2" "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -510,8 +512,11 @@ func resourceAwsAutoscalingGroup() *schema.Resource { "triggers": { Type: schema.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateDiagFunc: validateAutoScalingGroupInstanceRefreshTriggerFields, + }, }, }, }, @@ -1937,3 +1942,31 @@ func cancelAutoscalingInstanceRefresh(conn *autoscaling.AutoScaling, asgName str return nil } + +func validateAutoScalingGroupInstanceRefreshTriggerFields(i interface{}, path cty.Path) diag.Diagnostics { + v, ok := i.(string) + if !ok { + return diag.Errorf("expected type to be string") + } + + if v == "launch_configuration" || v == "launch_template" || v == "mixed_instances_policy" { + return diag.Diagnostics{ + diag.Diagnostic{ + Severity: diag.Warning, + Summary: fmt.Sprintf("'%s' always triggers an instance refresh and can be removed", v), + }, + } + } + + schema := resourceAwsAutoscalingGroup().Schema + for attr, attrSchema := range schema { + if v == attr { + if attrSchema.Computed && !attrSchema.Optional { + return diag.Errorf("'%s' is a read-only parameter and cannot be used to trigger an instance refresh", v) + } + return nil + } + } + + return diag.Errorf("'%s' is not a recognized parameter name for aws_autoscaling_group", v) +} diff --git a/aws/resource_aws_autoscaling_group_test.go b/aws/resource_aws_autoscaling_group_test.go index f1366a6b0d3..b4565529be2 100644 --- a/aws/resource_aws_autoscaling_group_test.go +++ b/aws/resource_aws_autoscaling_group_test.go @@ -4450,14 +4450,14 @@ resource "aws_autoscaling_group" "test" { launch_configuration = aws_launch_configuration.test.name instance_refresh { - strategy = "Rolling" - triggers = ["tags"] + strategy = "Rolling" + triggers = ["tags"] } tag { - key = "Key" - value = "Value" - propagate_at_launch = true + key = "Key" + value = "Value" + propagate_at_launch = true } } diff --git a/go.mod b/go.mod index afb8bb7b154..b92f51c7c45 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/fatih/color v1.9.0 // indirect github.com/hashicorp/aws-sdk-go-base v0.7.0 github.com/hashicorp/go-cleanhttp v0.5.1 + github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/go-hclog v0.10.0 // indirect github.com/hashicorp/go-multierror v1.1.0 github.com/hashicorp/go-version v1.2.1 diff --git a/website/docs/r/autoscaling_group.html.markdown 
b/website/docs/r/autoscaling_group.html.markdown index ffcb4d6b0c9..1ea072bff5a 100644 --- a/website/docs/r/autoscaling_group.html.markdown +++ b/website/docs/r/autoscaling_group.html.markdown @@ -189,11 +189,18 @@ resource "aws_autoscaling_group" "example" { version = aws_launch_template.example.latest_version } + tag { + key = "Key" + value = "Value" + propagate_at_launch = true + } + instance_refresh { strategy = "Rolling" preferences { min_healthy_percentage = 50 } + triggers = ["tag"] } } From 3ecadf23232a8008199279cb5dea1cd9d13c2bd0 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 9 Dec 2020 15:33:36 -0500 Subject: [PATCH 0181/1212] resource/aws_imagebuilder_image_recipe: Ensure proper ordering of `component` configuration blocks (#16566) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16533 Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAwsImageBuilderImageRecipe_basic (36.53s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_DeviceName (38.39s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_DeleteOnTermination (37.06s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_Encrypted (38.32s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_Iops (37.85s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_KmsKeyId (35.91s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_SnapshotId (47.80s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_VolumeSize (37.66s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_VolumeType (35.44s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_NoDevice (34.26s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_VirtualName (38.65s) --- PASS: TestAccAwsImageBuilderImageRecipe_Component (44.83s) --- PASS: TestAccAwsImageBuilderImageRecipe_Description (35.08s) --- PASS: TestAccAwsImageBuilderImageRecipe_disappears (33.00s) --- PASS: TestAccAwsImageBuilderImageRecipe_Tags (66.51s) --- PASS: TestAccAwsImageBuilderImageRecipeDataSource_Arn (27.18s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- PASS: TestAccAwsImageBuilderImageRecipe_basic (41.62s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_DeviceName (42.55s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_DeleteOnTermination (41.46s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_Encrypted (40.31s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_Iops (43.00s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_KmsKeyId (42.25s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_SnapshotId (49.99s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_VolumeSize (42.76s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_VolumeType (44.07s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_NoDevice (39.93s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_VirtualName (40.95s) --- PASS: TestAccAwsImageBuilderImageRecipe_Component (43.14s) --- PASS: TestAccAwsImageBuilderImageRecipe_Description (41.88s) --- PASS: TestAccAwsImageBuilderImageRecipe_disappears (36.60s) --- PASS: TestAccAwsImageBuilderImageRecipe_Tags (75.02s) --- PASS: TestAccAwsImageBuilderImageRecipeDataSource_Arn (31.71s) ``` --- ...ta_source_aws_imagebuilder_image_recipe.go | 2 +- aws/resource_aws_imagebuilder_image_recipe.go | 6 +- 
...urce_aws_imagebuilder_image_recipe_test.go | 60 +++++++++++++++++++ .../d/imagebuilder_image_recipe.html.markdown | 2 +- .../r/imagebuilder_image_recipe.html.markdown | 2 +- 5 files changed, 66 insertions(+), 6 deletions(-) diff --git a/aws/data_source_aws_imagebuilder_image_recipe.go b/aws/data_source_aws_imagebuilder_image_recipe.go index 0fee2445097..252490c2883 100644 --- a/aws/data_source_aws_imagebuilder_image_recipe.go +++ b/aws/data_source_aws_imagebuilder_image_recipe.go @@ -76,7 +76,7 @@ func dataSourceAwsImageBuilderImageRecipe() *schema.Resource { }, }, "component": { - Type: schema.TypeSet, + Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ diff --git a/aws/resource_aws_imagebuilder_image_recipe.go b/aws/resource_aws_imagebuilder_image_recipe.go index 9cbab5cc790..acfff669805 100644 --- a/aws/resource_aws_imagebuilder_image_recipe.go +++ b/aws/resource_aws_imagebuilder_image_recipe.go @@ -120,7 +120,7 @@ func resourceAwsImageBuilderImageRecipe() *schema.Resource { }, }, "component": { - Type: schema.TypeSet, + Type: schema.TypeList, Required: true, ForceNew: true, MinItems: 1, @@ -185,8 +185,8 @@ func resourceAwsImageBuilderImageRecipeCreate(d *schema.ResourceData, meta inter input.BlockDeviceMappings = expandImageBuilderInstanceBlockDeviceMappings(v.(*schema.Set).List()) } - if v, ok := d.GetOk("component"); ok && v.(*schema.Set).Len() > 0 { - input.Components = expandImageBuilderComponentConfigurations(v.(*schema.Set).List()) + if v, ok := d.GetOk("component"); ok && len(v.([]interface{})) > 0 { + input.Components = expandImageBuilderComponentConfigurations(v.([]interface{})) } if v, ok := d.GetOk("description"); ok { diff --git a/aws/resource_aws_imagebuilder_image_recipe_test.go b/aws/resource_aws_imagebuilder_image_recipe_test.go index 1c0ad2f6e53..8d961ee1bea 100644 --- a/aws/resource_aws_imagebuilder_image_recipe_test.go +++ b/aws/resource_aws_imagebuilder_image_recipe_test.go @@ -410,6 +410,34 @@ func TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_VirtualName(t *testing }) } +func TestAccAwsImageBuilderImageRecipe_Component(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_imagebuilder_image_recipe.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsImageBuilderImageRecipeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsImageBuilderImageRecipeConfigComponent(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsImageBuilderImageRecipeExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "component.#", "3"), + resource.TestCheckResourceAttrPair(resourceName, "component.0.component_arn", "data.aws_imagebuilder_component.aws-cli-version-2-linux", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "component.1.component_arn", "data.aws_imagebuilder_component.update-linux", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "component.2.component_arn", "aws_imagebuilder_component.test", "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAwsImageBuilderImageRecipe_Description(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_imagebuilder_image_recipe.test" @@ -787,6 +815,38 @@ resource "aws_imagebuilder_image_recipe" "test" { `, rName, virtualName)) } +func 
testAccAwsImageBuilderImageRecipeConfigComponent(rName string) string { + return composeConfig( + testAccAwsImageBuilderImageRecipeConfigBase(rName), + fmt.Sprintf(` +data "aws_imagebuilder_component" "aws-cli-version-2-linux" { + arn = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:component/aws-cli-version-2-linux/1.0.0" +} + +data "aws_imagebuilder_component" "update-linux" { + arn = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:component/update-linux/1.0.0" +} + +resource "aws_imagebuilder_image_recipe" "test" { + component { + component_arn = data.aws_imagebuilder_component.aws-cli-version-2-linux.arn + } + + component { + component_arn = data.aws_imagebuilder_component.update-linux.arn + } + + component { + component_arn = aws_imagebuilder_component.test.arn + } + + name = %[1]q + parent_image = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:image/amazon-linux-2-x86/x.x.x" + version = "1.0.0" +} +`, rName)) +} + func testAccAwsImageBuilderImageRecipeConfigDescription(rName string, description string) string { return composeConfig( testAccAwsImageBuilderImageRecipeConfigBase(rName), diff --git a/website/docs/d/imagebuilder_image_recipe.html.markdown b/website/docs/d/imagebuilder_image_recipe.html.markdown index bf072a68e71..c4b2932ab92 100644 --- a/website/docs/d/imagebuilder_image_recipe.html.markdown +++ b/website/docs/d/imagebuilder_image_recipe.html.markdown @@ -40,7 +40,7 @@ In addition to all arguments above, the following attributes are exported: * `volume_type` - Type of the volume. For example, `gp2` or `io2`. * `no_device` - Whether to remove a mapping from the parent image. * `virtual_name` - Virtual device name. For example, `ephemeral0`. Instance store volumes are numbered starting from 0. -* `component` - Set of objects with components for the image recipe. +* `component` - List of objects with components for the image recipe. * `component_arn` - Amazon Resource Name (ARN) of the Image Builder Component. * `date_created` - Date the image recipe was created. * `description` - Description of the image recipe. diff --git a/website/docs/r/imagebuilder_image_recipe.html.markdown b/website/docs/r/imagebuilder_image_recipe.html.markdown index fed1b559df8..c86e180bce7 100644 --- a/website/docs/r/imagebuilder_image_recipe.html.markdown +++ b/website/docs/r/imagebuilder_image_recipe.html.markdown @@ -38,7 +38,7 @@ resource "aws_imagebuilder_image_recipe" "example" { The following arguments are required: -* `component` - (Required) Configuration block(s) with components for the image recipe. Detailed below. +* `component` - (Required) Ordered configuration block(s) with components for the image recipe. Detailed below. * `name` - (Required) Name of the image recipe. * `parent_image` - (Required) Platform of the image recipe. * `version` - (Required) Version of the image recipe. 
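The `TypeSet` to `TypeList` switch above is what makes the `component` ordering reliable: a set stores elements keyed by their hash, so iteration order is hash-derived rather than configuration order, while a list hands the elements back exactly as written. A minimal sketch of that difference, assuming `terraform-plugin-sdk/v2` and using plain ARN strings in place of the real `component` block maps:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func main() {
	// Elements in the order they appear in configuration.
	components := []interface{}{
		"arn:aws:imagebuilder:us-east-1:aws:component/aws-cli-version-2-linux/1.0.0",
		"arn:aws:imagebuilder:us-east-1:aws:component/update-linux/1.0.0",
	}

	// schema.TypeSet semantics: order comes from the element hashes.
	set := schema.NewSet(schema.HashString, components)
	fmt.Println(set.List()) // may not match the declaration order above

	// schema.TypeList semantics: the slice keeps the declaration order.
	fmt.Println(components)
}
```

This is also why the resource and data source in the patch pass `v.([]interface{})` straight through to `expandImageBuilderComponentConfigurations` instead of going via `(*schema.Set).List()`.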
From 88c71fb20338e529a750a0485bab4b94fcdc42e0 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 9 Dec 2020 15:39:09 -0500 Subject: [PATCH 0182/1212] Update CHANGELOG for #16566 --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 300c9915ee8..74e546e2f91 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## 3.21.0 (Unreleased) +NOTES + +* resource/aws_imagebuilder_image_recipe: Previously the ordering of `component` configuration blocks was not properly handled by the resource, which could cause unexpected behavior with multiple components. Existing configurations may see the component ordering corrected after upgrading. [GH-16566] + FEATURES * **New Resource:** `aws_ec2_carrier_gateway` [GH-16252] @@ -20,7 +24,9 @@ BUG FIXES * data-source/aws_ec2_transit_gateway_route_table: Prevent missing `id` attribute when not configured as argument [GH-16667] * data-source/aws_ec2_transit_gateway_vpc_attachment: Prevent missing `id` attribute when not configured as argument [GH-16667] * data-source/aws_guardduty_detector: Prevent missing `id` attribute when not configured as argument [GH-16667] +* data-source/aws_imagebuilder_image_recipe: Ensure proper ordering of `component` attribute [GH-16566] * resource/aws_backup_plan: Prevent plan-time validation error for pre-existing resources with `lifecycle` `delete_after` and/or `copy_action` `lifecycle` `delete_after` arguments configured [GH-16605] +* resource/aws_imagebuilder_image_recipe: Ensure proper ordering of `component` configuration blocks [GH-16566] * resource/aws_workspaces_directory: Fix empty custom_security_group_id & default_ou [GH-16589] ## 3.20.0 (December 03, 2020) From dfda2fecd69945e123d7bcd9f0e4c4c04db2bcf0 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 9 Dec 2020 12:39:48 -0800 Subject: [PATCH 0183/1212] Adds support for AWS_CONTAINER_CREDENTIALS_FULL_URI for authentication --- aws/provider_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/provider_test.go b/aws/provider_test.go index c83a667ca6f..8cbab7ba700 100644 --- a/aws/provider_test.go +++ b/aws/provider_test.go @@ -191,8 +191,8 @@ func testAccPreCheck(t *testing.T) { // Since we are outside the scope of the Terraform configuration we must // call Configure() to properly initialize the provider configuration. testAccProviderConfigure.Do(func() { - if os.Getenv("AWS_PROFILE") == "" && os.Getenv("AWS_ACCESS_KEY_ID") == "" { - t.Fatal("AWS_ACCESS_KEY_ID or AWS_PROFILE must be set for acceptance tests") + if os.Getenv("AWS_PROFILE") == "" && os.Getenv("AWS_ACCESS_KEY_ID") == "" && os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI") == "" { + t.Fatal("AWS_ACCESS_KEY_ID, AWS_PROFILE, or AWS_CONTAINER_CREDENTIALS_FULL_URI must be set for acceptance tests") } if os.Getenv("AWS_ACCESS_KEY_ID") != "" && os.Getenv("AWS_SECRET_ACCESS_KEY") == "" { From 44884e54d7b2ab77e4f7eac223d82de4ec38f51d Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 9 Dec 2020 15:42:13 -0500 Subject: [PATCH 0184/1212] provider: Prevent potential panics due to setting resource identifiers with raw string pointer dereferencing (#16594) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/12992 These fixes were automatically applied by `semgrep --autofix` and are always safe since `(helper/schema.ResourceData).SetId()` only accepts a `string` type. Other types would generate a Go compilation error.
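The failure mode this autofix prevents is a nil-pointer dereference: AWS SDK response fields are pointers that a service may leave unset, and `d.SetId(*resp.Thing.Id)` dereferences unconditionally, while `aws.StringValue` returns `""` for nil. A minimal, standalone sketch (not provider code; the nil pointer stands in for any unset response field):

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Simulates an SDK response field the service left unset.
	var id *string

	// aws.StringValue returns "" for a nil pointer instead of panicking,
	// so d.SetId(aws.StringValue(id)) sets an empty ID at worst.
	fmt.Printf("aws.StringValue(id) = %q\n", aws.StringValue(id))

	// The raw dereference the semgrep rule rewrites panics at runtime.
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered from:", r)
		}
	}()
	fmt.Println(*id) // panic: invalid memory address or nil pointer dereference
}
```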
Output from acceptance testing (provided as a smoke test): ``` --- PASS: TestAccDataSourceAwsApiGatewayRestApi_basic (24.51s) ``` --- .semgrep.yml | 10 ++++++++++ aws/data_source_aws_api_gateway_resource.go | 2 +- aws/data_source_aws_api_gateway_rest_api.go | 2 +- aws/data_source_aws_api_gateway_vpc_link.go | 2 +- aws/data_source_aws_cloudformation_stack.go | 2 +- aws/data_source_aws_db_snapshot.go | 2 +- aws/data_source_aws_dynamodb_table.go | 2 +- aws/data_source_aws_ebs_volume.go | 3 ++- aws/data_source_aws_efs_access_point.go | 2 +- ...data_source_aws_elastic_beanstalk_solution_stack.go | 3 ++- aws/data_source_aws_elasticache_cluster.go | 2 +- aws/data_source_aws_elasticsearch_domain.go | 2 +- aws/data_source_aws_elb.go | 2 +- aws/data_source_aws_iam_group.go | 2 +- aws/data_source_aws_iam_instance_profile.go | 2 +- aws/data_source_aws_iam_server_certificate.go | 2 +- aws/data_source_aws_instance.go | 2 +- aws/data_source_aws_lambda_alias.go | 2 +- aws/data_source_aws_lb_listener.go | 2 +- aws/data_source_aws_network_interface.go | 2 +- aws/data_source_aws_prefix_list.go | 2 +- aws/data_source_aws_security_group.go | 2 +- aws/data_source_aws_sfn_activity.go | 4 ++-- aws/data_source_aws_ssm_parameter.go | 2 +- aws/data_source_aws_ssm_patch_baseline.go | 2 +- aws/data_source_aws_subnet.go | 2 +- aws/resource_aws_acm_certificate.go | 2 +- aws/resource_aws_api_gateway_client_certificate.go | 2 +- aws/resource_aws_api_gateway_deployment.go | 2 +- aws/resource_aws_api_gateway_domain_name.go | 2 +- aws/resource_aws_api_gateway_model.go | 2 +- aws/resource_aws_api_gateway_request_validator.go | 2 +- aws/resource_aws_api_gateway_resource.go | 2 +- aws/resource_aws_api_gateway_rest_api.go | 2 +- aws/resource_aws_api_gateway_usage_plan_key.go | 2 +- aws/resource_aws_api_gateway_vpc_link.go | 2 +- aws/resource_aws_appsync_graphql_api.go | 2 +- aws/resource_aws_athena_named_query.go | 2 +- aws/resource_aws_batch_job_definition.go | 2 +- aws/resource_aws_cloud9_environment_ec2.go | 2 +- aws/resource_aws_cloudfront_distribution.go | 2 +- aws/resource_aws_cloudfront_origin_access_identity.go | 4 ++-- aws/resource_aws_cloudtrail.go | 2 +- aws/resource_aws_codedeploy_deployment_group.go | 2 +- aws/resource_aws_cognito_identity_pool.go | 2 +- aws/resource_aws_cognito_user_pool_client.go | 4 ++-- aws/resource_aws_config_remediation_configuration.go | 2 +- aws/resource_aws_db_subnet_group.go | 2 +- aws/resource_aws_default_route_table.go | 2 +- aws/resource_aws_devicefarm_project.go | 2 +- aws/resource_aws_dlm_lifecycle_policy.go | 2 +- aws/resource_aws_dms_certificate.go | 2 +- aws/resource_aws_dms_endpoint.go | 2 +- aws/resource_aws_dms_replication_subnet_group.go | 2 +- aws/resource_aws_dms_replication_task.go | 2 +- aws/resource_aws_docdb_cluster_instance.go | 2 +- aws/resource_aws_dx_connection_association.go | 2 +- aws/resource_aws_ebs_volume.go | 2 +- aws/resource_aws_ec2_capacity_reservation.go | 2 +- aws/resource_aws_ec2_client_vpn_endpoint.go | 2 +- aws/resource_aws_ec2_traffic_mirror_filter.go | 2 +- aws/resource_aws_ec2_traffic_mirror_filter_rule.go | 4 ++-- aws/resource_aws_ecr_lifecycle_policy.go | 2 +- aws/resource_aws_ecr_repository_policy.go | 6 +++--- aws/resource_aws_efs_access_point.go | 4 ++-- aws/resource_aws_efs_file_system.go | 2 +- aws/resource_aws_eip.go | 6 +++--- aws/resource_aws_elastic_beanstalk_environment.go | 2 +- aws/resource_aws_elastic_transcoder_pipeline.go | 2 +- aws/resource_aws_elastic_transcoder_preset.go | 2 +- aws/resource_aws_elasticache_parameter_group.go | 2 +- 
aws/resource_aws_elasticache_replication_group.go | 2 +- aws/resource_aws_emr_cluster.go | 2 +- aws/resource_aws_emr_instance_fleet.go | 2 +- aws/resource_aws_emr_instance_group.go | 2 +- aws/resource_aws_emr_security_configuration.go | 2 +- aws/resource_aws_fsx_windows_file_system.go | 2 +- aws/resource_aws_gamelift_alias.go | 2 +- aws/resource_aws_gamelift_build.go | 2 +- aws/resource_aws_gamelift_fleet.go | 2 +- aws/resource_aws_gamelift_game_session_queue.go | 2 +- aws/resource_aws_globalaccelerator_accelerator.go | 2 +- aws/resource_aws_globalaccelerator_listener.go | 2 +- aws/resource_aws_guardduty_detector.go | 2 +- aws/resource_aws_iam_access_key.go | 4 ++-- aws/resource_aws_iam_group.go | 2 +- aws/resource_aws_iam_instance_profile.go | 2 +- aws/resource_aws_iam_policy.go | 2 +- aws/resource_aws_iam_role.go | 2 +- aws/resource_aws_iam_service_linked_role.go | 2 +- aws/resource_aws_iam_user_login_profile.go | 2 +- aws/resource_aws_iam_user_ssh_key.go | 2 +- aws/resource_aws_inspector_assessment_target.go | 2 +- aws/resource_aws_internet_gateway.go | 2 +- aws/resource_aws_iot_thing.go | 2 +- aws/resource_aws_iot_thing_type.go | 2 +- aws/resource_aws_lambda_alias.go | 4 ++-- aws/resource_aws_lambda_event_source_mapping.go | 2 +- aws/resource_aws_launch_template.go | 2 +- aws/resource_aws_lb_listener.go | 2 +- ...esource_aws_licensemanager_license_configuration.go | 2 +- aws/resource_aws_main_route_table_association.go | 4 ++-- aws/resource_aws_mq_broker.go | 2 +- aws/resource_aws_mq_configuration.go | 2 +- aws/resource_aws_nat_gateway.go | 2 +- aws/resource_aws_network_interface.go | 2 +- aws/resource_aws_network_interface_attachment.go | 2 +- aws/resource_aws_opsworks_user_profile.go | 2 +- aws/resource_aws_organizations_account.go | 2 +- aws/resource_aws_organizations_organization.go | 2 +- aws/resource_aws_organizations_organizational_unit.go | 2 +- aws/resource_aws_qldb_ledger.go | 2 +- aws/resource_aws_rds_cluster_instance.go | 2 +- aws/resource_aws_rds_cluster_parameter_group.go | 2 +- aws/resource_aws_redshift_cluster.go | 4 ++-- aws/resource_aws_redshift_parameter_group.go | 2 +- aws/resource_aws_redshift_subnet_group.go | 2 +- aws/resource_aws_route53_health_check.go | 2 +- aws/resource_aws_route53_query_log.go | 2 +- aws/resource_aws_route_table_association.go | 2 +- aws/resource_aws_security_group.go | 2 +- aws/resource_aws_securityhub_standards_subscription.go | 2 +- aws/resource_aws_servicecatalog_portfolio.go | 2 +- aws/resource_aws_sfn_activity.go | 2 +- aws/resource_aws_shield_protection.go | 2 +- aws/resource_aws_sns_platform_application.go | 2 +- aws/resource_aws_sns_topic.go | 2 +- aws/resource_aws_sns_topic_subscription.go | 2 +- aws/resource_aws_spot_fleet_request.go | 4 ++-- aws/resource_aws_spot_instance_request.go | 2 +- aws/resource_aws_ssm_activation.go | 2 +- aws/resource_aws_ssm_association.go | 2 +- aws/resource_aws_ssm_document.go | 2 +- aws/resource_aws_ssm_maintenance_window.go | 2 +- aws/resource_aws_ssm_maintenance_window_task.go | 2 +- aws/resource_aws_ssm_patch_baseline.go | 2 +- aws/resource_aws_ssm_patch_group.go | 2 +- aws/resource_aws_transfer_server.go | 2 +- aws/resource_aws_vpc.go | 2 +- aws/resource_aws_vpc_peering_connection.go | 2 +- aws/resource_aws_waf_geo_match_set.go | 2 +- aws/resource_aws_waf_ipset.go | 2 +- aws/resource_aws_waf_rate_based_rule.go | 2 +- aws/resource_aws_waf_regex_match_set.go | 2 +- aws/resource_aws_waf_regex_pattern_set.go | 2 +- aws/resource_aws_waf_rule.go | 2 +- aws/resource_aws_waf_rule_group.go | 2 +- 
aws/resource_aws_waf_size_constraint_set.go | 2 +- aws/resource_aws_waf_sql_injection_match_set.go | 2 +- aws/resource_aws_waf_web_acl.go | 2 +- aws/resource_aws_wafregional_byte_match_set.go | 2 +- aws/resource_aws_wafregional_geo_match_set.go | 2 +- aws/resource_aws_wafregional_ipset.go | 2 +- aws/resource_aws_wafregional_rate_based_rule.go | 2 +- aws/resource_aws_wafregional_rule.go | 2 +- aws/resource_aws_wafregional_rule_group.go | 2 +- aws/resource_aws_wafregional_web_acl.go | 2 +- aws/resource_aws_workspaces_ip_group.go | 2 +- 158 files changed, 183 insertions(+), 171 deletions(-) diff --git a/.semgrep.yml b/.semgrep.yml index 3520fd50fad..bc90ac1ec1a 100644 --- a/.semgrep.yml +++ b/.semgrep.yml @@ -35,6 +35,16 @@ rules: metavariable: '$Y' regex: '^"github.com/aws/aws-sdk-go/service/[^/]+"$' severity: WARNING + + - id: aws-go-sdk-pointer-conversion-ResourceData-SetId + fix: d.SetId(aws.StringValue($VALUE)) + languages: [go] + message: Prefer AWS Go SDK pointer conversion aws.StringValue() function for dereferencing during d.SetId() + paths: + include: + - aws/ + pattern: 'd.SetId(*$VALUE)' + severity: WARNING - id: helper-schema-ResourceData-GetOk-with-extraneous-conditional languages: [go] diff --git a/aws/data_source_aws_api_gateway_resource.go b/aws/data_source_aws_api_gateway_resource.go index aa02a6c5f69..3f971a714b4 100644 --- a/aws/data_source_aws_api_gateway_resource.go +++ b/aws/data_source_aws_api_gateway_resource.go @@ -59,7 +59,7 @@ func dataSourceAwsApiGatewayResourceRead(d *schema.ResourceData, meta interface{ return fmt.Errorf("no Resources with path %q found for rest api %q", target, restApiId) } - d.SetId(*match.Id) + d.SetId(aws.StringValue(match.Id)) d.Set("path_part", match.PathPart) d.Set("parent_id", match.ParentId) diff --git a/aws/data_source_aws_api_gateway_rest_api.go b/aws/data_source_aws_api_gateway_rest_api.go index e07f70c765a..047337dc8de 100644 --- a/aws/data_source_aws_api_gateway_rest_api.go +++ b/aws/data_source_aws_api_gateway_rest_api.go @@ -105,7 +105,7 @@ func dataSourceAwsApiGatewayRestApiRead(d *schema.ResourceData, meta interface{} match := matchedApis[0] - d.SetId(*match.Id) + d.SetId(aws.StringValue(match.Id)) restApiArn := arn.ARN{ Partition: meta.(*AWSClient).partition, diff --git a/aws/data_source_aws_api_gateway_vpc_link.go b/aws/data_source_aws_api_gateway_vpc_link.go index 7ee7a55c076..46d029c4f13 100644 --- a/aws/data_source_aws_api_gateway_vpc_link.go +++ b/aws/data_source_aws_api_gateway_vpc_link.go @@ -76,7 +76,7 @@ func dataSourceAwsApiGatewayVpcLinkRead(d *schema.ResourceData, meta interface{} match := matchedVpcLinks[0] - d.SetId(*match.Id) + d.SetId(aws.StringValue(match.Id)) d.Set("name", match.Name) d.Set("status", match.Status) d.Set("status_message", match.StatusMessage) diff --git a/aws/data_source_aws_cloudformation_stack.go b/aws/data_source_aws_cloudformation_stack.go index 37c395c9a46..3678534742f 100644 --- a/aws/data_source_aws_cloudformation_stack.go +++ b/aws/data_source_aws_cloudformation_stack.go @@ -84,7 +84,7 @@ func dataSourceAwsCloudFormationStackRead(d *schema.ResourceData, meta interface return fmt.Errorf("Expected 1 CloudFormation stack (%s), found %d", name, l) } stack := out.Stacks[0] - d.SetId(*stack.StackId) + d.SetId(aws.StringValue(stack.StackId)) d.Set("description", stack.Description) d.Set("disable_rollback", stack.DisableRollback) diff --git a/aws/data_source_aws_db_snapshot.go b/aws/data_source_aws_db_snapshot.go index 4a2a0d7eff7..5c7638adde4 100644 --- a/aws/data_source_aws_db_snapshot.go 
+++ b/aws/data_source_aws_db_snapshot.go @@ -195,7 +195,7 @@ func mostRecentDbSnapshot(snapshots []*rds.DBSnapshot) *rds.DBSnapshot { } func dbSnapshotDescriptionAttributes(d *schema.ResourceData, snapshot *rds.DBSnapshot) error { - d.SetId(*snapshot.DBSnapshotIdentifier) + d.SetId(aws.StringValue(snapshot.DBSnapshotIdentifier)) d.Set("db_instance_identifier", snapshot.DBInstanceIdentifier) d.Set("db_snapshot_identifier", snapshot.DBSnapshotIdentifier) d.Set("snapshot_type", snapshot.SnapshotType) diff --git a/aws/data_source_aws_dynamodb_table.go b/aws/data_source_aws_dynamodb_table.go index 22f9c0402ff..94418453b33 100644 --- a/aws/data_source_aws_dynamodb_table.go +++ b/aws/data_source_aws_dynamodb_table.go @@ -224,7 +224,7 @@ func dataSourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error retrieving DynamoDB table: %s", err) } - d.SetId(*result.Table.TableName) + d.SetId(aws.StringValue(result.Table.TableName)) err = flattenAwsDynamoDbTableResource(d, result.Table) if err != nil { diff --git a/aws/data_source_aws_ebs_volume.go b/aws/data_source_aws_ebs_volume.go index 0ff85a21d2c..0389b9881a6 100644 --- a/aws/data_source_aws_ebs_volume.go +++ b/aws/data_source_aws_ebs_volume.go @@ -5,6 +5,7 @@ import ( "log" "sort" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -129,7 +130,7 @@ func mostRecentVolume(volumes []*ec2.Volume) *ec2.Volume { } func volumeDescriptionAttributes(d *schema.ResourceData, client *AWSClient, volume *ec2.Volume) error { - d.SetId(*volume.VolumeId) + d.SetId(aws.StringValue(volume.VolumeId)) d.Set("volume_id", volume.VolumeId) arn := arn.ARN{ diff --git a/aws/data_source_aws_efs_access_point.go b/aws/data_source_aws_efs_access_point.go index 4f47fce4619..b74127d598b 100644 --- a/aws/data_source_aws_efs_access_point.go +++ b/aws/data_source_aws_efs_access_point.go @@ -113,7 +113,7 @@ func dataSourceAwsEfsAccessPointRead(d *schema.ResourceData, meta interface{}) e log.Printf("[DEBUG] Found EFS access point: %#v", ap) - d.SetId(*ap.AccessPointId) + d.SetId(aws.StringValue(ap.AccessPointId)) fsARN := arn.ARN{ AccountID: meta.(*AWSClient).accountid, diff --git a/aws/data_source_aws_elastic_beanstalk_solution_stack.go b/aws/data_source_aws_elastic_beanstalk_solution_stack.go index 2d6fe58b1a9..70288dfa356 100644 --- a/aws/data_source_aws_elastic_beanstalk_solution_stack.go +++ b/aws/data_source_aws_elastic_beanstalk_solution_stack.go @@ -5,6 +5,7 @@ import ( "log" "regexp" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elasticbeanstalk" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -88,7 +89,7 @@ func mostRecentSolutionStack(solutionStacks []*string) *string { // populate the numerous fields that the image description returns. 
func solutionStackDescriptionAttributes(d *schema.ResourceData, solutionStack *string) error { // Simple attributes first - d.SetId(*solutionStack) + d.SetId(aws.StringValue(solutionStack)) d.Set("name", solutionStack) return nil } diff --git a/aws/data_source_aws_elasticache_cluster.go b/aws/data_source_aws_elasticache_cluster.go index ec042fef781..b8a9805f162 100644 --- a/aws/data_source_aws_elasticache_cluster.go +++ b/aws/data_source_aws_elasticache_cluster.go @@ -174,7 +174,7 @@ func dataSourceAwsElastiCacheClusterRead(d *schema.ResourceData, meta interface{ cluster := resp.CacheClusters[0] - d.SetId(*cluster.CacheClusterId) + d.SetId(aws.StringValue(cluster.CacheClusterId)) d.Set("cluster_id", cluster.CacheClusterId) d.Set("node_type", cluster.CacheNodeType) diff --git a/aws/data_source_aws_elasticsearch_domain.go b/aws/data_source_aws_elasticsearch_domain.go index afc8c231a1f..0d16b2516f5 100644 --- a/aws/data_source_aws_elasticsearch_domain.go +++ b/aws/data_source_aws_elasticsearch_domain.go @@ -294,7 +294,7 @@ func dataSourceAwsElasticSearchDomainRead(d *schema.ResourceData, meta interface ds := resp.DomainStatus - d.SetId(*ds.ARN) + d.SetId(aws.StringValue(ds.ARN)) if ds.AccessPolicies != nil && *ds.AccessPolicies != "" { policies, err := structure.NormalizeJsonString(*ds.AccessPolicies) diff --git a/aws/data_source_aws_elb.go b/aws/data_source_aws_elb.go index e9533af6533..73d41056dfa 100644 --- a/aws/data_source_aws_elb.go +++ b/aws/data_source_aws_elb.go @@ -212,7 +212,7 @@ func dataSourceAwsElbRead(d *schema.ResourceData, meta interface{}) error { if len(resp.LoadBalancerDescriptions) != 1 { return fmt.Errorf("Search returned %d results, please revise so only one is returned", len(resp.LoadBalancerDescriptions)) } - d.SetId(*resp.LoadBalancerDescriptions[0].LoadBalancerName) + d.SetId(aws.StringValue(resp.LoadBalancerDescriptions[0].LoadBalancerName)) arn := arn.ARN{ Partition: meta.(*AWSClient).partition, diff --git a/aws/data_source_aws_iam_group.go b/aws/data_source_aws_iam_group.go index 8838fa57b4e..6fdcefad045 100644 --- a/aws/data_source_aws_iam_group.go +++ b/aws/data_source_aws_iam_group.go @@ -85,7 +85,7 @@ func dataSourceAwsIAMGroupRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("no IAM group found") } - d.SetId(*group.GroupId) + d.SetId(aws.StringValue(group.GroupId)) d.Set("arn", group.Arn) d.Set("path", group.Path) d.Set("group_id", group.GroupId) diff --git a/aws/data_source_aws_iam_instance_profile.go b/aws/data_source_aws_iam_instance_profile.go index 8c636d4e1e1..8ef77865860 100644 --- a/aws/data_source_aws_iam_instance_profile.go +++ b/aws/data_source_aws_iam_instance_profile.go @@ -66,7 +66,7 @@ func dataSourceAwsIAMInstanceProfileRead(d *schema.ResourceData, meta interface{ instanceProfile := resp.InstanceProfile - d.SetId(*instanceProfile.InstanceProfileId) + d.SetId(aws.StringValue(instanceProfile.InstanceProfileId)) d.Set("arn", instanceProfile.Arn) d.Set("create_date", fmt.Sprintf("%v", instanceProfile.CreateDate)) d.Set("path", instanceProfile.Path) diff --git a/aws/data_source_aws_iam_server_certificate.go b/aws/data_source_aws_iam_server_certificate.go index c021086a1a8..5f312e47c86 100644 --- a/aws/data_source_aws_iam_server_certificate.go +++ b/aws/data_source_aws_iam_server_certificate.go @@ -134,7 +134,7 @@ func dataSourceAwsIAMServerCertificateRead(d *schema.ResourceData, meta interfac } metadata := metadatas[0] - d.SetId(*metadata.ServerCertificateId) + d.SetId(aws.StringValue(metadata.ServerCertificateId)) 
d.Set("arn", metadata.Arn) d.Set("path", metadata.Path) d.Set("name", metadata.ServerCertificateName) diff --git a/aws/data_source_aws_instance.go b/aws/data_source_aws_instance.go index 826322a6503..248b9bedd14 100644 --- a/aws/data_source_aws_instance.go +++ b/aws/data_source_aws_instance.go @@ -409,7 +409,7 @@ func dataSourceAwsInstanceRead(d *schema.ResourceData, meta interface{}) error { // Populate instance attribute fields with the returned instance func instanceDescriptionAttributes(d *schema.ResourceData, instance *ec2.Instance, conn *ec2.EC2, ignoreTagsConfig *keyvaluetags.IgnoreConfig) error { - d.SetId(*instance.InstanceId) + d.SetId(aws.StringValue(instance.InstanceId)) // Set the easy attributes d.Set("instance_state", instance.State.Name) if instance.Placement != nil { diff --git a/aws/data_source_aws_lambda_alias.go b/aws/data_source_aws_lambda_alias.go index 64ad2384911..917ff8fb0be 100644 --- a/aws/data_source_aws_lambda_alias.go +++ b/aws/data_source_aws_lambda_alias.go @@ -62,7 +62,7 @@ func dataSourceAwsLambdaAliasRead(d *schema.ResourceData, meta interface{}) erro return fmt.Errorf("Error getting Lambda alias: %s", err) } - d.SetId(*aliasConfiguration.AliasArn) + d.SetId(aws.StringValue(aliasConfiguration.AliasArn)) d.Set("arn", aliasConfiguration.AliasArn) d.Set("description", aliasConfiguration.Description) diff --git a/aws/data_source_aws_lb_listener.go b/aws/data_source_aws_lb_listener.go index 27dc8ad6edf..fed51ee8faa 100644 --- a/aws/data_source_aws_lb_listener.go +++ b/aws/data_source_aws_lb_listener.go @@ -245,7 +245,7 @@ func dataSourceAwsLbListenerRead(d *schema.ResourceData, meta interface{}) error for _, listener := range resp.Listeners { if *listener.Port == int64(port.(int)) { //log.Printf("[DEBUG] get listener arn for %s:%s: %s", lbArn, port, *listener.Port) - d.SetId(*listener.ListenerArn) + d.SetId(aws.StringValue(listener.ListenerArn)) return resourceAwsLbListenerRead(d, meta) } } diff --git a/aws/data_source_aws_network_interface.go b/aws/data_source_aws_network_interface.go index 739f820966f..76a44ec9791 100644 --- a/aws/data_source_aws_network_interface.go +++ b/aws/data_source_aws_network_interface.go @@ -165,7 +165,7 @@ func dataSourceAwsNetworkInterfaceRead(d *schema.ResourceData, meta interface{}) eni := resp.NetworkInterfaces[0] - d.SetId(*eni.NetworkInterfaceId) + d.SetId(aws.StringValue(eni.NetworkInterfaceId)) if eni.Association != nil { d.Set("association", flattenEc2NetworkInterfaceAssociation(eni.Association)) } diff --git a/aws/data_source_aws_prefix_list.go b/aws/data_source_aws_prefix_list.go index dd2dd60ed71..786160ec8f4 100644 --- a/aws/data_source_aws_prefix_list.go +++ b/aws/data_source_aws_prefix_list.go @@ -62,7 +62,7 @@ func dataSourceAwsPrefixListRead(d *schema.ResourceData, meta interface{}) error pl := resp.PrefixLists[0] - d.SetId(*pl.PrefixListId) + d.SetId(aws.StringValue(pl.PrefixListId)) d.Set("name", pl.PrefixListName) cidrs := make([]string, len(pl.Cidrs)) diff --git a/aws/data_source_aws_security_group.go b/aws/data_source_aws_security_group.go index a3ffc8f52ef..fb1b63e8c54 100644 --- a/aws/data_source_aws_security_group.go +++ b/aws/data_source_aws_security_group.go @@ -90,7 +90,7 @@ func dataSourceAwsSecurityGroupRead(d *schema.ResourceData, meta interface{}) er sg := resp.SecurityGroups[0] - d.SetId(*sg.GroupId) + d.SetId(aws.StringValue(sg.GroupId)) d.Set("name", sg.GroupName) d.Set("description", sg.Description) d.Set("vpc_id", sg.VpcId) diff --git a/aws/data_source_aws_sfn_activity.go 
b/aws/data_source_aws_sfn_activity.go index d3d6a6afe57..e08ddd11107 100644 --- a/aws/data_source_aws_sfn_activity.go +++ b/aws/data_source_aws_sfn_activity.go @@ -73,7 +73,7 @@ func dataSourceAwsSfnActivityRead(d *schema.ResourceData, meta interface{}) erro act := acts[0] - d.SetId(*act.ActivityArn) + d.SetId(aws.StringValue(act.ActivityArn)) d.Set("name", act.Name) d.Set("arn", act.ActivityArn) if err := d.Set("creation_date", act.CreationDate.Format(time.RFC3339)); err != nil { @@ -96,7 +96,7 @@ func dataSourceAwsSfnActivityRead(d *schema.ResourceData, meta interface{}) erro return fmt.Errorf("No activity found with arn %s in this region", arn) } - d.SetId(*act.ActivityArn) + d.SetId(aws.StringValue(act.ActivityArn)) d.Set("name", act.Name) d.Set("arn", act.ActivityArn) if err := d.Set("creation_date", act.CreationDate.Format(time.RFC3339)); err != nil { diff --git a/aws/data_source_aws_ssm_parameter.go b/aws/data_source_aws_ssm_parameter.go index c76575ef5ac..9d77076c45c 100644 --- a/aws/data_source_aws_ssm_parameter.go +++ b/aws/data_source_aws_ssm_parameter.go @@ -63,7 +63,7 @@ func dataAwsSsmParameterRead(d *schema.ResourceData, meta interface{}) error { } param := resp.Parameter - d.SetId(*param.Name) + d.SetId(aws.StringValue(param.Name)) arn := arn.ARN{ Partition: meta.(*AWSClient).partition, diff --git a/aws/data_source_aws_ssm_patch_baseline.go b/aws/data_source_aws_ssm_patch_baseline.go index ff776d9e76a..d2f5eccacdb 100644 --- a/aws/data_source_aws_ssm_patch_baseline.go +++ b/aws/data_source_aws_ssm_patch_baseline.go @@ -107,7 +107,7 @@ func dataAwsSsmPatchBaselineRead(d *schema.ResourceData, meta interface{}) error baseline := *filteredBaselines[0] - d.SetId(*baseline.BaselineId) + d.SetId(aws.StringValue(baseline.BaselineId)) d.Set("name", baseline.BaselineName) d.Set("description", baseline.BaselineDescription) d.Set("default_baseline", baseline.DefaultBaseline) diff --git a/aws/data_source_aws_subnet.go b/aws/data_source_aws_subnet.go index 5302e7c5ed9..3a4ce67c3aa 100644 --- a/aws/data_source_aws_subnet.go +++ b/aws/data_source_aws_subnet.go @@ -166,7 +166,7 @@ func dataSourceAwsSubnetRead(d *schema.ResourceData, meta interface{}) error { subnet := resp.Subnets[0] - d.SetId(*subnet.SubnetId) + d.SetId(aws.StringValue(subnet.SubnetId)) d.Set("vpc_id", subnet.VpcId) d.Set("availability_zone", subnet.AvailabilityZone) d.Set("availability_zone_id", subnet.AvailabilityZoneId) diff --git a/aws/resource_aws_acm_certificate.go b/aws/resource_aws_acm_certificate.go index b347c164144..e6326c96909 100644 --- a/aws/resource_aws_acm_certificate.go +++ b/aws/resource_aws_acm_certificate.go @@ -285,7 +285,7 @@ func resourceAwsAcmCertificateCreateRequested(d *schema.ResourceData, meta inter return fmt.Errorf("Error requesting certificate: %s", err) } - d.SetId(*resp.CertificateArn) + d.SetId(aws.StringValue(resp.CertificateArn)) return resourceAwsAcmCertificateRead(d, meta) } diff --git a/aws/resource_aws_api_gateway_client_certificate.go b/aws/resource_aws_api_gateway_client_certificate.go index c8f7e146687..527fd80743f 100644 --- a/aws/resource_aws_api_gateway_client_certificate.go +++ b/aws/resource_aws_api_gateway_client_certificate.go @@ -63,7 +63,7 @@ func resourceAwsApiGatewayClientCertificateCreate(d *schema.ResourceData, meta i return fmt.Errorf("Failed to generate client certificate: %s", err) } - d.SetId(*out.ClientCertificateId) + d.SetId(aws.StringValue(out.ClientCertificateId)) return resourceAwsApiGatewayClientCertificateRead(d, meta) } diff --git 
a/aws/resource_aws_api_gateway_deployment.go b/aws/resource_aws_api_gateway_deployment.go index 3f5dedc243d..05e863e3ed5 100644 --- a/aws/resource_aws_api_gateway_deployment.go +++ b/aws/resource_aws_api_gateway_deployment.go @@ -96,7 +96,7 @@ func resourceAwsApiGatewayDeploymentCreate(d *schema.ResourceData, meta interfac return fmt.Errorf("Error creating API Gateway Deployment: %s", err) } - d.SetId(*deployment.Id) + d.SetId(aws.StringValue(deployment.Id)) log.Printf("[DEBUG] API Gateway Deployment ID: %s", d.Id()) return resourceAwsApiGatewayDeploymentRead(d, meta) diff --git a/aws/resource_aws_api_gateway_domain_name.go b/aws/resource_aws_api_gateway_domain_name.go index c4d88314ee7..7febcfd1275 100644 --- a/aws/resource_aws_api_gateway_domain_name.go +++ b/aws/resource_aws_api_gateway_domain_name.go @@ -202,7 +202,7 @@ func resourceAwsApiGatewayDomainNameCreate(d *schema.ResourceData, meta interfac return fmt.Errorf("Error creating API Gateway Domain Name: %s", err) } - d.SetId(*domainName.DomainName) + d.SetId(aws.StringValue(domainName.DomainName)) return resourceAwsApiGatewayDomainNameRead(d, meta) } diff --git a/aws/resource_aws_api_gateway_model.go b/aws/resource_aws_api_gateway_model.go index bd2566ada79..3b8bce3c1a8 100644 --- a/aws/resource_aws_api_gateway_model.go +++ b/aws/resource_aws_api_gateway_model.go @@ -103,7 +103,7 @@ func resourceAwsApiGatewayModelCreate(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error creating API Gateway Model: %s", err) } - d.SetId(*model.Id) + d.SetId(aws.StringValue(model.Id)) return nil } diff --git a/aws/resource_aws_api_gateway_request_validator.go b/aws/resource_aws_api_gateway_request_validator.go index c62d9782edd..a92e6a2c266 100644 --- a/aws/resource_aws_api_gateway_request_validator.go +++ b/aws/resource_aws_api_gateway_request_validator.go @@ -72,7 +72,7 @@ func resourceAwsApiGatewayRequestValidatorCreate(d *schema.ResourceData, meta in return fmt.Errorf("Error creating Request Validator: %s", err) } - d.SetId(*out.Id) + d.SetId(aws.StringValue(out.Id)) return nil } diff --git a/aws/resource_aws_api_gateway_resource.go b/aws/resource_aws_api_gateway_resource.go index 46e91905e06..e2269ea8685 100644 --- a/aws/resource_aws_api_gateway_resource.go +++ b/aws/resource_aws_api_gateway_resource.go @@ -70,7 +70,7 @@ func resourceAwsApiGatewayResourceCreate(d *schema.ResourceData, meta interface{ return fmt.Errorf("Error creating API Gateway Resource: %s", err) } - d.SetId(*resource.Id) + d.SetId(aws.StringValue(resource.Id)) return resourceAwsApiGatewayResourceRead(d, meta) } diff --git a/aws/resource_aws_api_gateway_rest_api.go b/aws/resource_aws_api_gateway_rest_api.go index 70045677e75..ffd553657ec 100644 --- a/aws/resource_aws_api_gateway_rest_api.go +++ b/aws/resource_aws_api_gateway_rest_api.go @@ -173,7 +173,7 @@ func resourceAwsApiGatewayRestApiCreate(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error creating API Gateway: %s", err) } - d.SetId(*gateway.Id) + d.SetId(aws.StringValue(gateway.Id)) if body, ok := d.GetOk("body"); ok { log.Printf("[DEBUG] Initializing API Gateway from OpenAPI spec %s", d.Id()) diff --git a/aws/resource_aws_api_gateway_usage_plan_key.go b/aws/resource_aws_api_gateway_usage_plan_key.go index e4768e3b5d3..c672d6b51ef 100644 --- a/aws/resource_aws_api_gateway_usage_plan_key.go +++ b/aws/resource_aws_api_gateway_usage_plan_key.go @@ -77,7 +77,7 @@ func resourceAwsApiGatewayUsagePlanKeyCreate(d *schema.ResourceData, meta interf return fmt.Errorf("Error creating API Gateway Usage 
Plan Key: %s", err) } - d.SetId(*up.Id) + d.SetId(aws.StringValue(up.Id)) return resourceAwsApiGatewayUsagePlanKeyRead(d, meta) } diff --git a/aws/resource_aws_api_gateway_vpc_link.go b/aws/resource_aws_api_gateway_vpc_link.go index 857381b1bb7..9bc2ae15e98 100644 --- a/aws/resource_aws_api_gateway_vpc_link.go +++ b/aws/resource_aws_api_gateway_vpc_link.go @@ -74,7 +74,7 @@ func resourceAwsApiGatewayVpcLinkCreate(d *schema.ResourceData, meta interface{} return err } - d.SetId(*resp.Id) + d.SetId(aws.StringValue(resp.Id)) stateConf := &resource.StateChangeConf{ Pending: []string{apigateway.VpcLinkStatusPending}, diff --git a/aws/resource_aws_appsync_graphql_api.go b/aws/resource_aws_appsync_graphql_api.go index 02da6d04071..af4988538c2 100644 --- a/aws/resource_aws_appsync_graphql_api.go +++ b/aws/resource_aws_appsync_graphql_api.go @@ -242,7 +242,7 @@ func resourceAwsAppsyncGraphqlApiCreate(d *schema.ResourceData, meta interface{} return fmt.Errorf("error creating AppSync GraphQL API: %s", err) } - d.SetId(*resp.GraphqlApi.ApiId) + d.SetId(aws.StringValue(resp.GraphqlApi.ApiId)) if err := resourceAwsAppsyncSchemaPut(d, meta); err != nil { return fmt.Errorf("error creating AppSync GraphQL API (%s) Schema: %s", d.Id(), err) diff --git a/aws/resource_aws_athena_named_query.go b/aws/resource_aws_athena_named_query.go index 04bdb2711fd..004619b73e0 100644 --- a/aws/resource_aws_athena_named_query.go +++ b/aws/resource_aws_athena_named_query.go @@ -68,7 +68,7 @@ func resourceAwsAthenaNamedQueryCreate(d *schema.ResourceData, meta interface{}) if err != nil { return err } - d.SetId(*resp.NamedQueryId) + d.SetId(aws.StringValue(resp.NamedQueryId)) return resourceAwsAthenaNamedQueryRead(d, meta) } diff --git a/aws/resource_aws_batch_job_definition.go b/aws/resource_aws_batch_job_definition.go index 4451b23acde..43407c7c455 100644 --- a/aws/resource_aws_batch_job_definition.go +++ b/aws/resource_aws_batch_job_definition.go @@ -144,7 +144,7 @@ func resourceAwsBatchJobDefinitionCreate(d *schema.ResourceData, meta interface{ if err != nil { return fmt.Errorf("%s %q", err, name) } - d.SetId(*out.JobDefinitionArn) + d.SetId(aws.StringValue(out.JobDefinitionArn)) d.Set("arn", out.JobDefinitionArn) return resourceAwsBatchJobDefinitionRead(d, meta) } diff --git a/aws/resource_aws_cloud9_environment_ec2.go b/aws/resource_aws_cloud9_environment_ec2.go index 55d3e4a74de..24ddbdf312a 100644 --- a/aws/resource_aws_cloud9_environment_ec2.go +++ b/aws/resource_aws_cloud9_environment_ec2.go @@ -110,7 +110,7 @@ func resourceAwsCloud9EnvironmentEc2Create(d *schema.ResourceData, meta interfac if err != nil { return fmt.Errorf("Error creating Cloud9 EC2 Environment: %s", err) } - d.SetId(*out.EnvironmentId) + d.SetId(aws.StringValue(out.EnvironmentId)) stateConf := resource.StateChangeConf{ Pending: []string{ diff --git a/aws/resource_aws_cloudfront_distribution.go b/aws/resource_aws_cloudfront_distribution.go index e0e60f81ada..68e900ef6d3 100644 --- a/aws/resource_aws_cloudfront_distribution.go +++ b/aws/resource_aws_cloudfront_distribution.go @@ -694,7 +694,7 @@ func resourceAwsCloudFrontDistributionCreate(d *schema.ResourceData, meta interf return fmt.Errorf("error creating CloudFront Distribution: %s", err) } - d.SetId(*resp.Distribution.Id) + d.SetId(aws.StringValue(resp.Distribution.Id)) if d.Get("wait_for_deployment").(bool) { log.Printf("[DEBUG] Waiting until CloudFront Distribution (%s) is deployed", d.Id()) diff --git a/aws/resource_aws_cloudfront_origin_access_identity.go 
b/aws/resource_aws_cloudfront_origin_access_identity.go index dc7d24b0d4d..ccac12c0037 100644 --- a/aws/resource_aws_cloudfront_origin_access_identity.go +++ b/aws/resource_aws_cloudfront_origin_access_identity.go @@ -61,7 +61,7 @@ func resourceAwsCloudFrontOriginAccessIdentityCreate(d *schema.ResourceData, met if err != nil { return err } - d.SetId(*resp.CloudFrontOriginAccessIdentity.Id) + d.SetId(aws.StringValue(resp.CloudFrontOriginAccessIdentity.Id)) return resourceAwsCloudFrontOriginAccessIdentityRead(d, meta) } @@ -84,7 +84,7 @@ func resourceAwsCloudFrontOriginAccessIdentityRead(d *schema.ResourceData, meta // Update attributes from DistributionConfig flattenOriginAccessIdentityConfig(d, resp.CloudFrontOriginAccessIdentity.CloudFrontOriginAccessIdentityConfig) // Update other attributes outside of DistributionConfig - d.SetId(*resp.CloudFrontOriginAccessIdentity.Id) + d.SetId(aws.StringValue(resp.CloudFrontOriginAccessIdentity.Id)) d.Set("etag", resp.ETag) d.Set("s3_canonical_user_id", resp.CloudFrontOriginAccessIdentity.S3CanonicalUserId) d.Set("cloudfront_access_identity_path", fmt.Sprintf("origin-access-identity/cloudfront/%s", *resp.CloudFrontOriginAccessIdentity.Id)) diff --git a/aws/resource_aws_cloudtrail.go b/aws/resource_aws_cloudtrail.go index d43d80a80ef..292cdbdc5cd 100644 --- a/aws/resource_aws_cloudtrail.go +++ b/aws/resource_aws_cloudtrail.go @@ -215,7 +215,7 @@ func resourceAwsCloudTrailCreate(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] CloudTrail created: %s", t) - d.SetId(*t.Name) + d.SetId(aws.StringValue(t.Name)) // AWS CloudTrail sets newly-created trails to false. if v, ok := d.GetOk("enable_logging"); ok && v.(bool) { diff --git a/aws/resource_aws_codedeploy_deployment_group.go b/aws/resource_aws_codedeploy_deployment_group.go index b03cbae8135..9ffa32f77ee 100644 --- a/aws/resource_aws_codedeploy_deployment_group.go +++ b/aws/resource_aws_codedeploy_deployment_group.go @@ -574,7 +574,7 @@ func resourceAwsCodeDeployDeploymentGroupCreate(d *schema.ResourceData, meta int return fmt.Errorf("Error creating CodeDeploy deployment group: %s", err) } - d.SetId(*resp.DeploymentGroupId) + d.SetId(aws.StringValue(resp.DeploymentGroupId)) return resourceAwsCodeDeployDeploymentGroupRead(d, meta) } diff --git a/aws/resource_aws_cognito_identity_pool.go b/aws/resource_aws_cognito_identity_pool.go index f847b6649b8..6fcd3763eb6 100644 --- a/aws/resource_aws_cognito_identity_pool.go +++ b/aws/resource_aws_cognito_identity_pool.go @@ -142,7 +142,7 @@ func resourceAwsCognitoIdentityPoolCreate(d *schema.ResourceData, meta interface return fmt.Errorf("Error creating Cognito Identity Pool: %s", err) } - d.SetId(*entity.IdentityPoolId) + d.SetId(aws.StringValue(entity.IdentityPoolId)) return resourceAwsCognitoIdentityPoolRead(d, meta) } diff --git a/aws/resource_aws_cognito_user_pool_client.go b/aws/resource_aws_cognito_user_pool_client.go index 160427efd0a..f53a4fd8628 100644 --- a/aws/resource_aws_cognito_user_pool_client.go +++ b/aws/resource_aws_cognito_user_pool_client.go @@ -261,7 +261,7 @@ func resourceAwsCognitoUserPoolClientCreate(d *schema.ResourceData, meta interfa return fmt.Errorf("Error creating Cognito User Pool Client: %s", err) } - d.SetId(*resp.UserPoolClient.ClientId) + d.SetId(aws.StringValue(resp.UserPoolClient.ClientId)) return resourceAwsCognitoUserPoolClientRead(d, meta) } @@ -287,7 +287,7 @@ func resourceAwsCognitoUserPoolClientRead(d *schema.ResourceData, meta interface return err } - d.SetId(*resp.UserPoolClient.ClientId) + 
d.SetId(aws.StringValue(resp.UserPoolClient.ClientId)) d.Set("user_pool_id", resp.UserPoolClient.UserPoolId) d.Set("name", resp.UserPoolClient.ClientName) d.Set("explicit_auth_flows", flattenStringSet(resp.UserPoolClient.ExplicitAuthFlows)) diff --git a/aws/resource_aws_config_remediation_configuration.go b/aws/resource_aws_config_remediation_configuration.go index b25e8f9bf56..8a99c0b5338 100644 --- a/aws/resource_aws_config_remediation_configuration.go +++ b/aws/resource_aws_config_remediation_configuration.go @@ -207,7 +207,7 @@ func resourceAwsConfigRemediationConfigurationRead(d *schema.ResourceData, meta d.Set("target_type", remediationConfiguration.TargetType) d.Set("target_version", remediationConfiguration.TargetVersion) d.Set("parameter", flattenRemediationConfigurationParameters(remediationConfiguration.Parameters)) - d.SetId(*remediationConfiguration.ConfigRuleName) + d.SetId(aws.StringValue(remediationConfiguration.ConfigRuleName)) return nil } diff --git a/aws/resource_aws_db_subnet_group.go b/aws/resource_aws_db_subnet_group.go index 911a90ae03f..9571be370e0 100644 --- a/aws/resource_aws_db_subnet_group.go +++ b/aws/resource_aws_db_subnet_group.go @@ -97,7 +97,7 @@ func resourceAwsDbSubnetGroupCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error creating DB Subnet Group: %s", err) } - d.SetId(*createOpts.DBSubnetGroupName) + d.SetId(aws.StringValue(createOpts.DBSubnetGroupName)) log.Printf("[INFO] DB Subnet Group ID: %s", d.Id()) return resourceAwsDbSubnetGroupRead(d, meta) } diff --git a/aws/resource_aws_default_route_table.go b/aws/resource_aws_default_route_table.go index ae493246984..c27960876f0 100644 --- a/aws/resource_aws_default_route_table.go +++ b/aws/resource_aws_default_route_table.go @@ -184,7 +184,7 @@ func resourceAwsDefaultRouteTableRead(d *schema.ResourceData, meta interface{}) rt := resp.RouteTables[0] d.Set("default_route_table_id", rt.RouteTableId) - d.SetId(*rt.RouteTableId) + d.SetId(aws.StringValue(rt.RouteTableId)) // re-use regular AWS Route Table READ. 
This is an extra API call but saves us // from trying to manually keep parity diff --git a/aws/resource_aws_devicefarm_project.go b/aws/resource_aws_devicefarm_project.go index 08b3760c38c..503cae5febd 100644 --- a/aws/resource_aws_devicefarm_project.go +++ b/aws/resource_aws_devicefarm_project.go @@ -54,7 +54,7 @@ func resourceAwsDevicefarmProjectCreate(d *schema.ResourceData, meta interface{} } log.Printf("[DEBUG] Successsfully Created DeviceFarm Project: %s", *out.Project.Arn) - d.SetId(*out.Project.Arn) + d.SetId(aws.StringValue(out.Project.Arn)) return resourceAwsDevicefarmProjectRead(d, meta) } diff --git a/aws/resource_aws_dlm_lifecycle_policy.go b/aws/resource_aws_dlm_lifecycle_policy.go index 194dbb6066a..f52ed13360a 100644 --- a/aws/resource_aws_dlm_lifecycle_policy.go +++ b/aws/resource_aws_dlm_lifecycle_policy.go @@ -162,7 +162,7 @@ func resourceAwsDlmLifecyclePolicyCreate(d *schema.ResourceData, meta interface{ return fmt.Errorf("error creating DLM Lifecycle Policy: %s", err) } - d.SetId(*out.PolicyId) + d.SetId(aws.StringValue(out.PolicyId)) return resourceAwsDlmLifecyclePolicyRead(d, meta) } diff --git a/aws/resource_aws_dms_certificate.go b/aws/resource_aws_dms_certificate.go index c0a9849f752..799f052c512 100644 --- a/aws/resource_aws_dms_certificate.go +++ b/aws/resource_aws_dms_certificate.go @@ -118,7 +118,7 @@ func resourceAwsDmsCertificateDelete(d *schema.ResourceData, meta interface{}) e } func resourceAwsDmsCertificateSetState(d *schema.ResourceData, cert *dms.Certificate) error { - d.SetId(*cert.CertificateIdentifier) + d.SetId(aws.StringValue(cert.CertificateIdentifier)) d.Set("certificate_id", cert.CertificateIdentifier) d.Set("certificate_arn", cert.CertificateArn) diff --git a/aws/resource_aws_dms_endpoint.go b/aws/resource_aws_dms_endpoint.go index cc67a6d5789..3d5a6aef94a 100644 --- a/aws/resource_aws_dms_endpoint.go +++ b/aws/resource_aws_dms_endpoint.go @@ -690,7 +690,7 @@ func resourceAwsDmsEndpointDelete(d *schema.ResourceData, meta interface{}) erro } func resourceAwsDmsEndpointSetState(d *schema.ResourceData, endpoint *dms.Endpoint) error { - d.SetId(*endpoint.EndpointIdentifier) + d.SetId(aws.StringValue(endpoint.EndpointIdentifier)) d.Set("certificate_arn", endpoint.CertificateArn) d.Set("endpoint_arn", endpoint.EndpointArn) diff --git a/aws/resource_aws_dms_replication_subnet_group.go b/aws/resource_aws_dms_replication_subnet_group.go index a73ffc6a66a..03147680ca4 100644 --- a/aws/resource_aws_dms_replication_subnet_group.go +++ b/aws/resource_aws_dms_replication_subnet_group.go @@ -169,7 +169,7 @@ func resourceAwsDmsReplicationSubnetGroupDelete(d *schema.ResourceData, meta int } func resourceAwsDmsReplicationSubnetGroupSetState(d *schema.ResourceData, group *dms.ReplicationSubnetGroup) error { - d.SetId(*group.ReplicationSubnetGroupIdentifier) + d.SetId(aws.StringValue(group.ReplicationSubnetGroupIdentifier)) subnet_ids := []string{} for _, subnet := range group.Subnets { diff --git a/aws/resource_aws_dms_replication_task.go b/aws/resource_aws_dms_replication_task.go index 4c53a2934eb..ac1e9b05d92 100644 --- a/aws/resource_aws_dms_replication_task.go +++ b/aws/resource_aws_dms_replication_task.go @@ -282,7 +282,7 @@ func resourceAwsDmsReplicationTaskDelete(d *schema.ResourceData, meta interface{ } func resourceAwsDmsReplicationTaskSetState(d *schema.ResourceData, task *dms.ReplicationTask) error { - d.SetId(*task.ReplicationTaskIdentifier) + d.SetId(aws.StringValue(task.ReplicationTaskIdentifier)) d.Set("migration_type", task.MigrationType) 
d.Set("replication_instance_arn", task.ReplicationInstanceArn) diff --git a/aws/resource_aws_docdb_cluster_instance.go b/aws/resource_aws_docdb_cluster_instance.go index f43d1c35938..ba2fff4205d 100644 --- a/aws/resource_aws_docdb_cluster_instance.go +++ b/aws/resource_aws_docdb_cluster_instance.go @@ -226,7 +226,7 @@ func resourceAwsDocDBClusterInstanceCreate(d *schema.ResourceData, meta interfac return fmt.Errorf("error creating DocDB Instance: %s", err) } - d.SetId(*resp.DBInstance.DBInstanceIdentifier) + d.SetId(aws.StringValue(resp.DBInstance.DBInstanceIdentifier)) // reuse db_instance refresh func stateConf := &resource.StateChangeConf{ diff --git a/aws/resource_aws_dx_connection_association.go b/aws/resource_aws_dx_connection_association.go index 08ca1dd2dcc..a4cb6ddb3a8 100644 --- a/aws/resource_aws_dx_connection_association.go +++ b/aws/resource_aws_dx_connection_association.go @@ -43,7 +43,7 @@ func resourceAwsDxConnectionAssociationCreate(d *schema.ResourceData, meta inter return err } - d.SetId(*resp.ConnectionId) + d.SetId(aws.StringValue(resp.ConnectionId)) return nil } diff --git a/aws/resource_aws_ebs_volume.go b/aws/resource_aws_ebs_volume.go index 439287fada5..871890bdd75 100644 --- a/aws/resource_aws_ebs_volume.go +++ b/aws/resource_aws_ebs_volume.go @@ -160,7 +160,7 @@ func resourceAwsEbsVolumeCreate(d *schema.ResourceData, meta interface{}) error *result.VolumeId, err) } - d.SetId(*result.VolumeId) + d.SetId(aws.StringValue(result.VolumeId)) return resourceAwsEbsVolumeRead(d, meta) } diff --git a/aws/resource_aws_ec2_capacity_reservation.go b/aws/resource_aws_ec2_capacity_reservation.go index ea79c17566d..d07ca865ab0 100644 --- a/aws/resource_aws_ec2_capacity_reservation.go +++ b/aws/resource_aws_ec2_capacity_reservation.go @@ -158,7 +158,7 @@ func resourceAwsEc2CapacityReservationCreate(d *schema.ResourceData, meta interf if err != nil { return fmt.Errorf("Error creating EC2 Capacity Reservation: %s", err) } - d.SetId(*out.CapacityReservation.CapacityReservationId) + d.SetId(aws.StringValue(out.CapacityReservation.CapacityReservationId)) return resourceAwsEc2CapacityReservationRead(d, meta) } diff --git a/aws/resource_aws_ec2_client_vpn_endpoint.go b/aws/resource_aws_ec2_client_vpn_endpoint.go index 86f47f6fd34..b9124f053b0 100644 --- a/aws/resource_aws_ec2_client_vpn_endpoint.go +++ b/aws/resource_aws_ec2_client_vpn_endpoint.go @@ -191,7 +191,7 @@ func resourceAwsEc2ClientVpnEndpointCreate(d *schema.ResourceData, meta interfac return fmt.Errorf("Error creating Client VPN endpoint: %w", err) } - d.SetId(*resp.ClientVpnEndpointId) + d.SetId(aws.StringValue(resp.ClientVpnEndpointId)) return resourceAwsEc2ClientVpnEndpointRead(d, meta) } diff --git a/aws/resource_aws_ec2_traffic_mirror_filter.go b/aws/resource_aws_ec2_traffic_mirror_filter.go index abc2216cbbb..e08a31b35ab 100644 --- a/aws/resource_aws_ec2_traffic_mirror_filter.go +++ b/aws/resource_aws_ec2_traffic_mirror_filter.go @@ -59,7 +59,7 @@ func resourceAwsEc2TrafficMirrorFilterCreate(d *schema.ResourceData, meta interf return fmt.Errorf("Error while creating traffic filter %s", err) } - d.SetId(*out.TrafficMirrorFilter.TrafficMirrorFilterId) + d.SetId(aws.StringValue(out.TrafficMirrorFilter.TrafficMirrorFilterId)) if v, ok := d.GetOk("network_services"); ok { input := &ec2.ModifyTrafficMirrorFilterNetworkServicesInput{ diff --git a/aws/resource_aws_ec2_traffic_mirror_filter_rule.go b/aws/resource_aws_ec2_traffic_mirror_filter_rule.go index 815b843e5c7..992391d9869 100644 --- 
a/aws/resource_aws_ec2_traffic_mirror_filter_rule.go +++ b/aws/resource_aws_ec2_traffic_mirror_filter_rule.go @@ -137,7 +137,7 @@ func resourceAwsEc2TrafficMirrorFilterRuleCreate(d *schema.ResourceData, meta in return fmt.Errorf("error creating EC2 Traffic Mirror Filter Rule (%s): %w", filterId, err) } - d.SetId(*out.TrafficMirrorFilterRule.TrafficMirrorFilterRuleId) + d.SetId(aws.StringValue(out.TrafficMirrorFilterRule.TrafficMirrorFilterRuleId)) return resourceAwsEc2TrafficMirrorFilterRuleRead(d, meta) } @@ -167,7 +167,7 @@ func resourceAwsEc2TrafficMirrorFilterRuleRead(d *schema.ResourceData, meta inte return nil } - d.SetId(*rule.TrafficMirrorFilterRuleId) + d.SetId(aws.StringValue(rule.TrafficMirrorFilterRuleId)) d.Set("traffic_mirror_filter_id", rule.TrafficMirrorFilterId) d.Set("destination_cidr_block", rule.DestinationCidrBlock) d.Set("source_cidr_block", rule.SourceCidrBlock) diff --git a/aws/resource_aws_ecr_lifecycle_policy.go b/aws/resource_aws_ecr_lifecycle_policy.go index aae91879514..9bde7e53c7e 100644 --- a/aws/resource_aws_ecr_lifecycle_policy.go +++ b/aws/resource_aws_ecr_lifecycle_policy.go @@ -50,7 +50,7 @@ func resourceAwsEcrLifecyclePolicyCreate(d *schema.ResourceData, meta interface{ if err != nil { return err } - d.SetId(*resp.RepositoryName) + d.SetId(aws.StringValue(resp.RepositoryName)) d.Set("registry_id", resp.RegistryId) return resourceAwsEcrLifecyclePolicyRead(d, meta) } diff --git a/aws/resource_aws_ecr_repository_policy.go b/aws/resource_aws_ecr_repository_policy.go index d806e6052ac..ba710623901 100644 --- a/aws/resource_aws_ecr_repository_policy.go +++ b/aws/resource_aws_ecr_repository_policy.go @@ -76,7 +76,7 @@ func resourceAwsEcrRepositoryPolicyCreate(d *schema.ResourceData, meta interface log.Printf("[DEBUG] ECR repository policy created: %s", *repositoryPolicy.RepositoryName) - d.SetId(*repositoryPolicy.RepositoryName) + d.SetId(aws.StringValue(repositoryPolicy.RepositoryName)) d.Set("registry_id", repositoryPolicy.RegistryId) return resourceAwsEcrRepositoryPolicyRead(d, meta) @@ -106,7 +106,7 @@ func resourceAwsEcrRepositoryPolicyRead(d *schema.ResourceData, meta interface{} repositoryPolicy := out - d.SetId(*repositoryPolicy.RepositoryName) + d.SetId(aws.StringValue(repositoryPolicy.RepositoryName)) d.Set("repository", repositoryPolicy.RepositoryName) d.Set("registry_id", repositoryPolicy.RegistryId) d.Set("policy", repositoryPolicy.PolicyText) @@ -152,7 +152,7 @@ func resourceAwsEcrRepositoryPolicyUpdate(d *schema.ResourceData, meta interface repositoryPolicy := *out - d.SetId(*repositoryPolicy.RepositoryName) + d.SetId(aws.StringValue(repositoryPolicy.RepositoryName)) d.Set("registry_id", repositoryPolicy.RegistryId) return nil diff --git a/aws/resource_aws_efs_access_point.go b/aws/resource_aws_efs_access_point.go index 976998ee73a..b392192db4e 100644 --- a/aws/resource_aws_efs_access_point.go +++ b/aws/resource_aws_efs_access_point.go @@ -142,7 +142,7 @@ func resourceAwsEfsAccessPointCreate(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("error creating EFS Access Point for File System (%s): %w", fsId, err) } - d.SetId(*ap.AccessPointId) + d.SetId(aws.StringValue(ap.AccessPointId)) log.Printf("[INFO] EFS access point ID: %s", d.Id()) stateConf := &resource.StateChangeConf{ @@ -218,7 +218,7 @@ func resourceAwsEfsAccessPointRead(d *schema.ResourceData, meta interface{}) err log.Printf("[DEBUG] Found EFS access point: %#v", ap) - d.SetId(*ap.AccessPointId) + d.SetId(aws.StringValue(ap.AccessPointId)) fsARN := arn.ARN{ AccountID: 
meta.(*AWSClient).accountid, diff --git a/aws/resource_aws_efs_file_system.go b/aws/resource_aws_efs_file_system.go index aa9bb0e7300..3ee30becd2b 100644 --- a/aws/resource_aws_efs_file_system.go +++ b/aws/resource_aws_efs_file_system.go @@ -158,7 +158,7 @@ func resourceAwsEfsFileSystemCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error creating EFS file system: %s", err) } - d.SetId(*fs.FileSystemId) + d.SetId(aws.StringValue(fs.FileSystemId)) log.Printf("[INFO] EFS file system ID: %s", d.Id()) stateConf := &resource.StateChangeConf{ diff --git a/aws/resource_aws_eip.go b/aws/resource_aws_eip.go index cb2a57d0c06..20a5c8c1896 100644 --- a/aws/resource_aws_eip.go +++ b/aws/resource_aws_eip.go @@ -167,9 +167,9 @@ func resourceAwsEipCreate(d *schema.ResourceData, meta interface{}) error { // it defaults to using the public IP log.Printf("[DEBUG] EIP Allocate: %#v", allocResp) if d.Get("domain").(string) == ec2.DomainTypeVpc { - d.SetId(*allocResp.AllocationId) + d.SetId(aws.StringValue(allocResp.AllocationId)) } else { - d.SetId(*allocResp.PublicIp) + d.SetId(aws.StringValue(allocResp.PublicIp)) } log.Printf("[INFO] EIP ID: %s (domain: %v)", d.Id(), *allocResp.Domain) @@ -309,7 +309,7 @@ func resourceAwsEipRead(d *schema.ResourceData, meta interface{}) error { // This allows users to import the EIP based on the IP if they are in a VPC if *address.Domain == ec2.DomainTypeVpc && net.ParseIP(id) != nil { log.Printf("[DEBUG] Re-assigning EIP ID (%s) to it's Allocation ID (%s)", d.Id(), *address.AllocationId) - d.SetId(*address.AllocationId) + d.SetId(aws.StringValue(address.AllocationId)) } if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(address.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { diff --git a/aws/resource_aws_elastic_beanstalk_environment.go b/aws/resource_aws_elastic_beanstalk_environment.go index 1eec93a1b1b..5123112f589 100644 --- a/aws/resource_aws_elastic_beanstalk_environment.go +++ b/aws/resource_aws_elastic_beanstalk_environment.go @@ -284,7 +284,7 @@ func resourceAwsElasticBeanstalkEnvironmentCreate(d *schema.ResourceData, meta i } // Assign the application name as the resource ID - d.SetId(*resp.EnvironmentId) + d.SetId(aws.StringValue(resp.EnvironmentId)) waitForReadyTimeOut, err := time.ParseDuration(d.Get("wait_for_ready_timeout").(string)) if err != nil { diff --git a/aws/resource_aws_elastic_transcoder_pipeline.go b/aws/resource_aws_elastic_transcoder_pipeline.go index 5dd82877a43..64df5e9d8a9 100644 --- a/aws/resource_aws_elastic_transcoder_pipeline.go +++ b/aws/resource_aws_elastic_transcoder_pipeline.go @@ -225,7 +225,7 @@ func resourceAwsElasticTranscoderPipelineCreate(d *schema.ResourceData, meta int return fmt.Errorf("Error creating Elastic Transcoder Pipeline: %s", err) } - d.SetId(*resp.Pipeline.Id) + d.SetId(aws.StringValue(resp.Pipeline.Id)) for _, w := range resp.Warnings { log.Printf("[WARN] Elastic Transcoder Pipeline %v: %v", *w.Code, *w.Message) diff --git a/aws/resource_aws_elastic_transcoder_preset.go b/aws/resource_aws_elastic_transcoder_preset.go index 82115c22042..7b7cec56dbb 100644 --- a/aws/resource_aws_elastic_transcoder_preset.go +++ b/aws/resource_aws_elastic_transcoder_preset.go @@ -348,7 +348,7 @@ func resourceAwsElasticTranscoderPresetCreate(d *schema.ResourceData, meta inter log.Printf("[WARN] Elastic Transcoder Preset: %s", *resp.Warning) } - d.SetId(*resp.Preset.Id) + d.SetId(aws.StringValue(resp.Preset.Id)) d.Set("arn", resp.Preset.Arn) return nil diff --git 
a/aws/resource_aws_elasticache_parameter_group.go b/aws/resource_aws_elasticache_parameter_group.go index 7f7f75a98a2..0be28e769de 100644 --- a/aws/resource_aws_elasticache_parameter_group.go +++ b/aws/resource_aws_elasticache_parameter_group.go @@ -80,7 +80,7 @@ func resourceAwsElasticacheParameterGroupCreate(d *schema.ResourceData, meta int return fmt.Errorf("Error creating Cache Parameter Group: %s", err) } - d.SetId(*resp.CacheParameterGroup.CacheParameterGroupName) + d.SetId(aws.StringValue(resp.CacheParameterGroup.CacheParameterGroupName)) log.Printf("[INFO] Cache Parameter Group ID: %s", d.Id()) return resourceAwsElasticacheParameterGroupUpdate(d, meta) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index a72737d5e3c..0473bc2863a 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -369,7 +369,7 @@ func resourceAwsElasticacheReplicationGroupCreate(d *schema.ResourceData, meta i return fmt.Errorf("Error creating Elasticache Replication Group: %w", err) } - d.SetId(*resp.ReplicationGroup.ReplicationGroupId) + d.SetId(aws.StringValue(resp.ReplicationGroup.ReplicationGroupId)) pending := []string{"creating", "modifying", "restoring", "snapshotting"} stateConf := &resource.StateChangeConf{ diff --git a/aws/resource_aws_emr_cluster.go b/aws/resource_aws_emr_cluster.go index 0e235c9a3af..0f6810c7660 100644 --- a/aws/resource_aws_emr_cluster.go +++ b/aws/resource_aws_emr_cluster.go @@ -922,7 +922,7 @@ func resourceAwsEMRClusterCreate(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("error running EMR Job Flow: %s", err) } - d.SetId(*resp.JobFlowId) + d.SetId(aws.StringValue(resp.JobFlowId)) // This value can only be obtained through a deprecated function d.Set("keep_job_flow_alive_when_no_steps", params.Instances.KeepJobFlowAliveWhenNoSteps) diff --git a/aws/resource_aws_emr_instance_fleet.go b/aws/resource_aws_emr_instance_fleet.go index 0ed7c90d8f3..ea59834cb88 100644 --- a/aws/resource_aws_emr_instance_fleet.go +++ b/aws/resource_aws_emr_instance_fleet.go @@ -236,7 +236,7 @@ func resourceAwsEMRInstanceFleetCreate(d *schema.ResourceData, meta interface{}) if resp == nil { return fmt.Errorf("error creating instance fleet: no instance fleet returned") } - d.SetId(*resp.InstanceFleetId) + d.SetId(aws.StringValue(resp.InstanceFleetId)) return nil } diff --git a/aws/resource_aws_emr_instance_group.go b/aws/resource_aws_emr_instance_group.go index 86125e45e3e..16651af9d05 100644 --- a/aws/resource_aws_emr_instance_group.go +++ b/aws/resource_aws_emr_instance_group.go @@ -185,7 +185,7 @@ func resourceAwsEMRInstanceGroupCreate(d *schema.ResourceData, meta interface{}) if resp == nil || len(resp.InstanceGroupIds) == 0 { return fmt.Errorf("Error creating instance groups: no instance group returned") } - d.SetId(*resp.InstanceGroupIds[0]) + d.SetId(aws.StringValue(resp.InstanceGroupIds[0])) if err := waitForEmrInstanceGroupStateRunning(conn, d.Get("cluster_id").(string), d.Id(), emrInstanceGroupCreateTimeout); err != nil { return fmt.Errorf("error waiting for EMR Instance Group (%s) creation: %s", d.Id(), err) diff --git a/aws/resource_aws_emr_security_configuration.go b/aws/resource_aws_emr_security_configuration.go index d36851e4806..7803338901d 100644 --- a/aws/resource_aws_emr_security_configuration.go +++ b/aws/resource_aws_emr_security_configuration.go @@ -75,7 +75,7 @@ func resourceAwsEmrSecurityConfigurationCreate(d *schema.ResourceData, 
meta inte return err } - d.SetId(*resp.Name) + d.SetId(aws.StringValue(resp.Name)) return resourceAwsEmrSecurityConfigurationRead(d, meta) } diff --git a/aws/resource_aws_fsx_windows_file_system.go b/aws/resource_aws_fsx_windows_file_system.go index 176423d198b..8ccecd2247b 100644 --- a/aws/resource_aws_fsx_windows_file_system.go +++ b/aws/resource_aws_fsx_windows_file_system.go @@ -274,7 +274,7 @@ func resourceAwsFsxWindowsFileSystemCreate(d *schema.ResourceData, meta interfac return fmt.Errorf("Error creating FSx filesystem: %s", err) } - d.SetId(*result.FileSystem.FileSystemId) + d.SetId(aws.StringValue(result.FileSystem.FileSystemId)) log.Println("[DEBUG] Waiting for filesystem to become available") diff --git a/aws/resource_aws_gamelift_alias.go b/aws/resource_aws_gamelift_alias.go index 294375f2e39..2b2732366d2 100644 --- a/aws/resource_aws_gamelift_alias.go +++ b/aws/resource_aws_gamelift_alias.go @@ -85,7 +85,7 @@ func resourceAwsGameliftAliasCreate(d *schema.ResourceData, meta interface{}) er return err } - d.SetId(*out.Alias.AliasId) + d.SetId(aws.StringValue(out.Alias.AliasId)) return resourceAwsGameliftAliasRead(d, meta) } diff --git a/aws/resource_aws_gamelift_build.go b/aws/resource_aws_gamelift_build.go index c8f657927b8..8ffb9a645c4 100644 --- a/aws/resource_aws_gamelift_build.go +++ b/aws/resource_aws_gamelift_build.go @@ -108,7 +108,7 @@ func resourceAwsGameliftBuildCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error creating Gamelift build client: %s", err) } - d.SetId(*out.Build.BuildId) + d.SetId(aws.StringValue(out.Build.BuildId)) stateConf := resource.StateChangeConf{ Pending: []string{gamelift.BuildStatusInitialized}, diff --git a/aws/resource_aws_gamelift_fleet.go b/aws/resource_aws_gamelift_fleet.go index ff06c9a2042..61539531478 100644 --- a/aws/resource_aws_gamelift_fleet.go +++ b/aws/resource_aws_gamelift_fleet.go @@ -258,7 +258,7 @@ func resourceAwsGameliftFleetCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("error creating GameLift Fleet (%s): %w", d.Get("name").(string), err) } - d.SetId(*out.FleetAttributes.FleetId) + d.SetId(aws.StringValue(out.FleetAttributes.FleetId)) stateConf := &resource.StateChangeConf{ Pending: []string{ diff --git a/aws/resource_aws_gamelift_game_session_queue.go b/aws/resource_aws_gamelift_game_session_queue.go index 7c685e45a39..19f1f025964 100644 --- a/aws/resource_aws_gamelift_game_session_queue.go +++ b/aws/resource_aws_gamelift_game_session_queue.go @@ -81,7 +81,7 @@ func resourceAwsGameliftGameSessionQueueCreate(d *schema.ResourceData, meta inte return fmt.Errorf("error creating Gamelift Game Session Queue: %s", err) } - d.SetId(*out.GameSessionQueue.Name) + d.SetId(aws.StringValue(out.GameSessionQueue.Name)) return resourceAwsGameliftGameSessionQueueRead(d, meta) } diff --git a/aws/resource_aws_globalaccelerator_accelerator.go b/aws/resource_aws_globalaccelerator_accelerator.go index 59b43bdcbfc..a1fefbd9d5a 100644 --- a/aws/resource_aws_globalaccelerator_accelerator.go +++ b/aws/resource_aws_globalaccelerator_accelerator.go @@ -126,7 +126,7 @@ func resourceAwsGlobalAcceleratorAcceleratorCreate(d *schema.ResourceData, meta return fmt.Errorf("Error creating Global Accelerator accelerator: %s", err) } - d.SetId(*resp.Accelerator.AcceleratorArn) + d.SetId(aws.StringValue(resp.Accelerator.AcceleratorArn)) err = resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, d.Id()) if err != nil { diff --git a/aws/resource_aws_globalaccelerator_listener.go 
b/aws/resource_aws_globalaccelerator_listener.go index eab281b04f8..54d89e761df 100644 --- a/aws/resource_aws_globalaccelerator_listener.go +++ b/aws/resource_aws_globalaccelerator_listener.go @@ -89,7 +89,7 @@ func resourceAwsGlobalAcceleratorListenerCreate(d *schema.ResourceData, meta int return fmt.Errorf("Error creating Global Accelerator listener: %s", err) } - d.SetId(*resp.Listener.ListenerArn) + d.SetId(aws.StringValue(resp.Listener.ListenerArn)) // Creating a listener triggers the accelerator to change status to InPending stateConf := &resource.StateChangeConf{ diff --git a/aws/resource_aws_guardduty_detector.go b/aws/resource_aws_guardduty_detector.go index 0846454cec5..28e97bbfeda 100644 --- a/aws/resource_aws_guardduty_detector.go +++ b/aws/resource_aws_guardduty_detector.go @@ -70,7 +70,7 @@ func resourceAwsGuardDutyDetectorCreate(d *schema.ResourceData, meta interface{} if err != nil { return fmt.Errorf("Creating GuardDuty Detector failed: %s", err.Error()) } - d.SetId(*output.DetectorId) + d.SetId(aws.StringValue(output.DetectorId)) return resourceAwsGuardDutyDetectorRead(d, meta) } diff --git a/aws/resource_aws_iam_access_key.go b/aws/resource_aws_iam_access_key.go index f9863ca26fb..e402f1834a6 100644 --- a/aws/resource_aws_iam_access_key.go +++ b/aws/resource_aws_iam_access_key.go @@ -79,7 +79,7 @@ func resourceAwsIamAccessKeyCreate(d *schema.ResourceData, meta interface{}) err ) } - d.SetId(*createResp.AccessKey.AccessKeyId) + d.SetId(aws.StringValue(createResp.AccessKey.AccessKeyId)) if createResp.AccessKey == nil || createResp.AccessKey.SecretAccessKey == nil { return fmt.Errorf("CreateAccessKey response did not contain a Secret Access Key as expected") @@ -147,7 +147,7 @@ func resourceAwsIamAccessKeyRead(d *schema.ResourceData, meta interface{}) error } func resourceAwsIamAccessKeyReadResult(d *schema.ResourceData, key *iam.AccessKeyMetadata) error { - d.SetId(*key.AccessKeyId) + d.SetId(aws.StringValue(key.AccessKeyId)) if err := d.Set("user", key.UserName); err != nil { return err } diff --git a/aws/resource_aws_iam_group.go b/aws/resource_aws_iam_group.go index 33473a81f80..4e26c4a194f 100644 --- a/aws/resource_aws_iam_group.go +++ b/aws/resource_aws_iam_group.go @@ -61,7 +61,7 @@ func resourceAwsIamGroupCreate(d *schema.ResourceData, meta interface{}) error { if err != nil { return fmt.Errorf("Error creating IAM Group %s: %s", name, err) } - d.SetId(*createResp.Group.GroupName) + d.SetId(aws.StringValue(createResp.Group.GroupName)) return resourceAwsIamGroupReadResult(d, createResp.Group) } diff --git a/aws/resource_aws_iam_instance_profile.go b/aws/resource_aws_iam_instance_profile.go index 6332881152b..ca4a422181e 100644 --- a/aws/resource_aws_iam_instance_profile.go +++ b/aws/resource_aws_iam_instance_profile.go @@ -234,7 +234,7 @@ func resourceAwsIamInstanceProfileDelete(d *schema.ResourceData, meta interface{ } func instanceProfileReadResult(d *schema.ResourceData, result *iam.InstanceProfile) error { - d.SetId(*result.InstanceProfileName) + d.SetId(aws.StringValue(result.InstanceProfileName)) if err := d.Set("name", result.InstanceProfileName); err != nil { return err } diff --git a/aws/resource_aws_iam_policy.go b/aws/resource_aws_iam_policy.go index c83ab1ba616..0a44f36cabc 100644 --- a/aws/resource_aws_iam_policy.go +++ b/aws/resource_aws_iam_policy.go @@ -95,7 +95,7 @@ func resourceAwsIamPolicyCreate(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Error creating IAM policy %s: %s", name, err) } - d.SetId(*response.Policy.Arn) + 
d.SetId(aws.StringValue(response.Policy.Arn)) return resourceAwsIamPolicyRead(d, meta) } diff --git a/aws/resource_aws_iam_role.go b/aws/resource_aws_iam_role.go index 44f8aaa5699..008672a8332 100644 --- a/aws/resource_aws_iam_role.go +++ b/aws/resource_aws_iam_role.go @@ -172,7 +172,7 @@ func resourceAwsIamRoleCreate(d *schema.ResourceData, meta interface{}) error { if err != nil { return fmt.Errorf("Error creating IAM Role %s: %s", name, err) } - d.SetId(*createResp.Role.RoleName) + d.SetId(aws.StringValue(createResp.Role.RoleName)) return resourceAwsIamRoleRead(d, meta) } diff --git a/aws/resource_aws_iam_service_linked_role.go b/aws/resource_aws_iam_service_linked_role.go index 862573cb077..cff1b210ffd 100644 --- a/aws/resource_aws_iam_service_linked_role.go +++ b/aws/resource_aws_iam_service_linked_role.go @@ -100,7 +100,7 @@ func resourceAwsIamServiceLinkedRoleCreate(d *schema.ResourceData, meta interfac if err != nil { return fmt.Errorf("Error creating service-linked role with name %s: %s", serviceName, err) } - d.SetId(*resp.Role.Arn) + d.SetId(aws.StringValue(resp.Role.Arn)) return resourceAwsIamServiceLinkedRoleRead(d, meta) } diff --git a/aws/resource_aws_iam_user_login_profile.go b/aws/resource_aws_iam_user_login_profile.go index 37aab38e8cf..45373c73985 100644 --- a/aws/resource_aws_iam_user_login_profile.go +++ b/aws/resource_aws_iam_user_login_profile.go @@ -155,7 +155,7 @@ func resourceAwsIamUserLoginProfileCreate(d *schema.ResourceData, meta interface return fmt.Errorf("Error creating IAM User Login Profile for %q: %s", username, err) } - d.SetId(*createResp.LoginProfile.UserName) + d.SetId(aws.StringValue(createResp.LoginProfile.UserName)) d.Set("key_fingerprint", fingerprint) d.Set("encrypted_password", encrypted) return nil diff --git a/aws/resource_aws_iam_user_ssh_key.go b/aws/resource_aws_iam_user_ssh_key.go index a821e988ddb..9de32bed07d 100644 --- a/aws/resource_aws_iam_user_ssh_key.go +++ b/aws/resource_aws_iam_user_ssh_key.go @@ -84,7 +84,7 @@ func resourceAwsIamUserSshKeyCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error creating IAM User SSH Key %s: %s", username, err) } - d.SetId(*createResp.SSHPublicKey.SSHPublicKeyId) + d.SetId(aws.StringValue(createResp.SSHPublicKey.SSHPublicKeyId)) return resourceAwsIamUserSshKeyUpdate(d, meta) } diff --git a/aws/resource_aws_inspector_assessment_target.go b/aws/resource_aws_inspector_assessment_target.go index a426630380d..a3fa56ced61 100644 --- a/aws/resource_aws_inspector_assessment_target.go +++ b/aws/resource_aws_inspector_assessment_target.go @@ -55,7 +55,7 @@ func resourceAwsInspectorAssessmentTargetCreate(d *schema.ResourceData, meta int return fmt.Errorf("error creating Inspector Assessment Target: %s", err) } - d.SetId(*resp.AssessmentTargetArn) + d.SetId(aws.StringValue(resp.AssessmentTargetArn)) return resourceAwsInspectorAssessmentTargetRead(d, meta) } diff --git a/aws/resource_aws_internet_gateway.go b/aws/resource_aws_internet_gateway.go index 785ece30d86..dc3704ad624 100644 --- a/aws/resource_aws_internet_gateway.go +++ b/aws/resource_aws_internet_gateway.go @@ -58,7 +58,7 @@ func resourceAwsInternetGatewayCreate(d *schema.ResourceData, meta interface{}) // Get the ID and store it ig := *resp.InternetGateway - d.SetId(*ig.InternetGatewayId) + d.SetId(aws.StringValue(ig.InternetGatewayId)) log.Printf("[INFO] InternetGateway ID: %s", d.Id()) var igRaw interface{} err = resource.Retry(5*time.Minute, func() *resource.RetryError { diff --git a/aws/resource_aws_iot_thing.go 
b/aws/resource_aws_iot_thing.go index c442142d93c..e3c521350b7 100644 --- a/aws/resource_aws_iot_thing.go +++ b/aws/resource_aws_iot_thing.go @@ -75,7 +75,7 @@ func resourceAwsIotThingCreate(d *schema.ResourceData, meta interface{}) error { return err } - d.SetId(*out.ThingName) + d.SetId(aws.StringValue(out.ThingName)) return resourceAwsIotThingRead(d, meta) } diff --git a/aws/resource_aws_iot_thing_type.go b/aws/resource_aws_iot_thing_type.go index f243014d6e2..5eaf9246baf 100644 --- a/aws/resource_aws_iot_thing_type.go +++ b/aws/resource_aws_iot_thing_type.go @@ -96,7 +96,7 @@ func resourceAwsIotThingTypeCreate(d *schema.ResourceData, meta interface{}) err return err } - d.SetId(*out.ThingTypeName) + d.SetId(aws.StringValue(out.ThingTypeName)) if v := d.Get("deprecated").(bool); v { params := &iot.DeprecateThingTypeInput{ diff --git a/aws/resource_aws_lambda_alias.go b/aws/resource_aws_lambda_alias.go index e05837862de..0d8a958d35d 100644 --- a/aws/resource_aws_lambda_alias.go +++ b/aws/resource_aws_lambda_alias.go @@ -96,7 +96,7 @@ func resourceAwsLambdaAliasCreate(d *schema.ResourceData, meta interface{}) erro return fmt.Errorf("Error creating Lambda alias: %s", err) } - d.SetId(*aliasConfiguration.AliasArn) + d.SetId(aws.StringValue(aliasConfiguration.AliasArn)) return resourceAwsLambdaAliasRead(d, meta) } @@ -128,7 +128,7 @@ func resourceAwsLambdaAliasRead(d *schema.ResourceData, meta interface{}) error d.Set("function_version", aliasConfiguration.FunctionVersion) d.Set("name", aliasConfiguration.Name) d.Set("arn", aliasConfiguration.AliasArn) - d.SetId(*aliasConfiguration.AliasArn) + d.SetId(aws.StringValue(aliasConfiguration.AliasArn)) invokeArn := lambdaFunctionInvokeArn(*aliasConfiguration.AliasArn, meta) d.Set("invoke_arn", invokeArn) diff --git a/aws/resource_aws_lambda_event_source_mapping.go b/aws/resource_aws_lambda_event_source_mapping.go index 55e2426c92b..6ccf481b20e 100644 --- a/aws/resource_aws_lambda_event_source_mapping.go +++ b/aws/resource_aws_lambda_event_source_mapping.go @@ -255,7 +255,7 @@ func resourceAwsLambdaEventSourceMappingCreate(d *schema.ResourceData, meta inte // No error d.Set("uuid", eventSourceMappingConfiguration.UUID) - d.SetId(*eventSourceMappingConfiguration.UUID) + d.SetId(aws.StringValue(eventSourceMappingConfiguration.UUID)) return resourceAwsLambdaEventSourceMappingRead(d, meta) } diff --git a/aws/resource_aws_launch_template.go b/aws/resource_aws_launch_template.go index 9b41755ab3c..06afca1cfd2 100644 --- a/aws/resource_aws_launch_template.go +++ b/aws/resource_aws_launch_template.go @@ -654,7 +654,7 @@ func resourceAwsLaunchTemplateCreate(d *schema.ResourceData, meta interface{}) e } launchTemplate := resp.LaunchTemplate - d.SetId(*launchTemplate.LaunchTemplateId) + d.SetId(aws.StringValue(launchTemplate.LaunchTemplateId)) log.Printf("[DEBUG] Launch Template created: %q (version %d)", *launchTemplate.LaunchTemplateId, *launchTemplate.LatestVersionNumber) diff --git a/aws/resource_aws_lb_listener.go b/aws/resource_aws_lb_listener.go index 132efa93dfa..763ee3e2ee6 100644 --- a/aws/resource_aws_lb_listener.go +++ b/aws/resource_aws_lb_listener.go @@ -564,7 +564,7 @@ func resourceAwsLbListenerCreate(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("error creating ELBv2 Listener: no listeners returned in response") } - d.SetId(*resp.Listeners[0].ListenerArn) + d.SetId(aws.StringValue(resp.Listeners[0].ListenerArn)) return resourceAwsLbListenerRead(d, meta) } diff --git a/aws/resource_aws_licensemanager_license_configuration.go 
b/aws/resource_aws_licensemanager_license_configuration.go index 71dcca97554..8c8ca3216c9 100644 --- a/aws/resource_aws_licensemanager_license_configuration.go +++ b/aws/resource_aws_licensemanager_license_configuration.go @@ -99,7 +99,7 @@ func resourceAwsLicenseManagerLicenseConfigurationCreate(d *schema.ResourceData, if err != nil { return fmt.Errorf("Error creating License Manager license configuration: %s", err) } - d.SetId(*resp.LicenseConfigurationArn) + d.SetId(aws.StringValue(resp.LicenseConfigurationArn)) return resourceAwsLicenseManagerLicenseConfigurationRead(d, meta) } diff --git a/aws/resource_aws_main_route_table_association.go b/aws/resource_aws_main_route_table_association.go index 3d148b09c76..bd9213017fa 100644 --- a/aws/resource_aws_main_route_table_association.go +++ b/aws/resource_aws_main_route_table_association.go @@ -60,7 +60,7 @@ func resourceAwsMainRouteTableAssociationCreate(d *schema.ResourceData, meta int } d.Set("original_route_table_id", mainAssociation.RouteTableId) - d.SetId(*resp.NewAssociationId) + d.SetId(aws.StringValue(resp.NewAssociationId)) log.Printf("[INFO] New main route table association ID: %s", d.Id()) return nil @@ -102,7 +102,7 @@ func resourceAwsMainRouteTableAssociationUpdate(d *schema.ResourceData, meta int return err } - d.SetId(*resp.NewAssociationId) + d.SetId(aws.StringValue(resp.NewAssociationId)) log.Printf("[INFO] New main route table association ID: %s", d.Id()) return nil diff --git a/aws/resource_aws_mq_broker.go b/aws/resource_aws_mq_broker.go index bb779767289..2b7a8fc68a8 100644 --- a/aws/resource_aws_mq_broker.go +++ b/aws/resource_aws_mq_broker.go @@ -298,7 +298,7 @@ func resourceAwsMqBrokerCreate(d *schema.ResourceData, meta interface{}) error { return err } - d.SetId(*out.BrokerId) + d.SetId(aws.StringValue(out.BrokerId)) d.Set("arn", out.BrokerArn) stateConf := resource.StateChangeConf{ diff --git a/aws/resource_aws_mq_configuration.go b/aws/resource_aws_mq_configuration.go index f1b874a919b..7ac2056d1b3 100644 --- a/aws/resource_aws_mq_configuration.go +++ b/aws/resource_aws_mq_configuration.go @@ -97,7 +97,7 @@ func resourceAwsMqConfigurationCreate(d *schema.ResourceData, meta interface{}) return err } - d.SetId(*out.Id) + d.SetId(aws.StringValue(out.Id)) d.Set("arn", out.Arn) return resourceAwsMqConfigurationUpdate(d, meta) diff --git a/aws/resource_aws_nat_gateway.go b/aws/resource_aws_nat_gateway.go index 5d4d23e4552..a5aa45c960c 100644 --- a/aws/resource_aws_nat_gateway.go +++ b/aws/resource_aws_nat_gateway.go @@ -75,7 +75,7 @@ func resourceAwsNatGatewayCreate(d *schema.ResourceData, meta interface{}) error // Get the ID and store it ng := natResp.NatGateway - d.SetId(*ng.NatGatewayId) + d.SetId(aws.StringValue(ng.NatGatewayId)) log.Printf("[INFO] NAT Gateway ID: %s", d.Id()) // Wait for the NAT Gateway to become available diff --git a/aws/resource_aws_network_interface.go b/aws/resource_aws_network_interface.go index 7bc23ef4447..949b71014c5 100644 --- a/aws/resource_aws_network_interface.go +++ b/aws/resource_aws_network_interface.go @@ -170,7 +170,7 @@ func resourceAwsNetworkInterfaceCreate(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error creating ENI: %s", err) } - d.SetId(*resp.NetworkInterface.NetworkInterfaceId) + d.SetId(aws.StringValue(resp.NetworkInterface.NetworkInterfaceId)) if err := waitForNetworkInterfaceCreation(conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return fmt.Errorf("error waiting for Network Interface (%s) creation: %s", d.Id(), err) diff --git 
a/aws/resource_aws_network_interface_attachment.go b/aws/resource_aws_network_interface_attachment.go index 7f9c2ad6215..9494decd9fa 100644 --- a/aws/resource_aws_network_interface_attachment.go +++ b/aws/resource_aws_network_interface_attachment.go @@ -88,7 +88,7 @@ func resourceAwsNetworkInterfaceAttachmentCreate(d *schema.ResourceData, meta in "Error waiting for Volume (%s) to attach to Instance: %s, error: %s", network_interface_id, instance_id, err) } - d.SetId(*resp.AttachmentId) + d.SetId(aws.StringValue(resp.AttachmentId)) return resourceAwsNetworkInterfaceAttachmentRead(d, meta) } diff --git a/aws/resource_aws_opsworks_user_profile.go b/aws/resource_aws_opsworks_user_profile.go index 8d396c4b944..64fa50f31bd 100644 --- a/aws/resource_aws_opsworks_user_profile.go +++ b/aws/resource_aws_opsworks_user_profile.go @@ -91,7 +91,7 @@ func resourceAwsOpsworksUserProfileCreate(d *schema.ResourceData, meta interface return err } - d.SetId(*resp.IamUserArn) + d.SetId(aws.StringValue(resp.IamUserArn)) return resourceAwsOpsworksUserProfileUpdate(d, meta) } diff --git a/aws/resource_aws_organizations_account.go b/aws/resource_aws_organizations_account.go index c604ace92f2..8a4678d0bec 100644 --- a/aws/resource_aws_organizations_account.go +++ b/aws/resource_aws_organizations_account.go @@ -143,7 +143,7 @@ func resourceAwsOrganizationsAccountCreate(d *schema.ResourceData, meta interfac // Store the ID accountId := stateResp.(*organizations.CreateAccountStatus).AccountId - d.SetId(*accountId) + d.SetId(aws.StringValue(accountId)) if v, ok := d.GetOk("parent_id"); ok { newParentID := v.(string) diff --git a/aws/resource_aws_organizations_organization.go b/aws/resource_aws_organizations_organization.go index 4780fae10ac..f9768d94306 100644 --- a/aws/resource_aws_organizations_organization.go +++ b/aws/resource_aws_organizations_organization.go @@ -179,7 +179,7 @@ func resourceAwsOrganizationsOrganizationCreate(d *schema.ResourceData, meta int } org := resp.Organization - d.SetId(*org.Id) + d.SetId(aws.StringValue(org.Id)) awsServiceAccessPrincipals := d.Get("aws_service_access_principals").(*schema.Set).List() for _, principalRaw := range awsServiceAccessPrincipals { diff --git a/aws/resource_aws_organizations_organizational_unit.go b/aws/resource_aws_organizations_organizational_unit.go index 70ab43c4ba7..2b39422ba72 100644 --- a/aws/resource_aws_organizations_organizational_unit.go +++ b/aws/resource_aws_organizations_organizational_unit.go @@ -105,7 +105,7 @@ func resourceAwsOrganizationsOrganizationalUnitCreate(d *schema.ResourceData, me // Store the ID ouId := resp.OrganizationalUnit.Id - d.SetId(*ouId) + d.SetId(aws.StringValue(ouId)) return resourceAwsOrganizationsOrganizationalUnitRead(d, meta) } diff --git a/aws/resource_aws_qldb_ledger.go b/aws/resource_aws_qldb_ledger.go index cdf6f1649be..eec0aa633e9 100644 --- a/aws/resource_aws_qldb_ledger.go +++ b/aws/resource_aws_qldb_ledger.go @@ -82,7 +82,7 @@ func resourceAwsQLDBLedgerCreate(d *schema.ResourceData, meta interface{}) error } // Set QLDB ledger name - d.SetId(*qldbResp.Name) + d.SetId(aws.StringValue(qldbResp.Name)) log.Printf("[INFO] QLDB Ledger name: %s", d.Id()) diff --git a/aws/resource_aws_rds_cluster_instance.go b/aws/resource_aws_rds_cluster_instance.go index 4d0f285a916..2e11e6290e5 100644 --- a/aws/resource_aws_rds_cluster_instance.go +++ b/aws/resource_aws_rds_cluster_instance.go @@ -300,7 +300,7 @@ func resourceAwsRDSClusterInstanceCreate(d *schema.ResourceData, meta interface{ return fmt.Errorf("error creating RDS 
Cluster (%s) Instance: %w", d.Get("cluster_identifier").(string), err) } - d.SetId(*resp.DBInstance.DBInstanceIdentifier) + d.SetId(aws.StringValue(resp.DBInstance.DBInstanceIdentifier)) // reuse db_instance refresh func stateConf := &resource.StateChangeConf{ diff --git a/aws/resource_aws_rds_cluster_parameter_group.go b/aws/resource_aws_rds_cluster_parameter_group.go index 4348702544a..965c9f16f48 100644 --- a/aws/resource_aws_rds_cluster_parameter_group.go +++ b/aws/resource_aws_rds_cluster_parameter_group.go @@ -112,7 +112,7 @@ func resourceAwsRDSClusterParameterGroupCreate(d *schema.ResourceData, meta inte return fmt.Errorf("Error creating DB Cluster Parameter Group: %s", err) } - d.SetId(*createOpts.DBClusterParameterGroupName) + d.SetId(aws.StringValue(createOpts.DBClusterParameterGroupName)) log.Printf("[INFO] DB Cluster Parameter Group ID: %s", d.Id()) // Set for update diff --git a/aws/resource_aws_redshift_cluster.go b/aws/resource_aws_redshift_cluster.go index bf4481cc496..a8f9ac0f874 100644 --- a/aws/resource_aws_redshift_cluster.go +++ b/aws/resource_aws_redshift_cluster.go @@ -405,7 +405,7 @@ func resourceAwsRedshiftClusterCreate(d *schema.ResourceData, meta interface{}) return err } - d.SetId(*resp.Cluster.ClusterIdentifier) + d.SetId(aws.StringValue(resp.Cluster.ClusterIdentifier)) } else { if _, ok := d.GetOk("master_password"); !ok { @@ -489,7 +489,7 @@ func resourceAwsRedshiftClusterCreate(d *schema.ResourceData, meta interface{}) } log.Printf("[DEBUG]: Cluster create response: %s", resp) - d.SetId(*resp.Cluster.ClusterIdentifier) + d.SetId(aws.StringValue(resp.Cluster.ClusterIdentifier)) } stateConf := &resource.StateChangeConf{ diff --git a/aws/resource_aws_redshift_parameter_group.go b/aws/resource_aws_redshift_parameter_group.go index e5966e749cf..5943ded27d0 100644 --- a/aws/resource_aws_redshift_parameter_group.go +++ b/aws/resource_aws_redshift_parameter_group.go @@ -98,7 +98,7 @@ func resourceAwsRedshiftParameterGroupCreate(d *schema.ResourceData, meta interf return fmt.Errorf("Error creating Redshift Parameter Group: %s", err) } - d.SetId(*createOpts.ParameterGroupName) + d.SetId(aws.StringValue(createOpts.ParameterGroupName)) if v := d.Get("parameter").(*schema.Set); v.Len() > 0 { parameters := expandRedshiftParameters(v.List()) diff --git a/aws/resource_aws_redshift_subnet_group.go b/aws/resource_aws_redshift_subnet_group.go index 9d1b0e88b82..29d82694469 100644 --- a/aws/resource_aws_redshift_subnet_group.go +++ b/aws/resource_aws_redshift_subnet_group.go @@ -81,7 +81,7 @@ func resourceAwsRedshiftSubnetGroupCreate(d *schema.ResourceData, meta interface return fmt.Errorf("Error creating Redshift Subnet Group: %s", err) } - d.SetId(*createOpts.ClusterSubnetGroupName) + d.SetId(aws.StringValue(createOpts.ClusterSubnetGroupName)) log.Printf("[INFO] Redshift Subnet Group ID: %s", d.Id()) return resourceAwsRedshiftSubnetGroupRead(d, meta) } diff --git a/aws/resource_aws_route53_health_check.go b/aws/resource_aws_route53_health_check.go index c2ae3ee16eb..4ecf1323ccf 100644 --- a/aws/resource_aws_route53_health_check.go +++ b/aws/resource_aws_route53_health_check.go @@ -339,7 +339,7 @@ func resourceAwsRoute53HealthCheckCreate(d *schema.ResourceData, meta interface{ return err } - d.SetId(*resp.HealthCheck.Id) + d.SetId(aws.StringValue(resp.HealthCheck.Id)) if err := keyvaluetags.Route53UpdateTags(conn, d.Id(), route53.TagResourceTypeHealthcheck, map[string]interface{}{}, d.Get("tags").(map[string]interface{})); err != nil { return fmt.Errorf("error setting Route53 
Health Check (%s) tags: %s", d.Id(), err) diff --git a/aws/resource_aws_route53_query_log.go b/aws/resource_aws_route53_query_log.go index e6e69845706..4dba9c41c22 100644 --- a/aws/resource_aws_route53_query_log.go +++ b/aws/resource_aws_route53_query_log.go @@ -50,7 +50,7 @@ func resourceAwsRoute53QueryLogCreate(d *schema.ResourceData, meta interface{}) } log.Printf("[DEBUG] Route53 query logging configuration created: %#v", out) - d.SetId(*out.QueryLoggingConfig.Id) + d.SetId(aws.StringValue(out.QueryLoggingConfig.Id)) return resourceAwsRoute53QueryLogRead(d, meta) } diff --git a/aws/resource_aws_route_table_association.go b/aws/resource_aws_route_table_association.go index 24243673f81..2dd3602b7a2 100644 --- a/aws/resource_aws_route_table_association.go +++ b/aws/resource_aws_route_table_association.go @@ -157,7 +157,7 @@ func resourceAwsRouteTableAssociationUpdate(d *schema.ResourceData, meta interfa } // Update the ID - d.SetId(*resp.NewAssociationId) + d.SetId(aws.StringValue(resp.NewAssociationId)) log.Printf("[INFO] Association ID: %s", d.Id()) return nil diff --git a/aws/resource_aws_security_group.go b/aws/resource_aws_security_group.go index d1a06b32fc0..ca3325b3579 100644 --- a/aws/resource_aws_security_group.go +++ b/aws/resource_aws_security_group.go @@ -262,7 +262,7 @@ func resourceAwsSecurityGroupCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error creating Security Group: %s", err) } - d.SetId(*createResp.GroupId) + d.SetId(aws.StringValue(createResp.GroupId)) log.Printf("[INFO] Security Group ID: %s", d.Id()) diff --git a/aws/resource_aws_securityhub_standards_subscription.go b/aws/resource_aws_securityhub_standards_subscription.go index 371c03b4a0d..ebf53ef11f7 100644 --- a/aws/resource_aws_securityhub_standards_subscription.go +++ b/aws/resource_aws_securityhub_standards_subscription.go @@ -47,7 +47,7 @@ func resourceAwsSecurityHubStandardsSubscriptionCreate(d *schema.ResourceData, m standardsSubscription := resp.StandardsSubscriptions[0] - d.SetId(*standardsSubscription.StandardsSubscriptionArn) + d.SetId(aws.StringValue(standardsSubscription.StandardsSubscriptionArn)) return resourceAwsSecurityHubStandardsSubscriptionRead(d, meta) } diff --git a/aws/resource_aws_servicecatalog_portfolio.go b/aws/resource_aws_servicecatalog_portfolio.go index 50b72fe782f..214f0d25061 100644 --- a/aws/resource_aws_servicecatalog_portfolio.go +++ b/aws/resource_aws_servicecatalog_portfolio.go @@ -82,7 +82,7 @@ func resourceAwsServiceCatalogPortfolioCreate(d *schema.ResourceData, meta inter if err != nil { return fmt.Errorf("Creating Service Catalog Portfolio failed: %s", err.Error()) } - d.SetId(*resp.PortfolioDetail.Id) + d.SetId(aws.StringValue(resp.PortfolioDetail.Id)) return resourceAwsServiceCatalogPortfolioRead(d, meta) } diff --git a/aws/resource_aws_sfn_activity.go b/aws/resource_aws_sfn_activity.go index 226f9822a1f..672254795ac 100644 --- a/aws/resource_aws_sfn_activity.go +++ b/aws/resource_aws_sfn_activity.go @@ -54,7 +54,7 @@ func resourceAwsSfnActivityCreate(d *schema.ResourceData, meta interface{}) erro return fmt.Errorf("Error creating Step Function Activity: %s", err) } - d.SetId(*activity.ActivityArn) + d.SetId(aws.StringValue(activity.ActivityArn)) return resourceAwsSfnActivityRead(d, meta) } diff --git a/aws/resource_aws_shield_protection.go b/aws/resource_aws_shield_protection.go index 1560f4bcb7d..6ab445fedca 100644 --- a/aws/resource_aws_shield_protection.go +++ b/aws/resource_aws_shield_protection.go @@ -45,7 +45,7 @@ func 
resourceAwsShieldProtectionCreate(d *schema.ResourceData, meta interface{}) if err != nil { return fmt.Errorf("error creating Shield Protection: %s", err) } - d.SetId(*resp.ProtectionId) + d.SetId(aws.StringValue(resp.ProtectionId)) return resourceAwsShieldProtectionRead(d, meta) } diff --git a/aws/resource_aws_sns_platform_application.go b/aws/resource_aws_sns_platform_application.go index 4f79bc6716a..ce5628222ff 100644 --- a/aws/resource_aws_sns_platform_application.go +++ b/aws/resource_aws_sns_platform_application.go @@ -116,7 +116,7 @@ func resourceAwsSnsPlatformApplicationCreate(d *schema.ResourceData, meta interf return fmt.Errorf("Error creating SNS platform application: %s", err) } - d.SetId(*output.PlatformApplicationArn) + d.SetId(aws.StringValue(output.PlatformApplicationArn)) return resourceAwsSnsPlatformApplicationUpdate(d, meta) } diff --git a/aws/resource_aws_sns_topic.go b/aws/resource_aws_sns_topic.go index 9b48c8f5caf..ac9bdc0e003 100644 --- a/aws/resource_aws_sns_topic.go +++ b/aws/resource_aws_sns_topic.go @@ -154,7 +154,7 @@ func resourceAwsSnsTopicCreate(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("Error creating SNS topic: %s", err) } - d.SetId(*output.TopicArn) + d.SetId(aws.StringValue(output.TopicArn)) // update mutable attributes if d.HasChange("application_failure_feedback_role_arn") { diff --git a/aws/resource_aws_sns_topic_subscription.go b/aws/resource_aws_sns_topic_subscription.go index a9f8bbd6c03..353c20d9d9a 100644 --- a/aws/resource_aws_sns_topic_subscription.go +++ b/aws/resource_aws_sns_topic_subscription.go @@ -111,7 +111,7 @@ func resourceAwsSnsTopicSubscriptionCreate(d *schema.ResourceData, meta interfac } log.Printf("New subscription ARN: %s", *output.SubscriptionArn) - d.SetId(*output.SubscriptionArn) + d.SetId(aws.StringValue(output.SubscriptionArn)) // Write the ARN to the 'arn' field for export d.Set("arn", output.SubscriptionArn) diff --git a/aws/resource_aws_spot_fleet_request.go b/aws/resource_aws_spot_fleet_request.go index 5a4e2b63bf0..492685a8e1f 100644 --- a/aws/resource_aws_spot_fleet_request.go +++ b/aws/resource_aws_spot_fleet_request.go @@ -1047,7 +1047,7 @@ func resourceAwsSpotFleetRequestCreate(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error requesting spot fleet: %s", err) } - d.SetId(*resp.SpotFleetRequestId) + d.SetId(aws.StringValue(resp.SpotFleetRequestId)) log.Printf("[INFO] Spot Fleet Request ID: %s", d.Id()) log.Println("[INFO] Waiting for Spot Fleet Request to be active") @@ -1209,7 +1209,7 @@ func resourceAwsSpotFleetRequestRead(d *schema.ResourceData, meta interface{}) e return nil } - d.SetId(*sfr.SpotFleetRequestId) + d.SetId(aws.StringValue(sfr.SpotFleetRequestId)) d.Set("spot_request_state", aws.StringValue(sfr.SpotFleetRequestState)) config := sfr.SpotFleetRequestConfig diff --git a/aws/resource_aws_spot_instance_request.go b/aws/resource_aws_spot_instance_request.go index a718c32db04..553c0c6360e 100644 --- a/aws/resource_aws_spot_instance_request.go +++ b/aws/resource_aws_spot_instance_request.go @@ -211,7 +211,7 @@ func resourceAwsSpotInstanceRequestCreate(d *schema.ResourceData, meta interface } sir := *resp.SpotInstanceRequests[0] - d.SetId(*sir.SpotInstanceRequestId) + d.SetId(aws.StringValue(sir.SpotInstanceRequestId)) if d.Get("wait_for_fulfillment").(bool) { spotStateConf := &resource.StateChangeConf{ diff --git a/aws/resource_aws_ssm_activation.go b/aws/resource_aws_ssm_activation.go index 844b8734474..216cdb8ff55 100644 --- a/aws/resource_aws_ssm_activation.go 
+++ b/aws/resource_aws_ssm_activation.go @@ -130,7 +130,7 @@ func resourceAwsSsmActivationCreate(d *schema.ResourceData, meta interface{}) er if resp.ActivationId == nil { return fmt.Errorf("ActivationId was nil") } - d.SetId(*resp.ActivationId) + d.SetId(aws.StringValue(resp.ActivationId)) d.Set("activation_code", resp.ActivationCode) return resourceAwsSsmActivationRead(d, meta) diff --git a/aws/resource_aws_ssm_association.go b/aws/resource_aws_ssm_association.go index dd0ed4b27ad..17409bd4f2f 100644 --- a/aws/resource_aws_ssm_association.go +++ b/aws/resource_aws_ssm_association.go @@ -186,7 +186,7 @@ func resourceAwsSsmAssociationCreate(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("AssociationDescription was nil") } - d.SetId(*resp.AssociationDescription.AssociationId) + d.SetId(aws.StringValue(resp.AssociationDescription.AssociationId)) d.Set("association_id", resp.AssociationDescription.AssociationId) return resourceAwsSsmAssociationRead(d, meta) diff --git a/aws/resource_aws_ssm_document.go b/aws/resource_aws_ssm_document.go index 86f64ffe51c..ddfa59496eb 100644 --- a/aws/resource_aws_ssm_document.go +++ b/aws/resource_aws_ssm_document.go @@ -214,7 +214,7 @@ func resourceAwsSsmDocumentCreate(d *schema.ResourceData, meta interface{}) erro return fmt.Errorf("Error creating SSM document: %s", err) } - d.SetId(*resp.DocumentDescription.Name) + d.SetId(aws.StringValue(resp.DocumentDescription.Name)) if v, ok := d.GetOk("permissions"); ok && v != nil { if err := setDocumentPermissions(d, meta); err != nil { diff --git a/aws/resource_aws_ssm_maintenance_window.go b/aws/resource_aws_ssm_maintenance_window.go index 419768c0cbf..3a127f7d312 100644 --- a/aws/resource_aws_ssm_maintenance_window.go +++ b/aws/resource_aws_ssm_maintenance_window.go @@ -124,7 +124,7 @@ func resourceAwsSsmMaintenanceWindowCreate(d *schema.ResourceData, meta interfac return fmt.Errorf("error creating SSM Maintenance Window: %s", err) } - d.SetId(*resp.WindowId) + d.SetId(aws.StringValue(resp.WindowId)) if !d.Get("enabled").(bool) { input := &ssm.UpdateMaintenanceWindowInput{ diff --git a/aws/resource_aws_ssm_maintenance_window_task.go b/aws/resource_aws_ssm_maintenance_window_task.go index f781acba716..c4631d0f87b 100644 --- a/aws/resource_aws_ssm_maintenance_window_task.go +++ b/aws/resource_aws_ssm_maintenance_window_task.go @@ -604,7 +604,7 @@ func resourceAwsSsmMaintenanceWindowTaskCreate(d *schema.ResourceData, meta inte return err } - d.SetId(*resp.WindowTaskId) + d.SetId(aws.StringValue(resp.WindowTaskId)) return resourceAwsSsmMaintenanceWindowTaskRead(d, meta) } diff --git a/aws/resource_aws_ssm_patch_baseline.go b/aws/resource_aws_ssm_patch_baseline.go index f935ecc4828..f11a9019f59 100644 --- a/aws/resource_aws_ssm_patch_baseline.go +++ b/aws/resource_aws_ssm_patch_baseline.go @@ -167,7 +167,7 @@ func resourceAwsSsmPatchBaselineCreate(d *schema.ResourceData, meta interface{}) return err } - d.SetId(*resp.BaselineId) + d.SetId(aws.StringValue(resp.BaselineId)) return resourceAwsSsmPatchBaselineRead(d, meta) } diff --git a/aws/resource_aws_ssm_patch_group.go b/aws/resource_aws_ssm_patch_group.go index 8e67e7dfa1a..07e23114bc5 100644 --- a/aws/resource_aws_ssm_patch_group.go +++ b/aws/resource_aws_ssm_patch_group.go @@ -43,7 +43,7 @@ func resourceAwsSsmPatchGroupCreate(d *schema.ResourceData, meta interface{}) er return err } - d.SetId(*resp.PatchGroup) + d.SetId(aws.StringValue(resp.PatchGroup)) return resourceAwsSsmPatchGroupRead(d, meta) } diff --git a/aws/resource_aws_transfer_server.go 
b/aws/resource_aws_transfer_server.go index 8dd1c4d85ac..5e1bf47b88f 100644 --- a/aws/resource_aws_transfer_server.go +++ b/aws/resource_aws_transfer_server.go @@ -190,7 +190,7 @@ func resourceAwsTransferServerCreate(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error creating Transfer Server: %s", err) } - d.SetId(*resp.ServerId) + d.SetId(aws.StringValue(resp.ServerId)) stateChangeConf := &resource.StateChangeConf{ Pending: []string{transfer.StateStarting}, diff --git a/aws/resource_aws_vpc.go b/aws/resource_aws_vpc.go index 368fe73faae..a26ff1f5dd8 100644 --- a/aws/resource_aws_vpc.go +++ b/aws/resource_aws_vpc.go @@ -145,7 +145,7 @@ func resourceAwsVpcCreate(d *schema.ResourceData, meta interface{}) error { // Get the ID and store it vpc := vpcResp.Vpc - d.SetId(*vpc.VpcId) + d.SetId(aws.StringValue(vpc.VpcId)) log.Printf("[INFO] VPC ID: %s", d.Id()) // Wait for the VPC to become available diff --git a/aws/resource_aws_vpc_peering_connection.go b/aws/resource_aws_vpc_peering_connection.go index 066c07b15f1..61a861903e5 100644 --- a/aws/resource_aws_vpc_peering_connection.go +++ b/aws/resource_aws_vpc_peering_connection.go @@ -98,7 +98,7 @@ func resourceAwsVPCPeeringCreate(d *schema.ResourceData, meta interface{}) error // Get the ID and store it rt := resp.VpcPeeringConnection - d.SetId(*rt.VpcPeeringConnectionId) + d.SetId(aws.StringValue(rt.VpcPeeringConnectionId)) log.Printf("[INFO] VPC Peering Connection ID: %s", d.Id()) err = vpcPeeringConnectionWaitUntilAvailable(conn, d.Id(), d.Timeout(schema.TimeoutCreate)) diff --git a/aws/resource_aws_waf_geo_match_set.go b/aws/resource_aws_waf_geo_match_set.go index 2d863317352..c6fd7a7f863 100644 --- a/aws/resource_aws_waf_geo_match_set.go +++ b/aws/resource_aws_waf_geo_match_set.go @@ -69,7 +69,7 @@ func resourceAwsWafGeoMatchSetCreate(d *schema.ResourceData, meta interface{}) e } resp := out.(*waf.CreateGeoMatchSetOutput) - d.SetId(*resp.GeoMatchSet.GeoMatchSetId) + d.SetId(aws.StringValue(resp.GeoMatchSet.GeoMatchSetId)) return resourceAwsWafGeoMatchSetUpdate(d, meta) } diff --git a/aws/resource_aws_waf_ipset.go b/aws/resource_aws_waf_ipset.go index 53d35950e33..ffed8c807f1 100644 --- a/aws/resource_aws_waf_ipset.go +++ b/aws/resource_aws_waf_ipset.go @@ -74,7 +74,7 @@ func resourceAwsWafIPSetCreate(d *schema.ResourceData, meta interface{}) error { return err } resp := out.(*waf.CreateIPSetOutput) - d.SetId(*resp.IPSet.IPSetId) + d.SetId(aws.StringValue(resp.IPSet.IPSetId)) if v, ok := d.GetOk("ip_set_descriptors"); ok && v.(*schema.Set).Len() > 0 { diff --git a/aws/resource_aws_waf_rate_based_rule.go b/aws/resource_aws_waf_rate_based_rule.go index 19b29e9c4ad..29826b76e35 100644 --- a/aws/resource_aws_waf_rate_based_rule.go +++ b/aws/resource_aws_waf_rate_based_rule.go @@ -99,7 +99,7 @@ func resourceAwsWafRateBasedRuleCreate(d *schema.ResourceData, meta interface{}) return err } resp := out.(*waf.CreateRateBasedRuleOutput) - d.SetId(*resp.Rule.RuleId) + d.SetId(aws.StringValue(resp.Rule.RuleId)) newPredicates := d.Get("predicates").(*schema.Set).List() if len(newPredicates) > 0 { diff --git a/aws/resource_aws_waf_regex_match_set.go b/aws/resource_aws_waf_regex_match_set.go index 8898683fa81..71d229bfd49 100644 --- a/aws/resource_aws_waf_regex_match_set.go +++ b/aws/resource_aws_waf_regex_match_set.go @@ -90,7 +90,7 @@ func resourceAwsWafRegexMatchSetCreate(d *schema.ResourceData, meta interface{}) } resp := out.(*waf.CreateRegexMatchSetOutput) - d.SetId(*resp.RegexMatchSet.RegexMatchSetId) + 
d.SetId(aws.StringValue(resp.RegexMatchSet.RegexMatchSetId)) return resourceAwsWafRegexMatchSetUpdate(d, meta) } diff --git a/aws/resource_aws_waf_regex_pattern_set.go b/aws/resource_aws_waf_regex_pattern_set.go index 96a66135a76..d3e4dbf178d 100644 --- a/aws/resource_aws_waf_regex_pattern_set.go +++ b/aws/resource_aws_waf_regex_pattern_set.go @@ -57,7 +57,7 @@ func resourceAwsWafRegexPatternSetCreate(d *schema.ResourceData, meta interface{ } resp := out.(*waf.CreateRegexPatternSetOutput) - d.SetId(*resp.RegexPatternSet.RegexPatternSetId) + d.SetId(aws.StringValue(resp.RegexPatternSet.RegexPatternSetId)) return resourceAwsWafRegexPatternSetUpdate(d, meta) } diff --git a/aws/resource_aws_waf_rule.go b/aws/resource_aws_waf_rule.go index 4ba58ea6af9..7802bf0e00e 100644 --- a/aws/resource_aws_waf_rule.go +++ b/aws/resource_aws_waf_rule.go @@ -88,7 +88,7 @@ func resourceAwsWafRuleCreate(d *schema.ResourceData, meta interface{}) error { return err } resp := out.(*waf.CreateRuleOutput) - d.SetId(*resp.Rule.RuleId) + d.SetId(aws.StringValue(resp.Rule.RuleId)) newPredicates := d.Get("predicates").(*schema.Set).List() if len(newPredicates) > 0 { diff --git a/aws/resource_aws_waf_rule_group.go b/aws/resource_aws_waf_rule_group.go index 8f8fc6fe900..b07e7f549a3 100644 --- a/aws/resource_aws_waf_rule_group.go +++ b/aws/resource_aws_waf_rule_group.go @@ -98,7 +98,7 @@ func resourceAwsWafRuleGroupCreate(d *schema.ResourceData, meta interface{}) err return err } resp := out.(*waf.CreateRuleGroupOutput) - d.SetId(*resp.RuleGroup.RuleGroupId) + d.SetId(aws.StringValue(resp.RuleGroup.RuleGroupId)) activatedRules := d.Get("activated_rule").(*schema.Set).List() if len(activatedRules) > 0 { diff --git a/aws/resource_aws_waf_size_constraint_set.go b/aws/resource_aws_waf_size_constraint_set.go index 30c65a42dc6..1a803c0a6ee 100644 --- a/aws/resource_aws_waf_size_constraint_set.go +++ b/aws/resource_aws_waf_size_constraint_set.go @@ -44,7 +44,7 @@ func resourceAwsWafSizeConstraintSetCreate(d *schema.ResourceData, meta interfac } resp := out.(*waf.CreateSizeConstraintSetOutput) - d.SetId(*resp.SizeConstraintSet.SizeConstraintSetId) + d.SetId(aws.StringValue(resp.SizeConstraintSet.SizeConstraintSetId)) return resourceAwsWafSizeConstraintSetUpdate(d, meta) } diff --git a/aws/resource_aws_waf_sql_injection_match_set.go b/aws/resource_aws_waf_sql_injection_match_set.go index 6f34281b6b1..249d3781447 100644 --- a/aws/resource_aws_waf_sql_injection_match_set.go +++ b/aws/resource_aws_waf_sql_injection_match_set.go @@ -76,7 +76,7 @@ func resourceAwsWafSqlInjectionMatchSetCreate(d *schema.ResourceData, meta inter return fmt.Errorf("Error creating SqlInjectionMatchSet: %s", err) } resp := out.(*waf.CreateSqlInjectionMatchSetOutput) - d.SetId(*resp.SqlInjectionMatchSet.SqlInjectionMatchSetId) + d.SetId(aws.StringValue(resp.SqlInjectionMatchSet.SqlInjectionMatchSetId)) return resourceAwsWafSqlInjectionMatchSetUpdate(d, meta) } diff --git a/aws/resource_aws_waf_web_acl.go b/aws/resource_aws_waf_web_acl.go index e3b522ca1a7..e9e46f02fa3 100644 --- a/aws/resource_aws_waf_web_acl.go +++ b/aws/resource_aws_waf_web_acl.go @@ -169,7 +169,7 @@ func resourceAwsWafWebAclCreate(d *schema.ResourceData, meta interface{}) error return err } resp := out.(*waf.CreateWebACLOutput) - d.SetId(*resp.WebACL.WebACLId) + d.SetId(aws.StringValue(resp.WebACL.WebACLId)) arn := arn.ARN{ Partition: meta.(*AWSClient).partition, diff --git a/aws/resource_aws_wafregional_byte_match_set.go b/aws/resource_aws_wafregional_byte_match_set.go index 
4a53d1da49e..84b59699571 100644 --- a/aws/resource_aws_wafregional_byte_match_set.go +++ b/aws/resource_aws_wafregional_byte_match_set.go @@ -87,7 +87,7 @@ func resourceAwsWafRegionalByteMatchSetCreate(d *schema.ResourceData, meta inter } resp := out.(*waf.CreateByteMatchSetOutput) - d.SetId(*resp.ByteMatchSet.ByteMatchSetId) + d.SetId(aws.StringValue(resp.ByteMatchSet.ByteMatchSetId)) return resourceAwsWafRegionalByteMatchSetUpdate(d, meta) } diff --git a/aws/resource_aws_wafregional_geo_match_set.go b/aws/resource_aws_wafregional_geo_match_set.go index 22e70b6084b..2fc119894c7 100644 --- a/aws/resource_aws_wafregional_geo_match_set.go +++ b/aws/resource_aws_wafregional_geo_match_set.go @@ -66,7 +66,7 @@ func resourceAwsWafRegionalGeoMatchSetCreate(d *schema.ResourceData, meta interf } resp := out.(*waf.CreateGeoMatchSetOutput) - d.SetId(*resp.GeoMatchSet.GeoMatchSetId) + d.SetId(aws.StringValue(resp.GeoMatchSet.GeoMatchSetId)) return resourceAwsWafRegionalGeoMatchSetUpdate(d, meta) } diff --git a/aws/resource_aws_wafregional_ipset.go b/aws/resource_aws_wafregional_ipset.go index bb5240dc47d..07f63165207 100644 --- a/aws/resource_aws_wafregional_ipset.go +++ b/aws/resource_aws_wafregional_ipset.go @@ -68,7 +68,7 @@ func resourceAwsWafRegionalIPSetCreate(d *schema.ResourceData, meta interface{}) return err } resp := out.(*waf.CreateIPSetOutput) - d.SetId(*resp.IPSet.IPSetId) + d.SetId(aws.StringValue(resp.IPSet.IPSetId)) return resourceAwsWafRegionalIPSetUpdate(d, meta) } diff --git a/aws/resource_aws_wafregional_rate_based_rule.go b/aws/resource_aws_wafregional_rate_based_rule.go index dbfc7795b72..81e5c942945 100644 --- a/aws/resource_aws_wafregional_rate_based_rule.go +++ b/aws/resource_aws_wafregional_rate_based_rule.go @@ -100,7 +100,7 @@ func resourceAwsWafRegionalRateBasedRuleCreate(d *schema.ResourceData, meta inte return fmt.Errorf("Error creating WAF Regional Rate Based Rule (%s): %s", d.Id(), err) } resp := out.(*waf.CreateRateBasedRuleOutput) - d.SetId(*resp.Rule.RuleId) + d.SetId(aws.StringValue(resp.Rule.RuleId)) newPredicates := d.Get("predicate").(*schema.Set).List() if len(newPredicates) > 0 { diff --git a/aws/resource_aws_wafregional_rule.go b/aws/resource_aws_wafregional_rule.go index 4e52026fd8d..07febe60d2e 100644 --- a/aws/resource_aws_wafregional_rule.go +++ b/aws/resource_aws_wafregional_rule.go @@ -88,7 +88,7 @@ func resourceAwsWafRegionalRuleCreate(d *schema.ResourceData, meta interface{}) return err } resp := out.(*waf.CreateRuleOutput) - d.SetId(*resp.Rule.RuleId) + d.SetId(aws.StringValue(resp.Rule.RuleId)) newPredicates := d.Get("predicate").(*schema.Set).List() if len(newPredicates) > 0 { diff --git a/aws/resource_aws_wafregional_rule_group.go b/aws/resource_aws_wafregional_rule_group.go index 79dc35afea2..2e6c063bab2 100644 --- a/aws/resource_aws_wafregional_rule_group.go +++ b/aws/resource_aws_wafregional_rule_group.go @@ -100,7 +100,7 @@ func resourceAwsWafRegionalRuleGroupCreate(d *schema.ResourceData, meta interfac return err } resp := out.(*waf.CreateRuleGroupOutput) - d.SetId(*resp.RuleGroup.RuleGroupId) + d.SetId(aws.StringValue(resp.RuleGroup.RuleGroupId)) activatedRule := d.Get("activated_rule").(*schema.Set).List() if len(activatedRule) > 0 { diff --git a/aws/resource_aws_wafregional_web_acl.go b/aws/resource_aws_wafregional_web_acl.go index 45d8355d15d..406741efa19 100644 --- a/aws/resource_aws_wafregional_web_acl.go +++ b/aws/resource_aws_wafregional_web_acl.go @@ -194,7 +194,7 @@ func resourceAwsWafRegionalWebAclCreate(d *schema.ResourceData, 
meta interface{} return err } resp := out.(*waf.CreateWebACLOutput) - d.SetId(*resp.WebACL.WebACLId) + d.SetId(aws.StringValue(resp.WebACL.WebACLId)) // The WAF API currently omits this, but use it when it becomes available webACLARN := aws.StringValue(resp.WebACL.WebACLArn) diff --git a/aws/resource_aws_workspaces_ip_group.go b/aws/resource_aws_workspaces_ip_group.go index f17e43e3e1c..a4e813ebbae 100644 --- a/aws/resource_aws_workspaces_ip_group.go +++ b/aws/resource_aws_workspaces_ip_group.go @@ -72,7 +72,7 @@ func resourceAwsWorkspacesIpGroupCreate(d *schema.ResourceData, meta interface{} return err } - d.SetId(*resp.GroupId) + d.SetId(aws.StringValue(resp.GroupId)) return resourceAwsWorkspacesIpGroupRead(d, meta) } From 96c1287aaabb2ab04baaaaee2ad7a3aca1a457e7 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 9 Dec 2020 15:57:57 -0500 Subject: [PATCH 0185/1212] provider: Document, standardize, and lint for disappears acceptance testing of parent resources (#16597) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16591 --- .semgrep.yml | 13 ++++++++ aws/resource_aws_backup_selection_test.go | 2 +- aws/resource_aws_emr_instance_group_test.go | 2 +- ...ce_aws_lambda_event_source_mapping_test.go | 30 +------------------ ...e_aws_msk_scram_secret_association_test.go | 2 +- aws/resource_aws_network_acl_rule_test.go | 2 +- ...ws_networkfirewall_resource_policy_test.go | 4 +-- aws/resource_aws_s3_access_point_test.go | 2 +- ..._aws_s3_bucket_public_access_block_test.go | 2 +- ...afv2_web_acl_logging_configuration_test.go | 2 +- .../running-and-writing-acceptance-tests.md | 26 ++++++++++++++++ 11 files changed, 49 insertions(+), 38 deletions(-) diff --git a/.semgrep.yml b/.semgrep.yml index bc90ac1ec1a..5ef50de66ad 100644 --- a/.semgrep.yml +++ b/.semgrep.yml @@ -1,4 +1,17 @@ rules: + - id: acceptance-test-naming-parent-disappears + languages: [go] + message: Prefer naming acceptance tests with _disappears_Parent suffix + paths: + include: + - 'aws/*_test.go' + patterns: + - pattern: func $FUNCNAME(t *testing.T) { ... 
} + - metavariable-regex: + metavariable: "$FUNCNAME" + regex: "^TestAcc[^_]+_([a-zA-Z]+[dD]isappears|[^_]+_disappears)$" + severity: WARNING + - id: aws-sdk-go-multiple-service-imports languages: [go] message: Resources should not implement multiple AWS service functionality diff --git a/aws/resource_aws_backup_selection_test.go b/aws/resource_aws_backup_selection_test.go index e7030459966..9d6c72f5a2c 100644 --- a/aws/resource_aws_backup_selection_test.go +++ b/aws/resource_aws_backup_selection_test.go @@ -59,7 +59,7 @@ func TestAccAwsBackupSelection_disappears(t *testing.T) { }) } -func TestAccAwsBackupSelection_backupPlanDisappears(t *testing.T) { +func TestAccAwsBackupSelection_disappears_BackupPlan(t *testing.T) { var selection1 backup.GetBackupSelectionOutput resourceName := "aws_backup_selection.test" backupPlanResourceName := "aws_backup_plan.test" diff --git a/aws/resource_aws_emr_instance_group_test.go b/aws/resource_aws_emr_instance_group_test.go index 340f765cfdc..ab797cf7347 100644 --- a/aws/resource_aws_emr_instance_group_test.go +++ b/aws/resource_aws_emr_instance_group_test.go @@ -202,7 +202,7 @@ func TestAccAWSEMRInstanceGroup_InstanceCount(t *testing.T) { } // Regression test for https://github.com/hashicorp/terraform-provider-aws/issues/1355 -func TestAccAWSEMRInstanceGroup_EmrClusterDisappears(t *testing.T) { +func TestAccAWSEMRInstanceGroup_disappears_EmrCluster(t *testing.T) { var cluster emr.Cluster var ig emr.InstanceGroup rInt := acctest.RandInt() diff --git a/aws/resource_aws_lambda_event_source_mapping_test.go b/aws/resource_aws_lambda_event_source_mapping_test.go index 11eb857de2f..f4e7c7d65af 100644 --- a/aws/resource_aws_lambda_event_source_mapping_test.go +++ b/aws/resource_aws_lambda_event_source_mapping_test.go @@ -246,35 +246,7 @@ func TestAccAWSLambdaEventSourceMapping_SQSBatchWindow(t *testing.T) { }) } -func TestAccAWSLambdaEventSourceMapping_kinesis_disappears(t *testing.T) { - var conf lambda.EventSourceMappingConfiguration - - rString := acctest.RandString(8) - roleName := fmt.Sprintf("tf_acc_role_lambda_esm_import_%s", rString) - policyName := fmt.Sprintf("tf_acc_policy_lambda_esm_import_%s", rString) - attName := fmt.Sprintf("tf_acc_att_lambda_esm_import_%s", rString) - streamName := fmt.Sprintf("tf_acc_stream_lambda_esm_import_%s", rString) - funcName := fmt.Sprintf("tf_acc_lambda_esm_import_%s", rString) - uFuncName := fmt.Sprintf("tf_acc_lambda_esm_import_updated_%s", rString) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckLambdaEventSourceMappingDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLambdaEventSourceMappingConfig_kinesis(roleName, policyName, attName, streamName, funcName, uFuncName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsLambdaEventSourceMappingExists("aws_lambda_event_source_mapping.lambda_event_source_mapping_test", &conf), - testAccCheckAWSLambdaEventSourceMappingDisappears(&conf), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAWSLambdaEventSourceMapping_sqsDisappears(t *testing.T) { +func TestAccAWSLambdaEventSourceMapping_disappears(t *testing.T) { var conf lambda.EventSourceMappingConfiguration rString := acctest.RandString(8) diff --git a/aws/resource_aws_msk_scram_secret_association_test.go b/aws/resource_aws_msk_scram_secret_association_test.go index e73e5d2047a..0847e305a2e 100644 --- a/aws/resource_aws_msk_scram_secret_association_test.go +++ 
b/aws/resource_aws_msk_scram_secret_association_test.go @@ -109,7 +109,7 @@ func TestAccAwsMskScramSecretAssociation_disappears(t *testing.T) { }) } -func TestAccAwsMskScramSecretAssociation_clusterDisappears(t *testing.T) { +func TestAccAwsMskScramSecretAssociation_disappears_Cluster(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_msk_scram_secret_association.test" clusterResourceName := "aws_msk_cluster.test" diff --git a/aws/resource_aws_network_acl_rule_test.go b/aws/resource_aws_network_acl_rule_test.go index ea56cdc6650..9823023ed00 100644 --- a/aws/resource_aws_network_acl_rule_test.go +++ b/aws/resource_aws_network_acl_rule_test.go @@ -68,7 +68,7 @@ func TestAccAWSNetworkAclRule_disappears(t *testing.T) { }) } -func TestAccAWSNetworkAclRule_ingressEgressSameNumberDisappears(t *testing.T) { +func TestAccAWSNetworkAclRule_disappears_IngressEgressSameNumber(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, diff --git a/aws/resource_aws_networkfirewall_resource_policy_test.go b/aws/resource_aws_networkfirewall_resource_policy_test.go index 80b5c776a14..3eee0d8de0f 100644 --- a/aws/resource_aws_networkfirewall_resource_policy_test.go +++ b/aws/resource_aws_networkfirewall_resource_policy_test.go @@ -101,7 +101,7 @@ func TestAccAwsNetworkFirewallResourcePolicy_disappears(t *testing.T) { }) } -func TestAccAwsNetworkFirewallResourcePolicy_firewallPolicy_disappears(t *testing.T) { +func TestAccAwsNetworkFirewallResourcePolicy_disappears_FirewallPolicy(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_networkfirewall_resource_policy.test" @@ -122,7 +122,7 @@ func TestAccAwsNetworkFirewallResourcePolicy_firewallPolicy_disappears(t *testin }) } -func TestAccAwsNetworkFirewallResourcePolicy_ruleGroup_disappears(t *testing.T) { +func TestAccAwsNetworkFirewallResourcePolicy_disappears_RuleGroup(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_networkfirewall_resource_policy.test" diff --git a/aws/resource_aws_s3_access_point_test.go b/aws/resource_aws_s3_access_point_test.go index 8dd9b040955..9dc1917e132 100644 --- a/aws/resource_aws_s3_access_point_test.go +++ b/aws/resource_aws_s3_access_point_test.go @@ -141,7 +141,7 @@ func TestAccAWSS3AccessPoint_disappears(t *testing.T) { }) } -func TestAccAWSS3AccessPoint_bucketDisappears(t *testing.T) { +func TestAccAWSS3AccessPoint_disappears_Bucket(t *testing.T) { var v s3control.GetAccessPointOutput bucketName := acctest.RandomWithPrefix("tf-acc-test") accessPointName := acctest.RandomWithPrefix("tf-acc-test") diff --git a/aws/resource_aws_s3_bucket_public_access_block_test.go b/aws/resource_aws_s3_bucket_public_access_block_test.go index 2dfafeab6aa..b065203e0fd 100644 --- a/aws/resource_aws_s3_bucket_public_access_block_test.go +++ b/aws/resource_aws_s3_bucket_public_access_block_test.go @@ -67,7 +67,7 @@ func TestAccAWSS3BucketPublicAccessBlock_disappears(t *testing.T) { }) } -func TestAccAWSS3BucketPublicAccessBlock_bucketDisappears(t *testing.T) { +func TestAccAWSS3BucketPublicAccessBlock_disappears_Bucket(t *testing.T) { var config s3.PublicAccessBlockConfiguration name := fmt.Sprintf("tf-test-bucket-%d", acctest.RandInt()) resourceName := "aws_s3_bucket_public_access_block.bucket" diff --git a/aws/resource_aws_wafv2_web_acl_logging_configuration_test.go b/aws/resource_aws_wafv2_web_acl_logging_configuration_test.go index 5a1768b75d5..bce42080a91 100644 --- 
a/aws/resource_aws_wafv2_web_acl_logging_configuration_test.go +++ b/aws/resource_aws_wafv2_web_acl_logging_configuration_test.go @@ -201,7 +201,7 @@ func TestAccAwsWafv2WebACLLoggingConfiguration_disappears(t *testing.T) { }) } -func TestAccAwsWafv2WebACLLoggingConfiguration_webACLDisappears(t *testing.T) { +func TestAccAwsWafv2WebACLLoggingConfiguration_disappears_WebAcl(t *testing.T) { var v wafv2.LoggingConfiguration rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_wafv2_web_acl_logging_configuration.test" diff --git a/docs/contributing/running-and-writing-acceptance-tests.md b/docs/contributing/running-and-writing-acceptance-tests.md index ad833ab99fc..b8ccab736c1 100644 --- a/docs/contributing/running-and-writing-acceptance-tests.md +++ b/docs/contributing/running-and-writing-acceptance-tests.md @@ -639,6 +639,32 @@ if err != nil { } ``` +For child resources that are encapsulated by a parent resource, it is also preferable to verify that removing the parent resource will not generate an error either. These are typically named `TestAccAws{SERVICE}{THING}_disappears_{PARENT}`, e.g. `TestAccAwsRoute53ZoneAssociation_disappears_Vpc`. + +```go +func TestAccAwsExampleChildThing_disappears_ParentThing(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + parentResourceName := "aws_example_parent_thing.test" + resourceName := "aws_example_child_thing.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsExampleChildThingDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsExampleThingConfigName(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsExampleThingExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsExampleParentThing(), parentResourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} +``` + #### Per Attribute Acceptance Tests These are typically named `TestAccAws{SERVICE}{THING}_{ATTRIBUTE}`, e.g. `TestAccAwsCloudWatchDashboard_Name` From f7cabdbd3d3239e9f4c5203be857d7805786440c Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 9 Dec 2020 15:59:28 -0500 Subject: [PATCH 0186/1212] docs/contributing: Add Extending Terraform link for acceptance test framework environment variables (#16600) --- docs/contributing/running-and-writing-acceptance-tests.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/contributing/running-and-writing-acceptance-tests.md b/docs/contributing/running-and-writing-acceptance-tests.md index b8ccab736c1..fd5c17988ac 100644 --- a/docs/contributing/running-and-writing-acceptance-tests.md +++ b/docs/contributing/running-and-writing-acceptance-tests.md @@ -116,6 +116,8 @@ ok github.com/hashicorp/terraform-provider-aws/aws 55.619s Running acceptance tests requires version 0.12.26 or higher of the Terraform CLI to be installed. +For advanced developers, the acceptance testing framework accepts some additional environment variables that can be used to control Terraform CLI binary selection, logging, and other behaviors. See the [Extending Terraform documentation](https://www.terraform.io/docs/extend/testing/acceptance-tests/index.html#environment-variables) for more information. + Please Note: On macOS 10.14 and later (and some Linux distributions), the default user open file limit is 256. This may cause unexpected issues when running the acceptance testing since this can prevent various operations from occurring such as opening network connections to AWS.
To view this limit, the `ulimit -n` command can be run. To update this limit, run `ulimit -n 1024` (or higher). ### Running Cross-Account Tests From 870c35d564a6d979bc2f285ffba30b20c8fc46df Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 9 Dec 2020 15:00:58 -0600 Subject: [PATCH 0187/1212] resource/lakeformation_resource: Use ForceNew for more update options --- aws/resource_aws_lakeformation_resource.go | 22 +--------- ...esource_aws_lakeformation_resource_test.go | 43 ++++++++++++++++++- .../r/lakeformation_resource.html.markdown | 4 +- 3 files changed, 46 insertions(+), 23 deletions(-) diff --git a/aws/resource_aws_lakeformation_resource.go b/aws/resource_aws_lakeformation_resource.go index e861a77fef3..6aad389c111 100644 --- a/aws/resource_aws_lakeformation_resource.go +++ b/aws/resource_aws_lakeformation_resource.go @@ -15,7 +15,6 @@ func resourceAwsLakeFormationResource() *schema.Resource { return &schema.Resource{ Create: resourceAwsLakeFormationResourceCreate, Read: resourceAwsLakeFormationResourceRead, - Update: resourceAwsLakeFormationResourceUpdate, Delete: resourceAwsLakeFormationResourceDelete, Schema: map[string]*schema.Schema{ @@ -33,6 +32,7 @@ func resourceAwsLakeFormationResource() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, + ForceNew: true, ValidateFunc: validateArn, }, }, @@ -102,26 +102,6 @@ func resourceAwsLakeFormationResourceRead(d *schema.ResourceData, meta interface return nil } -func resourceAwsLakeFormationResourceUpdate(d *schema.ResourceData, meta interface{}) error { - if _, ok := d.GetOk("role_arn"); !ok { - return resourceAwsLakeFormationResourceCreate(d, meta) - } - - conn := meta.(*AWSClient).lakeformationconn - - input := &lakeformation.UpdateResourceInput{ - ResourceArn: aws.String(d.Get("resource_arn").(string)), - RoleArn: aws.String(d.Get("role_arn").(string)), - } - - _, err := conn.UpdateResource(input) - if err != nil { - return fmt.Errorf("error updating Lake Formation Resource (%s): %w", d.Id(), err) - } - - return resourceAwsLakeFormationResourceRead(d, meta) -} - func resourceAwsLakeFormationResourceDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).lakeformationconn resourceArn := d.Get("resource_arn").(string) diff --git a/aws/resource_aws_lakeformation_resource_test.go b/aws/resource_aws_lakeformation_resource_test.go index 4004e30aa7f..d6c457490e1 100644 --- a/aws/resource_aws_lakeformation_resource_test.go +++ b/aws/resource_aws_lakeformation_resource_test.go @@ -82,7 +82,7 @@ func TestAccAWSLakeFormationResource_serviceLinkedRole(t *testing.T) { }) } -func TestAccAWSLakeFormationResource_update(t *testing.T) { +func TestAccAWSLakeFormationResource_updateRoleToRole(t *testing.T) { bucketName := acctest.RandomWithPrefix("tf-acc-test") roleName1 := acctest.RandomWithPrefix("tf-acc-test") roleName2 := acctest.RandomWithPrefix("tf-acc-test") @@ -115,6 +115,47 @@ func TestAccAWSLakeFormationResource_update(t *testing.T) { }) } +func TestAccAWSLakeFormationResource_updateSLRToRole(t *testing.T) { + bucketName := acctest.RandomWithPrefix("tf-acc-test") + roleName := acctest.RandomWithPrefix("tf-acc-test") + resourceAddr := "aws_lakeformation_resource.test" + bucketAddr := "aws_s3_bucket.test" + roleAddr := "aws_iam_role.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) + testAccPreCheckIamServiceLinkedRole(t, "/aws-service-role/lakeformation.amazonaws.com") + }, + 
Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLakeFormationResourceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLakeFormationResourceConfig_serviceLinkedRole(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLakeFormationResourceExists(resourceAddr), + resource.TestCheckResourceAttrPair(resourceAddr, "resource_arn", bucketAddr, "arn"), + testAccCheckResourceAttrGlobalARN(resourceAddr, "role_arn", "iam", "role/aws-service-role/lakeformation.amazonaws.com/AWSServiceRoleForLakeFormationDataAccess"), + ), + }, + { + Config: testAccAWSLakeFormationResourceConfig_basic(bucketName, roleName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLakeFormationResourceExists(resourceAddr), + resource.TestCheckResourceAttrPair(resourceAddr, "role_arn", roleAddr, "arn"), + resource.TestCheckResourceAttrPair(resourceAddr, "resource_arn", bucketAddr, "arn"), + ), + }, + }, + }) +} + +// AWS does not support changing from an IAM role to an SLR. No error is thrown, +// but the registration is not changed (the IAM role remains registered). +// +// func TestAccAWSLakeFormationResource_updateRoleToSLR(t *testing.T) { + func testAccCheckAWSLakeFormationResourceDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).lakeformationconn diff --git a/website/docs/r/lakeformation_resource.html.markdown b/website/docs/r/lakeformation_resource.html.markdown index b5aea161e58..6444cbe713f 100644 --- a/website/docs/r/lakeformation_resource.html.markdown +++ b/website/docs/r/lakeformation_resource.html.markdown @@ -29,7 +29,9 @@ resource "aws_lakeformation_resource" "example" { The following arguments are required: * `resource_arn` – (Required) Amazon Resource Name (ARN) of the resource, an S3 path. -* `role_arn` – (Optional) Role that has read/write access to the resource. If not provided, the service-linked role is used. +* `role_arn` – (Optional) Role that has read/write access to the resource. If not provided, the Lake Formation service-linked role is used; this role must already exist. + +~> **NOTE:** AWS does not support registering an S3 location with an IAM role and subsequently updating the S3 location registration to a service-linked role.
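+
+A minimal sketch of registering a location with an explicit role (the `aws_s3_bucket.example` and `aws_iam_role.example` resources here are hypothetical and assumed to be defined elsewhere). Because `role_arn` now forces a new resource, changing the role replaces the registration rather than updating it in place:
+
+```hcl
+resource "aws_lakeformation_resource" "example" {
+  # The S3 location being registered with Lake Formation.
+  resource_arn = aws_s3_bucket.example.arn
+
+  # role_arn is ForceNew: changing it destroys and recreates the registration.
+  role_arn = aws_iam_role.example.arn
+}
+```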
## Attributes Reference From 92f4a932895fa4e062dd5d91adfb8ab6f8aa99da Mon Sep 17 00:00:00 2001 From: Kevin Yeh Date: Wed, 9 Dec 2020 17:49:41 -0500 Subject: [PATCH 0188/1212] Mark kinesis stream access_key as sensitive --- aws/resource_aws_kinesis_firehose_delivery_stream.go | 1 + 1 file changed, 1 insertion(+) diff --git a/aws/resource_aws_kinesis_firehose_delivery_stream.go b/aws/resource_aws_kinesis_firehose_delivery_stream.go index bc53eb05c7f..fa77e76333a 100644 --- a/aws/resource_aws_kinesis_firehose_delivery_stream.go +++ b/aws/resource_aws_kinesis_firehose_delivery_stream.go @@ -1476,6 +1476,7 @@ func resourceAwsKinesisFirehoseDeliveryStream() *schema.Resource { Type: schema.TypeString, Optional: true, ValidateFunc: validation.StringLenBetween(0, 4096), + Sensitive: true, }, "role_arn": { From 4fd4695c779da62120fd0ba7b1aa58f92a0081bd Mon Sep 17 00:00:00 2001 From: Kevin Yeh Date: Thu, 10 Dec 2020 00:57:03 -0500 Subject: [PATCH 0189/1212] lint --- aws/resource_aws_kinesis_firehose_delivery_stream.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_kinesis_firehose_delivery_stream.go b/aws/resource_aws_kinesis_firehose_delivery_stream.go index fa77e76333a..c3998de9548 100644 --- a/aws/resource_aws_kinesis_firehose_delivery_stream.go +++ b/aws/resource_aws_kinesis_firehose_delivery_stream.go @@ -1476,7 +1476,7 @@ func resourceAwsKinesisFirehoseDeliveryStream() *schema.Resource { Type: schema.TypeString, Optional: true, ValidateFunc: validation.StringLenBetween(0, 4096), - Sensitive: true, + Sensitive: true, }, "role_arn": { From f7e181fb06b0f8e280dd88f9139e83e6f5b03be0 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 10 Dec 2020 17:52:45 +0200 Subject: [PATCH 0190/1212] reduce retry logic --- aws/resource_aws_sagemaker_image.go | 33 ++++++----------------------- 1 file changed, 7 insertions(+), 26 deletions(-) diff --git a/aws/resource_aws_sagemaker_image.go b/aws/resource_aws_sagemaker_image.go index 4a434c31588..3da9bdb809c 100644 --- a/aws/resource_aws_sagemaker_image.go +++ b/aws/resource_aws_sagemaker_image.go @@ -4,12 +4,10 @@ import ( "fmt" "log" "regexp" - "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/sagemaker" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" @@ -86,34 +84,17 @@ func resourceAwsSagemakerImageCreate(d *schema.ResourceData, meta interface{}) e // For some reason, even when the operation is retried, the same error response is returned although the role is valid; a short sleep before creation works around this.
time.Sleep(1 * time.Minute) - log.Printf("[DEBUG] sagemaker Image create config: %#v", *input) - err := resource.Retry(1*time.Minute, func() *resource.RetryError { - var err error - _, err = conn.CreateImage(input) - if err != nil { - return resource.NonRetryableError(fmt.Errorf("error creating SageMaker Image: %w", err)) - } - - d.SetId(name) - - out, err := waiter.ImageCreated(conn, d.Id()) - - if strings.Contains(aws.StringValue(out.FailureReason), "Unable to assume role with RoleArn") { - return resource.RetryableError(err) - } - if err != nil { - return resource.NonRetryableError(fmt.Errorf("error waiting for SageMaker Image (%s) to create: %w", d.Id(), err)) - } - return nil - }) - if isResourceTimeoutError(err) { - _, err = conn.CreateImage(input) - _, err = waiter.ImageCreated(conn, d.Id()) - } + _, err := conn.CreateImage(input) if err != nil { return fmt.Errorf("error creating SageMaker Image %s: %w", name, err) } + d.SetId(name) + + if _, err := waiter.ImageCreated(conn, d.Id()); err != nil { + return fmt.Errorf("error waiting for SageMaker Image (%s) to be created: %w", d.Id(), err) + } + return resourceAwsSagemakerImageRead(d, meta) } From 725032a9027d69d85cf0b7017ced026e96ac79ce Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 11 Dec 2020 09:55:12 -0500 Subject: [PATCH 0191/1212] tests/resource/aws_instance: Correct 'TestAccAWSInstance_EbsRootDevice_ModifyType'. (#16702) Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSInstance_EbsRootDevice_ModifyType' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccAWSInstance_EbsRootDevice_ModifyType -timeout 120m === RUN TestAccAWSInstance_EbsRootDevice_ModifyType === PAUSE TestAccAWSInstance_EbsRootDevice_ModifyType === CONT TestAccAWSInstance_EbsRootDevice_ModifyType --- PASS: TestAccAWSInstance_EbsRootDevice_ModifyType (126.02s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 126.110s --- aws/resource_aws_instance_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_instance_test.go b/aws/resource_aws_instance_test.go index e50b8a14c3c..2132b71a968 100644 --- a/aws/resource_aws_instance_test.go +++ b/aws/resource_aws_instance_test.go @@ -1540,7 +1540,7 @@ func TestAccAWSInstance_EbsRootDevice_ModifyType(t *testing.T) { deleteOnTermination := "true" originalType := "gp2" - updatedType := "io1" + updatedType := "standard" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, From 57d37d813b557dc898c0837a202d5a5b8cd5b8b3 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 11 Dec 2020 10:03:43 -0500 Subject: [PATCH 0192/1212] aws_launch_template: AWS Wavelength support (#16707) * r/aws_launch_template: Add 'associate_carrier_ip_address' to 'network_interfaces' block. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSLaunchTemplate_associateCarrierIPAddress' ==> Checking that code complies with gofmt requirements... 
TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccAWSLaunchTemplate_associateCarrierIPAddress -timeout 120m === RUN TestAccAWSLaunchTemplate_associateCarrierIPAddress === PAUSE TestAccAWSLaunchTemplate_associateCarrierIPAddress === CONT TestAccAWSLaunchTemplate_associateCarrierIPAddress --- PASS: TestAccAWSLaunchTemplate_associateCarrierIPAddress (96.08s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 96.150s * d/aws_launch_template: Add 'associate_carrier_ip_address' to 'network_interfaces' block. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSLaunchTemplateDataSource_associateCarrierIPAddress' ACCTEST_PARALLELISM=2 ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 2 -run=TestAccAWSLaunchTemplateDataSource_associateCarrierIPAddress -timeout 120m === RUN TestAccAWSLaunchTemplateDataSource_associateCarrierIPAddress === PAUSE TestAccAWSLaunchTemplateDataSource_associateCarrierIPAddress === CONT TestAccAWSLaunchTemplateDataSource_associateCarrierIPAddress --- PASS: TestAccAWSLaunchTemplateDataSource_associateCarrierIPAddress (38.24s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 38.329s --- aws/data_source_aws_launch_template.go | 4 ++ aws/data_source_aws_launch_template_test.go | 51 +++++++++++++ aws/resource_aws_launch_template.go | 19 +++++ aws/resource_aws_launch_template_test.go | 76 ++++++++++++++++++++ website/docs/r/launch_template.html.markdown | 1 + 5 files changed, 151 insertions(+) diff --git a/aws/data_source_aws_launch_template.go b/aws/data_source_aws_launch_template.go index 3c4f27dad4e..8de8edac8c0 100644 --- a/aws/data_source_aws_launch_template.go +++ b/aws/data_source_aws_launch_template.go @@ -239,6 +239,10 @@ func dataSourceAwsLaunchTemplate() *schema.Resource { Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "associate_carrier_ip_address": { + Type: schema.TypeString, + Computed: true, + }, "associate_public_ip_address": { Type: schema.TypeString, Computed: true, diff --git a/aws/data_source_aws_launch_template_test.go b/aws/data_source_aws_launch_template_test.go index 596695bbc6e..8f03d3983c2 100644 --- a/aws/data_source_aws_launch_template_test.go +++ b/aws/data_source_aws_launch_template_test.go @@ -160,6 +160,41 @@ func TestAccAWSLaunchTemplateDataSource_associatePublicIPAddress(t *testing.T) { }) } +func TestAccAWSLaunchTemplateDataSource_associateCarrierIPAddress(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + dataSourceName := "data.aws_launch_template.test" + resourceName := "aws_launch_template.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLaunchTemplateDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLaunchTemplateDataSourceConfig_associateCarrierIpAddress(rName, "true"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "network_interfaces.#", resourceName, "network_interfaces.#"), + resource.TestCheckResourceAttrPair(dataSourceName, "network_interfaces.0.associate_carrier_ip_address", resourceName, "network_interfaces.0.associate_carrier_ip_address"), + ), + }, + { + Config: testAccAWSLaunchTemplateDataSourceConfig_associateCarrierIpAddress(rName, "false"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "network_interfaces.#", resourceName, "network_interfaces.#"), + 
resource.TestCheckResourceAttrPair(dataSourceName, "network_interfaces.0.associate_carrier_ip_address", resourceName, "network_interfaces.0.associate_carrier_ip_address"), + ), + }, + { + Config: testAccAWSLaunchTemplateDataSourceConfig_associateCarrierIpAddress(rName, "null"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "network_interfaces.#", resourceName, "network_interfaces.#"), + resource.TestCheckResourceAttrPair(dataSourceName, "network_interfaces.0.associate_carrier_ip_address", resourceName, "network_interfaces.0.associate_carrier_ip_address"), + ), + }, + }, + }) +} + func TestAccAWSLaunchTemplateDataSource_networkInterfaces_deleteOnTermination(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") dataSourceName := "data.aws_launch_template.test" @@ -290,6 +325,22 @@ data "aws_launch_template" "test" { `, rName, associatePublicIPAddress) } +func testAccAWSLaunchTemplateDataSourceConfig_associateCarrierIpAddress(rName, associateCarrierIPAddress string) string { + return fmt.Sprintf(` +resource "aws_launch_template" "test" { + name = %[1]q + + network_interfaces { + associate_carrier_ip_address = %[2]s + } +} + +data "aws_launch_template" "test" { + name = aws_launch_template.test.name +} +`, rName, associateCarrierIPAddress) +} + func testAccAWSLaunchTemplateDataSourceConfigNetworkInterfacesDeleteOnTermination(rName, deleteOnTermination string) string { return fmt.Sprintf(` resource "aws_launch_template" "test" { diff --git a/aws/resource_aws_launch_template.go b/aws/resource_aws_launch_template.go index 06afca1cfd2..48b7b2e54af 100644 --- a/aws/resource_aws_launch_template.go +++ b/aws/resource_aws_launch_template.go @@ -418,6 +418,12 @@ func resourceAwsLaunchTemplate() *schema.Resource { Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "associate_carrier_ip_address": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: suppressEquivalentTypeStringBoolean, + ValidateFunc: validateTypeStringNullableBoolean, + }, "associate_public_ip_address": { Type: schema.TypeString, Optional: true, @@ -1130,6 +1136,11 @@ func getNetworkInterfaces(n []*ec2.LaunchTemplateInstanceNetworkInterfaceSpecifi "private_ip_address": aws.StringValue(v.PrivateIpAddress), "subnet_id": aws.StringValue(v.SubnetId), } + + if v.AssociateCarrierIpAddress != nil { + networkInterface["associate_carrier_ip_address"] = strconv.FormatBool(aws.BoolValue(v.AssociateCarrierIpAddress)) + } + if v.AssociatePublicIpAddress != nil { networkInterface["associate_public_ip_address"] = strconv.FormatBool(aws.BoolValue(v.AssociatePublicIpAddress)) } @@ -1533,6 +1544,14 @@ func readNetworkInterfacesFromConfig(ni map[string]interface{}) (*ec2.LaunchTemp networkInterface.NetworkInterfaceId = aws.String(v) } + if v, ok := ni["associate_carrier_ip_address"]; ok && v.(string) != "" { + vBool, err := strconv.ParseBool(v.(string)) + if err != nil { + return nil, fmt.Errorf("error converting associate_carrier_ip_address %q from string to boolean: %s", v.(string), err) + } + networkInterface.AssociateCarrierIpAddress = aws.Bool(vBool) + } + if v, ok := ni["associate_public_ip_address"]; ok && v.(string) != "" { vBool, err := strconv.ParseBool(v.(string)) if err != nil { diff --git a/aws/resource_aws_launch_template_test.go b/aws/resource_aws_launch_template_test.go index abe1c477cc1..0c9d5172157 100644 --- a/aws/resource_aws_launch_template_test.go +++ b/aws/resource_aws_launch_template_test.go @@ -772,6 +772,55 @@ func 
TestAccAWSLaunchTemplate_associatePublicIPAddress(t *testing.T) { }) } +func TestAccAWSLaunchTemplate_associateCarrierIPAddress(t *testing.T) { + var template ec2.LaunchTemplate + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_launch_template.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLaunchTemplateDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLaunchTemplateConfig_associateCarrierIpAddress(rName, "true"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLaunchTemplateExists(resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "network_interfaces.0.network_interface_id"), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.0.associate_carrier_ip_address", "true"), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.0.ipv4_address_count", "2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSLaunchTemplateConfig_associateCarrierIpAddress(rName, "false"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLaunchTemplateExists(resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "network_interfaces.0.network_interface_id"), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.0.associate_carrier_ip_address", "false"), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.0.ipv4_address_count", "2"), + ), + }, + { + Config: testAccAWSLaunchTemplateConfig_associateCarrierIpAddress(rName, "null"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLaunchTemplateExists(resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "network_interfaces.0.network_interface_id"), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.0.associate_carrier_ip_address", ""), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.0.ipv4_address_count", "2"), + ), + }, + }, + }) +} + func TestAccAWSLaunchTemplate_placement_partitionNum(t *testing.T) { var template ec2.LaunchTemplate resourceName := "aws_launch_template.test" @@ -1605,6 +1654,33 @@ resource "aws_launch_template" "test" { `, rName, associatePublicIPAddress) } +func testAccAWSLaunchTemplateConfig_associateCarrierIpAddress(rName, associateCarrierIPAddress string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.1.0.0/16" +} + +resource "aws_subnet" "test" { + vpc_id = aws_vpc.test.id + cidr_block = "10.1.0.0/24" +} + +resource "aws_network_interface" "test" { + subnet_id = aws_subnet.test.id +} + +resource "aws_launch_template" "test" { + name = %[1]q + + network_interfaces { + network_interface_id = aws_network_interface.test.id + associate_carrier_ip_address = %[2]s + ipv4_address_count = 2 + } +} +`, rName, associateCarrierIPAddress) +} + func testAccAWSLaunchTemplateConfig_networkInterface_ipv6Addresses(rName string) string { return fmt.Sprintf(` resource "aws_launch_template" "test" { diff --git a/website/docs/r/launch_template.html.markdown b/website/docs/r/launch_template.html.markdown index 6adf5f4c5ac..04a730939a8 100644 --- a/website/docs/r/launch_template.html.markdown +++ 
b/website/docs/r/launch_template.html.markdown @@ -289,6 +289,7 @@ Check limitations for autoscaling group in [Creating an Auto Scaling Group Using Each `network_interfaces` block supports the following: +* `associate_carrier_ip_address` - Associate a Carrier IP address with `eth0` for a new network interface. Use this option when you launch an instance in a Wavelength Zone and want to associate a Carrier IP address with the network interface. Boolean value. * `associate_public_ip_address` - Associate a public ip address with the network interface. Boolean value. * `delete_on_termination` - Whether the network interface should be destroyed on instance termination. Defaults to `false` if not set. * `description` - Description of the network interface. From 17ac50fba6ec1e67eef49b58921bfa279fe7a602 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Fri, 11 Dec 2020 10:04:47 -0500 Subject: [PATCH 0193/1212] Update CHANGELOG for #16707 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 74e546e2f91..cdae2e20342 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,8 +11,10 @@ FEATURES ENHANCEMENTS +* data-source/aws_launch_template: Add `associate_carrier_ip_address` attribute to `network_interfaces` configuration block [GH-16707] * resource/aws_appmesh_virtual_node: Add `listener.connection_pool` attribute [GH-16167] * resource/aws_appmesh_virtual_node: Add `listener.outlier_detection` attribute [GH-16167] +* resource/aws_launch_template: Add `associate_carrier_ip_address` attribute to `network_interfaces` configuration block [GH-16707] * resource/aws_ssm_maintenance_window: Add `schedule_offset` argument [GH-16569] * resource/aws_workspaces_workspace: Add failed request error code along with message [GH-16459] From 755de69f96e1ef89293891babdf93359d0362105 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 11 Dec 2020 10:18:01 -0500 Subject: [PATCH 0194/1212] service/ec2: Add gp3 volume scalable throughput to aws_launch_template resource and datasource (#16649) * r/aws_launch_template: Add 'throughput' attribute to `block_device_mappings.ebs` configuration block. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSLaunchTemplate_BlockDeviceMappings_EBS' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccAWSLaunchTemplate_BlockDeviceMappings_EBS -timeout 120m === RUN TestAccAWSLaunchTemplate_BlockDeviceMappings_EBS === PAUSE TestAccAWSLaunchTemplate_BlockDeviceMappings_EBS === RUN TestAccAWSLaunchTemplate_BlockDeviceMappings_EBS_DeleteOnTermination === PAUSE TestAccAWSLaunchTemplate_BlockDeviceMappings_EBS_DeleteOnTermination === RUN TestAccAWSLaunchTemplate_BlockDeviceMappings_EBS_Gp3 === PAUSE TestAccAWSLaunchTemplate_BlockDeviceMappings_EBS_Gp3 === CONT TestAccAWSLaunchTemplate_BlockDeviceMappings_EBS === CONT TestAccAWSLaunchTemplate_BlockDeviceMappings_EBS_Gp3 === CONT TestAccAWSLaunchTemplate_BlockDeviceMappings_EBS_DeleteOnTermination --- PASS: TestAccAWSLaunchTemplate_BlockDeviceMappings_EBS_Gp3 (35.81s) --- PASS: TestAccAWSLaunchTemplate_BlockDeviceMappings_EBS (35.94s) --- PASS: TestAccAWSLaunchTemplate_BlockDeviceMappings_EBS_DeleteOnTermination (68.82s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 68.946s * d/aws_launch_template: Add 'throughput' attribute to `block_device_mappings.ebs` configuration block. 
Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSLaunchTemplateDataSource_' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccAWSLaunchTemplateDataSource_ -timeout 120m === RUN TestAccAWSLaunchTemplateDataSource_basic === PAUSE TestAccAWSLaunchTemplateDataSource_basic === RUN TestAccAWSLaunchTemplateDataSource_filter_basic === PAUSE TestAccAWSLaunchTemplateDataSource_filter_basic === RUN TestAccAWSLaunchTemplateDataSource_filter_tags === PAUSE TestAccAWSLaunchTemplateDataSource_filter_tags === RUN TestAccAWSLaunchTemplateDataSource_metadataOptions === PAUSE TestAccAWSLaunchTemplateDataSource_metadataOptions === RUN TestAccAWSLaunchTemplateDataSource_associatePublicIPAddress === PAUSE TestAccAWSLaunchTemplateDataSource_associatePublicIPAddress === RUN TestAccAWSLaunchTemplateDataSource_networkInterfaces_deleteOnTermination === PAUSE TestAccAWSLaunchTemplateDataSource_networkInterfaces_deleteOnTermination === RUN TestAccAWSLaunchTemplateDataSource_NonExistent === PAUSE TestAccAWSLaunchTemplateDataSource_NonExistent === CONT TestAccAWSLaunchTemplateDataSource_associatePublicIPAddress === CONT TestAccAWSLaunchTemplateDataSource_NonExistent === CONT TestAccAWSLaunchTemplateDataSource_networkInterfaces_deleteOnTermination === CONT TestAccAWSLaunchTemplateDataSource_filter_basic === CONT TestAccAWSLaunchTemplateDataSource_metadataOptions === CONT TestAccAWSLaunchTemplateDataSource_filter_tags === CONT TestAccAWSLaunchTemplateDataSource_basic --- PASS: TestAccAWSLaunchTemplateDataSource_NonExistent (4.66s) --- PASS: TestAccAWSLaunchTemplateDataSource_basic (22.81s) --- PASS: TestAccAWSLaunchTemplateDataSource_filter_basic (23.91s) --- PASS: TestAccAWSLaunchTemplateDataSource_metadataOptions (24.09s) --- PASS: TestAccAWSLaunchTemplateDataSource_filter_tags (25.09s) --- PASS: TestAccAWSLaunchTemplateDataSource_networkInterfaces_deleteOnTermination (47.45s) --- PASS: TestAccAWSLaunchTemplateDataSource_associatePublicIPAddress (48.65s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 48.749s --- aws/data_source_aws_launch_template.go | 4 ++ aws/resource_aws_launch_template.go | 13 ++++ aws/resource_aws_launch_template_test.go | 73 ++++++++++++++++++++ website/docs/r/launch_template.html.markdown | 3 +- 4 files changed, 92 insertions(+), 1 deletion(-) diff --git a/aws/data_source_aws_launch_template.go b/aws/data_source_aws_launch_template.go index 8de8edac8c0..1cc0203b7f2 100644 --- a/aws/data_source_aws_launch_template.go +++ b/aws/data_source_aws_launch_template.go @@ -79,6 +79,10 @@ func dataSourceAwsLaunchTemplate() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "throughput": { + Type: schema.TypeInt, + Computed: true, + }, "volume_size": { Type: schema.TypeInt, Computed: true, diff --git a/aws/resource_aws_launch_template.go b/aws/resource_aws_launch_template.go index 48b7b2e54af..ebdb5b4792a 100644 --- a/aws/resource_aws_launch_template.go +++ b/aws/resource_aws_launch_template.go @@ -133,6 +133,12 @@ func resourceAwsLaunchTemplate() *schema.Resource { Type: schema.TypeString, Optional: true, }, + "throughput": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ValidateFunc: validation.IntBetween(125, 1000), + }, "volume_size": { Type: schema.TypeInt, Optional: true, @@ -935,6 +941,9 @@ func getBlockDeviceMappings(m []*ec2.LaunchTemplateBlockDeviceMapping) []interfa if v.Ebs.SnapshotId != nil { ebs["snapshot_id"] = aws.StringValue(v.Ebs.SnapshotId) } + 
if v.Ebs.Throughput != nil { + ebs["throughput"] = aws.Int64Value(v.Ebs.Throughput) + } mapping["ebs"] = []interface{}{ebs} } @@ -1507,6 +1516,10 @@ func readEbsBlockDeviceFromConfig(ebs map[string]interface{}) (*ec2.LaunchTempla ebsDevice.SnapshotId = aws.String(v) } + if v := ebs["throughput"].(int); v > 0 { + ebsDevice.Throughput = aws.Int64(int64(v)) + } + if v := ebs["volume_size"]; v != nil { ebsDevice.VolumeSize = aws.Int64(int64(v.(int))) } diff --git a/aws/resource_aws_launch_template_test.go b/aws/resource_aws_launch_template_test.go index 0c9d5172157..99dfabd6fab 100644 --- a/aws/resource_aws_launch_template_test.go +++ b/aws/resource_aws_launch_template_test.go @@ -143,6 +143,7 @@ func TestAccAWSLaunchTemplate_BlockDeviceMappings_EBS(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "block_device_mappings.#", "1"), resource.TestCheckResourceAttr(resourceName, "block_device_mappings.0.device_name", "/dev/xvda"), resource.TestCheckResourceAttr(resourceName, "block_device_mappings.0.ebs.#", "1"), + resource.TestCheckResourceAttr(resourceName, "block_device_mappings.0.ebs.0.throughput", "0"), resource.TestCheckResourceAttr(resourceName, "block_device_mappings.0.ebs.0.volume_size", "15"), ), }, @@ -196,6 +197,38 @@ func TestAccAWSLaunchTemplate_BlockDeviceMappings_EBS_DeleteOnTermination(t *tes }) } +func TestAccAWSLaunchTemplate_BlockDeviceMappings_EBS_Gp3(t *testing.T) { + var template ec2.LaunchTemplate + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_launch_template.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLaunchTemplateDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLaunchTemplateConfig_BlockDeviceMappings_EBS_Gp3(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLaunchTemplateExists(resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "block_device_mappings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "block_device_mappings.0.device_name", "/dev/xvda"), + resource.TestCheckResourceAttr(resourceName, "block_device_mappings.0.ebs.#", "1"), + resource.TestCheckResourceAttr(resourceName, "block_device_mappings.0.ebs.0.iops", "4000"), + resource.TestCheckResourceAttr(resourceName, "block_device_mappings.0.ebs.0.throughput", "500"), + resource.TestCheckResourceAttr(resourceName, "block_device_mappings.0.ebs.0.volume_size", "15"), + resource.TestCheckResourceAttr(resourceName, "block_device_mappings.0.ebs.0.volume_type", "gp3"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAWSLaunchTemplate_EbsOptimized(t *testing.T) { var template ec2.LaunchTemplate rName := acctest.RandomWithPrefix("tf-acc-test") @@ -1319,6 +1352,46 @@ resource "aws_autoscaling_group" "test" { `, rName, deleteOnTermination)) } +func testAccAWSLaunchTemplateConfig_BlockDeviceMappings_EBS_Gp3(rName string) string { + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAvailableAZsNoOptInConfig(), + testAccAvailableEc2InstanceTypeForRegion("t3.micro", "t2.micro"), + fmt.Sprintf(` +resource "aws_launch_template" "test" { + image_id = data.aws_ami.amzn-ami-minimal-hvm-ebs.id + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + name = %[1]q + + block_device_mappings { + device_name = "/dev/xvda" + + ebs { + iops = 4000 + throughput = 500 + volume_size = 15 + volume_type = "gp3" + } 
+ } +} + +# Creating an AutoScaling Group verifies the launch template +# ValidationError: You must use a valid fully-formed launch template. the encrypted flag cannot be specified since device /dev/sda1 has a snapshot specified. +resource "aws_autoscaling_group" "test" { + availability_zones = [data.aws_availability_zones.available.names[0]] + desired_capacity = 0 + max_size = 0 + min_size = 0 + name = %[1]q + + launch_template { + id = aws_launch_template.test.id + version = aws_launch_template.test.default_version + } +} +`, rName)) +} + func testAccAWSLaunchTemplateConfig_NetworkInterfaces_DeleteOnTermination(rName string, deleteOnTermination string) string { return fmt.Sprintf(` resource "aws_launch_template" "test" { diff --git a/website/docs/r/launch_template.html.markdown b/website/docs/r/launch_template.html.markdown index 04a730939a8..7a9e5a94ee4 100644 --- a/website/docs/r/launch_template.html.markdown +++ b/website/docs/r/launch_template.html.markdown @@ -179,8 +179,9 @@ The `ebs` block supports the following: * `kms_key_id` - The ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. `encrypted` must be set to `true` when this is set. * `snapshot_id` - The Snapshot ID to mount. +* `throughput` - The throughput to provision for a `gp3` volume, with a maximum of 1,000 MiB/s. * `volume_size` - The size of the volume in gigabytes. -* `volume_type` - The type of volume. Can be `"standard"`, `"gp2"`, `"io1"` or `"io2"`. (Default: `"standard"`). +* `volume_type` - The volume type. Can be `standard`, `gp2`, `gp3`, `io1`, `io2`, `sc1` or `st1` (Default: `gp2`). ### Capacity Reservation Specification From 120b9448c6966ebb0ea4371ffb557d383526db52 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Fri, 11 Dec 2020 10:19:11 -0500 Subject: [PATCH 0195/1212] Update CHANGELOG for #16649 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cdae2e20342..286b189d769 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,9 +12,11 @@ FEATURES ENHANCEMENTS * data-source/aws_launch_template: Add `associate_carrier_ip_address` attribute to `network_interfaces` configuration block [GH-16707] +* data-source/aws_launch_template: Add `throughput` attribute to `block_device_mappings.ebs` configuration block [GH-16649] * resource/aws_appmesh_virtual_node: Add `listener.connection_pool` attribute [GH-16167] * resource/aws_appmesh_virtual_node: Add `listener.outlier_detection` attribute [GH-16167] * resource/aws_launch_template: Add `associate_carrier_ip_address` attribute to `network_interfaces` configuration block [GH-16707] +* resource/aws_launch_template: Add `throughput` attribute to `block_device_mappings.ebs` configuration block [GH-16649] * resource/aws_ssm_maintenance_window: Add `schedule_offset` argument [GH-16569] * resource/aws_workspaces_workspace: Add failed request error code along with message [GH-16459] From 02f60650780fe2acdfe5244c08ee7953641250ab Mon Sep 17 00:00:00 2001 From: Yoriyasu Yano <430092+yorinasub17@users.noreply.github.com> Date: Fri, 11 Dec 2020 09:23:45 -0600 Subject: [PATCH 0196/1212] data-source/aws_launch_template: Support id as argument (#16457) --- aws/data_source_aws_launch_template.go | 9 +++++ aws/data_source_aws_launch_template_test.go | 36 ++++++++++++++++++++ website/docs/d/launch_template.html.markdown | 1 + 3 files changed, 46 insertions(+) diff --git a/aws/data_source_aws_launch_template.go b/aws/data_source_aws_launch_template.go index
1cc0203b7f2..964ce62aae5 100644 --- a/aws/data_source_aws_launch_template.go +++ b/aws/data_source_aws_launch_template.go @@ -29,6 +29,11 @@ func dataSourceAwsLaunchTemplate() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, "default_version": { Type: schema.TypeInt, Computed: true, @@ -391,6 +396,7 @@ func dataSourceAwsLaunchTemplateRead(d *schema.ResourceData, meta interface{}) e ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig filters, filtersOk := d.GetOk("filter") + id, idOk := d.GetOk("id") name, nameOk := d.GetOk("name") tags, tagsOk := d.GetOk("tags") @@ -398,6 +404,9 @@ func dataSourceAwsLaunchTemplateRead(d *schema.ResourceData, meta interface{}) e if filtersOk { params.Filters = buildAwsDataSourceFilters(filters.(*schema.Set)) } + if idOk { + params.LaunchTemplateIds = []*string{aws.String(id.(string))} + } if nameOk { params.LaunchTemplateNames = []*string{aws.String(name.(string))} } diff --git a/aws/data_source_aws_launch_template_test.go b/aws/data_source_aws_launch_template_test.go index 8f03d3983c2..1eec0725a84 100644 --- a/aws/data_source_aws_launch_template_test.go +++ b/aws/data_source_aws_launch_template_test.go @@ -33,6 +33,30 @@ func TestAccAWSLaunchTemplateDataSource_basic(t *testing.T) { }) } +func TestAccAWSLaunchTemplateDataSource_id_basic(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + dataSourceName := "data.aws_launch_template.test" + resourceName := "aws_launch_template.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLaunchTemplateDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLaunchTemplateDataSourceConfig_BasicId(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "arn", dataSourceName, "arn"), + resource.TestCheckResourceAttrPair(resourceName, "default_version", dataSourceName, "default_version"), + resource.TestCheckResourceAttrPair(resourceName, "latest_version", dataSourceName, "latest_version"), + resource.TestCheckResourceAttrPair(resourceName, "name", dataSourceName, "name"), + resource.TestCheckResourceAttrPair(resourceName, "hibernation_options", dataSourceName, "hibernation_options"), + ), + }, + }, + }) +} + func TestAccAWSLaunchTemplateDataSource_filter_basic(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") dataSourceName := "data.aws_launch_template.test" @@ -256,6 +280,18 @@ data "aws_launch_template" "test" { `, rName) } +func testAccAWSLaunchTemplateDataSourceConfig_BasicId(rName string) string { + return fmt.Sprintf(` +resource "aws_launch_template" "test" { + name = %q +} + +data "aws_launch_template" "test" { + id = aws_launch_template.test.id +} +`, rName) +} + func testAccAWSLaunchTemplateDataSourceConfigBasicFilter(rName string) string { return fmt.Sprintf(` resource "aws_launch_template" "test" { diff --git a/website/docs/d/launch_template.html.markdown b/website/docs/d/launch_template.html.markdown index f9b73bf511e..940e4a2b6f0 100644 --- a/website/docs/d/launch_template.html.markdown +++ b/website/docs/d/launch_template.html.markdown @@ -34,6 +34,7 @@ data "aws_launch_template" "test" { The following arguments are supported: * `filter` - (Optional) Configuration block(s) for filtering. Detailed below. +* `id` - (Optional) The ID of the specific launch template to retrieve. * `name` - (Optional) The name of the launch template. 
* `tags` - (Optional) A map of tags, each pair of which must exactly match a pair on the desired Launch Template. From 00ccbeea1fe7e97454b492893fbae8a215eb5a49 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Fri, 11 Dec 2020 10:24:22 -0500 Subject: [PATCH 0197/1212] Update CHANGELOG for #16457 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 286b189d769..36aa97aed4b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ ENHANCEMENTS * data-source/aws_launch_template: Add `associate_carrier_ip_address` attribute to `network_interfaces` configuration block [GH-16707] * data-source/aws_launch_template: Add `throughput` attribute to `block_device_mappings.ebs` configuration block [GH-16649] +* data-source/aws_launch_template: Support `id` as argument [GH-16457] * resource/aws_appmesh_virtual_node: Add `listener.connection_pool` attribute [GH-16167] * resource/aws_appmesh_virtual_node: Add `listener.outlier_detection` attribute [GH-16167] * resource/aws_launch_template: Add `associate_carrier_ip_address` attribute to `network_interfaces` configuration block [GH-16707] From b5f64e121031d2299262bb13979a224e14ded53f Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 11 Dec 2020 11:25:28 -0500 Subject: [PATCH 0198/1212] resource/aws_spot_fleet_request: Add 'throughput' attribute to 'launch_specification.ebs_block_device' and 'launch_specification.root_block_device' configuration blocks. (#16652) Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSSpotFleetRequest_LaunchSpecification_EbsBlockDeviceGp3\|TestAccAWSSpotFleetRequest_LaunchSpecification_RootBlockDeviceGp3' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccAWSSpotFleetRequest_LaunchSpecification_EbsBlockDeviceGp3\|TestAccAWSSpotFleetRequest_LaunchSpecification_RootBlockDeviceGp3 -timeout 120m === RUN TestAccAWSSpotFleetRequest_LaunchSpecification_EbsBlockDeviceGp3 === PAUSE TestAccAWSSpotFleetRequest_LaunchSpecification_EbsBlockDeviceGp3 === RUN TestAccAWSSpotFleetRequest_LaunchSpecification_RootBlockDeviceGp3 === PAUSE TestAccAWSSpotFleetRequest_LaunchSpecification_RootBlockDeviceGp3 === CONT TestAccAWSSpotFleetRequest_LaunchSpecification_EbsBlockDeviceGp3 === CONT TestAccAWSSpotFleetRequest_LaunchSpecification_RootBlockDeviceGp3 --- PASS: TestAccAWSSpotFleetRequest_LaunchSpecification_EbsBlockDeviceGp3 (124.58s) --- PASS: TestAccAWSSpotFleetRequest_LaunchSpecification_RootBlockDeviceGp3 (124.82s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 124.908s --- aws/resource_aws_spot_fleet_request.go | 28 +++++ aws/resource_aws_spot_fleet_request_test.go | 128 ++++++++++++++++++++ 2 files changed, 156 insertions(+) diff --git a/aws/resource_aws_spot_fleet_request.go b/aws/resource_aws_spot_fleet_request.go index 492685a8e1f..39a852836d0 100644 --- a/aws/resource_aws_spot_fleet_request.go +++ b/aws/resource_aws_spot_fleet_request.go @@ -118,6 +118,12 @@ func resourceAwsSpotFleetRequest() *schema.Resource { Computed: true, ForceNew: true, }, + "throughput": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, "volume_size": { Type: schema.TypeInt, Optional: true, @@ -191,6 +197,12 @@ func resourceAwsSpotFleetRequest() *schema.Resource { Computed: true, ForceNew: true, }, + "throughput": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, "volume_size": { Type: schema.TypeInt, Optional: true, @@ -706,6 +718,10 
@@ func readSpotFleetBlockDeviceMappingsFromConfig( ebs.Iops = aws.Int64(int64(v)) } + if v, ok := bd["throughput"].(int); ok && v > 0 { + ebs.Throughput = aws.Int64(int64(v)) + } + blockDevices = append(blockDevices, &ec2.BlockDeviceMapping{ DeviceName: aws.String(bd["device_name"].(string)), Ebs: ebs, @@ -755,6 +771,10 @@ func readSpotFleetBlockDeviceMappingsFromConfig( ebs.Iops = aws.Int64(int64(v)) } + if v, ok := bd["throughput"].(int); ok && v > 0 { + ebs.Throughput = aws.Int64(int64(v)) + } + if dn, err := fetchRootDeviceName(d["ami"].(string), conn); err == nil { if dn == nil { return nil, fmt.Errorf( @@ -1475,6 +1495,10 @@ func ebsBlockDevicesToSet(bdm []*ec2.BlockDeviceMapping, rootDevName *string) *s m["iops"] = aws.Int64Value(ebs.Iops) } + if ebs.Throughput != nil { + m["throughput"] = aws.Int64Value(ebs.Throughput) + } + set.Add(m) } } @@ -1535,6 +1559,10 @@ func rootBlockDeviceToSet( m["iops"] = aws.Int64Value(val.Ebs.Iops) } + if val.Ebs.Throughput != nil { + m["throughput"] = aws.Int64Value(val.Ebs.Throughput) + } + set.Add(m) } } diff --git a/aws/resource_aws_spot_fleet_request_test.go b/aws/resource_aws_spot_fleet_request_test.go index f80284f183a..95ac81468f6 100644 --- a/aws/resource_aws_spot_fleet_request_test.go +++ b/aws/resource_aws_spot_fleet_request_test.go @@ -945,6 +945,71 @@ func TestAccAWSSpotFleetRequest_LaunchSpecification_RootBlockDevice_KmsKeyId(t * }) } +func TestAccAWSSpotFleetRequest_LaunchSpecification_EbsBlockDeviceGp3(t *testing.T) { + var config ec2.SpotFleetRequestConfig + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_spot_fleet_request.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSEc2SpotFleetRequest(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSpotFleetRequestDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSpotFleetRequestLaunchSpecificationEbsBlockDeviceGp3(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSSpotFleetRequestExists(resourceName, &config), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "launch_specification.*.ebs_block_device.*", map[string]string{ + "device_name": "/dev/xvdcz", + "iops": "4000", + "throughput": "500", + "volume_size": "15", + "volume_type": "gp3", + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"wait_for_fulfillment"}, + }, + }, + }) +} + +func TestAccAWSSpotFleetRequest_LaunchSpecification_RootBlockDeviceGp3(t *testing.T) { + var config ec2.SpotFleetRequestConfig + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_spot_fleet_request.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSEc2SpotFleetRequest(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSpotFleetRequestDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSpotFleetRequestLaunchSpecificationRootBlockDeviceGp3(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSSpotFleetRequestExists(resourceName, &config), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "launch_specification.*.root_block_device.*", map[string]string{ + "iops": "4000", + "throughput": "500", + "volume_size": "15", + "volume_type": "gp3", + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"wait_for_fulfillment"}, + }, 
+ }, + }) +} + func TestAccAWSSpotFleetRequest_withTags(t *testing.T) { var config ec2.SpotFleetRequestConfig rName := acctest.RandomWithPrefix("tf-acc-test") @@ -2358,6 +2423,69 @@ resource "aws_spot_fleet_request" "test" { `, validUntil, rName) } +func testAccAWSSpotFleetRequestLaunchSpecificationEbsBlockDeviceGp3(rName string) string { + return composeConfig( + testAccAWSSpotFleetRequestConfigBase(rName), + ` +resource "aws_spot_fleet_request" "test" { + iam_fleet_role = aws_iam_role.test.arn + spot_price = "0.05" + target_capacity = 1 + terminate_instances_with_expiration = true + wait_for_fulfillment = true + + launch_specification { + ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id + instance_type = "t2.micro" + + ebs_block_device { + device_name = "/dev/xvda" + volume_type = "gp2" + volume_size = 8 + } + + ebs_block_device { + device_name = "/dev/xvdcz" + iops = 4000 + throughput = 500 + volume_size = 15 + volume_type = "gp3" + } + } + + depends_on = [aws_iam_policy_attachment.test] +} +`) +} + +func testAccAWSSpotFleetRequestLaunchSpecificationRootBlockDeviceGp3(rName string) string { + return composeConfig( + testAccAWSSpotFleetRequestConfigBase(rName), + ` +resource "aws_spot_fleet_request" "test" { + iam_fleet_role = aws_iam_role.test.arn + spot_price = "0.05" + target_capacity = 1 + terminate_instances_with_expiration = true + wait_for_fulfillment = true + + launch_specification { + ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id + instance_type = "t2.micro" + + root_block_device { + iops = 4000 + throughput = 500 + volume_size = 15 + volume_type = "gp3" + } + } + + depends_on = [aws_iam_policy_attachment.test] +} +`) +} + func testAccAWSSpotFleetRequestLaunchSpecificationWithInstanceStoreAmi(rName string, validUntil string) string { return testAccLatestAmazonLinuxHvmInstanceStoreAmiConfig() + testAccAWSSpotFleetRequestConfigBase(rName) + From ed411a52e36d738c5e28fc5a5f660e3f38088783 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Fri, 11 Dec 2020 11:28:22 -0500 Subject: [PATCH 0199/1212] Update CHANGELOG for #16652 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 36aa97aed4b..181cd700105 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ ENHANCEMENTS * resource/aws_appmesh_virtual_node: Add `listener.outlier_detection` attribute [GH-16167] * resource/aws_launch_template: Add `associate_carrier_ip_address` attribute to `network_interfaces` configuration block [GH-16707] * resource/aws_launch_template: Add `throughput` attribute to `block_device_mappings.ebs` configuration block [GH-16649] +* resource/aws_spot_fleet_request: Add `throughput` attribute to `launch_specification.ebs_block_device` and `launch_specification.root_block_device` configuration blocks [GH-16652] * resource/aws_ssm_maintenance_window: Add `schedule_offset` argument [GH-16569] * resource/aws_workspaces_workspace: Add failed request error code along with message [GH-16459] From 226e9a506a9d681c596c190ca988a9c91899f893 Mon Sep 17 00:00:00 2001 From: tf-release-bot Date: Fri, 11 Dec 2020 16:47:04 +0000 Subject: [PATCH 0200/1212] v3.21.0 --- CHANGELOG.md | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 181cd700105..f03e053af65 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,39 +1,39 @@ -## 3.21.0 (Unreleased) +## 3.21.0 (December 11, 2020) NOTES -* resource/aws_imagebuilder_image_recipe: Previously the ordering of `component` configuration blocks 
was not properly handled by the resource, which could cause unexpected behavior with multiple Components. These configurations may see the ordering difference being fixed after upgrade. [GH-16566] +* resource/aws_imagebuilder_image_recipe: Previously the ordering of `component` configuration blocks was not properly handled by the resource, which could cause unexpected behavior with multiple Components. These configurations may see the ordering difference being fixed after upgrade. ([#16566](https://github.com/hashicorp/terraform-provider-aws/issues/16566)) FEATURES -* **New Resource:** `aws_ec2_carrier_gateway` [GH-16252] -* **New Resource:** `aws_glue_schema` [GH-16612] +* **New Resource:** `aws_ec2_carrier_gateway` ([#16252](https://github.com/hashicorp/terraform-provider-aws/issues/16252)) +* **New Resource:** `aws_glue_schema` ([#16612](https://github.com/hashicorp/terraform-provider-aws/issues/16612)) ENHANCEMENTS -* data-source/aws_launch_template: Add `associate_carrier_ip_address` attribute to `network_interfaces` configuration block [GH-16707] -* data-source/aws_launch_template: Add `throughput` attribute to `block_device_mappings.ebs` configuration block [GH-16649] -* data-source/aws_launch_template: Support `id` as argument [GH-16457] -* resource/aws_appmesh_virtual_node: Add `listener.connection_pool` attribute [GH-16167] -* resource/aws_appmesh_virtual_node: Add `listener.outlier_detection` attribute [GH-16167] -* resource/aws_launch_template: Add `associate_carrier_ip_address` attribute to `network_interfaces` configuration block [GH-16707] -* resource/aws_launch_template: Add `throughput` attribute to `block_device_mappings.ebs` configuration block [GH-16649] -* resource/aws_spot_fleet_request: Add `throughput` attribute to `launch_specification.ebs_block_device` and `launch_specification.root_block_device` configuration blocks [GH-16652] -* resource/aws_ssm_maintenance_window: Add `schedule_offset` argument [GH-16569] -* resource/aws_workspaces_workspace: Add failed request error code along with message [GH-16459] +* data-source/aws_launch_template: Add `associate_carrier_ip_address` attribute to `network_interfaces` configuration block ([#16707](https://github.com/hashicorp/terraform-provider-aws/issues/16707)) +* data-source/aws_launch_template: Add `throughput` attribute to `block_device_mappings.ebs` configuration block ([#16649](https://github.com/hashicorp/terraform-provider-aws/issues/16649)) +* data-source/aws_launch_template: Support `id` as argument ([#16457](https://github.com/hashicorp/terraform-provider-aws/issues/16457)) +* resource/aws_appmesh_virtual_node: Add `listener.connection_pool` attribute ([#16167](https://github.com/hashicorp/terraform-provider-aws/issues/16167)) +* resource/aws_appmesh_virtual_node: Add `listener.outlier_detection` attribute ([#16167](https://github.com/hashicorp/terraform-provider-aws/issues/16167)) +* resource/aws_launch_template: Add `associate_carrier_ip_address` attribute to `network_interfaces` configuration block ([#16707](https://github.com/hashicorp/terraform-provider-aws/issues/16707)) +* resource/aws_launch_template: Add `throughput` attribute to `block_device_mappings.ebs` configuration block ([#16649](https://github.com/hashicorp/terraform-provider-aws/issues/16649)) +* resource/aws_spot_fleet_request: Add `throughput` attribute to `launch_specification.ebs_block_device` and `launch_specification.root_block_device` configuration blocks ([#16652](https://github.com/hashicorp/terraform-provider-aws/issues/16652)) +* 
resource/aws_ssm_maintenance_window: Add `schedule_offset` argument ([#16569](https://github.com/hashicorp/terraform-provider-aws/issues/16569)) +* resource/aws_workspaces_workspace: Add failed request error code along with message ([#16459](https://github.com/hashicorp/terraform-provider-aws/issues/16459)) BUG FIXES -* data-source/aws_customer_gateway: Prevent missing `id` attribute when not configured as argument [GH-16667] -* data-source/aws_ec2_transit_gateway: Prevent missing `id` attribute when not configured as argument [GH-16667] -* data-source/aws_ec2_transit_gateway_peering_attachment: Prevent missing `id` attribute when not configured as argument [GH-16667] -* data-source/aws_ec2_transit_gateway_route_table: Prevent missing `id` attribute when not configured as argument [GH-16667] -* data-source/aws_ec2_transit_gateway_vpc_attachment: Prevent missing `id` attribute when not configured as argument [GH-16667] -* data-source/aws_guardduty_detector: Prevent missing `id` attribute when not configured as argument [GH-16667] -* data-source/aws_imagebuilder_image_recipe: Ensure proper ordering of `component` attribute [GH-16566] -* resource/aws_backup_plan: Prevent plan-time validation error for pre-existing resources with `lifecycle` `delete_after` and/or `copy_action` `lifecycle` `delete_after` arguments configured [GH-16605] -* resource/aws_imagebuilder_image_recipe: Ensure proper ordering of `component` configuration blocks [GH-16566] -* resource/aws_workspaces_directory: Fix empty custom_security_group_id & default_ou [GH-16589] +* data-source/aws_customer_gateway: Prevent missing `id` attribute when not configured as argument ([#16667](https://github.com/hashicorp/terraform-provider-aws/issues/16667)) +* data-source/aws_ec2_transit_gateway: Prevent missing `id` attribute when not configured as argument ([#16667](https://github.com/hashicorp/terraform-provider-aws/issues/16667)) +* data-source/aws_ec2_transit_gateway_peering_attachment: Prevent missing `id` attribute when not configured as argument ([#16667](https://github.com/hashicorp/terraform-provider-aws/issues/16667)) +* data-source/aws_ec2_transit_gateway_route_table: Prevent missing `id` attribute when not configured as argument ([#16667](https://github.com/hashicorp/terraform-provider-aws/issues/16667)) +* data-source/aws_ec2_transit_gateway_vpc_attachment: Prevent missing `id` attribute when not configured as argument ([#16667](https://github.com/hashicorp/terraform-provider-aws/issues/16667)) +* data-source/aws_guardduty_detector: Prevent missing `id` attribute when not configured as argument ([#16667](https://github.com/hashicorp/terraform-provider-aws/issues/16667)) +* data-source/aws_imagebuilder_image_recipe: Ensure proper ordering of `component` attribute ([#16566](https://github.com/hashicorp/terraform-provider-aws/issues/16566)) +* resource/aws_backup_plan: Prevent plan-time validation error for pre-existing resources with `lifecycle` `delete_after` and/or `copy_action` `lifecycle` `delete_after` arguments configured ([#16605](https://github.com/hashicorp/terraform-provider-aws/issues/16605)) +* resource/aws_imagebuilder_image_recipe: Ensure proper ordering of `component` configuration blocks ([#16566](https://github.com/hashicorp/terraform-provider-aws/issues/16566)) +* resource/aws_workspaces_directory: Fix empty custom_security_group_id & default_ou ([#16589](https://github.com/hashicorp/terraform-provider-aws/issues/16589)) ## 3.20.0 (December 03, 2020) From 4350d67e1613885ee4eb770b5252e446310485e6 Mon Sep 17 
00:00:00 2001 From: Dirk Avery <31492422+YakDriver@users.noreply.github.com> Date: Fri, 11 Dec 2020 11:53:29 -0600 Subject: [PATCH 0201/1212] Update CHANGELOG.md --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f03e053af65..dfd88882056 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +## 3.22.0 (unreleased) + +FEATURES + +* **New Resource:** `aws_lakeformation_resource` ([#13267](https://github.com/hashicorp/terraform-provider-aws/issues/13267)) + ## 3.21.0 (December 11, 2020) NOTES From 5c90eeda526b174d3356b73d8d85625b3dfe3673 Mon Sep 17 00:00:00 2001 From: Gaylord Mazelier Date: Fri, 11 Dec 2020 19:47:55 +0100 Subject: [PATCH 0202/1212] Ensure not nil, even if no partition keys defined. --- aws/resource_aws_glue_catalog_table.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/aws/resource_aws_glue_catalog_table.go b/aws/resource_aws_glue_catalog_table.go index 527f16ba141..122581d4429 100644 --- a/aws/resource_aws_glue_catalog_table.go +++ b/aws/resource_aws_glue_catalog_table.go @@ -450,6 +450,8 @@ func expandGlueTableInput(d *schema.ResourceData) *glue.TableInput { if v, ok := d.GetOk("partition_keys"); ok { tableInput.PartitionKeys = expandGlueColumns(v.([]interface{})) + } else { + tableInput.PartitionKeys = []*glue.Column{} } if v, ok := d.GetOk("view_original_text"); ok { From 159b09f3f80147e10cfac6c95a348888052b4886 Mon Sep 17 00:00:00 2001 From: Gaylord Mazelier Date: Fri, 11 Dec 2020 19:48:39 +0100 Subject: [PATCH 0203/1212] Check that partition keys are empty in the base test case --- aws/resource_aws_glue_catalog_table_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/aws/resource_aws_glue_catalog_table_test.go b/aws/resource_aws_glue_catalog_table_test.go index c44144e9f52..8f575fe2b74 100644 --- a/aws/resource_aws_glue_catalog_table_test.go +++ b/aws/resource_aws_glue_catalog_table_test.go @@ -28,6 +28,7 @@ func TestAccAWSGlueCatalogTable_basic(t *testing.T) { testAccCheckResourceAttrRegionalARN(resourceName, "arn", "glue", fmt.Sprintf("table/%s/%s", rName, rName)), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "database_name", rName), + resource.TestCheckResourceAttr(resourceName, "partition_keys.#", "0"), testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), ), }, From 3fada75cc71b19681d70863cc710c533f4c74de9 Mon Sep 17 00:00:00 2001 From: Marco Rinalducci Date: Tue, 18 Aug 2020 16:22:21 +0200 Subject: [PATCH 0204/1212] Add vpn options to aws_vpn_connection resource --- aws/resource_aws_vpn_connection.go | 893 +++++++++++++++++++- aws/resource_aws_vpn_connection_test.go | 285 ++++++- website/docs/r/vpn_connection.html.markdown | 48 +- 3 files changed, 1206 insertions(+), 20 deletions(-) diff --git a/aws/resource_aws_vpn_connection.go b/aws/resource_aws_vpn_connection.go index 552c1d90f0a..8ba13db3e22 100644 --- a/aws/resource_aws_vpn_connection.go +++ b/aws/resource_aws_vpn_connection.go @@ -105,6 +105,44 @@ func resourceAwsVpnConnection() *schema.Resource { ForceNew: true, }, + "enable_acceleration": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + RequiredWith: []string{"transit_gateway_id"}, + }, + + "local_ipv4_network_cidr": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateLocalIpv4NetworkCidr(), + }, + + "local_ipv6_network_cidr": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateLocalIpv6NetworkCidr(), + RequiredWith:
[]string{"transit_gateway_id"}, + }, + + "remote_ipv4_network_cidr": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateLocalIpv4NetworkCidr(), + }, + + "remote_ipv6_network_cidr": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateLocalIpv6NetworkCidr(), + RequiredWith: []string{"transit_gateway_id"}, + }, + "static_routes_only": { Type: schema.TypeBool, Optional: true, @@ -112,6 +150,104 @@ func resourceAwsVpnConnection() *schema.Resource { ForceNew: true, }, + "tunnel_inside_ip_version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateTunnelInsideIPVersion(), + }, + + "tunnel1_dpd_timeout_action": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateVpnConnectionTunnelDpdTimeoutAction(), + }, + + "tunnel1_dpd_timeout_seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validateVpnConnectionTunnelDpdTimeoutSeconds(), + }, + + "tunnel1_ike_versions": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "tunnel1_phase1_dh_group_numbers": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + + "tunnel1_phase1_encryption_algorithms": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "tunnel1_phase1_integrity_algorithms": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "tunnel1_phase1_lifetime_seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validateVpnConnectionTunnelPhase1LifetimeSeconds(), + }, + + "tunnel1_phase2_dh_group_numbers": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + + "tunnel1_phase2_encryption_algorithms": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "tunnel1_phase2_integrity_algorithms": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "tunnel1_phase2_lifetime_seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validateVpnConnectionTunnelPhase2LifetimeSeconds(), + }, + + "tunnel1_rekey_fuzz_percentage": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validateVpnConnectionTunnelRekeyFuzzPercentage(), + }, + + "tunnel1_rekey_margin_time_seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validateVpnConnectionTunnelRekeyMarginTimeSeconds(), + }, + + "tunnel1_replay_window_size": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validateVpnConnectionTunnelReplayWindowSize(), + }, + + "tunnel1_startup_action": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateVpnConnectionTunnelStartupAction(), + }, + "tunnel1_inside_cidr": { Type: schema.TypeString, Optional: true, @@ -120,6 +256,15 @@ func resourceAwsVpnConnection() *schema.Resource { ValidateFunc: validateVpnConnectionTunnelInsideCIDR(), }, + "tunnel1_inside_ipv6_cidr": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateVpnConnectionTunnelInsideIpv6CIDR(), + RequiredWith: []string{"transit_gateway_id"}, + }, + "tunnel1_preshared_key": { Type: schema.TypeString, Optional: true, @@ -129,6 +274,96 @@ func resourceAwsVpnConnection() *schema.Resource { ValidateFunc: validateVpnConnectionTunnelPreSharedKey(), }, + "tunnel2_dpd_timeout_action": { + Type: 
schema.TypeString, + Optional: true, + ValidateFunc: validateVpnConnectionTunnelDpdTimeoutAction(), + }, + + "tunnel2_dpd_timeout_seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validateVpnConnectionTunnelDpdTimeoutSeconds(), + }, + + "tunnel2_ike_versions": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "tunnel2_phase1_dh_group_numbers": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + + "tunnel2_phase1_encryption_algorithms": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "tunnel2_phase1_integrity_algorithms": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "tunnel2_phase1_lifetime_seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validateVpnConnectionTunnelPhase1LifetimeSeconds(), + }, + + "tunnel2_phase2_dh_group_numbers": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + + "tunnel2_phase2_encryption_algorithms": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "tunnel2_phase2_integrity_algorithms": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "tunnel2_phase2_lifetime_seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validateVpnConnectionTunnelPhase2LifetimeSeconds(), + }, + + "tunnel2_rekey_fuzz_percentage": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validateVpnConnectionTunnelRekeyFuzzPercentage(), + }, + + "tunnel2_rekey_margin_time_seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validateVpnConnectionTunnelRekeyMarginTimeSeconds(), + }, + + "tunnel2_replay_window_size": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validateVpnConnectionTunnelReplayWindowSize(), + }, + + "tunnel2_startup_action": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateVpnConnectionTunnelStartupAction(), + }, + "tunnel2_inside_cidr": { Type: schema.TypeString, Optional: true, @@ -137,6 +372,15 @@ func resourceAwsVpnConnection() *schema.Resource { ValidateFunc: validateVpnConnectionTunnelInsideCIDR(), }, + "tunnel2_inside_ipv6_cidr": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateVpnConnectionTunnelInsideIpv6CIDR(), + RequiredWith: []string{"transit_gateway_id"}, + }, + "tunnel2_preshared_key": { Type: schema.TypeString, Optional: true, @@ -277,6 +521,182 @@ func resourceAwsVpnConnectionCreate(d *schema.ResourceData, meta interface{}) er {}, {}, } + if v, ok := d.GetOk("tunnel1_dpd_timeout_action"); ok { + options[0].DPDTimeoutAction = aws.String(v.(string)) + } + + if v, ok := d.GetOk("tunnel2_dpd_timeout_action"); ok { + options[1].DPDTimeoutAction = aws.String(v.(string)) + } + + if v, ok := d.GetOk("tunnel1_dpd_timeout_seconds"); ok { + options[0].DPDTimeoutSeconds = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("tunnel2_dpd_timeout_seconds"); ok { + options[1].DPDTimeoutSeconds = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("tunnel1_ike_versions"); ok { + l := []*ec2.IKEVersionsRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.IKEVersionsRequestListValue{Value: aws.String(s.(string))}) + } + options[0].IKEVersions = l + } + + if v, ok := d.GetOk("tunnel2_ike_versions"); ok { + l := 
[]*ec2.IKEVersionsRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.IKEVersionsRequestListValue{Value: aws.String(s.(string))}) + } + options[1].IKEVersions = l + } + + if v, ok := d.GetOk("tunnel1_phase1_dh_group_numbers"); ok { + l := []*ec2.Phase1DHGroupNumbersRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase1DHGroupNumbersRequestListValue{Value: aws.Int64(int64(s.(int)))}) + } + options[0].Phase1DHGroupNumbers = l + } + + if v, ok := d.GetOk("tunnel2_phase1_dh_group_numbers"); ok { + l := []*ec2.Phase1DHGroupNumbersRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase1DHGroupNumbersRequestListValue{Value: aws.Int64(int64(s.(int)))}) + } + options[1].Phase1DHGroupNumbers = l + } + + if v, ok := d.GetOk("tunnel1_phase1_encryption_algorithms"); ok { + l := []*ec2.Phase1EncryptionAlgorithmsRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase1EncryptionAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + } + options[0].Phase1EncryptionAlgorithms = l + } + + if v, ok := d.GetOk("tunnel2_phase1_encryption_algorithms"); ok { + l := []*ec2.Phase1EncryptionAlgorithmsRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase1EncryptionAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + } + options[1].Phase1EncryptionAlgorithms = l + } + + if v, ok := d.GetOk("tunnel1_phase1_integrity_algorithms"); ok { + l := []*ec2.Phase1IntegrityAlgorithmsRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase1IntegrityAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + } + options[0].Phase1IntegrityAlgorithms = l + } + + if v, ok := d.GetOk("tunnel2_phase1_integrity_algorithms"); ok { + l := []*ec2.Phase1IntegrityAlgorithmsRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase1IntegrityAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + } + options[1].Phase1IntegrityAlgorithms = l + } + + if v, ok := d.GetOk("tunnel1_phase1_lifetime_seconds"); ok { + options[0].Phase1LifetimeSeconds = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("tunnel2_phase1_lifetime_seconds"); ok { + options[1].Phase1LifetimeSeconds = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("tunnel1_phase2_dh_group_numbers"); ok { + l := []*ec2.Phase2DHGroupNumbersRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase2DHGroupNumbersRequestListValue{Value: aws.Int64(int64(s.(int)))}) + } + options[0].Phase2DHGroupNumbers = l + } + + if v, ok := d.GetOk("tunnel2_phase2_dh_group_numbers"); ok { + l := []*ec2.Phase2DHGroupNumbersRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase2DHGroupNumbersRequestListValue{Value: aws.Int64(int64(s.(int)))}) + } + options[1].Phase2DHGroupNumbers = l + } + + if v, ok := d.GetOk("tunnel1_phase2_encryption_algorithms"); ok { + l := []*ec2.Phase2EncryptionAlgorithmsRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase2EncryptionAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + } + options[0].Phase2EncryptionAlgorithms = l + } + + if v, ok := d.GetOk("tunnel2_phase2_encryption_algorithms"); ok { + l := []*ec2.Phase2EncryptionAlgorithmsRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase2EncryptionAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + } + 
options[1].Phase2EncryptionAlgorithms = l + } + + if v, ok := d.GetOk("tunnel1_phase2_integrity_algorithms"); ok { + l := []*ec2.Phase2IntegrityAlgorithmsRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase2IntegrityAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + } + options[0].Phase2IntegrityAlgorithms = l + } + + if v, ok := d.GetOk("tunnel2_phase2_integrity_algorithms"); ok { + l := []*ec2.Phase2IntegrityAlgorithmsRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase2IntegrityAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + } + options[1].Phase2IntegrityAlgorithms = l + } + + if v, ok := d.GetOk("tunnel1_phase2_lifetime_seconds"); ok { + options[0].Phase2LifetimeSeconds = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("tunnel2_phase2_lifetime_seconds"); ok { + options[1].Phase2LifetimeSeconds = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("tunnel1_rekey_fuzz_percentage"); ok { + options[0].RekeyFuzzPercentage = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("tunnel2_rekey_fuzz_percentage"); ok { + options[1].RekeyFuzzPercentage = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("tunnel1_rekey_margin_time_seconds"); ok { + options[0].RekeyMarginTimeSeconds = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("tunnel2_rekey_margin_time_seconds"); ok { + options[1].RekeyMarginTimeSeconds = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("tunnel1_replay_window_size"); ok { + options[0].ReplayWindowSize = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("tunnel2_replay_window_size"); ok { + options[1].ReplayWindowSize = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("tunnel1_startup_action"); ok { + options[0].StartupAction = aws.String(v.(string)) + } + + if v, ok := d.GetOk("tunnel2_startup_action"); ok { + options[1].StartupAction = aws.String(v.(string)) + } + if v, ok := d.GetOk("tunnel1_inside_cidr"); ok { options[0].TunnelInsideCidr = aws.String(v.(string)) } @@ -285,6 +705,14 @@ func resourceAwsVpnConnectionCreate(d *schema.ResourceData, meta interface{}) er options[1].TunnelInsideCidr = aws.String(v.(string)) } + if v, ok := d.GetOk("tunnel1_inside_ipv6_cidr"); ok { + options[0].TunnelInsideIpv6Cidr = aws.String(v.(string)) + } + + if v, ok := d.GetOk("tunnel2_inside_ipv6_cidr"); ok { + options[1].TunnelInsideIpv6Cidr = aws.String(v.(string)) + } + if v, ok := d.GetOk("tunnel1_preshared_key"); ok { options[0].PreSharedKey = aws.String(v.(string)) } @@ -293,11 +721,40 @@ func resourceAwsVpnConnectionCreate(d *schema.ResourceData, meta interface{}) er options[1].PreSharedKey = aws.String(v.(string)) } - connectOpts := &ec2.VpnConnectionOptionsSpecification{ - StaticRoutesOnly: aws.Bool(d.Get("static_routes_only").(bool)), - TunnelOptions: options, + var connectOpts *ec2.VpnConnectionOptionsSpecification = new(ec2.VpnConnectionOptionsSpecification) + ipv := d.Get("tunnel_inside_ip_version").(string) + if ipv == "ipv6" { + if v, ok := d.GetOk("local_ipv6_network_cidr"); ok { + connectOpts.LocalIpv6NetworkCidr = aws.String(v.(string)) + } + + if v, ok := d.GetOk("remote_ipv6_network_cidr"); ok { + connectOpts.RemoteIpv6NetworkCidr = aws.String(v.(string)) + } + + connectOpts.TunnelInsideIpVersion = aws.String(ipv) + } else { + if v, ok := d.GetOk("local_ipv4_network_cidr"); ok { + connectOpts.LocalIpv4NetworkCidr = aws.String(v.(string)) + } + + if v, ok := d.GetOk("remote_ipv4_network_cidr"); ok { + connectOpts.RemoteIpv4NetworkCidr = 
aws.String(v.(string)) + } + + connectOpts.TunnelInsideIpVersion = aws.String("ipv4") + } + + if v, ok := d.GetOk("enable_acceleration"); ok { + connectOpts.EnableAcceleration = aws.Bool(v.(bool)) } + if v, ok := d.GetOk("static_routes_only"); ok { + connectOpts.StaticRoutesOnly = aws.Bool(v.(bool)) + } + + connectOpts.TunnelOptions = options + createOpts := &ec2.CreateVpnConnectionInput{ CustomerGatewayId: aws.String(d.Get("customer_gateway_id").(string)), Options: connectOpts, @@ -436,12 +893,42 @@ func resourceAwsVpnConnectionRead(d *schema.ResourceData, meta interface{}) erro } if vpnConnection.Options != nil { + if err := d.Set("enable_acceleration", vpnConnection.Options.EnableAcceleration); err != nil { + return err + } + + if err := d.Set("local_ipv4_network_cidr", vpnConnection.Options.LocalIpv4NetworkCidr); err != nil { + return err + } + + if err := d.Set("local_ipv6_network_cidr", vpnConnection.Options.LocalIpv6NetworkCidr); err != nil { + return err + } + + if err := d.Set("remote_ipv4_network_cidr", vpnConnection.Options.RemoteIpv4NetworkCidr); err != nil { + return err + } + + if err := d.Set("remote_ipv6_network_cidr", vpnConnection.Options.RemoteIpv6NetworkCidr); err != nil { + return err + } + if err := d.Set("static_routes_only", vpnConnection.Options.StaticRoutesOnly); err != nil { return err } + + if err := d.Set("tunnel_inside_ip_version", vpnConnection.Options.TunnelInsideIpVersion); err != nil { + return err + } } else { - //If there no Options on the connection then we do not support *static_routes* + // If there are no Options on the connection then we do not support them + d.Set("enable_acceleration", false) + d.Set("local_ipv4_network_cidr", "") + d.Set("local_ipv6_network_cidr", "") + d.Set("remote_ipv4_network_cidr", "") + d.Set("remote_ipv6_network_cidr", "") d.Set("static_routes_only", false) + d.Set("tunnel_inside_ip_version", "") } // Set read only attributes.
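The create path above repeats the same expansion for every tunnel list attribute: read a `schema.Set`, convert each element into the corresponding AWS SDK request-list value, and assign the result to the tunnel options specification. A minimal sketch of that pattern for the IKE-versions case, assuming the plugin SDK v2 and AWS SDK for Go v1 import paths this provider uses (the standalone helper is hypothetical; the patch inlines the loop at each call site):

```go
package aws

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// expandIKEVersions converts a schema.Set of IKE version strings into the
// SDK's request-list form, mirroring the loop repeated for each tunnel
// attribute in Create and Update above.
func expandIKEVersions(s *schema.Set) []*ec2.IKEVersionsRequestListValue {
	l := make([]*ec2.IKEVersionsRequestListValue, 0, s.Len())
	for _, v := range s.List() {
		l = append(l, &ec2.IKEVersionsRequestListValue{Value: aws.String(v.(string))})
	}
	return l
}
```

Factoring the per-attribute loops into helpers like this is essentially what the follow-up refactoring commit below does.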
@@ -490,10 +977,273 @@ func resourceAwsVpnConnectionRead(d *schema.ResourceData, meta interface{}) erro func resourceAwsVpnConnectionUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn + tun1Changed := false + tun2Changed := false + vgwTelemetryTun1Index := 0 + vgwTelemetryTun2Index := 1 + options := []*ec2.ModifyVpnTunnelOptionsSpecification{ + {}, {}, + } + + var connOpts *ec2.ModifyVpnConnectionOptionsInput = new(ec2.ModifyVpnConnectionOptionsInput) + connChanged := false + + vpnConnectionID := d.Id() + + if d.HasChange("local_ipv4_network_cidr") { + connChanged = true + connOpts.LocalIpv4NetworkCidr = aws.String(d.Get("local_ipv4_network_cidr").(string)) + } + + if d.HasChange("local_ipv6_network_cidr") { + connChanged = true + connOpts.LocalIpv6NetworkCidr = aws.String(d.Get("local_ipv6_network_cidr").(string)) + } + + if d.HasChange("remote_ipv4_network_cidr") { + connChanged = true + connOpts.RemoteIpv4NetworkCidr = aws.String(d.Get("remote_ipv4_network_cidr").(string)) + } + + if d.HasChange("remote_ipv6_network_cidr") { + connChanged = true + connOpts.RemoteIpv6NetworkCidr = aws.String(d.Get("remote_ipv6_network_cidr").(string)) + } + + if connChanged { + connOpts.VpnConnectionId = aws.String(vpnConnectionID) + _, err := conn.ModifyVpnConnectionOptions(connOpts) + if err != nil { + return fmt.Errorf("Error modifying vpn connection options: %s", err) + } + + if err := waitForEc2VpnConnectionAvailableWhenModifying(conn, vpnConnectionID); err != nil { + return fmt.Errorf("error waiting for VPN connection (%s) to become available: %s", vpnConnectionID, err) + } + } + + if d.HasChange("tunnel1_dpd_timeout_action") { + tun1Changed = true + options[0].DPDTimeoutAction = aws.String(d.Get("tunnel1_dpd_timeout_action").(string)) + } + + if d.HasChange("tunnel2_dpd_timeout_action") { + tun2Changed = true + options[1].DPDTimeoutAction = aws.String(d.Get("tunnel2_dpd_timeout_action").(string)) + } + + if d.HasChange("tunnel1_dpd_timeout_seconds") { + tun1Changed = true + options[0].DPDTimeoutSeconds = aws.Int64(int64(d.Get("tunnel1_dpd_timeout_seconds").(int))) + } + + if d.HasChange("tunnel2_dpd_timeout_seconds") { + tun2Changed = true + options[1].DPDTimeoutSeconds = aws.Int64(int64(d.Get("tunnel2_dpd_timeout_seconds").(int))) + } + + if d.HasChange("tunnel1_ike_versions") { + tun1Changed = true + l := []*ec2.IKEVersionsRequestListValue{} + for _, s := range d.Get("tunnel1_ike_versions").(*schema.Set).List() { + l = append(l, &ec2.IKEVersionsRequestListValue{Value: aws.String(s.(string))}) + } + options[0].IKEVersions = l + } + + if d.HasChange("tunnel2_ike_versions") { + tun2Changed = true + l := []*ec2.IKEVersionsRequestListValue{} + for _, s := range d.Get("tunnel2_ike_versions").(*schema.Set).List() { + l = append(l, &ec2.IKEVersionsRequestListValue{Value: aws.String(s.(string))}) + } + options[1].IKEVersions = l + } + + if d.HasChange("tunnel1_phase1_dh_group_numbers") { + tun1Changed = true + l := []*ec2.Phase1DHGroupNumbersRequestListValue{} + for _, s := range d.Get("tunnel1_phase1_dh_group_numbers").(*schema.Set).List() { + l = append(l, &ec2.Phase1DHGroupNumbersRequestListValue{Value: aws.Int64(int64(s.(int)))}) + } + options[0].Phase1DHGroupNumbers = l + } + + if d.HasChange("tunnel2_phase1_dh_group_numbers") { + tun2Changed = true + l := []*ec2.Phase1DHGroupNumbersRequestListValue{} + for _, s := range d.Get("tunnel2_phase1_dh_group_numbers").(*schema.Set).List() { + l = append(l, &ec2.Phase1DHGroupNumbersRequestListValue{Value: 
aws.Int64(int64(s.(int)))}) + } + options[1].Phase1DHGroupNumbers = l + } + + if d.HasChange("tunnel1_phase1_encryption_algorithms") { + tun1Changed = true + l := []*ec2.Phase1EncryptionAlgorithmsRequestListValue{} + for _, s := range d.Get("tunnel1_phase1_encryption_algorithms").(*schema.Set).List() { + l = append(l, &ec2.Phase1EncryptionAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + } + options[0].Phase1EncryptionAlgorithms = l + } + + if d.HasChange("tunnel2_phase1_encryption_algorithms") { + tun2Changed = true + l := []*ec2.Phase1EncryptionAlgorithmsRequestListValue{} + for _, s := range d.Get("tunnel2_phase1_encryption_algorithms").(*schema.Set).List() { + l = append(l, &ec2.Phase1EncryptionAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + } + options[1].Phase1EncryptionAlgorithms = l + } + + if d.HasChange("tunnel1_phase1_integrity_algorithms") { + tun1Changed = true + l := []*ec2.Phase1IntegrityAlgorithmsRequestListValue{} + for _, s := range d.Get("tunnel1_phase1_integrity_algorithms").(*schema.Set).List() { + l = append(l, &ec2.Phase1IntegrityAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + } + options[0].Phase1IntegrityAlgorithms = l + } + + if d.HasChange("tunnel2_phase1_integrity_algorithms") { + tun2Changed = true + l := []*ec2.Phase1IntegrityAlgorithmsRequestListValue{} + for _, s := range d.Get("tunnel2_phase1_integrity_algorithms").(*schema.Set).List() { + l = append(l, &ec2.Phase1IntegrityAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + } + options[1].Phase1IntegrityAlgorithms = l + } + + if d.HasChange("tunnel1_phase1_lifetime_seconds") { + tun1Changed = true + options[0].Phase1LifetimeSeconds = aws.Int64(int64(d.Get("tunnel1_phase1_lifetime_seconds").(int))) + } + + if d.HasChange("tunnel2_phase1_lifetime_seconds") { + tun2Changed = true + options[1].Phase1LifetimeSeconds = aws.Int64(int64(d.Get("tunnel2_phase1_lifetime_seconds").(int))) + } + + if d.HasChange("tunnel1_phase2_dh_group_numbers") { + tun1Changed = true + l := []*ec2.Phase2DHGroupNumbersRequestListValue{} + for _, s := range d.Get("tunnel1_phase2_dh_group_numbers").(*schema.Set).List() { + l = append(l, &ec2.Phase2DHGroupNumbersRequestListValue{Value: aws.Int64(int64(s.(int)))}) + } + options[0].Phase2DHGroupNumbers = l + } + + if d.HasChange("tunnel2_phase2_dh_group_numbers") { + tun2Changed = true + l := []*ec2.Phase2DHGroupNumbersRequestListValue{} + for _, s := range d.Get("tunnel2_phase2_dh_group_numbers").(*schema.Set).List() { + l = append(l, &ec2.Phase2DHGroupNumbersRequestListValue{Value: aws.Int64(int64(s.(int)))}) + } + options[1].Phase2DHGroupNumbers = l + } + + if d.HasChange("tunnel1_phase2_encryption_algorithms") { + tun1Changed = true + l := []*ec2.Phase2EncryptionAlgorithmsRequestListValue{} + for _, s := range d.Get("tunnel1_phase2_encryption_algorithms").(*schema.Set).List() { + l = append(l, &ec2.Phase2EncryptionAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + } + options[0].Phase2EncryptionAlgorithms = l + } + + if d.HasChange("tunnel2_phase2_encryption_algorithms") { + tun2Changed = true + l := []*ec2.Phase2EncryptionAlgorithmsRequestListValue{} + for _, s := range d.Get("tunnel2_phase2_encryption_algorithms").(*schema.Set).List() { + l = append(l, &ec2.Phase2EncryptionAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + } + options[1].Phase2EncryptionAlgorithms = l + } + + if d.HasChange("tunnel1_phase2_integrity_algorithms") { + tun1Changed = true + l := []*ec2.Phase2IntegrityAlgorithmsRequestListValue{} + for _, s 
:= range d.Get("tunnel1_phase2_integrity_algorithms").(*schema.Set).List() { + l = append(l, &ec2.Phase2IntegrityAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + } + options[0].Phase2IntegrityAlgorithms = l + } + + if d.HasChange("tunnel2_phase2_integrity_algorithms") { + tun2Changed = true + l := []*ec2.Phase2IntegrityAlgorithmsRequestListValue{} + for _, s := range d.Get("tunnel2_phase2_integrity_algorithms").(*schema.Set).List() { + l = append(l, &ec2.Phase2IntegrityAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + } + options[1].Phase2IntegrityAlgorithms = l + } + + if d.HasChange("tunnel1_phase2_lifetime_seconds") { + tun1Changed = true + options[0].Phase2LifetimeSeconds = aws.Int64(int64(d.Get("tunnel1_phase2_lifetime_seconds").(int))) + } + + if d.HasChange("tunnel2_phase2_lifetime_seconds") { + tun2Changed = true + options[1].Phase2LifetimeSeconds = aws.Int64(int64(d.Get("tunnel2_phase2_lifetime_seconds").(int))) + } + + if d.HasChange("tunnel1_rekey_fuzz_percentage") { + tun1Changed = true + options[0].RekeyFuzzPercentage = aws.Int64(int64(d.Get("tunnel1_rekey_fuzz_percentage").(int))) + } + + if d.HasChange("tunnel2_rekey_fuzz_percentage") { + tun2Changed = true + options[1].RekeyFuzzPercentage = aws.Int64(int64(d.Get("tunnel2_rekey_fuzz_percentage").(int))) + } + + if d.HasChange("tunnel1_rekey_margin_time_seconds") { + tun1Changed = true + options[0].RekeyMarginTimeSeconds = aws.Int64(int64(d.Get("tunnel1_rekey_margin_time_seconds").(int))) + } + + if d.HasChange("tunnel2_rekey_margin_time_seconds") { + tun2Changed = true + options[1].RekeyMarginTimeSeconds = aws.Int64(int64(d.Get("tunnel2_rekey_margin_time_seconds").(int))) + } + + if d.HasChange("tunnel1_replay_window_size") { + tun1Changed = true + options[0].ReplayWindowSize = aws.Int64(int64(d.Get("tunnel1_replay_window_size").(int))) + } + + if d.HasChange("tunnel2_replay_window_size") { + tun2Changed = true + options[1].ReplayWindowSize = aws.Int64(int64(d.Get("tunnel2_replay_window_size").(int))) + } + + if d.HasChange("tunnel1_startup_action") { + tun1Changed = true + options[0].StartupAction = aws.String(d.Get("tunnel1_startup_action").(string)) + } + + if d.HasChange("tunnel2_startup_action") { + tun2Changed = true + options[1].StartupAction = aws.String(d.Get("tunnel2_startup_action").(string)) + } + + if tun1Changed { + if err := modifyVpnTunnelOptions(conn, d.Get("vgw_telemetry").(*schema.Set), vpnConnectionID, vgwTelemetryTun1Index, options[0]); err != nil { + return err + } + } + + if tun2Changed { + if err := modifyVpnTunnelOptions(conn, d.Get("vgw_telemetry").(*schema.Set), vpnConnectionID, vgwTelemetryTun2Index, options[1]); err != nil { + return err + } + } + if d.HasChange("tags") { o, n := d.GetChange("tags") - if err := keyvaluetags.Ec2UpdateTags(conn, d.Id(), o, n); err != nil { + if err := keyvaluetags.Ec2UpdateTags(conn, vpnConnectionID, o, n); err != nil { return fmt.Errorf("error updating EC2 VPN Connection (%s) tags: %s", d.Id(), err) } } @@ -560,6 +1310,29 @@ func telemetryToMapList(telemetry []*ec2.VgwTelemetry) []map[string]interface{} return result } +func modifyVpnTunnelOptions(conn *ec2.EC2, vgwTelemetry *schema.Set, vpnConnectionID string, vgwTelemetryTunIndex int, optionsTun *ec2.ModifyVpnTunnelOptionsSpecification) error { + if v := vgwTelemetry; v.Len() > 0 { + vpnTunnelOutsideIPAddress := v.List()[vgwTelemetryTunIndex].(map[string]interface{})["outside_ip_address"].(string) + + o := &ec2.ModifyVpnTunnelOptionsInput{ + VpnConnectionId: aws.String(vpnConnectionID), + 
VpnTunnelOutsideIpAddress: aws.String(vpnTunnelOutsideIPAddress), + TunnelOptions: optionsTun, + } + + _, err := conn.ModifyVpnTunnelOptions(o) + if err != nil { + return fmt.Errorf("Error modifying vpn tunnel options: %s", err) + } + + if err := waitForEc2VpnConnectionAvailableWhenModifying(conn, vpnConnectionID); err != nil { + return fmt.Errorf("error waiting for VPN connection (%s) to become available: %s", vpnConnectionID, err) + } + } + + return nil +} + func waitForEc2VpnConnectionAvailable(conn *ec2.EC2, id string) error { // Wait for the connection to become available. This has an obscenely // high default timeout because AWS VPN connections are notoriously @@ -579,6 +1352,25 @@ func waitForEc2VpnConnectionAvailable(conn *ec2.EC2, id string) error { return err } +func waitForEc2VpnConnectionAvailableWhenModifying(conn *ec2.EC2, id string) error { + // Wait for the connection to become available. This has an obscenely + // high default timeout because AWS VPN connections are notoriously + // slow at coming up or going down. There's also no point in checking + // more frequently than every ten seconds. + stateConf := &resource.StateChangeConf{ + Pending: []string{"modifying"}, // VPN state modifying const is not available in SDK + Target: []string{ec2.VpnStateAvailable}, + Refresh: vpnConnectionRefreshFunc(conn, id), + Timeout: 40 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + _, err := stateConf.WaitForState() + + return err +} + func waitForEc2VpnConnectionDeletion(conn *ec2.EC2, id string) error { // These things can take quite a while to tear themselves down and any // attempt to modify resources they reference (e.g. CustomerGateways or @@ -636,6 +1428,7 @@ func validateVpnConnectionTunnelPreSharedKey() schema.SchemaValidateFunc { } // https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_VpnTunnelOptionsSpecification.html +// https://docs.aws.amazon.com/vpn/latest/s2svpn/VPNTunnels.html func validateVpnConnectionTunnelInsideCIDR() schema.SchemaValidateFunc { disallowedCidrs := []string{ "169.254.0.0/30", @@ -653,3 +1446,93 @@ func validateVpnConnectionTunnelInsideCIDR() schema.SchemaValidateFunc { validation.StringNotInSlice(disallowedCidrs, false), ) } + +func validateVpnConnectionTunnelInsideIpv6CIDR() schema.SchemaValidateFunc { + return validation.All( + validation.IsCIDRNetwork(126, 126), + validation.StringMatch(regexp.MustCompile(`^fd00:`), "must be within fd00::/8"), + ) +} + +func validateLocalIpv4NetworkCidr() schema.SchemaValidateFunc { + return validation.All( + validation.IsCIDRNetwork(32, 32), + ) +} + +func validateLocalIpv6NetworkCidr() schema.SchemaValidateFunc { + return validation.All( + validation.IsCIDRNetwork(128, 128), + ) +} + +func validateVpnConnectionTunnelDpdTimeoutAction() schema.SchemaValidateFunc { + allowedDpdTimeoutActions := []string{ + "clear", + "none", + "restart", + } + + return validation.All( + validation.StringInSlice(allowedDpdTimeoutActions, false), + ) +} + +func validateTunnelInsideIPVersion() schema.SchemaValidateFunc { + allowedIPVersions := []string{ + "ipv4", + "ipv6", + } + + return validation.All( + validation.StringInSlice(allowedIPVersions, false), + ) +} + +func validateVpnConnectionTunnelDpdTimeoutSeconds() schema.SchemaValidateFunc { + return validation.All( + //validation.IntBetween(0, 30) + validation.IntAtLeast(30), // Must be 30 or higher + ) +} + +func validateVpnConnectionTunnelPhase1LifetimeSeconds() schema.SchemaValidateFunc { + return validation.All( + 
validation.IntBetween(900, 28800), + ) +} + +func validateVpnConnectionTunnelPhase2LifetimeSeconds() schema.SchemaValidateFunc { + return validation.All( + validation.IntBetween(900, 3600), + ) +} + +func validateVpnConnectionTunnelRekeyFuzzPercentage() schema.SchemaValidateFunc { + return validation.All( + validation.IntBetween(0, 100), + ) +} + +func validateVpnConnectionTunnelRekeyMarginTimeSeconds() schema.SchemaValidateFunc { + return validation.All( + validation.IntBetween(60, 1800), + ) +} + +func validateVpnConnectionTunnelReplayWindowSize() schema.SchemaValidateFunc { + return validation.All( + validation.IntBetween(64, 2048), + ) +} + +func validateVpnConnectionTunnelStartupAction() schema.SchemaValidateFunc { + allowedStartupAction := []string{ + "add", + "start", + } + + return validation.All( + validation.StringInSlice(allowedStartupAction, false), + ) +} diff --git a/aws/resource_aws_vpn_connection_test.go b/aws/resource_aws_vpn_connection_test.go index 3648fe7e64c..0f4977f4190 100644 --- a/aws/resource_aws_vpn_connection_test.go +++ b/aws/resource_aws_vpn_connection_test.go @@ -88,6 +88,7 @@ func TestAccAWSVpnConnection_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccAwsVpnConnectionExists(resourceName, &vpn), resource.TestCheckResourceAttr(resourceName, "transit_gateway_attachment_id", ""), + resource.TestCheckResourceAttr(resourceName, "enable_acceleration", "false"), testAccMatchResourceAttrRegionalARN(resourceName, "arn", "ec2", regexp.MustCompile(`vpn-connection/vpn-.+`)), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), @@ -210,9 +211,63 @@ func TestAccAWSVpnConnection_tunnelOptions(t *testing.T) { ExpectError: regexp.MustCompile(`can only contain alphanumeric, period and underscore characters`), }, + // Should pre-check: + // - local_ipv4_network_cidr + // - local_ipv6_network_cidr + // - remote_ipv4_network_cidr + // - remote_ipv6_network_cidr + // - tunnel_inside_ip_version + // - tunnel1_dpd_timeout_action + // - tunnel1_dpd_timeout_seconds + // - tunnel1_phase1_lifetime_seconds + // - tunnel1_phase2_lifetime_seconds + // - tunnel1_rekey_fuzz_percentage + // - tunnel1_rekey_margin_time_seconds + // - tunnel1_replay_window_size + // - tunnel1_startup_action + // - tunnel1_inside_cidr + // - tunnel1_inside_ipv6_cidr + //Try actual building { - Config: testAccAwsVpnConnectionConfigTunnelOptions(rBgpAsn, "12345678", "169.254.8.0/30", "abcdefgh", "169.254.9.0/30"), + Config: testAccAwsVpnConnectionConfigTunnelOptions( + rBgpAsn, + "192.168.1.1/32", + "192.168.1.2/32", + "12345678", + "169.254.8.0/30", + "clear", + 30, + "\"ikev1\", \"ikev2\"", + "2, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24", + "\"AES128\", \"AES256\", \"AES128-GCM-16\", \"AES256-GCM-16\"", + "\"SHA1\", \"SHA2-256\", \"SHA2-384\", \"SHA2-512\"", + 28800, + "2, 5, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24", + "\"AES128\", \"AES256\", \"AES128-GCM-16\", \"AES256-GCM-16\"", + "\"SHA1\", \"SHA2-256\", \"SHA2-384\", \"SHA2-512\"", + 3600, + 100, + 540, + 1024, + "add", + "abcdefgh", + "169.254.9.0/30", + "clear", + 30, + "\"ikev1\", \"ikev2\"", + "2, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24", + "\"AES128\", \"AES256\", \"AES128-GCM-16\", \"AES256-GCM-16\"", + "\"SHA1\", \"SHA2-256\", \"SHA2-384\", \"SHA2-512\"", + 28800, + "2, 5, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24", + "\"AES128\", \"AES256\", \"AES128-GCM-16\", \"AES256-GCM-16\"", + "\"SHA1\", \"SHA2-256\", \"SHA2-384\", \"SHA2-512\"", + 3600, + 100, + 540, + 1024, + "add"), Check: resource.ComposeTestCheckFunc( 
testAccAwsVpnConnectionExists(resourceName, &vpn), resource.TestCheckResourceAttr(resourceName, "static_routes_only", "false"), @@ -246,6 +301,60 @@ func TestAccAWSVpnConnection_withoutStaticRoutes(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccAwsVpnConnectionExists(resourceName, &vpn), resource.TestCheckResourceAttr(resourceName, "static_routes_only", "false"), + resource.TestCheckResourceAttr(resourceName, "enable_acceleration", "false"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSVpnConnection_withEnableAcceleration(t *testing.T) { + rBgpAsn := acctest.RandIntRange(64512, 65534) + resourceName := "aws_vpn_connection.test" + var vpn ec2.VpnConnection + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: resourceName, + Providers: testAccProviders, + CheckDestroy: testAccAwsVpnConnectionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsVpnConnectionConfigEnableAcceleration(rBgpAsn), + Check: resource.ComposeTestCheckFunc( + testAccAwsVpnConnectionExists(resourceName, &vpn), + resource.TestCheckResourceAttr(resourceName, "enable_acceleration", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSVpnConnection_withIpv6(t *testing.T) { + rBgpAsn := acctest.RandIntRange(64512, 65534) + resourceName := "aws_vpn_connection.test" + var vpn ec2.VpnConnection + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: resourceName, + Providers: testAccProviders, + CheckDestroy: testAccAwsVpnConnectionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsVpnConnectionConfigIpv6(rBgpAsn, "fd00:2001:db8:2:2d1:81ff:fe41:d201/128", "fd00:2001:db8:2:2d1:81ff:fe41:d202/128", "fd00:2001:db8:2:2d1:81ff:fe41:d200/126", "fd00:2001:db8:2:2d1:81ff:fe41:d204/126"), + Check: resource.ComposeTestCheckFunc( + testAccAwsVpnConnectionExists(resourceName, &vpn), ), }, { @@ -493,10 +602,60 @@ resource "aws_vpn_connection" "test" { customer_gateway_id = aws_customer_gateway.customer_gateway.id type = "ipsec.1" static_routes_only = false + enable_acceleration = false } `, rBgpAsn, rInt) } +func testAccAwsVpnConnectionConfigEnableAcceleration(rBgpAsn int) string { + return fmt.Sprintf(` +resource "aws_ec2_transit_gateway" "test" {} +resource "aws_customer_gateway" "customer_gateway" { + bgp_asn = %d + ip_address = "178.0.0.1" + type = "ipsec.1" + tags = { + Name = "tf-acc-test-ec2-vpn-connection-enable-acceleration" + } +} +resource "aws_vpn_connection" "test" { + customer_gateway_id = aws_customer_gateway.customer_gateway.id + transit_gateway_id = aws_ec2_transit_gateway.test.id + type = "ipsec.1" + static_routes_only = false + enable_acceleration = true +} +`, rBgpAsn) +} + +func testAccAwsVpnConnectionConfigIpv6(rBgpAsn int, localIpv6NetworkCidr string, remoteIpv6NetworkCidr string, tunnel1InsideIpv6Cidr string, tunnel2InsideIpv6Cidr string) string { + return fmt.Sprintf(` +resource "aws_ec2_transit_gateway" "test" {} +resource "aws_customer_gateway" "customer_gateway" { + bgp_asn = %d + ip_address = "178.0.0.1" + type = "ipsec.1" + tags = { + Name = "tf-acc-test-ec2-vpn-connection-enable-acceleration" + } +} +resource "aws_vpn_connection" "test" { + customer_gateway_id = aws_customer_gateway.customer_gateway.id + transit_gateway_id = aws_ec2_transit_gateway.test.id + type = "ipsec.1" + static_routes_only = false + 
enable_acceleration = false + + local_ipv6_network_cidr = "%s" + remote_ipv6_network_cidr = "%s" + tunnel_inside_ip_version = "ipv6" + + tunnel1_inside_ipv6_cidr = "%s" + tunnel2_inside_ipv6_cidr = "%s" +} +`, rBgpAsn, localIpv6NetworkCidr, remoteIpv6NetworkCidr, tunnel1InsideIpv6Cidr, tunnel2InsideIpv6Cidr) +} + func testAccAwsVpnConnectionConfigSingleTunnelOptions(rBgpAsn int, psk string, tunnelCidr string) string { return fmt.Sprintf(` resource "aws_vpn_gateway" "vpn_gateway" { @@ -549,7 +708,45 @@ resource "aws_vpn_connection" "test" { `, rBgpAsn) } -func testAccAwsVpnConnectionConfigTunnelOptions(rBgpAsn int, psk string, tunnelCidr string, psk2 string, tunnelCidr2 string) string { +func testAccAwsVpnConnectionConfigTunnelOptions( + rBgpAsn int, + localIpv4NetworkCidr string, + remoteIpv4NetworkCidr string, + psk string, + tunnelCidr string, + dpdTimeoutAction string, + dpdTimeoutSeconds int, + ikeVersions string, + phase1DhGroupNumbers string, + phase1EncryptionAlgorithms string, + phase1IntegrityAlgorithms string, + phase1LifetimeSeconds int, + phase2DhGroupNumbers string, + phase2EncryptionAlgorithms string, + phase2IntegrityAlgorithms string, + phase2LifetimeSeconds int, + rekeyFuzzPercentage int, + rekeyMarginTimeSeconds int, + replayWindowSize int, + startupAction string, + psk2 string, + tunnelCidr2 string, + dpdTimeoutAction2 string, + dpdTimeoutSeconds2 int, + ikeVersions2 string, + phase1DhGroupNumbers2 string, + phase1EncryptionAlgorithms2 string, + phase1IntegrityAlgorithms2 string, + phase1LifetimeSeconds2 int, + phase2DhGroupNumbers2 string, + phase2EncryptionAlgorithms2 string, + phase2IntegrityAlgorithms2 string, + phase2LifetimeSeconds2 int, + rekeyFuzzPercentage2 int, + rekeyMarginTimeSeconds2 int, + replayWindowSize2 int, + startupAction2 string, +) string { return fmt.Sprintf(` resource "aws_vpn_gateway" "vpn_gateway" { tags = { @@ -573,13 +770,83 @@ resource "aws_vpn_connection" "test" { type = "ipsec.1" static_routes_only = false - tunnel1_inside_cidr = "%s" - tunnel1_preshared_key = "%s" - - tunnel2_inside_cidr = "%s" - tunnel2_preshared_key = "%s" -} -`, rBgpAsn, tunnelCidr, psk, tunnelCidr2, psk2) + local_ipv4_network_cidr = "%s" + remote_ipv4_network_cidr = "%s" + + tunnel1_inside_cidr = "%s" + tunnel1_preshared_key = "%s" + tunnel1_dpd_timeout_action = "%s" + tunnel1_dpd_timeout_seconds = %d + tunnel1_ike_versions = [%s] + tunnel1_phase1_dh_group_numbers = [%s] + tunnel1_phase1_encryption_algorithms = [%s] + tunnel1_phase1_integrity_algorithms = [%s] + tunnel1_phase1_lifetime_seconds = %d + tunnel1_phase2_dh_group_numbers = [%s] + tunnel1_phase2_encryption_algorithms = [%s] + tunnel1_phase2_integrity_algorithms = [%s] + tunnel1_phase2_lifetime_seconds = %d + tunnel1_rekey_fuzz_percentage = %d + tunnel1_rekey_margin_time_seconds = %d + tunnel1_replay_window_size = %d + tunnel1_startup_action = "%s" + + tunnel2_inside_cidr = "%s" + tunnel2_preshared_key = "%s" + tunnel2_dpd_timeout_action = "%s" + tunnel2_dpd_timeout_seconds = %d + tunnel2_ike_versions = [%s] + tunnel2_phase1_dh_group_numbers = [%s] + tunnel2_phase1_encryption_algorithms = [%s] + tunnel2_phase1_integrity_algorithms = [%s] + tunnel2_phase1_lifetime_seconds = %d + tunnel2_phase2_dh_group_numbers = [%s] + tunnel2_phase2_encryption_algorithms = [%s] + tunnel2_phase2_integrity_algorithms = [%s] + tunnel2_phase2_lifetime_seconds = %d + tunnel2_rekey_fuzz_percentage = %d + tunnel2_rekey_margin_time_seconds = %d + tunnel2_replay_window_size = %d + tunnel2_startup_action = "%s" +} +`, + rBgpAsn, + 
localIpv4NetworkCidr, + remoteIpv4NetworkCidr, + tunnelCidr, + psk, + dpdTimeoutAction, + dpdTimeoutSeconds, + ikeVersions, + phase1DhGroupNumbers, + phase1EncryptionAlgorithms, + phase1IntegrityAlgorithms, + phase1LifetimeSeconds, + phase2DhGroupNumbers, + phase2EncryptionAlgorithms, + phase2IntegrityAlgorithms, + phase2LifetimeSeconds, + rekeyFuzzPercentage, + rekeyMarginTimeSeconds, + replayWindowSize, + startupAction, + tunnelCidr2, + psk2, + dpdTimeoutAction2, + dpdTimeoutSeconds2, + ikeVersions2, + phase1DhGroupNumbers2, + phase1EncryptionAlgorithms2, + phase1IntegrityAlgorithms2, + phase1LifetimeSeconds2, + phase2DhGroupNumbers2, + phase2EncryptionAlgorithms2, + phase2IntegrityAlgorithms2, + phase2LifetimeSeconds2, + rekeyFuzzPercentage2, + rekeyMarginTimeSeconds2, + replayWindowSize2, + startupAction2) } func testAccAwsVpnConnectionConfigTags1(rBgpAsn int, tagKey1, tagValue1 string) string { diff --git a/website/docs/r/vpn_connection.html.markdown b/website/docs/r/vpn_connection.html.markdown index d74d406744b..c9b046e07cb 100644 --- a/website/docs/r/vpn_connection.html.markdown +++ b/website/docs/r/vpn_connection.html.markdown @@ -76,13 +76,49 @@ One of the following arguments is required: Other arguments: * `static_routes_only` - (Optional, Default `false`) Whether the VPN connection uses static routes exclusively. Static routes must be used for devices that don't support BGP. +* `enable_acceleration` - (Optional, Default `false`) Indicate whether to enable acceleration for the VPN connection. Supports only EC2 Transit Gateway. * `tags` - (Optional) Tags to apply to the connection. -* `tunnel1_inside_cidr` - (Optional) The CIDR block of the inside IP addresses for the first VPN tunnel. -* `tunnel2_inside_cidr` - (Optional) The CIDR block of the inside IP addresses for the second VPN tunnel. -* `tunnel1_preshared_key` - (Optional) The preshared key of the first VPN tunnel. -* `tunnel2_preshared_key` - (Optional) The preshared key of the second VPN tunnel. - -~> **Note:** The preshared key must be between 8 and 64 characters in length and cannot start with zero(0). Allowed characters are alphanumeric characters, periods(.) and underscores(_). +* `local_ipv4_network_cidr` - (Optional, Default `0.0.0.0/0`) The IPv4 CIDR on the customer gateway (on-premises) side of the VPN connection. +* `local_ipv6_network_cidr` - (Optional, Default `::/0`) The IPv6 CIDR on the customer gateway (on-premises) side of the VPN connection. +* `remote_ipv4_network_cidr` - (Optional, Default `0.0.0.0/0`) The IPv4 CIDR on the AWS side of the VPN connection. +* `remote_ipv6_network_cidr` - (Optional, Default `::/0`) The IPv6 CIDR on the AWS side of the VPN connection. +* `tunnel_inside_ip_version` - (Optional, Default `ipv4`) Indicate whether the VPN tunnels process IPv4 or IPv6 traffic. Valid values are `ipv4 | ipv6`. `ipv6` supports only EC2 Transit Gateway. +* `tunnel1_inside_cidr` - (Optional) The CIDR block of the inside IP addresses for the first VPN tunnel. Valid value is a size /30 CIDR block from the 169.254.0.0/16 range. +* `tunnel2_inside_cidr` - (Optional) The CIDR block of the inside IP addresses for the second VPN tunnel. Valid value is a size /30 CIDR block from the 169.254.0.0/16 range. +* `tunnel1_inside_ipv6_cidr` - (Optional) The range of inside IPv6 addresses for the first VPN tunnel. Supports only EC2 Transit Gateway. Valid value is a size /126 CIDR block from the local fd00::/8 range.
+* `tunnel2_inside_ipv6_cidr` - (Optional) The range of inside IPv6 addresses for the second VPN tunnel. Supports only EC2 Transit Gateway. Valid value is a size /126 CIDR block from the local fd00::/8 range. +* `tunnel1_preshared_key` - (Optional) The preshared key of the first VPN tunnel. The preshared key must be between 8 and 64 characters in length and cannot start with zero(0). Allowed characters are alphanumeric characters, periods(.) and underscores(_). +* `tunnel2_preshared_key` - (Optional) The preshared key of the second VPN tunnel. The preshared key must be between 8 and 64 characters in length and cannot start with zero(0). Allowed characters are alphanumeric characters, periods(.) and underscores(_). +* `tunnel1_dpd_timeout_action` - (Optional, Default `clear`) The action to take after DPD timeout occurs for the first VPN tunnel. Specify restart to restart the IKE initiation. Specify clear to end the IKE session. Valid values are `clear | none | restart`. +* `tunnel2_dpd_timeout_action` - (Optional, Default `clear`) The action to take after DPD timeout occurs for the second VPN tunnel. Specify restart to restart the IKE initiation. Specify clear to end the IKE session. Valid values are `clear | none | restart`. +* `tunnel1_dpd_timeout_seconds` - (Optional, Default `30`) The number of seconds after which a DPD timeout occurs for the first VPN tunnel. Valid value is equal or higher than `30`. +* `tunnel2_dpd_timeout_seconds` - (Optional, Default `30`) The number of seconds after which a DPD timeout occurs for the second VPN tunnel. Valid value is equal or higher than `30`. +* `tunnel1_ike_versions` - (Optional) The IKE versions that are permitted for the first VPN tunnel. Valid values are `ikev1 | ikev2`. +* `tunnel2_ike_versions` - (Optional) The IKE versions that are permitted for the second VPN tunnel. Valid values are `ikev1 | ikev2`. +* `tunnel1_phase1_dh_group_numbers` - (Optional) List of one or more Diffie-Hellman group numbers that are permitted for the first VPN tunnel for phase 1 IKE negotiations. Valid values are ` 2 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24`. +* `tunnel2_phase1_dh_group_numbers` - (Optional) List of one or more Diffie-Hellman group numbers that are permitted for the second VPN tunnel for phase 1 IKE negotiations. Valid values are ` 2 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24`. +* `tunnel1_phase1_encryption_algorithms` - (Optional) List of one or more encryption algorithms that are permitted for the first VPN tunnel for phase 1 IKE negotiations. Valid values are `AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16`. +* `tunnel2_phase1_encryption_algorithms` - (Optional) List of one or more encryption algorithms that are permitted for the second VPN tunnel for phase 1 IKE negotiations. Valid values are `AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16`. +* `tunnel1_phase1_integrity_algorithms` - (Optional) One or more integrity algorithms that are permitted for the first VPN tunnel for phase 1 IKE negotiations. Valid values are `SHA1 | SHA2-256 | SHA2-384 | SHA2-512`. +* `tunnel2_phase1_integrity_algorithms` - (Optional) One or more integrity algorithms that are permitted for the second VPN tunnel for phase 1 IKE negotiations. Valid values are `SHA1 | SHA2-256 | SHA2-384 | SHA2-512`. +* `tunnel1_phase1_lifetime_seconds` - (Optional, Default `28800`) The lifetime for phase 1 of the IKE negotiation for the first VPN tunnel, in seconds. Valid value is between `900` and `28800`. 
+* `tunnel2_phase1_lifetime_seconds` - (Optional, Default `28800`) The lifetime for phase 1 of the IKE negotiation for the second VPN tunnel, in seconds. Valid value is between `900` and `28800`. +* `tunnel1_phase2_dh_group_numbers` - (Optional) List of one or more Diffie-Hellman group numbers that are permitted for the first VPN tunnel for phase 2 IKE negotiations. Valid values are `2 | 5 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24`. +* `tunnel2_phase2_dh_group_numbers` - (Optional) List of one or more Diffie-Hellman group numbers that are permitted for the second VPN tunnel for phase 2 IKE negotiations. Valid values are `2 | 5 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24`. +* `tunnel1_phase2_encryption_algorithms` - (Optional) List of one or more encryption algorithms that are permitted for the first VPN tunnel for phase 2 IKE negotiations. Valid values are `AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16`. +* `tunnel2_phase2_encryption_algorithms` - (Optional) List of one or more encryption algorithms that are permitted for the second VPN tunnel for phase 2 IKE negotiations. Valid values are `AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16`. +* `tunnel1_phase2_integrity_algorithms` - (Optional) List of one or more integrity algorithms that are permitted for the first VPN tunnel for phase 2 IKE negotiations. Valid values are `SHA1 | SHA2-256 | SHA2-384 | SHA2-512`. +* `tunnel2_phase2_integrity_algorithms` - (Optional) List of one or more integrity algorithms that are permitted for the second VPN tunnel for phase 2 IKE negotiations. Valid values are `SHA1 | SHA2-256 | SHA2-384 | SHA2-512`. +* `tunnel1_phase2_lifetime_seconds` - (Optional, Default `3600`) The lifetime for phase 2 of the IKE negotiation for the first VPN tunnel, in seconds. Valid value is between `900` and `3600`. +* `tunnel2_phase2_lifetime_seconds` - (Optional, Default `3600`) The lifetime for phase 2 of the IKE negotiation for the second VPN tunnel, in seconds. Valid value is between `900` and `3600`. +* `tunnel1_rekey_fuzz_percentage` - (Optional, Default `100`) The percentage of the rekey window for the first VPN tunnel (determined by `tunnel1_rekey_margin_time_seconds`) during which the rekey time is randomly selected. Valid value is between `0` and `100`. +* `tunnel2_rekey_fuzz_percentage` - (Optional, Default `100`) The percentage of the rekey window for the second VPN tunnel (determined by `tunnel2_rekey_margin_time_seconds`) during which the rekey time is randomly selected. Valid value is between `0` and `100`. +* `tunnel1_rekey_margin_time_seconds` - (Optional, Default `540`) The margin time, in seconds, before the phase 2 lifetime expires, during which the AWS side of the first VPN connection performs an IKE rekey. The exact time of the rekey is randomly selected based on the value for `tunnel1_rekey_fuzz_percentage`. Valid value is between `60` and half of `tunnel1_phase2_lifetime_seconds`. +* `tunnel2_rekey_margin_time_seconds` - (Optional, Default `540`) The margin time, in seconds, before the phase 2 lifetime expires, during which the AWS side of the second VPN connection performs an IKE rekey. The exact time of the rekey is randomly selected based on the value for `tunnel2_rekey_fuzz_percentage`. Valid value is between `60` and half of `tunnel2_phase2_lifetime_seconds`. +* `tunnel1_replay_window_size` - (Optional, Default `1024`) The number of packets in an IKE replay window for the first VPN tunnel. Valid value is between `64` and `2048`. 
+* `tunnel2_replay_window_size` - (Optional, Default `1024`) The number of packets in an IKE replay window for the second VPN tunnel. Valid value is between `64` and `2048`. +* `tunnel1_startup_action` - (Optional, Default `add`) The action to take when establishing the tunnel for the first VPN connection. By default, your customer gateway device must initiate the IKE negotiation and bring up the tunnel. Specify start for AWS to initiate the IKE negotiation. Valid values are `add | start`. +* `tunnel2_startup_action` - (Optional, Default `add`) The action to take when establishing the tunnel for the second VPN connection. By default, your customer gateway device must initiate the IKE negotiation and bring up the tunnel. Specify start for AWS to initiate the IKE negotiation. Valid values are `add | start`. ## Attributes Reference From de14cad5c045dfaa94cbb25c94946b6a3541d6cd Mon Sep 17 00:00:00 2001 From: Marco Rinalducci Date: Sat, 12 Dec 2020 17:32:44 +0100 Subject: [PATCH 0205/1212] Code better broken up into functions and added struct for testing --- aws/resource_aws_vpn_connection.go | 772 ++++++++++++------------ aws/resource_aws_vpn_connection_test.go | 287 +++++---- 2 files changed, 540 insertions(+), 519 deletions(-) diff --git a/aws/resource_aws_vpn_connection.go b/aws/resource_aws_vpn_connection.go index 8ba13db3e22..669ec37a13c 100644 --- a/aws/resource_aws_vpn_connection.go +++ b/aws/resource_aws_vpn_connection.go @@ -516,211 +516,274 @@ func resourceAwsVpnConnectionCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn - // Fill the tunnel options for the EC2 API - options := []*ec2.VpnTunnelOptionsSpecification{ - {}, {}, + // Fill the connection options for the EC2 API + connectOpts := expandVpnConnectionOptions(d) + + createOpts := &ec2.CreateVpnConnectionInput{ + CustomerGatewayId: aws.String(d.Get("customer_gateway_id").(string)), + Options: connectOpts, + Type: aws.String(d.Get("type").(string)), + TagSpecifications: ec2TagSpecificationsFromMap(d.Get("tags").(map[string]interface{}), ec2.ResourceTypeVpnConnection), } - if v, ok := d.GetOk("tunnel1_dpd_timeout_action"); ok { - options[0].DPDTimeoutAction = aws.String(v.(string)) + if v, ok := d.GetOk("transit_gateway_id"); ok { + createOpts.TransitGatewayId = aws.String(v.(string)) } - if v, ok := d.GetOk("tunnel2_dpd_timeout_action"); ok { - options[1].DPDTimeoutAction = aws.String(v.(string)) + if v, ok := d.GetOk("vpn_gateway_id"); ok { + createOpts.VpnGatewayId = aws.String(v.(string)) } - if v, ok := d.GetOk("tunnel1_dpd_timeout_seconds"); ok { - options[0].DPDTimeoutSeconds = aws.Int64(int64(v.(int))) + // Create the VPN Connection + log.Printf("[DEBUG] Creating vpn connection") + resp, err := conn.CreateVpnConnection(createOpts) + if err != nil { + return fmt.Errorf("Error creating vpn connection: %s", err) } - if v, ok := d.GetOk("tunnel2_dpd_timeout_seconds"); ok { - options[1].DPDTimeoutSeconds = aws.Int64(int64(v.(int))) + d.SetId(aws.StringValue(resp.VpnConnection.VpnConnectionId)) + + if err := waitForEc2VpnConnectionAvailable(conn, d.Id()); err != nil { + return fmt.Errorf("error waiting for VPN connection (%s) to become available: %s", d.Id(), err) } - if v, ok := d.GetOk("tunnel1_ike_versions"); ok { - l := []*ec2.IKEVersionsRequestListValue{} - for _, s := range v.(*schema.Set).List() { - l = append(l, &ec2.IKEVersionsRequestListValue{Value: aws.String(s.(string))}) + // Read off the API to populate our RO
fields. + return resourceAwsVpnConnectionRead(d, meta) +} + +func vpnConnectionRefreshFunc(conn *ec2.EC2, connectionId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := conn.DescribeVpnConnections(&ec2.DescribeVpnConnectionsInput{ + VpnConnectionIds: []*string{aws.String(connectionId)}, + }) + + if err != nil { + if isAWSErr(err, "InvalidVpnConnectionID.NotFound", "") { + resp = nil + } else { + log.Printf("Error on VPNConnectionRefresh: %s", err) + return nil, "", err + } } - options[0].IKEVersions = l - } - if v, ok := d.GetOk("tunnel2_ike_versions"); ok { - l := []*ec2.IKEVersionsRequestListValue{} - for _, s := range v.(*schema.Set).List() { - l = append(l, &ec2.IKEVersionsRequestListValue{Value: aws.String(s.(string))}) + if resp == nil || len(resp.VpnConnections) == 0 { + return nil, "", nil } - options[1].IKEVersions = l + + connection := resp.VpnConnections[0] + return connection, aws.StringValue(connection.State), nil } +} - if v, ok := d.GetOk("tunnel1_phase1_dh_group_numbers"); ok { - l := []*ec2.Phase1DHGroupNumbersRequestListValue{} - for _, s := range v.(*schema.Set).List() { - l = append(l, &ec2.Phase1DHGroupNumbersRequestListValue{Value: aws.Int64(int64(s.(int)))}) - } - options[0].Phase1DHGroupNumbers = l +func resourceAwsVpnConnectionRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig + + resp, err := conn.DescribeVpnConnections(&ec2.DescribeVpnConnectionsInput{ + VpnConnectionIds: []*string{aws.String(d.Id())}, + }) + + if isAWSErr(err, "InvalidVpnConnectionID.NotFound", "") { + log.Printf("[WARN] EC2 VPN Connection (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil } - if v, ok := d.GetOk("tunnel2_phase1_dh_group_numbers"); ok { - l := []*ec2.Phase1DHGroupNumbersRequestListValue{} - for _, s := range v.(*schema.Set).List() { - l = append(l, &ec2.Phase1DHGroupNumbersRequestListValue{Value: aws.Int64(int64(s.(int)))}) - } - options[1].Phase1DHGroupNumbers = l + if err != nil { + return fmt.Errorf("error reading EC2 VPN Connection (%s): %s", d.Id(), err) } - if v, ok := d.GetOk("tunnel1_phase1_encryption_algorithms"); ok { - l := []*ec2.Phase1EncryptionAlgorithmsRequestListValue{} - for _, s := range v.(*schema.Set).List() { - l = append(l, &ec2.Phase1EncryptionAlgorithmsRequestListValue{Value: aws.String(s.(string))}) - } - options[0].Phase1EncryptionAlgorithms = l + if resp == nil || len(resp.VpnConnections) == 0 || resp.VpnConnections[0] == nil { + return fmt.Errorf("error reading EC2 VPN Connection (%s): empty response", d.Id()) } - if v, ok := d.GetOk("tunnel2_phase1_encryption_algorithms"); ok { - l := []*ec2.Phase1EncryptionAlgorithmsRequestListValue{} - for _, s := range v.(*schema.Set).List() { - l = append(l, &ec2.Phase1EncryptionAlgorithmsRequestListValue{Value: aws.String(s.(string))}) - } - options[1].Phase1EncryptionAlgorithms = l + if len(resp.VpnConnections) > 1 { + return fmt.Errorf("error reading EC2 VPN Connection (%s): multiple responses", d.Id()) } - if v, ok := d.GetOk("tunnel1_phase1_integrity_algorithms"); ok { - l := []*ec2.Phase1IntegrityAlgorithmsRequestListValue{} - for _, s := range v.(*schema.Set).List() { - l = append(l, &ec2.Phase1IntegrityAlgorithmsRequestListValue{Value: aws.String(s.(string))}) - } - options[0].Phase1IntegrityAlgorithms = l + vpnConnection := resp.VpnConnections[0] + + if aws.StringValue(vpnConnection.State) == ec2.VpnStateDeleted { + log.Printf("[WARN] EC2 
VPN Connection (%s) already deleted, removing from state", d.Id()) + d.SetId("") + return nil } - if v, ok := d.GetOk("tunnel2_phase1_integrity_algorithms"); ok { - l := []*ec2.Phase1IntegrityAlgorithmsRequestListValue{} - for _, s := range v.(*schema.Set).List() { - l = append(l, &ec2.Phase1IntegrityAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + var transitGatewayAttachmentID string + if vpnConnection.TransitGatewayId != nil { + input := &ec2.DescribeTransitGatewayAttachmentsInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("resource-id"), + Values: []*string{vpnConnection.VpnConnectionId}, + }, + { + Name: aws.String("resource-type"), + Values: []*string{aws.String(ec2.TransitGatewayAttachmentResourceTypeVpn)}, + }, + { + Name: aws.String("transit-gateway-id"), + Values: []*string{vpnConnection.TransitGatewayId}, + }, + }, } - options[1].Phase1IntegrityAlgorithms = l - } - if v, ok := d.GetOk("tunnel1_phase1_lifetime_seconds"); ok { - options[0].Phase1LifetimeSeconds = aws.Int64(int64(v.(int))) + log.Printf("[DEBUG] Finding EC2 VPN Connection Transit Gateway Attachment: %s", input) + output, err := conn.DescribeTransitGatewayAttachments(input) + + if err != nil { + return fmt.Errorf("error finding EC2 VPN Connection (%s) Transit Gateway Attachment: %s", d.Id(), err) + } + + if output == nil || len(output.TransitGatewayAttachments) == 0 || output.TransitGatewayAttachments[0] == nil { + return fmt.Errorf("error finding EC2 VPN Connection (%s) Transit Gateway Attachment: empty response", d.Id()) + } + + if len(output.TransitGatewayAttachments) > 1 { + return fmt.Errorf("error reading EC2 VPN Connection (%s) Transit Gateway Attachment: multiple responses", d.Id()) + } + + transitGatewayAttachmentID = aws.StringValue(output.TransitGatewayAttachments[0].TransitGatewayAttachmentId) } - if v, ok := d.GetOk("tunnel2_phase1_lifetime_seconds"); ok { - options[1].Phase1LifetimeSeconds = aws.Int64(int64(v.(int))) + // Set attributes under the user's control. 
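+	// These values come straight from the DescribeVpnConnections response
+	// above, so any out-of-band change surfaces as a diff on the next plan.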
+ d.Set("vpn_gateway_id", vpnConnection.VpnGatewayId) + d.Set("customer_gateway_id", vpnConnection.CustomerGatewayId) + d.Set("transit_gateway_id", vpnConnection.TransitGatewayId) + d.Set("type", vpnConnection.Type) + + if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(vpnConnection.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) } - if v, ok := d.GetOk("tunnel1_phase2_dh_group_numbers"); ok { - l := []*ec2.Phase2DHGroupNumbersRequestListValue{} - for _, s := range v.(*schema.Set).List() { - l = append(l, &ec2.Phase2DHGroupNumbersRequestListValue{Value: aws.Int64(int64(s.(int)))}) + if vpnConnection.Options != nil { + if err := d.Set("enable_acceleration", vpnConnection.Options.EnableAcceleration); err != nil { + return err } - options[0].Phase2DHGroupNumbers = l - } - if v, ok := d.GetOk("tunnel2_phase2_dh_group_numbers"); ok { - l := []*ec2.Phase2DHGroupNumbersRequestListValue{} - for _, s := range v.(*schema.Set).List() { - l = append(l, &ec2.Phase2DHGroupNumbersRequestListValue{Value: aws.Int64(int64(s.(int)))}) + if err := d.Set("local_ipv4_network_cidr", vpnConnection.Options.LocalIpv4NetworkCidr); err != nil { + return err } - options[1].Phase2DHGroupNumbers = l - } - if v, ok := d.GetOk("tunnel1_phase2_encryption_algorithms"); ok { - l := []*ec2.Phase2EncryptionAlgorithmsRequestListValue{} - for _, s := range v.(*schema.Set).List() { - l = append(l, &ec2.Phase2EncryptionAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + if err := d.Set("local_ipv6_network_cidr", vpnConnection.Options.LocalIpv6NetworkCidr); err != nil { + return err } - options[0].Phase2EncryptionAlgorithms = l - } - if v, ok := d.GetOk("tunnel2_phase2_encryption_algorithms"); ok { - l := []*ec2.Phase2EncryptionAlgorithmsRequestListValue{} - for _, s := range v.(*schema.Set).List() { - l = append(l, &ec2.Phase2EncryptionAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + if err := d.Set("remote_ipv4_network_cidr", vpnConnection.Options.RemoteIpv4NetworkCidr); err != nil { + return err } - options[1].Phase2EncryptionAlgorithms = l - } - if v, ok := d.GetOk("tunnel1_phase2_integrity_algorithms"); ok { - l := []*ec2.Phase2IntegrityAlgorithmsRequestListValue{} - for _, s := range v.(*schema.Set).List() { - l = append(l, &ec2.Phase2IntegrityAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + if err := d.Set("remote_ipv6_network_cidr", vpnConnection.Options.RemoteIpv6NetworkCidr); err != nil { + return err } - options[0].Phase2IntegrityAlgorithms = l - } - if v, ok := d.GetOk("tunnel2_phase2_integrity_algorithms"); ok { - l := []*ec2.Phase2IntegrityAlgorithmsRequestListValue{} - for _, s := range v.(*schema.Set).List() { - l = append(l, &ec2.Phase2IntegrityAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + if err := d.Set("static_routes_only", vpnConnection.Options.StaticRoutesOnly); err != nil { + return err } - options[1].Phase2IntegrityAlgorithms = l - } - if v, ok := d.GetOk("tunnel1_phase2_lifetime_seconds"); ok { - options[0].Phase2LifetimeSeconds = aws.Int64(int64(v.(int))) + if err := d.Set("tunnel_inside_ip_version", vpnConnection.Options.TunnelInsideIpVersion); err != nil { + return err + } + } else { + //If there no Options on the connection then we do not support it + d.Set("enable_acceleration", false) + d.Set("local_ipv4_network_cidr", "") + d.Set("local_ipv6_network_cidr", "") + d.Set("remote_ipv4_network_cidr", "") + d.Set("remote_ipv6_network_cidr", "") + d.Set("static_routes_only", false) + 
d.Set("tunnel_inside_ip_version", "") } - if v, ok := d.GetOk("tunnel2_phase2_lifetime_seconds"); ok { - options[1].Phase2LifetimeSeconds = aws.Int64(int64(v.(int))) - } + // Set read only attributes. + d.Set("customer_gateway_configuration", vpnConnection.CustomerGatewayConfiguration) + d.Set("transit_gateway_attachment_id", transitGatewayAttachmentID) - if v, ok := d.GetOk("tunnel1_rekey_fuzz_percentage"); ok { - options[0].RekeyFuzzPercentage = aws.Int64(int64(v.(int))) + if vpnConnection.CustomerGatewayConfiguration != nil { + if tunnelInfo, err := xmlConfigToTunnelInfo(*vpnConnection.CustomerGatewayConfiguration); err != nil { + log.Printf("[ERR] Error unmarshaling XML configuration for (%s): %s", d.Id(), err) + } else { + d.Set("tunnel1_address", tunnelInfo.Tunnel1Address) + d.Set("tunnel1_cgw_inside_address", tunnelInfo.Tunnel1CgwInsideAddress) + d.Set("tunnel1_vgw_inside_address", tunnelInfo.Tunnel1VgwInsideAddress) + d.Set("tunnel1_preshared_key", tunnelInfo.Tunnel1PreSharedKey) + d.Set("tunnel1_bgp_asn", tunnelInfo.Tunnel1BGPASN) + d.Set("tunnel1_bgp_holdtime", tunnelInfo.Tunnel1BGPHoldTime) + d.Set("tunnel2_address", tunnelInfo.Tunnel2Address) + d.Set("tunnel2_preshared_key", tunnelInfo.Tunnel2PreSharedKey) + d.Set("tunnel2_cgw_inside_address", tunnelInfo.Tunnel2CgwInsideAddress) + d.Set("tunnel2_vgw_inside_address", tunnelInfo.Tunnel2VgwInsideAddress) + d.Set("tunnel2_bgp_asn", tunnelInfo.Tunnel2BGPASN) + d.Set("tunnel2_bgp_holdtime", tunnelInfo.Tunnel2BGPHoldTime) + } } - if v, ok := d.GetOk("tunnel2_rekey_fuzz_percentage"); ok { - options[1].RekeyFuzzPercentage = aws.Int64(int64(v.(int))) + if err := d.Set("vgw_telemetry", telemetryToMapList(vpnConnection.VgwTelemetry)); err != nil { + return err } - - if v, ok := d.GetOk("tunnel1_rekey_margin_time_seconds"); ok { - options[0].RekeyMarginTimeSeconds = aws.Int64(int64(v.(int))) + if err := d.Set("routes", routesToMapList(vpnConnection.Routes)); err != nil { + return err } - if v, ok := d.GetOk("tunnel2_rekey_margin_time_seconds"); ok { - options[1].RekeyMarginTimeSeconds = aws.Int64(int64(v.(int))) - } + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "ec2", + Region: meta.(*AWSClient).region, + AccountID: meta.(*AWSClient).accountid, + Resource: fmt.Sprintf("vpn-connection/%s", d.Id()), + }.String() - if v, ok := d.GetOk("tunnel1_replay_window_size"); ok { - options[0].ReplayWindowSize = aws.Int64(int64(v.(int))) - } + d.Set("arn", arn) - if v, ok := d.GetOk("tunnel2_replay_window_size"); ok { - options[1].ReplayWindowSize = aws.Int64(int64(v.(int))) - } + return nil +} - if v, ok := d.GetOk("tunnel1_startup_action"); ok { - options[0].StartupAction = aws.String(v.(string)) - } +func resourceAwsVpnConnectionUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn - if v, ok := d.GetOk("tunnel2_startup_action"); ok { - options[1].StartupAction = aws.String(v.(string)) + if err := modifyVpnConnectionOptions(d, conn); err != nil { + return err } - if v, ok := d.GetOk("tunnel1_inside_cidr"); ok { - options[0].TunnelInsideCidr = aws.String(v.(string)) + if err := modifyVpnTunnels(d, conn); err != nil { + return err } - if v, ok := d.GetOk("tunnel2_inside_cidr"); ok { - options[1].TunnelInsideCidr = aws.String(v.(string)) - } + if d.HasChange("tags") { + o, n := d.GetChange("tags") + vpnConnectionID := d.Id() - if v, ok := d.GetOk("tunnel1_inside_ipv6_cidr"); ok { - options[0].TunnelInsideIpv6Cidr = aws.String(v.(string)) + if err := keyvaluetags.Ec2UpdateTags(conn, 
vpnConnectionID, o, n); err != nil { + return fmt.Errorf("error updating EC2 VPN Connection (%s) tags: %s", d.Id(), err) + } } - if v, ok := d.GetOk("tunnel2_inside_ipv6_cidr"); ok { - options[1].TunnelInsideIpv6Cidr = aws.String(v.(string)) + return resourceAwsVpnConnectionRead(d, meta) +} + +func resourceAwsVpnConnectionDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + _, err := conn.DeleteVpnConnection(&ec2.DeleteVpnConnectionInput{ + VpnConnectionId: aws.String(d.Id()), + }) + + if isAWSErr(err, "InvalidVpnConnectionID.NotFound", "") { + return nil } - if v, ok := d.GetOk("tunnel1_preshared_key"); ok { - options[0].PreSharedKey = aws.String(v.(string)) + if err != nil { + return fmt.Errorf("error deleting VPN Connection (%s): %s", d.Id(), err) } - if v, ok := d.GetOk("tunnel2_preshared_key"); ok { - options[1].PreSharedKey = aws.String(v.(string)) + if err := waitForEc2VpnConnectionDeletion(conn, d.Id()); err != nil { + return fmt.Errorf("error waiting for VPN connection (%s) to delete: %s", d.Id(), err) } + return nil +} + +func expandVpnConnectionOptions(d *schema.ResourceData) *ec2.VpnConnectionOptionsSpecification { var connectOpts *ec2.VpnConnectionOptionsSpecification = new(ec2.VpnConnectionOptionsSpecification) ipv := d.Get("tunnel_inside_ip_version").(string) if ipv == "ipv6" { @@ -753,238 +816,258 @@ func resourceAwsVpnConnectionCreate(d *schema.ResourceData, meta interface{}) er connectOpts.StaticRoutesOnly = aws.Bool(v.(bool)) } - connectOpts.TunnelOptions = options + // Fill the tunnel options for the EC2 API + connectOpts.TunnelOptions = expandVpnTunnelOptions(d) - createOpts := &ec2.CreateVpnConnectionInput{ - CustomerGatewayId: aws.String(d.Get("customer_gateway_id").(string)), - Options: connectOpts, - Type: aws.String(d.Get("type").(string)), - TagSpecifications: ec2TagSpecificationsFromMap(d.Get("tags").(map[string]interface{}), ec2.ResourceTypeVpnConnection), - } + return connectOpts +} - if v, ok := d.GetOk("transit_gateway_id"); ok { - createOpts.TransitGatewayId = aws.String(v.(string)) +func expandVpnTunnelOptions(d *schema.ResourceData) []*ec2.VpnTunnelOptionsSpecification { + options := []*ec2.VpnTunnelOptionsSpecification{ + {}, {}, } - if v, ok := d.GetOk("vpn_gateway_id"); ok { - createOpts.VpnGatewayId = aws.String(v.(string)) + if v, ok := d.GetOk("tunnel1_dpd_timeout_action"); ok { + options[0].DPDTimeoutAction = aws.String(v.(string)) } - // Create the VPN Connection - log.Printf("[DEBUG] Creating vpn connection") - resp, err := conn.CreateVpnConnection(createOpts) - if err != nil { - return fmt.Errorf("Error creating vpn connection: %s", err) + if v, ok := d.GetOk("tunnel2_dpd_timeout_action"); ok { + options[1].DPDTimeoutAction = aws.String(v.(string)) } - d.SetId(aws.StringValue(resp.VpnConnection.VpnConnectionId)) - - if err := waitForEc2VpnConnectionAvailable(conn, d.Id()); err != nil { - return fmt.Errorf("error waiting for VPN connection (%s) to become available: %s", d.Id(), err) + if v, ok := d.GetOk("tunnel1_dpd_timeout_seconds"); ok { + options[0].DPDTimeoutSeconds = aws.Int64(int64(v.(int))) } - // Read off the API to populate our RO fields. 
- return resourceAwsVpnConnectionRead(d, meta) -} - -func vpnConnectionRefreshFunc(conn *ec2.EC2, connectionId string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - resp, err := conn.DescribeVpnConnections(&ec2.DescribeVpnConnectionsInput{ - VpnConnectionIds: []*string{aws.String(connectionId)}, - }) + if v, ok := d.GetOk("tunnel2_dpd_timeout_seconds"); ok { + options[1].DPDTimeoutSeconds = aws.Int64(int64(v.(int))) + } - if err != nil { - if isAWSErr(err, "InvalidVpnConnectionID.NotFound", "") { - resp = nil - } else { - log.Printf("Error on VPNConnectionRefresh: %s", err) - return nil, "", err - } + if v, ok := d.GetOk("tunnel1_ike_versions"); ok { + l := []*ec2.IKEVersionsRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.IKEVersionsRequestListValue{Value: aws.String(s.(string))}) } + options[0].IKEVersions = l + } - if resp == nil || len(resp.VpnConnections) == 0 { - return nil, "", nil + if v, ok := d.GetOk("tunnel2_ike_versions"); ok { + l := []*ec2.IKEVersionsRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.IKEVersionsRequestListValue{Value: aws.String(s.(string))}) } + options[1].IKEVersions = l + } - connection := resp.VpnConnections[0] - return connection, aws.StringValue(connection.State), nil + if v, ok := d.GetOk("tunnel1_phase1_dh_group_numbers"); ok { + l := []*ec2.Phase1DHGroupNumbersRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase1DHGroupNumbersRequestListValue{Value: aws.Int64(int64(s.(int)))}) + } + options[0].Phase1DHGroupNumbers = l } -} -func resourceAwsVpnConnectionRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig + if v, ok := d.GetOk("tunnel2_phase1_dh_group_numbers"); ok { + l := []*ec2.Phase1DHGroupNumbersRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase1DHGroupNumbersRequestListValue{Value: aws.Int64(int64(s.(int)))}) + } + options[1].Phase1DHGroupNumbers = l + } - resp, err := conn.DescribeVpnConnections(&ec2.DescribeVpnConnectionsInput{ - VpnConnectionIds: []*string{aws.String(d.Id())}, - }) + if v, ok := d.GetOk("tunnel1_phase1_encryption_algorithms"); ok { + l := []*ec2.Phase1EncryptionAlgorithmsRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase1EncryptionAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + } + options[0].Phase1EncryptionAlgorithms = l + } - if isAWSErr(err, "InvalidVpnConnectionID.NotFound", "") { - log.Printf("[WARN] EC2 VPN Connection (%s) not found, removing from state", d.Id()) - d.SetId("") - return nil + if v, ok := d.GetOk("tunnel2_phase1_encryption_algorithms"); ok { + l := []*ec2.Phase1EncryptionAlgorithmsRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase1EncryptionAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + } + options[1].Phase1EncryptionAlgorithms = l } - if err != nil { - return fmt.Errorf("error reading EC2 VPN Connection (%s): %s", d.Id(), err) + if v, ok := d.GetOk("tunnel1_phase1_integrity_algorithms"); ok { + l := []*ec2.Phase1IntegrityAlgorithmsRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase1IntegrityAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + } + options[0].Phase1IntegrityAlgorithms = l } - if resp == nil || len(resp.VpnConnections) == 0 || resp.VpnConnections[0] == nil { - return 
fmt.Errorf("error reading EC2 VPN Connection (%s): empty response", d.Id()) + if v, ok := d.GetOk("tunnel2_phase1_integrity_algorithms"); ok { + l := []*ec2.Phase1IntegrityAlgorithmsRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase1IntegrityAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + } + options[1].Phase1IntegrityAlgorithms = l } - if len(resp.VpnConnections) > 1 { - return fmt.Errorf("error reading EC2 VPN Connection (%s): multiple responses", d.Id()) + if v, ok := d.GetOk("tunnel1_phase1_lifetime_seconds"); ok { + options[0].Phase1LifetimeSeconds = aws.Int64(int64(v.(int))) } - vpnConnection := resp.VpnConnections[0] + if v, ok := d.GetOk("tunnel2_phase1_lifetime_seconds"); ok { + options[1].Phase1LifetimeSeconds = aws.Int64(int64(v.(int))) + } - if aws.StringValue(vpnConnection.State) == ec2.VpnStateDeleted { - log.Printf("[WARN] EC2 VPN Connection (%s) already deleted, removing from state", d.Id()) - d.SetId("") - return nil + if v, ok := d.GetOk("tunnel1_phase2_dh_group_numbers"); ok { + l := []*ec2.Phase2DHGroupNumbersRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase2DHGroupNumbersRequestListValue{Value: aws.Int64(int64(s.(int)))}) + } + options[0].Phase2DHGroupNumbers = l } - var transitGatewayAttachmentID string - if vpnConnection.TransitGatewayId != nil { - input := &ec2.DescribeTransitGatewayAttachmentsInput{ - Filters: []*ec2.Filter{ - { - Name: aws.String("resource-id"), - Values: []*string{vpnConnection.VpnConnectionId}, - }, - { - Name: aws.String("resource-type"), - Values: []*string{aws.String(ec2.TransitGatewayAttachmentResourceTypeVpn)}, - }, - { - Name: aws.String("transit-gateway-id"), - Values: []*string{vpnConnection.TransitGatewayId}, - }, - }, + if v, ok := d.GetOk("tunnel2_phase2_dh_group_numbers"); ok { + l := []*ec2.Phase2DHGroupNumbersRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase2DHGroupNumbersRequestListValue{Value: aws.Int64(int64(s.(int)))}) } + options[1].Phase2DHGroupNumbers = l + } - log.Printf("[DEBUG] Finding EC2 VPN Connection Transit Gateway Attachment: %s", input) - output, err := conn.DescribeTransitGatewayAttachments(input) + if v, ok := d.GetOk("tunnel1_phase2_encryption_algorithms"); ok { + l := []*ec2.Phase2EncryptionAlgorithmsRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase2EncryptionAlgorithmsRequestListValue{Value: aws.String(s.(string))}) + } + options[0].Phase2EncryptionAlgorithms = l + } - if err != nil { - return fmt.Errorf("error finding EC2 VPN Connection (%s) Transit Gateway Attachment: %s", d.Id(), err) + if v, ok := d.GetOk("tunnel2_phase2_encryption_algorithms"); ok { + l := []*ec2.Phase2EncryptionAlgorithmsRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase2EncryptionAlgorithmsRequestListValue{Value: aws.String(s.(string))}) } + options[1].Phase2EncryptionAlgorithms = l + } - if output == nil || len(output.TransitGatewayAttachments) == 0 || output.TransitGatewayAttachments[0] == nil { - return fmt.Errorf("error finding EC2 VPN Connection (%s) Transit Gateway Attachment: empty response", d.Id()) + if v, ok := d.GetOk("tunnel1_phase2_integrity_algorithms"); ok { + l := []*ec2.Phase2IntegrityAlgorithmsRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase2IntegrityAlgorithmsRequestListValue{Value: aws.String(s.(string))}) } + options[0].Phase2IntegrityAlgorithms = l + } - if 
len(output.TransitGatewayAttachments) > 1 { - return fmt.Errorf("error reading EC2 VPN Connection (%s) Transit Gateway Attachment: multiple responses", d.Id()) + if v, ok := d.GetOk("tunnel2_phase2_integrity_algorithms"); ok { + l := []*ec2.Phase2IntegrityAlgorithmsRequestListValue{} + for _, s := range v.(*schema.Set).List() { + l = append(l, &ec2.Phase2IntegrityAlgorithmsRequestListValue{Value: aws.String(s.(string))}) } + options[1].Phase2IntegrityAlgorithms = l + } - transitGatewayAttachmentID = aws.StringValue(output.TransitGatewayAttachments[0].TransitGatewayAttachmentId) + if v, ok := d.GetOk("tunnel1_phase2_lifetime_seconds"); ok { + options[0].Phase2LifetimeSeconds = aws.Int64(int64(v.(int))) } - // Set attributes under the user's control. - d.Set("vpn_gateway_id", vpnConnection.VpnGatewayId) - d.Set("customer_gateway_id", vpnConnection.CustomerGatewayId) - d.Set("transit_gateway_id", vpnConnection.TransitGatewayId) - d.Set("type", vpnConnection.Type) + if v, ok := d.GetOk("tunnel2_phase2_lifetime_seconds"); ok { + options[1].Phase2LifetimeSeconds = aws.Int64(int64(v.(int))) + } - if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(vpnConnection.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + if v, ok := d.GetOk("tunnel1_rekey_fuzz_percentage"); ok { + options[0].RekeyFuzzPercentage = aws.Int64(int64(v.(int))) } - if vpnConnection.Options != nil { - if err := d.Set("enable_acceleration", vpnConnection.Options.EnableAcceleration); err != nil { - return err - } + if v, ok := d.GetOk("tunnel2_rekey_fuzz_percentage"); ok { + options[1].RekeyFuzzPercentage = aws.Int64(int64(v.(int))) + } - if err := d.Set("local_ipv4_network_cidr", vpnConnection.Options.LocalIpv4NetworkCidr); err != nil { - return err - } + if v, ok := d.GetOk("tunnel1_rekey_margin_time_seconds"); ok { + options[0].RekeyMarginTimeSeconds = aws.Int64(int64(v.(int))) + } - if err := d.Set("local_ipv6_network_cidr", vpnConnection.Options.LocalIpv6NetworkCidr); err != nil { - return err - } + if v, ok := d.GetOk("tunnel2_rekey_margin_time_seconds"); ok { + options[1].RekeyMarginTimeSeconds = aws.Int64(int64(v.(int))) + } - if err := d.Set("remote_ipv4_network_cidr", vpnConnection.Options.RemoteIpv4NetworkCidr); err != nil { - return err - } + if v, ok := d.GetOk("tunnel1_replay_window_size"); ok { + options[0].ReplayWindowSize = aws.Int64(int64(v.(int))) + } - if err := d.Set("remote_ipv6_network_cidr", vpnConnection.Options.RemoteIpv6NetworkCidr); err != nil { - return err - } + if v, ok := d.GetOk("tunnel2_replay_window_size"); ok { + options[1].ReplayWindowSize = aws.Int64(int64(v.(int))) + } - if err := d.Set("static_routes_only", vpnConnection.Options.StaticRoutesOnly); err != nil { - return err - } + if v, ok := d.GetOk("tunnel1_startup_action"); ok { + options[0].StartupAction = aws.String(v.(string)) + } - if err := d.Set("tunnel_inside_ip_version", vpnConnection.Options.TunnelInsideIpVersion); err != nil { - return err - } - } else { - //If there no Options on the connection then we do not support it - d.Set("enable_acceleration", false) - d.Set("local_ipv4_network_cidr", "") - d.Set("local_ipv6_network_cidr", "") - d.Set("remote_ipv4_network_cidr", "") - d.Set("remote_ipv6_network_cidr", "") - d.Set("static_routes_only", false) - d.Set("tunnel_inside_ip_version", "") + if v, ok := d.GetOk("tunnel2_startup_action"); ok { + options[1].StartupAction = aws.String(v.(string)) } - // Set read only attributes. 
- d.Set("customer_gateway_configuration", vpnConnection.CustomerGatewayConfiguration) - d.Set("transit_gateway_attachment_id", transitGatewayAttachmentID) + if v, ok := d.GetOk("tunnel1_inside_cidr"); ok { + options[0].TunnelInsideCidr = aws.String(v.(string)) + } - if vpnConnection.CustomerGatewayConfiguration != nil { - if tunnelInfo, err := xmlConfigToTunnelInfo(*vpnConnection.CustomerGatewayConfiguration); err != nil { - log.Printf("[ERR] Error unmarshaling XML configuration for (%s): %s", d.Id(), err) - } else { - d.Set("tunnel1_address", tunnelInfo.Tunnel1Address) - d.Set("tunnel1_cgw_inside_address", tunnelInfo.Tunnel1CgwInsideAddress) - d.Set("tunnel1_vgw_inside_address", tunnelInfo.Tunnel1VgwInsideAddress) - d.Set("tunnel1_preshared_key", tunnelInfo.Tunnel1PreSharedKey) - d.Set("tunnel1_bgp_asn", tunnelInfo.Tunnel1BGPASN) - d.Set("tunnel1_bgp_holdtime", tunnelInfo.Tunnel1BGPHoldTime) - d.Set("tunnel2_address", tunnelInfo.Tunnel2Address) - d.Set("tunnel2_preshared_key", tunnelInfo.Tunnel2PreSharedKey) - d.Set("tunnel2_cgw_inside_address", tunnelInfo.Tunnel2CgwInsideAddress) - d.Set("tunnel2_vgw_inside_address", tunnelInfo.Tunnel2VgwInsideAddress) - d.Set("tunnel2_bgp_asn", tunnelInfo.Tunnel2BGPASN) - d.Set("tunnel2_bgp_holdtime", tunnelInfo.Tunnel2BGPHoldTime) - } + if v, ok := d.GetOk("tunnel2_inside_cidr"); ok { + options[1].TunnelInsideCidr = aws.String(v.(string)) } - if err := d.Set("vgw_telemetry", telemetryToMapList(vpnConnection.VgwTelemetry)); err != nil { - return err + if v, ok := d.GetOk("tunnel1_inside_ipv6_cidr"); ok { + options[0].TunnelInsideIpv6Cidr = aws.String(v.(string)) } - if err := d.Set("routes", routesToMapList(vpnConnection.Routes)); err != nil { - return err + + if v, ok := d.GetOk("tunnel2_inside_ipv6_cidr"); ok { + options[1].TunnelInsideIpv6Cidr = aws.String(v.(string)) } - arn := arn.ARN{ - Partition: meta.(*AWSClient).partition, - Service: "ec2", - Region: meta.(*AWSClient).region, - AccountID: meta.(*AWSClient).accountid, - Resource: fmt.Sprintf("vpn-connection/%s", d.Id()), - }.String() + if v, ok := d.GetOk("tunnel1_preshared_key"); ok { + options[0].PreSharedKey = aws.String(v.(string)) + } - d.Set("arn", arn) + if v, ok := d.GetOk("tunnel2_preshared_key"); ok { + options[1].PreSharedKey = aws.String(v.(string)) + } - return nil + return options } -func resourceAwsVpnConnectionUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn +// routesToMapList turns the list of routes into a list of maps. +func routesToMapList(routes []*ec2.VpnStaticRoute) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(routes)) + for _, r := range routes { + staticRoute := make(map[string]interface{}) + staticRoute["destination_cidr_block"] = aws.StringValue(r.DestinationCidrBlock) + staticRoute["state"] = aws.StringValue(r.State) - tun1Changed := false - tun2Changed := false - vgwTelemetryTun1Index := 0 - vgwTelemetryTun2Index := 1 - options := []*ec2.ModifyVpnTunnelOptionsSpecification{ - {}, {}, + if r.Source != nil { + staticRoute["source"] = aws.StringValue(r.Source) + } + + result = append(result, staticRoute) + } + + return result +} + +// telemetryToMapList turns the VGW telemetry into a list of maps. 
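+// The result backs the "vgw_telemetry" attribute set in resourceAwsVpnConnectionRead.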
+func telemetryToMapList(telemetry []*ec2.VgwTelemetry) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(telemetry)) + for _, t := range telemetry { + vgw := make(map[string]interface{}) + vgw["accepted_route_count"] = aws.Int64Value(t.AcceptedRouteCount) + vgw["outside_ip_address"] = aws.StringValue(t.OutsideIpAddress) + vgw["status"] = aws.StringValue(t.Status) + vgw["status_message"] = aws.StringValue(t.StatusMessage) + + // LastStatusChange is a time.Time(). Convert it into a string + // so it can be handled by schema's type system. + vgw["last_status_change"] = t.LastStatusChange.Format(time.RFC3339) + result = append(result, vgw) } + return result +} + +func modifyVpnConnectionOptions(d *schema.ResourceData, conn *ec2.EC2) error { var connOpts *ec2.ModifyVpnConnectionOptionsInput = new(ec2.ModifyVpnConnectionOptionsInput) connChanged := false @@ -1022,6 +1105,20 @@ func resourceAwsVpnConnectionUpdate(d *schema.ResourceData, meta interface{}) er } } + return nil +} + +func modifyVpnTunnels(d *schema.ResourceData, conn *ec2.EC2) error { + tun1Changed := false + tun2Changed := false + vgwTelemetryTun1Index := 0 + vgwTelemetryTun2Index := 1 + options := []*ec2.ModifyVpnTunnelOptionsSpecification{ + {}, {}, + } + + vpnConnectionID := d.Id() + if d.HasChange("tunnel1_dpd_timeout_action") { tun1Changed = true options[0].DPDTimeoutAction = aws.String(d.Get("tunnel1_dpd_timeout_action").(string)) @@ -1240,76 +1337,9 @@ func resourceAwsVpnConnectionUpdate(d *schema.ResourceData, meta interface{}) er } } - if d.HasChange("tags") { - o, n := d.GetChange("tags") - - if err := keyvaluetags.Ec2UpdateTags(conn, vpnConnectionID, o, n); err != nil { - return fmt.Errorf("error updating EC2 VPN Connection (%s) tags: %s", d.Id(), err) - } - } - - return resourceAwsVpnConnectionRead(d, meta) -} - -func resourceAwsVpnConnectionDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - - _, err := conn.DeleteVpnConnection(&ec2.DeleteVpnConnectionInput{ - VpnConnectionId: aws.String(d.Id()), - }) - - if isAWSErr(err, "InvalidVpnConnectionID.NotFound", "") { - return nil - } - - if err != nil { - return fmt.Errorf("error deleting VPN Connection (%s): %s", d.Id(), err) - } - - if err := waitForEc2VpnConnectionDeletion(conn, d.Id()); err != nil { - return fmt.Errorf("error waiting for VPN connection (%s) to delete: %s", d.Id(), err) - } - return nil } -// routesToMapList turns the list of routes into a list of maps. -func routesToMapList(routes []*ec2.VpnStaticRoute) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(routes)) - for _, r := range routes { - staticRoute := make(map[string]interface{}) - staticRoute["destination_cidr_block"] = aws.StringValue(r.DestinationCidrBlock) - staticRoute["state"] = aws.StringValue(r.State) - - if r.Source != nil { - staticRoute["source"] = aws.StringValue(r.Source) - } - - result = append(result, staticRoute) - } - - return result -} - -// telemetryToMapList turns the VGW telemetry into a list of maps. 
-func telemetryToMapList(telemetry []*ec2.VgwTelemetry) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(telemetry)) - for _, t := range telemetry { - vgw := make(map[string]interface{}) - vgw["accepted_route_count"] = aws.Int64Value(t.AcceptedRouteCount) - vgw["outside_ip_address"] = aws.StringValue(t.OutsideIpAddress) - vgw["status"] = aws.StringValue(t.Status) - vgw["status_message"] = aws.StringValue(t.StatusMessage) - - // LastStatusChange is a time.Time(). Convert it into a string - // so it can be handled by schema's type system. - vgw["last_status_change"] = t.LastStatusChange.Format(time.RFC3339) - result = append(result, vgw) - } - - return result -} - func modifyVpnTunnelOptions(conn *ec2.EC2, vgwTelemetry *schema.Set, vpnConnectionID string, vgwTelemetryTunIndex int, optionsTun *ec2.ModifyVpnTunnelOptionsSpecification) error { if v := vgwTelemetry; v.Len() > 0 { vpnTunnelOutsideIPAddress := v.List()[vgwTelemetryTunIndex].(map[string]interface{})["outside_ip_address"].(string) diff --git a/aws/resource_aws_vpn_connection_test.go b/aws/resource_aws_vpn_connection_test.go index 0f4977f4190..3b91a0b2256 100644 --- a/aws/resource_aws_vpn_connection_test.go +++ b/aws/resource_aws_vpn_connection_test.go @@ -14,6 +14,26 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) +type TunnelOptions struct { + psk string + tunnelCidr string + dpdTimeoutAction string + dpdTimeoutSeconds int + ikeVersions string + phase1DhGroupNumbers string + phase1EncryptionAlgorithms string + phase1IntegrityAlgorithms string + phase1LifetimeSeconds int + phase2DhGroupNumbers string + phase2EncryptionAlgorithms string + phase2IntegrityAlgorithms string + phase2LifetimeSeconds int + rekeyFuzzPercentage int + rekeyMarginTimeSeconds int + replayWindowSize int + startupAction string +} + func init() { resource.AddTestSweepers("aws_vpn_connection", &resource.Sweeper{ Name: "aws_vpn_connection", @@ -145,6 +165,46 @@ func TestAccAWSVpnConnection_tunnelOptions(t *testing.T) { resourceName := "aws_vpn_connection.test" var vpn ec2.VpnConnection + tunnel1 := TunnelOptions{ + psk: "12345678", + tunnelCidr: "169.254.8.0/30", + dpdTimeoutAction: "clear", + dpdTimeoutSeconds: 30, + ikeVersions: "\"ikev1\", \"ikev2\"", + phase1DhGroupNumbers: "2, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24", + phase1EncryptionAlgorithms: "\"AES128\", \"AES256\", \"AES128-GCM-16\", \"AES256-GCM-16\"", + phase1IntegrityAlgorithms: "\"SHA1\", \"SHA2-256\", \"SHA2-384\", \"SHA2-512\"", + phase1LifetimeSeconds: 28800, + phase2DhGroupNumbers: "2, 5, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24", + phase2EncryptionAlgorithms: "\"AES128\", \"AES256\", \"AES128-GCM-16\", \"AES256-GCM-16\"", + phase2IntegrityAlgorithms: "\"SHA1\", \"SHA2-256\", \"SHA2-384\", \"SHA2-512\"", + phase2LifetimeSeconds: 3600, + rekeyFuzzPercentage: 100, + rekeyMarginTimeSeconds: 540, + replayWindowSize: 1024, + startupAction: "add", + } + + tunnel2 := TunnelOptions{ + psk: "abcdefgh", + tunnelCidr: "169.254.9.0/30", + dpdTimeoutAction: "clear", + dpdTimeoutSeconds: 30, + ikeVersions: "\"ikev1\", \"ikev2\"", + phase1DhGroupNumbers: "2, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24", + phase1EncryptionAlgorithms: "\"AES128\", \"AES256\", \"AES128-GCM-16\", \"AES256-GCM-16\"", + phase1IntegrityAlgorithms: "\"SHA1\", \"SHA2-256\", \"SHA2-384\", \"SHA2-512\"", + phase1LifetimeSeconds: 28800, + phase2DhGroupNumbers: "2, 5, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24", + phase2EncryptionAlgorithms: "\"AES128\", \"AES256\", \"AES128-GCM-16\", 
\"AES256-GCM-16\"", + phase2IntegrityAlgorithms: "\"SHA1\", \"SHA2-256\", \"SHA2-384\", \"SHA2-512\"", + phase2LifetimeSeconds: 3600, + rekeyFuzzPercentage: 100, + rekeyMarginTimeSeconds: 540, + replayWindowSize: 1024, + startupAction: "add", + } + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, IDRefreshName: resourceName, @@ -230,44 +290,7 @@ func TestAccAWSVpnConnection_tunnelOptions(t *testing.T) { //Try actual building { - Config: testAccAwsVpnConnectionConfigTunnelOptions( - rBgpAsn, - "192.168.1.1/32", - "192.168.1.2/32", - "12345678", - "169.254.8.0/30", - "clear", - 30, - "\"ikev1\", \"ikev2\"", - "2, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24", - "\"AES128\", \"AES256\", \"AES128-GCM-16\", \"AES256-GCM-16\"", - "\"SHA1\", \"SHA2-256\", \"SHA2-384\", \"SHA2-512\"", - 28800, - "2, 5, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24", - "\"AES128\", \"AES256\", \"AES128-GCM-16\", \"AES256-GCM-16\"", - "\"SHA1\", \"SHA2-256\", \"SHA2-384\", \"SHA2-512\"", - 3600, - 100, - 540, - 1024, - "add", - "abcdefgh", - "169.254.9.0/30", - "clear", - 30, - "\"ikev1\", \"ikev2\"", - "2, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24", - "\"AES128\", \"AES256\", \"AES128-GCM-16\", \"AES256-GCM-16\"", - "\"SHA1\", \"SHA2-256\", \"SHA2-384\", \"SHA2-512\"", - 28800, - "2, 5, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24", - "\"AES128\", \"AES256\", \"AES128-GCM-16\", \"AES256-GCM-16\"", - "\"SHA1\", \"SHA2-256\", \"SHA2-384\", \"SHA2-512\"", - 3600, - 100, - 540, - 1024, - "add"), + Config: testAccAwsVpnConnectionConfigTunnelOptions(rBgpAsn, "192.168.1.1/32", "192.168.1.2/32", tunnel1, tunnel2), Check: resource.ComposeTestCheckFunc( testAccAwsVpnConnectionExists(resourceName, &vpn), resource.TestCheckResourceAttr(resourceName, "static_routes_only", "false"), @@ -646,12 +669,12 @@ resource "aws_vpn_connection" "test" { static_routes_only = false enable_acceleration = false - local_ipv6_network_cidr = "%s" - remote_ipv6_network_cidr = "%s" + local_ipv6_network_cidr = %[2]q + remote_ipv6_network_cidr = %[3]q tunnel_inside_ip_version = "ipv6" - tunnel1_inside_ipv6_cidr = "%s" - tunnel2_inside_ipv6_cidr = "%s" + tunnel1_inside_ipv6_cidr = %[4]q + tunnel2_inside_ipv6_cidr = %[5]q } `, rBgpAsn, localIpv6NetworkCidr, remoteIpv6NetworkCidr, tunnel1InsideIpv6Cidr, tunnel2InsideIpv6Cidr) } @@ -712,40 +735,8 @@ func testAccAwsVpnConnectionConfigTunnelOptions( rBgpAsn int, localIpv4NetworkCidr string, remoteIpv4NetworkCidr string, - psk string, - tunnelCidr string, - dpdTimeoutAction string, - dpdTimeoutSeconds int, - ikeVersions string, - phase1DhGroupNumbers string, - phase1EncryptionAlgorithms string, - phase1IntegrityAlgorithms string, - phase1LifetimeSeconds int, - phase2DhGroupNumbers string, - phase2EncryptionAlgorithms string, - phase2IntegrityAlgorithms string, - phase2LifetimeSeconds int, - rekeyFuzzPercentage int, - rekeyMarginTimeSeconds int, - replayWindowSize int, - startupAction string, - psk2 string, - tunnelCidr2 string, - dpdTimeoutAction2 string, - dpdTimeoutSeconds2 int, - ikeVersions2 string, - phase1DhGroupNumbers2 string, - phase1EncryptionAlgorithms2 string, - phase1IntegrityAlgorithms2 string, - phase1LifetimeSeconds2 int, - phase2DhGroupNumbers2 string, - phase2EncryptionAlgorithms2 string, - phase2IntegrityAlgorithms2 string, - phase2LifetimeSeconds2 int, - rekeyFuzzPercentage2 int, - rekeyMarginTimeSeconds2 int, - replayWindowSize2 int, - startupAction2 string, + tunnel1 TunnelOptions, + tunnel2 TunnelOptions, ) string { return fmt.Sprintf(` resource 
"aws_vpn_gateway" "vpn_gateway" { @@ -770,83 +761,83 @@ resource "aws_vpn_connection" "test" { type = "ipsec.1" static_routes_only = false - local_ipv4_network_cidr = "%s" - remote_ipv4_network_cidr = "%s" - - tunnel1_inside_cidr = "%s" - tunnel1_preshared_key = "%s" - tunnel1_dpd_timeout_action = "%s" - tunnel1_dpd_timeout_seconds = %d - tunnel1_ike_versions = [%s] - tunnel1_phase1_dh_group_numbers = [%s] - tunnel1_phase1_encryption_algorithms = [%s] - tunnel1_phase1_integrity_algorithms = [%s] - tunnel1_phase1_lifetime_seconds = %d - tunnel1_phase2_dh_group_numbers = [%s] - tunnel1_phase2_encryption_algorithms = [%s] - tunnel1_phase2_integrity_algorithms = [%s] - tunnel1_phase2_lifetime_seconds = %d - tunnel1_rekey_fuzz_percentage = %d - tunnel1_rekey_margin_time_seconds = %d - tunnel1_replay_window_size = %d - tunnel1_startup_action = "%s" - - tunnel2_inside_cidr = "%s" - tunnel2_preshared_key = "%s" - tunnel2_dpd_timeout_action = "%s" - tunnel2_dpd_timeout_seconds = %d - tunnel2_ike_versions = [%s] - tunnel2_phase1_dh_group_numbers = [%s] - tunnel2_phase1_encryption_algorithms = [%s] - tunnel2_phase1_integrity_algorithms = [%s] - tunnel2_phase1_lifetime_seconds = %d - tunnel2_phase2_dh_group_numbers = [%s] - tunnel2_phase2_encryption_algorithms = [%s] - tunnel2_phase2_integrity_algorithms = [%s] - tunnel2_phase2_lifetime_seconds = %d - tunnel2_rekey_fuzz_percentage = %d - tunnel2_rekey_margin_time_seconds = %d - tunnel2_replay_window_size = %d - tunnel2_startup_action = "%s" + local_ipv4_network_cidr = %[2]q + remote_ipv4_network_cidr = %[3]q + + tunnel1_inside_cidr = %[4]q + tunnel1_preshared_key = %[5]q + tunnel1_dpd_timeout_action = %[6]q + tunnel1_dpd_timeout_seconds = %[7]d + tunnel1_ike_versions = [%[8]s] + tunnel1_phase1_dh_group_numbers = [%[9]s] + tunnel1_phase1_encryption_algorithms = [%[10]s] + tunnel1_phase1_integrity_algorithms = [%[11]s] + tunnel1_phase1_lifetime_seconds = %[12]d + tunnel1_phase2_dh_group_numbers = [%[13]s] + tunnel1_phase2_encryption_algorithms = [%[14]s] + tunnel1_phase2_integrity_algorithms = [%[15]s] + tunnel1_phase2_lifetime_seconds = %[16]d + tunnel1_rekey_fuzz_percentage = %[17]d + tunnel1_rekey_margin_time_seconds = %[18]d + tunnel1_replay_window_size = %[19]d + tunnel1_startup_action = %[20]q + + tunnel2_inside_cidr = %[21]q + tunnel2_preshared_key = %[22]q + tunnel2_dpd_timeout_action = %[23]q + tunnel2_dpd_timeout_seconds = %[24]d + tunnel2_ike_versions = [%[25]s] + tunnel2_phase1_dh_group_numbers = [%[26]s] + tunnel2_phase1_encryption_algorithms = [%[27]s] + tunnel2_phase1_integrity_algorithms = [%[28]s] + tunnel2_phase1_lifetime_seconds = %[29]d + tunnel2_phase2_dh_group_numbers = [%[30]s] + tunnel2_phase2_encryption_algorithms = [%[31]s] + tunnel2_phase2_integrity_algorithms = [%[32]s] + tunnel2_phase2_lifetime_seconds = %[33]d + tunnel2_rekey_fuzz_percentage = %[34]d + tunnel2_rekey_margin_time_seconds = %[35]d + tunnel2_replay_window_size = %[36]d + tunnel2_startup_action = %[37]q } `, rBgpAsn, localIpv4NetworkCidr, remoteIpv4NetworkCidr, - tunnelCidr, - psk, - dpdTimeoutAction, - dpdTimeoutSeconds, - ikeVersions, - phase1DhGroupNumbers, - phase1EncryptionAlgorithms, - phase1IntegrityAlgorithms, - phase1LifetimeSeconds, - phase2DhGroupNumbers, - phase2EncryptionAlgorithms, - phase2IntegrityAlgorithms, - phase2LifetimeSeconds, - rekeyFuzzPercentage, - rekeyMarginTimeSeconds, - replayWindowSize, - startupAction, - tunnelCidr2, - psk2, - dpdTimeoutAction2, - dpdTimeoutSeconds2, - ikeVersions2, - phase1DhGroupNumbers2, - 
phase1EncryptionAlgorithms2,
-	phase1IntegrityAlgorithms2,
-	phase1LifetimeSeconds2,
-	phase2DhGroupNumbers2,
-	phase2EncryptionAlgorithms2,
-	phase2IntegrityAlgorithms2,
-	phase2LifetimeSeconds2,
-	rekeyFuzzPercentage2,
-	rekeyMarginTimeSeconds2,
-	replayWindowSize2,
-	startupAction2)
+	tunnel1.tunnelCidr,
+	tunnel1.psk,
+	tunnel1.dpdTimeoutAction,
+	tunnel1.dpdTimeoutSeconds,
+	tunnel1.ikeVersions,
+	tunnel1.phase1DhGroupNumbers,
+	tunnel1.phase1EncryptionAlgorithms,
+	tunnel1.phase1IntegrityAlgorithms,
+	tunnel1.phase1LifetimeSeconds,
+	tunnel1.phase2DhGroupNumbers,
+	tunnel1.phase2EncryptionAlgorithms,
+	tunnel1.phase2IntegrityAlgorithms,
+	tunnel1.phase2LifetimeSeconds,
+	tunnel1.rekeyFuzzPercentage,
+	tunnel1.rekeyMarginTimeSeconds,
+	tunnel1.replayWindowSize,
+	tunnel1.startupAction,
+	tunnel2.tunnelCidr,
+	tunnel2.psk,
+	tunnel2.dpdTimeoutAction,
+	tunnel2.dpdTimeoutSeconds,
+	tunnel2.ikeVersions,
+	tunnel2.phase1DhGroupNumbers,
+	tunnel2.phase1EncryptionAlgorithms,
+	tunnel2.phase1IntegrityAlgorithms,
+	tunnel2.phase1LifetimeSeconds,
+	tunnel2.phase2DhGroupNumbers,
+	tunnel2.phase2EncryptionAlgorithms,
+	tunnel2.phase2IntegrityAlgorithms,
+	tunnel2.phase2LifetimeSeconds,
+	tunnel2.rekeyFuzzPercentage,
+	tunnel2.rekeyMarginTimeSeconds,
+	tunnel2.replayWindowSize,
+	tunnel2.startupAction)
 }
 
 func testAccAwsVpnConnectionConfigTags1(rBgpAsn int, tagKey1, tagValue1 string) string {

From a998c3356fd7b8027f71863e85ce0e163e1487da Mon Sep 17 00:00:00 2001
From: Repon Kumar Roy
Date: Sun, 13 Dec 2020 13:25:49 +0800
Subject: [PATCH 0206/1212] chore(docs): Remove quotes from tags

In general, tag keys don't require quotation marks if they are single
words. This commit makes the practice uniform throughout the AWS
provider documentation.
---
 examples/cognito-user-pool/main.tf                       | 4 ++--
 website/docs/r/neptune_event_subscription.html.markdown  | 2 +-
 .../docs/r/network_interface_sg_attachment.html.markdown | 6 +++---
 website/docs/r/s3_bucket.html.markdown                   | 4 ++--
 website/docs/r/wafv2_rule_group.html.markdown            | 4 ++--
 5 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/examples/cognito-user-pool/main.tf b/examples/cognito-user-pool/main.tf
index b2043938954..e1e4722a1f4 100644
--- a/examples/cognito-user-pool/main.tf
+++ b/examples/cognito-user-pool/main.tf
@@ -151,7 +151,7 @@ resource "aws_cognito_user_pool" "pool" {
   }
 
   tags = {
-    "Name" = "FooBar"
-    "Project" = "Terraform"
+    Name = "FooBar"
+    Project = "Terraform"
   }
 }
diff --git a/website/docs/r/neptune_event_subscription.html.markdown b/website/docs/r/neptune_event_subscription.html.markdown
index 133747fc5be..c476af91238 100644
--- a/website/docs/r/neptune_event_subscription.html.markdown
+++ b/website/docs/r/neptune_event_subscription.html.markdown
@@ -55,7 +55,7 @@ resource "aws_neptune_event_subscription" "default" {
   ]
 
   tags = {
-    "env" = "test"
+    env = "test"
   }
 }
 ```
diff --git a/website/docs/r/network_interface_sg_attachment.html.markdown b/website/docs/r/network_interface_sg_attachment.html.markdown
index 786c917a297..c3a16e2f637 100644
--- a/website/docs/r/network_interface_sg_attachment.html.markdown
+++ b/website/docs/r/network_interface_sg_attachment.html.markdown
@@ -47,13 +47,13 @@ resource "aws_instance" "instance" {
   ami = data.aws_ami.ami.id
 
   tags = {
-    "type" = "terraform-test-instance"
+    type = "terraform-test-instance"
   }
 }
 
 resource "aws_security_group" "sg" {
   tags = {
-    "type" = "terraform-test-security-group"
+    type = "terraform-test-security-group"
   }
 }
 
@@ -74,7 +74,7 @@ data "aws_instance" "instance" {
 resource
"aws_security_group" "sg" { tags = { - "type" = "terraform-test-security-group" + type = "terraform-test-security-group" } } diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index 334d30d3479..83227c87c75 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -117,8 +117,8 @@ resource "aws_s3_bucket" "bucket" { prefix = "log/" tags = { - "rule" = "log" - "autoclean" = "true" + rule = "log" + autoclean = "true" } transition { diff --git a/website/docs/r/wafv2_rule_group.html.markdown b/website/docs/r/wafv2_rule_group.html.markdown index d2ce90f13fa..26e43c86034 100644 --- a/website/docs/r/wafv2_rule_group.html.markdown +++ b/website/docs/r/wafv2_rule_group.html.markdown @@ -275,8 +275,8 @@ resource "aws_wafv2_rule_group" "example" { } tags = { - "Name" = "example-and-statement" - "Code" = "123456" + Name = "example-and-statement" + Code = "123456" } } ``` From fc0b670229e49bce22546ebee54ac2e2260cc384 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Dec 2020 09:15:45 -0500 Subject: [PATCH 0207/1212] build(deps): bump github.com/aws/aws-sdk-go in /awsproviderlint (#16744) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.36.0 to 1.36.7. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.36.0...v1.36.7) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- awsproviderlint/go.mod | 2 +- awsproviderlint/go.sum | 4 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 100 +++++++++- .../aws/session/custom_transport.go | 27 +++ ...ransport.go => custom_transport_go1.12.go} | 4 +- ...sport_1_5.go => custom_transport_go1.5.go} | 2 +- ...sport_1_6.go => custom_transport_go1.6.go} | 2 +- .../aws/aws-sdk-go/aws/session/doc.go | 27 +++ .../aws/aws-sdk-go/aws/session/env_config.go | 25 ++- .../aws/aws-sdk-go/aws/session/session.go | 187 ++++++++++++++---- .../aws-sdk-go/aws/session/shared_config.go | 13 ++ .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- awsproviderlint/vendor/modules.txt | 2 +- 13 files changed, 348 insertions(+), 49 deletions(-) create mode 100644 awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go rename awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/{cabundle_transport.go => custom_transport_go1.12.go} (88%) rename awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/{cabundle_transport_1_5.go => custom_transport_go1.5.go} (88%) rename awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/{cabundle_transport_1_6.go => custom_transport_go1.6.go} (90%) diff --git a/awsproviderlint/go.mod b/awsproviderlint/go.mod index a83718d80ef..6d99e006a13 100644 --- a/awsproviderlint/go.mod +++ b/awsproviderlint/go.mod @@ -3,7 +3,7 @@ module github.com/terraform-providers/terraform-provider-aws/awsproviderlint go 1.15 require ( - github.com/aws/aws-sdk-go v1.36.0 + github.com/aws/aws-sdk-go v1.36.7 github.com/bflad/tfproviderlint v0.21.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0 golang.org/x/tools v0.0.0-20200928201943-a0ef9b62deab diff --git a/awsproviderlint/go.sum b/awsproviderlint/go.sum index 61fccec7cad..85d4e1b68c9 100644 --- a/awsproviderlint/go.sum +++ b/awsproviderlint/go.sum @@ -55,8 +55,8 @@ github.com/armon/go-socks5 
v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.36.0 h1:CscTrS+szX5iu34zk2bZrChnGO/GMtUYgMK1Xzs2hYo= -github.com/aws/aws-sdk-go v1.36.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.36.7 h1:XoJPAjKoqvdL531XGWxKYn5eGX/xMoXzMN5fBtoyfSY= +github.com/aws/aws-sdk-go v1.36.7/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/bflad/gopaniccheck v0.1.0 h1:tJftp+bv42ouERmUMWLoUn/5bi/iQZjHPznM00cP/bU= github.com/bflad/gopaniccheck v0.1.0/go.mod h1:ZCj2vSr7EqVeDaqVsWN4n2MwdROx1YL+LFo47TSWtsA= github.com/bflad/tfproviderlint v0.21.0 h1:iSNU4khz+55oYA+5aXXMrz5Max4Mytb0JwPGhOwTIJo= diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index fc8c1f92921..78f2226071d 100644 --- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -2685,6 +2685,14 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "emr-containers": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "entitlement.marketplace": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -2843,6 +2851,18 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "fips-af-south-1": endpoint{ + Hostname: "fms-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "fips-ap-east-1": endpoint{ + Hostname: "fms-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, "fips-ap-northeast-1": endpoint{ Hostname: "fms-fips.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2885,6 +2905,12 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + "fips-eu-south-1": endpoint{ + Hostname: "fms-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, "fips-eu-west-1": endpoint{ Hostname: "fms-fips.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2903,6 +2929,12 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + "fips-me-south-1": endpoint{ + Hostname: "fms-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, "fips-sa-east-1": endpoint{ Hostname: "fms-fips.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -3224,6 +3256,14 @@ var awsPartition = partition{ }, }, }, + "healthlake": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, "honeycode": service{ Endpoints: endpoints{ @@ -4701,6 +4741,18 @@ var awsPartition = partition{ }, }, }, + "profile": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "projects.iot1click": service{ Endpoints: endpoints{ @@ -6786,12 +6838,36 @@ var awsPartition = partition{ 
"eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "xray-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "xray-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "xray-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "xray-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, }, @@ -9359,6 +9435,18 @@ var awsusgovPartition = partition{ "xray": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "xray-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "xray-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go new file mode 100644 index 00000000000..593aedc4218 --- /dev/null +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go @@ -0,0 +1,27 @@ +// +build go1.13 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCustomTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go similarity index 88% rename from awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go rename to awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go index ea9ebb6f6a2..1bf31cf8e56 100644 --- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go @@ -1,4 +1,4 @@ -// +build go1.7 +// +build !go1.13,go1.7 package session @@ -10,7 +10,7 @@ import ( // Transport that should be used when a custom CA bundle is specified with the // SDK. 
-func getCABundleTransport() *http.Transport {
+func getCustomTransport() *http.Transport {
 	return &http.Transport{
 		Proxy: http.ProxyFromEnvironment,
 		DialContext: (&net.Dialer{
diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go
similarity index 88%
rename from awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go
rename to awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go
index fec39dfc126..253d7bc9d55 100644
--- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go
+++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go
@@ -10,7 +10,7 @@ import (

 // Transport that should be used when a custom CA bundle is specified with the
 // SDK.
-func getCABundleTransport() *http.Transport {
+func getCustomTransport() *http.Transport {
 	return &http.Transport{
 		Proxy: http.ProxyFromEnvironment,
 		Dial: (&net.Dialer{
diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go
similarity index 90%
rename from awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go
rename to awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go
index 1c5a5391e65..db240605441 100644
--- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go
+++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go
@@ -10,7 +10,7 @@ import (

 // Transport that should be used when a custom CA bundle is specified with the
 // SDK.
-func getCABundleTransport() *http.Transport {
+func getCustomTransport() *http.Transport {
 	return &http.Transport{
 		Proxy: http.ProxyFromEnvironment,
 		Dial: (&net.Dialer{
diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
index cc461bd3230..9419b518d58 100644
--- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
+++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
@@ -208,6 +208,8 @@ env values as well.

 	AWS_SDK_LOAD_CONFIG=1

+Custom Shared Config and Credential Files
+
 Shared credentials file path can be set to instruct the SDK to use an
 alternative file for the shared credentials. If not set the file will be
 loaded from $HOME/.aws/credentials on Linux/Unix based systems, and
@@ -222,6 +224,8 @@ $HOME/.aws/config on Linux/Unix based systems, and

 	AWS_CONFIG_FILE=$HOME/my_shared_config

+Custom CA Bundle
+
 Path to a custom Certificate Authority (CA) bundle PEM file that the SDK
 will use instead of the default system's root CA bundle. Use this only
 if you want to replace the CA bundle the SDK uses for TLS requests.
@@ -242,6 +246,29 @@ Setting a custom HTTPClient in the aws.Config options will override this setting
 To use this option and custom HTTP client, the HTTP client needs to be provided
 when creating the session. Not the service client.

+Custom Client TLS Certificate
+
+The SDK supports the environment and session option being configured with
+Client TLS certificates that are sent as a part of the client's TLS handshake
+for client authentication. If used, both Cert and Key values are required. If
+one is missing, or either fails to load from its file, an error will
+be returned.
+
+The HTTP client's concrete Transport implementation must be an http.Transport,
+or creating the session will fail.
+
+	AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+	AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+
+This can also be configured via the session.Options ClientTLSCert and ClientTLSKey.
+
+	sess, err := session.NewSessionWithOptions(session.Options{
+		ClientTLSCert: myCertFile,
+		ClientTLSKey: myKeyFile,
+	})
+
+Custom EC2 IMDS Endpoint
+
 The endpoint of the EC2 IMDS client can be configured via the environment
 variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a
 Session. See Options.EC2IMDSEndpoint for more details.
diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
index d67c261d74f..3cd5d4b5ae1 100644
--- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
+++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -101,6 +101,18 @@ type envConfig struct {
 	// AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
 	CustomCABundle string

+	// Sets the TLS client certificate that should be used by the SDK's HTTP transport
+	// when making requests. The certificate must be paired with a TLS client key file.
+	//
+	// AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+	ClientTLSCert string
+
+	// Sets the TLS client key that should be used by the SDK's HTTP transport
+	// when making requests. The key must be paired with a TLS client certificate file.
+	//
+	// AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+	ClientTLSKey string
+
 	csmEnabled  string
 	CSMEnabled  *bool
 	CSMPort     string
@@ -219,6 +231,15 @@ var (
 	ec2IMDSEndpointEnvKey = []string{
 		"AWS_EC2_METADATA_SERVICE_ENDPOINT",
 	}
+	useCABundleKey = []string{
+		"AWS_CA_BUNDLE",
+	}
+	useClientTLSCert = []string{
+		"AWS_SDK_GO_CLIENT_TLS_CERT",
+	}
+	useClientTLSKey = []string{
+		"AWS_SDK_GO_CLIENT_TLS_KEY",
+	}
 )

 // loadEnvConfig retrieves the SDK's environment configuration.
@@ -302,7 +323,9 @@ func envConfigLoad(enableSharedConfig bool) (envConfig, error) {
 		cfg.SharedConfigFile = defaults.SharedConfigFilename()
 	}

-	cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
+	setFromEnvVal(&cfg.CustomCABundle, useCABundleKey)
+	setFromEnvVal(&cfg.ClientTLSCert, useClientTLSCert)
+	setFromEnvVal(&cfg.ClientTLSKey, useClientTLSKey)

 	var err error
 	// STS Regional Endpoint variable
diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
index 6430a7f1526..08713cc3474 100644
--- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
+++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -25,6 +25,13 @@ const (
 	// ErrCodeSharedConfig represents an error that occurs in the shared
 	// configuration logic
 	ErrCodeSharedConfig = "SharedConfigErr"
+
+	// ErrCodeLoadCustomCABundle error code for unable to load custom CA bundle.
+	ErrCodeLoadCustomCABundle = "LoadCustomCABundleError"
+
+	// ErrCodeLoadClientTLSCert error code for unable to load client TLS
+	// certificate or key
+	ErrCodeLoadClientTLSCert = "LoadClientTLSCertError"
 )

 // ErrSharedConfigSourceCollision will be returned if a section contains both
@@ -229,17 +236,46 @@ type Options struct {
 	// the SDK will use instead of the default system's root CA bundle. Use this
 	// only if you want to replace the CA bundle the SDK uses for TLS requests.
 	//
-	// Enabling this option will attempt to merge the Transport into the SDK's HTTP
-	// client. If the client's Transport is not a http.Transport an error will be
-	// returned. If the Transport's TLS config is set this option will cause the SDK
+	// The HTTP client's concrete Transport implementation must be an http.Transport,
+	// or creating the session will fail.
+	//
+	// If the Transport's TLS config is set this option will cause the SDK
 	// to overwrite the Transport's TLS config's RootCAs value. If the CA
 	// bundle reader contains multiple certificates all of them will be loaded.
 	//
-	// The Session option CustomCABundle is also available when creating sessions
-	// to also enable this feature. CustomCABundle session option field has priority
-	// over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
+	// Can also be specified via the environment variable:
+	//
+	//	AWS_CA_BUNDLE=$HOME/ca_bundle
+	//
+	// Can also be specified via the shared config field:
+	//
+	//	ca_bundle = $HOME/ca_bundle
 	CustomCABundle io.Reader

+	// Reader for the TLS client certificate that should be used by the SDK's
+	// HTTP transport when making requests. The certificate must be paired with
+	// a TLS client key file. Will be ignored unless both are provided.
+	//
+	// The HTTP client's concrete Transport implementation must be an http.Transport,
+	// or creating the session will fail.
+	//
+	// Can also be specified via the environment variable:
+	//
+	//	AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+	ClientTLSCert io.Reader
+
+	// Reader for the TLS client key that should be used by the SDK's HTTP
+	// transport when making requests. The key must be paired with a TLS client
+	// certificate file. Will be ignored unless both are provided.
+	//
+	// The HTTP client's concrete Transport implementation must be an http.Transport,
+	// or creating the session will fail.
+	//
+	// Can also be specified via the environment variable:
+	//
+	//	AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+	ClientTLSKey io.Reader
+
 	// The handlers that the session and all API clients will be created with.
 	// This must be a complete set of handlers. Use the defaults.Handlers()
 	// function to initialize this value before changing the handlers to be
@@ -319,17 +355,6 @@ func NewSessionWithOptions(opts Options) (*Session, error) {
 		envCfg.EnableSharedConfig = true
 	}

-	// Only use AWS_CA_BUNDLE if session option is not provided.
-	if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil {
-		f, err := os.Open(envCfg.CustomCABundle)
-		if err != nil {
-			return nil, awserr.New("LoadCustomCABundleError",
-				"failed to open custom CA bundle PEM file", err)
-		}
-		defer f.Close()
-		opts.CustomCABundle = f
-	}
-
 	return newSession(opts, envCfg, &opts.Config)
 }

@@ -460,6 +485,10 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
 		return nil, err
 	}

+	if err := setTLSOptions(&opts, cfg, envCfg, sharedCfg); err != nil {
+		return nil, err
+	}
+
 	s := &Session{
 		Config:   cfg,
 		Handlers: handlers,
@@ -479,13 +508,6 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
 		}
 	}

-	// Setup HTTP client with custom cert bundle if enabled
-	if opts.CustomCABundle != nil {
-		if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil {
-			return nil, err
-		}
-	}
-
 	return s, nil
 }

@@ -529,22 +551,83 @@ func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) {
 	return csmConfig{}, nil
 }

-func loadCustomCABundle(s *Session, bundle io.Reader) error {
+func setTLSOptions(opts *Options, cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig) error {
+	// CA Bundle can be specified in both the environment variable and the shared config file.
+	var caBundleFilename = envCfg.CustomCABundle
+	if len(caBundleFilename) == 0 {
+		caBundleFilename = sharedCfg.CustomCABundle
+	}
+
+	// Only use environment value if session option is not provided.
+	customTLSOptions := map[string]struct {
+		filename string
+		field    *io.Reader
+		errCode  string
+	}{
+		"custom CA bundle PEM":   {filename: caBundleFilename, field: &opts.CustomCABundle, errCode: ErrCodeLoadCustomCABundle},
+		"custom client TLS cert": {filename: envCfg.ClientTLSCert, field: &opts.ClientTLSCert, errCode: ErrCodeLoadClientTLSCert},
+		"custom client TLS key":  {filename: envCfg.ClientTLSKey, field: &opts.ClientTLSKey, errCode: ErrCodeLoadClientTLSCert},
+	}
+	for name, v := range customTLSOptions {
+		if len(v.filename) != 0 && *v.field == nil {
+			f, err := os.Open(v.filename)
+			if err != nil {
+				return awserr.New(v.errCode, fmt.Sprintf("failed to open %s file", name), err)
+			}
+			defer f.Close()
+			*v.field = f
+		}
+	}
+
+	// Setup HTTP client with custom cert bundle if enabled
+	if opts.CustomCABundle != nil {
+		if err := loadCustomCABundle(cfg.HTTPClient, opts.CustomCABundle); err != nil {
+			return err
+		}
+	}
+
+	// Setup HTTP client TLS certificate and key for client TLS authentication.
+	if opts.ClientTLSCert != nil && opts.ClientTLSKey != nil {
+		if err := loadClientTLSCert(cfg.HTTPClient, opts.ClientTLSCert, opts.ClientTLSKey); err != nil {
+			return err
+		}
+	} else if opts.ClientTLSCert == nil && opts.ClientTLSKey == nil {
+		// Do nothing if neither value is available.
+
+	} else {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			fmt.Sprintf("client TLS cert(%t) and key(%t) must both be provided",
+				opts.ClientTLSCert != nil, opts.ClientTLSKey != nil), nil)
+	}
+
+	return nil
+}
+
+func getHTTPTransport(client *http.Client) (*http.Transport, error) {
 	var t *http.Transport
-	switch v := s.Config.HTTPClient.Transport.(type) {
+	switch v := client.Transport.(type) {
 	case *http.Transport:
 		t = v
 	default:
-		if s.Config.HTTPClient.Transport != nil {
-			return awserr.New("LoadCustomCABundleError",
-				"unable to load custom CA bundle, HTTPClient's transport unsupported type", nil)
+		if client.Transport != nil {
+			return nil, fmt.Errorf("unsupported transport, %T", client.Transport)
 		}
 	}
 	if t == nil {
 		// Nil transport implies `http.DefaultTransport` should be used. Since
 		// the SDK cannot modify, nor copy the `DefaultTransport` specifying
 		// the values the next closest behavior.
-		t = getCABundleTransport()
+		t = getCustomTransport()
+	}
+
+	return t, nil
+}
+
+func loadCustomCABundle(client *http.Client, bundle io.Reader) error {
+	t, err := getHTTPTransport(client)
+	if err != nil {
+		return awserr.New(ErrCodeLoadCustomCABundle,
+			"unable to load custom CA bundle, HTTPClient's transport unsupported type", err)
 	}

 	p, err := loadCertPool(bundle)
@@ -556,7 +639,7 @@ func loadCustomCABundle(s *Session, bundle io.Reader) error {
 	}
 	t.TLSClientConfig.RootCAs = p

-	s.Config.HTTPClient.Transport = t
+	client.Transport = t

 	return nil
 }
@@ -564,19 +647,57 @@ func loadCustomCABundle(s *Session, bundle io.Reader) error {
 func loadCertPool(r io.Reader) (*x509.CertPool, error) {
 	b, err := ioutil.ReadAll(r)
 	if err != nil {
-		return nil, awserr.New("LoadCustomCABundleError",
+		return nil, awserr.New(ErrCodeLoadCustomCABundle,
 			"failed to read custom CA bundle PEM file", err)
 	}

 	p := x509.NewCertPool()
 	if !p.AppendCertsFromPEM(b) {
-		return nil, awserr.New("LoadCustomCABundleError",
+		return nil, awserr.New(ErrCodeLoadCustomCABundle,
 			"failed to load custom CA bundle PEM file", err)
 	}

 	return p, nil
 }

+func loadClientTLSCert(client *http.Client, certFile, keyFile io.Reader) error {
+	t, err := getHTTPTransport(client)
+	if err != nil {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			"unable to get usable HTTP transport from client", err)
+	}
+
+	cert, err := ioutil.ReadAll(certFile)
+	if err != nil {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			"unable to read client TLS cert file", err)
+	}
+
+	key, err := ioutil.ReadAll(keyFile)
+	if err != nil {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			"unable to read client TLS key file", err)
+	}
+
+	clientCert, err := tls.X509KeyPair(cert, key)
+	if err != nil {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			"unable to load x509 key pair from client cert", err)
+	}
+
+	tlsCfg := t.TLSClientConfig
+	if tlsCfg == nil {
+		tlsCfg = &tls.Config{}
+	}
+
+	tlsCfg.Certificates = append(tlsCfg.Certificates, clientCert)
+
+	t.TLSClientConfig = tlsCfg
+	client.Transport = t
+
+	return nil
+}
+
 func mergeConfigSrcs(cfg, userCfg *aws.Config,
 	envCfg envConfig, sharedCfg sharedConfig,
 	handlers request.Handlers,
diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
index 680805a38ad..be7daacf308 100644
--- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
+++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
@@ -34,6 +34,9 @@ const (
 	// Additional Config fields
 	regionKey = `region`

+	// custom CA Bundle filename
+	customCABundleKey = `ca_bundle`
+
 	// endpoint discovery group
 	enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional

@@ -90,6 +93,15 @@ type sharedConfig struct {
 	// region
 	Region string

+	// CustomCABundle is the file path to a PEM file the SDK will read and
+	// use to configure the HTTP transport with additional CA certs that are
+	// not present in the platform's default CA store.
+	//
+	// This value will be ignored if the file does not exist.
+	//
+	//	ca_bundle
+	CustomCABundle string
+
 	// EnableEndpointDiscovery can be enabled in the shared config by setting
 	// endpoint_discovery_enabled to true
 	//
@@ -276,6 +288,7 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
 	updateString(&cfg.SourceProfileName, section, sourceProfileKey)
 	updateString(&cfg.CredentialSource, section, credentialSourceKey)
 	updateString(&cfg.Region, section, regionKey)
+	updateString(&cfg.CustomCABundle, section, customCABundleKey)

 	if section.Has(roleDurationSecondsKey) {
 		d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second
diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go
index c0e056ea83a..ecd440191b0 100644
--- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"

 // SDKVersion is the version of this SDK
-const SDKVersion = "1.36.0"
+const SDKVersion = "1.36.7"
diff --git a/awsproviderlint/vendor/modules.txt b/awsproviderlint/vendor/modules.txt
index 962d179f461..fa6ab616a68 100644
--- a/awsproviderlint/vendor/modules.txt
+++ b/awsproviderlint/vendor/modules.txt
@@ -12,7 +12,7 @@ cloud.google.com/go/storage
 github.com/agext/levenshtein
 # github.com/apparentlymart/go-textseg v1.0.0
 github.com/apparentlymart/go-textseg/textseg
-# github.com/aws/aws-sdk-go v1.36.0
+# github.com/aws/aws-sdk-go v1.36.7
 ## explicit
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/arn
From 87bdbf5f4e8ef4751dfc9a248c5b7e179e85f387 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Mon, 14 Dec 2020 09:32:36 -0500
Subject: [PATCH 0208/1212] r/aws_route_table: Refactor acceptance test
 configuration generator so as to avoid 'terrafmt' linting exclusion (#16735)

* r/aws_route_table: Refactor 'TestAccAWSRouteTable_MultipleRoutes' so that 'resource_aws_route_table_test.go' does not need exclusion from terrafmt linting.

Acceptance test output:

$ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteTable_MultipleRoutes'
==> Checking that code complies with gofmt requirements...
TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccAWSRouteTable_MultipleRoutes -timeout 120m
=== RUN   TestAccAWSRouteTable_MultipleRoutes
=== PAUSE TestAccAWSRouteTable_MultipleRoutes
=== CONT  TestAccAWSRouteTable_MultipleRoutes
--- PASS: TestAccAWSRouteTable_MultipleRoutes (145.10s)
PASS
ok  	github.com/terraform-providers/terraform-provider-aws/aws	145.183s

* No need to exclude 'resource_aws_route_table_test.go' from terrafmt linting.
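For reference, the shape the refactored generator adopts below is Terraform's `dynamic` block driven by a `locals` list: each entry names the one destination attribute and one target attribute to populate, and every other argument is left `null`. A minimal, self-contained sketch of the pattern (resource names and CIDRs here are illustrative, not the test's actual `fmt.Sprintf` placeholders):

```
resource "aws_vpc" "example" {
  cidr_block = "10.1.0.0/16"
}

resource "aws_internet_gateway" "example" {
  vpc_id = aws_vpc.example.id
}

locals {
  routes = [
    # Each entry says which destination/target attribute to set; the
    # dynamic block below sets exactly that attribute and nulls the rest.
    {
      destination_attr  = "cidr_block"
      destination_value = "10.2.0.0/16"
      target_attr       = "gateway_id"
      target_value      = aws_internet_gateway.example.id
    },
  ]
}

resource "aws_route_table" "example" {
  vpc_id = aws_vpc.example.id

  dynamic "route" {
    for_each = local.routes
    content {
      cidr_block = (route.value["destination_attr"] == "cidr_block") ? route.value["destination_value"] : null
      gateway_id = (route.value["target_attr"] == "gateway_id") ? route.value["target_value"] : null
    }
  }
}
```

Because the repeated blocks become ordinary HCL, terrafmt can parse and format the embedded configuration, which is what allows the workflow exclusions below to be dropped.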
--- .github/workflows/acctest-terraform-lint.yml | 2 - aws/resource_aws_route_table_test.go | 90 +++++++++----------- 2 files changed, 41 insertions(+), 51 deletions(-) diff --git a/.github/workflows/acctest-terraform-lint.yml b/.github/workflows/acctest-terraform-lint.yml index d6ad542d3a8..10fcff35f1e 100644 --- a/.github/workflows/acctest-terraform-lint.yml +++ b/.github/workflows/acctest-terraform-lint.yml @@ -40,7 +40,6 @@ jobs: | grep -v resource_aws_kinesis_stream_test.go \ | grep -v resource_aws_kms_grant_test.go \ | grep -v resource_aws_quicksight_user_test.go \ - | grep -v resource_aws_route_table_test.go \ | grep -v resource_aws_s3_bucket_object_test.go \ | grep -v resource_aws_sns_platform_application_test.go \ | xargs -I {} terrafmt diff --check --fmtcompat {} @@ -71,7 +70,6 @@ jobs: | grep -v resource_aws_kms_grant_test.go \ | grep -v resource_aws_lambda_permission_test.go \ | grep -v resource_aws_quicksight_user_test.go \ - | grep -v resource_aws_route_table_test.go \ | grep -v resource_aws_s3_bucket_object_test.go \ | grep -v resource_aws_sns_platform_application_test.go \ | ./scripts/validate-terraform.sh diff --git a/aws/resource_aws_route_table_test.go b/aws/resource_aws_route_table_test.go index 95769e51fc3..fca6681a0e2 100644 --- a/aws/resource_aws_route_table_test.go +++ b/aws/resource_aws_route_table_test.go @@ -1933,58 +1933,50 @@ resource "aws_vpc_peering_connection" "test" { } } +locals { + routes = [ + { + destination_attr = %[2]q + destination_value = %[3]q + target_attr = %[4]q + target_value = %[5]s.id + }, + { + destination_attr = %[6]q + destination_value = %[7]q + target_attr = %[8]q + target_value = %[9]s.id + }, + { + destination_attr = %[10]q + destination_value = %[11]q + target_attr = %[12]q + target_value = %[13]s.id + } + ] +} + resource "aws_route_table" "test" { vpc_id = aws_vpc.test.id - route { - # Destination. - cidr_block = (%[2]q == "cidr_block") ? %[3]q : null - ipv6_cidr_block = (%[2]q == "ipv6_cidr_block") ? %[3]q : null - - # Target - egress_only_gateway_id = (%[4]q == "egress_only_gateway_id") ? %[5]s.id : null - gateway_id = (%[4]q == "gateway_id") ? %[5]s.id : null - instance_id = (%[4]q == "instance_id") ? %[5]s.id : null - local_gateway_id = (%[4]q == "local_gateway_id") ? %[5]s.id : null - nat_gateway_id = (%[4]q == "nat_gateway_id") ? %[5]s.id : null - network_interface_id = (%[4]q == "network_interface_id") ? %[5]s.id : null - transit_gateway_id = (%[4]q == "transit_gateway_id") ? %[5]s.id : null - vpc_endpoint_id = (%[4]q == "vpc_endpoint_id") ? %[5]s.id : null - vpc_peering_connection_id = (%[4]q == "vpc_peering_connection_id") ? %[5]s.id : null - } - - route { - # Destination. - cidr_block = (%[6]q == "cidr_block") ? %[7]q : null - ipv6_cidr_block = (%[6]q == "ipv6_cidr_block") ? %[7]q : null - - # Target - egress_only_gateway_id = (%[8]q == "egress_only_gateway_id") ? %[9]s.id : null - gateway_id = (%[8]q == "gateway_id") ? %[9]s.id : null - instance_id = (%[8]q == "instance_id") ? %[9]s.id : null - local_gateway_id = (%[8]q == "local_gateway_id") ? %[9]s.id : null - nat_gateway_id = (%[8]q == "nat_gateway_id") ? %[9]s.id : null - network_interface_id = (%[8]q == "network_interface_id") ? %[9]s.id : null - transit_gateway_id = (%[8]q == "transit_gateway_id") ? %[9]s.id : null - vpc_endpoint_id = (%[8]q == "vpc_endpoint_id") ? %[9]s.id : null - vpc_peering_connection_id = (%[8]q == "vpc_peering_connection_id") ? %[9]s.id : null - } - - route { - # Destination. - cidr_block = (%[10]q == "cidr_block") ? 
%[11]q : null - ipv6_cidr_block = (%[10]q == "ipv6_cidr_block") ? %[11]q : null - - # Target - egress_only_gateway_id = (%[12]q == "egress_only_gateway_id") ? %[13]s.id : null - gateway_id = (%[12]q == "gateway_id") ? %[13]s.id : null - instance_id = (%[12]q == "instance_id") ? %[13]s.id : null - local_gateway_id = (%[12]q == "local_gateway_id") ? %[13]s.id : null - nat_gateway_id = (%[12]q == "nat_gateway_id") ? %[13]s.id : null - network_interface_id = (%[12]q == "network_interface_id") ? %[13]s.id : null - transit_gateway_id = (%[12]q == "transit_gateway_id") ? %[13]s.id : null - vpc_endpoint_id = (%[12]q == "vpc_endpoint_id") ? %[13]s.id : null - vpc_peering_connection_id = (%[12]q == "vpc_peering_connection_id") ? %[13]s.id : null + dynamic "route" { + for_each = local.routes + content { + # Destination. + cidr_block = (route.value["destination_attr"] == "cidr_block") ? route.value["destination_value"] : null + ipv6_cidr_block = (route.value["destination_attr"] == "ipv6_cidr_block") ? route.value["destination_value"] : null + + # Target. + egress_only_gateway_id = (route.value["target_attr"] == "egress_only_gateway_id") ? route.value["target_value"] : null + gateway_id = (route.value["target_attr"] == "gateway_id") ? route.value["target_value"] : null + instance_id = (route.value["target_attr"] == "instance_id") ? route.value["target_value"] : null + local_gateway_id = (route.value["target_attr"] == "local_gateway_id") ? route.value["target_value"] : null + nat_gateway_id = (route.value["target_attr"] == "nat_gateway_id") ? route.value["target_value"] : null + network_interface_id = (route.value["target_attr"] == "network_interface_id") ? route.value["target_value"] : null + transit_gateway_id = (route.value["target_attr"] == "transit_gateway_id") ? route.value["target_value"] : null + vpc_endpoint_id = (route.value["target_attr"] == "vpc_endpoint_id") ? route.value["target_value"] : null + vpc_peering_connection_id = (route.value["target_attr"] == "vpc_peering_connection_id") ? 
route.value["target_value"] : null + } } tags = { From 803fd63e92946c9c615bab84ebf19a2e023fd59a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Dec 2020 15:08:24 +0000 Subject: [PATCH 0209/1212] build(deps): bump github.com/aws/aws-sdk-go from 1.36.0 to 1.36.7 (#16726) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 64399b168cb..f29f19a9217 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/terraform-providers/terraform-provider-aws go 1.14 require ( - github.com/aws/aws-sdk-go v1.36.0 + github.com/aws/aws-sdk-go v1.36.7 github.com/beevik/etree v1.1.0 github.com/fatih/color v1.9.0 // indirect github.com/hashicorp/aws-sdk-go-base v0.7.0 diff --git a/go.sum b/go.sum index 4e66d75605f..3a28bf9bdf2 100644 --- a/go.sum +++ b/go.sum @@ -64,8 +64,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.31.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.36.0 h1:CscTrS+szX5iu34zk2bZrChnGO/GMtUYgMK1Xzs2hYo= -github.com/aws/aws-sdk-go v1.36.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.36.7 h1:XoJPAjKoqvdL531XGWxKYn5eGX/xMoXzMN5fBtoyfSY= +github.com/aws/aws-sdk-go v1.36.7/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs= github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= From 1ef6047c16dbbb9c7a72d629ca7a0d976ec76efe Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 18 Nov 2020 16:09:15 -0800 Subject: [PATCH 0210/1212] Adds launch_template to aws_autoscaling_group data source --- aws/data_source_aws_autoscaling_group.go | 33 ++++++- aws/data_source_aws_autoscaling_group_test.go | 92 ++++++++++++++++--- 2 files changed, 107 insertions(+), 18 deletions(-) diff --git a/aws/data_source_aws_autoscaling_group.go b/aws/data_source_aws_autoscaling_group.go index 9510fef8e81..ad92aa60af4 100644 --- a/aws/data_source_aws_autoscaling_group.go +++ b/aws/data_source_aws_autoscaling_group.go @@ -49,6 +49,26 @@ func dataSourceAwsAutoscalingGroup() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "launch_template": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "version": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, "load_balancers": { Type: schema.TypeSet, Computed: true, @@ -120,7 +140,7 @@ func dataSourceAwsAutoscalingGroupRead(d *schema.ResourceData, meta interface{}) log.Printf("[DEBUG] Checking for error: %s", err) if err != nil { - return fmt.Errorf("error describing AutoScaling Groups: %s", err) + return fmt.Errorf("error describing AutoScaling Groups: %w", err) } log.Printf("[DEBUG] Found Autoscaling Group: %s", result) @@ -144,15 +164,18 @@ func dataSourceAwsAutoscalingGroupRead(d *schema.ResourceData, meta interface{}) d.Set("name", group.AutoScalingGroupName) d.Set("arn", 
group.AutoScalingGroupARN) if err := d.Set("availability_zones", aws.StringValueSlice(group.AvailabilityZones)); err != nil { - return err + return fmt.Errorf("error setting availability_zones: %w", err) } d.Set("default_cooldown", group.DefaultCooldown) d.Set("desired_capacity", group.DesiredCapacity) d.Set("health_check_grace_period", group.HealthCheckGracePeriod) d.Set("health_check_type", group.HealthCheckType) d.Set("launch_configuration", group.LaunchConfigurationName) + if err := d.Set("launch_template", flattenLaunchTemplateSpecification(group.LaunchTemplate)); err != nil { + return fmt.Errorf("error setting launch_template: %w", err) + } if err := d.Set("load_balancers", aws.StringValueSlice(group.LoadBalancerNames)); err != nil { - return err + return fmt.Errorf("error setting load_balancers: %w", err) } d.Set("new_instances_protected_from_scale_in", group.NewInstancesProtectedFromScaleIn) d.Set("max_size", group.MaxSize) @@ -161,10 +184,10 @@ func dataSourceAwsAutoscalingGroupRead(d *schema.ResourceData, meta interface{}) d.Set("service_linked_role_arn", group.ServiceLinkedRoleARN) d.Set("status", group.Status) if err := d.Set("target_group_arns", aws.StringValueSlice(group.TargetGroupARNs)); err != nil { - return err + return fmt.Errorf("error setting target_group_arns: %w", err) } if err := d.Set("termination_policies", aws.StringValueSlice(group.TerminationPolicies)); err != nil { - return err + return fmt.Errorf("error setting termination_policies: %w", err) } d.Set("vpc_zone_identifier", group.VPCZoneIdentifier) diff --git a/aws/data_source_aws_autoscaling_group_test.go b/aws/data_source_aws_autoscaling_group_test.go index 0ec717d381e..970215b10fe 100644 --- a/aws/data_source_aws_autoscaling_group_test.go +++ b/aws/data_source_aws_autoscaling_group_test.go @@ -9,9 +9,10 @@ import ( ) func TestAccAwsAutoScalingGroupDataSource_basic(t *testing.T) { - datasourceName := "data.aws_autoscaling_group.good_match" - resourceName := "aws_autoscaling_group.foo" - rName := fmt.Sprintf("tf-test-asg-%d", acctest.RandInt()) + datasourceName := "data.aws_autoscaling_group.test" + resourceName := "aws_autoscaling_group.match" + rName := acctest.RandomWithPrefix("tf-acc-test") + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -27,6 +28,7 @@ func TestAccAwsAutoScalingGroupDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(datasourceName, "health_check_grace_period", resourceName, "health_check_grace_period"), resource.TestCheckResourceAttrPair(datasourceName, "health_check_type", resourceName, "health_check_type"), resource.TestCheckResourceAttrPair(datasourceName, "launch_configuration", resourceName, "launch_configuration"), + resource.TestCheckResourceAttr(datasourceName, "launch_template.#", "0"), resource.TestCheckResourceAttrPair(datasourceName, "load_balancers.#", resourceName, "load_balancers.#"), resource.TestCheckResourceAttr(datasourceName, "new_instances_protected_from_scale_in", "false"), resource.TestCheckResourceAttrPair(datasourceName, "max_size", resourceName, "max_size"), @@ -39,6 +41,41 @@ func TestAccAwsAutoScalingGroupDataSource_basic(t *testing.T) { }) } +func TestAccAwsAutoScalingGroupDataSource_launchTemplate(t *testing.T) { + datasourceName := "data.aws_autoscaling_group.test" + resourceName := "aws_autoscaling_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + 
Config: testAccAutoScalingGroupDataResourceConfig_launchTemplate(),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttrPair(datasourceName, "name", resourceName, "name"),
+					resource.TestCheckResourceAttrPair(datasourceName, "arn", resourceName, "arn"),
+					resource.TestCheckResourceAttrPair(datasourceName, "availability_zones.#", resourceName, "availability_zones.#"),
+					resource.TestCheckResourceAttrPair(datasourceName, "default_cooldown", resourceName, "default_cooldown"),
+					resource.TestCheckResourceAttrPair(datasourceName, "desired_capacity", resourceName, "desired_capacity"),
+					resource.TestCheckResourceAttrPair(datasourceName, "health_check_grace_period", resourceName, "health_check_grace_period"),
+					resource.TestCheckResourceAttrPair(datasourceName, "health_check_type", resourceName, "health_check_type"),
+					resource.TestCheckResourceAttr(datasourceName, "launch_configuration", ""),
+					resource.TestCheckResourceAttrPair(datasourceName, "launch_template.#", resourceName, "launch_template.#"),
+					resource.TestCheckResourceAttrPair(datasourceName, "load_balancers.#", resourceName, "load_balancers.#"),
+					resource.TestCheckResourceAttrPair(datasourceName, "launch_template.0.id", resourceName, "launch_template.0.id"),
+					resource.TestCheckResourceAttrPair(datasourceName, "launch_template.0.name", resourceName, "launch_template.0.name"),
+					resource.TestCheckResourceAttrPair(datasourceName, "launch_template.0.version", resourceName, "launch_template.0.version"),
+					resource.TestCheckResourceAttr(datasourceName, "new_instances_protected_from_scale_in", "false"),
+					resource.TestCheckResourceAttrPair(datasourceName, "max_size", resourceName, "max_size"),
+					resource.TestCheckResourceAttrPair(datasourceName, "min_size", resourceName, "min_size"),
+					resource.TestCheckResourceAttrPair(datasourceName, "target_group_arns.#", resourceName, "target_group_arns.#"),
+					resource.TestCheckResourceAttr(datasourceName, "vpc_zone_identifier", ""),
+				),
+			},
+		},
+	})
+}
+
 // Lookup based on AutoScalingGroupName
 func testAccAutoScalingGroupDataResourceConfig(rName string) string {
 	return composeConfig(
@@ -46,14 +83,12 @@ func testAccAutoScalingGroupDataResourceConfig(rName string) string {
 	testAccAvailableAZsNoOptInConfig(),
 	testAccAvailableEc2InstanceTypeForAvailabilityZone("data.aws_availability_zones.available.names[0]", "t3.micro", "t2.micro"),
 	fmt.Sprintf(`
-resource "aws_launch_configuration" "data_source_aws_autoscaling_group_test" {
-  name          = "%[1]s"
-  image_id      = data.aws_ami.amzn-ami-minimal-hvm-ebs.id
-  instance_type = data.aws_ec2_instance_type_offering.available.instance_type
+data "aws_autoscaling_group" "test" {
+  name = aws_autoscaling_group.match.name
 }

-resource "aws_autoscaling_group" "foo" {
-  name                      = "%[1]s_foo"
+resource "aws_autoscaling_group" "match" {
+  name                      = "%[1]s_match"
   max_size                  = 0
   min_size                  = 0
   health_check_grace_period = 300
@@ -64,8 +99,8 @@ resource "aws_autoscaling_group" "foo" {
   availability_zones = [data.aws_availability_zones.available.names[0], data.aws_availability_zones.available.names[1]]
 }

-resource "aws_autoscaling_group" "bar" {
-  name                      = "%[1]s_bar"
+resource "aws_autoscaling_group" "no_match" {
+  name                      = "%[1]s_no_match"
   max_size                  = 0
   min_size                  = 0
   health_check_grace_period = 300
@@ -76,8 +111,39 @@ resource "aws_autoscaling_group" "bar" {
   availability_zones = [data.aws_availability_zones.available.names[0], data.aws_availability_zones.available.names[1]]
 }

-data "aws_autoscaling_group" "good_match" {
-  name = aws_autoscaling_group.foo.name
+resource
"aws_launch_configuration" "data_source_aws_autoscaling_group_test" { + name = "%[1]s" + image_id = data.aws_ami.amzn-ami-minimal-hvm-ebs.id + instance_type = data.aws_ec2_instance_type_offering.available.instance_type } `, rName)) } + +func testAccAutoScalingGroupDataResourceConfig_launchTemplate() string { + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAvailableAZsNoOptInConfig(), + testAccAvailableEc2InstanceTypeForAvailabilityZone("data.aws_availability_zones.available.names[0]", "t3.micro", "t2.micro"), + fmt.Sprintf(` +data "aws_autoscaling_group" "test" { + name = aws_autoscaling_group.test.name +} + +resource "aws_autoscaling_group" "test" { + availability_zones = [data.aws_availability_zones.available.names[0]] + desired_capacity = 0 + max_size = 0 + min_size = 0 + launch_template { + id = aws_launch_template.test.id + version = aws_launch_template.test.default_version + } +} + +resource "aws_launch_template" "test" { + name_prefix = "test" + image_id = data.aws_ami.amzn-ami-minimal-hvm-ebs.id + instance_type = data.aws_ec2_instance_type_offering.available.instance_type +} +`)) +} From 5a8571cd8887699ef2b57236c7ff4d8b43fe5ea3 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 14 Dec 2020 12:31:36 -0800 Subject: [PATCH 0211/1212] Update CHANGELOG for #16297 --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index dfd88882056..71cab1674f4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,10 @@ FEATURES * **New Resource:** `aws_lakeformation_resource` ([#13267](https://github.com/hashicorp/terraform-provider-aws/issues/13267)) +ENHANCEMENTS + +* data-source/aws_autoscaling_group: Adds `launch_template` attribute [GH-16297] + ## 3.21.0 (December 11, 2020) NOTES From ad7a9daa46c4b1bf4ea162f23019ce3c5c48660f Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 14 Dec 2020 15:06:41 -0800 Subject: [PATCH 0212/1212] Adds note about ElastiCache Replication Group not supporting auto minor version upgrade parameter --- website/docs/r/elasticache_replication_group.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/elasticache_replication_group.html.markdown b/website/docs/r/elasticache_replication_group.html.markdown index 908a49fc2bd..03c4bc96d53 100644 --- a/website/docs/r/elasticache_replication_group.html.markdown +++ b/website/docs/r/elasticache_replication_group.html.markdown @@ -103,7 +103,7 @@ The following arguments are supported: * `number_cache_clusters` - (Required for Cluster Mode Disabled) The number of cache clusters (primary and replicas) this replication group will have. If Multi-AZ is enabled, the value of this parameter must be at least 2. Updates will occur before other modifications. * `node_type` - (Required) The compute and memory capacity of the nodes in the node group. * `automatic_failover_enabled` - (Optional) Specifies whether a read-only replica will be automatically promoted to read/write primary if the existing primary fails. If true, Multi-AZ is enabled for this replication group. If false, Multi-AZ is disabled for this replication group. Must be enabled for Redis (cluster mode enabled) replication groups. Defaults to `false`. -* `auto_minor_version_upgrade` - (Optional) Specifies whether a minor engine upgrades will be applied automatically to the underlying Cache Cluster instances during the maintenance window. Defaults to `true`. 
+* `auto_minor_version_upgrade` - (Optional) Specifies whether minor engine upgrades will be applied automatically to the underlying Cache Cluster instances during the maintenance window. This parameter is currently not supported by the AWS API. Defaults to `true`.
 * `availability_zones` - (Optional) A list of EC2 availability zones in which the replication group's cache clusters will be created. The order of the availability zones in the list is not important.
 * `engine` - (Optional) The name of the cache engine to be used for the clusters in this replication group. e.g. `redis`
 * `at_rest_encryption_enabled` - (Optional) Whether to enable encryption at rest.
From 4855e646233a3db7ddf9794c5f26b2ee7565a4e8 Mon Sep 17 00:00:00 2001
From: Shuhei Kitagawa
Date: Tue, 15 Dec 2020 08:32:34 +0900
Subject: [PATCH 0213/1212] Use codestarconnections.ProviderType_Values()
 instead

Co-authored-by: Graham Davison
---
 aws/resource_aws_codestarconnections_connection.go | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/aws/resource_aws_codestarconnections_connection.go b/aws/resource_aws_codestarconnections_connection.go
index a96f3b581e7..a0b8d1cb9e6 100644
--- a/aws/resource_aws_codestarconnections_connection.go
+++ b/aws/resource_aws_codestarconnections_connection.go
@@ -45,9 +45,7 @@ func resourceAwsCodeStarConnectionsConnection() *schema.Resource {
 				Type:     schema.TypeString,
 				Required: true,
 				ForceNew: true,
-				ValidateFunc: validation.StringInSlice([]string{
-					codestarconnections.ProviderTypeBitbucket,
-				}, false),
+				ValidateFunc: validation.StringInSlice(codestarconnections.ProviderType_Values(), false),
 			},
 		},
 	}
From 402132bf63ffdfca3f1c66e0c286a2e9941c0e11 Mon Sep 17 00:00:00 2001
From: Andrew Babichev
Date: Tue, 15 Dec 2020 02:41:25 +0200
Subject: [PATCH 0214/1212] resource/aws_workspaces_workspace: Fix Terminated
 Workspace Panic (#16692)

Output from acceptance testing:

```
--- PASS: TestFlattenWorkspaceProperties (0.00s)
--- PASS: TestExpandWorkspaceProperties (0.00s)
--- PASS: TestAccAwsWorkspacesWorkspace_validateRootVolumeSize (3.40s)
--- PASS: TestAccAwsWorkspacesWorkspace_validateUserVolumeSize (3.96s)
--- PASS: TestAccAwsWorkspacesWorkspace_workspaceProperties_runningModeAlwaysOn (1775.72s)
--- PASS: TestAccAwsWorkspacesWorkspace_recreate (1836.44s)
--- PASS: TestAccAwsWorkspacesWorkspace_timeout (1848.37s)
--- PASS: TestAccAwsWorkspacesWorkspace_basic (1852.09s)
--- PASS: TestAccAwsWorkspacesWorkspace_tags (1876.63s)
--- PASS: TestAccAwsWorkspacesWorkspace_workspaceProperties (1899.23s)
```
---
 .../service/workspaces/waiter/status.go       |   2 +-
 .../service/workspaces/waiter/waiter.go       |   2 +-
 aws/resource_aws_workspaces_workspace_test.go | 107 ++++++++++++------
 3 files changed, 74 insertions(+), 37 deletions(-)

diff --git a/aws/internal/service/workspaces/waiter/status.go b/aws/internal/service/workspaces/waiter/status.go
index 00ec6e0b62c..8066543932d 100644
--- a/aws/internal/service/workspaces/waiter/status.go
+++ b/aws/internal/service/workspaces/waiter/status.go
@@ -34,7 +34,7 @@ func WorkspaceState(conn *workspaces.WorkSpaces, workspaceID string) resource.St
 	}

 	if len(output.Workspaces) == 0 {
-		return nil, "", nil
+		return output, workspaces.WorkspaceStateTerminated, nil
 	}

 	workspace := output.Workspaces[0]
diff --git a/aws/internal/service/workspaces/waiter/waiter.go b/aws/internal/service/workspaces/waiter/waiter.go
index dadac13f2ff..b8cd3394205 100644
--- a/aws/internal/service/workspaces/waiter/waiter.go
+++ b/aws/internal/service/workspaces/waiter/waiter.go
@@ -109,7
+109,7 @@ func WorkspaceTerminated(conn *workspaces.WorkSpaces, workspaceID string, timeou workspaces.WorkspaceStateTerminating, workspaces.WorkspaceStateError, }, - Target: []string{}, + Target: []string{workspaces.WorkspaceStateTerminated}, Refresh: WorkspaceState(conn, workspaceID), Timeout: timeout, } diff --git a/aws/resource_aws_workspaces_workspace_test.go b/aws/resource_aws_workspaces_workspace_test.go index eee59e1a254..50a99fcc226 100644 --- a/aws/resource_aws_workspaces_workspace_test.go +++ b/aws/resource_aws_workspaces_workspace_test.go @@ -89,7 +89,8 @@ func TestAccAwsWorkspacesWorkspace_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "workspace_properties.0.running_mode", workspaces.RunningModeAlwaysOn), resource.TestCheckResourceAttr(resourceName, "workspace_properties.0.running_mode_auto_stop_timeout_in_minutes", "0"), resource.TestCheckResourceAttr(resourceName, "workspace_properties.0.user_volume_size_gib", "10"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", fmt.Sprintf("tf-testacc-workspaces-workspace-%[1]s", rName)), ), }, { @@ -122,7 +123,7 @@ func TestAccAwsWorkspacesWorkspace_tags(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckAwsWorkspacesWorkspaceExists(resourceName, &v1), resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), - resource.TestCheckResourceAttr(resourceName, "tags.TerraformProviderAwsTest", "true"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", fmt.Sprintf("tf-testacc-workspaces-workspace-%[1]s", rName)), resource.TestCheckResourceAttr(resourceName, "tags.Alpha", "1"), ), }, @@ -136,7 +137,7 @@ func TestAccAwsWorkspacesWorkspace_tags(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckAwsWorkspacesWorkspaceExists(resourceName, &v2), resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), - resource.TestCheckResourceAttr(resourceName, "tags.TerraformProviderAwsTest", "true"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", fmt.Sprintf("tf-testacc-workspaces-workspace-%[1]s", rName)), resource.TestCheckResourceAttr(resourceName, "tags.Beta", "2"), ), }, @@ -145,7 +146,7 @@ func TestAccAwsWorkspacesWorkspace_tags(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckAwsWorkspacesWorkspaceExists(resourceName, &v3), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), - resource.TestCheckResourceAttr(resourceName, "tags.TerraformProviderAwsTest", "true"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", fmt.Sprintf("tf-testacc-workspaces-workspace-%[1]s", rName)), ), }, }, @@ -401,19 +402,25 @@ func testAccCheckAwsWorkspacesWorkspaceExists(n string, v *workspaces.Workspace) func testAccAwsWorkspacesWorkspaceConfig_Prerequisites(rName string) string { return composeConfig( testAccAwsWorkspacesDirectoryConfig_Prerequisites(rName), - ` + fmt.Sprintf(` data "aws_workspaces_bundle" "test" { bundle_id = "wsb-bh8rsxt14" # Value with Windows 10 (English) } resource "aws_workspaces_directory" "test" { directory_id = aws_directory_service_directory.main.id + + tags = { + Name = "tf-testacc-workspaces-directory-%[1]s" + } } -`) +`, rName)) } func testAccWorkspacesWorkspaceConfig(rName string) string { - return testAccAwsWorkspacesWorkspaceConfig_Prerequisites(rName) + ` + return composeConfig( + testAccAwsWorkspacesWorkspaceConfig_Prerequisites(rName), + fmt.Sprintf(` resource 
"aws_workspaces_workspace" "test" { bundle_id = data.aws_workspaces_bundle.test.id directory_id = aws_workspaces_directory.test.id @@ -421,12 +428,18 @@ resource "aws_workspaces_workspace" "test" { # NOTE: WorkSpaces API doesn't allow creating users in the directory. # However, "Administrator"" user is always present in a bare directory. user_name = "Administrator" + + tags = { + Name = "tf-testacc-workspaces-workspace-%[1]s" + } } -` +`, rName)) } func testAccWorkspacesWorkspaceConfig_TagsA(rName string) string { - return testAccAwsWorkspacesWorkspaceConfig_Prerequisites(rName) + ` + return composeConfig( + testAccAwsWorkspacesWorkspaceConfig_Prerequisites(rName), + fmt.Sprintf(` resource "aws_workspaces_workspace" "test" { bundle_id = data.aws_workspaces_bundle.test.id directory_id = aws_workspaces_directory.test.id @@ -436,15 +449,17 @@ resource "aws_workspaces_workspace" "test" { user_name = "Administrator" tags = { - TerraformProviderAwsTest = true - Alpha = 1 + Name = "tf-testacc-workspaces-workspace-%[1]s" + Alpha = 1 } } -` +`, rName)) } func testAccWorkspacesWorkspaceConfig_TagsB(rName string) string { - return testAccAwsWorkspacesWorkspaceConfig_Prerequisites(rName) + ` + return composeConfig( + testAccAwsWorkspacesWorkspaceConfig_Prerequisites(rName), + fmt.Sprintf(` resource "aws_workspaces_workspace" "test" { bundle_id = data.aws_workspaces_bundle.test.id directory_id = aws_workspaces_directory.test.id @@ -454,15 +469,17 @@ resource "aws_workspaces_workspace" "test" { user_name = "Administrator" tags = { - TerraformProviderAwsTest = true - Beta = 2 + Name = "tf-testacc-workspaces-workspace-%[1]s" + Beta = 2 } } -` +`, rName)) } func testAccWorkspacesWorkspaceConfig_TagsC(rName string) string { - return testAccAwsWorkspacesWorkspaceConfig_Prerequisites(rName) + ` + return composeConfig( + testAccAwsWorkspacesWorkspaceConfig_Prerequisites(rName), + fmt.Sprintf(` resource "aws_workspaces_workspace" "test" { bundle_id = data.aws_workspaces_bundle.test.id directory_id = aws_workspaces_directory.test.id @@ -472,14 +489,16 @@ resource "aws_workspaces_workspace" "test" { user_name = "Administrator" tags = { - TerraformProviderAwsTest = true + Name = "tf-testacc-workspaces-workspace-%[1]s" } } -` +`, rName)) } func testAccWorkspacesWorkspaceConfig_WorkspacePropertiesA(rName string) string { - return testAccAwsWorkspacesWorkspaceConfig_Prerequisites(rName) + ` + return composeConfig( + testAccAwsWorkspacesWorkspaceConfig_Prerequisites(rName), + fmt.Sprintf(` resource "aws_workspaces_workspace" "test" { bundle_id = data.aws_workspaces_bundle.test.id directory_id = aws_workspaces_directory.test.id @@ -495,14 +514,16 @@ resource "aws_workspaces_workspace" "test" { } tags = { - TerraformProviderAwsTest = true + Name = "tf-testacc-workspaces-workspace-%[1]s" } } -` +`, rName)) } func testAccWorkspacesWorkspaceConfig_WorkspacePropertiesB(rName string) string { - return testAccAwsWorkspacesWorkspaceConfig_Prerequisites(rName) + ` + return composeConfig( + testAccAwsWorkspacesWorkspaceConfig_Prerequisites(rName), + fmt.Sprintf(` resource "aws_workspaces_workspace" "test" { bundle_id = data.aws_workspaces_bundle.test.id directory_id = aws_workspaces_directory.test.id @@ -517,14 +538,16 @@ resource "aws_workspaces_workspace" "test" { } tags = { - TerraformProviderAwsTest = true + Name = "tf-testacc-workspaces-workspace-%[1]s" } } -` +`, rName)) } func testAccWorkspacesWorkspaceConfig_WorkspacePropertiesC(rName string) string { - return testAccAwsWorkspacesWorkspaceConfig_Prerequisites(rName) + ` 
+ return composeConfig( + testAccAwsWorkspacesWorkspaceConfig_Prerequisites(rName), + fmt.Sprintf(` resource "aws_workspaces_workspace" "test" { bundle_id = data.aws_workspaces_bundle.test.id directory_id = aws_workspaces_directory.test.id @@ -535,12 +558,18 @@ resource "aws_workspaces_workspace" "test" { workspace_properties { } + + tags = { + Name = "tf-testacc-workspaces-workspace-%[1]s" + } } -` +`, rName)) } func testAccWorkspacesWorkspaceConfig_validateRootVolumeSize(rName string) string { - return testAccAwsWorkspacesWorkspaceConfig_Prerequisites(rName) + ` + return composeConfig( + testAccAwsWorkspacesWorkspaceConfig_Prerequisites(rName), + fmt.Sprintf(` resource "aws_workspaces_workspace" "test" { bundle_id = data.aws_workspaces_bundle.test.id directory_id = aws_workspaces_directory.test.id @@ -555,14 +584,16 @@ resource "aws_workspaces_workspace" "test" { } tags = { - TerraformProviderAwsTest = true + Name = "tf-testacc-workspaces-workspace-%[1]s" } } -` +`, rName)) } func testAccWorkspacesWorkspaceConfig_validateUserVolumeSize(rName string) string { - return testAccAwsWorkspacesWorkspaceConfig_Prerequisites(rName) + ` + return composeConfig( + testAccAwsWorkspacesWorkspaceConfig_Prerequisites(rName), + fmt.Sprintf(` resource "aws_workspaces_workspace" "test" { bundle_id = data.aws_workspaces_bundle.test.id directory_id = aws_workspaces_directory.test.id @@ -577,14 +608,16 @@ resource "aws_workspaces_workspace" "test" { } tags = { - TerraformProviderAwsTest = true + Name = "tf-testacc-workspaces-workspace-%[1]s" } } -` +`, rName)) } func testAccWorkspacesWorkspaceConfig_timeout(rName string) string { - return testAccAwsWorkspacesWorkspaceConfig_Prerequisites(rName) + ` + return composeConfig( + testAccAwsWorkspacesWorkspaceConfig_Prerequisites(rName), + fmt.Sprintf(` resource "aws_workspaces_workspace" "test" { bundle_id = data.aws_workspaces_bundle.test.id directory_id = aws_workspaces_directory.test.id @@ -598,8 +631,12 @@ resource "aws_workspaces_workspace" "test" { update = "30m" delete = "30m" } + + tags = { + Name = "tf-testacc-workspaces-workspace-%[1]s" + } } -` +`, rName)) } func TestExpandWorkspaceProperties(t *testing.T) { From 788a843691d0dee0216c9be148171a50e6cb6bab Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Mon, 14 Dec 2020 19:43:19 -0500 Subject: [PATCH 0215/1212] Update CHANGELOG for #16692 --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 71cab1674f4..51f1275b7b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,10 @@ ENHANCEMENTS * data-source/aws_autoscaling_group: Adds `launch_template` attribute [GH-16297] +BUG FIXES + +* resource/aws_workspaces_workspace: Prevent panic from terminated WorkSpace [GH-16692] + ## 3.21.0 (December 11, 2020) NOTES From 3d6f3554cb7a0dd6efd82d65f92873bc63e951be Mon Sep 17 00:00:00 2001 From: vroad <396351+vroad@users.noreply.github.com> Date: Tue, 15 Dec 2020 10:45:30 +0900 Subject: [PATCH 0216/1212] docs/resource/aws_launch_template: Fix typo: throughput (#16731) --- website/docs/r/launch_template.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/launch_template.html.markdown b/website/docs/r/launch_template.html.markdown index 7a9e5a94ee4..904de1d1143 100644 --- a/website/docs/r/launch_template.html.markdown +++ b/website/docs/r/launch_template.html.markdown @@ -179,7 +179,7 @@ The `ebs` block supports the following: * `kms_key_id` - The ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use 
when creating the encrypted volume. `encrypted` must be set to `true` when this is set. * `snapshot_id` - The Snapshot ID to mount. -* `thoughput` - The throughput to provision for a `gp3` volume, with a maximum of 1,000 MiB/s. +* `throughput` - The throughput to provision for a `gp3` volume, with a maximum of 1,000 MiB/s. * `volume_size` - The size of the volume in gigabytes. * `volume_type` - The volume type. Can be `standard`, `gp2`, `gp3`, `io1`, `io2`, `sc1` or `st1` (Default: `gp2`). From 57641cd47b1ea187a840d8fba3d1bc5e6d25eb38 Mon Sep 17 00:00:00 2001 From: jaysiyani Date: Tue, 15 Dec 2020 02:02:59 +0000 Subject: [PATCH 0217/1212] resource/aws_launch_configuration: Add metadata_options configuration block (#14637) Output from acceptance testing: ``` --- PASS: TestAccAWSLaunchConfiguration_basic (73.44s) --- PASS: TestAccAWSLaunchConfiguration_ebs_noDevice (52.92s) --- PASS: TestAccAWSLaunchConfiguration_encryptedRootBlockDevice (52.90s) --- PASS: TestAccAWSLaunchConfiguration_metadataOptions (55.79s) --- PASS: TestAccAWSLaunchConfiguration_RootBlockDevice_AmiDisappears (373.76s) --- PASS: TestAccAWSLaunchConfiguration_RootBlockDevice_VolumeSize (76.39s) --- PASS: TestAccAWSLaunchConfiguration_updateEbsBlockDevices (74.31s) --- PASS: TestAccAWSLaunchConfiguration_userData (75.41s) --- PASS: TestAccAWSLaunchConfiguration_withBlockDevices (55.41s) --- PASS: TestAccAWSLaunchConfiguration_withEncryption (55.17s) --- PASS: TestAccAWSLaunchConfiguration_withIAMProfile (60.63s) --- PASS: TestAccAWSLaunchConfiguration_withInstanceStoreAMI (57.10s) --- PASS: TestAccAWSLaunchConfiguration_withSpotPrice (55.55s) --- PASS: TestAccAWSLaunchConfigurationDataSource_basic (53.40s) --- PASS: TestAccAWSLaunchConfigurationDataSource_ebsNoDevice (52.04s) --- PASS: TestAccAWSLaunchConfigurationDataSource_securityGroups (49.80s) --- PASS: TestAccLaunchConfigurationDataSource_metadataOptions (54.83s) ``` --- aws/data_source_aws_launch_configuration.go | 25 ++++++ ...ta_source_aws_launch_configuration_test.go | 44 ++++++++++ aws/resource_aws_launch_configuration.go | 81 +++++++++++++++++++ aws/resource_aws_launch_configuration_test.go | 46 +++++++++++ .../docs/d/launch_configuration.html.markdown | 4 + .../docs/r/launch_configuration.html.markdown | 4 + 6 files changed, 204 insertions(+) diff --git a/aws/data_source_aws_launch_configuration.go b/aws/data_source_aws_launch_configuration.go index 932cba6db92..b6b0c290085 100644 --- a/aws/data_source_aws_launch_configuration.go +++ b/aws/data_source_aws_launch_configuration.go @@ -156,6 +156,27 @@ func dataSourceAwsLaunchConfiguration() *schema.Resource { }, }, + "metadata_options": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "http_endpoint": { + Type: schema.TypeString, + Computed: true, + }, + "http_tokens": { + Type: schema.TypeString, + Computed: true, + }, + "http_put_response_hop_limit": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + "root_block_device": { Type: schema.TypeList, Computed: true, @@ -245,6 +266,10 @@ func dataSourceAwsLaunchConfigurationRead(d *schema.ResourceData, meta interface return fmt.Errorf("error setting security_groups: %s", err) } + if err := d.Set("metadata_options", flattenLaunchConfigInstanceMetadataOptions(lc.MetadataOptions)); err != nil { + return fmt.Errorf("error setting metadata_options: %s", err) + } + classicSGs := make([]string, 0, len(lc.ClassicLinkVPCSecurityGroups)) for _, sg := range lc.ClassicLinkVPCSecurityGroups { classicSGs = 
append(classicSGs, *sg) diff --git a/aws/data_source_aws_launch_configuration_test.go b/aws/data_source_aws_launch_configuration_test.go index 05fa7b1415a..5c7d826acff 100644 --- a/aws/data_source_aws_launch_configuration_test.go +++ b/aws/data_source_aws_launch_configuration_test.go @@ -77,6 +77,29 @@ func TestAccAWSLaunchConfigurationDataSource_ebsNoDevice(t *testing.T) { }) } +func TestAccLaunchConfigurationDataSource_metadataOptions(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + dataSourceName := "data.aws_launch_configuration.test" + resourceName := "aws_launch_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLaunchConfigurationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccLaunchConfigurationDataSourceConfig_metadataOptions(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "metadata_options.#", resourceName, "metadata_options.#"), + resource.TestCheckResourceAttrPair(dataSourceName, "metadata_options.0.http_endpoint", resourceName, "metadata_options.0.http_endpoint"), + resource.TestCheckResourceAttrPair(dataSourceName, "metadata_options.0.http_tokens", resourceName, "metadata_options.0.http_tokens"), + resource.TestCheckResourceAttrPair(dataSourceName, "metadata_options.0.http_put_response_hop_limit", resourceName, "metadata_options.0.http_put_response_hop_limit"), + ), + }, + }, + }) +} + func testAccLaunchConfigurationDataSourceConfig_basic(rName string) string { return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` resource "aws_launch_configuration" "test" { @@ -139,6 +162,27 @@ data "aws_launch_configuration" "foo" { `, rInt, rInt) } +func testAccLaunchConfigurationDataSourceConfig_metadataOptions(rName string) string { + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + fmt.Sprintf(` +resource "aws_launch_configuration" "test" { + image_id = data.aws_ami.amzn-ami-minimal-hvm-ebs.id + instance_type = "t3.nano" + name = %[1]q + metadata_options { + http_endpoint = "enabled" + http_tokens = "required" + http_put_response_hop_limit = 2 + } +} + +data "aws_launch_configuration" "test" { + name = aws_launch_configuration.test.name +} +`, rName)) +} + func testAccLaunchConfigurationDataSourceConfigEbsNoDevice(rName string) string { return composeConfig( testAccLatestAmazonLinuxHvmEbsAmiConfig(), diff --git a/aws/resource_aws_launch_configuration.go b/aws/resource_aws_launch_configuration.go index 3cd99e88a93..f9e4f2789c0 100644 --- a/aws/resource_aws_launch_configuration.go +++ b/aws/resource_aws_launch_configuration.go @@ -247,6 +247,39 @@ func resourceAwsLaunchConfiguration() *schema.Resource { }, }, + "metadata_options": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "http_endpoint": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{autoscaling.InstanceMetadataEndpointStateEnabled, autoscaling.InstanceMetadataEndpointStateDisabled}, false), + }, + "http_tokens": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{autoscaling.InstanceMetadataHttpTokensStateOptional, autoscaling.InstanceMetadataHttpTokensStateRequired}, false), + }, + 
"http_put_response_hop_limit": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(1, 64), + }, + }, + }, + }, + "root_block_device": { Type: schema.TypeList, Optional: true, @@ -349,6 +382,10 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface createLaunchConfigurationOpts.ClassicLinkVPCId = aws.String(v.(string)) } + if v, ok := d.GetOk("metadata_options"); ok { + createLaunchConfigurationOpts.MetadataOptions = expandLaunchConfigInstanceMetadataOptions(v.([]interface{})) + } + if v, ok := d.GetOk("vpc_classic_link_security_groups"); ok { createLaunchConfigurationOpts.ClassicLinkVPCSecurityGroups = expandStringList( v.(*schema.Set).List(), @@ -564,6 +601,10 @@ func resourceAwsLaunchConfigurationRead(d *schema.ResourceData, meta interface{} return fmt.Errorf("error setting vpc_classic_link_security_groups: %s", err) } + if err := d.Set("metadata_options", flattenLaunchConfigInstanceMetadataOptions(lc.MetadataOptions)); err != nil { + return fmt.Errorf("error setting metadata_options: %s", err) + } + if err := readLCBlockDevices(d, lc, ec2conn); err != nil { return err } @@ -631,6 +672,46 @@ func readLCBlockDevices(d *schema.ResourceData, lc *autoscaling.LaunchConfigurat return nil } +func expandLaunchConfigInstanceMetadataOptions(l []interface{}) *autoscaling.InstanceMetadataOptions { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + opts := &autoscaling.InstanceMetadataOptions{ + HttpEndpoint: aws.String(m["http_endpoint"].(string)), + } + + if m["http_endpoint"].(string) == autoscaling.InstanceMetadataEndpointStateEnabled { + // These parameters are not allowed unless HttpEndpoint is enabled + + if v, ok := m["http_tokens"].(string); ok && v != "" { + opts.HttpTokens = aws.String(v) + } + + if v, ok := m["http_put_response_hop_limit"].(int); ok && v != 0 { + opts.HttpPutResponseHopLimit = aws.Int64(int64(v)) + } + } + + return opts +} + +func flattenLaunchConfigInstanceMetadataOptions(opts *autoscaling.InstanceMetadataOptions) []interface{} { + if opts == nil { + return nil + } + + m := map[string]interface{}{ + "http_endpoint": aws.StringValue(opts.HttpEndpoint), + "http_put_response_hop_limit": aws.Int64Value(opts.HttpPutResponseHopLimit), + "http_tokens": aws.StringValue(opts.HttpTokens), + } + + return []interface{}{m} +} + func readBlockDevicesFromLaunchConfiguration(d *schema.ResourceData, lc *autoscaling.LaunchConfiguration, ec2conn *ec2.EC2) ( map[string]interface{}, error) { blockDevices := make(map[string]interface{}) diff --git a/aws/resource_aws_launch_configuration_test.go b/aws/resource_aws_launch_configuration_test.go index d043cfd1f78..28a699e3791 100644 --- a/aws/resource_aws_launch_configuration_test.go +++ b/aws/resource_aws_launch_configuration_test.go @@ -389,6 +389,35 @@ func TestAccAWSLaunchConfiguration_updateEbsBlockDevices(t *testing.T) { }) } +func TestAccAWSLaunchConfiguration_metadataOptions(t *testing.T) { + var conf autoscaling.LaunchConfiguration + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_launch_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLaunchConfigurationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLaunchConfigurationConfigMetadataOptions(rName), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckAWSLaunchConfigurationExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "metadata_options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "metadata_options.0.http_endpoint", "enabled"), + resource.TestCheckResourceAttr(resourceName, "metadata_options.0.http_tokens", "required"), + resource.TestCheckResourceAttr(resourceName, "metadata_options.0.http_put_response_hop_limit", "2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAWSLaunchConfiguration_ebs_noDevice(t *testing.T) { var conf autoscaling.LaunchConfiguration rInt := acctest.RandInt() @@ -685,6 +714,23 @@ resource "aws_launch_configuration" "test" { `, rInt)) } +func testAccAWSLaunchConfigurationConfigMetadataOptions(rName string) string { + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + fmt.Sprintf(` +resource "aws_launch_configuration" "test" { + image_id = data.aws_ami.amzn-ami-minimal-hvm-ebs.id + instance_type = "t3.nano" + name = %[1]q + metadata_options { + http_endpoint = "enabled" + http_tokens = "required" + http_put_response_hop_limit = 2 + } +} +`, rName)) +} + func testAccAWSLaunchConfigurationConfig() string { return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` resource "aws_launch_configuration" "test" { diff --git a/website/docs/d/launch_configuration.html.markdown b/website/docs/d/launch_configuration.html.markdown index 7b1169294f5..68702126a22 100644 --- a/website/docs/d/launch_configuration.html.markdown +++ b/website/docs/d/launch_configuration.html.markdown @@ -35,6 +35,10 @@ In addition to all arguments above, the following attributes are exported: * `instance_type` - The Instance Type of the instance to launch. * `iam_instance_profile` - The IAM Instance Profile to associate with launched instances. * `key_name` - The Key Name that should be used for the instance. +* `metadata_options` - The metadata options for the instance. + * `http_endpoint` - The state of the metadata service: `enabled`, `disabled`. + * `http_tokens` - If session tokens are required: `optional`, `required`. + * `http_put_response_hop_limit` - The desired HTTP PUT response hop limit for instance metadata requests. * `security_groups` - A list of associated Security Group IDS. * `associate_public_ip_address` - Whether a Public IP address is associated with the instance. * `vpc_classic_link_id` - The ID of a ClassicLink-enabled VPC. diff --git a/website/docs/r/launch_configuration.html.markdown b/website/docs/r/launch_configuration.html.markdown index d96ee73ea45..ddd47210a35 100644 --- a/website/docs/r/launch_configuration.html.markdown +++ b/website/docs/r/launch_configuration.html.markdown @@ -144,6 +144,10 @@ The following arguments are supported: * `iam_instance_profile` - (Optional) The name attribute of the IAM instance profile to associate with launched instances. * `key_name` - (Optional) The key name that should be used for the instance. +* `metadata_options` - The metadata options for the instance. + * `http_endpoint` - The state of the metadata service: `enabled`, `disabled`. + * `http_tokens` - If session tokens are required: `optional`, `required`. + * `http_put_response_hop_limit` - The desired HTTP PUT response hop limit for instance metadata requests. * `security_groups` - (Optional) A list of associated security group IDS. * `associate_public_ip_address` - (Optional) Associate a public ip address with an instance in a VPC. 
* `vpc_classic_link_id` - (Optional) The ID of a ClassicLink-enabled VPC. Only applies to EC2-Classic instances. (eg. `vpc-2730681a`) From 95145e95347ce4fac96c5154a655d359e9dc622c Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Mon, 14 Dec 2020 21:05:02 -0500 Subject: [PATCH 0218/1212] Update CHANGELOG for #14637 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 51f1275b7b2..028d03c0df0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ FEATURES ENHANCEMENTS * data-source/aws_autoscaling_group: Adds `launch_template` attribute [GH-16297] +* data-source/aws_launch_configuration: Add `metadata_options` attribute [GH-14637] +* resource/aws_launch_configuration: Add `metadata_options` configuration block [GH-14637] BUG FIXES From 039fb67ddb1a427178734ccb93af20fbde8f6fae Mon Sep 17 00:00:00 2001 From: Kristian Thornley Date: Tue, 15 Dec 2020 15:08:59 +1300 Subject: [PATCH 0219/1212] New Service: Amazon Connect (#16644) * feat: Initial Commit of the Amazon Connect Service This is the initial commit of the Amazon Connect service. New resources will follow after the initial service merge * Re-triggering the build --- .hashibot.hcl | 8 ++++++++ aws/config.go | 3 +++ aws/provider.go | 1 + website/allowed-subcategories.txt | 1 + website/docs/guides/custom-service-endpoints.html.md | 1 + 5 files changed, 14 insertions(+) diff --git a/.hashibot.hcl b/.hashibot.hcl index fd3aa0a4be0..27f9cfe1d97 100644 --- a/.hashibot.hcl +++ b/.hashibot.hcl @@ -214,6 +214,9 @@ behavior "regexp_issue_labeler_v2" "service_labels" { "service/configservice" = [ "aws_config_", ], + "service/connect" = [ + "aws_connect_", + ], "service/databasemigrationservice" = [ "aws_dms_", ], @@ -863,6 +866,11 @@ behavior "pull_request_path_labeler" "service_labels" { "aws/*_aws_config_*", "website/**/config_*" ] + "service/connect" = [ + "aws/internal/service/connect/**/*", + "aws/*_aws_connect_*", + "website/**/connect_*" + ] "service/costandusagereportservice" = [ "aws/internal/service/costandusagereportservice/**/*", "aws/*_aws_cur_*", diff --git a/aws/config.go b/aws/config.go index 433c2bc8e0b..02dfabd1e94 100644 --- a/aws/config.go +++ b/aws/config.go @@ -45,6 +45,7 @@ import ( "github.com/aws/aws-sdk-go/service/cognitoidentity" "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" "github.com/aws/aws-sdk-go/service/configservice" + "github.com/aws/aws-sdk-go/service/connect" "github.com/aws/aws-sdk-go/service/costandusagereportservice" "github.com/aws/aws-sdk-go/service/databasemigrationservice" "github.com/aws/aws-sdk-go/service/dataexchange" @@ -239,6 +240,7 @@ type AWSClient struct { cognitoconn *cognitoidentity.CognitoIdentity cognitoidpconn *cognitoidentityprovider.CognitoIdentityProvider configconn *configservice.ConfigService + connectconn *connect.Connect costandusagereportconn *costandusagereportservice.CostandUsageReportService dataexchangeconn *dataexchange.DataExchange datapipelineconn *datapipeline.DataPipeline @@ -478,6 +480,7 @@ func (c *Config) Client() (interface{}, error) { cognitoconn: cognitoidentity.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["cognitoidentity"])})), cognitoidpconn: cognitoidentityprovider.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["cognitoidp"])})), configconn: configservice.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["configservice"])})), + connectconn: connect.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["connect"])})), costandusagereportconn: 
costandusagereportservice.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["cur"])})), dataexchangeconn: dataexchange.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["dataexchange"])})), datapipelineconn: datapipeline.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["datapipeline"])})), diff --git a/aws/provider.go b/aws/provider.go index c56401f499a..84671afadeb 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -1156,6 +1156,7 @@ func init() { "cognitoidentity", "cognitoidp", "configservice", + "connect", "cur", "dataexchange", "datapipeline", diff --git a/website/allowed-subcategories.txt b/website/allowed-subcategories.txt index d6ab65240cd..6b36cececc6 100644 --- a/website/allowed-subcategories.txt +++ b/website/allowed-subcategories.txt @@ -28,6 +28,7 @@ CodeStar Connections CodeStar Notifications Cognito Config +Connect Cost and Usage Report Data Lifecycle Manager (DLM) DataPipeline diff --git a/website/docs/guides/custom-service-endpoints.html.md b/website/docs/guides/custom-service-endpoints.html.md index 6318cc6f40c..318e4ec4334 100644 --- a/website/docs/guides/custom-service-endpoints.html.md +++ b/website/docs/guides/custom-service-endpoints.html.md @@ -89,6 +89,7 @@ The Terraform AWS Provider allows the following endpoints to be customized:
   <li><code>cognitoidentity</code></li>
   <li><code>cognitoidp</code></li>
   <li><code>configservice</code></li>
+  <li><code>connect</code></li>
   <li><code>cur</code></li>
   <li><code>dataexchange</code></li>
   <li><code>datapipeline</code></li>
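For context, a minimal sketch of how the new `connect` endpoint override can be used once this commit lands; the region and the URL are placeholders (e.g. a local mock or testing stack), not a real Amazon Connect endpoint:

```hcl
provider "aws" {
  region = "us-west-2"

  endpoints {
    # Route Amazon Connect API calls to a custom endpoint. The URL below is a
    # placeholder for a local testing stack, not a real AWS service endpoint.
    connect = "http://localhost:4566"
  }
}
```

The `connect` key feeds the `connectconn` client added to `aws/config.go` above, which constructs its session with `Endpoint: aws.String(c.Endpoints["connect"])`.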
From 9cdf84a28b2383abcc22da0194f37035ed811f0e Mon Sep 17 00:00:00 2001
From: tmekaumput
Date: Tue, 15 Dec 2020 13:10:51 +1100
Subject: [PATCH 0220/1212] update description of protocol arguments

---
 website/docs/r/security_group.html.markdown | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/website/docs/r/security_group.html.markdown b/website/docs/r/security_group.html.markdown
index 34233b86d1f..113cce0e681 100644
--- a/website/docs/r/security_group.html.markdown
+++ b/website/docs/r/security_group.html.markdown
@@ -107,7 +107,7 @@ The `ingress` block supports:
 * `prefix_list_ids` - (Optional) List of prefix list IDs.
 * `from_port` - (Required) The start port (or ICMP type number if protocol is "icmp")
 * `protocol` - (Required) The protocol. If you select a protocol of
-"-1" (semantically equivalent to `"all"`, which is not a valid value here), you must specify a "from_port" and "to_port" equal to 0. If not icmp, tcp, udp, or "-1" use the [protocol number](https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)
+"-1" (semantically equivalent to `"all"`, which is not a valid value here), you must specify a "from_port" and "to_port" equal to 0. The supported values are defined in the "IpProtocol" argument of the [IpPermission](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IpPermission.html) API reference. This argument is normalized to a lowercase value to match the AWS API requirement when using Terraform 0.12.x and above. Please make sure that the value of the protocol is specified as lowercase when using older versions of Terraform to avoid issues during an upgrade.
 * `security_groups` - (Optional) List of security group Group Names if using EC2-Classic, or Group IDs if using a VPC.
 * `self` - (Optional) If true, the security group itself will be added as
@@ -122,7 +122,7 @@ The `egress` block supports:
 * `prefix_list_ids` - (Optional) List of prefix list IDs (for allowing access to VPC endpoints)
 * `from_port` - (Required) The start port (or ICMP type number if protocol is "icmp")
 * `protocol` - (Required) The protocol. If you select a protocol of
-"-1" (semantically equivalent to `"all"`, which is not a valid value here), you must specify a "from_port" and "to_port" equal to 0. If not icmp, tcp, udp, or "-1" use the [protocol number](https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)
+"-1" (semantically equivalent to `"all"`, which is not a valid value here), you must specify a "from_port" and "to_port" equal to 0. The supported values are defined in the "IpProtocol" argument of the [IpPermission](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IpPermission.html) API reference. This argument is normalized to a lowercase value to match the AWS API requirement when using Terraform 0.12.x and above. Please make sure that the value of the protocol is specified as lowercase when using older versions of Terraform to avoid issues during an upgrade.
 * `security_groups` - (Optional) List of security group Group Names if using EC2-Classic, or Group IDs if using a VPC.
* `self` - (Optional) If true, the security group itself will be added as From 55a7464fa47b6277a3bade515f6dd139dc45bc78 Mon Sep 17 00:00:00 2001 From: Roberth Kulbin Date: Tue, 15 Dec 2020 04:35:40 +0000 Subject: [PATCH 0221/1212] data-source/aws_prefix_list: Using `name` argument no longer overrides other arguments (#16739) Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccDataSourceAwsPrefixList_nameDoesNotOverrideFilter (2.68s) --- PASS: TestAccDataSourceAwsPrefixList_basic (12.81s) --- PASS: TestAccDataSourceAwsPrefixList_filter (12.83s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- PASS: TestAccDataSourceAwsPrefixList_nameDoesNotOverrideFilter (1.60s) --- PASS: TestAccDataSourceAwsPrefixList_filter (7.48s) --- PASS: TestAccDataSourceAwsPrefixList_basic (7.61s) ``` --- aws/data_source_aws_prefix_list.go | 11 ++-- aws/data_source_aws_prefix_list_test.go | 78 +++++++++++++++++++++--- website/docs/d/prefix_list.html.markdown | 2 +- 3 files changed, 75 insertions(+), 16 deletions(-) diff --git a/aws/data_source_aws_prefix_list.go b/aws/data_source_aws_prefix_list.go index 786160ec8f4..59a667dbd6a 100644 --- a/aws/data_source_aws_prefix_list.go +++ b/aws/data_source_aws_prefix_list.go @@ -45,11 +45,12 @@ func dataSourceAwsPrefixListRead(d *schema.ResourceData, meta interface{}) error if prefixListID := d.Get("prefix_list_id"); prefixListID != "" { req.PrefixListIds = aws.StringSlice([]string{prefixListID.(string)}) } - req.Filters = buildEC2AttributeFilterList( - map[string]string{ - "prefix-list-name": d.Get("name").(string), - }, - ) + if prefixListName := d.Get("name"); prefixListName.(string) != "" { + req.Filters = append(req.Filters, &ec2.Filter{ + Name: aws.String("prefix-list-name"), + Values: aws.StringSlice([]string{prefixListName.(string)}), + }) + } log.Printf("[DEBUG] Reading Prefix List: %s", req) resp, err := conn.DescribePrefixLists(req) diff --git a/aws/data_source_aws_prefix_list_test.go b/aws/data_source_aws_prefix_list_test.go index 26a27f9684a..4c5304e7d9c 100644 --- a/aws/data_source_aws_prefix_list_test.go +++ b/aws/data_source_aws_prefix_list_test.go @@ -2,9 +2,12 @@ package aws import ( "fmt" + "regexp" "strconv" "testing" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) @@ -41,7 +44,41 @@ func TestAccDataSourceAwsPrefixList_filter(t *testing.T) { }) } +func TestAccDataSourceAwsPrefixList_nameDoesNotOverrideFilter(t *testing.T) { + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsPrefixListConfig_nameDoesNotOverrideFilter, + ExpectError: regexp.MustCompile(`no matching prefix list found`), + }, + }, + }) +} + func testAccDataSourceAwsPrefixListCheck(name string) resource.TestCheckFunc { + getPrefixListId := func(name string) (string, error) { + conn := testAccProvider.Meta().(*AWSClient).ec2conn + + input := ec2.DescribePrefixListsInput{ + Filters: buildEC2AttributeFilterList(map[string]string{ + "prefix-list-name": name, + }), + } + + output, err := conn.DescribePrefixLists(&input) + if err != nil { + return "", err + } + + if len(output.PrefixLists) != 1 { + return "", fmt.Errorf("prefix list %s not found", name) + } + + return aws.StringValue(output.PrefixLists[0].PrefixListId), nil + } + return func(s *terraform.State) error { rs, ok := 
s.RootModule().Resources[name] if !ok { @@ -50,17 +87,21 @@ func testAccDataSourceAwsPrefixListCheck(name string) resource.TestCheckFunc { attr := rs.Primary.Attributes - if attr["name"] != "com.amazonaws.us-west-2.s3" { + region := testAccGetRegion() + prefixListName := fmt.Sprintf("com.amazonaws.%s.s3", region) + prefixListId, err := getPrefixListId(prefixListName) + if err != nil { + return err + } + + if attr["name"] != prefixListName { return fmt.Errorf("bad name %s", attr["name"]) } - if attr["id"] != "pl-68a54001" { + if attr["id"] != prefixListId { return fmt.Errorf("bad id %s", attr["id"]) } - var ( - cidrBlockSize int - err error - ) + var cidrBlockSize int if cidrBlockSize, err = strconv.Atoi(attr["cidr_blocks.#"]); err != nil { return err @@ -74,27 +115,44 @@ func testAccDataSourceAwsPrefixListCheck(name string) resource.TestCheckFunc { } const testAccDataSourceAwsPrefixListConfig = ` +data "aws_region" "current" {} + data "aws_prefix_list" "s3_by_id" { - prefix_list_id = "pl-68a54001" + prefix_list_id = data.aws_prefix_list.s3_by_name.id } data "aws_prefix_list" "s3_by_name" { - name = "com.amazonaws.us-west-2.s3" + name = "com.amazonaws.${data.aws_region.current.name}.s3" } ` const testAccDataSourceAwsPrefixListConfigFilter = ` +data "aws_region" "current" {} + data "aws_prefix_list" "s3_by_name" { filter { name = "prefix-list-name" - values = ["com.amazonaws.us-west-2.s3"] + values = ["com.amazonaws.${data.aws_region.current.name}.s3"] } } data "aws_prefix_list" "s3_by_id" { filter { name = "prefix-list-id" - values = ["pl-68a54001"] + values = [data.aws_prefix_list.s3_by_name.id] + } +} +` + +const testAccDataSourceAwsPrefixListConfig_nameDoesNotOverrideFilter = ` +data "aws_region" "current" {} + +data "aws_prefix_list" "test" { + name = "com.amazonaws.${data.aws_region.current.name}.dynamodb" + + filter { + name = "prefix-list-name" + values = ["com.amazonaws.${data.aws_region.current.name}.s3"] } } ` diff --git a/website/docs/d/prefix_list.html.markdown b/website/docs/d/prefix_list.html.markdown index dbddb81a055..0a0bc0fc04d 100644 --- a/website/docs/d/prefix_list.html.markdown +++ b/website/docs/d/prefix_list.html.markdown @@ -1,7 +1,7 @@ --- subcategory: "VPC" layout: "aws" -page_title: "AWS: aws_prefix-list" +page_title: "AWS: aws_prefix_list" description: |- Provides details about a specific prefix list --- From aa7ecdcba63a6b76b3732a1692096dfea2f5920c Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Mon, 14 Dec 2020 23:36:29 -0500 Subject: [PATCH 0222/1212] Update CHANGELOG for #16739 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 028d03c0df0..c7312007754 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ ENHANCEMENTS BUG FIXES +* data-source/aws_prefix_list: Using `name` argument no longer overrides other arguments [GH-16739] * resource/aws_workspaces_workspace: Prevent panic from terminated WorkSpace [GH-16692] ## 3.21.0 (December 11, 2020) From 2ae27e71da89b29e158c4e5fed1d94e87d8c288a Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Tue, 15 Dec 2020 18:34:31 +0900 Subject: [PATCH 0223/1212] Remove connection_arn --- aws/resource_aws_codestarconnections_connection.go | 6 ------ aws/resource_aws_codestarconnections_connection_test.go | 1 - 2 files changed, 7 deletions(-) diff --git a/aws/resource_aws_codestarconnections_connection.go b/aws/resource_aws_codestarconnections_connection.go index a0b8d1cb9e6..28551696c69 100644 --- a/aws/resource_aws_codestarconnections_connection.go +++ 
b/aws/resource_aws_codestarconnections_connection.go @@ -25,11 +25,6 @@ func resourceAwsCodeStarConnectionsConnection() *schema.Resource { Computed: true, }, - "connection_arn": { - Type: schema.TypeString, - Computed: true, - }, - "connection_status": { Type: schema.TypeString, Computed: true, @@ -87,7 +82,6 @@ func resourceAwsCodeStarConnectionsConnectionRead(d *schema.ResourceData, meta i d.SetId(aws.StringValue(rule.Connection.ConnectionArn)) d.Set("arn", rule.Connection.ConnectionArn) - d.Set("connection_arn", rule.Connection.ConnectionArn) d.Set("connection_name", rule.Connection.ConnectionName) d.Set("connection_status", rule.Connection.ConnectionStatus) d.Set("provider_type", rule.Connection.ProviderType) diff --git a/aws/resource_aws_codestarconnections_connection_test.go b/aws/resource_aws_codestarconnections_connection_test.go index 6a090ac8fc4..7df409d4eb6 100644 --- a/aws/resource_aws_codestarconnections_connection_test.go +++ b/aws/resource_aws_codestarconnections_connection_test.go @@ -26,7 +26,6 @@ func TestAccAWSCodeStarConnectionsConnection_Basic(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccMatchResourceAttrRegionalARN(resourceName, "id", "codestar-connections", regexp.MustCompile("connection/.+")), testAccMatchResourceAttrRegionalARN(resourceName, "arn", "codestar-connections", regexp.MustCompile("connection/.+")), - testAccMatchResourceAttrRegionalARN(resourceName, "connection_arn", "codestar-connections", regexp.MustCompile("connection/.+")), resource.TestCheckResourceAttr(resourceName, "provider_type", codestarconnections.ProviderTypeBitbucket), resource.TestCheckResourceAttr(resourceName, "connection_name", rName), resource.TestCheckResourceAttr(resourceName, "connection_status", codestarconnections.ConnectionStatusPending), From 220e9e39d5f0843c343cb5e35ee0e428e47243e2 Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Tue, 15 Dec 2020 18:36:01 +0900 Subject: [PATCH 0224/1212] Rename connection_name to name --- aws/resource_aws_codestarconnections_connection.go | 6 +++--- aws/resource_aws_codestarconnections_connection_test.go | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/aws/resource_aws_codestarconnections_connection.go b/aws/resource_aws_codestarconnections_connection.go index 28551696c69..40fcc3fb477 100644 --- a/aws/resource_aws_codestarconnections_connection.go +++ b/aws/resource_aws_codestarconnections_connection.go @@ -30,7 +30,7 @@ func resourceAwsCodeStarConnectionsConnection() *schema.Resource { Computed: true, }, - "connection_name": { + "name": { Type: schema.TypeString, Required: true, ForceNew: true, @@ -50,7 +50,7 @@ func resourceAwsCodeStarConnectionsConnectionCreate(d *schema.ResourceData, meta conn := meta.(*AWSClient).codestarconnectionsconn params := &codestarconnections.CreateConnectionInput{ - ConnectionName: aws.String(d.Get("connection_name").(string)), + ConnectionName: aws.String(d.Get("name").(string)), ProviderType: aws.String(d.Get("provider_type").(string)), } @@ -82,7 +82,7 @@ func resourceAwsCodeStarConnectionsConnectionRead(d *schema.ResourceData, meta i d.SetId(aws.StringValue(rule.Connection.ConnectionArn)) d.Set("arn", rule.Connection.ConnectionArn) - d.Set("connection_name", rule.Connection.ConnectionName) + d.Set("name", rule.Connection.ConnectionName) d.Set("connection_status", rule.Connection.ConnectionStatus) d.Set("provider_type", rule.Connection.ProviderType) diff --git a/aws/resource_aws_codestarconnections_connection_test.go 
b/aws/resource_aws_codestarconnections_connection_test.go index 7df409d4eb6..5ad64f8de71 100644 --- a/aws/resource_aws_codestarconnections_connection_test.go +++ b/aws/resource_aws_codestarconnections_connection_test.go @@ -27,7 +27,7 @@ func TestAccAWSCodeStarConnectionsConnection_Basic(t *testing.T) { testAccMatchResourceAttrRegionalARN(resourceName, "id", "codestar-connections", regexp.MustCompile("connection/.+")), testAccMatchResourceAttrRegionalARN(resourceName, "arn", "codestar-connections", regexp.MustCompile("connection/.+")), resource.TestCheckResourceAttr(resourceName, "provider_type", codestarconnections.ProviderTypeBitbucket), - resource.TestCheckResourceAttr(resourceName, "connection_name", rName), + resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "connection_status", codestarconnections.ConnectionStatusPending), ), }, @@ -62,7 +62,7 @@ func testAccCheckAWSCodeStarConnectionsConnectionDestroy(s *terraform.State) err func testAccAWSCodeStarConnectionsConnectionConfigBasic(rName string) string { return fmt.Sprintf(` resource "aws_codestarconnections_connection" "test" { - connection_name = %[1]q + name = %[1]q provider_type = "Bitbucket" } `, rName) From 56b53369cef374fd712a6c1bc6c203139c6e99f3 Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Tue, 15 Dec 2020 18:44:17 +0900 Subject: [PATCH 0225/1212] Modify error messages --- aws/resource_aws_codestarconnections_connection.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_codestarconnections_connection.go index 40fcc3fb477..cf237bcf42b 100644 --- a/aws/resource_aws_codestarconnections_connection.go +++ b/aws/resource_aws_codestarconnections_connection.go @@ -56,7 +56,7 @@ func resourceAwsCodeStarConnectionsConnectionCreate(d *schema.ResourceData, meta res, err := conn.CreateConnection(params) if err != nil { - return fmt.Errorf("error creating codestar connection: %s", err) + return fmt.Errorf("error creating CodeStar connection: %w", err) } d.SetId(aws.StringValue(res.ConnectionArn)) @@ -73,11 +73,11 @@ func resourceAwsCodeStarConnectionsConnectionRead(d *schema.ResourceData, meta i if err != nil { if isAWSErr(err, codestarconnections.ErrCodeResourceNotFoundException, "") { - log.Printf("[WARN] codestar connection (%s) not found, removing from state", d.Id()) + log.Printf("[WARN] CodeStar connection (%s) not found, removing from state", d.Id()) d.SetId("") return nil } - return fmt.Errorf("error reading codestar connection: %s", err) + return fmt.Errorf("error reading CodeStar connection: %s", err) } d.SetId(aws.StringValue(rule.Connection.ConnectionArn)) @@ -97,7 +97,7 @@ func resourceAwsCodeStarConnectionsConnectionDelete(d *schema.ResourceData, meta }) if err != nil { - return fmt.Errorf("error deleting codestar connection: %s", err) + return fmt.Errorf("error deleting CodeStar connection: %w", err) } return nil From 5cc7321d836485371be9931681e34ca0472eee38 Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Tue, 15 Dec 2020 19:31:45 +0900 Subject: [PATCH 0226/1212] Add a check if rule and rule.Connection are nil --- aws/resource_aws_codestarconnections_connection.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/aws/resource_aws_codestarconnections_connection.go index cf237bcf42b..2e2b23167c8 100644 --- a/aws/resource_aws_codestarconnections_connection.go +++ b/aws/resource_aws_codestarconnections_connection.go @@
-80,6 +80,10 @@ func resourceAwsCodeStarConnectionsConnectionRead(d *schema.ResourceData, meta i return fmt.Errorf("error reading CodeStar connection: %s", err) } + if rule == nil || rule.Connection == nil { + return fmt.Errorf("error reading CodeStar connection (%s): empty response", d.Id()) + } + d.SetId(aws.StringValue(rule.Connection.ConnectionArn)) d.Set("arn", rule.Connection.ConnectionArn) d.Set("name", rule.Connection.ConnectionName) From c6919468b80f7c622b3d371b902204a4a9be694b Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Tue, 15 Dec 2020 19:33:21 +0900 Subject: [PATCH 0227/1212] Rename rule to resp --- ...ource_aws_codestarconnections_connection.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/aws/resource_aws_codestarconnections_connection.go b/aws/resource_aws_codestarconnections_connection.go index 2e2b23167c8..cf372fbb839 100644 --- a/aws/resource_aws_codestarconnections_connection.go +++ b/aws/resource_aws_codestarconnections_connection.go @@ -54,12 +54,12 @@ func resourceAwsCodeStarConnectionsConnectionCreate(d *schema.ResourceData, meta ProviderType: aws.String(d.Get("provider_type").(string)), } - res, err := conn.CreateConnection(params) + resp, err := conn.CreateConnection(params) if err != nil { return fmt.Errorf("error creating CodeStar connection: %w", err) } - d.SetId(aws.StringValue(res.ConnectionArn)) + d.SetId(aws.StringValue(resp.ConnectionArn)) return resourceAwsCodeStarConnectionsConnectionRead(d, meta) } @@ -67,7 +67,7 @@ func resourceAwsCodeStarConnectionsConnectionCreate(d *schema.ResourceData, meta func resourceAwsCodeStarConnectionsConnectionRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).codestarconnectionsconn - rule, err := conn.GetConnection(&codestarconnections.GetConnectionInput{ + resp, err := conn.GetConnection(&codestarconnections.GetConnectionInput{ ConnectionArn: aws.String(d.Id()), }) @@ -80,15 +80,15 @@ func resourceAwsCodeStarConnectionsConnectionRead(d *schema.ResourceData, meta i return fmt.Errorf("error reading CodeStar connection: %s", err) } - if rule == nil || rule.Connection == nil { + if resp == nil || resp.Connection == nil { return fmt.Errorf("error reading CodeStar connection (%s): empty response", d.Id()) } - d.SetId(aws.StringValue(rule.Connection.ConnectionArn)) - d.Set("arn", rule.Connection.ConnectionArn) - d.Set("name", rule.Connection.ConnectionName) - d.Set("connection_status", rule.Connection.ConnectionStatus) - d.Set("provider_type", rule.Connection.ProviderType) + d.SetId(aws.StringValue(resp.Connection.ConnectionArn)) + d.Set("arn", resp.Connection.ConnectionArn) + d.Set("name", resp.Connection.ConnectionName) + d.Set("connection_status", resp.Connection.ConnectionStatus) + d.Set("provider_type", resp.Connection.ProviderType) return nil } From 91c5f2d9fb8ccaf474f6ccbb80d3c52991d79c41 Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Tue, 15 Dec 2020 19:37:31 +0900 Subject: [PATCH 0228/1212] Handle a case when a connection is not found --- aws/resource_aws_codestarconnections_connection.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/aws/resource_aws_codestarconnections_connection.go b/aws/resource_aws_codestarconnections_connection.go index cf372fbb839..80e0cea08de 100644 --- a/aws/resource_aws_codestarconnections_connection.go +++ b/aws/resource_aws_codestarconnections_connection.go @@ -101,6 +101,10 @@ func resourceAwsCodeStarConnectionsConnectionDelete(d *schema.ResourceData, meta }) if err != nil { + if isAWSErr(err, 
codestarconnections.ErrCodeResourceNotFoundException, "") { + return nil + } + return fmt.Errorf("error deleting CodeStar connection: %w", err) } From 8a963ff50d68e8ad276530b7178726673349504c Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Tue, 15 Dec 2020 19:52:07 +0900 Subject: [PATCH 0229/1212] Add TestAccAWSCodeStarConnectionsConnection_disappears --- ...urce_aws_codestarconnections_connection.go | 6 +-- ...aws_codestarconnections_connection_test.go | 44 +++++++++++++++++++ 2 files changed, 47 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_codestarconnections_connection.go b/aws/resource_aws_codestarconnections_connection.go index 80e0cea08de..16ffe5d4e86 100644 --- a/aws/resource_aws_codestarconnections_connection.go +++ b/aws/resource_aws_codestarconnections_connection.go @@ -37,9 +37,9 @@ func resourceAwsCodeStarConnectionsConnection() *schema.Resource { }, "provider_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, ValidateFunc: validation.StringInSlice(codestarconnections.ProviderType_Values(), false), }, }, diff --git a/aws/resource_aws_codestarconnections_connection_test.go b/aws/resource_aws_codestarconnections_connection_test.go index 5ad64f8de71..a0d4ae99a78 100644 --- a/aws/resource_aws_codestarconnections_connection_test.go +++ b/aws/resource_aws_codestarconnections_connection_test.go @@ -1,6 +1,7 @@ package aws import ( + "errors" "fmt" "regexp" "testing" @@ -24,6 +25,7 @@ func TestAccAWSCodeStarConnectionsConnection_Basic(t *testing.T) { { Config: testAccAWSCodeStarConnectionsConnectionConfigBasic(rName), Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSCodeStarConnectionsConnectionExists(resourceName), testAccMatchResourceAttrRegionalARN(resourceName, "id", "codestar-connections", regexp.MustCompile("connection/.+")), testAccMatchResourceAttrRegionalARN(resourceName, "arn", "codestar-connections", regexp.MustCompile("connection/.+")), resource.TestCheckResourceAttr(resourceName, "provider_type", codestarconnections.ProviderTypeBitbucket), @@ -40,6 +42,48 @@ func TestAccAWSCodeStarConnectionsConnection_Basic(t *testing.T) { }) } +func TestAccAWSCodeStarConnectionsConnection_disappears(t *testing.T) { + resourceName := "aws_codestarconnections_connection.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCodeStarConnectionsConnectionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCodeStarConnectionsConnectionConfigBasic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSCodeStarConnectionsConnectionExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsCodeStarConnectionsConnection(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckAWSCodeStarConnectionsConnectionExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return errors.New("No CodeStar connection ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).codestarconnectionsconn + + _, err := conn.GetConnection(&codestarconnections.GetConnectionInput{ + ConnectionArn: aws.String(rs.Primary.ID), + }) + + return err + } +} + func testAccCheckAWSCodeStarConnectionsConnectionDestroy(s 
*terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).codestarconnectionsconn From 51e997dcf0c6c9595b2f495c14b7b931dc18a1f5 Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Tue, 15 Dec 2020 20:28:59 +0900 Subject: [PATCH 0230/1212] Fix a lint error --- aws/resource_aws_codestarconnections_connection_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_codestarconnections_connection_test.go b/aws/resource_aws_codestarconnections_connection_test.go index a0d4ae99a78..40753285254 100644 --- a/aws/resource_aws_codestarconnections_connection_test.go +++ b/aws/resource_aws_codestarconnections_connection_test.go @@ -106,8 +106,8 @@ func testAccCheckAWSCodeStarConnectionsConnectionDestroy(s *terraform.State) err func testAccAWSCodeStarConnectionsConnectionConfigBasic(rName string) string { return fmt.Sprintf(` resource "aws_codestarconnections_connection" "test" { - name = %[1]q - provider_type = "Bitbucket" + name = %[1]q + provider_type = "Bitbucket" } `, rName) } From 36c913c7a0f0abffb6c19de05b45b21d7b6fc154 Mon Sep 17 00:00:00 2001 From: Matthew Burgess <549318+mattburgess@users.noreply.github.com> Date: Tue, 15 Dec 2020 13:06:33 +0000 Subject: [PATCH 0231/1212] tests/provider: Fix and enable AT009 lint check (#16637) --- GNUmakefile | 1 - aws/data_source_aws_dynamodb_table_test.go | 2 +- aws/data_source_aws_elb_test.go | 2 +- aws/data_source_aws_instance_test.go | 20 ++-- aws/data_source_aws_kms_key_test.go | 2 +- aws/data_source_aws_lb_listener_test.go | 12 +-- aws/data_source_aws_lb_target_group_test.go | 8 +- aws/data_source_aws_lb_test.go | 6 +- aws/data_source_aws_rds_cluster_test.go | 2 +- ..._source_aws_route53_resolver_rules_test.go | 4 +- aws/resource_aws_alb_target_group_test.go | 24 ++--- aws/resource_aws_backup_plan_test.go | 12 +-- ...ito_identity_pool_roles_attachment_test.go | 14 +-- ...resource_aws_cognito_identity_pool_test.go | 16 +-- ...source_aws_cognito_resource_server_test.go | 14 +-- aws/resource_aws_cognito_user_group_test.go | 12 +-- ...ource_aws_cognito_user_pool_client_test.go | 10 +- ...ource_aws_cognito_user_pool_domain_test.go | 6 +- ...rce_aws_ec2_traffic_mirror_session_test.go | 6 +- ...urce_aws_ec2_traffic_mirror_target_test.go | 8 +- aws/resource_aws_elasticsearch_domain_test.go | 6 +- aws/resource_aws_instance_test.go | 98 +++++++++---------- aws/resource_aws_kms_key_test.go | 12 +-- aws/resource_aws_lb_listener_test.go | 22 ++--- ...rce_aws_lb_target_group_attachment_test.go | 12 +-- aws/resource_aws_lb_target_group_test.go | 36 +++---- aws/resource_aws_lb_test.go | 48 ++++----- ...ce_aws_ram_resource_share_accepter_test.go | 2 +- aws/resource_aws_ram_resource_share_test.go | 10 +- aws/resource_aws_ses_domain_dkim_test.go | 2 +- aws/resource_aws_ses_domain_identity_test.go | 6 +- ...s_ses_domain_identity_verification_test.go | 4 +- aws/resource_aws_ses_domain_mail_from_test.go | 8 +- aws/resource_aws_ses_email_identity_test.go | 4 +- ...ws_ses_identity_notification_topic_test.go | 2 +- aws/resource_aws_ses_identity_policy_test.go | 6 +- aws/resource_aws_shield_protection_test.go | 12 +-- ...c_endpoint_connection_notification_test.go | 2 +- ...c_endpoint_route_table_association_test.go | 2 +- ...endpoint_service_allowed_principal_test.go | 2 +- ...esource_aws_vpc_peering_connection_test.go | 18 ++-- 41 files changed, 247 insertions(+), 248 deletions(-) diff --git a/GNUmakefile b/GNUmakefile index 5c422bf71f7..8c6994b2392 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -87,7 +87,6 @@ golangci-lint: 
awsproviderlint: @awsproviderlint \ -c 1 \ - -AT009=false \ -AWSAT003=false \ -AWSAT006=false \ -AWSV001=false \ diff --git a/aws/data_source_aws_dynamodb_table_test.go b/aws/data_source_aws_dynamodb_table_test.go index ed7703b648a..03a3d66c4a6 100644 --- a/aws/data_source_aws_dynamodb_table_test.go +++ b/aws/data_source_aws_dynamodb_table_test.go @@ -10,7 +10,7 @@ import ( func TestAccDataSourceAwsDynamoDbTable_basic(t *testing.T) { datasourceName := "data.aws_dynamodb_table.test" - tableName := fmt.Sprintf("testaccawsdynamodbtable-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + tableName := fmt.Sprintf("testaccawsdynamodbtable-basic-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, diff --git a/aws/data_source_aws_elb_test.go b/aws/data_source_aws_elb_test.go index c142d4ceb84..813eaeb44bc 100644 --- a/aws/data_source_aws_elb_test.go +++ b/aws/data_source_aws_elb_test.go @@ -10,7 +10,7 @@ import ( func TestAccDataSourceAWSELB_basic(t *testing.T) { // Must be less than 32 characters for ELB name - rName := fmt.Sprintf("TestAccDataSourceAWSELB-%s", acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("TestAccDataSourceAWSELB-%s", acctest.RandString(5)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, diff --git a/aws/data_source_aws_instance_test.go b/aws/data_source_aws_instance_test.go index e8c56d7caaa..aecf29b381c 100644 --- a/aws/data_source_aws_instance_test.go +++ b/aws/data_source_aws_instance_test.go @@ -175,7 +175,7 @@ func TestAccAWSInstanceDataSource_rootInstanceStore(t *testing.T) { func TestAccAWSInstanceDataSource_privateIP(t *testing.T) { resourceName := "aws_instance.test" datasourceName := "data.aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -196,7 +196,7 @@ func TestAccAWSInstanceDataSource_privateIP(t *testing.T) { func TestAccAWSInstanceDataSource_secondaryPrivateIPs(t *testing.T) { resourceName := "aws_instance.test" datasourceName := "data.aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -239,7 +239,7 @@ func TestAccAWSInstanceDataSource_keyPair(t *testing.T) { func TestAccAWSInstanceDataSource_VPC(t *testing.T) { resourceName := "aws_instance.test" datasourceName := "data.aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -262,7 +262,7 @@ func TestAccAWSInstanceDataSource_VPC(t *testing.T) { func TestAccAWSInstanceDataSource_PlacementGroup(t *testing.T) { resourceName := "aws_instance.test" datasourceName := "data.aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -304,7 
+304,7 @@ func TestAccAWSInstanceDataSource_SecurityGroups(t *testing.T) { func TestAccAWSInstanceDataSource_VPCSecurityGroups(t *testing.T) { resourceName := "aws_instance.test" datasourceName := "data.aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -325,7 +325,7 @@ func TestAccAWSInstanceDataSource_VPCSecurityGroups(t *testing.T) { func TestAccAWSInstanceDataSource_getPasswordData_trueToFalse(t *testing.T) { datasourceName := "data.aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -351,7 +351,7 @@ func TestAccAWSInstanceDataSource_getPasswordData_trueToFalse(t *testing.T) { func TestAccAWSInstanceDataSource_getPasswordData_falseToTrue(t *testing.T) { datasourceName := "data.aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -377,7 +377,7 @@ func TestAccAWSInstanceDataSource_getPasswordData_falseToTrue(t *testing.T) { func TestAccAWSInstanceDataSource_GetUserData(t *testing.T) { datasourceName := "data.aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -411,7 +411,7 @@ func TestAccAWSInstanceDataSource_GetUserData(t *testing.T) { func TestAccAWSInstanceDataSource_GetUserData_NoUserData(t *testing.T) { resourceName := "aws_instance.test" datasourceName := "data.aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -448,7 +448,7 @@ func TestAccAWSInstanceDataSource_GetUserData_NoUserData(t *testing.T) { func TestAccAWSInstanceDataSource_creditSpecification(t *testing.T) { resourceName := "aws_instance.test" datasourceName := "data.aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, diff --git a/aws/data_source_aws_kms_key_test.go b/aws/data_source_aws_kms_key_test.go index 920be95fe52..4f374ed25a7 100644 --- a/aws/data_source_aws_kms_key_test.go +++ b/aws/data_source_aws_kms_key_test.go @@ -12,7 +12,7 @@ import ( func TestAccDataSourceAwsKmsKey_basic(t *testing.T) { resourceName := "aws_kms_key.test" datasourceName := "data.aws_kms_key.test" - rName := fmt.Sprintf("tf-testacc-kms-key-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-kms-key-%s", acctest.RandString(13)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { 
testAccPreCheck(t) }, diff --git a/aws/data_source_aws_lb_listener_test.go b/aws/data_source_aws_lb_listener_test.go index 317f9e7d6ce..437bb9d806e 100644 --- a/aws/data_source_aws_lb_listener_test.go +++ b/aws/data_source_aws_lb_listener_test.go @@ -9,8 +9,8 @@ import ( ) func TestAccDataSourceAWSLBListener_basic(t *testing.T) { - lbName := fmt.Sprintf("testlistener-basic-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) - targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testlistener-basic-%s", acctest.RandString(13)) + targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -40,8 +40,8 @@ func TestAccDataSourceAWSLBListener_basic(t *testing.T) { } func TestAccDataSourceAWSLBListener_BackwardsCompatibility(t *testing.T) { - lbName := fmt.Sprintf("testlistener-basic-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) - targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testlistener-basic-%s", acctest.RandString(13)) + targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -71,8 +71,8 @@ func TestAccDataSourceAWSLBListener_BackwardsCompatibility(t *testing.T) { } func TestAccDataSourceAWSLBListener_https(t *testing.T) { - lbName := fmt.Sprintf("testlistener-https-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) - targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testlistener-https-%s", acctest.RandString(13)) + targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandString(10)) key := tlsRsaPrivateKeyPem(2048) certificate := tlsRsaX509SelfSignedCertificatePem(key, "example.com") diff --git a/aws/data_source_aws_lb_target_group_test.go b/aws/data_source_aws_lb_target_group_test.go index 30c5ae39032..561c52666bf 100644 --- a/aws/data_source_aws_lb_target_group_test.go +++ b/aws/data_source_aws_lb_target_group_test.go @@ -9,8 +9,8 @@ import ( ) func TestAccDataSourceAWSALBTargetGroup_basic(t *testing.T) { - lbName := fmt.Sprintf("testlb-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) - targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testlb-%s", acctest.RandString(13)) + targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandString(10)) resourceNameArn := "data.aws_lb_target_group.alb_tg_test_with_arn" resourceName := "data.aws_lb_target_group.alb_tg_test_with_name" @@ -69,8 +69,8 @@ func TestAccDataSourceAWSALBTargetGroup_basic(t *testing.T) { } func TestAccDataSourceAWSLBTargetGroup_BackwardsCompatibility(t *testing.T) { - lbName := fmt.Sprintf("testlb-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) - targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testlb-%s", acctest.RandString(13)) + targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandString(10)) resourceNameArn := "data.aws_alb_target_group.alb_tg_test_with_arn" resourceName := "data.aws_alb_target_group.alb_tg_test_with_name" diff --git a/aws/data_source_aws_lb_test.go 
b/aws/data_source_aws_lb_test.go index d4e108d092d..766db4b0bc9 100644 --- a/aws/data_source_aws_lb_test.go +++ b/aws/data_source_aws_lb_test.go @@ -9,7 +9,7 @@ import ( ) func TestAccDataSourceAWSLB_basic(t *testing.T) { - lbName := fmt.Sprintf("testaccawslb-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testaccawslb-basic-%s", acctest.RandString(10)) dataSourceName := "data.aws_lb.alb_test_with_arn" dataSourceName2 := "data.aws_lb.alb_test_with_name" resourceName := "aws_lb.alb_test" @@ -56,7 +56,7 @@ func TestAccDataSourceAWSLB_basic(t *testing.T) { } func TestAccDataSourceAWSLB_outpost(t *testing.T) { - lbName := fmt.Sprintf("testaccawslb-outpost-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testaccawslb-outpost-%s", acctest.RandString(10)) dataSourceName := "data.aws_lb.alb_test_with_arn" resourceName := "aws_lb.alb_test" @@ -89,7 +89,7 @@ func TestAccDataSourceAWSLB_outpost(t *testing.T) { } func TestAccDataSourceAWSLB_BackwardsCompatibility(t *testing.T) { - lbName := fmt.Sprintf("testaccawsalb-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testaccawsalb-basic-%s", acctest.RandString(10)) dataSourceName1 := "data.aws_alb.alb_test_with_arn" dataSourceName2 := "data.aws_alb.alb_test_with_name" resourceName := "aws_alb.alb_test" diff --git a/aws/data_source_aws_rds_cluster_test.go b/aws/data_source_aws_rds_cluster_test.go index 3d12b3adac0..fe4d956d078 100644 --- a/aws/data_source_aws_rds_cluster_test.go +++ b/aws/data_source_aws_rds_cluster_test.go @@ -9,7 +9,7 @@ import ( ) func TestAccDataSourceAWSRDSCluster_basic(t *testing.T) { - clusterName := fmt.Sprintf("testaccawsrdscluster-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + clusterName := fmt.Sprintf("testaccawsrdscluster-basic-%s", acctest.RandString(10)) dataSourceName := "data.aws_rds_cluster.test" resourceName := "aws_rds_cluster.test" diff --git a/aws/data_source_aws_route53_resolver_rules_test.go b/aws/data_source_aws_route53_resolver_rules_test.go index c692612a040..e22287e880b 100644 --- a/aws/data_source_aws_route53_resolver_rules_test.go +++ b/aws/data_source_aws_route53_resolver_rules_test.go @@ -28,8 +28,8 @@ func TestAccAWSRoute53ResolverRulesDataSource_basic(t *testing.T) { } func TestAccAWSRoute53ResolverRulesDataSource_ResolverEndpointId(t *testing.T) { - rName1 := fmt.Sprintf("tf-testacc-r53-resolver-%s", acctest.RandStringFromCharSet(8, acctest.CharSetAlphaNum)) - rName2 := fmt.Sprintf("tf-testacc-r53-resolver-%s", acctest.RandStringFromCharSet(8, acctest.CharSetAlphaNum)) + rName1 := fmt.Sprintf("tf-testacc-r53-resolver-%s", acctest.RandString(8)) + rName2 := fmt.Sprintf("tf-testacc-r53-resolver-%s", acctest.RandString(8)) ds1ResourceName := "data.aws_route53_resolver_rules.by_resolver_endpoint_id" ds2ResourceName := "data.aws_route53_resolver_rules.by_resolver_endpoint_id_rule_type_share_status" ds3ResourceName := "data.aws_route53_resolver_rules.by_invalid_owner_id" diff --git a/aws/resource_aws_alb_target_group_test.go b/aws/resource_aws_alb_target_group_test.go index 92537679f66..c1061cb746e 100644 --- a/aws/resource_aws_alb_target_group_test.go +++ b/aws/resource_aws_alb_target_group_test.go @@ -46,7 +46,7 @@ func TestALBTargetGroupCloudwatchSuffixFromARN(t *testing.T) { func TestAccAWSALBTargetGroup_basic(t *testing.T) { var conf elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, 
acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -128,8 +128,8 @@ func TestAccAWSALBTargetGroup_generatedName(t *testing.T) { func TestAccAWSALBTargetGroup_changeNameForceNew(t *testing.T) { var before, after elbv2.TargetGroup - targetGroupNameBefore := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - targetGroupNameAfter := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(4, acctest.CharSetAlphaNum)) + targetGroupNameBefore := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) + targetGroupNameAfter := fmt.Sprintf("test-target-group-%s", acctest.RandString(4)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -157,7 +157,7 @@ func TestAccAWSALBTargetGroup_changeNameForceNew(t *testing.T) { func TestAccAWSALBTargetGroup_changeProtocolForceNew(t *testing.T) { var before, after elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -185,7 +185,7 @@ func TestAccAWSALBTargetGroup_changeProtocolForceNew(t *testing.T) { func TestAccAWSALBTargetGroup_changePortForceNew(t *testing.T) { var before, after elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -213,7 +213,7 @@ func TestAccAWSALBTargetGroup_changePortForceNew(t *testing.T) { func TestAccAWSALBTargetGroup_changeVpcForceNew(t *testing.T) { var before, after elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -239,7 +239,7 @@ func TestAccAWSALBTargetGroup_changeVpcForceNew(t *testing.T) { func TestAccAWSALBTargetGroup_tags(t *testing.T) { var conf elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -270,7 +270,7 @@ func TestAccAWSALBTargetGroup_tags(t *testing.T) { func TestAccAWSALBTargetGroup_updateHealthCheck(t *testing.T) { var conf elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -332,7 +332,7 @@ func TestAccAWSALBTargetGroup_updateHealthCheck(t *testing.T) { func TestAccAWSALBTargetGroup_updateSticknessEnabled(t *testing.T) { var conf elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", 
acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -417,7 +417,7 @@ func TestAccAWSALBTargetGroup_updateSticknessEnabled(t *testing.T) { func TestAccAWSALBTargetGroup_setAndUpdateSlowStart(t *testing.T) { var before, after elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -445,7 +445,7 @@ func TestAccAWSALBTargetGroup_setAndUpdateSlowStart(t *testing.T) { func TestAccAWSALBTargetGroup_updateLoadBalancingAlgorithmType(t *testing.T) { var conf elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -628,7 +628,7 @@ func TestAccAWSALBTargetGroup_lambdaMultiValueHeadersEnabled(t *testing.T) { } func TestAccAWSALBTargetGroup_missingPortProtocolVpc(t *testing.T) { - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, diff --git a/aws/resource_aws_backup_plan_test.go b/aws/resource_aws_backup_plan_test.go index 0ed144f1146..30aa7c16796 100644 --- a/aws/resource_aws_backup_plan_test.go +++ b/aws/resource_aws_backup_plan_test.go @@ -16,7 +16,7 @@ import ( func TestAccAwsBackupPlan_basic(t *testing.T) { var plan backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandStringFromCharSet(14, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandString(14)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, @@ -52,7 +52,7 @@ func TestAccAwsBackupPlan_basic(t *testing.T) { func TestAccAwsBackupPlan_withTags(t *testing.T) { var plan backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandStringFromCharSet(14, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandString(14)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, @@ -104,7 +104,7 @@ func TestAccAwsBackupPlan_withTags(t *testing.T) { func TestAccAwsBackupPlan_withRules(t *testing.T) { var plan backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandStringFromCharSet(14, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandString(14)) rule1Name := fmt.Sprintf("%s_1", rName) rule2Name := fmt.Sprintf("%s_2", rName) rule3Name := fmt.Sprintf("%s_3", rName) @@ -189,7 +189,7 @@ func TestAccAwsBackupPlan_withRules(t *testing.T) { func TestAccAwsBackupPlan_withLifecycle(t *testing.T) { var plan backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandStringFromCharSet(14, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandString(14)) resource.ParallelTest(t, 
resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, @@ -260,7 +260,7 @@ func TestAccAwsBackupPlan_withLifecycle(t *testing.T) { func TestAccAwsBackupPlan_withRecoveryPointTags(t *testing.T) { var plan backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandStringFromCharSet(14, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandString(14)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, @@ -560,7 +560,7 @@ func TestAccAwsBackupPlan_AdvancedBackupSetting(t *testing.T) { func TestAccAwsBackupPlan_disappears(t *testing.T) { var plan backup.GetBackupPlanOutput resourceName := "aws_backup_plan.test" - rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandStringFromCharSet(14, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandString(14)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, diff --git a/aws/resource_aws_cognito_identity_pool_roles_attachment_test.go b/aws/resource_aws_cognito_identity_pool_roles_attachment_test.go index e1c56db5c76..af012f813e5 100644 --- a/aws/resource_aws_cognito_identity_pool_roles_attachment_test.go +++ b/aws/resource_aws_cognito_identity_pool_roles_attachment_test.go @@ -15,8 +15,8 @@ import ( func TestAccAWSCognitoIdentityPoolRolesAttachment_basic(t *testing.T) { resourceName := "aws_cognito_identity_pool_roles_attachment.test" - name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - updatedName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + name := acctest.RandString(10) + updatedName := acctest.RandString(10) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, @@ -50,7 +50,7 @@ func TestAccAWSCognitoIdentityPoolRolesAttachment_basic(t *testing.T) { func TestAccAWSCognitoIdentityPoolRolesAttachment_roleMappings(t *testing.T) { resourceName := "aws_cognito_identity_pool_roles_attachment.test" - name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + name := acctest.RandString(10) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, @@ -104,7 +104,7 @@ func TestAccAWSCognitoIdentityPoolRolesAttachment_roleMappings(t *testing.T) { func TestAccAWSCognitoIdentityPoolRolesAttachment_disappears(t *testing.T) { resourceName := "aws_cognito_identity_pool_roles_attachment.test" - name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + name := acctest.RandString(10) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, @@ -124,7 +124,7 @@ func TestAccAWSCognitoIdentityPoolRolesAttachment_disappears(t *testing.T) { } func TestAccAWSCognitoIdentityPoolRolesAttachment_roleMappingsWithAmbiguousRoleResolutionError(t *testing.T) { - name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + name := acctest.RandString(10) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, @@ -140,7 +140,7 @@ func TestAccAWSCognitoIdentityPoolRolesAttachment_roleMappingsWithAmbiguousRoleR } func TestAccAWSCognitoIdentityPoolRolesAttachment_roleMappingsWithRulesTypeError(t *testing.T) { - name := acctest.RandStringFromCharSet(10, 
acctest.CharSetAlphaNum) + name := acctest.RandString(10) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, @@ -156,7 +156,7 @@ func TestAccAWSCognitoIdentityPoolRolesAttachment_roleMappingsWithRulesTypeError } func TestAccAWSCognitoIdentityPoolRolesAttachment_roleMappingsWithTokenTypeError(t *testing.T) { - name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + name := acctest.RandString(10) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, diff --git a/aws/resource_aws_cognito_identity_pool_test.go b/aws/resource_aws_cognito_identity_pool_test.go index f1525c0ba28..22029caf626 100644 --- a/aws/resource_aws_cognito_identity_pool_test.go +++ b/aws/resource_aws_cognito_identity_pool_test.go @@ -15,8 +15,8 @@ import ( ) func TestAccAWSCognitoIdentityPool_basic(t *testing.T) { - name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - updatedName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + name := acctest.RandString(10) + updatedName := acctest.RandString(10) resourceName := "aws_cognito_identity_pool.main" resource.ParallelTest(t, resource.TestCase{ @@ -50,7 +50,7 @@ func TestAccAWSCognitoIdentityPool_basic(t *testing.T) { } func TestAccAWSCognitoIdentityPool_supportedLoginProviders(t *testing.T) { - name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + name := acctest.RandString(10) resourceName := "aws_cognito_identity_pool.main" resource.ParallelTest(t, resource.TestCase{ @@ -92,7 +92,7 @@ func TestAccAWSCognitoIdentityPool_supportedLoginProviders(t *testing.T) { } func TestAccAWSCognitoIdentityPool_openidConnectProviderArns(t *testing.T) { - name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + name := acctest.RandString(10) resourceName := "aws_cognito_identity_pool.main" resource.ParallelTest(t, resource.TestCase{ @@ -133,7 +133,7 @@ func TestAccAWSCognitoIdentityPool_openidConnectProviderArns(t *testing.T) { } func TestAccAWSCognitoIdentityPool_samlProviderArns(t *testing.T) { - name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + name := acctest.RandString(10) resourceName := "aws_cognito_identity_pool.main" resource.ParallelTest(t, resource.TestCase{ @@ -175,7 +175,7 @@ func TestAccAWSCognitoIdentityPool_samlProviderArns(t *testing.T) { } func TestAccAWSCognitoIdentityPool_cognitoIdentityProviders(t *testing.T) { - name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + name := acctest.RandString(10) resourceName := "aws_cognito_identity_pool.main" resource.ParallelTest(t, resource.TestCase{ @@ -229,7 +229,7 @@ func TestAccAWSCognitoIdentityPool_cognitoIdentityProviders(t *testing.T) { } func TestAccAWSCognitoIdentityPool_addingNewProviderKeepsOldProvider(t *testing.T) { - name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + name := acctest.RandString(10) resourceName := "aws_cognito_identity_pool.main" resource.Test(t, resource.TestCase{ @@ -273,7 +273,7 @@ func TestAccAWSCognitoIdentityPool_addingNewProviderKeepsOldProvider(t *testing. 
} func TestAccAWSCognitoIdentityPool_tags(t *testing.T) { - name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + name := acctest.RandString(10) resourceName := "aws_cognito_identity_pool.main" resource.ParallelTest(t, resource.TestCase{ diff --git a/aws/resource_aws_cognito_resource_server_test.go b/aws/resource_aws_cognito_resource_server_test.go index ab7308ecc66..0cc3312a576 100644 --- a/aws/resource_aws_cognito_resource_server_test.go +++ b/aws/resource_aws_cognito_resource_server_test.go @@ -14,10 +14,10 @@ import ( func TestAccAWSCognitoResourceServer_basic(t *testing.T) { var resourceServer cognitoidentityprovider.ResourceServerType - identifier := fmt.Sprintf("tf-acc-test-resource-server-id-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - name1 := fmt.Sprintf("tf-acc-test-resource-server-name-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - name2 := fmt.Sprintf("tf-acc-test-resource-server-name-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - poolName := fmt.Sprintf("tf-acc-test-pool-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + identifier := fmt.Sprintf("tf-acc-test-resource-server-id-%s", acctest.RandString(10)) + name1 := fmt.Sprintf("tf-acc-test-resource-server-name-%s", acctest.RandString(10)) + name2 := fmt.Sprintf("tf-acc-test-resource-server-name-%s", acctest.RandString(10)) + poolName := fmt.Sprintf("tf-acc-test-pool-%s", acctest.RandString(10)) resourceName := "aws_cognito_resource_server.main" resource.ParallelTest(t, resource.TestCase{ @@ -56,9 +56,9 @@ func TestAccAWSCognitoResourceServer_basic(t *testing.T) { func TestAccAWSCognitoResourceServer_scope(t *testing.T) { var resourceServer cognitoidentityprovider.ResourceServerType - identifier := fmt.Sprintf("tf-acc-test-resource-server-id-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - name := fmt.Sprintf("tf-acc-test-resource-server-name-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - poolName := fmt.Sprintf("tf-acc-test-pool-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + identifier := fmt.Sprintf("tf-acc-test-resource-server-id-%s", acctest.RandString(10)) + name := fmt.Sprintf("tf-acc-test-resource-server-name-%s", acctest.RandString(10)) + poolName := fmt.Sprintf("tf-acc-test-pool-%s", acctest.RandString(10)) resourceName := "aws_cognito_resource_server.main" resource.ParallelTest(t, resource.TestCase{ diff --git a/aws/resource_aws_cognito_user_group_test.go b/aws/resource_aws_cognito_user_group_test.go index a349ceb9826..ee3ded40641 100644 --- a/aws/resource_aws_cognito_user_group_test.go +++ b/aws/resource_aws_cognito_user_group_test.go @@ -14,9 +14,9 @@ import ( ) func TestAccAWSCognitoUserGroup_basic(t *testing.T) { - poolName := fmt.Sprintf("tf-acc-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - groupName := fmt.Sprintf("tf-acc-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - updatedGroupName := fmt.Sprintf("tf-acc-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + poolName := fmt.Sprintf("tf-acc-%s", acctest.RandString(10)) + groupName := fmt.Sprintf("tf-acc-%s", acctest.RandString(10)) + updatedGroupName := fmt.Sprintf("tf-acc-%s", acctest.RandString(10)) resourceName := "aws_cognito_user_group.main" resource.ParallelTest(t, resource.TestCase{ @@ -48,9 +48,9 @@ func TestAccAWSCognitoUserGroup_basic(t *testing.T) { } func TestAccAWSCognitoUserGroup_complex(t *testing.T) { - poolName := fmt.Sprintf("tf-acc-%s", 
acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - groupName := fmt.Sprintf("tf-acc-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - updatedGroupName := fmt.Sprintf("tf-acc-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + poolName := fmt.Sprintf("tf-acc-%s", acctest.RandString(10)) + groupName := fmt.Sprintf("tf-acc-%s", acctest.RandString(10)) + updatedGroupName := fmt.Sprintf("tf-acc-%s", acctest.RandString(10)) resourceName := "aws_cognito_user_group.main" resource.ParallelTest(t, resource.TestCase{ diff --git a/aws/resource_aws_cognito_user_pool_client_test.go b/aws/resource_aws_cognito_user_pool_client_test.go index 82befdf461c..75d922230d5 100644 --- a/aws/resource_aws_cognito_user_pool_client_test.go +++ b/aws/resource_aws_cognito_user_pool_client_test.go @@ -15,7 +15,7 @@ import ( func TestAccAWSCognitoUserPoolClient_basic(t *testing.T) { var client cognitoidentityprovider.UserPoolClientType userPoolName := fmt.Sprintf("tf-acc-cognito-user-pool-%s", acctest.RandString(7)) - clientName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + clientName := acctest.RandString(10) resourceName := "aws_cognito_user_pool_client.test" resource.ParallelTest(t, resource.TestCase{ @@ -113,7 +113,7 @@ func TestAccAWSCognitoUserPoolClient_Name(t *testing.T) { func TestAccAWSCognitoUserPoolClient_allFields(t *testing.T) { var client cognitoidentityprovider.UserPoolClientType userPoolName := fmt.Sprintf("tf-acc-cognito-user-pool-%s", acctest.RandString(7)) - clientName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + clientName := acctest.RandString(10) resourceName := "aws_cognito_user_pool_client.test" resource.ParallelTest(t, resource.TestCase{ @@ -169,7 +169,7 @@ func TestAccAWSCognitoUserPoolClient_allFields(t *testing.T) { func TestAccAWSCognitoUserPoolClient_allFieldsUpdatingOneField(t *testing.T) { var client cognitoidentityprovider.UserPoolClientType userPoolName := fmt.Sprintf("tf-acc-cognito-user-pool-%s", acctest.RandString(7)) - clientName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + clientName := acctest.RandString(10) resourceName := "aws_cognito_user_pool_client.test" resource.ParallelTest(t, resource.TestCase{ @@ -228,7 +228,7 @@ func TestAccAWSCognitoUserPoolClient_allFieldsUpdatingOneField(t *testing.T) { func TestAccAWSCognitoUserPoolClient_analyticsConfig(t *testing.T) { var client cognitoidentityprovider.UserPoolClientType userPoolName := fmt.Sprintf("tf-acc-cognito-user-pool-%s", acctest.RandString(7)) - clientName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + clientName := acctest.RandString(10) resourceName := "aws_cognito_user_pool_client.test" resource.ParallelTest(t, resource.TestCase{ @@ -278,7 +278,7 @@ func TestAccAWSCognitoUserPoolClient_analyticsConfig(t *testing.T) { func TestAccAWSCognitoUserPoolClient_disappears(t *testing.T) { var client cognitoidentityprovider.UserPoolClientType userPoolName := fmt.Sprintf("tf-acc-cognito-user-pool-%s", acctest.RandString(7)) - clientName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + clientName := acctest.RandString(10) resourceName := "aws_cognito_user_pool_client.test" resource.ParallelTest(t, resource.TestCase{ diff --git a/aws/resource_aws_cognito_user_pool_domain_test.go b/aws/resource_aws_cognito_user_pool_domain_test.go index ba196dd2a3f..11c4d6149eb 100644 --- a/aws/resource_aws_cognito_user_pool_domain_test.go +++ b/aws/resource_aws_cognito_user_pool_domain_test.go @@ -75,7 +75,7 @@ func 
testSweepCognitoUserPoolDomains(region string) error { func TestAccAWSCognitoUserPoolDomain_basic(t *testing.T) { domainName := fmt.Sprintf("tf-acc-test-domain-%d", acctest.RandInt()) - poolName := fmt.Sprintf("tf-acc-test-pool-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + poolName := fmt.Sprintf("tf-acc-test-pool-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, @@ -106,7 +106,7 @@ func TestAccAWSCognitoUserPoolDomain_basic(t *testing.T) { func TestAccAWSCognitoUserPoolDomain_custom(t *testing.T) { rootDomain := testAccAwsAcmCertificateDomainFromEnv(t) domain := testAccAwsAcmCertificateRandomSubDomain(rootDomain) - poolName := fmt.Sprintf("tf-acc-test-pool-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + poolName := fmt.Sprintf("tf-acc-test-pool-%s", acctest.RandString(10)) acmCertificateResourceName := "aws_acm_certificate.test" cognitoUserPoolResourceName := "aws_cognito_user_pool.test" @@ -142,7 +142,7 @@ func TestAccAWSCognitoUserPoolDomain_custom(t *testing.T) { func TestAccAWSCognitoUserPoolDomain_disappears(t *testing.T) { domainName := fmt.Sprintf("tf-acc-test-domain-%d", acctest.RandInt()) - poolName := fmt.Sprintf("tf-acc-test-pool-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + poolName := fmt.Sprintf("tf-acc-test-pool-%s", acctest.RandString(10)) resourceName := "aws_cognito_user_pool_domain.main" resource.ParallelTest(t, resource.TestCase{ diff --git a/aws/resource_aws_ec2_traffic_mirror_session_test.go b/aws/resource_aws_ec2_traffic_mirror_session_test.go index e6f7a68db17..e72729664d4 100644 --- a/aws/resource_aws_ec2_traffic_mirror_session_test.go +++ b/aws/resource_aws_ec2_traffic_mirror_session_test.go @@ -18,7 +18,7 @@ func TestAccAWSEc2TrafficMirrorSession_basic(t *testing.T) { resourceName := "aws_ec2_traffic_mirror_session.test" description := "test session" session := acctest.RandIntRange(1, 32766) - rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) pLen := acctest.RandIntRange(1, 255) vni := acctest.RandIntRange(1, 16777216) @@ -79,7 +79,7 @@ func TestAccAWSEc2TrafficMirrorSession_tags(t *testing.T) { var v ec2.TrafficMirrorSession resourceName := "aws_ec2_traffic_mirror_session.test" session := acctest.RandIntRange(1, 32766) - rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { @@ -127,7 +127,7 @@ func TestAccAWSEc2TrafficMirrorSession_disappears(t *testing.T) { var v ec2.TrafficMirrorSession resourceName := "aws_ec2_traffic_mirror_session.test" session := acctest.RandIntRange(1, 32766) - rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { diff --git a/aws/resource_aws_ec2_traffic_mirror_target_test.go b/aws/resource_aws_ec2_traffic_mirror_target_test.go index 486d44fbbbd..6c39895493d 100644 --- a/aws/resource_aws_ec2_traffic_mirror_target_test.go +++ b/aws/resource_aws_ec2_traffic_mirror_target_test.go @@ -16,7 +16,7 @@ func TestAccAWSEc2TrafficMirrorTarget_nlb(t *testing.T) { var v ec2.TrafficMirrorTarget resourceName := 
"aws_ec2_traffic_mirror_target.test" description := "test nlb target" - rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { @@ -48,7 +48,7 @@ func TestAccAWSEc2TrafficMirrorTarget_nlb(t *testing.T) { func TestAccAWSEc2TrafficMirrorTarget_eni(t *testing.T) { var v ec2.TrafficMirrorTarget resourceName := "aws_ec2_traffic_mirror_target.test" - rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) description := "test eni target" resource.ParallelTest(t, resource.TestCase{ @@ -81,7 +81,7 @@ func TestAccAWSEc2TrafficMirrorTarget_tags(t *testing.T) { var v ec2.TrafficMirrorTarget resourceName := "aws_ec2_traffic_mirror_target.test" description := "test nlb target" - rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { @@ -129,7 +129,7 @@ func TestAccAWSEc2TrafficMirrorTarget_disappears(t *testing.T) { var v ec2.TrafficMirrorTarget resourceName := "aws_ec2_traffic_mirror_target.test" description := "test nlb target" - rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { diff --git a/aws/resource_aws_elasticsearch_domain_test.go b/aws/resource_aws_elasticsearch_domain_test.go index 3694d7673ee..ad5dc7d74b0 100644 --- a/aws/resource_aws_elasticsearch_domain_test.go +++ b/aws/resource_aws_elasticsearch_domain_test.go @@ -123,7 +123,7 @@ func TestAccAWSElasticSearchDomain_RequireHTTPS(t *testing.T) { func TestAccAWSElasticSearchDomain_ClusterConfig_ZoneAwarenessConfig(t *testing.T) { var domain1, domain2, domain3, domain4 elasticsearch.ElasticsearchDomainStatus - rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(16, acctest.CharSetAlphaNum)) // len = 28 + rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(16)) // len = 28 resourceName := "aws_elasticsearch_domain.test" resource.ParallelTest(t, resource.TestCase{ @@ -181,7 +181,7 @@ func TestAccAWSElasticSearchDomain_ClusterConfig_ZoneAwarenessConfig(t *testing. 
func TestAccAWSElasticSearchDomain_warm(t *testing.T) { var domain elasticsearch.ElasticsearchDomainStatus - rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(16, acctest.CharSetAlphaNum)) // len = 28 + rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(16)) // len = 28 resourceName := "aws_elasticsearch_domain.test" resource.ParallelTest(t, resource.TestCase{ @@ -1005,7 +1005,7 @@ func TestAccAWSElasticSearchDomain_update_volume_type(t *testing.T) { func TestAccAWSElasticSearchDomain_WithVolumeType_Missing(t *testing.T) { var domain elasticsearch.ElasticsearchDomainStatus resourceName := "aws_elasticsearch_domain.test" - rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandStringFromCharSet(16, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(16)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckIamServiceLinkedRoleEs(t) }, diff --git a/aws/resource_aws_instance_test.go b/aws/resource_aws_instance_test.go index 2132b71a968..a9bc65fc5c9 100644 --- a/aws/resource_aws_instance_test.go +++ b/aws/resource_aws_instance_test.go @@ -153,7 +153,7 @@ func TestFetchRootDevice(t *testing.T) { func TestAccAWSInstance_inDefaultVpcBySgName(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -178,7 +178,7 @@ func TestAccAWSInstance_inDefaultVpcBySgName(t *testing.T) { func TestAccAWSInstance_inDefaultVpcBySgId(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -658,7 +658,7 @@ func TestAccAWSInstance_noAMIEphemeralDevices(t *testing.T) { func TestAccAWSInstance_sourceDestCheck(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) testCheck := func(enabled bool) resource.TestCheckFunc { return func(*terraform.State) error { @@ -712,7 +712,7 @@ func TestAccAWSInstance_sourceDestCheck(t *testing.T) { func TestAccAWSInstance_disableApiTermination(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) checkDisableApiTermination := func(expected bool) resource.TestCheckFunc { return func(*terraform.State) error { @@ -764,7 +764,7 @@ func TestAccAWSInstance_disableApiTermination(t *testing.T) { func TestAccAWSInstance_dedicatedInstance(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -822,7 +822,7 @@ func TestAccAWSInstance_outpost(t *testing.T) { func 
TestAccAWSInstance_placementGroup(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -850,7 +850,7 @@ func TestAccAWSInstance_placementGroup(t *testing.T) { func TestAccAWSInstance_ipv6_supportAddressCount(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -874,7 +874,7 @@ func TestAccAWSInstance_ipv6_supportAddressCount(t *testing.T) { } func TestAccAWSInstance_ipv6AddressCountAndSingleAddressCausesError(t *testing.T) { - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -892,7 +892,7 @@ func TestAccAWSInstance_ipv6AddressCountAndSingleAddressCausesError(t *testing.T func TestAccAWSInstance_ipv6_supportAddressCountWithIpv4(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -918,7 +918,7 @@ func TestAccAWSInstance_ipv6_supportAddressCountWithIpv4(t *testing.T) { func TestAccAWSInstance_NetworkInstanceSecurityGroups(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -945,7 +945,7 @@ func TestAccAWSInstance_NetworkInstanceSecurityGroups(t *testing.T) { func TestAccAWSInstance_NetworkInstanceRemovingAllSecurityGroups(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -982,7 +982,7 @@ func TestAccAWSInstance_NetworkInstanceRemovingAllSecurityGroups(t *testing.T) { func TestAccAWSInstance_NetworkInstanceVPCSecurityGroupIDs(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -1118,8 +1118,8 @@ func TestAccAWSInstance_volumeTagsComputed(t *testing.T) { func TestAccAWSInstance_instanceProfileChange(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) - 
rName2 := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) + rName2 := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) testCheckInstanceProfile := func() resource.TestCheckFunc { return func(*terraform.State) error { @@ -1175,7 +1175,7 @@ func TestAccAWSInstance_instanceProfileChange(t *testing.T) { func TestAccAWSInstance_withIamInstanceProfile(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) testCheckInstanceProfile := func() resource.TestCheckFunc { return func(*terraform.State) error { @@ -1212,7 +1212,7 @@ func TestAccAWSInstance_withIamInstanceProfile(t *testing.T) { func TestAccAWSInstance_privateIP(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) testCheckPrivateIP := func() resource.TestCheckFunc { return func(*terraform.State) error { @@ -1249,7 +1249,7 @@ func TestAccAWSInstance_privateIP(t *testing.T) { func TestAccAWSInstance_associatePublicIPAndPrivateIP(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) testCheckPrivateIP := func() resource.TestCheckFunc { return func(*terraform.State) error { @@ -1289,7 +1289,7 @@ func TestAccAWSInstance_associatePublicIPAndPrivateIP(t *testing.T) { func TestAccAWSInstance_Empty_PrivateIP(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) testCheckPrivateIP := func() resource.TestCheckFunc { return func(*terraform.State) error { @@ -1328,7 +1328,7 @@ func TestAccAWSInstance_Empty_PrivateIP(t *testing.T) { func TestAccAWSInstance_keyPairCheck(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) testCheckKeyPair := func(keyName string) resource.TestCheckFunc { return func(*terraform.State) error { @@ -1400,7 +1400,7 @@ func TestAccAWSInstance_rootBlockDeviceMismatch(t *testing.T) { func TestAccAWSInstance_forceNewAndTagsDrift(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -1900,7 +1900,7 @@ func TestAccAWSInstance_primaryNetworkInterface(t *testing.T) { var eni ec2.NetworkInterface resourceName := "aws_instance.test" eniResourceName := "aws_network_interface.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", 
acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -1930,7 +1930,7 @@ func TestAccAWSInstance_primaryNetworkInterfaceSourceDestCheck(t *testing.T) { var eni ec2.NetworkInterface resourceName := "aws_instance.test" eniResourceName := "aws_network_interface.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -1963,7 +1963,7 @@ func TestAccAWSInstance_addSecondaryInterface(t *testing.T) { resourceName := "aws_instance.test" eniPrimaryResourceName := "aws_network_interface.primary" eniSecondaryResourceName := "aws_network_interface.secondary" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2001,7 +2001,7 @@ func TestAccAWSInstance_addSecurityGroupNetworkInterface(t *testing.T) { var before ec2.Instance var after ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2225,7 +2225,7 @@ func TestAccAWSInstance_NewNetworkInterface_PrivateIPAndSecondaryPrivateIPsUpdat func TestAccAWSInstance_associatePublic_defaultPrivate(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2253,7 +2253,7 @@ func TestAccAWSInstance_associatePublic_defaultPrivate(t *testing.T) { func TestAccAWSInstance_associatePublic_defaultPublic(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2281,7 +2281,7 @@ func TestAccAWSInstance_associatePublic_defaultPublic(t *testing.T) { func TestAccAWSInstance_associatePublic_explicitPublic(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2309,7 +2309,7 @@ func TestAccAWSInstance_associatePublic_explicitPublic(t *testing.T) { func TestAccAWSInstance_associatePublic_explicitPrivate(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2337,7 +2337,7 @@ func 
TestAccAWSInstance_associatePublic_explicitPrivate(t *testing.T) { func TestAccAWSInstance_associatePublic_overridePublic(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2365,7 +2365,7 @@ func TestAccAWSInstance_associatePublic_overridePublic(t *testing.T) { func TestAccAWSInstance_associatePublic_overridePrivate(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2392,7 +2392,7 @@ func TestAccAWSInstance_associatePublic_overridePrivate(t *testing.T) { func TestAccAWSInstance_getPasswordData_falseToTrue(t *testing.T) { var before, after ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2428,7 +2428,7 @@ func TestAccAWSInstance_getPasswordData_falseToTrue(t *testing.T) { func TestAccAWSInstance_getPasswordData_trueToFalse(t *testing.T) { var before, after ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2465,7 +2465,7 @@ func TestAccAWSInstance_getPasswordData_trueToFalse(t *testing.T) { func TestAccAWSInstance_CreditSpecification_Empty_NonBurstable(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2492,7 +2492,7 @@ func TestAccAWSInstance_CreditSpecification_Empty_NonBurstable(t *testing.T) { func TestAccAWSInstance_CreditSpecification_UnspecifiedToEmpty_NonBurstable(t *testing.T) { var instance ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2523,7 +2523,7 @@ func TestAccAWSInstance_CreditSpecification_UnspecifiedToEmpty_NonBurstable(t *t func TestAccAWSInstance_creditSpecification_unspecifiedDefaultsToStandard(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2550,7 +2550,7 @@ func 
TestAccAWSInstance_creditSpecification_unspecifiedDefaultsToStandard(t *tes func TestAccAWSInstance_creditSpecification_standardCpuCredits(t *testing.T) { var first, second ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2585,7 +2585,7 @@ func TestAccAWSInstance_creditSpecification_standardCpuCredits(t *testing.T) { func TestAccAWSInstance_creditSpecification_unlimitedCpuCredits(t *testing.T) { var first, second ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2620,7 +2620,7 @@ func TestAccAWSInstance_creditSpecification_unlimitedCpuCredits(t *testing.T) { func TestAccAWSInstance_creditSpecification_unknownCpuCredits_t2(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2647,7 +2647,7 @@ func TestAccAWSInstance_creditSpecification_unknownCpuCredits_t2(t *testing.T) { func TestAccAWSInstance_creditSpecification_unknownCpuCredits_t3(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2674,7 +2674,7 @@ func TestAccAWSInstance_creditSpecification_unknownCpuCredits_t3(t *testing.T) { func TestAccAWSInstance_creditSpecification_updateCpuCredits(t *testing.T) { var first, second, third ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2717,7 +2717,7 @@ func TestAccAWSInstance_creditSpecification_updateCpuCredits(t *testing.T) { func TestAccAWSInstance_creditSpecification_isNotAppliedToNonBurstable(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2743,7 +2743,7 @@ func TestAccAWSInstance_creditSpecification_isNotAppliedToNonBurstable(t *testin func TestAccAWSInstance_creditSpecificationT3_unspecifiedDefaultsToUnlimited(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { 
testAccPreCheck(t) }, @@ -2770,7 +2770,7 @@ func TestAccAWSInstance_creditSpecificationT3_unspecifiedDefaultsToUnlimited(t * func TestAccAWSInstance_creditSpecificationT3_standardCpuCredits(t *testing.T) { var first, second ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2805,7 +2805,7 @@ func TestAccAWSInstance_creditSpecificationT3_standardCpuCredits(t *testing.T) { func TestAccAWSInstance_creditSpecificationT3_unlimitedCpuCredits(t *testing.T) { var first, second ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2840,7 +2840,7 @@ func TestAccAWSInstance_creditSpecificationT3_unlimitedCpuCredits(t *testing.T) func TestAccAWSInstance_creditSpecificationT3_updateCpuCredits(t *testing.T) { var first, second, third ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2883,7 +2883,7 @@ func TestAccAWSInstance_creditSpecificationT3_updateCpuCredits(t *testing.T) { func TestAccAWSInstance_creditSpecification_standardCpuCredits_t2Tot3Taint(t *testing.T) { var before, after ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2919,7 +2919,7 @@ func TestAccAWSInstance_creditSpecification_standardCpuCredits_t2Tot3Taint(t *te func TestAccAWSInstance_creditSpecification_unlimitedCpuCredits_t2Tot3Taint(t *testing.T) { var before, after ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2976,7 +2976,7 @@ func TestAccAWSInstance_disappears(t *testing.T) { func TestAccAWSInstance_UserData_EmptyStringToUnspecified(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -3008,7 +3008,7 @@ func TestAccAWSInstance_UserData_EmptyStringToUnspecified(t *testing.T) { func TestAccAWSInstance_UserData_UnspecifiedToEmptyString(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandStringFromCharSet(12, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) resource.ParallelTest(t, 
resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, diff --git a/aws/resource_aws_kms_key_test.go b/aws/resource_aws_kms_key_test.go index 2831ae4489e..c4be79ad2c3 100644 --- a/aws/resource_aws_kms_key_test.go +++ b/aws/resource_aws_kms_key_test.go @@ -69,7 +69,7 @@ func testSweepKmsKeys(region string) error { func TestAccAWSKmsKey_basic(t *testing.T) { var key kms.KeyMetadata - rName := fmt.Sprintf("tf-testacc-kms-key-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-kms-key-%s", acctest.RandString(13)) resourceName := "aws_kms_key.test" resource.ParallelTest(t, resource.TestCase{ @@ -97,7 +97,7 @@ func TestAccAWSKmsKey_basic(t *testing.T) { func TestAccAWSKmsKey_asymmetricKey(t *testing.T) { var key kms.KeyMetadata - rName := fmt.Sprintf("tf-testacc-kms-key-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-kms-key-%s", acctest.RandString(13)) resourceName := "aws_kms_key.test" resource.ParallelTest(t, resource.TestCase{ @@ -119,7 +119,7 @@ func TestAccAWSKmsKey_asymmetricKey(t *testing.T) { func TestAccAWSKmsKey_disappears(t *testing.T) { var key kms.KeyMetadata - rName := fmt.Sprintf("tf-testacc-kms-key-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-kms-key-%s", acctest.RandString(13)) resourceName := "aws_kms_key.test" resource.ParallelTest(t, resource.TestCase{ @@ -141,7 +141,7 @@ func TestAccAWSKmsKey_disappears(t *testing.T) { func TestAccAWSKmsKey_policy(t *testing.T) { var key kms.KeyMetadata - rName := fmt.Sprintf("tf-testacc-kms-key-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-kms-key-%s", acctest.RandString(13)) resourceName := "aws_kms_key.test" expectedPolicyText := `{"Version":"2012-10-17","Id":"kms-tf-1","Statement":[{"Sid":"Enable IAM User Permissions","Effect":"Allow","Principal":{"AWS":"*"},"Action":"kms:*","Resource":"*"}]}` @@ -228,7 +228,7 @@ func TestAccAWSKmsKey_Policy_IamServiceLinkedRole(t *testing.T) { func TestAccAWSKmsKey_isEnabled(t *testing.T) { var key1, key2, key3 kms.KeyMetadata - rName := fmt.Sprintf("tf-testacc-kms-key-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-kms-key-%s", acctest.RandString(13)) resourceName := "aws_kms_key.test" resource.ParallelTest(t, resource.TestCase{ @@ -275,7 +275,7 @@ func TestAccAWSKmsKey_isEnabled(t *testing.T) { func TestAccAWSKmsKey_tags(t *testing.T) { var key kms.KeyMetadata - rName := fmt.Sprintf("tf-testacc-kms-key-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-kms-key-%s", acctest.RandString(13)) resourceName := "aws_kms_key.test" resource.ParallelTest(t, resource.TestCase{ diff --git a/aws/resource_aws_lb_listener_test.go b/aws/resource_aws_lb_listener_test.go index 7149f8f0c04..9eb954f46c0 100644 --- a/aws/resource_aws_lb_listener_test.go +++ b/aws/resource_aws_lb_listener_test.go @@ -15,8 +15,8 @@ import ( func TestAccAWSLBListener_basic(t *testing.T) { var conf elbv2.Listener - lbName := fmt.Sprintf("testlistener-basic-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) - targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testlistener-basic-%s", acctest.RandString(13)) + targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ 
PreCheck: func() { testAccPreCheck(t) }, @@ -47,9 +47,9 @@ func TestAccAWSLBListener_basic(t *testing.T) { func TestAccAWSLBListener_forwardWeighted(t *testing.T) { var conf elbv2.Listener resourceName := "aws_lb_listener.weighted" - lbName := fmt.Sprintf("testlistener-basic-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) - targetGroupName1 := fmt.Sprintf("testtargetgroup-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - targetGroupName2 := fmt.Sprintf("testtargetgroup-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testlistener-basic-%s", acctest.RandString(13)) + targetGroupName1 := fmt.Sprintf("testtargetgroup-%s", acctest.RandString(10)) + targetGroupName2 := fmt.Sprintf("testtargetgroup-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -119,8 +119,8 @@ func TestAccAWSLBListener_forwardWeighted(t *testing.T) { func TestAccAWSLBListener_basicUdp(t *testing.T) { var conf elbv2.Listener - lbName := fmt.Sprintf("testlistener-basic-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) - targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testlistener-basic-%s", acctest.RandString(13)) + targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -150,8 +150,8 @@ func TestAccAWSLBListener_basicUdp(t *testing.T) { func TestAccAWSLBListener_BackwardsCompatibility(t *testing.T) { var conf elbv2.Listener - lbName := fmt.Sprintf("testlistener-basic-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) - targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testlistener-basic-%s", acctest.RandString(13)) + targetGroupName := fmt.Sprintf("testtargetgroup-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -262,7 +262,7 @@ func TestAccAWSLBListener_Protocol_Tls(t *testing.T) { func TestAccAWSLBListener_redirect(t *testing.T) { var conf elbv2.Listener - lbName := fmt.Sprintf("testlistener-redirect-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testlistener-redirect-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -298,7 +298,7 @@ func TestAccAWSLBListener_redirect(t *testing.T) { func TestAccAWSLBListener_fixedResponse(t *testing.T) { var conf elbv2.Listener - lbName := fmt.Sprintf("testlistener-fixedresponse-%s", acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testlistener-fixedresponse-%s", acctest.RandString(5)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, diff --git a/aws/resource_aws_lb_target_group_attachment_test.go b/aws/resource_aws_lb_target_group_attachment_test.go index 548be6adc97..af3fed5fef6 100644 --- a/aws/resource_aws_lb_target_group_attachment_test.go +++ b/aws/resource_aws_lb_target_group_attachment_test.go @@ -14,7 +14,7 @@ import ( ) func TestAccAWSLBTargetGroupAttachment_basic(t *testing.T) { - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) 
resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -32,7 +32,7 @@ func TestAccAWSLBTargetGroupAttachment_basic(t *testing.T) { } func TestAccAWSLBTargetGroupAttachment_disappears(t *testing.T) { - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -51,7 +51,7 @@ func TestAccAWSLBTargetGroupAttachment_disappears(t *testing.T) { } func TestAccAWSLBTargetGroupAttachment_BackwardsCompatibility(t *testing.T) { - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -69,7 +69,7 @@ func TestAccAWSLBTargetGroupAttachment_BackwardsCompatibility(t *testing.T) { } func TestAccAWSLBTargetGroupAttachment_Port(t *testing.T) { - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -87,7 +87,7 @@ func TestAccAWSLBTargetGroupAttachment_Port(t *testing.T) { } func TestAccAWSLBTargetGroupAttachment_ipAddress(t *testing.T) { - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -105,7 +105,7 @@ func TestAccAWSLBTargetGroupAttachment_ipAddress(t *testing.T) { } func TestAccAWSLBTargetGroupAttachment_lambda(t *testing.T) { - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, diff --git a/aws/resource_aws_lb_target_group_test.go b/aws/resource_aws_lb_target_group_test.go index b0fb78dd9ec..34e4f3dfc61 100644 --- a/aws/resource_aws_lb_target_group_test.go +++ b/aws/resource_aws_lb_target_group_test.go @@ -93,7 +93,7 @@ func TestLBTargetGroupCloudwatchSuffixFromARN(t *testing.T) { func TestAccAWSLBTargetGroup_basic(t *testing.T) { var conf elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resourceName := "aws_lb_target_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -137,7 +137,7 @@ func TestAccAWSLBTargetGroup_basic(t *testing.T) { func TestAccAWSLBTargetGroup_basicUdp(t *testing.T) { var conf elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resourceName := "aws_lb_target_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -168,7 +168,7 @@ func TestAccAWSLBTargetGroup_basicUdp(t *testing.T) { func TestAccAWSLBTargetGroup_withoutHealthcheck(t *testing.T) { var conf 
elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resourceName := "aws_lb_target_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -193,7 +193,7 @@ func TestAccAWSLBTargetGroup_withoutHealthcheck(t *testing.T) { func TestAccAWSLBTargetGroup_networkLB_TargetGroup(t *testing.T) { var targetGroup1, targetGroup2, targetGroup3 elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resourceName := "aws_lb_target_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -305,7 +305,7 @@ func TestAccAWSLBTargetGroup_Protocol_Geneve(t *testing.T) { func TestAccAWSLBTargetGroup_Protocol_Tcp_HealthCheck_Protocol(t *testing.T) { var targetGroup1, targetGroup2 elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resourceName := "aws_lb_target_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -358,7 +358,7 @@ func TestAccAWSLBTargetGroup_Protocol_Tls(t *testing.T) { func TestAccAWSLBTargetGroup_networkLB_TargetGroupWithProxy(t *testing.T) { var confBefore, confAfter elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resourceName := "aws_lb_target_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -455,7 +455,7 @@ func TestAccAWSLBTargetGroup_TCP_HTTPHealthCheck(t *testing.T) { func TestAccAWSLBTargetGroup_BackwardsCompatibility(t *testing.T) { var conf elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resourceName := "aws_alb_target_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -539,8 +539,8 @@ func TestAccAWSLBTargetGroup_generatedName(t *testing.T) { func TestAccAWSLBTargetGroup_changeNameForceNew(t *testing.T) { var before, after elbv2.TargetGroup - targetGroupNameBefore := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - targetGroupNameAfter := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(4, acctest.CharSetAlphaNum)) + targetGroupNameBefore := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) + targetGroupNameAfter := fmt.Sprintf("test-target-group-%s", acctest.RandString(4)) resourceName := "aws_lb_target_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -569,7 +569,7 @@ func TestAccAWSLBTargetGroup_changeNameForceNew(t *testing.T) { func TestAccAWSLBTargetGroup_changeProtocolForceNew(t *testing.T) { var before, after elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resourceName := "aws_lb_target_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -598,7 +598,7 @@ func TestAccAWSLBTargetGroup_changeProtocolForceNew(t *testing.T) { func TestAccAWSLBTargetGroup_changePortForceNew(t *testing.T) { 
var before, after elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resourceName := "aws_lb_target_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -627,7 +627,7 @@ func TestAccAWSLBTargetGroup_changePortForceNew(t *testing.T) { func TestAccAWSLBTargetGroup_changeVpcForceNew(t *testing.T) { var before, after elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resourceName := "aws_lb_target_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -654,7 +654,7 @@ func TestAccAWSLBTargetGroup_changeVpcForceNew(t *testing.T) { func TestAccAWSLBTargetGroup_tags(t *testing.T) { var conf elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resourceName := "aws_lb_target_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -694,7 +694,7 @@ func TestAccAWSLBTargetGroup_tags(t *testing.T) { func TestAccAWSLBTargetGroup_enableHealthCheck(t *testing.T) { var conf elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resourceName := "aws_lb_target_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -731,7 +731,7 @@ func TestAccAWSLBTargetGroup_enableHealthCheck(t *testing.T) { func TestAccAWSLBTargetGroup_updateHealthCheck(t *testing.T) { var conf elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resourceName := "aws_lb_target_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -803,7 +803,7 @@ func TestAccAWSLBTargetGroup_updateHealthCheck(t *testing.T) { func TestAccAWSLBTargetGroup_updateSticknessEnabled(t *testing.T) { var conf elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resourceName := "aws_lb_target_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -889,7 +889,7 @@ func TestAccAWSLBTargetGroup_updateSticknessEnabled(t *testing.T) { func TestAccAWSLBTargetGroup_defaults_application(t *testing.T) { var conf elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resourceName := "aws_lb_target_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -926,7 +926,7 @@ func TestAccAWSLBTargetGroup_defaults_application(t *testing.T) { func TestAccAWSLBTargetGroup_defaults_network(t *testing.T) { var conf elbv2.TargetGroup - targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) resourceName := "aws_lb_target_group.test" healthCheckInvalid1 := ` path = "/health" diff --git 
a/aws/resource_aws_lb_test.go b/aws/resource_aws_lb_test.go index a01f1e4cd74..fc7691a697d 100644 --- a/aws/resource_aws_lb_test.go +++ b/aws/resource_aws_lb_test.go @@ -94,7 +94,7 @@ func TestLBCloudwatchSuffixFromARN(t *testing.T) { func TestAccAWSLB_ALB_basic(t *testing.T) { var conf elbv2.LoadBalancer - lbName := fmt.Sprintf("testaccawslb-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testaccawslb-basic-%s", acctest.RandString(10)) resourceName := "aws_lb.lb_test" resource.ParallelTest(t, resource.TestCase{ @@ -131,7 +131,7 @@ func TestAccAWSLB_ALB_basic(t *testing.T) { func TestAccAWSLB_NLB_basic(t *testing.T) { var conf elbv2.LoadBalancer - lbName := fmt.Sprintf("testaccawslb-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testaccawslb-basic-%s", acctest.RandString(10)) resourceName := "aws_lb.lb_test" resource.ParallelTest(t, resource.TestCase{ @@ -238,7 +238,7 @@ func TestAccAWSLB_LoadBalancerType_Gateway_EnableCrossZoneLoadBalancing(t *testi func TestAccAWSLB_ALB_outpost(t *testing.T) { var conf elbv2.LoadBalancer - lbName := fmt.Sprintf("testaccawslb-outpost-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testaccawslb-outpost-%s", acctest.RandString(10)) resourceName := "aws_lb.lb_test" resource.ParallelTest(t, resource.TestCase{ @@ -278,7 +278,7 @@ func TestAccAWSLB_ALB_outpost(t *testing.T) { func TestAccAWSLB_networkLoadbalancerEIP(t *testing.T) { var conf elbv2.LoadBalancer resourceName := "aws_lb.lb_test" - lbName := fmt.Sprintf("testaccawslb-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testaccawslb-basic-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -306,7 +306,7 @@ func TestAccAWSLB_networkLoadbalancerEIP(t *testing.T) { func TestAccAWSLB_NLB_privateipv4address(t *testing.T) { var conf elbv2.LoadBalancer - lbName := fmt.Sprintf("testaccawslb-pipv4a-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testaccawslb-pipv4a-%s", acctest.RandString(10)) resourceName := "aws_lb.test" resource.ParallelTest(t, resource.TestCase{ @@ -335,7 +335,7 @@ func TestAccAWSLB_NLB_privateipv4address(t *testing.T) { func TestAccAWSLB_BackwardsCompatibility(t *testing.T) { var conf elbv2.LoadBalancer - lbName := fmt.Sprintf("testaccawslb-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testaccawslb-basic-%s", acctest.RandString(10)) resourceName := "aws_alb.lb_test" resource.ParallelTest(t, resource.TestCase{ @@ -435,7 +435,7 @@ func TestAccAWSLB_namePrefix(t *testing.T) { func TestAccAWSLB_tags(t *testing.T) { var conf elbv2.LoadBalancer - lbName := fmt.Sprintf("testaccawslb-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testaccawslb-basic-%s", acctest.RandString(10)) resourceName := "aws_lb.lb_test" resource.ParallelTest(t, resource.TestCase{ @@ -475,7 +475,7 @@ func TestAccAWSLB_tags(t *testing.T) { func TestAccAWSLB_networkLoadbalancer_updateCrossZone(t *testing.T) { var pre, mid, post elbv2.LoadBalancer - lbName := fmt.Sprintf("testaccawslb-nlbcz-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testaccawslb-nlbcz-%s", acctest.RandString(10)) resourceName := "aws_lb.lb_test" resource.ParallelTest(t, resource.TestCase{ @@ -516,7 +516,7 @@ func 
TestAccAWSLB_networkLoadbalancer_updateCrossZone(t *testing.T) { func TestAccAWSLB_applicationLoadBalancer_updateHttp2(t *testing.T) { var pre, mid, post elbv2.LoadBalancer - lbName := fmt.Sprintf("testaccawsalb-http2-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testaccawsalb-http2-%s", acctest.RandString(10)) resourceName := "aws_lb.lb_test" resource.ParallelTest(t, resource.TestCase{ @@ -557,7 +557,7 @@ func TestAccAWSLB_applicationLoadBalancer_updateHttp2(t *testing.T) { func TestAccAWSLB_applicationLoadBalancer_updateDropInvalidHeaderFields(t *testing.T) { var pre, mid, post elbv2.LoadBalancer - lbName := fmt.Sprintf("testaccawsalb-headers-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testaccawsalb-headers-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -597,7 +597,7 @@ func TestAccAWSLB_applicationLoadBalancer_updateDropInvalidHeaderFields(t *testi func TestAccAWSLB_applicationLoadBalancer_updateDeletionProtection(t *testing.T) { var pre, mid, post elbv2.LoadBalancer - lbName := fmt.Sprintf("testaccawsalb-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testaccawsalb-basic-%s", acctest.RandString(10)) resourceName := "aws_lb.lb_test" resource.ParallelTest(t, resource.TestCase{ @@ -638,7 +638,7 @@ func TestAccAWSLB_applicationLoadBalancer_updateDeletionProtection(t *testing.T) func TestAccAWSLB_updatedSecurityGroups(t *testing.T) { var pre, post elbv2.LoadBalancer - lbName := fmt.Sprintf("testaccawslb-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testaccawslb-basic-%s", acctest.RandString(10)) resourceName := "aws_lb.lb_test" resource.ParallelTest(t, resource.TestCase{ @@ -668,7 +668,7 @@ func TestAccAWSLB_updatedSecurityGroups(t *testing.T) { func TestAccAWSLB_updatedSubnets(t *testing.T) { var pre, post elbv2.LoadBalancer - lbName := fmt.Sprintf("testaccawslb-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testaccawslb-basic-%s", acctest.RandString(10)) resourceName := "aws_lb.lb_test" resource.ParallelTest(t, resource.TestCase{ @@ -698,7 +698,7 @@ func TestAccAWSLB_updatedSubnets(t *testing.T) { func TestAccAWSLB_updatedIpAddressType(t *testing.T) { var pre, post elbv2.LoadBalancer - lbName := fmt.Sprintf("testaccawslb-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testaccawslb-basic-%s", acctest.RandString(10)) resourceName := "aws_lb.lb_test" resource.ParallelTest(t, resource.TestCase{ @@ -730,7 +730,7 @@ func TestAccAWSLB_updatedIpAddressType(t *testing.T) { // is assigned. 
func TestAccAWSLB_noSecurityGroup(t *testing.T) { var conf elbv2.LoadBalancer - lbName := fmt.Sprintf("testaccawslb-nosg-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testaccawslb-nosg-%s", acctest.RandString(10)) resourceName := "aws_lb.lb_test" resource.ParallelTest(t, resource.TestCase{ @@ -762,8 +762,8 @@ func TestAccAWSLB_noSecurityGroup(t *testing.T) { func TestAccAWSLB_ALB_AccessLogs(t *testing.T) { var conf elbv2.LoadBalancer - bucketName := fmt.Sprintf("tf-test-access-logs-%s", acctest.RandStringFromCharSet(6, acctest.CharSetAlphaNum)) - lbName := fmt.Sprintf("testaccawslbaccesslog-%s", acctest.RandStringFromCharSet(4, acctest.CharSetAlpha)) + bucketName := fmt.Sprintf("tf-test-access-logs-%s", acctest.RandString(6)) + lbName := fmt.Sprintf("testaccawslbaccesslog-%s", acctest.RandString(4)) resourceName := "aws_lb.test" resource.ParallelTest(t, resource.TestCase{ @@ -850,8 +850,8 @@ func TestAccAWSLB_ALB_AccessLogs(t *testing.T) { func TestAccAWSLB_ALB_AccessLogs_Prefix(t *testing.T) { var conf elbv2.LoadBalancer - bucketName := fmt.Sprintf("tf-test-access-logs-%s", acctest.RandStringFromCharSet(6, acctest.CharSetAlphaNum)) - lbName := fmt.Sprintf("testaccawslbaccesslog-%s", acctest.RandStringFromCharSet(4, acctest.CharSetAlpha)) + bucketName := fmt.Sprintf("tf-test-access-logs-%s", acctest.RandString(6)) + lbName := fmt.Sprintf("testaccawslbaccesslog-%s", acctest.RandString(4)) resourceName := "aws_lb.test" resource.ParallelTest(t, resource.TestCase{ @@ -920,8 +920,8 @@ func TestAccAWSLB_ALB_AccessLogs_Prefix(t *testing.T) { func TestAccAWSLB_NLB_AccessLogs(t *testing.T) { var conf elbv2.LoadBalancer - bucketName := fmt.Sprintf("tf-test-access-logs-%s", acctest.RandStringFromCharSet(6, acctest.CharSetAlphaNum)) - lbName := fmt.Sprintf("testaccawslbaccesslog-%s", acctest.RandStringFromCharSet(4, acctest.CharSetAlpha)) + bucketName := fmt.Sprintf("tf-test-access-logs-%s", acctest.RandString(6)) + lbName := fmt.Sprintf("testaccawslbaccesslog-%s", acctest.RandString(4)) resourceName := "aws_lb.test" resource.ParallelTest(t, resource.TestCase{ @@ -1008,8 +1008,8 @@ func TestAccAWSLB_NLB_AccessLogs(t *testing.T) { func TestAccAWSLB_NLB_AccessLogs_Prefix(t *testing.T) { var conf elbv2.LoadBalancer - bucketName := fmt.Sprintf("tf-test-access-logs-%s", acctest.RandStringFromCharSet(6, acctest.CharSetAlphaNum)) - lbName := fmt.Sprintf("testaccawslbaccesslog-%s", acctest.RandStringFromCharSet(4, acctest.CharSetAlpha)) + bucketName := fmt.Sprintf("tf-test-access-logs-%s", acctest.RandString(6)) + lbName := fmt.Sprintf("testaccawslbaccesslog-%s", acctest.RandString(4)) resourceName := "aws_lb.test" resource.ParallelTest(t, resource.TestCase{ @@ -1078,7 +1078,7 @@ func TestAccAWSLB_NLB_AccessLogs_Prefix(t *testing.T) { func TestAccAWSLB_networkLoadbalancer_subnet_change(t *testing.T) { var conf elbv2.LoadBalancer - lbName := fmt.Sprintf("testaccawslb-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testaccawslb-basic-%s", acctest.RandString(10)) resourceName := "aws_lb.lb_test" resource.ParallelTest(t, resource.TestCase{ diff --git a/aws/resource_aws_ram_resource_share_accepter_test.go b/aws/resource_aws_ram_resource_share_accepter_test.go index 7262f521ba1..bf057f3651f 100644 --- a/aws/resource_aws_ram_resource_share_accepter_test.go +++ b/aws/resource_aws_ram_resource_share_accepter_test.go @@ -18,7 +18,7 @@ func TestAccAwsRamResourceShareAccepter_basic(t *testing.T) { resourceName := 
"aws_ram_resource_share_accepter.test" principalAssociationResourceName := "aws_ram_principal_association.test" - shareName := fmt.Sprintf("tf-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + shareName := fmt.Sprintf("tf-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { diff --git a/aws/resource_aws_ram_resource_share_test.go b/aws/resource_aws_ram_resource_share_test.go index 246f02ea733..a3c67850c8d 100644 --- a/aws/resource_aws_ram_resource_share_test.go +++ b/aws/resource_aws_ram_resource_share_test.go @@ -15,7 +15,7 @@ import ( func TestAccAwsRamResourceShare_basic(t *testing.T) { var resourceShare ram.ResourceShare resourceName := "aws_ram_resource_share.example" - shareName := fmt.Sprintf("tf-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + shareName := fmt.Sprintf("tf-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -44,7 +44,7 @@ func TestAccAwsRamResourceShare_basic(t *testing.T) { func TestAccAwsRamResourceShare_AllowExternalPrincipals(t *testing.T) { var resourceShare1, resourceShare2 ram.ResourceShare resourceName := "aws_ram_resource_share.example" - shareName := fmt.Sprintf("tf-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + shareName := fmt.Sprintf("tf-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -77,8 +77,8 @@ func TestAccAwsRamResourceShare_AllowExternalPrincipals(t *testing.T) { func TestAccAwsRamResourceShare_Name(t *testing.T) { var resourceShare1, resourceShare2 ram.ResourceShare resourceName := "aws_ram_resource_share.example" - shareName1 := fmt.Sprintf("tf-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - shareName2 := fmt.Sprintf("tf-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + shareName1 := fmt.Sprintf("tf-%s", acctest.RandString(10)) + shareName2 := fmt.Sprintf("tf-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -111,7 +111,7 @@ func TestAccAwsRamResourceShare_Name(t *testing.T) { func TestAccAwsRamResourceShare_Tags(t *testing.T) { var resourceShare1, resourceShare2, resourceShare3 ram.ResourceShare resourceName := "aws_ram_resource_share.example" - shareName := fmt.Sprintf("tf-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + shareName := fmt.Sprintf("tf-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, diff --git a/aws/resource_aws_ses_domain_dkim_test.go b/aws/resource_aws_ses_domain_dkim_test.go index 66a3934fdbd..b2343f241cb 100644 --- a/aws/resource_aws_ses_domain_dkim_test.go +++ b/aws/resource_aws_ses_domain_dkim_test.go @@ -17,7 +17,7 @@ func TestAccAWSSESDomainDkim_basic(t *testing.T) { resourceName := "aws_ses_domain_dkim.test" domain := fmt.Sprintf( "%s.terraformtesting.com", - acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { diff --git a/aws/resource_aws_ses_domain_identity_test.go b/aws/resource_aws_ses_domain_identity_test.go index efa62bb77ea..2be3189dc21 100644 --- a/aws/resource_aws_ses_domain_identity_test.go +++ b/aws/resource_aws_ses_domain_identity_test.go @@ -70,7 +70,7 @@ func testSweepSesIdentities(region, identityType string) error { func TestAccAWSSESDomainIdentity_basic(t *testing.T) { domain := fmt.Sprintf( 
"%s.terraformtesting.com", - acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSES(t) }, @@ -91,7 +91,7 @@ func TestAccAWSSESDomainIdentity_basic(t *testing.T) { func TestAccAWSSESDomainIdentity_disappears(t *testing.T) { domain := fmt.Sprintf( "%s.terraformtesting.com", - acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSES(t) }, @@ -115,7 +115,7 @@ func TestAccAWSSESDomainIdentity_disappears(t *testing.T) { func TestAccAWSSESDomainIdentity_trailingPeriod(t *testing.T) { domain := fmt.Sprintf( "%s.terraformtesting.com.", - acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSES(t) }, diff --git a/aws/resource_aws_ses_domain_identity_verification_test.go b/aws/resource_aws_ses_domain_identity_verification_test.go index b30dfcc5055..869f8ba0a26 100644 --- a/aws/resource_aws_ses_domain_identity_verification_test.go +++ b/aws/resource_aws_ses_domain_identity_verification_test.go @@ -45,7 +45,7 @@ func TestAccAwsSesDomainIdentityVerification_basic(t *testing.T) { func TestAccAwsSesDomainIdentityVerification_timeout(t *testing.T) { domain := fmt.Sprintf( "%s.terraformtesting.com", - acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSES(t) }, @@ -63,7 +63,7 @@ func TestAccAwsSesDomainIdentityVerification_timeout(t *testing.T) { func TestAccAwsSesDomainIdentityVerification_nonexistent(t *testing.T) { domain := fmt.Sprintf( "%s.terraformtesting.com", - acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSES(t) }, diff --git a/aws/resource_aws_ses_domain_mail_from_test.go b/aws/resource_aws_ses_domain_mail_from_test.go index 125366d5e50..266819f71cc 100644 --- a/aws/resource_aws_ses_domain_mail_from_test.go +++ b/aws/resource_aws_ses_domain_mail_from_test.go @@ -14,7 +14,7 @@ import ( func TestAccAWSSESDomainMailFrom_basic(t *testing.T) { domain := fmt.Sprintf( "%s.terraformtesting.com", - acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + acctest.RandString(10)) mailFromDomain1 := fmt.Sprintf("bounce1.%s", domain) mailFromDomain2 := fmt.Sprintf("bounce2.%s", domain) resourceName := "aws_ses_domain_mail_from.test" @@ -54,7 +54,7 @@ func TestAccAWSSESDomainMailFrom_basic(t *testing.T) { func TestAccAWSSESDomainMailFrom_disappears(t *testing.T) { domain := fmt.Sprintf( "%s.terraformtesting.com", - acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + acctest.RandString(10)) mailFromDomain := fmt.Sprintf("bounce.%s", domain) resourceName := "aws_ses_domain_mail_from.test" @@ -78,7 +78,7 @@ func TestAccAWSSESDomainMailFrom_disappears(t *testing.T) { func TestAccAWSSESDomainMailFrom_disappears_Identity(t *testing.T) { domain := fmt.Sprintf( "%s.terraformtesting.com", - acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + acctest.RandString(10)) mailFromDomain := fmt.Sprintf("bounce.%s", domain) resourceName := "aws_ses_domain_mail_from.test" @@ -102,7 +102,7 @@ func 
TestAccAWSSESDomainMailFrom_disappears_Identity(t *testing.T) { func TestAccAWSSESDomainMailFrom_behaviorOnMxFailure(t *testing.T) { domain := fmt.Sprintf( "%s.terraformtesting.com", - acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + acctest.RandString(10)) resourceName := "aws_ses_domain_mail_from.test" resource.ParallelTest(t, resource.TestCase{ diff --git a/aws/resource_aws_ses_email_identity_test.go b/aws/resource_aws_ses_email_identity_test.go index 0fa5bcc595f..423cb76b26f 100644 --- a/aws/resource_aws_ses_email_identity_test.go +++ b/aws/resource_aws_ses_email_identity_test.go @@ -23,7 +23,7 @@ func init() { func TestAccAWSSESEmailIdentity_basic(t *testing.T) { email := fmt.Sprintf( "%s@terraformtesting.com", - acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + acctest.RandString(10)) resourceName := "aws_ses_email_identity.test" resource.ParallelTest(t, resource.TestCase{ @@ -50,7 +50,7 @@ func TestAccAWSSESEmailIdentity_basic(t *testing.T) { func TestAccAWSSESEmailIdentity_trailingPeriod(t *testing.T) { email := fmt.Sprintf( "%s@terraformtesting.com.", - acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + acctest.RandString(10)) resourceName := "aws_ses_email_identity.test" resource.ParallelTest(t, resource.TestCase{ diff --git a/aws/resource_aws_ses_identity_notification_topic_test.go b/aws/resource_aws_ses_identity_notification_topic_test.go index 0d496e9cdaf..ec85138f51e 100644 --- a/aws/resource_aws_ses_identity_notification_topic_test.go +++ b/aws/resource_aws_ses_identity_notification_topic_test.go @@ -16,7 +16,7 @@ import ( func TestAccAwsSESIdentityNotificationTopic_basic(t *testing.T) { domain := fmt.Sprintf( "%s.terraformtesting.com", - acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + acctest.RandString(10)) topicName := fmt.Sprintf("test-topic-%d", acctest.RandInt()) resourceName := "aws_ses_identity_notification_topic.test" diff --git a/aws/resource_aws_ses_identity_policy_test.go b/aws/resource_aws_ses_identity_policy_test.go index 6d8f504dc54..4db95bf9002 100644 --- a/aws/resource_aws_ses_identity_policy_test.go +++ b/aws/resource_aws_ses_identity_policy_test.go @@ -14,7 +14,7 @@ import ( func TestAccAWSSESIdentityPolicy_basic(t *testing.T) { domain := fmt.Sprintf( "%s.terraformtesting.com", - acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + acctest.RandString(10)) resourceName := "aws_ses_identity_policy.test" resource.ParallelTest(t, resource.TestCase{ @@ -40,7 +40,7 @@ func TestAccAWSSESIdentityPolicy_basic(t *testing.T) { func TestAccAWSSESIdentityPolicy_Identity_Email(t *testing.T) { email := fmt.Sprintf( "%s@terraformtesting.com", - acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + acctest.RandString(10)) resourceName := "aws_ses_identity_policy.test" resource.ParallelTest(t, resource.TestCase{ @@ -66,7 +66,7 @@ func TestAccAWSSESIdentityPolicy_Identity_Email(t *testing.T) { func TestAccAWSSESIdentityPolicy_Policy(t *testing.T) { domain := fmt.Sprintf( "%s.terraformtesting.com", - acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + acctest.RandString(10)) resourceName := "aws_ses_identity_policy.test" resource.ParallelTest(t, resource.TestCase{ diff --git a/aws/resource_aws_shield_protection_test.go b/aws/resource_aws_shield_protection_test.go index 452b1f01f3e..eb4b6c4a10e 100644 --- a/aws/resource_aws_shield_protection_test.go +++ b/aws/resource_aws_shield_protection_test.go @@ -15,7 +15,7 @@ import ( func TestAccAWSShieldProtection_GlobalAccelerator(t *testing.T) { resourceName 
:= "aws_shield_protection.acctest" - rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + rName := acctest.RandString(10) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { @@ -43,7 +43,7 @@ func TestAccAWSShieldProtection_GlobalAccelerator(t *testing.T) { func TestAccAWSShieldProtection_ElasticIPAddress(t *testing.T) { resourceName := "aws_shield_protection.acctest" - rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + rName := acctest.RandString(10) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { @@ -71,7 +71,7 @@ func TestAccAWSShieldProtection_ElasticIPAddress(t *testing.T) { func TestAccAWSShieldProtection_Alb(t *testing.T) { resourceName := "aws_shield_protection.acctest" - rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + rName := acctest.RandString(10) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { @@ -99,7 +99,7 @@ func TestAccAWSShieldProtection_Alb(t *testing.T) { func TestAccAWSShieldProtection_Elb(t *testing.T) { resourceName := "aws_shield_protection.acctest" - rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + rName := acctest.RandString(10) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { @@ -127,7 +127,7 @@ func TestAccAWSShieldProtection_Elb(t *testing.T) { func TestAccAWSShieldProtection_Cloudfront(t *testing.T) { resourceName := "aws_shield_protection.acctest" - rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + rName := acctest.RandString(10) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { @@ -156,7 +156,7 @@ func TestAccAWSShieldProtection_Cloudfront(t *testing.T) { func TestAccAWSShieldProtection_Route53(t *testing.T) { resourceName := "aws_shield_protection.acctest" - rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + rName := acctest.RandString(10) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { diff --git a/aws/resource_aws_vpc_endpoint_connection_notification_test.go b/aws/resource_aws_vpc_endpoint_connection_notification_test.go index 81e7cef6993..9e918d0bae8 100644 --- a/aws/resource_aws_vpc_endpoint_connection_notification_test.go +++ b/aws/resource_aws_vpc_endpoint_connection_notification_test.go @@ -13,7 +13,7 @@ import ( ) func TestAccAWSVpcEndpointConnectionNotification_basic(t *testing.T) { - lbName := fmt.Sprintf("testaccawsnlb-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testaccawsnlb-basic-%s", acctest.RandString(10)) resourceName := "aws_vpc_endpoint_connection_notification.test" resource.ParallelTest(t, resource.TestCase{ diff --git a/aws/resource_aws_vpc_endpoint_route_table_association_test.go b/aws/resource_aws_vpc_endpoint_route_table_association_test.go index 1af4dcd23a2..f71797c5070 100644 --- a/aws/resource_aws_vpc_endpoint_route_table_association_test.go +++ b/aws/resource_aws_vpc_endpoint_route_table_association_test.go @@ -15,7 +15,7 @@ import ( func TestAccAWSVpcEndpointRouteTableAssociation_basic(t *testing.T) { var vpce ec2.VpcEndpoint resourceName := "aws_vpc_endpoint_route_table_association.test" - rName := fmt.Sprintf("tf-testacc-vpce-%s", acctest.RandStringFromCharSet(16, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-vpce-%s", acctest.RandString(16)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, diff --git a/aws/resource_aws_vpc_endpoint_service_allowed_principal_test.go b/aws/resource_aws_vpc_endpoint_service_allowed_principal_test.go 
index 0600778711f..940b2625652 100644 --- a/aws/resource_aws_vpc_endpoint_service_allowed_principal_test.go +++ b/aws/resource_aws_vpc_endpoint_service_allowed_principal_test.go @@ -13,7 +13,7 @@ import ( ) func TestAccAWSVpcEndpointServiceAllowedPrincipal_basic(t *testing.T) { - lbName := fmt.Sprintf("testaccawsnlb-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + lbName := fmt.Sprintf("testaccawsnlb-basic-%s", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, diff --git a/aws/resource_aws_vpc_peering_connection_test.go b/aws/resource_aws_vpc_peering_connection_test.go index 82922fa373c..dfd7ffc986b 100644 --- a/aws/resource_aws_vpc_peering_connection_test.go +++ b/aws/resource_aws_vpc_peering_connection_test.go @@ -89,7 +89,7 @@ func testSweepEc2VpcPeeringConnections(region string) error { func TestAccAWSVPCPeeringConnection_basic(t *testing.T) { var connection ec2.VpcPeeringConnection - rName := fmt.Sprintf("tf-testacc-pcx-%s", acctest.RandStringFromCharSet(17, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-pcx-%s", acctest.RandString(17)) resourceName := "aws_vpc_peering_connection.test" resource.ParallelTest(t, resource.TestCase{ @@ -121,7 +121,7 @@ func TestAccAWSVPCPeeringConnection_basic(t *testing.T) { func TestAccAWSVPCPeeringConnection_plan(t *testing.T) { var connection ec2.VpcPeeringConnection - rName := fmt.Sprintf("tf-testacc-pcx-%s", acctest.RandStringFromCharSet(17, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-pcx-%s", acctest.RandString(17)) resourceName := "aws_vpc_peering_connection.test" // reach out and DELETE the VPC Peering connection outside of Terraform @@ -160,7 +160,7 @@ func TestAccAWSVPCPeeringConnection_plan(t *testing.T) { func TestAccAWSVPCPeeringConnection_tags(t *testing.T) { var connection ec2.VpcPeeringConnection - rName := fmt.Sprintf("tf-testacc-pcx-%s", acctest.RandStringFromCharSet(17, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-pcx-%s", acctest.RandString(17)) resourceName := "aws_vpc_peering_connection.test" resource.ParallelTest(t, resource.TestCase{ @@ -210,7 +210,7 @@ func TestAccAWSVPCPeeringConnection_tags(t *testing.T) { func TestAccAWSVPCPeeringConnection_options(t *testing.T) { var connection ec2.VpcPeeringConnection - rName := fmt.Sprintf("tf-testacc-pcx-%s", acctest.RandStringFromCharSet(17, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-pcx-%s", acctest.RandString(17)) resourceName := "aws_vpc_peering_connection.test" testAccepterChange := func(*terraform.State) error { @@ -385,7 +385,7 @@ func TestAccAWSVPCPeeringConnection_options(t *testing.T) { } func TestAccAWSVPCPeeringConnection_failedState(t *testing.T) { - rName := fmt.Sprintf("tf-testacc-pcx-%s", acctest.RandStringFromCharSet(17, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-pcx-%s", acctest.RandString(17)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -521,7 +521,7 @@ func testAccCheckAWSVpcPeeringConnectionOptionsWithProvider(n, block string, opt func TestAccAWSVPCPeeringConnection_peerRegionAutoAccept(t *testing.T) { var providers []*schema.Provider - rName := fmt.Sprintf("tf-testacc-pcx-%s", acctest.RandStringFromCharSet(17, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-pcx-%s", acctest.RandString(17)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { @@ -543,7 +543,7 @@ func TestAccAWSVPCPeeringConnection_peerRegionAutoAccept(t *testing.T) { func 
TestAccAWSVPCPeeringConnection_region(t *testing.T) { var connection ec2.VpcPeeringConnection var providers []*schema.Provider - rName := fmt.Sprintf("tf-testacc-pcx-%s", acctest.RandStringFromCharSet(17, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-pcx-%s", acctest.RandString(17)) resourceName := "aws_vpc_peering_connection.test" resource.ParallelTest(t, resource.TestCase{ @@ -578,7 +578,7 @@ func TestAccAWSVPCPeeringConnection_region(t *testing.T) { // Tests the peering connection acceptance functionality for same region, same account. func TestAccAWSVPCPeeringConnection_accept(t *testing.T) { var connection ec2.VpcPeeringConnection - rName := fmt.Sprintf("tf-testacc-pcx-%s", acctest.RandStringFromCharSet(17, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-pcx-%s", acctest.RandString(17)) resourceName := "aws_vpc_peering_connection.test" resource.ParallelTest(t, resource.TestCase{ @@ -646,7 +646,7 @@ func TestAccAWSVPCPeeringConnection_accept(t *testing.T) { // Tests that VPC peering connection options can't be set on non-active connection. func TestAccAWSVPCPeeringConnection_optionsNoAutoAccept(t *testing.T) { - rName := fmt.Sprintf("tf-testacc-pcx-%s", acctest.RandStringFromCharSet(17, acctest.CharSetAlphaNum)) + rName := fmt.Sprintf("tf-testacc-pcx-%s", acctest.RandString(17)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, From 5601d741f9dfb236e6a98a3b68990379cb063e53 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 15 Dec 2020 08:34:28 -0500 Subject: [PATCH 0232/1212] service/ec2: Add `carrier_ip` attribute to aws_eip resource and data source (#16724) * r/aws_eip: Add 'carrier_ip' attribute. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSEIP_CarrierIP' ACCTEST_PARALLELISM=2 ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 2 -run=TestAccAWSEIP_CarrierIP -timeout 120m === RUN TestAccAWSEIP_CarrierIP === PAUSE TestAccAWSEIP_CarrierIP === CONT TestAccAWSEIP_CarrierIP --- PASS: TestAccAWSEIP_CarrierIP (17.36s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 17.450s * d/aws_eip: Add 'carrier_ip' attribute. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccDataSourceAWSEIP_CarrierIP' ACCTEST_PARALLELISM=2 ==> Checking that code complies with gofmt requirements... 
TF_ACC=1 go test ./aws -v -count 1 -parallel 2 -run=TestAccDataSourceAWSEIP_CarrierIP -timeout 120m === RUN TestAccDataSourceAWSEIP_CarrierIP === PAUSE TestAccDataSourceAWSEIP_CarrierIP === CONT TestAccDataSourceAWSEIP_CarrierIP --- PASS: TestAccDataSourceAWSEIP_CarrierIP (14.74s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 14.829s --- aws/data_source_aws_eip.go | 5 ++++ aws/data_source_aws_eip_test.go | 43 +++++++++++++++++++++++++++ aws/resource_aws_eip.go | 9 +++++- aws/resource_aws_eip_test.go | 48 +++++++++++++++++++++++++++++++ aws/resource_aws_instance_test.go | 24 ++++++++++++++++ website/docs/d/eip.html.markdown | 1 + website/docs/r/eip.html.markdown | 1 + 7 files changed, 130 insertions(+), 1 deletion(-) diff --git a/aws/data_source_aws_eip.go b/aws/data_source_aws_eip.go index 07d7eab9f6d..3fed18c62eb 100644 --- a/aws/data_source_aws_eip.go +++ b/aws/data_source_aws_eip.go @@ -64,6 +64,10 @@ func dataSourceAwsEip() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "carrier_ip": { + Type: schema.TypeString, + Computed: true, + }, "customer_owned_ipv4_pool": { Type: schema.TypeString, Computed: true, @@ -158,6 +162,7 @@ func dataSourceAwsEipRead(d *schema.ResourceData, meta interface{}) error { } } d.Set("public_ipv4_pool", eip.PublicIpv4Pool) + d.Set("carrier_ip", eip.CarrierIp) d.Set("customer_owned_ipv4_pool", eip.CustomerOwnedIpv4Pool) d.Set("customer_owned_ip", eip.CustomerOwnedIp) diff --git a/aws/data_source_aws_eip_test.go b/aws/data_source_aws_eip_test.go index 338dd25c5ed..30c7d68e7ab 100644 --- a/aws/data_source_aws_eip_test.go +++ b/aws/data_source_aws_eip_test.go @@ -152,6 +152,26 @@ func TestAccDataSourceAwsEip_Instance(t *testing.T) { }) } +func TestAccDataSourceAWSEIP_CarrierIP(t *testing.T) { + dataSourceName := "data.aws_eip.test" + resourceName := "aws_eip.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSWavelengthZoneAvailable(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAWSEIPConfigCarrierIP(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "carrier_ip", resourceName, "carrier_ip"), + resource.TestCheckResourceAttrPair(dataSourceName, "public_ip", resourceName, "public_ip"), + ), + }, + }, + }) +} + func TestAccDataSourceAWSEIP_CustomerOwnedIpv4Pool(t *testing.T) { dataSourceName := "data.aws_eip.test" resourceName := "aws_eip.test" @@ -328,3 +348,26 @@ data "aws_eip" "test" { } } ` + +func testAccDataSourceAWSEIPConfigCarrierIP(rName string) string { + return composeConfig( + testAccAvailableAZsWavelengthZonesDefaultExcludeConfig(), + fmt.Sprintf(` +data "aws_availability_zone" "available" { + name = data.aws_availability_zones.available.names[0] +} + +resource "aws_eip" "test" { + vpc = true + network_border_group = data.aws_availability_zone.available.network_border_group + + tags = { + Name = %[1]q + } +} + +data "aws_eip" "test" { + id = aws_eip.test.id +} +`, rName)) +} diff --git a/aws/resource_aws_eip.go b/aws/resource_aws_eip.go index 20a5c8c1896..3fa6364b321 100644 --- a/aws/resource_aws_eip.go +++ b/aws/resource_aws_eip.go @@ -98,6 +98,11 @@ func resourceAwsEip() *schema.Resource { Optional: true, }, + "carrier_ip": { + Type: schema.TypeString, + Computed: true, + }, + "customer_owned_ipv4_pool": { Type: schema.TypeString, Optional: true, @@ -293,6 +298,7 @@ func resourceAwsEipRead(d 
*schema.ResourceData, meta interface{}) error { } } d.Set("public_ipv4_pool", address.PublicIpv4Pool) + d.Set("carrier_ip", address.CarrierIp) d.Set("customer_owned_ipv4_pool", address.CustomerOwnedIpv4Pool) d.Set("customer_owned_ip", address.CustomerOwnedIp) d.Set("network_border_group", address.NetworkBorderGroup) @@ -441,7 +447,8 @@ func resourceAwsEipDelete(d *schema.ResourceData, meta interface{}) error { case ec2.DomainTypeVpc: log.Printf("[DEBUG] EIP release (destroy) address allocation: %v", d.Id()) input = &ec2.ReleaseAddressInput{ - AllocationId: aws.String(d.Id()), + AllocationId: aws.String(d.Id()), + NetworkBorderGroup: aws.String(d.Get("network_border_group").(string)), } case ec2.DomainTypeStandard: log.Printf("[DEBUG] EIP release (destroy) address: %v", d.Id()) diff --git a/aws/resource_aws_eip_test.go b/aws/resource_aws_eip_test.go index f460c407756..811014e4c16 100644 --- a/aws/resource_aws_eip_test.go +++ b/aws/resource_aws_eip_test.go @@ -468,6 +468,35 @@ func TestAccAWSEIP_NetworkBorderGroup(t *testing.T) { }) } +func TestAccAWSEIP_CarrierIP(t *testing.T) { + var conf ec2.Address + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_eip.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSWavelengthZoneAvailable(t) }, + IDRefreshName: resourceName, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSEIPDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSEIPConfigCarrierIP(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSEIPExists(resourceName, &conf), + resource.TestCheckResourceAttrSet(resourceName, "carrier_ip"), + resource.TestCheckResourceAttrSet(resourceName, "network_border_group"), + resource.TestCheckResourceAttr(resourceName, "public_ip", ""), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAWSEIP_PublicIpv4Pool_custom(t *testing.T) { if os.Getenv("AWS_EC2_EIP_PUBLIC_IPV4_POOL") == "" { t.Skip("Environment variable AWS_EC2_EIP_PUBLIC_IPV4_POOL is not set") @@ -1176,3 +1205,22 @@ resource "aws_eip" "test" { network_border_group = data.aws_region.current.name } ` + +func testAccAWSEIPConfigCarrierIP(rName string) string { + return composeConfig( + testAccAvailableAZsWavelengthZonesDefaultExcludeConfig(), + fmt.Sprintf(` +data "aws_availability_zone" "available" { + name = data.aws_availability_zones.available.names[0] +} + +resource "aws_eip" "test" { + vpc = true + network_border_group = data.aws_availability_zone.available.network_border_group + + tags = { + Name = %[1]q + } +} +`, rName)) +} diff --git a/aws/resource_aws_instance_test.go b/aws/resource_aws_instance_test.go index a9bc65fc5c9..4926c82d204 100644 --- a/aws/resource_aws_instance_test.go +++ b/aws/resource_aws_instance_test.go @@ -3307,6 +3307,30 @@ data "aws_availability_zones" "available" { ` } +func testAccAvailableAZsWavelengthZonesExcludeConfig(excludeZoneIds ...string) string { + return fmt.Sprintf(` +data "aws_availability_zones" "available" { + exclude_zone_ids = ["%[1]s"] + state = "available" + + filter { + name = "zone-type" + values = ["wavelength-zone"] + } + + filter { + name = "opt-in-status" + values = ["opted-in"] + } +} +`, strings.Join(excludeZoneIds, "\", \"")) +} + +func testAccAvailableAZsWavelengthZonesDefaultExcludeConfig() string { + // Exclude usw2-wl1-den-wlz1 as there may be problems allocating carrier IP addresses. 
+ return testAccAvailableAZsWavelengthZonesExcludeConfig("usw2-wl1-den-wlz1") +} + func testAccInstanceConfigInDefaultVpcBySgName(rName string) string { return testAccAvailableAZsNoOptInDefaultExcludeConfig() + testAccLatestAmazonLinuxHvmEbsAmiConfig() + diff --git a/website/docs/d/eip.html.markdown b/website/docs/d/eip.html.markdown index e60c427de2a..074e6c0a409 100644 --- a/website/docs/d/eip.html.markdown +++ b/website/docs/d/eip.html.markdown @@ -75,6 +75,7 @@ In addition to all arguments above, the following attributes are exported: * `public_ip` - Public IP address of Elastic IP. * `public_dns` - Public DNS associated with the Elastic IP address. * `public_ipv4_pool` - The ID of an address pool. +* `carrier_ip` - The carrier IP address. * `customer_owned_ipv4_pool` - The ID of a Customer Owned IP Pool. For more on customer-owned IP addresses, check out the [Customer-owned IP addresses guide](https://docs.aws.amazon.com/outposts/latest/userguide/outposts-networking-components.html#ip-addressing) * `customer_owned_ip` - Customer Owned IP. * `tags` - Key-value map of tags associated with Elastic IP. diff --git a/website/docs/r/eip.html.markdown b/website/docs/r/eip.html.markdown index 6d05f40da32..82d9ce0ed7d 100644 --- a/website/docs/r/eip.html.markdown +++ b/website/docs/r/eip.html.markdown @@ -127,6 +127,7 @@ In addition to all arguments above, the following attributes are exported: * `instance` - Contains the ID of the attached instance. * `network_interface` - Contains the ID of the attached network interface. * `public_ipv4_pool` - EC2 IPv4 address pool identifier (if in VPC). +* `carrier_ip` - The carrier IP address. * `customer_owned_ipv4_pool` - The ID of a customer-owned address pool. For more on customer-owned IP addresses, check out the [Customer-owned IP addresses guide](https://docs.aws.amazon.com/outposts/latest/userguide/outposts-networking-components.html#ip-addressing) * `customer_owned_ip` - Customer owned IP. * `domain` - Indicates if this EIP is for use in VPC (`vpc`) or EC2 Classic (`standard`).
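
As a usage reference for the `carrier_ip` change above: a minimal Terraform configuration sketch (not part of the patch; resource labels are illustrative) that mirrors the acceptance test setup. It assumes the region has at least one opted-in Wavelength Zone, allocates a VPC Elastic IP in that zone's network border group, and reads the allocated carrier IP back through the new attribute:

```
# Find opted-in Wavelength Zones in the current region, mirroring the
# testAccAvailableAZsWavelengthZonesExcludeConfig helper above.
data "aws_availability_zones" "wavelength" {
  state = "available"

  filter {
    name   = "zone-type"
    values = ["wavelength-zone"]
  }

  filter {
    name   = "opt-in-status"
    values = ["opted-in"]
  }
}

# Resolve one zone to obtain its network border group.
data "aws_availability_zone" "selected" {
  name = data.aws_availability_zones.wavelength.names[0]
}

# Allocating the EIP in a Wavelength network border group yields a carrier IP
# (and an empty public_ip, as asserted by TestAccAWSEIP_CarrierIP above).
resource "aws_eip" "example" {
  vpc                  = true
  network_border_group = data.aws_availability_zone.selected.network_border_group
}

output "carrier_ip" {
  value = aws_eip.example.carrier_ip
}
```

Note that `resourceAwsEipDelete` now also passes `network_border_group` in `ReleaseAddressInput`, so an address allocated in a Wavelength Zone is released in that same zone.
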
From 66e05722745690cedf47c1520f510b3246862879 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 15 Dec 2020 08:35:50 -0500 Subject: [PATCH 0233/1212] Update CHANGELOG for #16724 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c7312007754..d4d397a7d6d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,9 @@ FEATURES ENHANCEMENTS * data-source/aws_autoscaling_group: Adds `launch_template` attribute [GH-16297] +* data-source/aws_eip: Add `carrier_ip` attribute [GH-16724] * data-source/aws_launch_configuration: Add `metadata_options` attribute [GH-14637] +* resource/aws_eip: Add `carrier_ip` attribute [GH-16724] * resource/aws_launch_configuration: Add `metadata_options` configuration block [GH-14637] BUG FIXES From ca78d533489a4f3bf245d223ba6bd75a49bdb13a Mon Sep 17 00:00:00 2001 From: Hans Nielsen Date: Tue, 15 Dec 2020 06:37:32 -0800 Subject: [PATCH 0234/1212] Support Nitro Enclaves in aws_instance and aws_launch_template (#16361) Output from acceptance testing in AWS Commercial (failure known and unrelated): ``` --- FAIL: TestAccAWSInstance_instanceProfileChange (131.45s) --- PASS: TestAccAWSInstance_addSecondaryInterface (168.25s) --- PASS: TestAccAWSInstance_addSecurityGroupNetworkInterface (145.34s) --- PASS: TestAccAWSInstance_associatePublic_defaultPrivate (93.37s) --- PASS: TestAccAWSInstance_associatePublic_defaultPublic (193.44s) --- PASS: TestAccAWSInstance_associatePublic_explicitPrivate (90.86s) --- PASS: TestAccAWSInstance_associatePublic_explicitPublic (89.46s) --- PASS: TestAccAWSInstance_associatePublic_overridePrivate (91.19s) --- PASS: TestAccAWSInstance_associatePublic_overridePublic (81.27s) --- PASS: TestAccAWSInstance_associatePublicIPAndPrivateIP (80.40s) --- PASS: TestAccAWSInstance_atLeastOneOtherEbsVolume (192.83s) --- PASS: TestAccAWSInstance_basic (89.76s) --- PASS: TestAccAWSInstance_blockDevices (78.09s) --- PASS: TestAccAWSInstance_changeInstanceType (149.50s) --- PASS: TestAccAWSInstance_CreditSpecification_Empty_NonBurstable (322.48s) --- PASS: TestAccAWSInstance_creditSpecification_isNotAppliedToNonBurstable (95.59s) --- PASS: TestAccAWSInstance_creditSpecification_standardCpuCredits (119.17s) --- PASS: TestAccAWSInstance_creditSpecification_standardCpuCredits_t2Tot3Taint (404.66s) --- PASS: TestAccAWSInstance_creditSpecification_unknownCpuCredits_t2 (91.53s) --- PASS: TestAccAWSInstance_creditSpecification_unknownCpuCredits_t3 (313.08s) --- PASS: TestAccAWSInstance_creditSpecification_unlimitedCpuCredits (118.40s) --- PASS: TestAccAWSInstance_creditSpecification_unlimitedCpuCredits_t2Tot3Taint (395.94s) --- PASS: TestAccAWSInstance_creditSpecification_unspecifiedDefaultsToStandard (78.13s) --- PASS: TestAccAWSInstance_CreditSpecification_UnspecifiedToEmpty_NonBurstable (108.23s) --- PASS: TestAccAWSInstance_creditSpecification_updateCpuCredits (134.94s) --- PASS: TestAccAWSInstance_creditSpecificationT3_standardCpuCredits (131.22s) --- PASS: TestAccAWSInstance_creditSpecificationT3_unlimitedCpuCredits (117.74s) --- PASS: TestAccAWSInstance_creditSpecificationT3_unspecifiedDefaultsToUnlimited (309.62s) --- PASS: TestAccAWSInstance_creditSpecificationT3_updateCpuCredits (146.03s) --- PASS: TestAccAWSInstance_dedicatedInstance (106.61s) --- PASS: TestAccAWSInstance_disableApiTermination (118.10s) --- PASS: TestAccAWSInstance_disappears (92.80s) --- PASS: TestAccAWSInstance_EbsBlockDevice_InvalidIopsForVolumeType (17.27s) --- PASS: TestAccAWSInstance_EbsBlockDevice_KmsKeyArn (142.27s) --- PASS: 
TestAccAWSInstance_EbsRootDevice_basic (132.83s) --- PASS: TestAccAWSInstance_EbsRootDevice_ModifyAll (164.65s) --- PASS: TestAccAWSInstance_EbsRootDevice_ModifyDeleteOnTermination (97.23s) --- PASS: TestAccAWSInstance_EbsRootDevice_ModifyIOPS_Io1 (121.82s) --- PASS: TestAccAWSInstance_EbsRootDevice_ModifyIOPS_Io2 (147.63s) --- PASS: TestAccAWSInstance_EbsRootDevice_ModifySize (236.63s) --- PASS: TestAccAWSInstance_EbsRootDevice_ModifyType (123.66s) --- PASS: TestAccAWSInstance_EbsRootDevice_MultipleBlockDevices_ModifyDeleteOnTermination (199.08s) --- PASS: TestAccAWSInstance_EbsRootDevice_MultipleBlockDevices_ModifySize (123.52s) --- PASS: TestAccAWSInstance_EbsRootDevice_MultipleDynamicEBSBlockDevices (207.68s) --- PASS: TestAccAWSInstance_Empty_PrivateIP (78.02s) --- PASS: TestAccAWSInstance_enclaveOptions (430.58s) --- PASS: TestAccAWSInstance_forceNewAndTagsDrift (270.39s) --- PASS: TestAccAWSInstance_getPasswordData_falseToTrue (208.61s) --- PASS: TestAccAWSInstance_getPasswordData_trueToFalse (269.53s) --- PASS: TestAccAWSInstance_GP2IopsDevice (80.89s) --- PASS: TestAccAWSInstance_GP2WithIopsValue (11.19s) --- PASS: TestAccAWSInstance_hibernation (204.56s) --- PASS: TestAccAWSInstance_inDefaultVpcBySgId (100.86s) --- PASS: TestAccAWSInstance_inDefaultVpcBySgName (99.23s) --- PASS: TestAccAWSInstance_ipv6_supportAddressCount (99.03s) --- PASS: TestAccAWSInstance_ipv6_supportAddressCountWithIpv4 (189.48s) --- PASS: TestAccAWSInstance_ipv6AddressCountAndSingleAddressCausesError (16.71s) --- PASS: TestAccAWSInstance_keyPairCheck (86.95s) --- PASS: TestAccAWSInstance_metadataOptions (154.98s) --- PASS: TestAccAWSInstance_NetworkInstanceRemovingAllSecurityGroups (112.12s) --- PASS: TestAccAWSInstance_NetworkInstanceSecurityGroups (103.81s) --- PASS: TestAccAWSInstance_NetworkInstanceVPCSecurityGroupIDs (136.45s) --- PASS: TestAccAWSInstance_NewNetworkInterface_EmptyPrivateIPAndSecondaryPrivateIPs (341.20s) --- PASS: TestAccAWSInstance_NewNetworkInterface_EmptyPrivateIPAndSecondaryPrivateIPsUpdate (161.33s) --- PASS: TestAccAWSInstance_NewNetworkInterface_PrivateIPAndSecondaryPrivateIPs (134.73s) --- PASS: TestAccAWSInstance_NewNetworkInterface_PrivateIPAndSecondaryPrivateIPsUpdate (121.24s) --- PASS: TestAccAWSInstance_NewNetworkInterface_PublicIPAndSecondaryPrivateIPs (410.27s) --- PASS: TestAccAWSInstance_noAMIEphemeralDevices (59.54s) --- PASS: TestAccAWSInstance_placementGroup (304.38s) --- PASS: TestAccAWSInstance_primaryNetworkInterface (112.14s) --- PASS: TestAccAWSInstance_primaryNetworkInterfaceSourceDestCheck (112.05s) --- PASS: TestAccAWSInstance_privateIP (75.33s) --- PASS: TestAccAWSInstance_RootBlockDevice_KmsKeyArn (99.55s) --- PASS: TestAccAWSInstance_rootBlockDeviceMismatch (121.64s) --- PASS: TestAccAWSInstance_rootInstanceStore (149.85s) --- PASS: TestAccAWSInstance_sourceDestCheck (159.49s) --- PASS: TestAccAWSInstance_tags (104.40s) --- PASS: TestAccAWSInstance_UserData_EmptyStringToUnspecified (113.94s) --- PASS: TestAccAWSInstance_UserData_UnspecifiedToEmptyString (105.43s) --- PASS: TestAccAWSInstance_userDataBase64 (105.42s) --- PASS: TestAccAWSInstance_volumeTags (166.33s) --- PASS: TestAccAWSInstance_volumeTagsComputed (118.47s) --- PASS: TestAccAWSInstance_withIamInstanceProfile (99.55s) --- SKIP: TestAccAWSInstance_inEc2Classic (2.60s) --- SKIP: TestAccAWSInstance_outpost (1.99s) --- PASS: TestAccAWSInstanceDataSource_AzUserData (109.31s) --- PASS: TestAccAWSInstanceDataSource_basic (125.30s) --- PASS: TestAccAWSInstanceDataSource_blockDevices (97.25s) --- 
PASS: TestAccAWSInstanceDataSource_creditSpecification (84.91s) --- PASS: TestAccAWSInstanceDataSource_EbsBlockDevice_KmsKeyId (110.17s) --- PASS: TestAccAWSInstanceDataSource_enclaveOptions (68.12s) --- PASS: TestAccAWSInstanceDataSource_getPasswordData_falseToTrue (247.32s) --- PASS: TestAccAWSInstanceDataSource_getPasswordData_trueToFalse (255.13s) --- PASS: TestAccAWSInstanceDataSource_GetUserData (152.33s) --- PASS: TestAccAWSInstanceDataSource_GetUserData_NoUserData (182.10s) --- PASS: TestAccAWSInstanceDataSource_gp2IopsDevice (108.60s) --- PASS: TestAccAWSInstanceDataSource_keyPair (122.63s) --- PASS: TestAccAWSInstanceDataSource_metadataOptions (305.70s) --- PASS: TestAccAWSInstanceDataSource_PlacementGroup (335.92s) --- PASS: TestAccAWSInstanceDataSource_privateIP (106.49s) --- PASS: TestAccAWSInstanceDataSource_RootBlockDevice_KmsKeyId (141.35s) --- PASS: TestAccAWSInstanceDataSource_rootInstanceStore (107.10s) --- PASS: TestAccAWSInstanceDataSource_secondaryPrivateIPs (101.78s) --- PASS: TestAccAWSInstanceDataSource_SecurityGroups (114.84s) --- PASS: TestAccAWSInstanceDataSource_tags (113.19s) --- PASS: TestAccAWSInstanceDataSource_VPC (118.35s) --- PASS: TestAccAWSInstanceDataSource_VPCSecurityGroups (128.09s) --- PASS: TestAccAWSInstancesDataSource_basic (345.58s) --- PASS: TestAccAWSInstancesDataSource_instanceStateNames (91.95s) --- PASS: TestAccAWSInstancesDataSource_tags (334.64s) --- PASS: TestAccAWSLaunchTemplate_associateCarrierIPAddress (95.60s) --- PASS: TestAccAWSLaunchTemplate_associatePublicIPAddress (96.67s) --- PASS: TestAccAWSLaunchTemplate_basic (15.00s) --- PASS: TestAccAWSLaunchTemplate_BlockDeviceMappings_EBS (66.34s) --- PASS: TestAccAWSLaunchTemplate_BlockDeviceMappings_EBS_DeleteOnTermination (86.09s) --- PASS: TestAccAWSLaunchTemplate_BlockDeviceMappings_EBS_Gp3 (65.39s) --- PASS: TestAccAWSLaunchTemplate_capacityReservation_preference (30.21s) --- PASS: TestAccAWSLaunchTemplate_capacityReservation_target (31.33s) --- PASS: TestAccAWSLaunchTemplate_cpuOptions (30.11s) --- PASS: TestAccAWSLaunchTemplate_creditSpecification_nonBurstable (34.89s) --- PASS: TestAccAWSLaunchTemplate_creditSpecification_t2 (35.68s) --- PASS: TestAccAWSLaunchTemplate_creditSpecification_t3 (32.18s) --- PASS: TestAccAWSLaunchTemplate_data (21.55s) --- PASS: TestAccAWSLaunchTemplate_defaultVersion (60.20s) --- PASS: TestAccAWSLaunchTemplate_description (53.26s) --- PASS: TestAccAWSLaunchTemplate_disappears (19.56s) --- PASS: TestAccAWSLaunchTemplate_EbsOptimized (106.63s) --- PASS: TestAccAWSLaunchTemplate_ElasticInferenceAccelerator (40.95s) --- PASS: TestAccAWSLaunchTemplate_enclaveOptions (64.33s) --- PASS: TestAccAWSLaunchTemplate_hibernation (63.36s) --- PASS: TestAccAWSLaunchTemplate_IamInstanceProfile_EmptyConfigurationBlock (28.60s) --- PASS: TestAccAWSLaunchTemplate_instanceMarketOptions (84.40s) --- PASS: TestAccAWSLaunchTemplate_licenseSpecification (30.35s) --- PASS: TestAccAWSLaunchTemplate_metadataOptions (30.84s) --- PASS: TestAccAWSLaunchTemplate_networkInterface (69.40s) --- PASS: TestAccAWSLaunchTemplate_networkInterface_ipv6AddressCount (25.96s) --- PASS: TestAccAWSLaunchTemplate_networkInterface_ipv6Addresses (27.09s) --- PASS: TestAccAWSLaunchTemplate_networkInterfaceAddresses (68.27s) --- PASS: TestAccAWSLaunchTemplate_NetworkInterfaces_DeleteOnTermination (85.02s) --- PASS: TestAccAWSLaunchTemplate_placement_partitionNum (51.60s) --- PASS: TestAccAWSLaunchTemplate_tags (52.39s) --- PASS: TestAccAWSLaunchTemplate_update (78.88s) --- PASS: 
TestAccAWSLaunchTemplate_updateDefaultVersion (69.42s) --- PASS: TestAccAWSLaunchTemplateDataSource_associateCarrierIPAddress (63.16s) --- PASS: TestAccAWSLaunchTemplateDataSource_associatePublicIPAddress (63.57s) --- PASS: TestAccAWSLaunchTemplateDataSource_basic (27.41s) --- PASS: TestAccAWSLaunchTemplateDataSource_enclaveOptions (33.37s) --- PASS: TestAccAWSLaunchTemplateDataSource_filter_basic (28.83s) --- PASS: TestAccAWSLaunchTemplateDataSource_filter_tags (33.26s) --- PASS: TestAccAWSLaunchTemplateDataSource_id_basic (26.74s) --- PASS: TestAccAWSLaunchTemplateDataSource_metadataOptions (32.68s) --- PASS: TestAccAWSLaunchTemplateDataSource_networkInterfaces_deleteOnTermination (62.31s) --- PASS: TestAccAWSLaunchTemplateDataSource_NonExistent (8.99s) ``` --- aws/data_source_aws_instance.go | 16 ++++++ aws/data_source_aws_instance_test.go | 46 ++++++++++++++++ aws/data_source_aws_launch_template.go | 16 ++++++ aws/data_source_aws_launch_template_test.go | 37 +++++++++++++ aws/resource_aws_instance.go | 50 +++++++++++++++++ aws/resource_aws_instance_test.go | 58 ++++++++++++++++++++ aws/resource_aws_launch_template.go | 41 ++++++++++++++ aws/resource_aws_launch_template_test.go | 52 ++++++++++++++++++ website/docs/d/instance.html.markdown | 2 + website/docs/d/launch_template.html.markdown | 2 + website/docs/r/instance.html.markdown | 13 +++++ website/docs/r/launch_template.html.markdown | 9 +++ 12 files changed, 342 insertions(+) diff --git a/aws/data_source_aws_instance.go b/aws/data_source_aws_instance.go index 248b9bedd14..6de84be9267 100644 --- a/aws/data_source_aws_instance.go +++ b/aws/data_source_aws_instance.go @@ -316,6 +316,18 @@ func dataSourceAwsInstance() *schema.Resource { Type: schema.TypeBool, Computed: true, }, + "enclave_options": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, }, } } @@ -535,5 +547,9 @@ func instanceDescriptionAttributes(d *schema.ResourceData, instance *ec2.Instanc return fmt.Errorf("error setting metadata_options: %s", err) } + if err := d.Set("enclave_options", flattenEc2EnclaveOptions(instance.EnclaveOptions)); err != nil { + return fmt.Errorf("error setting enclave_options: %s", err) + } + return nil } diff --git a/aws/data_source_aws_instance_test.go b/aws/data_source_aws_instance_test.go index aecf29b381c..8dddd0e2f6e 100644 --- a/aws/data_source_aws_instance_test.go +++ b/aws/data_source_aws_instance_test.go @@ -489,6 +489,26 @@ func TestAccAWSInstanceDataSource_metadataOptions(t *testing.T) { }) } +func TestAccAWSInstanceDataSource_enclaveOptions(t *testing.T) { + resourceName := "aws_instance.test" + datasourceName := "data.aws_instance.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccInstanceDataSourceConfig_enclaveOptions(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(datasourceName, "enclave_options.#", resourceName, "enclave_options.#"), + resource.TestCheckResourceAttrPair(datasourceName, "enclave_options.0.enabled", resourceName, "enclave_options.0.enabled"), + ), + }, + }, + }) +} + // Lookup based on InstanceID var testAccInstanceDataSourceConfig = testAccLatestAmazonLinuxHvmEbsAmiConfig() + ` resource "aws_instance" "test" { @@ -920,3 +940,29 @@ data "aws_instance" "test" { } `, 
rName)) } + +func testAccInstanceDataSourceConfig_enclaveOptions(rName string) string { + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + testAccAvailableEc2InstanceTypeForRegion("c5a.xlarge", "c5.xlarge"), + fmt.Sprintf(` +resource "aws_instance" "test" { + ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + subnet_id = aws_subnet.test.id + + tags = { + Name = %[1]q + } + + enclave_options { + enabled = true + } +} + +data "aws_instance" "test" { + instance_id = aws_instance.test.id +} +`, rName)) +} diff --git a/aws/data_source_aws_launch_template.go b/aws/data_source_aws_launch_template.go index 964ce62aae5..16ae3882959 100644 --- a/aws/data_source_aws_launch_template.go +++ b/aws/data_source_aws_launch_template.go @@ -231,6 +231,18 @@ func dataSourceAwsLaunchTemplate() *schema.Resource { }, }, }, + "enclave_options": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, "monitoring": { Type: schema.TypeList, Computed: true, @@ -515,6 +527,10 @@ func dataSourceAwsLaunchTemplateRead(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("error setting metadata_options: %w", err) } + if err := d.Set("enclave_options", getEnclaveOptions(ltData.EnclaveOptions)); err != nil { + return fmt.Errorf("error setting enclave_options: %w", err) + } + if err := d.Set("monitoring", getMonitoring(ltData.Monitoring)); err != nil { return fmt.Errorf("error setting monitoring: %w", err) } diff --git a/aws/data_source_aws_launch_template_test.go b/aws/data_source_aws_launch_template_test.go index 1eec0725a84..41d8dfb559e 100644 --- a/aws/data_source_aws_launch_template_test.go +++ b/aws/data_source_aws_launch_template_test.go @@ -149,6 +149,27 @@ func TestAccAWSLaunchTemplateDataSource_metadataOptions(t *testing.T) { }) } +func TestAccAWSLaunchTemplateDataSource_enclaveOptions(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + dataSourceName := "data.aws_launch_template.test" + resourceName := "aws_launch_template.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLaunchTemplateDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLaunchTemplateDataSourceConfig_enclaveOptions(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "enclave_options.#", resourceName, "enclave_options.#"), + resource.TestCheckResourceAttrPair(dataSourceName, "enclave_options.0.enabled", resourceName, "enclave_options.0.enabled"), + ), + }, + }, + }) +} + func TestAccAWSLaunchTemplateDataSource_associatePublicIPAddress(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") dataSourceName := "data.aws_launch_template.test" @@ -345,6 +366,22 @@ data "aws_launch_template" "test" { `, rName) } +func testAccAWSLaunchTemplateDataSourceConfig_enclaveOptions(rName string) string { + return fmt.Sprintf(` +resource "aws_launch_template" "test" { + name = %[1]q + + enclave_options { + enabled = true + } +} + +data "aws_launch_template" "test" { + name = aws_launch_template.test.name +} +`, rName) +} + func testAccAWSLaunchTemplateDataSourceConfig_associatePublicIpAddress(rName, associatePublicIPAddress string) string { return fmt.Sprintf(` resource "aws_launch_template" "test" 
{ diff --git a/aws/resource_aws_instance.go b/aws/resource_aws_instance.go index 626d9116e85..2d00658b75f 100644 --- a/aws/resource_aws_instance.go +++ b/aws/resource_aws_instance.go @@ -580,6 +580,23 @@ func resourceAwsInstance() *schema.Resource { }, }, }, + + "enclave_options": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + }, + }, }, } } @@ -629,6 +646,7 @@ func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error { CpuOptions: instanceOpts.CpuOptions, HibernationOptions: instanceOpts.HibernationOptions, MetadataOptions: instanceOpts.MetadataOptions, + EnclaveOptions: instanceOpts.EnclaveOptions, TagSpecifications: tagSpecifications, } @@ -784,6 +802,10 @@ func resourceAwsInstanceRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("error setting metadata_options: %s", err) } + if err := d.Set("enclave_options", flattenEc2EnclaveOptions(instance.EnclaveOptions)); err != nil { + return fmt.Errorf("error setting enclave_options: %s", err) + } + d.Set("ami", instance.ImageId) d.Set("instance_type", instance.InstanceType) d.Set("key_name", instance.KeyName) @@ -2175,6 +2197,7 @@ type awsInstanceOpts struct { CpuOptions *ec2.CpuOptionsRequest HibernationOptions *ec2.HibernationOptionsRequest MetadataOptions *ec2.InstanceMetadataOptionsRequest + EnclaveOptions *ec2.EnclaveOptionsRequest } func buildAwsInstanceOpts(d *schema.ResourceData, meta interface{}) (*awsInstanceOpts, error) { @@ -2187,6 +2210,7 @@ func buildAwsInstanceOpts(d *schema.ResourceData, meta interface{}) (*awsInstanc ImageID: aws.String(d.Get("ami").(string)), InstanceType: aws.String(instanceType), MetadataOptions: expandEc2InstanceMetadataOptions(d.Get("metadata_options").([]interface{})), + EnclaveOptions: expandEc2EnclaveOptions(d.Get("enclave_options").([]interface{})), } // Set default cpu_credits as Unlimited for T3 instance type @@ -2490,6 +2514,20 @@ func expandEc2InstanceMetadataOptions(l []interface{}) *ec2.InstanceMetadataOpti return opts } +func expandEc2EnclaveOptions(l []interface{}) *ec2.EnclaveOptionsRequest { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + opts := &ec2.EnclaveOptionsRequest{ + Enabled: aws.Bool(m["enabled"].(bool)), + } + + return opts +} + //Expands an array of secondary Private IPs into a ec2 Private IP Address Spec func expandSecondaryPrivateIPAddresses(ips []interface{}) []*ec2.PrivateIpAddressSpecification { specs := make([]*ec2.PrivateIpAddressSpecification, 0, len(ips)) @@ -2517,6 +2555,18 @@ func flattenEc2InstanceMetadataOptions(opts *ec2.InstanceMetadataOptionsResponse return []interface{}{m} } +func flattenEc2EnclaveOptions(opts *ec2.EnclaveOptions) []interface{} { + if opts == nil { + return nil + } + + m := map[string]interface{}{ + "enabled": aws.BoolValue(opts.Enabled), + } + + return []interface{}{m} +} + // resourceAwsInstanceFindByID returns the EC2 instance by ID // * If the instance is found, returns the instance and nil // * If no instance is found, returns nil and nil diff --git a/aws/resource_aws_instance_test.go b/aws/resource_aws_instance_test.go index 4926c82d204..da1c1f0afdf 100644 --- a/aws/resource_aws_instance_test.go +++ b/aws/resource_aws_instance_test.go @@ -3109,6 +3109,41 @@ func TestAccAWSInstance_metadataOptions(t *testing.T) { }) } +func 
TestAccAWSInstance_enclaveOptions(t *testing.T) { + var instance1, instance2 ec2.Instance + resourceName := "aws_instance.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccInstanceConfigEnclaveOptions(true), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists(resourceName, &instance1), + resource.TestCheckResourceAttr(resourceName, "enclave_options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "enclave_options.0.enabled", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccInstanceConfigEnclaveOptions(false), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists(resourceName, &instance2), + testAccCheckInstanceRecreated(&instance1, &instance2), + resource.TestCheckResourceAttr(resourceName, "enclave_options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "enclave_options.0.enabled", "false"), + ), + }, + }, + }) +} + func testAccCheckInstanceNotRecreated(t *testing.T, before, after *ec2.Instance) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -5125,6 +5160,29 @@ resource "aws_instance" "test" { `, rName)) } +func testAccInstanceConfigEnclaveOptions(enabled bool) string { + name := "tf-acc-instance-enclaves" + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(name, false), + testAccAvailableEc2InstanceTypeForRegion("c5a.xlarge", "c5.xlarge"), + fmt.Sprintf(` +resource "aws_instance" "test" { + ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + subnet_id = aws_subnet.test.id + + enclave_options { + enabled = %[2]t + } + + tags = { + Name = %[1]q + } +} +`, name, enabled)) +} + func testAccAwsEc2InstanceConfigDynamicEBSBlockDevices() string { return composeConfig(testAccLatestAmazonLinuxPvEbsAmiConfig(), ` resource "aws_instance" "test" { diff --git a/aws/resource_aws_launch_template.go b/aws/resource_aws_launch_template.go index ebdb5b4792a..684f22bdc95 100644 --- a/aws/resource_aws_launch_template.go +++ b/aws/resource_aws_launch_template.go @@ -405,6 +405,20 @@ func resourceAwsLaunchTemplate() *schema.Resource { }, }, + "enclave_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + "monitoring": { Type: schema.TypeList, Optional: true, @@ -803,6 +817,10 @@ func resourceAwsLaunchTemplateRead(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("error setting metadata_options: %s", err) } + if err := d.Set("enclave_options", getEnclaveOptions(ltData.EnclaveOptions)); err != nil { + return fmt.Errorf("error setting enclave_options: %s", err) + } + if err := d.Set("monitoring", getMonitoring(ltData.Monitoring)); err != nil { return fmt.Errorf("error setting monitoring: %s", err) } @@ -1120,6 +1138,17 @@ func flattenLaunchTemplateInstanceMetadataOptions(opts *ec2.LaunchTemplateInstan return []interface{}{m} } +func getEnclaveOptions(m *ec2.LaunchTemplateEnclaveOptions) []interface{} { + s := []interface{}{} + if m != nil { + mo := map[string]interface{}{ + "enabled": aws.BoolValue(m.Enabled), + } + s = append(s, mo) + } + return s +} + func getMonitoring(m *ec2.LaunchTemplatesMonitoring) 
[]interface{} { s := []interface{}{} if m != nil { @@ -1393,6 +1422,17 @@ func buildLaunchTemplateData(d *schema.ResourceData) (*ec2.RequestLaunchTemplate opts.MetadataOptions = expandLaunchTemplateInstanceMetadataOptions(v.([]interface{})) } + if v, ok := d.GetOk("enclave_options"); ok { + m := v.([]interface{}) + if len(m) > 0 && m[0] != nil { + mData := m[0].(map[string]interface{}) + enclaveOptions := &ec2.LaunchTemplateEnclaveOptionsRequest{ + Enabled: aws.Bool(mData["enabled"].(bool)), + } + opts.EnclaveOptions = enclaveOptions + } + } + if v, ok := d.GetOk("monitoring"); ok { m := v.([]interface{}) if len(m) > 0 && m[0] != nil { @@ -1816,6 +1856,7 @@ var updateKeys = []string{ "ebs_optimized", "elastic_gpu_specifications", "elastic_inference_accelerator", + "enclave_options", "hibernation_options", "iam_instance_profile", "image_id", diff --git a/aws/resource_aws_launch_template_test.go b/aws/resource_aws_launch_template_test.go index 99dfabd6fab..187050cfb8f 100644 --- a/aws/resource_aws_launch_template_test.go +++ b/aws/resource_aws_launch_template_test.go @@ -1039,6 +1039,46 @@ func TestAccAWSLaunchTemplate_metadataOptions(t *testing.T) { }) } +func TestAccAWSLaunchTemplate_enclaveOptions(t *testing.T) { + var template ec2.LaunchTemplate + resourceName := "aws_launch_template.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLaunchTemplateDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLaunchTemplateConfig_enclaveOptions(rName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLaunchTemplateExists(resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "enclave_options.0.enabled", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSLaunchTemplateConfig_enclaveOptions(rName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLaunchTemplateExists(resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "enclave_options.0.enabled", "false"), + ), + }, + { + Config: testAccAWSLaunchTemplateConfig_enclaveOptions(rName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLaunchTemplateExists(resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "enclave_options.0.enabled", "true"), + ), + }, + }, + }) +} + func TestAccAWSLaunchTemplate_hibernation(t *testing.T) { var template ec2.LaunchTemplate resourceName := "aws_launch_template.test" @@ -1956,6 +1996,18 @@ resource "aws_launch_template" "test" { `, rName) } +func testAccAWSLaunchTemplateConfig_enclaveOptions(rName string, enabled bool) string { + return fmt.Sprintf(` +resource "aws_launch_template" "test" { + name = %[1]q + + enclave_options { + enabled = %[2]t + } +} +`, rName, enabled) +} + func testAccAWSLaunchTemplateConfigHibernation(rName string, enabled bool) string { return fmt.Sprintf(` resource "aws_launch_template" "test" { diff --git a/website/docs/d/instance.html.markdown b/website/docs/d/instance.html.markdown index 83dcb7f3880..8753f953a4c 100644 --- a/website/docs/d/instance.html.markdown +++ b/website/docs/d/instance.html.markdown @@ -119,5 +119,7 @@ interpolation. * `http_endpoint` - The state of the metadata service: `enabled`, `disabled`. * `http_tokens` - If session tokens are required: `optional`, `required`. 
* `http_put_response_hop_limit` - The desired HTTP PUT response hop limit for instance metadata requests. +* `enclave_options` - The enclave options of the instance. + * `enabled` - Whether Nitro Enclaves are enabled. [1]: http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html diff --git a/website/docs/d/launch_template.html.markdown b/website/docs/d/launch_template.html.markdown index 940e4a2b6f0..d84ba31afae 100644 --- a/website/docs/d/launch_template.html.markdown +++ b/website/docs/d/launch_template.html.markdown @@ -88,4 +88,6 @@ In addition to all arguments above, the following attributes are exported: * `tags` - (Optional) A map of tags to assign to the launch template. * `user_data` - The Base64-encoded user data to provide when launching the instance. * `hibernation_options` - The hibernation options for the instance. +* `enclave_options` - The enclave options of the instance. + * `enabled` - Whether Nitro Enclaves are enabled. diff --git a/website/docs/r/instance.html.markdown b/website/docs/r/instance.html.markdown index 55d6a9498df..ad1e5d7ba8c 100644 --- a/website/docs/r/instance.html.markdown +++ b/website/docs/r/instance.html.markdown @@ -108,6 +108,7 @@ instances. See [Shutdown Behavior](https://docs.aws.amazon.com/AWSEC2/latest/Use * `credit_specification` - (Optional) Customize the credit specification of the instance. See [Credit Specification](#credit-specification) below for more details. * `hibernation` - (Optional) If true, the launched EC2 instance will support hibernation. * `metadata_options` - (Optional) Customize the metadata options of the instance. See [Metadata Options](#metadata-options) below for more details. +* `enclave_options` - (Optional) Enable Nitro Enclaves on launched instances. See [Enclave Options](#enclave-options) below for more details. ### Timeouts @@ -211,6 +212,18 @@ The `metadata_options` block supports the following: For more information, see the documentation on the [Instance Metadata Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). +### Enclave Options + +-> **NOTE:** Changing `enabled` will cause the resource to be destroyed and re-created. + +Enclave options apply to the instance at boot time. + +The `enclave_options` block supports the following: + +* `enabled` - (Optional) Whether Nitro Enclaves will be enabled on the instance. (Default: `false`). + +For more information, see the documentation on [Nitro Enclaves](https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html). + ### Example ```hcl diff --git a/website/docs/r/launch_template.html.markdown b/website/docs/r/launch_template.html.markdown index 904de1d1143..aa509c5f637 100644 --- a/website/docs/r/launch_template.html.markdown +++ b/website/docs/r/launch_template.html.markdown @@ -150,6 +150,7 @@ The following arguments are supported: * `tags` - (Optional) A map of tags to assign to the launch template. * `user_data` - The Base64-encoded user data to provide when launching the instance. * `hibernation_options` - The hibernation options for the instance. See [Hibernation Options](#hibernation-options) below for more details. +* `enclave_options` - (Optional) Enable Nitro Enclaves on launched instances. See [Enclave Options](#enclave-options) below for more details. ### Block devices @@ -324,6 +325,14 @@ The `hibernation_options` block supports the following: * `configured` - If set to `true`, the launched EC2 instance will have hibernation enabled. 
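(As an aside to the documentation added above: the `enclave_options` block is easier to follow next to a concrete configuration. The sketch below is modeled on the acceptance-test configurations elsewhere in this patch; the resource names, the AMI reference, and the hard-coded instance type are illustrative placeholders, not part of the change.)

```hcl
# Minimal sketch of the new enclave_options block, mirroring the
# acceptance tests in this patch. The AMI reference and instance type
# are placeholders; Nitro Enclaves require a supported Nitro-based
# instance type with enough vCPUs, such as c5.xlarge.
resource "aws_instance" "example" {
  ami           = data.aws_ami.amzn-ami-minimal-hvm-ebs.id
  instance_type = "c5.xlarge"

  enclave_options {
    enabled = true # ForceNew: toggling this replaces the instance
  }
}

resource "aws_launch_template" "example" {
  name = "example"

  enclave_options {
    enabled = true
  }
}
```

On `aws_instance` the block is Optional and Computed with `enabled` marked ForceNew, so toggling it recreates the instance; on `aws_launch_template`, `enclave_options` appears in the `updateKeys` list, so changing it is handled as a template update rather than a replacement.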
+### Enclave Options + +The `enclave_options` block supports the following: + +* `enabled` - If set to `true`, Nitro Enclaves will be enabled on the instance. + +For more information, see the documentation on [Nitro Enclaves](https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html). + ### Tag Specifications The tags to apply to the resources during launch. You can tag instances, volumes, elastic GPUs and spot instance requests. More information can be found in the [EC2 API documentation](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_LaunchTemplateTagSpecificationRequest.html). From b6ac60a56c2b67fc76b18927d6334ad6b7d22c92 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 15 Dec 2020 09:40:21 -0500 Subject: [PATCH 0235/1212] Update CHANGELOG for #16361 --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d4d397a7d6d..887695fbc67 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,9 +8,13 @@ ENHANCEMENTS * data-source/aws_autoscaling_group: Adds `launch_template` attribute [GH-16297] * data-source/aws_eip: Add `carrier_ip` attribute [GH-16724] +* data-source/aws_instance: Add `enclave_options` attribute (Nitro Enclaves) [GH-16361] * data-source/aws_launch_configuration: Add `metadata_options` attribute [GH-14637] +* data-source/aws_launch_template: Add `enclave_options` attribute (Nitro Enclaves) [GH-16361] * resource/aws_eip: Add `carrier_ip` attribute [GH-16724] +* resource/aws_instance: Add `enclave_options` configuration block (Nitro Enclaves) [GH-16361] * resource/aws_launch_configuration: Add `metadata_options` configuration block [GH-14637] +* resource/aws_launch_template: Add `enclave_options` configuration block (Nitro Enclaves) [GH-16361] BUG FIXES From acf1efa3a5fdd48a7bad5ba986cc77995de05064 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 15 Dec 2020 09:59:38 -0500 Subject: [PATCH 0236/1212] provider: Bump Go version to 1.15 and standardize on .go-version file for GitHub Actions workflows (#16557) Reference: https://github.com/hashicorp/terraform-provider-aws/blob/master/docs/MAINTAINING.md#go-default-version-update --- .github/workflows/acctest-terraform-lint.yml | 8 ++++++-- .github/workflows/changelog.yml | 4 +++- .github/workflows/dependencies.yml | 5 ++--- .github/workflows/documentation.yml | 4 +++- .github/workflows/examples.yml | 4 +++- .github/workflows/snapshot.yml | 3 ++- .github/workflows/terraform_provider.yml | 20 +++++++++++++++++++- .github/workflows/website.yml | 8 +++++++- .go-version | 2 +- docs/DEVELOPMENT.md | 2 +- docs/MAINTAINING.md | 1 - go.mod | 2 +- 12 files changed, 48 insertions(+), 15 deletions(-) diff --git a/.github/workflows/acctest-terraform-lint.yml b/.github/workflows/acctest-terraform-lint.yml index 10fcff35f1e..8dce20eb365 100644 --- a/.github/workflows/acctest-terraform-lint.yml +++ b/.github/workflows/acctest-terraform-lint.yml @@ -7,12 +7,12 @@ on: pull_request: paths: - .github/workflows/acctest-terraform-lint.yml + - .go-version - aws/*_test.go - scripts/validate-terraform.sh - tools/go.mod env: - GO_VERSION: "1.15" GO111MODULE: on jobs: @@ -20,6 +20,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + # See also: https://github.com/actions/setup-go/pull/62 + - run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV - uses: actions/setup-go@v2 with: go-version: ${{ env.GO_VERSION }} @@ -48,9 +50,11 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + # See also: https://github.com/actions/setup-go/pull/62 + - run: echo 
"GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV - uses: actions/setup-go@v2 with: go-version: ${{ env.GO_VERSION }} - uses: actions/cache@v2 continue-on-error: true timeout-minutes: 2 diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml index 6efd1ac737e..9e80f9fd261 100644 --- a/.github/workflows/changelog.yml +++ b/.github/workflows/changelog.yml @@ -6,11 +6,11 @@ on: - 'release/**' pull_request: paths: + - .go-version - CHANGELOG.md pull_request_target: env: - GO_VERSION: "1.14" GO111MODULE: on jobs: @@ -58,6 +58,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + # See also: https://github.com/actions/setup-go/pull/62 + - run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV - uses: actions/setup-go@v2 with: go-version: ${{ env.GO_VERSION }} diff --git a/.github/workflows/dependencies.yml b/.github/workflows/dependencies.yml index a61dfa20d04..0674c4983be 100644 --- a/.github/workflows/dependencies.yml +++ b/.github/workflows/dependencies.yml @@ -7,9 +7,6 @@ on: - 'release/**' pull_request_target: -env: - GO_VERSION: "1.14" - jobs: changes: if: github.event_name == 'pull_request_target' && !contains(fromJSON('["anGie44", "bflad", "breathingdust", "dependabot[bot]", "DrFaust92", "ewbankkit", "gdavison", "maryelizbeth", "YakDriver"]'), github.actor) @@ -64,6 +61,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + # See also: https://github.com/actions/setup-go/pull/62 + - run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV - uses: actions/setup-go@v2 with: go-version: ${{ env.GO_VERSION }} diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index b6e510fd46a..f05b37d19df 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -8,10 +8,10 @@ on: - .markdownlinkcheck.json - .markdownlint.yml - .github/workflows/documentation.yml + - .go-version - docs/** env: - GO_VERSION: "1.14" GO111MODULE: on jobs: @@ -38,6 +38,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + # See also: https://github.com/actions/setup-go/pull/62 + - run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV - uses: actions/setup-go@v2 with: go-version: ${{ env.GO_VERSION }} diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index b78732eb591..e6cf40feee5 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -6,11 +6,11 @@ on: pull_request: paths: - .github/workflows/examples.yml + - .go-version - examples/** - tools/go.mod env: - GO_VERSION: "1.15" AWS_DEFAULT_REGION: us-west-2 jobs: @@ -28,6 +28,8 @@ jobs: with: path: ~/go/pkg/mod key: ${{ runner.os }}-go-pkg-mod-${{ hashFiles('go.sum') }} + # See also: https://github.com/actions/setup-go/pull/62 + - run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV - uses: actions/setup-go@v2 with: go-version: ${{ env.GO_VERSION }} diff --git a/.github/workflows/snapshot.yml b/.github/workflows/snapshot.yml index ef83963a9b9..b4accb34088 100644 --- a/.github/workflows/snapshot.yml +++ b/.github/workflows/snapshot.yml @@ -6,7 +6,6 @@ on: workflow_dispatch: env: - GO_VERSION: "1.14" GO111MODULE: on jobs: @@ -14,6 +13,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + # See also: https://github.com/actions/setup-go/pull/62 + - run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV - uses: actions/setup-go@v2 with: go-version: ${{ env.GO_VERSION }} diff --git 
a/.github/workflows/terraform_provider.yml b/.github/workflows/terraform_provider.yml index cdcd1e63e4e..4f5b4f62752 100644 --- a/.github/workflows/terraform_provider.yml +++ b/.github/workflows/terraform_provider.yml @@ -8,6 +8,7 @@ on: pull_request: paths: - .github/workflows/terraform_provider.yml + - .go-version - .golangci.yml - .goreleaser.yml - .semgrep.yml @@ -26,7 +27,6 @@ on: env: AWS_DEFAULT_REGION: us-west-2 - GO_VERSION: "1.14" GO111MODULE: on TERRAFORM_VERSION: "0.12.25" @@ -36,6 +36,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + # See also: https://github.com/actions/setup-go/pull/62 + - run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV - uses: actions/setup-go@v2 with: go-version: ${{ env.GO_VERSION }} @@ -62,6 +64,8 @@ jobs: with: path: terraform-plugin-dir key: ${{ runner.os }}-terraform-plugin-dir-${{ hashFiles('go.sum') }}-${{ hashFiles('aws/**') }} + # See also: https://github.com/actions/setup-go/pull/62 + - run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV - if: steps.cache-terraform-plugin-dir.outputs.cache-hit != 'true' || steps.cache-terraform-plugin-dir.outcome == 'failure' uses: actions/setup-go@v2 with: @@ -126,6 +130,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + # See also: https://github.com/actions/setup-go/pull/62 + - run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV - uses: actions/setup-go@v2 with: go-version: ${{ env.GO_VERSION }} @@ -155,6 +161,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + # See also: https://github.com/actions/setup-go/pull/62 + - run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV - uses: actions/setup-go@v2 with: go-version: ${{ env.GO_VERSION }} @@ -186,6 +194,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + # See also: https://github.com/actions/setup-go/pull/62 + - run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV - uses: actions/setup-go@v2 with: go-version: ${{ env.GO_VERSION }} @@ -212,6 +222,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + # See also: https://github.com/actions/setup-go/pull/62 + - run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV - uses: actions/setup-go@v2 with: go-version: ${{ env.GO_VERSION }} @@ -240,6 +252,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + # See also: https://github.com/actions/setup-go/pull/62 + - run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV - uses: actions/setup-go@v2 with: go-version: ${{ env.GO_VERSION }} @@ -267,6 +281,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + # See also: https://github.com/actions/setup-go/pull/62 + - run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV - uses: actions/setup-go@v2 with: go-version: ${{ env.GO_VERSION }} @@ -300,6 +316,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + # See also: https://github.com/actions/setup-go/pull/62 + - run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV - uses: actions/setup-go@v2 with: go-version: ${{ env.GO_VERSION }} diff --git a/.github/workflows/website.yml b/.github/workflows/website.yml index d3729248912..a3ba7eeb3d3 100644 --- a/.github/workflows/website.yml +++ b/.github/workflows/website.yml @@ -10,13 +10,13 @@ on: pull_request: paths: - .github/workflows/website.yml + - .go-version - .markdownlinkcheck.json - .markdownlint.yml - website/docs/** - tools/go.mod env: - GO_VERSION: "1.14" GO111MODULE: on jobs: @@ -52,6 +52,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: 
actions/checkout@v2 + # See also: https://github.com/actions/setup-go/pull/62 + - run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV - uses: actions/setup-go@v2 with: go-version: ${{ env.GO_VERSION }} @@ -67,6 +69,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + # See also: https://github.com/actions/setup-go/pull/62 + - run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV - uses: actions/setup-go@v2 with: go-version: ${{ env.GO_VERSION }} @@ -82,6 +86,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + # See also: https://github.com/actions/setup-go/pull/62 + - run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV - uses: actions/setup-go@v2 with: go-version: ${{ env.GO_VERSION }} diff --git a/.go-version b/.go-version index 24a57f28a41..d32434904bc 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.14.5 +1.15.5 diff --git a/docs/DEVELOPMENT.md b/docs/DEVELOPMENT.md index c49943ea72c..3a5b81eb004 100644 --- a/docs/DEVELOPMENT.md +++ b/docs/DEVELOPMENT.md @@ -3,7 +3,7 @@ ## Requirements - [Terraform](https://www.terraform.io/downloads.html) 0.12.26+ (to run acceptance tests) -- [Go](https://golang.org/doc/install) 1.14 (to build the provider plugin) +- [Go](https://golang.org/doc/install) 1.15 (to build the provider plugin) ## Quick Start diff --git a/docs/MAINTAINING.md b/docs/MAINTAINING.md index 8d8748d9d9b..696f8574ded 100644 --- a/docs/MAINTAINING.md +++ b/docs/MAINTAINING.md @@ -88,7 +88,6 @@ Ensure that the following steps are tracked within the issue and completed withi - Verify `goreleaser build --snapshot` succeeds for all currently supported architectures - Verify `goenv` support for the new version - Update `docs/DEVELOPMENT.md` -- Update `.github/workflows/*.yml` - Update `.go-version` - Update `CHANGELOG.md` detailing the update and mention any notes practitioners need to be aware of. diff --git a/go.mod b/go.mod index f29f19a9217..597534f085f 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/terraform-providers/terraform-provider-aws -go 1.14 +go 1.15 require ( github.com/aws/aws-sdk-go v1.36.7 From 073f1dab8cda2080026c5950a0f7b4909d1fd040 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 15 Dec 2020 10:17:26 -0500 Subject: [PATCH 0237/1212] data-source/aws_network_interface: Add association carrier_ip and customer_owned_ip attributes (#16723) * d/aws_network_interface: Add 'association.carrier_ip' attribute. * d/aws_network_interface: Move all configurations below acceptance tests. * d/aws_network_interface: Refactor existing acceptance tests before testing new functionality. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccDataSourceAwsNetworkInterface_' ACCTEST_PARALLELISM=2 ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 2 -run=TestAccDataSourceAwsNetworkInterface_ -timeout 120m === RUN TestAccDataSourceAwsNetworkInterface_basic === PAUSE TestAccDataSourceAwsNetworkInterface_basic === RUN TestAccDataSourceAwsNetworkInterface_filters === PAUSE TestAccDataSourceAwsNetworkInterface_filters === CONT TestAccDataSourceAwsNetworkInterface_basic === CONT TestAccDataSourceAwsNetworkInterface_filters --- PASS: TestAccDataSourceAwsNetworkInterface_filters (57.63s) --- PASS: TestAccDataSourceAwsNetworkInterface_basic (57.84s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 57.929s * d/aws_network_interface: Add 'TestAccDataSourceAwsNetworkInterface_EIPAssociation'. 
Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccDataSourceAwsNetworkInterface_EIPAssociation' ACCTEST_PARALLELISM=2 ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 2 -run=TestAccDataSourceAwsNetworkInterface_EIPAssociation -timeout 120m === RUN TestAccDataSourceAwsNetworkInterface_EIPAssociation === PAUSE TestAccDataSourceAwsNetworkInterface_EIPAssociation === CONT TestAccDataSourceAwsNetworkInterface_EIPAssociation --- PASS: TestAccDataSourceAwsNetworkInterface_EIPAssociation (67.86s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 67.946s * d/aws_network_interface: Add 'TestAccDataSourceAwsNetworkInterface_CarrierIPAssociation'. * Fix linter errors. * 'EIPAssociation' -> 'PublicIPAssociation'. * d/aws_network_interface: Add 'association.customer_owned_ip' attribute. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccDataSourceAwsNetworkInterface_PublicIPAssociation' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccDataSourceAwsNetworkInterface_PublicIPAssociation -timeout 120m === RUN TestAccDataSourceAwsNetworkInterface_PublicIPAssociation === PAUSE TestAccDataSourceAwsNetworkInterface_PublicIPAssociation === CONT TestAccDataSourceAwsNetworkInterface_PublicIPAssociation --- PASS: TestAccDataSourceAwsNetworkInterface_PublicIPAssociation (67.42s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 67.484s * Modify 'flattenEc2NetworkInterfaceAssociation' to conform to suggested code pattern (#16748). --- aws/data_source_aws_network_interface.go | 8 + aws/data_source_aws_network_interface_test.go | 281 ++++++++++++++---- aws/structure.go | 22 +- .../docs/d/network_interface.html.markdown | 4 +- 4 files changed, 250 insertions(+), 65 deletions(-) diff --git a/aws/data_source_aws_network_interface.go b/aws/data_source_aws_network_interface.go index 76a44ec9791..e836bf310de 100644 --- a/aws/data_source_aws_network_interface.go +++ b/aws/data_source_aws_network_interface.go @@ -33,6 +33,14 @@ func dataSourceAwsNetworkInterface() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "carrier_ip": { + Type: schema.TypeString, + Computed: true, + }, + "customer_owned_ip": { + Type: schema.TypeString, + Computed: true, + }, "ip_owner_id": { Type: schema.TypeString, Computed: true, diff --git a/aws/data_source_aws_network_interface_test.go b/aws/data_source_aws_network_interface_test.go index 1822ef1c669..f5f101db7b3 100644 --- a/aws/data_source_aws_network_interface_test.go +++ b/aws/data_source_aws_network_interface_test.go @@ -9,46 +9,156 @@ import ( ) func TestAccDataSourceAwsNetworkInterface_basic(t *testing.T) { - rName := acctest.RandString(5) + datasourceName := "data.aws_network_interface.test" + resourceName := "aws_network_interface.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccDataSourceAwsNetworkInterface_basic(rName), + Config: testAccDataSourceAwsNetworkInterfaceConfigBasic(rName), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("data.aws_network_interface.test", "private_ips.#", "1"), - resource.TestCheckResourceAttr("data.aws_network_interface.test", "security_groups.#", "1"), - resource.TestCheckResourceAttrPair("data.aws_network_interface.test", "private_ip", 
"aws_network_interface.test", "private_ip"), - resource.TestCheckResourceAttrSet("data.aws_network_interface.test", "availability_zone"), - resource.TestCheckResourceAttrPair("data.aws_network_interface.test", "description", "aws_network_interface.test", "description"), - resource.TestCheckResourceAttrSet("data.aws_network_interface.test", "interface_type"), - resource.TestCheckResourceAttrPair("data.aws_network_interface.test", "private_dns_name", "aws_network_interface.test", "private_dns_name"), - resource.TestCheckResourceAttrPair("data.aws_network_interface.test", "subnet_id", "aws_network_interface.test", "subnet_id"), - resource.TestCheckResourceAttr("data.aws_network_interface.test", "outpost_arn", ""), - resource.TestCheckResourceAttrSet("data.aws_network_interface.test", "vpc_id"), + resource.TestCheckResourceAttr(datasourceName, "private_ips.#", "1"), + resource.TestCheckResourceAttr(datasourceName, "security_groups.#", "1"), + resource.TestCheckResourceAttrPair(datasourceName, "private_ip", resourceName, "private_ip"), + resource.TestCheckResourceAttrSet(datasourceName, "availability_zone"), + resource.TestCheckResourceAttrPair(datasourceName, "description", resourceName, "description"), + resource.TestCheckResourceAttrSet(datasourceName, "interface_type"), + resource.TestCheckResourceAttrPair(datasourceName, "private_dns_name", resourceName, "private_dns_name"), + resource.TestCheckResourceAttrPair(datasourceName, "subnet_id", resourceName, "subnet_id"), + resource.TestCheckResourceAttr(datasourceName, "outpost_arn", ""), + resource.TestCheckResourceAttrSet(datasourceName, "vpc_id"), ), }, }, }) } -func testAccDataSourceAwsNetworkInterface_basic(rName string) string { - return fmt.Sprintf(` -data "aws_availability_zones" "available" { - state = "available" +func TestAccDataSourceAwsNetworkInterface_filters(t *testing.T) { + datasourceName := "data.aws_network_interface.test" + rName := acctest.RandomWithPrefix("tf-acc-test") - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsNetworkInterfaceConfigFilters(rName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(datasourceName, "private_ips.#", "1"), + resource.TestCheckResourceAttr(datasourceName, "security_groups.#", "1"), + ), + }, + }, + }) +} + +func TestAccDataSourceAwsNetworkInterface_CarrierIPAssociation(t *testing.T) { + datasourceName := "data.aws_network_interface.test" + resourceName := "aws_network_interface.test" + eipResourceName := "aws_eip.test" + eipAssociationResourceName := "aws_eip_association.test" + securityGroupResourceName := "aws_security_group.test" + vpcResourceName := "aws_vpc.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSWavelengthZoneAvailable(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsNetworkInterfaceConfigCarrierIPAssociation(rName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(datasourceName, "association.#", "1"), + resource.TestCheckResourceAttrPair(datasourceName, "association.0.allocation_id", eipResourceName, "id"), + resource.TestCheckResourceAttrPair(datasourceName, "association.0.association_id", eipAssociationResourceName, "id"), + 
resource.TestCheckResourceAttrPair(datasourceName, "association.0.carrier_ip", eipResourceName, "carrier_ip"), + resource.TestCheckResourceAttr(datasourceName, "association.0.customer_owned_ip", ""), + testAccCheckResourceAttrAccountID(datasourceName, "association.0.ip_owner_id"), + resource.TestCheckResourceAttr(datasourceName, "association.0.public_dns_name", ""), + resource.TestCheckResourceAttr(datasourceName, "association.0.public_ip", ""), + resource.TestCheckResourceAttr(datasourceName, "attachment.#", "0"), + resource.TestCheckResourceAttrSet(datasourceName, "availability_zone"), + resource.TestCheckResourceAttrPair(datasourceName, "description", resourceName, "description"), + resource.TestCheckResourceAttr(datasourceName, "interface_type", "interface"), + resource.TestCheckResourceAttr(datasourceName, "ipv6_addresses.#", "0"), + resource.TestCheckResourceAttrSet(datasourceName, "mac_address"), + resource.TestCheckResourceAttr(datasourceName, "outpost_arn", ""), + testAccCheckResourceAttrAccountID(datasourceName, "owner_id"), + resource.TestCheckResourceAttrPair(datasourceName, "private_dns_name", resourceName, "private_dns_name"), + resource.TestCheckResourceAttrPair(datasourceName, "private_ip", resourceName, "private_ip"), + resource.TestCheckResourceAttrPair(datasourceName, "private_ips.#", resourceName, "private_ips.#"), + resource.TestCheckResourceAttrPair(datasourceName, "private_ips.0", resourceName, "private_ip"), + resource.TestCheckResourceAttrPair(datasourceName, "security_groups.#", resourceName, "security_groups.#"), + resource.TestCheckTypeSetElemAttrPair(datasourceName, "security_groups.*", securityGroupResourceName, "id"), + resource.TestCheckResourceAttrPair(datasourceName, "subnet_id", resourceName, "subnet_id"), + resource.TestCheckResourceAttrPair(datasourceName, "tags.%", resourceName, "tags.%"), + resource.TestCheckResourceAttrPair(datasourceName, "vpc_id", vpcResourceName, "id"), + ), + }, + }, + }) } +func TestAccDataSourceAwsNetworkInterface_PublicIPAssociation(t *testing.T) { + datasourceName := "data.aws_network_interface.test" + resourceName := "aws_network_interface.test" + eipResourceName := "aws_eip.test" + eipAssociationResourceName := "aws_eip_association.test" + securityGroupResourceName := "aws_security_group.test" + vpcResourceName := "aws_vpc.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsNetworkInterfaceConfigPublicIPAssociation(rName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(datasourceName, "association.#", "1"), + resource.TestCheckResourceAttrPair(datasourceName, "association.0.allocation_id", eipResourceName, "id"), + resource.TestCheckResourceAttrPair(datasourceName, "association.0.association_id", eipAssociationResourceName, "id"), + resource.TestCheckResourceAttr(datasourceName, "association.0.carrier_ip", ""), + resource.TestCheckResourceAttr(datasourceName, "association.0.customer_owned_ip", ""), + testAccCheckResourceAttrAccountID(datasourceName, "association.0.ip_owner_id"), + // Public DNS name is not set by the EC2 API. 
+ resource.TestCheckResourceAttr(datasourceName, "association.0.public_dns_name", ""), + resource.TestCheckResourceAttrPair(datasourceName, "association.0.public_ip", eipResourceName, "public_ip"), + resource.TestCheckResourceAttr(datasourceName, "attachment.#", "0"), + resource.TestCheckResourceAttrSet(datasourceName, "availability_zone"), + resource.TestCheckResourceAttrPair(datasourceName, "description", resourceName, "description"), + resource.TestCheckResourceAttr(datasourceName, "interface_type", "interface"), + resource.TestCheckResourceAttr(datasourceName, "ipv6_addresses.#", "0"), + resource.TestCheckResourceAttrSet(datasourceName, "mac_address"), + resource.TestCheckResourceAttr(datasourceName, "outpost_arn", ""), + testAccCheckResourceAttrAccountID(datasourceName, "owner_id"), + resource.TestCheckResourceAttrPair(datasourceName, "private_dns_name", resourceName, "private_dns_name"), + resource.TestCheckResourceAttrPair(datasourceName, "private_ip", resourceName, "private_ip"), + resource.TestCheckResourceAttrPair(datasourceName, "private_ips.#", resourceName, "private_ips.#"), + resource.TestCheckResourceAttrPair(datasourceName, "private_ips.0", resourceName, "private_ip"), + resource.TestCheckResourceAttrPair(datasourceName, "security_groups.#", resourceName, "security_groups.#"), + resource.TestCheckTypeSetElemAttrPair(datasourceName, "security_groups.*", securityGroupResourceName, "id"), + resource.TestCheckResourceAttrPair(datasourceName, "subnet_id", resourceName, "subnet_id"), + resource.TestCheckResourceAttrPair(datasourceName, "tags.%", resourceName, "tags.%"), + resource.TestCheckResourceAttrPair(datasourceName, "vpc_id", vpcResourceName, "id"), + ), + }, + }, + }) +} + +func testAccDataSourceAwsNetworkInterfaceConfigBase(rName string) string { + return composeConfig( + testAccAvailableAZsNoOptInConfig(), + fmt.Sprintf(` resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" tags = { - Name = "terraform-testacc-eni-data-source-basic" + Name = %[1]q } } @@ -58,12 +168,12 @@ resource "aws_subnet" "test" { vpc_id = aws_vpc.test.id tags = { - Name = "tf-acc-eni-data-source-basic" + Name = %[1]q } } resource "aws_security_group" "test" { - name = "tf-sg-%s" + name = %[1]q vpc_id = aws_vpc.test.id } @@ -71,47 +181,33 @@ resource "aws_network_interface" "test" { subnet_id = aws_subnet.test.id private_ips = ["10.0.0.50"] security_groups = [aws_security_group.test.id] -} -data "aws_network_interface" "test" { - id = aws_network_interface.test.id + tags = { + Name = %[1]q + } } -`, rName) +`, rName)) } -func TestAccDataSourceAwsNetworkInterface_filters(t *testing.T) { - rName := acctest.RandString(5) - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsNetworkInterface_filters(rName), - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("data.aws_network_interface.test", "private_ips.#", "1"), - resource.TestCheckResourceAttr("data.aws_network_interface.test", "security_groups.#", "1"), - ), - }, - }, - }) +func testAccDataSourceAwsNetworkInterfaceConfigBasic(rName string) string { + return composeConfig( + testAccDataSourceAwsNetworkInterfaceConfigBase(rName), + ` +data "aws_network_interface" "test" { + id = aws_network_interface.test.id } - -func testAccDataSourceAwsNetworkInterface_filters(rName string) string { - return fmt.Sprintf(` -data "aws_availability_zones" "available" { - state = "available" - - filter { - 
name = "opt-in-status" - values = ["opt-in-not-required"] - } +`) } +func testAccDataSourceAwsNetworkInterfaceConfigCarrierIPAssociation(rName string) string { + return composeConfig( + testAccAvailableAZsWavelengthZonesDefaultExcludeConfig(), + fmt.Sprintf(` resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" tags = { - Name = "terraform-testacc-eni-data-source-filters" + Name = %[1]q } } @@ -121,26 +217,97 @@ resource "aws_subnet" "test" { vpc_id = aws_vpc.test.id tags = { - Name = "tf-acc-eni-data-source-filters" + Name = %[1]q } } resource "aws_security_group" "test" { - name = "tf-sg-%s" + name = %[1]q vpc_id = aws_vpc.test.id } resource "aws_network_interface" "test" { subnet_id = aws_subnet.test.id - private_ips = ["10.0.0.60"] + private_ips = ["10.0.0.50"] security_groups = [aws_security_group.test.id] + + tags = { + Name = %[1]q + } +} + +resource "aws_ec2_carrier_gateway" "test" { + vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } +} + +data "aws_availability_zone" "available" { + name = data.aws_availability_zones.available.names[0] +} + +resource "aws_eip" "test" { + vpc = true + network_border_group = data.aws_availability_zone.available.network_border_group + + tags = { + Name = %[1]q + } +} + +resource "aws_eip_association" "test" { + allocation_id = aws_eip.test.id + network_interface_id = aws_network_interface.test.id +} + +data "aws_network_interface" "test" { + id = aws_eip_association.test.network_interface_id +} +`, rName)) +} + +func testAccDataSourceAwsNetworkInterfaceConfigPublicIPAssociation(rName string) string { + return composeConfig( + testAccDataSourceAwsNetworkInterfaceConfigBase(rName), + fmt.Sprintf(` +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } +} + +resource "aws_eip" "test" { + vpc = true + + tags = { + Name = %[1]q + } +} + +resource "aws_eip_association" "test" { + allocation_id = aws_eip.test.id + network_interface_id = aws_network_interface.test.id +} + +data "aws_network_interface" "test" { + id = aws_eip_association.test.network_interface_id +} +`, rName)) } +func testAccDataSourceAwsNetworkInterfaceConfigFilters(rName string) string { + return composeConfig( + testAccDataSourceAwsNetworkInterfaceConfigBase(rName), + ` data "aws_network_interface" "test" { filter { name = "network-interface-id" values = [aws_network_interface.test.id] } } -`, rName) +`) } diff --git a/aws/structure.go b/aws/structure.go index c6cdddc44d3..10a7f881773 100644 --- a/aws/structure.go +++ b/aws/structure.go @@ -1158,23 +1158,31 @@ func flattenEc2AttributeValues(l []*ec2.AttributeValue) []string { } func flattenEc2NetworkInterfaceAssociation(a *ec2.NetworkInterfaceAssociation) []interface{} { - att := make(map[string]interface{}) + tfMap := map[string]interface{}{} + if a.AllocationId != nil { - att["allocation_id"] = *a.AllocationId + tfMap["allocation_id"] = aws.StringValue(a.AllocationId) } if a.AssociationId != nil { - att["association_id"] = *a.AssociationId + tfMap["association_id"] = aws.StringValue(a.AssociationId) + } + if a.CarrierIp != nil { + tfMap["carrier_ip"] = aws.StringValue(a.CarrierIp) + } + if a.CustomerOwnedIp != nil { + tfMap["customer_owned_ip"] = aws.StringValue(a.CustomerOwnedIp) } if a.IpOwnerId != nil { - att["ip_owner_id"] = *a.IpOwnerId + tfMap["ip_owner_id"] = aws.StringValue(a.IpOwnerId) } if a.PublicDnsName != nil { - att["public_dns_name"] = *a.PublicDnsName + tfMap["public_dns_name"] = aws.StringValue(a.PublicDnsName) } if a.PublicIp != nil { - att["public_ip"] = 
*a.PublicIp + tfMap["public_ip"] = aws.StringValue(a.PublicIp) } - return []interface{}{att} + + return []interface{}{tfMap} } func flattenEc2NetworkInterfaceIpv6Address(niia []*ec2.NetworkInterfaceIpv6Address) []string { diff --git a/website/docs/d/network_interface.html.markdown b/website/docs/d/network_interface.html.markdown index 3510a6ab001..d1436d78831 100644 --- a/website/docs/d/network_interface.html.markdown +++ b/website/docs/d/network_interface.html.markdown @@ -2,7 +2,7 @@ subcategory: "VPC" layout: "aws" page_title: "AWS: aws_network_interface" -description: |- +description: |- Get information on a Network Interface resource. --- @@ -52,6 +52,8 @@ Additionally, the following attributes are exported: * `allocation_id` - The allocation ID. * `association_id` - The association ID. +* `carrier_ip` - The carrier IP address associated with the network interface. This attribute is only set when the network interface is in a subnet which is associated with a Wavelength Zone. +* `customer_owned_ip` - The customer-owned IP address. * `ip_owner_id` - The ID of the Elastic IP address owner. * `public_dns_name` - The public DNS name. * `public_ip` - The address of the Elastic IP address bound to the network interface. From 174fd13e05d525dfb34531afb3d901810198b25d Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 15 Dec 2020 10:18:10 -0500 Subject: [PATCH 0238/1212] Update CHANGELOG for #16723 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 887695fbc67..d1c64e2880e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ ENHANCEMENTS * data-source/aws_instance: Add `enclave_options` attribute (Nitro Enclaves) [GH-16361] * data-source/aws_launch_configuration: Add `metadata_options` attribute [GH-14637] * data-source/aws_launch_template: Add `enclave_options` attribute (Nitro Enclaves) [GH-16361] +* data-source/aws_network_interface: Add `association` `carrier_ip` and `customer_owned_ip` attributes [GH-16723] * resource/aws_eip: Add `carrier_ip` attribute [GH-16724] * resource/aws_instance: Add `enclave_options` configuration block (Nitro Enclaves) [GH-16361] * resource/aws_launch_configuration: Add `metadata_options` configuration block [GH-14637] From 896c7d3d59c5976c83305bed6384f671e9ccb669 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 15 Dec 2020 10:46:35 -0500 Subject: [PATCH 0239/1212] data-source/aws_availability_zone: Add `parent_zone_id`, `parent_zone_name`, and `zone_type` attributes (additional support for Local and Wavelength Zones) (#16770) * d/aws_availability_zone: Add 'zone_type' attribute. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccDataSourceAwsAvailabilityZone_' ==> Checking that code complies with gofmt requirements... 
TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccDataSourceAwsAvailabilityZone_ -timeout 120m === RUN TestAccDataSourceAwsAvailabilityZone_AllAvailabilityZones === PAUSE TestAccDataSourceAwsAvailabilityZone_AllAvailabilityZones === RUN TestAccDataSourceAwsAvailabilityZone_Filter === PAUSE TestAccDataSourceAwsAvailabilityZone_Filter === RUN TestAccDataSourceAwsAvailabilityZone_Name === PAUSE TestAccDataSourceAwsAvailabilityZone_Name === RUN TestAccDataSourceAwsAvailabilityZone_ZoneId === PAUSE TestAccDataSourceAwsAvailabilityZone_ZoneId === CONT TestAccDataSourceAwsAvailabilityZone_AllAvailabilityZones === CONT TestAccDataSourceAwsAvailabilityZone_Filter === CONT TestAccDataSourceAwsAvailabilityZone_ZoneId === CONT TestAccDataSourceAwsAvailabilityZone_Name --- PASS: TestAccDataSourceAwsAvailabilityZone_AllAvailabilityZones (14.09s) --- PASS: TestAccDataSourceAwsAvailabilityZone_Filter (15.90s) --- PASS: TestAccDataSourceAwsAvailabilityZone_ZoneId (16.10s) --- PASS: TestAccDataSourceAwsAvailabilityZone_Name (16.13s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 16.194s * d/aws_availability_zone: Add Local Zone test. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccDataSourceAwsAvailabilityZone_LocalZone' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccDataSourceAwsAvailabilityZone_LocalZone -timeout 120m === RUN TestAccDataSourceAwsAvailabilityZone_LocalZone === PAUSE TestAccDataSourceAwsAvailabilityZone_LocalZone === CONT TestAccDataSourceAwsAvailabilityZone_LocalZone --- PASS: TestAccDataSourceAwsAvailabilityZone_LocalZone (10.55s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 10.625s * d/aws_availability_zone: Add Wavelength Zone test. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccDataSourceAwsAvailabilityZone_WavelengthZone' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccDataSourceAwsAvailabilityZone_WavelengthZone -timeout 120m === RUN TestAccDataSourceAwsAvailabilityZone_WavelengthZone === PAUSE TestAccDataSourceAwsAvailabilityZone_WavelengthZone === CONT TestAccDataSourceAwsAvailabilityZone_WavelengthZone --- PASS: TestAccDataSourceAwsAvailabilityZone_WavelengthZone (10.63s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 10.711s * d/aws_availability_zone: Add 'parent_zone_id' and 'parent_zone_name' attributes. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccDataSourceAwsAvailabilityZone_' ==> Checking that code complies with gofmt requirements... 
TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccDataSourceAwsAvailabilityZone_ -timeout 120m === RUN TestAccDataSourceAwsAvailabilityZone_AllAvailabilityZones === PAUSE TestAccDataSourceAwsAvailabilityZone_AllAvailabilityZones === RUN TestAccDataSourceAwsAvailabilityZone_Filter === PAUSE TestAccDataSourceAwsAvailabilityZone_Filter === RUN TestAccDataSourceAwsAvailabilityZone_LocalZone === PAUSE TestAccDataSourceAwsAvailabilityZone_LocalZone === RUN TestAccDataSourceAwsAvailabilityZone_Name === PAUSE TestAccDataSourceAwsAvailabilityZone_Name === RUN TestAccDataSourceAwsAvailabilityZone_WavelengthZone === PAUSE TestAccDataSourceAwsAvailabilityZone_WavelengthZone === RUN TestAccDataSourceAwsAvailabilityZone_ZoneId === PAUSE TestAccDataSourceAwsAvailabilityZone_ZoneId === CONT TestAccDataSourceAwsAvailabilityZone_AllAvailabilityZones === CONT TestAccDataSourceAwsAvailabilityZone_WavelengthZone === CONT TestAccDataSourceAwsAvailabilityZone_ZoneId === CONT TestAccDataSourceAwsAvailabilityZone_LocalZone === CONT TestAccDataSourceAwsAvailabilityZone_Name === CONT TestAccDataSourceAwsAvailabilityZone_Filter --- PASS: TestAccDataSourceAwsAvailabilityZone_Filter (19.67s) --- PASS: TestAccDataSourceAwsAvailabilityZone_AllAvailabilityZones (21.28s) --- PASS: TestAccDataSourceAwsAvailabilityZone_WavelengthZone (21.55s) --- PASS: TestAccDataSourceAwsAvailabilityZone_Name (21.65s) --- PASS: TestAccDataSourceAwsAvailabilityZone_ZoneId (21.69s) --- PASS: TestAccDataSourceAwsAvailabilityZone_LocalZone (21.70s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 21.778s * d/aws_availability_zone: Remove leading '-' from 'name_suffix' for Local and Wavelength Zones. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccDataSourceAwsAvailabilityZone_' ==> Checking that code complies with gofmt requirements... 
TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccDataSourceAwsAvailabilityZone_ -timeout 120m === RUN TestAccDataSourceAwsAvailabilityZone_AllAvailabilityZones === PAUSE TestAccDataSourceAwsAvailabilityZone_AllAvailabilityZones === RUN TestAccDataSourceAwsAvailabilityZone_Filter === PAUSE TestAccDataSourceAwsAvailabilityZone_Filter === RUN TestAccDataSourceAwsAvailabilityZone_LocalZone === PAUSE TestAccDataSourceAwsAvailabilityZone_LocalZone === RUN TestAccDataSourceAwsAvailabilityZone_Name === PAUSE TestAccDataSourceAwsAvailabilityZone_Name === RUN TestAccDataSourceAwsAvailabilityZone_WavelengthZone === PAUSE TestAccDataSourceAwsAvailabilityZone_WavelengthZone === RUN TestAccDataSourceAwsAvailabilityZone_ZoneId === PAUSE TestAccDataSourceAwsAvailabilityZone_ZoneId === CONT TestAccDataSourceAwsAvailabilityZone_AllAvailabilityZones === CONT TestAccDataSourceAwsAvailabilityZone_ZoneId === CONT TestAccDataSourceAwsAvailabilityZone_WavelengthZone === CONT TestAccDataSourceAwsAvailabilityZone_Name === CONT TestAccDataSourceAwsAvailabilityZone_LocalZone === CONT TestAccDataSourceAwsAvailabilityZone_Filter --- PASS: TestAccDataSourceAwsAvailabilityZone_Name (17.92s) --- PASS: TestAccDataSourceAwsAvailabilityZone_ZoneId (19.40s) --- PASS: TestAccDataSourceAwsAvailabilityZone_WavelengthZone (20.22s) --- PASS: TestAccDataSourceAwsAvailabilityZone_LocalZone (20.37s) --- PASS: TestAccDataSourceAwsAvailabilityZone_AllAvailabilityZones (20.69s) --- PASS: TestAccDataSourceAwsAvailabilityZone_Filter (20.69s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 20.770s --- aws/data_source_aws_availability_zone.go | 20 ++- aws/data_source_aws_availability_zone_test.go | 147 ++++++++++++++++-- .../docs/d/availability_zone.html.markdown | 5 + 3 files changed, 159 insertions(+), 13 deletions(-) diff --git a/aws/data_source_aws_availability_zone.go b/aws/data_source_aws_availability_zone.go index e611eda7c80..dfae144d050 100644 --- a/aws/data_source_aws_availability_zone.go +++ b/aws/data_source_aws_availability_zone.go @@ -3,6 +3,7 @@ package aws import ( "fmt" "log" + "strings" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" @@ -40,6 +41,14 @@ func dataSourceAwsAvailabilityZone() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "parent_zone_id": { + Type: schema.TypeString, + Computed: true, + }, + "parent_zone_name": { + Type: schema.TypeString, + Computed: true, + }, "region": { Type: schema.TypeString, Computed: true, @@ -54,6 +63,10 @@ func dataSourceAwsAvailabilityZone() *schema.Resource { Optional: true, Computed: true, }, + "zone_type": { + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -108,7 +121,9 @@ func dataSourceAwsAvailabilityZoneRead(d *schema.ResourceData, meta interface{}) // the AZ suffix alone, without the region name. // This can be used e.g. to create lookup tables by AZ letter that // work regardless of region. - nameSuffix := (*az.ZoneName)[len(*az.RegionName):] + nameSuffix := aws.StringValue(az.ZoneName)[len(aws.StringValue(az.RegionName)):] + // For Local and Wavelength zones, remove any leading "-". 
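+	// For example, the Local Zone "us-west-2-lax-1a" in region "us-west-2" has the raw suffix "-lax-1a", which is trimmed to "lax-1a".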
+ nameSuffix = strings.TrimLeft(nameSuffix, "-") d.SetId(aws.StringValue(az.ZoneName)) d.Set("group_name", az.GroupName) @@ -116,9 +131,12 @@ func dataSourceAwsAvailabilityZoneRead(d *schema.ResourceData, meta interface{}) d.Set("name_suffix", nameSuffix) d.Set("network_border_group", az.NetworkBorderGroup) d.Set("opt_in_status", az.OptInStatus) + d.Set("parent_zone_id", az.ParentZoneId) + d.Set("parent_zone_name", az.ParentZoneName) d.Set("region", az.RegionName) d.Set("state", az.State) d.Set("zone_id", az.ZoneId) + d.Set("zone_type", az.ZoneType) return nil } diff --git a/aws/data_source_aws_availability_zone_test.go b/aws/data_source_aws_availability_zone_test.go index b018a1a3488..9f0f28f8f44 100644 --- a/aws/data_source_aws_availability_zone_test.go +++ b/aws/data_source_aws_availability_zone_test.go @@ -1,9 +1,11 @@ package aws import ( + "fmt" "regexp" "testing" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) @@ -22,9 +24,12 @@ func TestAccDataSourceAwsAvailabilityZone_AllAvailabilityZones(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName, "name", availabilityZonesDataSourceName, "names.0"), resource.TestMatchResourceAttr(dataSourceName, "name_suffix", regexp.MustCompile(`^[a-z]$`)), resource.TestCheckResourceAttr(dataSourceName, "network_border_group", testAccGetRegion()), - resource.TestCheckResourceAttr(dataSourceName, "opt_in_status", "opt-in-not-required"), + resource.TestCheckResourceAttr(dataSourceName, "opt_in_status", ec2.AvailabilityZoneOptInStatusOptInNotRequired), + resource.TestCheckResourceAttr(dataSourceName, "parent_zone_id", ""), + resource.TestCheckResourceAttr(dataSourceName, "parent_zone_name", ""), resource.TestCheckResourceAttr(dataSourceName, "region", testAccGetRegion()), resource.TestCheckResourceAttrPair(dataSourceName, "zone_id", availabilityZonesDataSourceName, "zone_ids.0"), + resource.TestCheckResourceAttr(dataSourceName, "zone_type", "availability-zone"), ), }, }, @@ -46,9 +51,39 @@ func TestAccDataSourceAwsAvailabilityZone_Filter(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName, "name", availabilityZonesDataSourceName, "names.0"), resource.TestMatchResourceAttr(dataSourceName, "name_suffix", regexp.MustCompile(`^[a-z]$`)), resource.TestCheckResourceAttr(dataSourceName, "network_border_group", testAccGetRegion()), - resource.TestCheckResourceAttr(dataSourceName, "opt_in_status", "opt-in-not-required"), + resource.TestCheckResourceAttr(dataSourceName, "opt_in_status", ec2.AvailabilityZoneOptInStatusOptInNotRequired), + resource.TestCheckResourceAttr(dataSourceName, "parent_zone_id", ""), + resource.TestCheckResourceAttr(dataSourceName, "parent_zone_name", ""), resource.TestCheckResourceAttr(dataSourceName, "region", testAccGetRegion()), resource.TestCheckResourceAttrPair(dataSourceName, "zone_id", availabilityZonesDataSourceName, "zone_ids.0"), + resource.TestCheckResourceAttr(dataSourceName, "zone_type", "availability-zone"), + ), + }, + }, + }) +} + +func TestAccDataSourceAwsAvailabilityZone_LocalZone(t *testing.T) { + availabilityZonesDataSourceName := "data.aws_availability_zones.available" + dataSourceName := "data.aws_availability_zone.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSLocalZoneAvailable(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsAvailabilityZoneConfigZoneType("local-zone"), + Check: resource.ComposeTestCheckFunc( + 
resource.TestCheckResourceAttrSet(dataSourceName, "group_name"), + resource.TestCheckResourceAttrPair(dataSourceName, "name", availabilityZonesDataSourceName, "names.0"), + resource.TestMatchResourceAttr(dataSourceName, "name_suffix", regexp.MustCompile(`^[a-z0-9][a-z0-9-]+$`)), + resource.TestCheckResourceAttrSet(dataSourceName, "network_border_group"), + resource.TestCheckResourceAttr(dataSourceName, "opt_in_status", ec2.AvailabilityZoneOptInStatusOptedIn), + resource.TestCheckResourceAttrSet(dataSourceName, "parent_zone_id"), + resource.TestCheckResourceAttrSet(dataSourceName, "parent_zone_name"), + resource.TestCheckResourceAttr(dataSourceName, "region", testAccGetRegion()), + resource.TestCheckResourceAttrPair(dataSourceName, "zone_id", availabilityZonesDataSourceName, "zone_ids.0"), + resource.TestCheckResourceAttr(dataSourceName, "zone_type", "local-zone"), ), }, }, @@ -70,9 +105,39 @@ func TestAccDataSourceAwsAvailabilityZone_Name(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName, "name", availabilityZonesDataSourceName, "names.0"), resource.TestMatchResourceAttr(dataSourceName, "name_suffix", regexp.MustCompile(`^[a-z]$`)), resource.TestCheckResourceAttr(dataSourceName, "network_border_group", testAccGetRegion()), - resource.TestCheckResourceAttr(dataSourceName, "opt_in_status", "opt-in-not-required"), + resource.TestCheckResourceAttr(dataSourceName, "opt_in_status", ec2.AvailabilityZoneOptInStatusOptInNotRequired), + resource.TestCheckResourceAttr(dataSourceName, "parent_zone_id", ""), + resource.TestCheckResourceAttr(dataSourceName, "parent_zone_name", ""), + resource.TestCheckResourceAttr(dataSourceName, "region", testAccGetRegion()), + resource.TestCheckResourceAttrPair(dataSourceName, "zone_id", availabilityZonesDataSourceName, "zone_ids.0"), + resource.TestCheckResourceAttr(dataSourceName, "zone_type", "availability-zone"), + ), + }, + }, + }) +} + +func TestAccDataSourceAwsAvailabilityZone_WavelengthZone(t *testing.T) { + availabilityZonesDataSourceName := "data.aws_availability_zones.available" + dataSourceName := "data.aws_availability_zone.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSWavelengthZoneAvailable(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsAvailabilityZoneConfigZoneType("wavelength-zone"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(dataSourceName, "group_name"), + resource.TestCheckResourceAttrPair(dataSourceName, "name", availabilityZonesDataSourceName, "names.0"), + resource.TestMatchResourceAttr(dataSourceName, "name_suffix", regexp.MustCompile(`^[a-z0-9][a-z0-9-]+$`)), + resource.TestCheckResourceAttrSet(dataSourceName, "network_border_group"), + resource.TestCheckResourceAttr(dataSourceName, "opt_in_status", ec2.AvailabilityZoneOptInStatusOptedIn), + resource.TestCheckResourceAttrSet(dataSourceName, "parent_zone_id"), + resource.TestCheckResourceAttrSet(dataSourceName, "parent_zone_name"), resource.TestCheckResourceAttr(dataSourceName, "region", testAccGetRegion()), resource.TestCheckResourceAttrPair(dataSourceName, "zone_id", availabilityZonesDataSourceName, "zone_ids.0"), + resource.TestCheckResourceAttr(dataSourceName, "zone_type", "wavelength-zone"), ), }, }, @@ -94,47 +159,105 @@ func TestAccDataSourceAwsAvailabilityZone_ZoneId(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName, "name", availabilityZonesDataSourceName, "names.0"), 
resource.TestMatchResourceAttr(dataSourceName, "name_suffix", regexp.MustCompile(`^[a-z]$`)), resource.TestCheckResourceAttr(dataSourceName, "network_border_group", testAccGetRegion()), - resource.TestCheckResourceAttr(dataSourceName, "opt_in_status", "opt-in-not-required"), + resource.TestCheckResourceAttr(dataSourceName, "opt_in_status", ec2.AvailabilityZoneOptInStatusOptInNotRequired), + resource.TestCheckResourceAttr(dataSourceName, "parent_zone_id", ""), + resource.TestCheckResourceAttr(dataSourceName, "parent_zone_name", ""), resource.TestCheckResourceAttr(dataSourceName, "region", testAccGetRegion()), resource.TestCheckResourceAttrPair(dataSourceName, "zone_id", availabilityZonesDataSourceName, "zone_ids.0"), + resource.TestCheckResourceAttr(dataSourceName, "zone_type", "availability-zone"), ), }, }, }) } +func testAccPreCheckAWSLocalZoneAvailable(t *testing.T) { + conn := testAccProvider.Meta().(*AWSClient).ec2conn + + input := &ec2.DescribeAvailabilityZonesInput{ + Filters: buildEC2AttributeFilterList(map[string]string{ + "zone-type": "local-zone", + "opt-in-status": "opted-in", + }), + } + + output, err := conn.DescribeAvailabilityZones(input) + + if testAccPreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } + + if output == nil || len(output.AvailabilityZones) == 0 { + t.Skip("skipping since no Local Zones are available") + } +} + func testAccDataSourceAwsAvailabilityZoneConfigAllAvailabilityZones() string { - return testAccAvailableAZsNoOptInConfig() + ` + return composeConfig( + testAccAvailableAZsNoOptInConfig(), + ` data "aws_availability_zone" "test" { all_availability_zones = true name = data.aws_availability_zones.available.names[0] } -` +`) } func testAccDataSourceAwsAvailabilityZoneConfigFilter() string { - return testAccAvailableAZsNoOptInConfig() + ` + return composeConfig( + testAccAvailableAZsNoOptInConfig(), + ` data "aws_availability_zone" "test" { filter { name = "zone-name" values = [data.aws_availability_zones.available.names[0]] } } -` +`) } func testAccDataSourceAwsAvailabilityZoneConfigName() string { - return testAccAvailableAZsNoOptInConfig() + ` + return composeConfig( + testAccAvailableAZsNoOptInConfig(), + ` data "aws_availability_zone" "test" { name = data.aws_availability_zones.available.names[0] } -` +`) } func testAccDataSourceAwsAvailabilityZoneConfigZoneId() string { - return testAccAvailableAZsNoOptInConfig() + ` + return composeConfig( + testAccAvailableAZsNoOptInConfig(), + ` +data "aws_availability_zone" "test" { + zone_id = data.aws_availability_zones.available.zone_ids[0] +} +`) +} + +func testAccDataSourceAwsAvailabilityZoneConfigZoneType(zoneType string) string { + return fmt.Sprintf(` +data "aws_availability_zones" "available" { + state = "available" + + filter { + name = "zone-type" + values = [%[1]q] + } + + filter { + name = "opt-in-status" + values = ["opted-in"] + } +} + data "aws_availability_zone" "test" { zone_id = data.aws_availability_zones.available.zone_ids[0] } -` +`, zoneType) } diff --git a/website/docs/d/availability_zone.html.markdown b/website/docs/d/availability_zone.html.markdown index 4252243f644..58b160b1074 100644 --- a/website/docs/d/availability_zone.html.markdown +++ b/website/docs/d/availability_zone.html.markdown @@ -93,6 +93,11 @@ In addition to all arguments above, the following attributes are exported: * `group_name` - For Availability Zones, this is the same value as the Region name. 
For Local Zones, the name of the associated group, for example `us-west-2-lax-1`. * `name_suffix` - The part of the AZ name that appears after the region name, uniquely identifying the AZ within its region. +For Availability Zones this is usually a single letter, for example `a` for the `us-west-2a` zone. +For Local and Wavelength Zones this is a longer string, for example `wl1-sfo-wlz-1` for the `us-west-2-wl1-sfo-wlz-1` zone. * `network_border_group` - The name of the location from which the address is advertised. * `opt_in_status` - For Availability Zones, this always has the value of `opt-in-not-required`. For Local Zones, this is the opt in status. The possible values are `opted-in` and `not-opted-in`. +* `parent_zone_id` - The ID of the zone that handles some of the Local Zone or Wavelength Zone control plane operations, such as API calls. +* `parent_zone_name` - The name of the zone that handles some of the Local Zone or Wavelength Zone control plane operations, such as API calls. * `region` - The region where the selected availability zone resides. This is always the region selected on the provider, since this data source searches only within that region. +* `zone_type` - The type of zone. Values are `availability-zone`, `local-zone`, and `wavelength-zone`. From e7bf763adad8906529af84000bec75b16189497f Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 15 Dec 2020 10:47:15 -0500 Subject: [PATCH 0240/1212] Update CHANGELOG for #16770 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d1c64e2880e..0f8e5285891 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ FEATURES ENHANCEMENTS * data-source/aws_autoscaling_group: Adds `launch_template` attribute [GH-16297] +* data-source/aws_availability_zone: Add `parent_zone_id`, `parent_zone_name`, and `zone_type` attributes (additional support for Local and Wavelength Zones) [GH-16770] * data-source/aws_eip: Add `carrier_ip` attribute [GH-16724] * data-source/aws_instance: Add `enclave_options` attribute (Nitro Enclaves) [GH-16361] * data-source/aws_launch_configuration: Add `metadata_options` attribute [GH-14637] From 91395a5814bb27ca2d6f1784ad67c41ff7daf76c Mon Sep 17 00:00:00 2001 From: Edmund Craske Date: Tue, 15 Dec 2020 18:00:23 +0000 Subject: [PATCH 0241/1212] Correctly quote `shared` in ecs_task_definition --- website/docs/r/ecs_task_definition.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/ecs_task_definition.html.markdown b/website/docs/r/ecs_task_definition.html.markdown index eec1ec9e7dc..3683054e1c9 100644 --- a/website/docs/r/ecs_task_definition.html.markdown +++ b/website/docs/r/ecs_task_definition.html.markdown @@ -127,7 +127,7 @@ parameter of container definition in the `mountPoints` section. For more information, see [Specifying a Docker volume in your Task Definition Developer Guide](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/docker-volumes.html#specify-volume-config) -* `scope` - (Optional) The scope for the Docker volume, which determines its lifecycle, either `task` or `shared`. Docker volumes that are scoped to a `task` are automatically provisioned when the task starts and destroyed when the task stops. Docker volumes that are `scoped` as shared persist after the task stops. +* `scope` - (Optional) The scope for the Docker volume, which determines its lifecycle, either `task` or `shared`. 
Docker volumes that are scoped to a `task` are automatically provisioned when the task starts and destroyed when the task stops. Docker volumes that are scoped as `shared` persist after the task stops. * `autoprovision` - (Optional) If this value is `true`, the Docker volume is created if it does not already exist. *Note*: This field is only used if the scope is `shared`. * `driver` - (Optional) The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. * `driver_opts` - (Optional) A map of Docker driver specific options. From 0d5c24fe8c3e9e5cb53db6eb4f114a0d01733e7b Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 15 Dec 2020 15:16:26 -0800 Subject: [PATCH 0242/1212] Minor cleanup --- ...urce_aws_codestarconnections_connection.go | 22 ++-- ...aws_codestarconnections_connection_test.go | 21 ++-- .../r/codestarconnections_connection.markdown | 109 ++---------------- 3 files changed, 36 insertions(+), 116 deletions(-) diff --git a/aws/resource_aws_codestarconnections_connection.go b/aws/resource_aws_codestarconnections_connection.go index 16ffe5d4e86..df38fa73ea5 100644 --- a/aws/resource_aws_codestarconnections_connection.go +++ b/aws/resource_aws_codestarconnections_connection.go @@ -6,6 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/codestarconnections" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) @@ -70,14 +71,13 @@ func resourceAwsCodeStarConnectionsConnectionRead(d *schema.ResourceData, meta i resp, err := conn.GetConnection(&codestarconnections.GetConnectionInput{ ConnectionArn: aws.String(d.Id()), }) - + if tfawserr.ErrCodeEquals(err, codestarconnections.ErrCodeResourceNotFoundException) { + log.Printf("[WARN] CodeStar connection (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } if err != nil { - if isAWSErr(err, codestarconnections.ErrCodeResourceNotFoundException, "") { - log.Printf("[WARN] CodeStar connection (%s) not found, removing from state", d.Id()) - d.SetId("") - return nil - } - return fmt.Errorf("error reading CodeStar connection: %s", err) + return fmt.Errorf("error reading CodeStar connection: %w", err) } if resp == nil || resp.Connection == nil { @@ -99,12 +99,10 @@ func resourceAwsCodeStarConnectionsConnectionDelete(d *schema.ResourceData, meta _, err := conn.DeleteConnection(&codestarconnections.DeleteConnectionInput{ ConnectionArn: aws.String(d.Id()), }) - + if tfawserr.ErrCodeEquals(err, codestarconnections.ErrCodeResourceNotFoundException) { + return nil + } if err != nil { - if isAWSErr(err, codestarconnections.ErrCodeResourceNotFoundException, "") { - return nil - } - return fmt.Errorf("error deleting CodeStar connection: %w", err) } diff --git a/aws/resource_aws_codestarconnections_connection_test.go b/aws/resource_aws_codestarconnections_connection_test.go index 40753285254..9d59baa3512 100644 --- a/aws/resource_aws_codestarconnections_connection_test.go +++ b/aws/resource_aws_codestarconnections_connection_test.go @@ -14,18 +14,19 @@ import ( ) func TestAccAWSCodeStarConnectionsConnection_Basic(t *testing.T) { + var v codestarconnections.Connection resourceName := "aws_codestarconnections_connection.test" rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); 
testAccPartitionHasServicePreCheck(codestarconnections.EndpointsID, t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCodeStarConnectionsConnectionDestroy, Steps: []resource.TestStep{ { Config: testAccAWSCodeStarConnectionsConnectionConfigBasic(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCodeStarConnectionsConnectionExists(resourceName), + testAccCheckAWSCodeStarConnectionsConnectionExists(resourceName, &v), testAccMatchResourceAttrRegionalARN(resourceName, "id", "codestar-connections", regexp.MustCompile("connection/.+")), testAccMatchResourceAttrRegionalARN(resourceName, "arn", "codestar-connections", regexp.MustCompile("connection/.+")), resource.TestCheckResourceAttr(resourceName, "provider_type", codestarconnections.ProviderTypeBitbucket), @@ -43,18 +44,19 @@ func TestAccAWSCodeStarConnectionsConnection_Basic(t *testing.T) { } func TestAccAWSCodeStarConnectionsConnection_disappears(t *testing.T) { + var v codestarconnections.Connection resourceName := "aws_codestarconnections_connection.test" rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(codestarconnections.EndpointsID, t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCodeStarConnectionsConnectionDestroy, Steps: []resource.TestStep{ { Config: testAccAWSCodeStarConnectionsConnectionConfigBasic(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCodeStarConnectionsConnectionExists(resourceName), + testAccCheckAWSCodeStarConnectionsConnectionExists(resourceName, &v), testAccCheckResourceDisappears(testAccProvider, resourceAwsCodeStarConnectionsConnection(), resourceName), ), ExpectNonEmptyPlan: true, @@ -63,7 +65,7 @@ func TestAccAWSCodeStarConnectionsConnection_disappears(t *testing.T) { }) } -func testAccCheckAWSCodeStarConnectionsConnectionExists(n string) resource.TestCheckFunc { +func testAccCheckAWSCodeStarConnectionsConnectionExists(n string, v *codestarconnections.Connection) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -76,11 +78,16 @@ func testAccCheckAWSCodeStarConnectionsConnectionExists(n string) resource.TestC conn := testAccProvider.Meta().(*AWSClient).codestarconnectionsconn - _, err := conn.GetConnection(&codestarconnections.GetConnectionInput{ + resp, err := conn.GetConnection(&codestarconnections.GetConnectionInput{ ConnectionArn: aws.String(rs.Primary.ID), }) + if err != nil { + return err + } + + *v = *resp.Connection - return err + return nil } } diff --git a/website/docs/r/codestarconnections_connection.markdown b/website/docs/r/codestarconnections_connection.markdown index 3d01a0acb9b..5aadbb83228 100644 --- a/website/docs/r/codestarconnections_connection.markdown +++ b/website/docs/r/codestarconnections_connection.markdown @@ -10,89 +10,24 @@ description: |- Provides a CodeStar Connection. +~> **NOTE:** The `aws_codestarconnections_connection` resource is created in the state `PENDING`. Authentication with the connection provider must be completed in the AWS Console. 
+ ## Example Usage ```hcl -resource "aws_s3_bucket" "codepipeline_bucket" { - bucket = "tf-codestarconnections-codepipeline-bucket" - acl = "private" -} - resource "aws_codestarconnections_connection" "example" { connection_name = "example-connection" provider_type = "Bitbucket" } -resource "aws_iam_role" "codepipeline_role" { - name = "test-role" - assume_role_policy = < Date: Tue, 15 Dec 2020 15:24:14 -0800 Subject: [PATCH 0243/1212] Update CHANGELOG for #15990 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0f8e5285891..c214f115c6e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ FEATURES +* **New Resource:** `aws_codestarconnections_connection` [GH-15990] * **New Resource:** `aws_lakeformation_resource` ([#13267](https://github.com/hashicorp/terraform-provider-aws/issues/13267)) ENHANCEMENTS From 7528f747ad0f7445bfeded50e71ff0b540863725 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Tue, 15 Dec 2020 21:48:16 -0500 Subject: [PATCH 0244/1212] update policy of test configs and examples in documentation --- ...ws_networkfirewall_resource_policy_test.go | 40 +++++++++++++++---- ...workfirewall_resource_policy.html.markdown | 15 ++++++- 2 files changed, 45 insertions(+), 10 deletions(-) diff --git a/aws/resource_aws_networkfirewall_resource_policy_test.go b/aws/resource_aws_networkfirewall_resource_policy_test.go index 3eee0d8de0f..9fdd47a59d3 100644 --- a/aws/resource_aws_networkfirewall_resource_policy_test.go +++ b/aws/resource_aws_networkfirewall_resource_policy_test.go @@ -28,14 +28,15 @@ func TestAccAwsNetworkFirewallResourcePolicy_firewallPolicy(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAwsNetworkFirewallResourcePolicyExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "resource_arn", "aws_networkfirewall_firewall_policy.test", "arn"), - resource.TestMatchResourceAttr(resourceName, "policy", regexp.MustCompile(`"Action":"network-firewall:ListFirewallPolicies"`)), + resource.TestMatchResourceAttr(resourceName, "policy", regexp.MustCompile(`"Action":\["network-firewall:CreateFirewall","network-firewall:UpdateFirewall","network-firewall:AssociateFirewallPolicy","network-firewall:ListFirewallPolicies"\]`)), ), }, { + // Update the policy's Actions Config: testAccNetworkFirewallResourcePolicy_firewallPolicy_updatePolicy(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAwsNetworkFirewallResourcePolicyExists(resourceName), - resource.TestMatchResourceAttr(resourceName, "policy", regexp.MustCompile(`"Action":\["network-firewall:ListFirewallPolicies","network-firewall:AssociateFirewallPolicy"\]`)), + resource.TestMatchResourceAttr(resourceName, "policy", regexp.MustCompile(`"Action":\["network-firewall:UpdateFirewall","network-firewall:AssociateFirewallPolicy","network-firewall:ListFirewallPolicies","network-firewall:CreateFirewall"\]`)), ), }, { @@ -61,14 +62,15 @@ func TestAccAwsNetworkFirewallResourcePolicy_ruleGroup(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAwsNetworkFirewallResourcePolicyExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "resource_arn", "aws_networkfirewall_rule_group.test", "arn"), - resource.TestMatchResourceAttr(resourceName, "policy", regexp.MustCompile(`"Action":"network-firewall:ListRuleGroups"`)), + resource.TestMatchResourceAttr(resourceName, "policy", regexp.MustCompile(`"Action":\["network-firewall:CreateFirewallPolicy","network-firewall:UpdateFirewallPolicy","network-firewall:ListRuleGroups"\]`)), 
), }, { + // Update the policy's Actions Config: testAccNetworkFirewallResourcePolicy_ruleGroup_updatePolicy(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAwsNetworkFirewallResourcePolicyExists(resourceName), - resource.TestMatchResourceAttr(resourceName, "policy", regexp.MustCompile(`"Action":\["network-firewall:ListRuleGroups","network-firewall:CreateFirewallPolicy"\]`)), + resource.TestMatchResourceAttr(resourceName, "policy", regexp.MustCompile(`"Action":\["network-firewall:UpdateFirewallPolicy","network-firewall:ListRuleGroups","network-firewall:CreateFirewallPolicy"\]`)), ), }, { @@ -212,9 +214,15 @@ func testAccNetworkFirewallResourcePolicy_firewallPolicy(rName string) string { testAccNetworkFirewallResourcePolicyFirewallPolicyBaseConfig(rName), ` resource "aws_networkfirewall_resource_policy" "test" { resource_arn = aws_networkfirewall_firewall_policy.test.arn + # policy's Action element must include all of the following operations policy = jsonencode({ Statement = [{ - Action = "network-firewall:ListFirewallPolicies" + Action = [ + "network-firewall:CreateFirewall", + "network-firewall:UpdateFirewall", + "network-firewall:AssociateFirewallPolicy", + "network-firewall:ListFirewallPolicies" + ] Effect = "Allow" Resource = aws_networkfirewall_firewall_policy.test.arn Principal = { @@ -232,9 +240,15 @@ func testAccNetworkFirewallResourcePolicy_firewallPolicy_updatePolicy(rName stri testAccNetworkFirewallResourcePolicyFirewallPolicyBaseConfig(rName), ` resource "aws_networkfirewall_resource_policy" "test" { resource_arn = aws_networkfirewall_firewall_policy.test.arn + # policy's Action element must include all of the following operations policy = jsonencode({ Statement = [{ - Action = ["network-firewall:ListFirewallPolicies", "network-firewall:AssociateFirewallPolicy"] + Action = [ + "network-firewall:UpdateFirewall", + "network-firewall:AssociateFirewallPolicy", + "network-firewall:ListFirewallPolicies", + "network-firewall:CreateFirewall" + ] Effect = "Allow" Resource = aws_networkfirewall_firewall_policy.test.arn Principal = { @@ -275,9 +289,14 @@ func testAccNetworkFirewallResourcePolicy_ruleGroup(rName string) string { testAccNetworkFirewallResourcePolicyRuleGroupBaseConfig(rName), ` resource "aws_networkfirewall_resource_policy" "test" { resource_arn = aws_networkfirewall_rule_group.test.arn + # policy's Action element must include all of the following operations policy = jsonencode({ Statement = [{ - Action = "network-firewall:ListRuleGroups" + Action = [ + "network-firewall:CreateFirewallPolicy", + "network-firewall:UpdateFirewallPolicy", + "network-firewall:ListRuleGroups" + ] Effect = "Allow" Resource = aws_networkfirewall_rule_group.test.arn Principal = { @@ -295,9 +314,14 @@ func testAccNetworkFirewallResourcePolicy_ruleGroup_updatePolicy(rName string) s testAccNetworkFirewallResourcePolicyRuleGroupBaseConfig(rName), ` resource "aws_networkfirewall_resource_policy" "test" { resource_arn = aws_networkfirewall_rule_group.test.arn + # policy's Action element must include all of the following operations policy = jsonencode({ Statement = [{ - Action = ["network-firewall:ListRuleGroups", "network-firewall:CreateFirewallPolicy"] + Action = [ + "network-firewall:UpdateFirewallPolicy", + "network-firewall:ListRuleGroups", + "network-firewall:CreateFirewallPolicy" + ] Effect = "Allow" Resource = aws_networkfirewall_rule_group.test.arn Principal = { diff --git a/website/docs/r/networkfirewall_resource_policy.html.markdown 
b/website/docs/r/networkfirewall_resource_policy.html.markdown index a456f502267..e4efd999965 100644 --- a/website/docs/r/networkfirewall_resource_policy.html.markdown +++ b/website/docs/r/networkfirewall_resource_policy.html.markdown @@ -17,9 +17,15 @@ Provides an AWS Network Firewall Resource Policy Resource for a rule group or fi ```hcl resource "aws_networkfirewall_resource_policy" "example" { resource_arn = aws_networkfirewall_firewall_policy.example.arn + # policy's Action element must include all of the following operations policy = jsonencode({ Statement = [{ - Action = "network-firewall:ListFirewallPolicies" + Action = [ + "network-firewall:ListFirewallPolicies", + "network-firewall:CreateFirewall", + "network-firewall:UpdateFirewall", + "network-firewall:AssociateFirewallPolicy" + ] Effect = "Allow" Resource = aws_networkfirewall_firewall_policy.example.arn Principal = { @@ -36,9 +42,14 @@ resource "aws_networkfirewall_resource_policy" "example" { ```hcl resource "aws_networkfirewall_resource_policy" "example" { resource_arn = aws_networkfirewall_rule_group.example.arn + # policy's Action element must include all of the following operations policy = jsonencode({ Statement = [{ - Action = "network-firewall:ListRuleGroups" + Action = [ + "network-firewall:ListRuleGroups", + "network-firewall:CreateFirewallPolicy", + "network-firewall:UpdateFirewallPolicy" + ] Effect = "Allow" Resource = aws_networkfirewall_rule_group.example.arn Principal = { From c2102e4b7191c655400ee66a211e7c97a799d619 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 15 Dec 2020 22:51:57 -0800 Subject: [PATCH 0245/1212] Allows cluster_mode configuration when cluster mode is not enabled for parity with API --- ...ource_aws_elasticache_replication_group.go | 17 ++--- ..._aws_elasticache_replication_group_test.go | 72 ++++++++++++++++--- 2 files changed, 66 insertions(+), 23 deletions(-) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index 0473bc2863a..c940ef56827 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -446,7 +446,7 @@ func resourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta int if err := d.Set("member_clusters", flattenStringSet(rgp.MemberClusters)); err != nil { return fmt.Errorf("error setting member_clusters: %w", err) } - if err := d.Set("cluster_mode", flattenElasticacheNodeGroupsToClusterMode(aws.BoolValue(rgp.ClusterEnabled), rgp.NodeGroups)); err != nil { + if err := d.Set("cluster_mode", flattenElasticacheNodeGroupsToClusterMode(rgp.NodeGroups)); err != nil { return fmt.Errorf("error setting cluster_mode attribute: %w", err) } d.Set("replication_group_id", rgp.ReplicationGroupId) @@ -938,22 +938,15 @@ func deleteElasticacheReplicationGroup(replicationGroupID string, conn *elastica return err } -func flattenElasticacheNodeGroupsToClusterMode(clusterEnabled bool, nodeGroups []*elasticache.NodeGroup) []map[string]interface{} { - if !clusterEnabled { +func flattenElasticacheNodeGroupsToClusterMode(nodeGroups []*elasticache.NodeGroup) []map[string]interface{} { + if len(nodeGroups) == 0 { return []map[string]interface{}{} } m := map[string]interface{}{ - "num_node_groups": 0, - "replicas_per_node_group": 0, + "num_node_groups": len(nodeGroups), + "replicas_per_node_group": (len(nodeGroups[0].NodeGroupMembers) - 1), } - - if len(nodeGroups) == 0 { - return []map[string]interface{}{m} - } - - m["num_node_groups"] = len(nodeGroups) - 
m["replicas_per_node_group"] = (len(nodeGroups[0].NodeGroupMembers) - 1) return []map[string]interface{}{m} } diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go index ed54080f3c6..db61a57c186 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -26,7 +26,7 @@ func init() { func testSweepElasticacheReplicationGroups(region string) error { client, err := sharedClientForRegion(region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("error getting client: %w", err) } conn := client.(*AWSClient).elasticacheconn @@ -52,7 +52,7 @@ func testSweepElasticacheReplicationGroups(region string) error { log.Printf("[WARN] Skipping Elasticache Replication Group sweep for %s: %s", region, err) return nil } - return fmt.Errorf("Error retrieving Elasticache Replication Groups: %s", err) + return fmt.Errorf("Error retrieving Elasticache Replication Groups: %w", err) } return nil } @@ -71,14 +71,13 @@ func TestAccAWSElasticacheReplicationGroup_basic(t *testing.T) { Config: testAccAWSElasticacheReplicationGroupConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), - resource.TestCheckResourceAttr( - resourceName, "cluster_mode.#", "0"), - resource.TestCheckResourceAttr( - resourceName, "number_cache_clusters", "2"), - resource.TestCheckResourceAttr( - resourceName, "member_clusters.#", "2"), - resource.TestCheckResourceAttr( - resourceName, "auto_minor_version_upgrade", "false"), + resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "2"), + resource.TestCheckResourceAttr(resourceName, "auto_minor_version_upgrade", "false"), + resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.redis6.x"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "1"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.num_node_groups", "1"), ), }, { @@ -407,6 +406,38 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_Basic(t *testing.T) { }) } +func TestAccAWSElasticacheReplicationGroup_ClusterMode_NonClusteredParameterGroup(t *testing.T) { + var rg elasticache.ReplicationGroup + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_elasticache_replication_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig_NonClusteredParameterGroup(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.num_node_groups", "1"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "1"), + resource.TestMatchResourceAttr(resourceName, "primary_endpoint_address", regexp.MustCompile(fmt.Sprintf("%s\\..+\\.%s", rName, testAccGetPartitionDNSSuffix()))), + resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), + 
resource.TestCheckNoResourceAttr(resourceName, "configuration_endpoint_address"), + ), }, { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately"}, + }, + }, + }) +} + func TestAccAWSElasticacheReplicationGroup_ClusterMode_NumNodeGroups(t *testing.T) { var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") @@ -963,7 +994,7 @@ resource "aws_elasticache_parameter_group" "test" { # We do not have a data source for "latest" Elasticache family # so unfortunately we must hardcode this for now - family = "redis5.0" + family = "redis6.x" name = "%[1]s-${count.index}" @@ -1399,6 +1430,25 @@ resource "aws_elasticache_replication_group" "test" { `, rName, numNodeGroups, replicasPerNodeGroup) } +func testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig_NonClusteredParameterGroup(rName string) string { + return composeConfig( + testAccAvailableAZsNoOptInConfig(), + fmt.Sprintf(` +resource "aws_elasticache_replication_group" "test" { + replication_group_id = %[1]q + replication_group_description = "test description" + node_type = "cache.t2.medium" + automatic_failover_enabled = false + + parameter_group_name = "default.redis6.x" + cluster_mode { + num_node_groups = 1 + replicas_per_node_group = 1 + } +} +`, rName)) +} + func testAccAWSElasticacheReplicationGroup_UseCmkKmsKeyId(rInt int, rString string) string { return fmt.Sprintf(` data "aws_availability_zones" "available" { From 4acc6d8ee1223857a448cd898f370ae1ec35e2d3 Mon Sep 17 00:00:00 2001 From: Bill Rich Date: Wed, 16 Dec 2020 09:20:43 -0800 Subject: [PATCH 0246/1212] Read all attributes in tunnel options. --- aws/resource_aws_vpn_connection.go | 204 ++++++++++++++++++++++ aws/resource_aws_vpn_connection_test.go | 1 - 2 files changed, 204 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_vpn_connection.go b/aws/resource_aws_vpn_connection.go index 669ec37a13c..7445e4bed41 100644 --- a/aws/resource_aws_vpn_connection.go +++ b/aws/resource_aws_vpn_connection.go @@ -684,6 +684,10 @@ func resourceAwsVpnConnectionRead(d *schema.ResourceData, meta interface{}) erro if err := d.Set("tunnel_inside_ip_version", vpnConnection.Options.TunnelInsideIpVersion); err != nil { return err } + if err := flattenTunnelOptions(d, vpnConnection); err != nil { + return err + } + } else { //If there are no Options on the connection then we do not support it d.Set("enable_acceleration", false) @@ -738,6 +742,206 @@ func resourceAwsVpnConnectionRead(d *schema.ResourceData, meta interface{}) erro return nil } +func flattenTunnelOptions(d *schema.ResourceData, vpnConnection *ec2.VpnConnection) error { + if len(vpnConnection.Options.TunnelOptions) >= 1 { + if err := d.Set("tunnel1_dpd_timeout_action", vpnConnection.Options.TunnelOptions[0].DpdTimeoutAction); err != nil { + return err + } + + if err := d.Set("tunnel1_dpd_timeout_seconds", vpnConnection.Options.TunnelOptions[0].DpdTimeoutSeconds); err != nil { + return err + } + + ikeVersions := []string{} + for _, ikeVersion := range vpnConnection.Options.TunnelOptions[0].IkeVersions { + ikeVersions = append(ikeVersions, *ikeVersion.Value) + } + if err := d.Set("tunnel1_ike_versions", ikeVersions); err != nil { + return err + } + + phase1DHGroupNumbers := []int64{} + for _, phase1DHGroupNumber := range vpnConnection.Options.TunnelOptions[0].Phase1DHGroupNumbers { + phase1DHGroupNumbers = append(phase1DHGroupNumbers, *phase1DHGroupNumber.Value) + } + if err := d.Set("tunnel1_phase1_dh_group_numbers", 
phase1DHGroupNumbers); err != nil { + return err + } + + phase1EncAlgorithms := []string{} + for _, phase1EncAlgorithm := range vpnConnection.Options.TunnelOptions[0].Phase1EncryptionAlgorithms { + phase1EncAlgorithms = append(phase1EncAlgorithms, *phase1EncAlgorithm.Value) + } + if err := d.Set("tunnel1_phase1_encryption_algorithms", phase1EncAlgorithms); err != nil { + return err + } + + phase1IntegrityAlgorithms := []string{} + for _, phase1IntegrityAlgorithm := range vpnConnection.Options.TunnelOptions[0].Phase1IntegrityAlgorithms { + phase1IntegrityAlgorithms = append(phase1IntegrityAlgorithms, *phase1IntegrityAlgorithm.Value) + } + if err := d.Set("tunnel1_phase1_integrity_algorithms", phase1IntegrityAlgorithms); err != nil { + return err + } + + if err := d.Set("tunnel1_phase1_lifetime_seconds", vpnConnection.Options.TunnelOptions[0].Phase1LifetimeSeconds); err != nil { + return err + } + + phase2DHGroupNumbers := []int64{} + for _, phase2DHGroupNumber := range vpnConnection.Options.TunnelOptions[0].Phase2DHGroupNumbers { + phase2DHGroupNumbers = append(phase2DHGroupNumbers, *phase2DHGroupNumber.Value) + } + if err := d.Set("tunnel1_phase2_dh_group_numbers", phase2DHGroupNumbers); err != nil { + return err + } + + phase2EncAlgorithms := []string{} + for _, phase2EncAlgorithm := range vpnConnection.Options.TunnelOptions[0].Phase2EncryptionAlgorithms { + phase2EncAlgorithms = append(phase2EncAlgorithms, *phase2EncAlgorithm.Value) + } + if err := d.Set("tunnel1_phase2_encryption_algorithms", phase2EncAlgorithms); err != nil { + return err + } + + phase2IntegrityAlgorithms := []string{} + for _, phase2IntegrityAlgorithm := range vpnConnection.Options.TunnelOptions[0].Phase2IntegrityAlgorithms { + phase2IntegrityAlgorithms = append(phase2IntegrityAlgorithms, *phase2IntegrityAlgorithm.Value) + } + if err := d.Set("tunnel1_phase2_integrity_algorithms", phase2IntegrityAlgorithms); err != nil { + return err + } + + if err := d.Set("tunnel1_phase2_lifetime_seconds", vpnConnection.Options.TunnelOptions[0].Phase2LifetimeSeconds); err != nil { + return err + } + + if err := d.Set("tunnel1_rekey_fuzz_percentage", vpnConnection.Options.TunnelOptions[0].RekeyFuzzPercentage); err != nil { + return err + } + + if err := d.Set("tunnel1_rekey_margin_time_seconds", vpnConnection.Options.TunnelOptions[0].RekeyMarginTimeSeconds); err != nil { + return err + } + + if err := d.Set("tunnel1_replay_window_size", vpnConnection.Options.TunnelOptions[0].ReplayWindowSize); err != nil { + return err + } + + if err := d.Set("tunnel1_startup_action", vpnConnection.Options.TunnelOptions[0].StartupAction); err != nil { + return err + } + + if err := d.Set("tunnel1_inside_cidr", vpnConnection.Options.TunnelOptions[0].TunnelInsideCidr); err != nil { + return err + } + + if err := d.Set("tunnel1_inside_ipv6_cidr", vpnConnection.Options.TunnelOptions[0].TunnelInsideIpv6Cidr); err != nil { + return err + } + } + if len(vpnConnection.Options.TunnelOptions) >= 2 { + if err := d.Set("tunnel2_dpd_timeout_action", vpnConnection.Options.TunnelOptions[1].DpdTimeoutAction); err != nil { + return err + } + + if err := d.Set("tunnel2_dpd_timeout_seconds", vpnConnection.Options.TunnelOptions[1].DpdTimeoutSeconds); err != nil { + return err + } + + ikeVersions := []string{} + for _, ikeVersion := range vpnConnection.Options.TunnelOptions[1].IkeVersions { + ikeVersions = append(ikeVersions, *ikeVersion.Value) + } + if err := d.Set("tunnel2_ike_versions", ikeVersions); err != nil { + return err + } + + phase1DHGroupNumbers := []int64{} 
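+ // As with tunnel 1 above, collect each negotiated value list into a plain slice before storing it in state.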
+ for _, phase1DHGroupNumber := range vpnConnection.Options.TunnelOptions[1].Phase1DHGroupNumbers { + phase1DHGroupNumbers = append(phase1DHGroupNumbers, *phase1DHGroupNumber.Value) + } + if err := d.Set("tunnel2_phase1_dh_group_numbers", phase1DHGroupNumbers); err != nil { + return err + } + + phase1EncAlgorithms := []string{} + for _, phase1EncAlgorithm := range vpnConnection.Options.TunnelOptions[1].Phase1EncryptionAlgorithms { + phase1EncAlgorithms = append(phase1EncAlgorithms, *phase1EncAlgorithm.Value) + } + + if err := d.Set("tunnel2_phase1_encryption_algorithms", phase1EncAlgorithms); err != nil { + return err + } + + phase1IntegrityAlgorithms := []string{} + for _, phase1IntegrityAlgorithm := range vpnConnection.Options.TunnelOptions[1].Phase1IntegrityAlgorithms { + phase1IntegrityAlgorithms = append(phase1IntegrityAlgorithms, *phase1IntegrityAlgorithm.Value) + } + if err := d.Set("tunnel2_phase1_integrity_algorithms", phase1IntegrityAlgorithms); err != nil { + return err + } + + if err := d.Set("tunnel2_phase1_lifetime_seconds", vpnConnection.Options.TunnelOptions[1].Phase1LifetimeSeconds); err != nil { + return err + } + + phase2DHGroupNumbers := []int64{} + for _, phase2DHGroupNumber := range vpnConnection.Options.TunnelOptions[1].Phase2DHGroupNumbers { + phase2DHGroupNumbers = append(phase2DHGroupNumbers, *phase2DHGroupNumber.Value) + } + if err := d.Set("tunnel2_phase2_dh_group_numbers", phase2DHGroupNumbers); err != nil { + return err + } + + phase2EncAlgorithms := []string{} + for _, phase2EncAlgorithm := range vpnConnection.Options.TunnelOptions[1].Phase2EncryptionAlgorithms { + phase2EncAlgorithms = append(phase2EncAlgorithms, *phase2EncAlgorithm.Value) + } + + if err := d.Set("tunnel2_phase2_encryption_algorithms", phase2EncAlgorithms); err != nil { + return err + } + + phase2IntegrityAlgorithms := []string{} + for _, phase2IntegrityAlgorithm := range vpnConnection.Options.TunnelOptions[1].Phase2IntegrityAlgorithms { + phase2IntegrityAlgorithms = append(phase2IntegrityAlgorithms, *phase2IntegrityAlgorithm.Value) + } + if err := d.Set("tunnel2_phase2_integrity_algorithms", phase2IntegrityAlgorithms); err != nil { + return err + } + + if err := d.Set("tunnel2_phase2_lifetime_seconds", vpnConnection.Options.TunnelOptions[1].Phase2LifetimeSeconds); err != nil { + return err + } + + if err := d.Set("tunnel2_rekey_fuzz_percentage", vpnConnection.Options.TunnelOptions[1].RekeyFuzzPercentage); err != nil { + return err + } + + if err := d.Set("tunnel2_rekey_margin_time_seconds", vpnConnection.Options.TunnelOptions[1].RekeyMarginTimeSeconds); err != nil { + return err + } + + if err := d.Set("tunnel2_replay_window_size", vpnConnection.Options.TunnelOptions[1].ReplayWindowSize); err != nil { + return err + } + + if err := d.Set("tunnel2_startup_action", vpnConnection.Options.TunnelOptions[1].StartupAction); err != nil { + return err + } + + if err := d.Set("tunnel2_inside_cidr", vpnConnection.Options.TunnelOptions[1].TunnelInsideCidr); err != nil { + return err + } + + if err := d.Set("tunnel2_inside_ipv6_cidr", vpnConnection.Options.TunnelOptions[1].TunnelInsideIpv6Cidr); err != nil { + return err + } + } + return nil +} + func resourceAwsVpnConnectionUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn diff --git a/aws/resource_aws_vpn_connection_test.go b/aws/resource_aws_vpn_connection_test.go index 3b91a0b2256..e94ba820444 100644 --- a/aws/resource_aws_vpn_connection_test.go +++ b/aws/resource_aws_vpn_connection_test.go @@ -625,7 +625,6 
@@ resource "aws_vpn_connection" "test" { customer_gateway_id = aws_customer_gateway.customer_gateway.id type = "ipsec.1" static_routes_only = false - enable_acceleration = false } `, rBgpAsn, rInt) } From 8a629a78ebda0329ed30c41f6f59d423d2786564 Mon Sep 17 00:00:00 2001 From: Bill Rich Date: Wed, 16 Dec 2020 10:23:08 -0800 Subject: [PATCH 0247/1212] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c214f115c6e..7263ffeb0c5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ ENHANCEMENTS * resource/aws_instance: Add `enclave_options` configuration block (Nitro Enclaves) [GH-16361] * resource/aws_launch_configuration: Add `metadata_options` configuration block [GH-14637] * resource/aws_launch_template: Add `enclave_options` configuration block (Nitro Enclaves) [GH-16361] +* resource/aws_vpn_connection: Add support for VPN tunnel options and enable acceleration, DPDTimeoutAction, StartupAction, local/remote IPv4/IPv6 network CIDR and tunnel inside IP version. [GH-14740] BUG FIXES From fad86498622c63ca8b15232b8d723780ec8ea48f Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 16 Dec 2020 13:56:31 -0500 Subject: [PATCH 0248/1212] resource/aws_main_route_table_association: Prevent crash on creation when VPC main route table association is not found (#16680) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16593 This is quick fix to replace the crash behavior with an actual error message. Additional information is needed to determine why this issue occurs (e.g. EC2 eventual consistency, problematic configurations, etc.) but this at least gives operators a better chance to continue other parts of the apply successfully and potentially just rerun the errant resource. 
Output from acceptance testing: ``` --- PASS: TestAccAWSMainRouteTableAssociation_basic (62.60s) ``` --- aws/resource_aws_main_route_table_association.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_main_route_table_association.go b/aws/resource_aws_main_route_table_association.go index bd9213017fa..a3f08b082e7 100644 --- a/aws/resource_aws_main_route_table_association.go +++ b/aws/resource_aws_main_route_table_association.go @@ -47,8 +47,13 @@ func resourceAwsMainRouteTableAssociationCreate(d *schema.ResourceData, meta int log.Printf("[INFO] Creating main route table association: %s => %s", vpcId, routeTableId) mainAssociation, err := findMainRouteTableAssociation(conn, vpcId) + if err != nil { - return err + return fmt.Errorf("error finding EC2 VPC (%s) main route table association for replacement: %w", vpcId, err) + } + + if mainAssociation == nil { + return fmt.Errorf("error finding EC2 VPC (%s) main route table association for replacement: association not found", vpcId) } resp, err := conn.ReplaceRouteTableAssociation(&ec2.ReplaceRouteTableAssociationInput{ From a51dca61581d381ea036e6f71dc2ad253754ac2d Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 16 Dec 2020 13:59:31 -0500 Subject: [PATCH 0249/1212] Update CHANGELOG for #16680 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7263ffeb0c5..8401e6a6b33 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ BUG FIXES * data-source/aws_prefix_list: Using `name` argument no longer overrides other arguments [GH-16739] +* resource/aws_main_route_table_association: Prevent crash on creation when VPC main route table association is not found [GH-16680] * resource/aws_workspaces_workspace: Prevent panic from terminated WorkSpace [GH-16692] ## 3.21.0 (December 11, 2020) From 4ca17d1f5bc5f1c5bf21b0ab6ee8ee79ccb7701e Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 16 Dec 2020 11:06:41 -0800 Subject: [PATCH 0250/1212] Update ROADMAP.md --- ROADMAP.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ROADMAP.md b/ROADMAP.md index fcbebf31230..c2295cf5a9a 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -48,7 +48,7 @@ The Elasticache work will begin with a research spike to ensure that we can - [#14959](https://github.com/hashicorp/terraform-provider-aws/issues/14959): Research Spike: Elasticache Service Fixes and Improvements - [#12708](https://github.com/hashicorp/terraform-provider-aws/issues/12708): resource/aws_elasticache_replication_group: Add MultiAZ support -- [#13517](https://github.com/hashicorp/terraform-provider-aws/issues/13517): Feature Request: `aws_elasticache_cluster` allow auto-minor-version-upgrade to be set +- ~[#13517](https://github.com/hashicorp/terraform-provider-aws/issues/13517): Feature Request: `aws_elasticache_cluster` allow auto-minor-version-upgrade to be set~ This parameter is not enabled in the AWS API.
- [#5118](https://github.com/hashicorp/terraform-provider-aws/issues/5118): support setting primary/replica AZ attributes inside NodeGroupConfiguration for RedisClusterModelEnabled ### Workflow Improvements From 0dcab47720b2a089978d6fbf95ed92926b47f739 Mon Sep 17 00:00:00 2001 From: Roberth Kulbin Date: Wed, 16 Dec 2020 15:13:41 +0000 Subject: [PATCH 0251/1212] r/aws_ec2_managed_prefix_list_entry: remove resource --- aws/provider.go | 1 - ...ource_aws_ec2_managed_prefix_list_entry.go | 289 ---------- ..._aws_ec2_managed_prefix_list_entry_test.go | 498 ------------------ ...c2_managed_prefix_list_entry.html.markdown | 66 --- 4 files changed, 854 deletions(-) delete mode 100644 aws/resource_aws_ec2_managed_prefix_list_entry.go delete mode 100644 aws/resource_aws_ec2_managed_prefix_list_entry_test.go delete mode 100644 website/docs/r/ec2_managed_prefix_list_entry.html.markdown diff --git a/aws/provider.go b/aws/provider.go index 61c70d73e10..ab4733b46f7 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -600,7 +600,6 @@ func Provider() *schema.Provider { "aws_ec2_local_gateway_route": resourceAwsEc2LocalGatewayRoute(), "aws_ec2_local_gateway_route_table_vpc_association": resourceAwsEc2LocalGatewayRouteTableVpcAssociation(), "aws_ec2_managed_prefix_list": resourceAwsEc2ManagedPrefixList(), - "aws_ec2_managed_prefix_list_entry": resourceAwsEc2ManagedPrefixListEntry(), "aws_ec2_tag": resourceAwsEc2Tag(), "aws_ec2_traffic_mirror_filter": resourceAwsEc2TrafficMirrorFilter(), "aws_ec2_traffic_mirror_filter_rule": resourceAwsEc2TrafficMirrorFilterRule(), diff --git a/aws/resource_aws_ec2_managed_prefix_list_entry.go b/aws/resource_aws_ec2_managed_prefix_list_entry.go deleted file mode 100644 index 82a91db7301..00000000000 --- a/aws/resource_aws_ec2_managed_prefix_list_entry.go +++ /dev/null @@ -1,289 +0,0 @@ -package aws - -import ( - "errors" - "fmt" - "log" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" -) - -func resourceAwsEc2ManagedPrefixListEntry() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsEc2ManagedPrefixListEntryCreate, - Read: resourceAwsEc2ManagedPrefixListEntryRead, - Update: resourceAwsEc2ManagedPrefixListEntryUpdate, - Delete: resourceAwsEc2ManagedPrefixListEntryDelete, - - Importer: &schema.ResourceImporter{ - State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - ss := strings.Split(d.Id(), "_") - if len(ss) != 2 || ss[0] == "" || ss[1] == "" { - return nil, fmt.Errorf("invalid id %s: expected pl-123456_1.0.0.0/8", d.Id()) - } - - d.Set("prefix_list_id", ss[0]) - d.Set("cidr_block", ss[1]) - return []*schema.ResourceData{d}, nil - }, - }, - - Schema: map[string]*schema.Schema{ - "prefix_list_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "cidr_block": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.IsCIDR, - }, - "description": { - Type: schema.TypeString, - Optional: true, - Default: "", - ValidateFunc: validation.StringLenBetween(0, 255), - }, - }, - } -} - -func resourceAwsEc2ManagedPrefixListEntryCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - prefixListId := 
d.Get("prefix_list_id").(string) - cidrBlock := d.Get("cidr_block").(string) - - log.Printf( - "[INFO] adding entry %s to prefix list %s...", - cidrBlock, prefixListId) - - err := modifyAwsManagedPrefixListConcurrently( - prefixListId, conn, d.Timeout(schema.TimeoutUpdate), - ec2.ModifyManagedPrefixListInput{ - PrefixListId: aws.String(prefixListId), - CurrentVersion: nil, // set by modifyAwsManagedPrefixListConcurrently - AddEntries: []*ec2.AddPrefixListEntry{ - { - Cidr: aws.String(cidrBlock), - Description: aws.String(d.Get("description").(string)), - }, - }, - }, - func(pl *ec2.ManagedPrefixList) *resource.RetryError { - currentVersion := int(aws.Int64Value(pl.Version)) - - _, ok, err := getManagedPrefixListEntryByCIDR(prefixListId, conn, currentVersion, cidrBlock) - switch { - case err != nil: - return resource.NonRetryableError(err) - case ok: - return resource.NonRetryableError(errors.New("an entry for this cidr block already exists")) - } - - return nil - }) - - if err != nil { - return fmt.Errorf("failed to add entry %s to prefix list %s: %s", cidrBlock, prefixListId, err) - } - - d.SetId(fmt.Sprintf("%s_%s", prefixListId, cidrBlock)) - - return resourceAwsEc2ManagedPrefixListEntryRead(d, meta) -} - -func resourceAwsEc2ManagedPrefixListEntryRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - prefixListId := d.Get("prefix_list_id").(string) - cidrBlock := d.Get("cidr_block").(string) - - entry, ok, err := getManagedPrefixListEntryByCIDR(prefixListId, conn, 0, cidrBlock) - switch { - case err != nil: - return err - case !ok: - log.Printf( - "[WARN] entry %s of managed prefix list %s not found; removing from state.", - cidrBlock, prefixListId) - d.SetId("") - return nil - } - - d.Set("description", entry.Description) - - return nil -} - -func resourceAwsEc2ManagedPrefixListEntryUpdate(d *schema.ResourceData, meta interface{}) error { - if !d.HasChange("description") { - return fmt.Errorf("all attributes except description should force new resource") - } - - conn := meta.(*AWSClient).ec2conn - prefixListId := d.Get("prefix_list_id").(string) - cidrBlock := d.Get("cidr_block").(string) - - err := modifyAwsManagedPrefixListConcurrently( - prefixListId, conn, d.Timeout(schema.TimeoutUpdate), - ec2.ModifyManagedPrefixListInput{ - PrefixListId: aws.String(prefixListId), - CurrentVersion: nil, // set by modifyAwsManagedPrefixListConcurrently - AddEntries: []*ec2.AddPrefixListEntry{ - { - Cidr: aws.String(cidrBlock), - Description: aws.String(d.Get("description").(string)), - }, - }, - }, - nil) - - if err != nil { - return fmt.Errorf("failed to update entry %s in prefix list %s: %s", cidrBlock, prefixListId, err) - } - - return resourceAwsEc2ManagedPrefixListEntryRead(d, meta) -} - -func resourceAwsEc2ManagedPrefixListEntryDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ec2conn - prefixListId := d.Get("prefix_list_id").(string) - cidrBlock := d.Get("cidr_block").(string) - - err := modifyAwsManagedPrefixListConcurrently( - prefixListId, conn, d.Timeout(schema.TimeoutUpdate), - ec2.ModifyManagedPrefixListInput{ - PrefixListId: aws.String(prefixListId), - CurrentVersion: nil, // set by modifyAwsManagedPrefixListConcurrently - RemoveEntries: []*ec2.RemovePrefixListEntry{ - { - Cidr: aws.String(cidrBlock), - }, - }, - }, - nil) - - switch { - case isResourceNotFoundError(err): - log.Printf("[WARN] managed prefix list %s not found; removing from state", prefixListId) - return nil - case err != nil: - return 
fmt.Errorf("failed to remove entry %s from prefix list %s: %s", cidrBlock, prefixListId, err) - } - - return nil -} - -func getManagedPrefixListEntryByCIDR( - id string, - conn *ec2.EC2, - version int, - cidr string, -) (*ec2.PrefixListEntry, bool, error) { - input := ec2.GetManagedPrefixListEntriesInput{ - PrefixListId: aws.String(id), - } - - if version > 0 { - input.TargetVersion = aws.Int64(int64(version)) - } - - result := (*ec2.PrefixListEntry)(nil) - - err := conn.GetManagedPrefixListEntriesPages( - &input, - func(output *ec2.GetManagedPrefixListEntriesOutput, last bool) bool { - for _, entry := range output.Entries { - entryCidr := aws.StringValue(entry.Cidr) - if entryCidr == cidr { - result = entry - return false - } - } - - return true - }) - - switch { - case isAWSErr(err, "InvalidPrefixListID.NotFound", ""): - return nil, false, nil - case err != nil: - return nil, false, fmt.Errorf("failed to get entries in prefix list %s: %v", id, err) - case result == nil: - return nil, false, nil - } - - return result, true, nil -} - -func modifyAwsManagedPrefixListConcurrently( - id string, - conn *ec2.EC2, - timeout time.Duration, - input ec2.ModifyManagedPrefixListInput, - check func(pl *ec2.ManagedPrefixList) *resource.RetryError, -) error { - isModified := false - err := resource.Retry(timeout, func() *resource.RetryError { - if !isModified { - pl, ok, err := getManagedPrefixList(id, conn) - switch { - case err != nil: - return resource.NonRetryableError(err) - case !ok: - return resource.NonRetryableError(&resource.NotFoundError{}) - } - - input.CurrentVersion = pl.Version - - if check != nil { - if err := check(pl); err != nil { - return err - } - } - - switch _, err := conn.ModifyManagedPrefixList(&input); { - case isManagedPrefixListModificationConflictErr(err): - return resource.RetryableError(err) - case err != nil: - return resource.NonRetryableError(fmt.Errorf("modify failed: %s", err)) - } - - isModified = true - } - - switch settled, err := isAwsManagedPrefixListSettled(id, conn); { - case err != nil: - return resource.NonRetryableError(fmt.Errorf("resource failed to settle: %s", err)) - case !settled: - return resource.RetryableError(errors.New("resource not yet settled")) - } - - return nil - }) - - if tfresource.TimedOut(err) { - return err - } - - if err != nil { - return err - } - - return nil -} - -func isManagedPrefixListModificationConflictErr(err error) bool { - return isAWSErr(err, "IncorrectState", "in the current state (modify-in-progress)") || - isAWSErr(err, "IncorrectState", "in the current state (create-in-progress)") || - isAWSErr(err, "PrefixListVersionMismatch", "") || - isAWSErr(err, "ConcurrentMutationLimitExceeded", "") -} diff --git a/aws/resource_aws_ec2_managed_prefix_list_entry_test.go b/aws/resource_aws_ec2_managed_prefix_list_entry_test.go deleted file mode 100644 index 4c241a4068e..00000000000 --- a/aws/resource_aws_ec2_managed_prefix_list_entry_test.go +++ /dev/null @@ -1,498 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "regexp" - "sort" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" -) - -func TestAccAwsEc2ManagedPrefixListEntry_basic(t *testing.T) { - resourceName := "aws_ec2_managed_prefix_list_entry.test" - entry := ec2.PrefixListEntry{} - - checkAttributes := func(*terraform.State) error { - if actual := aws.StringValue(entry.Cidr); actual != "1.0.0.0/8" 
{ - return fmt.Errorf("bad cidr: %s", actual) - } - - if actual := aws.StringValue(entry.Description); actual != "Create" { - return fmt.Errorf("bad description: %s", actual) - } - - return nil - } - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsEc2ManagedPrefixListEntryConfig_basic_create, - ResourceName: resourceName, - Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListEntryExists(resourceName, &entry), - checkAttributes, - resource.TestCheckResourceAttr(resourceName, "cidr_block", "1.0.0.0/8"), - resource.TestCheckResourceAttr(resourceName, "description", "Create"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccAwsEc2ManagedPrefixListEntryConfig_basic_update, - ResourceName: resourceName, - Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListEntryExists(resourceName, &entry), - resource.TestCheckResourceAttr(resourceName, "cidr_block", "1.0.0.0/8"), - resource.TestCheckResourceAttr(resourceName, "description", "Update"), - ), - }, - }, - }) -} - -const testAccAwsEc2ManagedPrefixListEntryConfig_basic_create = ` -resource "aws_ec2_managed_prefix_list" "test" { - name = "tf-test-acc" - address_family = "IPv4" - max_entries = 5 -} - -resource "aws_ec2_managed_prefix_list_entry" "test" { - prefix_list_id = aws_ec2_managed_prefix_list.test.id - cidr_block = "1.0.0.0/8" - description = "Create" -} -` - -const testAccAwsEc2ManagedPrefixListEntryConfig_basic_update = ` -resource "aws_ec2_managed_prefix_list" "test" { - name = "tf-test-acc" - address_family = "IPv4" - max_entries = 5 -} - -resource "aws_ec2_managed_prefix_list_entry" "test" { - prefix_list_id = aws_ec2_managed_prefix_list.test.id - cidr_block = "1.0.0.0/8" - description = "Update" -} -` - -func testAccAwsEc2ManagedPrefixListEntryExists( - name string, - out *ec2.PrefixListEntry, -) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - switch { - case !ok: - return fmt.Errorf("resource %s not found", name) - case rs.Primary.ID == "": - return fmt.Errorf("resource %s has not set its id", name) - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - ss := strings.Split(rs.Primary.ID, "_") - prefixListId, cidrBlock := ss[0], ss[1] - - entry, ok, err := getManagedPrefixListEntryByCIDR(prefixListId, conn, 0, cidrBlock) - switch { - case err != nil: - return err - case !ok: - return fmt.Errorf("resource %s (%s) has not been created", name, prefixListId) - } - - if out != nil { - *out = *entry - } - - return nil - } -} - -func TestAccAwsEc2ManagedPrefixListEntry_disappears(t *testing.T) { - prefixListResourceName := "aws_ec2_managed_prefix_list.test" - resourceName := "aws_ec2_managed_prefix_list_entry.test" - pl := ec2.ManagedPrefixList{} - entry := ec2.PrefixListEntry{} - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsEc2ManagedPrefixListEntryConfig_disappears, - ResourceName: resourceName, - Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListEntryExists(resourceName, &entry), - testAccAwsEc2ManagedPrefixListExists(prefixListResourceName, &pl, nil), - 
testAccCheckResourceDisappears(testAccProvider, resourceAwsEc2ManagedPrefixListEntry(), resourceName), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -const testAccAwsEc2ManagedPrefixListEntryConfig_disappears = ` -resource "aws_ec2_managed_prefix_list" "test" { - name = "tf-test-acc" - address_family = "IPv4" - max_entries = 5 -} - -resource "aws_ec2_managed_prefix_list_entry" "test" { - prefix_list_id = aws_ec2_managed_prefix_list.test.id - cidr_block = "1.0.0.0/8" -} -` - -func TestAccAwsEc2ManagedPrefixListEntry_prefixListDisappears(t *testing.T) { - prefixListResourceName := "aws_ec2_managed_prefix_list.test" - resourceName := "aws_ec2_managed_prefix_list_entry.test" - pl := ec2.ManagedPrefixList{} - entry := ec2.PrefixListEntry{} - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsEc2ManagedPrefixListEntryConfig_disappears, - ResourceName: resourceName, - Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListEntryExists(resourceName, &entry), - testAccAwsEc2ManagedPrefixListExists(prefixListResourceName, &pl, nil), - testAccCheckResourceDisappears(testAccProvider, resourceAwsEc2ManagedPrefixList(), prefixListResourceName), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAwsEc2ManagedPrefixListEntry_alreadyExists(t *testing.T) { - resourceName := "aws_ec2_managed_prefix_list_entry.test" - entry := ec2.PrefixListEntry{} - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsEc2ManagedPrefixListEntryConfig_alreadyExists, - ResourceName: resourceName, - Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListEntryExists(resourceName, &entry), - ), - ExpectError: regexp.MustCompile(`an entry for this cidr block already exists`), - }, - }, - }) -} - -const testAccAwsEc2ManagedPrefixListEntryConfig_alreadyExists = ` -resource "aws_ec2_managed_prefix_list" "test" { - name = "tf-test-acc" - address_family = "IPv4" - max_entries = 5 - - entry { - cidr_block = "1.0.0.0/8" - } -} - -resource "aws_ec2_managed_prefix_list_entry" "test" { - prefix_list_id = aws_ec2_managed_prefix_list.test.id - cidr_block = "1.0.0.0/8" - description = "Test" -} -` - -func TestAccAwsEc2ManagedPrefixListEntry_description(t *testing.T) { - resourceName := "aws_ec2_managed_prefix_list_entry.test" - entry := ec2.PrefixListEntry{} - - checkDescription := func(expect string) resource.TestCheckFunc { - return func(*terraform.State) error { - if actual := aws.StringValue(entry.Description); actual != expect { - return fmt.Errorf("bad description: %s", actual) - } - - return nil - } - } - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsEc2ManagedPrefixListEntryConfig_description_none, - ResourceName: resourceName, - Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListEntryExists(resourceName, &entry), - checkDescription("Test1"), - resource.TestCheckResourceAttr(resourceName, "cidr_block", "1.0.0.0/8"), - resource.TestCheckResourceAttr(resourceName, "description", "Test1"), - ), - }, - { 
- ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccAwsEc2ManagedPrefixListEntryConfig_description_some, - ResourceName: resourceName, - Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListEntryExists(resourceName, &entry), - checkDescription("Test2"), - resource.TestCheckResourceAttr(resourceName, "cidr_block", "1.0.0.0/8"), - resource.TestCheckResourceAttr(resourceName, "description", "Test2"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccAwsEc2ManagedPrefixListEntryConfig_description_empty, - ResourceName: resourceName, - Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListEntryExists(resourceName, &entry), - checkDescription(""), - resource.TestCheckResourceAttr(resourceName, "cidr_block", "1.0.0.0/8"), - resource.TestCheckResourceAttr(resourceName, "description", ""), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccAwsEc2ManagedPrefixListEntryConfig_description_null, - ResourceName: resourceName, - Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListEntryExists(resourceName, &entry), - checkDescription(""), - resource.TestCheckResourceAttr(resourceName, "cidr_block", "1.0.0.0/8"), - resource.TestCheckResourceAttr(resourceName, "description", ""), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -const testAccAwsEc2ManagedPrefixListEntryConfig_description_none = ` -resource "aws_ec2_managed_prefix_list" "test" { - name = "tf-test-acc" - address_family = "IPv4" - max_entries = 5 -} - -resource "aws_ec2_managed_prefix_list_entry" "test" { - prefix_list_id = aws_ec2_managed_prefix_list.test.id - cidr_block = "1.0.0.0/8" - description = "Test1" -} -` - -const testAccAwsEc2ManagedPrefixListEntryConfig_description_some = ` -resource "aws_ec2_managed_prefix_list" "test" { - name = "tf-test-acc" - address_family = "IPv4" - max_entries = 5 -} - -resource "aws_ec2_managed_prefix_list_entry" "test" { - prefix_list_id = aws_ec2_managed_prefix_list.test.id - cidr_block = "1.0.0.0/8" - description = "Test2" -} -` - -const testAccAwsEc2ManagedPrefixListEntryConfig_description_empty = ` -resource "aws_ec2_managed_prefix_list" "test" { - name = "tf-test-acc" - address_family = "IPv4" - max_entries = 5 -} - -resource "aws_ec2_managed_prefix_list_entry" "test" { - prefix_list_id = aws_ec2_managed_prefix_list.test.id - cidr_block = "1.0.0.0/8" - description = "" -} -` - -const testAccAwsEc2ManagedPrefixListEntryConfig_description_null = ` -resource "aws_ec2_managed_prefix_list" "test" { - name = "tf-test-acc" - address_family = "IPv4" - max_entries = 5 -} - -resource "aws_ec2_managed_prefix_list_entry" "test" { - prefix_list_id = aws_ec2_managed_prefix_list.test.id - cidr_block = "1.0.0.0/8" -} -` - -func TestAccAwsEc2ManagedPrefixListEntry_exceedLimit(t *testing.T) { - resourceName := "aws_ec2_managed_prefix_list_entry.test_1" - entry := ec2.PrefixListEntry{} - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsEc2ManagedPrefixListEntryConfig_exceedLimit(2), - ResourceName: resourceName, - Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListEntryExists(resourceName, 
&entry)), - }, - { - Config: testAccAwsEc2ManagedPrefixListEntryConfig_exceedLimit(3), - ResourceName: resourceName, - ExpectError: regexp.MustCompile(`You've reached the maximum number of entries for the prefix list.`), - }, - }, - }) -} - -func testAccAwsEc2ManagedPrefixListEntryConfig_exceedLimit(count int) string { - entries := `` - for i := 0; i < count; i++ { - entries += fmt.Sprintf(` -resource "aws_ec2_managed_prefix_list_entry" "test_%[1]d" { - prefix_list_id = aws_ec2_managed_prefix_list.test.id - cidr_block = "%[1]d.0.0.0/8" - description = "Test_%[1]d" -} -`, - i+1) - } - - return fmt.Sprintf(` -resource "aws_ec2_managed_prefix_list" "test" { - name = "tf-test-acc" - address_family = "IPv4" - max_entries = 2 -} - -%[1]s -`, - entries) -} - -func testAccAwsEc2ManagedPrefixListSortEntries(list []*ec2.PrefixListEntry) { - sort.Slice(list, func(i, j int) bool { - return aws.StringValue(list[i].Cidr) < aws.StringValue(list[j].Cidr) - }) -} - -func TestAccAwsEc2ManagedPrefixListEntry_concurrentModification(t *testing.T) { - prefixListResourceName := "aws_ec2_managed_prefix_list.test" - pl, entries := ec2.ManagedPrefixList{}, []*ec2.PrefixListEntry(nil) - - checkAllEntriesExist := func(prefix string, count int) resource.TestCheckFunc { - return func(state *terraform.State) error { - if len(entries) != count { - return fmt.Errorf("expected %d entries", count) - } - - expectEntries := make([]*ec2.PrefixListEntry, 0, count) - for i := 0; i < count; i++ { - expectEntries = append(expectEntries, &ec2.PrefixListEntry{ - Cidr: aws.String(fmt.Sprintf("%d.0.0.0/8", i+1)), - Description: aws.String(fmt.Sprintf("%s%d", prefix, i+1))}) - } - testAccAwsEc2ManagedPrefixListSortEntries(expectEntries) - - testAccAwsEc2ManagedPrefixListSortEntries(entries) - - if !reflect.DeepEqual(expectEntries, entries) { - return fmt.Errorf("expected entries %#v, got %#v", expectEntries, entries) - } - - return nil - } - } - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsEc2ManagedPrefixListEntryConfig_concurrentModification("Step0_", 20), - ResourceName: prefixListResourceName, - Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListExists(prefixListResourceName, &pl, &entries), - checkAllEntriesExist("Step0_", 20)), - }, - { - // update the first 10 and drop the last 10 - Config: testAccAwsEc2ManagedPrefixListEntryConfig_concurrentModification("Step1_", 10), - ResourceName: prefixListResourceName, - Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListExists(prefixListResourceName, &pl, &entries), - checkAllEntriesExist("Step1_", 10)), - }, - }, - }) -} - -func testAccAwsEc2ManagedPrefixListEntryConfig_concurrentModification(prefix string, count int) string { - entries := `` - for i := 0; i < count; i++ { - entries += fmt.Sprintf(` -resource "aws_ec2_managed_prefix_list_entry" "test_%[1]d" { - prefix_list_id = aws_ec2_managed_prefix_list.test.id - cidr_block = "%[1]d.0.0.0/8" - description = "%[2]s%[1]d" -} -`, - i+1, - prefix) - } - - return fmt.Sprintf(` -resource "aws_ec2_managed_prefix_list" "test" { - name = "tf-test-acc" - address_family = "IPv4" - max_entries = 20 -} - -%[1]s -`, - entries) -} diff --git a/website/docs/r/ec2_managed_prefix_list_entry.html.markdown b/website/docs/r/ec2_managed_prefix_list_entry.html.markdown deleted file mode 100644 index 
3c2ab6b03bd..00000000000
--- a/website/docs/r/ec2_managed_prefix_list_entry.html.markdown
+++ /dev/null
@@ -1,66 +0,0 @@
----
-subcategory: "VPC"
-layout: "aws"
-page_title: "AWS: aws_ec2_managed_prefix_list_entry"
-description: |-
-  Provides a managed prefix list entry resource.
----
-
-# Resource: aws_ec2_managed_prefix_list_entry
-
-Provides a managed prefix list entry resource. Represents a single `entry`, which
-can be added to an existing Managed Prefix List.
-
-~> **NOTE on Prefix Lists and Prefix List Entries:** Terraform currently
-provides both a standalone Prefix List Entry resource and a [Managed Prefix List resource](ec2_managed_prefix_list.html)
-with an `entry` set defined in-line. At this time you
-cannot use a Prefix List with in-line rules in conjunction with any Prefix List Entry
-resources. Doing so will cause a conflict of rule settings and will unpredictably
-fail or overwrite rules.
-
-~> **NOTE:** A Prefix List has an upper bound on the number of entries
-that it can support.
-
-~> **NOTE:** Resource creation will fail if the target Prefix List already has an
-entry for the given CIDR block.
-
-## Example Usage
-
-Basic usage
-
-```hcl
-resource "aws_ec2_managed_prefix_list" "example" {
-  name           = "All VPC CIDR-s"
-  address_family = "IPv4"
-  max_entries    = 5
-}
-
-resource "aws_ec2_managed_prefix_list_entry" "example" {
-  prefix_list_id = aws_ec2_managed_prefix_list.example.id
-  cidr_block     = aws_vpc.example.cidr_block
-  description    = "Primary"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `prefix_list_id` - (Required, Forces new resource) ID of the Prefix List to add this entry to.
-* `cidr_block` - (Required, Forces new resource) The CIDR block to add an entry for. Different entries may have
-  overlapping CIDR blocks, but duplicating a particular block is not allowed.
-* `description` - (Optional) The description of this entry (up to 255 characters).
-
-## Attributes Reference
-
-In addition to all arguments above, the following attributes are exported:
-
-* `id` - The ID of the prefix list entry.
-
-## Import
-
-Prefix List Entries can be imported using the `prefix_list_id` and `cidr_block`, joined by an underscore (`_`).
For example: - -```console -$ terraform import aws_ec2_managed_prefix_list_entry.example pl-0570a1d2d725c16be_10.30.0.0/16 -``` From a78644ee6c0602059b04f43a48569519f0a7d9bc Mon Sep 17 00:00:00 2001 From: Roberth Kulbin Date: Wed, 16 Dec 2020 16:33:07 +0000 Subject: [PATCH 0252/1212] r/aws_ec2_managed_prefix_list: code review updates --- aws/internal/service/ec2/finder/finder.go | 17 + aws/internal/service/ec2/waiter/status.go | 19 + aws/internal/service/ec2/waiter/waiter.go | 60 +++ aws/resource_aws_ec2_managed_prefix_list.go | 293 ++++-------- ...source_aws_ec2_managed_prefix_list_test.go | 418 ++++-------------- .../r/ec2_managed_prefix_list.html.markdown | 8 +- 6 files changed, 262 insertions(+), 553 deletions(-) diff --git a/aws/internal/service/ec2/finder/finder.go b/aws/internal/service/ec2/finder/finder.go index 6752919b3ec..1cac48c0163 100644 --- a/aws/internal/service/ec2/finder/finder.go +++ b/aws/internal/service/ec2/finder/finder.go @@ -130,3 +130,20 @@ func VpnGatewayByID(conn *ec2.EC2, id string) (*ec2.VpnGateway, error) { return output.VpnGateways[0], nil } + +func ManagedPrefixListByID(conn *ec2.EC2, id string) (*ec2.ManagedPrefixList, error) { + input := &ec2.DescribeManagedPrefixListsInput{ + PrefixListIds: aws.StringSlice([]string{id}), + } + + output, err := conn.DescribeManagedPrefixLists(input) + if err != nil { + return nil, err + } + + if output == nil || len(output.PrefixLists) == 0 { + return nil, nil + } + + return output.PrefixLists[0], nil +} diff --git a/aws/internal/service/ec2/waiter/status.go b/aws/internal/service/ec2/waiter/status.go index 3bdd28b219f..dc5d400785c 100644 --- a/aws/internal/service/ec2/waiter/status.go +++ b/aws/internal/service/ec2/waiter/status.go @@ -267,3 +267,22 @@ func VpnGatewayVpcAttachmentState(conn *ec2.EC2, vpnGatewayID, vpcID string) res return vpcAttachment, aws.StringValue(vpcAttachment.State), nil } } + +const ( + managedPrefixListStateNotFound = "NotFound" + managedPrefixListStateUnknown = "Unknown" +) + +func ManagedPrefixListState(conn *ec2.EC2, prefixListId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + managedPrefixList, err := finder.ManagedPrefixListByID(conn, prefixListId) + if err != nil { + return nil, managedPrefixListStateUnknown, err + } + if managedPrefixList == nil { + return nil, managedPrefixListStateNotFound, nil + } + + return managedPrefixList, aws.StringValue(managedPrefixList.State), nil + } +} diff --git a/aws/internal/service/ec2/waiter/waiter.go b/aws/internal/service/ec2/waiter/waiter.go index cb597291ee9..886e9133b34 100644 --- a/aws/internal/service/ec2/waiter/waiter.go +++ b/aws/internal/service/ec2/waiter/waiter.go @@ -4,6 +4,7 @@ import ( "time" "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) @@ -244,3 +245,62 @@ func VpnGatewayVpcAttachmentDetached(conn *ec2.EC2, vpnGatewayID, vpcID string) return nil, err } + +const ( + ManagedPrefixListTimeout = 15 * time.Minute +) + +func ManagedPrefixListCreated(conn *ec2.EC2, prefixListId string) (*ec2.ManagedPrefixList, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ec2.PrefixListStateCreateInProgress}, + Target: []string{ec2.PrefixListStateCreateComplete}, + Timeout: ManagedPrefixListTimeout, + Refresh: ManagedPrefixListState(conn, prefixListId), + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*ec2.ManagedPrefixList); ok { + return output, err + } + + 
return nil, err +} + +func ManagedPrefixListModified(conn *ec2.EC2, prefixListId string) (*ec2.ManagedPrefixList, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ec2.PrefixListStateModifyInProgress}, + Target: []string{ec2.PrefixListStateModifyComplete}, + Timeout: ManagedPrefixListTimeout, + Refresh: ManagedPrefixListState(conn, prefixListId), + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*ec2.ManagedPrefixList); ok { + return output, err + } + + return nil, err +} + +func ManagedPrefixListDeleted(conn *ec2.EC2, prefixListId string) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{ec2.PrefixListStateDeleteInProgress}, + Target: []string{ec2.PrefixListStateDeleteComplete}, + Timeout: ManagedPrefixListTimeout, + Refresh: ManagedPrefixListState(conn, prefixListId), + } + + _, err := stateConf.WaitForState() + + if tfawserr.ErrCodeEquals(err, "InvalidPrefixListID.NotFound") { + return nil + } + + if err != nil { + return err + } + + return nil +} diff --git a/aws/resource_aws_ec2_managed_prefix_list.go b/aws/resource_aws_ec2_managed_prefix_list.go index aa88a32d50a..fddf4835120 100644 --- a/aws/resource_aws_ec2_managed_prefix_list.go +++ b/aws/resource_aws_ec2_managed_prefix_list.go @@ -1,22 +1,16 @@ package aws import ( - "errors" "fmt" "log" - "sort" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" -) - -var ( - awsPrefixListEntrySetHashFunc = schema.HashResource(prefixListEntrySchema()) + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2/waiter" ) func resourceAwsEc2ManagedPrefixList() *schema.Resource { @@ -44,12 +38,24 @@ func resourceAwsEc2ManagedPrefixList() *schema.Resource { Computed: true, }, "entry": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - ConfigMode: schema.SchemaConfigModeAttr, - Elem: prefixListEntrySchema(), - Set: awsPrefixListEntrySetHashFunc, + Type: schema.TypeSet, + Optional: true, + // Computed: true, + // ConfigMode: schema.SchemaConfigModeAttr, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cidr_block": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.IsCIDR, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 255), + }, + }, + }, }, "max_entries": { Type: schema.TypeInt, @@ -67,22 +73,9 @@ func resourceAwsEc2ManagedPrefixList() *schema.Resource { Computed: true, }, "tags": tagsSchema(), - }, - } -} - -func prefixListEntrySchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cidr_block": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.IsCIDR, - }, - "description": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(0, 255), + "version": { + Type: schema.TypeInt, + Computed: true, }, }, } @@ -110,19 +103,17 @@ func resourceAwsEc2ManagedPrefixListCreate(d *schema.ResourceData, meta interfac output, err := conn.CreateManagedPrefixList(&input) if err != nil { - return fmt.Errorf("failed to create managed prefix list: %v", err) + return fmt.Errorf("failed to create managed prefix 
list: %w", err) } - id := aws.StringValue(output.PrefixList.PrefixListId) + d.SetId(aws.StringValue(output.PrefixList.PrefixListId)) - log.Printf("[INFO] Created Managed Prefix List %s (%s)", d.Get("name").(string), id) + log.Printf("[INFO] Created Managed Prefix List %s (%s)", d.Get("name").(string), d.Id()) - if err := waitUntilAwsManagedPrefixListSettled(id, conn, d.Timeout(schema.TimeoutCreate)); err != nil { - return fmt.Errorf("prefix list %s did not settle after create: %s", id, err) + if _, err := waiter.ManagedPrefixListCreated(conn, d.Id()); err != nil { + return fmt.Errorf("managed prefix list %s failed to create: %w", d.Id(), err) } - d.SetId(id) - return resourceAwsEc2ManagedPrefixListRead(d, meta) } @@ -132,10 +123,11 @@ func resourceAwsEc2ManagedPrefixListRead(d *schema.ResourceData, meta interface{ id := d.Id() pl, ok, err := getManagedPrefixList(id, conn) - switch { - case err != nil: - return err - case !ok: + if err != nil { + return fmt.Errorf("failed to get managed prefix list %s: %w", id, err) + } + + if !ok { log.Printf("[WARN] Managed Prefix List %s not found; removing from state.", id) d.SetId("") return nil @@ -146,7 +138,7 @@ func resourceAwsEc2ManagedPrefixListRead(d *schema.ResourceData, meta interface{ entries, err := getPrefixListEntries(id, conn, 0) if err != nil { - return err + return fmt.Errorf("error listing entries of EC2 Managed Prefix List (%s): %w", d.Id(), err) } if err := d.Set("entry", flattenPrefixListEntries(entries)); err != nil { @@ -161,70 +153,56 @@ func resourceAwsEc2ManagedPrefixListRead(d *schema.ResourceData, meta interface{ return fmt.Errorf("error settings attribute tags of managed prefix list %s: %s", id, err) } + d.Set("version", pl.Version) + return nil } func resourceAwsEc2ManagedPrefixListUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn id := d.Id() - modifyPrefixList := false input := ec2.ModifyManagedPrefixListInput{} input.PrefixListId = aws.String(id) - if d.HasChange("name") { + if d.HasChangeExcept("tags") { input.PrefixListName = aws.String(d.Get("name").(string)) - modifyPrefixList = true - } - - if d.HasChange("entry") { - pl, ok, err := getManagedPrefixList(id, conn) - switch { - case err != nil: - return err - case !ok: - return &resource.NotFoundError{} - } + currentVersion := int64(d.Get("version").(int)) + wait := false - currentVersion := aws.Int64Value(pl.Version) + oldAttr, newAttr := d.GetChange("entry") + os := oldAttr.(*schema.Set) + ns := newAttr.(*schema.Set) - oldEntries, err := getPrefixListEntries(id, conn, currentVersion) - if err != nil { - return err + if addEntries := ns.Difference(os); addEntries.Len() > 0 { + input.AddEntries = expandAddPrefixListEntries(addEntries) + input.CurrentVersion = aws.Int64(currentVersion) + wait = true } - newEntries := expandAddPrefixListEntries(d.Get("entry")) - adds, removes := computePrefixListEntriesModification(oldEntries, newEntries) - - if len(adds) > 0 || len(removes) > 0 { - if len(adds) > 0 { - // the Modify API doesn't like empty lists - input.AddEntries = adds - } - - if len(removes) > 0 { - // the Modify API doesn't like empty lists - input.RemoveEntries = removes - } - + if removeEntries := os.Difference(ns); removeEntries.Len() > 0 { + input.RemoveEntries = expandRemovePrefixListEntries(removeEntries) input.CurrentVersion = aws.Int64(currentVersion) - modifyPrefixList = true + wait = true } - } - if modifyPrefixList { log.Printf("[INFO] modifying managed prefix list %s...", id) - switch _, err := 
conn.ModifyManagedPrefixList(&input); { - case isAWSErr(err, "PrefixListVersionMismatch", "prefix list has the incorrect version number"): + _, err := conn.ModifyManagedPrefixList(&input) + + if isAWSErr(err, "PrefixListVersionMismatch", "prefix list has the incorrect version number") { return fmt.Errorf("failed to modify managed prefix list %s: conflicting change", id) - case err != nil: + } + + if err != nil { return fmt.Errorf("failed to modify managed prefix list %s: %s", id, err) } - if err := waitUntilAwsManagedPrefixListSettled(id, conn, d.Timeout(schema.TimeoutUpdate)); err != nil { - return fmt.Errorf("prefix list did not settle after update: %s", err) + if wait { + if _, err := waiter.ManagedPrefixListModified(conn, d.Id()); err != nil { + return fmt.Errorf("failed to modify managed prefix list %s: %w", d.Id(), err) + } } } @@ -246,31 +224,18 @@ func resourceAwsEc2ManagedPrefixListDelete(d *schema.ResourceData, meta interfac PrefixListId: aws.String(id), } - err := resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError { - _, err := conn.DeleteManagedPrefixList(&input) - switch { - case isManagedPrefixListModificationConflictErr(err): - return resource.RetryableError(err) - case isAWSErr(err, "InvalidPrefixListID.NotFound", ""): - log.Printf("[WARN] managed prefix list %s has already been deleted", id) - return nil - case err != nil: - return resource.NonRetryableError(err) - } + _, err := conn.DeleteManagedPrefixList(&input) + if tfawserr.ErrCodeEquals(err, "InvalidPrefixListID.NotFound") { return nil - }) - - if isResourceTimeoutError(err) { - _, err = conn.DeleteManagedPrefixList(&input) } if err != nil { - return fmt.Errorf("failed to delete managed prefix list %s: %s", id, err) + return fmt.Errorf("error deleting EC2 Managed Prefix List (%s): %w", d.Id(), err) } - if err := waitUntilAwsManagedPrefixListSettled(id, conn, d.Timeout(schema.TimeoutDelete)); err != nil { - return fmt.Errorf("prefix list %s did not settle after delete: %s", id, err) + if err := waiter.ManagedPrefixListDeleted(conn, d.Id()); err != nil { + return fmt.Errorf("failed to delete managed prefix list %s: %w", d.Id(), err) } return nil @@ -301,7 +266,25 @@ func expandAddPrefixListEntries(input interface{}) []*ec2.AddPrefixListEntry { return result } -func flattenPrefixListEntries(entries []*ec2.PrefixListEntry) *schema.Set { +func expandRemovePrefixListEntries(input interface{}) []*ec2.RemovePrefixListEntry { + if input == nil { + return nil + } + + list := input.(*schema.Set).List() + result := make([]*ec2.RemovePrefixListEntry, 0, len(list)) + + for _, entry := range list { + m := entry.(map[string]interface{}) + output := ec2.RemovePrefixListEntry{} + output.Cidr = aws.String(m["cidr_block"].(string)) + result = append(result, &output) + } + + return result +} + +func flattenPrefixListEntries(entries []*ec2.PrefixListEntry) []interface{} { list := make([]interface{}, 0, len(entries)) for _, entry := range entries { @@ -315,7 +298,7 @@ func flattenPrefixListEntries(entries []*ec2.PrefixListEntry) *schema.Set { list = append(list, m) } - return schema.NewSet(awsPrefixListEntrySetHashFunc, list) + return list } func getManagedPrefixList( @@ -365,111 +348,3 @@ func getPrefixListEntries( return result, nil } - -func computePrefixListEntriesModification( - oldEntries []*ec2.PrefixListEntry, - newEntries []*ec2.AddPrefixListEntry, -) ([]*ec2.AddPrefixListEntry, []*ec2.RemovePrefixListEntry) { - adds := map[string]string{} // CIDR => Description - - removes := map[string]struct{}{} // set 
of CIDR - for _, oldEntry := range oldEntries { - oldCIDR := aws.StringValue(oldEntry.Cidr) - removes[oldCIDR] = struct{}{} - } - - for _, newEntry := range newEntries { - newCIDR := aws.StringValue(newEntry.Cidr) - newDescription := aws.StringValue(newEntry.Description) - - for _, oldEntry := range oldEntries { - oldCIDR := aws.StringValue(oldEntry.Cidr) - oldDescription := aws.StringValue(oldEntry.Description) - - if oldCIDR == newCIDR { - delete(removes, oldCIDR) - - if oldDescription != newDescription { - adds[oldCIDR] = newDescription - } - - goto nextNewEntry - } - } - - // reach this point when no matching oldEntry found - adds[newCIDR] = newDescription - - nextNewEntry: - } - - addList := make([]*ec2.AddPrefixListEntry, 0, len(adds)) - for cidr, description := range adds { - addList = append(addList, &ec2.AddPrefixListEntry{ - Cidr: aws.String(cidr), - Description: aws.String(description), - }) - } - sort.Slice(addList, func(i, j int) bool { - return aws.StringValue(addList[i].Cidr) < aws.StringValue(addList[j].Cidr) - }) - - removeList := make([]*ec2.RemovePrefixListEntry, 0, len(removes)) - for cidr := range removes { - removeList = append(removeList, &ec2.RemovePrefixListEntry{ - Cidr: aws.String(cidr), - }) - } - sort.Slice(removeList, func(i, j int) bool { - return aws.StringValue(removeList[i].Cidr) < aws.StringValue(removeList[j].Cidr) - }) - - return addList, removeList -} - -func waitUntilAwsManagedPrefixListSettled( - id string, - conn *ec2.EC2, - timeout time.Duration, -) error { - log.Printf("[INFO] Waiting for managed prefix list %s to settle...", id) - - err := resource.Retry(timeout, func() *resource.RetryError { - settled, err := isAwsManagedPrefixListSettled(id, conn) - switch { - case err != nil: - return resource.NonRetryableError(err) - case !settled: - return resource.RetryableError(errors.New("resource not yet settled")) - } - - return nil - }) - - if isResourceTimeoutError(err) { - return fmt.Errorf("timed out: %s", err) - } - - return nil -} - -func isAwsManagedPrefixListSettled(id string, conn *ec2.EC2) (bool, error) { - pl, ok, err := getManagedPrefixList(id, conn) - switch { - case err != nil: - return false, err - case !ok: - return true, nil - } - - switch state := aws.StringValue(pl.State); state { - case ec2.PrefixListStateCreateComplete, ec2.PrefixListStateModifyComplete, ec2.PrefixListStateDeleteComplete: - return true, nil - case ec2.PrefixListStateCreateInProgress, ec2.PrefixListStateModifyInProgress, ec2.PrefixListStateDeleteInProgress: - return false, nil - case ec2.PrefixListStateCreateFailed, ec2.PrefixListStateModifyFailed, ec2.PrefixListStateDeleteFailed: - return false, fmt.Errorf("terminal state %s indicates failure", state) - default: - return false, fmt.Errorf("unexpected state %s", state) - } -} diff --git a/aws/resource_aws_ec2_managed_prefix_list_test.go b/aws/resource_aws_ec2_managed_prefix_list_test.go index ec2667036d3..6c767623cb4 100644 --- a/aws/resource_aws_ec2_managed_prefix_list_test.go +++ b/aws/resource_aws_ec2_managed_prefix_list_test.go @@ -2,136 +2,16 @@ package aws import ( "fmt" - "reflect" "regexp" - "sort" "testing" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func TestAccAwsEc2ManagedPrefixList_computePrefixListEntriesModification(t *testing.T) { - type testEntry struct { - CIDR string - Description string 
- } - - tests := []struct { - name string - oldEntries []testEntry - newEntries []testEntry - expectedAdds []testEntry - expectedRemoves []testEntry - }{ - { - name: "add two", - oldEntries: []testEntry{}, - newEntries: []testEntry{{"1.2.3.4/32", "test1"}, {"2.3.4.5/32", "test2"}}, - expectedAdds: []testEntry{{"1.2.3.4/32", "test1"}, {"2.3.4.5/32", "test2"}}, - expectedRemoves: []testEntry{}, - }, - { - name: "remove one", - oldEntries: []testEntry{{"1.2.3.4/32", "test1"}, {"2.3.4.5/32", "test2"}}, - newEntries: []testEntry{{"1.2.3.4/32", "test1"}}, - expectedAdds: []testEntry{}, - expectedRemoves: []testEntry{{"2.3.4.5/32", "test2"}}, - }, - { - name: "modify description of one", - oldEntries: []testEntry{{"1.2.3.4/32", "test1"}, {"2.3.4.5/32", "test2"}}, - newEntries: []testEntry{{"1.2.3.4/32", "test1"}, {"2.3.4.5/32", "test2-1"}}, - expectedAdds: []testEntry{{"2.3.4.5/32", "test2-1"}}, - expectedRemoves: []testEntry{}, - }, - { - name: "add third", - oldEntries: []testEntry{{"1.2.3.4/32", "test1"}, {"2.3.4.5/32", "test2"}}, - newEntries: []testEntry{{"1.2.3.4/32", "test1"}, {"2.3.4.5/32", "test2"}, {"3.4.5.6/32", "test3"}}, - expectedAdds: []testEntry{{"3.4.5.6/32", "test3"}}, - expectedRemoves: []testEntry{}, - }, - { - name: "add and remove one", - oldEntries: []testEntry{{"1.2.3.4/32", "test1"}, {"2.3.4.5/32", "test2"}}, - newEntries: []testEntry{{"1.2.3.4/32", "test1"}, {"3.4.5.6/32", "test3"}}, - expectedAdds: []testEntry{{"3.4.5.6/32", "test3"}}, - expectedRemoves: []testEntry{{"2.3.4.5/32", "test2"}}, - }, - { - name: "add and remove one with description change", - oldEntries: []testEntry{{"1.2.3.4/32", "test1"}, {"2.3.4.5/32", "test2"}}, - newEntries: []testEntry{{"1.2.3.4/32", "test1-1"}, {"3.4.5.6/32", "test3"}}, - expectedAdds: []testEntry{{"1.2.3.4/32", "test1-1"}, {"3.4.5.6/32", "test3"}}, - expectedRemoves: []testEntry{{"2.3.4.5/32", "test2"}}, - }, - { - name: "basic test update", - oldEntries: []testEntry{{"1.0.0.0/8", "Test1"}}, - newEntries: []testEntry{{"1.0.0.0/8", "Test1-1"}, {"2.2.0.0/16", "Test2"}}, - expectedAdds: []testEntry{{"1.0.0.0/8", "Test1-1"}, {"2.2.0.0/16", "Test2"}}, - expectedRemoves: []testEntry{}, - }, - } - - for _, test := range tests { - oldEntryList := []*ec2.PrefixListEntry(nil) - for _, entry := range test.oldEntries { - oldEntryList = append(oldEntryList, &ec2.PrefixListEntry{ - Cidr: aws.String(entry.CIDR), - Description: aws.String(entry.Description), - }) - } - - newEntryList := []*ec2.AddPrefixListEntry(nil) - for _, entry := range test.newEntries { - newEntryList = append(newEntryList, &ec2.AddPrefixListEntry{ - Cidr: aws.String(entry.CIDR), - Description: aws.String(entry.Description), - }) - } - - addList, removeList := computePrefixListEntriesModification(oldEntryList, newEntryList) - - if len(addList) != len(test.expectedAdds) { - t.Errorf("expected %d adds, got %d", len(test.expectedAdds), len(addList)) - } - - for i, added := range addList { - expected := test.expectedAdds[i] - - actualCidr := aws.StringValue(added.Cidr) - expectedCidr := expected.CIDR - if actualCidr != expectedCidr { - t.Errorf("add[%d]: expected cidr %s, got %s", i, expectedCidr, actualCidr) - } - - actualDesc := aws.StringValue(added.Description) - expectedDesc := expected.Description - if actualDesc != expectedDesc { - t.Errorf("add[%d]: expected description '%s', got '%s'", i, expectedDesc, actualDesc) - } - } - - if len(removeList) != len(test.expectedRemoves) { - t.Errorf("expected %d removes, got %d", len(test.expectedRemoves), len(removeList)) - } - - for 
i, removed := range removeList { - expected := test.expectedRemoves[i] - - actualCidr := aws.StringValue(removed.Cidr) - expectedCidr := expected.CIDR - if actualCidr != expectedCidr { - t.Errorf("add[%d]: expected cidr %s, got %s", i, expectedCidr, actualCidr) - } - } - } -} - func testAccCheckAwsEc2ManagedPrefixListDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).ec2conn @@ -169,52 +49,8 @@ func testAccCheckAwsEc2ManagedPrefixListVersion( func TestAccAwsEc2ManagedPrefixList_basic(t *testing.T) { resourceName := "aws_ec2_managed_prefix_list.test" pl, entries := ec2.ManagedPrefixList{}, []*ec2.PrefixListEntry(nil) - - checkAttributes := func(*terraform.State) error { - if actual := aws.StringValue(pl.AddressFamily); actual != "IPv4" { - return fmt.Errorf("bad address family: %s", actual) - } - - if actual := aws.Int64Value(pl.MaxEntries); actual != 5 { - return fmt.Errorf("bad max entries: %d", actual) - } - - if actual := aws.StringValue(pl.OwnerId); actual != testAccGetAccountID() { - return fmt.Errorf("bad owner id: %s", actual) - } - - if actual := aws.StringValue(pl.PrefixListName); actual != "tf-test-basic-create" { - return fmt.Errorf("bad name: %s", actual) - } - - sort.Slice(pl.Tags, func(i, j int) bool { - return aws.StringValue(pl.Tags[i].Key) < aws.StringValue(pl.Tags[j].Key) - }) - - expectTags := []*ec2.Tag{ - {Key: aws.String("Key1"), Value: aws.String("Value1")}, - {Key: aws.String("Key2"), Value: aws.String("Value2")}, - } - - if !reflect.DeepEqual(expectTags, pl.Tags) { - return fmt.Errorf("expected tags %#v, got %#v", expectTags, pl.Tags) - } - - sort.Slice(entries, func(i, j int) bool { - return aws.StringValue(entries[i].Cidr) < aws.StringValue(entries[j].Cidr) - }) - - expectEntries := []*ec2.PrefixListEntry{ - {Cidr: aws.String("1.0.0.0/8"), Description: aws.String("Test1")}, - {Cidr: aws.String("2.0.0.0/8"), Description: aws.String("Test2")}, - } - - if !reflect.DeepEqual(expectEntries, entries) { - return fmt.Errorf("expected entries %#v, got %#v", expectEntries, entries) - } - - return nil - } + rName1 := acctest.RandomWithPrefix("tf-acc-test") + rName2 := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -222,12 +58,11 @@ func TestAccAwsEc2ManagedPrefixList_basic(t *testing.T) { CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, Steps: []resource.TestStep{ { - Config: testAccAwsEc2ManagedPrefixListConfig_basic_create, + Config: testAccAwsEc2ManagedPrefixListConfig_basic_create(rName1), ResourceName: resourceName, Check: resource.ComposeAggregateTestCheckFunc( testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, &entries), - checkAttributes, - resource.TestCheckResourceAttr(resourceName, "name", "tf-test-basic-create"), + resource.TestCheckResourceAttr(resourceName, "name", rName1), testAccMatchResourceAttrRegionalARN(resourceName, "arn", "ec2", regexp.MustCompile(`prefix-list/pl-[[:xdigit:]]+`)), resource.TestCheckResourceAttr(resourceName, "address_family", "IPv4"), resource.TestCheckResourceAttr(resourceName, "max_entries", "5"), @@ -253,11 +88,11 @@ func TestAccAwsEc2ManagedPrefixList_basic(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAwsEc2ManagedPrefixListConfig_basic_update, + Config: testAccAwsEc2ManagedPrefixListConfig_basic_update(rName2), ResourceName: resourceName, Check: resource.ComposeAggregateTestCheckFunc( testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, &entries), - 
resource.TestCheckResourceAttr(resourceName, "name", "tf-test-basic-update"), + resource.TestCheckResourceAttr(resourceName, "name", rName2), resource.TestCheckResourceAttr(resourceName, "entry.#", "2"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "entry.*", map[string]string{ "cidr_block": "1.0.0.0/8", @@ -273,13 +108,19 @@ func TestAccAwsEc2ManagedPrefixList_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "tags.Key3", "Value3"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } -const testAccAwsEc2ManagedPrefixListConfig_basic_create = ` +func testAccAwsEc2ManagedPrefixListConfig_basic_create(rName string) string { + return fmt.Sprintf(` resource "aws_ec2_managed_prefix_list" "test" { - name = "tf-test-basic-create" + name = %[1]q address_family = "IPv4" max_entries = 5 @@ -298,11 +139,13 @@ resource "aws_ec2_managed_prefix_list" "test" { Key2 = "Value2" } } -` +`, rName) +} -const testAccAwsEc2ManagedPrefixListConfig_basic_update = ` +func testAccAwsEc2ManagedPrefixListConfig_basic_update(rName string) string { + return fmt.Sprintf(` resource "aws_ec2_managed_prefix_list" "test" { - name = "tf-test-basic-update" + name = %[1]q address_family = "IPv4" max_entries = 5 @@ -321,7 +164,8 @@ resource "aws_ec2_managed_prefix_list" "test" { Key3 = "Value3" } } -` +`, rName) +} func testAccAwsEc2ManagedPrefixListExists( name string, @@ -368,6 +212,7 @@ func testAccAwsEc2ManagedPrefixListExists( func TestAccAwsEc2ManagedPrefixList_disappears(t *testing.T) { resourceName := "aws_ec2_managed_prefix_list.test" pl := ec2.ManagedPrefixList{} + rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -375,7 +220,7 @@ func TestAccAwsEc2ManagedPrefixList_disappears(t *testing.T) { CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, Steps: []resource.TestStep{ { - Config: testAccAwsEc2ManagedPrefixListConfig_disappears, + Config: testAccAwsEc2ManagedPrefixListConfig_disappears(rName), ResourceName: resourceName, Check: resource.ComposeAggregateTestCheckFunc( testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), @@ -387,9 +232,10 @@ func TestAccAwsEc2ManagedPrefixList_disappears(t *testing.T) { }) } -const testAccAwsEc2ManagedPrefixListConfig_disappears = ` +func testAccAwsEc2ManagedPrefixListConfig_disappears(rName string) string { + return fmt.Sprintf(` resource "aws_ec2_managed_prefix_list" "test" { - name = "tf-test-disappears" + name = %[1]q address_family = "IPv4" max_entries = 2 @@ -397,21 +243,14 @@ resource "aws_ec2_managed_prefix_list" "test" { cidr_block = "1.0.0.0/8" } } -` +`, rName) +} func TestAccAwsEc2ManagedPrefixList_name(t *testing.T) { resourceName := "aws_ec2_managed_prefix_list.test" pl := ec2.ManagedPrefixList{} - - checkName := func(name string) resource.TestCheckFunc { - return func(*terraform.State) error { - if actual := aws.StringValue(pl.PrefixListName); actual != name { - return fmt.Errorf("expected name %s, got %s", name, actual) - } - - return nil - } - } + rName1 := acctest.RandomWithPrefix("tf-acc-test") + rName2 := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -419,22 +258,25 @@ func TestAccAwsEc2ManagedPrefixList_name(t *testing.T) { CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, Steps: []resource.TestStep{ { - Config: testAccAwsEc2ManagedPrefixListConfig_name_create, + Config: 
testAccAwsEc2ManagedPrefixListConfig_name_create(rName1), ResourceName: resourceName, Check: resource.ComposeAggregateTestCheckFunc( testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), - resource.TestCheckResourceAttr(resourceName, "name", "tf-test-name-create"), - checkName("tf-test-name-create"), + resource.TestCheckResourceAttr(resourceName, "name", rName1), testAccCheckAwsEc2ManagedPrefixListVersion(&pl, 1), ), }, { - Config: testAccAwsEc2ManagedPrefixListConfig_name_update, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsEc2ManagedPrefixListConfig_name_update(rName2), ResourceName: resourceName, Check: resource.ComposeAggregateTestCheckFunc( testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), - resource.TestCheckResourceAttr(resourceName, "name", "tf-test-name-update"), - checkName("tf-test-name-update"), + resource.TestCheckResourceAttr(resourceName, "name", rName2), testAccCheckAwsEc2ManagedPrefixListVersion(&pl, 1), ), }, @@ -447,54 +289,30 @@ func TestAccAwsEc2ManagedPrefixList_name(t *testing.T) { }) } -const testAccAwsEc2ManagedPrefixListConfig_name_create = ` +func testAccAwsEc2ManagedPrefixListConfig_name_create(rName string) string { + return fmt.Sprintf(` resource "aws_ec2_managed_prefix_list" "test" { - name = "tf-test-name-create" + name = %[1]q address_family = "IPv4" max_entries = 5 } -` +`, rName) +} -const testAccAwsEc2ManagedPrefixListConfig_name_update = ` +func testAccAwsEc2ManagedPrefixListConfig_name_update(rName string) string { + return fmt.Sprintf(` resource "aws_ec2_managed_prefix_list" "test" { - name = "tf-test-name-update" + name = %[1]q address_family = "IPv4" max_entries = 5 } -` +`, rName) +} func TestAccAwsEc2ManagedPrefixList_tags(t *testing.T) { resourceName := "aws_ec2_managed_prefix_list.test" pl := ec2.ManagedPrefixList{} - - checkTags := func(m map[string]string) resource.TestCheckFunc { - return func(*terraform.State) error { - sort.Slice(pl.Tags, func(i, j int) bool { - return aws.StringValue(pl.Tags[i].Key) < aws.StringValue(pl.Tags[j].Key) - }) - - expectTags := []*ec2.Tag(nil) - - if m != nil { - for k, v := range m { - expectTags = append(expectTags, &ec2.Tag{ - Key: aws.String(k), - Value: aws.String(v), - }) - } - - sort.Slice(expectTags, func(i, j int) bool { - return aws.StringValue(expectTags[i].Key) < aws.StringValue(expectTags[j].Key) - }) - } - - if !reflect.DeepEqual(expectTags, pl.Tags) { - return fmt.Errorf("expected tags %#v, got %#v", expectTags, pl.Tags) - } - - return nil - } - } + rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -502,11 +320,10 @@ func TestAccAwsEc2ManagedPrefixList_tags(t *testing.T) { CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, Steps: []resource.TestStep{ { - Config: testAccAwsEc2ManagedPrefixListConfig_tags_none, + Config: testAccAwsEc2ManagedPrefixListConfig_tags_none(rName), ResourceName: resourceName, Check: resource.ComposeAggregateTestCheckFunc( testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), - checkTags(nil), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, @@ -516,11 +333,10 @@ func TestAccAwsEc2ManagedPrefixList_tags(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAwsEc2ManagedPrefixListConfig_tags_addSome, + Config: testAccAwsEc2ManagedPrefixListConfig_tags_addSome(rName), ResourceName: resourceName, Check: resource.ComposeAggregateTestCheckFunc( 
testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), - checkTags(map[string]string{"Key1": "Value1", "Key2": "Value2", "Key3": "Value3"}), resource.TestCheckResourceAttr(resourceName, "tags.%", "3"), resource.TestCheckResourceAttr(resourceName, "tags.Key1", "Value1"), resource.TestCheckResourceAttr(resourceName, "tags.Key2", "Value2"), @@ -533,11 +349,10 @@ func TestAccAwsEc2ManagedPrefixList_tags(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAwsEc2ManagedPrefixListConfig_tags_dropOrModifySome, + Config: testAccAwsEc2ManagedPrefixListConfig_tags_dropOrModifySome(rName), ResourceName: resourceName, Check: resource.ComposeAggregateTestCheckFunc( testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), - checkTags(map[string]string{"Key2": "Value2-1", "Key3": "Value3"}), resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), resource.TestCheckResourceAttr(resourceName, "tags.Key2", "Value2-1"), resource.TestCheckResourceAttr(resourceName, "tags.Key3", "Value3"), @@ -549,11 +364,10 @@ func TestAccAwsEc2ManagedPrefixList_tags(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAwsEc2ManagedPrefixListConfig_tags_empty, + Config: testAccAwsEc2ManagedPrefixListConfig_tags_empty(rName), ResourceName: resourceName, Check: resource.ComposeAggregateTestCheckFunc( testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), - checkTags(nil), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, @@ -563,11 +377,10 @@ func TestAccAwsEc2ManagedPrefixList_tags(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAwsEc2ManagedPrefixListConfig_tags_none, + Config: testAccAwsEc2ManagedPrefixListConfig_tags_none(rName), ResourceName: resourceName, Check: resource.ComposeAggregateTestCheckFunc( testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), - checkTags(nil), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, @@ -580,17 +393,20 @@ func TestAccAwsEc2ManagedPrefixList_tags(t *testing.T) { }) } -const testAccAwsEc2ManagedPrefixListConfig_tags_none = ` +func testAccAwsEc2ManagedPrefixListConfig_tags_none(rName string) string { + return fmt.Sprintf(` resource "aws_ec2_managed_prefix_list" "test" { - name = "tf-test-acc" + name = %[1]q address_family = "IPv4" max_entries = 5 } -` +`, rName) +} -const testAccAwsEc2ManagedPrefixListConfig_tags_addSome = ` +func testAccAwsEc2ManagedPrefixListConfig_tags_addSome(rName string) string { + return fmt.Sprintf(` resource "aws_ec2_managed_prefix_list" "test" { - name = "tf-test-acc" + name = %[1]q address_family = "IPv4" max_entries = 5 @@ -600,11 +416,13 @@ resource "aws_ec2_managed_prefix_list" "test" { Key3 = "Value3" } } -` +`, rName) +} -const testAccAwsEc2ManagedPrefixListConfig_tags_dropOrModifySome = ` +func testAccAwsEc2ManagedPrefixListConfig_tags_dropOrModifySome(rName string) string { + return fmt.Sprintf(` resource "aws_ec2_managed_prefix_list" "test" { - name = "tf-test-acc" + name = %[1]q address_family = "IPv4" max_entries = 5 @@ -613,97 +431,24 @@ resource "aws_ec2_managed_prefix_list" "test" { Key3 = "Value3" } } -` - -const testAccAwsEc2ManagedPrefixListConfig_tags_empty = ` -resource "aws_ec2_managed_prefix_list" "test" { - name = "tf-test-acc" - address_family = "IPv4" - max_entries = 5 - tags = {} +`, rName) } -` -func TestAccAwsEc2ManagedPrefixList_entryConfigMode(t *testing.T) { - resourceName := "aws_ec2_managed_prefix_list.test" - prefixList := ec2.ManagedPrefixList{} - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { 
testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsEc2ManagedPrefixListConfig_entryConfigMode_blocks, - ResourceName: resourceName, - Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListExists(resourceName, &prefixList, nil), - resource.TestCheckResourceAttr(resourceName, "entry.#", "2"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccAwsEc2ManagedPrefixListConfig_entryConfigMode_noBlocks, - ResourceName: resourceName, - Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListExists(resourceName, &prefixList, nil), - resource.TestCheckResourceAttr(resourceName, "entry.#", "2"), - ), - }, - { - Config: testAccAwsEc2ManagedPrefixListConfig_entryConfigMode_zeroed, - ResourceName: resourceName, - Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListExists(resourceName, &prefixList, nil), - resource.TestCheckResourceAttr(resourceName, "entry.#", "0"), - ), - }, - }, - }) -} - -const testAccAwsEc2ManagedPrefixListConfig_entryConfigMode_blocks = ` +func testAccAwsEc2ManagedPrefixListConfig_tags_empty(rName string) string { + return fmt.Sprintf(` resource "aws_ec2_managed_prefix_list" "test" { - name = "tf-test-acc" - max_entries = 5 + name = %[1]q address_family = "IPv4" - - entry { - cidr_block = "1.0.0.0/8" - description = "Entry1" - } - - entry { - cidr_block = "2.0.0.0/8" - description = "Entry2" - } -} -` - -const testAccAwsEc2ManagedPrefixListConfig_entryConfigMode_noBlocks = ` -resource "aws_ec2_managed_prefix_list" "test" { - name = "tf-test-acc" max_entries = 5 - address_family = "IPv4" + tags = {} } -` - -const testAccAwsEc2ManagedPrefixListConfig_entryConfigMode_zeroed = ` -resource "aws_ec2_managed_prefix_list" "test" { - name = "tf-test-acc" - max_entries = 5 - address_family = "IPv4" - entry = [] +`, rName) } -` func TestAccAwsEc2ManagedPrefixList_exceedLimit(t *testing.T) { resourceName := "aws_ec2_managed_prefix_list.test" prefixList := ec2.ManagedPrefixList{} + rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -711,7 +456,7 @@ func TestAccAwsEc2ManagedPrefixList_exceedLimit(t *testing.T) { CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, Steps: []resource.TestStep{ { - Config: testAccAwsEc2ManagedPrefixListConfig_exceedLimit(2), + Config: testAccAwsEc2ManagedPrefixListConfig_exceedLimit(rName, 2), ResourceName: resourceName, Check: resource.ComposeAggregateTestCheckFunc( testAccAwsEc2ManagedPrefixListExists(resourceName, &prefixList, nil), @@ -719,7 +464,7 @@ func TestAccAwsEc2ManagedPrefixList_exceedLimit(t *testing.T) { ), }, { - Config: testAccAwsEc2ManagedPrefixListConfig_exceedLimit(3), + Config: testAccAwsEc2ManagedPrefixListConfig_exceedLimit(rName, 3), ResourceName: resourceName, ExpectError: regexp.MustCompile(`You've reached the maximum number of entries for the prefix list.`), }, @@ -727,7 +472,7 @@ func TestAccAwsEc2ManagedPrefixList_exceedLimit(t *testing.T) { }) } -func testAccAwsEc2ManagedPrefixListConfig_exceedLimit(count int) string { +func testAccAwsEc2ManagedPrefixListConfig_exceedLimit(rName string, count int) string { entries := `` for i := 0; i < count; i++ { entries += fmt.Sprintf(` @@ -740,11 +485,10 @@ func testAccAwsEc2ManagedPrefixListConfig_exceedLimit(count int) string { return fmt.Sprintf(` 
resource "aws_ec2_managed_prefix_list" "test" { - name = "tf-test-acc" + name = %[2]q address_family = "IPv4" max_entries = 2 %[1]s } -`, - entries) +`, entries, rName) } diff --git a/website/docs/r/ec2_managed_prefix_list.html.markdown b/website/docs/r/ec2_managed_prefix_list.html.markdown index d372d874d75..d0f25d54b9d 100644 --- a/website/docs/r/ec2_managed_prefix_list.html.markdown +++ b/website/docs/r/ec2_managed_prefix_list.html.markdown @@ -10,13 +10,6 @@ description: |- Provides a managed prefix list resource. -~> **NOTE on Prefix Lists and Prefix List Entries:** Terraform currently -provides both a standalone [Managed Prefix List Entry resource](ec2_managed_prefix_list_entry.html), -and a Prefix List resource with an `entry` set defined in-line. At this time you -cannot use a Prefix List with in-line rules in conjunction with any Prefix List Entry -resources. Doing so will cause a conflict of rule settings and will unpredictably -fail or overwrite rules. - ~> **NOTE on `max_entries`:** When you reference a Prefix List in a resource, the maximum number of entries for the prefix lists counts as the same number of rules or entries for the resource. For example, if you create a prefix list with a maximum @@ -75,6 +68,7 @@ In addition to all arguments above, the following attributes are exported: * `id` - The ID of the prefix list. * `arn` - The ARN of the prefix list. * `owner_id` - The ID of the AWS account that owns this prefix list. +* `version` - The latest version of this prefix list. ## Import From aba670720e4c9847be3097fae5a6794ba86261f7 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 16 Dec 2020 16:14:44 -0500 Subject: [PATCH 0253/1212] tests/resource/aws_elasticsearch_domain: Use multierror in sweeper and skip domains stuck in deleting status (#16648) Workaround for buggy API. 
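The sweeper now collects every per-domain failure with
github.com/hashicorp/go-multierror instead of returning on the first error,
so one stuck domain cannot abort the rest of the sweep. As a minimal,
self-contained sketch of that error-handling pattern (the `sweep` helper and
`deleteFn` callback here are illustrative, not part of this change):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/go-multierror"
)

// sweep tries to delete every named resource, accumulating failures
// rather than aborting on the first one.
func sweep(names []string, deleteFn func(string) error) error {
	var sweeperErrs *multierror.Error

	for _, name := range names {
		if err := deleteFn(name); err != nil {
			sweeperErr := fmt.Errorf("error deleting resource (%s): %w", name, err)
			log.Printf("[ERROR] %s", sweeperErr)
			sweeperErrs = multierror.Append(sweeperErrs, sweeperErr)
			continue
		}
	}

	// ErrorOrNil returns nil when nothing was appended.
	return sweeperErrs.ErrorOrNil()
}

func main() {
	// Pretend the second resource is stuck and fails to delete.
	err := sweep([]string{"tf-test-1", "tf-test-2"}, func(name string) error {
		if name == "tf-test-2" {
			return fmt.Errorf("stuck in deleting status")
		}
		return nil
	})
	fmt.Println("combined result:", err)
}
```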
Output from sweeper in AWS Commercial: ``` 2020/12/08 12:52:28 [DEBUG] Running Sweepers for region (us-west-2): 2020/12/08 12:52:28 [DEBUG] Running Sweeper (aws_elasticsearch_domain) in region (us-west-2) 2020/12/08 12:52:30 [INFO] Skipping Elasticsearch Domain (tf-test-5082256297594881822) with deleted status 2020/12/08 12:52:30 [INFO] Skipping Elasticsearch Domain (tf-test-841758105109071952) with deleted status 2020/12/08 12:52:30 Sweeper Tests ran successfully: - aws_elasticsearch_domain 2020/12/08 12:52:30 [DEBUG] Running Sweepers for region (us-east-1): 2020/12/08 12:52:30 [DEBUG] Running Sweeper (aws_elasticsearch_domain) in region (us-east-1) 2020/12/08 12:52:31 Sweeper Tests ran successfully: - aws_elasticsearch_domain ok github.com/terraform-providers/terraform-provider-aws/aws 4.998s ``` Output from sweeper in AWS GovCloud (US): ``` 2020/12/08 12:54:08 [DEBUG] Running Sweepers for region (us-gov-west-1): 2020/12/08 12:54:08 [DEBUG] Running Sweeper (aws_elasticsearch_domain) in region (us-gov-west-1) 2020/12/08 12:54:10 [DEBUG] Deleting ElasticSearch domain: "tf-test-579248222605063900" 2020/12/08 12:54:11 [DEBUG] Waiting for ElasticSearch domain "tf-test-579248222605063900" to be deleted 2020/12/08 12:56:54 Sweeper Tests ran successfully: - aws_elasticsearch_domain ok github.com/terraform-providers/terraform-provider-aws/aws 168.818s ``` --- aws/resource_aws_elasticsearch_domain_test.go | 77 +++++++++++++++---- 1 file changed, 61 insertions(+), 16 deletions(-) diff --git a/aws/resource_aws_elasticsearch_domain_test.go b/aws/resource_aws_elasticsearch_domain_test.go index ad5dc7d74b0..b904795d0c5 100644 --- a/aws/resource_aws_elasticsearch_domain_test.go +++ b/aws/resource_aws_elasticsearch_domain_test.go @@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" @@ -26,35 +27,79 @@ func init() { func testSweepElasticSearchDomains(region string) error { client, err := sharedClientForRegion(region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("error getting client: %w", err) } conn := client.(*AWSClient).esconn - out, err := conn.ListDomainNames(&elasticsearch.ListDomainNamesInput{}) + var sweeperErrs *multierror.Error + + input := &elasticsearch.ListDomainNamesInput{} + + // ListDomainNames has no pagination support whatsoever + output, err := conn.ListDomainNames(input) + + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping Elasticsearch Domain sweep for %s: %s", region, err) + return sweeperErrs.ErrorOrNil() + } + if err != nil { - if testSweepSkipSweepError(err) { - log.Printf("[WARN] Skipping Elasticsearch Domain sweep for %s: %s", region, err) - return nil - } - return fmt.Errorf("Error retrieving Elasticsearch Domains: %s", err) + sweeperErr := fmt.Errorf("error listing Elasticsearch Domains: %w", err) + log.Printf("[ERROR] %s", sweeperErr) + sweeperErrs = multierror.Append(sweeperErrs, sweeperErr) + return sweeperErrs.ErrorOrNil() } - for _, domain := range out.DomainNames { - log.Printf("[INFO] Deleting Elasticsearch Domain: %s", *domain.DomainName) - _, err := conn.DeleteElasticsearchDomain(&elasticsearch.DeleteElasticsearchDomainInput{ - DomainName: domain.DomainName, - }) + if 
output == nil { + log.Printf("[WARN] Skipping Elasticsearch Domain sweep for %s: empty response", region) + return sweeperErrs.ErrorOrNil() + } + + for _, domainInfo := range output.DomainNames { + if domainInfo == nil { + continue + } + + name := aws.StringValue(domainInfo.DomainName) + + // Elasticsearch Domains have regularly gotten stuck in a "being deleted" state + // e.g. Deleted and Processing are both true for days in the API + // Filter out domains that are Deleted already. + + input := &elasticsearch.DescribeElasticsearchDomainInput{ + DomainName: domainInfo.DomainName, + } + + output, err := conn.DescribeElasticsearchDomain(input) + if err != nil { - log.Printf("[ERROR] Failed to delete Elasticsearch Domain %s: %s", *domain.DomainName, err) + sweeperErr := fmt.Errorf("error describing Elasticsearch Domain (%s): %w", name, err) + log.Printf("[ERROR] %s", sweeperErr) + sweeperErrs = multierror.Append(sweeperErrs, sweeperErr) + continue + } + + if output != nil && output.DomainStatus != nil && aws.BoolValue(output.DomainStatus.Deleted) { + log.Printf("[INFO] Skipping Elasticsearch Domain (%s) with deleted status", name) continue } - err = resourceAwsElasticSearchDomainDeleteWaiter(*domain.DomainName, conn) + + r := resourceAwsElasticSearchDomain() + d := r.Data(nil) + d.SetId(name) + d.Set("domain_name", name) + + err = r.Delete(d, client) + if err != nil { - log.Printf("[ERROR] Failed to wait for deletion of Elasticsearch Domain %s: %s", *domain.DomainName, err) + sweeperErr := fmt.Errorf("error deleting Elasticsearch Domain (%s): %w", name, err) + log.Printf("[ERROR] %s", sweeperErr) + sweeperErrs = multierror.Append(sweeperErrs, sweeperErr) + continue } } - return nil + return sweeperErrs.ErrorOrNil() } func TestAccAWSElasticSearchDomain_basic(t *testing.T) { From 6ef340ca867b52f0a3e4985da77a97a07b64c8f8 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 16 Dec 2020 16:16:25 -0500 Subject: [PATCH 0254/1212] service/ec2: Use paginated functions in plural COIP and Local Gateway data sources (#16669) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16659 --- aws/data_source_aws_ec2_coip_pools.go | 33 +++++++++++++------ ...urce_aws_ec2_local_gateway_route_tables.go | 33 +++++++++++++------ ..._local_gateway_virtual_interface_groups.go | 24 +++++++++++--- aws/data_source_aws_ec2_local_gateways.go | 33 +++++++++++++------ 4 files changed, 88 insertions(+), 35 deletions(-) diff --git a/aws/data_source_aws_ec2_coip_pools.go b/aws/data_source_aws_ec2_coip_pools.go index a1b8d30c02e..5bdabf1ea3e 100644 --- a/aws/data_source_aws_ec2_coip_pools.go +++ b/aws/data_source_aws_ec2_coip_pools.go @@ -2,7 +2,6 @@ package aws import ( "fmt" - "log" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" @@ -49,26 +48,40 @@ func dataSourceAwsEc2CoipPoolsRead(d *schema.ResourceData, meta interface{}) err req.Filters = nil } - log.Printf("[DEBUG] DescribeCoipPools %s\n", req) - resp, err := conn.DescribeCoipPools(req) + var coipPools []*ec2.CoipPool + + err := conn.DescribeCoipPoolsPages(req, func(page *ec2.DescribeCoipPoolsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + coipPools = append(coipPools, page.CoipPools...) 
+ + return !lastPage + }) + if err != nil { return fmt.Errorf("error describing EC2 COIP Pools: %w", err) } - if resp == nil || len(resp.CoipPools) == 0 { - return fmt.Errorf("no matching Coip Pool found") + if len(coipPools) == 0 { + return fmt.Errorf("no matching EC2 COIP Pools found") } - coippools := make([]string, 0) + var poolIDs []string + + for _, coipPool := range coipPools { + if coipPool == nil { + continue + } - for _, coippool := range resp.CoipPools { - coippools = append(coippools, aws.StringValue(coippool.PoolId)) + poolIDs = append(poolIDs, aws.StringValue(coipPool.PoolId)) } d.SetId(meta.(*AWSClient).region) - if err := d.Set("pool_ids", coippools); err != nil { - return fmt.Errorf("Error setting coip pool ids: %s", err) + if err := d.Set("pool_ids", poolIDs); err != nil { + return fmt.Errorf("error setting pool_ids: %w", err) } return nil diff --git a/aws/data_source_aws_ec2_local_gateway_route_tables.go b/aws/data_source_aws_ec2_local_gateway_route_tables.go index cf86861ab1d..d2b2a873fcd 100644 --- a/aws/data_source_aws_ec2_local_gateway_route_tables.go +++ b/aws/data_source_aws_ec2_local_gateway_route_tables.go @@ -2,7 +2,6 @@ package aws import ( "fmt" - "log" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" @@ -45,26 +44,40 @@ func dataSourceAwsEc2LocalGatewayRouteTablesRead(d *schema.ResourceData, meta in req.Filters = nil } - log.Printf("[DEBUG] DescribeLocalGatewayRouteTables %s\n", req) - resp, err := conn.DescribeLocalGatewayRouteTables(req) + var localGatewayRouteTables []*ec2.LocalGatewayRouteTable + + err := conn.DescribeLocalGatewayRouteTablesPages(req, func(page *ec2.DescribeLocalGatewayRouteTablesOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + localGatewayRouteTables = append(localGatewayRouteTables, page.LocalGatewayRouteTables...) 
+ + return !lastPage + }) + if err != nil { return fmt.Errorf("error describing EC2 Local Gateway Route Tables: %w", err) } - if resp == nil || len(resp.LocalGatewayRouteTables) == 0 { - return fmt.Errorf("no matching Local Gateway Route Table found") + if len(localGatewayRouteTables) == 0 { + return fmt.Errorf("no matching EC2 Local Gateway Route Tables found") } - localgatewayroutetables := make([]string, 0) + var ids []string + + for _, localGatewayRouteTable := range localGatewayRouteTables { + if localGatewayRouteTable == nil { + continue + } - for _, localgatewayroutetable := range resp.LocalGatewayRouteTables { - localgatewayroutetables = append(localgatewayroutetables, aws.StringValue(localgatewayroutetable.LocalGatewayRouteTableId)) + ids = append(ids, aws.StringValue(localGatewayRouteTable.LocalGatewayRouteTableId)) } d.SetId(meta.(*AWSClient).region) - if err := d.Set("ids", localgatewayroutetables); err != nil { - return fmt.Errorf("Error setting local gateway route table ids: %s", err) + if err := d.Set("ids", ids); err != nil { + return fmt.Errorf("error setting ids: %w", err) } return nil diff --git a/aws/data_source_aws_ec2_local_gateway_virtual_interface_groups.go b/aws/data_source_aws_ec2_local_gateway_virtual_interface_groups.go index d7eecdbb3b6..abc927d14c6 100644 --- a/aws/data_source_aws_ec2_local_gateway_virtual_interface_groups.go +++ b/aws/data_source_aws_ec2_local_gateway_virtual_interface_groups.go @@ -47,19 +47,33 @@ func dataSourceAwsEc2LocalGatewayVirtualInterfaceGroupsRead(d *schema.ResourceDa input.Filters = nil } - output, err := conn.DescribeLocalGatewayVirtualInterfaceGroups(input) + var localGatewayVirtualInterfaceGroups []*ec2.LocalGatewayVirtualInterfaceGroup + + err := conn.DescribeLocalGatewayVirtualInterfaceGroupsPages(input, func(page *ec2.DescribeLocalGatewayVirtualInterfaceGroupsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + localGatewayVirtualInterfaceGroups = append(localGatewayVirtualInterfaceGroups, page.LocalGatewayVirtualInterfaceGroups...) + + return !lastPage + }) if err != nil { - return fmt.Errorf("error describing EC2 Virtual Interface Groups: %w", err) + return fmt.Errorf("error describing EC2 Local Gateway Virtual Interface Groups: %w", err) } - if output == nil || len(output.LocalGatewayVirtualInterfaceGroups) == 0 { - return fmt.Errorf("no matching Virtual Interface Group found") + if len(localGatewayVirtualInterfaceGroups) == 0 { + return fmt.Errorf("no matching EC2 Local Gateway Virtual Interface Groups found") } var ids, localGatewayVirtualInterfaceIds []*string - for _, group := range output.LocalGatewayVirtualInterfaceGroups { + for _, group := range localGatewayVirtualInterfaceGroups { + if group == nil { + continue + } + ids = append(ids, group.LocalGatewayVirtualInterfaceGroupId) localGatewayVirtualInterfaceIds = append(localGatewayVirtualInterfaceIds, group.LocalGatewayVirtualInterfaceIds...) 
} diff --git a/aws/data_source_aws_ec2_local_gateways.go b/aws/data_source_aws_ec2_local_gateways.go index ccc7a1afdc1..6e1a3d37003 100644 --- a/aws/data_source_aws_ec2_local_gateways.go +++ b/aws/data_source_aws_ec2_local_gateways.go @@ -2,7 +2,6 @@ package aws import ( "fmt" - "log" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" @@ -49,26 +48,40 @@ func dataSourceAwsEc2LocalGatewaysRead(d *schema.ResourceData, meta interface{}) req.Filters = nil } - log.Printf("[DEBUG] DescribeLocalGateways %s\n", req) - resp, err := conn.DescribeLocalGateways(req) + var localGateways []*ec2.LocalGateway + + err := conn.DescribeLocalGatewaysPages(req, func(page *ec2.DescribeLocalGatewaysOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + localGateways = append(localGateways, page.LocalGateways...) + + return !lastPage + }) + if err != nil { return fmt.Errorf("error describing EC2 Local Gateways: %w", err) } - if resp == nil || len(resp.LocalGateways) == 0 { - return fmt.Errorf("no matching Local Gateways found") + if len(localGateways) == 0 { + return fmt.Errorf("no matching EC2 Local Gateways found") } - localgateways := make([]string, 0) + var ids []string + + for _, localGateway := range localGateways { + if localGateway == nil { + continue + } - for _, localgateway := range resp.LocalGateways { - localgateways = append(localgateways, aws.StringValue(localgateway.LocalGatewayId)) + ids = append(ids, aws.StringValue(localGateway.LocalGatewayId)) } d.SetId(meta.(*AWSClient).region) - if err := d.Set("ids", localgateways); err != nil { - return fmt.Errorf("Error setting local gateway ids: %s", err) + if err := d.Set("ids", ids); err != nil { + return fmt.Errorf("error setting ids: %w", err) } return nil From baa60431efe8a9c7726d975c050f527854cbef2f Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 16 Dec 2020 16:18:44 -0500 Subject: [PATCH 0255/1212] Update CHANGELOG for #16669 --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8401e6a6b33..0383507a186 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,10 @@ ENHANCEMENTS BUG FIXES +* data-source/aws_ec2_coip_pools: Ensure all results from large environments are returned [GH-16669] +* data-source/aws_ec2_local_gateways: Ensure all results from large environments are returned [GH-16669] +* data-source/aws_ec2_local_gateway_route_tables: Ensure all results from large environments are returned [GH-16669] +* data-source/aws_ec2_local_gateway_virtual_interface_groups: Ensure all results from large environments are returned [GH-16669] * data-source/aws_prefix_list: Using `name` argument no longer overrides other arguments [GH-16739] * resource/aws_main_route_table_association: Prevent crash on creation when VPC main route table association is not found [GH-16680] * resource/aws_workspaces_workspace: Prevent panic from terminated WorkSpace [GH-16692] From 995b7e1ba0048444714a547ef314f910d7324768 Mon Sep 17 00:00:00 2001 From: Alvaro Del Valle Date: Wed, 16 Dec 2020 16:57:42 -0500 Subject: [PATCH 0256/1212] updated mq broker user property to 'required' --- website/docs/r/mq_broker.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/mq_broker.html.markdown b/website/docs/r/mq_broker.html.markdown index 7109b1bf9fd..3646c4a4fbc 100644 --- a/website/docs/r/mq_broker.html.markdown +++ b/website/docs/r/mq_broker.html.markdown @@ -67,7 +67,7 @@ The following arguments are supported: * `subnet_ids` - (Optional) The 
list of subnet IDs in which to launch the broker. A `SINGLE_INSTANCE` deployment requires one subnet. An `ACTIVE_STANDBY_MULTI_AZ` deployment requires two subnets. * `maintenance_window_start_time` - (Optional) Maintenance window start time. See below. * `logs` - (Optional) Logging configuration of the broker. See below. -* `user` - (Optional) The list of all ActiveMQ usernames for the specified broker. See below. +* `user` - (Required) The list of all ActiveMQ usernames for the specified broker. See below. * `tags` - (Optional) A map of tags to assign to the resource. ### Nested Fields From cc7b134730f0de784c6297014455d8a8d696bf17 Mon Sep 17 00:00:00 2001 From: Gaylord Mazelier Date: Sat, 9 May 2020 17:42:26 +0200 Subject: [PATCH 0257/1212] New resource: aws_lakeformation_datalake_settings --- aws/provider.go | 1 + ...rce_aws_lakeformation_datalake_settings.go | 135 ++++++++++++++++++ ...ws_lakeformation_datalake_settings_test.go | 66 +++++++++ 3 files changed, 202 insertions(+) create mode 100644 aws/resource_aws_lakeformation_datalake_settings.go create mode 100644 aws/resource_aws_lakeformation_datalake_settings_test.go diff --git a/aws/provider.go b/aws/provider.go index b67cfbd35dc..3442f0bba04 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -744,6 +744,7 @@ func Provider() *schema.Provider { "aws_kms_key": resourceAwsKmsKey(), "aws_kms_ciphertext": resourceAwsKmsCiphertext(), "aws_lakeformation_resource": resourceAwsLakeFormationResource(), + "aws_lakeformation_datalake_settings": resourceAwsLakeFormationDataLakeSettings(), "aws_lambda_alias": resourceAwsLambdaAlias(), "aws_lambda_code_signing_config": resourceAwsLambdaCodeSigningConfig(), "aws_lambda_event_source_mapping": resourceAwsLambdaEventSourceMapping(), diff --git a/aws/resource_aws_lakeformation_datalake_settings.go b/aws/resource_aws_lakeformation_datalake_settings.go new file mode 100644 index 00000000000..c708abcef59 --- /dev/null +++ b/aws/resource_aws_lakeformation_datalake_settings.go @@ -0,0 +1,135 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/lakeformation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func resourceAwsLakeFormationDataLakeSettings() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsLakeFormationDataLakeSettingsPut, + Update: resourceAwsLakeFormationDataLakeSettingsPut, + Read: resourceAwsLakeFormationDataLakeSettingsRead, + Delete: resourceAwsLakeFormationDataLakeSettingsReset, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "catalog_id": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + }, + "admins": { + Type: schema.TypeList, + Required: true, + MinItems: 0, + MaxItems: 10, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + }, + }, + } +} + +func resourceAwsLakeFormationDataLakeSettingsPut(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lakeformationconn + catalogId := createAwsDataCatalogId(d, meta.(*AWSClient).accountid) + + input := &lakeformation.PutDataLakeSettingsInput{ + CatalogId: aws.String(catalogId), + DataLakeSettings: &lakeformation.DataLakeSettings{ + DataLakeAdmins: expandAdmins(d), + }, + } + + _, err := conn.PutDataLakeSettings(input) + if err != nil { + return fmt.Errorf("Error updating DataLakeSettings: %s", err) + } + + 
d.SetId(fmt.Sprintf("lakeformation:settings:%s", catalogId))
+ d.Set("catalog_id", catalogId)
+
+ return resourceAwsLakeFormationDataLakeSettingsRead(d, meta)
+}
+
+func resourceAwsLakeFormationDataLakeSettingsRead(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).lakeformationconn
+ catalogId := d.Get("catalog_id").(string)
+
+ input := &lakeformation.GetDataLakeSettingsInput{
+ CatalogId: aws.String(catalogId),
+ }
+
+ out, err := conn.GetDataLakeSettings(input)
+ if err != nil {
+ return fmt.Errorf("Error reading DataLakeSettings: %s", err)
+ }
+
+ d.Set("catalog_id", catalogId)
+ if err := d.Set("admins", flattenAdmins(out.DataLakeSettings.DataLakeAdmins)); err != nil {
+ return fmt.Errorf("Error setting admins from DataLakeSettings: %s", err)
+ }
+ // TODO: Add CreateDatabaseDefaultPermissions and CreateTableDefaultPermissions
+
+ return nil
+}
+
+func resourceAwsLakeFormationDataLakeSettingsReset(d *schema.ResourceData, meta interface{}) error {
+ conn := meta.(*AWSClient).lakeformationconn
+ catalogId := d.Get("catalog_id").(string)
+
+ input := &lakeformation.PutDataLakeSettingsInput{
+ CatalogId: aws.String(catalogId),
+ DataLakeSettings: &lakeformation.DataLakeSettings{
+ DataLakeAdmins: make([]*lakeformation.DataLakePrincipal, 0),
+ },
+ }
+
+ _, err := conn.PutDataLakeSettings(input)
+ if err != nil {
+ return fmt.Errorf("Error resetting DataLakeSettings: %s", err)
+ }
+
+ return nil
+}
+
+func createAwsDataCatalogId(d *schema.ResourceData, accountId string) (catalogId string) {
+ if inputCatalogId, ok := d.GetOkExists("catalog_id"); ok {
+ catalogId = inputCatalogId.(string)
+ } else {
+ catalogId = accountId
+ }
+ return
+}
+
+func expandAdmins(d *schema.ResourceData) []*lakeformation.DataLakePrincipal {
+ xs := d.Get("admins")
+ ys := make([]*lakeformation.DataLakePrincipal, len(xs.([]interface{})))
+
+ for i, x := range xs.([]interface{}) {
+ ys[i] = &lakeformation.DataLakePrincipal{
+ DataLakePrincipalIdentifier: aws.String(x.(string)),
+ }
+ }
+
+ return ys
+}
+
+func flattenAdmins(xs []*lakeformation.DataLakePrincipal) []string {
+ admins := make([]string, len(xs))
+ for i, x := range xs {
+ admins[i] = aws.StringValue(x.DataLakePrincipalIdentifier)
+ }
+
+ return admins
+}
diff --git a/aws/resource_aws_lakeformation_datalake_settings_test.go b/aws/resource_aws_lakeformation_datalake_settings_test.go
new file mode 100644
index 00000000000..d2ac3b0c538
--- /dev/null
+++ b/aws/resource_aws_lakeformation_datalake_settings_test.go
@@ -0,0 +1,66 @@
+package aws
+
+import (
+ "testing"
+
+ "github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+)
+
+func TestAccAWSLakeFormationDataLakeSettings_basic(t *testing.T) {
+ callerIdentityName := "data.aws_caller_identity.current"
+ resourceName := "aws_lakeformation_datalake_settings.test"
+
+ resource.ParallelTest(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ // TODO: CheckDestroy: testAccCheckAWSLakeFormationDataLakeSettingsDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccAWSLakeFormationDataLakeSettingsConfig_basic,
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"),
+ resource.TestCheckResourceAttr(resourceName, "admins.#", "1"),
+ resource.TestCheckResourceAttrPair(callerIdentityName, "arn", resourceName, "admins.0"),
+ ),
+ },
+ },
+ })
+}
+
+const testAccAWSLakeFormationDataLakeSettingsConfig_basic = `
+data "aws_caller_identity" "current" {}
+
+resource "aws_lakeformation_datalake_settings" "test" {
+ admins = ["${data.aws_caller_identity.current.arn}"]
+}
+`
+
+func TestAccAWSLakeFormationDataLakeSettings_withCatalogId(t *testing.T) {
+ callerIdentityName := "data.aws_caller_identity.current"
+ resourceName := "aws_lakeformation_datalake_settings.test"
+
+ resource.ParallelTest(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ // TODO: CheckDestroy: testAccCheckAWSLakeFormationDataLakeSettingsDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccAWSLakeFormationDataLakeSettingsConfig_withCatalogId,
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"),
+ resource.TestCheckResourceAttr(resourceName, "admins.#", "1"),
+ resource.TestCheckResourceAttrPair(callerIdentityName, "arn", resourceName, "admins.0"),
+ ),
+ },
+ },
+ })
+}
+
+const testAccAWSLakeFormationDataLakeSettingsConfig_withCatalogId = `
+data "aws_caller_identity" "current" {}
+
+resource "aws_lakeformation_datalake_settings" "test" {
+ catalog_id = "${data.aws_caller_identity.current.account_id}"
+ admins = ["${data.aws_caller_identity.current.arn}"]
+}
+`
From 5220967ecdf02ef8f18988a24072a4559e833c67 Mon Sep 17 00:00:00 2001
From: Gaylord Mazelier
Date: Sat, 9 May 2020 19:21:36 +0200
Subject: [PATCH 0258/1212] Add documentation

---
 ...eformation_datalake_settings.html.markdown | 46 +++++++++++++++++++
 1 file changed, 46 insertions(+)
 create mode 100644 website/docs/r/lakeformation_datalake_settings.html.markdown

diff --git a/website/docs/r/lakeformation_datalake_settings.html.markdown b/website/docs/r/lakeformation_datalake_settings.html.markdown
new file mode 100644
index 00000000000..9bffac8da53
--- /dev/null
+++ b/website/docs/r/lakeformation_datalake_settings.html.markdown
@@ -0,0 +1,46 @@
+---
+subcategory: "LakeFormation"
+layout: "aws"
+page_title: "AWS: aws_lakeformation_datalake_settings"
+description: |-
+ Manages the data lake settings for the current account
+---
+
+# Resource: aws_lakeformation_datalake_settings
+
+Manages the data lake settings for the current account.
+
+## Example Usage
+
+```hcl
+data "aws_iam_user" "existing_user" {
+ user_name = "an_existing_user_name"
+}
+
+data "aws_iam_role" "existing_role" {
+ name = "an_existing_role_name"
+}
+
+resource "aws_lakeformation_datalake_settings" "example" {
+ admins = [
+ "${data.aws_iam_user.existing_user.arn}",
+ "${data.aws_iam_role.existing_role.arn}",
+ ]
+}
+```
+
+## Argument Reference
+
+The following arguments are required:
+
+* `admins` – (Required) A list of up to 10 AWS Lake Formation principals (users or roles).
+
+The following arguments are optional:
+
+* `catalog_id` – (Optional) The identifier for the Data Catalog. By default, the account ID.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - Resource identifier with the pattern `lakeformation:settings:ACCOUNT_ID`.
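For reference, both the create/update path and the delete path of the new
resource reduce to a single `PutDataLakeSettings` call, as the resource diff
above shows (delete simply puts an empty admin list, since the call replaces
the full settings document). A minimal standalone sketch of that call with
the AWS SDK for Go (the region, account ID, and admin ARN below are
illustrative placeholders, not values from this change):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lakeformation"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	conn := lakeformation.New(sess)

	// One DataLakePrincipal per admin ARN, mirroring expandAdmins above.
	input := &lakeformation.PutDataLakeSettingsInput{
		CatalogId: aws.String("123456789012"), // defaults to the caller's account ID
		DataLakeSettings: &lakeformation.DataLakeSettings{
			DataLakeAdmins: []*lakeformation.DataLakePrincipal{
				{DataLakePrincipalIdentifier: aws.String("arn:aws:iam::123456789012:user/example")},
			},
		},
	}

	if _, err := conn.PutDataLakeSettings(input); err != nil {
		log.Fatalf("error updating data lake settings: %s", err)
	}

	fmt.Println("data lake settings updated")
}
```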
From c6edb39fe1095af816a32cc2733fdc4d42d56932 Mon Sep 17 00:00:00 2001 From: Gaylord Mazelier Date: Mon, 11 May 2020 09:48:05 +0200 Subject: [PATCH 0259/1212] Use Lake Formation official spelling --- website/docs/r/lakeformation_datalake_settings.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/lakeformation_datalake_settings.html.markdown b/website/docs/r/lakeformation_datalake_settings.html.markdown index 9bffac8da53..f3971da0eb3 100644 --- a/website/docs/r/lakeformation_datalake_settings.html.markdown +++ b/website/docs/r/lakeformation_datalake_settings.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "LakeFormation" +subcategory: "Lake Formation" layout: "aws" page_title: "AWS: aws_lakeformation_datalake_settings" description: |- From 33dba2974a55cddb403217f0e6de72b4066c6f51 Mon Sep 17 00:00:00 2001 From: Gaylord Mazelier Date: Wed, 27 May 2020 17:27:49 +0200 Subject: [PATCH 0260/1212] Remove redundant check --- go.mod | 1 + go.sum | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/go.mod b/go.mod index 597534f085f..43cd6ff74af 100644 --- a/go.mod +++ b/go.mod @@ -11,6 +11,7 @@ require ( github.com/hashicorp/go-hclog v0.10.0 // indirect github.com/hashicorp/go-multierror v1.1.0 github.com/hashicorp/go-version v1.2.1 + github.com/hashicorp/terraform-plugin-sdk v1.16.0 // indirect github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0 github.com/jen20/awspolicyequivalence v1.1.0 github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba diff --git a/go.sum b/go.sum index 3a28bf9bdf2..f9fbbe06859 100644 --- a/go.sum +++ b/go.sum @@ -59,6 +59,8 @@ github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2 github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= @@ -162,6 +164,8 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= @@ -177,6 +181,7 @@ github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cty 
v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= +github.com/hashicorp/go-getter v1.4.2-0.20200106182914-9813cbd4eb02/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= github.com/hashicorp/go-getter v1.5.0 h1:ciWJaeZWSMbc5OiLMpKp40MKFPqO44i0h3uyfXPBkkk= github.com/hashicorp/go-getter v1.5.0/go.mod h1:a7z7NPPfNQpJWcn4rSWFtdrSldqLdLPEF3d8nFMsSLM= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= @@ -194,22 +199,34 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f h1:UdxlrJz4JOnY8W+DbLISwf2B8WXEolNRA8BGCwI9jws= +github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= +github.com/hashicorp/hcl/v2 v2.0.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90= github.com/hashicorp/hcl/v2 v2.3.0 h1:iRly8YaMwTBAKhn1Ybk7VSdzbnopghktCD031P8ggUE= github.com/hashicorp/hcl/v2 v2.3.0/go.mod h1:d+FwDBbOLvpAM3Z6J7gPj/VoAGkNe/gm352ZhjJ/Zv8= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/terraform-config-inspect v0.0.0-20191115094559-17f92b0546e8 h1:+RyjwU+Gnd/aTJBPZVDNm903eXVjjqhbaR4Ypx3xYyY= +github.com/hashicorp/terraform-config-inspect v0.0.0-20191115094559-17f92b0546e8/go.mod h1:p+ivJws3dpqbp1iP84+npOyAmTTOLMgCzrXd3GSdn/A= github.com/hashicorp/terraform-exec v0.10.0 h1:3nh/1e3u9gYRUQGOKWp/8wPR7ABlL2F14sZMZBrp+dM= github.com/hashicorp/terraform-exec v0.10.0/go.mod h1:tOT8j1J8rP05bZBGWXfMyU3HkLi1LWyqL3Bzsc3CJjo= github.com/hashicorp/terraform-json v0.5.0 h1:7TV3/F3y7QVSuN4r9BEXqnWqrAyeOtON8f0wvREtyzs= github.com/hashicorp/terraform-json v0.5.0/go.mod h1:eAbqb4w0pSlRmdvl8fOyHAi/+8jnkVYN28gJkSJrLhU= github.com/hashicorp/terraform-plugin-go v0.1.0 h1:kyXZ0nkHxiRev/q18N40IbRRk4AV0zE/MDJkDM3u8dY= github.com/hashicorp/terraform-plugin-go v0.1.0/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4= +github.com/hashicorp/terraform-plugin-sdk v1.16.0 h1:NrkXMRjHErUPPTHQkZ6JIn6bByiJzGnlJzH1rVdNEuE= +github.com/hashicorp/terraform-plugin-sdk v1.16.0/go.mod h1:5sVxrwW6/xzFhZyql+Q9zXCUEJaGWcBIxBbZFLpVXOI= github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0 h1:Egv+R1tOOjPNz643KBTx3tLT6RdFGGYJcZlyLvrPcEU= github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0/go.mod h1:+12dJQebYjuU/yiq94iZUPuC66abfRBrXdpVJia3ojk= +github.com/hashicorp/terraform-plugin-test/v2 v2.1.2 
h1:p96IIn+XpvVjw7AtN8y9MKxn0x69S7wtbGf7JgDJoIk= +github.com/hashicorp/terraform-plugin-test/v2 v2.1.2/go.mod h1:jerO5mrd+jVNALy8aiq+VZOg/CR8T2T1QR3jd6JKGOI= +github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596 h1:hjyO2JsNZUKT1ym+FAdlBEkGPevazYsmVgIMw7dVELg= +github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= @@ -249,11 +266,13 @@ github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LE github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.7 h1:bQGKb3vps/j0E9GfJQ03JyhRuxsvdAanXlT9BTw3mdw= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= @@ -262,6 +281,8 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mitchellh/cli v1.1.1 h1:J64v/xD7Clql+JVKSvkYojLOXu1ibnY9ZjGLwSt/89w= github.com/mitchellh/cli v1.1.1/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -293,6 +314,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.1 h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI= +github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E= github.com/pquerna/otp v1.3.0 h1:oJV/SkzR33anKXwQU3Of42rL4wbrffP4uvUf1SvS5Xs= github.com/pquerna/otp 
v1.3.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -300,7 +323,10 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -309,9 +335,11 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack v4.0.1+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= @@ -319,9 +347,13 @@ github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0B github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= +github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= github.com/zclconf/go-cty v1.2.1 h1:vGMsygfmeCl4Xb6OA5U5XVAaQZ69FvoG7X2jUtQujb8= github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= +github.com/zclconf/go-cty-yaml v1.0.1 h1:up11wlgAaDvlAGENcFDnZgkn0qUJurso7k6EpURKNF8= +github.com/zclconf/go-cty-yaml v1.0.1/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -383,6 +415,7 @@ golang.org/x/net 
v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= From 07aa5973365e49dde07d19290694cedd6858b474 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Tue, 15 Dec 2020 18:31:06 -0500 Subject: [PATCH 0261/1212] resource/lakeformation_data_lake_settings: Upgrade to plugin v2 --- aws/resource_aws_lakeformation_datalake_settings.go | 4 ++-- aws/resource_aws_lakeformation_datalake_settings_test.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_lakeformation_datalake_settings.go b/aws/resource_aws_lakeformation_datalake_settings.go index c708abcef59..2fe3f3cbff3 100644 --- a/aws/resource_aws_lakeformation_datalake_settings.go +++ b/aws/resource_aws_lakeformation_datalake_settings.go @@ -5,8 +5,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lakeformation" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceAwsLakeFormationDataLakeSettings() *schema.Resource { diff --git a/aws/resource_aws_lakeformation_datalake_settings_test.go b/aws/resource_aws_lakeformation_datalake_settings_test.go index d2ac3b0c538..bdc3e3b9bf2 100644 --- a/aws/resource_aws_lakeformation_datalake_settings_test.go +++ b/aws/resource_aws_lakeformation_datalake_settings_test.go @@ -3,7 +3,7 @@ package aws import ( "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccAWSLakeFormationDataLakeSettings_basic(t *testing.T) { From 394016d330d8c30eb0647a1509f2b5ef80da95c6 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Tue, 15 Dec 2020 18:39:08 -0500 Subject: [PATCH 0262/1212] resource/lakeformation_data_lake_settings: Rename 'datalake' to 'data lake' --- aws/provider.go | 2 +- ...o => resource_aws_lakeformation_data_lake_settings.go} | 0 ...resource_aws_lakeformation_data_lake_settings_test.go} | 8 ++++---- ...own => lakeformation_data_lake_settings.html.markdown} | 6 +++--- 4 files changed, 8 insertions(+), 8 deletions(-) rename aws/{resource_aws_lakeformation_datalake_settings.go => resource_aws_lakeformation_data_lake_settings.go} (100%) rename aws/{resource_aws_lakeformation_datalake_settings_test.go => resource_aws_lakeformation_data_lake_settings_test.go} (89%) rename website/docs/r/{lakeformation_datalake_settings.html.markdown => lakeformation_data_lake_settings.html.markdown} (85%) diff --git a/aws/provider.go b/aws/provider.go index 3442f0bba04..88299f99f2f 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -743,8 +743,8 @@ func Provider() *schema.Provider { "aws_kms_grant": resourceAwsKmsGrant(), 
"aws_kms_key": resourceAwsKmsKey(), "aws_kms_ciphertext": resourceAwsKmsCiphertext(), + "aws_lakeformation_data_lake_settings": resourceAwsLakeFormationDataLakeSettings(), "aws_lakeformation_resource": resourceAwsLakeFormationResource(), - "aws_lakeformation_datalake_settings": resourceAwsLakeFormationDataLakeSettings(), "aws_lambda_alias": resourceAwsLambdaAlias(), "aws_lambda_code_signing_config": resourceAwsLambdaCodeSigningConfig(), "aws_lambda_event_source_mapping": resourceAwsLambdaEventSourceMapping(), diff --git a/aws/resource_aws_lakeformation_datalake_settings.go b/aws/resource_aws_lakeformation_data_lake_settings.go similarity index 100% rename from aws/resource_aws_lakeformation_datalake_settings.go rename to aws/resource_aws_lakeformation_data_lake_settings.go diff --git a/aws/resource_aws_lakeformation_datalake_settings_test.go b/aws/resource_aws_lakeformation_data_lake_settings_test.go similarity index 89% rename from aws/resource_aws_lakeformation_datalake_settings_test.go rename to aws/resource_aws_lakeformation_data_lake_settings_test.go index bdc3e3b9bf2..e13ae09f8d2 100644 --- a/aws/resource_aws_lakeformation_datalake_settings_test.go +++ b/aws/resource_aws_lakeformation_data_lake_settings_test.go @@ -8,7 +8,7 @@ import ( func TestAccAWSLakeFormationDataLakeSettings_basic(t *testing.T) { callerIdentityName := "data.aws_caller_identity.current" - resourceName := "aws_lakeformation_datalake_settings.test" + resourceName := "aws_lakeformation_data_lake_settings.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -30,14 +30,14 @@ func TestAccAWSLakeFormationDataLakeSettings_basic(t *testing.T) { const testAccAWSLakeFormationDataLakeSettingsConfig_basic = ` data "aws_caller_identity" "current" {} -resource "aws_lakeformation_datalake_settings" "test" { +resource "aws_lakeformation_data_lake_settings" "test" { admins = ["${data.aws_caller_identity.current.arn}"] } ` func TestAccAWSLakeFormationDataLakeSettings_withCatalogId(t *testing.T) { callerIdentityName := "data.aws_caller_identity.current" - resourceName := "aws_lakeformation_datalake_settings.test" + resourceName := "aws_lakeformation_data_lake_settings.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -59,7 +59,7 @@ func TestAccAWSLakeFormationDataLakeSettings_withCatalogId(t *testing.T) { const testAccAWSLakeFormationDataLakeSettingsConfig_withCatalogId = ` data "aws_caller_identity" "current" {} -resource "aws_lakeformation_datalake_settings" "test" { +resource "aws_lakeformation_data_lake_settings" "test" { catalog_id = "${data.aws_caller_identity.current.account_id}" admins = ["${data.aws_caller_identity.current.arn}"] } diff --git a/website/docs/r/lakeformation_datalake_settings.html.markdown b/website/docs/r/lakeformation_data_lake_settings.html.markdown similarity index 85% rename from website/docs/r/lakeformation_datalake_settings.html.markdown rename to website/docs/r/lakeformation_data_lake_settings.html.markdown index f3971da0eb3..8717673330c 100644 --- a/website/docs/r/lakeformation_datalake_settings.html.markdown +++ b/website/docs/r/lakeformation_data_lake_settings.html.markdown @@ -1,12 +1,12 @@ --- subcategory: "Lake Formation" layout: "aws" -page_title: "AWS: aws_lakeformation_datalake_settings" +page_title: "AWS: aws_lakeformation_data_lake_settings" description: |- Manages the data lake settings for the current account --- -# Resource: aws_lakeformation_datalake_settings +# Resource: 
aws_lakeformation_data_lake_settings
 
 Manages the data lake settings for the current account.
 
@@ -21,7 +21,7 @@ data "aws_iam_role" "existing_role" {
 name = "an_existing_role_name"
 }
 
-resource "aws_lakeformation_datalake_settings" "example" {
+resource "aws_lakeformation_data_lake_settings" "example" {
 admins = [
 "${data.aws_iam_user.existing_user.arn}",
 "${data.aws_iam_role.existing_role.arn}",
From f6113ed0c964043d0620c676d5ec030b2e020baf Mon Sep 17 00:00:00 2001
From: Dirk Avery
Date: Tue, 15 Dec 2020 18:43:45 -0500
Subject: [PATCH 0263/1212] resource/lakeformation_data_lake_settings: Remove go.mod, go.sum from PR

---
 go.mod |  1 -
 go.sum | 33 ---------------------------------
 2 files changed, 34 deletions(-)

diff --git a/go.mod b/go.mod
index 43cd6ff74af..597534f085f 100644
--- a/go.mod
+++ b/go.mod
@@ -11,7 +11,6 @@ require (
 github.com/hashicorp/go-hclog v0.10.0 // indirect
 github.com/hashicorp/go-multierror v1.1.0
 github.com/hashicorp/go-version v1.2.1
-github.com/hashicorp/terraform-plugin-sdk v1.16.0 // indirect
 github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0
 github.com/jen20/awspolicyequivalence v1.1.0
 github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba
diff --git a/go.sum b/go.sum
index f9fbbe06859..3a28bf9bdf2 100644
--- a/go.sum
+++ b/go.sum
@@ -59,8 +59,6 @@ github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2
 github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk=
 github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
-github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
 github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
@@ -164,8 +162,6 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf
 github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
@@ -177,7 +177,6 @@ github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng
 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI=
 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs=
 github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY=
-github.com/hashicorp/go-getter v1.4.2-0.20200106182914-9813cbd4eb02/go.mod 
h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= github.com/hashicorp/go-getter v1.5.0 h1:ciWJaeZWSMbc5OiLMpKp40MKFPqO44i0h3uyfXPBkkk= github.com/hashicorp/go-getter v1.5.0/go.mod h1:a7z7NPPfNQpJWcn4rSWFtdrSldqLdLPEF3d8nFMsSLM= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= @@ -199,34 +194,22 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f h1:UdxlrJz4JOnY8W+DbLISwf2B8WXEolNRA8BGCwI9jws= -github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= -github.com/hashicorp/hcl/v2 v2.0.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90= github.com/hashicorp/hcl/v2 v2.3.0 h1:iRly8YaMwTBAKhn1Ybk7VSdzbnopghktCD031P8ggUE= github.com/hashicorp/hcl/v2 v2.3.0/go.mod h1:d+FwDBbOLvpAM3Z6J7gPj/VoAGkNe/gm352ZhjJ/Zv8= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/terraform-config-inspect v0.0.0-20191115094559-17f92b0546e8 h1:+RyjwU+Gnd/aTJBPZVDNm903eXVjjqhbaR4Ypx3xYyY= -github.com/hashicorp/terraform-config-inspect v0.0.0-20191115094559-17f92b0546e8/go.mod h1:p+ivJws3dpqbp1iP84+npOyAmTTOLMgCzrXd3GSdn/A= github.com/hashicorp/terraform-exec v0.10.0 h1:3nh/1e3u9gYRUQGOKWp/8wPR7ABlL2F14sZMZBrp+dM= github.com/hashicorp/terraform-exec v0.10.0/go.mod h1:tOT8j1J8rP05bZBGWXfMyU3HkLi1LWyqL3Bzsc3CJjo= github.com/hashicorp/terraform-json v0.5.0 h1:7TV3/F3y7QVSuN4r9BEXqnWqrAyeOtON8f0wvREtyzs= github.com/hashicorp/terraform-json v0.5.0/go.mod h1:eAbqb4w0pSlRmdvl8fOyHAi/+8jnkVYN28gJkSJrLhU= github.com/hashicorp/terraform-plugin-go v0.1.0 h1:kyXZ0nkHxiRev/q18N40IbRRk4AV0zE/MDJkDM3u8dY= github.com/hashicorp/terraform-plugin-go v0.1.0/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4= -github.com/hashicorp/terraform-plugin-sdk v1.16.0 h1:NrkXMRjHErUPPTHQkZ6JIn6bByiJzGnlJzH1rVdNEuE= -github.com/hashicorp/terraform-plugin-sdk v1.16.0/go.mod h1:5sVxrwW6/xzFhZyql+Q9zXCUEJaGWcBIxBbZFLpVXOI= github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0 h1:Egv+R1tOOjPNz643KBTx3tLT6RdFGGYJcZlyLvrPcEU= github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0/go.mod h1:+12dJQebYjuU/yiq94iZUPuC66abfRBrXdpVJia3ojk= -github.com/hashicorp/terraform-plugin-test/v2 v2.1.2 h1:p96IIn+XpvVjw7AtN8y9MKxn0x69S7wtbGf7JgDJoIk= -github.com/hashicorp/terraform-plugin-test/v2 v2.1.2/go.mod h1:jerO5mrd+jVNALy8aiq+VZOg/CR8T2T1QR3jd6JKGOI= -github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596 h1:hjyO2JsNZUKT1ym+FAdlBEkGPevazYsmVgIMw7dVELg= -github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596/go.mod 
h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= @@ -266,13 +249,11 @@ github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LE github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.7 h1:bQGKb3vps/j0E9GfJQ03JyhRuxsvdAanXlT9BTw3mdw= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= @@ -281,8 +262,6 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mitchellh/cli v1.1.1 h1:J64v/xD7Clql+JVKSvkYojLOXu1ibnY9ZjGLwSt/89w= github.com/mitchellh/cli v1.1.1/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= -github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -314,8 +293,6 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.1 h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI= -github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E= github.com/pquerna/otp v1.3.0 h1:oJV/SkzR33anKXwQU3Of42rL4wbrffP4uvUf1SvS5Xs= github.com/pquerna/otp v1.3.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -323,10 +300,7 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/sergi/go-diff v1.0.0/go.mod 
h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -335,11 +309,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= -github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmihailenco/msgpack v4.0.1+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= @@ -347,13 +319,9 @@ github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0B github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= -github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= github.com/zclconf/go-cty v1.2.1 h1:vGMsygfmeCl4Xb6OA5U5XVAaQZ69FvoG7X2jUtQujb8= github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty-yaml v1.0.1 h1:up11wlgAaDvlAGENcFDnZgkn0qUJurso7k6EpURKNF8= -github.com/zclconf/go-cty-yaml v1.0.1/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -415,7 +383,6 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= From 2f34f0bfaf5fede3edfa012bf5627a34a8b3675d Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 16 Dec 2020 16:15:24 -0500 Subject: [PATCH 0264/1212] resource/lakeformation_data_lake_settings: Add arguments, tests --- ...ce_aws_lakeformation_data_lake_settings.go | 298 +++++++++++++++--- ...s_lakeformation_data_lake_settings_test.go | 167 ++++++++-- ...formation_data_lake_settings.html.markdown | 62 +++- 3 files changed, 435 insertions(+), 92 deletions(-) diff --git a/aws/resource_aws_lakeformation_data_lake_settings.go b/aws/resource_aws_lakeformation_data_lake_settings.go index 2fe3f3cbff3..7dbde07ffdd 100644 --- a/aws/resource_aws_lakeformation_data_lake_settings.go +++ b/aws/resource_aws_lakeformation_data_lake_settings.go @@ -2,19 +2,22 @@ package aws import ( "fmt" + "log" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lakeformation" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/hashcode" ) func resourceAwsLakeFormationDataLakeSettings() *schema.Resource { return &schema.Resource{ - Create: resourceAwsLakeFormationDataLakeSettingsPut, - Update: resourceAwsLakeFormationDataLakeSettingsPut, + Create: resourceAwsLakeFormationDataLakeSettingsCreate, + Update: resourceAwsLakeFormationDataLakeSettingsCreate, Read: resourceAwsLakeFormationDataLakeSettingsRead, - Delete: resourceAwsLakeFormationDataLakeSettingsReset, + Delete: resourceAwsLakeFormationDataLakeSettingsDelete, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, }, @@ -22,114 +25,307 @@ func resourceAwsLakeFormationDataLakeSettings() *schema.Resource { Schema: map[string]*schema.Schema{ "catalog_id": { Type: schema.TypeString, + Computed: true, ForceNew: true, Optional: true, + }, + "create_database_default_permissions": { + Type: schema.TypeList, + Computed: true, + Optional: true, + MaxItems: 3, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "permissions": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice(lakeformation.Permission_Values(), false), + }, + }, + "principal": { + Type: schema.TypeString, + Optional: true, + Computed: true, + //ValidateFunc: validateArn, + }, + }, + }, + }, + "create_table_default_permissions": { + Type: schema.TypeList, + Computed: true, + Optional: true, + MaxItems: 3, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "permissions": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice(lakeformation.Permission_Values(), false), + }, + }, + "principal": { + Type: schema.TypeString, + Optional: true, + Computed: true, + //ValidateFunc: validateArn, + }, + }, + }, + }, + "data_lake_admins": { + Type: schema.TypeList, Computed: true, + Optional: true, + Elem: 
&schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateArn, + }, }, - "admins": { + "trusted_resource_owners": { Type: schema.TypeList, - Required: true, - MinItems: 0, - MaxItems: 10, + Computed: true, + Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: validation.NoZeroValues, + ValidateFunc: validateAwsAccountId, }, }, }, } } -func resourceAwsLakeFormationDataLakeSettingsPut(d *schema.ResourceData, meta interface{}) error { +func resourceAwsLakeFormationDataLakeSettingsCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).lakeformationconn - catalogId := createAwsDataCatalogId(d, meta.(*AWSClient).accountid) - input := &lakeformation.PutDataLakeSettingsInput{ - CatalogId: aws.String(catalogId), - DataLakeSettings: &lakeformation.DataLakeSettings{ - DataLakeAdmins: expandAdmins(d), - }, + if err := resourceAwsLakeFormationDataLakeSettingsAdminUpdate(d, meta); err != nil { + return fmt.Errorf("error updating Lake Formation data lake admins: %w", err) } - _, err := conn.PutDataLakeSettings(input) + input := &lakeformation.PutDataLakeSettingsInput{} + + if v, ok := d.GetOk("catalog_id"); ok { + input.CatalogId = aws.String(v.(string)) + } + + settings := &lakeformation.DataLakeSettings{} + + if v, ok := d.GetOk("create_database_default_permissions"); ok { + settings.CreateDatabaseDefaultPermissions = expandDataLakeSettingsCreateDefaultPermissions(v.([]interface{})) + } + + if v, ok := d.GetOk("create_table_default_permissions"); ok { + settings.CreateTableDefaultPermissions = expandDataLakeSettingsCreateDefaultPermissions(v.([]interface{})) + } + + if v, ok := d.GetOk("data_lake_admins"); ok { + settings.DataLakeAdmins = expandDataLakeSettingsAdmins(v.([]interface{})) + } + + if v, ok := d.GetOk("trusted_resource_owners"); ok { + settings.TrustedResourceOwners = expandStringList(v.([]interface{})) + } + + input.DataLakeSettings = settings + output, err := conn.PutDataLakeSettings(input) + if err != nil { - return fmt.Errorf("Error updating DataLakeSettings: %s", err) + return fmt.Errorf("error creating Lake Formation data lake settings: %w", err) } - d.SetId(fmt.Sprintf("lakeformation:settings:%s", catalogId)) - d.Set("catalog_id", catalogId) + if output == nil { + return fmt.Errorf("error creating Lake Formation data lake settings: empty response") + } + + d.SetId(fmt.Sprintf("%d", hashcode.String(input.String()))) return resourceAwsLakeFormationDataLakeSettingsRead(d, meta) } func resourceAwsLakeFormationDataLakeSettingsRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).lakeformationconn - catalogId := d.Get("catalog_id").(string) - input := &lakeformation.GetDataLakeSettingsInput{ - CatalogId: aws.String(catalogId), + input := &lakeformation.GetDataLakeSettingsInput{} + + if v, ok := d.GetOk("catalog_id"); ok { + input.CatalogId = aws.String(v.(string)) + } + + output, err := conn.GetDataLakeSettings(input) + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, lakeformation.ErrCodeEntityNotFoundException) { + log.Printf("[WARN] Lake Formation data lake settings (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil } - out, err := conn.GetDataLakeSettings(input) if err != nil { - return fmt.Errorf("Error reading DataLakeSettings: %s", err) + return fmt.Errorf("error reading Lake Formation data lake settings (%s): %w", d.Id(), err) } - d.Set("catalog_id", catalogId) - if err := d.Set("admins", flattenAdmins(out.DataLakeSettings.DataLakeAdmins)); err != nil { - return 
fmt.Errorf("Error setting admins from DataLakeSettings: %s", err) + if output == nil || output.DataLakeSettings == nil { + return fmt.Errorf("error reading Lake Formation data lake settings (%s): empty response", d.Id()) } - // TODO: Add CreateDatabaseDefaultPermissions and CreateTableDefaultPermissions + + settings := output.DataLakeSettings + + if settings.CreateDatabaseDefaultPermissions != nil { + d.Set("create_database_default_permissions", flattenDataLakeSettingsCreateDefaultPermissions(settings.CreateDatabaseDefaultPermissions)) + } else { + d.Set("create_database_default_permissions", nil) + } + + if settings.CreateTableDefaultPermissions != nil { + d.Set("create_table_default_permissions", flattenDataLakeSettingsCreateDefaultPermissions(settings.CreateTableDefaultPermissions)) + } else { + d.Set("create_table_default_permissions", nil) + } + + d.Set("data_lake_admins", flattenDataLakeSettingsAdmins(settings.DataLakeAdmins)) + d.Set("trusted_resource_owners", flattenStringList(settings.TrustedResourceOwners)) return nil } -func resourceAwsLakeFormationDataLakeSettingsReset(d *schema.ResourceData, meta interface{}) error { +func resourceAwsLakeFormationDataLakeSettingsDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).lakeformationconn - catalogId := d.Get("catalog_id").(string) input := &lakeformation.PutDataLakeSettingsInput{ - CatalogId: aws.String(catalogId), DataLakeSettings: &lakeformation.DataLakeSettings{ - DataLakeAdmins: make([]*lakeformation.DataLakePrincipal, 0), + CreateDatabaseDefaultPermissions: make([]*lakeformation.PrincipalPermissions, 0), + CreateTableDefaultPermissions: make([]*lakeformation.PrincipalPermissions, 0), + DataLakeAdmins: make([]*lakeformation.DataLakePrincipal, 0), + TrustedResourceOwners: make([]*string, 0), }, } + if v, ok := d.GetOk("catalog_id"); ok { + input.CatalogId = aws.String(v.(string)) + } + _, err := conn.PutDataLakeSettings(input) + + if tfawserr.ErrCodeEquals(err, lakeformation.ErrCodeEntityNotFoundException) { + log.Printf("[WARN] Lake Formation data lake settings (%s) not found, removing from state", d.Id()) + return nil + } + if err != nil { - return fmt.Errorf("Error reseting DataLakeSettings: %s", err) + return fmt.Errorf("error deleting Lake Formation data lake settings (%s): %w", d.Id(), err) } return nil } -func createAwsDataCatalogId(d *schema.ResourceData, accountId string) (catalogId string) { - if inputCatalogId, ok := d.GetOkExists("catalog_id"); ok { - catalogId = inputCatalogId.(string) - } else { - catalogId = accountId +func resourceAwsLakeFormationDataLakeSettingsAdminUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lakeformationconn + + if v, ok := d.GetOk("data_lake_admins"); ok { + input := &lakeformation.PutDataLakeSettingsInput{} + + if v, ok := d.GetOk("catalog_id"); ok { + input.CatalogId = aws.String(v.(string)) + } + + settings := &lakeformation.DataLakeSettings{} + settings.DataLakeAdmins = expandDataLakeSettingsAdmins(v.([]interface{})) + + input.DataLakeSettings = settings + output, err := conn.PutDataLakeSettings(input) + + if err != nil { + return err + } + + if output == nil { + return fmt.Errorf("empty response") + } } - return + + return nil } -func expandAdmins(d *schema.ResourceData) []*lakeformation.DataLakePrincipal { - xs := d.Get("admins") - ys := make([]*lakeformation.DataLakePrincipal, len(xs.([]interface{}))) +func expandDataLakeSettingsCreateDefaultPermissions(tfMaps []interface{}) []*lakeformation.PrincipalPermissions { + 
apiObjects := make([]*lakeformation.PrincipalPermissions, 0, len(tfMaps)) + + for _, tfMap := range tfMaps { + apiObjects = append(apiObjects, expandDataLakeSettingsCreateDefaultPermission(tfMap.(map[string]interface{}))) + } + + return apiObjects +} - for i, x := range xs.([]interface{}) { - ys[i] = &lakeformation.DataLakePrincipal{ - DataLakePrincipalIdentifier: aws.String(x.(string)), +func expandDataLakeSettingsCreateDefaultPermission(tfMap map[string]interface{}) *lakeformation.PrincipalPermissions { + apiObject := &lakeformation.PrincipalPermissions{ + Permissions: expandStringSet(tfMap["permissions"].(*schema.Set)), + Principal: &lakeformation.DataLakePrincipal{ + DataLakePrincipalIdentifier: aws.String(tfMap["principal"].(string)), + }, + } + + return apiObject +} + +func flattenDataLakeSettingsCreateDefaultPermissions(apiObjects []*lakeformation.PrincipalPermissions) []map[string]interface{} { + tfMaps := make([]map[string]interface{}, len(apiObjects)) + if len(apiObjects) > 0 { + for i, v := range apiObjects { + tfMaps[i] = flattenDataLakeSettingsCreateDefaultPermission(v) } } - return ys + return tfMaps } -func flattenAdmins(xs []*lakeformation.DataLakePrincipal) []string { - admins := make([]string, len(xs)) - for i, x := range xs { - admins[i] = aws.StringValue(x.DataLakePrincipalIdentifier) +func flattenDataLakeSettingsCreateDefaultPermission(apiObject *lakeformation.PrincipalPermissions) map[string]interface{} { + tfMap := make(map[string]interface{}) + + if apiObject == nil { + return tfMap + } + + if apiObject.Permissions != nil { + tfMap["permissions"] = flattenStringSet(apiObject.Permissions) + } + + if v := aws.StringValue(apiObject.Principal.DataLakePrincipalIdentifier); v != "" { + tfMap["principal"] = v + } + + return tfMap +} + +func expandDataLakeSettingsAdmins(tfSlice []interface{}) []*lakeformation.DataLakePrincipal { + apiObjects := make([]*lakeformation.DataLakePrincipal, 0, len(tfSlice)) + + for _, tfItem := range tfSlice { + val, ok := tfItem.(string) + if ok && val != "" { + apiObjects = append(apiObjects, &lakeformation.DataLakePrincipal{ + DataLakePrincipalIdentifier: aws.String(tfItem.(string)), + }) + } + } + + return apiObjects +} + +func flattenDataLakeSettingsAdmins(apiObjects []*lakeformation.DataLakePrincipal) []interface{} { + if apiObjects == nil || len(apiObjects) == 0 { + return nil + } + + tfSlice := make([]interface{}, 0, len(apiObjects)) + + for _, apiObject := range apiObjects { + tfSlice = append(tfSlice, *apiObject.DataLakePrincipalIdentifier) } - return admins + return tfSlice } diff --git a/aws/resource_aws_lakeformation_data_lake_settings_test.go b/aws/resource_aws_lakeformation_data_lake_settings_test.go index e13ae09f8d2..dd32823a87d 100644 --- a/aws/resource_aws_lakeformation_data_lake_settings_test.go +++ b/aws/resource_aws_lakeformation_data_lake_settings_test.go @@ -1,66 +1,183 @@ package aws import ( + "fmt" "testing" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/lakeformation" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func TestAccAWSLakeFormationDataLakeSettings_basic(t *testing.T) { +func TestAccAWSLakeFormationDataLakeSettings_serial(t *testing.T) { + testCases := map[string]map[string]func(t *testing.T){ + "ResourcePolicy": { + "basic": testAccAWSLakeFormationDataLakeSettings_basic, + "disappears": testAccAWSLakeFormationDataLakeSettings_disappears, + "withoutCatalogId": 
testAccAWSLakeFormationDataLakeSettings_withoutCatalogId, + }, + } + + for group, m := range testCases { + m := m + t.Run(group, func(t *testing.T) { + for name, tc := range m { + tc := tc + t.Run(name, func(t *testing.T) { + tc(t) + }) + } + }) + } +} + +func testAccAWSLakeFormationDataLakeSettings_basic(t *testing.T) { callerIdentityName := "data.aws_caller_identity.current" resourceName := "aws_lakeformation_data_lake_settings.test" - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - // TODO: CheckDestroy: testAccCheckAWSLakeFormationDataLakeSettingsDestroy, + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLakeFormationDataLakeSettingsDestroy, Steps: []resource.TestStep{ { Config: testAccAWSLakeFormationDataLakeSettingsConfig_basic, Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), - resource.TestCheckResourceAttr(resourceName, "admins.#", "1"), - resource.TestCheckResourceAttrPair(callerIdentityName, "arn", resourceName, "admins.0"), + testAccCheckAWSLakeFormationDataLakeSettingsExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "catalog_id", callerIdentityName, "account_id"), + resource.TestCheckResourceAttr(resourceName, "data_lake_admins.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "data_lake_admins.0", callerIdentityName, "arn"), ), }, }, }) } -const testAccAWSLakeFormationDataLakeSettingsConfig_basic = ` -data "aws_caller_identity" "current" {} +func testAccAWSLakeFormationDataLakeSettings_disappears(t *testing.T) { + resourceName := "aws_lakeformation_data_lake_settings.test" -resource "aws_lakeformation_data_lake_settings" "test" { - admins = ["${data.aws_caller_identity.current.arn}"] + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLakeFormationDataLakeSettingsDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLakeFormationDataLakeSettingsConfig_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLakeFormationDataLakeSettingsExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsLakeFormationDataLakeSettings(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) } -` -func TestAccAWSLakeFormationDataLakeSettings_withCatalogId(t *testing.T) { +func testAccAWSLakeFormationDataLakeSettings_withoutCatalogId(t *testing.T) { callerIdentityName := "data.aws_caller_identity.current" resourceName := "aws_lakeformation_data_lake_settings.test" - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - // TODO: CheckDestroy: testAccCheckAWSLakeFormationDataLakeSettingsDestroy, + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLakeFormationDataLakeSettingsDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSLakeFormationDataLakeSettingsConfig_withCatalogId, + Config: testAccAWSLakeFormationDataLakeSettingsConfig_withoutCatalogId, Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, 
"catalog_id"), - resource.TestCheckResourceAttr(resourceName, "admins.#", "1"), - resource.TestCheckResourceAttrPair(callerIdentityName, "arn", resourceName, "admins.0"), + testAccCheckAWSLakeFormationDataLakeSettingsExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "data_lake_admins.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "data_lake_admins.0", callerIdentityName, "arn"), ), }, }, }) } -const testAccAWSLakeFormationDataLakeSettingsConfig_withCatalogId = ` +func testAccCheckAWSLakeFormationDataLakeSettingsDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).lakeformationconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_lakeformation_data_lake_settings" { + continue + } + + input := &lakeformation.GetDataLakeSettingsInput{} + + if rs.Primary.Attributes["catalog_id"] != "" { + input.CatalogId = aws.String(rs.Primary.Attributes["catalog_id"]) + } + + output, err := conn.GetDataLakeSettings(input) + + if tfawserr.ErrCodeEquals(err, lakeformation.ErrCodeEntityNotFoundException) { + continue + } + + if err != nil { + return fmt.Errorf("error getting Lake Formation data lake settings (%s): %w", rs.Primary.ID, err) + } + + if output != nil && output.DataLakeSettings != nil && len(output.DataLakeSettings.DataLakeAdmins) > 0 { + return fmt.Errorf("Lake Formation data lake admin(s) (%s) still exist", rs.Primary.ID) + } + } + + return nil +} + +func testAccCheckAWSLakeFormationDataLakeSettingsExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("resource not found: %s", resourceName) + } + + conn := testAccProvider.Meta().(*AWSClient).lakeformationconn + + input := &lakeformation.GetDataLakeSettingsInput{} + + if rs.Primary.Attributes["catalog_id"] != "" { + input.CatalogId = aws.String(rs.Primary.Attributes["catalog_id"]) + } + + _, err := conn.GetDataLakeSettings(input) + + if err != nil { + return fmt.Errorf("error getting Lake Formation data lake settings (%s): %w", rs.Primary.ID, err) + } + + return nil + } +} + +const testAccAWSLakeFormationDataLakeSettingsConfig_basic = ` +data "aws_caller_identity" "current" {} + +resource "aws_lakeformation_data_lake_settings" "test" { + catalog_id = data.aws_caller_identity.current.account_id + + create_database_default_permissions { + principal = "IAM_ALLOWED_PRINCIPALS" + permissions = ["ALL"] + } + + create_table_default_permissions { + principal = "IAM_ALLOWED_PRINCIPALS" + permissions = ["ALL"] + } + + data_lake_admins = [data.aws_caller_identity.current.arn] + trusted_resource_owners = [data.aws_caller_identity.current.account_id] +} +` + +const testAccAWSLakeFormationDataLakeSettingsConfig_withoutCatalogId = ` data "aws_caller_identity" "current" {} resource "aws_lakeformation_data_lake_settings" "test" { - catalog_id = "${data.aws_caller_identity.current.account_id}" - admins = ["${data.aws_caller_identity.current.arn}"] + data_lake_admins = [data.aws_caller_identity.current.arn] } ` diff --git a/website/docs/r/lakeformation_data_lake_settings.html.markdown b/website/docs/r/lakeformation_data_lake_settings.html.markdown index 8717673330c..bfde75c3c49 100644 --- a/website/docs/r/lakeformation_data_lake_settings.html.markdown +++ b/website/docs/r/lakeformation_data_lake_settings.html.markdown @@ -3,44 +3,74 @@ subcategory: "Lake Formation" layout: "aws" page_title: "AWS: aws_lakeformation_data_lake_settings" description: |- - Manages the 
data lake settings for the current account + Manages data lake administrators and default database and table permissions --- # Resource: aws_lakeformation_data_lake_settings -Manages the data lake settings for the current account. +Manages Lake Formation principals designated as data lake administrators and lists of principal permission entries for default create database and default create table permissions. ## Example Usage + +### Data Lake Admins + ```hcl -data "aws_iam_user" "existing_user" { - user_name = "an_existing_user_name" +resource "aws_iam_user" "test" { + name = "username" } -data "aws_iam_role" "existing_role" { - name = "an_existing_role_name" +resource "aws_iam_role" "test" { + name = "rolename" } resource "aws_lakeformation_data_lake_settings" "example" { - admins = [ - "${aws_iam_user.existing_user.arn}", - "${aws_iam_user.existing_role.arn}", - ] + data_lake_admins = [aws_iam_user.test.arn, aws_iam_role.test.arn] +} +``` + +### Create Default Permissions + +```hcl +resource "aws_lakeformation_data_lake_settings" "example" { + data_lake_admins = [aws_iam_user.test.arn, aws_iam_role.test.arn] + + create_database_default_permissions { + permissions = ["SELECT", "ALTER", "DROP"] + principal = aws_iam_user.test.arn + } + + create_table_default_permissions { + permissions = ["ALL"] + principal = aws_iam_role.test.arn + } } ``` ## Argument Reference -The following arguments are required: +The following arguments are optional: + +* `catalog_id` – (Optional) Identifier for the Data Catalog. By default, the account ID. +* `create_database_default_permissions` - (Optional) Up to three configuration blocks of principal permissions for default create database permissions. Detailed below. +* `create_table_default_permissions` - (Optional) Up to three configuration blocks of principal permissions for default create table permissions. Detailed below. +* `data_lake_admins` – (Optional) List of ARNs of AWS Lake Formation principals (IAM users or roles). +* `trusted_resource_owners` – (Optional) List of the resource-owning account IDs that the caller's account can use to share their user access details (user ARNs). -* `admins` – (Required) A list of up to 10 AWS Lake Formation principals (users or roles). +### create_database_default_permissions The following arguments are optional: -* `catalog_id` – (Optional) The identifier for the Data Catalog. By default, the account ID. +* `permissions` - (Optional) List of permissions that are granted to the principal. Valid values include `ALL`, `SELECT`, `ALTER`, `DROP`, `DELETE`, `INSERT`, `DESCRIBE`, `CREATE_DATABASE`, `CREATE_TABLE`, and `DATA_LOCATION_ACCESS`. +* `principal` - (Optional) Identifier for the Lake Formation principal. Supported principals are IAM users or IAM roles. -## Attributes Reference +### create_table_default_permissions -In addition to all arguments above, the following attributes are exported: +The following arguments are optional: + +* `permissions` - (Optional) List of permissions that are granted to the principal. Valid values include `ALL`, `SELECT`, `ALTER`, `DROP`, `DELETE`, `INSERT`, `DESCRIBE`, `CREATE_DATABASE`, `CREATE_TABLE`, and `DATA_LOCATION_ACCESS`. +* `principal` - (Optional) Identifier for the Lake Formation principal. Supported principals are IAM users or IAM roles. + +## Attributes Reference -* `id` - Resource identifier with the pattern `lakeformation:settings:ACCOUNT_ID`. +In addition to all arguments above, no attributes are exported. 
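The resource logic in the patch above assembles these arguments into a single `PutDataLakeSettingsInput`. As a rough sketch of that mapping, the "Create Default Permissions" example corresponds to an AWS Go SDK request along these lines; the account ID and the user and role ARNs are placeholders, not values from the patch:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/lakeformation"
)

func main() {
	// Rough sketch only: the request shape the resource builds from the
	// "Create Default Permissions" example; the ARNs are placeholders.
	input := &lakeformation.PutDataLakeSettingsInput{
		DataLakeSettings: &lakeformation.DataLakeSettings{
			// data_lake_admins maps to DataLakeAdmins.
			DataLakeAdmins: []*lakeformation.DataLakePrincipal{
				{DataLakePrincipalIdentifier: aws.String("arn:aws:iam::123456789012:user/example")},
			},
			// Each create_database_default_permissions block becomes one
			// PrincipalPermissions entry.
			CreateDatabaseDefaultPermissions: []*lakeformation.PrincipalPermissions{
				{
					Permissions: aws.StringSlice([]string{"SELECT", "ALTER", "DROP"}),
					Principal: &lakeformation.DataLakePrincipal{
						DataLakePrincipalIdentifier: aws.String("arn:aws:iam::123456789012:user/example"),
					},
				},
			},
			// Likewise for each create_table_default_permissions block.
			CreateTableDefaultPermissions: []*lakeformation.PrincipalPermissions{
				{
					Permissions: aws.StringSlice([]string{"ALL"}),
					Principal: &lakeformation.DataLakePrincipal{
						DataLakePrincipalIdentifier: aws.String("arn:aws:iam::123456789012:role/example"),
					},
				},
			},
		},
	}

	fmt.Println(input)
}
```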
From ac91a5f637ba3535f6a2bdad4e096c7747cbe2fc Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 16 Dec 2020 16:34:23 -0500 Subject: [PATCH 0265/1212] resource/lakeformation_data_lake_settings: Fix linter issues, simplify --- ...ce_aws_lakeformation_data_lake_settings.go | 26 +++++++------------ ...s_lakeformation_data_lake_settings_test.go | 23 ++++++---------- 2 files changed, 17 insertions(+), 32 deletions(-) diff --git a/aws/resource_aws_lakeformation_data_lake_settings.go b/aws/resource_aws_lakeformation_data_lake_settings.go index 7dbde07ffdd..bbc4d6265cf 100644 --- a/aws/resource_aws_lakeformation_data_lake_settings.go +++ b/aws/resource_aws_lakeformation_data_lake_settings.go @@ -175,18 +175,8 @@ func resourceAwsLakeFormationDataLakeSettingsRead(d *schema.ResourceData, meta i settings := output.DataLakeSettings - if settings.CreateDatabaseDefaultPermissions != nil { - d.Set("create_database_default_permissions", flattenDataLakeSettingsCreateDefaultPermissions(settings.CreateDatabaseDefaultPermissions)) - } else { - d.Set("create_database_default_permissions", nil) - } - - if settings.CreateTableDefaultPermissions != nil { - d.Set("create_table_default_permissions", flattenDataLakeSettingsCreateDefaultPermissions(settings.CreateTableDefaultPermissions)) - } else { - d.Set("create_table_default_permissions", nil) - } - + d.Set("create_database_default_permissions", flattenDataLakeSettingsCreateDefaultPermissions(settings.CreateDatabaseDefaultPermissions)) + d.Set("create_table_default_permissions", flattenDataLakeSettingsCreateDefaultPermissions(settings.CreateTableDefaultPermissions)) d.Set("data_lake_admins", flattenDataLakeSettingsAdmins(settings.DataLakeAdmins)) d.Set("trusted_resource_owners", flattenStringList(settings.TrustedResourceOwners)) @@ -273,11 +263,13 @@ func expandDataLakeSettingsCreateDefaultPermission(tfMap map[string]interface{}) } func flattenDataLakeSettingsCreateDefaultPermissions(apiObjects []*lakeformation.PrincipalPermissions) []map[string]interface{} { + if apiObjects == nil { + return nil + } + tfMaps := make([]map[string]interface{}, len(apiObjects)) - if len(apiObjects) > 0 { - for i, v := range apiObjects { - tfMaps[i] = flattenDataLakeSettingsCreateDefaultPermission(v) - } + for i, v := range apiObjects { + tfMaps[i] = flattenDataLakeSettingsCreateDefaultPermission(v) } return tfMaps @@ -317,7 +309,7 @@ func expandDataLakeSettingsAdmins(tfSlice []interface{}) []*lakeformation.DataLa } func flattenDataLakeSettingsAdmins(apiObjects []*lakeformation.DataLakePrincipal) []interface{} { - if apiObjects == nil || len(apiObjects) == 0 { + if apiObjects == nil { return nil } diff --git a/aws/resource_aws_lakeformation_data_lake_settings_test.go b/aws/resource_aws_lakeformation_data_lake_settings_test.go index dd32823a87d..6c2c358a526 100644 --- a/aws/resource_aws_lakeformation_data_lake_settings_test.go +++ b/aws/resource_aws_lakeformation_data_lake_settings_test.go @@ -12,23 +12,16 @@ import ( ) func TestAccAWSLakeFormationDataLakeSettings_serial(t *testing.T) { - testCases := map[string]map[string]func(t *testing.T){ - "ResourcePolicy": { - "basic": testAccAWSLakeFormationDataLakeSettings_basic, - "disappears": testAccAWSLakeFormationDataLakeSettings_disappears, - "withoutCatalogId": testAccAWSLakeFormationDataLakeSettings_withoutCatalogId, - }, + testCases := map[string]func(t *testing.T){ + "basic": testAccAWSLakeFormationDataLakeSettings_basic, + "disappears": testAccAWSLakeFormationDataLakeSettings_disappears, + "withoutCatalogId": 
testAccAWSLakeFormationDataLakeSettings_withoutCatalogId, } - for group, m := range testCases { - m := m - t.Run(group, func(t *testing.T) { - for name, tc := range m { - tc := tc - t.Run(name, func(t *testing.T) { - tc(t) - }) - } + for name, tc := range testCases { + tc := tc + t.Run(name, func(t *testing.T) { + tc(t) }) } } From b33132751f2962ba5b257c3077714148a7ab724b Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 16 Dec 2020 16:59:08 -0500 Subject: [PATCH 0266/1212] resource/lakeformation_data_lake_settings: Add docs, validate function --- ...ource_aws_lakeformation_data_lake_settings.go | 16 ++++++++-------- ...akeformation_data_lake_settings.html.markdown | 15 ++++----------- 2 files changed, 12 insertions(+), 19 deletions(-) diff --git a/aws/resource_aws_lakeformation_data_lake_settings.go b/aws/resource_aws_lakeformation_data_lake_settings.go index bbc4d6265cf..499ab50ca02 100644 --- a/aws/resource_aws_lakeformation_data_lake_settings.go +++ b/aws/resource_aws_lakeformation_data_lake_settings.go @@ -46,10 +46,10 @@ func resourceAwsLakeFormationDataLakeSettings() *schema.Resource { }, }, "principal": { - Type: schema.TypeString, - Optional: true, - Computed: true, - //ValidateFunc: validateArn, + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.NoZeroValues, // can be non-ARN, e.g. "IAM_ALLOWED_PRINCIPALS" }, }, }, @@ -71,10 +71,10 @@ func resourceAwsLakeFormationDataLakeSettings() *schema.Resource { }, }, "principal": { - Type: schema.TypeString, - Optional: true, - Computed: true, - //ValidateFunc: validateArn, + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.NoZeroValues, // can be non-ARN, e.g. "IAM_ALLOWED_PRINCIPALS" }, }, }, diff --git a/website/docs/r/lakeformation_data_lake_settings.html.markdown b/website/docs/r/lakeformation_data_lake_settings.html.markdown index bfde75c3c49..364c058b35f 100644 --- a/website/docs/r/lakeformation_data_lake_settings.html.markdown +++ b/website/docs/r/lakeformation_data_lake_settings.html.markdown @@ -10,20 +10,13 @@ description: |- Manages Lake Formation principals designated as data lake administrators and lists of principal permission entries for default create database and default create table permissions. -## Example Usage +~> **NOTE:** Lake Formation introduces fine-grained access control for data in your data lake. In order to make Lake Formation backwards compatible with existing IAM and Glue permissions, AWS introduced the `IAMAllowedPrincipals` principal. For more details, see [Changing the Default Security Settings for Your Data Lake](https://docs.aws.amazon.com/lake-formation/latest/dg/change-settings.html) and [Upgrading AWS Glue Data Permissions to the AWS Lake Formation Model](https://docs.aws.amazon.com/lake-formation/latest/dg/upgrade-glue-lake-formation.html). +## Example Usage ### Data Lake Admins ```hcl -resource "aws_iam_user" "test" { - name = "username" -} - -resource "aws_iam_role" "test" { - name = "rolename" -} - resource "aws_lakeformation_data_lake_settings" "example" { data_lake_admins = [aws_iam_user.test.arn, aws_iam_role.test.arn] } @@ -62,14 +55,14 @@ The following arguments are optional: The following arguments are optional: * `permissions` - (Optional) List of permissions that are granted to the principal. Valid values include `ALL`, `SELECT`, `ALTER`, `DROP`, `DELETE`, `INSERT`, `DESCRIBE`, `CREATE_DATABASE`, `CREATE_TABLE`, and `DATA_LOCATION_ACCESS`. 
-* `principal` - (Optional) Identifier for the Lake Formation principal. Supported principals are IAM users or IAM roles. +* `principal` - (Optional) Principal who is granted permissions. To enforce metadata and underlying data access control only by IAM on new databases and tables set `principal` to `IAM_ALLOWED_PRINCIPALS` and `permissions` to `["ALL"]`. ### create_table_default_permissions The following arguments are optional: * `permissions` - (Optional) List of permissions that are granted to the principal. Valid values include `ALL`, `SELECT`, `ALTER`, `DROP`, `DELETE`, `INSERT`, `DESCRIBE`, `CREATE_DATABASE`, `CREATE_TABLE`, and `DATA_LOCATION_ACCESS`. -* `principal` - (Optional) Identifier for the Lake Formation principal. Supported principals are IAM users or IAM roles. +* `principal` - (Optional) Principal who is granted permissions. To enforce metadata and underlying data access control only by IAM on new databases and tables set `principal` to `IAM_ALLOWED_PRINCIPALS` and `permissions` to `["ALL"]`. ## Attributes Reference From 47891b573ed3368d991d1acb404e0fd51fe19af7 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 16 Dec 2020 17:42:17 -0500 Subject: [PATCH 0267/1212] ds/lakeformat_data_lake_settings: New data source --- ...ce_aws_lakeformation_data_lake_settings.go | 106 ++++++++++++++++++ ...s_lakeformation_data_lake_settings_test.go | 56 +++++++++ aws/provider.go | 7 +- ...formation_data_lake_settings.html.markdown | 44 ++++++++ 4 files changed, 210 insertions(+), 3 deletions(-) create mode 100644 aws/data_source_aws_lakeformation_data_lake_settings.go create mode 100644 aws/data_source_aws_lakeformation_data_lake_settings_test.go create mode 100644 website/docs/d/lakeformation_data_lake_settings.html.markdown diff --git a/aws/data_source_aws_lakeformation_data_lake_settings.go b/aws/data_source_aws_lakeformation_data_lake_settings.go new file mode 100644 index 00000000000..e73e386507f --- /dev/null +++ b/aws/data_source_aws_lakeformation_data_lake_settings.go @@ -0,0 +1,106 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/lakeformation" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/hashcode" +) + +func dataSourceAwsLakeFormationDataLakeSettings() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsLakeFormationDataLakeSettingsRead, + + Schema: map[string]*schema.Schema{ + "catalog_id": { + Type: schema.TypeString, + Optional: true, + }, + "create_database_default_permissions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "permissions": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "principal": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "create_table_default_permissions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "permissions": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "principal": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "data_lake_admins": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "trusted_resource_owners": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + 
}, + }, + } +} + +func dataSourceAwsLakeFormationDataLakeSettingsRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lakeformationconn + + input := &lakeformation.GetDataLakeSettingsInput{} + + if v, ok := d.GetOk("catalog_id"); ok { + input.CatalogId = aws.String(v.(string)) + } + + output, err := conn.GetDataLakeSettings(input) + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, lakeformation.ErrCodeEntityNotFoundException) { + log.Printf("[WARN] Lake Formation data lake settings (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error reading Lake Formation data lake settings (%s): %w", d.Id(), err) + } + + if output == nil || output.DataLakeSettings == nil { + return fmt.Errorf("error reading Lake Formation data lake settings (%s): empty response", d.Id()) + } + + settings := output.DataLakeSettings + + d.Set("create_database_default_permissions", flattenDataLakeSettingsCreateDefaultPermissions(settings.CreateDatabaseDefaultPermissions)) + d.Set("create_table_default_permissions", flattenDataLakeSettingsCreateDefaultPermissions(settings.CreateTableDefaultPermissions)) + d.Set("data_lake_admins", flattenDataLakeSettingsAdmins(settings.DataLakeAdmins)) + d.Set("trusted_resource_owners", flattenStringList(settings.TrustedResourceOwners)) + + d.SetId(fmt.Sprintf("%d", hashcode.String(input.String()))) + + return nil +} diff --git a/aws/data_source_aws_lakeformation_data_lake_settings_test.go b/aws/data_source_aws_lakeformation_data_lake_settings_test.go new file mode 100644 index 00000000000..61597552d58 --- /dev/null +++ b/aws/data_source_aws_lakeformation_data_lake_settings_test.go @@ -0,0 +1,56 @@ +package aws + +import ( + "testing" + + "github.com/aws/aws-sdk-go/service/lakeformation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccAWSLakeFormationDataLakeSettingsDataSource_serial(t *testing.T) { + testCases := map[string]func(t *testing.T){ + "basic": testAccAWSLakeFormationDataLakeSettingsDataSource_basic, + // if more tests are added, they should be serial (data catalog is account-shared resource) + } + + for name, tc := range testCases { + tc := tc + t.Run(name, func(t *testing.T) { + tc(t) + }) + } +} + +func testAccAWSLakeFormationDataLakeSettingsDataSource_basic(t *testing.T) { + callerIdentityName := "data.aws_caller_identity.current" + resourceName := "data.aws_lakeformation_data_lake_settings.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLakeFormationDataLakeSettingsDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLakeFormationDataLakeSettingsDataSourceConfig_basic, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "catalog_id", callerIdentityName, "account_id"), + resource.TestCheckResourceAttr(resourceName, "data_lake_admins.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "data_lake_admins.0", callerIdentityName, "arn"), + ), + }, + }, + }) +} + +const testAccAWSLakeFormationDataLakeSettingsDataSourceConfig_basic = ` +data "aws_caller_identity" "current" {} + +resource "aws_lakeformation_data_lake_settings" "test" { + catalog_id = data.aws_caller_identity.current.account_id + data_lake_admins = [data.aws_caller_identity.current.arn] +} + +data "aws_lakeformation_data_lake_settings" "test" { + catalog_id 
= aws_lakeformation_data_lake_settings.test.catalog_id +} +` diff --git a/aws/provider.go b/aws/provider.go index 88299f99f2f..9eb5c30013f 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -271,11 +271,11 @@ func Provider() *schema.Provider { "aws_imagebuilder_image_pipeline": dataSourceAwsImageBuilderImagePipeline(), "aws_imagebuilder_image_recipe": dataSourceAwsImageBuilderImageRecipe(), "aws_imagebuilder_infrastructure_configuration": datasourceAwsImageBuilderInfrastructureConfiguration(), - "aws_internet_gateway": dataSourceAwsInternetGateway(), - "aws_iot_endpoint": dataSourceAwsIotEndpoint(), "aws_inspector_rules_packages": dataSourceAwsInspectorRulesPackages(), "aws_instance": dataSourceAwsInstance(), "aws_instances": dataSourceAwsInstances(), + "aws_internet_gateway": dataSourceAwsInternetGateway(), + "aws_iot_endpoint": dataSourceAwsIotEndpoint(), "aws_ip_ranges": dataSourceAwsIPRanges(), "aws_kinesis_stream": dataSourceAwsKinesisStream(), "aws_kms_alias": dataSourceAwsKmsAlias(), @@ -283,6 +283,7 @@ func Provider() *schema.Provider { "aws_kms_key": dataSourceAwsKmsKey(), "aws_kms_secret": dataSourceAwsKmsSecret(), "aws_kms_secrets": dataSourceAwsKmsSecrets(), + "aws_lakeformation_data_lake_settings": dataSourceAwsLakeFormationDataLakeSettings(), "aws_lambda_alias": dataSourceAwsLambdaAlias(), "aws_lambda_code_signing_config": dataSourceAwsLambdaCodeSigningConfig(), "aws_lambda_function": dataSourceAwsLambdaFunction(), @@ -290,8 +291,8 @@ func Provider() *schema.Provider { "aws_lambda_layer_version": dataSourceAwsLambdaLayerVersion(), "aws_launch_configuration": dataSourceAwsLaunchConfiguration(), "aws_launch_template": dataSourceAwsLaunchTemplate(), - "aws_lex_bot": dataSourceAwsLexBot(), "aws_lex_bot_alias": dataSourceAwsLexBotAlias(), + "aws_lex_bot": dataSourceAwsLexBot(), "aws_lex_intent": dataSourceAwsLexIntent(), "aws_lex_slot_type": dataSourceAwsLexSlotType(), "aws_mq_broker": dataSourceAwsMqBroker(), diff --git a/website/docs/d/lakeformation_data_lake_settings.html.markdown b/website/docs/d/lakeformation_data_lake_settings.html.markdown new file mode 100644 index 00000000000..7c0640a2a2a --- /dev/null +++ b/website/docs/d/lakeformation_data_lake_settings.html.markdown @@ -0,0 +1,44 @@ +--- +subcategory: "Lake Formation" +layout: "aws" +page_title: "AWS: aws_lakeformation_data_lake_settings" +description: |- + Get data lake administrators and default database and table permissions +--- + +# Data Source: aws_lakeformation_data_lake_settings + +Get Lake Formation principals designated as data lake administrators and lists of principal permission entries for default create database and default create table permissions. + +## Example Usage + +```hcl +data "aws_lakeformation_data_lake_settings" "example" { + catalog_id = "14916253649" +} +``` + +## Argument Reference + +The following arguments are optional: + +* `catalog_id` – (Optional) Identifier for the Data Catalog. By default, the account ID. + +## Attributes Reference + +In addition to arguments above, the following attributes are exported. + +* `create_database_default_permissions` - Up to three configuration blocks of principal permissions for default create database permissions. Detailed below. +* `create_table_default_permissions` - Up to three configuration blocks of principal permissions for default create table permissions. Detailed below. +* `data_lake_admins` – List of ARNs of AWS Lake Formation principals (IAM users or roles). 
+* `trusted_resource_owners` – List of the resource-owning account IDs that the caller's account can use to share their user access details (user ARNs). + +### create_database_default_permissions + +* `permissions` - List of permissions granted to the principal. Valid values include `ALL`, `SELECT`, `ALTER`, `DROP`, `DELETE`, `INSERT`, `DESCRIBE`, `CREATE_DATABASE`, `CREATE_TABLE`, and `DATA_LOCATION_ACCESS`. +* `principal` - Principal who is granted permissions. + +### create_table_default_permissions + +* `permissions` - List of permissions granted to the principal. Valid values include `ALL`, `SELECT`, `ALTER`, `DROP`, `DELETE`, `INSERT`, `DESCRIBE`, `CREATE_DATABASE`, `CREATE_TABLE`, and `DATA_LOCATION_ACCESS`. +* `principal` - Principal who is granted permissions. From acf8218e08aac8f7c87860dade633cc84462cf0d Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 16 Dec 2020 18:41:21 -0500 Subject: [PATCH 0268/1212] resource/lakeformation_data_lake_settings: Clean up before merge --- ...ce_aws_lakeformation_data_lake_settings.go | 3 +- ...ce_aws_lakeformation_data_lake_settings.go | 33 ------------------- docs/FAQ.md | 1 + docs/roadmaps/2020_August_to_October.md | 2 +- ...formation_data_lake_settings.html.markdown | 2 +- 5 files changed, 4 insertions(+), 37 deletions(-) diff --git a/aws/data_source_aws_lakeformation_data_lake_settings.go b/aws/data_source_aws_lakeformation_data_lake_settings.go index e73e386507f..01e46336a6a 100644 --- a/aws/data_source_aws_lakeformation_data_lake_settings.go +++ b/aws/data_source_aws_lakeformation_data_lake_settings.go @@ -76,6 +76,7 @@ func dataSourceAwsLakeFormationDataLakeSettingsRead(d *schema.ResourceData, meta if v, ok := d.GetOk("catalog_id"); ok { input.CatalogId = aws.String(v.(string)) } + d.SetId(fmt.Sprintf("%d", hashcode.String(input.String()))) output, err := conn.GetDataLakeSettings(input) @@ -100,7 +101,5 @@ func dataSourceAwsLakeFormationDataLakeSettingsRead(d *schema.ResourceData, meta d.Set("data_lake_admins", flattenDataLakeSettingsAdmins(settings.DataLakeAdmins)) d.Set("trusted_resource_owners", flattenStringList(settings.TrustedResourceOwners)) - d.SetId(fmt.Sprintf("%d", hashcode.String(input.String()))) - return nil } diff --git a/aws/resource_aws_lakeformation_data_lake_settings.go b/aws/resource_aws_lakeformation_data_lake_settings.go index 499ab50ca02..51463b4b756 100644 --- a/aws/resource_aws_lakeformation_data_lake_settings.go +++ b/aws/resource_aws_lakeformation_data_lake_settings.go @@ -25,7 +25,6 @@ func resourceAwsLakeFormationDataLakeSettings() *schema.Resource { Schema: map[string]*schema.Schema{ "catalog_id": { Type: schema.TypeString, - Computed: true, ForceNew: true, Optional: true, }, @@ -104,10 +103,6 @@ func resourceAwsLakeFormationDataLakeSettings() *schema.Resource { func resourceAwsLakeFormationDataLakeSettingsCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).lakeformationconn - if err := resourceAwsLakeFormationDataLakeSettingsAdminUpdate(d, meta); err != nil { - return fmt.Errorf("error updating Lake Formation data lake admins: %w", err) - } - input := &lakeformation.PutDataLakeSettingsInput{} if v, ok := d.GetOk("catalog_id"); ok { @@ -213,34 +208,6 @@ func resourceAwsLakeFormationDataLakeSettingsDelete(d *schema.ResourceData, meta return nil } -func resourceAwsLakeFormationDataLakeSettingsAdminUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).lakeformationconn - - if v, ok := d.GetOk("data_lake_admins"); ok { - input := 
&lakeformation.PutDataLakeSettingsInput{} - - if v, ok := d.GetOk("catalog_id"); ok { - input.CatalogId = aws.String(v.(string)) - } - - settings := &lakeformation.DataLakeSettings{} - settings.DataLakeAdmins = expandDataLakeSettingsAdmins(v.([]interface{})) - - input.DataLakeSettings = settings - output, err := conn.PutDataLakeSettings(input) - - if err != nil { - return err - } - - if output == nil { - return fmt.Errorf("empty response") - } - } - - return nil -} - func expandDataLakeSettingsCreateDefaultPermissions(tfMaps []interface{}) []*lakeformation.PrincipalPermissions { apiObjects := make([]*lakeformation.PrincipalPermissions, 0, len(tfMaps)) diff --git a/docs/FAQ.md b/docs/FAQ.md index 04973301df1..84ed2d8762a 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -10,6 +10,7 @@ The HashiCorp Terraform AWS provider team is : * Brian Flad, Engineering Lead - GitHub [@bflad](https://github.com/bflad) * Graham Davison, Engineer - GitHub [@gdavison](https://github.com/gdavison) * Angie Pinilla, Engineer - GitHub [@angie44](https://github.com/angie44) +* Dirk Avery (Federal), Engineer - GitHub [@YakDriver](https://github.com/yakdriver) * Bill Rich, Engineer - GitHub [@bill-rich](https://github.com/bill-rich) * Simon Davis, Engineering Manager - GitHub [@breathingdust](https://github.com/breathingdust) * Kerim Satirli, Developer Advocate - GitHub [@ksatirli](https://github.com/ksatirli) diff --git a/docs/roadmaps/2020_August_to_October.md b/docs/roadmaps/2020_August_to_October.md index 6ea8ac4413f..7408976a71c 100644 --- a/docs/roadmaps/2020_August_to_October.md +++ b/docs/roadmaps/2020_August_to_October.md @@ -54,7 +54,7 @@ Support for AWS Lake Formation will include: New Resource(s): - aws_lakeformation_resource -- aws_lakeformation_datalake_settings +- aws_lakeformation_data_lake_settings - aws_lakeformation_permissions ### AWS Serverless Application Repository diff --git a/website/docs/r/lakeformation_data_lake_settings.html.markdown b/website/docs/r/lakeformation_data_lake_settings.html.markdown index 364c058b35f..6da510981c7 100644 --- a/website/docs/r/lakeformation_data_lake_settings.html.markdown +++ b/website/docs/r/lakeformation_data_lake_settings.html.markdown @@ -10,7 +10,7 @@ description: |- Manages Lake Formation principals designated as data lake administrators and lists of principal permission entries for default create database and default create table permissions. -~> **NOTE:** Lake Formation introduces fine-grained access control for data in your data lake. In order to make Lake Formation backwards compatible with existing IAM and Glue permissions, AWS introduced the `IAMAllowedPrincipals` principal. For more details, see [Changing the Default Security Settings for Your Data Lake](https://docs.aws.amazon.com/lake-formation/latest/dg/change-settings.html) and [Upgrading AWS Glue Data Permissions to the AWS Lake Formation Model](https://docs.aws.amazon.com/lake-formation/latest/dg/upgrade-glue-lake-formation.html). +~> **NOTE:** Lake Formation introduces fine-grained access control for data in your data lake. Part of the changes include the `IAMAllowedPrincipals` principal in order to make Lake Formation backwards compatible with existing IAM and Glue permissions. 
For more information, see [Changing the Default Security Settings for Your Data Lake](https://docs.aws.amazon.com/lake-formation/latest/dg/change-settings.html) and [Upgrading AWS Glue Data Permissions to the AWS Lake Formation Model](https://docs.aws.amazon.com/lake-formation/latest/dg/upgrade-glue-lake-formation.html). ## Example Usage From 5c97264aa12f8986e6424eaafa1026292402cdec Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 16 Dec 2020 18:46:33 -0500 Subject: [PATCH 0269/1212] Update with Lake Formation resource and settings --- CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0383507a186..45df5ab8e69 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,8 +2,10 @@ FEATURES +* **New Data Source:** `aws_lakeformation_data_lake_settings` [GH-13250] * **New Resource:** `aws_codestarconnections_connection` [GH-15990] -* **New Resource:** `aws_lakeformation_resource` ([#13267](https://github.com/hashicorp/terraform-provider-aws/issues/13267)) +* **New Resource:** `aws_lakeformation_data_lake_settings` [GH-13250] +* **New Resource:** `aws_lakeformation_resource` [GH-13267] ENHANCEMENTS From 3ae747bca9c9fd1f0f2bdd62e3eaff79ec8b26b3 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 16 Dec 2020 19:12:27 -0500 Subject: [PATCH 0270/1212] docs/contributing: Add data handling and conversion documentation (#16748) Initial Contributing Guide documentation on how to handle data and its conversion. Provides a walkthrough of the current Terraform Plugin SDK type system, AWS (API and SDK) type systems, and how to convert between the two. Also includes an initial section on how the maintainers treat sensitive values in terms of the Terraform Plugin SDK implementation that exists today. In the future, this documentation can be augmented with further information on how to handle "nullable" types and other data concerns such as "passthrough" and "virtual" attributes. --- docs/CONTRIBUTING.md | 4 + docs/contributing/contribution-checklists.md | 4 +- .../data-handling-and-conversion.md | 809 ++++++++++++++++++ 3 files changed, 814 insertions(+), 3 deletions(-) create mode 100644 docs/contributing/data-handling-and-conversion.md diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index 0455bda6f02..9e32dc2a6b5 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -15,4 +15,8 @@ ability to merge PRs and respond to issues. - [Issue Reporting and Lifecycle](contributing/issue-reporting-and-lifecycle.md) - [Pull Request Submission and Lifecycle](contributing/pullrequest-submission-and-lifecycle.md) - [Contribution Types and Checklists](contributing/contribution-checklists.md) + +This documentation also contains reference material specific to certain functionality: + - [Running and Writing Acceptance Tests](contributing/running-and-writing-acceptance-tests.md) +- [Data Handling and Conversion](contributing/data-handling-and-conversion.md) diff --git a/docs/contributing/contribution-checklists.md b/docs/contributing/contribution-checklists.md index 15394ea4dc1..6b359cce4bf 100644 --- a/docs/contributing/contribution-checklists.md +++ b/docs/contributing/contribution-checklists.md @@ -478,9 +478,7 @@ guidelines. `CreateThing`, `DeleteThing`, `DescribeThing`, and `ModifyThing` the name of the resource would end in `_thing`. -- [ ] __Arguments_and_Attributes__: The HCL for arguments and attributes should - mimic the types and structs presented by the AWS API. API arguments should be - converted from `CamelCase` to `camel_case`. 
+- [ ] __Arguments_and_Attributes__: The HCL for arguments and attributes should mimic the types and structs presented by the AWS API. API arguments should be converted from `CamelCase` to `camel_case`. The resource logic for handling these should follow the recommended implementations in the [Data Handling and Conversion](data-handling-and-conversion.md) documentation.
- [ ] __Documentation__: Each data source and resource gets a page in the Terraform documentation, which lives at `website/docs/d/_.html.markdown` and `website/docs/r/_.html.markdown` respectively.
diff --git a/docs/contributing/data-handling-and-conversion.md b/docs/contributing/data-handling-and-conversion.md
new file mode 100644
index 00000000000..acc6f79ea8b
--- /dev/null
+++ b/docs/contributing/data-handling-and-conversion.md
@@ -0,0 +1,809 @@
+# Data Handling and Conversion
+
+_Please Note: This documentation is intended for Terraform AWS Provider code developers. Typical operators writing and applying Terraform configurations do not need to read or understand this material._
+
+The Terraform AWS Provider codebase bridges the implementation of a [Terraform Plugin](https://www.terraform.io/docs/extend/how-terraform-works.html) and an AWS API client to support AWS operations and data types as Terraform Resources. Data handling and conversion is a large portion of resource implementation given the domain-specific implementations on each side of the provider: on one side, Terraform is a generic infrastructure as code tool with a generic data model, while on the other, the details are driven by AWS API data modeling concepts. This guide is intended to explain and show the preferred Terraform AWS Provider code implementations required to successfully translate data between these two systems.
+
+At the bottom of this documentation is a [Glossary section](#glossary), which may be a helpful reference while reading the other sections.
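+
+To make these conversions concrete before diving into the sections below, the following is a minimal sketch of both directions for a single string value; the `hypotheticalInput` type and its `Name` field are invented for illustration and do not correspond to any real AWS API shape.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws"
+)
+
+// hypotheticalInput stands in for an AWS Go SDK operation input type;
+// the type and field names here are invented for illustration.
+type hypotheticalInput struct {
+	Name *string
+}
+
+// Terraform to AWS: a Go built-in value read from the Terraform State is
+// converted into the pointer type expected by the AWS Go SDK.
+func expandInput(name string) *hypotheticalInput {
+	return &hypotheticalInput{
+		Name: aws.String(name),
+	}
+}
+
+// AWS to Terraform: the AWS Go SDK pointer type is dereferenced back into
+// a Go built-in value before being saved into the Terraform State.
+func flattenOutput(output *hypotheticalInput) string {
+	// aws.StringValue safely returns "" when the pointer is nil.
+	return aws.StringValue(output.Name)
+}
+
+func main() {
+	fmt.Println(flattenOutput(expandInput("example")))
+}
+```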
+
+- [Data Conversions in Terraform Providers](#data-conversions-in-terraform-providers)
+- [Data Conversions in the Terraform AWS Provider](#data-conversions-in-the-terraform-aws-provider)
+  - [Type Mapping](#type-mapping)
+  - [Zero Value Mapping](#zero-value-mapping)
+  - [Root Attributes Versus Block Attributes](#root-attributes-versus-block-attributes)
+- [Recommended Implementations](#recommended-implementations)
+  - [Expand Functions for Blocks](#expand-functions-for-blocks)
+  - [Flatten Functions for Blocks](#flatten-functions-for-blocks)
+  - [Root TypeBool and AWS Boolean](#root-typebool-and-aws-boolean)
+  - [Root TypeFloat and AWS Float](#root-typefloat-and-aws-float)
+  - [Root TypeInt and AWS Integer](#root-typeint-and-aws-integer)
+  - [Root TypeList of Resource and AWS List of Structure](#root-typelist-of-resource-and-aws-list-of-structure)
+  - [Root TypeList of Resource and AWS Structure](#root-typelist-of-resource-and-aws-structure)
+  - [Root TypeList of TypeString and AWS List of String](#root-typelist-of-typestring-and-aws-list-of-string)
+  - [Root TypeMap of TypeString and AWS Map of String](#root-typemap-of-typestring-and-aws-map-of-string)
+  - [Root TypeSet of Resource and AWS List of Structure](#root-typeset-of-resource-and-aws-list-of-structure)
+  - [Root TypeSet of TypeString and AWS List of String](#root-typeset-of-typestring-and-aws-list-of-string)
+  - [Root TypeString and AWS String](#root-typestring-and-aws-string)
+  - [Nested TypeBool and AWS Boolean](#nested-typebool-and-aws-boolean)
+  - [Nested TypeFloat and AWS Float](#nested-typefloat-and-aws-float)
+  - [Nested TypeInt and AWS Integer](#nested-typeint-and-aws-integer)
+  - [Nested TypeList of Resource and AWS List of Structure](#nested-typelist-of-resource-and-aws-list-of-structure)
+  - [Nested TypeList of Resource and AWS Structure](#nested-typelist-of-resource-and-aws-structure)
+  - [Nested TypeList of TypeString and AWS List of String](#nested-typelist-of-typestring-and-aws-list-of-string)
+  - [Nested TypeMap of TypeString and AWS Map of String](#nested-typemap-of-typestring-and-aws-map-of-string)
+  - [Nested TypeSet of Resource and AWS List of Structure](#nested-typeset-of-resource-and-aws-list-of-structure)
+  - [Nested TypeSet of TypeString and AWS List of String](#nested-typeset-of-typestring-and-aws-list-of-string)
+  - [Nested TypeString and AWS String](#nested-typestring-and-aws-string)
+- [Further Guidelines](#further-guidelines)
+  - [Sensitive Values](#sensitive-values)
+- [Glossary](#glossary)
+
+## Data Conversions in Terraform Providers
+
+Before getting into highly specific documentation about the Terraform AWS Provider handling of data, it may be helpful to briefly highlight how Terraform Plugins (Terraform Providers in this case) interact with Terraform CLI and the Terraform State in general and where this documentation fits into the whole process.
+
+There are two primary data flows that are typically handled by resources within a Terraform Provider. Data is either being converted from a planned new Terraform State into a remote system request, or a remote system response is being converted into an applied new Terraform State. The semantics of how the data of the planned new Terraform State is surfaced to the resource implementation is determined by where a resource is in its lifecycle and mainly handled by Terraform CLI.
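+
+As a rough sketch of these two directions (the `service` operations, `Thing` structure, and `name` attribute below are hypothetical placeholders, not a real API):
+
+```go
+// Direction one: planned new Terraform State data is converted into a
+// remote system request.
+input := &service.CreateThingInput{
+    Name: aws.String(d.Get("name").(string)),
+}
+
+output, err := conn.CreateThing(input)
+
+if err != nil {
+    return fmt.Errorf("error creating Thing: %s", err)
+}
+
+// Direction two: the remote system response is converted into applied new
+// Terraform State data.
+d.Set("name", output.Thing.Name)
+```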
+
+This lifecycle concept can be explored further in the [Terraform Resource Instance Change Lifecycle documentation](https://github.com/hashicorp/terraform/blob/master/docs/resource-instance-change-lifecycle.md), with the caveat that some additional behaviors occur within the Terraform Plugin SDK as well (if the Terraform Plugin uses that implementation detail).
+
+As a generic walkthrough, the following data handling occurs when creating a Terraform Resource:
+
+- An operator creates a Terraform configuration with a new resource defined and runs `terraform apply`
+- Terraform CLI merges an empty prior state for the resource, along with the given configuration state, to create a planned new state for the resource
+- Terraform CLI sends a Terraform Plugin Protocol request to create the new resource with its planned new state data
+- If the Terraform Plugin is using a higher level library, such as the Terraform Plugin SDK, that library receives the request and translates the Terraform Plugin Protocol data types into the expected library types
+- Terraform Plugin invokes the resource creation function with the planned new state data
+  - **The planned new state data is converted into a remote system request (e.g. API creation request) that is invoked**
+  - **The remote system response is received and the data is converted into an applied new state**
+- If the Terraform Plugin is using a higher level library, such as the Terraform Plugin SDK, that library translates the library types back into Terraform Plugin Protocol data types
+- Terraform Plugin responds to the Terraform Plugin Protocol request with the new state data
+- Terraform CLI verifies and stores the new state
+
+The highlighted lines are the focus of this documentation today. In the future however, the Terraform AWS Provider may replace certain functionality in the items mentioning the Terraform Plugin SDK above to work around certain limitations of that particular library.
+
+## Data Conversions in the Terraform AWS Provider
+
+To expand on the data handling that occurs specifically within the Terraform AWS Provider resource implementations, the above resource creation items become the below in practice given our current usage of the Terraform Plugin SDK:
+
+- The `Create`/`CreateContext` function of a `schema.Resource` is invoked with `*schema.ResourceData` containing the planned new state data (conventionally named `d`) and an AWS API client (conventionally named `meta`).
+  - Note: Before reaching this point, the `ResourceData` was already translated from the Terraform Plugin Protocol data types by the Terraform Plugin SDK so values can be read by invoking `d.Get()` and `d.GetOk()` receiver methods with Attribute and Block names from the `Schema` of the `schema.Resource`.
+- An AWS Go SDK operation input type (e.g. `*ec2.CreateVpcInput`) is initialized
+- For each necessary field to configure in the operation input type, the data is read from the `ResourceData` (e.g. `d.Get()`, `d.GetOk()`) and converted into the AWS Go SDK type for the field (e.g. `*string`)
+- The AWS Go SDK operation is invoked and the output type (e.g. `*ec2.CreateVpcOutput`) is initialized
+- For each necessary Attribute, Block, or resource identifier to be saved in the state, the data is read from the AWS Go SDK type for the field (`*string`), if necessary converted into a `ResourceData` compatible type, and saved into a mutated `ResourceData` (e.g. `d.Set()`, `d.SetId()`)
+- The function returns
+
+### Type Mapping
+
+To further understand the necessary data conversions used throughout the Terraform AWS Provider codebase between AWS Go SDK types and the Terraform Plugin SDK, the following table can be referenced for most scenarios:
+
+| AWS API Model | AWS Go SDK | Terraform Plugin SDK | Terraform Language/State |
+|---------------|------------|----------------------|--------------------------|
+| `boolean` | `*bool` | `TypeBool` (`bool`) | `bool` |
+| `float` | `*float64` | `TypeFloat` (`float64`) | `number` |
+| `integer` | `*int64` | `TypeInt` (`int`) | `number` |
+| `list` | `[]*T` | `TypeList` (`[]interface{}` of `T`)<br/>`TypeSet` (`*schema.Set` of `T`) | `list(any)`<br/>`set(any)` |
+| `map` | `map[T1]*T2` | `TypeMap` (`map[string]interface{}`) | `map(any)` |
+| `string` | `*string` | `TypeString` (`string`) | `string` |
+| `structure` | `struct` | `TypeList` (`[]interface{}` of `map[string]interface{}`) | `list(object(any))` |
+| `timestamp` | `*time.Time` | `TypeString` (typically RFC3339 formatted) | `string` |
+
+You may notice there are type encoding differences between the AWS Go SDK and the Terraform Plugin SDK:
+
+- AWS Go SDK types are all Go pointer types, while Terraform Plugin SDK types are not.
+- AWS Go SDK structures are the Go `struct` type, while there is no semantically equivalent Terraform Plugin SDK type. Instead they are represented as a slice of interfaces with an underlying map of interfaces.
+- AWS Go SDK types are all Go concrete types, while the Terraform Plugin SDK types for collections and maps are interfaces.
+- The AWS Go SDK whole numeric type is always 64-bit, while the Terraform Plugin SDK type is implementation-specific.
+
+Conceptually, the first and second items above are the most problematic in the Terraform AWS Provider codebase: the first because non-pointer types in Go cannot implement the concept of no value (`nil`), and the second because it can be confusing to always handle a structure ("object") type as a list. The [Zero Value Mapping section](#zero-value-mapping) goes into more detail about the implications of the first limitation.
+
+_There are efforts to replace the Terraform Plugin type system with one similar to the underlying Terraform CLI type system. As these efforts materialize, this documentation will be updated._
+
+### Zero Value Mapping
+
+As mentioned in the [Type Mapping section](#type-mapping), there is a discrepancy between how the Terraform Plugin SDK represents values and the reality that a Terraform State may not configure an Attribute. These values will default to the matching underlying Go type "zero value" if not set:
+
+| Terraform Plugin SDK | Go Type | Zero Value |
+|----------------------|---------|------------|
+| `TypeBool` | `bool` | `false` |
+| `TypeFloat` | `float64` | `0.0` |
+| `TypeInt` | `int` | `0` |
+| `TypeString` | `string` | `""` |
+
+For Terraform resource logic this means that these special values must always be accounted for in implementation. The semantics of the API and the meaning of its zero value determine how each case should be handled:
+
+- If the value is not used/needed by the API, then generally the zero value can safely be used to store an "unset" value and should be ignored when sending to the API.
+- If the value is used/needed by the API, then one of the following applies:
+  - A value can always be set and it is safe to always send it to the API. Generally, boolean values fall into this category.
+  - A different default/sentinel value must be used as the "unset" value so it can either match the default of the API or be ignored when sending to the API.
+  - A special type implementation is required within the schema to work around the limitation.
+
+The maintainers can provide guidance on appropriate solutions for cases not mentioned in the [Recommended Implementations section](#recommended-implementations).
+
+### Root Attributes Versus Block Attributes
+
+All Attributes and Blocks at the top level of the `schema.Resource` `Schema` are considered "root" attributes. These will always be handled with receiver methods on `ResourceData`, such as reading with `d.Get()`, `d.GetOk()`, etc. and writing with `d.Set()`, as in the short sketch below.
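+
+For illustration, a minimal hypothetical sketch (the `example_attribute` name, `ExampleOperationInput` type, and `Thing` structure are placeholders, not a real API):
+
+```go
+input := service.ExampleOperationInput{}
+
+// d.GetOk() reports whether the root attribute is set to a non-zero value;
+// d.Get() alone would return the type's zero value ("" for TypeString).
+if v, ok := d.GetOk("example_attribute"); ok {
+    input.ExampleAttribute = aws.String(v.(string))
+}
+
+// Writing a root attribute back into the Terraform State:
+d.Set("example_attribute", output.Thing.ExampleAttribute)
+```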
+
+Any nested Attributes and Blocks inside those root Blocks will then be handled with standard Go types according to the table in the [Type Mapping section](#type-mapping).
+
+By convention in the codebase, each level of Block handling beyond root attributes should be separated into "expand" functions that convert Terraform Plugin SDK data into the equivalent AWS Go SDK type (typically named `expand{Service}{Type}`) and "flatten" functions that convert an AWS Go SDK type into the equivalent Terraform Plugin SDK data (typically named `flatten{Service}{Type}`). The [Recommended Implementations section](#recommended-implementations) will go into those details.
+
+_NOTE: While it is possible in certain type scenarios to deeply read and write ResourceData information for a Block Attribute, this practice is discouraged in favor of only handling root Attributes and Blocks._
+
+## Recommended Implementations
+
+Given the various complexities around the Terraform Plugin SDK type system, this section contains recommended implementations for Terraform AWS Provider resource code based on the [Type Mapping section](#type-mapping) and the features of the Terraform Plugin SDK and AWS Go SDK. The eventual goal and styling for many of these recommendations is to ease static analysis of the codebase and future potential code generation efforts.
+
+_Some of these coding patterns may not be well represented in the codebase, as refactoring the many older styles over years of community development is a large task; however, this is meant to represent the most preferable implementations today. These will continue to evolve as this codebase and the Terraform Plugin ecosystem change._
+
+### Expand Functions for Blocks
+
+```go
+func expandServiceStructure(tfMap map[string]interface{}) *service.Structure {
+    if tfMap == nil {
+        return nil
+    }
+
+    apiObject := &service.Structure{}
+
+    // ... nested attribute handling ...
+
+    return apiObject
+}
+
+func expandServiceStructures(tfList []interface{}) []*service.Structure {
+    if len(tfList) == 0 {
+        return nil
+    }
+
+    var apiObjects []*service.Structure
+
+    for _, tfMapRaw := range tfList {
+        tfMap, ok := tfMapRaw.(map[string]interface{})
+
+        if !ok {
+            continue
+        }
+
+        apiObject := expandServiceStructure(tfMap)
+
+        if apiObject == nil {
+            continue
+        }
+
+        apiObjects = append(apiObjects, apiObject)
+    }
+
+    return apiObjects
+}
+```
+
+### Flatten Functions for Blocks
+
+```go
+func flattenServiceStructure(apiObject *service.Structure) map[string]interface{} {
+    if apiObject == nil {
+        return nil
+    }
+
+    tfMap := map[string]interface{}{}
+
+    // ... nested attribute handling ...
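+    // (Each member of apiObject is converted and written into tfMap here;
+    // the "Nested ..." sections below show the per-type patterns.)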
+
+    return tfMap
+}
+
+func flattenServiceStructures(apiObjects []*service.Structure) []interface{} {
+    if len(apiObjects) == 0 {
+        return nil
+    }
+
+    var tfList []interface{}
+
+    for _, apiObject := range apiObjects {
+        if apiObject == nil {
+            continue
+        }
+
+        tfList = append(tfList, flattenServiceStructure(apiObject))
+    }
+
+    return tfList
+}
+```
+
+### Root TypeBool and AWS Boolean
+
+To read, if always sending the attribute value is correct:
+
+```go
+input := service.ExampleOperationInput{
+    AttributeName: aws.Bool(d.Get("attribute_name").(bool)),
+}
+```
+
+Otherwise to read, if only sending the attribute value when `true` is preferred (`!ok` for the opposite):
+
+```go
+input := service.ExampleOperationInput{}
+
+if v, ok := d.GetOk("attribute_name"); ok {
+    input.AttributeName = aws.Bool(v.(bool))
+}
+```
+
+To write:
+
+```go
+d.Set("attribute_name", output.Thing.AttributeName)
+```
+
+### Root TypeFloat and AWS Float
+
+To read:
+
+```go
+input := service.ExampleOperationInput{}
+
+if v, ok := d.GetOk("attribute_name"); ok {
+    input.AttributeName = aws.Float64(v.(float64))
+}
+```
+
+To write:
+
+```go
+d.Set("attribute_name", output.Thing.AttributeName)
+```
+
+### Root TypeInt and AWS Integer
+
+To read:
+
+```go
+input := service.ExampleOperationInput{}
+
+if v, ok := d.GetOk("attribute_name"); ok {
+    input.AttributeName = aws.Int64(int64(v.(int)))
+}
+```
+
+To write:
+
+```go
+d.Set("attribute_name", output.Thing.AttributeName)
+```
+
+### Root TypeList of Resource and AWS List of Structure
+
+To read:
+
+```go
+input := service.ExampleOperationInput{}
+
+if v, ok := d.GetOk("attribute_name"); ok && len(v.([]interface{})) > 0 {
+    input.AttributeName = expandServiceStructures(v.([]interface{}))
+}
+```
+
+To write:
+
+```go
+d.Set("attribute_name", flattenServiceStructures(output.Thing.AttributeName))
+```
+
+### Root TypeList of Resource and AWS Structure
+
+To read:
+
+```go
+input := service.ExampleOperationInput{}
+
+if v, ok := d.GetOk("attribute_name"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil {
+    input.AttributeName = expandServiceStructure(v.([]interface{})[0].(map[string]interface{}))
+}
+```
+
+To write (_likely to have a helper function introduced soon_):
+
+```go
+if output.Thing.AttributeName != nil {
+    d.Set("attribute_name", []interface{}{flattenServiceStructure(output.Thing.AttributeName)})
+} else {
+    d.Set("attribute_name", nil)
+}
+```
+
+### Root TypeList of TypeString and AWS List of String
+
+To read:
+
+```go
+input := service.ExampleOperationInput{}
+
+if v, ok := d.GetOk("attribute_name"); ok && len(v.([]interface{})) > 0 {
+    input.AttributeName = expandStringList(v.([]interface{}))
+}
+```
+
+To write:
+
+```go
+d.Set("attribute_name", aws.StringValueSlice(output.Thing.AttributeName))
+```
+
+### Root TypeMap of TypeString and AWS Map of String
+
+To read:
+
+```go
+input := service.ExampleOperationInput{}
+
+if v, ok := d.GetOk("attribute_name"); ok && len(v.(map[string]interface{})) > 0 {
+    input.AttributeName = stringMapToPointers(v.(map[string]interface{}))
+}
+```
+
+To write:
+
+```go
+d.Set("attribute_name", aws.StringValueMap(output.Thing.AttributeName))
+```
+
+### Root TypeSet of Resource and AWS List of Structure
+
+To read:
+
+```go
+input := service.ExampleOperationInput{}
+
+if v, ok := d.GetOk("attribute_name"); ok && v.(*schema.Set).Len() > 0 {
+    input.AttributeName = expandServiceStructures(v.(*schema.Set).List())
+}
+```
+
+To write:
+
+```go
+d.Set("attribute_name", flattenServiceStructures(output.Thing.AttributeName))
+```
+
+### Root TypeSet of TypeString and AWS List of String
+
+To read:
+
+```go
+input := service.ExampleOperationInput{}
+
+if v, ok := d.GetOk("attribute_name"); ok && v.(*schema.Set).Len() > 0 {
+    input.AttributeName = expandStringSet(v.(*schema.Set))
+}
+```
+
+To write:
+
+```go
+d.Set("attribute_name", aws.StringValueSlice(output.Thing.AttributeName))
+```
+
+### Root TypeString and AWS String
+
+To read:
+
+```go
+input := service.ExampleOperationInput{}
+
+if v, ok := d.GetOk("attribute_name"); ok {
+    input.AttributeName = aws.String(v.(string))
+}
+```
+
+To write:
+
+```go
+d.Set("attribute_name", output.Thing.AttributeName)
+```
+
+### Nested TypeBool and AWS Boolean
+
+To read, if always sending the attribute value is correct:
+
+```go
+func expandServiceStructure(tfMap map[string]interface{}) *service.Structure {
+    // ...
+
+    if v, ok := tfMap["nested_attribute_name"].(bool); ok {
+        apiObject.NestedAttributeName = aws.Bool(v)
+    }
+
+    // ...
+}
+```
+
+To read, if only sending the attribute value when `true` is preferred (`!v` for the opposite):
+
+```go
+func expandServiceStructure(tfMap map[string]interface{}) *service.Structure {
+    // ...
+
+    if v, ok := tfMap["nested_attribute_name"].(bool); ok && v {
+        apiObject.NestedAttributeName = aws.Bool(v)
+    }
+
+    // ...
+}
+```
+
+To write:
+
+```go
+func flattenServiceStructure(apiObject *service.Structure) map[string]interface{} {
+    // ...
+
+    if v := apiObject.NestedAttributeName; v != nil {
+        tfMap["nested_attribute_name"] = aws.BoolValue(v)
+    }
+
+    // ...
+}
+```
+
+### Nested TypeFloat and AWS Float
+
+To read:
+
+```go
+func expandServiceStructure(tfMap map[string]interface{}) *service.Structure {
+    // ...
+
+    if v, ok := tfMap["nested_attribute_name"].(float64); ok && v != 0.0 {
+        apiObject.NestedAttributeName = aws.Float64(v)
+    }
+
+    // ...
+}
+```
+
+To write:
+
+```go
+func flattenServiceStructure(apiObject *service.Structure) map[string]interface{} {
+    // ...
+
+    if v := apiObject.NestedAttributeName; v != nil {
+        tfMap["nested_attribute_name"] = aws.Float64Value(v)
+    }
+
+    // ...
+}
+```
+
+### Nested TypeInt and AWS Integer
+
+To read:
+
+```go
+func expandServiceStructure(tfMap map[string]interface{}) *service.Structure {
+    // ...
+
+    if v, ok := tfMap["nested_attribute_name"].(int); ok && v != 0 {
+        apiObject.NestedAttributeName = aws.Int64(int64(v))
+    }
+
+    // ...
+}
+```
+
+To write:
+
+```go
+func flattenServiceStructure(apiObject *service.Structure) map[string]interface{} {
+    // ...
+
+    if v := apiObject.NestedAttributeName; v != nil {
+        tfMap["nested_attribute_name"] = aws.Int64Value(v)
+    }
+
+    // ...
+}
+```
+
+### Nested TypeList of Resource and AWS List of Structure
+
+To read:
+
+```go
+func expandServiceStructure(tfMap map[string]interface{}) *service.Structure {
+    // ...
+
+    if v, ok := tfMap["nested_attribute_name"].([]interface{}); ok && len(v) > 0 {
+        apiObject.NestedAttributeName = expandServiceNestedStructures(v)
+    }
+
+    // ...
+}
+```
+
+To write:
+
+```go
+func flattenServiceStructure(apiObject *service.Structure) map[string]interface{} {
+    // ...
+
+    if v := apiObject.NestedAttributeName; v != nil {
+        tfMap["nested_attribute_name"] = flattenServiceNestedStructures(v)
+    }
+
+    // ...
+}
+```
+
+### Nested TypeList of Resource and AWS Structure
+
+To read:
+
+```go
+func expandServiceStructure(tfMap map[string]interface{}) *service.Structure {
+    // ...
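+
+    // Note: the Terraform Plugin SDK conveys a single structure ("object")
+    // as a one-element list, hence the v[0] element access below.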
+    if v, ok := tfMap["nested_attribute_name"].([]interface{}); ok && len(v) > 0 && v[0] != nil {
+        apiObject.NestedAttributeName = expandServiceNestedStructure(v[0].(map[string]interface{}))
+    }
+
+    // ...
+}
+```
+
+To write:
+
+```go
+func flattenServiceStructure(apiObject *service.Structure) map[string]interface{} {
+    // ...
+
+    if v := apiObject.NestedAttributeName; v != nil {
+        tfMap["nested_attribute_name"] = []interface{}{flattenServiceNestedStructure(v)}
+    }
+
+    // ...
+}
+```
+
+### Nested TypeList of TypeString and AWS List of String
+
+To read:
+
+```go
+func expandServiceStructure(tfMap map[string]interface{}) *service.Structure {
+    // ...
+
+    if v, ok := tfMap["nested_attribute_name"].([]interface{}); ok && len(v) > 0 {
+        apiObject.NestedAttributeName = expandStringList(v)
+    }
+
+    // ...
+}
+```
+
+To write:
+
+```go
+func flattenServiceStructure(apiObject *service.Structure) map[string]interface{} {
+    // ...
+
+    if v := apiObject.NestedAttributeName; v != nil {
+        tfMap["nested_attribute_name"] = aws.StringValueSlice(v)
+    }
+
+    // ...
+}
+```
+
+### Nested TypeMap of TypeString and AWS Map of String
+
+To read:
+
+```go
+func expandServiceStructure(tfMap map[string]interface{}) *service.Structure {
+    // ...
+
+    if v, ok := tfMap["nested_attribute_name"].(map[string]interface{}); ok && len(v) > 0 {
+        apiObject.NestedAttributeName = stringMapToPointers(v)
+    }
+
+    // ...
+}
+```
+
+To write:
+
+```go
+func flattenServiceStructure(apiObject *service.Structure) map[string]interface{} {
+    // ...
+
+    if v := apiObject.NestedAttributeName; v != nil {
+        tfMap["nested_attribute_name"] = aws.StringValueMap(v)
+    }
+
+    // ...
+}
+```
+
+### Nested TypeSet of Resource and AWS List of Structure
+
+To read:
+
+```go
+func expandServiceStructure(tfMap map[string]interface{}) *service.Structure {
+    // ...
+
+    if v, ok := tfMap["nested_attribute_name"].(*schema.Set); ok && v.Len() > 0 {
+        apiObject.NestedAttributeName = expandServiceNestedStructures(v.List())
+    }
+
+    // ...
+}
+```
+
+To write:
+
+```go
+func flattenServiceStructure(apiObject *service.Structure) map[string]interface{} {
+    // ...
+
+    if v := apiObject.NestedAttributeName; v != nil {
+        tfMap["nested_attribute_name"] = flattenServiceNestedStructures(v)
+    }
+
+    // ...
+}
+```
+
+### Nested TypeSet of TypeString and AWS List of String
+
+To read:
+
+```go
+func expandServiceStructure(tfMap map[string]interface{}) *service.Structure {
+    // ...
+
+    if v, ok := tfMap["nested_attribute_name"].(*schema.Set); ok && v.Len() > 0 {
+        apiObject.NestedAttributeName = expandStringSet(v)
+    }
+
+    // ...
+}
+```
+
+To write:
+
+```go
+func flattenServiceStructure(apiObject *service.Structure) map[string]interface{} {
+    // ...
+
+    if v := apiObject.NestedAttributeName; v != nil {
+        tfMap["nested_attribute_name"] = aws.StringValueSlice(v)
+    }
+
+    // ...
+}
+```
+
+### Nested TypeString and AWS String
+
+To read:
+
+```go
+func expandServiceStructure(tfMap map[string]interface{}) *service.Structure {
+    // ...
+
+    if v, ok := tfMap["nested_attribute_name"].(string); ok && v != "" {
+        apiObject.NestedAttributeName = aws.String(v)
+    }
+
+    // ...
+}
+```
+
+To write:
+
+```go
+func flattenServiceStructure(apiObject *service.Structure) map[string]interface{} {
+    // ...
+
+    if v := apiObject.NestedAttributeName; v != nil {
+        tfMap["nested_attribute_name"] = aws.StringValue(v)
+    }
+
+    // ...
+}
+```
+
+## Further Guidelines
+
+This section includes additional topics related to data design and decision making from the Terraform AWS Provider maintainers.
+
+### Sensitive Values
+
+Marking an Attribute in the Terraform Plugin SDK Schema with `Sensitive` has the following real world implications:
+
+- All occurrences of the Attribute will have the value hidden in plan difference output. In the context of an Attribute within a Block, all Blocks will hide all values of the Attribute.
+- In Terraform CLI 0.14 (with the `provider_sensitive_attrs` experiment enabled) and later, any downstream references to the value in other configuration will hide the value in plan difference output.
+
+The value is either always hidden or not, as the Terraform Plugin SDK does not currently implement conditional support for this functionality. Since Terraform Configurations have no control over the behavior, hiding values from the plan difference can incur a potentially undesirable user experience cost for operators.
+
+Given that, and especially with the improvements in Terraform CLI 0.14, the Terraform AWS Provider maintainers' guiding principles for determining whether an Attribute should be marked as `Sensitive` are that an Attribute value:
+
+- Objectively will always contain a credential, password, or other secret material. Operators can have differing opinions on what constitutes secret material and the maintainers will make best effort determinations, if necessary consulting with the HashiCorp Security team.
+- If the Attribute is within a Block, will objectively contain secret material in all occurrences of the Attribute value. Some APIs (and therefore the Terraform AWS Provider resources) implement generic "setting" and "value" structures which likely will contain a mixture of secret and non-secret material. These will generally not be accepted for marking as `Sensitive`.
+
+If you are unsatisfied with sensitive value handling, the maintainers can recommend ensuring there is a covering issue in the Terraform CLI and/or Terraform Plugin SDK projects explaining the use case. Ultimately, Terraform Plugins including the Terraform AWS Provider cannot implement their own sensitive value capabilities if the upstream projects do not implement the appropriate functionality.
+
+## Glossary
+
+Below is a listing of relevant terms and descriptions for data handling and conversion in the Terraform AWS Provider to establish common conventions throughout this documentation. This list is not exhaustive of all concepts of Terraform Plugins, the Terraform AWS Provider, or the data handling that occurs during Terraform runs, but these should generally provide enough context about the topics discussed here.
+
+- **AWS Go SDK**: Library that converts Go code into AWS Service API compatible operations and data types. Currently refers to version 1 (v1), available since 2015; version 2 (v2) will reach general availability status soon. [Project](https://github.com/aws/aws-sdk-go).
+- **AWS Go SDK Model**: AWS Go SDK compatible format of AWS Service API Model.
+- **AWS Go SDK Service**: AWS Service API Go code generated from the AWS Go SDK Model. Generated by the AWS Go SDK code.
+- **AWS Service API**: Logical boundary of an AWS service by API endpoint. Some large AWS services may be marketed with many different product names under the same service API (e.g. VPC functionality is part of the EC2 API) and vice-versa where some services may be marketed with one product name but are split into multiple service APIs (e.g. Single Sign-On functionality is split into the Identity Store and SSO Admin APIs).
+- **AWS Service API Model**: Declarative description of the AWS Service API operations and data types. Generated by the AWS service teams. Used to operate the API and generate API clients such as the various AWS Software Development Kits (SDKs).
+- **Terraform Language** ("Configuration"): Configuration syntax interpreted by the Terraform CLI. An implementation of [HCL](https://github.com/hashicorp/hcl). [Full Documentation](https://www.terraform.io/docs/configuration/index.html).
+- **Terraform Plugin Protocol**: Description of Terraform Plugin operations and data types. Currently based on the Remote Procedure Call (RPC) library [`gRPC`](https://grpc.io/).
+- **Terraform Plugin Go**: Low-level library that converts Go code into Terraform Plugin Protocol compatible operations and data types. Not currently implemented in the Terraform AWS Provider. [Project](https://github.com/hashicorp/terraform-plugin-go).
+- **Terraform Plugin SDK**: High-level library that converts Go code into Terraform Plugin Protocol compatible operations and data types. [Project](https://github.com/hashicorp/terraform-plugin-sdk).
+- **Terraform Plugin SDK Schema**: Declarative description of types and domain specific behaviors for a Terraform provider, including resources and attributes. [Full Documentation](https://www.terraform.io/docs/extend/schemas/index.html).
+- **Terraform State**: Bindings between objects in a remote system (e.g. an EC2 VPC) and a Terraform configuration (e.g. an `aws_vpc` resource configuration). [Full Documentation](https://www.terraform.io/docs/state/index.html).
+
+AWS Service API Models use specific terminology to describe data and types:
+
+- **Enumeration**: Collection of valid values for a Shape.
+- **Operation**: An API call. Includes information about input, output, and error Shapes.
+- **Shape**: Type description.
+  - **boolean**: Boolean value.
+  - **float**: Fractional numeric value. May contain value validation such as maximum or minimum.
+  - **integer**: Whole numeric value. May contain value validation such as maximum or minimum.
+  - **list**: Collection that contains member Shapes. May contain value validation such as maximum or minimum number of items.
+  - **map**: Grouping of key Shape to value Shape. May contain value validation such as maximum or minimum keys.
+  - **string**: Sequence of characters. May contain value validation such as an enumeration, regular expression pattern, maximum length, or minimum length.
+  - **structure**: Object that contains member Shapes. May represent an error.
+  - **timestamp**: Date and time value.
+
+The Terraform Language uses the following terminology to describe data and types:
+
+- **Attribute** ("Argument"): Assigns a name to a data value.
+- **Block** ("Configuration Block"): Container type for Attributes or Blocks.
+- **null**: Virtual value equivalent to the Attribute not being set.
+- **Types**: [Full Documentation](https://www.terraform.io/docs/configuration/expressions/types.html).
+  - **any**: Virtual type representing any concrete type in type declarations.
+  - **bool**: Boolean value.
+  - **list** ("tuple"): Ordered collection of values.
+  - **map** ("object"): Grouping of string keys to values.
+  - **number**: Numeric value. Can be either whole or fractional numbers.
+  - **set**: Unordered collection of values.
+  - **string**: Sequence of characters.
+
+Terraform Plugin SDK Schemas use the following terminology to describe data and types:
+
+- **Behaviors**: [Full Documentation](https://www.terraform.io/docs/extend/schemas/schema-behaviors.html).
+  - **Sensitive**: Whether the value should be hidden from user interface output.
+  - **StateFunc**: Conversion function between the value set by the Terraform Plugin and the value seen by the Terraform Plugin SDK (and ultimately the Terraform State).
+- **Element**: Underlying value type for a collection or grouping Schema.
+- **Resource Data**: Data representation of a Resource Schema. Translation layer between the Schema and Go code of a Terraform Plugin. In the Terraform Plugin SDK, the `ResourceData` Go type.
+- **Resource Schema**: Grouping of Schema that represents a Terraform Resource.
+- **Schema**: Represents an Attribute or Block. Has a Type and Behavior(s).
+- **Types**: [Full Documentation](https://www.terraform.io/docs/extend/schemas/schema-types.html).
+  - **TypeBool**: Boolean value.
+  - **TypeFloat**: Fractional numeric value.
+  - **TypeInt**: Whole numeric value.
+  - **TypeList**: Ordered collection of values or Blocks.
+  - **TypeMap**: Grouping of key Type to value Type.
+  - **TypeSet**: Unordered collection of values or Blocks.
+  - **TypeString**: Sequence of characters value.
+
+Some other terms that may be used:
+
+- **Block Attribute** ("Child Attribute", "Nested Attribute"): Block level Attribute.
+- **Expand Function**: Function that converts Terraform Plugin SDK data into the equivalent AWS Go SDK type.
+- **Flatten Function**: Function that converts an AWS Go SDK type into the equivalent Terraform Plugin SDK data.
+- **NullableTypeBool**: Workaround "schema type" created to accept a boolean value that is not configured in addition to true and false. Not implemented in the Terraform Plugin SDK, but uses `TypeString` (where `""` represents not configured) and additional validation.
+- **NullableTypeFloat**: Workaround "schema type" created to accept a fractional numeric value that is not configured in addition to `0.0`. Not implemented in the Terraform Plugin SDK, but uses `TypeString` (where `""` represents not configured) and additional validation.
+- **NullableTypeInt**: Workaround "schema type" created to accept a whole numeric value that is not configured in addition to `0`. Not implemented in the Terraform Plugin SDK, but uses `TypeString` (where `""` represents not configured) and additional validation.
+- **Root Attribute**: Resource top level Attribute or Block.
+
+For additional reference, the Terraform documentation also includes a [full glossary of terminology](https://www.terraform.io/docs/glossary.html).
From 68ae04c1838486629a1975a36381a02becf4bbfb Mon Sep 17 00:00:00 2001 From: Rajiv Shah Date: Wed, 16 Dec 2020 19:35:39 -0500 Subject: [PATCH 0271/1212] service/ec2: Add throughput attribute to aws_instance resource and data source (#16620) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16514 Output from acceptance testing (test failure unrelated and noted in https://github.com/hashicorp/terraform-provider-aws/issues/16769): ``` --- FAIL: TestAccAWSInstance_instanceProfileChange (317.03s) --- PASS: TestAccAWSInstance_addSecondaryInterface (165.55s) --- PASS: TestAccAWSInstance_addSecurityGroupNetworkInterface (159.08s) --- PASS: TestAccAWSInstance_associatePublic_defaultPrivate (96.96s) --- PASS: TestAccAWSInstance_associatePublic_defaultPublic (88.29s) --- PASS: TestAccAWSInstance_associatePublic_explicitPrivate (98.92s) --- PASS: TestAccAWSInstance_associatePublic_explicitPublic (78.79s) --- PASS: TestAccAWSInstance_associatePublic_overridePrivate (87.75s) --- PASS: TestAccAWSInstance_associatePublic_overridePublic (78.38s) --- PASS: TestAccAWSInstance_associatePublicIPAndPrivateIP (259.61s) --- PASS: TestAccAWSInstance_atLeastOneOtherEbsVolume (147.90s) --- PASS: TestAccAWSInstance_basic (76.37s) --- PASS: TestAccAWSInstance_blockDevices (84.41s) --- PASS: TestAccAWSInstance_changeInstanceType (374.14s) --- PASS: TestAccAWSInstance_CreditSpecification_Empty_NonBurstable (332.05s) --- PASS: TestAccAWSInstance_creditSpecification_isNotAppliedToNonBurstable (122.53s) --- PASS: TestAccAWSInstance_creditSpecification_standardCpuCredits (125.02s) --- PASS: TestAccAWSInstance_creditSpecification_standardCpuCredits_t2Tot3Taint (294.22s) --- PASS: TestAccAWSInstance_creditSpecification_unknownCpuCredits_t2 (108.40s) --- PASS: TestAccAWSInstance_creditSpecification_unknownCpuCredits_t3 (310.98s) --- PASS: TestAccAWSInstance_creditSpecification_unlimitedCpuCredits (135.78s) --- PASS: TestAccAWSInstance_creditSpecification_unlimitedCpuCredits_t2Tot3Taint (288.61s) --- PASS: TestAccAWSInstance_creditSpecification_unspecifiedDefaultsToStandard (94.79s) --- PASS: TestAccAWSInstance_CreditSpecification_UnspecifiedToEmpty_NonBurstable (124.98s) --- PASS: TestAccAWSInstance_creditSpecification_updateCpuCredits (336.93s) --- PASS: TestAccAWSInstance_creditSpecificationT3_standardCpuCredits (216.49s) --- PASS: TestAccAWSInstance_creditSpecificationT3_unlimitedCpuCredits (224.03s) --- PASS: TestAccAWSInstance_creditSpecificationT3_unspecifiedDefaultsToUnlimited (107.79s) --- PASS: TestAccAWSInstance_creditSpecificationT3_updateCpuCredits (317.85s) --- PASS: TestAccAWSInstance_dedicatedInstance (126.43s) --- PASS: TestAccAWSInstance_disableApiTermination (154.23s) --- PASS: TestAccAWSInstance_disappears (151.57s) --- PASS: TestAccAWSInstance_EbsBlockDevice_InvalidIopsForVolumeType (11.08s) --- PASS: TestAccAWSInstance_EbsBlockDevice_InvalidThroughputForVolumeType (11.48s) --- PASS: TestAccAWSInstance_EbsBlockDevice_KmsKeyArn (72.43s) --- PASS: TestAccAWSInstance_EbsRootDevice_basic (191.38s) --- PASS: TestAccAWSInstance_EbsRootDevice_ModifyAll (153.72s) --- PASS: TestAccAWSInstance_EbsRootDevice_ModifyDeleteOnTermination (173.98s) --- PASS: TestAccAWSInstance_EbsRootDevice_ModifyIOPS_Io1 (230.44s) --- PASS: TestAccAWSInstance_EbsRootDevice_ModifyIOPS_Io2 (246.25s) --- PASS: TestAccAWSInstance_EbsRootDevice_ModifySize (225.70s) --- PASS: TestAccAWSInstance_EbsRootDevice_ModifyThroughput_Gp3 (221.01s) --- PASS: TestAccAWSInstance_EbsRootDevice_ModifyType (230.40s) --- PASS: 
TestAccAWSInstance_EbsRootDevice_MultipleBlockDevices_ModifyDeleteOnTermination (118.15s) --- PASS: TestAccAWSInstance_EbsRootDevice_MultipleBlockDevices_ModifySize (138.47s) --- PASS: TestAccAWSInstance_EbsRootDevice_MultipleDynamicEBSBlockDevices (209.99s) --- PASS: TestAccAWSInstance_Empty_PrivateIP (247.91s) --- PASS: TestAccAWSInstance_enclaveOptions (524.76s) --- PASS: TestAccAWSInstance_forceNewAndTagsDrift (301.26s) --- PASS: TestAccAWSInstance_getPasswordData_falseToTrue (195.55s) --- PASS: TestAccAWSInstance_getPasswordData_trueToFalse (284.94s) --- PASS: TestAccAWSInstance_GP2IopsDevice (86.28s) --- PASS: TestAccAWSInstance_GP2WithIopsValue (9.66s) --- PASS: TestAccAWSInstance_hibernation (284.60s) --- PASS: TestAccAWSInstance_inDefaultVpcBySgId (96.80s) --- PASS: TestAccAWSInstance_inDefaultVpcBySgName (186.86s) --- PASS: TestAccAWSInstance_ipv6_supportAddressCount (101.07s) --- PASS: TestAccAWSInstance_ipv6_supportAddressCountWithIpv4 (143.20s) --- PASS: TestAccAWSInstance_ipv6AddressCountAndSingleAddressCausesError (13.53s) --- PASS: TestAccAWSInstance_keyPairCheck (249.89s) --- PASS: TestAccAWSInstance_metadataOptions (245.64s) --- PASS: TestAccAWSInstance_NetworkInstanceRemovingAllSecurityGroups (285.45s) --- PASS: TestAccAWSInstance_NetworkInstanceSecurityGroups (147.15s) --- PASS: TestAccAWSInstance_NetworkInstanceVPCSecurityGroupIDs (202.59s) --- PASS: TestAccAWSInstance_NewNetworkInterface_EmptyPrivateIPAndSecondaryPrivateIPs (344.11s) --- PASS: TestAccAWSInstance_NewNetworkInterface_EmptyPrivateIPAndSecondaryPrivateIPsUpdate (163.40s) --- PASS: TestAccAWSInstance_NewNetworkInterface_PrivateIPAndSecondaryPrivateIPs (355.32s) --- PASS: TestAccAWSInstance_NewNetworkInterface_PrivateIPAndSecondaryPrivateIPsUpdate (144.22s) --- PASS: TestAccAWSInstance_NewNetworkInterface_PublicIPAndSecondaryPrivateIPs (415.89s) --- PASS: TestAccAWSInstance_noAMIEphemeralDevices (91.80s) --- PASS: TestAccAWSInstance_placementGroup (117.55s) --- PASS: TestAccAWSInstance_primaryNetworkInterface (132.58s) --- PASS: TestAccAWSInstance_primaryNetworkInterfaceSourceDestCheck (244.37s) --- PASS: TestAccAWSInstance_privateIP (259.77s) --- PASS: TestAccAWSInstance_RootBlockDevice_KmsKeyArn (99.86s) --- PASS: TestAccAWSInstance_rootBlockDeviceMismatch (234.60s) --- PASS: TestAccAWSInstance_rootInstanceStore (118.53s) --- PASS: TestAccAWSInstance_sourceDestCheck (258.89s) --- PASS: TestAccAWSInstance_tags (318.09s) --- PASS: TestAccAWSInstance_UserData_EmptyStringToUnspecified (237.74s) --- PASS: TestAccAWSInstance_UserData_UnspecifiedToEmptyString (236.79s) --- PASS: TestAccAWSInstance_userDataBase64 (88.37s) --- PASS: TestAccAWSInstance_volumeTags (349.07s) --- PASS: TestAccAWSInstance_volumeTagsComputed (291.98s) --- PASS: TestAccAWSInstance_withIamInstanceProfile (276.41s) --- SKIP: TestAccAWSInstance_inEc2Classic (1.59s) --- SKIP: TestAccAWSInstance_outpost (1.89s) --- PASS: TestAccAWSInstanceDataSource_AzUserData (149.91s) --- PASS: TestAccAWSInstanceDataSource_basic (154.74s) --- PASS: TestAccAWSInstanceDataSource_blockDevices (111.39s) --- PASS: TestAccAWSInstanceDataSource_creditSpecification (188.40s) --- PASS: TestAccAWSInstanceDataSource_EbsBlockDevice_KmsKeyId (153.00s) --- PASS: TestAccAWSInstanceDataSource_enclaveOptions (332.35s) --- PASS: TestAccAWSInstanceDataSource_getPasswordData_falseToTrue (207.01s) --- PASS: TestAccAWSInstanceDataSource_getPasswordData_trueToFalse (240.13s) --- PASS: TestAccAWSInstanceDataSource_GetUserData (204.58s) --- PASS: 
TestAccAWSInstanceDataSource_GetUserData_NoUserData (194.61s) --- PASS: TestAccAWSInstanceDataSource_gp2IopsDevice (393.36s) --- PASS: TestAccAWSInstanceDataSource_gp3ThroughputDevice (134.01s) --- PASS: TestAccAWSInstanceDataSource_keyPair (143.35s) --- PASS: TestAccAWSInstanceDataSource_metadataOptions (311.32s) --- PASS: TestAccAWSInstanceDataSource_PlacementGroup (358.55s) --- PASS: TestAccAWSInstanceDataSource_privateIP (150.46s) --- PASS: TestAccAWSInstanceDataSource_RootBlockDevice_KmsKeyId (163.76s) --- PASS: TestAccAWSInstanceDataSource_rootInstanceStore (374.57s) --- PASS: TestAccAWSInstanceDataSource_secondaryPrivateIPs (146.10s) --- PASS: TestAccAWSInstanceDataSource_SecurityGroups (162.74s) --- PASS: TestAccAWSInstanceDataSource_tags (161.67s) --- PASS: TestAccAWSInstanceDataSource_VPC (170.24s) --- PASS: TestAccAWSInstanceDataSource_VPCSecurityGroups (158.43s) --- PASS: TestAccAWSInstancesDataSource_basic (320.63s) --- PASS: TestAccAWSInstancesDataSource_instanceStateNames (321.13s) --- PASS: TestAccAWSInstancesDataSource_tags (319.82s) ``` --- aws/data_source_aws_instance.go | 10 +++ aws/data_source_aws_instance_test.go | 49 +++++++++++ aws/resource_aws_instance.go | 65 +++++++++++++-- aws/resource_aws_instance_test.go | 112 +++++++++++++++++++++++++- website/docs/r/instance.html.markdown | 13 ++- 5 files changed, 234 insertions(+), 15 deletions(-) diff --git a/aws/data_source_aws_instance.go b/aws/data_source_aws_instance.go index 6de84be9267..ad5f55c6d53 100644 --- a/aws/data_source_aws_instance.go +++ b/aws/data_source_aws_instance.go @@ -208,6 +208,11 @@ func dataSourceAwsInstance() *schema.Resource { Computed: true, }, + "throughput": { + Type: schema.TypeInt, + Computed: true, + }, + "volume_size": { Type: schema.TypeInt, Computed: true, @@ -263,6 +268,11 @@ func dataSourceAwsInstance() *schema.Resource { Computed: true, }, + "throughput": { + Type: schema.TypeInt, + Computed: true, + }, + "volume_size": { Type: schema.TypeInt, Computed: true, diff --git a/aws/data_source_aws_instance_test.go b/aws/data_source_aws_instance_test.go index 8dddd0e2f6e..d2d0c4b7910 100644 --- a/aws/data_source_aws_instance_test.go +++ b/aws/data_source_aws_instance_test.go @@ -98,6 +98,30 @@ func TestAccAWSInstanceDataSource_gp2IopsDevice(t *testing.T) { }) } +func TestAccAWSInstanceDataSource_gp3ThroughputDevice(t *testing.T) { + resourceName := "aws_instance.test" + datasourceName := "data.aws_instance.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccInstanceDataSourceConfig_gp3ThroughputDevice, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(datasourceName, "ami", resourceName, "ami"), + resource.TestCheckResourceAttrPair(datasourceName, "instance_type", resourceName, "instance_type"), + resource.TestCheckResourceAttrPair(datasourceName, "root_block_device.#", resourceName, "root_block_device.#"), + resource.TestCheckResourceAttrPair(datasourceName, "root_block_device.0.volume_size", resourceName, "root_block_device.0.volume_size"), + resource.TestCheckResourceAttrPair(datasourceName, "root_block_device.0.volume_type", resourceName, "root_block_device.0.volume_type"), + resource.TestCheckResourceAttrPair(datasourceName, "root_block_device.0.device_name", resourceName, "root_block_device.0.device_name"), + resource.TestCheckResourceAttrPair(datasourceName, "root_block_device.0.throughput", resourceName, 
"root_block_device.0.throughput"), + ), + }, + }, + }) +} + func TestAccAWSInstanceDataSource_blockDevices(t *testing.T) { resourceName := "aws_instance.test" datasourceName := "data.aws_instance.test" @@ -587,6 +611,24 @@ data "aws_instance" "test" { } ` +// GP3ThroughputDevice +var testAccInstanceDataSourceConfig_gp3ThroughputDevice = testAccLatestAmazonLinuxHvmEbsAmiConfig() + ` +resource "aws_instance" "test" { + ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id + instance_type = "t3.medium" + + root_block_device { + volume_type = "gp3" + volume_size = 10 + throughput = 300 + } +} + +data "aws_instance" "test" { + instance_id = aws_instance.test.id +} +` + // Block Device var testAccInstanceDataSourceConfig_blockDevices = testAccLatestAmazonLinuxHvmEbsAmiConfig() + ` resource "aws_instance" "test" { @@ -621,6 +663,13 @@ resource "aws_instance" "test" { device_name = "/dev/sde" virtual_name = "ephemeral0" } + + ebs_block_device { + device_name = "/dev/sdf" + volume_size = 10 + volume_type = "gp3" + throughput = 300 + } } data "aws_instance" "test" { diff --git a/aws/resource_aws_instance.go b/aws/resource_aws_instance.go index 2d00658b75f..2ff3c7cbb51 100644 --- a/aws/resource_aws_instance.go +++ b/aws/resource_aws_instance.go @@ -389,6 +389,14 @@ func resourceAwsInstance() *schema.Resource { ForceNew: true, }, + "throughput": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + DiffSuppressFunc: throughputDiffSuppressFunc, + }, + "volume_size": { Type: schema.TypeInt, Optional: true, @@ -496,6 +504,13 @@ func resourceAwsInstance() *schema.Resource { DiffSuppressFunc: iopsDiffSuppressFunc, }, + "throughput": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + DiffSuppressFunc: throughputDiffSuppressFunc, + }, + "volume_size": { Type: schema.TypeInt, Optional: true, @@ -602,11 +617,19 @@ func resourceAwsInstance() *schema.Resource { } func iopsDiffSuppressFunc(k, old, new string, d *schema.ResourceData) bool { - // Suppress diff if volume_type is not io1 or io2 and iops is unset or configured as 0 + // Suppress diff if volume_type is not io1, io2, or gp3 and iops is unset or configured as 0 i := strings.LastIndexByte(k, '.') vt := k[:i+1] + "volume_type" v := d.Get(vt).(string) - return (strings.ToLower(v) != ec2.VolumeTypeIo1 || strings.ToLower(v) != ec2.VolumeTypeIo2) && new == "0" + return (strings.ToLower(v) != ec2.VolumeTypeIo1 && strings.ToLower(v) != ec2.VolumeTypeIo2 && strings.ToLower(v) != ec2.VolumeTypeGp3) && new == "0" +} + +func throughputDiffSuppressFunc(k, old, new string, d *schema.ResourceData) bool { + // Suppress diff if volume_type is not gp3 and throughput is unset or configured as 0 + i := strings.LastIndexByte(k, '.') + vt := k[:i+1] + "volume_type" + v := d.Get(vt).(string) + return strings.ToLower(v) != ec2.VolumeTypeGp3 && new == "0" } func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error { @@ -1421,7 +1444,7 @@ func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error { if v, ok := d.Get("root_block_device.0.iops").(int); ok && v != 0 { // Enforce IOPs usage with a valid volume type // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/12667 - if t, ok := d.Get("root_block_device.0.volume_type").(string); ok && t != ec2.VolumeTypeIo1 && t != ec2.VolumeTypeIo2 { + if t, ok := d.Get("root_block_device.0.volume_type").(string); ok && t != ec2.VolumeTypeIo1 && t != ec2.VolumeTypeIo2 && t != ec2.VolumeTypeGp3 { if t == "" { // Volume defaults to gp2 t = 
ec2.VolumeTypeGp2 @@ -1432,6 +1455,16 @@ func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error { input.Iops = aws.Int64(int64(v)) } } + if d.HasChange("root_block_device.0.throughput") { + if v, ok := d.Get("root_block_device.0.throughput").(int); ok && v != 0 { + // Enforce throughput usage with a valid volume type + if t, ok := d.Get("root_block_device.0.volume_type").(string); ok && t != ec2.VolumeTypeGp3 { + return fmt.Errorf("error updating instance: throughput attribute not supported for type %s", t) + } + modifyVolume = true + input.Throughput = aws.Int64(int64(v)) + } + } if modifyVolume { _, err := conn.ModifyVolume(&input) if err != nil { @@ -1765,6 +1798,9 @@ func readBlockDevicesFromInstance(instance *ec2.Instance, conn *ec2.EC2) (map[st if vol.KmsKeyId != nil { bd["kms_key_id"] = aws.StringValue(vol.KmsKeyId) } + if vol.Throughput != nil { + bd["throughput"] = aws.Int64Value(vol.Throughput) + } if instanceBd.DeviceName != nil { bd["device_name"] = aws.StringValue(instanceBd.DeviceName) } @@ -1953,9 +1989,9 @@ func readBlockDeviceMappingsFromConfig(d *schema.ResourceData, conn *ec2.EC2) ([ if v, ok := bd["volume_type"].(string); ok && v != "" { ebs.VolumeType = aws.String(v) if iops, ok := bd["iops"].(int); ok && iops > 0 { - if ec2.VolumeTypeIo1 == strings.ToLower(v) || ec2.VolumeTypeIo2 == strings.ToLower(v) { + if ec2.VolumeTypeIo1 == strings.ToLower(v) || ec2.VolumeTypeIo2 == strings.ToLower(v) || ec2.VolumeTypeGp3 == strings.ToLower(v) { // Condition: This parameter is required for requests to create io1 or io2 - // volumes; it is not used in requests to create gp2, st1, sc1, or + // volumes and optional for gp3; it is not used in requests to create gp2, st1, sc1, or // standard volumes. // See: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html ebs.Iops = aws.Int64(int64(iops)) @@ -1964,6 +2000,13 @@ func readBlockDeviceMappingsFromConfig(d *schema.ResourceData, conn *ec2.EC2) ([ // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/12667 return nil, fmt.Errorf("error creating resource: iops attribute not supported for ebs_block_device with volume_type %s", v) } + } else if throughput, ok := bd["throughput"].(int); ok && throughput > 0 { + // `throughput` is only valid for gp3 + if ec2.VolumeTypeGp3 == strings.ToLower(v) { + ebs.Throughput = aws.Int64(int64(throughput)) + } else { + return nil, fmt.Errorf("error creating resource: throughput attribute not supported for ebs_block_device with volume_type %s", v) + } } } @@ -2019,8 +2062,8 @@ func readBlockDeviceMappingsFromConfig(d *schema.ResourceData, conn *ec2.EC2) ([ if v, ok := bd["volume_type"].(string); ok && v != "" { ebs.VolumeType = aws.String(v) if iops, ok := bd["iops"].(int); ok && iops > 0 { - if ec2.VolumeTypeIo1 == strings.ToLower(v) || ec2.VolumeTypeIo2 == strings.ToLower(v) { - // Only set the iops attribute if the volume type is io1 or io2. Setting otherwise + if ec2.VolumeTypeIo1 == strings.ToLower(v) || ec2.VolumeTypeIo2 == strings.ToLower(v) || ec2.VolumeTypeGp3 == strings.ToLower(v) { + // Only set the iops attribute if the volume type is io1, io2, or gp3. Setting otherwise // can trigger a refresh/plan loop based on the computed value that is given // from AWS, and prevent us from specifying 0 as a valid iops. 
// See https://github.com/hashicorp/terraform/pull/4146 @@ -2031,6 +2074,14 @@ func readBlockDeviceMappingsFromConfig(d *schema.ResourceData, conn *ec2.EC2) ([ // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/12667 return nil, fmt.Errorf("error creating resource: iops attribute not supported for root_block_device with volume_type %s", v) } + } else if throughput, ok := bd["throughput"].(int); ok && throughput > 0 { + // throughput is only valid for gp3 + if ec2.VolumeTypeGp3 == strings.ToLower(v) { + ebs.Throughput = aws.Int64(int64(throughput)) + } else { + // Enforce throughput usage with a valid volume type + return nil, fmt.Errorf("error creating resource: throughput attribute not supported for root_block_device with volume_type %s", v) + } } } diff --git a/aws/resource_aws_instance_test.go b/aws/resource_aws_instance_test.go index da1c1f0afdf..baf2b92e375 100644 --- a/aws/resource_aws_instance_test.go +++ b/aws/resource_aws_instance_test.go @@ -340,6 +340,20 @@ func TestAccAWSInstance_EbsBlockDevice_InvalidIopsForVolumeType(t *testing.T) { }) } +func TestAccAWSInstance_EbsBlockDevice_InvalidThroughputForVolumeType(t *testing.T) { + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckInstanceConfigEBSBlockDeviceInvalidThroughput, + ExpectError: regexp.MustCompile(`error creating resource: throughput attribute not supported for ebs_block_device with volume_type gp2`), + }, + }, + }) +} + func TestAccAWSInstance_RootBlockDevice_KmsKeyArn(t *testing.T) { var instance ec2.Instance kmsKeyResourceName := "aws_kms_key.test" @@ -494,6 +508,10 @@ func TestAccAWSInstance_blockDevices(t *testing.T) { return fmt.Errorf("block device doesn't exist: /dev/sdd") } + if _, ok := blockDevices["/dev/sdf"]; !ok { + return fmt.Errorf("block device doesn't exist: /dev/sdf") + } + return nil } } @@ -515,7 +533,7 @@ func TestAccAWSInstance_blockDevices(t *testing.T) { resource.TestMatchResourceAttr(resourceName, "root_block_device.0.volume_id", regexp.MustCompile("vol-[a-z0-9]+")), resource.TestCheckResourceAttr(resourceName, "root_block_device.0.volume_size", rootVolumeSize), resource.TestCheckResourceAttr(resourceName, "root_block_device.0.volume_type", "gp2"), - resource.TestCheckResourceAttr(resourceName, "ebs_block_device.#", "3"), + resource.TestCheckResourceAttr(resourceName, "ebs_block_device.#", "4"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "ebs_block_device.*", map[string]string{ "device_name": "/dev/sdb", "volume_size": "9", @@ -530,6 +548,12 @@ func TestAccAWSInstance_blockDevices(t *testing.T) { "volume_type": "io1", "iops": "100", }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "ebs_block_device.*", map[string]string{ + "device_name": "/dev/sdf", + "volume_size": "10", + "volume_type": "gp3", + "throughput": "300", + }), resource.TestMatchTypeSetElemNestedAttrs(resourceName, "ebs_block_device.*", map[string]*regexp.Regexp{ "volume_id": regexp.MustCompile("vol-[a-z0-9]+"), }), @@ -1654,6 +1678,48 @@ func TestAccAWSInstance_EbsRootDevice_ModifyIOPS_Io2(t *testing.T) { }) } +func TestAccAWSInstance_EbsRootDevice_ModifyThroughput_Gp3(t *testing.T) { + var original ec2.Instance + var updated ec2.Instance + resourceName := "aws_instance.test" + + volumeSize := "30" + deleteOnTermination := "true" + volumeType := "gp3" + + originalThroughput := "250" + updatedThroughput := "300" + + 
resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEc2InstanceRootBlockDeviceWithThroughput(volumeSize, deleteOnTermination, volumeType, originalThroughput), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists(resourceName, &original), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.volume_size", volumeSize), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.delete_on_termination", deleteOnTermination), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.volume_type", volumeType), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.throughput", originalThroughput), + ), + }, + { + Config: testAccAwsEc2InstanceRootBlockDeviceWithThroughput(volumeSize, deleteOnTermination, volumeType, updatedThroughput), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists(resourceName, &updated), + testAccCheckInstanceNotRecreated(t, &original, &updated), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.volume_size", volumeSize), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.delete_on_termination", deleteOnTermination), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.volume_type", volumeType), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.throughput", updatedThroughput), + ), + }, + }, + }) +} + func TestAccAWSInstance_EbsRootDevice_ModifyDeleteOnTermination(t *testing.T) { var original ec2.Instance var updated ec2.Instance @@ -3615,6 +3681,27 @@ resource "aws_instance" "test" { `, size, delete, volumeType, iops)) } +func testAccAwsEc2InstanceRootBlockDeviceWithThroughput(size, delete, volumeType, throughput string) string { + if throughput == "" { + throughput = "null" + } + return composeConfig(testAccAwsEc2InstanceAmiWithEbsRootVolume, + fmt.Sprintf(` +resource "aws_instance" "test" { + ami = data.aws_ami.ami.id + + instance_type = "t2.medium" + + root_block_device { + volume_size = %[1]s + delete_on_termination = %[2]s + volume_type = %[3]q + throughput = %[4]s + } +} +`, size, delete, volumeType, throughput)) +} + const testAccAwsEc2InstanceAmiWithEbsRootVolume = ` data "aws_ami" "ami" { owners = ["amazon"] @@ -3682,6 +3769,14 @@ resource "aws_instance" "test" { device_name = "/dev/sde" virtual_name = "ephemeral0" } + + ebs_block_device { + device_name = "/dev/sdf" + volume_size = 10 + volume_type = "gp3" + throughput = 300 + } + } `, size, delete)) } @@ -3989,6 +4084,21 @@ resource "aws_instance" "test" { } `) +var testAccCheckInstanceConfigEBSBlockDeviceInvalidThroughput = composeConfig(testAccAwsEc2InstanceAmiWithEbsRootVolume, ` +resource "aws_instance" "test" { + ami = data.aws_ami.ami.id + + instance_type = "t2.medium" + + ebs_block_device { + device_name = "/dev/sdc" + volume_size = 10 + volume_type = "gp2" + throughput = 300 + } +} +`) + func testAccCheckInstanceConfigWithVolumeTags() string { return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` resource "aws_instance" "test" { diff --git a/website/docs/r/instance.html.markdown b/website/docs/r/instance.html.markdown index ad1e5d7ba8c..0d4203d2f2a 100644 --- a/website/docs/r/instance.html.markdown +++ b/website/docs/r/instance.html.markdown @@ -127,12 +127,11 @@ to understand the implications of using these attributes. 
The `root_block_device` mapping supports the following:

-* `volume_type` - (Optional) The type of volume. Can be `"standard"`, `"gp2"`, `"io1"`, `"io2"`, `"sc1"`, or `"st1"`. (Default: `"gp2"`).
+* `volume_type` - (Optional) The type of volume. Can be `"standard"`, `"gp2"`, `"gp3"`, `"io1"`, `"io2"`, `"sc1"`, or `"st1"`. (Default: `"gp2"`).
* `volume_size` - (Optional) The size of the volume in gibibytes (GiB).
* `iops` - (Optional) The amount of provisioned
-  [IOPS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-io-characteristics.html).
-  This is only valid for `volume_type` of `"io1/io2"`, and must be specified if
-  using that type
+  [IOPS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-io-characteristics.html). Only valid for `volume_type` of `"io1"`, `"io2"`, or `"gp3"`.
+* `throughput` - (Optional) The throughput to provision for a volume in mebibytes per second (MiB/s). This is only valid for `volume_type` of `"gp3"`.
* `delete_on_termination` - (Optional) Whether the volume should be destroyed
  on instance termination (Default: `true`).
* `encrypted` - (Optional) Enable volume encryption. (Default: `false`). Must be configured to perform drift detection.
@@ -145,12 +144,12 @@ Each `ebs_block_device` supports the following:

* `device_name` - (Required) The name of the device to mount.
* `snapshot_id` - (Optional) The Snapshot ID to mount.
-* `volume_type` - (Optional) The type of volume. Can be `"standard"`, `"gp2"`, `"io1"`
-  or `"io2"`. (Default: `"gp2"`).
+* `volume_type` - (Optional) The type of volume. Can be `"standard"`, `"gp2"`, `"gp3"`, `"io1"`, `"io2"`, `"sc1"`, or `"st1"`. (Default: `"gp2"`).
* `volume_size` - (Optional) The size of the volume in gibibytes (GiB).
* `iops` - (Optional) The amount of provisioned
  [IOPS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-io-characteristics.html).
-  This must be set with a `volume_type` of `"io1/io2"`.
+  Only valid for `volume_type` of `"io1"`, `"io2"`, or `"gp3"`.
+* `throughput` - (Optional) The throughput to provision for a volume in mebibytes per second (MiB/s). This is only valid for `volume_type` of `"gp3"`.
* `delete_on_termination` - (Optional) Whether the volume should be destroyed
  on instance termination (Default: `true`).
* `encrypted` - (Optional) Enables [EBS

From f75f46fea0ff79d9eeecb72f71cbe15ea885a94d Mon Sep 17 00:00:00 2001
From: Brian Flad
Date: Wed, 16 Dec 2020 19:36:55 -0500
Subject: [PATCH 0272/1212] docs/data-source/aws_instance: Add throughput attribute

---
 website/docs/d/instance.html.markdown | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/website/docs/d/instance.html.markdown b/website/docs/d/instance.html.markdown
index 8753f953a4c..9d9089df438 100644
--- a/website/docs/d/instance.html.markdown
+++ b/website/docs/d/instance.html.markdown
@@ -69,6 +69,7 @@ interpolation.
  * `iops` - `0` If the EBS volume is not a provisioned IOPS image, otherwise the supported IOPS count.
  * `kms_key_arn` - Amazon Resource Name (ARN) of KMS Key, if EBS volume is encrypted.
  * `snapshot_id` - The ID of the snapshot.
+  * `throughput` - The throughput of the volume, in MiB/s.
  * `volume_size` - The size of the volume, in GiB.
  * `volume_type` - The volume type.
* `ebs_optimized` - Whether the Instance is EBS optimized or not (Boolean).
@@ -102,6 +103,7 @@ interpolation.
  * `encrypted` - If the EBS volume is encrypted.
  * `iops` - `0` If the volume is not a provisioned IOPS image, otherwise the supported IOPS count.
  * `kms_key_arn` - Amazon Resource Name (ARN) of KMS Key, if EBS volume is encrypted.
+ * `throughput` - The throughput of the volume, in MiB/s. * `volume_size` - The size of the volume, in GiB. * `volume_type` - The type of the volume. * `security_groups` - The associated security groups. From f790a953b0c57f1c4f2f19774de490d4b6bfe0be Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 16 Dec 2020 19:38:45 -0500 Subject: [PATCH 0273/1212] Update CHANGELOG for #16620 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 45df5ab8e69..d81ee40ebac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,11 +13,13 @@ ENHANCEMENTS * data-source/aws_availability_zone: Add `parent_zone_id`, `parent_zone_name`, and `zone_type` attributes (additional support for Local and Wavelength Zones) [GH-16770] * data-source/aws_eip: Add `carrier_ip` attribute [GH-16724] * data-source/aws_instance: Add `enclave_options` attribute (Nitro Enclaves) [GH-16361] +* data-source/aws_instance: Add `ebs_block_device` and `root_block_device` configuration block `throughput` attribute [GH-16620] * data-source/aws_launch_configuration: Add `metadata_options` attribute [GH-14637] * data-source/aws_launch_template: Add `enclave_options` attribute (Nitro Enclaves) [GH-16361] * data-source/aws_network_interface: Add `association` `carrier_ip` and `customer_owned_ip` attributes [GH-16723] * resource/aws_eip: Add `carrier_ip` attribute [GH-16724] * resource/aws_instance: Add `enclave_options` configuration block (Nitro Enclaves) [GH-16361] +* resource/aws_instance: Add `ebs_block_device` and `root_block_device` configuration block `throughput` attribute [GH-16620] * resource/aws_launch_configuration: Add `metadata_options` configuration block [GH-14637] * resource/aws_launch_template: Add `enclave_options` configuration block (Nitro Enclaves) [GH-16361] * resource/aws_vpn_connection: Add support for VPN tunnel options and enable acceleration, DPDTimeoutAction, StartupAction, local/remote IPv4/IPv6 network CIDR and tunnel inside IP version. 
[GH-14740]

From cfd4729d4ba5bca38072d2f51c04bf0f2be567d7 Mon Sep 17 00:00:00 2001
From: Brian Flad
Date: Wed, 16 Dec 2020 20:30:59 -0500
Subject: [PATCH 0274/1212] resource/aws_directory_service_directory: Deletion and sweeper refactoring (#16653)

A few changes to handle some API oddities and improve sweeper behavior:

- Add `aws_db_instance` to sweeper dependencies, since MSSQL databases attached to directories can cause `ClientException: Cannot delete the directory because it still has authorized applications` errors
- Collect multiple errors in the sweeper (via `go-multierror`) instead of aborting on the first failure
- Skip `ErrCodeEntityDoesNotExistException` errors on deletion, both to avoid real-world issues with stale plans and because the `DescribeDirectories` API can sometimes return directories that raise this error on deletion

Output from acceptance test in AWS Commercial:

```
--- PASS: TestAccAWSDirectoryServiceDirectory_basic (1143.67s)
--- PASS: TestAccAWSDirectoryServiceDirectory_connector (1261.53s)
--- PASS: TestAccAWSDirectoryServiceDirectory_disappears (620.70s)
--- PASS: TestAccAWSDirectoryServiceDirectory_microsoft (1873.90s)
--- PASS: TestAccAWSDirectoryServiceDirectory_microsoftStandard (1734.15s)
--- PASS: TestAccAWSDirectoryServiceDirectory_tags (1178.94s)
--- PASS: TestAccAWSDirectoryServiceDirectory_withAliasAndSso (798.85s)
```

Output from sweeper in AWS Commercial:

```
2020/12/08 13:51:37 [DEBUG] Running Sweepers for region (us-west-2):
2020/12/08 13:51:37 [DEBUG] Running Sweeper (aws_workspaces_workspace) in region (us-west-2)
2020/12/08 13:51:39 [DEBUG] Running Sweeper (aws_workspaces_directory) in region (us-west-2)
2020/12/08 13:51:39 [DEBUG] Running Sweeper (aws_db_instance) in region (us-west-2)
...
2020/12/08 13:55:50 [DEBUG] Running Sweeper (aws_fsx_windows_file_system) in region (us-west-2)
2020/12/08 13:55:51 [DEBUG] Running Sweeper (aws_directory_service_directory) in region (us-west-2)
...
2020/12/08 17:27:50 Sweeper Tests ran successfully: - aws_workspaces_workspace - aws_workspaces_directory - aws_db_instance - aws_fsx_windows_file_system - aws_directory_service_directory 2020/12/08 17:27:50 [DEBUG] Running Sweepers for region (us-east-1): 2020/12/08 17:27:50 [DEBUG] Running Sweeper (aws_workspaces_workspace) in region (us-east-1) 2020/12/08 17:27:51 [DEBUG] Running Sweeper (aws_db_instance) in region (us-east-1) 2020/12/08 17:27:51 [DEBUG] Running Sweeper (aws_fsx_windows_file_system) in region (us-east-1) 2020/12/08 17:27:51 [DEBUG] Running Sweeper (aws_workspaces_directory) in region (us-east-1) 2020/12/08 17:27:51 [DEBUG] Running Sweeper (aws_directory_service_directory) in region (us-east-1) 2020/12/08 17:27:52 Sweeper Tests ran successfully: - aws_workspaces_workspace - aws_db_instance - aws_fsx_windows_file_system - aws_workspaces_directory - aws_directory_service_directory ok github.com/terraform-providers/terraform-provider-aws/aws 8918.345s ``` --- .../service/directoryservice/lister/list.go | 3 + .../directoryservice/lister/list_pages_gen.go | 31 +++++++++ ...esource_aws_directory_service_directory.go | 21 +++--- ...ce_aws_directory_service_directory_test.go | 69 +++++++++++-------- 4 files changed, 86 insertions(+), 38 deletions(-) create mode 100644 aws/internal/service/directoryservice/lister/list.go create mode 100644 aws/internal/service/directoryservice/lister/list_pages_gen.go diff --git a/aws/internal/service/directoryservice/lister/list.go b/aws/internal/service/directoryservice/lister/list.go new file mode 100644 index 00000000000..ffcd0539ba4 --- /dev/null +++ b/aws/internal/service/directoryservice/lister/list.go @@ -0,0 +1,3 @@ +//go:generate go run ../../../generators/listpages/main.go -function=DescribeDirectories github.com/aws/aws-sdk-go/service/directoryservice + +package lister diff --git a/aws/internal/service/directoryservice/lister/list_pages_gen.go b/aws/internal/service/directoryservice/lister/list_pages_gen.go new file mode 100644 index 00000000000..1b8fda44253 --- /dev/null +++ b/aws/internal/service/directoryservice/lister/list_pages_gen.go @@ -0,0 +1,31 @@ +// Code generated by "aws/internal/generators/listpages/main.go -function=DescribeDirectories github.com/aws/aws-sdk-go/service/directoryservice"; DO NOT EDIT. 
+ +package lister + +import ( + "context" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/directoryservice" +) + +func DescribeDirectoriesPages(conn *directoryservice.DirectoryService, input *directoryservice.DescribeDirectoriesInput, fn func(*directoryservice.DescribeDirectoriesOutput, bool) bool) error { + return DescribeDirectoriesPagesWithContext(context.Background(), conn, input, fn) +} + +func DescribeDirectoriesPagesWithContext(ctx context.Context, conn *directoryservice.DirectoryService, input *directoryservice.DescribeDirectoriesInput, fn func(*directoryservice.DescribeDirectoriesOutput, bool) bool) error { + for { + output, err := conn.DescribeDirectoriesWithContext(ctx, input) + if err != nil { + return err + } + + lastPage := aws.StringValue(output.NextToken) == "" + if !fn(output, lastPage) || lastPage { + break + } + + input.NextToken = output.NextToken + } + return nil +} diff --git a/aws/resource_aws_directory_service_directory.go b/aws/resource_aws_directory_service_directory.go index 369680f3055..13e8f41d55a 100644 --- a/aws/resource_aws_directory_service_directory.go +++ b/aws/resource_aws_directory_service_directory.go @@ -7,6 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/directoryservice" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -517,22 +518,26 @@ func resourceAwsDirectoryServiceDirectoryRead(d *schema.ResourceData, meta inter } func resourceAwsDirectoryServiceDirectoryDelete(d *schema.ResourceData, meta interface{}) error { - dsconn := meta.(*AWSClient).dsconn + conn := meta.(*AWSClient).dsconn - input := directoryservice.DeleteDirectoryInput{ + input := &directoryservice.DeleteDirectoryInput{ DirectoryId: aws.String(d.Id()), } - log.Printf("[DEBUG] Deleting Directory Service Directory: %s", input) - _, err := dsconn.DeleteDirectory(&input) + _, err := conn.DeleteDirectory(input) + + if tfawserr.ErrCodeEquals(err, directoryservice.ErrCodeEntityDoesNotExistException) { + return nil + } + if err != nil { - return fmt.Errorf("error deleting Directory Service Directory (%s): %s", d.Id(), err) + return fmt.Errorf("error deleting Directory Service Directory (%s): %w", d.Id(), err) } - log.Printf("[DEBUG] Waiting for Directory Service Directory (%q) to be deleted", d.Id()) - err = waitForDirectoryServiceDirectoryDeletion(dsconn, d.Id()) + err = waitForDirectoryServiceDirectoryDeletion(conn, d.Id()) + if err != nil { - return fmt.Errorf("error waiting for Directory Service (%s) to be deleted: %s", d.Id(), err) + return fmt.Errorf("error waiting for Directory Service (%s) to be deleted: %w", d.Id(), err) } return nil diff --git a/aws/resource_aws_directory_service_directory_test.go b/aws/resource_aws_directory_service_directory_test.go index f78e888fcb7..a3a53bf6f02 100644 --- a/aws/resource_aws_directory_service_directory_test.go +++ b/aws/resource_aws_directory_service_directory_test.go @@ -1,73 +1,82 @@ package aws import ( + "context" "fmt" "log" "testing" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/directoryservice" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + 
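+	// lister provides the generated DescribeDirectories pagination helpers
+	// used by the sweeper below.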
"github.com/terraform-providers/terraform-provider-aws/aws/internal/service/directoryservice/lister" ) func init() { resource.AddTestSweepers("aws_directory_service_directory", &resource.Sweeper{ - Name: "aws_directory_service_directory", - F: testSweepDirectoryServiceDirectories, - Dependencies: []string{"aws_fsx_windows_file_system", "aws_workspaces_directory"}, + Name: "aws_directory_service_directory", + F: testSweepDirectoryServiceDirectories, + Dependencies: []string{ + "aws_db_instance", + "aws_fsx_windows_file_system", + "aws_workspaces_directory", + }, }) } func testSweepDirectoryServiceDirectories(region string) error { client, err := sharedClientForRegion(region) + if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("error getting client: %w", err) } + conn := client.(*AWSClient).dsconn - input := &directoryservice.DescribeDirectoriesInput{} - for { - resp, err := conn.DescribeDirectories(input) + var sweeperErrs *multierror.Error - if testSweepSkipSweepError(err) { - log.Printf("[WARN] Skipping Directory Service Directory sweep for %s: %s", region, err) - return nil - } + input := &directoryservice.DescribeDirectoriesInput{} - if err != nil { - return fmt.Errorf("error listing Directory Service Directories: %s", err) + err = lister.DescribeDirectoriesPagesWithContext(context.TODO(), conn, input, func(page *directoryservice.DescribeDirectoriesOutput, lastPage bool) bool { + if page == nil { + return !lastPage } - for _, directory := range resp.DirectoryDescriptions { + for _, directory := range page.DirectoryDescriptions { id := aws.StringValue(directory.DirectoryId) - deleteDirectoryInput := directoryservice.DeleteDirectoryInput{ - DirectoryId: directory.DirectoryId, - } + r := resourceAwsDirectoryServiceDirectory() + d := r.Data(nil) + d.SetId(id) - log.Printf("[INFO] Deleting Directory Service Directory: %s", deleteDirectoryInput) - _, err := conn.DeleteDirectory(&deleteDirectoryInput) - if err != nil { - return fmt.Errorf("error deleting Directory Service Directory (%s): %s", id, err) - } + err := r.Delete(d, client) - log.Printf("[INFO] Waiting for Directory Service Directory (%q) to be deleted", id) - err = waitForDirectoryServiceDirectoryDeletion(conn, id) if err != nil { - return fmt.Errorf("error waiting for Directory Service (%s) to be deleted: %s", id, err) + sweeperErr := fmt.Errorf("error deleting Directory Service Directory (%s): %w", id, err) + log.Printf("[ERROR] %s", sweeperErr) + sweeperErrs = multierror.Append(sweeperErrs, sweeperErr) + continue } } - if resp.NextToken == nil { - break - } + return !lastPage + }) - input.NextToken = resp.NextToken + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping Directory Service Directory sweep for %s: %s", region, err) + return sweeperErrs.ErrorOrNil() } - return nil + if err != nil { + sweeperErr := fmt.Errorf("error listing Directory Service Directories: %w", err) + log.Printf("[ERROR] %s", sweeperErr) + sweeperErrs = multierror.Append(sweeperErrs, sweeperErr) + } + + return sweeperErrs.ErrorOrNil() } func TestAccAWSDirectoryServiceDirectory_basic(t *testing.T) { From f16f5083b2b1c19eaf84135c99e803499109d31c Mon Sep 17 00:00:00 2001 From: Lachlan Cooper Date: Thu, 17 Dec 2020 12:57:10 +1100 Subject: [PATCH 0275/1212] Fix s3_bucket lifecycle_rule requirements In #15263, `abort_incomplete_multipart_upload_days` was added to the set of optional `lifecycle_rule` arguments of which at least one must be specified. This updates the documentation in line with that change. 
--- website/docs/r/s3_bucket.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index 334d30d3479..51c09d1d95b 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -397,7 +397,7 @@ The `lifecycle_rule` object supports the following: * `noncurrent_version_expiration` - (Optional) Specifies when noncurrent object versions expire (documented below). * `noncurrent_version_transition` - (Optional) Specifies when noncurrent object versions transitions (documented below). -At least one of `expiration`, `transition`, `noncurrent_version_expiration`, `noncurrent_version_transition` must be specified. +At least one of `abort_incomplete_multipart_upload_days`, `expiration`, `transition`, `noncurrent_version_expiration`, `noncurrent_version_transition` must be specified. The `expiration` object supports the following From ada2d7d7efe6491539fa9ed8682abb71c5fba1fc Mon Sep 17 00:00:00 2001 From: Roberth Kulbin Date: Thu, 17 Dec 2020 01:59:42 +0000 Subject: [PATCH 0276/1212] New data source: aws_ec2_managed_prefix_list (#16738) * d/aws_ec2_managed_prefix_list: new data source * d/aws_ec2_managed_prefix_list: code review updates --- ...data_source_aws_ec2_managed_prefix_list.go | 145 +++++++++++++++ ...source_aws_ec2_managed_prefix_list_test.go | 165 ++++++++++++++++++ aws/provider.go | 1 + .../d/ec2_managed_prefix_list.html.markdown | 65 +++++++ 4 files changed, 376 insertions(+) create mode 100644 aws/data_source_aws_ec2_managed_prefix_list.go create mode 100644 aws/data_source_aws_ec2_managed_prefix_list_test.go create mode 100644 website/docs/d/ec2_managed_prefix_list.html.markdown diff --git a/aws/data_source_aws_ec2_managed_prefix_list.go b/aws/data_source_aws_ec2_managed_prefix_list.go new file mode 100644 index 00000000000..cbee0c9e144 --- /dev/null +++ b/aws/data_source_aws_ec2_managed_prefix_list.go @@ -0,0 +1,145 @@ +package aws + +import ( + "context" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" +) + +func dataSourceAwsEc2ManagedPrefixList() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceAwsEc2ManagedPrefixListRead, + Schema: map[string]*schema.Schema{ + "address_family": { + Type: schema.TypeString, + Computed: true, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "entries": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cidr": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "filter": dataSourceFiltersSchema(), + "id": { + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + "max_entries": { + Type: schema.TypeInt, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "tags": tagsSchemaComputed(), + "version": { + Type: schema.TypeInt, + Computed: true, + }, + }, + } +} + +func dataSourceAwsEc2ManagedPrefixListRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*AWSClient).ec2conn + ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig + 
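+	// The id and name arguments plus any filter blocks are folded into a single
+	// DescribeManagedPrefixLists call; the combined criteria must match exactly
+	// one prefix list, which is enforced below.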
+ input := ec2.DescribeManagedPrefixListsInput{} + + if filters, ok := d.GetOk("filter"); ok { + input.Filters = buildAwsDataSourceFilters(filters.(*schema.Set)) + } + + if prefixListId, ok := d.GetOk("id"); ok { + input.PrefixListIds = aws.StringSlice([]string{prefixListId.(string)}) + } + + if prefixListName, ok := d.GetOk("name"); ok { + input.Filters = append(input.Filters, &ec2.Filter{ + Name: aws.String("prefix-list-name"), + Values: aws.StringSlice([]string{prefixListName.(string)}), + }) + } + + out, err := conn.DescribeManagedPrefixListsWithContext(ctx, &input) + + if err != nil { + return diag.Errorf("error describing EC2 Managed Prefix Lists: %s", err) + } + + if len(out.PrefixLists) < 1 { + return diag.Errorf("no managed prefix lists matched the given criteria") + } + + if len(out.PrefixLists) > 1 { + return diag.Errorf("more than 1 prefix list matched the given criteria") + } + + pl := *out.PrefixLists[0] + + d.SetId(aws.StringValue(pl.PrefixListId)) + d.Set("name", pl.PrefixListName) + d.Set("owner_id", pl.OwnerId) + d.Set("address_family", pl.AddressFamily) + d.Set("arn", pl.PrefixListArn) + d.Set("max_entries", pl.MaxEntries) + d.Set("version", pl.Version) + + if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(pl.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + return diag.Errorf("error setting tags attribute: %s", err) + } + + var entries []interface{} + + err = conn.GetManagedPrefixListEntriesPages( + &ec2.GetManagedPrefixListEntriesInput{ + PrefixListId: pl.PrefixListId, + }, + func(output *ec2.GetManagedPrefixListEntriesOutput, last bool) bool { + for _, entry := range output.Entries { + entries = append(entries, map[string]interface{}{ + "cidr": aws.StringValue(entry.Cidr), + "description": aws.StringValue(entry.Description), + }) + } + + return true + }, + ) + + if err != nil { + return diag.Errorf("error listing EC2 Managed Prefix List (%s) entries: %s", d.Id(), err) + } + + if err := d.Set("entries", entries); err != nil { + return diag.FromErr(err) + } + + return nil +} diff --git a/aws/data_source_aws_ec2_managed_prefix_list_test.go b/aws/data_source_aws_ec2_managed_prefix_list_test.go new file mode 100644 index 00000000000..332e1259c45 --- /dev/null +++ b/aws/data_source_aws_ec2_managed_prefix_list_test.go @@ -0,0 +1,165 @@ +package aws + +import ( + "fmt" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func testAccDataSourceAwsEc2ManagedPrefixListGetIdByName(name string, id *string, arn *string) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).ec2conn + + output, err := conn.DescribeManagedPrefixLists(&ec2.DescribeManagedPrefixListsInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("prefix-list-name"), + Values: aws.StringSlice([]string{name}), + }, + }, + }) + + if err != nil { + return err + } + + *id = *output.PrefixLists[0].PrefixListId + *arn = *output.PrefixLists[0].PrefixListArn + return nil + } +} + +func TestAccDataSourceAwsEc2ManagedPrefixList_basic(t *testing.T) { + prefixListName := fmt.Sprintf("com.amazonaws.%s.s3", testAccGetRegion()) + prefixListId := "" + prefixListArn := "" + + resourceByName := "data.aws_ec2_managed_prefix_list.s3_by_name" + resourceById := "data.aws_ec2_managed_prefix_list.s3_by_id" + prefixListResourceName := "data.aws_prefix_list.s3_by_id" + + 
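+	// Resolve the regional S3 prefix list by name, re-read it by id, and
+	// cross-check the result against the legacy aws_prefix_list data source.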
resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsEc2ManagedPrefixListConfig_basic, + Check: resource.ComposeTestCheckFunc( + testAccDataSourceAwsEc2ManagedPrefixListGetIdByName(prefixListName, &prefixListId, &prefixListArn), + + resource.TestCheckResourceAttrPtr(resourceByName, "id", &prefixListId), + resource.TestCheckResourceAttr(resourceByName, "name", prefixListName), + resource.TestCheckResourceAttr(resourceByName, "owner_id", "AWS"), + resource.TestCheckResourceAttr(resourceByName, "address_family", "IPv4"), + resource.TestCheckResourceAttrPtr(resourceByName, "arn", &prefixListArn), + resource.TestCheckResourceAttr(resourceByName, "max_entries", "0"), + resource.TestCheckResourceAttr(resourceByName, "version", "0"), + resource.TestCheckResourceAttr(resourceByName, "tags.%", "0"), + + resource.TestCheckResourceAttrPtr(resourceById, "id", &prefixListId), + resource.TestCheckResourceAttr(resourceById, "name", prefixListName), + + resource.TestCheckResourceAttrPair(resourceByName, "id", prefixListResourceName, "id"), + resource.TestCheckResourceAttrPair(resourceByName, "name", prefixListResourceName, "name"), + resource.TestCheckResourceAttrPair(resourceByName, "entries.#", prefixListResourceName, "cidr_blocks.#"), + ), + }, + }, + }) +} + +const testAccDataSourceAwsEc2ManagedPrefixListConfig_basic = ` +data "aws_region" "current" {} + +data "aws_ec2_managed_prefix_list" "s3_by_name" { + name = "com.amazonaws.${data.aws_region.current.name}.s3" +} + +data "aws_ec2_managed_prefix_list" "s3_by_id" { + id = data.aws_ec2_managed_prefix_list.s3_by_name.id +} + +data "aws_prefix_list" "s3_by_id" { + prefix_list_id = data.aws_ec2_managed_prefix_list.s3_by_name.id +} +` + +func TestAccDataSourceAwsEc2ManagedPrefixList_filter(t *testing.T) { + prefixListName := fmt.Sprintf("com.amazonaws.%s.s3", testAccGetRegion()) + prefixListId := "" + prefixListArn := "" + + resourceByName := "data.aws_ec2_managed_prefix_list.s3_by_name" + resourceById := "data.aws_ec2_managed_prefix_list.s3_by_id" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsEc2ManagedPrefixListConfig_filter, + Check: resource.ComposeTestCheckFunc( + testAccDataSourceAwsEc2ManagedPrefixListGetIdByName(prefixListName, &prefixListId, &prefixListArn), + resource.TestCheckResourceAttrPtr(resourceByName, "id", &prefixListId), + resource.TestCheckResourceAttr(resourceByName, "name", prefixListName), + resource.TestCheckResourceAttr(resourceByName, "owner_id", "AWS"), + resource.TestCheckResourceAttr(resourceByName, "address_family", "IPv4"), + resource.TestCheckResourceAttrPtr(resourceByName, "arn", &prefixListArn), + resource.TestCheckResourceAttr(resourceByName, "max_entries", "0"), + resource.TestCheckResourceAttr(resourceByName, "version", "0"), + resource.TestCheckResourceAttr(resourceByName, "tags.%", "0"), + + resource.TestCheckResourceAttrPair(resourceByName, "id", resourceById, "id"), + resource.TestCheckResourceAttrPair(resourceByName, "name", resourceById, "name"), + resource.TestCheckResourceAttrPair(resourceByName, "entries", resourceById, "entries"), + resource.TestCheckResourceAttrPair(resourceByName, "owner_id", resourceById, "owner_id"), + resource.TestCheckResourceAttrPair(resourceByName, "address_family", resourceById, "address_family"), + 
resource.TestCheckResourceAttrPair(resourceByName, "arn", resourceById, "arn"), + resource.TestCheckResourceAttrPair(resourceByName, "max_entries", resourceById, "max_entries"), + resource.TestCheckResourceAttrPair(resourceByName, "tags", resourceById, "tags"), + resource.TestCheckResourceAttrPair(resourceByName, "version", resourceById, "version"), + ), + }, + }, + }) +} + +const testAccDataSourceAwsEc2ManagedPrefixListConfig_filter = ` +data "aws_region" "current" {} + +data "aws_ec2_managed_prefix_list" "s3_by_name" { + filter { + name = "prefix-list-name" + values = ["com.amazonaws.${data.aws_region.current.name}.s3"] + } +} + +data "aws_ec2_managed_prefix_list" "s3_by_id" { + filter { + name = "prefix-list-id" + values = [data.aws_ec2_managed_prefix_list.s3_by_name.id] + } +} +` + +func TestAccDataSourceAwsEc2ManagedPrefixList_matchesTooMany(t *testing.T) { + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsPrefixListConfig_matchesTooMany, + ExpectError: regexp.MustCompile(`more than 1 prefix list matched the given criteria`), + }, + }, + }) +} + +const testAccDataSourceAwsPrefixListConfig_matchesTooMany = ` +data "aws_ec2_managed_prefix_list" "test" {} +` diff --git a/aws/provider.go b/aws/provider.go index 9eb5c30013f..854a3a9638f 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -225,6 +225,7 @@ func Provider() *schema.Provider { "aws_ec2_local_gateway_virtual_interface": dataSourceAwsEc2LocalGatewayVirtualInterface(), "aws_ec2_local_gateway_virtual_interface_group": dataSourceAwsEc2LocalGatewayVirtualInterfaceGroup(), "aws_ec2_local_gateway_virtual_interface_groups": dataSourceAwsEc2LocalGatewayVirtualInterfaceGroups(), + "aws_ec2_managed_prefix_list": dataSourceAwsEc2ManagedPrefixList(), "aws_ec2_spot_price": dataSourceAwsEc2SpotPrice(), "aws_ec2_transit_gateway": dataSourceAwsEc2TransitGateway(), "aws_ec2_transit_gateway_dx_gateway_attachment": dataSourceAwsEc2TransitGatewayDxGatewayAttachment(), diff --git a/website/docs/d/ec2_managed_prefix_list.html.markdown b/website/docs/d/ec2_managed_prefix_list.html.markdown new file mode 100644 index 00000000000..63986d99d47 --- /dev/null +++ b/website/docs/d/ec2_managed_prefix_list.html.markdown @@ -0,0 +1,65 @@ +--- +subcategory: "VPC" +layout: "aws" +page_title: "AWS: aws_ec2_managed_prefix_list" +description: |- + Provides details about a specific managed prefix list +--- + +# Data Source: aws_ec2_managed_prefix_list + +`aws_ec2_managed_prefix_list` provides details about a specific AWS prefix list or +customer-managed prefix list in the current region. + +## Example Usage + +### Find the regional DynamoDB prefix list + +```hcl +data "aws_region" "current" {} + +data "aws_ec2_managed_prefix_list" "example" { + name = "com.amazonaws.${data.aws_region.current.name}.dynamodb" +} +``` + +### Find a managed prefix list using filters + +```hcl +data "aws_ec2_managed_prefix_list" "example" { + filter { + name = "prefix-list-name" + values = ["my-prefix-list"] + } +} +``` + +## Argument Reference + +The arguments of this data source act as filters for querying the available +prefix lists. The given filters must match exactly one prefix list +whose data will be exported as attributes. + +* `id` - (Optional) The ID of the prefix list to select. +* `name` - (Optional) The name of the prefix list to select. +* `filter` - (Optional) Configuration block(s) for filtering. Detailed below. 
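+
+As a sketch of consuming a matched list's exported `entries` (documented under Attributes Reference below), using the regional S3 list from the example above:
+
+```hcl
+data "aws_region" "current" {}
+
+data "aws_ec2_managed_prefix_list" "example" {
+  name = "com.amazonaws.${data.aws_region.current.name}.s3"
+}
+
+# Collect just the CIDR blocks from the entry objects.
+output "s3_cidr_blocks" {
+  value = [for entry in data.aws_ec2_managed_prefix_list.example.entries : entry.cidr]
+}
+```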
+
+### filter Configuration Block
+
+The following arguments are supported by the `filter` configuration block:
+
+* `name` - (Required) The name of the filter field. Valid values can be found in the EC2 [DescribeManagedPrefixLists](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeManagedPrefixLists.html) API Reference.
+* `values` - (Required) Set of values that are accepted for the given filter field. Results will be selected if any given value matches.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - The ID of the selected prefix list.
+* `arn` - The ARN of the selected prefix list.
+* `name` - The name of the selected prefix list.
+* `entries` - The set of entries in this prefix list. Each entry is an object with `cidr` and `description`.
+* `owner_id` - The Account ID of the owner of a customer-managed prefix list, or `AWS` otherwise.
+* `address_family` - The address family of the prefix list. Valid values are `IPv4` and `IPv6`.
+* `max_entries` - When the prefix list is managed, the maximum number of entries it supports, or null otherwise.
+* `tags` - A map of tags assigned to the resource.

From cabe75faf91ad3ae1cbbb71d5a76985cf276ee53 Mon Sep 17 00:00:00 2001
From: Brian Flad
Date: Wed, 16 Dec 2020 21:11:52 -0500
Subject: [PATCH 0277/1212] Update CHANGELOG for #16738

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d81ee40ebac..d889951381e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,7 @@

 FEATURES

+* **New Data Source:** `aws_ec2_managed_prefix_list` [GH-16738]
 * **New Data Source:** `aws_lakeformation_data_lake_settings` [GH-13250]
 * **New Resource:** `aws_codestarconnections_connection` [GH-15990]
 * **New Resource:** `aws_lakeformation_data_lake_settings` [GH-13250]

From 8ece623c0743f1844333286c983a50fac4039255 Mon Sep 17 00:00:00 2001
From: Brian Flad
Date: Wed, 16 Dec 2020 22:28:40 -0500
Subject: [PATCH 0278/1212] resource/aws_ec2_managed_prefix_list: Refactoring for consistency

Output from acceptance testing:

```
--- PASS: TestAccAwsEc2ManagedPrefixList_disappears (20.46s)
--- PASS: TestAccAwsEc2ManagedPrefixList_AddressFamily_IPv6 (24.19s)
--- PASS: TestAccAwsEc2ManagedPrefixList_basic (27.71s)
--- PASS: TestAccAwsEc2ManagedPrefixList_Name (35.35s)
--- PASS: TestAccAwsEc2ManagedPrefixList_Entry (41.97s)
--- PASS: TestAccAwsEc2ManagedPrefixList_Tags (48.37s)
```
---
 aws/internal/service/ec2/errors.go | 4 +
 aws/resource_aws_ec2_managed_prefix_list.go | 278 ++++++----
 ...source_aws_ec2_managed_prefix_list_test.go | 521 +++++++-----
 3 files changed, 365 insertions(+), 438 deletions(-)

diff --git a/aws/internal/service/ec2/errors.go b/aws/internal/service/ec2/errors.go
index fc253b53def..b14b13b5ff4 100644
--- a/aws/internal/service/ec2/errors.go
+++ b/aws/internal/service/ec2/errors.go
@@ -8,6 +8,10 @@ const (
 	ErrCodeInvalidCarrierGatewayIDNotFound = "InvalidCarrierGatewayID.NotFound"
 )

+const (
+	ErrCodeInvalidPrefixListIDNotFound = "InvalidPrefixListID.NotFound"
+)
+
 const (
 	ErrCodeClientVpnEndpointIdNotFound = "InvalidClientVpnEndpointId.NotFound"
 	ErrCodeClientVpnAuthorizationRuleNotFound = "InvalidClientVpnEndpointAuthorizationRuleNotFound"
diff --git a/aws/resource_aws_ec2_managed_prefix_list.go b/aws/resource_aws_ec2_managed_prefix_list.go
index fddf4835120..64e265698b2 100644
--- a/aws/resource_aws_ec2_managed_prefix_list.go
+++ b/aws/resource_aws_ec2_managed_prefix_list.go
@@ -1,15 +1,19 @@
 package aws

 import (
+
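+	// context is required by the customdiff.ComputedIf callback added below.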
"context" "fmt" "log" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + tfec2 "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2/finder" "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2/waiter" ) @@ -24,6 +28,12 @@ func resourceAwsEc2ManagedPrefixList() *schema.Resource { State: schema.ImportStatePassthrough, }, + CustomizeDiff: customdiff.Sequence( + customdiff.ComputedIf("version", func(ctx context.Context, diff *schema.ResourceDiff, meta interface{}) bool { + return diff.HasChange("entry") + }), + ), + Schema: map[string]*schema.Schema{ "address_family": { Type: schema.TypeString, @@ -40,8 +50,6 @@ func resourceAwsEc2ManagedPrefixList() *schema.Resource { "entry": { Type: schema.TypeSet, Optional: true, - // Computed: true, - // ConfigMode: schema.SchemaConfigModeAttr, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "cidr_block": { @@ -84,34 +92,38 @@ func resourceAwsEc2ManagedPrefixList() *schema.Resource { func resourceAwsEc2ManagedPrefixListCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn - input := ec2.CreateManagedPrefixListInput{} + input := &ec2.CreateManagedPrefixListInput{} - input.AddressFamily = aws.String(d.Get("address_family").(string)) + if v, ok := d.GetOk("address_family"); ok { + input.AddressFamily = aws.String(v.(string)) + } - if v, ok := d.GetOk("entry"); ok { - input.Entries = expandAddPrefixListEntries(v) + if v, ok := d.GetOk("entry"); ok && v.(*schema.Set).Len() > 0 { + input.Entries = expandEc2AddPrefixListEntries(v.(*schema.Set).List()) } - input.MaxEntries = aws.Int64(int64(d.Get("max_entries").(int))) - input.PrefixListName = aws.String(d.Get("name").(string)) + if v, ok := d.GetOk("max_entries"); ok { + input.MaxEntries = aws.Int64(int64(v.(int))) + } - if v, ok := d.GetOk("tags"); ok { - input.TagSpecifications = ec2TagSpecificationsFromMap( - v.(map[string]interface{}), - "prefix-list") // no ec2.ResourceTypePrefixList as of 01/07/20 + if v, ok := d.GetOk("name"); ok { + input.PrefixListName = aws.String(v.(string)) } - output, err := conn.CreateManagedPrefixList(&input) + if v, ok := d.GetOk("tags"); ok && len(v.(map[string]interface{})) > 0 { + input.TagSpecifications = ec2TagSpecificationsFromMap(v.(map[string]interface{}), "prefix-list") + } + + output, err := conn.CreateManagedPrefixList(input) + if err != nil { - return fmt.Errorf("failed to create managed prefix list: %w", err) + return fmt.Errorf("error creating EC2 Managed Prefix List: %w", err) } d.SetId(aws.StringValue(output.PrefixList.PrefixListId)) - log.Printf("[INFO] Created Managed Prefix List %s (%s)", d.Get("name").(string), d.Id()) - if _, err := waiter.ManagedPrefixListCreated(conn, d.Id()); err != nil { - return fmt.Errorf("managed prefix list %s failed to create: %w", d.Id(), err) + return fmt.Errorf("error waiting for EC2 Managed Prefix List (%s) creation: %w", d.Id(), err) } return resourceAwsEc2ManagedPrefixListRead(d, meta) @@ -120,29 +132,53 @@ func resourceAwsEc2ManagedPrefixListCreate(d *schema.ResourceData, meta interfac func 
resourceAwsEc2ManagedPrefixListRead(d *schema.ResourceData, meta interface{}) error {
 	conn := meta.(*AWSClient).ec2conn
 	ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig
-	id := d.Id()

-	pl, ok, err := getManagedPrefixList(id, conn)
+	pl, err := finder.ManagedPrefixListByID(conn, d.Id())
+
+	if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, tfec2.ErrCodeInvalidPrefixListIDNotFound) {
+		log.Printf("[WARN] EC2 Managed Prefix List %s not found, removing from state", d.Id())
+		d.SetId("")
+		return nil
+	}
+
 	if err != nil {
-		return fmt.Errorf("failed to get managed prefix list %s: %w", id, err)
+		return fmt.Errorf("error reading EC2 Managed Prefix List (%s): %w", d.Id(), err)
 	}

-	if !ok {
-		log.Printf("[WARN] Managed Prefix List %s not found; removing from state.", id)
+	if pl == nil {
+		if d.IsNewResource() {
+			return fmt.Errorf("error reading EC2 Managed Prefix List (%s): not found", d.Id())
+		}
+
+		log.Printf("[WARN] EC2 Managed Prefix List %s not found, removing from state", d.Id())
 		d.SetId("")
 		return nil
 	}

-	d.Set("address_family", pl.AddressFamily)
-	d.Set("arn", pl.PrefixListArn)
+	input := &ec2.GetManagedPrefixListEntriesInput{
+		PrefixListId: pl.PrefixListId,
+	}
+	var prefixListEntries []*ec2.PrefixListEntry
+
+	err = conn.GetManagedPrefixListEntriesPages(input, func(page *ec2.GetManagedPrefixListEntriesOutput, lastPage bool) bool {
+		if page == nil {
+			return !lastPage
+		}
+
+		prefixListEntries = append(prefixListEntries, page.Entries...)
+
+		return !lastPage
+	})

-	entries, err := getPrefixListEntries(id, conn, 0)
 	if err != nil {
 		return fmt.Errorf("error listing entries of EC2 Managed Prefix List (%s): %w", d.Id(), err)
 	}

-	if err := d.Set("entry", flattenPrefixListEntries(entries)); err != nil {
-		return fmt.Errorf("error setting attribute entry of managed prefix list %s: %s", id, err)
+	d.Set("address_family", pl.AddressFamily)
+	d.Set("arn", pl.PrefixListArn)
+
+	if err := d.Set("entry", flattenEc2PrefixListEntries(prefixListEntries)); err != nil {
+		return fmt.Errorf("error setting attribute entry of managed prefix list %s: %w", d.Id(), err)
 	}

 	d.Set("max_entries", pl.MaxEntries)
@@ -150,7 +186,7 @@ func resourceAwsEc2ManagedPrefixListRead(d *schema.ResourceData, meta interface{
 	d.Set("owner_id", pl.OwnerId)

 	if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(pl.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil {
-		return fmt.Errorf("error settings attribute tags of managed prefix list %s: %s", id, err)
+		return fmt.Errorf("error setting attribute tags of managed prefix list %s: %w", d.Id(), err)
 	}

 	d.Set("version", pl.Version)
@@ -160,13 +196,12 @@ func resourceAwsEc2ManagedPrefixListRead(d *schema.ResourceData, meta interface{

 func resourceAwsEc2ManagedPrefixListUpdate(d *schema.ResourceData, meta interface{}) error {
 	conn := meta.(*AWSClient).ec2conn
-	id := d.Id()
-
-	input := ec2.ModifyManagedPrefixListInput{}
-
-	input.PrefixListId = aws.String(id)

 	if d.HasChangeExcept("tags") {
+		input := &ec2.ModifyManagedPrefixListInput{
+			PrefixListId: aws.String(d.Id()),
+		}
+
 		input.PrefixListName = aws.String(d.Get("name").(string))
 		currentVersion := int64(d.Get("version").(int))
 		wait := false
@@ -176,40 +211,34 @@ func resourceAwsEc2ManagedPrefixListUpdate(d *schema.ResourceData, meta interfac
 		ns := newAttr.(*schema.Set)

 		if addEntries := ns.Difference(os); addEntries.Len() > 0 {
-			input.AddEntries = expandAddPrefixListEntries(addEntries)
+			input.AddEntries = expandEc2AddPrefixListEntries(addEntries.List())
 			input.CurrentVersion = aws.Int64(currentVersion)
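			// Sending CurrentVersion makes the modification optimistic: a concurrent
			// change to the list causes the API to reject it with PrefixListVersionMismatch.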
wait = true } if removeEntries := os.Difference(ns); removeEntries.Len() > 0 { - input.RemoveEntries = expandRemovePrefixListEntries(removeEntries) + input.RemoveEntries = expandEc2RemovePrefixListEntries(removeEntries.List()) input.CurrentVersion = aws.Int64(currentVersion) wait = true } - log.Printf("[INFO] modifying managed prefix list %s...", id) - - _, err := conn.ModifyManagedPrefixList(&input) - - if isAWSErr(err, "PrefixListVersionMismatch", "prefix list has the incorrect version number") { - return fmt.Errorf("failed to modify managed prefix list %s: conflicting change", id) - } + _, err := conn.ModifyManagedPrefixList(input) if err != nil { - return fmt.Errorf("failed to modify managed prefix list %s: %s", id, err) + return fmt.Errorf("error updating EC2 Managed Prefix List (%s): %w", d.Id(), err) } if wait { if _, err := waiter.ManagedPrefixListModified(conn, d.Id()); err != nil { - return fmt.Errorf("failed to modify managed prefix list %s: %w", d.Id(), err) + return fmt.Errorf("error waiting for EC2 Managed Prefix List (%s) update: %w", d.Id(), err) } } } if d.HasChange("tags") { - before, after := d.GetChange("tags") - if err := keyvaluetags.Ec2UpdateTags(conn, id, before, after); err != nil { - return fmt.Errorf("failed to update tags of managed prefix list %s: %s", id, err) + o, n := d.GetChange("tags") + if err := keyvaluetags.Ec2UpdateTags(conn, d.Id(), o, n); err != nil { + return fmt.Errorf("error updating EC2 Managed Prefix List (%s) tags: %w", d.Id(), err) } } @@ -218,15 +247,14 @@ func resourceAwsEc2ManagedPrefixListUpdate(d *schema.ResourceData, meta interfac func resourceAwsEc2ManagedPrefixListDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn - id := d.Id() - input := ec2.DeleteManagedPrefixListInput{ - PrefixListId: aws.String(id), + input := &ec2.DeleteManagedPrefixListInput{ + PrefixListId: aws.String(d.Id()), } - _, err := conn.DeleteManagedPrefixList(&input) + _, err := conn.DeleteManagedPrefixList(input) - if tfawserr.ErrCodeEquals(err, "InvalidPrefixListID.NotFound") { + if tfawserr.ErrCodeEquals(err, tfec2.ErrCodeInvalidPrefixListIDNotFound) { return nil } @@ -235,116 +263,128 @@ func resourceAwsEc2ManagedPrefixListDelete(d *schema.ResourceData, meta interfac } if err := waiter.ManagedPrefixListDeleted(conn, d.Id()); err != nil { - return fmt.Errorf("failed to delete managed prefix list %s: %w", d.Id(), err) + return fmt.Errorf("error waiting for EC2 Managed Prefix List (%s) deletion: %w", d.Id(), err) } return nil } -func expandAddPrefixListEntries(input interface{}) []*ec2.AddPrefixListEntry { - if input == nil { +func expandEc2AddPrefixListEntry(tfMap map[string]interface{}) *ec2.AddPrefixListEntry { + if tfMap == nil { + return nil + } + + apiObject := &ec2.AddPrefixListEntry{} + + if v, ok := tfMap["cidr_block"].(string); ok && v != "" { + apiObject.Cidr = aws.String(v) + } + + if v, ok := tfMap["description"].(string); ok && v != "" { + apiObject.Description = aws.String(v) + } + + return apiObject +} + +func expandEc2AddPrefixListEntries(tfList []interface{}) []*ec2.AddPrefixListEntry { + if len(tfList) == 0 { return nil } - list := input.(*schema.Set).List() - result := make([]*ec2.AddPrefixListEntry, 0, len(list)) + var apiObjects []*ec2.AddPrefixListEntry - for _, entry := range list { - m := entry.(map[string]interface{}) + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) - output := ec2.AddPrefixListEntry{} + if !ok { + continue + } - output.Cidr = 
aws.String(m["cidr_block"].(string)) + apiObject := expandEc2AddPrefixListEntry(tfMap) - if v, ok := m["description"]; ok { - output.Description = aws.String(v.(string)) + if apiObject == nil { + continue } - result = append(result, &output) + apiObjects = append(apiObjects, apiObject) } - return result + return apiObjects } -func expandRemovePrefixListEntries(input interface{}) []*ec2.RemovePrefixListEntry { - if input == nil { +func expandEc2RemovePrefixListEntry(tfMap map[string]interface{}) *ec2.RemovePrefixListEntry { + if tfMap == nil { return nil } - list := input.(*schema.Set).List() - result := make([]*ec2.RemovePrefixListEntry, 0, len(list)) + apiObject := &ec2.RemovePrefixListEntry{} - for _, entry := range list { - m := entry.(map[string]interface{}) - output := ec2.RemovePrefixListEntry{} - output.Cidr = aws.String(m["cidr_block"].(string)) - result = append(result, &output) + if v, ok := tfMap["cidr_block"].(string); ok && v != "" { + apiObject.Cidr = aws.String(v) } - return result + return apiObject } -func flattenPrefixListEntries(entries []*ec2.PrefixListEntry) []interface{} { - list := make([]interface{}, 0, len(entries)) +func expandEc2RemovePrefixListEntries(tfList []interface{}) []*ec2.RemovePrefixListEntry { + if len(tfList) == 0 { + return nil + } - for _, entry := range entries { - m := make(map[string]interface{}, 2) - m["cidr_block"] = aws.StringValue(entry.Cidr) + var apiObjects []*ec2.RemovePrefixListEntry - if entry.Description != nil { - m["description"] = aws.StringValue(entry.Description) + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue } - list = append(list, m) + apiObject := expandEc2RemovePrefixListEntry(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, apiObject) } - return list + return apiObjects } -func getManagedPrefixList( - id string, - conn *ec2.EC2, -) (*ec2.ManagedPrefixList, bool, error) { - input := ec2.DescribeManagedPrefixListsInput{ - PrefixListIds: aws.StringSlice([]string{id}), +func flattenEc2PrefixListEntry(apiObject *ec2.PrefixListEntry) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.Cidr; v != nil { + tfMap["cidr_block"] = aws.StringValue(v) } - output, err := conn.DescribeManagedPrefixLists(&input) - switch { - case isAWSErr(err, "InvalidPrefixListID.NotFound", ""): - return nil, false, nil - case err != nil: - return nil, false, fmt.Errorf("describe managed prefix list %s: %v", id, err) - case len(output.PrefixLists) != 1: - return nil, false, nil + if v := apiObject.Description; v != nil { + tfMap["description"] = aws.StringValue(v) } - return output.PrefixLists[0], true, nil + return tfMap } -func getPrefixListEntries( - id string, - conn *ec2.EC2, - version int64, -) ([]*ec2.PrefixListEntry, error) { - input := ec2.GetManagedPrefixListEntriesInput{ - PrefixListId: aws.String(id), +func flattenEc2PrefixListEntries(apiObjects []*ec2.PrefixListEntry) []interface{} { + if len(apiObjects) == 0 { + return nil } - if version > 0 { - input.TargetVersion = aws.Int64(version) - } + var tfList []interface{} + + for _, apiObject := range apiObjects { + if apiObject == nil { + continue + } - result := []*ec2.PrefixListEntry(nil) - switch err := conn.GetManagedPrefixListEntriesPages( - &input, - func(output *ec2.GetManagedPrefixListEntriesOutput, last bool) bool { - result = append(result, output.Entries...) 
- return true - }); { - case err != nil: - return nil, fmt.Errorf("failed to get entries in prefix list %s: %v", id, err) + tfList = append(tfList, flattenEc2PrefixListEntry(apiObject)) } - return result, nil + return tfList } diff --git a/aws/resource_aws_ec2_managed_prefix_list_test.go b/aws/resource_aws_ec2_managed_prefix_list_test.go index 6c767623cb4..1440ccf2313 100644 --- a/aws/resource_aws_ec2_managed_prefix_list_test.go +++ b/aws/resource_aws_ec2_managed_prefix_list_test.go @@ -5,52 +5,17 @@ import ( "regexp" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + tfec2 "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2/finder" ) -func testAccCheckAwsEc2ManagedPrefixListDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_ec2_managed_prefix_list" { - continue - } - - id := rs.Primary.ID - - switch _, ok, err := getManagedPrefixList(id, conn); { - case err != nil: - return err - case ok: - return fmt.Errorf("managed prefix list %s still exists", id) - } - } - - return nil -} - -func testAccCheckAwsEc2ManagedPrefixListVersion( - prefixList *ec2.ManagedPrefixList, - version int64, -) resource.TestCheckFunc { - return func(state *terraform.State) error { - if actual := aws.Int64Value(prefixList.Version); actual != version { - return fmt.Errorf("expected prefix list version %d, got %d", version, actual) - } - - return nil - } -} - func TestAccAwsEc2ManagedPrefixList_basic(t *testing.T) { resourceName := "aws_ec2_managed_prefix_list.test" - pl, entries := ec2.ManagedPrefixList{}, []*ec2.PrefixListEntry(nil) - rName1 := acctest.RandomWithPrefix("tf-acc-test") - rName2 := acctest.RandomWithPrefix("tf-acc-test") + rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -58,54 +23,18 @@ func TestAccAwsEc2ManagedPrefixList_basic(t *testing.T) { CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, Steps: []resource.TestStep{ { - Config: testAccAwsEc2ManagedPrefixListConfig_basic_create(rName1), + Config: testAccAwsEc2ManagedPrefixListConfig_Name(rName), ResourceName: resourceName, Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, &entries), - resource.TestCheckResourceAttr(resourceName, "name", rName1), - testAccMatchResourceAttrRegionalARN(resourceName, "arn", "ec2", regexp.MustCompile(`prefix-list/pl-[[:xdigit:]]+`)), + testAccAwsEc2ManagedPrefixListExists(resourceName), resource.TestCheckResourceAttr(resourceName, "address_family", "IPv4"), - resource.TestCheckResourceAttr(resourceName, "max_entries", "5"), - resource.TestCheckResourceAttr(resourceName, "entry.#", "2"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "entry.*", map[string]string{ - "cidr_block": "1.0.0.0/8", - "description": "Test1", - }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "entry.*", map[string]string{ - "cidr_block": "2.0.0.0/8", - "description": "Test2", - }), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "ec2", regexp.MustCompile(`prefix-list/pl-[[:xdigit:]]+`)), 
+ resource.TestCheckResourceAttr(resourceName, "entry.#", "0"), + resource.TestCheckResourceAttr(resourceName, "max_entries", "1"), + resource.TestCheckResourceAttr(resourceName, "name", rName), testAccCheckResourceAttrAccountID(resourceName, "owner_id"), - testAccCheckAwsEc2ManagedPrefixListVersion(&pl, 1), - resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), - resource.TestCheckResourceAttr(resourceName, "tags.Key1", "Value1"), - resource.TestCheckResourceAttr(resourceName, "tags.Key2", "Value2"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccAwsEc2ManagedPrefixListConfig_basic_update(rName2), - ResourceName: resourceName, - Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, &entries), - resource.TestCheckResourceAttr(resourceName, "name", rName2), - resource.TestCheckResourceAttr(resourceName, "entry.#", "2"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "entry.*", map[string]string{ - "cidr_block": "1.0.0.0/8", - "description": "Test1", - }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "entry.*", map[string]string{ - "cidr_block": "3.0.0.0/8", - "description": "Test3", - }), - testAccCheckAwsEc2ManagedPrefixListVersion(&pl, 2), - resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), - resource.TestCheckResourceAttr(resourceName, "tags.Key1", "Value1"), - resource.TestCheckResourceAttr(resourceName, "tags.Key3", "Value3"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "version", "1"), ), }, { @@ -117,101 +46,8 @@ func TestAccAwsEc2ManagedPrefixList_basic(t *testing.T) { }) } -func testAccAwsEc2ManagedPrefixListConfig_basic_create(rName string) string { - return fmt.Sprintf(` -resource "aws_ec2_managed_prefix_list" "test" { - name = %[1]q - address_family = "IPv4" - max_entries = 5 - - entry { - cidr_block = "1.0.0.0/8" - description = "Test1" - } - - entry { - cidr_block = "2.0.0.0/8" - description = "Test2" - } - - tags = { - Key1 = "Value1" - Key2 = "Value2" - } -} -`, rName) -} - -func testAccAwsEc2ManagedPrefixListConfig_basic_update(rName string) string { - return fmt.Sprintf(` -resource "aws_ec2_managed_prefix_list" "test" { - name = %[1]q - address_family = "IPv4" - max_entries = 5 - - entry { - cidr_block = "1.0.0.0/8" - description = "Test1" - } - - entry { - cidr_block = "3.0.0.0/8" - description = "Test3" - } - - tags = { - Key1 = "Value1" - Key3 = "Value3" - } -} -`, rName) -} - -func testAccAwsEc2ManagedPrefixListExists( - name string, - out *ec2.ManagedPrefixList, - entries *[]*ec2.PrefixListEntry, -) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - switch { - case !ok: - return fmt.Errorf("resource %s not found", name) - case rs.Primary.ID == "": - return fmt.Errorf("resource %s has not set its id", name) - } - - conn := testAccProvider.Meta().(*AWSClient).ec2conn - id := rs.Primary.ID - - pl, ok, err := getManagedPrefixList(id, conn) - switch { - case err != nil: - return err - case !ok: - return fmt.Errorf("resource %s (%s) has not been created", name, id) - } - - if out != nil { - *out = *pl - } - - if entries != nil { - entries1, err := getPrefixListEntries(id, conn, *pl.Version) - if err != nil { - return err - } - - *entries = entries1 - } - - return nil - } -} - func TestAccAwsEc2ManagedPrefixList_disappears(t *testing.T) { resourceName := "aws_ec2_managed_prefix_list.test" - 
pl := ec2.ManagedPrefixList{} rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ @@ -220,10 +56,9 @@ func TestAccAwsEc2ManagedPrefixList_disappears(t *testing.T) { CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, Steps: []resource.TestStep{ { - Config: testAccAwsEc2ManagedPrefixListConfig_disappears(rName), - ResourceName: resourceName, + Config: testAccAwsEc2ManagedPrefixListConfig_Name(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), + testAccAwsEc2ManagedPrefixListExists(resourceName), testAccCheckResourceDisappears(testAccProvider, resourceAwsEc2ManagedPrefixList(), resourceName), ), ExpectNonEmptyPlan: true, @@ -232,25 +67,9 @@ func TestAccAwsEc2ManagedPrefixList_disappears(t *testing.T) { }) } -func testAccAwsEc2ManagedPrefixListConfig_disappears(rName string) string { - return fmt.Sprintf(` -resource "aws_ec2_managed_prefix_list" "test" { - name = %[1]q - address_family = "IPv4" - max_entries = 2 - - entry { - cidr_block = "1.0.0.0/8" - } -} -`, rName) -} - -func TestAccAwsEc2ManagedPrefixList_name(t *testing.T) { +func TestAccAwsEc2ManagedPrefixList_AddressFamily_IPv6(t *testing.T) { resourceName := "aws_ec2_managed_prefix_list.test" - pl := ec2.ManagedPrefixList{} - rName1 := acctest.RandomWithPrefix("tf-acc-test") - rName2 := acctest.RandomWithPrefix("tf-acc-test") + rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -258,26 +77,11 @@ func TestAccAwsEc2ManagedPrefixList_name(t *testing.T) { CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, Steps: []resource.TestStep{ { - Config: testAccAwsEc2ManagedPrefixListConfig_name_create(rName1), - ResourceName: resourceName, - Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), - resource.TestCheckResourceAttr(resourceName, "name", rName1), - testAccCheckAwsEc2ManagedPrefixListVersion(&pl, 1), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccAwsEc2ManagedPrefixListConfig_name_update(rName2), + Config: testAccAwsEc2ManagedPrefixListConfig_AddressFamily(rName, "IPv6"), ResourceName: resourceName, Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), - resource.TestCheckResourceAttr(resourceName, "name", rName2), - testAccCheckAwsEc2ManagedPrefixListVersion(&pl, 1), + testAccAwsEc2ManagedPrefixListExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "address_family", "IPv6"), ), }, { @@ -289,29 +93,8 @@ func TestAccAwsEc2ManagedPrefixList_name(t *testing.T) { }) } -func testAccAwsEc2ManagedPrefixListConfig_name_create(rName string) string { - return fmt.Sprintf(` -resource "aws_ec2_managed_prefix_list" "test" { - name = %[1]q - address_family = "IPv4" - max_entries = 5 -} -`, rName) -} - -func testAccAwsEc2ManagedPrefixListConfig_name_update(rName string) string { - return fmt.Sprintf(` -resource "aws_ec2_managed_prefix_list" "test" { - name = %[1]q - address_family = "IPv4" - max_entries = 5 -} -`, rName) -} - -func TestAccAwsEc2ManagedPrefixList_tags(t *testing.T) { +func TestAccAwsEc2ManagedPrefixList_Entry(t *testing.T) { resourceName := "aws_ec2_managed_prefix_list.test" - pl := ec2.ManagedPrefixList{} rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ @@ -320,11 +103,20 @@ func 
TestAccAwsEc2ManagedPrefixList_tags(t *testing.T) { CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, Steps: []resource.TestStep{ { - Config: testAccAwsEc2ManagedPrefixListConfig_tags_none(rName), + Config: testAccAwsEc2ManagedPrefixListConfig_Entry1(rName), ResourceName: resourceName, Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + testAccAwsEc2ManagedPrefixListExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "entry.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "entry.*", map[string]string{ + "cidr_block": "1.0.0.0/8", + "description": "Test1", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "entry.*", map[string]string{ + "cidr_block": "2.0.0.0/8", + "description": "Test2", + }), + resource.TestCheckResourceAttr(resourceName, "version", "1"), ), }, { @@ -333,14 +125,20 @@ func TestAccAwsEc2ManagedPrefixList_tags(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAwsEc2ManagedPrefixListConfig_tags_addSome(rName), + Config: testAccAwsEc2ManagedPrefixListConfig_Entry2(rName), ResourceName: resourceName, Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), - resource.TestCheckResourceAttr(resourceName, "tags.%", "3"), - resource.TestCheckResourceAttr(resourceName, "tags.Key1", "Value1"), - resource.TestCheckResourceAttr(resourceName, "tags.Key2", "Value2"), - resource.TestCheckResourceAttr(resourceName, "tags.Key3", "Value3"), + testAccAwsEc2ManagedPrefixListExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "entry.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "entry.*", map[string]string{ + "cidr_block": "1.0.0.0/8", + "description": "Test1", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "entry.*", map[string]string{ + "cidr_block": "3.0.0.0/8", + "description": "Test3", + }), + resource.TestCheckResourceAttr(resourceName, "version", "2"), ), }, { @@ -348,14 +146,27 @@ func TestAccAwsEc2ManagedPrefixList_tags(t *testing.T) { ImportState: true, ImportStateVerify: true, }, + }, + }) +} + +func TestAccAwsEc2ManagedPrefixList_Name(t *testing.T) { + resourceName := "aws_ec2_managed_prefix_list.test" + rName1 := acctest.RandomWithPrefix("tf-acc-test") + rName2 := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, + Steps: []resource.TestStep{ { - Config: testAccAwsEc2ManagedPrefixListConfig_tags_dropOrModifySome(rName), + Config: testAccAwsEc2ManagedPrefixListConfig_Name(rName1), ResourceName: resourceName, Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), - resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), - resource.TestCheckResourceAttr(resourceName, "tags.Key2", "Value2-1"), - resource.TestCheckResourceAttr(resourceName, "tags.Key3", "Value3"), + testAccAwsEc2ManagedPrefixListExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", rName1), + resource.TestCheckResourceAttr(resourceName, "version", "1"), ), }, { @@ -364,11 +175,34 @@ func TestAccAwsEc2ManagedPrefixList_tags(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAwsEc2ManagedPrefixListConfig_tags_empty(rName), + Config: 
testAccAwsEc2ManagedPrefixListConfig_Name(rName2), ResourceName: resourceName, Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + testAccAwsEc2ManagedPrefixListExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", rName2), + resource.TestCheckResourceAttr(resourceName, "version", "1"), + ), + }, + }, + }) +} + +func TestAccAwsEc2ManagedPrefixList_Tags(t *testing.T) { + resourceName := "aws_ec2_managed_prefix_list.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEc2ManagedPrefixListConfig_Tags1(rName, "key1", "value1"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + resource.TestCheckResourceAttr(resourceName, "version", "1"), ), }, { @@ -377,118 +211,167 @@ func TestAccAwsEc2ManagedPrefixList_tags(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAwsEc2ManagedPrefixListConfig_tags_none(rName), - ResourceName: resourceName, + Config: testAccAwsEc2ManagedPrefixListConfig_Tags2(rName, "key1", "value1updated", "key2", "value2"), Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListExists(resourceName, &pl, nil), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + testAccAwsEc2ManagedPrefixListExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + resource.TestCheckResourceAttr(resourceName, "version", "1"), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + Config: testAccAwsEc2ManagedPrefixListConfig_Tags1(rName, "key2", "value2"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2ManagedPrefixListExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + resource.TestCheckResourceAttr(resourceName, "version", "1"), + ), }, }, }) } -func testAccAwsEc2ManagedPrefixListConfig_tags_none(rName string) string { +func testAccCheckAwsEc2ManagedPrefixListDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).ec2conn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_ec2_managed_prefix_list" { + continue + } + + pl, err := finder.ManagedPrefixListByID(conn, rs.Primary.ID) + + if tfawserr.ErrCodeEquals(err, tfec2.ErrCodeInvalidPrefixListIDNotFound) { + continue + } + + if err != nil { + return fmt.Errorf("error reading EC2 Managed Prefix List (%s): %w", rs.Primary.ID, err) + } + + if pl != nil { + return fmt.Errorf("EC2 Managed Prefix List (%s) still exists", rs.Primary.ID) + } + } + + return nil +} + +func testAccAwsEc2ManagedPrefixListExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + + if !ok { + return fmt.Errorf("resource %s not found", resourceName) + } + + if rs.Primary.ID == "" { + return 
fmt.Errorf("resource %s has not set its id", resourceName) + } + + conn := testAccProvider.Meta().(*AWSClient).ec2conn + + pl, err := finder.ManagedPrefixListByID(conn, rs.Primary.ID) + + if err != nil { + return fmt.Errorf("error reading EC2 Managed Prefix List (%s): %w", rs.Primary.ID, err) + } + + if pl == nil { + return fmt.Errorf("EC2 Managed Prefix List (%s) not found", rs.Primary.ID) + } + + return nil + } +} + +func testAccAwsEc2ManagedPrefixListConfig_AddressFamily(rName string, addressFamily string) string { return fmt.Sprintf(` resource "aws_ec2_managed_prefix_list" "test" { + address_family = %[2]q + max_entries = 1 name = %[1]q - address_family = "IPv4" - max_entries = 5 } -`, rName) +`, rName, addressFamily) } -func testAccAwsEc2ManagedPrefixListConfig_tags_addSome(rName string) string { +func testAccAwsEc2ManagedPrefixListConfig_Entry1(rName string) string { return fmt.Sprintf(` resource "aws_ec2_managed_prefix_list" "test" { - name = %[1]q address_family = "IPv4" max_entries = 5 + name = %[1]q - tags = { - Key1 = "Value1" - Key2 = "Value2" - Key3 = "Value3" + entry { + cidr_block = "1.0.0.0/8" + description = "Test1" + } + + entry { + cidr_block = "2.0.0.0/8" + description = "Test2" } } `, rName) } -func testAccAwsEc2ManagedPrefixListConfig_tags_dropOrModifySome(rName string) string { +func testAccAwsEc2ManagedPrefixListConfig_Entry2(rName string) string { return fmt.Sprintf(` resource "aws_ec2_managed_prefix_list" "test" { - name = %[1]q address_family = "IPv4" max_entries = 5 + name = %[1]q - tags = { - Key2 = "Value2-1" - Key3 = "Value3" + entry { + cidr_block = "1.0.0.0/8" + description = "Test1" + } + + entry { + cidr_block = "3.0.0.0/8" + description = "Test3" } } `, rName) } -func testAccAwsEc2ManagedPrefixListConfig_tags_empty(rName string) string { +func testAccAwsEc2ManagedPrefixListConfig_Name(rName string) string { return fmt.Sprintf(` resource "aws_ec2_managed_prefix_list" "test" { - name = %[1]q address_family = "IPv4" - max_entries = 5 - tags = {} + max_entries = 1 + name = %[1]q } `, rName) } -func TestAccAwsEc2ManagedPrefixList_exceedLimit(t *testing.T) { - resourceName := "aws_ec2_managed_prefix_list.test" - prefixList := ec2.ManagedPrefixList{} - rName := acctest.RandomWithPrefix("tf-acc-test") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsEc2ManagedPrefixListConfig_exceedLimit(rName, 2), - ResourceName: resourceName, - Check: resource.ComposeAggregateTestCheckFunc( - testAccAwsEc2ManagedPrefixListExists(resourceName, &prefixList, nil), - resource.TestCheckResourceAttr(resourceName, "entry.#", "2"), - ), - }, - { - Config: testAccAwsEc2ManagedPrefixListConfig_exceedLimit(rName, 3), - ResourceName: resourceName, - ExpectError: regexp.MustCompile(`You've reached the maximum number of entries for the prefix list.`), - }, - }, - }) -} +func testAccAwsEc2ManagedPrefixListConfig_Tags1(rName string, tagKey1 string, tagValue1 string) string { + return fmt.Sprintf(` +resource "aws_ec2_managed_prefix_list" "test" { + name = %[1]q + address_family = "IPv4" + max_entries = 5 -func testAccAwsEc2ManagedPrefixListConfig_exceedLimit(rName string, count int) string { - entries := `` - for i := 0; i < count; i++ { - entries += fmt.Sprintf(` - entry { - cidr_block = "%[1]d.0.0.0/8" - description = "Test_%[1]d" + tags = { + %[2]q = %[3]q } -`, i+1) - } +} +`, rName, tagKey1, tagValue1) +} 
+func testAccAwsEc2ManagedPrefixListConfig_Tags2(rName string, tagKey1 string, tagValue1 string, tagKey2 string, tagValue2 string) string { return fmt.Sprintf(` resource "aws_ec2_managed_prefix_list" "test" { - name = %[2]q + name = %[1]q address_family = "IPv4" - max_entries = 2 -%[1]s + max_entries = 5 + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } } -`, entries, rName) +`, rName, tagKey1, tagValue1, tagKey2, tagValue2) } From dc57570ec1f4d4f816d470d9e262aab957ee7b12 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 16 Dec 2020 22:30:25 -0500 Subject: [PATCH 0279/1212] Update CHANGELOG for #14068 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d889951381e..36757c2561e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ FEATURES * **New Data Source:** `aws_ec2_managed_prefix_list` [GH-16738] * **New Data Source:** `aws_lakeformation_data_lake_settings` [GH-13250] * **New Resource:** `aws_codestarconnections_connection` [GH-15990] +* **New Resource:** `aws_ec2_managed_prefix_list` [GH-14068] * **New Resource:** `aws_lakeformation_data_lake_settings` [GH-13250] * **New Resource:** `aws_lakeformation_resource` [GH-13267] From e5e68dd92f4ebf444839d0dbb7ef4ce0659263bd Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 16 Dec 2020 23:04:03 -0500 Subject: [PATCH 0280/1212] resource/aws_ec2_managed_prefix_list: Use cidr not cidr_block for API consistency Output from acceptance testing: ``` --- PASS: TestAccAwsEc2ManagedPrefixList_disappears (18.76s) --- PASS: TestAccAwsEc2ManagedPrefixList_AddressFamily_IPv6 (27.40s) --- PASS: TestAccAwsEc2ManagedPrefixList_basic (28.55s) --- PASS: TestAccAwsEc2ManagedPrefixList_Name (37.41s) --- PASS: TestAccAwsEc2ManagedPrefixList_Entry (42.01s) --- PASS: TestAccAwsEc2ManagedPrefixList_Tags (45.00s) ``` --- aws/resource_aws_ec2_managed_prefix_list.go | 8 ++++---- aws/resource_aws_ec2_managed_prefix_list_test.go | 16 ++++++++-------- .../docs/r/ec2_managed_prefix_list.html.markdown | 6 +++--- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/aws/resource_aws_ec2_managed_prefix_list.go b/aws/resource_aws_ec2_managed_prefix_list.go index 64e265698b2..caa8dea8342 100644 --- a/aws/resource_aws_ec2_managed_prefix_list.go +++ b/aws/resource_aws_ec2_managed_prefix_list.go @@ -52,7 +52,7 @@ func resourceAwsEc2ManagedPrefixList() *schema.Resource { Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "cidr_block": { + "cidr": { Type: schema.TypeString, Required: true, ValidateFunc: validation.IsCIDR, @@ -276,7 +276,7 @@ func expandEc2AddPrefixListEntry(tfMap map[string]interface{}) *ec2.AddPrefixLis apiObject := &ec2.AddPrefixListEntry{} - if v, ok := tfMap["cidr_block"].(string); ok && v != "" { + if v, ok := tfMap["cidr"].(string); ok && v != "" { apiObject.Cidr = aws.String(v) } @@ -320,7 +320,7 @@ func expandEc2RemovePrefixListEntry(tfMap map[string]interface{}) *ec2.RemovePre apiObject := &ec2.RemovePrefixListEntry{} - if v, ok := tfMap["cidr_block"].(string); ok && v != "" { + if v, ok := tfMap["cidr"].(string); ok && v != "" { apiObject.Cidr = aws.String(v) } @@ -361,7 +361,7 @@ func flattenEc2PrefixListEntry(apiObject *ec2.PrefixListEntry) map[string]interf tfMap := map[string]interface{}{} if v := apiObject.Cidr; v != nil { - tfMap["cidr_block"] = aws.StringValue(v) + tfMap["cidr"] = aws.StringValue(v) } if v := apiObject.Description; v != nil { diff --git a/aws/resource_aws_ec2_managed_prefix_list_test.go b/aws/resource_aws_ec2_managed_prefix_list_test.go 
index 1440ccf2313..0116248f2c9 100644 --- a/aws/resource_aws_ec2_managed_prefix_list_test.go +++ b/aws/resource_aws_ec2_managed_prefix_list_test.go @@ -109,11 +109,11 @@ func TestAccAwsEc2ManagedPrefixList_Entry(t *testing.T) { testAccAwsEc2ManagedPrefixListExists(resourceName), resource.TestCheckResourceAttr(resourceName, "entry.#", "2"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "entry.*", map[string]string{ - "cidr_block": "1.0.0.0/8", + "cidr": "1.0.0.0/8", "description": "Test1", }), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "entry.*", map[string]string{ - "cidr_block": "2.0.0.0/8", + "cidr": "2.0.0.0/8", "description": "Test2", }), resource.TestCheckResourceAttr(resourceName, "version", "1"), @@ -131,11 +131,11 @@ func TestAccAwsEc2ManagedPrefixList_Entry(t *testing.T) { testAccAwsEc2ManagedPrefixListExists(resourceName), resource.TestCheckResourceAttr(resourceName, "entry.#", "2"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "entry.*", map[string]string{ - "cidr_block": "1.0.0.0/8", + "cidr": "1.0.0.0/8", "description": "Test1", }), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "entry.*", map[string]string{ - "cidr_block": "3.0.0.0/8", + "cidr": "3.0.0.0/8", "description": "Test3", }), resource.TestCheckResourceAttr(resourceName, "version", "2"), @@ -305,12 +305,12 @@ resource "aws_ec2_managed_prefix_list" "test" { name = %[1]q entry { - cidr_block = "1.0.0.0/8" + cidr = "1.0.0.0/8" description = "Test1" } entry { - cidr_block = "2.0.0.0/8" + cidr = "2.0.0.0/8" description = "Test2" } } @@ -325,12 +325,12 @@ resource "aws_ec2_managed_prefix_list" "test" { name = %[1]q entry { - cidr_block = "1.0.0.0/8" + cidr = "1.0.0.0/8" description = "Test1" } entry { - cidr_block = "3.0.0.0/8" + cidr = "3.0.0.0/8" description = "Test3" } } diff --git a/website/docs/r/ec2_managed_prefix_list.html.markdown b/website/docs/r/ec2_managed_prefix_list.html.markdown index d0f25d54b9d..98c480a4e45 100644 --- a/website/docs/r/ec2_managed_prefix_list.html.markdown +++ b/website/docs/r/ec2_managed_prefix_list.html.markdown @@ -27,12 +27,12 @@ resource "aws_ec2_managed_prefix_list" "example" { max_entries = 5 entry { - cidr_block = aws_vpc.example.cidr_block + cidr = aws_vpc.example.cidr_block description = "Primary" } entry { - cidr_block = aws_vpc_ipv4_cidr_block_association.example.cidr_block + cidr = aws_vpc_ipv4_cidr_block_association.example.cidr_block description = "Secondary" } @@ -58,7 +58,7 @@ The following arguments are supported: The `entry` block supports: -* `cidr_block` - (Required) The CIDR block of this entry. +* `cidr` - (Required) The CIDR block of this entry. * `description` - (Optional) Description of this entry. 
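A typical consumer of a managed prefix list is a security group rule. A minimal, hypothetical sketch (resource names are illustrative, and it assumes the standard `prefix_list_ids` argument on `aws_security_group_rule`):

```hcl
resource "aws_security_group_rule" "example" {
  type              = "ingress"
  from_port         = 443
  to_port           = 443
  protocol          = "tcp"
  security_group_id = aws_security_group.example.id
  prefix_list_ids   = [aws_ec2_managed_prefix_list.example.id]
}
```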
 ## Attributes Reference

From 8f30f8c341c8614782020d4896d7eba8552b91a7 Mon Sep 17 00:00:00 2001
From: Graham Davison
Date: Wed, 16 Dec 2020 22:57:02 -0800
Subject: [PATCH 0281/1212] Adds cluster_enabled output and plan-time
 validation for number_cache_clusters and cluster_mode

---
 ...ource_aws_elasticache_replication_group.go | 28 +++++++++----------
 ..._aws_elasticache_replication_group_test.go | 13 ++++++++-
 ...lasticache_replication_group.html.markdown |  5 ++--
 3 files changed, 29 insertions(+), 17 deletions(-)

diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go
index c940ef56827..86589c5704e 100644
--- a/aws/resource_aws_elasticache_replication_group.go
+++ b/aws/resource_aws_elasticache_replication_group.go
@@ -64,6 +64,10 @@ func resourceAwsElasticacheReplicationGroup() *schema.Resource {
 				Elem: &schema.Schema{Type: schema.TypeString},
 				Set:  schema.HashString,
 			},
+			"cluster_enabled": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
 			"cluster_mode": {
 				Type:     schema.TypeList,
 				Optional: true,
@@ -71,8 +75,9 @@
 				// and a cluster mode enabled parameter_group_name will create
 				// a single shard replication group with number_cache_clusters - 1
 				// read replicas. Otherwise, the resource is marked ForceNew.
-				Computed: true,
-				MaxItems: 1,
+				Computed:     true,
+				MaxItems:     1,
+				ExactlyOneOf: []string{"cluster_mode", "number_cache_clusters"},
 				Elem: &schema.Resource{
 					Schema: map[string]*schema.Schema{
 						"replicas_per_node_group": {
@@ -131,9 +136,10 @@ func resourceAwsElasticacheReplicationGroup() *schema.Resource {
 				ValidateFunc: validateArn,
 			},
 			"number_cache_clusters": {
-				Type:     schema.TypeInt,
-				Computed: true,
-				Optional: true,
+				Type:         schema.TypeInt,
+				Computed:     true,
+				Optional:     true,
+				ExactlyOneOf: []string{"cluster_mode", "number_cache_clusters"},
 			},
 			"parameter_group_name": {
 				Type: schema.TypeString,
@@ -340,14 +346,7 @@ func resourceAwsElasticacheReplicationGroupCreate(d *schema.ResourceData, meta i
 		params.AuthToken = aws.String(v.(string))
 	}

-	clusterMode, clusterModeOk := d.GetOk("cluster_mode")
-	cacheClusters, cacheClustersOk := d.GetOk("number_cache_clusters")
-
-	if !clusterModeOk && !cacheClustersOk || clusterModeOk && cacheClustersOk {
-		return fmt.Errorf("Either `number_cache_clusters` or `cluster_mode` must be set")
-	}
-
-	if clusterModeOk {
+	if clusterMode, ok := d.GetOk("cluster_mode"); ok {
 		clusterModeList := clusterMode.([]interface{})
 		attributes := clusterModeList[0].(map[string]interface{})

@@ -360,7 +359,7 @@ func resourceAwsElasticacheReplicationGroupCreate(d *schema.ResourceData, meta i
 		}
 	}

-	if cacheClustersOk {
+	if cacheClusters, ok := d.GetOk("number_cache_clusters"); ok {
 		params.NumCacheClusters = aws.Int64(int64(cacheClusters.(int)))
 	}

@@ -449,6 +448,7 @@ func resourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta int
 	if err := d.Set("cluster_mode", flattenElasticacheNodeGroupsToClusterMode(rgp.NodeGroups)); err != nil {
 		return fmt.Errorf("error setting cluster_mode attribute: %w", err)
 	}
+	d.Set("cluster_enabled", rgp.ClusterEnabled)
 	d.Set("replication_group_id", rgp.ReplicationGroupId)

 	if rgp.NodeGroups != nil {
diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go
index db61a57c186..c2804352bae 100644
--- a/aws/resource_aws_elasticache_replication_group_test.go
+++ b/aws/resource_aws_elasticache_replication_group_test.go
@@ -78,6 +78,7 @@ func
TestAccAWSElasticacheReplicationGroup_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "cluster_mode.#", "1"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "1"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.num_node_groups", "1"), + resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "false"), ), }, { @@ -389,6 +390,8 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_Basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "4"), + resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.redis6.x.cluster.on"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.#", "1"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.num_node_groups", "2"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "1"), @@ -420,6 +423,8 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_NonClusteredParameterGrou Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig_NonClusteredParameterGroup(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), + resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.redis6.x"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.#", "1"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.num_node_groups", "1"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "1"), @@ -453,6 +458,8 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_NumNodeGroups(t *testing. Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "6"), + resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.redis6.x.cluster.on"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.#", "1"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.num_node_groups", "3"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "1"), @@ -469,6 +476,8 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_NumNodeGroups(t *testing. Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), + resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.redis6.x.cluster.on"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.#", "1"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.num_node_groups", "1"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "1"), @@ -479,6 +488,8 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_NumNodeGroups(t *testing. 
Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "4"), + resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.redis6.x.cluster.on"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.#", "1"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.num_node_groups", "2"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "1"), @@ -499,7 +510,7 @@ func TestAccAWSElasticacheReplicationGroup_clusteringAndCacheNodesCausesError(t Steps: []resource.TestStep{ { Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterErrorConfig(rInt, rName), - ExpectError: regexp.MustCompile("Either `number_cache_clusters` or `cluster_mode` must be set"), + ExpectError: regexp.MustCompile("only one of `cluster_mode,number_cache_clusters` can be\\s+specified, but `cluster_mode,number_cache_clusters` were specified."), }, }, }) diff --git a/website/docs/r/elasticache_replication_group.html.markdown b/website/docs/r/elasticache_replication_group.html.markdown index 908a49fc2bd..d86cdf147c0 100644 --- a/website/docs/r/elasticache_replication_group.html.markdown +++ b/website/docs/r/elasticache_replication_group.html.markdown @@ -100,7 +100,7 @@ The following arguments are supported: * `replication_group_id` – (Required) The replication group identifier. This parameter is stored as a lowercase string. * `replication_group_description` – (Required) A user-created description for the replication group. -* `number_cache_clusters` - (Required for Cluster Mode Disabled) The number of cache clusters (primary and replicas) this replication group will have. If Multi-AZ is enabled, the value of this parameter must be at least 2. Updates will occur before other modifications. +* `number_cache_clusters` - (Optional) The number of cache clusters (primary and replicas) this replication group will have. If Multi-AZ is enabled, the value of this parameter must be at least 2. Updates will occur before other modifications. One of `number_cache_clusters` or `cluster_mode` is required. * `node_type` - (Required) The compute and memory capacity of the nodes in the node group. * `automatic_failover_enabled` - (Optional) Specifies whether a read-only replica will be automatically promoted to read/write primary if the existing primary fails. If true, Multi-AZ is enabled for this replication group. If false, Multi-AZ is disabled for this replication group. Must be enabled for Redis (cluster mode enabled) replication groups. Defaults to `false`. * `auto_minor_version_upgrade` - (Optional) Specifies whether a minor engine upgrades will be applied automatically to the underlying Cache Cluster instances during the maintenance window. Defaults to `true`. @@ -135,7 +135,7 @@ before being deleted. If the value of SnapshotRetentionLimit is set to zero (0), Please note that setting a `snapshot_retention_limit` is not supported on cache.t1.micro cache nodes * `apply_immediately` - (Optional) Specifies whether any modifications are applied immediately, or during the next maintenance window. Default is `false`. * `tags` - (Optional) A map of tags to assign to the resource. Adding tags to this resource will add or overwrite any existing tags on the clusters in the replication group and not to the group itself. -* `cluster_mode` - (Optional) Create a native redis cluster. 
`automatic_failover_enabled` must be set to true. Cluster Mode documented below. Only 1 `cluster_mode` block is allowed. +* `cluster_mode` - (Optional) Create a native redis cluster. `automatic_failover_enabled` must be set to true. Cluster Mode documented below. Only 1 `cluster_mode` block is allowed. One of `number_cache_clusters` or `cluster_mode` is required. Cluster Mode (`cluster_mode`) supports the following: @@ -147,6 +147,7 @@ Cluster Mode (`cluster_mode`) supports the following: In addition to all arguments above, the following attributes are exported: * `id` - The ID of the ElastiCache Replication Group. +* `cluster_enabled` - Indicates if cluster mode is enabled. * `configuration_endpoint_address` - The address of the replication group configuration endpoint when cluster mode is enabled. * `primary_endpoint_address` - (Redis only) The address of the endpoint for the primary node in the replication group, if the cluster mode is disabled. * `member_clusters` - The identifiers of all the nodes that are part of this replication group. From 294f76bacf16929fe3e8b5f3c5c9f7dc6b24ee51 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 17 Dec 2020 15:16:17 -0500 Subject: [PATCH 0282/1212] add support for extraConnectionAttributes in create/read ops and handle returned diffs --- aws/resource_aws_dms_endpoint.go | 114 +++++++++++++++- aws/resource_aws_dms_endpoint_test.go | 189 +++++++++++++++++++++++++- 2 files changed, 295 insertions(+), 8 deletions(-) diff --git a/aws/resource_aws_dms_endpoint.go b/aws/resource_aws_dms_endpoint.go index 3d5a6aef94a..9aad32ba2e4 100644 --- a/aws/resource_aws_dms_endpoint.go +++ b/aws/resource_aws_dms_endpoint.go @@ -3,6 +3,7 @@ package aws import ( "fmt" "log" + "regexp" "strings" "time" @@ -128,9 +129,10 @@ func resourceAwsDmsEndpoint() *schema.Resource { }, false), }, "extra_connection_attributes": { - Type: schema.TypeString, - Computed: true, - Optional: true, + Type: schema.TypeString, + Computed: true, + Optional: true, + DiffSuppressFunc: suppressExtraConnectionAttributesDiffs, }, "kafka_settings": { Type: schema.TypeList, @@ -299,6 +301,11 @@ func resourceAwsDmsEndpoint() *schema.Resource { Optional: true, Default: "NONE", }, + "date_partition_enabled": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, }, }, }, @@ -396,6 +403,7 @@ func resourceAwsDmsEndpointCreate(d *schema.ResourceData, meta interface{}) erro BucketFolder: aws.String(d.Get("s3_settings.0.bucket_folder").(string)), BucketName: aws.String(d.Get("s3_settings.0.bucket_name").(string)), CompressionType: aws.String(d.Get("s3_settings.0.compression_type").(string)), + DatePartitionEnabled: aws.Bool(d.Get("s3_settings.0.date_partition_enabled").(bool)), } default: request.Password = aws.String(d.Get("password").(string)) @@ -406,9 +414,7 @@ func resourceAwsDmsEndpointCreate(d *schema.ResourceData, meta interface{}) erro if v, ok := d.GetOk("database_name"); ok { request.DatabaseName = aws.String(v.(string)) } - if v, ok := d.GetOk("extra_connection_attributes"); ok { - request.ExtraConnectionAttributes = aws.String(v.(string)) - } + if v, ok := d.GetOk("kms_key_arn"); ok { request.KmsKeyId = aws.String(v.(string)) } @@ -417,6 +423,13 @@ func resourceAwsDmsEndpointCreate(d *schema.ResourceData, meta interface{}) erro if v, ok := d.GetOk("certificate_arn"); ok { request.CertificateArn = aws.String(v.(string)) } + + // Send ExtraConnectionAttributes in the API request for all resource types + // per https://github.com/hashicorp/terraform-provider-aws/issues/8009 + if 
v, ok := d.GetOk("extra_connection_attributes"); ok { + request.ExtraConnectionAttributes = aws.String(v.(string)) + } + if v, ok := d.GetOk("ssl_mode"); ok { request.SslMode = aws.String(v.(string)) } @@ -698,6 +711,7 @@ func resourceAwsDmsEndpointSetState(d *schema.ResourceData, endpoint *dms.Endpoi // For some reason the AWS API only accepts lowercase type but returns it as uppercase d.Set("endpoint_type", strings.ToLower(*endpoint.EndpointType)) d.Set("engine_name", endpoint.EngineName) + d.Set("extra_connection_attributes", endpoint.ExtraConnectionAttributes) switch *endpoint.EngineName { case "dynamodb": @@ -823,7 +837,95 @@ func flattenDmsS3Settings(settings *dms.S3Settings) []map[string]interface{} { "bucket_folder": aws.StringValue(settings.BucketFolder), "bucket_name": aws.StringValue(settings.BucketName), "compression_type": aws.StringValue(settings.CompressionType), + "date_partition_enabled": aws.BoolValue(settings.DatePartitionEnabled), } return []map[string]interface{}{m} } + +func suppressExtraConnectionAttributesDiffs(_, old, new string, d *schema.ResourceData) bool { + if d.Id() != "" { + o := extraConnectionAttributesToSet(old) + n := extraConnectionAttributesToSet(new) + + var config *schema.Set + // when the engine is "s3" or "mongodb", the extra_connection_attributes + // can consist of a subset of the attributes configured in the {engine}_settings block; + // fields such as service_access_role_arn (in the case of "s3") are not returned from the API in + // extra_connection_attributes thus we take the Set difference to ensure + // the returned attributes were set in the {engine}_settings block or originally + // in the extra_connection_attributes field + if v, ok := d.GetOk("mongodb_settings"); ok { + config = engineSettingsToSet(v.([]interface{})) + } else if v, ok := d.GetOk("s3_settings"); ok { + config = engineSettingsToSet(v.([]interface{})) + } + + if o != nil && config != nil { + diff := o.Difference(config) + + return diff.Len() == 0 || diff.Equal(n) + } + } + return false +} + +// extraConnectionAttributesToSet accepts an extra_connection_attributes +// string in the form of "key=value;key2=value2;" and returns +// the Set representation, with each element being the key/value pair +func extraConnectionAttributesToSet(extra string) *schema.Set { + if extra == "" { + return nil + } + + s := &schema.Set{F: schema.HashString} + + parts := strings.Split(extra, ";") + for _, part := range parts { + kvParts := strings.Split(part, "=") + if len(kvParts) != 2 { + continue + } + + k, v := kvParts[0], kvParts[1] + // normalize key, from camelCase to snake_case, + // and value where hyphens maybe used in a config + // but the API returns with underscores + matchAllCap := regexp.MustCompile("([a-z])([A-Z])") + key := matchAllCap.ReplaceAllString(k, "${1}_${2}") + normalizedVal := strings.Replace(strings.ToLower(v), "-", "_", -1) + + s.Add(fmt.Sprintf("%s=%s", strings.ToLower(key), normalizedVal)) + } + + return s +} + +// engineSettingsToSet accepts the {engine}_settings block as a list +// and returns the Set representation, with each element being the key/value pair +func engineSettingsToSet(l []interface{}) *schema.Set { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + if !ok { + return nil + } + + s := &schema.Set{F: schema.HashString} + + for k, v := range tfMap { + switch t := v.(type) { + case string: + // normalize value for changes in case or where hyphens + // maybe used in a config but the API returns with 
underscores + normalizedVal := strings.Replace(strings.ToLower(t), "-", "_", -1) + s.Add(fmt.Sprintf("%s=%v", k, normalizedVal)) + default: + s.Add(fmt.Sprintf("%s=%v", k, t)) + } + } + + return s +} diff --git a/aws/resource_aws_dms_endpoint_test.go b/aws/resource_aws_dms_endpoint_test.go index 7b498110c0f..7902378f534 100644 --- a/aws/resource_aws_dms_endpoint_test.go +++ b/aws/resource_aws_dms_endpoint_test.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "regexp" "testing" "github.com/aws/aws-sdk-go/aws" @@ -82,7 +83,7 @@ func TestAccAwsDmsEndpoint_S3(t *testing.T) { Config: dmsEndpointS3ConfigUpdate(randId), Check: resource.ComposeTestCheckFunc( checkDmsEndpointExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "extra_connection_attributes", "key=value;"), + resource.TestMatchResourceAttr(resourceName, "extra_connection_attributes", regexp.MustCompile(`key=value;`)), resource.TestCheckResourceAttr(resourceName, "s3_settings.#", "1"), resource.TestCheckResourceAttr(resourceName, "s3_settings.0.external_table_definition", "new-external_table_definition"), resource.TestCheckResourceAttr(resourceName, "s3_settings.0.csv_row_delimiter", "\\r"), @@ -96,6 +97,33 @@ func TestAccAwsDmsEndpoint_S3(t *testing.T) { }) } +// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/8009 +func TestAccAwsDmsEndpoint_S3_ExtraConnectionAttributes(t *testing.T) { + resourceName := "aws_dms_endpoint.dms_endpoint" + randId := acctest.RandString(8) + "-s3" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: dmsEndpointDestroy, + Steps: []resource.TestStep{ + { + Config: dmsEndpointS3ExtraConnectionAttributesConfig(randId), + Check: resource.ComposeTestCheckFunc( + checkDmsEndpointExists(resourceName), + resource.TestMatchResourceAttr(resourceName, "extra_connection_attributes", regexp.MustCompile(`dataFormat=parquet;`)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"password"}, + }, + }, + }) +} + func TestAccAwsDmsEndpoint_DynamoDb(t *testing.T) { resourceName := "aws_dms_endpoint.dms_endpoint" randId := acctest.RandString(8) + "-dynamodb" @@ -157,6 +185,36 @@ func TestAccAwsDmsEndpoint_Elasticsearch(t *testing.T) { }) } +// TestAccAwsDmsEndpoint_Elasticsearch_ExtraConnectionAttributes validates +// extra_connection_attributes handling for "elasticsearch" engine not affected +// by changes made specific to suppressing diffs in the case of "s3"/"mongodb" engine +// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/8009 +func TestAccAwsDmsEndpoint_Elasticsearch_ExtraConnectionAttributes(t *testing.T) { + resourceName := "aws_dms_endpoint.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: dmsEndpointDestroy, + Steps: []resource.TestStep{ + { + Config: dmsEndpointElasticsearchExtraConnectionAttributesConfig(rName), + Check: resource.ComposeTestCheckFunc( + checkDmsEndpointExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "extra_connection_attributes", "errorRetryDuration=400;"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"password"}, + }, + }, + }) +} + func TestAccAwsDmsEndpoint_Elasticsearch_ErrorRetryDuration(t *testing.T) { resourceName := 
"aws_dms_endpoint.test" rName := acctest.RandomWithPrefix("tf-acc-test") @@ -366,6 +424,29 @@ func TestAccAwsDmsEndpoint_MongoDb(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"password"}, }, + }, + }) +} + +// TestAccAwsDmsEndpoint_MongoDb_Update validates engine-specific +// configured fields and extra_connection_attributes now set in the resource +// per https://github.com/hashicorp/terraform-provider-aws/issues/8009 +func TestAccAwsDmsEndpoint_MongoDb_Update(t *testing.T) { + resourceName := "aws_dms_endpoint.dms_endpoint" + randId := acctest.RandString(8) + "-mongodb" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: dmsEndpointDestroy, + Steps: []resource.TestStep{ + { + Config: dmsEndpointMongoDbConfig(randId), + Check: resource.ComposeTestCheckFunc( + checkDmsEndpointExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "endpoint_arn"), + ), + }, { Config: dmsEndpointMongoDbConfigUpdate(randId), Check: resource.ComposeTestCheckFunc( @@ -376,7 +457,7 @@ func TestAccAwsDmsEndpoint_MongoDb(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "password", "tftest-new-password"), resource.TestCheckResourceAttr(resourceName, "database_name", "tftest-new-database_name"), resource.TestCheckResourceAttr(resourceName, "ssl_mode", "require"), - resource.TestCheckResourceAttr(resourceName, "extra_connection_attributes", "key=value;"), + resource.TestMatchResourceAttr(resourceName, "extra_connection_attributes", regexp.MustCompile(`key=value;`)), resource.TestCheckResourceAttr(resourceName, "mongodb_settings.#", "1"), resource.TestCheckResourceAttr(resourceName, "mongodb_settings.0.auth_mechanism", "scram-sha-1"), resource.TestCheckResourceAttr(resourceName, "mongodb_settings.0.nesting_level", "one"), @@ -384,6 +465,12 @@ func TestAccAwsDmsEndpoint_MongoDb(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "mongodb_settings.0.docs_to_investigate", "1001"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"password"}, + }, }, }) } @@ -768,6 +855,84 @@ EOF `, randId) } +func dmsEndpointS3ExtraConnectionAttributesConfig(randId string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_dms_endpoint" "dms_endpoint" { + endpoint_id = "tf-test-dms-endpoint-%[1]s" + endpoint_type = "target" + engine_name = "s3" + ssl_mode = "none" + extra_connection_attributes = "dataFormat=parquet;" + + s3_settings { + service_access_role_arn = aws_iam_role.iam_role.arn + bucket_name = "bucket_name" + bucket_folder = "bucket_folder" + compression_type = "GZIP" + } + + tags = { + Name = "tf-test-s3-endpoint-%[1]s" + Update = "to-update" + Remove = "to-remove" + } + + depends_on = [aws_iam_role_policy.dms_s3_access] +} + +resource "aws_iam_role" "iam_role" { + name = "tf-test-iam-s3-role-%[1]s" + + assume_role_policy = < Date: Thu, 17 Dec 2020 13:20:43 -0800 Subject: [PATCH 0283/1212] #16821 Fix spelling of db_subnet_group_name in restore_to_point_in_time --- aws/resource_aws_db_instance.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_db_instance.go b/aws/resource_aws_db_instance.go index 762066309ec..e48cc00f692 100644 --- a/aws/resource_aws_db_instance.go +++ b/aws/resource_aws_db_instance.go @@ -1149,7 +1149,7 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error input.StorageType = 
aws.String(v.(string)) } - if v, ok := d.GetOk("subnet_group_name"); ok { + if v, ok := d.GetOk("db_subnet_group_name"); ok { input.DBSubnetGroupName = aws.String(v.(string)) } From 0f31f2919105248c4b89f511cf83fb7538a73d69 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 17 Dec 2020 13:31:43 -0800 Subject: [PATCH 0284/1212] Adds clarifying documentation about cluster mode and parameter groups --- website/docs/r/elasticache_replication_group.html.markdown | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/website/docs/r/elasticache_replication_group.html.markdown b/website/docs/r/elasticache_replication_group.html.markdown index d86cdf147c0..41c46edcf08 100644 --- a/website/docs/r/elasticache_replication_group.html.markdown +++ b/website/docs/r/elasticache_replication_group.html.markdown @@ -20,6 +20,8 @@ actual modification has not yet taken place. You can use the immediately. Using `apply_immediately` can result in a brief downtime as servers reboots. +~> **Note:** Be aware of the terminology collision around "cluster" for `aws_elasticache_replication_group`. For example, it is possible to create a ["Cluster Mode Disabled [Redis] Cluster"](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.Create.CON.Redis.html). With "Cluster Mode Enabled", the data will be stored in shards (called "node groups"). See [Redis Cluster Configuration](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/cluster-create-determine-requirements.html#redis-cluster-configuration) for a diagram of the differences. To enable cluster mode, use a parameter group that has cluster mode enabled. The default parameter groups provided by AWS end with ".cluster.on", for example `default.redis6.x.cluster.on`. + ## Example Usage ### Redis Cluster Mode Disabled @@ -111,7 +113,7 @@ The following arguments are supported: * `auth_token` - (Optional) The password used to access a password protected server. Can be specified only if `transit_encryption_enabled = true`. * `kms_key_id` - (Optional) The ARN of the key that you wish to use if encrypting at rest. If not supplied, uses service managed encryption. Can be specified only if `at_rest_encryption_enabled = true`. * `engine_version` - (Optional) The version number of the cache engine to be used for the cache clusters in this replication group. -* `parameter_group_name` - (Optional) The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used. +* `parameter_group_name` - (Optional) The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used. To enable "cluster mode", i.e. data sharding, use a parameter group that has the parameter `cluster-enabled` set to true. * `port` – (Optional) The port number on which each of the cache nodes will accept connections. For Memcache the default is 11211, and for Redis the default port is 6379. * `subnet_group_name` - (Optional) The name of the cache subnet group to be used for the replication group. * `security_group_names` - (Optional) A list of cache security group names to associate with this replication group. @@ -135,7 +137,7 @@ before being deleted. 
If the value of SnapshotRetentionLimit is set to zero (0), Please note that setting a `snapshot_retention_limit` is not supported on cache.t1.micro cache nodes * `apply_immediately` - (Optional) Specifies whether any modifications are applied immediately, or during the next maintenance window. Default is `false`. * `tags` - (Optional) A map of tags to assign to the resource. Adding tags to this resource will add or overwrite any existing tags on the clusters in the replication group and not to the group itself. -* `cluster_mode` - (Optional) Create a native redis cluster. `automatic_failover_enabled` must be set to true. Cluster Mode documented below. Only 1 `cluster_mode` block is allowed. One of `number_cache_clusters` or `cluster_mode` is required. +* `cluster_mode` - (Optional) Create a native redis cluster. `automatic_failover_enabled` must be set to true. Cluster Mode documented below. Only 1 `cluster_mode` block is allowed. One of `number_cache_clusters` or `cluster_mode` is required. Note that configuring this block does not enable cluster mode, i.e. data sharding, this requires using a parameter group that has the parameter `cluster-enabled` set to true. Cluster Mode (`cluster_mode`) supports the following: From 3f5b95a2343e0b35b8a9157b2a9ab0befbb13634 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Fri, 11 Dec 2020 00:22:28 -0800 Subject: [PATCH 0285/1212] Removes GitHub OAuth token hashing --- aws/resource_aws_codepipeline.go | 93 ++++++++++----------------- aws/resource_aws_codepipeline_test.go | 4 +- go.mod | 1 + website/docs/r/codepipeline.markdown | 2 +- 4 files changed, 39 insertions(+), 61 deletions(-) diff --git a/aws/resource_aws_codepipeline.go b/aws/resource_aws_codepipeline.go index 5cbaadce2d5..8d7821fab9b 100644 --- a/aws/resource_aws_codepipeline.go +++ b/aws/resource_aws_codepipeline.go @@ -1,15 +1,14 @@ package aws import ( - "crypto/sha256" - "encoding/hex" "errors" "fmt" "log" - "strings" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/codepipeline" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -59,11 +58,9 @@ func resourceAwsCodePipeline() *schema.Resource { Required: true, }, "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - codepipeline.ArtifactStoreTypeS3, - }, false), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(codepipeline.ArtifactStoreType_Values(), false), }, "encryption_key": { Type: schema.TypeList, @@ -76,11 +73,9 @@ func resourceAwsCodePipeline() *schema.Resource { Required: true, }, "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - codepipeline.EncryptionKeyTypeKms, - }, false), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(codepipeline.EncryptionKeyType_Values(), false), }, }, }, @@ -109,35 +104,24 @@ func resourceAwsCodePipeline() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "configuration": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - DiffSuppressFunc: suppressCodePipelineStageActionConfiguration, + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, }, "category": { - Type: 
schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - codepipeline.ActionCategorySource, - codepipeline.ActionCategoryBuild, - codepipeline.ActionCategoryDeploy, - codepipeline.ActionCategoryTest, - codepipeline.ActionCategoryInvoke, - codepipeline.ActionCategoryApproval, - }, false), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(codepipeline.ActionCategory_Values(), false), }, "owner": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - codepipeline.ActionOwnerAws, - codepipeline.ActionOwnerThirdParty, - codepipeline.ActionOwnerCustom, - }, false), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(codepipeline.ActionOwner_Values(), false), }, "provider": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: resourceAwsCodePipelineValidateActionProvider, }, "version": { Type: schema.TypeString, @@ -425,8 +409,7 @@ func flattenAwsCodePipelineStageActions(si int, actions []*codepipeline.ActionDe if _, ok := config[CodePipelineGitHubActionConfigurationOAuthToken]; ok { // The AWS API returns "****" for the OAuthToken value. Pull the value from the configuration. addr := fmt.Sprintf("stage.%d.action.%d.configuration.OAuthToken", si, ai) - hash := hashCodePipelineGitHubToken(d.Get(addr).(string)) - config[CodePipelineGitHubActionConfigurationOAuthToken] = hash + config[CodePipelineGitHubActionConfigurationOAuthToken] = d.Get(addr).(string) } } @@ -620,27 +603,21 @@ func resourceAwsCodePipelineDelete(d *schema.ResourceData, meta interface{}) err return err } -func suppressCodePipelineStageActionConfiguration(k, old, new string, d *schema.ResourceData) bool { - parts := strings.Split(k, ".") - parts = parts[:len(parts)-2] - providerAddr := strings.Join(append(parts, "provider"), ".") - provider := d.Get(providerAddr).(string) - - if provider == CodePipelineProviderGitHub && strings.HasSuffix(k, CodePipelineGitHubActionConfigurationOAuthToken) { - hash := hashCodePipelineGitHubToken(new) - return old == hash +func resourceAwsCodePipelineValidateActionProvider(i interface{}, path cty.Path) diag.Diagnostics { + v, ok := i.(string) + if !ok { + return diag.Errorf("expected type to be string") } - return false -} - -const codePipelineGitHubTokenHashPrefix = "hash-" - -func hashCodePipelineGitHubToken(token string) string { - // Without this check, the value was getting encoded twice - if strings.HasPrefix(token, codePipelineGitHubTokenHashPrefix) { - return token + if v == CodePipelineProviderGitHub { + return diag.Diagnostics{ + diag.Diagnostic{ + Severity: diag.Warning, + Summary: "The CodePipeline GitHub version 1 action provider is deprecated.", + Detail: "Use a CodeStarSourceConnection instead.", + }, + } } - sum := sha256.Sum256([]byte(token)) - return codePipelineGitHubTokenHashPrefix + hex.EncodeToString(sum[:]) + + return nil } diff --git a/aws/resource_aws_codepipeline_test.go b/aws/resource_aws_codepipeline_test.go index bba8e411ebf..26d586b9f13 100644 --- a/aws/resource_aws_codepipeline_test.go +++ b/aws/resource_aws_codepipeline_test.go @@ -50,7 +50,7 @@ func TestAccAWSCodePipeline_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.Owner", "lifesum-terraform"), resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.Repo", "test"), resource.TestCheckResourceAttr(resourceName, 
"stage.0.action.0.configuration.Branch", "master"), - resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.OAuthToken", hashCodePipelineGitHubToken(githubToken)), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.OAuthToken", githubToken), resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.role_arn", ""), resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.run_order", "1"), resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.region", ""), @@ -98,7 +98,7 @@ func TestAccAWSCodePipeline_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.Owner", "test-terraform"), resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.Repo", "test-repo"), resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.Branch", "stable"), - resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.OAuthToken", hashCodePipelineGitHubToken(githubToken)), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.OAuthToken", githubToken), resource.TestCheckResourceAttr(resourceName, "stage.1.name", "Build"), resource.TestCheckResourceAttr(resourceName, "stage.1.action.#", "1"), diff --git a/go.mod b/go.mod index 597534f085f..cf668d87d9e 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/fatih/color v1.9.0 // indirect github.com/hashicorp/aws-sdk-go-base v0.7.0 github.com/hashicorp/go-cleanhttp v0.5.1 + github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/go-hclog v0.10.0 // indirect github.com/hashicorp/go-multierror v1.1.0 github.com/hashicorp/go-version v1.2.1 diff --git a/website/docs/r/codepipeline.markdown b/website/docs/r/codepipeline.markdown index aa602954d0c..301aa0e4576 100644 --- a/website/docs/r/codepipeline.markdown +++ b/website/docs/r/codepipeline.markdown @@ -183,7 +183,7 @@ An `action` block supports the following arguments: * `category` - (Required) A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Possible values are `Approval`, `Build`, `Deploy`, `Invoke`, `Source` and `Test`. * `owner` - (Required) The creator of the action being called. Possible values are `AWS`, `Custom` and `ThirdParty`. * `name` - (Required) The action declaration's name. -* `provider` - (Required) The provider of the service being called by the action. Valid providers are determined by the action category. For example, an action in the Deploy category type might have a provider of AWS CodeDeploy, which would be specified as CodeDeploy. +* `provider` - (Required) The provider of the service being called by the action. Valid providers are determined by the action category. Provider names are listed in the [Action Structure Reference](https://docs.aws.amazon.com/codepipeline/latest/userguide/action-reference.html) documentation. * `version` - (Required) A string that identifies the action type. * `configuration` - (Optional) A map of the action declaration's configuration. Configurations options for action types and providers can be found in the [Pipeline Structure Reference](http://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#action-requirements) and [Action Structure Reference](https://docs.aws.amazon.com/codepipeline/latest/userguide/action-reference.html) documentation. * `input_artifacts` - (Optional) A list of artifact names to be worked on. 
From ad5f5c3ccbf4314c3e6fb47617fcfac05041053a Mon Sep 17 00:00:00 2001
From: angie pinilla
Date: Thu, 17 Dec 2020 20:04:16 -0500
Subject: [PATCH 0286/1212] Update CHANGELOG for #16830

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 36757c2561e..ec9e6906a65 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -33,6 +33,7 @@ BUG FIXES
 * data-source/aws_ec2_local_gateway_route_tables: Ensure all results from large environments are returned [GH-16669]
 * data-source/aws_ec2_local_gateway_virtual_interface_groups: Ensure all results from large environments are returned [GH-16669]
 * data-source/aws_prefix_list: Using `name` argument no longer overrides other arguments [GH-16739]
+* resource/aws_db_instance: Fix missing `db_subnet_group_name` in API request when using `restore_to_point_in_time` [GH-16830]
 * resource/aws_main_route_table_association: Prevent crash on creation when VPC main route table association is not found [GH-16680]
 * resource/aws_workspaces_workspace: Prevent panic from terminated WorkSpace [GH-16692]

From d0793c2539fe478f1016aa522840815353072421 Mon Sep 17 00:00:00 2001
From: Graham Davison
Date: Thu, 17 Dec 2020 18:37:39 -0800
Subject: [PATCH 0287/1212] Adds acceptance test for CodePipeline using
 CodeStar Connection source action

---
 aws/resource_aws_codepipeline.go              |   2 +-
 aws/resource_aws_codepipeline_test.go         | 120 +++++++++++++++++-
 website/docs/r/codepipeline.markdown          |  16 ++-
 .../r/codestarconnections_connection.markdown |   7 +-
 4 files changed, 127 insertions(+), 18 deletions(-)

diff --git a/aws/resource_aws_codepipeline.go b/aws/resource_aws_codepipeline.go
index 8d7821fab9b..b66de0d158d 100644
--- a/aws/resource_aws_codepipeline.go
+++ b/aws/resource_aws_codepipeline.go
@@ -614,7 +614,7 @@ func resourceAwsCodePipelineValidateActionProvider(i interface{}, path cty.Path)
 			diag.Diagnostic{
 				Severity: diag.Warning,
 				Summary:  "The CodePipeline GitHub version 1 action provider is deprecated.",
-				Detail:   "Use a CodeStarSourceConnection instead.",
+				Detail:   "Use a GitHub version 2 action (with a CodeStar Connection `aws_codestarconnections_connection`) instead.
See https://docs.aws.amazon.com/codepipeline/latest/userguide/update-github-action-connections.html", }, } } diff --git a/aws/resource_aws_codepipeline_test.go b/aws/resource_aws_codepipeline_test.go index 26d586b9f13..c58d4182df6 100644 --- a/aws/resource_aws_codepipeline_test.go +++ b/aws/resource_aws_codepipeline_test.go @@ -8,6 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/codepipeline" + "github.com/aws/aws-sdk-go/service/codestarconnections" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -49,7 +50,7 @@ func TestAccAWSCodePipeline_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.%", "4"), resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.Owner", "lifesum-terraform"), resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.Repo", "test"), - resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.Branch", "master"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.Branch", "main"), resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.OAuthToken", githubToken), resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.role_arn", ""), resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.run_order", "1"), @@ -493,6 +494,48 @@ func TestAccAWSCodePipeline_WithNamespace(t *testing.T) { }) } +func TestAccAWSCodePipeline_WithCodeStarConnection(t *testing.T) { + var v codepipeline.PipelineDeclaration + name := acctest.RandString(10) + resourceName := "aws_codepipeline.test" + codestarConnectionResourceName := "aws_codestarconnections_connection.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPartitionHasServicePreCheck(codepipeline.EndpointsID, t) + testAccPartitionHasServicePreCheck(codestarconnections.EndpointsID, t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCodePipelineDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCodePipelineConfigWithCodeStarConnection(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSCodePipelineExists(resourceName, &v), + + resource.TestCheckResourceAttr(resourceName, "stage.#", "2"), + + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.name", "Source"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.category", "Source"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.owner", "AWS"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.provider", "CodeStarSourceConnection"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.version", "1"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.%", "3"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.FullRepositoryId", "lifesum-terraform/test"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.BranchName", "main"), + resource.TestCheckResourceAttrPair(resourceName, "stage.0.action.0.configuration.ConnectionArn", codestarConnectionResourceName, "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckAWSCodePipelineExists(n string, pipeline *codepipeline.PipelineDeclaration) resource.TestCheckFunc { return func(s 
*terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -724,7 +767,7 @@ resource "aws_codepipeline" "test" { configuration = { Owner = "lifesum-terraform" Repo = "test" - Branch = "master" + Branch = "main" OAuthToken = %[2]q } } @@ -838,7 +881,7 @@ resource "aws_codepipeline" "test" { configuration = { Owner = "lifesum-terraform" Repo = "test" - Branch = "master" + Branch = "main" OAuthToken = %[2]q } } @@ -1031,7 +1074,7 @@ resource "aws_codepipeline" "test" { configuration = { Owner = "lifesum-terraform" Repo = "test" - Branch = "master" + Branch = "main" OAuthToken = %[4]q } } @@ -1112,7 +1155,7 @@ resource "aws_codepipeline" "test" { configuration = { Owner = "lifesum-terraform" Repo = "test" - Branch = "master" + Branch = "main" OAuthToken = %[4]q } } @@ -1202,7 +1245,7 @@ resource "aws_codepipeline" "test" { configuration = { Owner = "lifesum-terraform" Repo = "test" - Branch = "master" + Branch = "main" OAuthToken = %[4]q } } @@ -1307,7 +1350,7 @@ resource "aws_codepipeline" "test" { configuration = { Owner = "lifesum-terraform" Repo = "test" - Branch = "master" + Branch = "main" OAuthToken = %[2]q } } @@ -1338,6 +1381,69 @@ resource "aws_s3_bucket" "foo" { `, rName, githubToken)) } +func testAccAWSCodePipelineConfigWithCodeStarConnection(rName string) string { + return composeConfig( + testAccAWSCodePipelineS3DefaultBucket(rName), + testAccAWSCodePipelineServiceIAMRole(rName), + fmt.Sprintf(` +resource "aws_codepipeline" "test" { + name = "test-pipeline-%[1]s" + role_arn = aws_iam_role.codepipeline_role.arn + + artifact_store { + location = aws_s3_bucket.test.bucket + type = "S3" + + encryption_key { + id = "1234" + type = "KMS" + } + } + + stage { + name = "Source" + + action { + name = "Source" + category = "Source" + owner = "AWS" + provider = "CodeStarSourceConnection" + version = "1" + output_artifacts = ["test"] + + configuration = { + ConnectionArn = aws_codestarconnections_connection.test.arn + FullRepositoryId = "lifesum-terraform/test" + BranchName = "main" + } + } + } + + stage { + name = "Build" + + action { + name = "Build" + category = "Build" + owner = "AWS" + provider = "CodeBuild" + input_artifacts = ["test"] + version = "1" + + configuration = { + ProjectName = "test" + } + } + } +} + +resource "aws_codestarconnections_connection" "test" { + name = %[1]q + provider_type = "GitHub" +} +`, rName)) +} + func TestResourceAWSCodePipelineExpandArtifactStoresValidation(t *testing.T) { cases := []struct { Name string diff --git a/website/docs/r/codepipeline.markdown b/website/docs/r/codepipeline.markdown index 301aa0e4576..33ee75b2aa2 100644 --- a/website/docs/r/codepipeline.markdown +++ b/website/docs/r/codepipeline.markdown @@ -33,16 +33,15 @@ resource "aws_codepipeline" "codepipeline" { action { name = "Source" category = "Source" - owner = "ThirdParty" - provider = "GitHub" + owner = "AWS" + provider = "CodeStarSourceConnection" version = "1" output_artifacts = ["source_output"] configuration = { - Owner = "my-organization" - Repo = "test" - Branch = "master" - OAuthToken = var.github_token + ConnectionArn = aws_codestarconnections_connection.example.arn + FullRepositoryId = "my-organization/example" + BranchName = "main" } } } @@ -87,6 +86,11 @@ resource "aws_codepipeline" "codepipeline" { } } +resource "aws_codestarconnections_connection" "example" { + name = "example-connection" + provider_type = "GitHub" +} + resource "aws_s3_bucket" "codepipeline_bucket" { bucket = "test-bucket" acl = "private" diff --git 
a/website/docs/r/codestarconnections_connection.markdown b/website/docs/r/codestarconnections_connection.markdown index 5aadbb83228..7b9d7b1d01a 100644 --- a/website/docs/r/codestarconnections_connection.markdown +++ b/website/docs/r/codestarconnections_connection.markdown @@ -38,10 +38,9 @@ resource "aws_codepipeline" "example" { version = "1" output_artifacts = ["source_output"] configuration = { - Owner = "my-organization" - ConnectionArn = aws_codestarconnections_connection.example.arn - Repo = "foo/test" - Branch = "master" + ConnectionArn = aws_codestarconnections_connection.example.arn + FullRepositoryId = "my-organization/test" + BranchName = "main" } } } From 687fff9aad0289fb87dd710e2d45f53e091866af Mon Sep 17 00:00:00 2001 From: angie pinilla Date: Thu, 17 Dec 2020 22:04:31 -0500 Subject: [PATCH 0288/1212] Update CHANGELOG for #16684 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ec9e6906a65..f9748b36662 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ ENHANCEMENTS * resource/aws_eip: Add `carrier_ip` attribute [GH-16724] * resource/aws_instance: Add `enclave_options` configuration block (Nitro Enclaves) [GH-16361] * resource/aws_instance: Add `ebs_block_device` and `root_block_device` configuration block `throughput` attribute [GH-16620] +* resource/aws_kinesis_firehose_delivery_stream: Mark `http_endpoint_configuration` `access_key` as sensitive [GH-16684] * resource/aws_launch_configuration: Add `metadata_options` configuration block [GH-14637] * resource/aws_launch_template: Add `enclave_options` configuration block (Nitro Enclaves) [GH-16361] * resource/aws_vpn_connection: Add support for VPN tunnel options and enable acceleration, DPDTimeoutAction, StartupAction, local/remote IPv4/IPv6 network CIDR and tunnel inside IP version. 
[GH-14740] From a2b8ebbeb19a8e22c9d8a4b8d7b91871cc82370a Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Fri, 18 Dec 2020 16:54:40 +0900 Subject: [PATCH 0289/1212] Add tag support for CodeStar connection --- .../keyvaluetags/generators/listtags/main.go | 1 + .../generators/servicetags/main.go | 1 + .../generators/updatetags/main.go | 1 + aws/internal/keyvaluetags/list_tags_gen.go | 18 +++++++++ .../service_generation_customizations.go | 3 ++ aws/internal/keyvaluetags/service_tags_gen.go | 28 ++++++++++++++ aws/internal/keyvaluetags/update_tags_gen.go | 37 +++++++++++++++++++ ...urce_aws_codestarconnections_connection.go | 25 ++++++++++++- 8 files changed, 113 insertions(+), 1 deletion(-) diff --git a/aws/internal/keyvaluetags/generators/listtags/main.go b/aws/internal/keyvaluetags/generators/listtags/main.go index 840bbe34802..be8deda9e6d 100644 --- a/aws/internal/keyvaluetags/generators/listtags/main.go +++ b/aws/internal/keyvaluetags/generators/listtags/main.go @@ -40,6 +40,7 @@ var serviceNames = []string{ "codecommit", "codedeploy", "codepipeline", + "codestarconnections", "codestarnotifications", "cognitoidentity", "cognitoidentityprovider", diff --git a/aws/internal/keyvaluetags/generators/servicetags/main.go b/aws/internal/keyvaluetags/generators/servicetags/main.go index 761772d520d..56041c9ac24 100644 --- a/aws/internal/keyvaluetags/generators/servicetags/main.go +++ b/aws/internal/keyvaluetags/generators/servicetags/main.go @@ -34,6 +34,7 @@ var sliceServiceNames = []string{ "codebuild", "codedeploy", "codepipeline", + "codestarconnections", "configservice", "databasemigrationservice", "datapipeline", diff --git a/aws/internal/keyvaluetags/generators/updatetags/main.go b/aws/internal/keyvaluetags/generators/updatetags/main.go index 584e64a08b7..407ce64029a 100644 --- a/aws/internal/keyvaluetags/generators/updatetags/main.go +++ b/aws/internal/keyvaluetags/generators/updatetags/main.go @@ -41,6 +41,7 @@ var serviceNames = []string{ "codecommit", "codedeploy", "codepipeline", + "codestarconnections", "codestarnotifications", "cognitoidentity", "cognitoidentityprovider", diff --git a/aws/internal/keyvaluetags/list_tags_gen.go b/aws/internal/keyvaluetags/list_tags_gen.go index a08d3c9e5e1..fdcf9fc3c3b 100644 --- a/aws/internal/keyvaluetags/list_tags_gen.go +++ b/aws/internal/keyvaluetags/list_tags_gen.go @@ -27,6 +27,7 @@ import ( "github.com/aws/aws-sdk-go/service/codecommit" "github.com/aws/aws-sdk-go/service/codedeploy" "github.com/aws/aws-sdk-go/service/codepipeline" + "github.com/aws/aws-sdk-go/service/codestarconnections" "github.com/aws/aws-sdk-go/service/codestarnotifications" "github.com/aws/aws-sdk-go/service/cognitoidentity" "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" @@ -505,6 +506,23 @@ func CodepipelineListTags(conn *codepipeline.CodePipeline, identifier string) (K return CodepipelineKeyValueTags(output.Tags), nil } +// CodestarconnectionsListTags lists codestarconnections service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func CodestarconnectionsListTags(conn *codestarconnections.CodeStarConnections, identifier string) (KeyValueTags, error) { + input := &codestarconnections.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return CodestarconnectionsKeyValueTags(output.Tags), nil +} + // CodestarnotificationsListTags lists codestarnotifications service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. diff --git a/aws/internal/keyvaluetags/service_generation_customizations.go b/aws/internal/keyvaluetags/service_generation_customizations.go index 48841befb37..c4865fef45e 100644 --- a/aws/internal/keyvaluetags/service_generation_customizations.go +++ b/aws/internal/keyvaluetags/service_generation_customizations.go @@ -30,6 +30,7 @@ import ( "github.com/aws/aws-sdk-go/service/codecommit" "github.com/aws/aws-sdk-go/service/codedeploy" "github.com/aws/aws-sdk-go/service/codepipeline" + "github.com/aws/aws-sdk-go/service/codestarconnections" "github.com/aws/aws-sdk-go/service/codestarnotifications" "github.com/aws/aws-sdk-go/service/cognitoidentity" "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" @@ -175,6 +176,8 @@ func ServiceClientType(serviceName string) string { funcType = reflect.TypeOf(codedeploy.New) case "codepipeline": funcType = reflect.TypeOf(codepipeline.New) + case "codestarconnections": + funcType = reflect.TypeOf(codestarconnections.New) case "codestarnotifications": funcType = reflect.TypeOf(codestarnotifications.New) case "cognitoidentity": diff --git a/aws/internal/keyvaluetags/service_tags_gen.go b/aws/internal/keyvaluetags/service_tags_gen.go index 0f8a6481e11..e1e513028ff 100644 --- a/aws/internal/keyvaluetags/service_tags_gen.go +++ b/aws/internal/keyvaluetags/service_tags_gen.go @@ -22,6 +22,7 @@ import ( "github.com/aws/aws-sdk-go/service/codebuild" "github.com/aws/aws-sdk-go/service/codedeploy" "github.com/aws/aws-sdk-go/service/codepipeline" + "github.com/aws/aws-sdk-go/service/codestarconnections" "github.com/aws/aws-sdk-go/service/configservice" "github.com/aws/aws-sdk-go/service/databasemigrationservice" "github.com/aws/aws-sdk-go/service/datapipeline" @@ -1034,6 +1035,33 @@ func CodepipelineKeyValueTags(tags []*codepipeline.Tag) KeyValueTags { return New(m) } +// CodestarconnectionsTags returns codestarconnections service tags. +func (tags KeyValueTags) CodestarconnectionsTags() []*codestarconnections.Tag { + result := make([]*codestarconnections.Tag, 0, len(tags)) + + for k, v := range tags.Map() { + tag := &codestarconnections.Tag{ + Key: aws.String(k), + Value: aws.String(v), + } + + result = append(result, tag) + } + + return result +} + +// CodestarconnectionsKeyValueTags creates KeyValueTags from codestarconnections service tags. +func CodestarconnectionsKeyValueTags(tags []*codestarconnections.Tag) KeyValueTags { + m := make(map[string]*string, len(tags)) + + for _, tag := range tags { + m[aws.StringValue(tag.Key)] = tag.Value + } + + return New(m) +} + // ConfigserviceTags returns configservice service tags. 
func (tags KeyValueTags) ConfigserviceTags() []*configservice.Tag { result := make([]*configservice.Tag, 0, len(tags)) diff --git a/aws/internal/keyvaluetags/update_tags_gen.go b/aws/internal/keyvaluetags/update_tags_gen.go index 754c49b3133..f4945aa98fd 100644 --- a/aws/internal/keyvaluetags/update_tags_gen.go +++ b/aws/internal/keyvaluetags/update_tags_gen.go @@ -30,6 +30,7 @@ import ( "github.com/aws/aws-sdk-go/service/codecommit" "github.com/aws/aws-sdk-go/service/codedeploy" "github.com/aws/aws-sdk-go/service/codepipeline" + "github.com/aws/aws-sdk-go/service/codestarconnections" "github.com/aws/aws-sdk-go/service/codestarnotifications" "github.com/aws/aws-sdk-go/service/cognitoidentity" "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" @@ -980,6 +981,42 @@ func CodepipelineUpdateTags(conn *codepipeline.CodePipeline, identifier string, return nil } +// CodestarconnectionsUpdateTags updates codestarconnections service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func CodestarconnectionsUpdateTags(conn *codestarconnections.CodeStarConnections, identifier string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &codestarconnections.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.IgnoreAws().Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &codestarconnections.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().CodestarconnectionsTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + // CodestarnotificationsUpdateTags updates codestarnotifications service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
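The generated list/update helpers above follow the provider's keyvaluetags pattern. As a sketch of how `CodestarconnectionsUpdateTags` would typically be consumed in a resource's Update function (illustrative only: the connection resource in this patch marks `tags` as `ForceNew` and therefore has no Update handler; the snippet assumes the provider's `aws` package context and imports):

```go
// Hypothetical Update handler showing the conventional call site for the
// generated tag helpers; o and n are the old and new tag maps from state.
func resourceAwsCodeStarConnectionsConnectionUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).codestarconnectionsconn

	if d.HasChange("tags") {
		o, n := d.GetChange("tags")
		// Diffs the two maps and issues UntagResource/TagResource calls as needed.
		if err := keyvaluetags.CodestarconnectionsUpdateTags(conn, d.Id(), o, n); err != nil {
			return fmt.Errorf("error updating CodeStar connection (%s) tags: %w", d.Id(), err)
		}
	}

	return resourceAwsCodeStarConnectionsConnectionRead(d, meta)
}
```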
diff --git a/aws/resource_aws_codestarconnections_connection.go b/aws/resource_aws_codestarconnections_connection.go index df38fa73ea5..bba0068e3cc 100644 --- a/aws/resource_aws_codestarconnections_connection.go +++ b/aws/resource_aws_codestarconnections_connection.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" "log" "github.com/aws/aws-sdk-go/aws" @@ -43,6 +44,13 @@ func resourceAwsCodeStarConnectionsConnection() *schema.Resource { ForceNew: true, ValidateFunc: validation.StringInSlice(codestarconnections.ProviderType_Values(), false), }, + + "tags": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, }, } } @@ -55,6 +63,10 @@ func resourceAwsCodeStarConnectionsConnectionCreate(d *schema.ResourceData, meta ProviderType: aws.String(d.Get("provider_type").(string)), } + if v, ok := d.GetOk("tags"); ok { + params.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().CodestarconnectionsTags() + } + resp, err := conn.CreateConnection(params) if err != nil { return fmt.Errorf("error creating CodeStar connection: %w", err) @@ -67,6 +79,7 @@ func resourceAwsCodeStarConnectionsConnectionCreate(d *schema.ResourceData, meta func resourceAwsCodeStarConnectionsConnectionRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).codestarconnectionsconn + ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig resp, err := conn.GetConnection(&codestarconnections.GetConnectionInput{ ConnectionArn: aws.String(d.Id()), @@ -84,12 +97,22 @@ func resourceAwsCodeStarConnectionsConnectionRead(d *schema.ResourceData, meta i return fmt.Errorf("error reading CodeStar connection (%s): empty response", d.Id()) } - d.SetId(aws.StringValue(resp.Connection.ConnectionArn)) + arn := aws.StringValue(resp.Connection.ConnectionArn) + d.SetId(arn) d.Set("arn", resp.Connection.ConnectionArn) d.Set("name", resp.Connection.ConnectionName) d.Set("connection_status", resp.Connection.ConnectionStatus) d.Set("provider_type", resp.Connection.ProviderType) + tags, err := keyvaluetags.CodestarconnectionsListTags(conn, arn) + if err != nil { + return fmt.Errorf("error listing tags for CodeStar connection (%s): %w", arn, err) + } + + if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + return fmt.Errorf("error setting tags for CodeStar connection (%s): %w", arn, err) + } + return nil } From 40710b3d7d63f1beaf3302fb29342e6dfe9cc52f Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Fri, 18 Dec 2020 16:55:02 +0900 Subject: [PATCH 0290/1212] Add a test for CodeStar connection tag --- ...aws_codestarconnections_connection_test.go | 41 +++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/aws/resource_aws_codestarconnections_connection_test.go b/aws/resource_aws_codestarconnections_connection_test.go index 9d59baa3512..ba8b6ecae3e 100644 --- a/aws/resource_aws_codestarconnections_connection_test.go +++ b/aws/resource_aws_codestarconnections_connection_test.go @@ -65,6 +65,33 @@ func TestAccAWSCodeStarConnectionsConnection_disappears(t *testing.T) { }) } +func TestAccAWSCodeStarConnectionsConnection_tags(t *testing.T) { + var v codestarconnections.Connection + resourceName := "aws_codestarconnections_connection.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); 
testAccPartitionHasServicePreCheck(codestarconnections.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCodeStarConnectionsConnectionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCodeStarConnectionsConnectionConfigTags(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSCodeStarConnectionsConnectionExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.Environment", "production"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckAWSCodeStarConnectionsConnectionExists(n string, v *codestarconnections.Connection) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -118,3 +145,17 @@ resource "aws_codestarconnections_connection" "test" { } `, rName) } + +func testAccAWSCodeStarConnectionsConnectionConfigTags(rName string) string { + return fmt.Sprintf(` +resource "aws_codestarconnections_connection" "test" { + name = %[1]q + provider_type = "Bitbucket" + + tags = { + Name = %[1]q + Environment = "production" + } +} +`, rName) +} From bf1144687bab49ea326b06696a5c0d3c792e010a Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Fri, 18 Dec 2020 16:55:14 +0900 Subject: [PATCH 0291/1212] Add a document for CodeStar connection tag --- website/docs/r/codestarconnections_connection.markdown | 1 + 1 file changed, 1 insertion(+) diff --git a/website/docs/r/codestarconnections_connection.markdown b/website/docs/r/codestarconnections_connection.markdown index 5aadbb83228..d7ad29dab9c 100644 --- a/website/docs/r/codestarconnections_connection.markdown +++ b/website/docs/r/codestarconnections_connection.markdown @@ -68,6 +68,7 @@ The following arguments are supported: * `name` - (Required) The name of the connection to be created. The name must be unique in the calling AWS account. Changing `name` will create a new resource. * `provider_type` - (Required) The name of the external provider where your third-party code repository is configured. Valid values are `Bitbucket`, `GitHub`, or `GitHubEnterpriseServer`. Changing `provider_type` will create a new resource. +* `tags` - (Optional) An array of key:value pairs to associate with the resource. 
## Attributes Reference From d57a3255cfae1ba16708d9c44ac5a8db611c10d4 Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Fri, 18 Dec 2020 17:00:53 +0900 Subject: [PATCH 0292/1212] Sort imports --- aws/resource_aws_codestarconnections_connection.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_codestarconnections_connection.go b/aws/resource_aws_codestarconnections_connection.go index bba0068e3cc..df44221cb09 100644 --- a/aws/resource_aws_codestarconnections_connection.go +++ b/aws/resource_aws_codestarconnections_connection.go @@ -2,7 +2,6 @@ package aws import ( "fmt" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" "log" "github.com/aws/aws-sdk-go/aws" @@ -10,6 +9,7 @@ import ( "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsCodeStarConnectionsConnection() *schema.Resource { From b20f3426face8687ccae61353df13d125faca538 Mon Sep 17 00:00:00 2001 From: Do Hoang Khiem Date: Fri, 18 Dec 2020 23:41:31 +0700 Subject: [PATCH 0293/1212] resource/aws_eip_association: fix eventual consistency issue when associating EIP (#16808) * resource/aws_eip_association: fix eventual consistency issue when associating EIP * Update aws_eip_association for eventual consistency issue + add EC2 general PropagationTimeout --- aws/internal/service/ec2/waiter/waiter.go | 3 ++ aws/resource_aws_eip_association.go | 35 +++++++++++++++++++++-- 2 files changed, 35 insertions(+), 3 deletions(-) diff --git a/aws/internal/service/ec2/waiter/waiter.go b/aws/internal/service/ec2/waiter/waiter.go index 08dc760bd27..e912e908b3d 100644 --- a/aws/internal/service/ec2/waiter/waiter.go +++ b/aws/internal/service/ec2/waiter/waiter.go @@ -11,6 +11,9 @@ import ( const ( // Maximum amount of time to wait for EC2 Instance attribute modifications to propagate InstanceAttributePropagationTimeout = 2 * time.Minute + + // General timeout for EC2 resource creations to propagate + PropagationTimeout = 2 * time.Minute ) const ( diff --git a/aws/resource_aws_eip_association.go b/aws/resource_aws_eip_association.go index 01cc47aba13..9496721b709 100644 --- a/aws/resource_aws_eip_association.go +++ b/aws/resource_aws_eip_association.go @@ -4,13 +4,13 @@ import ( "fmt" "log" "net" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2/waiter" ) func resourceAwsEipAssociation() *schema.Resource { @@ -94,7 +94,7 @@ func resourceAwsEipAssociationCreate(d *schema.ResourceData, meta interface{}) e log.Printf("[DEBUG] EIP association configuration: %#v", request) var resp *ec2.AssociateAddressOutput - err := resource.Retry(2*time.Minute, func() *resource.RetryError { + err := resource.Retry(waiter.PropagationTimeout, func() *resource.RetryError { var err error resp, err = conn.AssociateAddress(request) @@ -157,7 +157,36 @@ func resourceAwsEipAssociationRead(d *schema.ResourceData, meta interface{}) err return err } - response, err := conn.DescribeAddresses(request) + var response *ec2.DescribeAddressesOutput + err = resource.Retry(waiter.PropagationTimeout, func() *resource.RetryError { 
+		var err error
+		response, err = conn.DescribeAddresses(request)
+
+		if d.IsNewResource() && tfawserr.ErrCodeEquals(err, "InvalidAssociationID.NotFound") {
+			return resource.RetryableError(err)
+		}
+
+		if err != nil {
+			return resource.NonRetryableError(err)
+		}
+
+		// Only inspect the response once err is known to be nil; on a
+		// non-NotFound error the response may be nil.
+		if d.IsNewResource() && (response == nil || len(response.Addresses) == 0) {
+			return resource.RetryableError(&resource.NotFoundError{})
+		}
+
+		return nil
+	})
+
+	if isResourceTimeoutError(err) {
+		response, err = conn.DescribeAddresses(request)
+	}
+
+	if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, "InvalidAssociationID.NotFound") {
+		log.Printf("[WARN] EIP Association (%s) not found, removing from state", d.Id())
+		d.SetId("")
+		return nil
+	}
+
 	if err != nil {
 		return fmt.Errorf("Error reading EC2 Elastic IP %s: %#v", d.Get("allocation_id").(string), err)
 	}

From 049dbb1a94d400d80ce875264638fbce917b5716 Mon Sep 17 00:00:00 2001
From: Brian Flad
Date: Fri, 18 Dec 2020 11:42:35 -0500
Subject: [PATCH 0294/1212] Update CHANGELOG for #16808

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f9748b36662..0354baa6db9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -35,6 +35,7 @@
 * data-source/aws_ec2_local_gateway_virtual_interface_groups: Ensure all results from large environments are returned [GH-16669]
 * data-source/aws_prefix_list: Using `name` argument no longer overrides other arguments [GH-16739]
 * resource/aws_db_instance: Fix missing `db_subnet_group_name` in API request when using `restore_to_point_in_time` [GH-16830]
+* resource/aws_eip_association: Handle eventual consistency when creating resource [GH-16808]
 * resource/aws_main_route_table_association: Prevent crash on creation when VPC main route table association is not found [GH-16680]
 * resource/aws_workspaces_workspace: Prevent panic from terminated WorkSpace [GH-16692]

From 431f32bc8b69d559d719531aa546f053d7e5a758 Mon Sep 17 00:00:00 2001
From: Gaylord Mazelier
Date: Sat, 9 May 2020 17:42:26 +0200
Subject: [PATCH 0295/1212] New resource: aws_lakeformation_datalake_settings

---
 ...rce_aws_lakeformation_datalake_settings.go | 135 ++++++++++++++++++
 ...ws_lakeformation_datalake_settings_test.go |  66 +++++++++
 2 files changed, 201 insertions(+)
 create mode 100644 aws/resource_aws_lakeformation_datalake_settings.go
 create mode 100644 aws/resource_aws_lakeformation_datalake_settings_test.go

diff --git a/aws/resource_aws_lakeformation_datalake_settings.go b/aws/resource_aws_lakeformation_datalake_settings.go
new file mode 100644
index 00000000000..c708abcef59
--- /dev/null
+++ b/aws/resource_aws_lakeformation_datalake_settings.go
@@ -0,0 +1,135 @@
+package aws
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/lakeformation"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
+)
+
+func resourceAwsLakeFormationDataLakeSettings() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsLakeFormationDataLakeSettingsPut,
+		Update: resourceAwsLakeFormationDataLakeSettingsPut,
+		Read:   resourceAwsLakeFormationDataLakeSettingsRead,
+		Delete: resourceAwsLakeFormationDataLakeSettingsReset,
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},
+
+		Schema: map[string]*schema.Schema{
+			"catalog_id": {
+				Type:     schema.TypeString,
+				ForceNew: true,
+				Optional: true,
+				Computed: true,
+			},
+			"admins": {
+				Type:     schema.TypeList,
+				Required: true,
+				MinItems: 0,
+				MaxItems: 10,
+				Elem: &schema.Schema{
+					Type:         schema.TypeString,
+					ValidateFunc: validation.NoZeroValues,
+				},
+			},
+		},
+	}
+}
+
+func resourceAwsLakeFormationDataLakeSettingsPut(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).lakeformationconn
+	catalogId := createAwsDataCatalogId(d, meta.(*AWSClient).accountid)
+
+	input := &lakeformation.PutDataLakeSettingsInput{
+		CatalogId: aws.String(catalogId),
+		DataLakeSettings: &lakeformation.DataLakeSettings{
+			DataLakeAdmins: expandAdmins(d),
+		},
+	}
+
+	_, err := conn.PutDataLakeSettings(input)
+	if err != nil {
+		return fmt.Errorf("Error updating DataLakeSettings: %s", err)
+	}
+
+	d.SetId(fmt.Sprintf("lakeformation:settings:%s", catalogId))
+	d.Set("catalog_id", catalogId)
+
+	return resourceAwsLakeFormationDataLakeSettingsRead(d, meta)
+}
+
+func resourceAwsLakeFormationDataLakeSettingsRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).lakeformationconn
+	catalogId := d.Get("catalog_id").(string)
+
+	input := &lakeformation.GetDataLakeSettingsInput{
+		CatalogId: aws.String(catalogId),
+	}
+
+	out, err := conn.GetDataLakeSettings(input)
+	if err != nil {
+		return fmt.Errorf("Error reading DataLakeSettings: %s", err)
+	}
+
+	d.Set("catalog_id", catalogId)
+	if err := d.Set("admins", flattenAdmins(out.DataLakeSettings.DataLakeAdmins)); err != nil {
+		return fmt.Errorf("Error setting admins from DataLakeSettings: %s", err)
+	}
+	// TODO: Add CreateDatabaseDefaultPermissions and CreateTableDefaultPermissions
+
+	return nil
+}
+
+func resourceAwsLakeFormationDataLakeSettingsReset(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).lakeformationconn
+	catalogId := d.Get("catalog_id").(string)
+
+	input := &lakeformation.PutDataLakeSettingsInput{
+		CatalogId: aws.String(catalogId),
+		DataLakeSettings: &lakeformation.DataLakeSettings{
+			DataLakeAdmins: make([]*lakeformation.DataLakePrincipal, 0),
+		},
+	}
+
+	_, err := conn.PutDataLakeSettings(input)
+	if err != nil {
+		return fmt.Errorf("Error resetting DataLakeSettings: %s", err)
+	}
+
+	return nil
+}
+
+func createAwsDataCatalogId(d *schema.ResourceData, accountId string) (catalogId string) {
+	if inputCatalogId, ok := d.GetOkExists("catalog_id"); ok {
+		catalogId = inputCatalogId.(string)
+	} else {
+		catalogId = accountId
+	}
+	return
+}
+
+func expandAdmins(d *schema.ResourceData) []*lakeformation.DataLakePrincipal {
+	xs := d.Get("admins")
+	ys := make([]*lakeformation.DataLakePrincipal, len(xs.([]interface{})))
+
+	for i, x := range xs.([]interface{}) {
+		ys[i] = &lakeformation.DataLakePrincipal{
+			DataLakePrincipalIdentifier: aws.String(x.(string)),
+		}
+	}
+
+	return ys
+}
+
+func flattenAdmins(xs []*lakeformation.DataLakePrincipal) []string {
+	admins := make([]string, len(xs))
+	for i, x := range xs {
+		admins[i] = aws.StringValue(x.DataLakePrincipalIdentifier)
+	}
+
+	return admins
+}
diff --git a/aws/resource_aws_lakeformation_datalake_settings_test.go b/aws/resource_aws_lakeformation_datalake_settings_test.go
new file mode 100644
index 00000000000..d2ac3b0c538
--- /dev/null
+++ b/aws/resource_aws_lakeformation_datalake_settings_test.go
@@ -0,0 +1,66 @@
+package aws
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+)
+
+func TestAccAWSLakeFormationDataLakeSettings_basic(t *testing.T) {
+	callerIdentityName := "data.aws_caller_identity.current"
+	resourceName := "aws_lakeformation_datalake_settings.test"
+
+	resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + // TODO: CheckDestroy: testAccCheckAWSLakeFormationDataLakeSettingsDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLakeFormationDataLakeSettingsConfig_basic, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), + resource.TestCheckResourceAttr(resourceName, "admins.#", "1"), + resource.TestCheckResourceAttrPair(callerIdentityName, "arn", resourceName, "admins.0"), + ), + }, + }, + }) +} + +const testAccAWSLakeFormationDataLakeSettingsConfig_basic = ` +data "aws_caller_identity" "current" {} + +resource "aws_lakeformation_datalake_settings" "test" { + admins = ["${data.aws_caller_identity.current.arn}"] +} +` + +func TestAccAWSLakeFormationDataLakeSettings_withCatalogId(t *testing.T) { + callerIdentityName := "data.aws_caller_identity.current" + resourceName := "aws_lakeformation_datalake_settings.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + // TODO: CheckDestroy: testAccCheckAWSLakeFormationDataLakeSettingsDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLakeFormationDataLakeSettingsConfig_withCatalogId, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), + resource.TestCheckResourceAttr(resourceName, "admins.#", "1"), + resource.TestCheckResourceAttrPair(callerIdentityName, "arn", resourceName, "admins.0"), + ), + }, + }, + }) +} + +const testAccAWSLakeFormationDataLakeSettingsConfig_withCatalogId = ` +data "aws_caller_identity" "current" {} + +resource "aws_lakeformation_datalake_settings" "test" { + catalog_id = "${data.aws_caller_identity.current.account_id}" + admins = ["${data.aws_caller_identity.current.arn}"] +} +` From 3dcb295ffbb7d0f4975b494fe39285e2bebf2732 Mon Sep 17 00:00:00 2001 From: Gaylord Mazelier Date: Sun, 10 May 2020 14:48:21 +0200 Subject: [PATCH 0296/1212] Add resource documentation --- website/aws.erb | 3629 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 3629 insertions(+) create mode 100644 website/aws.erb diff --git a/website/aws.erb b/website/aws.erb new file mode 100644 index 00000000000..d8a0680afd4 --- /dev/null +++ b/website/aws.erb @@ -0,0 +1,3629 @@ +<% wrap_layout :inner do %> + <% content_for :sidebar do %> + + <% end %> + <%= yield %> +<% end %> From ed879d584efefd8de572f20886ae093162e31153 Mon Sep 17 00:00:00 2001 From: Gaylord Mazelier Date: Sun, 10 May 2020 19:09:31 +0200 Subject: [PATCH 0297/1212] New resource: aws_lakeformation_permissions (WIP) --- aws/provider.go | 1 + aws/resource_aws_lakeformation_permissions.go | 314 ++++++++++++++++++ ...urce_aws_lakeformation_permissions_test.go | 245 ++++++++++++++ 3 files changed, 560 insertions(+) create mode 100644 aws/resource_aws_lakeformation_permissions.go create mode 100644 aws/resource_aws_lakeformation_permissions_test.go diff --git a/aws/provider.go b/aws/provider.go index 06cc9a18dd8..476b23c4954 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -747,6 +747,7 @@ func Provider() *schema.Provider { "aws_kms_key": resourceAwsKmsKey(), "aws_kms_ciphertext": resourceAwsKmsCiphertext(), "aws_lakeformation_data_lake_settings": resourceAwsLakeFormationDataLakeSettings(), + "aws_lakeformation_permissions": resourceAwsLakeFormationPermissions(), "aws_lakeformation_resource": resourceAwsLakeFormationResource(), "aws_lambda_alias": 
resourceAwsLambdaAlias(), "aws_lambda_code_signing_config": resourceAwsLambdaCodeSigningConfig(), diff --git a/aws/resource_aws_lakeformation_permissions.go b/aws/resource_aws_lakeformation_permissions.go new file mode 100644 index 00000000000..bca0637660f --- /dev/null +++ b/aws/resource_aws_lakeformation_permissions.go @@ -0,0 +1,314 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/lakeformation" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" +) + +func AwsLakeFormationPermissions() []string { + return []string{ + "ALL", + "SELECT", + "ALTER", + "DROP", + "DELETE", + "INSERT", + "CREATE_DATABASE", + "CREATE_TABLE", + "DATA_LOCATION_ACCESS", + } +} + +func resourceAwsLakeFormationPermissions() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsLakeFormationPermissionsGrant, + Read: resourceAwsLakeFormationPermissionsList, + Delete: resourceAwsLakeFormationPermissionsRevoke, + + Schema: map[string]*schema.Schema{ + "catalog_id": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + ValidateFunc: validateAwsAccountId, + }, + "permissions": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + MinItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice(AwsLakeFormationPermissions(), false), + }, + }, + "permissions_with_grant_option": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice(AwsLakeFormationPermissions(), false), + }, + }, + "principal": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + "database": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"location", "table"}, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateArn, + ConflictsWith: []string{"database", "table"}, + }, + "table": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"database", "location"}, + MinItems: 0, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "database": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.NoZeroValues, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.NoZeroValues, + }, + "column_names": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + }, + "excluded_column_names": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + }, + }, + }, + }, + }, + } +} + +func resourceAwsLakeFormationPermissionsGrant(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lakeformationconn + catalogId := createAwsDataCatalogId(d, meta.(*AWSClient).accountid) + resource := expandAwsLakeFormationResource(d) + + input := &lakeformation.GrantPermissionsInput{ + CatalogId: aws.String(catalogId), + Permissions: expandStringList(d.Get("permissions").([]interface{})), + Principal: expandAwsLakeFormationPrincipal(d), + Resource: resource, + } + if vs, ok := d.GetOk("permissions_with_grant_option"); ok { + 
input.PermissionsWithGrantOption = expandStringList(vs.([]interface{})) + } + + // Catalog: CREATE_DATABASE + // Location: DATA_LOCATION_ACCESS + // Database: ALTER, CREATE_TABLE, DROP, (ALL ~ Super) + // Table: ALTER, INSERT, DELETE, DROP, SELECT, (ALL ~ Super) + // TableWithColumns: SELECT + + _, err := conn.GrantPermissions(input) + if err != nil { + return fmt.Errorf("Error granting LakeFormation Permissions: %s", err) + } + + d.SetId(fmt.Sprintf("lakeformation:resource:%s", catalogId)) // FIXME + d.Set("catalog_id", catalogId) + + return resourceAwsLakeFormationPermissionsList(d, meta) +} + +func resourceAwsLakeFormationPermissionsList(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lakeformationconn + catalogId := d.Get("catalog_id").(string) + + input := &lakeformation.ListPermissionsInput{ + CatalogId: aws.String(catalogId), + Principal: expandAwsLakeFormationPrincipal(d), + Resource: expandAwsLakeFormationResource(d), + } + + out, err := conn.ListPermissions(input) + if err != nil { + return fmt.Errorf("Error listing LakeFormation Permissions: %s", err) + } + + permissions := out.PrincipalResourcePermissions + if len(permissions) == 0 { + return fmt.Errorf("Error no LakeFormation Permissions found: %s", input) + } + + permissionsHead := permissions[0] // XXX: assuming there is only one result in the list + d.Set("catalog_id", catalogId) + d.Set("permissions", permissionsHead.Permissions) + d.Set("permissions_with_grant_option", permissionsHead.PermissionsWithGrantOption) + d.Set("principal", permissionsHead.Principal.DataLakePrincipalIdentifier) + if dataLocation := permissionsHead.Resource.DataLocation; dataLocation != nil { + d.Set("location", dataLocation.ResourceArn) + } + if database := permissionsHead.Resource.Database; database != nil { + d.Set("database", database.Name) + } + if table := permissionsHead.Resource.Table; table != nil { + d.Set("table", flattenAWSLakeFormationTable(table)) + } + if table := permissionsHead.Resource.TableWithColumns; table != nil { + d.Set("table", flattenAWSLakeFormationTableWithColumns(table)) + } + + return nil +} + +func resourceAwsLakeFormationPermissionsRevoke(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lakeformationconn + catalogId := d.Get("catalog_id").(string) + + input := &lakeformation.RevokePermissionsInput{ + CatalogId: aws.String(catalogId), + Permissions: expandStringList(d.Get("permissions").([]interface{})), + Principal: expandAwsLakeFormationPrincipal(d), + Resource: expandAwsLakeFormationResource(d), + } + if vs, ok := d.GetOk("permissions_with_grant_option"); ok { + input.PermissionsWithGrantOption = expandStringList(vs.([]interface{})) + } + + _, err := conn.RevokePermissions(input) + if err != nil { + return fmt.Errorf("Error revoking LakeFormation Permissions: %s", err) + } + + return nil +} + +func expandAwsLakeFormationPrincipal(d *schema.ResourceData) *lakeformation.DataLakePrincipal { + return &lakeformation.DataLakePrincipal{ + DataLakePrincipalIdentifier: aws.String(d.Get("principal").(string)), + } +} + +func expandAwsLakeFormationResource(d *schema.ResourceData) *lakeformation.Resource { + resource := &lakeformation.Resource{ + // Catalog: &lakeformation.CatalogResource{}, + } + if v, ok := d.GetOk("database"); ok { + databaseName := v.(string) + if len(databaseName) > 0 { + resource.Database = &lakeformation.DatabaseResource{ + Name: aws.String(databaseName), + } + } + } + if v, ok := d.GetOk("location"); ok { + location := v.(string) + if 
len(location) > 0 { + resource.DataLocation = &lakeformation.DataLocationResource{ + ResourceArn: aws.String(v.(string)), + } + } + } + if vs, ok := d.GetOk("table"); ok { + tables := vs.([]interface{}) + if len(tables) > 0 { + table := tables[0].(map[string]interface{}) + + var databaseName, tableName string + var columnNames, excludedColumnNames []interface{} + if x, ok := table["database"]; ok { + databaseName = x.(string) + } + if x, ok := table["name"]; ok { + tableName = x.(string) + } + if xs, ok := table["column_names"]; ok { + columnNames = xs.([]interface{}) + } + if xs, ok := table["excluded_column_names"]; ok { + excludedColumnNames = xs.([]interface{}) + } + + if len(columnNames) > 0 || len(excludedColumnNames) > 0 { + tableWithColumns := &lakeformation.TableWithColumnsResource{ + DatabaseName: aws.String(databaseName), + Name: aws.String(tableName), + } + if len(columnNames) > 0 { + tableWithColumns.ColumnNames = expandStringList(columnNames) + } + if len(excludedColumnNames) > 0 { + tableWithColumns.ColumnWildcard = &lakeformation.ColumnWildcard{ + ExcludedColumnNames: expandStringList(excludedColumnNames), + } + } + resource.TableWithColumns = tableWithColumns + } else { + resource.Table = &lakeformation.TableResource{ + DatabaseName: aws.String(databaseName), + Name: aws.String(tableName), + } + } + } + } + + return resource +} + +func flattenAWSLakeFormationTable(tb *lakeformation.TableResource) map[string]interface{} { + m := make(map[string]interface{}) + + m["database"] = tb.DatabaseName + m["name"] = tb.Name + + return m +} + +func flattenAWSLakeFormationTableWithColumns(tb *lakeformation.TableWithColumnsResource) map[string]interface{} { + m := make(map[string]interface{}) + + m["database"] = tb.DatabaseName + m["name"] = tb.Name + if columnNames := tb.ColumnNames; columnNames != nil { + m["column_names"] = columnNames + } + if columnWildcard := tb.ColumnWildcard; columnWildcard != nil { + m["excluded_column_names"] = columnWildcard.ExcludedColumnNames + } + + return m +} diff --git a/aws/resource_aws_lakeformation_permissions_test.go b/aws/resource_aws_lakeformation_permissions_test.go new file mode 100644 index 00000000000..c1e20c10ee7 --- /dev/null +++ b/aws/resource_aws_lakeformation_permissions_test.go @@ -0,0 +1,245 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" +) + +func TestAccAWSLakeFormationPermissions_full(t *testing.T) { + rName := acctest.RandomWithPrefix("lakeformation-test-bucket") + dName := acctest.RandomWithPrefix("lakeformation-test-db") + // tName := acctest.RandomWithPrefix("lakeformation-test-table") + + callerIdentityName := "data.aws_caller_identity.current" + roleName := "data.aws_iam_role.test" + resourceName := "aws_lakeformation_permissions.test" + bucketName := "aws_s3_bucket.test" + dbName := "aws_glue_catalog_database.test" + // tableName := "aws_glue_catalog_table.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + // TODO: CheckDestroy + Steps: []resource.TestStep{ + { + Config: testAccAWSLakeFormationPermissionsConfig_location(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), + resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), + resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "principal"), + 
resource.TestCheckResourceAttrPair(bucketName, "arn", resourceName, "location"), + resource.TestCheckResourceAttr(resourceName, "permissions.#", "1"), + resource.TestCheckResourceAttr(resourceName, "permissions.0", "DATA_LOCATION_ACCESS"), + ), + }, + { + Config: testAccAWSLakeFormationPermissionsConfig_database(rName, dName), + Check: resource.ComposeTestCheckFunc( + testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), + resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), + resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "principal"), + resource.TestCheckResourceAttrPair(dbName, "name", resourceName, "database"), + resource.TestCheckResourceAttr(resourceName, "permissions.#", "3"), + resource.TestCheckResourceAttr(resourceName, "permissions.0", "ALTER"), + resource.TestCheckResourceAttr(resourceName, "permissions.1", "CREATE_TABLE"), + resource.TestCheckResourceAttr(resourceName, "permissions.2", "DROP"), + resource.TestCheckResourceAttr(resourceName, "permissions_with_grant_option.#", "1"), + resource.TestCheckResourceAttr(resourceName, "permissions_with_grant_option.0", "CREATE_TABLE"), + ), + }, + // FIXME: more than one permission in API read result + // { + // Config: testAccAWSLakeFormationPermissionsConfig_table(rName, dName, tName), + // Check: resource.ComposeTestCheckFunc( + // testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), + // resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), + // resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "principal"), + // resource.TestCheckResourceAttr(resourceName, "table.#", "1"), + // resource.TestCheckResourceAttrPair(tableName, "database_name", resourceName, "table.0.database"), + // resource.TestCheckResourceAttrPair(tableName, "name", resourceName, "table.0.name"), + // resource.TestCheckResourceAttr(resourceName, "permissions.#", "1"), + // resource.TestCheckResourceAttr(resourceName, "permissions.0", "ALL"), + // ), + // }, + // FIXME: WIP + // { + // Config: testAccAWSLakeFormationPermissionsConfig_tableWithColumns(rName, dName, tName), + // Check: resource.ComposeTestCheckFunc( + // testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), + // resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), + // resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "principal"), + // resource.TestCheckResourceAttr(resourceName, "table.#", "1"), + // resource.TestCheckResourceAttrPair(tableName, "database_name", resourceName, "table.0.database"), + // resource.TestCheckResourceAttrPair(tableName, "name", resourceName, "table.0.name"), + // resource.TestCheckResourceAttr(resourceName, "table.0.column_names.#", "2"), + // resource.TestCheckResourceAttr(resourceName, "table.0.column_names.0", "event"), + // resource.TestCheckResourceAttr(resourceName, "table.0.column_names.1", "timestamp"), + // resource.TestCheckResourceAttr(resourceName, "permissions.#", "1"), + // resource.TestCheckResourceAttr(resourceName, "permissions.0", "SELECT"), + // ), + // }, + }, + }) +} + +func testAccAWSLakeFormationPermissionsConfig_location(rName string) string { + return fmt.Sprintf(` +data "aws_caller_identity" "current" {} + +data "aws_iam_role" "test" { + name = "AWSServiceRoleForLakeFormationDataAccess" +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_lakeformation_datalake_settings" "test" { + admins = [ + 
data.aws_caller_identity.current.arn + ] +} + +resource "aws_lakeformation_resource" "test" { + resource_arn = aws_s3_bucket.test.arn + use_service_linked_role = true +} + +resource "aws_lakeformation_permissions" "test" { + permissions = ["DATA_LOCATION_ACCESS"] + principal = data.aws_iam_role.test.arn + + location = aws_lakeformation_resource.test.resource_arn +} +`, rName) +} + +func testAccAWSLakeFormationPermissionsConfig_database(rName, dName string) string { + return fmt.Sprintf(` +data "aws_caller_identity" "current" {} + +data "aws_iam_role" "test" { + name = "AWSServiceRoleForLakeFormationDataAccess" +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_glue_catalog_database" "test" { + name = %[2]q +} + +resource "aws_lakeformation_datalake_settings" "test" { + admins = [ + data.aws_caller_identity.current.arn + ] +} + +resource "aws_lakeformation_permissions" "test" { + permissions = ["ALTER", "CREATE_TABLE", "DROP"] + permissions_with_grant_option = ["CREATE_TABLE"] + principal = data.aws_iam_role.test.arn + + database = aws_glue_catalog_database.test.name +} +`, rName, dName) +} + +// func testAccAWSLakeFormationPermissionsConfig_table(rName, dName, tName string) string { +// return fmt.Sprintf(` +// data "aws_caller_identity" "current" {} + +// data "aws_iam_role" "test" { +// name = "AWSServiceRoleForLakeFormationDataAccess" +// } + +// resource "aws_s3_bucket" "test" { +// bucket = %[1]q +// } + +// resource "aws_glue_catalog_database" "test" { +// name = %[2]q +// } + +// resource "aws_glue_catalog_table" "test" { +// name = %[3]q +// database_name = aws_glue_catalog_database.test.name +// } + +// resource "aws_lakeformation_datalake_settings" "test" { +// admins = [ +// data.aws_caller_identity.current.arn +// ] +// } + +// resource "aws_lakeformation_permissions" "test" { +// permissions = ["SELECT"] +// principal = data.aws_iam_role.test.arn + +// table { +// database = aws_glue_catalog_table.test.database_name +// name = aws_glue_catalog_table.test.name +// } +// } +// `, rName, dName, tName) +// } + +// func testAccAWSLakeFormationPermissionsConfig_tableWithColumns(rName, dName, tName string) string { +// return fmt.Sprintf(` +// data "aws_caller_identity" "current" {} + +// data "aws_iam_role" "test" { +// name = "AWSServiceRoleForLakeFormationDataAccess" +// } + +// resource "aws_s3_bucket" "test" { +// bucket = %[1]q +// } + +// resource "aws_glue_catalog_database" "test" { +// name = %[2]q +// } + +// resource "aws_glue_catalog_table" "test" { +// name = %[3]q +// database_name = aws_glue_catalog_database.test.name + +// storage_descriptor { +// columns { +// name = "event" +// type = "string" +// } +// columns { +// name = "timestamp" +// type = "date" +// } +// columns { +// name = "value" +// type = "double" +// } +// } +// } + +// resource "aws_lakeformation_datalake_settings" "test" { +// admins = [ +// data.aws_caller_identity.current.arn +// ] +// } + +// resource "aws_lakeformation_permissions" "test" { +// permissions = ["SELECT"] +// principal = data.aws_iam_role.test.arn + +// table { +// database = aws_glue_catalog_table.test.database_name +// name = aws_glue_catalog_table.test.name +// column_names = ["event", "timestamp"] +// } +// } +// `, rName, dName, tName) +// } From 01999bd83253da0880ebd9b1a1abb9b823b839bc Mon Sep 17 00:00:00 2001 From: Gaylord Mazelier Date: Wed, 20 May 2020 00:17:32 +0200 Subject: [PATCH 0298/1212] Add documentation --- website/aws.erb | 3 + .../r/lakeformation_permissions.html.markdown | 180 
++++++++++++++++++ 2 files changed, 183 insertions(+) create mode 100644 website/docs/r/lakeformation_permissions.html.markdown diff --git a/website/aws.erb b/website/aws.erb index d8a0680afd4..0fad8c5a6a3 100644 --- a/website/aws.erb +++ b/website/aws.erb @@ -2093,6 +2093,9 @@
  • aws_lakeformation_datalake_settings
  • +
  • + aws_lakeformation_permissions +
  • aws_lakeformation_resource
  • diff --git a/website/docs/r/lakeformation_permissions.html.markdown b/website/docs/r/lakeformation_permissions.html.markdown new file mode 100644 index 00000000000..31e4f6918e5 --- /dev/null +++ b/website/docs/r/lakeformation_permissions.html.markdown @@ -0,0 +1,180 @@ +--- +subcategory: "Lake Formation" +layout: "aws" +page_title: "AWS: aws_lakeformation_permissions" +description: |- + Manages the permissions that a principal has on an AWS Glue Data Catalog resource (such as AWS Glue database or AWS Glue tables) +--- + +# Resource: aws_lakeformation_resource + +Manages the permissions that a principal has on an AWS Glue Data Catalog resource (such as AWS Glue database or AWS Glue tables). + +## Example Usage + +### Granting permissions on Lake Formation resource + +```hcl +data "aws_iam_role" "example" { + name = "existing_lakeformation_role" +} + +data "aws_s3_bucket" "example" { + bucket = "existing_bucket" +} + +resource "aws_lakeformation_resource" "example" { + resource_arn = data.aws_s3_bucket.example.arn + use_service_linked_role = true +} + +resource "aws_lakeformation_permissions" "example" { + permissions = ["DATA_LOCATION_ACCESS"] + principal = data.aws_iam_role.example.arn + + location = aws_lakeformation_resource.example.resource_arn +} +``` + +### Granting permissions on Lake Formation database + +```hcl +data "aws_iam_role" "example" { + name = "existing_lakeformation_role" +} + +resource "aws_glue_catalog_database" "example" { + name = "example_database" +} + +resource "aws_lakeformation_permissions" "example" { + permissions = ["ALTER", "CREATE_TABLE", "DROP"] + principal = data.aws_iam_role.example.arn + + database = aws_glue_catalog_database.example.name +} +``` + +### Granting permissions on Lake Formation table + +```hcl +data "aws_iam_role" "example" { + name = "existing_lakeformation_role" +} + +resource "aws_glue_catalog_database" "example" { + name = "example_database" +} + +resource "aws_glue_catalog_table" "example" { + name = "example_table" + database_name = aws_glue_catalog_database.example.name +} + +resource "aws_lakeformation_permissions" "example" { + permissions = ["INSERT", "DELETE", "SELECT"] + permissions_with_grant_option = ["SELECT"] + principal = data.aws_iam_role.example.arn + + table { + database = aws_glue_catalog_table.example.database_name + name = aws_glue_catalog_table.example.name + } +} +``` + +### Granting permissions on Lake Formation columns + +```hcl +data "aws_iam_role" "example" { + name = "existing_lakeformation_role" +} + +resource "aws_glue_catalog_database" "example" { + name = "example_database" +} + +resource "aws_glue_catalog_table" "example" { + name = "example_table" + database_name = aws_glue_catalog_database.example.name + + storage_descriptor { + columns { + name = "event" + type = "string" + } + columns { + name = "timestamp" + type = "date" + } + columns { + name = "value" + type = "double" + } + } +} + +resource "aws_lakeformation_permissions" "example" { + permissions = [""SELECT"] + principal = data.aws_iam_role.example.arn + + table { + database = aws_glue_catalog_table.example.database_name + name = aws_glue_catalog_table.example.name + column_names = ["event", "timestamp"] + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `permissions` – (Required) The permissions granted. + +* `principal` – (Required) The AWS Lake Formation principal. + +The following arguments are optional: + +* `catalog_id` – (Optional) The identifier for the Data Catalog. By default, the account ID. 
+
+* `permissions_with_grant_option` – (Optional) Subset of `permissions` that the principal is also allowed to grant to other principals.
+
+* `database` – (Optional) The name of the database resource. Unique to the Data Catalog. A database is a set of associated table definitions organized into a logical group.
+
+* `location` – (Optional) The Amazon Resource Name (ARN) of the resource (data location).
+
+* `table` – (Optional) A structure for the table object. A table is a metadata definition that represents your data.
+
+At least one of `database`, `location`, `table` must be specified.
+
+The `table` object supports the following:
+
+* `database` – (Required) The name of the database for the table.
+
+* `name` – (Required) The name of the table.
+
+* `column_names` – (Optional) The list of column names for the table.
+
+* `excluded_column_names` – (Optional) The list of column names to exclude; any column whose name appears in this list is excluded from the grant.
+
+The following summarizes the available Lake Formation permissions on Data Catalog resources:
+
+* `DATA_LOCATION_ACCESS` on registered location resources,
+
+* `CREATE_DATABASE` on the catalog,
+
+* `CREATE_TABLE`, `ALTER`, `DROP` on databases,
+
+* `ALTER`, `INSERT`, `DELETE`, `DROP`, `SELECT` on tables,
+
+* `SELECT` on columns.
+
+`INSERT`, `DELETE`, and `SELECT` permissions apply to the underlying data; the others apply to the metadata.
+
+There is also a special permission, `ALL`, which enables a principal to perform every supported Lake Formation operation on the database or table on which it is granted.
+
+For details on each permission, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html).
+
+~> **NOTE:** Data lake administrators and database creators have implicit Lake Formation permissions. See [Implicit Lake Formation Permissions](https://docs.aws.amazon.com/lake-formation/latest/dg/implicit-permissions.html) for more information.
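+
+As a complement to the column-level example above, the following is a minimal sketch of `excluded_column_names` (it reuses the role and table from the earlier examples; the resource name `example_excluding_columns` is illustrative only):
+
+```hcl
+resource "aws_lakeformation_permissions" "example_excluding_columns" {
+  permissions = ["SELECT"]
+  principal = data.aws_iam_role.example.arn
+
+  table {
+    database = aws_glue_catalog_table.example.database_name
+    name = aws_glue_catalog_table.example.name
+    excluded_column_names = ["value"]
+  }
+}
+```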
+ From d85171b16aa91a16e78dbbecb82ebbb8bf102082 Mon Sep 17 00:00:00 2001 From: Gaylord Mazelier Date: Wed, 20 May 2020 19:28:17 +0200 Subject: [PATCH 0299/1212] Define permissions at catalog level --- aws/resource_aws_lakeformation_permissions.go | 33 ++- ...urce_aws_lakeformation_permissions_test.go | 238 +++++++++++------- .../r/lakeformation_permissions.html.markdown | 15 +- 3 files changed, 176 insertions(+), 110 deletions(-) diff --git a/aws/resource_aws_lakeformation_permissions.go b/aws/resource_aws_lakeformation_permissions.go index bca0637660f..bf88969dc40 100644 --- a/aws/resource_aws_lakeformation_permissions.go +++ b/aws/resource_aws_lakeformation_permissions.go @@ -174,10 +174,8 @@ func resourceAwsLakeFormationPermissionsList(d *schema.ResourceData, meta interf return fmt.Errorf("Error no LakeFormation Permissions found: %s", input) } - permissionsHead := permissions[0] // XXX: assuming there is only one result in the list + permissionsHead := permissions[0] d.Set("catalog_id", catalogId) - d.Set("permissions", permissionsHead.Permissions) - d.Set("permissions_with_grant_option", permissionsHead.PermissionsWithGrantOption) d.Set("principal", permissionsHead.Principal.DataLakePrincipalIdentifier) if dataLocation := permissionsHead.Resource.DataLocation; dataLocation != nil { d.Set("location", dataLocation.ResourceArn) @@ -191,6 +189,13 @@ func resourceAwsLakeFormationPermissionsList(d *schema.ResourceData, meta interf if table := permissionsHead.Resource.TableWithColumns; table != nil { d.Set("table", flattenAWSLakeFormationTableWithColumns(table)) } + var allPermissions, allPermissionsWithGrant []*string + for _, p := range permissions { + allPermissions = append(allPermissions, p.Permissions...) + allPermissionsWithGrant = append(allPermissionsWithGrant, p.PermissionsWithGrantOption...) 
+ } + d.Set("permissions", allPermissions) + d.Set("permissions_with_grant_option", allPermissionsWithGrant) return nil } @@ -224,22 +229,23 @@ func expandAwsLakeFormationPrincipal(d *schema.ResourceData) *lakeformation.Data } func expandAwsLakeFormationResource(d *schema.ResourceData) *lakeformation.Resource { - resource := &lakeformation.Resource{ - // Catalog: &lakeformation.CatalogResource{}, - } if v, ok := d.GetOk("database"); ok { databaseName := v.(string) if len(databaseName) > 0 { - resource.Database = &lakeformation.DatabaseResource{ - Name: aws.String(databaseName), + return &lakeformation.Resource{ + Database: &lakeformation.DatabaseResource{ + Name: aws.String(databaseName), + }, } } } if v, ok := d.GetOk("location"); ok { location := v.(string) if len(location) > 0 { - resource.DataLocation = &lakeformation.DataLocationResource{ - ResourceArn: aws.String(v.(string)), + return &lakeformation.Resource{ + DataLocation: &lakeformation.DataLocationResource{ + ResourceArn: aws.String(v.(string)), + }, } } } @@ -248,6 +254,7 @@ func expandAwsLakeFormationResource(d *schema.ResourceData) *lakeformation.Resou if len(tables) > 0 { table := tables[0].(map[string]interface{}) + resource := &lakeformation.Resource{} var databaseName, tableName string var columnNames, excludedColumnNames []interface{} if x, ok := table["database"]; ok { @@ -283,10 +290,12 @@ func expandAwsLakeFormationResource(d *schema.ResourceData) *lakeformation.Resou Name: aws.String(tableName), } } + return resource } } - - return resource + return &lakeformation.Resource{ + Catalog: &lakeformation.CatalogResource{}, + } } func flattenAWSLakeFormationTable(tb *lakeformation.TableResource) map[string]interface{} { diff --git a/aws/resource_aws_lakeformation_permissions_test.go b/aws/resource_aws_lakeformation_permissions_test.go index c1e20c10ee7..41c2e6e50b8 100644 --- a/aws/resource_aws_lakeformation_permissions_test.go +++ b/aws/resource_aws_lakeformation_permissions_test.go @@ -11,20 +11,30 @@ import ( func TestAccAWSLakeFormationPermissions_full(t *testing.T) { rName := acctest.RandomWithPrefix("lakeformation-test-bucket") dName := acctest.RandomWithPrefix("lakeformation-test-db") - // tName := acctest.RandomWithPrefix("lakeformation-test-table") + tName := acctest.RandomWithPrefix("lakeformation-test-table") callerIdentityName := "data.aws_caller_identity.current" roleName := "data.aws_iam_role.test" resourceName := "aws_lakeformation_permissions.test" bucketName := "aws_s3_bucket.test" dbName := "aws_glue_catalog_database.test" - // tableName := "aws_glue_catalog_table.test" + tableName := "aws_glue_catalog_table.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, // TODO: CheckDestroy Steps: []resource.TestStep{ + { + Config: testAccAWSLakeFormationPermissionsConfig_catalog(), + Check: resource.ComposeTestCheckFunc( + testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), + resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), + resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "principal"), + resource.TestCheckResourceAttr(resourceName, "permissions.#", "1"), + resource.TestCheckResourceAttr(resourceName, "permissions.0", "CREATE_DATABASE"), + ), + }, { Config: testAccAWSLakeFormationPermissionsConfig_location(rName), Check: resource.ComposeTestCheckFunc( @@ -51,9 +61,22 @@ func TestAccAWSLakeFormationPermissions_full(t *testing.T) { 
resource.TestCheckResourceAttr(resourceName, "permissions_with_grant_option.0", "CREATE_TABLE"), ), }, + { + Config: testAccAWSLakeFormationPermissionsConfig_table(rName, dName, tName, "ALL"), + Check: resource.ComposeTestCheckFunc( + testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), + resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), + resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "principal"), + resource.TestCheckResourceAttr(resourceName, "table.#", "1"), + resource.TestCheckResourceAttrPair(tableName, "database_name", resourceName, "table.0.database"), + resource.TestCheckResourceAttrPair(tableName, "name", resourceName, "table.0.name"), + resource.TestCheckResourceAttr(resourceName, "permissions.#", "1"), + resource.TestCheckResourceAttr(resourceName, "permissions.0", "ALL"), + ), + }, // FIXME: more than one permission in API read result // { - // Config: testAccAWSLakeFormationPermissionsConfig_table(rName, dName, tName), + // Config: testAccAWSLakeFormationPermissionsConfig_table(rName, dName, tName, "SELECT"), // Check: resource.ComposeTestCheckFunc( // testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), // resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), @@ -62,7 +85,7 @@ func TestAccAWSLakeFormationPermissions_full(t *testing.T) { // resource.TestCheckResourceAttrPair(tableName, "database_name", resourceName, "table.0.database"), // resource.TestCheckResourceAttrPair(tableName, "name", resourceName, "table.0.name"), // resource.TestCheckResourceAttr(resourceName, "permissions.#", "1"), - // resource.TestCheckResourceAttr(resourceName, "permissions.0", "ALL"), + // resource.TestCheckResourceAttr(resourceName, "permissions.0", "SELECT"), // ), // }, // FIXME: WIP @@ -86,6 +109,27 @@ func TestAccAWSLakeFormationPermissions_full(t *testing.T) { }) } +func testAccAWSLakeFormationPermissionsConfig_catalog() string { + return ` +data "aws_caller_identity" "current" {} + +data "aws_iam_role" "test" { + name = "AWSServiceRoleForLakeFormationDataAccess" +} + +resource "aws_lakeformation_datalake_settings" "test" { + admins = [ + data.aws_caller_identity.current.arn + ] +} + +resource "aws_lakeformation_permissions" "test" { + permissions = ["CREATE_DATABASE"] + principal = data.aws_iam_role.test.arn +} +` +} + func testAccAWSLakeFormationPermissionsConfig_location(rName string) string { return fmt.Sprintf(` data "aws_caller_identity" "current" {} @@ -150,96 +194,96 @@ resource "aws_lakeformation_permissions" "test" { `, rName, dName) } -// func testAccAWSLakeFormationPermissionsConfig_table(rName, dName, tName string) string { -// return fmt.Sprintf(` -// data "aws_caller_identity" "current" {} - -// data "aws_iam_role" "test" { -// name = "AWSServiceRoleForLakeFormationDataAccess" -// } - -// resource "aws_s3_bucket" "test" { -// bucket = %[1]q -// } - -// resource "aws_glue_catalog_database" "test" { -// name = %[2]q -// } - -// resource "aws_glue_catalog_table" "test" { -// name = %[3]q -// database_name = aws_glue_catalog_database.test.name -// } - -// resource "aws_lakeformation_datalake_settings" "test" { -// admins = [ -// data.aws_caller_identity.current.arn -// ] -// } - -// resource "aws_lakeformation_permissions" "test" { -// permissions = ["SELECT"] -// principal = data.aws_iam_role.test.arn - -// table { -// database = aws_glue_catalog_table.test.database_name -// name = aws_glue_catalog_table.test.name -// } -// } -// `, rName, dName, 
tName) -// } - -// func testAccAWSLakeFormationPermissionsConfig_tableWithColumns(rName, dName, tName string) string { -// return fmt.Sprintf(` -// data "aws_caller_identity" "current" {} - -// data "aws_iam_role" "test" { -// name = "AWSServiceRoleForLakeFormationDataAccess" -// } - -// resource "aws_s3_bucket" "test" { -// bucket = %[1]q -// } - -// resource "aws_glue_catalog_database" "test" { -// name = %[2]q -// } - -// resource "aws_glue_catalog_table" "test" { -// name = %[3]q -// database_name = aws_glue_catalog_database.test.name - -// storage_descriptor { -// columns { -// name = "event" -// type = "string" -// } -// columns { -// name = "timestamp" -// type = "date" -// } -// columns { -// name = "value" -// type = "double" -// } -// } -// } - -// resource "aws_lakeformation_datalake_settings" "test" { -// admins = [ -// data.aws_caller_identity.current.arn -// ] -// } - -// resource "aws_lakeformation_permissions" "test" { -// permissions = ["SELECT"] -// principal = data.aws_iam_role.test.arn - -// table { -// database = aws_glue_catalog_table.test.database_name -// name = aws_glue_catalog_table.test.name -// column_names = ["event", "timestamp"] -// } -// } -// `, rName, dName, tName) -// } +func testAccAWSLakeFormationPermissionsConfig_table(rName, dName, tName, permission string) string { + return fmt.Sprintf(` +data "aws_caller_identity" "current" {} + +data "aws_iam_role" "test" { + name = "AWSServiceRoleForLakeFormationDataAccess" +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_glue_catalog_database" "test" { + name = %[2]q +} + +resource "aws_glue_catalog_table" "test" { + name = %[3]q + database_name = aws_glue_catalog_database.test.name +} + +resource "aws_lakeformation_datalake_settings" "test" { + admins = [ + data.aws_caller_identity.current.arn + ] +} + +resource "aws_lakeformation_permissions" "test" { + permissions = [%[4]q] + principal = data.aws_iam_role.test.arn + + table { + database = aws_glue_catalog_table.test.database_name + name = aws_glue_catalog_table.test.name + } +} +`, rName, dName, tName, permission) +} + +/* func testAccAWSLakeFormationPermissionsConfig_tableWithColumns(rName, dName, tName string) string { + return fmt.Sprintf(` +data "aws_caller_identity" "current" {} + +data "aws_iam_role" "test" { + name = "AWSServiceRoleForLakeFormationDataAccess" +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_glue_catalog_database" "test" { + name = %[2]q +} + +resource "aws_glue_catalog_table" "test" { + name = %[3]q + database_name = aws_glue_catalog_database.test.name + + storage_descriptor { + columns { + name = "event" + type = "string" + } + columns { + name = "timestamp" + type = "date" + } + columns { + name = "value" + type = "double" + } + } +} + +resource "aws_lakeformation_datalake_settings" "test" { + admins = [ + data.aws_caller_identity.current.arn + ] +} + +resource "aws_lakeformation_permissions" "test" { + permissions = ["SELECT"] + principal = data.aws_iam_role.test.arn + + table { + database = aws_glue_catalog_table.test.database_name + name = aws_glue_catalog_table.test.name + column_names = ["event", "timestamp"] + } +} +`, rName, dName, tName) +} */ diff --git a/website/docs/r/lakeformation_permissions.html.markdown b/website/docs/r/lakeformation_permissions.html.markdown index 31e4f6918e5..4bee2c25ffb 100644 --- a/website/docs/r/lakeformation_permissions.html.markdown +++ b/website/docs/r/lakeformation_permissions.html.markdown @@ -36,6 +36,19 @@ resource 
"aws_lakeformation_permissions" "example" { } ``` +### Granting permissions on Lake Formation catalog + +```hcl +data "aws_iam_role" "example" { + name = "existing_lakeformation_role" +} + +resource "aws_lakeformation_permissions" "example" { + permissions = ["CREATE_DATABASE"] + principal = data.aws_iam_role.example.arn +} +``` + ### Granting permissions on Lake Formation database ```hcl @@ -146,7 +159,7 @@ The following arguments are optional: * `table` – (Optional) A structure for the table object. A table is a metadata definition that represents your data. -At least one of `database`, `location`, `table` must be specified. +Only one of `database`, `location`, `table` can be specified at a time. If none of them is specified, permissions will be set at catalog level. See bellow for available permissions for each resource. The `table` object supports the following: From d4845eb37e0ffc401b9028de6f2d026012923c00 Mon Sep 17 00:00:00 2001 From: Gaylord Mazelier Date: Wed, 20 May 2020 19:53:56 +0200 Subject: [PATCH 0300/1212] Test multiple permissions on table resource --- ...urce_aws_lakeformation_permissions_test.go | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/aws/resource_aws_lakeformation_permissions_test.go b/aws/resource_aws_lakeformation_permissions_test.go index 41c2e6e50b8..f4d947364d9 100644 --- a/aws/resource_aws_lakeformation_permissions_test.go +++ b/aws/resource_aws_lakeformation_permissions_test.go @@ -62,7 +62,7 @@ func TestAccAWSLakeFormationPermissions_full(t *testing.T) { ), }, { - Config: testAccAWSLakeFormationPermissionsConfig_table(rName, dName, tName, "ALL"), + Config: testAccAWSLakeFormationPermissionsConfig_table(rName, dName, tName, "\"ALL\""), Check: resource.ComposeTestCheckFunc( testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), @@ -74,20 +74,21 @@ func TestAccAWSLakeFormationPermissions_full(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "permissions.0", "ALL"), ), }, - // FIXME: more than one permission in API read result - // { - // Config: testAccAWSLakeFormationPermissionsConfig_table(rName, dName, tName, "SELECT"), - // Check: resource.ComposeTestCheckFunc( - // testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), - // resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), - // resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "principal"), - // resource.TestCheckResourceAttr(resourceName, "table.#", "1"), - // resource.TestCheckResourceAttrPair(tableName, "database_name", resourceName, "table.0.database"), - // resource.TestCheckResourceAttrPair(tableName, "name", resourceName, "table.0.name"), - // resource.TestCheckResourceAttr(resourceName, "permissions.#", "1"), - // resource.TestCheckResourceAttr(resourceName, "permissions.0", "SELECT"), - // ), - // }, + // FIXME: more than one permission in API read result (in acceptance tests setup) + { + Config: testAccAWSLakeFormationPermissionsConfig_table(rName, dName, tName, "\"ALL\", \"SELECT\""), + Check: resource.ComposeTestCheckFunc( + testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), + resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), + resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "principal"), + resource.TestCheckResourceAttr(resourceName, "table.#", "1"), + 
resource.TestCheckResourceAttrPair(tableName, "database_name", resourceName, "table.0.database"), + resource.TestCheckResourceAttrPair(tableName, "name", resourceName, "table.0.name"), + resource.TestCheckResourceAttr(resourceName, "permissions.#", "2"), + resource.TestCheckResourceAttr(resourceName, "permissions.0", "ALL"), + resource.TestCheckResourceAttr(resourceName, "permissions.1", "SELECT"), + ), + }, // FIXME: WIP // { // Config: testAccAWSLakeFormationPermissionsConfig_tableWithColumns(rName, dName, tName), @@ -194,7 +195,7 @@ resource "aws_lakeformation_permissions" "test" { `, rName, dName) } -func testAccAWSLakeFormationPermissionsConfig_table(rName, dName, tName, permission string) string { +func testAccAWSLakeFormationPermissionsConfig_table(rName, dName, tName, permissions string) string { return fmt.Sprintf(` data "aws_caller_identity" "current" {} @@ -222,7 +223,7 @@ resource "aws_lakeformation_datalake_settings" "test" { } resource "aws_lakeformation_permissions" "test" { - permissions = [%[4]q] + permissions = [%s] principal = data.aws_iam_role.test.arn table { @@ -230,7 +231,7 @@ resource "aws_lakeformation_permissions" "test" { name = aws_glue_catalog_table.test.name } } -`, rName, dName, tName, permission) +`, rName, dName, tName, permissions) } /* func testAccAWSLakeFormationPermissionsConfig_tableWithColumns(rName, dName, tName string) string { From fd0adb6c2f06da2d522386e996d5f6122229e077 Mon Sep 17 00:00:00 2001 From: Gaylord Mazelier Date: Wed, 20 May 2020 23:05:49 +0200 Subject: [PATCH 0301/1212] Check that permissions are revoked in acceptance tests --- ...urce_aws_lakeformation_permissions_test.go | 38 +++++++++++++++++-- 1 file changed, 35 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_lakeformation_permissions_test.go b/aws/resource_aws_lakeformation_permissions_test.go index f4d947364d9..9e55f9a8c34 100644 --- a/aws/resource_aws_lakeformation_permissions_test.go +++ b/aws/resource_aws_lakeformation_permissions_test.go @@ -4,8 +4,12 @@ import ( "fmt" "testing" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/lakeformation" + "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func TestAccAWSLakeFormationPermissions_full(t *testing.T) { @@ -21,9 +25,9 @@ func TestAccAWSLakeFormationPermissions_full(t *testing.T) { tableName := "aws_glue_catalog_table.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - // TODO: CheckDestroy + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLakeFormationPermissionsRevoked, Steps: []resource.TestStep{ { Config: testAccAWSLakeFormationPermissionsConfig_catalog(), @@ -288,3 +292,31 @@ resource "aws_lakeformation_permissions" "test" { } `, rName, dName, tName) } */ + +func testAccCheckAWSLakeFormationPermissionsRevoked(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).lakeformationconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_lakeformation_permissions" { + continue + } + + principal := rs.Primary.Attributes["principal"] + catalogId := rs.Primary.Attributes["catalog_id"] + + input := &lakeformation.ListPermissionsInput{ + CatalogId: aws.String(catalogId), + Principal: &lakeformation.DataLakePrincipal{ + DataLakePrincipalIdentifier: aws.String(principal), + }, + } + + out, err := 
conn.ListPermissions(input)
+	if err == nil && len(out.PrincipalResourcePermissions) > 0 {
+		// Permissions are still being returned for this principal, so they
+		// were not revoked when the resource was destroyed.
+		return fmt.Errorf("Resource still registered: %s %s", catalogId, principal)
+	}
+	}
+
+	return nil
+}

From 419d805c496a45b555b6427c9f109960cc93bc69 Mon Sep 17 00:00:00 2001
From: Gaylord Mazelier
Date: Wed, 20 May 2020 23:18:15 +0200
Subject: [PATCH 0302/1212] Add timestamp to resource ID

---
 aws/resource_aws_lakeformation_permissions.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/aws/resource_aws_lakeformation_permissions.go b/aws/resource_aws_lakeformation_permissions.go
index bf88969dc40..0896dbfdaba 100644
--- a/aws/resource_aws_lakeformation_permissions.go
+++ b/aws/resource_aws_lakeformation_permissions.go
@@ -2,6 +2,7 @@ package aws
 
 import (
 	"fmt"
+	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/lakeformation"
@@ -148,7 +149,7 @@ func resourceAwsLakeFormationPermissionsGrant(d *schema.ResourceData, meta inter
 		return fmt.Errorf("Error granting LakeFormation Permissions: %s", err)
 	}
 
-	d.SetId(fmt.Sprintf("lakeformation:resource:%s", catalogId)) // FIXME
+	d.SetId(fmt.Sprintf("lakeformation:resource:%s:%s", catalogId, time.Now().UTC().String()))
 	d.Set("catalog_id", catalogId)
 
 	return resourceAwsLakeFormationPermissionsList(d, meta)

From 12bc694fff64044af29eeac4eb2f95463c6a2c14 Mon Sep 17 00:00:00 2001
From: Gaylord Mazelier
Date: Wed, 20 May 2020 23:19:18 +0200
Subject: [PATCH 0303/1212] Use const defined in API for permissions

---
 aws/resource_aws_lakeformation_permissions.go | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/aws/resource_aws_lakeformation_permissions.go b/aws/resource_aws_lakeformation_permissions.go
index 0896dbfdaba..d46f7a69e47 100644
--- a/aws/resource_aws_lakeformation_permissions.go
+++ b/aws/resource_aws_lakeformation_permissions.go
@@ -12,15 +12,15 @@
 func AwsLakeFormationPermissions() []string {
 	return []string{
-		"ALL",
-		"SELECT",
-		"ALTER",
-		"DROP",
-		"DELETE",
-		"INSERT",
-		"CREATE_DATABASE",
-		"CREATE_TABLE",
-		"DATA_LOCATION_ACCESS",
+		lakeformation.PermissionAll,
+		lakeformation.PermissionSelect,
+		lakeformation.PermissionAlter,
+		lakeformation.PermissionDrop,
+		lakeformation.PermissionDelete,
+		lakeformation.PermissionInsert,
+		lakeformation.PermissionCreateDatabase,
+		lakeformation.PermissionCreateTable,
+		lakeformation.PermissionDataLocationAccess,
 	}
 }

From 88588632bc3f0ef6ac95fce48239ced9c802a464 Mon Sep 17 00:00:00 2001
From: Gaylord Mazelier
Date: Wed, 20 May 2020 23:19:43 +0200
Subject: [PATCH 0304/1212] Cleanup

---
 aws/resource_aws_lakeformation_permissions.go | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/aws/resource_aws_lakeformation_permissions.go b/aws/resource_aws_lakeformation_permissions.go
index d46f7a69e47..774730308f8 100644
--- a/aws/resource_aws_lakeformation_permissions.go
+++ b/aws/resource_aws_lakeformation_permissions.go
@@ -138,12 +138,6 @@ func resourceAwsLakeFormationPermissionsGrant(d *schema.ResourceData, meta inter
 		input.PermissionsWithGrantOption = expandStringList(vs.([]interface{}))
 	}
 
-	// Catalog: CREATE_DATABASE
-	// Location: DATA_LOCATION_ACCESS
-	// Database: ALTER, CREATE_TABLE, DROP, (ALL ~ Super)
-	// Table: ALTER, INSERT, DELETE, DROP, SELECT, (ALL ~ Super)
-	// TableWithColumns: SELECT
-
 	_, err := conn.GrantPermissions(input)
 	if err != nil {
 		return fmt.Errorf("Error granting LakeFormation Permissions: %s", err)

From 661e2b37f1cc9fc5d3cbf44825210b331ddac282 Mon Sep 17 00:00:00 2001
From: Gaylord Mazelier
Date: Tue, 26 May 2020 23:00:07 +0200
Subject: [PATCH 0305/1212] Handle TableWithColumns resource I/O in ListPermissions request

---
 aws/resource_aws_lakeformation_permissions.go | 45 +++++++++++++++--
 ...urce_aws_lakeformation_permissions_test.go | 48 +++++++++----------
 2 files changed, 66 insertions(+), 27 deletions(-)

diff --git a/aws/resource_aws_lakeformation_permissions.go b/aws/resource_aws_lakeformation_permissions.go
index 774730308f8..f880fc74ed7 100644
--- a/aws/resource_aws_lakeformation_permissions.go
+++ b/aws/resource_aws_lakeformation_permissions.go
@@ -153,10 +153,36 @@ func resourceAwsLakeFormationPermissionsList(d *schema.ResourceData, meta interf
 	conn := meta.(*AWSClient).lakeformationconn
 	catalogId := d.Get("catalog_id").(string)
 
+	// This operation does not support getting privileges on a table with columns.
+	// Instead, call this operation on the table, and the operation returns the
+	// table and the table with columns.
+	resource := expandAwsLakeFormationResource(d)
+	isTableWithColumnsResource := false
+	if table := resource.TableWithColumns; table != nil {
+		resource.Table = &lakeformation.TableResource{
+			DatabaseName: table.DatabaseName,
+			Name:         table.Name,
+		}
+		resource.TableWithColumns = nil
+		isTableWithColumnsResource = true
+	}
+
+	var resourceType string
+	if catalog := resource.Catalog; catalog != nil {
+		resourceType = lakeformation.DataLakeResourceTypeCatalog
+	} else if location := resource.DataLocation; location != nil {
+		resourceType = lakeformation.DataLakeResourceTypeDataLocation
+	} else if db := resource.Database; db != nil {
+		resourceType = lakeformation.DataLakeResourceTypeDatabase
+	} else {
+		resourceType = lakeformation.DataLakeResourceTypeTable
+	}
+
 	input := &lakeformation.ListPermissionsInput{
-		CatalogId: aws.String(catalogId),
-		Principal: expandAwsLakeFormationPrincipal(d),
-		Resource:  expandAwsLakeFormationResource(d),
+		CatalogId:    aws.String(catalogId),
+		Principal:    expandAwsLakeFormationPrincipal(d),
+		Resource:     resource,
+		ResourceType: &resourceType,
 	}
 
 	out, err := conn.ListPermissions(input)
@@ -169,6 +195,19 @@ func resourceAwsLakeFormationPermissionsList(d *schema.ResourceData, meta interf
 		return fmt.Errorf("Error no LakeFormation Permissions found: %s", input)
 	}
 
+	// This operation does not support getting privileges on a table with columns.
+	// Instead, call this operation on the table, and the operation returns the
+	// table and the table with columns.
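+	// Keep only the TableWithColumns entries below so that column-level
+	// grants are what get read back into state when columns were requested.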
+ if isTableWithColumnsResource { + filtered := make([]*lakeformation.PrincipalResourcePermissions, 0) + for _, p := range permissions { + if table := p.Resource.TableWithColumns; table != nil { + filtered = append(filtered, p) + } + } + permissions = filtered + } + permissionsHead := permissions[0] d.Set("catalog_id", catalogId) d.Set("principal", permissionsHead.Principal.DataLakePrincipalIdentifier) diff --git a/aws/resource_aws_lakeformation_permissions_test.go b/aws/resource_aws_lakeformation_permissions_test.go index 9e55f9a8c34..772496b8841 100644 --- a/aws/resource_aws_lakeformation_permissions_test.go +++ b/aws/resource_aws_lakeformation_permissions_test.go @@ -78,24 +78,8 @@ func TestAccAWSLakeFormationPermissions_full(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "permissions.0", "ALL"), ), }, - // FIXME: more than one permission in API read result (in acceptance tests setup) - { - Config: testAccAWSLakeFormationPermissionsConfig_table(rName, dName, tName, "\"ALL\", \"SELECT\""), - Check: resource.ComposeTestCheckFunc( - testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), - resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), - resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "principal"), - resource.TestCheckResourceAttr(resourceName, "table.#", "1"), - resource.TestCheckResourceAttrPair(tableName, "database_name", resourceName, "table.0.database"), - resource.TestCheckResourceAttrPair(tableName, "name", resourceName, "table.0.name"), - resource.TestCheckResourceAttr(resourceName, "permissions.#", "2"), - resource.TestCheckResourceAttr(resourceName, "permissions.0", "ALL"), - resource.TestCheckResourceAttr(resourceName, "permissions.1", "SELECT"), - ), - }, - // FIXME: WIP // { - // Config: testAccAWSLakeFormationPermissionsConfig_tableWithColumns(rName, dName, tName), + // Config: testAccAWSLakeFormationPermissionsConfig_table(rName, dName, tName, "\"ALL\", \"SELECT\""), // Check: resource.ComposeTestCheckFunc( // testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), // resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), @@ -103,13 +87,27 @@ func TestAccAWSLakeFormationPermissions_full(t *testing.T) { // resource.TestCheckResourceAttr(resourceName, "table.#", "1"), // resource.TestCheckResourceAttrPair(tableName, "database_name", resourceName, "table.0.database"), // resource.TestCheckResourceAttrPair(tableName, "name", resourceName, "table.0.name"), - // resource.TestCheckResourceAttr(resourceName, "table.0.column_names.#", "2"), - // resource.TestCheckResourceAttr(resourceName, "table.0.column_names.0", "event"), - // resource.TestCheckResourceAttr(resourceName, "table.0.column_names.1", "timestamp"), - // resource.TestCheckResourceAttr(resourceName, "permissions.#", "1"), - // resource.TestCheckResourceAttr(resourceName, "permissions.0", "SELECT"), + // resource.TestCheckResourceAttr(resourceName, "permissions.#", "2"), + // resource.TestCheckResourceAttr(resourceName, "permissions.0", "ALL"), + // resource.TestCheckResourceAttr(resourceName, "permissions.1", "SELECT"), // ), // }, + { + Config: testAccAWSLakeFormationPermissionsConfig_tableWithColumns(rName, dName, tName), + Check: resource.ComposeTestCheckFunc( + testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), + resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), + resource.TestCheckResourceAttrPair(roleName, "arn", 
resourceName, "principal"),
+					resource.TestCheckResourceAttr(resourceName, "table.#", "1"),
+					resource.TestCheckResourceAttrPair(tableName, "database_name", resourceName, "table.0.database"),
+					resource.TestCheckResourceAttrPair(tableName, "name", resourceName, "table.0.name"),
+					resource.TestCheckResourceAttr(resourceName, "table.0.column_names.#", "2"),
+					resource.TestCheckResourceAttr(resourceName, "table.0.column_names.0", "event"),
+					resource.TestCheckResourceAttr(resourceName, "table.0.column_names.1", "timestamp"),
+					resource.TestCheckResourceAttr(resourceName, "permissions.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "permissions.0", "SELECT"),
+				),
+			},
 		},
 	})
 }
@@ -131,6 +129,8 @@ resource "aws_lakeformation_datalake_settings" "test" {
 resource "aws_lakeformation_permissions" "test" {
 	permissions = ["CREATE_DATABASE"]
 	principal = data.aws_iam_role.test.arn
+
+	depends_on = ["aws_lakeformation_datalake_settings.test"]
 }
 `
 }
@@ -238,7 +238,7 @@ resource "aws_lakeformation_permissions" "test" {
 `, rName, dName, tName, permissions)
 }
 
-/* func testAccAWSLakeFormationPermissionsConfig_tableWithColumns(rName, dName, tName string) string {
+func testAccAWSLakeFormationPermissionsConfig_tableWithColumns(rName, dName, tName string) string {
 	return fmt.Sprintf(`
 data "aws_caller_identity" "current" {}
 
@@ -289,7 +291,7 @@ resource "aws_lakeformation_permissions" "test" {
 	}
 }
 `, rName, dName, tName)
-} */
+}

 func testAccCheckAWSLakeFormationPermissionsRevoked(s *terraform.State) error {
 	conn := testAccProvider.Meta().(*AWSClient).lakeformationconn

From 708c760815ebfd605a924d184dca8b6e8ade94bb Mon Sep 17 00:00:00 2001
From: Gaylord Mazelier
Date: Tue, 26 May 2020 23:19:12 +0200
Subject: [PATCH 0306/1212] Fix typo in HCL example

---
 website/docs/r/lakeformation_permissions.html.markdown | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/docs/r/lakeformation_permissions.html.markdown b/website/docs/r/lakeformation_permissions.html.markdown
index 4bee2c25ffb..329dff20fdd 100644
--- a/website/docs/r/lakeformation_permissions.html.markdown
+++ b/website/docs/r/lakeformation_permissions.html.markdown
@@ -128,7 +128,7 @@ resource "aws_glue_catalog_table" "example" {
 }
 
 resource "aws_lakeformation_permissions" "example" {
-  permissions = [""SELECT"]
+  permissions = ["SELECT"]
   principal = data.aws_iam_role.example.arn
 
   table {

From c5fd91649ff680df2a1f4ba5d296b9eafad92583 Mon Sep 17 00:00:00 2001
From: Gaylord Mazelier
Date: Wed, 27 May 2020 22:24:38 +0200
Subject: [PATCH 0307/1212] Remove redundant check, add one test step

---
 ...urce_aws_lakeformation_permissions_test.go | 43 ++++++++++---------
 1 file changed, 23 insertions(+), 20 deletions(-)

diff --git a/aws/resource_aws_lakeformation_permissions_test.go b/aws/resource_aws_lakeformation_permissions_test.go
index 772496b8841..957f4b63859 100644
--- a/aws/resource_aws_lakeformation_permissions_test.go
+++ b/aws/resource_aws_lakeformation_permissions_test.go
@@ -17,7 +17,6 @@ func TestAccAWSLakeFormationPermissions_full(t *testing.T) {
 	dName := acctest.RandomWithPrefix("lakeformation-test-db")
 	tName := acctest.RandomWithPrefix("lakeformation-test-table")
 
-	callerIdentityName := "data.aws_caller_identity.current"
 	roleName := "data.aws_iam_role.test"
 	resourceName := "aws_lakeformation_permissions.test"
 	bucketName := "aws_s3_bucket.test"
@@ -33,7 +32,6 @@ func TestAccAWSLakeFormationPermissions_full(t *testing.T) {
 			Config: testAccAWSLakeFormationPermissionsConfig_catalog(),
 			Check: resource.ComposeTestCheckFunc(
testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), - resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "principal"), resource.TestCheckResourceAttr(resourceName, "permissions.#", "1"), resource.TestCheckResourceAttr(resourceName, "permissions.0", "CREATE_DATABASE"), @@ -43,7 +41,6 @@ func TestAccAWSLakeFormationPermissions_full(t *testing.T) { Config: testAccAWSLakeFormationPermissionsConfig_location(rName), Check: resource.ComposeTestCheckFunc( testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), - resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "principal"), resource.TestCheckResourceAttrPair(bucketName, "arn", resourceName, "location"), resource.TestCheckResourceAttr(resourceName, "permissions.#", "1"), @@ -54,7 +51,6 @@ func TestAccAWSLakeFormationPermissions_full(t *testing.T) { Config: testAccAWSLakeFormationPermissionsConfig_database(rName, dName), Check: resource.ComposeTestCheckFunc( testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), - resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "principal"), resource.TestCheckResourceAttrPair(dbName, "name", resourceName, "database"), resource.TestCheckResourceAttr(resourceName, "permissions.#", "3"), @@ -69,7 +65,6 @@ func TestAccAWSLakeFormationPermissions_full(t *testing.T) { Config: testAccAWSLakeFormationPermissionsConfig_table(rName, dName, tName, "\"ALL\""), Check: resource.ComposeTestCheckFunc( testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), - resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "principal"), resource.TestCheckResourceAttr(resourceName, "table.#", "1"), resource.TestCheckResourceAttrPair(tableName, "database_name", resourceName, "table.0.database"), @@ -78,25 +73,23 @@ func TestAccAWSLakeFormationPermissions_full(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "permissions.0", "ALL"), ), }, - // { - // Config: testAccAWSLakeFormationPermissionsConfig_table(rName, dName, tName, "\"ALL\", \"SELECT\""), - // Check: resource.ComposeTestCheckFunc( - // testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), - // resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), - // resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "principal"), - // resource.TestCheckResourceAttr(resourceName, "table.#", "1"), - // resource.TestCheckResourceAttrPair(tableName, "database_name", resourceName, "table.0.database"), - // resource.TestCheckResourceAttrPair(tableName, "name", resourceName, "table.0.name"), - // resource.TestCheckResourceAttr(resourceName, "permissions.#", "2"), - // resource.TestCheckResourceAttr(resourceName, "permissions.0", "ALL"), - // resource.TestCheckResourceAttr(resourceName, "permissions.1", "SELECT"), - // ), - // }, + { + Config: testAccAWSLakeFormationPermissionsConfig_table(rName, dName, tName, "\"ALL\", \"SELECT\""), + Check: resource.ComposeTestCheckFunc( + testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), + resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "principal"), + 
resource.TestCheckResourceAttr(resourceName, "table.#", "1"), + resource.TestCheckResourceAttrPair(tableName, "database_name", resourceName, "table.0.database"), + resource.TestCheckResourceAttrPair(tableName, "name", resourceName, "table.0.name"), + resource.TestCheckResourceAttr(resourceName, "permissions.#", "2"), + resource.TestCheckResourceAttr(resourceName, "permissions.0", "ALL"), + resource.TestCheckResourceAttr(resourceName, "permissions.1", "SELECT"), + ), + }, { Config: testAccAWSLakeFormationPermissionsConfig_tableWithColumns(rName, dName, tName), Check: resource.ComposeTestCheckFunc( testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), - resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "principal"), resource.TestCheckResourceAttr(resourceName, "table.#", "1"), resource.TestCheckResourceAttrPair(tableName, "database_name", resourceName, "table.0.database"), @@ -156,6 +149,8 @@ resource "aws_lakeformation_datalake_settings" "test" { resource "aws_lakeformation_resource" "test" { resource_arn = aws_s3_bucket.test.arn use_service_linked_role = true + + depends_on = ["aws_lakeformation_datalake_settings.test"] } resource "aws_lakeformation_permissions" "test" { @@ -163,6 +158,8 @@ resource "aws_lakeformation_permissions" "test" { principal = data.aws_iam_role.test.arn location = aws_lakeformation_resource.test.resource_arn + + depends_on = ["aws_lakeformation_datalake_settings.test"] } `, rName) } @@ -195,6 +192,8 @@ resource "aws_lakeformation_permissions" "test" { principal = data.aws_iam_role.test.arn database = aws_glue_catalog_database.test.name + + depends_on = ["aws_lakeformation_datalake_settings.test"] } `, rName, dName) } @@ -234,6 +233,8 @@ resource "aws_lakeformation_permissions" "test" { database = aws_glue_catalog_table.test.database_name name = aws_glue_catalog_table.test.name } + + depends_on = ["aws_lakeformation_datalake_settings.test"] } `, rName, dName, tName, permissions) } @@ -289,6 +290,8 @@ resource "aws_lakeformation_permissions" "test" { name = aws_glue_catalog_table.test.name column_names = ["event", "timestamp"] } + + depends_on = ["aws_lakeformation_datalake_settings.test"] } `, rName, dName, tName) } From 2d4cb285262a7d60fff19198f4e6940d5735a9f3 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 11 Dec 2020 13:19:27 -0500 Subject: [PATCH 0308/1212] resource/lakeformation: Remove aws.erb from PR --- website/aws.erb | 3632 ----------------------------------------------- 1 file changed, 3632 deletions(-) delete mode 100644 website/aws.erb diff --git a/website/aws.erb b/website/aws.erb deleted file mode 100644 index 0fad8c5a6a3..00000000000 --- a/website/aws.erb +++ /dev/null @@ -1,3632 +0,0 @@ -<% wrap_layout :inner do %> - <% content_for :sidebar do %> - - <% end %> - <%= yield %> -<% end %> From 741137021ec94d4e7143f003f84920aadcc2fe14 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 11 Dec 2020 13:31:20 -0500 Subject: [PATCH 0309/1212] resource/lakeformation_permissions: Upgrade to plugin SDK v2 --- aws/resource_aws_lakeformation_permissions.go | 4 +-- ...urce_aws_lakeformation_permissions_test.go | 7 ++-- go.mod | 1 + go.sum | 33 +++++++++++++++++++ 4 files changed, 39 insertions(+), 6 deletions(-) diff --git a/aws/resource_aws_lakeformation_permissions.go b/aws/resource_aws_lakeformation_permissions.go index f880fc74ed7..408a42cf9c1 100644 --- a/aws/resource_aws_lakeformation_permissions.go +++ 
b/aws/resource_aws_lakeformation_permissions.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lakeformation" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func AwsLakeFormationPermissions() []string { diff --git a/aws/resource_aws_lakeformation_permissions_test.go b/aws/resource_aws_lakeformation_permissions_test.go index 957f4b63859..0e3e05be91e 100644 --- a/aws/resource_aws_lakeformation_permissions_test.go +++ b/aws/resource_aws_lakeformation_permissions_test.go @@ -6,10 +6,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lakeformation" - - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccAWSLakeFormationPermissions_full(t *testing.T) { diff --git a/go.mod b/go.mod index 597534f085f..97986e54841 100644 --- a/go.mod +++ b/go.mod @@ -11,6 +11,7 @@ require ( github.com/hashicorp/go-hclog v0.10.0 // indirect github.com/hashicorp/go-multierror v1.1.0 github.com/hashicorp/go-version v1.2.1 + github.com/hashicorp/terraform-plugin-sdk v1.16.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0 github.com/jen20/awspolicyequivalence v1.1.0 github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba diff --git a/go.sum b/go.sum index 3a28bf9bdf2..f9fbbe06859 100644 --- a/go.sum +++ b/go.sum @@ -59,6 +59,8 @@ github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2 github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= @@ -162,6 +164,8 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod 
h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= @@ -177,6 +181,7 @@ github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= +github.com/hashicorp/go-getter v1.4.2-0.20200106182914-9813cbd4eb02/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= github.com/hashicorp/go-getter v1.5.0 h1:ciWJaeZWSMbc5OiLMpKp40MKFPqO44i0h3uyfXPBkkk= github.com/hashicorp/go-getter v1.5.0/go.mod h1:a7z7NPPfNQpJWcn4rSWFtdrSldqLdLPEF3d8nFMsSLM= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= @@ -194,22 +199,34 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f h1:UdxlrJz4JOnY8W+DbLISwf2B8WXEolNRA8BGCwI9jws= +github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= +github.com/hashicorp/hcl/v2 v2.0.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90= github.com/hashicorp/hcl/v2 v2.3.0 h1:iRly8YaMwTBAKhn1Ybk7VSdzbnopghktCD031P8ggUE= github.com/hashicorp/hcl/v2 v2.3.0/go.mod h1:d+FwDBbOLvpAM3Z6J7gPj/VoAGkNe/gm352ZhjJ/Zv8= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/terraform-config-inspect v0.0.0-20191115094559-17f92b0546e8 h1:+RyjwU+Gnd/aTJBPZVDNm903eXVjjqhbaR4Ypx3xYyY= +github.com/hashicorp/terraform-config-inspect v0.0.0-20191115094559-17f92b0546e8/go.mod h1:p+ivJws3dpqbp1iP84+npOyAmTTOLMgCzrXd3GSdn/A= github.com/hashicorp/terraform-exec v0.10.0 h1:3nh/1e3u9gYRUQGOKWp/8wPR7ABlL2F14sZMZBrp+dM= github.com/hashicorp/terraform-exec v0.10.0/go.mod h1:tOT8j1J8rP05bZBGWXfMyU3HkLi1LWyqL3Bzsc3CJjo= github.com/hashicorp/terraform-json v0.5.0 h1:7TV3/F3y7QVSuN4r9BEXqnWqrAyeOtON8f0wvREtyzs= github.com/hashicorp/terraform-json v0.5.0/go.mod h1:eAbqb4w0pSlRmdvl8fOyHAi/+8jnkVYN28gJkSJrLhU= github.com/hashicorp/terraform-plugin-go v0.1.0 h1:kyXZ0nkHxiRev/q18N40IbRRk4AV0zE/MDJkDM3u8dY= github.com/hashicorp/terraform-plugin-go v0.1.0/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4= +github.com/hashicorp/terraform-plugin-sdk v1.16.0 h1:NrkXMRjHErUPPTHQkZ6JIn6bByiJzGnlJzH1rVdNEuE= +github.com/hashicorp/terraform-plugin-sdk v1.16.0/go.mod h1:5sVxrwW6/xzFhZyql+Q9zXCUEJaGWcBIxBbZFLpVXOI= github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0 h1:Egv+R1tOOjPNz643KBTx3tLT6RdFGGYJcZlyLvrPcEU= 
github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0/go.mod h1:+12dJQebYjuU/yiq94iZUPuC66abfRBrXdpVJia3ojk= +github.com/hashicorp/terraform-plugin-test/v2 v2.1.2 h1:p96IIn+XpvVjw7AtN8y9MKxn0x69S7wtbGf7JgDJoIk= +github.com/hashicorp/terraform-plugin-test/v2 v2.1.2/go.mod h1:jerO5mrd+jVNALy8aiq+VZOg/CR8T2T1QR3jd6JKGOI= +github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596 h1:hjyO2JsNZUKT1ym+FAdlBEkGPevazYsmVgIMw7dVELg= +github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= @@ -249,11 +266,13 @@ github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LE github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.7 h1:bQGKb3vps/j0E9GfJQ03JyhRuxsvdAanXlT9BTw3mdw= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= @@ -262,6 +281,8 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mitchellh/cli v1.1.1 h1:J64v/xD7Clql+JVKSvkYojLOXu1ibnY9ZjGLwSt/89w= github.com/mitchellh/cli v1.1.1/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -293,6 +314,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.1 h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI= +github.com/posener/complete 
v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E= github.com/pquerna/otp v1.3.0 h1:oJV/SkzR33anKXwQU3Of42rL4wbrffP4uvUf1SvS5Xs= github.com/pquerna/otp v1.3.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -300,7 +323,10 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -309,9 +335,11 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack v4.0.1+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= @@ -319,9 +347,13 @@ github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0B github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= +github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= github.com/zclconf/go-cty v1.2.1 h1:vGMsygfmeCl4Xb6OA5U5XVAaQZ69FvoG7X2jUtQujb8= github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= +github.com/zclconf/go-cty-yaml v1.0.1 h1:up11wlgAaDvlAGENcFDnZgkn0qUJurso7k6EpURKNF8= +github.com/zclconf/go-cty-yaml v1.0.1/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod 
h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -383,6 +415,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= From eeac9a39deadcd6d36d70d75153b270dbaca73cb Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 11 Dec 2020 13:35:35 -0500 Subject: [PATCH 0310/1212] resource/lakeformation_permissions: Remove datalake_settings from PR --- ...rce_aws_lakeformation_datalake_settings.go | 135 ------------------ ...ws_lakeformation_datalake_settings_test.go | 66 --------- aws/resource_aws_lakeformation_permissions.go | 9 ++ 3 files changed, 9 insertions(+), 201 deletions(-) delete mode 100644 aws/resource_aws_lakeformation_datalake_settings.go delete mode 100644 aws/resource_aws_lakeformation_datalake_settings_test.go diff --git a/aws/resource_aws_lakeformation_datalake_settings.go b/aws/resource_aws_lakeformation_datalake_settings.go deleted file mode 100644 index c708abcef59..00000000000 --- a/aws/resource_aws_lakeformation_datalake_settings.go +++ /dev/null @@ -1,135 +0,0 @@ -package aws - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/lakeformation" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/helper/validation" -) - -func resourceAwsLakeFormationDataLakeSettings() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsLakeFormationDataLakeSettingsPut, - Update: resourceAwsLakeFormationDataLakeSettingsPut, - Read: resourceAwsLakeFormationDataLakeSettingsRead, - Delete: resourceAwsLakeFormationDataLakeSettingsReset, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "catalog_id": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Computed: true, - }, - "admins": { - Type: schema.TypeList, - Required: true, - MinItems: 0, - MaxItems: 10, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.NoZeroValues, - }, - }, - }, - } -} - -func resourceAwsLakeFormationDataLakeSettingsPut(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).lakeformationconn - catalogId := createAwsDataCatalogId(d, meta.(*AWSClient).accountid) - - input := &lakeformation.PutDataLakeSettingsInput{ - CatalogId: aws.String(catalogId), - DataLakeSettings: &lakeformation.DataLakeSettings{ - DataLakeAdmins: expandAdmins(d), - }, - } - - _, err := conn.PutDataLakeSettings(input) - if err != nil { - return fmt.Errorf("Error updating DataLakeSettings: %s", err) - } - - d.SetId(fmt.Sprintf("lakeformation:settings:%s", catalogId)) - d.Set("catalog_id", catalogId) - - return resourceAwsLakeFormationDataLakeSettingsRead(d, meta) -} - 
-func resourceAwsLakeFormationDataLakeSettingsRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).lakeformationconn - catalogId := d.Get("catalog_id").(string) - - input := &lakeformation.GetDataLakeSettingsInput{ - CatalogId: aws.String(catalogId), - } - - out, err := conn.GetDataLakeSettings(input) - if err != nil { - return fmt.Errorf("Error reading DataLakeSettings: %s", err) - } - - d.Set("catalog_id", catalogId) - if err := d.Set("admins", flattenAdmins(out.DataLakeSettings.DataLakeAdmins)); err != nil { - return fmt.Errorf("Error setting admins from DataLakeSettings: %s", err) - } - // TODO: Add CreateDatabaseDefaultPermissions and CreateTableDefaultPermissions - - return nil -} - -func resourceAwsLakeFormationDataLakeSettingsReset(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).lakeformationconn - catalogId := d.Get("catalog_id").(string) - - input := &lakeformation.PutDataLakeSettingsInput{ - CatalogId: aws.String(catalogId), - DataLakeSettings: &lakeformation.DataLakeSettings{ - DataLakeAdmins: make([]*lakeformation.DataLakePrincipal, 0), - }, - } - - _, err := conn.PutDataLakeSettings(input) - if err != nil { - return fmt.Errorf("Error reseting DataLakeSettings: %s", err) - } - - return nil -} - -func createAwsDataCatalogId(d *schema.ResourceData, accountId string) (catalogId string) { - if inputCatalogId, ok := d.GetOkExists("catalog_id"); ok { - catalogId = inputCatalogId.(string) - } else { - catalogId = accountId - } - return -} - -func expandAdmins(d *schema.ResourceData) []*lakeformation.DataLakePrincipal { - xs := d.Get("admins") - ys := make([]*lakeformation.DataLakePrincipal, len(xs.([]interface{}))) - - for i, x := range xs.([]interface{}) { - ys[i] = &lakeformation.DataLakePrincipal{ - DataLakePrincipalIdentifier: aws.String(x.(string)), - } - } - - return ys -} - -func flattenAdmins(xs []*lakeformation.DataLakePrincipal) []string { - admins := make([]string, len(xs)) - for i, x := range xs { - admins[i] = aws.StringValue(x.DataLakePrincipalIdentifier) - } - - return admins -} diff --git a/aws/resource_aws_lakeformation_datalake_settings_test.go b/aws/resource_aws_lakeformation_datalake_settings_test.go deleted file mode 100644 index d2ac3b0c538..00000000000 --- a/aws/resource_aws_lakeformation_datalake_settings_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" -) - -func TestAccAWSLakeFormationDataLakeSettings_basic(t *testing.T) { - callerIdentityName := "data.aws_caller_identity.current" - resourceName := "aws_lakeformation_datalake_settings.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - // TODO: CheckDestroy: testAccCheckAWSLakeFormationDataLakeSettingsDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLakeFormationDataLakeSettingsConfig_basic, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), - resource.TestCheckResourceAttr(resourceName, "admins.#", "1"), - resource.TestCheckResourceAttrPair(callerIdentityName, "arn", resourceName, "admins.0"), - ), - }, - }, - }) -} - -const testAccAWSLakeFormationDataLakeSettingsConfig_basic = ` -data "aws_caller_identity" "current" {} - -resource "aws_lakeformation_datalake_settings" "test" { - admins = ["${data.aws_caller_identity.current.arn}"] -} -` - -func 
TestAccAWSLakeFormationDataLakeSettings_withCatalogId(t *testing.T) { - callerIdentityName := "data.aws_caller_identity.current" - resourceName := "aws_lakeformation_datalake_settings.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - // TODO: CheckDestroy: testAccCheckAWSLakeFormationDataLakeSettingsDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLakeFormationDataLakeSettingsConfig_withCatalogId, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair(callerIdentityName, "account_id", resourceName, "catalog_id"), - resource.TestCheckResourceAttr(resourceName, "admins.#", "1"), - resource.TestCheckResourceAttrPair(callerIdentityName, "arn", resourceName, "admins.0"), - ), - }, - }, - }) -} - -const testAccAWSLakeFormationDataLakeSettingsConfig_withCatalogId = ` -data "aws_caller_identity" "current" {} - -resource "aws_lakeformation_datalake_settings" "test" { - catalog_id = "${data.aws_caller_identity.current.account_id}" - admins = ["${data.aws_caller_identity.current.arn}"] -} -` diff --git a/aws/resource_aws_lakeformation_permissions.go b/aws/resource_aws_lakeformation_permissions.go index 408a42cf9c1..841385543cf 100644 --- a/aws/resource_aws_lakeformation_permissions.go +++ b/aws/resource_aws_lakeformation_permissions.go @@ -355,3 +355,12 @@ func flattenAWSLakeFormationTableWithColumns(tb *lakeformation.TableWithColumnsR return m } + +func createAwsDataCatalogId(d *schema.ResourceData, accountId string) (catalogId string) { + if inputCatalogId, ok := d.GetOkExists("catalog_id"); ok { + catalogId = inputCatalogId.(string) + } else { + catalogId = accountId + } + return +} From 1f2e58723e76614675432e904f2c7d2d922425f3 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 11 Dec 2020 13:49:57 -0500 Subject: [PATCH 0311/1212] resource/lakeformation_permissions: Remove PR-extraneous files --- go.mod | 1 - go.sum | 33 --------------------------------- 2 files changed, 34 deletions(-) diff --git a/go.mod b/go.mod index 97986e54841..597534f085f 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,6 @@ require ( github.com/hashicorp/go-hclog v0.10.0 // indirect github.com/hashicorp/go-multierror v1.1.0 github.com/hashicorp/go-version v1.2.1 - github.com/hashicorp/terraform-plugin-sdk v1.16.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0 github.com/jen20/awspolicyequivalence v1.1.0 github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba diff --git a/go.sum b/go.sum index f9fbbe06859..3a28bf9bdf2 100644 --- a/go.sum +++ b/go.sum @@ -59,8 +59,6 @@ github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2 github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= @@ -164,8 +162,6 @@ github.com/google/pprof 
v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= @@ -181,7 +177,6 @@ github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= -github.com/hashicorp/go-getter v1.4.2-0.20200106182914-9813cbd4eb02/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= github.com/hashicorp/go-getter v1.5.0 h1:ciWJaeZWSMbc5OiLMpKp40MKFPqO44i0h3uyfXPBkkk= github.com/hashicorp/go-getter v1.5.0/go.mod h1:a7z7NPPfNQpJWcn4rSWFtdrSldqLdLPEF3d8nFMsSLM= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= @@ -199,34 +194,22 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f h1:UdxlrJz4JOnY8W+DbLISwf2B8WXEolNRA8BGCwI9jws= -github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= -github.com/hashicorp/hcl/v2 v2.0.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90= github.com/hashicorp/hcl/v2 v2.3.0 h1:iRly8YaMwTBAKhn1Ybk7VSdzbnopghktCD031P8ggUE= github.com/hashicorp/hcl/v2 v2.3.0/go.mod h1:d+FwDBbOLvpAM3Z6J7gPj/VoAGkNe/gm352ZhjJ/Zv8= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/terraform-config-inspect v0.0.0-20191115094559-17f92b0546e8 h1:+RyjwU+Gnd/aTJBPZVDNm903eXVjjqhbaR4Ypx3xYyY= -github.com/hashicorp/terraform-config-inspect v0.0.0-20191115094559-17f92b0546e8/go.mod h1:p+ivJws3dpqbp1iP84+npOyAmTTOLMgCzrXd3GSdn/A= github.com/hashicorp/terraform-exec v0.10.0 h1:3nh/1e3u9gYRUQGOKWp/8wPR7ABlL2F14sZMZBrp+dM= 
github.com/hashicorp/terraform-exec v0.10.0/go.mod h1:tOT8j1J8rP05bZBGWXfMyU3HkLi1LWyqL3Bzsc3CJjo= github.com/hashicorp/terraform-json v0.5.0 h1:7TV3/F3y7QVSuN4r9BEXqnWqrAyeOtON8f0wvREtyzs= github.com/hashicorp/terraform-json v0.5.0/go.mod h1:eAbqb4w0pSlRmdvl8fOyHAi/+8jnkVYN28gJkSJrLhU= github.com/hashicorp/terraform-plugin-go v0.1.0 h1:kyXZ0nkHxiRev/q18N40IbRRk4AV0zE/MDJkDM3u8dY= github.com/hashicorp/terraform-plugin-go v0.1.0/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4= -github.com/hashicorp/terraform-plugin-sdk v1.16.0 h1:NrkXMRjHErUPPTHQkZ6JIn6bByiJzGnlJzH1rVdNEuE= -github.com/hashicorp/terraform-plugin-sdk v1.16.0/go.mod h1:5sVxrwW6/xzFhZyql+Q9zXCUEJaGWcBIxBbZFLpVXOI= github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0 h1:Egv+R1tOOjPNz643KBTx3tLT6RdFGGYJcZlyLvrPcEU= github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0/go.mod h1:+12dJQebYjuU/yiq94iZUPuC66abfRBrXdpVJia3ojk= -github.com/hashicorp/terraform-plugin-test/v2 v2.1.2 h1:p96IIn+XpvVjw7AtN8y9MKxn0x69S7wtbGf7JgDJoIk= -github.com/hashicorp/terraform-plugin-test/v2 v2.1.2/go.mod h1:jerO5mrd+jVNALy8aiq+VZOg/CR8T2T1QR3jd6JKGOI= -github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596 h1:hjyO2JsNZUKT1ym+FAdlBEkGPevazYsmVgIMw7dVELg= -github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= @@ -266,13 +249,11 @@ github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LE github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.7 h1:bQGKb3vps/j0E9GfJQ03JyhRuxsvdAanXlT9BTw3mdw= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= @@ -281,8 +262,6 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mitchellh/cli v1.1.1 h1:J64v/xD7Clql+JVKSvkYojLOXu1ibnY9ZjGLwSt/89w= github.com/mitchellh/cli v1.1.1/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= -github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod 
h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -314,8 +293,6 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.1 h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI= -github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E= github.com/pquerna/otp v1.3.0 h1:oJV/SkzR33anKXwQU3Of42rL4wbrffP4uvUf1SvS5Xs= github.com/pquerna/otp v1.3.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -323,10 +300,7 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -335,11 +309,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= -github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmihailenco/msgpack v4.0.1+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= @@ -347,13 +319,9 @@ github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0B github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= -github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= github.com/zclconf/go-cty v1.2.1 h1:vGMsygfmeCl4Xb6OA5U5XVAaQZ69FvoG7X2jUtQujb8= github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty-yaml v1.0.1 h1:up11wlgAaDvlAGENcFDnZgkn0qUJurso7k6EpURKNF8= -github.com/zclconf/go-cty-yaml v1.0.1/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -415,7 +383,6 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= From 94b4f5865333caa3bacc28e5ffa6c557af19ee41 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 11 Dec 2020 15:26:11 -0500 Subject: [PATCH 0312/1212] ds/lakeformation_resource: New data source --- aws/data_source_aws_lakeformation_resource.go | 61 +++++++++++ ..._source_aws_lakeformation_resource_test.go | 101 ++++++++++++++++++ aws/provider.go | 1 + .../d/lakeformation_resource.html.markdown | 30 ++++++ 4 files changed, 193 insertions(+) create mode 100644 aws/data_source_aws_lakeformation_resource.go create mode 100644 aws/data_source_aws_lakeformation_resource_test.go create mode 100644 website/docs/d/lakeformation_resource.html.markdown diff --git a/aws/data_source_aws_lakeformation_resource.go b/aws/data_source_aws_lakeformation_resource.go new file mode 100644 index 00000000000..5b2d0d2db47 --- /dev/null +++ b/aws/data_source_aws_lakeformation_resource.go @@ -0,0 +1,61 @@ +package aws + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/lakeformation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func dataSourceAwsLakeFormationResource() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsLakeFormationResourceRead, + + Schema: map[string]*schema.Schema{ + "last_modified": { + Type: schema.TypeString, + Computed: true, + }, + "resource_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, + }, + "role_arn": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAwsLakeFormationResourceRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lakeformationconn + + input := &lakeformation.DescribeResourceInput{} + + if v, ok := d.GetOk("resource_arn"); ok { + input.ResourceArn = 
aws.String(v.(string)) + } + + output, err := conn.DescribeResource(input) + + if err != nil { + return fmt.Errorf("error reading data source, Lake Formation Resource (resource_arn: %s): %w", aws.StringValue(input.ResourceArn), err) + } + + if output == nil || output.ResourceInfo == nil { + return fmt.Errorf("error reading data source, Lake Formation Resource: empty response") + } + + d.SetId(aws.StringValue(input.ResourceArn)) + // d.Set("resource_arn", output.ResourceInfo.ResourceArn) // output not including resource arn currently + d.Set("role_arn", output.ResourceInfo.RoleArn) + if output.ResourceInfo.LastModified != nil { // output not including last modified currently + d.Set("last_modified", output.ResourceInfo.LastModified.Format(time.RFC3339)) + } + + return nil +} diff --git a/aws/data_source_aws_lakeformation_resource_test.go b/aws/data_source_aws_lakeformation_resource_test.go new file mode 100644 index 00000000000..73d8a2c96ea --- /dev/null +++ b/aws/data_source_aws_lakeformation_resource_test.go @@ -0,0 +1,101 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/service/lakeformation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccAWSLakeFormationResourceDataSource_basic(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + dataSourceName := "data.aws_lakeformation_resource.test" + resourceName := "aws_lakeformation_resource.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) }, + ProviderFactories: testAccProviderFactories, + CheckDestroy: testAccCheckAWSLakeFormationResourceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLakeFormationResourceDataSourceConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "resource_arn", resourceName, "resource_arn"), + resource.TestCheckResourceAttrPair(dataSourceName, "role_arn", resourceName, "role_arn"), + ), + }, + }, + }) +} + +func testAccAWSLakeFormationResourceDataSourceConfig_basic(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_iam_role" "test" { + name = %[1]q + path = "/test/" + + assume_role_policy = < Date: Fri, 11 Dec 2020 15:28:07 -0500 Subject: [PATCH 0313/1212] resource/lakeformation_resource: Improve docs, error messages (minor) --- aws/resource_aws_lakeformation_resource.go | 10 +++------- website/docs/r/lakeformation_resource.html.markdown | 2 -- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/aws/resource_aws_lakeformation_resource.go b/aws/resource_aws_lakeformation_resource.go index 6aad389c111..488b1832a2f 100644 --- a/aws/resource_aws_lakeformation_resource.go +++ b/aws/resource_aws_lakeformation_resource.go @@ -76,21 +76,17 @@ func resourceAwsLakeFormationResourceRead(d *schema.ResourceData, meta interface output, err := conn.DescribeResource(input) if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, lakeformation.ErrCodeEntityNotFoundException) { - log.Printf("[WARN] Lake Formation Resource (%s) not found, removing from state", d.Id()) + log.Printf("[WARN] Resource Lake Formation Resource (%s) not found, removing from state", d.Id()) d.SetId("") return nil } if err != nil { - return fmt.Errorf("error getting Lake Formation Resource (%s): %w", d.Id(), err) + return fmt.Errorf("error reading resource Lake 
Formation Resource (%s): %w", d.Id(), err) } if output == nil || output.ResourceInfo == nil { - return fmt.Errorf("error getting Lake Formation Resource (%s): empty response", d.Id()) - } - - if err != nil { - return fmt.Errorf("error reading Lake Formation Resource (%s): %w", d.Id(), err) + return fmt.Errorf("error reading resource Lake Formation Resource (%s): empty response", d.Id()) } // d.Set("resource_arn", output.ResourceInfo.ResourceArn) // output not including resource arn currently diff --git a/website/docs/r/lakeformation_resource.html.markdown b/website/docs/r/lakeformation_resource.html.markdown index 6444cbe713f..22894ea64d2 100644 --- a/website/docs/r/lakeformation_resource.html.markdown +++ b/website/docs/r/lakeformation_resource.html.markdown @@ -26,8 +26,6 @@ resource "aws_lakeformation_resource" "example" { ## Argument Reference -The following arguments are required: - * `resource_arn` – (Required) Amazon Resource Name (ARN) of the resource, an S3 path. * `role_arn` – (Optional) Role that has read/write access to the resource. If not provided, the Lake Formation service-linked role must exist and is used. From 5db1eb9afe01fdbf83952038817520931b985d71 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 11 Dec 2020 17:39:21 -0500 Subject: [PATCH 0314/1212] docs/resource/lakeformation_permissions: Rework for design --- .../r/lakeformation_permissions.html.markdown | 193 +++++------------- 1 file changed, 53 insertions(+), 140 deletions(-) diff --git a/website/docs/r/lakeformation_permissions.html.markdown b/website/docs/r/lakeformation_permissions.html.markdown index 329dff20fdd..52080812b7c 100644 --- a/website/docs/r/lakeformation_permissions.html.markdown +++ b/website/docs/r/lakeformation_permissions.html.markdown @@ -3,138 +3,40 @@ subcategory: "Lake Formation" layout: "aws" page_title: "AWS: aws_lakeformation_permissions" description: |- - Manages the permissions that a principal has on an AWS Glue Data Catalog resource (such as AWS Glue database or AWS Glue tables) + Grants permissions to the principal to access metadata in the Data Catalog and data organized in underlying data storage such as Amazon S3. --- -# Resource: aws_lakeformation_resource +# Resource: aws_lakeformation_permissions -Manages the permissions that a principal has on an AWS Glue Data Catalog resource (such as AWS Glue database or AWS Glue tables). +Grants permissions to the principal to access metadata in the Data Catalog and data organized in underlying data storage such as Amazon S3. Permissions are granted to a principal, in a Data Catalog, relative to a Lake Formation resource, which includes the Data Catalog, databases, and tables. For more information, see [Security and Access Control to Metadata and Data in Lake Formation](https://docs.aws.amazon.com/lake-formation/latest/dg/security-data-access.html). 
-## Example Usage - -### Granting permissions on Lake Formation resource - -```hcl -data "aws_iam_role" "example" { - name = "existing_lakeformation_role" -} - -data "aws_s3_bucket" "example" { - bucket = "existing_bucket" -} - -resource "aws_lakeformation_resource" "example" { - resource_arn = data.aws_s3_bucket.example.arn - use_service_linked_role = true -} - -resource "aws_lakeformation_permissions" "example" { - permissions = ["DATA_LOCATION_ACCESS"] - principal = data.aws_iam_role.example.arn - - location = aws_lakeformation_resource.example.resource_arn -} -``` - -### Granting permissions on Lake Formation catalog - -```hcl -data "aws_iam_role" "example" { - name = "existing_lakeformation_role" -} - -resource "aws_lakeformation_permissions" "example" { - permissions = ["CREATE_DATABASE"] - principal = data.aws_iam_role.example.arn -} -``` - -### Granting permissions on Lake Formation database - -```hcl -data "aws_iam_role" "example" { - name = "existing_lakeformation_role" -} +~> **NOTE:** Lake Formation grants implicit permissions to data lake administrators, database creators, and table creators. For more information, see [Implicit Lake Formation Permissions](https://docs.aws.amazon.com/lake-formation/latest/dg/implicit-permissions.html). -resource "aws_glue_catalog_database" "example" { - name = "example_database" -} - -resource "aws_lakeformation_permissions" "example" { - permissions = ["ALTER", "CREATE_TABLE", "DROP"] - principal = data.aws_iam_role.example.arn - - database = aws_glue_catalog_database.example.name -} -``` +## Example Usage -### Granting permissions on Lake Formation table +### Grant Permissions For A Lake Formation S3 Resource ```hcl -data "aws_iam_role" "example" { - name = "existing_lakeformation_role" -} - -resource "aws_glue_catalog_database" "example" { - name = "example_database" -} - -resource "aws_glue_catalog_table" "example" { - name = "example_table" - database_name = aws_glue_catalog_database.example.name -} - -resource "aws_lakeformation_permissions" "example" { - permissions = ["INSERT", "DELETE", "SELECT"] - permissions_with_grant_option = ["SELECT"] - principal = data.aws_iam_role.example.arn +resource "aws_lakeformation_permissions" "test" { + principal_arn = aws_iam_role.workflow_role.arn + permissions   = ["ALL"] - table { - database = aws_glue_catalog_table.example.database_name - name = aws_glue_catalog_table.example.name + data_location { + resource_arn = aws_lakeformation_resource.test.resource_arn } } ``` -### Granting permissions on Lake Formation columns +### Grant Permissions For A Glue Catalog Database ```hcl -data "aws_iam_role" "example" { - name = "existing_lakeformation_role" -} - -resource "aws_glue_catalog_database" "example" { - name = "example_database" -} - -resource "aws_glue_catalog_table" "example" { - name = "example_table" - database_name = aws_glue_catalog_database.example.name - - storage_descriptor { - columns { - name = "event" - type = "string" - } - columns { - name = "timestamp" - type = "date" - } - columns { - name = "value" - type = "double" - } - } -} +resource "aws_lakeformation_permissions" "test" { + principal_arn = aws_iam_role.workflow_role.arn + permissions   = ["CREATE_TABLE", "ALTER", "DROP"] -resource "aws_lakeformation_permissions" "example" { - permissions = ["SELECT"] - principal = data.aws_iam_role.example.arn - - table { - database = aws_glue_catalog_table.example.database_name - name = aws_glue_catalog_table.example.name - column_names = ["event", "timestamp"] + database { + name       = 
aws_glue_catalog_database.test.name + catalog_id = "110376042874" } } ``` @@ -143,51 +45,62 @@ resource "aws_lakeformation_permissions" "example" { The following arguments are required: -* `permissions` – (Required) The permissions granted. - -* `principal` – (Required) The AWS Lake Formation principal. +* `permissions` – (Required) List of permissions granted to the principal. Valid values include `ALL`, `ALTER`, `CREATE_DATABASE`, `CREATE_TABLE`, `DATA_LOCATION_ACCESS`, `DELETE`, `DESCRIBE`, `DROP`, `INSERT`, and `SELECT`. For details on each permission, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html). +* `principal_arn` – (Required) Principal to be granted the permissions on the resource. Supported principals are IAM users or IAM roles. The following arguments are optional: -* `catalog_id` – (Optional) The identifier for the Data Catalog. By default, the account ID. +* `catalog_resource` - (Optional) Whether the permissions are to be granted for the Data Catalog. Defaults to `false`. +* `catalog_id` – (Optional) Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. +* `data_location` - (Optional) Configuration block for data location configuration. Detailed below. +* `database` - (Optional) Configuration block for database configuration. Detailed below. +* `permissions_with_grant_option` - (Optional) Subset of `permissions` which the principal may also grant to other principals. +* `table` - (Optional) Configuration block for table configuration. Detailed below. +* `table_with_columns` - (Optional) Configuration block for table with columns configuration. Detailed below. + +### data_location -* `permissions_with_grant_option` – (Optional) Indicates whether to grant the ability to grant permissions (as a subset of permissions granted)s. +The following argument is required: -* `database` – (Optional) The name of the database resource. Unique to the Data Catalog. A database is a set of associated table definitions organized into a logical group. +* `resource_arn` – (Required) Amazon Resource Name (ARN) that uniquely identifies the data location resource. -* `location` – (Optional) The Amazon Resource Name (ARN) of the resource (data location). +The following argument is optional: -* `table` – (Optional) A structure for the table object. A table is a metadata definition that represents your data. +* `catalog_id` - (Optional) Identifier for the Data Catalog where the location is registered with Lake Formation. By default, it is the account ID of the caller. -Only one of `database`, `location`, `table` can be specified at a time. If none of them is specified, permissions will be set at catalog level. See bellow for available permissions for each resource. +### database -The `table` object supports the following: +The following argument is required: -* `database` – (Required) The name of the database for the table. +* `name` – (Required) Name of the database resource. Unique to the Data Catalog. -* `table` – (Required) The name of the table. +The following argument is optional: -* `column_names` - (Optional) The list of column names for the table. +* `catalog_id` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. -* `excluded_column_names` - (Optional) Excludes column names. Any column with this name will be excluded. 
+### table -The following summarizes the available Lake Formation permissions on Data Catalog resources: +The following argument is required: -* `DATA_LOCATION_ACCESS` on registered location resources, +* `database_name` – (Required) Name of the database for the table. Unique to a Data Catalog. -* `CREATE_DATABASE` on catalog, +The following arguments are optional: -* `CREATE_TABLE`, `ALTER`, `DROP` on databases, +* `catalog_id` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. +* `name` - (Optional) Name of the table. Not including the table name results in a wildcard representing every table under a database. -* `ALTER`, `INSERT`, `DELETE`, `DROP`, `SELECT` on tables, +### table_with_columns -* `SELECT` on columns. +The following arguments are required: -`INSERT`, `DELETE`, `SELECT` permissions apply to the underlying data, the others to the metadata. +* `database_name` – (Required) Name of the database for the table with columns resource. Unique to the Data Catalog. +* `name` – (Required) Name of the table resource. -There is also a special permission `ALL`, that enables a principal to perform every supported Lake Formation operation on the database or table on which it is granted. +The following arguments are optional: -For details on each permission, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html). +* `catalog_id` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. +* `column_names` - (Optional) List of column names for the table. At least one of `column_names` or `excluded_column_names` is required. +* `excluded_column_names` - (Optional) List of column names for the table to exclude. At least one of `column_names` or `excluded_column_names` is required. -~> **NOTE:** Data lake administrators and database creators have implicit Lake Formation permissions. See [Implicit Lake Formation Permissions](https://docs.aws.amazon.com/lake-formation/latest/dg/implicit-permissions.html) for more information. +## Attributes Reference +In addition to the above arguments, no attributes are exported. 
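As a rough sketch of how the `table_with_columns` block documented above composes with the required arguments, a column-level `SELECT` grant might look like the following. The IAM role, database, and table names here are hypothetical placeholders rather than part of this changeset:

```hcl
resource "aws_glue_catalog_database" "example" {
  name = "example_database"
}

resource "aws_glue_catalog_table" "example" {
  name          = "example_table"
  database_name = aws_glue_catalog_database.example.name
}

resource "aws_lakeformation_permissions" "example" {
  principal_arn = aws_iam_role.example.arn
  permissions   = ["SELECT"]

  table_with_columns {
    database_name = aws_glue_catalog_table.example.database_name
    name          = aws_glue_catalog_table.example.name
    column_names  = ["event", "timestamp"]
  }
}
```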
From 60b7aef835a8f90a801810f397fe0611870e1c4c Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Tue, 15 Dec 2020 18:07:56 -0500 Subject: [PATCH 0315/1212] resource/lakeformation_permissions: Align with design and conventions --- aws/resource_aws_lakeformation_permissions.go | 595 +++++++++++------- ...urce_aws_lakeformation_permissions_test.go | 234 ++++++- .../r/lakeformation_permissions.html.markdown | 4 +- 3 files changed, 581 insertions(+), 252 deletions(-) diff --git a/aws/resource_aws_lakeformation_permissions.go b/aws/resource_aws_lakeformation_permissions.go index 841385543cf..b3687a16a49 100644 --- a/aws/resource_aws_lakeformation_permissions.go +++ b/aws/resource_aws_lakeformation_permissions.go @@ -2,42 +2,75 @@ package aws import ( "fmt" - "time" + "log" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lakeformation" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/hashcode" ) -func AwsLakeFormationPermissions() []string { - return []string{ - lakeformation.PermissionAll, - lakeformation.PermissionSelect, - lakeformation.PermissionAlter, - lakeformation.PermissionDrop, - lakeformation.PermissionDelete, - lakeformation.PermissionInsert, - lakeformation.PermissionCreateDatabase, - lakeformation.PermissionCreateTable, - lakeformation.PermissionDataLocationAccess, - } -} - func resourceAwsLakeFormationPermissions() *schema.Resource { return &schema.Resource{ - Create: resourceAwsLakeFormationPermissionsGrant, - Read: resourceAwsLakeFormationPermissionsList, - Delete: resourceAwsLakeFormationPermissionsRevoke, + Create: resourceAwsLakeFormationPermissionsCreate, + Read: resourceAwsLakeFormationPermissionsRead, + Update: resourceAwsLakeFormationPermissionsUpdate, + Delete: resourceAwsLakeFormationPermissionsDelete, Schema: map[string]*schema.Schema{ "catalog_id": { Type: schema.TypeString, ForceNew: true, Optional: true, - Computed: true, ValidateFunc: validateAwsAccountId, }, + "catalog_resource": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "data_location": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "catalog_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateAwsAccountId, + }, + "resource_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, + }, + }, + }, + }, + "database": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "catalog_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateAwsAccountId, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, "permissions": { Type: schema.TypeList, Required: true, @@ -45,7 +78,7 @@ func resourceAwsLakeFormationPermissions() *schema.Resource { MinItems: 1, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(AwsLakeFormationPermissions(), false), + ValidateFunc: validation.StringInSlice(lakeformation.Permission_Values(), false), }, }, "permissions_with_grant_option": { @@ -55,67 +88,79 @@ func resourceAwsLakeFormationPermissions() *schema.Resource { Computed: true, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(AwsLakeFormationPermissions(), false), + 
ValidateFunc: validation.StringInSlice(lakeformation.Permission_Values(), false), }, }, - "principal": { + "principal_arn": { Type: schema.TypeString, Required: true, ForceNew: true, ValidateFunc: validateArn, }, - "database": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"location", "table"}, - }, - "location": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validateArn, - ConflictsWith: []string{"database", "table"}, - }, "table": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"database", "location"}, - MinItems: 0, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "database": { + "catalog_id": { Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.NoZeroValues, + Optional: true, + Computed: true, + ValidateFunc: validateAwsAccountId, + }, + "database_name": { + Type: schema.TypeString, + Required: true, }, "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + }, + }, + "table_with_columns": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "catalog_id": { Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.NoZeroValues, + Optional: true, + Computed: true, + ValidateFunc: validateAwsAccountId, }, "column_names": { Type: schema.TypeList, Optional: true, - ForceNew: true, + MinItems: 1, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, }, + "database_name": { + Type: schema.TypeString, + Required: true, + }, "excluded_column_names": { Type: schema.TypeList, Optional: true, - ForceNew: true, + MinItems: 1, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, }, }, + "name": { + Type: schema.TypeString, + Required: true, + }, }, }, }, @@ -123,129 +168,136 @@ func resourceAwsLakeFormationPermissions() *schema.Resource { } } -func resourceAwsLakeFormationPermissionsGrant(d *schema.ResourceData, meta interface{}) error { +func resourceAwsLakeFormationPermissionsCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).lakeformationconn - catalogId := createAwsDataCatalogId(d, meta.(*AWSClient).accountid) - resource := expandAwsLakeFormationResource(d) input := &lakeformation.GrantPermissionsInput{ - CatalogId: aws.String(catalogId), Permissions: expandStringList(d.Get("permissions").([]interface{})), - Principal: expandAwsLakeFormationPrincipal(d), - Resource: resource, + Principal: &lakeformation.DataLakePrincipal{ + DataLakePrincipalIdentifier: aws.String(d.Get("principal_arn").(string)), + }, } - if vs, ok := d.GetOk("permissions_with_grant_option"); ok { - input.PermissionsWithGrantOption = expandStringList(vs.([]interface{})) + + if v, ok := d.GetOk("catalog_id"); ok { + input.CatalogId = aws.String(v.(string)) } - _, err := conn.GrantPermissions(input) - if err != nil { - return fmt.Errorf("Error granting LakeFormation Permissions: %s", err) + if v, ok := d.GetOk("permissions_with_grant_option"); ok { + input.PermissionsWithGrantOption = expandStringList(v.([]interface{})) } - d.SetId(fmt.Sprintf("lakeformation:resource:%s:%s", catalogId, time.Now().UTC().String())) - d.Set("catalog_id", catalogId) + input.Resource = expandLakeFormationResource(d, false) - return 
resourceAwsLakeFormationPermissionsList(d, meta) -} + output, err := conn.GrantPermissions(input) -func resourceAwsLakeFormationPermissionsList(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).lakeformationconn - catalogId := d.Get("catalog_id").(string) - - // This operation does not support getting privileges on a table with columns. - // Instead, call this operation on the table, and the operation returns the - // table and the table w columns. - resource := expandAwsLakeFormationResource(d) - isTableWithColumnsResource := false - if table := resource.TableWithColumns; table != nil { - resource.Table = &lakeformation.TableResource{ - DatabaseName: resource.TableWithColumns.DatabaseName, - Name: resource.TableWithColumns.Name, - } - resource.TableWithColumns = nil - isTableWithColumnsResource = true + if err != nil { + return fmt.Errorf("error creating Lake Formation Permissions (input: %v): %w", input, err) } - var resourceType string - if table := resource.Catalog; table != nil { - resourceType = lakeformation.DataLakeResourceTypeCatalog - } else if location := resource.DataLocation; location != nil { - resourceType = lakeformation.DataLakeResourceTypeDataLocation - } else if DB := resource.Database; DB != nil { - resourceType = lakeformation.DataLakeResourceTypeDatabase - } else { - resourceType = lakeformation.DataLakeResourceTypeTable + if output == nil { + return fmt.Errorf("error creating Lake Formation Permissions: empty response") } + d.SetId(fmt.Sprintf("%d", hashcode.String(input.String()))) + + return resourceAwsLakeFormationPermissionsRead(d, meta) +} + +func resourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lakeformationconn + + // filter results by principal and permissions input := &lakeformation.ListPermissionsInput{ - CatalogId: aws.String(catalogId), - Principal: expandAwsLakeFormationPrincipal(d), - Resource: resource, - ResourceType: &resourceType, + Principal: &lakeformation.DataLakePrincipal{ + DataLakePrincipalIdentifier: aws.String(d.Get("principal_arn").(string)), + }, } - out, err := conn.ListPermissions(input) - if err != nil { - return fmt.Errorf("Error listing LakeFormation Permissions: %s", err) + if v, ok := d.GetOk("catalog_id"); ok { + input.CatalogId = aws.String(v.(string)) } - permissions := out.PrincipalResourcePermissions - if len(permissions) == 0 { - return fmt.Errorf("Error no LakeFormation Permissions found: %s", input) - } + input.Resource = expandLakeFormationResource(d, true) - // This operation does not support getting privileges on a table with columns. - // Instead, call this operation on the table, and the operation returns the - // table and the table w columns. 
- if isTableWithColumnsResource { - filtered := make([]*lakeformation.PrincipalResourcePermissions, 0) - for _, p := range permissions { - if table := p.Resource.TableWithColumns; table != nil { - filtered = append(filtered, p) + log.Printf("[DEBUG] Reading Lake Formation permissions: %v", input) + var principalResourcePermissions []*lakeformation.PrincipalResourcePermissions + + err := conn.ListPermissionsPages(input, func(resp *lakeformation.ListPermissionsOutput, lastPage bool) bool { + for _, permission := range resp.PrincipalResourcePermissions { + if permission == nil { + continue } + + principalResourcePermissions = append(principalResourcePermissions, permission) } - permissions = filtered - } + return !lastPage + }) - permissionsHead := permissions[0] - d.Set("catalog_id", catalogId) - d.Set("principal", permissionsHead.Principal.DataLakePrincipalIdentifier) - if dataLocation := permissionsHead.Resource.DataLocation; dataLocation != nil { - d.Set("location", dataLocation.ResourceArn) - } - if database := permissionsHead.Resource.Database; database != nil { - d.Set("database", database.Name) - } - if table := permissionsHead.Resource.Table; table != nil { - d.Set("table", flattenAWSLakeFormationTable(table)) + if err != nil { + return fmt.Errorf("error reading Lake Formation permissions: %w", err) } - if table := permissionsHead.Resource.TableWithColumns; table != nil { - d.Set("table", flattenAWSLakeFormationTableWithColumns(table)) + + if len(principalResourcePermissions) > 1 { + return fmt.Errorf("error reading Lake Formation permissions: %s", "multiple permissions found") } - var allPermissions, allPermissionsWithGrant []*string - for _, p := range permissions { - allPermissions = append(allPermissions, p.Permissions...) - allPermissionsWithGrant = append(allPermissionsWithGrant, p.PermissionsWithGrantOption...) 
+ + for _, permissions := range principalResourcePermissions { + d.Set("principal_arn", permissions.Principal.DataLakePrincipalIdentifier) + d.Set("permissions", permissions.Permissions) + d.Set("permissions_with_grant_option", permissions.PermissionsWithGrantOption) + + if permissions.Resource.Catalog != nil { + d.Set("catalog_resource", true) + } + + if permissions.Resource.DataLocation != nil { + d.Set("data_location", []interface{}{flattenLakeFormationDataLocationResource(permissions.Resource.DataLocation)}) + } else { + d.Set("data_location", nil) + } + + if permissions.Resource.Database != nil { + d.Set("database", []interface{}{flattenLakeFormationDatabaseResource(permissions.Resource.Database)}) + } else { + d.Set("database", nil) + } + + // table with columns permissions will include the table and table with columns + if permissions.Resource.TableWithColumns != nil { + d.Set("table_with_columns", []interface{}{flattenLakeFormationTableWithColumnsResource(permissions.Resource.TableWithColumns)}) + } else if permissions.Resource.Table != nil { + d.Set("table_with_columns", nil) + d.Set("table", []interface{}{flattenLakeFormationTableResource(permissions.Resource.Table)}) + } else { + d.Set("table", nil) + } } return nil } -func resourceAwsLakeFormationPermissionsRevoke(d *schema.ResourceData, meta interface{}) error { +func resourceAwsLakeFormationPermissionsUpdate(d *schema.ResourceData, meta interface{}) error { + return nil +} + +func resourceAwsLakeFormationPermissionsDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).lakeformationconn - catalogId := d.Get("catalog_id").(string) input := &lakeformation.RevokePermissionsInput{ - CatalogId: aws.String(catalogId), Permissions: expandStringList(d.Get("permissions").([]interface{})), - Principal: expandAwsLakeFormationPrincipal(d), - Resource: expandAwsLakeFormationResource(d), + Principal: &lakeformation.DataLakePrincipal{ + DataLakePrincipalIdentifier: aws.String(d.Get("principal_arn").(string)), + }, } - if vs, ok := d.GetOk("permissions_with_grant_option"); ok { - input.PermissionsWithGrantOption = expandStringList(vs.([]interface{})) + + input.Resource = expandLakeFormationResource(d, false) + + if v, ok := d.GetOk("catalog_id"); ok { + input.CatalogId = aws.String(v.(string)) + } + + if v, ok := d.GetOk("permissions_with_grant_option"); ok { + input.PermissionsWithGrantOption = expandStringList(v.([]interface{})) } _, err := conn.RevokePermissions(input) @@ -256,111 +308,212 @@ func resourceAwsLakeFormationPermissionsRevoke(d *schema.ResourceData, meta inte return nil } -func expandAwsLakeFormationPrincipal(d *schema.ResourceData) *lakeformation.DataLakePrincipal { - return &lakeformation.DataLakePrincipal{ - DataLakePrincipalIdentifier: aws.String(d.Get("principal").(string)), + +func expandLakeFormationResource(d *schema.ResourceData, squashTableWithColumns bool) *lakeformation.Resource { + res := &lakeformation.Resource{} + + if v, ok := d.GetOk("catalog_resource"); ok && v.(bool) { + res.Catalog = &lakeformation.CatalogResource{} } -} -func expandAwsLakeFormationResource(d *schema.ResourceData) *lakeformation.Resource { - if v, ok := d.GetOk("database"); ok { - databaseName := v.(string) - if len(databaseName) > 0 { - return &lakeformation.Resource{ - Database: &lakeformation.DatabaseResource{ - Name: aws.String(databaseName), - }, - } - } + if v, ok := d.GetOk("data_location"); ok && 
len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + res.DataLocation = expandLakeFormationDataLocationResource(v.([]interface{})[0].(map[string]interface{})) } - if v, ok := d.GetOk("location"); ok { - location := v.(string) - if len(location) > 0 { - return &lakeformation.Resource{ - DataLocation: &lakeformation.DataLocationResource{ - ResourceArn: aws.String(v.(string)), - }, - } - } + + if v, ok := d.GetOk("database"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + res.Database = expandLakeFormationDatabaseResource(v.([]interface{})[0].(map[string]interface{})) } - if vs, ok := d.GetOk("table"); ok { - tables := vs.([]interface{}) - if len(tables) > 0 { - table := tables[0].(map[string]interface{}) - resource := &lakeformation.Resource{} - var databaseName, tableName string - var columnNames, excludedColumnNames []interface{} - if x, ok := table["database"]; ok { - databaseName = x.(string) - } - if x, ok := table["name"]; ok { - tableName = x.(string) - } - if xs, ok := table["column_names"]; ok { - columnNames = xs.([]interface{}) - } - if xs, ok := table["excluded_column_names"]; ok { - excludedColumnNames = xs.([]interface{}) - } + if v, ok := d.GetOk("table"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + res.Table = expandLakeFormationTableResource(v.([]interface{})[0].(map[string]interface{})) + } - if len(columnNames) > 0 || len(excludedColumnNames) > 0 { - tableWithColumns := &lakeformation.TableWithColumnsResource{ - DatabaseName: aws.String(databaseName), - Name: aws.String(tableName), - } - if len(columnNames) > 0 { - tableWithColumns.ColumnNames = expandStringList(columnNames) - } - if len(excludedColumnNames) > 0 { - tableWithColumns.ColumnWildcard = &lakeformation.ColumnWildcard{ - ExcludedColumnNames: expandStringList(excludedColumnNames), - } - } - resource.TableWithColumns = tableWithColumns - } else { - resource.Table = &lakeformation.TableResource{ - DatabaseName: aws.String(databaseName), - Name: aws.String(tableName), - } - } - return resource + if v, ok := d.GetOk("table_with_columns"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + if squashTableWithColumns { + // ListPermissions does not support getting privileges by tables with columns. Instead, + // use the table which will return both table and table with columns. 
+ res.Table = expandLakeFormationTableResource(v.([]interface{})[0].(map[string]interface{})) + } else { + res.TableWithColumns = expandLakeFormationTableWithColumnsResource(v.([]interface{})[0].(map[string]interface{})) } } - return &lakeformation.Resource{ - Catalog: &lakeformation.CatalogResource{}, + + return res +} + +func expandLakeFormationDataLocationResource(tfMap map[string]interface{}) *lakeformation.DataLocationResource { + if tfMap == nil { + return nil + } + + apiObject := &lakeformation.DataLocationResource{} + + if v, ok := tfMap["catalog_id"].(string); ok && v != "" { + apiObject.CatalogId = aws.String(v) + } + + if v, ok := tfMap["resource_arn"].(string); ok && v != "" { + apiObject.ResourceArn = aws.String(v) } + + return apiObject } -func flattenAWSLakeFormationTable(tb *lakeformation.TableResource) map[string]interface{} { - m := make(map[string]interface{}) +func flattenLakeFormationDataLocationResource(apiObject *lakeformation.DataLocationResource) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.CatalogId; v != nil { + tfMap["catalog_id"] = aws.StringValue(v) + } - m["database"] = tb.DatabaseName - m["name"] = tb.Name + if v := apiObject.ResourceArn; v != nil { + tfMap["resource_arn"] = aws.StringValue(v) + } - return m + return tfMap } -func flattenAWSLakeFormationTableWithColumns(tb *lakeformation.TableWithColumnsResource) map[string]interface{} { - m := make(map[string]interface{}) +func expandLakeFormationDatabaseResource(tfMap map[string]interface{}) *lakeformation.DatabaseResource { + if tfMap == nil { + return nil + } + + apiObject := &lakeformation.DatabaseResource{} - m["database"] = tb.DatabaseName - m["name"] = tb.Name - if columnNames := tb.ColumnNames; columnNames != nil { - m["column_names"] = columnNames + if v, ok := tfMap["catalog_id"].(string); ok && v != "" { + apiObject.CatalogId = aws.String(v) } - if columnWildcard := tb.ColumnWildcard; columnWildcard != nil { - m["excluded_column_names"] = columnWildcard.ExcludedColumnNames + + if v, ok := tfMap["name"].(string); ok && v != "" { + apiObject.Name = aws.String(v) } - return m + return apiObject } -func createAwsDataCatalogId(d *schema.ResourceData, accountId string) (catalogId string) { - if inputCatalogId, ok := d.GetOkExists("catalog_id"); ok { - catalogId = inputCatalogId.(string) +func flattenLakeFormationDatabaseResource(apiObject *lakeformation.DatabaseResource) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.CatalogId; v != nil { + tfMap["catalog_id"] = aws.StringValue(v) + } + + if v := apiObject.Name; v != nil { + tfMap["name"] = aws.StringValue(v) + } + + return tfMap +} + +func expandLakeFormationTableResource(tfMap map[string]interface{}) *lakeformation.TableResource { + if tfMap == nil { + return nil + } + + apiObject := &lakeformation.TableResource{} + + if v, ok := tfMap["catalog_id"].(string); ok && v != "" { + apiObject.CatalogId = aws.String(v) + } + + if v, ok := tfMap["database_name"].(string); ok && v != "" { + apiObject.DatabaseName = aws.String(v) + } + + if v, ok := tfMap["name"].(string); ok && v != "" { + apiObject.Name = aws.String(v) } else { - catalogId = accountId + apiObject.TableWildcard = &lakeformation.TableWildcard{} + } + + return apiObject +} + +func flattenLakeFormationTableResource(apiObject *lakeformation.TableResource) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := 
map[string]interface{}{} + + if v := apiObject.CatalogId; v != nil { + tfMap["catalog_id"] = aws.StringValue(v) + } + + if v := apiObject.DatabaseName; v != nil { + tfMap["database_name"] = aws.StringValue(v) + } + + if v := apiObject.Name; v != nil { + tfMap["name"] = aws.StringValue(v) + } + + return tfMap +} + +func expandLakeFormationTableWithColumnsResource(tfMap map[string]interface{}) *lakeformation.TableWithColumnsResource { + if tfMap == nil { + return nil + } + + apiObject := &lakeformation.TableWithColumnsResource{} + + if v, ok := tfMap["catalog_id"].(string); ok && v != "" { + apiObject.CatalogId = aws.String(v) + } + + if v, ok := tfMap["column_names"]; ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + apiObject.ColumnNames = expandStringList(v.([]interface{})) } - return + + if v, ok := tfMap["database_name"].(string); ok && v != "" { + apiObject.DatabaseName = aws.String(v) + } + + if v, ok := tfMap["excluded_column_names"]; ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + apiObject.ColumnWildcard = &lakeformation.ColumnWildcard{ + ExcludedColumnNames: expandStringList(v.([]interface{})), + } + } + + if v, ok := tfMap["name"].(string); ok && v != "" { + apiObject.Name = aws.String(v) + } + + return apiObject +} + +func flattenLakeFormationTableWithColumnsResource(apiObject *lakeformation.TableWithColumnsResource) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.CatalogId; v != nil { + tfMap["catalog_id"] = aws.StringValue(v) + } + + tfMap["column_names"] = flattenStringList(apiObject.ColumnNames) + + if v := apiObject.DatabaseName; v != nil { + tfMap["database_name"] = aws.StringValue(v) + } + + if v := apiObject.ColumnWildcard; v != nil { + tfMap["excluded_column_names"] = flattenStringList(v.ExcludedColumnNames) + } + + if v := apiObject.Name; v != nil { + tfMap["name"] = aws.StringValue(v) + } + + return tfMap } diff --git a/aws/resource_aws_lakeformation_permissions_test.go b/aws/resource_aws_lakeformation_permissions_test.go index 0e3e05be91e..a2b39e7743e 100644 --- a/aws/resource_aws_lakeformation_permissions_test.go +++ b/aws/resource_aws_lakeformation_permissions_test.go @@ -11,6 +11,50 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) +func TestAccAWSLakeFormationPermissions_basic(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceAddr := "aws_lakeformation_resource.test" + bucketAddr := "aws_s3_bucket.test" + roleAddr := "aws_iam_role.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLakeFormationPermissionsDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLakeFormationPermissionsConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLakeFormationPermissionsExists(resourceAddr), + resource.TestCheckResourceAttrPair(resourceAddr, "role_arn", roleAddr, "arn"), + resource.TestCheckResourceAttrPair(resourceAddr, "resource_arn", bucketAddr, "arn"), + ), + }, + }, + }) +} + +func TestAccAWSLakeFormationPermissions_disappears(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_lakeformation_resource.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) }, + 
Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLakeFormationPermissionsDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLakeFormationPermissionsConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLakeFormationPermissionsExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsLakeFormationPermissions(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func TestAccAWSLakeFormationPermissions_full(t *testing.T) { rName := acctest.RandomWithPrefix("lakeformation-test-bucket") dName := acctest.RandomWithPrefix("lakeformation-test-db") @@ -25,7 +69,7 @@ func TestAccAWSLakeFormationPermissions_full(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testAccCheckAWSLakeFormationPermissionsRevoked, + CheckDestroy: testAccCheckAWSLakeFormationPermissionsDestroy, Steps: []resource.TestStep{ { Config: testAccAWSLakeFormationPermissionsConfig_catalog(), @@ -104,6 +148,166 @@ func TestAccAWSLakeFormationPermissions_full(t *testing.T) { }) } +func testAccCheckAWSLakeFormationPermissionsDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).lakeformationconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_lakeformation_permissions" { + continue + } + + principal := rs.Primary.Attributes["principal"] + catalogId := rs.Primary.Attributes["catalog_id"] + + input := &lakeformation.ListPermissionsInput{ + CatalogId: aws.String(catalogId), + Principal: &lakeformation.DataLakePrincipal{ + DataLakePrincipalIdentifier: aws.String(principal), + }, + } + + out, err := conn.ListPermissions(input) + if err == nil { + fmt.Print(out) + return fmt.Errorf("Resource still registered: %s %s", catalogId, principal) + } + } + + return nil +} + +func testAccCheckAWSLakeFormationPermissionsExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("resource not found: %s", resourceName) + } + + conn := testAccProvider.Meta().(*AWSClient).lakeformationconn + + input := &lakeformation.ListPermissionsInput{ + MaxResults: aws.Int64(1), + Principal: &lakeformation.DataLakePrincipal{ + DataLakePrincipalIdentifier: aws.String(rs.Primary.Attributes["principal"]), + }, + } + + if rs.Primary.Attributes["catalog_resource"] == "true" { + input.ResourceType = aws.String(lakeformation.DataLakeResourceTypeCatalog) + } + + if rs.Primary.Attributes["data_location.#"] != "0" { + input.ResourceType = aws.String(lakeformation.DataLakeResourceTypeDataLocation) + } + + if rs.Primary.Attributes["database.#"] != "0" { + input.ResourceType = aws.String(lakeformation.DataLakeResourceTypeDatabase) + } + + if rs.Primary.Attributes["table.#"] != "0" { + input.ResourceType = aws.String(lakeformation.DataLakeResourceTypeTable) + } + + if rs.Primary.Attributes["table_with_columns.#"] != "0" { + input.ResourceType = aws.String(lakeformation.DataLakeResourceTypeTable) + } + + _, err := conn.ListPermissions(input) + + if err != nil { + return fmt.Errorf("error getting Lake Formation resource (%s): %w", rs.Primary.ID, err) + } + + return nil + } +} + +func testAccAWSLakeFormationPermissionsConfig_basic(rName string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_iam_role" "workflow_role" { + name = %[1]q + + assume_role_policy = < **NOTE:** Lake Formation grants implicit 
permissions to data lake administrators, database creators, and table creators. For more information, see [Implicit Lake Formation Permissions](https://docs.aws.amazon.com/lake-formation/latest/dg/implicit-permissions.html). +~> **NOTE:** This resource deals with explicitly granted permissions. Lake Formation grants implicit permissions to data lake administrators, database creators, and table creators. For more information, see [Implicit Lake Formation Permissions](https://docs.aws.amazon.com/lake-formation/latest/dg/implicit-permissions.html). ## Example Usage @@ -50,8 +50,8 @@ The following arguments are required: The following arguments are optional: -* `catalog` - (Optional) Whether the permissions are to be granted for the Data Catalog. Defaults to `false`. * `catalog_id` – (Optional) Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. +* `catalog_resource` - (Optional) Whether the permissions are to be granted for the Data Catalog. Defaults to `false`. * `data_location` - (Optional) Configuration block for data location configuration. Detailed below. * `database` - (Optional) Configuration block for database configuration. Detailed below. * `table` - (Optional) Configuration block for table configuration. Detailed below. From e139a59a57b50f4592c32021408fa0940f85c78c Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 17 Dec 2020 10:12:10 -0500 Subject: [PATCH 0316/1212] validators: Add ValidatePrincipal for Lake Formation principals --- aws/validators.go | 19 ++++++++++++++++++ aws/validators_test.go | 44 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+) diff --git a/aws/validators.go b/aws/validators.go index 526eb613492..e982a1e3f20 100644 --- a/aws/validators.go +++ b/aws/validators.go @@ -660,6 +660,25 @@ func validateAwsAccountId(v interface{}, k string) (ws []string, errors []error) return } +func validatePrincipal(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if value == "IAM_ALLOWED_PRINCIPALS" { + return ws, errors + } + + wsARN, errorsARN := validateArn(v, k) + ws = append(ws, wsARN...) + errors = append(errors, errorsARN...) + + pattern := `\d{12}:(role|user)/` + if !regexp.MustCompile(pattern).MatchString(value) { + errors = append(errors, fmt.Errorf("%q doesn't look like a user or role: %q", k, value)) + } + + return ws, errors +} + func validateArn(v interface{}, k string) (ws []string, errors []error) { value := v.(string) diff --git a/aws/validators_test.go b/aws/validators_test.go index 900c11308f2..e9941893c64 100644 --- a/aws/validators_test.go +++ b/aws/validators_test.go @@ -359,6 +359,50 @@ func TestValidateArn(t *testing.T) { } } +func TestValidatePrincipal(t *testing.T) { + v := "" + _, errors := validatePrincipal(v, "arn") + if len(errors) == 0 { + t.Fatalf("%q should not be validated as a principal %d: %q", v, len(errors), errors) + } + + validNames := []string{ + "IAM_ALLOWED_PRINCIPALS", // Special principal + "arn:aws-us-gov:iam::357342307427:role/tf-acc-test-3217321001347236965", // lintignore:AWSAT005 // IAM Role + "arn:aws:iam::123456789012:user/David", // lintignore:AWSAT005 // IAM User + "arn:aws-us-gov:iam:us-west-2:357342307427:role/tf-acc-test-3217321001347236965", // lintignore:AWSAT003,AWSAT005 // Non-global IAM Role? 
+ "arn:aws:iam:us-east-1:123456789012:user/David", // lintignore:AWSAT003,AWSAT005 // Non-global IAM User? + } + for _, v := range validNames { + _, errors := validatePrincipal(v, "arn") + if len(errors) != 0 { + t.Fatalf("%q should be a valid principal: %q", v, errors) + } + } + + invalidNames := []string{ + "IAM_NOT_ALLOWED_PRINCIPALS", // doesn't exist + "arn", + "123456789012", + "arn:aws", + "arn:aws:logs", //lintignore:AWSAT005 + "arn:aws:logs:region:*:*", //lintignore:AWSAT005 + "arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment", // lintignore:AWSAT003,AWSAT005 // not a user or role + "arn:aws:iam::aws:policy/CloudWatchReadOnlyAccess", // lintignore:AWSAT005 // not a user or role + "arn:aws:rds:eu-west-1:123456789012:db:mysql-db", // lintignore:AWSAT003,AWSAT005 // not a user or role + "arn:aws:s3:::my_corporate_bucket/exampleobject.png", // lintignore:AWSAT005 // not a user or role + "arn:aws:events:us-east-1:319201112229:rule/rule_name", // lintignore:AWSAT003,AWSAT005 // not a user or role + "arn:aws-us-gov:ec2:us-gov-west-1:123456789012:instance/i-12345678", // lintignore:AWSAT003,AWSAT005 // not a user or role + "arn:aws-us-gov:s3:::bucket/object", // lintignore:AWSAT005 // not a user or role + } + for _, v := range invalidNames { + _, errors := validatePrincipal(v, "arn") + if len(errors) == 0 { + t.Fatalf("%q should be an invalid principal", v) + } + } +} + func TestValidateEC2AutomateARN(t *testing.T) { validNames := []string{ "arn:aws:automate:us-east-1:ec2:reboot", //lintignore:AWSAT003,AWSAT005 From cb153218451031fa44cb1a6f904a3d77972b19f3 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 17 Dec 2020 10:16:41 -0500 Subject: [PATCH 0317/1212] resource/lakeformation: Use ValidatePrincipal for Lake Formation principals --- ...ce_aws_lakeformation_data_lake_settings.go | 4 +- aws/resource_aws_lakeformation_permissions.go | 10 ++-- ...urce_aws_lakeformation_permissions_test.go | 48 +++++++++++-------- .../r/lakeformation_permissions.html.markdown | 8 ++-- 4 files changed, 39 insertions(+), 31 deletions(-) diff --git a/aws/resource_aws_lakeformation_data_lake_settings.go b/aws/resource_aws_lakeformation_data_lake_settings.go index 51463b4b756..f03fc372177 100644 --- a/aws/resource_aws_lakeformation_data_lake_settings.go +++ b/aws/resource_aws_lakeformation_data_lake_settings.go @@ -48,7 +48,7 @@ func resourceAwsLakeFormationDataLakeSettings() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, - ValidateFunc: validation.NoZeroValues, // can be non-ARN, e.g. "IAM_ALLOWED_PRINCIPALS" + ValidateFunc: validatePrincipal, }, }, }, @@ -73,7 +73,7 @@ func resourceAwsLakeFormationDataLakeSettings() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, - ValidateFunc: validation.NoZeroValues, // can be non-ARN, e.g. 
"IAM_ALLOWED_PRINCIPALS" + ValidateFunc: validatePrincipal, }, }, }, diff --git a/aws/resource_aws_lakeformation_permissions.go b/aws/resource_aws_lakeformation_permissions.go index b3687a16a49..0002b132112 100644 --- a/aws/resource_aws_lakeformation_permissions.go +++ b/aws/resource_aws_lakeformation_permissions.go @@ -91,11 +91,11 @@ func resourceAwsLakeFormationPermissions() *schema.Resource { ValidateFunc: validation.StringInSlice(lakeformation.Permission_Values(), false), }, }, - "principal_arn": { + "principal": { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validateArn, + ValidateFunc: validatePrincipal, }, "table": { Type: schema.TypeList, @@ -174,7 +174,7 @@ func resourceAwsLakeFormationPermissionsCreate(d *schema.ResourceData, meta inte input := &lakeformation.GrantPermissionsInput{ Permissions: expandStringList(d.Get("permissions").([]interface{})), Principal: &lakeformation.DataLakePrincipal{ - DataLakePrincipalIdentifier: aws.String(d.Get("principal_arn").(string)), + DataLakePrincipalIdentifier: aws.String(d.Get("principal").(string)), }, } @@ -209,7 +209,7 @@ func resourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta interf // filter results by principal and permissions input := &lakeformation.ListPermissionsInput{ Principal: &lakeformation.DataLakePrincipal{ - DataLakePrincipalIdentifier: aws.String(d.Get("principal_arn").(string)), + DataLakePrincipalIdentifier: aws.String(d.Get("principal").(string)), }, } @@ -286,7 +286,7 @@ func resourceAwsLakeFormationPermissionsDelete(d *schema.ResourceData, meta inte input := &lakeformation.RevokePermissionsInput{ Permissions: expandStringList(d.Get("permissions").([]interface{})), Principal: &lakeformation.DataLakePrincipal{ - DataLakePrincipalIdentifier: aws.String(d.Get("principal_arn").(string)), + DataLakePrincipalIdentifier: aws.String(d.Get("principal").(string)), }, } diff --git a/aws/resource_aws_lakeformation_permissions_test.go b/aws/resource_aws_lakeformation_permissions_test.go index a2b39e7743e..f68c4f24e94 100644 --- a/aws/resource_aws_lakeformation_permissions_test.go +++ b/aws/resource_aws_lakeformation_permissions_test.go @@ -12,7 +12,7 @@ import ( ) func TestAccAWSLakeFormationPermissions_basic(t *testing.T) { - rName := acctest.RandomWithPrefix("tf-acc-test") + //rName := acctest.RandomWithPrefix("tf-acc-test") resourceAddr := "aws_lakeformation_resource.test" bucketAddr := "aws_s3_bucket.test" roleAddr := "aws_iam_role.test" @@ -23,7 +23,7 @@ func TestAccAWSLakeFormationPermissions_basic(t *testing.T) { CheckDestroy: testAccCheckAWSLakeFormationPermissionsDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSLakeFormationPermissionsConfig_basic(rName), + Config: testAccAWSLakeFormationPermissionsConfig_catalog(), Check: resource.ComposeTestCheckFunc( testAccCheckAWSLakeFormationPermissionsExists(resourceAddr), resource.TestCheckResourceAttrPair(resourceAddr, "role_arn", roleAddr, "arn"), @@ -294,16 +294,23 @@ resource "aws_lakeformation_resource" "test" { resource_arn = aws_s3_bucket.test.arn } +data "aws_caller_identity" "current" {} + +resource "aws_lakeformation_data_lake_settings" "test" { + data_lake_admins = [data.aws_caller_identity.current.arn] +} + // grants permissions to workflow role in catalog on bucket resource "aws_lakeformation_permissions" "grants" { - principal_arn = aws_iam_role.workflow_role.arn + principal_arn = data.aws_caller_identity.current.arn permissions = ["CREATE_DATABASE"] - data_location { - resource_arn = aws_s3_bucket.test.arn - } + 
//data_location { + // resource_arn = aws_s3_bucket.test.arn + //} + catalog_resource = true - depends_on = ["aws_lakeformation_resource.test"] + depends_on = ["aws_lakeformation_resource.test", "aws_iam_role.workflow_role", "aws_iam_role_policy_attachment.managed"] } `, rName) } @@ -316,17 +323,18 @@ data "aws_iam_role" "test" { name = "AWSServiceRoleForLakeFormationDataAccess" } -resource "aws_lakeformation_datalake_settings" "test" { - admins = [ +resource "aws_lakeformation_data_lake_settings" "test" { + data_lake_admins = [ data.aws_caller_identity.current.arn ] } resource "aws_lakeformation_permissions" "test" { permissions = ["CREATE_DATABASE"] - principal = data.aws_iam_role.test.arn + principal_arn = data.aws_iam_role.test.arn + catalog_resource = true - depends_on = ["aws_lakeformation_datalake_settings.test"] + depends_on = ["aws_lakeformation_data_lake_settings.test"] } ` } @@ -343,7 +351,7 @@ resource "aws_s3_bucket" "test" { bucket = %[1]q } -resource "aws_lakeformation_datalake_settings" "test" { +resource "aws_lakeformation_data_lake_settings" "test" { admins = [ data.aws_caller_identity.current.arn ] @@ -353,7 +361,7 @@ resource "aws_lakeformation_resource" "test" { resource_arn = aws_s3_bucket.test.arn use_service_linked_role = true - depends_on = ["aws_lakeformation_datalake_settings.test"] + depends_on = ["aws_lakeformation_data_lake_settings.test"] } resource "aws_lakeformation_permissions" "test" { @@ -362,7 +370,7 @@ resource "aws_lakeformation_permissions" "test" { location = aws_lakeformation_resource.test.resource_arn - depends_on = ["aws_lakeformation_datalake_settings.test"] + depends_on = ["aws_lakeformation_data_lake_settings.test"] } `, rName) } @@ -383,7 +391,7 @@ resource "aws_glue_catalog_database" "test" { name = %[2]q } -resource "aws_lakeformation_datalake_settings" "test" { +resource "aws_lakeformation_data_lake_settings" "test" { admins = [ data.aws_caller_identity.current.arn ] @@ -396,7 +404,7 @@ resource "aws_lakeformation_permissions" "test" { database = aws_glue_catalog_database.test.name - depends_on = ["aws_lakeformation_datalake_settings.test"] + depends_on = ["aws_lakeformation_data_lake_settings.test"] } `, rName, dName) } @@ -422,7 +430,7 @@ resource "aws_glue_catalog_table" "test" { database_name = aws_glue_catalog_database.test.name } -resource "aws_lakeformation_datalake_settings" "test" { +resource "aws_lakeformation_data_lake_settings" "test" { admins = [ data.aws_caller_identity.current.arn ] @@ -437,7 +445,7 @@ resource "aws_lakeformation_permissions" "test" { name = aws_glue_catalog_table.test.name } - depends_on = ["aws_lakeformation_datalake_settings.test"] + depends_on = ["aws_lakeformation_data_lake_settings.test"] } `, rName, dName, tName, permissions) } @@ -478,7 +486,7 @@ resource "aws_glue_catalog_table" "test" { } } -resource "aws_lakeformation_datalake_settings" "test" { +resource "aws_lakeformation_data_lake_settings" "test" { admins = [ data.aws_caller_identity.current.arn ] @@ -494,7 +502,7 @@ resource "aws_lakeformation_permissions" "test" { column_names = ["event", "timestamp"] } - depends_on = ["aws_lakeformation_datalake_settings.test"] + depends_on = ["aws_lakeformation_data_lake_settings.test"] } `, rName, dName, tName) } diff --git a/website/docs/r/lakeformation_permissions.html.markdown b/website/docs/r/lakeformation_permissions.html.markdown index 3ab1c1739df..ea37f9cf590 100644 --- a/website/docs/r/lakeformation_permissions.html.markdown +++ b/website/docs/r/lakeformation_permissions.html.markdown @@ -52,10 
+52,10 @@ The following arguments are optional: * `catalog_id` – (Optional) Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. * `catalog_resource` - (Optional) Whether the permissions are to be granted for the Data Catalog. Defaults to `false`. -* `data_location` - (Optional) Configuration block for data location configuration. Detailed below. -* `database` - (Optional) Configuration block for database configuration. Detailed below. -* `table` - (Optional) Configuration block for table configuration. Detailed below. -* `table_with_columns` - (Optional) Configuration block for table with columns configuration. Detailed below. +* `data_location` - (Optional) Configuration block for a data location resource. Detailed below. +* `database` - (Optional) Configuration block for a database resource. Detailed below. +* `table` - (Optional) Configuration block for a table resource. Detailed below. +* `table_with_columns` - (Optional) Configuration block for a table with columns resource. Detailed below. ### data_location From cd9e6c20e2f3b466fa8f6274335472bc34b50d1b Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 17 Dec 2020 15:39:36 -0500 Subject: [PATCH 0318/1212] resource/lakeformation_permissions: Eventual consistency issues --- aws/resource_aws_lakeformation_permissions.go | 92 +++++++-- ...urce_aws_lakeformation_permissions_test.go | 190 ++++++++++++++---- .../r/lakeformation_permissions.html.markdown | 3 + 3 files changed, 227 insertions(+), 58 deletions(-) diff --git a/aws/resource_aws_lakeformation_permissions.go b/aws/resource_aws_lakeformation_permissions.go index 0002b132112..748217d01cb 100644 --- a/aws/resource_aws_lakeformation_permissions.go +++ b/aws/resource_aws_lakeformation_permissions.go @@ -3,9 +3,11 @@ package aws import ( "fmt" "log" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lakeformation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/hashcode" @@ -15,7 +17,7 @@ func resourceAwsLakeFormationPermissions() *schema.Resource { return &schema.Resource{ Create: resourceAwsLakeFormationPermissionsCreate, Read: resourceAwsLakeFormationPermissionsRead, - Update: resourceAwsLakeFormationPermissionsUpdate, + Update: resourceAwsLakeFormationPermissionsCreate, Delete: resourceAwsLakeFormationPermissionsDelete, Schema: map[string]*schema.Schema{ @@ -188,7 +190,38 @@ func resourceAwsLakeFormationPermissionsCreate(d *schema.ResourceData, meta inte input.Resource = expandLakeFormationResource(d, false) - output, err := conn.GrantPermissions(input) + var output *lakeformation.GrantPermissionsOutput + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + var err error + output, err = conn.GrantPermissions(input) + if err != nil { + if isAWSErr(err, lakeformation.ErrCodeInvalidInputException, "Invalid principal") { + return resource.RetryableError(err) + } + if isAWSErr(err, lakeformation.ErrCodeInvalidInputException, "Grantee has no permissions") { + return resource.RetryableError(err) + } + if isAWSErr(err, lakeformation.ErrCodeInvalidInputException, "register the S3 path") { + return resource.RetryableError(err) + } + if isAWSErr(err, 
lakeformation.ErrCodeConcurrentModificationException, "") { + return resource.RetryableError(err) + } + if isAWSErr(err, lakeformation.ErrCodeOperationTimeoutException, "") { + return resource.RetryableError(err) + } + if isAWSErr(err, "AccessDeniedException", "is not authorized to access requested permissions") { + return resource.RetryableError(err) + } + + return resource.NonRetryableError(fmt.Errorf("error creating Lake Formation Permissions: %w", err)) + } + return nil + }) + + if isResourceTimeoutError(err) { + output, err = conn.GrantPermissions(input) + } if err != nil { return fmt.Errorf("error creating Lake Formation Permissions (input: %v): %w", input, err) @@ -206,7 +239,6 @@ func resourceAwsLakeFormationPermissionsCreate(d *schema.ResourceData, meta inte func resourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).lakeformationconn - // filter results by principal and permissions input := &lakeformation.ListPermissionsInput{ Principal: &lakeformation.DataLakePrincipal{ DataLakePrincipalIdentifier: aws.String(d.Get("principal").(string)), @@ -222,15 +254,25 @@ func resourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta interf log.Printf("[DEBUG] Reading Lake Formation permissions: %v", input) var principalResourcePermissions []*lakeformation.PrincipalResourcePermissions - err := conn.ListPermissionsPages(input, func(resp *lakeformation.ListPermissionsOutput, lastPage bool) bool { - for _, permission := range resp.PrincipalResourcePermissions { - if permission == nil { - continue - } + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + var err error + err = conn.ListPermissionsPages(input, func(resp *lakeformation.ListPermissionsOutput, lastPage bool) bool { + for _, permission := range resp.PrincipalResourcePermissions { + if permission == nil { + continue + } - principalResourcePermissions = append(principalResourcePermissions, permission) + principalResourcePermissions = append(principalResourcePermissions, permission) + } + return !lastPage + }) + if err != nil { + if isAWSErr(err, lakeformation.ErrCodeInvalidInputException, "Invalid principal") { + return resource.RetryableError(err) + } + return resource.NonRetryableError(fmt.Errorf("error creating Lake Formation Permissions: %w", err)) } - return !lastPage + return nil }) if err != nil { @@ -276,10 +318,6 @@ func resourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta interf return nil } -func resourceAwsLakeFormationPermissionsUpdate(d *schema.ResourceData, meta interface{}) error { - return nil -} - func resourceAwsLakeFormationPermissionsDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).lakeformationconn @@ -290,8 +328,6 @@ func resourceAwsLakeFormationPermissionsDelete(d *schema.ResourceData, meta inte }, } - input.Resource = expandLakeFormationResource(d, false) - if v, ok := d.GetOk("catalog_id"); ok { input.CatalogId = aws.String(v.(string)) } @@ -300,9 +336,29 @@ func resourceAwsLakeFormationPermissionsDelete(d *schema.ResourceData, meta inte input.PermissionsWithGrantOption = expandStringList(v.([]interface{})) } - _, err := conn.RevokePermissions(input) + input.Resource = expandLakeFormationResource(d, false) + + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + var err error + _, err = conn.RevokePermissions(input) + if err != nil { + if isAWSErr(err, lakeformation.ErrCodeInvalidInputException, "register the S3 path") { + return resource.RetryableError(err) 
+ } + if isAWSErr(err, lakeformation.ErrCodeConcurrentModificationException, "") { + return resource.RetryableError(err) + } + if isAWSErr(err, lakeformation.ErrCodeOperationTimeoutException, "") { + return resource.RetryableError(err) + } + + return resource.NonRetryableError(fmt.Errorf("unable to revoke Lake Formation Permissions: %w", err)) + } + return nil + }) + if err != nil { - return fmt.Errorf("Error revoking LakeFormation Permissions: %s", err) + return fmt.Errorf("unable to revoke LakeFormation Permissions (input: %v): %w", input, err) } return nil diff --git a/aws/resource_aws_lakeformation_permissions_test.go b/aws/resource_aws_lakeformation_permissions_test.go index f68c4f24e94..73eb6b2292a 100644 --- a/aws/resource_aws_lakeformation_permissions_test.go +++ b/aws/resource_aws_lakeformation_permissions_test.go @@ -3,6 +3,7 @@ package aws import ( "fmt" "testing" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lakeformation" @@ -12,10 +13,9 @@ import ( ) func TestAccAWSLakeFormationPermissions_basic(t *testing.T) { - //rName := acctest.RandomWithPrefix("tf-acc-test") - resourceAddr := "aws_lakeformation_resource.test" - bucketAddr := "aws_s3_bucket.test" - roleAddr := "aws_iam_role.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_lakeformation_permissions.test" + roleName := "aws_iam_role.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) }, @@ -23,11 +23,13 @@ func TestAccAWSLakeFormationPermissions_basic(t *testing.T) { CheckDestroy: testAccCheckAWSLakeFormationPermissionsDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSLakeFormationPermissionsConfig_catalog(), + Config: testAccAWSLakeFormationPermissionsConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLakeFormationPermissionsExists(resourceAddr), - resource.TestCheckResourceAttrPair(resourceAddr, "role_arn", roleAddr, "arn"), - resource.TestCheckResourceAttrPair(resourceAddr, "resource_arn", bucketAddr, "arn"), + testAccCheckAWSLakeFormationPermissionsExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "principal", roleName, "arn"), + resource.TestCheckResourceAttr(resourceName, "permissions.#", "1"), + resource.TestCheckResourceAttr(resourceName, "permissions.0", "CREATE_DATABASE"), + resource.TestCheckResourceAttr(resourceName, "catalog_resource", "true"), ), }, }, @@ -36,7 +38,7 @@ func TestAccAWSLakeFormationPermissions_basic(t *testing.T) { func TestAccAWSLakeFormationPermissions_disappears(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") - resourceName := "aws_lakeformation_resource.test" + resourceName := "aws_lakeformation_permissions.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) }, @@ -55,6 +57,33 @@ func TestAccAWSLakeFormationPermissions_disappears(t *testing.T) { }) } +func TestAccAWSLakeFormationPermissions_dataLocation(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_lakeformation_permissions.test" + roleName := "aws_iam_role.test" + bucketName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLakeFormationPermissionsDestroy, + Steps: 
[]resource.TestStep{ + { + Config: testAccAWSLakeFormationPermissionsConfig_dataLocation(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLakeFormationPermissionsExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "principal", roleName, "arn"), + resource.TestCheckResourceAttr(resourceName, "permissions.#", "1"), + resource.TestCheckResourceAttr(resourceName, "permissions.0", "DATA_LOCATION_ACCESS"), + resource.TestCheckResourceAttr(resourceName, "catalog_resource", "false"), + resource.TestCheckResourceAttr(resourceName, "data_location.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "data_location.0.resource_arn", bucketName, "arn"), + ), + }, + }, + }) +} + func TestAccAWSLakeFormationPermissions_full(t *testing.T) { rName := acctest.RandomWithPrefix("lakeformation-test-bucket") dName := acctest.RandomWithPrefix("lakeformation-test-db") @@ -194,10 +223,22 @@ func testAccCheckAWSLakeFormationPermissionsExists(resourceName string) resource if rs.Primary.Attributes["catalog_resource"] == "true" { input.ResourceType = aws.String(lakeformation.DataLakeResourceTypeCatalog) + input.Resource = &lakeformation.Resource{ + Catalog: &lakeformation.CatalogResource{}, + } } if rs.Primary.Attributes["data_location.#"] != "0" { input.ResourceType = aws.String(lakeformation.DataLakeResourceTypeDataLocation) + res := &lakeformation.DataLocationResource{ + ResourceArn: aws.String(rs.Primary.Attributes["data_location.0.resource_arn"]), + } + if rs.Primary.Attributes["data_location.0.catalog_id"] != "" { + res.CatalogId = aws.String(rs.Primary.Attributes["data_location.0.catalog_id"]) + } + input.Resource = &lakeformation.Resource{ + DataLocation: res, + } } if rs.Primary.Attributes["database.#"] != "0" { @@ -212,10 +253,40 @@ func testAccCheckAWSLakeFormationPermissionsExists(resourceName string) resource input.ResourceType = aws.String(lakeformation.DataLakeResourceTypeTable) } - _, err := conn.ListPermissions(input) + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + var err error + _, err = conn.ListPermissions(input) + if err != nil { + if isAWSErr(err, lakeformation.ErrCodeInvalidInputException, "Invalid principal") { + return resource.RetryableError(err) + } + if isAWSErr(err, lakeformation.ErrCodeInvalidInputException, "Grantee has no permissions") { + return resource.RetryableError(err) + } + if isAWSErr(err, lakeformation.ErrCodeInvalidInputException, "register the S3 path") { + return resource.RetryableError(err) + } + if isAWSErr(err, lakeformation.ErrCodeConcurrentModificationException, "") { + return resource.RetryableError(err) + } + if isAWSErr(err, lakeformation.ErrCodeOperationTimeoutException, "") { + return resource.RetryableError(err) + } + if isAWSErr(err, "AccessDeniedException", "is not authorized to access requested permissions") { + return resource.RetryableError(err) + } + + return resource.NonRetryableError(fmt.Errorf("unable to get Lake Formation Permissions: %w", err)) + } + return nil + }) + + if isResourceTimeoutError(err) { + _, err = conn.ListPermissions(input) + } if err != nil { - return fmt.Errorf("error getting Lake Formation resource (%s): %w", rs.Primary.ID, err) + return fmt.Errorf("unable to get Lake Formation permissions (%s): %w", rs.Primary.ID, err) } return nil @@ -226,7 +297,7 @@ func testAccAWSLakeFormationPermissionsConfig_basic(rName string) string { return fmt.Sprintf(` data "aws_partition" "current" {} -resource "aws_iam_role" "workflow_role" { +resource "aws_iam_role" "test" { name = 
%[1]q assume_role_policy = < Date: Thu, 17 Dec 2020 18:32:30 -0500 Subject: [PATCH 0319/1212] resource/lakeformation_permissions: Adjust for passing tests --- ...ce_aws_lakeformation_data_lake_settings.go | 4 +- ...s_lakeformation_data_lake_settings_test.go | 6 +- ...ce_aws_lakeformation_data_lake_settings.go | 6 +- ...s_lakeformation_data_lake_settings_test.go | 12 +- aws/resource_aws_lakeformation_permissions.go | 15 +- ...urce_aws_lakeformation_permissions_test.go | 414 ++++++++---------- ...formation_data_lake_settings.html.markdown | 2 +- ...formation_data_lake_settings.html.markdown | 6 +- .../r/lakeformation_permissions.html.markdown | 3 +- 9 files changed, 218 insertions(+), 250 deletions(-) diff --git a/aws/data_source_aws_lakeformation_data_lake_settings.go b/aws/data_source_aws_lakeformation_data_lake_settings.go index 01e46336a6a..3bb0add521c 100644 --- a/aws/data_source_aws_lakeformation_data_lake_settings.go +++ b/aws/data_source_aws_lakeformation_data_lake_settings.go @@ -54,7 +54,7 @@ func dataSourceAwsLakeFormationDataLakeSettings() *schema.Resource { }, }, }, - "data_lake_admins": { + "admins": { Type: schema.TypeList, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, @@ -98,7 +98,7 @@ func dataSourceAwsLakeFormationDataLakeSettingsRead(d *schema.ResourceData, meta d.Set("create_database_default_permissions", flattenDataLakeSettingsCreateDefaultPermissions(settings.CreateDatabaseDefaultPermissions)) d.Set("create_table_default_permissions", flattenDataLakeSettingsCreateDefaultPermissions(settings.CreateTableDefaultPermissions)) - d.Set("data_lake_admins", flattenDataLakeSettingsAdmins(settings.DataLakeAdmins)) + d.Set("admins", flattenDataLakeSettingsAdmins(settings.DataLakeAdmins)) d.Set("trusted_resource_owners", flattenStringList(settings.TrustedResourceOwners)) return nil diff --git a/aws/data_source_aws_lakeformation_data_lake_settings_test.go b/aws/data_source_aws_lakeformation_data_lake_settings_test.go index 61597552d58..a37d389e64a 100644 --- a/aws/data_source_aws_lakeformation_data_lake_settings_test.go +++ b/aws/data_source_aws_lakeformation_data_lake_settings_test.go @@ -34,8 +34,8 @@ func testAccAWSLakeFormationDataLakeSettingsDataSource_basic(t *testing.T) { Config: testAccAWSLakeFormationDataLakeSettingsDataSourceConfig_basic, Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "catalog_id", callerIdentityName, "account_id"), - resource.TestCheckResourceAttr(resourceName, "data_lake_admins.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "data_lake_admins.0", callerIdentityName, "arn"), + resource.TestCheckResourceAttr(resourceName, "admins.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "admins.0", callerIdentityName, "arn"), ), }, }, @@ -47,7 +47,7 @@ data "aws_caller_identity" "current" {} resource "aws_lakeformation_data_lake_settings" "test" { catalog_id = data.aws_caller_identity.current.account_id - data_lake_admins = [data.aws_caller_identity.current.arn] + admins = [data.aws_caller_identity.current.arn] } data "aws_lakeformation_data_lake_settings" "test" { diff --git a/aws/resource_aws_lakeformation_data_lake_settings.go b/aws/resource_aws_lakeformation_data_lake_settings.go index f03fc372177..d41e5a1b148 100644 --- a/aws/resource_aws_lakeformation_data_lake_settings.go +++ b/aws/resource_aws_lakeformation_data_lake_settings.go @@ -78,7 +78,7 @@ func resourceAwsLakeFormationDataLakeSettings() *schema.Resource { }, }, }, - "data_lake_admins": { + "admins": { Type: 
schema.TypeList, Computed: true, Optional: true, @@ -119,7 +119,7 @@ func resourceAwsLakeFormationDataLakeSettingsCreate(d *schema.ResourceData, meta settings.CreateTableDefaultPermissions = expandDataLakeSettingsCreateDefaultPermissions(v.([]interface{})) } - if v, ok := d.GetOk("data_lake_admins"); ok { + if v, ok := d.GetOk("admins"); ok { settings.DataLakeAdmins = expandDataLakeSettingsAdmins(v.([]interface{})) } @@ -172,7 +172,7 @@ func resourceAwsLakeFormationDataLakeSettingsRead(d *schema.ResourceData, meta i d.Set("create_database_default_permissions", flattenDataLakeSettingsCreateDefaultPermissions(settings.CreateDatabaseDefaultPermissions)) d.Set("create_table_default_permissions", flattenDataLakeSettingsCreateDefaultPermissions(settings.CreateTableDefaultPermissions)) - d.Set("data_lake_admins", flattenDataLakeSettingsAdmins(settings.DataLakeAdmins)) + d.Set("admins", flattenDataLakeSettingsAdmins(settings.DataLakeAdmins)) d.Set("trusted_resource_owners", flattenStringList(settings.TrustedResourceOwners)) return nil diff --git a/aws/resource_aws_lakeformation_data_lake_settings_test.go b/aws/resource_aws_lakeformation_data_lake_settings_test.go index 6c2c358a526..4592bc8587d 100644 --- a/aws/resource_aws_lakeformation_data_lake_settings_test.go +++ b/aws/resource_aws_lakeformation_data_lake_settings_test.go @@ -40,8 +40,8 @@ func testAccAWSLakeFormationDataLakeSettings_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAWSLakeFormationDataLakeSettingsExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "catalog_id", callerIdentityName, "account_id"), - resource.TestCheckResourceAttr(resourceName, "data_lake_admins.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "data_lake_admins.0", callerIdentityName, "arn"), + resource.TestCheckResourceAttr(resourceName, "admins.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "admins.0", callerIdentityName, "arn"), ), }, }, @@ -81,8 +81,8 @@ func testAccAWSLakeFormationDataLakeSettings_withoutCatalogId(t *testing.T) { Config: testAccAWSLakeFormationDataLakeSettingsConfig_withoutCatalogId, Check: resource.ComposeTestCheckFunc( testAccCheckAWSLakeFormationDataLakeSettingsExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "data_lake_admins.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "data_lake_admins.0", callerIdentityName, "arn"), + resource.TestCheckResourceAttr(resourceName, "admins.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "admins.0", callerIdentityName, "arn"), ), }, }, @@ -162,7 +162,7 @@ resource "aws_lakeformation_data_lake_settings" "test" { permissions = ["ALL"] } - data_lake_admins = [data.aws_caller_identity.current.arn] + admins = [data.aws_caller_identity.current.arn] trusted_resource_owners = [data.aws_caller_identity.current.account_id] } ` @@ -171,6 +171,6 @@ const testAccAWSLakeFormationDataLakeSettingsConfig_withoutCatalogId = ` data "aws_caller_identity" "current" {} resource "aws_lakeformation_data_lake_settings" "test" { - data_lake_admins = [data.aws_caller_identity.current.arn] + admins = [data.aws_caller_identity.current.arn] } ` diff --git a/aws/resource_aws_lakeformation_permissions.go b/aws/resource_aws_lakeformation_permissions.go index 748217d01cb..3eefc5f28d6 100644 --- a/aws/resource_aws_lakeformation_permissions.go +++ b/aws/resource_aws_lakeformation_permissions.go @@ -121,6 +121,11 @@ func resourceAwsLakeFormationPermissions() *schema.Resource { Optional: true, Computed: true, }, + "wildcard": { 
+ Type: schema.TypeBool, + Optional: true, + Default: false, + }, }, }, }, @@ -140,7 +145,6 @@ func resourceAwsLakeFormationPermissions() *schema.Resource { "column_names": { Type: schema.TypeList, Optional: true, - MinItems: 1, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, @@ -153,7 +157,6 @@ func resourceAwsLakeFormationPermissions() *schema.Resource { "excluded_column_names": { Type: schema.TypeList, Optional: true, - MinItems: 1, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.NoZeroValues, @@ -485,7 +488,9 @@ func expandLakeFormationTableResource(tfMap map[string]interface{}) *lakeformati if v, ok := tfMap["name"].(string); ok && v != "" { apiObject.Name = aws.String(v) - } else { + } + + if v, ok := tfMap["wildcard"].(bool); ok && v { apiObject.TableWildcard = &lakeformation.TableWildcard{} } @@ -511,6 +516,10 @@ func flattenLakeFormationTableResource(apiObject *lakeformation.TableResource) m tfMap["name"] = aws.StringValue(v) } + if v := apiObject.TableWildcard; v != nil { + tfMap["wildcard"] = true + } + return tfMap } diff --git a/aws/resource_aws_lakeformation_permissions_test.go b/aws/resource_aws_lakeformation_permissions_test.go index 73eb6b2292a..191013d0df0 100644 --- a/aws/resource_aws_lakeformation_permissions_test.go +++ b/aws/resource_aws_lakeformation_permissions_test.go @@ -36,27 +36,6 @@ func TestAccAWSLakeFormationPermissions_basic(t *testing.T) { }) } -func TestAccAWSLakeFormationPermissions_disappears(t *testing.T) { - rName := acctest.RandomWithPrefix("tf-acc-test") - resourceName := "aws_lakeformation_permissions.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSLakeFormationPermissionsDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLakeFormationPermissionsConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLakeFormationPermissionsExists(resourceName), - testAccCheckResourceDisappears(testAccProvider, resourceAwsLakeFormationPermissions(), resourceName), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - func TestAccAWSLakeFormationPermissions_dataLocation(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_lakeformation_permissions.test" @@ -84,47 +63,26 @@ func TestAccAWSLakeFormationPermissions_dataLocation(t *testing.T) { }) } -func TestAccAWSLakeFormationPermissions_full(t *testing.T) { - rName := acctest.RandomWithPrefix("lakeformation-test-bucket") - dName := acctest.RandomWithPrefix("lakeformation-test-db") - tName := acctest.RandomWithPrefix("lakeformation-test-table") - - roleName := "data.aws_iam_role.test" +func TestAccAWSLakeFormationPermissions_database(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_lakeformation_permissions.test" - bucketName := "aws_s3_bucket.test" + roleName := "aws_iam_role.test" dbName := "aws_glue_catalog_database.test" - tableName := "aws_glue_catalog_table.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSLakeFormationPermissionsDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSLakeFormationPermissionsConfig_catalog(), - Check: resource.ComposeTestCheckFunc( - 
testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), - resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "principal"), - resource.TestCheckResourceAttr(resourceName, "permissions.#", "1"), - resource.TestCheckResourceAttr(resourceName, "permissions.0", "CREATE_DATABASE"), - ), - }, - { - Config: testAccAWSLakeFormationPermissionsConfig_location(rName), + Config: testAccAWSLakeFormationPermissionsConfig_database(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), - resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "principal"), - resource.TestCheckResourceAttrPair(bucketName, "arn", resourceName, "location"), - resource.TestCheckResourceAttr(resourceName, "permissions.#", "1"), - resource.TestCheckResourceAttr(resourceName, "permissions.0", "DATA_LOCATION_ACCESS"), - ), - }, - { - Config: testAccAWSLakeFormationPermissionsConfig_database(rName, dName), - Check: resource.ComposeTestCheckFunc( - testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), - resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "principal"), - resource.TestCheckResourceAttrPair(dbName, "name", resourceName, "database"), + testAccCheckAWSLakeFormationPermissionsExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "principal", roleName, "arn"), + resource.TestCheckResourceAttr(resourceName, "catalog_resource", "false"), + resource.TestCheckResourceAttrPair(resourceName, "principal", roleName, "arn"), + resource.TestCheckResourceAttr(resourceName, "database.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "database.0.name", dbName, "name"), resource.TestCheckResourceAttr(resourceName, "permissions.#", "3"), resource.TestCheckResourceAttr(resourceName, "permissions.0", "ALTER"), resource.TestCheckResourceAttr(resourceName, "permissions.1", "CREATE_TABLE"), @@ -133,42 +91,59 @@ func TestAccAWSLakeFormationPermissions_full(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "permissions_with_grant_option.0", "CREATE_TABLE"), ), }, + }, + }) +} + +func TestAccAWSLakeFormationPermissions_table(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_lakeformation_permissions.test" + roleName := "aws_iam_role.test" + tableName := "aws_glue_catalog_table.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLakeFormationPermissionsDestroy, + Steps: []resource.TestStep{ { - Config: testAccAWSLakeFormationPermissionsConfig_table(rName, dName, tName, "\"ALL\""), + Config: testAccAWSLakeFormationPermissionsConfig_table(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), + testAccCheckAWSLakeFormationPermissionsExists(resourceName), resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "principal"), resource.TestCheckResourceAttr(resourceName, "table.#", "1"), - resource.TestCheckResourceAttrPair(tableName, "database_name", resourceName, "table.0.database"), - resource.TestCheckResourceAttrPair(tableName, "name", resourceName, "table.0.name"), + resource.TestCheckResourceAttrPair(resourceName, "table.0.database_name", tableName, "database_name"), + resource.TestCheckResourceAttrPair(resourceName, "table.0.name", tableName, "name"), resource.TestCheckResourceAttr(resourceName, "permissions.#", "1"), 
resource.TestCheckResourceAttr(resourceName, "permissions.0", "ALL"), ), }, + }, + }) +} + +func TestAccAWSLakeFormationPermissions_tableWithColumns(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_lakeformation_permissions.test" + roleName := "aws_iam_role.test" + tableName := "aws_glue_catalog_table.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLakeFormationPermissionsDestroy, + Steps: []resource.TestStep{ { - Config: testAccAWSLakeFormationPermissionsConfig_table(rName, dName, tName, "\"ALL\", \"SELECT\""), - Check: resource.ComposeTestCheckFunc( - testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), - resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "principal"), - resource.TestCheckResourceAttr(resourceName, "table.#", "1"), - resource.TestCheckResourceAttrPair(tableName, "database_name", resourceName, "table.0.database"), - resource.TestCheckResourceAttrPair(tableName, "name", resourceName, "table.0.name"), - resource.TestCheckResourceAttr(resourceName, "permissions.#", "2"), - resource.TestCheckResourceAttr(resourceName, "permissions.0", "ALL"), - resource.TestCheckResourceAttr(resourceName, "permissions.1", "SELECT"), - ), - }, - { - Config: testAccAWSLakeFormationPermissionsConfig_tableWithColumns(rName, dName, tName), + Config: testAccAWSLakeFormationPermissionsConfig_tableWithColumns(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), - resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "principal"), - resource.TestCheckResourceAttr(resourceName, "table.#", "1"), - resource.TestCheckResourceAttrPair(tableName, "database_name", resourceName, "table.0.database"), - resource.TestCheckResourceAttrPair(tableName, "name", resourceName, "table.0.name"), - resource.TestCheckResourceAttr(resourceName, "table.0.column_names.#", "2"), - resource.TestCheckResourceAttr(resourceName, "table.0.column_names.0", "event"), - resource.TestCheckResourceAttr(resourceName, "table.0.column_names.1", "timestamp"), + testAccCheckAWSLakeFormationPermissionsExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "principal", roleName, "arn"), + resource.TestCheckResourceAttr(resourceName, "table_with_columns.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "table_with_columns.0.database_name", tableName, "database_name"), + resource.TestCheckResourceAttrPair(resourceName, "table_with_columns.0.name", tableName, "name"), + resource.TestCheckResourceAttr(resourceName, "table_with_columns.0.column_names.#", "2"), + resource.TestCheckResourceAttr(resourceName, "table_with_columns.0.column_names.0", "event"), + resource.TestCheckResourceAttr(resourceName, "table_with_columns.0.column_names.1", "timestamp"), resource.TestCheckResourceAttr(resourceName, "permissions.#", "1"), resource.TestCheckResourceAttr(resourceName, "permissions.0", "SELECT"), ), @@ -221,14 +196,14 @@ func testAccCheckAWSLakeFormationPermissionsExists(resourceName string) resource }, } - if rs.Primary.Attributes["catalog_resource"] == "true" { + if v, ok := rs.Primary.Attributes["catalog_resource"]; ok && v != "" && v == "true" { input.ResourceType = aws.String(lakeformation.DataLakeResourceTypeCatalog) input.Resource = &lakeformation.Resource{ Catalog: &lakeformation.CatalogResource{}, } } - if 
rs.Primary.Attributes["data_location.#"] != "0" { + if v, ok := rs.Primary.Attributes["data_location.#"]; ok && v != "" && v != "0" { input.ResourceType = aws.String(lakeformation.DataLakeResourceTypeDataLocation) res := &lakeformation.DataLocationResource{ ResourceArn: aws.String(rs.Primary.Attributes["data_location.0.resource_arn"]), @@ -241,16 +216,54 @@ func testAccCheckAWSLakeFormationPermissionsExists(resourceName string) resource } } - if rs.Primary.Attributes["database.#"] != "0" { + if v, ok := rs.Primary.Attributes["database.#"]; ok && v != "" && v != "0" { input.ResourceType = aws.String(lakeformation.DataLakeResourceTypeDatabase) + res := &lakeformation.DatabaseResource{ + Name: aws.String(rs.Primary.Attributes["database.0.name"]), + } + if rs.Primary.Attributes["database.0.catalog_id"] != "" { + res.CatalogId = aws.String(rs.Primary.Attributes["database.0.catalog_id"]) + } + input.Resource = &lakeformation.Resource{ + Database: res, + } } - if rs.Primary.Attributes["table.#"] != "0" { + if v, ok := rs.Primary.Attributes["table.#"]; ok && v != "" && v != "0" { input.ResourceType = aws.String(lakeformation.DataLakeResourceTypeTable) + res := &lakeformation.TableResource{ + DatabaseName: aws.String(rs.Primary.Attributes["table.0.database_name"]), + } + if rs.Primary.Attributes["table.0.catalog_id"] != "" { + res.CatalogId = aws.String(rs.Primary.Attributes["table.0.catalog_id"]) + } + if rs.Primary.Attributes["table.0.name"] != "" { + res.Name = aws.String(rs.Primary.Attributes["table.0.name"]) + } + if rs.Primary.Attributes["table.0.wildcard"] == "true" { + res.TableWildcard = &lakeformation.TableWildcard{} + } + input.Resource = &lakeformation.Resource{ + Table: res, + } } - if rs.Primary.Attributes["table_with_columns.#"] != "0" { + // ListPermissions does not support getting privileges on a table with columns. + // Instead, call this operation on the table, and the operation returns the + // table and the table w columns. 
+ // https://docs.aws.amazon.com/sdk-for-go/api/service/lakeformation/#ListPermissionsInput + if v, ok := rs.Primary.Attributes["table_with_columns.#"]; ok && v != "" && v != "0" { input.ResourceType = aws.String(lakeformation.DataLakeResourceTypeTable) + res := &lakeformation.TableResource{ + DatabaseName: aws.String(rs.Primary.Attributes["table_with_columns.0.database_name"]), + Name: aws.String(rs.Primary.Attributes["table_with_columns.0.name"]), + } + if rs.Primary.Attributes["table.0.catalog_id"] != "" { + res.CatalogId = aws.String(rs.Primary.Attributes["table.0.catalog_id"]) + } + input.Resource = &lakeformation.Resource{ + Table: res, + } } err := resource.Retry(2*time.Minute, func() *resource.RetryError { @@ -320,7 +333,7 @@ EOF data "aws_caller_identity" "current" {} resource "aws_lakeformation_data_lake_settings" "test" { - data_lake_admins = [data.aws_caller_identity.current.arn] + admins = [data.aws_caller_identity.current.arn] } resource "aws_lakeformation_permissions" "test" { @@ -356,48 +369,6 @@ resource "aws_iam_role" "test" { EOF } -resource "aws_iam_role_policy" "test" { - name = %[1]q - role = aws_iam_role.test.id - - policy = < Date: Thu, 17 Dec 2020 19:38:15 -0500 Subject: [PATCH 0320/1212] resource/lakeformation: Pre-merge cleanup --- ...ce_aws_lakeformation_data_lake_settings.go | 51 ++++++++++++++----- ...s_lakeformation_data_lake_settings_test.go | 2 +- ...formation_data_lake_settings.html.markdown | 2 +- 3 files changed, 40 insertions(+), 15 deletions(-) diff --git a/aws/resource_aws_lakeformation_data_lake_settings.go b/aws/resource_aws_lakeformation_data_lake_settings.go index d41e5a1b148..7b1644f3b3e 100644 --- a/aws/resource_aws_lakeformation_data_lake_settings.go +++ b/aws/resource_aws_lakeformation_data_lake_settings.go @@ -3,10 +3,12 @@ package aws import ( "fmt" "log" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lakeformation" "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/hashcode" @@ -23,6 +25,15 @@ func resourceAwsLakeFormationDataLakeSettings() *schema.Resource { }, Schema: map[string]*schema.Schema{ + "admins": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateArn, + }, + }, "catalog_id": { Type: schema.TypeString, ForceNew: true, @@ -78,15 +89,6 @@ func resourceAwsLakeFormationDataLakeSettings() *schema.Resource { }, }, }, - "admins": { - Type: schema.TypeList, - Computed: true, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateArn, - }, - }, "trusted_resource_owners": { Type: schema.TypeList, Computed: true, @@ -128,7 +130,30 @@ func resourceAwsLakeFormationDataLakeSettingsCreate(d *schema.ResourceData, meta } input.DataLakeSettings = settings - output, err := conn.PutDataLakeSettings(input) + + var output *lakeformation.PutDataLakeSettingsOutput + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + var err error + output, err = conn.PutDataLakeSettings(input) + if err != nil { + if isAWSErr(err, lakeformation.ErrCodeInvalidInputException, "Invalid principal") { + return resource.RetryableError(err) + } + if isAWSErr(err, lakeformation.ErrCodeConcurrentModificationException, "") { + return 
resource.RetryableError(err) + } + if isAWSErr(err, lakeformation.ErrCodeOperationTimeoutException, "") { + return resource.RetryableError(err) + } + + return resource.NonRetryableError(fmt.Errorf("error creating Lake Formation data lake settings: %w", err)) + } + return nil + }) + + if isResourceTimeoutError(err) { + output, err = conn.PutDataLakeSettings(input) + } if err != nil { return fmt.Errorf("error creating Lake Formation data lake settings: %w", err) @@ -161,11 +186,11 @@ func resourceAwsLakeFormationDataLakeSettingsRead(d *schema.ResourceData, meta i } if err != nil { - return fmt.Errorf("error reading Lake Formation data lake settings (%s): %w", d.Id(), err) + return fmt.Errorf("reading Lake Formation data lake settings (%s): %w", d.Id(), err) } if output == nil || output.DataLakeSettings == nil { - return fmt.Errorf("error reading Lake Formation data lake settings (%s): empty response", d.Id()) + return fmt.Errorf("reading Lake Formation data lake settings (%s): empty response", d.Id()) } settings := output.DataLakeSettings @@ -202,7 +227,7 @@ func resourceAwsLakeFormationDataLakeSettingsDelete(d *schema.ResourceData, meta } if err != nil { - return fmt.Errorf("error deleting Lake Formation data lake settings (%s): %w", d.Id(), err) + return fmt.Errorf("deleting Lake Formation data lake settings (%s): %w", d.Id(), err) } return nil diff --git a/aws/resource_aws_lakeformation_data_lake_settings_test.go b/aws/resource_aws_lakeformation_data_lake_settings_test.go index 4592bc8587d..93c9e729c6b 100644 --- a/aws/resource_aws_lakeformation_data_lake_settings_test.go +++ b/aws/resource_aws_lakeformation_data_lake_settings_test.go @@ -162,7 +162,7 @@ resource "aws_lakeformation_data_lake_settings" "test" { permissions = ["ALL"] } - admins = [data.aws_caller_identity.current.arn] + admins = [data.aws_caller_identity.current.arn] trusted_resource_owners = [data.aws_caller_identity.current.account_id] } ` diff --git a/website/docs/r/lakeformation_data_lake_settings.html.markdown b/website/docs/r/lakeformation_data_lake_settings.html.markdown index be99f56def8..4ac3a30d80b 100644 --- a/website/docs/r/lakeformation_data_lake_settings.html.markdown +++ b/website/docs/r/lakeformation_data_lake_settings.html.markdown @@ -44,10 +44,10 @@ resource "aws_lakeformation_data_lake_settings" "example" { The following arguments are optional: +* `admins` – (Optional) List of ARNs of AWS Lake Formation principals (IAM users or roles). * `catalog_id` – (Optional) Identifier for the Data Catalog. By default, the account ID. * `create_database_default_permissions` - (Optional) Up to three configuration blocks of principal permissions for default create database permissions. Detailed below. * `create_table_default_permissions` - (Optional) Up to three configuration blocks of principal permissions for default create table permissions. Detailed below. -* `admins` – (Optional) List of ARNs of AWS Lake Formation principals (IAM users or roles). * `trusted_resource_owners` – (Optional) List of the resource-owning account IDs that the caller's account can use to share their user access details (user ARNs). 
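For example, a minimal sketch of these optional settings, mirroring the acceptance test configuration earlier in this patch (the caller-identity data source is assumed):

```hcl
data "aws_caller_identity" "current" {}

resource "aws_lakeformation_data_lake_settings" "example" {
  # Grant the current caller Lake Formation administrator rights.
  admins = [data.aws_caller_identity.current.arn]

  # Accounts that may share this account's user access details.
  trusted_resource_owners = [data.aws_caller_identity.current.account_id]
}
```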
### create_database_default_permissions From 641fbd469a62beae6867d1c1aa66905363462b50 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 17 Dec 2020 19:46:08 -0500 Subject: [PATCH 0321/1212] resource/lakeformation: Fix linting --- ...ce_aws_lakeformation_data_lake_settings_test.go | 4 ++-- aws/resource_aws_lakeformation_permissions_test.go | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/aws/data_source_aws_lakeformation_data_lake_settings_test.go b/aws/data_source_aws_lakeformation_data_lake_settings_test.go index a37d389e64a..a28ec1bdb8d 100644 --- a/aws/data_source_aws_lakeformation_data_lake_settings_test.go +++ b/aws/data_source_aws_lakeformation_data_lake_settings_test.go @@ -46,8 +46,8 @@ const testAccAWSLakeFormationDataLakeSettingsDataSourceConfig_basic = ` data "aws_caller_identity" "current" {} resource "aws_lakeformation_data_lake_settings" "test" { - catalog_id = data.aws_caller_identity.current.account_id - admins = [data.aws_caller_identity.current.arn] + catalog_id = data.aws_caller_identity.current.account_id + admins = [data.aws_caller_identity.current.arn] } data "aws_lakeformation_data_lake_settings" "test" { diff --git a/aws/resource_aws_lakeformation_permissions_test.go b/aws/resource_aws_lakeformation_permissions_test.go index 191013d0df0..d9b5a8748df 100644 --- a/aws/resource_aws_lakeformation_permissions_test.go +++ b/aws/resource_aws_lakeformation_permissions_test.go @@ -388,7 +388,7 @@ resource "aws_lakeformation_data_lake_settings" "test" { resource "aws_lakeformation_permissions" "test" { principal = aws_iam_role.test.arn permissions = ["DATA_LOCATION_ACCESS"] - + data_location { resource_arn = aws_s3_bucket.test.arn } @@ -439,7 +439,7 @@ resource "aws_lakeformation_permissions" "test" { permissions = ["ALTER", "CREATE_TABLE", "DROP"] permissions_with_grant_option = ["CREATE_TABLE"] principal = aws_iam_role.test.arn - + database { name = aws_glue_catalog_database.test.name } @@ -498,8 +498,8 @@ resource "aws_lakeformation_permissions" "test" { principal = aws_iam_role.test.arn table { - database_name = aws_glue_catalog_table.test.database_name - name = aws_glue_catalog_table.test.name + database_name = aws_glue_catalog_table.test.database_name + name = aws_glue_catalog_table.test.name } } `, rName) @@ -565,9 +565,9 @@ resource "aws_lakeformation_permissions" "test" { principal = aws_iam_role.test.arn table_with_columns { - database_name = aws_glue_catalog_table.test.database_name - name = aws_glue_catalog_table.test.name - column_names = ["event", "timestamp"] + database_name = aws_glue_catalog_table.test.database_name + name = aws_glue_catalog_table.test.name + column_names = ["event", "timestamp"] } depends_on = ["aws_lakeformation_data_lake_settings.test"] From e170ade99836bb1166776e9cdb212d4d6ae8224a Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 18 Dec 2020 12:59:27 -0500 Subject: [PATCH 0322/1212] ds/lakeformation_permissions: New data source --- ...ce_aws_lakeformation_data_lake_settings.go | 10 +- ...s_lakeformation_data_lake_settings_test.go | 14 - ...ta_source_aws_lakeformation_permissions.go | 246 ++++++++++ ...urce_aws_lakeformation_permissions_test.go | 445 ++++++++++++++++++ aws/data_source_aws_lakeformation_resource.go | 16 +- ..._source_aws_lakeformation_resource_test.go | 8 +- aws/provider.go | 1 + ...s_lakeformation_data_lake_settings_test.go | 15 - aws/resource_aws_lakeformation_permissions.go | 14 +- ...urce_aws_lakeformation_permissions_test.go | 28 +- aws/resource_aws_lakeformation_resource.go | 18 
+- ...esource_aws_lakeformation_resource_test.go | 20 +- aws/resource_aws_lakeformation_test.go | 40 ++ ...formation_data_lake_settings.html.markdown | 6 +- .../d/lakeformation_permissions.html.markdown | 110 +++++ .../d/lakeformation_resource.html.markdown | 4 +- ...formation_data_lake_settings.html.markdown | 6 +- .../r/lakeformation_permissions.html.markdown | 31 +- .../r/lakeformation_resource.html.markdown | 4 +- 19 files changed, 929 insertions(+), 107 deletions(-) create mode 100644 aws/data_source_aws_lakeformation_permissions.go create mode 100644 aws/data_source_aws_lakeformation_permissions_test.go create mode 100644 aws/resource_aws_lakeformation_test.go create mode 100644 website/docs/d/lakeformation_permissions.html.markdown diff --git a/aws/data_source_aws_lakeformation_data_lake_settings.go b/aws/data_source_aws_lakeformation_data_lake_settings.go index 3bb0add521c..27244797a9c 100644 --- a/aws/data_source_aws_lakeformation_data_lake_settings.go +++ b/aws/data_source_aws_lakeformation_data_lake_settings.go @@ -16,6 +16,11 @@ func dataSourceAwsLakeFormationDataLakeSettings() *schema.Resource { Read: dataSourceAwsLakeFormationDataLakeSettingsRead, Schema: map[string]*schema.Schema{ + "admins": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, "catalog_id": { Type: schema.TypeString, Optional: true, @@ -54,11 +59,6 @@ func dataSourceAwsLakeFormationDataLakeSettings() *schema.Resource { }, }, }, - "admins": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, "trusted_resource_owners": { Type: schema.TypeList, Computed: true, diff --git a/aws/data_source_aws_lakeformation_data_lake_settings_test.go b/aws/data_source_aws_lakeformation_data_lake_settings_test.go index a28ec1bdb8d..e0ae83a99b4 100644 --- a/aws/data_source_aws_lakeformation_data_lake_settings_test.go +++ b/aws/data_source_aws_lakeformation_data_lake_settings_test.go @@ -7,20 +7,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) -func TestAccAWSLakeFormationDataLakeSettingsDataSource_serial(t *testing.T) { - testCases := map[string]func(t *testing.T){ - "basic": testAccAWSLakeFormationDataLakeSettingsDataSource_basic, - // if more tests are added, they should be serial (data catalog is account-shared resource) - } - - for name, tc := range testCases { - tc := tc - t.Run(name, func(t *testing.T) { - tc(t) - }) - } -} - func testAccAWSLakeFormationDataLakeSettingsDataSource_basic(t *testing.T) { callerIdentityName := "data.aws_caller_identity.current" resourceName := "data.aws_lakeformation_data_lake_settings.test" diff --git a/aws/data_source_aws_lakeformation_permissions.go b/aws/data_source_aws_lakeformation_permissions.go new file mode 100644 index 00000000000..cdbbcab7d84 --- /dev/null +++ b/aws/data_source_aws_lakeformation_permissions.go @@ -0,0 +1,246 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/lakeformation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/hashcode" +) + +func dataSourceAwsLakeFormationPermissions() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsLakeFormationPermissionsRead, + + Schema: map[string]*schema.Schema{ + "catalog_id": { + Type: schema.TypeString, + 
Optional: true, + ValidateFunc: validateAwsAccountId, + }, + "catalog_resource": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "data_location": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, + }, + "catalog_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateAwsAccountId, + }, + }, + }, + }, + "database": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "catalog_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateAwsAccountId, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "permissions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "permissions_with_grant_option": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "principal": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validatePrincipal, + }, + "table": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "catalog_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateAwsAccountId, + }, + "database_name": { + Type: schema.TypeString, + Required: true, + }, + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "wildcard": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, + "table_with_columns": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "catalog_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateAwsAccountId, + }, + "column_names": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + }, + "database_name": { + Type: schema.TypeString, + Required: true, + }, + "excluded_column_names": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.NoZeroValues, + }, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).lakeformationconn + + input := &lakeformation.ListPermissionsInput{ + Principal: &lakeformation.DataLakePrincipal{ + DataLakePrincipalIdentifier: aws.String(d.Get("principal").(string)), + }, + } + + if v, ok := d.GetOk("catalog_id"); ok { + input.CatalogId = aws.String(v.(string)) + } + + input.Resource = expandLakeFormationResource(d, true) + + log.Printf("[DEBUG] Reading Lake Formation permissions: %v", input) + var principalResourcePermissions []*lakeformation.PrincipalResourcePermissions + + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + var err error + err = conn.ListPermissionsPages(input, func(resp *lakeformation.ListPermissionsOutput, lastPage bool) bool { + for _, permission := range resp.PrincipalResourcePermissions { + if permission == nil { + continue + } + + principalResourcePermissions = 
append(principalResourcePermissions, permission) + } + return !lastPage + }) + if err != nil { + if isAWSErr(err, lakeformation.ErrCodeInvalidInputException, "Invalid principal") { + return resource.RetryableError(err) + } + return resource.NonRetryableError(fmt.Errorf("error reading Lake Formation Permissions: %w", err)) + } + return nil + }) + + if err != nil { + return fmt.Errorf("error reading Lake Formation permissions: %w", err) + } + + if len(principalResourcePermissions) > 1 { + return fmt.Errorf("error reading Lake Formation permissions: %s", "multiple permissions found") + } + + d.SetId(fmt.Sprintf("%d", hashcode.String(input.String()))) + for _, permissions := range principalResourcePermissions { + d.Set("principal", permissions.Principal.DataLakePrincipalIdentifier) + d.Set("permissions", permissions.Permissions) + d.Set("permissions_with_grant_option", permissions.PermissionsWithGrantOption) + + if permissions.Resource.Catalog != nil { + d.Set("catalog_resource", true) + } + + if permissions.Resource.DataLocation != nil { + d.Set("data_location", []interface{}{flattenLakeFormationDataLocationResource(permissions.Resource.DataLocation)}) + } else { + d.Set("data_location", nil) + } + + if permissions.Resource.Database != nil { + d.Set("database", []interface{}{flattenLakeFormationDatabaseResource(permissions.Resource.Database)}) + } else { + d.Set("database", nil) + } + + // table with columns permissions will include the table and table with columns + if permissions.Resource.TableWithColumns != nil { + d.Set("table_with_columns", []interface{}{flattenLakeFormationTableWithColumnsResource(permissions.Resource.TableWithColumns)}) + } else if permissions.Resource.Table != nil { + d.Set("table_with_columns", nil) + d.Set("table", []interface{}{flattenLakeFormationTableResource(permissions.Resource.Table)}) + } else { + d.Set("table", nil) + } + } + + return nil +} diff --git a/aws/data_source_aws_lakeformation_permissions_test.go b/aws/data_source_aws_lakeformation_permissions_test.go new file mode 100644 index 00000000000..4b59b28c8c8 --- /dev/null +++ b/aws/data_source_aws_lakeformation_permissions_test.go @@ -0,0 +1,445 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/service/lakeformation" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func testAccAWSLakeFormationPermissionsDataSource_basic(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_lakeformation_permissions.test" + dataSourceName := "data.aws_lakeformation_permissions.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLakeFormationPermissionsDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLakeFormationPermissionsDataSourceConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "principal", dataSourceName, "principal"), + resource.TestCheckResourceAttrPair(resourceName, "permissions.#", dataSourceName, "permissions.#"), + resource.TestCheckResourceAttrPair(resourceName, "permissions.0", dataSourceName, "permissions.0"), + resource.TestCheckResourceAttrPair(resourceName, "catalog_resource", dataSourceName, "catalog_resource"), + ), + }, + }, + }) +} + +func testAccAWSLakeFormationPermissionsDataSource_dataLocation(t *testing.T) { + 
rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_lakeformation_permissions.test" + dataSourceName := "data.aws_lakeformation_permissions.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLakeFormationPermissionsDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLakeFormationPermissionsDataSourceConfig_dataLocation(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "principal", dataSourceName, "principal"), + resource.TestCheckResourceAttrPair(resourceName, "permissions.#", dataSourceName, "permissions.#"), + resource.TestCheckResourceAttrPair(resourceName, "permissions.0", dataSourceName, "permissions.0"), + resource.TestCheckResourceAttrPair(resourceName, "data_location.#", dataSourceName, "data_location.#"), + resource.TestCheckResourceAttrPair(resourceName, "data_location.0.arn", dataSourceName, "data_location.0.arn"), + ), + }, + }, + }) +} + +func testAccAWSLakeFormationPermissionsDataSource_database(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_lakeformation_permissions.test" + dataSourceName := "data.aws_lakeformation_permissions.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLakeFormationPermissionsDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLakeFormationPermissionsDataSourceConfig_database(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "principal", dataSourceName, "principal"), + resource.TestCheckResourceAttrPair(resourceName, "database.#", dataSourceName, "database.#"), + resource.TestCheckResourceAttrPair(resourceName, "database.0.name", dataSourceName, "database.0.name"), + resource.TestCheckResourceAttrPair(resourceName, "permissions.#", dataSourceName, "permissions.#"), + resource.TestCheckResourceAttrPair(resourceName, "permissions.0", dataSourceName, "permissions.0"), + resource.TestCheckResourceAttrPair(resourceName, "permissions.1", dataSourceName, "permissions.1"), + resource.TestCheckResourceAttrPair(resourceName, "permissions.2", dataSourceName, "permissions.2"), + resource.TestCheckResourceAttrPair(resourceName, "permissions_with_grant_option.#", dataSourceName, "permissions_with_grant_option.#"), + resource.TestCheckResourceAttrPair(resourceName, "permissions_with_grant_option.0", dataSourceName, "permissions_with_grant_option.0"), + ), + }, + }, + }) +} + +func testAccAWSLakeFormationPermissionsDataSource_table(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_lakeformation_permissions.test" + dataSourceName := "data.aws_lakeformation_permissions.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLakeFormationPermissionsDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLakeFormationPermissionsDataSourceConfig_table(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "principal", dataSourceName, "principal"), + resource.TestCheckResourceAttrPair(resourceName, "table.#", 
dataSourceName, "table.#"), + resource.TestCheckResourceAttrPair(resourceName, "table.0.database_name", dataSourceName, "table.0.database_name"), + resource.TestCheckResourceAttrPair(resourceName, "table.0.name", dataSourceName, "table.0.name"), + resource.TestCheckResourceAttrPair(resourceName, "permissions.#", dataSourceName, "permissions.#"), + resource.TestCheckResourceAttrPair(resourceName, "permissions.0", dataSourceName, "permissions.0"), + ), + }, + }, + }) +} + +func testAccAWSLakeFormationPermissionsDataSource_tableWithColumns(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_lakeformation_permissions.test" + dataSourceName := "data.aws_lakeformation_permissions.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLakeFormationPermissionsDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLakeFormationPermissionsDataSourceConfig_tableWithColumns(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "principal", dataSourceName, "principal"), + resource.TestCheckResourceAttrPair(resourceName, "table_with_columns.#", dataSourceName, "table_with_columns.#"), + resource.TestCheckResourceAttrPair(resourceName, "table_with_columns.0.database_name", dataSourceName, "table_with_columns.0.database_name"), + resource.TestCheckResourceAttrPair(resourceName, "table_with_columns.0.name", dataSourceName, "table_with_columns.0.name"), + resource.TestCheckResourceAttrPair(resourceName, "table_with_columns.0.column_names.#", dataSourceName, "table_with_columns.0.column_names.#"), + resource.TestCheckResourceAttrPair(resourceName, "table_with_columns.0.column_names.0", dataSourceName, "table_with_columns.0.column_names.0"), + resource.TestCheckResourceAttrPair(resourceName, "table_with_columns.0.column_names.1", dataSourceName, "table_with_columns.0.column_names.1"), + resource.TestCheckResourceAttrPair(resourceName, "permissions.#", dataSourceName, "permissions.#"), + resource.TestCheckResourceAttrPair(resourceName, "permissions.0", dataSourceName, "permissions.0"), + ), + }, + }, + }) +} + +func testAccAWSLakeFormationPermissionsDataSourceConfig_basic(rName string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_iam_role" "test" { + name = %[1]q + + assume_role_policy = < **NOTE:** This data source deals with explicitly granted permissions. Lake Formation grants implicit permissions to data lake administrators, database creators, and table creators. For more information, see [Implicit Lake Formation Permissions](https://docs.aws.amazon.com/lake-formation/latest/dg/implicit-permissions.html). + +## Example Usage + +### Permissions For A Lake Formation S3 Resource + +```hcl +data "aws_lakeformation_permissions" "test" { + principal_arn = aws_iam_role.workflow_role.arn + + data_location { + arn = aws_lakeformation_resource.test.arn + } +} +``` + +### Permissions For A Glue Catalog Database + +```hcl +data "aws_lakeformation_permissions" "test" { + role = aws_iam_role.workflow_role.arn + + database { + name = aws_glue_catalog_database.test.name + catalog_id = "110376042874" + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `principal` – (Required) Principal to be granted the permissions on the resource. Supported principals are IAM users or IAM roles. 
+ +One of the following is required: + +* `catalog_resource` - Whether the permissions are to be granted for the Data Catalog. Defaults to `false`. +* `data_location` - Configuration block for a data location resource. Detailed below. +* `database` - Configuration block for a database resource. Detailed below. +* `table` - Configuration block for a table resource. Detailed below. +* `table_with_columns` - Configuration block for a table with columns resource. Detailed below. + +The following arguments are optional: + +* `catalog_id` – (Optional) Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. + +### data_location + +The following argument is required: + +* `arn` – (Required) Amazon Resource Name (ARN) that uniquely identifies the data location resource. + +The following argument is optional: + +* `catalog_id` - (Optional) Identifier for the Data Catalog where the location is registered with Lake Formation. By default, it is the account ID of the caller. + +### database + +The following argument is required: + +* `name` – (Required) Name of the database resource. Unique to the Data Catalog. + +The following argument is optional: + +* `catalog_id` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. + +### table + +The following argument is required: + +* `database_name` – (Required) Name of the database for the table. Unique to a Data Catalog. + +The following arguments are optional: + +* `catalog_id` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. +* `name` - (Optional) Name of the table. At least one of `name` or `wildcard` is required. +* `wildcard` - (Optional) Whether to use a wildcard representing every table under a database. At least one of `name` or `wildcard` is required. Defaults to `false`. + +### table_with_columns + +The following arguments are required: + +* `database_name` – (Required) Name of the database for the table with columns resource. Unique to the Data Catalog. +* `name` – (Required) Name of the table resource. + +The following arguments are optional: + +* `catalog_id` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. +* `column_names` - (Optional) List of column names for the table. At least one of `column_names` or `excluded_column_names` is required. +* `excluded_column_names` - (Optional) List of column names for the table to exclude. At least one of `column_names` or `excluded_column_names` is required. + +## Attributes Reference + +In addition to the above arguments, the following attribute is exported: + +* `permissions` – List of permissions granted to the principal. For details on permissions, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html). +* `permissions_with_grant_option` - Subset of `permissions` which the principal can pass. diff --git a/website/docs/d/lakeformation_resource.html.markdown b/website/docs/d/lakeformation_resource.html.markdown index fa33546eb17..70285dc5019 100644 --- a/website/docs/d/lakeformation_resource.html.markdown +++ b/website/docs/d/lakeformation_resource.html.markdown @@ -14,13 +14,13 @@ Provides details about a Lake Formation resource. 
```hcl data "aws_lakeformation_resource" "example" { - resource_arn = "arn:aws:s3:::tf-acc-test-9151654063908211878" + arn = "arn:aws:s3:::tf-acc-test-9151654063908211878" } ``` ## Argument Reference -* `resource_arn` – (Required) Amazon Resource Name (ARN) of the resource, an S3 path. +* `arn` – (Required) Amazon Resource Name (ARN) of the resource, an S3 path. ## Attributes Reference diff --git a/website/docs/r/lakeformation_data_lake_settings.html.markdown b/website/docs/r/lakeformation_data_lake_settings.html.markdown index 4ac3a30d80b..509e78aa29d 100644 --- a/website/docs/r/lakeformation_data_lake_settings.html.markdown +++ b/website/docs/r/lakeformation_data_lake_settings.html.markdown @@ -50,18 +50,20 @@ The following arguments are optional: * `create_table_default_permissions` - (Optional) Up to three configuration blocks of principal permissions for default create table permissions. Detailed below. * `trusted_resource_owners` – (Optional) List of the resource-owning account IDs that the caller's account can use to share their user access details (user ARNs). +~> **NOTE:** Although optional, not including `admins`, `create_database_default_permissions`, `create_table_default_permissions`, and/or `trusted_resource_owners` results in the setting being cleared. + ### create_database_default_permissions The following arguments are optional: -* `permissions` - (Optional) List of permissions that are granted to the principal. Valid values include `ALL`, `SELECT`, `ALTER`, `DROP`, `DELETE`, `INSERT`, `DESCRIBE`, `CREATE_DATABASE`, `CREATE_TABLE`, and `DATA_LOCATION_ACCESS`. +* `permissions` - (Optional) List of permissions that are granted to the principal. Valid values may include `ALL`, `SELECT`, `ALTER`, `DROP`, `DELETE`, `INSERT`, `DESCRIBE`, and `CREATE_TABLE`. For more details, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html). * `principal` - (Optional) Principal who is granted permissions. To enforce metadata and underlying data access control only by IAM on new databases and tables set `principal` to `IAM_ALLOWED_PRINCIPALS` and `permissions` to `["ALL"]`. ### create_table_default_permissions The following arguments are optional: -* `permissions` - (Optional) List of permissions that are granted to the principal. Valid values include `ALL`, `SELECT`, `ALTER`, `DROP`, `DELETE`, `INSERT`, `DESCRIBE`, `CREATE_DATABASE`, `CREATE_TABLE`, and `DATA_LOCATION_ACCESS`. +* `permissions` - (Optional) List of permissions that are granted to the principal. Valid values may include `ALL`, `SELECT`, `ALTER`, `DROP`, `DELETE`, `INSERT`, and `DESCRIBE`. For more details, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html). * `principal` - (Optional) Principal who is granted permissions. To enforce metadata and underlying data access control only by IAM on new databases and tables set `principal` to `IAM_ALLOWED_PRINCIPALS` and `permissions` to `["ALL"]`. 
## Attributes Reference diff --git a/website/docs/r/lakeformation_permissions.html.markdown b/website/docs/r/lakeformation_permissions.html.markdown index cd7d2ef5fb1..05acb29d66a 100644 --- a/website/docs/r/lakeformation_permissions.html.markdown +++ b/website/docs/r/lakeformation_permissions.html.markdown @@ -22,7 +22,7 @@ resource "aws_lakeformation_permissions" "test" { permissions = ["ALL"] data_location { - resource_arn = aws_lakeformation_resource.test.resource_arn + arn = aws_lakeformation_resource.test.arn } } ``` @@ -45,12 +45,8 @@ resource "aws_lakeformation_permissions" "test" { The following arguments are required: -* `permissions` – (Required) List of permissions granted to the principal. Valid values include `ALL`, `ALTER`, `CREATE_DATABASE`, `CREATE_TABLE`, `DATA_LOCATION_ACCESS`, `DELETE`, `DESCRIBE`, `DROP`, `INSERT`, and `SELECT`. For details on each permission, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html). -* `principal_arn` – (Required) Principal to be granted the permissions on the resource. Supported principals are IAM users or IAM roles. - -The following arguments are optional: - -* `catalog_id` – (Optional) Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. +* `permissions` – (Required) List of permissions granted to the principal. Valid values may include `ALL`, `ALTER`, `CREATE_DATABASE`, `CREATE_TABLE`, `DATA_LOCATION_ACCESS`, `DELETE`, `DESCRIBE`, `DROP`, `INSERT`, and `SELECT`. For details on each permission, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html). +* `principal` – (Required) Principal to be granted the permissions on the resource. Supported principals include IAM users and IAM roles. One of the following is required: @@ -60,11 +56,16 @@ One of the following is required: * `table` - (Optional) Configuration block for a table resource. Detailed below. * `table_with_columns` - (Optional) Configuration block for a table with columns resource. Detailed below. +The following arguments are optional: + +* `catalog_id` – (Optional) Identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. +* `permissions_with_grant_option` - (Optional) Subset of `permissions` which the principal can pass. + ### data_location The following argument is required: -* `resource_arn` – (Required) Amazon Resource Name (ARN) that uniquely identifies the data location resource. +* `arn` – (Required) Amazon Resource Name (ARN) that uniquely identifies the data location resource. The following argument is optional: @@ -86,11 +87,14 @@ The following argument is required: * `database_name` – (Required) Name of the database for the table. Unique to a Data Catalog. +At least one of the following is required: + +* `name` - (Optional) Name of the table. +* `wildcard` - (Optional) Whether to use a wildcard representing every table under a database. Defaults to `false`. + The following arguments are optional: * `catalog_id` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. -* `name` - (Optional) Name of the table. 
At least one of `name` or `wildcard` is required. -* `wildcard` - (Optional) Whether to use a wildcard representing every table under a database. At least one of `name` or `wildcard` is required. Defaults to `false`. ### table_with_columns @@ -99,11 +103,14 @@ The following arguments are required: * `database_name` – (Required) Name of the database for the table with columns resource. Unique to the Data Catalog. * `name` – (Required) Name of the table resource. +At least one of the following is required: + +* `column_names` - (Optional) List of column names for the table. +* `excluded_column_names` - (Optional) List of column names for the table to exclude. + The following arguments are optional: * `catalog_id` - (Optional) Identifier for the Data Catalog. By default, it is the account ID of the caller. -* `column_names` - (Optional) List of column names for the table. At least one of `column_names` or `excluded_column_names` is required. -* `excluded_column_names` - (Optional) List of column names for the table to exclude. At least one of `column_names` or `excluded_column_names` is required. ## Attributes Reference diff --git a/website/docs/r/lakeformation_resource.html.markdown b/website/docs/r/lakeformation_resource.html.markdown index 22894ea64d2..27eb12fc6dc 100644 --- a/website/docs/r/lakeformation_resource.html.markdown +++ b/website/docs/r/lakeformation_resource.html.markdown @@ -20,13 +20,13 @@ data "aws_s3_bucket" "example" { } resource "aws_lakeformation_resource" "example" { - resource_arn = data.aws_s3_bucket.example.arn + arn = data.aws_s3_bucket.example.arn } ``` ## Argument Reference -* `resource_arn` – (Required) Amazon Resource Name (ARN) of the resource, an S3 path. +* `arn` – (Required) Amazon Resource Name (ARN) of the resource, an S3 path. * `role_arn` – (Optional) Role that has read/write access to the resource. If not provided, the Lake Formation service-linked role must exist and is used. ~> **NOTE:** AWS does not support registering an S3 location with an IAM role and subsequently updating the S3 location registration to a service-linked role. 
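Taken together, the renames in this patch align the registration, grant, and lookup surfaces: `aws_lakeformation_resource` now takes `arn`, and `aws_lakeformation_permissions` takes `principal` with an `arn` inside `data_location`. A minimal sketch of the three pieces working together, assuming the argument names documented above (the bucket name and workflow role are illustrative):

```hcl
data "aws_s3_bucket" "example" {
  bucket = "example-data-lake"
}

# Register the S3 location with Lake Formation.
resource "aws_lakeformation_resource" "example" {
  arn = data.aws_s3_bucket.example.arn
}

# Grant the workflow role access to the registered location.
resource "aws_lakeformation_permissions" "example" {
  principal   = aws_iam_role.workflow_role.arn
  permissions = ["DATA_LOCATION_ACCESS"]

  data_location {
    arn = aws_lakeformation_resource.example.arn
  }
}

# Read the explicit grant back for the same principal.
data "aws_lakeformation_permissions" "example" {
  principal = aws_iam_role.workflow_role.arn

  data_location {
    arn = aws_lakeformation_resource.example.arn
  }
}
```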
From 7824d0b91b67c4075df2dab86b7023e9af5543dd Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Fri, 18 Dec 2020 10:04:51 -0800 Subject: [PATCH 0323/1212] Separates GITHUB_TOKEN preCheck from CodePipeline service support preCheck --- aws/provider_test.go | 6 +++ aws/resource_aws_codepipeline_test.go | 53 +++++++++++++------ aws/resource_aws_codepipeline_webhook_test.go | 30 +++++++++-- 3 files changed, 69 insertions(+), 20 deletions(-) diff --git a/aws/provider_test.go b/aws/provider_test.go index 8cbab7ba700..b90be7d7c9f 100644 --- a/aws/provider_test.go +++ b/aws/provider_test.go @@ -779,6 +779,12 @@ func testAccPreCheckIamServiceLinkedRole(t *testing.T, pathPrefix string) { } } +func testAccEnvironmentVariableSetPreCheck(variable string, t *testing.T) { + if os.Getenv(variable) == "" { + t.Skipf("skipping tests; environment variable %s must be set", variable) + } +} + func testAccAlternateAccountProviderConfig() string { //lintignore:AT004 return fmt.Sprintf(` diff --git a/aws/resource_aws_codepipeline_test.go b/aws/resource_aws_codepipeline_test.go index c58d4182df6..c213ffe45ce 100644 --- a/aws/resource_aws_codepipeline_test.go +++ b/aws/resource_aws_codepipeline_test.go @@ -23,7 +23,11 @@ func TestAccAWSCodePipeline_basic(t *testing.T) { resourceName := "aws_codepipeline.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCodePipeline(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) + testAccPreCheckAWSCodePipelineSupported(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCodePipelineDestroy, Steps: []resource.TestStep{ @@ -131,7 +135,11 @@ func TestAccAWSCodePipeline_disappears(t *testing.T) { resourceName := "aws_codepipeline.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCodePipeline(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) + testAccPreCheckAWSCodePipelineSupported(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCodePipelineDestroy, Steps: []resource.TestStep{ @@ -155,7 +163,11 @@ func TestAccAWSCodePipeline_emptyStageArtifacts(t *testing.T) { resourceName := "aws_codepipeline.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCodePipeline(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) + testAccPreCheckAWSCodePipelineSupported(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCodePipelineDestroy, Steps: []resource.TestStep{ @@ -198,7 +210,11 @@ func TestAccAWSCodePipeline_deployWithServiceRole(t *testing.T) { resourceName := "aws_codepipeline.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCodePipeline(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) + testAccPreCheckAWSCodePipelineSupported(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCodePipelineDestroy, Steps: []resource.TestStep{ @@ -232,7 +248,11 @@ func TestAccAWSCodePipeline_tags(t *testing.T) { resourceName := "aws_codepipeline.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCodePipeline(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) + 
testAccPreCheckAWSCodePipelineSupported(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCodePipelineDestroy, Steps: []resource.TestStep{ @@ -298,7 +318,8 @@ func TestAccAWSCodePipeline_multiregion_basic(t *testing.T) { PreCheck: func() { testAccPreCheck(t) testAccMultipleRegionPreCheck(t, 2) - testAccPreCheckAWSCodePipeline(t, testAccGetAlternateRegion()) + testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) + testAccPreCheckAWSCodePipelineSupported(t, testAccGetAlternateRegion()) }, ProviderFactories: testAccProviderFactoriesAlternate(&providers), CheckDestroy: testAccCheckAWSCodePipelineDestroy, @@ -344,7 +365,8 @@ func TestAccAWSCodePipeline_multiregion_Update(t *testing.T) { PreCheck: func() { testAccPreCheck(t) testAccMultipleRegionPreCheck(t, 2) - testAccPreCheckAWSCodePipeline(t, testAccGetAlternateRegion()) + testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) + testAccPreCheckAWSCodePipelineSupported(t, testAccGetAlternateRegion()) }, ProviderFactories: testAccProviderFactoriesAlternate(&providers), CheckDestroy: testAccCheckAWSCodePipelineDestroy, @@ -404,7 +426,8 @@ func TestAccAWSCodePipeline_multiregion_ConvertSingleRegion(t *testing.T) { PreCheck: func() { testAccPreCheck(t) testAccMultipleRegionPreCheck(t, 2) - testAccPreCheckAWSCodePipeline(t, testAccGetAlternateRegion()) + testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) + testAccPreCheckAWSCodePipelineSupported(t, testAccGetAlternateRegion()) }, ProviderFactories: testAccProviderFactoriesAlternate(&providers), CheckDestroy: testAccCheckAWSCodePipelineDestroy, @@ -469,7 +492,11 @@ func TestAccAWSCodePipeline_WithNamespace(t *testing.T) { resourceName := "aws_codepipeline.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCodePipeline(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) + testAccPreCheckAWSCodePipelineSupported(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCodePipelineDestroy, Steps: []resource.TestStep{ @@ -503,7 +530,7 @@ func TestAccAWSCodePipeline_WithCodeStarConnection(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) - testAccPartitionHasServicePreCheck(codepipeline.EndpointsID, t) + testAccPreCheckAWSCodePipelineSupported(t) testAccPartitionHasServicePreCheck(codestarconnections.EndpointsID, t) }, Providers: testAccProviders, @@ -586,11 +613,7 @@ func testAccCheckAWSCodePipelineDestroy(s *terraform.State) error { return nil } -func testAccPreCheckAWSCodePipeline(t *testing.T, regions ...string) { - if os.Getenv("GITHUB_TOKEN") == "" { - t.Skip("Environment variable GITHUB_TOKEN is not set") - } - +func testAccPreCheckAWSCodePipelineSupported(t *testing.T, regions ...string) { regions = append(regions, testAccGetRegion()) for _, region := range regions { conf := &Config{ diff --git a/aws/resource_aws_codepipeline_webhook_test.go b/aws/resource_aws_codepipeline_webhook_test.go index 64673fbb2ad..cf13fdc8900 100644 --- a/aws/resource_aws_codepipeline_webhook_test.go +++ b/aws/resource_aws_codepipeline_webhook_test.go @@ -20,7 +20,11 @@ func TestAccAWSCodePipelineWebhook_basic(t *testing.T) { resourceName := "aws_codepipeline_webhook.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCodePipeline(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) + 
testAccPreCheckAWSCodePipelineSupported(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCodePipelineDestroy, Steps: []resource.TestStep{ @@ -51,7 +55,11 @@ func TestAccAWSCodePipelineWebhook_ipAuth(t *testing.T) { resourceName := "aws_codepipeline_webhook.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCodePipeline(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) + testAccPreCheckAWSCodePipelineSupported(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCodePipelineDestroy, Steps: []resource.TestStep{ @@ -82,7 +90,11 @@ func TestAccAWSCodePipelineWebhook_unauthenticated(t *testing.T) { resourceName := "aws_codepipeline_webhook.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCodePipeline(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) + testAccPreCheckAWSCodePipelineSupported(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCodePipelineDestroy, Steps: []resource.TestStep{ @@ -111,7 +123,11 @@ func TestAccAWSCodePipelineWebhook_tags(t *testing.T) { resourceName := "aws_codepipeline_webhook.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCodePipeline(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) + testAccPreCheckAWSCodePipelineSupported(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCodePipelineDestroy, Steps: []resource.TestStep{ @@ -171,7 +187,11 @@ func TestAccAWSCodePipelineWebhook_UpdateAuthenticationConfiguration_SecretToken resourceName := "aws_codepipeline_webhook.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCodePipeline(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) + testAccPreCheckAWSCodePipelineSupported(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCodePipelineDestroy, Steps: []resource.TestStep{ From 70e0cf18dced0b32f1d100caec0ca5cfdb201d36 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 18 Dec 2020 13:22:13 -0500 Subject: [PATCH 0324/1212] resource/lakeformation: Fix linter issues --- aws/data_source_aws_lakeformation_permissions.go | 8 ++++++++ aws/resource_aws_lakeformation_permissions.go | 8 ++++++++ website/docs/d/lakeformation_permissions.html.markdown | 4 ++-- .../docs/r/lakeformation_data_lake_settings.html.markdown | 2 +- website/docs/r/lakeformation_permissions.html.markdown | 4 ++-- 5 files changed, 21 insertions(+), 5 deletions(-) diff --git a/aws/data_source_aws_lakeformation_permissions.go b/aws/data_source_aws_lakeformation_permissions.go index cdbbcab7d84..9851ac0033d 100644 --- a/aws/data_source_aws_lakeformation_permissions.go +++ b/aws/data_source_aws_lakeformation_permissions.go @@ -7,6 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lakeformation" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -192,6 +193,7 @@ func dataSourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta inte } return !lastPage }) + if err != nil { if isAWSErr(err, lakeformation.ErrCodeInvalidInputException, 
"Invalid principal") { return resource.RetryableError(err) @@ -201,6 +203,12 @@ func dataSourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta inte return nil }) + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, lakeformation.ErrCodeEntityNotFoundException) { + log.Printf("[WARN] Resource Lake Formation permissions (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if err != nil { return fmt.Errorf("error reading Lake Formation permissions: %w", err) } diff --git a/aws/resource_aws_lakeformation_permissions.go b/aws/resource_aws_lakeformation_permissions.go index 72c446df8fb..48c37f9b70b 100644 --- a/aws/resource_aws_lakeformation_permissions.go +++ b/aws/resource_aws_lakeformation_permissions.go @@ -7,6 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lakeformation" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -269,6 +270,7 @@ func resourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta interf } return !lastPage }) + if err != nil { if isAWSErr(err, lakeformation.ErrCodeInvalidInputException, "Invalid principal") { return resource.RetryableError(err) @@ -278,6 +280,12 @@ func resourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta interf return nil }) + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, lakeformation.ErrCodeEntityNotFoundException) { + log.Printf("[WARN] Resource Lake Formation permissions (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if err != nil { return fmt.Errorf("error reading Lake Formation permissions: %w", err) } diff --git a/website/docs/d/lakeformation_permissions.html.markdown b/website/docs/d/lakeformation_permissions.html.markdown index 6e84589f956..69872f971a2 100644 --- a/website/docs/d/lakeformation_permissions.html.markdown +++ b/website/docs/d/lakeformation_permissions.html.markdown @@ -18,7 +18,7 @@ Get permissions for a principal to access metadata in the Data Catalog and data ```hcl data "aws_lakeformation_permissions" "test" { - principal_arn = aws_iam_role.workflow_role.arn + principal = aws_iam_role.workflow_role.arn data_location { arn = aws_lakeformation_resource.test.arn @@ -30,7 +30,7 @@ data "aws_lakeformation_permissions" "test" { ```hcl data "aws_lakeformation_permissions" "test" { - role = aws_iam_role.workflow_role.arn + principal = aws_iam_role.workflow_role.arn database { name = aws_glue_catalog_database.test.name diff --git a/website/docs/r/lakeformation_data_lake_settings.html.markdown b/website/docs/r/lakeformation_data_lake_settings.html.markdown index 509e78aa29d..0b4123bc10e 100644 --- a/website/docs/r/lakeformation_data_lake_settings.html.markdown +++ b/website/docs/r/lakeformation_data_lake_settings.html.markdown @@ -50,7 +50,7 @@ The following arguments are optional: * `create_table_default_permissions` - (Optional) Up to three configuration blocks of principal permissions for default create table permissions. Detailed below. * `trusted_resource_owners` – (Optional) List of the resource-owning account IDs that the caller's account can use to share their user access details (user ARNs). -~> **NOTE:** Although optional, not including `admins`, `create_database_default_permissions`, `create_table_default_permissions`, and/or `trusted_resource_owners` results in the setting being cleared. 
+~> **NOTE:** Although optional, not including `admins`, `create_database_default_permissions`, `create_table_default_permissions`, and/or `trusted_resource_owners` results in the setting being cleared. ### create_database_default_permissions diff --git a/website/docs/r/lakeformation_permissions.html.markdown b/website/docs/r/lakeformation_permissions.html.markdown index 05acb29d66a..610761119f5 100644 --- a/website/docs/r/lakeformation_permissions.html.markdown +++ b/website/docs/r/lakeformation_permissions.html.markdown @@ -18,8 +18,8 @@ Grants permissions to the principal to access metadata in the Data Catalog and d ```hcl resource "aws_lakeformation_permissions" "test" { - principal_arn = aws_iam_role.workflow_role.arn - permissions = ["ALL"] + principal = aws_iam_role.workflow_role.arn + permissions = ["ALL"] data_location { arn = aws_lakeformation_resource.test.arn From 3ef409de3b22d2d7ede465c908594a6d666324ad Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 18 Dec 2020 13:36:45 -0500 Subject: [PATCH 0325/1212] resource/lakeformation: Fix semgrep issues --- ...ta_source_aws_lakeformation_permissions.go | 16 ++++++++-- aws/data_source_aws_lakeformation_resource.go | 8 +++++ ...ce_aws_lakeformation_data_lake_settings.go | 3 -- aws/resource_aws_lakeformation_permissions.go | 32 +++++++++++++------ ...urce_aws_lakeformation_permissions_test.go | 3 -- 5 files changed, 44 insertions(+), 18 deletions(-) diff --git a/aws/data_source_aws_lakeformation_permissions.go b/aws/data_source_aws_lakeformation_permissions.go index 9851ac0033d..7773b8a552e 100644 --- a/aws/data_source_aws_lakeformation_permissions.go +++ b/aws/data_source_aws_lakeformation_permissions.go @@ -182,8 +182,7 @@ func dataSourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta inte var principalResourcePermissions []*lakeformation.PrincipalResourcePermissions err := resource.Retry(2*time.Minute, func() *resource.RetryError { - var err error - err = conn.ListPermissionsPages(input, func(resp *lakeformation.ListPermissionsOutput, lastPage bool) bool { + err := conn.ListPermissionsPages(input, func(resp *lakeformation.ListPermissionsOutput, lastPage bool) bool { for _, permission := range resp.PrincipalResourcePermissions { if permission == nil { continue @@ -203,6 +202,19 @@ func dataSourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta inte return nil }) + if isResourceTimeoutError(err) { + err = conn.ListPermissionsPages(input, func(resp *lakeformation.ListPermissionsOutput, lastPage bool) bool { + for _, permission := range resp.PrincipalResourcePermissions { + if permission == nil { + continue + } + + principalResourcePermissions = append(principalResourcePermissions, permission) + } + return !lastPage + }) + } + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, lakeformation.ErrCodeEntityNotFoundException) { log.Printf("[WARN] Resource Lake Formation permissions (%s) not found, removing from state", d.Id()) d.SetId("") diff --git a/aws/data_source_aws_lakeformation_resource.go b/aws/data_source_aws_lakeformation_resource.go index 0e39552789e..ebb9ea88bd7 100644 --- a/aws/data_source_aws_lakeformation_resource.go +++ b/aws/data_source_aws_lakeformation_resource.go @@ -2,10 +2,12 @@ package aws import ( "fmt" + "log" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lakeformation" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -42,6 +44,12 @@ func dataSourceAwsLakeFormationResourceRead(d 
*schema.ResourceData, meta interfa output, err := conn.DescribeResource(input) + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, lakeformation.ErrCodeEntityNotFoundException) { + log.Printf("[WARN] Resource Lake Formation Resource (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if err != nil { return fmt.Errorf("error reading data source, Lake Formation Resource (arn: %s): %w", aws.StringValue(input.ResourceArn), err) } diff --git a/aws/resource_aws_lakeformation_data_lake_settings.go b/aws/resource_aws_lakeformation_data_lake_settings.go index 7b1644f3b3e..8e3c18d01f6 100644 --- a/aws/resource_aws_lakeformation_data_lake_settings.go +++ b/aws/resource_aws_lakeformation_data_lake_settings.go @@ -142,9 +142,6 @@ func resourceAwsLakeFormationDataLakeSettingsCreate(d *schema.ResourceData, meta if isAWSErr(err, lakeformation.ErrCodeConcurrentModificationException, "") { return resource.RetryableError(err) } - if isAWSErr(err, lakeformation.ErrCodeOperationTimeoutException, "") { - return resource.RetryableError(err) - } return resource.NonRetryableError(fmt.Errorf("error creating Lake Formation data lake settings: %w", err)) } diff --git a/aws/resource_aws_lakeformation_permissions.go b/aws/resource_aws_lakeformation_permissions.go index 48c37f9b70b..151b83f9ef9 100644 --- a/aws/resource_aws_lakeformation_permissions.go +++ b/aws/resource_aws_lakeformation_permissions.go @@ -211,9 +211,6 @@ func resourceAwsLakeFormationPermissionsCreate(d *schema.ResourceData, meta inte if isAWSErr(err, lakeformation.ErrCodeConcurrentModificationException, "") { return resource.RetryableError(err) } - if isAWSErr(err, lakeformation.ErrCodeOperationTimeoutException, "") { - return resource.RetryableError(err) - } if isAWSErr(err, "AccessDeniedException", "is not authorized to access requested permissions") { return resource.RetryableError(err) } @@ -259,8 +256,7 @@ func resourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta interf var principalResourcePermissions []*lakeformation.PrincipalResourcePermissions err := resource.Retry(2*time.Minute, func() *resource.RetryError { - var err error - err = conn.ListPermissionsPages(input, func(resp *lakeformation.ListPermissionsOutput, lastPage bool) bool { + err := conn.ListPermissionsPages(input, func(resp *lakeformation.ListPermissionsOutput, lastPage bool) bool { for _, permission := range resp.PrincipalResourcePermissions { if permission == nil { continue @@ -280,6 +276,19 @@ func resourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta interf return nil }) + if isResourceTimeoutError(err) { + err = conn.ListPermissionsPages(input, func(resp *lakeformation.ListPermissionsOutput, lastPage bool) bool { + for _, permission := range resp.PrincipalResourcePermissions { + if permission == nil { + continue + } + + principalResourcePermissions = append(principalResourcePermissions, permission) + } + return !lastPage + }) + } + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, lakeformation.ErrCodeEntityNotFoundException) { log.Printf("[WARN] Resource Lake Formation permissions (%s) not found, removing from state", d.Id()) d.SetId("") @@ -359,15 +368,16 @@ func resourceAwsLakeFormationPermissionsDelete(d *schema.ResourceData, meta inte if isAWSErr(err, lakeformation.ErrCodeConcurrentModificationException, "") { return resource.RetryableError(err) } - if isAWSErr(err, lakeformation.ErrCodeOperationTimeoutException, "") { - return resource.RetryableError(err) - } return 
resource.NonRetryableError(fmt.Errorf("unable to revoke Lake Formation Permissions: %w", err)) } return nil }) + if isResourceTimeoutError(err) { + _, err = conn.RevokePermissions(input) + } + if err != nil { return fmt.Errorf("unable to revoke LakeFormation Permissions (input: %v): %w", input, err) } @@ -378,8 +388,10 @@ func resourceAwsLakeFormationPermissionsDelete(d *schema.ResourceData, meta inte func expandLakeFormationResource(d *schema.ResourceData, squashTableWithColumns bool) *lakeformation.Resource { res := &lakeformation.Resource{} - if v, ok := d.GetOk("catalog_resource"); ok && v.(bool) { - res.Catalog = &lakeformation.CatalogResource{} + if v, ok := d.GetOk("catalog_resource"); ok { + if v.(bool) { + res.Catalog = &lakeformation.CatalogResource{} + } } if v, ok := d.GetOk("data_location"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { diff --git a/aws/resource_aws_lakeformation_permissions_test.go b/aws/resource_aws_lakeformation_permissions_test.go index bfaaeaee475..2f96730804d 100644 --- a/aws/resource_aws_lakeformation_permissions_test.go +++ b/aws/resource_aws_lakeformation_permissions_test.go @@ -282,9 +282,6 @@ func testAccCheckAWSLakeFormationPermissionsExists(resourceName string) resource if isAWSErr(err, lakeformation.ErrCodeConcurrentModificationException, "") { return resource.RetryableError(err) } - if isAWSErr(err, lakeformation.ErrCodeOperationTimeoutException, "") { - return resource.RetryableError(err) - } if isAWSErr(err, "AccessDeniedException", "is not authorized to access requested permissions") { return resource.RetryableError(err) } From 7ca07395ccdda788cf1d2f069bf558ca24bdc0ca Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 18 Dec 2020 14:57:55 -0500 Subject: [PATCH 0326/1212] Update CHANGELOG with Lake Formation --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0354baa6db9..5a289ec5024 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,9 +4,12 @@ FEATURES * **New Data Source:** `aws_ec2_managed_prefix_list` [GH-16738] * **New Data Source:** `aws_lakeformation_data_lake_settings` [GH-13250] +* **New Data Source:** `aws_lakeformation_permissions` [GH-13396] +* **New Data Source:** `aws_lakeformation_resource` [GH-13396] * **New Resource:** `aws_codestarconnections_connection` [GH-15990] * **New Resource:** `aws_ec2_managed_prefix_list` [GH-14068] * **New Resource:** `aws_lakeformation_data_lake_settings` [GH-13250] +* **New Resource:** `aws_lakeformation_permissions` [GH-13396] * **New Resource:** `aws_lakeformation_resource` [GH-13267] ENHANCEMENTS From 260066919d1a655b9d1ce65dcb679c84d652f9eb Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Fri, 18 Dec 2020 13:27:36 -0800 Subject: [PATCH 0327/1212] Update CHANGELOG for #16678 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5a289ec5024..bc65264c3d4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ ENHANCEMENTS * data-source/aws_launch_configuration: Add `metadata_options` attribute [GH-14637] * data-source/aws_launch_template: Add `enclave_options` attribute (Nitro Enclaves) [GH-16361] * data-source/aws_network_interface: Add `association` `carrier_ip` and `customer_owned_ip` attributes [GH-16723] +* resource/aws_autoscaling_group: Adds support for Instance Refresh [GH-16678] * resource/aws_eip: Add `carrier_ip` attribute [GH-16724] * resource/aws_instance: Add `enclave_options` configuration block (Nitro Enclaves) [GH-16361] * resource/aws_instance: Add 
`ebs_block_device` and `root_block_device` configuration block `throughput` attribute [GH-16620] From 233e345bd89af1dcd65110e329470f8f9114add5 Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Fri, 18 Dec 2020 13:56:17 -0800 Subject: [PATCH 0328/1212] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bc65264c3d4..50d17f4cb9b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## 3.22.0 (unreleased) +## 3.22.0 (Unreleased) FEATURES From 9c9a116a857fb838a0e7d1cfbf420c2524f0abe1 Mon Sep 17 00:00:00 2001 From: tf-release-bot Date: Fri, 18 Dec 2020 21:57:23 +0000 Subject: [PATCH 0329/1212] v3.22.0 --- CHANGELOG.md | 70 ++++++++++++++++++++++++++-------------------------- 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 50d17f4cb9b..4e1756591c5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,47 +1,47 @@ -## 3.22.0 (Unreleased) +## 3.22.0 (December 18, 2020) FEATURES -* **New Data Source:** `aws_ec2_managed_prefix_list` [GH-16738] -* **New Data Source:** `aws_lakeformation_data_lake_settings` [GH-13250] -* **New Data Source:** `aws_lakeformation_permissions` [GH-13396] -* **New Data Source:** `aws_lakeformation_resource` [GH-13396] -* **New Resource:** `aws_codestarconnections_connection` [GH-15990] -* **New Resource:** `aws_ec2_managed_prefix_list` [GH-14068] -* **New Resource:** `aws_lakeformation_data_lake_settings` [GH-13250] -* **New Resource:** `aws_lakeformation_permissions` [GH-13396] -* **New Resource:** `aws_lakeformation_resource` [GH-13267] +* **New Data Source:** `aws_ec2_managed_prefix_list` ([#16738](https://github.com/hashicorp/terraform-provider-aws/issues/16738)) +* **New Data Source:** `aws_lakeformation_data_lake_settings` ([#13250](https://github.com/hashicorp/terraform-provider-aws/issues/13250)) +* **New Data Source:** `aws_lakeformation_permissions` ([#13396](https://github.com/hashicorp/terraform-provider-aws/issues/13396)) +* **New Data Source:** `aws_lakeformation_resource` ([#13396](https://github.com/hashicorp/terraform-provider-aws/issues/13396)) +* **New Resource:** `aws_codestarconnections_connection` ([#15990](https://github.com/hashicorp/terraform-provider-aws/issues/15990)) +* **New Resource:** `aws_ec2_managed_prefix_list` ([#14068](https://github.com/hashicorp/terraform-provider-aws/issues/14068)) +* **New Resource:** `aws_lakeformation_data_lake_settings` ([#13250](https://github.com/hashicorp/terraform-provider-aws/issues/13250)) +* **New Resource:** `aws_lakeformation_permissions` ([#13396](https://github.com/hashicorp/terraform-provider-aws/issues/13396)) +* **New Resource:** `aws_lakeformation_resource` ([#13267](https://github.com/hashicorp/terraform-provider-aws/issues/13267)) ENHANCEMENTS -* data-source/aws_autoscaling_group: Adds `launch_template` attribute [GH-16297] -* data-source/aws_availability_zone: Add `parent_zone_id`, `parent_zone_name`, and `zone_type` attributes (additional support for Local and Wavelength Zones) [GH-16770] -* data-source/aws_eip: Add `carrier_ip` attribute [GH-16724] -* data-source/aws_instance: Add `enclave_options` attribute (Nitro Enclaves) [GH-16361] -* data-source/aws_instance: Add `ebs_block_device` and `root_block_device` configuration block `throughput` attribute [GH-16620] -* data-source/aws_launch_configuration: Add `metadata_options` attribute [GH-14637] -* data-source/aws_launch_template: Add `enclave_options` attribute (Nitro Enclaves) [GH-16361] -* 
data-source/aws_network_interface: Add `association` `carrier_ip` and `customer_owned_ip` attributes [GH-16723] -* resource/aws_autoscaling_group: Adds support for Instance Refresh [GH-16678] -* resource/aws_eip: Add `carrier_ip` attribute [GH-16724] -* resource/aws_instance: Add `enclave_options` configuration block (Nitro Enclaves) [GH-16361] -* resource/aws_instance: Add `ebs_block_device` and `root_block_device` configuration block `throughput` attribute [GH-16620] -* resource/aws_kinesis_firehose_delivery_stream: Mark `http_endpoint_configuration` `access_key` as sensitive [GH-16684] -* resource/aws_launch_configuration: Add `metadata_options` configuration block [GH-14637] -* resource/aws_launch_template: Add `enclave_options` configuration block (Nitro Enclaves) [GH-16361] -* resource/aws_vpn_connection: Add support for VPN tunnel options and enable acceleration, DPDTimeoutAction, StartupAction, local/remote IPv4/IPv6 network CIDR and tunnel inside IP version. [GH-14740] +* data-source/aws_autoscaling_group: Adds `launch_template` attribute ([#16297](https://github.com/hashicorp/terraform-provider-aws/issues/16297)) +* data-source/aws_availability_zone: Add `parent_zone_id`, `parent_zone_name`, and `zone_type` attributes (additional support for Local and Wavelength Zones) ([#16770](https://github.com/hashicorp/terraform-provider-aws/issues/16770)) +* data-source/aws_eip: Add `carrier_ip` attribute ([#16724](https://github.com/hashicorp/terraform-provider-aws/issues/16724)) +* data-source/aws_instance: Add `enclave_options` attribute (Nitro Enclaves) ([#16361](https://github.com/hashicorp/terraform-provider-aws/issues/16361)) +* data-source/aws_instance: Add `ebs_block_device` and `root_block_device` configuration block `throughput` attribute ([#16620](https://github.com/hashicorp/terraform-provider-aws/issues/16620)) +* data-source/aws_launch_configuration: Add `metadata_options` attribute ([#14637](https://github.com/hashicorp/terraform-provider-aws/issues/14637)) +* data-source/aws_launch_template: Add `enclave_options` attribute (Nitro Enclaves) ([#16361](https://github.com/hashicorp/terraform-provider-aws/issues/16361)) +* data-source/aws_network_interface: Add `association` `carrier_ip` and `customer_owned_ip` attributes ([#16723](https://github.com/hashicorp/terraform-provider-aws/issues/16723)) +* resource/aws_autoscaling_group: Adds support for Instance Refresh ([#16678](https://github.com/hashicorp/terraform-provider-aws/issues/16678)) +* resource/aws_eip: Add `carrier_ip` attribute ([#16724](https://github.com/hashicorp/terraform-provider-aws/issues/16724)) +* resource/aws_instance: Add `enclave_options` configuration block (Nitro Enclaves) ([#16361](https://github.com/hashicorp/terraform-provider-aws/issues/16361)) +* resource/aws_instance: Add `ebs_block_device` and `root_block_device` configuration block `throughput` attribute ([#16620](https://github.com/hashicorp/terraform-provider-aws/issues/16620)) +* resource/aws_kinesis_firehose_delivery_stream: Mark `http_endpoint_configuration` `access_key` as sensitive ([#16684](https://github.com/hashicorp/terraform-provider-aws/issues/16684)) +* resource/aws_launch_configuration: Add `metadata_options` configuration block ([#14637](https://github.com/hashicorp/terraform-provider-aws/issues/14637)) +* resource/aws_launch_template: Add `enclave_options` configuration block (Nitro Enclaves) ([#16361](https://github.com/hashicorp/terraform-provider-aws/issues/16361)) +* resource/aws_vpn_connection: Add support for VPN tunnel 
options and enable acceleration, DPDTimeoutAction, StartupAction, local/remote IPv4/IPv6 network CIDR and tunnel inside IP version. ([#14740](https://github.com/hashicorp/terraform-provider-aws/issues/14740)) BUG FIXES -* data-source/aws_ec2_coip_pools: Ensure all results from large environments are returned [GH-16669] -* data-source/aws_ec2_local_gateways: Ensure all results from large environments are returned [GH-16669] -* data-source/aws_ec2_local_gateway_route_tables: Ensure all results from large environments are returned [GH-16669] -* data-source/aws_ec2_local_gateway_virtual_interface_groups: Ensure all results from large environments are returned [GH-16669] -* data-source/aws_prefix_list: Using `name` argument no longer overrides other arguments [GH-16739] -* resource/aws_db_instance: Fix missing `db_subnet_group_name` in API request when using `restore_to_point_in_time` [GH-16830] -* resource/aws_eip_association: Handle eventual consistency when creating resource [GH-16808] -* resource/aws_main_route_table_association: Prevent crash on creation when VPC main route table association is not found [GH-16680] -* resource/aws_workspaces_workspace: Prevent panic from terminated WorkSpace [GH-16692] +* data-source/aws_ec2_coip_pools: Ensure all results from large environments are returned ([#16669](https://github.com/hashicorp/terraform-provider-aws/issues/16669)) +* data-source/aws_ec2_local_gateways: Ensure all results from large environments are returned ([#16669](https://github.com/hashicorp/terraform-provider-aws/issues/16669)) +* data-source/aws_ec2_local_gateway_route_tables: Ensure all results from large environments are returned ([#16669](https://github.com/hashicorp/terraform-provider-aws/issues/16669)) +* data-source/aws_ec2_local_gateway_virtual_interface_groups: Ensure all results from large environments are returned ([#16669](https://github.com/hashicorp/terraform-provider-aws/issues/16669)) +* data-source/aws_prefix_list: Using `name` argument no longer overrides other arguments ([#16739](https://github.com/hashicorp/terraform-provider-aws/issues/16739)) +* resource/aws_db_instance: Fix missing `db_subnet_group_name` in API request when using `restore_to_point_in_time` ([#16830](https://github.com/hashicorp/terraform-provider-aws/issues/16830)) +* resource/aws_eip_association: Handle eventual consistency when creating resource ([#16808](https://github.com/hashicorp/terraform-provider-aws/issues/16808)) +* resource/aws_main_route_table_association: Prevent crash on creation when VPC main route table association is not found ([#16680](https://github.com/hashicorp/terraform-provider-aws/issues/16680)) +* resource/aws_workspaces_workspace: Prevent panic from terminated WorkSpace ([#16692](https://github.com/hashicorp/terraform-provider-aws/issues/16692)) ## 3.21.0 (December 11, 2020) From a3bcc7e5f30a920d384e1065c8e1d0a34208f07c Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Fri, 18 Dec 2020 19:26:12 -0800 Subject: [PATCH 0330/1212] Makes CodeStar Connection default for CodePipeline tests. 
GITHUB_TOKEN is no longer needed for most acceptance tests --- aws/resource_aws_codepipeline_test.go | 423 +++++++++++++++++++++++------------------------ 1 file changed, 244 insertions(+), 179 deletions(-) diff --git a/aws/resource_aws_codepipeline_test.go b/aws/resource_aws_codepipeline_test.go index c213ffe45ce..b485e8effe5 100644 --- a/aws/resource_aws_codepipeline_test.go +++ b/aws/resource_aws_codepipeline_test.go @@ -16,23 +16,22 @@ import ( ) func TestAccAWSCodePipeline_basic(t *testing.T) { - githubToken := os.Getenv("GITHUB_TOKEN") - var p1, p2 codepipeline.PipelineDeclaration name := acctest.RandString(10) resourceName := "aws_codepipeline.test" + codestarConnectionResourceName := "aws_codestarconnections_connection.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) - testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) testAccPreCheckAWSCodePipelineSupported(t) + testAccPartitionHasServicePreCheck(codestarconnections.EndpointsID, t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCodePipelineDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSCodePipelineConfig_basic(name, githubToken), + Config: testAccAWSCodePipelineConfig_basic(name), Check: resource.ComposeTestCheckFunc( testAccCheckAWSCodePipelineExists(resourceName, &p1), resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.codepipeline_role", "arn"), @@ -45,17 +44,16 @@ func TestAccAWSCodePipeline_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "stage.0.action.#", "1"), resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.name", "Source"), resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.category", "Source"), - resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.owner", "ThirdParty"), - resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.provider", "GitHub"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.owner", "AWS"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.provider", "CodeStarSourceConnection"), resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.version", "1"), resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.input_artifacts.#", "0"), resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.output_artifacts.#", "1"), resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.output_artifacts.0", "test"), - resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.%", "4"), - resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.Owner", "lifesum-terraform"), - resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.Repo", "test"), - resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.Branch", "main"), - resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.OAuthToken", githubToken), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.%", "3"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.FullRepositoryId", "lifesum-terraform/test"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.BranchName", "main"), + resource.TestCheckResourceAttrPair(resourceName, "stage.0.action.0.configuration.ConnectionArn", codestarConnectionResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.role_arn", ""), resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.run_order", "1"),
resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.region", ""), @@ -81,13 +79,9 @@ func TestAccAWSCodePipeline_basic(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "stage.0.action.0.configuration.%", - "stage.0.action.0.configuration.OAuthToken", - }, }, { - Config: testAccAWSCodePipelineConfig_basicUpdated(name, githubToken), + Config: testAccAWSCodePipelineConfig_basicUpdated(name), Check: resource.ComposeTestCheckFunc( testAccCheckAWSCodePipelineExists(resourceName, &p2), @@ -99,11 +93,10 @@ func TestAccAWSCodePipeline_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.input_artifacts.#", "0"), resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.output_artifacts.#", "1"), resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.output_artifacts.0", "artifacts"), - resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.%", "4"), - resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.Owner", "test-terraform"), - resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.Repo", "test-repo"), - resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.Branch", "stable"), - resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.OAuthToken", githubToken), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.%", "3"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.FullRepositoryId", "test-terraform/test-repo"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.BranchName", "stable"), + resource.TestCheckResourceAttrPair(resourceName, "stage.0.action.0.configuration.ConnectionArn", codestarConnectionResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "stage.1.name", "Build"), resource.TestCheckResourceAttr(resourceName, "stage.1.action.#", "1"), @@ -128,8 +121,6 @@ func TestAccAWSCodePipeline_basic(t *testing.T) { } func TestAccAWSCodePipeline_disappears(t *testing.T) { - githubToken := os.Getenv("GITHUB_TOKEN") - var p codepipeline.PipelineDeclaration name := acctest.RandString(10) resourceName := "aws_codepipeline.test" @@ -137,14 +128,14 @@ func TestAccAWSCodePipeline_disappears(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) - testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) testAccPreCheckAWSCodePipelineSupported(t) + testAccPartitionHasServicePreCheck(codestarconnections.EndpointsID, t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCodePipelineDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSCodePipelineConfig_basic(name, githubToken), + Config: testAccAWSCodePipelineConfig_basic(name), Check: resource.ComposeTestCheckFunc( testAccCheckAWSCodePipelineExists(resourceName, &p), testAccCheckResourceDisappears(testAccProvider, resourceAwsCodePipeline(), resourceName), @@ -156,8 +147,6 @@ func TestAccAWSCodePipeline_disappears(t *testing.T) { } func TestAccAWSCodePipeline_emptyStageArtifacts(t *testing.T) { - githubToken := os.Getenv("GITHUB_TOKEN") - var p codepipeline.PipelineDeclaration name := acctest.RandString(10) resourceName := "aws_codepipeline.test" @@ -165,14 +154,14 @@ func TestAccAWSCodePipeline_emptyStageArtifacts(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) - 
testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) testAccPreCheckAWSCodePipelineSupported(t) + testAccPartitionHasServicePreCheck(codestarconnections.EndpointsID, t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCodePipelineDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSCodePipelineConfig_emptyStageArtifacts(name, githubToken), + Config: testAccAWSCodePipelineConfig_emptyStageArtifacts(name), Check: resource.ComposeTestCheckFunc( testAccCheckAWSCodePipelineExists(resourceName, &p), testAccMatchResourceAttrRegionalARN(resourceName, "arn", "codepipeline", regexp.MustCompile(fmt.Sprintf("test-pipeline-%s$", name))), @@ -193,18 +182,12 @@ func TestAccAWSCodePipeline_emptyStageArtifacts(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "stage.0.action.0.configuration.%", - "stage.0.action.0.configuration.OAuthToken", - }, }, }, }) } func TestAccAWSCodePipeline_deployWithServiceRole(t *testing.T) { - githubToken := os.Getenv("GITHUB_TOKEN") - var p codepipeline.PipelineDeclaration name := acctest.RandString(10) resourceName := "aws_codepipeline.test" @@ -212,14 +195,14 @@ func TestAccAWSCodePipeline_deployWithServiceRole(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) - testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) testAccPreCheckAWSCodePipelineSupported(t) + testAccPartitionHasServicePreCheck(codestarconnections.EndpointsID, t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCodePipelineDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSCodePipelineConfig_deployWithServiceRole(name, githubToken), + Config: testAccAWSCodePipelineConfig_deployWithServiceRole(name), Check: resource.ComposeTestCheckFunc( testAccCheckAWSCodePipelineExists(resourceName, &p), resource.TestCheckResourceAttr(resourceName, "stage.2.name", "Deploy"), @@ -231,18 +214,12 @@ func TestAccAWSCodePipeline_deployWithServiceRole(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "stage.0.action.0.configuration.%", - "stage.0.action.0.configuration.OAuthToken", - }, }, }, }) } func TestAccAWSCodePipeline_tags(t *testing.T) { - githubToken := os.Getenv("GITHUB_TOKEN") - var p1, p2, p3 codepipeline.PipelineDeclaration name := acctest.RandString(10) resourceName := "aws_codepipeline.test" @@ -250,14 +227,14 @@ func TestAccAWSCodePipeline_tags(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) - testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) testAccPreCheckAWSCodePipelineSupported(t) + testAccPartitionHasServicePreCheck(codestarconnections.EndpointsID, t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCodePipelineDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSCodePipelineConfigWithTags(name, githubToken, "tag1value", "tag2value"), + Config: testAccAWSCodePipelineConfigWithTags(name, "tag1value", "tag2value"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSCodePipelineExists(resourceName, &p1), resource.TestCheckResourceAttr(resourceName, "tags.%", "3"), @@ -270,13 +247,9 @@ func TestAccAWSCodePipeline_tags(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "stage.0.action.0.configuration.%", - "stage.0.action.0.configuration.OAuthToken", - }, }, { - Config: testAccAWSCodePipelineConfigWithTags(name, 
githubToken, "tag1valueUpdate", "tag2valueUpdate"), + Config: testAccAWSCodePipelineConfigWithTags(name, "tag1valueUpdate", "tag2valueUpdate"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSCodePipelineExists(resourceName, &p2), resource.TestCheckResourceAttr(resourceName, "tags.%", "3"), @@ -289,13 +262,9 @@ func TestAccAWSCodePipeline_tags(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "stage.0.action.0.configuration.%", - "stage.0.action.0.configuration.OAuthToken", - }, }, { - Config: testAccAWSCodePipelineConfig_basic(name, githubToken), + Config: testAccAWSCodePipelineConfig_basic(name), Check: resource.ComposeTestCheckFunc( testAccCheckAWSCodePipelineExists(resourceName, &p3), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), @@ -306,8 +275,6 @@ func TestAccAWSCodePipeline_tags(t *testing.T) { } func TestAccAWSCodePipeline_multiregion_basic(t *testing.T) { - githubToken := os.Getenv("GITHUB_TOKEN") - var p codepipeline.PipelineDeclaration resourceName := "aws_codepipeline.test" var providers []*schema.Provider @@ -318,14 +285,14 @@ func TestAccAWSCodePipeline_multiregion_basic(t *testing.T) { PreCheck: func() { testAccPreCheck(t) testAccMultipleRegionPreCheck(t, 2) - testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) testAccPreCheckAWSCodePipelineSupported(t, testAccGetAlternateRegion()) + testAccPartitionHasServicePreCheck(codestarconnections.EndpointsID, t) }, ProviderFactories: testAccProviderFactoriesAlternate(&providers), CheckDestroy: testAccCheckAWSCodePipelineDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSCodePipelineConfig_multiregion(name, githubToken), + Config: testAccAWSCodePipelineConfig_multiregion(name), Check: resource.ComposeTestCheckFunc( testAccCheckAWSCodePipelineExists(resourceName, &p), resource.TestCheckResourceAttr(resourceName, "artifact_store.#", "2"), @@ -339,22 +306,16 @@ func TestAccAWSCodePipeline_multiregion_basic(t *testing.T) { ), }, { - Config: testAccAWSCodePipelineConfig_multiregion(name, githubToken), + Config: testAccAWSCodePipelineConfig_multiregion(name), ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "stage.0.action.0.configuration.%", - "stage.0.action.0.configuration.OAuthToken", - }, }, }, }) } func TestAccAWSCodePipeline_multiregion_Update(t *testing.T) { - githubToken := os.Getenv("GITHUB_TOKEN") - var p1, p2 codepipeline.PipelineDeclaration resourceName := "aws_codepipeline.test" var providers []*schema.Provider @@ -365,14 +326,14 @@ func TestAccAWSCodePipeline_multiregion_Update(t *testing.T) { PreCheck: func() { testAccPreCheck(t) testAccMultipleRegionPreCheck(t, 2) - testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) testAccPreCheckAWSCodePipelineSupported(t, testAccGetAlternateRegion()) + testAccPartitionHasServicePreCheck(codestarconnections.EndpointsID, t) }, ProviderFactories: testAccProviderFactoriesAlternate(&providers), CheckDestroy: testAccCheckAWSCodePipelineDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSCodePipelineConfig_multiregion(name, githubToken), + Config: testAccAWSCodePipelineConfig_multiregion(name), Check: resource.ComposeTestCheckFunc( testAccCheckAWSCodePipelineExists(resourceName, &p1), resource.TestCheckResourceAttr(resourceName, "artifact_store.#", "2"), @@ -386,7 +347,7 @@ func TestAccAWSCodePipeline_multiregion_Update(t *testing.T) { ), }, { - Config: testAccAWSCodePipelineConfig_multiregionUpdated(name, 
githubToken), + Config: testAccAWSCodePipelineConfig_multiregionUpdated(name), Check: resource.ComposeTestCheckFunc( testAccCheckAWSCodePipelineExists(resourceName, &p2), resource.TestCheckResourceAttr(resourceName, "artifact_store.#", "2"), @@ -400,22 +361,16 @@ func TestAccAWSCodePipeline_multiregion_Update(t *testing.T) { ), }, { - Config: testAccAWSCodePipelineConfig_multiregionUpdated(name, githubToken), + Config: testAccAWSCodePipelineConfig_multiregionUpdated(name), ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "stage.0.action.0.configuration.%", - "stage.0.action.0.configuration.OAuthToken", - }, }, }, }) } func TestAccAWSCodePipeline_multiregion_ConvertSingleRegion(t *testing.T) { - githubToken := os.Getenv("GITHUB_TOKEN") - var p1, p2 codepipeline.PipelineDeclaration resourceName := "aws_codepipeline.test" var providers []*schema.Provider @@ -426,14 +381,14 @@ func TestAccAWSCodePipeline_multiregion_ConvertSingleRegion(t *testing.T) { PreCheck: func() { testAccPreCheck(t) testAccMultipleRegionPreCheck(t, 2) - testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) testAccPreCheckAWSCodePipelineSupported(t, testAccGetAlternateRegion()) + testAccPartitionHasServicePreCheck(codestarconnections.EndpointsID, t) }, ProviderFactories: testAccProviderFactoriesAlternate(&providers), CheckDestroy: testAccCheckAWSCodePipelineDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSCodePipelineConfig_basic(name, githubToken), + Config: testAccAWSCodePipelineConfig_basic(name), Check: resource.ComposeTestCheckFunc( testAccCheckAWSCodePipelineExists(resourceName, &p1), resource.TestCheckResourceAttr(resourceName, "artifact_store.#", "1"), @@ -445,7 +400,7 @@ func TestAccAWSCodePipeline_multiregion_ConvertSingleRegion(t *testing.T) { ), }, { - Config: testAccAWSCodePipelineConfig_multiregion(name, githubToken), + Config: testAccAWSCodePipelineConfig_multiregion(name), Check: resource.ComposeTestCheckFunc( testAccCheckAWSCodePipelineExists(resourceName, &p2), resource.TestCheckResourceAttr(resourceName, "artifact_store.#", "2"), @@ -459,7 +414,7 @@ func TestAccAWSCodePipeline_multiregion_ConvertSingleRegion(t *testing.T) { ), }, { - Config: testAccAWSCodePipelineConfig_backToBasic(name, githubToken), + Config: testAccAWSCodePipelineConfig_backToBasic(name), Check: resource.ComposeTestCheckFunc( testAccCheckAWSCodePipelineExists(resourceName, &p1), resource.TestCheckResourceAttr(resourceName, "artifact_store.#", "1"), @@ -471,22 +426,16 @@ func TestAccAWSCodePipeline_multiregion_ConvertSingleRegion(t *testing.T) { ), }, { - Config: testAccAWSCodePipelineConfig_backToBasic(name, githubToken), + Config: testAccAWSCodePipelineConfig_backToBasic(name), ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "stage.0.action.0.configuration.%", - "stage.0.action.0.configuration.OAuthToken", - }, }, }, }) } func TestAccAWSCodePipeline_WithNamespace(t *testing.T) { - githubToken := os.Getenv("GITHUB_TOKEN") - var p1 codepipeline.PipelineDeclaration name := acctest.RandString(10) resourceName := "aws_codepipeline.test" @@ -494,14 +443,14 @@ func TestAccAWSCodePipeline_WithNamespace(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) - testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) testAccPreCheckAWSCodePipelineSupported(t) + testAccPartitionHasServicePreCheck(codestarconnections.EndpointsID, t) }, Providers: testAccProviders, 
CheckDestroy: testAccCheckAWSCodePipelineDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSCodePipelineConfigWithNamespace(name, githubToken), + Config: testAccAWSCodePipelineConfigWithNamespace(name), Check: resource.ComposeTestCheckFunc( testAccCheckAWSCodePipelineExists(resourceName, &p1), testAccMatchResourceAttrRegionalARN(resourceName, "arn", "codepipeline", regexp.MustCompile(fmt.Sprintf("test-pipeline-%s", name))), @@ -512,32 +461,28 @@ func TestAccAWSCodePipeline_WithNamespace(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "stage.0.action.0.configuration.%", - "stage.0.action.0.configuration.OAuthToken", - }, }, }, }) } -func TestAccAWSCodePipeline_WithCodeStarConnection(t *testing.T) { +func TestAccAWSCodePipeline_WithGitHubv1SourceAction(t *testing.T) { var v codepipeline.PipelineDeclaration name := acctest.RandString(10) resourceName := "aws_codepipeline.test" - codestarConnectionResourceName := "aws_codestarconnections_connection.test" + githubToken := os.Getenv("GITHUB_TOKEN") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) + testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) testAccPreCheckAWSCodePipelineSupported(t) - testAccPartitionHasServicePreCheck(codestarconnections.EndpointsID, t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCodePipelineDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSCodePipelineConfigWithCodeStarConnection(name), + Config: testAccAWSCodePipelineConfig_WithGitHubv1SourceAction(name, githubToken), Check: resource.ComposeTestCheckFunc( testAccCheckAWSCodePipelineExists(resourceName, &v), @@ -545,19 +490,52 @@ func TestAccAWSCodePipeline_WithCodeStarConnection(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.name", "Source"), resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.category", "Source"), - resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.owner", "AWS"), - resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.provider", "CodeStarSourceConnection"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.owner", "ThirdParty"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.provider", "GitHub"), resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.version", "1"), - resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.%", "3"), - resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.FullRepositoryId", "lifesum-terraform/test"), - resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.BranchName", "main"), - resource.TestCheckResourceAttrPair(resourceName, "stage.0.action.0.configuration.ConnectionArn", codestarConnectionResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.%", "4"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.Owner", "lifesum-terraform"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.Repo", "test"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.Branch", "main"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.OAuthToken", githubToken), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "stage.0.action.0.configuration.%", + 
"stage.0.action.0.configuration.OAuthToken", + }, + }, + { + Config: testAccAWSCodePipelineConfig_WithGitHubv1SourceAction_Updated(name, githubToken), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSCodePipelineExists(resourceName, &v), + + resource.TestCheckResourceAttr(resourceName, "stage.#", "2"), + + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.name", "Source"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.category", "Source"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.owner", "ThirdParty"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.provider", "GitHub"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.version", "1"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.%", "4"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.Owner", "test-terraform"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.Repo", "test-repo"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.Branch", "stable"), + resource.TestCheckResourceAttr(resourceName, "stage.0.action.0.configuration.OAuthToken", githubToken), ), }, { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "stage.0.action.0.configuration.%", + "stage.0.action.0.configuration.OAuthToken", + }, }, }, }) @@ -757,7 +735,7 @@ EOF `, rName) } -func testAccAWSCodePipelineConfig_basic(rName, githubToken string) string { +func testAccAWSCodePipelineConfig_basic(rName string) string { return composeConfig( testAccAWSCodePipelineS3DefaultBucket(rName), testAccAWSCodePipelineServiceIAMRole(rName), @@ -782,16 +760,15 @@ resource "aws_codepipeline" "test" { action { name = "Source" category = "Source" - owner = "ThirdParty" - provider = "GitHub" + owner = "AWS" + provider = "CodeStarSourceConnection" version = "1" output_artifacts = ["test"] configuration = { - Owner = "lifesum-terraform" - Repo = "test" - Branch = "main" - OAuthToken = %[2]q + ConnectionArn = aws_codestarconnections_connection.test.arn + FullRepositoryId = "lifesum-terraform/test" + BranchName = "main" } } } @@ -813,10 +790,15 @@ resource "aws_codepipeline" "test" { } } } -`, rName, githubToken)) + +resource "aws_codestarconnections_connection" "test" { + name = %[1]q + provider_type = "GitHub" +} +`, rName)) } -func testAccAWSCodePipelineConfig_basicUpdated(rName, githubToken string) string { +func testAccAWSCodePipelineConfig_basicUpdated(rName string) string { return composeConfig( testAccAWSCodePipelineS3DefaultBucket(rName), testAccAWSCodePipelineS3Bucket("updated", rName), @@ -842,16 +824,15 @@ resource "aws_codepipeline" "test" { action { name = "Source" category = "Source" - owner = "ThirdParty" - provider = "GitHub" + owner = "AWS" + provider = "CodeStarSourceConnection" version = "1" output_artifacts = ["artifacts"] configuration = { - Owner = "test-terraform" - Repo = "test-repo" - Branch = "stable" - OAuthToken = %[2]q + ConnectionArn = aws_codestarconnections_connection.test.arn + FullRepositoryId = "test-terraform/test-repo" + BranchName = "stable" } } } @@ -873,10 +854,15 @@ resource "aws_codepipeline" "test" { } } } -`, rName, githubToken)) + +resource "aws_codestarconnections_connection" "test" { + name = %[1]q + provider_type = "GitHub" +} +`, rName)) } -func testAccAWSCodePipelineConfig_emptyStageArtifacts(rName, githubToken string) string { +func 
testAccAWSCodePipelineConfig_emptyStageArtifacts(rName string) string { return composeConfig( testAccAWSCodePipelineS3DefaultBucket(rName), testAccAWSCodePipelineServiceIAMRole(rName), @@ -896,16 +882,15 @@ resource "aws_codepipeline" "test" { action { name = "Source" category = "Source" - owner = "ThirdParty" - provider = "GitHub" + owner = "AWS" + provider = "CodeStarSourceConnection" version = "1" output_artifacts = ["test"] configuration = { - Owner = "lifesum-terraform" - Repo = "test" - Branch = "main" - OAuthToken = %[2]q + ConnectionArn = aws_codestarconnections_connection.test.arn + FullRepositoryId = "lifesum-terraform/test" + BranchName = "main" } } } @@ -928,7 +913,12 @@ resource "aws_codepipeline" "test" { } } } -`, rName, githubToken)) + +resource "aws_codestarconnections_connection" "test" { + name = %[1]q + provider_type = "GitHub" +} +`, rName)) } func testAccAWSCodePipelineDeployActionIAMRole(rName string) string { @@ -982,7 +972,7 @@ EOF `, rName) } -func testAccAWSCodePipelineConfig_deployWithServiceRole(rName, githubToken string) string { +func testAccAWSCodePipelineConfig_deployWithServiceRole(rName string) string { return composeConfig( testAccAWSCodePipelineS3DefaultBucket(rName), testAccAWSCodePipelineServiceIAMRoleWithAssumeRole(rName), @@ -1008,16 +998,15 @@ resource "aws_codepipeline" "test" { action { name = "Source" category = "Source" - owner = "ThirdParty" - provider = "GitHub" + owner = "AWS" + provider = "CodeStarSourceConnection" version = "1" output_artifacts = ["artifacts"] configuration = { - Owner = "test-terraform" - Repo = "test-repo" - Branch = "stable" - OAuthToken = %[2]q + ConnectionArn = aws_codestarconnections_connection.test.arn + FullRepositoryId = "lifesum-terraform/test" + BranchName = "main" } } } @@ -1061,10 +1050,15 @@ resource "aws_codepipeline" "test" { } } } -`, rName, githubToken)) + +resource "aws_codestarconnections_connection" "test" { + name = %[1]q + provider_type = "GitHub" +} +`, rName)) } -func testAccAWSCodePipelineConfigWithTags(rName, githubToken, tag1, tag2 string) string { +func testAccAWSCodePipelineConfigWithTags(rName, tag1, tag2 string) string { return composeConfig( testAccAWSCodePipelineS3DefaultBucket(rName), testAccAWSCodePipelineServiceIAMRole(rName), @@ -1089,16 +1083,15 @@ resource "aws_codepipeline" "test" { action { name = "Source" category = "Source" - owner = "ThirdParty" - provider = "GitHub" + owner = "AWS" + provider = "CodeStarSourceConnection" version = "1" output_artifacts = ["test"] configuration = { - Owner = "lifesum-terraform" - Repo = "test" - Branch = "main" - OAuthToken = %[4]q + ConnectionArn = aws_codestarconnections_connection.test.arn + FullRepositoryId = "lifesum-terraform/test" + BranchName = "main" } } } @@ -1126,10 +1119,15 @@ resource "aws_codepipeline" "test" { tag2 = %[3]q } } -`, rName, tag1, tag2, githubToken)) + +resource "aws_codestarconnections_connection" "test" { + name = %[1]q + provider_type = "GitHub" +} +`, rName, tag1, tag2)) } -func testAccAWSCodePipelineConfig_multiregion(rName, githubToken string) string { +func testAccAWSCodePipelineConfig_multiregion(rName string) string { return composeConfig( testAccAlternateRegionProviderConfig(), testAccAWSCodePipelineS3DefaultBucket(rName), @@ -1170,16 +1168,15 @@ resource "aws_codepipeline" "test" { action { name = "Source" category = "Source" - owner = "ThirdParty" - provider = "GitHub" + owner = "AWS" + provider = "CodeStarSourceConnection" version = "1" output_artifacts = ["test"] configuration = { - Owner = 
"lifesum-terraform" - Repo = "test" - Branch = "main" - OAuthToken = %[4]q + ConnectionArn = aws_codestarconnections_connection.test.arn + FullRepositoryId = "lifesum-terraform/test" + BranchName = "main" } } } @@ -1216,10 +1213,15 @@ resource "aws_codepipeline" "test" { } } } -`, rName, testAccGetRegion(), testAccGetAlternateRegion(), githubToken)) + +resource "aws_codestarconnections_connection" "test" { + name = %[1]q + provider_type = "GitHub" +} +`, rName, testAccGetRegion(), testAccGetAlternateRegion())) } -func testAccAWSCodePipelineConfig_multiregionUpdated(rName, githubToken string) string { +func testAccAWSCodePipelineConfig_multiregionUpdated(rName string) string { return composeConfig( testAccAlternateRegionProviderConfig(), testAccAWSCodePipelineS3DefaultBucket(rName), @@ -1260,16 +1262,15 @@ resource "aws_codepipeline" "test" { action { name = "Source" category = "Source" - owner = "ThirdParty" - provider = "GitHub" + owner = "AWS" + provider = "CodeStarSourceConnection" version = "1" output_artifacts = ["test"] configuration = { - Owner = "lifesum-terraform" - Repo = "test" - Branch = "main" - OAuthToken = %[4]q + ConnectionArn = aws_codestarconnections_connection.test.arn + FullRepositoryId = "lifesum-terraform/test" + BranchName = "main" } } } @@ -1306,13 +1307,18 @@ resource "aws_codepipeline" "test" { } } } -`, rName, testAccGetRegion(), testAccGetAlternateRegion(), githubToken)) + +resource "aws_codestarconnections_connection" "test" { + name = %[1]q + provider_type = "GitHub" +} +`, rName, testAccGetRegion(), testAccGetAlternateRegion())) } -func testAccAWSCodePipelineConfig_backToBasic(rName, githubToken string) string { +func testAccAWSCodePipelineConfig_backToBasic(rName string) string { return composeConfig( testAccAlternateRegionProviderConfig(), - testAccAWSCodePipelineConfig_basic(rName, githubToken), + testAccAWSCodePipelineConfig_basic(rName), ) } @@ -1339,7 +1345,7 @@ resource "aws_s3_bucket" "%[1]s" { `, bucket, rName, provider) } -func testAccAWSCodePipelineConfigWithNamespace(rName, githubToken string) string { +func testAccAWSCodePipelineConfigWithNamespace(rName string) string { return composeConfig( testAccAWSCodePipelineS3DefaultBucket(rName), testAccAWSCodePipelineServiceIAMRole(rName), @@ -1364,17 +1370,16 @@ resource "aws_codepipeline" "test" { action { name = "Source" category = "Source" - owner = "ThirdParty" - provider = "GitHub" + owner = "AWS" + provider = "CodeStarSourceConnection" version = "1" output_artifacts = ["test"] namespace = "SourceVariables" configuration = { - Owner = "lifesum-terraform" - Repo = "test" - Branch = "main" - OAuthToken = %[2]q + ConnectionArn = aws_codestarconnections_connection.test.arn + FullRepositoryId = "lifesum-terraform/test" + BranchName = "main" } } } @@ -1397,14 +1402,19 @@ resource "aws_codepipeline" "test" { } } +resource "aws_codestarconnections_connection" "test" { + name = %[1]q + provider_type = "GitHub" +} + resource "aws_s3_bucket" "foo" { bucket = "tf-test-pipeline-%[1]s" acl = "private" } -`, rName, githubToken)) +`, rName)) } -func testAccAWSCodePipelineConfigWithCodeStarConnection(rName string) string { +func testAccAWSCodePipelineConfig_WithGitHubv1SourceAction(rName, githubToken string) string { return composeConfig( testAccAWSCodePipelineS3DefaultBucket(rName), testAccAWSCodePipelineServiceIAMRole(rName), @@ -1429,15 +1439,16 @@ resource "aws_codepipeline" "test" { action { name = "Source" category = "Source" - owner = "AWS" - provider = "CodeStarSourceConnection" + owner = "ThirdParty" + 
provider = "GitHub" version = "1" output_artifacts = ["test"] configuration = { - ConnectionArn = aws_codestarconnections_connection.test.arn - FullRepositoryId = "lifesum-terraform/test" - BranchName = "main" + Owner = "lifesum-terraform" + Repo = "test" + Branch = "main" + OAuthToken = %[2]q } } } @@ -1459,12 +1470,66 @@ resource "aws_codepipeline" "test" { } } } +`, rName, githubToken)) +} -resource "aws_codestarconnections_connection" "test" { - name = %[1]q - provider_type = "GitHub" +func testAccAWSCodePipelineConfig_WithGitHubv1SourceAction_Updated(rName, githubToken string) string { + return composeConfig( + testAccAWSCodePipelineS3DefaultBucket(rName), + testAccAWSCodePipelineServiceIAMRole(rName), + fmt.Sprintf(` +resource "aws_codepipeline" "test" { + name = "test-pipeline-%[1]s" + role_arn = aws_iam_role.codepipeline_role.arn + + artifact_store { + location = aws_s3_bucket.test.bucket + type = "S3" + + encryption_key { + id = "1234" + type = "KMS" + } + } + + stage { + name = "Source" + + action { + name = "Source" + category = "Source" + owner = "ThirdParty" + provider = "GitHub" + version = "1" + output_artifacts = ["artifacts"] + + configuration = { + Owner = "test-terraform" + Repo = "test-repo" + Branch = "stable" + OAuthToken = %[2]q + } + } + } + + stage { + name = "Build" + + action { + name = "Build" + category = "Build" + owner = "AWS" + provider = "CodeBuild" + input_artifacts = ["artifacts"] + version = "1" + + configuration = { + ProjectName = "test" + } + } + } } -`, rName)) +`, rName, githubToken)) } func TestResourceAWSCodePipelineExpandArtifactStoresValidation(t *testing.T) { From 1e15681deedaf93cdf605161ce17f05107f47017 Mon Sep 17 00:00:00 2001 From: Mike Dalrymple Date: Sat, 19 Dec 2020 21:48:18 -0800 Subject: [PATCH 0331/1212] Updating example's "name" The example used `connection_name` but it should be `name` --- website/docs/r/codestarconnections_connection.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/r/codestarconnections_connection.markdown b/website/docs/r/codestarconnections_connection.markdown index 5aadbb83228..147b3092e8b 100644 --- a/website/docs/r/codestarconnections_connection.markdown +++ b/website/docs/r/codestarconnections_connection.markdown @@ -16,8 +16,8 @@ Provides a CodeStar Connection. 
```hcl resource "aws_codestarconnections_connection" "example" { - connection_name = "example-connection" - provider_type = "Bitbucket" + name = "example-connection" + provider_type = "Bitbucket" } resource "aws_codepipeline" "example" { From cc787fe51c57fde944df841ba03e890ec25601d8 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 10 Dec 2020 22:59:01 +0200 Subject: [PATCH 0332/1212] add lineage settings --- aws/resource_aws_glue_crawler.go | 115 ++++++++++++++++++-------- aws/resource_aws_glue_crawler_test.go | 66 +++++++++++++++ 2 files changed, 148 insertions(+), 33 deletions(-) diff --git a/aws/resource_aws_glue_crawler.go b/aws/resource_aws_glue_crawler.go index 25ae86fb32d..744063edf87 100644 --- a/aws/resource_aws_glue_crawler.go +++ b/aws/resource_aws_glue_crawler.go @@ -222,6 +222,20 @@ func resourceAwsGlueCrawler() *schema.Resource { }, ValidateFunc: validation.StringIsJSON, }, + "lineage_configuration": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "crawler_lineage_settings": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(glue.CrawlerLineageSettings_Values(), false), + }, + }, + }, + }, "security_configuration": { Type: schema.TypeString, Optional: true, @@ -259,7 +273,7 @@ func resourceAwsGlueCrawlerCreate(d *schema.ResourceData, meta interface{}) erro _, err = glueConn.CreateCrawler(crawlerInput) } if err != nil { - return fmt.Errorf("error creating Glue crawler: %s", err) + return fmt.Errorf("error creating Glue crawler: %w", err) } d.SetId(name) @@ -305,6 +319,10 @@ func createCrawlerInput(crawlerName string, d *schema.ResourceData) (*glue.Creat crawlerInput.CrawlerSecurityConfiguration = aws.String(securityConfiguration.(string)) } + if v, ok := d.GetOk("lineage_configuration"); ok { + crawlerInput.LineageConfiguration = expandGlueCrawlerLineageConfiguration(v.([]interface{})) + } + return crawlerInput, nil } @@ -349,6 +367,10 @@ func updateCrawlerInput(crawlerName string, d *schema.ResourceData) (*glue.Updat crawlerInput.CrawlerSecurityConfiguration = aws.String(securityConfiguration.(string)) } + if v, ok := d.GetOk("lineage_configuration"); ok { + crawlerInput.LineageConfiguration = expandGlueCrawlerLineageConfiguration(v.([]interface{})) + } + return crawlerInput, nil } @@ -529,7 +551,8 @@ func resourceAwsGlueCrawlerUpdate(d *schema.ResourceData, meta interface{}) erro if d.HasChanges( "catalog_target", "classifiers", "configuration", "description", "dynamodb_target", "jdbc_target", "role", - "s3_target", "schedule", "schema_change_policy", "security_configuration", "table_prefix", "mongodb_target") { + "s3_target", "schedule", "schema_change_policy", "security_configuration", "table_prefix", "mongodb_target", + "lineage_configuration") { updateCrawlerInput, err := updateCrawlerInput(name, d) if err != nil { return err @@ -556,14 +579,14 @@ func resourceAwsGlueCrawlerUpdate(d *schema.ResourceData, meta interface{}) erro } if err != nil { - return fmt.Errorf("error updating Glue crawler: %s", err) + return fmt.Errorf("error updating Glue crawler: %w", err) } } if d.HasChange("tags") { o, n := d.GetChange("tags") if err := keyvaluetags.GlueUpdateTags(glueConn, d.Get("arn").(string), o, n); err != nil { - return fmt.Errorf("error updating tags: %s", err) + return fmt.Errorf("error updating tags: %w", err) } } @@ -586,10 +609,11 @@ func resourceAwsGlueCrawlerRead(d *schema.ResourceData, meta interface{}) error return nil } - return fmt.Errorf("error 
reading Glue crawler: %s", err.Error()) + return fmt.Errorf("error reading Glue crawler: %w", err) } - if crawlerOutput.Crawler == nil { + crawler := crawlerOutput.Crawler + if crawler == nil { log.Printf("[WARN] Glue Crawler (%s) not found, removing from state", d.Id()) d.SetId("") return nil @@ -603,25 +627,25 @@ func resourceAwsGlueCrawlerRead(d *schema.ResourceData, meta interface{}) error Resource: fmt.Sprintf("crawler/%s", d.Id()), }.String() d.Set("arn", crawlerARN) - d.Set("name", crawlerOutput.Crawler.Name) - d.Set("database_name", crawlerOutput.Crawler.DatabaseName) - d.Set("role", crawlerOutput.Crawler.Role) - d.Set("configuration", crawlerOutput.Crawler.Configuration) - d.Set("description", crawlerOutput.Crawler.Description) - d.Set("security_configuration", crawlerOutput.Crawler.CrawlerSecurityConfiguration) + d.Set("name", crawler.Name) + d.Set("database_name", crawler.DatabaseName) + d.Set("role", crawler.Role) + d.Set("configuration", crawler.Configuration) + d.Set("description", crawler.Description) + d.Set("security_configuration", crawler.CrawlerSecurityConfiguration) d.Set("schedule", "") - if crawlerOutput.Crawler.Schedule != nil { - d.Set("schedule", crawlerOutput.Crawler.Schedule.ScheduleExpression) + if crawler.Schedule != nil { + d.Set("schedule", crawler.Schedule.ScheduleExpression) } - if err := d.Set("classifiers", flattenStringList(crawlerOutput.Crawler.Classifiers)); err != nil { - return fmt.Errorf("error setting classifiers: %s", err) + if err := d.Set("classifiers", flattenStringList(crawler.Classifiers)); err != nil { + return fmt.Errorf("error setting classifiers: %w", err) } - d.Set("table_prefix", crawlerOutput.Crawler.TablePrefix) + d.Set("table_prefix", crawler.TablePrefix) - if crawlerOutput.Crawler.SchemaChangePolicy != nil { + if crawler.SchemaChangePolicy != nil { schemaPolicy := map[string]string{ - "delete_behavior": aws.StringValue(crawlerOutput.Crawler.SchemaChangePolicy.DeleteBehavior), - "update_behavior": aws.StringValue(crawlerOutput.Crawler.SchemaChangePolicy.UpdateBehavior), + "delete_behavior": aws.StringValue(crawler.SchemaChangePolicy.DeleteBehavior), + "update_behavior": aws.StringValue(crawler.SchemaChangePolicy.UpdateBehavior), } if err := d.Set("schema_change_policy", []map[string]string{schemaPolicy}); err != nil { @@ -629,24 +653,24 @@ func resourceAwsGlueCrawlerRead(d *schema.ResourceData, meta interface{}) error } } - if crawlerOutput.Crawler.Targets != nil { - if err := d.Set("dynamodb_target", flattenGlueDynamoDBTargets(crawlerOutput.Crawler.Targets.DynamoDBTargets)); err != nil { - return fmt.Errorf("error setting dynamodb_target: %s", err) + if crawler.Targets != nil { + if err := d.Set("dynamodb_target", flattenGlueDynamoDBTargets(crawler.Targets.DynamoDBTargets)); err != nil { + return fmt.Errorf("error setting dynamodb_target: %w", err) } - if err := d.Set("jdbc_target", flattenGlueJdbcTargets(crawlerOutput.Crawler.Targets.JdbcTargets)); err != nil { - return fmt.Errorf("error setting jdbc_target: %s", err) + if err := d.Set("jdbc_target", flattenGlueJdbcTargets(crawler.Targets.JdbcTargets)); err != nil { + return fmt.Errorf("error setting jdbc_target: %w", err) } - if err := d.Set("s3_target", flattenGlueS3Targets(crawlerOutput.Crawler.Targets.S3Targets)); err != nil { - return fmt.Errorf("error setting s3_target: %s", err) + if err := d.Set("s3_target", flattenGlueS3Targets(crawler.Targets.S3Targets)); err != nil { + return fmt.Errorf("error setting s3_target: %w", err) } - if err := d.Set("catalog_target", 
flattenGlueCatalogTargets(crawlerOutput.Crawler.Targets.CatalogTargets)); err != nil { - return fmt.Errorf("error setting catalog_target: %s", err) + if err := d.Set("catalog_target", flattenGlueCatalogTargets(crawler.Targets.CatalogTargets)); err != nil { + return fmt.Errorf("error setting catalog_target: %w", err) } - if err := d.Set("mongodb_target", flattenGlueMongoDBTargets(crawlerOutput.Crawler.Targets.MongoDBTargets)); err != nil { + if err := d.Set("mongodb_target", flattenGlueMongoDBTargets(crawler.Targets.MongoDBTargets)); err != nil { return fmt.Errorf("error setting mongodb_target: %w", err) } } @@ -654,11 +678,15 @@ func resourceAwsGlueCrawlerRead(d *schema.ResourceData, meta interface{}) error tags, err := keyvaluetags.GlueListTags(glueConn, crawlerARN) if err != nil { - return fmt.Errorf("error listing tags for Glue Crawler (%s): %s", crawlerARN, err) + return fmt.Errorf("error listing tags for Glue Crawler (%s): %w", crawlerARN, err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) + } + + if err := d.Set("lineage_configuration", flattenGlueCrawlerLineageConfiguration(crawler.LineageConfiguration)); err != nil { + return fmt.Errorf("error setting lineage_configuration: %w", err) } return nil @@ -744,7 +772,28 @@ func resourceAwsGlueCrawlerDelete(d *schema.ResourceData, meta interface{}) erro if isAWSErr(err, glue.ErrCodeEntityNotFoundException, "") { return nil } - return fmt.Errorf("error deleting Glue crawler: %s", err.Error()) + return fmt.Errorf("error deleting Glue crawler: %w", err) } return nil } + +func expandGlueCrawlerLineageConfiguration(cfg []interface{}) *glue.LineageConfiguration { + m := cfg[0].(map[string]interface{}) + + target := &glue.LineageConfiguration{ + CrawlerLineageSettings: aws.String(m["crawler_lineage_settings"].(string)), + } + return target +} + +func flattenGlueCrawlerLineageConfiguration(cfg *glue.LineageConfiguration) []map[string]interface{} { + if cfg == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "crawler_lineage_settings": aws.StringValue(cfg.CrawlerLineageSettings), + } + + return []map[string]interface{}{m} +} diff --git a/aws/resource_aws_glue_crawler_test.go b/aws/resource_aws_glue_crawler_test.go index c45ffcdb15a..777c3445b77 100644 --- a/aws/resource_aws_glue_crawler_test.go +++ b/aws/resource_aws_glue_crawler_test.go @@ -1271,6 +1271,48 @@ func TestAccAWSGlueCrawler_SecurityConfiguration(t *testing.T) { }) } +func TestAccAWSGlueCrawler_lineageConfig(t *testing.T) { + var crawler glue.Crawler + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_glue_crawler.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSGlueCrawlerDestroy, + Steps: []resource.TestStep{ + { + Config: testAccGlueCrawlerLineageConfig(rName, "ENABLE"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSGlueCrawlerExists(resourceName, &crawler), + resource.TestCheckResourceAttr(resourceName, "lineage_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "lineage_configuration.0.crawler_lineage_settings", "ENABLE"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGlueCrawlerLineageConfig(rName, "DISABLE"), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckAWSGlueCrawlerExists(resourceName, &crawler),
+					resource.TestCheckResourceAttr(resourceName, "lineage_configuration.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "lineage_configuration.0.crawler_lineage_settings", "DISABLE")),
+			},
+			{
+				Config: testAccGlueCrawlerLineageConfig(rName, "ENABLE"),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSGlueCrawlerExists(resourceName, &crawler),
+					resource.TestCheckResourceAttr(resourceName, "lineage_configuration.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "lineage_configuration.0.crawler_lineage_settings", "ENABLE"),
+				),
+			},
+		},
+	})
+}
+
 func testAccCheckAWSGlueCrawlerExists(resourceName string, crawler *glue.Crawler) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
 		rs, ok := s.RootModule().Resources[resourceName]
@@ -2339,3 +2381,27 @@ resource "aws_glue_crawler" "test" {
 }
 `, rName, path1, path2)
 }
+
+func testAccGlueCrawlerLineageConfig(rName, lineageConfig string) string {
+	return testAccGlueCrawlerConfig_Base(rName) + fmt.Sprintf(`
+resource "aws_glue_catalog_database" "test" {
+  name = %[1]q
+}
+
+resource "aws_glue_crawler" "test" {
+  depends_on = [aws_iam_role_policy_attachment.test-AWSGlueServiceRole]
+
+  database_name = aws_glue_catalog_database.test.name
+  name = %[1]q
+  role = aws_iam_role.test.name
+
+  lineage_configuration {
+    crawler_lineage_settings = %[2]q
+  }
+
+  s3_target {
+    path = "s3://bucket-name"
+  }
+}
+`, rName, lineageConfig)
+}

From 9aa2caf279a7a42549892ef7218bf28cfbc0d015 Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Thu, 10 Dec 2020 23:04:13 +0200
Subject: [PATCH 0333/1212] flatten schema policy

---
 aws/resource_aws_glue_crawler.go | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)

diff --git a/aws/resource_aws_glue_crawler.go b/aws/resource_aws_glue_crawler.go
index 744063edf87..a0230f37bc7 100644
--- a/aws/resource_aws_glue_crawler.go
+++ b/aws/resource_aws_glue_crawler.go
@@ -643,13 +643,8 @@ func resourceAwsGlueCrawlerRead(d *schema.ResourceData, meta interface{}) error
 	d.Set("table_prefix", crawler.TablePrefix)
 
 	if crawler.SchemaChangePolicy != nil {
-		schemaPolicy := map[string]string{
-			"delete_behavior": aws.StringValue(crawler.SchemaChangePolicy.DeleteBehavior),
-			"update_behavior": aws.StringValue(crawler.SchemaChangePolicy.UpdateBehavior),
-		}
-
-		if err := d.Set("schema_change_policy", []map[string]string{schemaPolicy}); err != nil {
-			return fmt.Errorf("error setting schema_change_policy: %s", schemaPolicy)
+		if err := d.Set("schema_change_policy", flattenGlueCrawlerSchemaChangePolicy(crawler.SchemaChangePolicy)); err != nil {
+			return fmt.Errorf("error setting schema_change_policy: %w", err)
 		}
 	}
 
@@ -777,6 +772,19 @@ func resourceAwsGlueCrawlerDelete(d *schema.ResourceData, meta interface{}) erro
 	return nil
 }
 
+func flattenGlueCrawlerSchemaChangePolicy(cfg *glue.SchemaChangePolicy) []map[string]interface{} {
+	if cfg == nil {
+		return []map[string]interface{}{}
+	}
+
+	m := map[string]interface{}{
+		"delete_behavior": aws.StringValue(cfg.DeleteBehavior),
+		"update_behavior": aws.StringValue(cfg.UpdateBehavior),
+	}
+
+	return []map[string]interface{}{m}
+}
+
 func expandGlueCrawlerLineageConfiguration(cfg []interface{}) *glue.LineageConfiguration {
 	m := cfg[0].(map[string]interface{})
 

From 930442db347fb799783e86ed881a691850ca0c49 Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Fri, 11 Dec 2020 10:32:34 +0200
Subject: [PATCH 0334/1212] add crawler test

---
 aws/resource_aws_glue_crawler_test.go | 71 
+++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/aws/resource_aws_glue_crawler_test.go b/aws/resource_aws_glue_crawler_test.go index 777c3445b77..92b00020551 100644 --- a/aws/resource_aws_glue_crawler_test.go +++ b/aws/resource_aws_glue_crawler_test.go @@ -1313,6 +1313,48 @@ func TestAccAWSGlueCrawler_lineageConfig(t *testing.T) { }) } +func TestAccAWSGlueCrawler_recrawlPolicy(t *testing.T) { + var crawler glue.Crawler + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_glue_crawler.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSGlueCrawlerDestroy, + Steps: []resource.TestStep{ + { + Config: testAccGlueCrawlerRecrawlPolicyConfig(rName, "CRAWL_EVERYTHING"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSGlueCrawlerExists(resourceName, &crawler), + resource.TestCheckResourceAttr(resourceName, "recrawl_policy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "recrawl_policy.0.recrawl_behavior", "CRAWL_EVERYTHING"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGlueCrawlerRecrawlPolicyConfig(rName, "CRAWL_NEW_FOLDERS_ONLY"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSGlueCrawlerExists(resourceName, &crawler), + resource.TestCheckResourceAttr(resourceName, "recrawl_policy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "recrawl_policy.0.recrawl_behavior", "CRAWL_NEW_FOLDERS_ONLY")), + }, + { + Config: testAccGlueCrawlerRecrawlPolicyConfig(rName, "CRAWL_EVERYTHING"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSGlueCrawlerExists(resourceName, &crawler), + resource.TestCheckResourceAttr(resourceName, "recrawl_policy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "recrawl_policy.0.recrawl_behavior", "CRAWL_EVERYTHING"), + ), + }, + }, + }) +} + func testAccCheckAWSGlueCrawlerExists(resourceName string, crawler *glue.Crawler) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] @@ -2405,3 +2447,32 @@ resource "aws_glue_crawler" "test" { } `, rName, lineageConfig) } + +func testAccGlueCrawlerRecrawlPolicyConfig(rName, policy string) string { + return testAccGlueCrawlerConfig_Base(rName) + fmt.Sprintf(` +resource "aws_glue_catalog_database" "test" { + name = %[1]q +} + +resource "aws_glue_crawler" "test" { + depends_on = [aws_iam_role_policy_attachment.test-AWSGlueServiceRole] + + database_name = aws_glue_catalog_database.test.name + name = %[1]q + role = aws_iam_role.test.name + + schema_change_policy { + delete_behavior = "LOG" + update_behavior = "LOG" + } + + recrawl_policy { + recrawl_behavior = %[2]q + } + + s3_target { + path = "s3://bucket-name" + } +} +`, rName, policy) +} From c928c0903933c17f0fc5f6dc1d64b4df6407a98a Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 11 Dec 2020 10:33:01 +0200 Subject: [PATCH 0335/1212] add recrawl argument + validations --- aws/resource_aws_glue_crawler.go | 81 +++++++++++++++++++++++++++++--- 1 file changed, 74 insertions(+), 7 deletions(-) diff --git a/aws/resource_aws_glue_crawler.go b/aws/resource_aws_glue_crawler.go index a0230f37bc7..2695738d199 100644 --- a/aws/resource_aws_glue_crawler.go +++ b/aws/resource_aws_glue_crawler.go @@ -3,6 +3,7 @@ package aws import ( "fmt" "log" + "regexp" "strings" "time" @@ -32,6 +33,10 @@ func resourceAwsGlueCrawler() *schema.Resource { Type: schema.TypeString, 
ForceNew: true, Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 255), + validation.StringMatch(regexp.MustCompile(`[a-zA-Z0-9-_$#]+$`), ""), + ), }, "arn": { Type: schema.TypeString, @@ -57,8 +62,9 @@ func resourceAwsGlueCrawler() *schema.Resource { }, }, "description": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 2048), }, "schedule": { Type: schema.TypeString, @@ -97,8 +103,9 @@ func resourceAwsGlueCrawler() *schema.Resource { }, }, "table_prefix": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 128), }, "s3_target": { Type: schema.TypeList, @@ -225,17 +232,44 @@ func resourceAwsGlueCrawler() *schema.Resource { "lineage_configuration": { Type: schema.TypeList, Optional: true, - MinItems: 1, + MaxItems: 1, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if old == "1" && new == "0" { + return true + } + return false + }, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "crawler_lineage_settings": { Type: schema.TypeString, - Required: true, + Optional: true, + Default: glue.CrawlerLineageSettingsDisable, ValidateFunc: validation.StringInSlice(glue.CrawlerLineageSettings_Values(), false), }, }, }, }, + "recrawl_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if old == "1" && new == "0" { + return true + } + return false + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "recrawl_behavior": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(glue.RecrawlBehavior_Values(), false), + }, + }, + }, + }, "security_configuration": { Type: schema.TypeString, Optional: true, @@ -323,6 +357,10 @@ func createCrawlerInput(crawlerName string, d *schema.ResourceData) (*glue.Creat crawlerInput.LineageConfiguration = expandGlueCrawlerLineageConfiguration(v.([]interface{})) } + if v, ok := d.GetOk("recrawl_policy"); ok { + crawlerInput.RecrawlPolicy = expandGlueCrawlerRecrawlPolicy(v.([]interface{})) + } + return crawlerInput, nil } @@ -371,6 +409,10 @@ func updateCrawlerInput(crawlerName string, d *schema.ResourceData) (*glue.Updat crawlerInput.LineageConfiguration = expandGlueCrawlerLineageConfiguration(v.([]interface{})) } + if v, ok := d.GetOk("recrawl_policy"); ok { + crawlerInput.RecrawlPolicy = expandGlueCrawlerRecrawlPolicy(v.([]interface{})) + } + return crawlerInput, nil } @@ -552,7 +594,7 @@ func resourceAwsGlueCrawlerUpdate(d *schema.ResourceData, meta interface{}) erro if d.HasChanges( "catalog_target", "classifiers", "configuration", "description", "dynamodb_target", "jdbc_target", "role", "s3_target", "schedule", "schema_change_policy", "security_configuration", "table_prefix", "mongodb_target", - "lineage_configuration") { + "lineage_configuration", "recrawl_policy") { updateCrawlerInput, err := updateCrawlerInput(name, d) if err != nil { return err @@ -684,6 +726,10 @@ func resourceAwsGlueCrawlerRead(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("error setting lineage_configuration: %w", err) } + if err := d.Set("recrawl_policy", flattenGlueCrawlerRecrawlPolicy(crawler.RecrawlPolicy)); err != nil { + return fmt.Errorf("error setting recrawl_policy: %w", err) + } + return nil } @@ -805,3 +851,24 @@ func flattenGlueCrawlerLineageConfiguration(cfg 
*glue.LineageConfiguration) []map[string]interface{} {
 
 	return []map[string]interface{}{m}
 }
+
+func expandGlueCrawlerRecrawlPolicy(cfg []interface{}) *glue.RecrawlPolicy {
+	m := cfg[0].(map[string]interface{})
+
+	target := &glue.RecrawlPolicy{
+		RecrawlBehavior: aws.String(m["recrawl_behavior"].(string)),
+	}
+	return target
+}
+
+func flattenGlueCrawlerRecrawlPolicy(cfg *glue.RecrawlPolicy) []map[string]interface{} {
+	if cfg == nil {
+		return []map[string]interface{}{}
+	}
+
+	m := map[string]interface{}{
+		"recrawl_behavior": aws.StringValue(cfg.RecrawlBehavior),
+	}
+
+	return []map[string]interface{}{m}
+}

From e05941a4a4b862d5e450341a8d5788eb4b49cc1a Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Fri, 11 Dec 2020 10:40:09 +0200
Subject: [PATCH 0336/1212] default for recrawl

---
 aws/resource_aws_glue_crawler.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/aws/resource_aws_glue_crawler.go b/aws/resource_aws_glue_crawler.go
index 2695738d199..38c9c837524 100644
--- a/aws/resource_aws_glue_crawler.go
+++ b/aws/resource_aws_glue_crawler.go
@@ -264,7 +264,8 @@ func resourceAwsGlueCrawler() *schema.Resource {
 					Schema: map[string]*schema.Schema{
 						"recrawl_behavior": {
 							Type:         schema.TypeString,
-							Required:     true,
+							Optional:     true,
+							Default:      glue.RecrawlBehaviorCrawlEverything,
 							ValidateFunc: validation.StringInSlice(glue.RecrawlBehavior_Values(), false),
 						},
 					},

From d0cc8cc13b004b3f1bcac3b9669dcb67e6ca6a74 Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Fri, 11 Dec 2020 10:52:08 +0200
Subject: [PATCH 0337/1212] add docs

---
 website/docs/r/glue_crawler.html.markdown | 36 ++++++++++++++++++----------------
 1 file changed, 20 insertions(+), 16 deletions(-)

diff --git a/website/docs/r/glue_crawler.html.markdown b/website/docs/r/glue_crawler.html.markdown
index dfc3caf0110..351110636a4 100644
--- a/website/docs/r/glue_crawler.html.markdown
+++ b/website/docs/r/glue_crawler.html.markdown
@@ -139,55 +139,59 @@ The following arguments are supported:
 * `classifiers` (Optional) List of custom classifiers. By default, all AWS classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.
 * `configuration` (Optional) JSON string of configuration information. For more details see [Setting Crawler Configuration Options](https://docs.aws.amazon.com/glue/latest/dg/crawler-configuration.html).
 * `description` (Optional) Description of the crawler.
-* `dynamodb_target` (Optional) List of nested DynamoDB target arguments. See below.
-* `jdbc_target` (Optional) List of nested JDBC target arguments. See below.
-* `s3_target` (Optional) List of nested Amazon S3 target arguments. See below.
-* `mongodb_target` (Optional) List of nested MongoDB target arguments. See below.
+* `dynamodb_target` (Optional) List of nested DynamoDB target arguments. See [Dynamodb Target](#dynamodb-target) below.
+* `jdbc_target` (Optional) List of nested JDBC target arguments. See [JDBC Target](#jdbc-target) below.
+* `s3_target` (Optional) List of nested Amazon S3 target arguments. See [S# Target](#s3-target) below.
+* `mongodb_target` (Optional) List of nested MongoDB target arguments. See [MongoDB Target](#mongodb-target) below.
 * `schedule` (Optional) A cron expression used to specify the schedule. For more information, see [Time-Based Schedules for Jobs and Crawlers](https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). For example, to run something every day at 12:15 UTC, you would specify: `cron(15 12 * * ? *)`. 
-* `schema_change_policy` (Optional) Policy for the crawler's update and deletion behavior.
+* `schema_change_policy` (Optional) Policy for the crawler's update and deletion behavior. See [Schema Change Policy](#schema-change-policy) below.
+* `lineage_configuration` (Optional) Specifies data lineage configuration settings for the crawler. See [Lineage Configuration](#lineage-configuration) below.
+* `recrawl_policy` (Optional) A policy that specifies whether to crawl the entire dataset again, or to crawl only folders that were added since the last crawler run. See [Recrawl Policy](#recrawl-policy) below.
 * `security_configuration` (Optional) The name of Security Configuration to be used by the crawler.
 * `table_prefix` (Optional) The table prefix used for catalog tables that are created.
 * `tags` - (Optional) Key-value map of resource tags.
 
-### dynamodb_target Argument Reference
+### Dynamodb Target
 
 * `path` - (Required) The name of the DynamoDB table to crawl.
 * `scan_all` - (Optional) Indicates whether to scan all the records, or to sample rows from the table. Scanning all the records can take a long time when the table is not a high throughput table. Defaults to `true`.
 * `scan_rate` - (Optional) The percentage of the configured read capacity units to use by the AWS Glue crawler. The valid values are null or a value between 0.1 and 1.5.
 
-### jdbc_target Argument Reference
+### JDBC Target
 
 * `connection_name` - (Required) The name of the connection to use to connect to the JDBC target.
 * `path` - (Required) The path of the JDBC target.
 * `exclusions` - (Optional) A list of glob patterns used to exclude from the crawl.
 
-### s3_target Argument Reference
+### S3 Target
 
 * `path` - (Required) The path to the Amazon S3 target.
 * `connection_name` - (Optional) The name of a connection which allows crawler to access data in S3 within a VPC.
 * `exclusions` - (Optional) A list of glob patterns used to exclude from the crawl.
 
-### catalog_target Argument Reference
+### Catalog Target
 
 * `database_name` - (Required) The name of the Glue database to be synchronized.
 * `tables` - (Required) A list of catalog tables to be synchronized.
 
-### mongodb_target Argument Reference
-
-* `connection_name` - (Required) The name of the connection to use to connect to the Amazon DocumentDB or MongoDB target.
-* `path` - (Required) The path of the Amazon DocumentDB or MongoDB target (database/collection).
-* `scan_all` - (Optional) Indicates whether to scan all the records, or to sample rows from the table. Scanning all the records can take a long time when the table is not a high throughput table. Default value is `true`.
-
 ~> **Note:** `deletion_behavior` of catalog target doesn't support `DEPRECATE_IN_DATABASE`.
 
 -> **Note:** `configuration` for catalog target crawlers will have `{ ... "Grouping": { "TableGroupingPolicy": "CombineCompatibleSchemas"} }` by default.
 
-### schema_change_policy Argument Reference
+### Schema Change Policy
 
 * `delete_behavior` - (Optional) The deletion behavior when the crawler finds a deleted object. Valid values: `LOG`, `DELETE_FROM_DATABASE`, or `DEPRECATE_IN_DATABASE`. Defaults to `DEPRECATE_IN_DATABASE`.
 * `update_behavior` - (Optional) The update behavior when the crawler finds a changed schema. Valid values: `LOG` or `UPDATE_IN_DATABASE`. Defaults to `UPDATE_IN_DATABASE`.
 
+### Lineage Configuration
+
+* `crawler_lineage_settings` - (Optional) Specifies whether data lineage is enabled for the crawler. Valid values are: `ENABLE` and `DISABLE`. Default value is `DISABLE`. 
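Both of the blocks documented above are declared in patch 0335 as `MaxItems: 1` lists with a `DiffSuppressFunc`, which is why their API-side defaults (`DISABLE`, `CRAWL_EVERYTHING`) do not show up as perpetual plan diffs when the block is omitted. A minimal, self-contained sketch of that suppression rule follows; the standalone function name and the `main` driver are illustrative only, not part of the provider:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// Signature matches helper/schema's DiffSuppressFunc. For a TypeList block,
// Terraform also diffs the item count under the "<attribute>.#" key: after a
// read writes the API default back to state, state holds one item (old "1")
// while an omitted block in config holds none (new "0"). Returning true for
// exactly that case keeps the omitted-but-defaulted block out of the plan.
func suppressMissingOptionalConfigurationBlock(k, old, new string, d *schema.ResourceData) bool {
	return old == "1" && new == "0"
}

func main() {
	fmt.Println(suppressMissingOptionalConfigurationBlock("lineage_configuration.#", "1", "0", nil)) // true: suppressed
	fmt.Println(suppressMissingOptionalConfigurationBlock("lineage_configuration.#", "0", "1", nil)) // false: real change
}
```

The asymmetry is deliberate: adding the block (old "0", new "1") is a real change the user asked for, so only the removal direction is suppressed.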
+
+### Recrawl Policy
+
+* `recrawl_behavior` - (Optional) Specifies whether to crawl the entire dataset again or to crawl only folders that were added since the last crawler run. Valid values are: `CRAWL_EVERYTHING` and `CRAWL_NEW_FOLDERS_ONLY`. Default value is `CRAWL_EVERYTHING`.
+
 ## Attributes Reference
 
 In addition to all arguments above, the following attributes are exported:

From 1df3de410a35c2f2e5a6218ff2a229d5062bf9e7 Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Fri, 11 Dec 2020 10:57:49 +0200
Subject: [PATCH 0338/1212] revert mongo db doc delete

---
 website/docs/r/glue_crawler.html.markdown | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git a/website/docs/r/glue_crawler.html.markdown b/website/docs/r/glue_crawler.html.markdown
index 351110636a4..d15632cc14d 100644
--- a/website/docs/r/glue_crawler.html.markdown
+++ b/website/docs/r/glue_crawler.html.markdown
@@ -12,7 +12,7 @@ Manages a Glue Crawler. More information can be found in the [AWS Glue
 Developer
 
 ## Example Usage
 
-### DynamoDB Target
+### DynamoDB Target Example
 
 ```hcl
 resource "aws_glue_crawler" "example" {
@@ -26,7 +26,7 @@ resource "aws_glue_crawler" "example" {
 }
 ```
 
-### JDBC Target
+### JDBC Target Example
 
 ```hcl
 resource "aws_glue_crawler" "example" {
@@ -41,7 +41,7 @@ resource "aws_glue_crawler" "example" {
 }
 ```
 
-### S3 Target
+### S3 Target Example
 
 ```hcl
 resource "aws_glue_crawler" "example" {
@@ -56,7 +56,7 @@ resource "aws_glue_crawler" "example" {
 ```
 
-### Catalog Target
+### Catalog Target Example
 
 ```hcl
 resource "aws_glue_crawler" "example" {
@@ -84,7 +84,7 @@ EOF
 }
 ```
 
-### MongoDB Target
+### MongoDB Target Example
 
 ```hcl
 resource "aws_glue_crawler" "example" {
@@ -99,7 +99,7 @@ resource "aws_glue_crawler" "example" {
 }
 ```
 
-### Configuration Settings
+### Configuration Settings Example
 
 ```hcl
 resource "aws_glue_crawler" "events_crawler" {
@@ -179,6 +179,12 @@ The following arguments are supported:
 
 -> **Note:** `configuration` for catalog target crawlers will have `{ ... "Grouping": { "TableGroupingPolicy": "CombineCompatibleSchemas"} }` by default.
 
+### MongoDB Target
+
+* `connection_name` - (Required) The name of the connection to use to connect to the Amazon DocumentDB or MongoDB target.
+* `path` - (Required) The path of the Amazon DocumentDB or MongoDB target (database/collection).
+* `scan_all` - (Optional) Indicates whether to scan all the records, or to sample rows from the table. Scanning all the records can take a long time when the table is not a high throughput table. Default value is `true`.
+
 ### Schema Change Policy
 
 * `delete_behavior` - (Optional) The deletion behavior when the crawler finds a deleted object. Valid values: `LOG`, `DELETE_FROM_DATABASE`, or `DEPRECATE_IN_DATABASE`. Defaults to `DEPRECATE_IN_DATABASE`. 
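The `recrawl_policy` plumbing added in the patches above follows the provider's usual expand/flatten pattern for one-item configuration blocks: Terraform hands the block to the resource as a single-element `[]interface{}` wrapping a `map[string]interface{}`, and `d.Set` expects the same shape back. A self-contained sketch of that round trip, using the real aws-sdk-go Glue types; the empty-slice guard and the short function names are additions for this sketch, since the patches themselves rely on `d.GetOk` to skip absent blocks:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

// expand mirrors expandGlueCrawlerRecrawlPolicy: unwrap the single-item
// block and build the API input struct.
func expand(cfg []interface{}) *glue.RecrawlPolicy {
	if len(cfg) == 0 || cfg[0] == nil {
		return nil // guard added here; the provider checks d.GetOk first
	}
	m := cfg[0].(map[string]interface{})
	return &glue.RecrawlPolicy{
		RecrawlBehavior: aws.String(m["recrawl_behavior"].(string)),
	}
}

// flatten mirrors flattenGlueCrawlerRecrawlPolicy: convert the API struct
// back into the []map form that d.Set expects for a TypeList block.
func flatten(p *glue.RecrawlPolicy) []map[string]interface{} {
	if p == nil {
		return []map[string]interface{}{}
	}
	return []map[string]interface{}{
		{"recrawl_behavior": aws.StringValue(p.RecrawlBehavior)},
	}
}

func main() {
	block := []interface{}{map[string]interface{}{
		"recrawl_behavior": glue.RecrawlBehaviorCrawlNewFoldersOnly,
	}}
	// Round-trips to the same single-item list:
	// [map[recrawl_behavior:CRAWL_NEW_FOLDERS_ONLY]]
	fmt.Println(flatten(expand(block)))
}
```

This is the same shape the acceptance tests assert against through the `recrawl_policy.#` and `recrawl_policy.0.recrawl_behavior` keys.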
From 731f51297385944d73b3e98beecf8f16b825892e Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 11 Dec 2020 10:59:17 +0200 Subject: [PATCH 0339/1212] fmt --- aws/resource_aws_glue_crawler_test.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/aws/resource_aws_glue_crawler_test.go b/aws/resource_aws_glue_crawler_test.go index 92b00020551..eac4aa6de9f 100644 --- a/aws/resource_aws_glue_crawler_test.go +++ b/aws/resource_aws_glue_crawler_test.go @@ -2433,9 +2433,9 @@ resource "aws_glue_catalog_database" "test" { resource "aws_glue_crawler" "test" { depends_on = [aws_iam_role_policy_attachment.test-AWSGlueServiceRole] - database_name = aws_glue_catalog_database.test.name - name = %[1]q - role = aws_iam_role.test.name + database_name = aws_glue_catalog_database.test.name + name = %[1]q + role = aws_iam_role.test.name lineage_configuration { crawler_lineage_settings = %[2]q @@ -2457,12 +2457,12 @@ resource "aws_glue_catalog_database" "test" { resource "aws_glue_crawler" "test" { depends_on = [aws_iam_role_policy_attachment.test-AWSGlueServiceRole] - database_name = aws_glue_catalog_database.test.name - name = %[1]q - role = aws_iam_role.test.name + database_name = aws_glue_catalog_database.test.name + name = %[1]q + role = aws_iam_role.test.name schema_change_policy { - delete_behavior = "LOG" + delete_behavior = "LOG" update_behavior = "LOG" } From 4fd0f6277eb65c99584423ad9db7e99961e56900 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 11 Dec 2020 11:04:04 +0200 Subject: [PATCH 0340/1212] fmt --- aws/resource_aws_glue_crawler_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_glue_crawler_test.go b/aws/resource_aws_glue_crawler_test.go index eac4aa6de9f..b4666abd946 100644 --- a/aws/resource_aws_glue_crawler_test.go +++ b/aws/resource_aws_glue_crawler_test.go @@ -2463,7 +2463,7 @@ resource "aws_glue_crawler" "test" { schema_change_policy { delete_behavior = "LOG" - update_behavior = "LOG" + update_behavior = "LOG" } recrawl_policy { From afa8fb165fc6c18739530292ce4eec7f2e1e0086 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 11 Dec 2020 18:28:19 +0200 Subject: [PATCH 0341/1212] comments --- aws/resource_aws_glue_crawler.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/aws/resource_aws_glue_crawler.go b/aws/resource_aws_glue_crawler.go index 38c9c837524..93f13266e1a 100644 --- a/aws/resource_aws_glue_crawler.go +++ b/aws/resource_aws_glue_crawler.go @@ -592,10 +592,7 @@ func resourceAwsGlueCrawlerUpdate(d *schema.ResourceData, meta interface{}) erro glueConn := meta.(*AWSClient).glueconn name := d.Get("name").(string) - if d.HasChanges( - "catalog_target", "classifiers", "configuration", "description", "dynamodb_target", "jdbc_target", "role", - "s3_target", "schedule", "schema_change_policy", "security_configuration", "table_prefix", "mongodb_target", - "lineage_configuration", "recrawl_policy") { + if d.HasChangesExcept("tags") { updateCrawlerInput, err := updateCrawlerInput(name, d) if err != nil { return err From f4e18d73e55374f19d29ac9690071c9de465feb9 Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Wed, 16 Dec 2020 21:57:54 +0200 Subject: [PATCH 0342/1212] Update glue_crawler.html.markdown --- website/docs/r/glue_crawler.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/glue_crawler.html.markdown b/website/docs/r/glue_crawler.html.markdown index d15632cc14d..cd6dba5b0fa 100644 --- a/website/docs/r/glue_crawler.html.markdown +++ 
b/website/docs/r/glue_crawler.html.markdown
@@ -141,7 +141,7 @@ The following arguments are supported:
 * `description` (Optional) Description of the crawler.
 * `dynamodb_target` (Optional) List of nested DynamoDB target arguments. See [Dynamodb Target](#dynamodb-target) below.
 * `jdbc_target` (Optional) List of nested JDBC target arguments. See [JDBC Target](#jdbc-target) below.
-* `s3_target` (Optional) List of nested Amazon S3 target arguments. See [S# Target](#s3-target) below.
+* `s3_target` (Optional) List of nested Amazon S3 target arguments. See [S3 Target](#s3-target) below.
 * `mongodb_target` (Optional) List of nested MongoDB target arguments. See [MongoDB Target](#mongodb-target) below.
 * `schedule` (Optional) A cron expression used to specify the schedule. For more information, see [Time-Based Schedules for Jobs and Crawlers](https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). For example, to run something every day at 12:15 UTC, you would specify: `cron(15 12 * * ? *)`.
 * `schema_change_policy` (Optional) Policy for the crawler's update and deletion behavior. See [Schema Change Policy](#schema-change-policy) below.

From 139fe25da57d76e74945976ac25c5f148e885089 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 21 Dec 2020 15:13:36 +0000
Subject: [PATCH 0343/1212] build(deps): bump github.com/aws/aws-sdk-go in /awsproviderlint (#16855)

---
 awsproviderlint/go.mod | 2 +-
 awsproviderlint/go.sum | 4 +-
 .../aws/aws-sdk-go/aws/endpoints/defaults.go | 76 +++++++++++++++++--
 .../github.com/aws/aws-sdk-go/aws/version.go | 2 +-
 awsproviderlint/vendor/modules.txt | 2 +-
 5 files changed, 75 insertions(+), 11 deletions(-)

diff --git a/awsproviderlint/go.mod b/awsproviderlint/go.mod
index 6d99e006a13..b9d02f41036 100644
--- a/awsproviderlint/go.mod
+++ b/awsproviderlint/go.mod
@@ -3,7 +3,7 @@ module github.com/terraform-providers/terraform-provider-aws/awsproviderlint
 go 1.15
 
 require (
-	github.com/aws/aws-sdk-go v1.36.7
+	github.com/aws/aws-sdk-go v1.36.12
 	github.com/bflad/tfproviderlint v0.21.0
 	github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0
 	golang.org/x/tools v0.0.0-20200928201943-a0ef9b62deab
diff --git a/awsproviderlint/go.sum b/awsproviderlint/go.sum
index 85d4e1b68c9..6b16394cac8 100644
--- a/awsproviderlint/go.sum
+++ b/awsproviderlint/go.sum
@@ -55,8 +55,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
 github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
 github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.36.7 h1:XoJPAjKoqvdL531XGWxKYn5eGX/xMoXzMN5fBtoyfSY=
-github.com/aws/aws-sdk-go v1.36.7/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/aws/aws-sdk-go v1.36.12 h1:YJpKFEMbqEoo+incs5qMe61n1JH3o4O1IMkMexLzJG8=
+github.com/aws/aws-sdk-go v1.36.12/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/bflad/gopaniccheck v0.1.0 h1:tJftp+bv42ouERmUMWLoUn/5bi/iQZjHPznM00cP/bU=
 github.com/bflad/gopaniccheck v0.1.0/go.mod h1:ZCj2vSr7EqVeDaqVsWN4n2MwdROx1YL+LFo47TSWtsA=
 github.com/bflad/tfproviderlint v0.21.0 h1:iSNU4khz+55oYA+5aXXMrz5Max4Mytb0JwPGhOwTIJo=
diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index 
78f2226071d..72dcdfad248 100644 --- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -827,12 +827,36 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "athena-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "athena-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "athena-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "athena-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "autoscaling": service{ @@ -1355,6 +1379,21 @@ var awsPartition = partition{ }, }, }, + "codeguru-reviewer": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "codepipeline": service{ Endpoints: endpoints{ @@ -3520,6 +3559,23 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "iotwireless": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{ + Hostname: "api.iotwireless.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "api.iotwireless.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, "kafka": service{ Endpoints: endpoints{ @@ -5636,6 +5692,7 @@ var awsPartition = partition{ "servicediscovery": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -5645,6 +5702,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -9751,6 +9809,12 @@ var awsisoPartition = partition{ }, }, }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "snowball": service{ Endpoints: endpoints{ diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go index ecd440191b0..70325bd761c 100644 --- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.36.7" +const SDKVersion = "1.36.12" diff --git a/awsproviderlint/vendor/modules.txt b/awsproviderlint/vendor/modules.txt index fa6ab616a68..365d7b6de34 100644 --- a/awsproviderlint/vendor/modules.txt +++ 
b/awsproviderlint/vendor/modules.txt @@ -12,7 +12,7 @@ cloud.google.com/go/storage github.com/agext/levenshtein # github.com/apparentlymart/go-textseg v1.0.0 github.com/apparentlymart/go-textseg/textseg -# github.com/aws/aws-sdk-go v1.36.7 +# github.com/aws/aws-sdk-go v1.36.12 ## explicit github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn From 7fa81902ed291386122655c901df41d1be7de3fe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Dec 2020 15:14:14 +0000 Subject: [PATCH 0344/1212] build(deps): bump github.com/aws/aws-sdk-go from 1.36.7 to 1.36.12 (#16857) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 409d552f47b..5d39519baed 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/terraform-providers/terraform-provider-aws go 1.15 require ( - github.com/aws/aws-sdk-go v1.36.7 + github.com/aws/aws-sdk-go v1.36.12 github.com/beevik/etree v1.1.0 github.com/fatih/color v1.9.0 // indirect github.com/hashicorp/aws-sdk-go-base v0.7.0 diff --git a/go.sum b/go.sum index 3a28bf9bdf2..93a69910ddc 100644 --- a/go.sum +++ b/go.sum @@ -64,8 +64,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.31.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.36.7 h1:XoJPAjKoqvdL531XGWxKYn5eGX/xMoXzMN5fBtoyfSY= -github.com/aws/aws-sdk-go v1.36.7/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.36.12 h1:YJpKFEMbqEoo+incs5qMe61n1JH3o4O1IMkMexLzJG8= +github.com/aws/aws-sdk-go v1.36.12/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs= github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= From 1a06a1d0faa35feb271cbbf86d0596b58d52fe65 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Dec 2020 12:33:43 -0500 Subject: [PATCH 0345/1212] build(deps): bump github.com/hashicorp/terraform-plugin-sdk/v2 (#16858) Bumps [github.com/hashicorp/terraform-plugin-sdk/v2](https://github.com/hashicorp/terraform-plugin-sdk) from 2.3.0 to 2.4.0. 
- [Release notes](https://github.com/hashicorp/terraform-plugin-sdk/releases) - [Changelog](https://github.com/hashicorp/terraform-plugin-sdk/blob/master/CHANGELOG.md) - [Commits](https://github.com/hashicorp/terraform-plugin-sdk/compare/v2.3.0...v2.4.0) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 +--- go.sum | 22 +++++++++++++--------- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 5d39519baed..e99137fcde3 100644 --- a/go.mod +++ b/go.mod @@ -9,10 +9,9 @@ require ( github.com/hashicorp/aws-sdk-go-base v0.7.0 github.com/hashicorp/go-cleanhttp v0.5.1 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 - github.com/hashicorp/go-hclog v0.10.0 // indirect github.com/hashicorp/go-multierror v1.1.0 github.com/hashicorp/go-version v1.2.1 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.0 github.com/jen20/awspolicyequivalence v1.1.0 github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba github.com/mattn/go-colorable v0.1.7 // indirect @@ -20,6 +19,5 @@ require ( github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/go-testing-interface v1.0.4 github.com/pquerna/otp v1.3.0 - github.com/stretchr/testify v1.6.1 // indirect gopkg.in/yaml.v2 v2.3.0 ) diff --git a/go.sum b/go.sum index 93a69910ddc..52e82c886d6 100644 --- a/go.sum +++ b/go.sum @@ -48,6 +48,7 @@ github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 h1:w1UutsfOrms1J05zt7I github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= +github.com/andybalholm/crlf v0.0.0-20171020200849-670099aa064f/go.mod h1:k8feO4+kXDxro6ErPXBRTJ/ro2mf0SsFG8s7doP9kJE= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/apparentlymart/go-cidr v1.0.1 h1:NmIwLZ/KdsjIUlhf+/Np40atNXm/+lZ5txfTJ/SpF+U= @@ -180,14 +181,16 @@ github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUC github.com/hashicorp/go-getter v1.5.0 h1:ciWJaeZWSMbc5OiLMpKp40MKFPqO44i0h3uyfXPBkkk= github.com/hashicorp/go-getter v1.5.0/go.mod h1:a7z7NPPfNQpJWcn4rSWFtdrSldqLdLPEF3d8nFMsSLM= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.10.0 h1:b86HUuA126IcSHyC55WjPo7KtCOVeTCKIjr+3lBhPxI= -github.com/hashicorp/go-hclog v0.10.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.15.0 h1:qMuK0wxsoW4D0ddCCYwPSTm4KQv1X1ke3WmPWZ0Mvsk= +github.com/hashicorp/go-hclog v0.15.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod 
h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-plugin v1.3.0 h1:4d/wJojzvHV1I4i/rrjVaeuyxWrLzDE1mDCyDy8fXS8= github.com/hashicorp/go-plugin v1.3.0/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0= +github.com/hashicorp/go-plugin v1.4.0 h1:b0O7rs5uiJ99Iu9HugEzsM67afboErkHUWddUSpUO3A= +github.com/hashicorp/go-plugin v1.4.0/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -202,14 +205,14 @@ github.com/hashicorp/hcl/v2 v2.3.0 h1:iRly8YaMwTBAKhn1Ybk7VSdzbnopghktCD031P8ggU github.com/hashicorp/hcl/v2 v2.3.0/go.mod h1:d+FwDBbOLvpAM3Z6J7gPj/VoAGkNe/gm352ZhjJ/Zv8= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/terraform-exec v0.10.0 h1:3nh/1e3u9gYRUQGOKWp/8wPR7ABlL2F14sZMZBrp+dM= -github.com/hashicorp/terraform-exec v0.10.0/go.mod h1:tOT8j1J8rP05bZBGWXfMyU3HkLi1LWyqL3Bzsc3CJjo= -github.com/hashicorp/terraform-json v0.5.0 h1:7TV3/F3y7QVSuN4r9BEXqnWqrAyeOtON8f0wvREtyzs= -github.com/hashicorp/terraform-json v0.5.0/go.mod h1:eAbqb4w0pSlRmdvl8fOyHAi/+8jnkVYN28gJkSJrLhU= +github.com/hashicorp/terraform-exec v0.12.0 h1:Tb1VC2gqArl9EJziJjoazep2MyxMk00tnNKV/rgMba0= +github.com/hashicorp/terraform-exec v0.12.0/go.mod h1:SGhto91bVRlgXQWcJ5znSz+29UZIa8kpBbkGwQ+g9E8= +github.com/hashicorp/terraform-json v0.8.0 h1:XObQ3PgqU52YLQKEaJ08QtUshAfN3yu4u8ebSW0vztc= +github.com/hashicorp/terraform-json v0.8.0/go.mod h1:3defM4kkMfttwiE7VakJDwCd4R+umhSQnvJwORXbprE= github.com/hashicorp/terraform-plugin-go v0.1.0 h1:kyXZ0nkHxiRev/q18N40IbRRk4AV0zE/MDJkDM3u8dY= github.com/hashicorp/terraform-plugin-go v0.1.0/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0 h1:Egv+R1tOOjPNz643KBTx3tLT6RdFGGYJcZlyLvrPcEU= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0/go.mod h1:+12dJQebYjuU/yiq94iZUPuC66abfRBrXdpVJia3ojk= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.0 h1:2c+vG46celrDCsfYEIzaXxvBaAXCqlVG77LwtFz8cfs= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.0/go.mod h1:JBItawj+j8Ssla5Ib6BC/W9VQkOucBfnX7VRtyx1vw8= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= @@ -322,6 +325,7 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= github.com/zclconf/go-cty v1.2.1 h1:vGMsygfmeCl4Xb6OA5U5XVAaQZ69FvoG7X2jUtQujb8= github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= +github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= From 063ec5e9cc911bfbafdb84fa3bb326960eac5ae0 Mon 
Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Dec 2020 18:13:52 +0000 Subject: [PATCH 0346/1212] build(deps): bump github.com/hashicorp/terraform-plugin-sdk/v2 (#16856) --- awsproviderlint/go.mod | 2 +- awsproviderlint/go.sum | 28 +- .../vendor/github.com/emirpasic/gods/LICENSE | 41 - .../emirpasic/gods/containers/containers.go | 35 - .../emirpasic/gods/containers/enumerable.go | 61 - .../emirpasic/gods/containers/iterator.go | 109 - .../gods/containers/serialization.go | 17 - .../gods/lists/arraylist/arraylist.go | 228 - .../gods/lists/arraylist/enumerable.go | 79 - .../gods/lists/arraylist/iterator.go | 83 - .../gods/lists/arraylist/serialization.go | 29 - .../github.com/emirpasic/gods/lists/lists.go | 33 - .../gods/trees/binaryheap/binaryheap.go | 163 - .../gods/trees/binaryheap/iterator.go | 84 - .../gods/trees/binaryheap/serialization.go | 22 - .../github.com/emirpasic/gods/trees/trees.go | 21 - .../emirpasic/gods/utils/comparator.go | 251 - .../github.com/emirpasic/gods/utils/sort.go | 29 - .../github.com/emirpasic/gods/utils/utils.go | 47 - .../vendor/github.com/fatih/color/.travis.yml | 5 + .../vendor/github.com/fatih/color/Gopkg.lock | 27 + .../vendor/github.com/fatih/color/Gopkg.toml | 30 + .../vendor/github.com/fatih/color/LICENSE.md | 20 + .../vendor/github.com/fatih/color/README.md | 179 + .../vendor/github.com/fatih/color/color.go | 603 ++ .../vendor/github.com/fatih/color/doc.go | 133 + .../vendor/github.com/go-git/gcfg/LICENSE | 28 - .../vendor/github.com/go-git/gcfg/README | 4 - .../vendor/github.com/go-git/gcfg/doc.go | 145 - .../vendor/github.com/go-git/gcfg/errors.go | 41 - .../vendor/github.com/go-git/gcfg/go1_0.go | 7 - .../vendor/github.com/go-git/gcfg/go1_2.go | 9 - .../vendor/github.com/go-git/gcfg/read.go | 273 - .../github.com/go-git/gcfg/scanner/errors.go | 121 - .../github.com/go-git/gcfg/scanner/scanner.go | 342 - .../vendor/github.com/go-git/gcfg/set.go | 332 - .../github.com/go-git/gcfg/token/position.go | 435 -- .../github.com/go-git/gcfg/token/serialize.go | 56 - .../github.com/go-git/gcfg/token/token.go | 83 - .../github.com/go-git/gcfg/types/bool.go | 23 - .../github.com/go-git/gcfg/types/doc.go | 4 - .../github.com/go-git/gcfg/types/enum.go | 44 - .../github.com/go-git/gcfg/types/int.go | 86 - .../github.com/go-git/gcfg/types/scan.go | 23 - .../github.com/go-git/go-billy/v5/.gitignore | 4 - .../github.com/go-git/go-billy/v5/LICENSE | 201 - .../github.com/go-git/go-billy/v5/README.md | 73 - .../github.com/go-git/go-billy/v5/fs.go | 202 - .../github.com/go-git/go-billy/v5/go.mod | 10 - .../github.com/go-git/go-billy/v5/go.sum | 14 - .../go-billy/v5/helper/chroot/chroot.go | 242 - .../go-billy/v5/helper/polyfill/polyfill.go | 105 - .../github.com/go-git/go-billy/v5/osfs/os.go | 139 - .../go-git/go-billy/v5/osfs/os_plan9.go | 83 - .../go-git/go-billy/v5/osfs/os_posix.go | 27 - .../go-git/go-billy/v5/osfs/os_windows.go | 61 - .../go-git/go-billy/v5/util/glob.go | 111 - .../go-git/go-billy/v5/util/util.go | 224 - .../github.com/go-git/go-git/v5/.gitignore | 4 - .../go-git/go-git/v5/CODE_OF_CONDUCT.md | 74 - .../go-git/go-git/v5/COMPATIBILITY.md | 111 - .../go-git/go-git/v5/CONTRIBUTING.md | 46 - .../github.com/go-git/go-git/v5/LICENSE | 201 - .../github.com/go-git/go-git/v5/Makefile | 38 - .../github.com/go-git/go-git/v5/README.md | 131 - .../github.com/go-git/go-git/v5/blame.go | 302 - .../github.com/go-git/go-git/v5/common.go | 22 - .../go-git/go-git/v5/config/branch.go | 90 - 
.../go-git/go-git/v5/config/config.go | 564 -- .../go-git/go-git/v5/config/modules.go | 139 - .../go-git/go-git/v5/config/refspec.go | 155 - .../vendor/github.com/go-git/go-git/v5/doc.go | 10 - .../vendor/github.com/go-git/go-git/v5/go.mod | 28 - .../vendor/github.com/go-git/go-git/v5/go.sum | 80 - .../go-git/v5/internal/revision/parser.go | 622 -- .../go-git/v5/internal/revision/scanner.go | 117 - .../go-git/v5/internal/revision/token.go | 28 - .../go-git/go-git/v5/internal/url/url.go | 37 - .../go-git/go-git/v5/object_walker.go | 104 - .../github.com/go-git/go-git/v5/options.go | 551 -- .../go-git/v5/plumbing/cache/buffer_lru.go | 98 - .../go-git/go-git/v5/plumbing/cache/common.go | 39 - .../go-git/v5/plumbing/cache/object_lru.go | 101 - .../go-git/go-git/v5/plumbing/color/color.go | 38 - .../go-git/go-git/v5/plumbing/error.go | 35 - .../go-git/v5/plumbing/filemode/filemode.go | 188 - .../v5/plumbing/format/config/common.go | 99 - .../v5/plumbing/format/config/decoder.go | 37 - .../go-git/v5/plumbing/format/config/doc.go | 122 - .../v5/plumbing/format/config/encoder.go | 77 - .../v5/plumbing/format/config/option.go | 117 - .../v5/plumbing/format/config/section.go | 146 - .../v5/plumbing/format/diff/colorconfig.go | 97 - .../go-git/v5/plumbing/format/diff/patch.go | 58 - .../plumbing/format/diff/unified_encoder.go | 376 - .../v5/plumbing/format/gitignore/dir.go | 136 - .../v5/plumbing/format/gitignore/doc.go | 70 - .../v5/plumbing/format/gitignore/matcher.go | 30 - .../v5/plumbing/format/gitignore/pattern.go | 153 - .../v5/plumbing/format/idxfile/decoder.go | 177 - .../go-git/v5/plumbing/format/idxfile/doc.go | 128 - .../v5/plumbing/format/idxfile/encoder.go | 142 - .../v5/plumbing/format/idxfile/idxfile.go | 346 - .../v5/plumbing/format/idxfile/writer.go | 186 - .../v5/plumbing/format/index/decoder.go | 477 -- .../go-git/v5/plumbing/format/index/doc.go | 360 - .../v5/plumbing/format/index/encoder.go | 150 - .../go-git/v5/plumbing/format/index/index.go | 213 - .../go-git/v5/plumbing/format/index/match.go | 186 - .../go-git/v5/plumbing/format/objfile/doc.go | 2 - .../v5/plumbing/format/objfile/reader.go | 114 - .../v5/plumbing/format/objfile/writer.go | 109 - .../v5/plumbing/format/packfile/common.go | 78 - .../plumbing/format/packfile/delta_index.go | 297 - .../format/packfile/delta_selector.go | 369 - .../v5/plumbing/format/packfile/diff_delta.go | 204 - .../go-git/v5/plumbing/format/packfile/doc.go | 39 - .../v5/plumbing/format/packfile/encoder.go | 225 - .../v5/plumbing/format/packfile/error.go | 30 - .../v5/plumbing/format/packfile/fsobject.go | 116 - .../plumbing/format/packfile/object_pack.go | 164 - .../v5/plumbing/format/packfile/packfile.go | 565 -- .../v5/plumbing/format/packfile/parser.go | 495 -- .../plumbing/format/packfile/patch_delta.go | 253 - .../v5/plumbing/format/packfile/scanner.go | 466 -- .../v5/plumbing/format/pktline/encoder.go | 122 - .../v5/plumbing/format/pktline/scanner.go | 134 - .../go-git/go-git/v5/plumbing/hash.go | 83 - .../go-git/go-git/v5/plumbing/memory.go | 61 - .../go-git/go-git/v5/plumbing/object.go | 111 - .../go-git/go-git/v5/plumbing/object/blob.go | 144 - .../go-git/v5/plumbing/object/change.go | 159 - .../v5/plumbing/object/change_adaptor.go | 61 - .../go-git/v5/plumbing/object/commit.go | 442 -- .../v5/plumbing/object/commit_walker.go | 327 - .../v5/plumbing/object/commit_walker_bfs.go | 100 - .../object/commit_walker_bfs_filtered.go | 176 - .../v5/plumbing/object/commit_walker_ctime.go | 103 - .../v5/plumbing/object/commit_walker_limit.go | 65 
- .../v5/plumbing/object/commit_walker_path.go | 161 - .../go-git/v5/plumbing/object/common.go | 12 - .../go-git/v5/plumbing/object/difftree.go | 98 - .../go-git/go-git/v5/plumbing/object/file.go | 137 - .../go-git/v5/plumbing/object/merge_base.go | 210 - .../go-git/v5/plumbing/object/object.go | 239 - .../go-git/go-git/v5/plumbing/object/patch.go | 346 - .../go-git/v5/plumbing/object/rename.go | 813 -- .../go-git/go-git/v5/plumbing/object/tag.go | 357 - .../go-git/go-git/v5/plumbing/object/tree.go | 525 -- .../go-git/v5/plumbing/object/treenoder.go | 136 - .../v5/plumbing/protocol/packp/advrefs.go | 211 - .../plumbing/protocol/packp/advrefs_decode.go | 288 - .../plumbing/protocol/packp/advrefs_encode.go | 176 - .../protocol/packp/capability/capability.go | 252 - .../protocol/packp/capability/list.go | 196 - .../v5/plumbing/protocol/packp/common.go | 70 - .../go-git/v5/plumbing/protocol/packp/doc.go | 724 -- .../plumbing/protocol/packp/report_status.go | 165 - .../v5/plumbing/protocol/packp/shallowupd.go | 92 - .../protocol/packp/sideband/common.go | 33 - .../plumbing/protocol/packp/sideband/demux.go | 148 - .../plumbing/protocol/packp/sideband/doc.go | 31 - .../plumbing/protocol/packp/sideband/muxer.go | 65 - .../v5/plumbing/protocol/packp/srvresp.go | 127 - .../v5/plumbing/protocol/packp/ulreq.go | 168 - .../plumbing/protocol/packp/ulreq_decode.go | 257 - .../plumbing/protocol/packp/ulreq_encode.go | 145 - .../v5/plumbing/protocol/packp/updreq.go | 122 - .../plumbing/protocol/packp/updreq_decode.go | 250 - .../plumbing/protocol/packp/updreq_encode.go | 75 - .../v5/plumbing/protocol/packp/uppackreq.go | 98 - .../v5/plumbing/protocol/packp/uppackresp.go | 109 - .../go-git/go-git/v5/plumbing/reference.go | 209 - .../go-git/go-git/v5/plumbing/revision.go | 11 - .../go-git/v5/plumbing/revlist/revlist.go | 230 - .../go-git/go-git/v5/plumbing/storer/doc.go | 2 - .../go-git/go-git/v5/plumbing/storer/index.go | 9 - .../go-git/v5/plumbing/storer/object.go | 288 - .../go-git/v5/plumbing/storer/reference.go | 240 - .../go-git/v5/plumbing/storer/shallow.go | 10 - .../go-git/v5/plumbing/storer/storer.go | 15 - .../v5/plumbing/transport/client/client.go | 48 - .../go-git/v5/plumbing/transport/common.go | 274 - .../v5/plumbing/transport/file/client.go | 156 - .../v5/plumbing/transport/file/server.go | 53 - .../v5/plumbing/transport/git/common.go | 109 - .../v5/plumbing/transport/http/common.go | 281 - .../plumbing/transport/http/receive_pack.go | 106 - .../v5/plumbing/transport/http/upload_pack.go | 123 - .../transport/internal/common/common.go | 474 -- .../transport/internal/common/server.go | 73 - .../v5/plumbing/transport/server/loader.go | 64 - .../v5/plumbing/transport/server/server.go | 424 - .../v5/plumbing/transport/ssh/auth_method.go | 322 - .../v5/plumbing/transport/ssh/common.go | 228 - .../github.com/go-git/go-git/v5/prune.go | 66 - .../github.com/go-git/go-git/v5/references.go | 264 - .../github.com/go-git/go-git/v5/remote.go | 1154 --- .../github.com/go-git/go-git/v5/repository.go | 1614 ---- .../github.com/go-git/go-git/v5/status.go | 79 - .../go-git/v5/storage/filesystem/config.go | 48 - .../v5/storage/filesystem/deltaobject.go | 37 - .../v5/storage/filesystem/dotgit/dotgit.go | 1111 --- .../dotgit/dotgit_rewrite_packed_refs.go | 81 - .../filesystem/dotgit/dotgit_setref.go | 90 - .../v5/storage/filesystem/dotgit/writers.go | 284 - .../go-git/v5/storage/filesystem/index.go | 54 - .../go-git/v5/storage/filesystem/module.go | 20 - .../go-git/v5/storage/filesystem/object.go | 817 -- 
.../go-git/v5/storage/filesystem/reference.go | 44 - .../go-git/v5/storage/filesystem/shallow.go | 54 - .../go-git/v5/storage/filesystem/storage.go | 73 - .../go-git/v5/storage/memory/storage.go | 320 - .../go-git/go-git/v5/storage/storer.go | 30 - .../github.com/go-git/go-git/v5/submodule.go | 357 - .../go-git/go-git/v5/utils/binary/read.go | 180 - .../go-git/go-git/v5/utils/binary/write.go | 50 - .../go-git/go-git/v5/utils/diff/diff.go | 61 - .../go-git/go-git/v5/utils/ioutil/common.go | 170 - .../go-git/v5/utils/merkletrie/change.go | 149 - .../go-git/v5/utils/merkletrie/difftree.go | 428 - .../go-git/go-git/v5/utils/merkletrie/doc.go | 34 - .../go-git/v5/utils/merkletrie/doubleiter.go | 187 - .../v5/utils/merkletrie/filesystem/node.go | 196 - .../go-git/v5/utils/merkletrie/index/node.go | 90 - .../utils/merkletrie/internal/frame/frame.go | 91 - .../go-git/go-git/v5/utils/merkletrie/iter.go | 216 - .../go-git/v5/utils/merkletrie/noder/noder.go | 59 - .../go-git/v5/utils/merkletrie/noder/path.go | 90 - .../github.com/go-git/go-git/v5/worktree.go | 954 --- .../go-git/go-git/v5/worktree_bsd.go | 26 - .../go-git/go-git/v5/worktree_commit.go | 228 - .../go-git/go-git/v5/worktree_linux.go | 26 - .../go-git/go-git/v5/worktree_plan9.go | 31 - .../go-git/go-git/v5/worktree_status.go | 660 -- .../go-git/go-git/v5/worktree_unix_other.go | 26 - .../go-git/go-git/v5/worktree_windows.go | 35 - .../github.com/hashicorp/go-hclog/README.md | 2 +- .../hashicorp/go-hclog/colorize_unix.go | 27 + .../hashicorp/go-hclog/colorize_windows.go | 33 + .../github.com/hashicorp/go-hclog/exclude.go | 71 + .../github.com/hashicorp/go-hclog/global.go | 14 + .../github.com/hashicorp/go-hclog/go.mod | 5 + .../github.com/hashicorp/go-hclog/go.sum | 12 + .../hashicorp/go-hclog/interceptlogger.go | 235 + .../hashicorp/go-hclog/intlogger.go | 244 +- .../github.com/hashicorp/go-hclog/logger.go | 173 +- .../hashicorp/go-hclog/nulllogger.go | 6 + .../github.com/hashicorp/go-hclog/stdlog.go | 68 +- .../github.com/hashicorp/go-hclog/writer.go | 20 +- .../github.com/hashicorp/go-plugin/go.mod | 2 +- .../github.com/hashicorp/go-plugin/go.sum | 17 +- .../hashicorp/go-plugin/grpc_stdio.go | 4 +- .../github.com/hashicorp/go-plugin/server.go | 4 - .../internal/version/version.go | 2 +- .../hashicorp/terraform-exec/tfexec/cmd.go | 51 +- .../hashicorp/terraform-exec/tfexec/errors.go | 21 +- .../hashicorp/terraform-exec/tfexec/fmt.go | 160 + .../terraform-exec/tfexec/options.go | 34 + .../hashicorp/terraform-exec/tfexec/show.go | 2 + .../terraform-exec/tfexec/state_mv.go | 105 + .../terraform-exec/tfexec/terraform.go | 14 +- .../terraform-exec/tfexec/upgrade012.go | 80 + .../terraform-exec/tfexec/validate.go | 43 + .../terraform-exec/tfexec/version.go | 1 + .../terraform-exec/tfinstall/git_ref.go | 84 - .../hashicorp/terraform-json/Makefile | 2 +- .../hashicorp/terraform-json/config.go | 4 + .../hashicorp/terraform-json/go.mod | 1 + .../hashicorp/terraform-json/schemas.go | 2 +- .../hashicorp/terraform-json/state.go | 21 +- .../hashicorp/terraform-json/validate.go | 33 + .../hashicorp/terraform-json/version.go | 11 + .../terraform-plugin-sdk/v2/diag/helpers.go | 3 + .../v2/helper/resource/error.go | 12 + .../v2/helper/resource/json.go | 12 + .../v2/helper/resource/plugin.go | 14 +- .../v2/helper/resource/state_shim.go | 26 +- .../v2/helper/resource/testing_new.go | 4 +- .../v2/helper/resource/testing_new_config.go | 3 +- .../v2/helper/resource/wait.go | 4 + .../v2/helper/schema/grpc_provider.go | 8 +- .../v2/helper/schema/json.go | 12 + 
.../v2/helper/schema/resource.go | 10 + .../v2/helper/schema/schema.go | 2 - .../v2/helper/schema/shims.go | 14 +- .../v2/helper/validation/meta.go | 29 + .../v2/helper/validation/testing.go | 63 +- .../terraform-plugin-sdk/v2/meta/meta.go | 2 +- .../terraform-plugin-sdk/v2/plugin/serve.go | 24 +- .../github.com/imdario/mergo/.deepsource.toml | 12 - .../github.com/imdario/mergo/.gitignore | 33 - .../github.com/imdario/mergo/.travis.yml | 9 - .../imdario/mergo/CODE_OF_CONDUCT.md | 46 - .../vendor/github.com/imdario/mergo/LICENSE | 28 - .../vendor/github.com/imdario/mergo/README.md | 238 - .../vendor/github.com/imdario/mergo/doc.go | 44 - .../vendor/github.com/imdario/mergo/map.go | 176 - .../vendor/github.com/imdario/mergo/merge.go | 338 - .../vendor/github.com/imdario/mergo/mergo.go | 97 - .../github.com/jbenet/go-context/io/ctxio.go | 120 - .../kevinburke/ssh_config/.gitattributes | 1 - .../kevinburke/ssh_config/.gitignore | 0 .../github.com/kevinburke/ssh_config/.mailmap | 1 - .../kevinburke/ssh_config/.travis.yml | 14 - .../kevinburke/ssh_config/AUTHORS.txt | 5 - .../github.com/kevinburke/ssh_config/LICENSE | 49 - .../github.com/kevinburke/ssh_config/Makefile | 30 - .../kevinburke/ssh_config/README.md | 81 - .../kevinburke/ssh_config/config.go | 649 -- .../github.com/kevinburke/ssh_config/lexer.go | 240 - .../kevinburke/ssh_config/parser.go | 191 - .../kevinburke/ssh_config/position.go | 25 - .../github.com/kevinburke/ssh_config/token.go | 49 - .../kevinburke/ssh_config/validators.go | 162 - .../github.com/mattn/go-colorable/.travis.yml | 9 + .../go-context => mattn/go-colorable}/LICENSE | 10 +- .../github.com/mattn/go-colorable/README.md | 48 + .../mattn/go-colorable/colorable_appengine.go | 29 + .../mattn/go-colorable/colorable_others.go | 30 + .../mattn/go-colorable/colorable_windows.go | 1005 +++ .../github.com/mattn/go-colorable/go.mod | 3 + .../github.com/mattn/go-colorable/go.sum | 4 + .../mattn/go-colorable/noncolorable.go | 55 + .../github.com/mattn/go-isatty/.travis.yml | 13 + .../vendor/github.com/mattn/go-isatty/LICENSE | 9 + .../github.com/mattn/go-isatty/README.md | 50 + .../vendor/github.com/mattn/go-isatty/doc.go | 2 + .../vendor/github.com/mattn/go-isatty/go.mod | 5 + .../vendor/github.com/mattn/go-isatty/go.sum | 4 + .../mattn/go-isatty/isatty_android.go | 23 + .../github.com/mattn/go-isatty/isatty_bsd.go | 24 + .../mattn/go-isatty/isatty_others.go | 15 + .../mattn/go-isatty/isatty_plan9.go | 22 + .../mattn/go-isatty/isatty_solaris.go | 22 + .../mattn/go-isatty/isatty_tcgets.go | 19 + .../mattn/go-isatty/isatty_windows.go | 125 + .../vendor/github.com/sergi/go-diff/AUTHORS | 25 - .../github.com/sergi/go-diff/CONTRIBUTORS | 32 - .../vendor/github.com/sergi/go-diff/LICENSE | 20 - .../sergi/go-diff/diffmatchpatch/diff.go | 1345 ---- .../go-diff/diffmatchpatch/diffmatchpatch.go | 46 - .../sergi/go-diff/diffmatchpatch/match.go | 160 - .../sergi/go-diff/diffmatchpatch/mathutil.go | 23 - .../diffmatchpatch/operation_string.go | 17 - .../sergi/go-diff/diffmatchpatch/patch.go | 556 -- .../go-diff/diffmatchpatch/stringutil.go | 88 - .../github.com/xanzy/ssh-agent/.gitignore | 24 - .../vendor/github.com/xanzy/ssh-agent/LICENSE | 202 - .../github.com/xanzy/ssh-agent/README.md | 23 - .../vendor/github.com/xanzy/ssh-agent/go.mod | 6 - .../vendor/github.com/xanzy/ssh-agent/go.sum | 4 - .../xanzy/ssh-agent/pageant_windows.go | 146 - .../github.com/xanzy/ssh-agent/sshagent.go | 49 - .../xanzy/ssh-agent/sshagent_windows.go | 80 - .../golang.org/x/crypto/blowfish/block.go | 159 - 
.../golang.org/x/crypto/blowfish/cipher.go | 99 - .../golang.org/x/crypto/blowfish/const.go | 199 - .../x/crypto/chacha20/chacha_arm64.go | 16 - .../x/crypto/chacha20/chacha_arm64.s | 307 - .../x/crypto/chacha20/chacha_generic.go | 398 - .../x/crypto/chacha20/chacha_noasm.go | 13 - .../x/crypto/chacha20/chacha_ppc64le.go | 16 - .../x/crypto/chacha20/chacha_ppc64le.s | 449 -- .../x/crypto/chacha20/chacha_s390x.go | 26 - .../x/crypto/chacha20/chacha_s390x.s | 224 - .../golang.org/x/crypto/chacha20/xor.go | 42 - .../x/crypto/curve25519/curve25519.go | 95 - .../x/crypto/curve25519/curve25519_amd64.go | 240 - .../x/crypto/curve25519/curve25519_amd64.s | 1793 ----- .../x/crypto/curve25519/curve25519_generic.go | 828 -- .../x/crypto/curve25519/curve25519_noasm.go | 11 - .../golang.org/x/crypto/ed25519/ed25519.go | 222 - .../x/crypto/ed25519/ed25519_go113.go | 73 - .../ed25519/internal/edwards25519/const.go | 1422 ---- .../internal/edwards25519/edwards25519.go | 1793 ----- .../x/crypto/internal/subtle/aliasing.go | 32 - .../internal/subtle/aliasing_appengine.go | 35 - .../x/crypto/poly1305/bits_compat.go | 39 - .../x/crypto/poly1305/bits_go1.13.go | 21 - .../golang.org/x/crypto/poly1305/mac_noasm.go | 9 - .../golang.org/x/crypto/poly1305/poly1305.go | 99 - .../golang.org/x/crypto/poly1305/sum_amd64.go | 47 - .../golang.org/x/crypto/poly1305/sum_amd64.s | 108 - .../x/crypto/poly1305/sum_generic.go | 310 - .../x/crypto/poly1305/sum_ppc64le.go | 47 - .../x/crypto/poly1305/sum_ppc64le.s | 181 - .../golang.org/x/crypto/poly1305/sum_s390x.go | 75 - .../golang.org/x/crypto/poly1305/sum_s390x.s | 503 -- .../golang.org/x/crypto/ssh/agent/client.go | 813 -- .../golang.org/x/crypto/ssh/agent/forward.go | 103 - .../golang.org/x/crypto/ssh/agent/keyring.go | 241 - .../golang.org/x/crypto/ssh/agent/server.go | 570 -- .../vendor/golang.org/x/crypto/ssh/buffer.go | 97 - .../vendor/golang.org/x/crypto/ssh/certs.go | 546 -- .../vendor/golang.org/x/crypto/ssh/channel.go | 633 -- .../vendor/golang.org/x/crypto/ssh/cipher.go | 781 -- .../vendor/golang.org/x/crypto/ssh/client.go | 278 - .../golang.org/x/crypto/ssh/client_auth.go | 641 -- .../vendor/golang.org/x/crypto/ssh/common.go | 404 - .../golang.org/x/crypto/ssh/connection.go | 143 - .../vendor/golang.org/x/crypto/ssh/doc.go | 21 - .../golang.org/x/crypto/ssh/handshake.go | 647 -- .../ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go | 93 - .../vendor/golang.org/x/crypto/ssh/kex.go | 789 -- .../vendor/golang.org/x/crypto/ssh/keys.go | 1474 ---- .../x/crypto/ssh/knownhosts/knownhosts.go | 540 -- .../vendor/golang.org/x/crypto/ssh/mac.go | 61 - .../golang.org/x/crypto/ssh/messages.go | 866 --- .../vendor/golang.org/x/crypto/ssh/mux.go | 351 - .../vendor/golang.org/x/crypto/ssh/server.go | 716 -- .../vendor/golang.org/x/crypto/ssh/session.go | 647 -- .../vendor/golang.org/x/crypto/ssh/ssh_gss.go | 139 - .../golang.org/x/crypto/ssh/streamlocal.go | 116 - .../vendor/golang.org/x/crypto/ssh/tcpip.go | 474 -- .../golang.org/x/crypto/ssh/transport.go | 353 - .../golang.org/x/net/internal/socks/client.go | 168 - .../golang.org/x/net/internal/socks/socks.go | 317 - .../vendor/golang.org/x/net/proxy/dial.go | 54 - .../vendor/golang.org/x/net/proxy/direct.go | 31 - .../vendor/golang.org/x/net/proxy/per_host.go | 155 - .../vendor/golang.org/x/net/proxy/proxy.go | 149 - .../vendor/golang.org/x/net/proxy/socks5.go | 42 - .../golang.org/x/sys/cpu/asm_aix_ppc64.s | 17 - .../vendor/golang.org/x/sys/cpu/byteorder.go | 65 - .../vendor/golang.org/x/sys/cpu/cpu.go | 287 - 
.../vendor/golang.org/x/sys/cpu/cpu_aix.go | 32 - .../vendor/golang.org/x/sys/cpu/cpu_arm.go | 73 - .../vendor/golang.org/x/sys/cpu/cpu_arm64.go | 173 - .../vendor/golang.org/x/sys/cpu/cpu_arm64.s | 31 - .../golang.org/x/sys/cpu/cpu_gc_arm64.go | 11 - .../golang.org/x/sys/cpu/cpu_gc_s390x.go | 21 - .../vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 16 - .../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 11 - .../golang.org/x/sys/cpu/cpu_gccgo_s390x.go | 22 - .../golang.org/x/sys/cpu/cpu_gccgo_x86.c | 43 - .../golang.org/x/sys/cpu/cpu_gccgo_x86.go | 26 - .../vendor/golang.org/x/sys/cpu/cpu_linux.go | 15 - .../golang.org/x/sys/cpu/cpu_linux_arm.go | 39 - .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 71 - .../golang.org/x/sys/cpu/cpu_linux_mips64x.go | 23 - .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 9 - .../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 31 - .../golang.org/x/sys/cpu/cpu_linux_s390x.go | 159 - .../golang.org/x/sys/cpu/cpu_mips64x.go | 15 - .../vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 11 - .../golang.org/x/sys/cpu/cpu_other_arm.go | 9 - .../golang.org/x/sys/cpu/cpu_other_arm64.go | 9 - .../vendor/golang.org/x/sys/cpu/cpu_ppc64x.go | 16 - .../golang.org/x/sys/cpu/cpu_riscv64.go | 11 - .../vendor/golang.org/x/sys/cpu/cpu_s390x.go | 30 - .../vendor/golang.org/x/sys/cpu/cpu_s390x.s | 57 - .../vendor/golang.org/x/sys/cpu/cpu_wasm.go | 17 - .../vendor/golang.org/x/sys/cpu/cpu_x86.go | 135 - .../vendor/golang.org/x/sys/cpu/cpu_x86.s | 27 - .../golang.org/x/sys/cpu/hwcap_linux.go | 56 - .../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 27 - .../x/sys/cpu/syscall_aix_ppc64_gc.go | 36 - .../golang.org/x/sys/windows/aliases.go | 13 - .../golang.org/x/sys/windows/dll_windows.go | 415 - .../vendor/golang.org/x/sys/windows/empty.s | 8 - .../golang.org/x/sys/windows/env_windows.go | 54 - .../golang.org/x/sys/windows/eventlog.go | 20 - .../golang.org/x/sys/windows/exec_windows.go | 97 - .../x/sys/windows/memory_windows.go | 31 - .../golang.org/x/sys/windows/mkerrors.bash | 63 - .../x/sys/windows/mkknownfolderids.bash | 27 - .../golang.org/x/sys/windows/mksyscall.go | 9 - .../vendor/golang.org/x/sys/windows/race.go | 30 - .../vendor/golang.org/x/sys/windows/race0.go | 25 - .../x/sys/windows/security_windows.go | 1406 ---- .../golang.org/x/sys/windows/service.go | 231 - .../vendor/golang.org/x/sys/windows/str.go | 22 - .../golang.org/x/sys/windows/syscall.go | 74 - .../x/sys/windows/syscall_windows.go | 1490 ---- .../golang.org/x/sys/windows/types_windows.go | 1774 ----- .../x/sys/windows/types_windows_386.go | 35 - .../x/sys/windows/types_windows_amd64.go | 34 - .../x/sys/windows/types_windows_arm.go | 35 - .../x/sys/windows/zerrors_windows.go | 6853 ----------------- .../x/sys/windows/zknownfolderids_windows.go | 149 - .../x/sys/windows/zsyscall_windows.go | 4083 ---------- .../vendor/gopkg.in/warnings.v0/LICENSE | 24 - .../vendor/gopkg.in/warnings.v0/README | 77 - .../vendor/gopkg.in/warnings.v0/warnings.go | 194 - awsproviderlint/vendor/modules.txt | 104 +- 486 files changed, 4166 insertions(+), 87616 deletions(-) delete mode 100644 awsproviderlint/vendor/github.com/emirpasic/gods/LICENSE delete mode 100644 awsproviderlint/vendor/github.com/emirpasic/gods/containers/containers.go delete mode 100644 awsproviderlint/vendor/github.com/emirpasic/gods/containers/enumerable.go delete mode 100644 awsproviderlint/vendor/github.com/emirpasic/gods/containers/iterator.go delete mode 100644 awsproviderlint/vendor/github.com/emirpasic/gods/containers/serialization.go delete mode 100644 
awsproviderlint/vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go delete mode 100644 awsproviderlint/vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go delete mode 100644 awsproviderlint/vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go delete mode 100644 awsproviderlint/vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go delete mode 100644 awsproviderlint/vendor/github.com/emirpasic/gods/lists/lists.go delete mode 100644 awsproviderlint/vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go delete mode 100644 awsproviderlint/vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go delete mode 100644 awsproviderlint/vendor/github.com/emirpasic/gods/trees/binaryheap/serialization.go delete mode 100644 awsproviderlint/vendor/github.com/emirpasic/gods/trees/trees.go delete mode 100644 awsproviderlint/vendor/github.com/emirpasic/gods/utils/comparator.go delete mode 100644 awsproviderlint/vendor/github.com/emirpasic/gods/utils/sort.go delete mode 100644 awsproviderlint/vendor/github.com/emirpasic/gods/utils/utils.go create mode 100644 awsproviderlint/vendor/github.com/fatih/color/.travis.yml create mode 100644 awsproviderlint/vendor/github.com/fatih/color/Gopkg.lock create mode 100644 awsproviderlint/vendor/github.com/fatih/color/Gopkg.toml create mode 100644 awsproviderlint/vendor/github.com/fatih/color/LICENSE.md create mode 100644 awsproviderlint/vendor/github.com/fatih/color/README.md create mode 100644 awsproviderlint/vendor/github.com/fatih/color/color.go create mode 100644 awsproviderlint/vendor/github.com/fatih/color/doc.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/gcfg/LICENSE delete mode 100644 awsproviderlint/vendor/github.com/go-git/gcfg/README delete mode 100644 awsproviderlint/vendor/github.com/go-git/gcfg/doc.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/gcfg/errors.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/gcfg/go1_0.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/gcfg/go1_2.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/gcfg/read.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/gcfg/scanner/errors.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/gcfg/scanner/scanner.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/gcfg/set.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/gcfg/token/position.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/gcfg/token/serialize.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/gcfg/token/token.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/gcfg/types/bool.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/gcfg/types/doc.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/gcfg/types/enum.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/gcfg/types/int.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/gcfg/types/scan.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-billy/v5/.gitignore delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-billy/v5/LICENSE delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-billy/v5/README.md delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-billy/v5/fs.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-billy/v5/go.mod delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-billy/v5/go.sum delete mode 100644 
awsproviderlint/vendor/github.com/go-git/go-billy/v5/helper/chroot/chroot.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-billy/v5/helper/polyfill/polyfill.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-billy/v5/osfs/os.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-billy/v5/osfs/os_windows.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-billy/v5/util/glob.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-billy/v5/util/util.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/.gitignore delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/CODE_OF_CONDUCT.md delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/CONTRIBUTING.md delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/LICENSE delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/Makefile delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/README.md delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/blame.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/common.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/config/branch.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/config/config.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/config/modules.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/config/refspec.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/doc.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/go.mod delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/go.sum delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/internal/revision/parser.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/internal/revision/scanner.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/internal/revision/token.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/internal/url/url.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/object_walker.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/options.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/cache/buffer_lru.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/cache/common.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/cache/object_lru.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/color/color.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/error.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/common.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/decoder.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/doc.go delete mode 100644 
awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/encoder.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/option.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/section.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/colorconfig.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/patch.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/doc.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/matcher.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/decoder.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/doc.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/encoder.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/writer.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/index/doc.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/index/index.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/index/match.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/doc.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/reader.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/writer.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/common.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_index.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_selector.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/diff_delta.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/doc.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/encoder.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/error.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/fsobject.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/object_pack.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/packfile.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/parser.go delete mode 
100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/scanner.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/encoder.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/hash.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/memory.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/blob.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/change.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/change_adaptor.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs_filtered.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_ctime.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_limit.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/common.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/difftree.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/file.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/merge_base.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/object.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/rename.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_encode.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/list.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/common.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/doc.go delete mode 100644 
awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/report_status.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/shallowupd.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/common.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/demux.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/doc.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/muxer.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_encode.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_decode.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_encode.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackreq.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackresp.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/reference.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/revision.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/revlist/revlist.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/doc.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/index.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/object.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/reference.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/shallow.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/storer.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/client/client.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/server.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/git/common.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/receive_pack.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/upload_pack.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/server.go delete mode 
100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/loader.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/server.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/auth_method.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/prune.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/references.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/remote.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/repository.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/status.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/config.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/deltaobject.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_setref.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/writers.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/index.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/module.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/reference.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/shallow.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/storage.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/memory/storage.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/storer.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/submodule.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/binary/read.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/binary/write.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/diff/diff.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/ioutil/common.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/change.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doc.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doubleiter.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/index/node.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/internal/frame/frame.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/iter.go delete mode 100644 
awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/noder.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/path.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_bsd.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_commit.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_linux.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_plan9.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_status.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_unix_other.go delete mode 100644 awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_windows.go create mode 100644 awsproviderlint/vendor/github.com/hashicorp/go-hclog/colorize_unix.go create mode 100644 awsproviderlint/vendor/github.com/hashicorp/go-hclog/colorize_windows.go create mode 100644 awsproviderlint/vendor/github.com/hashicorp/go-hclog/exclude.go create mode 100644 awsproviderlint/vendor/github.com/hashicorp/go-hclog/interceptlogger.go create mode 100644 awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/fmt.go create mode 100644 awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/state_mv.go create mode 100644 awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade012.go create mode 100644 awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/validate.go delete mode 100644 awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfinstall/git_ref.go create mode 100644 awsproviderlint/vendor/github.com/hashicorp/terraform-json/validate.go create mode 100644 awsproviderlint/vendor/github.com/hashicorp/terraform-json/version.go create mode 100644 awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/json.go create mode 100644 awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/json.go delete mode 100644 awsproviderlint/vendor/github.com/imdario/mergo/.deepsource.toml delete mode 100644 awsproviderlint/vendor/github.com/imdario/mergo/.gitignore delete mode 100644 awsproviderlint/vendor/github.com/imdario/mergo/.travis.yml delete mode 100644 awsproviderlint/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md delete mode 100644 awsproviderlint/vendor/github.com/imdario/mergo/LICENSE delete mode 100644 awsproviderlint/vendor/github.com/imdario/mergo/README.md delete mode 100644 awsproviderlint/vendor/github.com/imdario/mergo/doc.go delete mode 100644 awsproviderlint/vendor/github.com/imdario/mergo/map.go delete mode 100644 awsproviderlint/vendor/github.com/imdario/mergo/merge.go delete mode 100644 awsproviderlint/vendor/github.com/imdario/mergo/mergo.go delete mode 100644 awsproviderlint/vendor/github.com/jbenet/go-context/io/ctxio.go delete mode 100644 awsproviderlint/vendor/github.com/kevinburke/ssh_config/.gitattributes delete mode 100644 awsproviderlint/vendor/github.com/kevinburke/ssh_config/.gitignore delete mode 100644 awsproviderlint/vendor/github.com/kevinburke/ssh_config/.mailmap delete mode 100644 awsproviderlint/vendor/github.com/kevinburke/ssh_config/.travis.yml delete mode 100644 awsproviderlint/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt delete mode 100644 awsproviderlint/vendor/github.com/kevinburke/ssh_config/LICENSE delete mode 100644 
awsproviderlint/vendor/github.com/kevinburke/ssh_config/Makefile delete mode 100644 awsproviderlint/vendor/github.com/kevinburke/ssh_config/README.md delete mode 100644 awsproviderlint/vendor/github.com/kevinburke/ssh_config/config.go delete mode 100644 awsproviderlint/vendor/github.com/kevinburke/ssh_config/lexer.go delete mode 100644 awsproviderlint/vendor/github.com/kevinburke/ssh_config/parser.go delete mode 100644 awsproviderlint/vendor/github.com/kevinburke/ssh_config/position.go delete mode 100644 awsproviderlint/vendor/github.com/kevinburke/ssh_config/token.go delete mode 100644 awsproviderlint/vendor/github.com/kevinburke/ssh_config/validators.go create mode 100644 awsproviderlint/vendor/github.com/mattn/go-colorable/.travis.yml rename awsproviderlint/vendor/github.com/{jbenet/go-context => mattn/go-colorable}/LICENSE (88%) create mode 100644 awsproviderlint/vendor/github.com/mattn/go-colorable/README.md create mode 100644 awsproviderlint/vendor/github.com/mattn/go-colorable/colorable_appengine.go create mode 100644 awsproviderlint/vendor/github.com/mattn/go-colorable/colorable_others.go create mode 100644 awsproviderlint/vendor/github.com/mattn/go-colorable/colorable_windows.go create mode 100644 awsproviderlint/vendor/github.com/mattn/go-colorable/go.mod create mode 100644 awsproviderlint/vendor/github.com/mattn/go-colorable/go.sum create mode 100644 awsproviderlint/vendor/github.com/mattn/go-colorable/noncolorable.go create mode 100644 awsproviderlint/vendor/github.com/mattn/go-isatty/.travis.yml create mode 100644 awsproviderlint/vendor/github.com/mattn/go-isatty/LICENSE create mode 100644 awsproviderlint/vendor/github.com/mattn/go-isatty/README.md create mode 100644 awsproviderlint/vendor/github.com/mattn/go-isatty/doc.go create mode 100644 awsproviderlint/vendor/github.com/mattn/go-isatty/go.mod create mode 100644 awsproviderlint/vendor/github.com/mattn/go-isatty/go.sum create mode 100644 awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_android.go create mode 100644 awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_bsd.go create mode 100644 awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_others.go create mode 100644 awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_plan9.go create mode 100644 awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_solaris.go create mode 100644 awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_tcgets.go create mode 100644 awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_windows.go delete mode 100644 awsproviderlint/vendor/github.com/sergi/go-diff/AUTHORS delete mode 100644 awsproviderlint/vendor/github.com/sergi/go-diff/CONTRIBUTORS delete mode 100644 awsproviderlint/vendor/github.com/sergi/go-diff/LICENSE delete mode 100644 awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go delete mode 100644 awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go delete mode 100644 awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go delete mode 100644 awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go delete mode 100644 awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/operation_string.go delete mode 100644 awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go delete mode 100644 awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go delete mode 100644 awsproviderlint/vendor/github.com/xanzy/ssh-agent/.gitignore delete mode 100644 
awsproviderlint/vendor/github.com/xanzy/ssh-agent/LICENSE delete mode 100644 awsproviderlint/vendor/github.com/xanzy/ssh-agent/README.md delete mode 100644 awsproviderlint/vendor/github.com/xanzy/ssh-agent/go.mod delete mode 100644 awsproviderlint/vendor/github.com/xanzy/ssh-agent/go.sum delete mode 100644 awsproviderlint/vendor/github.com/xanzy/ssh-agent/pageant_windows.go delete mode 100644 awsproviderlint/vendor/github.com/xanzy/ssh-agent/sshagent.go delete mode 100644 awsproviderlint/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/blowfish/block.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/blowfish/cipher.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/blowfish/const.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_generic.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/chacha20/xor.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/curve25519/curve25519.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.s delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/curve25519/curve25519_generic.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ed25519/ed25519.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/internal/subtle/aliasing.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/poly1305/bits_compat.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/poly1305/mac_noasm.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/poly1305/poly1305.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_amd64.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_amd64.s delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_generic.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_s390x.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_s390x.s delete mode 100644 
awsproviderlint/vendor/golang.org/x/crypto/ssh/agent/client.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/agent/forward.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/agent/keyring.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/agent/server.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/buffer.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/certs.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/channel.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/cipher.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/client.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/client_auth.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/common.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/connection.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/doc.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/handshake.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/kex.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/keys.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/mac.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/messages.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/mux.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/server.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/session.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/ssh_gss.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/streamlocal.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/tcpip.go delete mode 100644 awsproviderlint/vendor/golang.org/x/crypto/ssh/transport.go delete mode 100644 awsproviderlint/vendor/golang.org/x/net/internal/socks/client.go delete mode 100644 awsproviderlint/vendor/golang.org/x/net/internal/socks/socks.go delete mode 100644 awsproviderlint/vendor/golang.org/x/net/proxy/dial.go delete mode 100644 awsproviderlint/vendor/golang.org/x/net/proxy/direct.go delete mode 100644 awsproviderlint/vendor/golang.org/x/net/proxy/per_host.go delete mode 100644 awsproviderlint/vendor/golang.org/x/net/proxy/proxy.go delete mode 100644 awsproviderlint/vendor/golang.org/x/net/proxy/socks5.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/byteorder.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_aix.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_arm.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_arm64.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_arm64.s delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go delete mode 100644 
awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_mips64x.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_mipsx.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_other_arm.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_riscv64.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_s390x.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_s390x.s delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_wasm.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_x86.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_x86.s delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/hwcap_linux.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/windows/aliases.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/windows/dll_windows.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/windows/empty.s delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/windows/env_windows.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/windows/eventlog.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/windows/exec_windows.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/windows/memory_windows.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/windows/mkerrors.bash delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/windows/mkknownfolderids.bash delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/windows/mksyscall.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/windows/race.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/windows/race0.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/windows/security_windows.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/windows/service.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/windows/str.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/windows/syscall.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/windows/syscall_windows.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/windows/types_windows.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/windows/types_windows_386.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/windows/types_windows_amd64.go delete mode 100644 
awsproviderlint/vendor/golang.org/x/sys/windows/types_windows_arm.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/windows/zerrors_windows.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go delete mode 100644 awsproviderlint/vendor/golang.org/x/sys/windows/zsyscall_windows.go delete mode 100644 awsproviderlint/vendor/gopkg.in/warnings.v0/LICENSE delete mode 100644 awsproviderlint/vendor/gopkg.in/warnings.v0/README delete mode 100644 awsproviderlint/vendor/gopkg.in/warnings.v0/warnings.go diff --git a/awsproviderlint/go.mod b/awsproviderlint/go.mod index b9d02f41036..cffc47a69f9 100644 --- a/awsproviderlint/go.mod +++ b/awsproviderlint/go.mod @@ -5,6 +5,6 @@ go 1.15 require ( github.com/aws/aws-sdk-go v1.36.12 github.com/bflad/tfproviderlint v0.21.0 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.0 golang.org/x/tools v0.0.0-20200928201943-a0ef9b62deab ) diff --git a/awsproviderlint/go.sum b/awsproviderlint/go.sum index 6b16394cac8..42d8cda4de5 100644 --- a/awsproviderlint/go.sum +++ b/awsproviderlint/go.sum @@ -40,6 +40,7 @@ github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= +github.com/andybalholm/crlf v0.0.0-20171020200849-670099aa064f/go.mod h1:k8feO4+kXDxro6ErPXBRTJ/ro2mf0SsFG8s7doP9kJE= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/apparentlymart/go-cidr v1.0.1/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= @@ -81,6 +82,7 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -170,11 +172,16 @@ github.com/hashicorp/go-getter v1.5.0/go.mod h1:a7z7NPPfNQpJWcn4rSWFtdrSldqLdLPE github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.15.0 h1:qMuK0wxsoW4D0ddCCYwPSTm4KQv1X1ke3WmPWZ0Mvsk= +github.com/hashicorp/go-hclog v0.15.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-multierror 
v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= github.com/hashicorp/go-plugin v1.3.0 h1:4d/wJojzvHV1I4i/rrjVaeuyxWrLzDE1mDCyDy8fXS8= github.com/hashicorp/go-plugin v1.3.0/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0= +github.com/hashicorp/go-plugin v1.4.0 h1:b0O7rs5uiJ99Iu9HugEzsM67afboErkHUWddUSpUO3A= +github.com/hashicorp/go-plugin v1.4.0/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -197,19 +204,21 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/terraform-config-inspect v0.0.0-20191115094559-17f92b0546e8/go.mod h1:p+ivJws3dpqbp1iP84+npOyAmTTOLMgCzrXd3GSdn/A= github.com/hashicorp/terraform-exec v0.3.0 h1:5WLBsnv9BoEUGlHJZETROZZxw+qO3/TFQEh6JMP2uaY= github.com/hashicorp/terraform-exec v0.3.0/go.mod h1:yKWvMPtkTaHpeAmllw+1qdHZ7E5u+pAZ+x8e2jQF6gM= -github.com/hashicorp/terraform-exec v0.10.0 h1:3nh/1e3u9gYRUQGOKWp/8wPR7ABlL2F14sZMZBrp+dM= -github.com/hashicorp/terraform-exec v0.10.0/go.mod h1:tOT8j1J8rP05bZBGWXfMyU3HkLi1LWyqL3Bzsc3CJjo= +github.com/hashicorp/terraform-exec v0.12.0 h1:Tb1VC2gqArl9EJziJjoazep2MyxMk00tnNKV/rgMba0= +github.com/hashicorp/terraform-exec v0.12.0/go.mod h1:SGhto91bVRlgXQWcJ5znSz+29UZIa8kpBbkGwQ+g9E8= github.com/hashicorp/terraform-json v0.4.0/go.mod h1:eAbqb4w0pSlRmdvl8fOyHAi/+8jnkVYN28gJkSJrLhU= github.com/hashicorp/terraform-json v0.5.0 h1:7TV3/F3y7QVSuN4r9BEXqnWqrAyeOtON8f0wvREtyzs= github.com/hashicorp/terraform-json v0.5.0/go.mod h1:eAbqb4w0pSlRmdvl8fOyHAi/+8jnkVYN28gJkSJrLhU= +github.com/hashicorp/terraform-json v0.8.0 h1:XObQ3PgqU52YLQKEaJ08QtUshAfN3yu4u8ebSW0vztc= +github.com/hashicorp/terraform-json v0.8.0/go.mod h1:3defM4kkMfttwiE7VakJDwCd4R+umhSQnvJwORXbprE= github.com/hashicorp/terraform-plugin-go v0.1.0 h1:kyXZ0nkHxiRev/q18N40IbRRk4AV0zE/MDJkDM3u8dY= github.com/hashicorp/terraform-plugin-go v0.1.0/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4= github.com/hashicorp/terraform-plugin-sdk v1.9.0 h1:WBHHIX/RgF6/lbfMCzx0qKl96BbQy3bexWFvDqt1bhE= github.com/hashicorp/terraform-plugin-sdk v1.9.0/go.mod h1:C/AXwmDHqbc3h6URiHpIsVKrwV4PS0Sh0+VTaeEkShw= github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.0 h1:jPPqctLDg75CilV3IpypAz6on3MSMOiUMzXNz+Xex6E= github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.0/go.mod h1:xOf85UtHJ0/9/EF3eKgZFlJ6feN8sDtjQRWRHhimCUw= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0 h1:Egv+R1tOOjPNz643KBTx3tLT6RdFGGYJcZlyLvrPcEU= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0/go.mod h1:+12dJQebYjuU/yiq94iZUPuC66abfRBrXdpVJia3ojk= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.0 h1:2c+vG46celrDCsfYEIzaXxvBaAXCqlVG77LwtFz8cfs= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.0/go.mod h1:JBItawj+j8Ssla5Ib6BC/W9VQkOucBfnX7VRtyx1vw8= github.com/hashicorp/terraform-plugin-test v1.2.0 h1:AWFdqyfnOj04sxTdaAF57QqvW7XXrT8PseUHkbKsE8I= github.com/hashicorp/terraform-plugin-test v1.2.0/go.mod h1:QIJHYz8j+xJtdtLrFTlzQVC0ocr3rf/OjIpgZLK56Hs= github.com/hashicorp/terraform-plugin-test/v2 v2.0.0-20200724200815-faa9931ac59e h1:Q8lNGrk3SVdXEbLuUJD03jghIjykJT9pu1aReKgb858= @@ -251,9 +260,14 @@ 
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.1/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= @@ -302,6 +316,8 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/ulikunitz/xz v0.5.7 h1:YvTNdFzX6+W5m9msiYg/zpkSURPPtOlzbqYjrFn7Yt4= github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= @@ -323,6 +339,7 @@ github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLE github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= github.com/zclconf/go-cty v1.2.1 h1:vGMsygfmeCl4Xb6OA5U5XVAaQZ69FvoG7X2jUtQujb8= github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= +github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= github.com/zclconf/go-cty-yaml v1.0.1/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -433,6 +450,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -599,6 +617,8 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/awsproviderlint/vendor/github.com/emirpasic/gods/LICENSE b/awsproviderlint/vendor/github.com/emirpasic/gods/LICENSE deleted file mode 100644 index e5e449b6eca..00000000000 --- a/awsproviderlint/vendor/github.com/emirpasic/gods/LICENSE +++ /dev/null @@ -1,41 +0,0 @@ -Copyright (c) 2015, Emir Pasic -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -------------------------------------------------------------------------------- - -AVL Tree: - -Copyright (c) 2017 Benjamin Scher Purcell - -Permission to use, copy, modify, and distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
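Context for the hunks above: the awsproviderlint/go.mod change bumps github.com/hashicorp/terraform-plugin-sdk/v2 from v2.3.0 to v2.4.0, and the go.sum and vendor/ churn throughout this patch is the regenerated dependency tree that follows — go-hclog v0.15.0, terraform-exec v0.12.0, fatih/color, mattn/go-colorable, and mattn/go-isatty come in, while the go-git, ssh_config, mergo, ssh-agent, and x/crypto subtrees become unused and are dropped. Running `go get github.com/hashicorp/terraform-plugin-sdk/v2@v2.4.0` followed by `go mod tidy` and `go mod vendor` inside awsproviderlint/ would plausibly reproduce a commit of this shape (an assumption; the patch does not record the commands that were used).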
diff --git a/awsproviderlint/vendor/github.com/emirpasic/gods/containers/containers.go b/awsproviderlint/vendor/github.com/emirpasic/gods/containers/containers.go deleted file mode 100644 index c35ab36d2c3..00000000000 --- a/awsproviderlint/vendor/github.com/emirpasic/gods/containers/containers.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) 2015, Emir Pasic. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package containers provides core interfaces and functions for data structures. -// -// Container is the base interface for all data structures to implement. -// -// Iterators provide stateful iterators. -// -// Enumerable provides Ruby inspired (each, select, map, find, any?, etc.) container functions. -// -// Serialization provides serializers (marshalers) and deserializers (unmarshalers). -package containers - -import "github.com/emirpasic/gods/utils" - -// Container is base interface that all data structures implement. -type Container interface { - Empty() bool - Size() int - Clear() - Values() []interface{} -} - -// GetSortedValues returns sorted container's elements with respect to the passed comparator. -// Does not effect the ordering of elements within the container. -func GetSortedValues(container Container, comparator utils.Comparator) []interface{} { - values := container.Values() - if len(values) < 2 { - return values - } - utils.Sort(values, comparator) - return values -} diff --git a/awsproviderlint/vendor/github.com/emirpasic/gods/containers/enumerable.go b/awsproviderlint/vendor/github.com/emirpasic/gods/containers/enumerable.go deleted file mode 100644 index ac48b545315..00000000000 --- a/awsproviderlint/vendor/github.com/emirpasic/gods/containers/enumerable.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2015, Emir Pasic. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package containers - -// EnumerableWithIndex provides functions for ordered containers whose values can be fetched by an index. -type EnumerableWithIndex interface { - // Each calls the given function once for each element, passing that element's index and value. - Each(func(index int, value interface{})) - - // Map invokes the given function once for each element and returns a - // container containing the values returned by the given function. - // TODO would appreciate help on how to enforce this in containers (don't want to type assert when chaining) - // Map(func(index int, value interface{}) interface{}) Container - - // Select returns a new container containing all elements for which the given function returns a true value. - // TODO need help on how to enforce this in containers (don't want to type assert when chaining) - // Select(func(index int, value interface{}) bool) Container - - // Any passes each element of the container to the given function and - // returns true if the function ever returns true for any element. - Any(func(index int, value interface{}) bool) bool - - // All passes each element of the container to the given function and - // returns true if the function returns true for all elements. - All(func(index int, value interface{}) bool) bool - - // Find passes each element of the container to the given function and returns - // the first (index,value) for which the function is true or -1,nil otherwise - // if no element matches the criteria. 
- Find(func(index int, value interface{}) bool) (int, interface{}) -} - -// EnumerableWithKey provides functions for ordered containers whose values whose elements are key/value pairs. -type EnumerableWithKey interface { - // Each calls the given function once for each element, passing that element's key and value. - Each(func(key interface{}, value interface{})) - - // Map invokes the given function once for each element and returns a container - // containing the values returned by the given function as key/value pairs. - // TODO need help on how to enforce this in containers (don't want to type assert when chaining) - // Map(func(key interface{}, value interface{}) (interface{}, interface{})) Container - - // Select returns a new container containing all elements for which the given function returns a true value. - // TODO need help on how to enforce this in containers (don't want to type assert when chaining) - // Select(func(key interface{}, value interface{}) bool) Container - - // Any passes each element of the container to the given function and - // returns true if the function ever returns true for any element. - Any(func(key interface{}, value interface{}) bool) bool - - // All passes each element of the container to the given function and - // returns true if the function returns true for all elements. - All(func(key interface{}, value interface{}) bool) bool - - // Find passes each element of the container to the given function and returns - // the first (key,value) for which the function is true or nil,nil otherwise if no element - // matches the criteria. - Find(func(key interface{}, value interface{}) bool) (interface{}, interface{}) -} diff --git a/awsproviderlint/vendor/github.com/emirpasic/gods/containers/iterator.go b/awsproviderlint/vendor/github.com/emirpasic/gods/containers/iterator.go deleted file mode 100644 index f1a52a365ac..00000000000 --- a/awsproviderlint/vendor/github.com/emirpasic/gods/containers/iterator.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright (c) 2015, Emir Pasic. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package containers - -// IteratorWithIndex is stateful iterator for ordered containers whose values can be fetched by an index. -type IteratorWithIndex interface { - // Next moves the iterator to the next element and returns true if there was a next element in the container. - // If Next() returns true, then next element's index and value can be retrieved by Index() and Value(). - // If Next() was called for the first time, then it will point the iterator to the first element if it exists. - // Modifies the state of the iterator. - Next() bool - - // Value returns the current element's value. - // Does not modify the state of the iterator. - Value() interface{} - - // Index returns the current element's index. - // Does not modify the state of the iterator. - Index() int - - // Begin resets the iterator to its initial state (one-before-first) - // Call Next() to fetch the first element if any. - Begin() - - // First moves the iterator to the first element and returns true if there was a first element in the container. - // If First() returns true, then first element's index and value can be retrieved by Index() and Value(). - // Modifies the state of the iterator. - First() bool -} - -// IteratorWithKey is a stateful iterator for ordered containers whose elements are key value pairs. 
-type IteratorWithKey interface { - // Next moves the iterator to the next element and returns true if there was a next element in the container. - // If Next() returns true, then next element's key and value can be retrieved by Key() and Value(). - // If Next() was called for the first time, then it will point the iterator to the first element if it exists. - // Modifies the state of the iterator. - Next() bool - - // Value returns the current element's value. - // Does not modify the state of the iterator. - Value() interface{} - - // Key returns the current element's key. - // Does not modify the state of the iterator. - Key() interface{} - - // Begin resets the iterator to its initial state (one-before-first) - // Call Next() to fetch the first element if any. - Begin() - - // First moves the iterator to the first element and returns true if there was a first element in the container. - // If First() returns true, then first element's key and value can be retrieved by Key() and Value(). - // Modifies the state of the iterator. - First() bool -} - -// ReverseIteratorWithIndex is stateful iterator for ordered containers whose values can be fetched by an index. -// -// Essentially it is the same as IteratorWithIndex, but provides additional: -// -// Prev() function to enable traversal in reverse -// -// Last() function to move the iterator to the last element. -// -// End() function to move the iterator past the last element (one-past-the-end). -type ReverseIteratorWithIndex interface { - // Prev moves the iterator to the previous element and returns true if there was a previous element in the container. - // If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value(). - // Modifies the state of the iterator. - Prev() bool - - // End moves the iterator past the last element (one-past-the-end). - // Call Prev() to fetch the last element if any. - End() - - // Last moves the iterator to the last element and returns true if there was a last element in the container. - // If Last() returns true, then last element's index and value can be retrieved by Index() and Value(). - // Modifies the state of the iterator. - Last() bool - - IteratorWithIndex -} - -// ReverseIteratorWithKey is a stateful iterator for ordered containers whose elements are key value pairs. -// -// Essentially it is the same as IteratorWithKey, but provides additional: -// -// Prev() function to enable traversal in reverse -// -// Last() function to move the iterator to the last element. -type ReverseIteratorWithKey interface { - // Prev moves the iterator to the previous element and returns true if there was a previous element in the container. - // If Prev() returns true, then previous element's key and value can be retrieved by Key() and Value(). - // Modifies the state of the iterator. - Prev() bool - - // End moves the iterator past the last element (one-past-the-end). - // Call Prev() to fetch the last element if any. - End() - - // Last moves the iterator to the last element and returns true if there was a last element in the container. - // If Last() returns true, then last element's key and value can be retrieved by Key() and Value(). - // Modifies the state of the iterator. 
- Last() bool - - IteratorWithKey -} diff --git a/awsproviderlint/vendor/github.com/emirpasic/gods/containers/serialization.go b/awsproviderlint/vendor/github.com/emirpasic/gods/containers/serialization.go deleted file mode 100644 index d7c90c83a05..00000000000 --- a/awsproviderlint/vendor/github.com/emirpasic/gods/containers/serialization.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) 2015, Emir Pasic. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package containers - -// JSONSerializer provides JSON serialization -type JSONSerializer interface { - // ToJSON outputs the JSON representation of containers's elements. - ToJSON() ([]byte, error) -} - -// JSONDeserializer provides JSON deserialization -type JSONDeserializer interface { - // FromJSON populates containers's elements from the input JSON representation. - FromJSON([]byte) error -} diff --git a/awsproviderlint/vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go b/awsproviderlint/vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go deleted file mode 100644 index bfedac9eef8..00000000000 --- a/awsproviderlint/vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright (c) 2015, Emir Pasic. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package arraylist implements the array list. -// -// Structure is not thread safe. -// -// Reference: https://en.wikipedia.org/wiki/List_%28abstract_data_type%29 -package arraylist - -import ( - "fmt" - "strings" - - "github.com/emirpasic/gods/lists" - "github.com/emirpasic/gods/utils" -) - -func assertListImplementation() { - var _ lists.List = (*List)(nil) -} - -// List holds the elements in a slice -type List struct { - elements []interface{} - size int -} - -const ( - growthFactor = float32(2.0) // growth by 100% - shrinkFactor = float32(0.25) // shrink when size is 25% of capacity (0 means never shrink) -) - -// New instantiates a new list and adds the passed values, if any, to the list -func New(values ...interface{}) *List { - list := &List{} - if len(values) > 0 { - list.Add(values...) - } - return list -} - -// Add appends a value at the end of the list -func (list *List) Add(values ...interface{}) { - list.growBy(len(values)) - for _, value := range values { - list.elements[list.size] = value - list.size++ - } -} - -// Get returns the element at index. -// Second return parameter is true if index is within bounds of the array and array is not empty, otherwise false. -func (list *List) Get(index int) (interface{}, bool) { - - if !list.withinRange(index) { - return nil, false - } - - return list.elements[index], true -} - -// Remove removes the element at the given index from the list. -func (list *List) Remove(index int) { - - if !list.withinRange(index) { - return - } - - list.elements[index] = nil // cleanup reference - copy(list.elements[index:], list.elements[index+1:list.size]) // shift to the left by one (slow operation, need ways to optimize this) - list.size-- - - list.shrink() -} - -// Contains checks if elements (one or more) are present in the set. -// All elements have to be present in the set for the method to return true. -// Performance time complexity of n^2. -// Returns true if no arguments are passed at all, i.e. set is always super-set of empty set. 
-func (list *List) Contains(values ...interface{}) bool { - - for _, searchValue := range values { - found := false - for _, element := range list.elements { - if element == searchValue { - found = true - break - } - } - if !found { - return false - } - } - return true -} - -// Values returns all elements in the list. -func (list *List) Values() []interface{} { - newElements := make([]interface{}, list.size, list.size) - copy(newElements, list.elements[:list.size]) - return newElements -} - -//IndexOf returns index of provided element -func (list *List) IndexOf(value interface{}) int { - if list.size == 0 { - return -1 - } - for index, element := range list.elements { - if element == value { - return index - } - } - return -1 -} - -// Empty returns true if list does not contain any elements. -func (list *List) Empty() bool { - return list.size == 0 -} - -// Size returns number of elements within the list. -func (list *List) Size() int { - return list.size -} - -// Clear removes all elements from the list. -func (list *List) Clear() { - list.size = 0 - list.elements = []interface{}{} -} - -// Sort sorts values (in-place) using. -func (list *List) Sort(comparator utils.Comparator) { - if len(list.elements) < 2 { - return - } - utils.Sort(list.elements[:list.size], comparator) -} - -// Swap swaps the two values at the specified positions. -func (list *List) Swap(i, j int) { - if list.withinRange(i) && list.withinRange(j) { - list.elements[i], list.elements[j] = list.elements[j], list.elements[i] - } -} - -// Insert inserts values at specified index position shifting the value at that position (if any) and any subsequent elements to the right. -// Does not do anything if position is negative or bigger than list's size -// Note: position equal to list's size is valid, i.e. append. -func (list *List) Insert(index int, values ...interface{}) { - - if !list.withinRange(index) { - // Append - if index == list.size { - list.Add(values...) - } - return - } - - l := len(values) - list.growBy(l) - list.size += l - copy(list.elements[index+l:], list.elements[index:list.size-l]) - copy(list.elements[index:], values) -} - -// Set the value at specified index -// Does not do anything if position is negative or bigger than list's size -// Note: position equal to list's size is valid, i.e. append. -func (list *List) Set(index int, value interface{}) { - - if !list.withinRange(index) { - // Append - if index == list.size { - list.Add(value) - } - return - } - - list.elements[index] = value -} - -// String returns a string representation of container -func (list *List) String() string { - str := "ArrayList\n" - values := []string{} - for _, value := range list.elements[:list.size] { - values = append(values, fmt.Sprintf("%v", value)) - } - str += strings.Join(values, ", ") - return str -} - -// Check that the index is within bounds of the list -func (list *List) withinRange(index int) bool { - return index >= 0 && index < list.size -} - -func (list *List) resize(cap int) { - newElements := make([]interface{}, cap, cap) - copy(newElements, list.elements) - list.elements = newElements -} - -// Expand the array if necessary, i.e. capacity will be reached if we add n elements -func (list *List) growBy(n int) { - // When capacity is reached, grow by a factor of growthFactor and add number of elements - currentCapacity := cap(list.elements) - if list.size+n >= currentCapacity { - newCapacity := int(growthFactor * float32(currentCapacity+n)) - list.resize(newCapacity) - } -} - -// Shrink the array if necessary, i.e. 
when size is shrinkFactor percent of current capacity -func (list *List) shrink() { - if shrinkFactor == 0.0 { - return - } - // Shrink when size is at shrinkFactor * capacity - currentCapacity := cap(list.elements) - if list.size <= int(float32(currentCapacity)*shrinkFactor) { - list.resize(list.size) - } -} diff --git a/awsproviderlint/vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go b/awsproviderlint/vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go deleted file mode 100644 index b3a8738825c..00000000000 --- a/awsproviderlint/vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2015, Emir Pasic. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package arraylist - -import "github.com/emirpasic/gods/containers" - -func assertEnumerableImplementation() { - var _ containers.EnumerableWithIndex = (*List)(nil) -} - -// Each calls the given function once for each element, passing that element's index and value. -func (list *List) Each(f func(index int, value interface{})) { - iterator := list.Iterator() - for iterator.Next() { - f(iterator.Index(), iterator.Value()) - } -} - -// Map invokes the given function once for each element and returns a -// container containing the values returned by the given function. -func (list *List) Map(f func(index int, value interface{}) interface{}) *List { - newList := &List{} - iterator := list.Iterator() - for iterator.Next() { - newList.Add(f(iterator.Index(), iterator.Value())) - } - return newList -} - -// Select returns a new container containing all elements for which the given function returns a true value. -func (list *List) Select(f func(index int, value interface{}) bool) *List { - newList := &List{} - iterator := list.Iterator() - for iterator.Next() { - if f(iterator.Index(), iterator.Value()) { - newList.Add(iterator.Value()) - } - } - return newList -} - -// Any passes each element of the collection to the given function and -// returns true if the function ever returns true for any element. -func (list *List) Any(f func(index int, value interface{}) bool) bool { - iterator := list.Iterator() - for iterator.Next() { - if f(iterator.Index(), iterator.Value()) { - return true - } - } - return false -} - -// All passes each element of the collection to the given function and -// returns true if the function returns true for all elements. -func (list *List) All(f func(index int, value interface{}) bool) bool { - iterator := list.Iterator() - for iterator.Next() { - if !f(iterator.Index(), iterator.Value()) { - return false - } - } - return true -} - -// Find passes each element of the container to the given function and returns -// the first (index,value) for which the function is true or -1,nil otherwise -// if no element matches the criteria. -func (list *List) Find(f func(index int, value interface{}) bool) (int, interface{}) { - iterator := list.Iterator() - for iterator.Next() { - if f(iterator.Index(), iterator.Value()) { - return iterator.Index(), iterator.Value() - } - } - return -1, nil -} diff --git a/awsproviderlint/vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go b/awsproviderlint/vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go deleted file mode 100644 index 38a93f3a8f0..00000000000 --- a/awsproviderlint/vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) 2015, Emir Pasic. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package arraylist - -import "github.com/emirpasic/gods/containers" - -func assertIteratorImplementation() { - var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil) -} - -// Iterator holding the iterator's state -type Iterator struct { - list *List - index int -} - -// Iterator returns a stateful iterator whose values can be fetched by an index. -func (list *List) Iterator() Iterator { - return Iterator{list: list, index: -1} -} - -// Next moves the iterator to the next element and returns true if there was a next element in the container. -// If Next() returns true, then next element's index and value can be retrieved by Index() and Value(). -// If Next() was called for the first time, then it will point the iterator to the first element if it exists. -// Modifies the state of the iterator. -func (iterator *Iterator) Next() bool { - if iterator.index < iterator.list.size { - iterator.index++ - } - return iterator.list.withinRange(iterator.index) -} - -// Prev moves the iterator to the previous element and returns true if there was a previous element in the container. -// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value(). -// Modifies the state of the iterator. -func (iterator *Iterator) Prev() bool { - if iterator.index >= 0 { - iterator.index-- - } - return iterator.list.withinRange(iterator.index) -} - -// Value returns the current element's value. -// Does not modify the state of the iterator. -func (iterator *Iterator) Value() interface{} { - return iterator.list.elements[iterator.index] -} - -// Index returns the current element's index. -// Does not modify the state of the iterator. -func (iterator *Iterator) Index() int { - return iterator.index -} - -// Begin resets the iterator to its initial state (one-before-first) -// Call Next() to fetch the first element if any. -func (iterator *Iterator) Begin() { - iterator.index = -1 -} - -// End moves the iterator past the last element (one-past-the-end). -// Call Prev() to fetch the last element if any. -func (iterator *Iterator) End() { - iterator.index = iterator.list.size -} - -// First moves the iterator to the first element and returns true if there was a first element in the container. -// If First() returns true, then first element's index and value can be retrieved by Index() and Value(). -// Modifies the state of the iterator. -func (iterator *Iterator) First() bool { - iterator.Begin() - return iterator.Next() -} - -// Last moves the iterator to the last element and returns true if there was a last element in the container. -// If Last() returns true, then last element's index and value can be retrieved by Index() and Value(). -// Modifies the state of the iterator. -func (iterator *Iterator) Last() bool { - iterator.End() - return iterator.Prev() -} diff --git a/awsproviderlint/vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go b/awsproviderlint/vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go deleted file mode 100644 index 2f283fb97d9..00000000000 --- a/awsproviderlint/vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) 2015, Emir Pasic. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package arraylist - -import ( - "encoding/json" - "github.com/emirpasic/gods/containers" -) - -func assertSerializationImplementation() { - var _ containers.JSONSerializer = (*List)(nil) - var _ containers.JSONDeserializer = (*List)(nil) -} - -// ToJSON outputs the JSON representation of list's elements. -func (list *List) ToJSON() ([]byte, error) { - return json.Marshal(list.elements[:list.size]) -} - -// FromJSON populates list's elements from the input JSON representation. -func (list *List) FromJSON(data []byte) error { - err := json.Unmarshal(data, &list.elements) - if err == nil { - list.size = len(list.elements) - } - return err -} diff --git a/awsproviderlint/vendor/github.com/emirpasic/gods/lists/lists.go b/awsproviderlint/vendor/github.com/emirpasic/gods/lists/lists.go deleted file mode 100644 index 1f6bb08e945..00000000000 --- a/awsproviderlint/vendor/github.com/emirpasic/gods/lists/lists.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2015, Emir Pasic. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package lists provides an abstract List interface. -// -// In computer science, a list or sequence is an abstract data type that represents an ordered sequence of values, where the same value may occur more than once. An instance of a list is a computer representation of the mathematical concept of a finite sequence; the (potentially) infinite analog of a list is a stream. Lists are a basic example of containers, as they contain other values. If the same value occurs multiple times, each occurrence is considered a distinct item. -// -// Reference: https://en.wikipedia.org/wiki/List_%28abstract_data_type%29 -package lists - -import ( - "github.com/emirpasic/gods/containers" - "github.com/emirpasic/gods/utils" -) - -// List interface that all lists implement -type List interface { - Get(index int) (interface{}, bool) - Remove(index int) - Add(values ...interface{}) - Contains(values ...interface{}) bool - Sort(comparator utils.Comparator) - Swap(index1, index2 int) - Insert(index int, values ...interface{}) - Set(index int, value interface{}) - - containers.Container - // Empty() bool - // Size() int - // Clear() - // Values() []interface{} -} diff --git a/awsproviderlint/vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go b/awsproviderlint/vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go deleted file mode 100644 index 70b28cf52d3..00000000000 --- a/awsproviderlint/vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright (c) 2015, Emir Pasic. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package binaryheap implements a binary heap backed by array list. -// -// Comparator defines this heap as either min or max heap. -// -// Structure is not thread safe. -// -// References: http://en.wikipedia.org/wiki/Binary_heap -package binaryheap - -import ( - "fmt" - "github.com/emirpasic/gods/lists/arraylist" - "github.com/emirpasic/gods/trees" - "github.com/emirpasic/gods/utils" - "strings" -) - -func assertTreeImplementation() { - var _ trees.Tree = (*Heap)(nil) -} - -// Heap holds elements in an array-list -type Heap struct { - list *arraylist.List - Comparator utils.Comparator -} - -// NewWith instantiates a new empty heap tree with the custom comparator. 
-func NewWith(comparator utils.Comparator) *Heap { - return &Heap{list: arraylist.New(), Comparator: comparator} -} - -// NewWithIntComparator instantiates a new empty heap with the IntComparator, i.e. elements are of type int. -func NewWithIntComparator() *Heap { - return &Heap{list: arraylist.New(), Comparator: utils.IntComparator} -} - -// NewWithStringComparator instantiates a new empty heap with the StringComparator, i.e. elements are of type string. -func NewWithStringComparator() *Heap { - return &Heap{list: arraylist.New(), Comparator: utils.StringComparator} -} - -// Push adds a value onto the heap and bubbles it up accordingly. -func (heap *Heap) Push(values ...interface{}) { - if len(values) == 1 { - heap.list.Add(values[0]) - heap.bubbleUp() - } else { - // Reference: https://en.wikipedia.org/wiki/Binary_heap#Building_a_heap - for _, value := range values { - heap.list.Add(value) - } - size := heap.list.Size()/2 + 1 - for i := size; i >= 0; i-- { - heap.bubbleDownIndex(i) - } - } -} - -// Pop removes top element on heap and returns it, or nil if heap is empty. -// Second return parameter is true, unless the heap was empty and there was nothing to pop. -func (heap *Heap) Pop() (value interface{}, ok bool) { - value, ok = heap.list.Get(0) - if !ok { - return - } - lastIndex := heap.list.Size() - 1 - heap.list.Swap(0, lastIndex) - heap.list.Remove(lastIndex) - heap.bubbleDown() - return -} - -// Peek returns top element on the heap without removing it, or nil if heap is empty. -// Second return parameter is true, unless the heap was empty and there was nothing to peek. -func (heap *Heap) Peek() (value interface{}, ok bool) { - return heap.list.Get(0) -} - -// Empty returns true if heap does not contain any elements. -func (heap *Heap) Empty() bool { - return heap.list.Empty() -} - -// Size returns number of elements within the heap. -func (heap *Heap) Size() int { - return heap.list.Size() -} - -// Clear removes all elements from the heap. -func (heap *Heap) Clear() { - heap.list.Clear() -} - -// Values returns all elements in the heap. -func (heap *Heap) Values() []interface{} { - return heap.list.Values() -} - -// String returns a string representation of container -func (heap *Heap) String() string { - str := "BinaryHeap\n" - values := []string{} - for _, value := range heap.list.Values() { - values = append(values, fmt.Sprintf("%v", value)) - } - str += strings.Join(values, ", ") - return str -} - -// Performs the "bubble down" operation. This is to place the element that is at the root -// of the heap in its correct place so that the heap maintains the min/max-heap order property. -func (heap *Heap) bubbleDown() { - heap.bubbleDownIndex(0) -} - -// Performs the "bubble down" operation. This is to place the element that is at the index -// of the heap in its correct place so that the heap maintains the min/max-heap order property. 
-func (heap *Heap) bubbleDownIndex(index int) { - size := heap.list.Size() - for leftIndex := index<<1 + 1; leftIndex < size; leftIndex = index<<1 + 1 { - rightIndex := index<<1 + 2 - smallerIndex := leftIndex - leftValue, _ := heap.list.Get(leftIndex) - rightValue, _ := heap.list.Get(rightIndex) - if rightIndex < size && heap.Comparator(leftValue, rightValue) > 0 { - smallerIndex = rightIndex - } - indexValue, _ := heap.list.Get(index) - smallerValue, _ := heap.list.Get(smallerIndex) - if heap.Comparator(indexValue, smallerValue) > 0 { - heap.list.Swap(index, smallerIndex) - } else { - break - } - index = smallerIndex - } -} - -// Performs the "bubble up" operation. This is to place a newly inserted -// element (i.e. last element in the list) in its correct place so that -// the heap maintains the min/max-heap order property. -func (heap *Heap) bubbleUp() { - index := heap.list.Size() - 1 - for parentIndex := (index - 1) >> 1; index > 0; parentIndex = (index - 1) >> 1 { - indexValue, _ := heap.list.Get(index) - parentValue, _ := heap.list.Get(parentIndex) - if heap.Comparator(parentValue, indexValue) <= 0 { - break - } - heap.list.Swap(index, parentIndex) - index = parentIndex - } -} - -// Check that the index is within bounds of the list -func (heap *Heap) withinRange(index int) bool { - return index >= 0 && index < heap.list.Size() -} diff --git a/awsproviderlint/vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go b/awsproviderlint/vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go deleted file mode 100644 index beeb8d70136..00000000000 --- a/awsproviderlint/vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) 2015, Emir Pasic. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package binaryheap - -import "github.com/emirpasic/gods/containers" - -func assertIteratorImplementation() { - var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil) -} - -// Iterator returns a stateful iterator whose values can be fetched by an index. -type Iterator struct { - heap *Heap - index int -} - -// Iterator returns a stateful iterator whose values can be fetched by an index. -func (heap *Heap) Iterator() Iterator { - return Iterator{heap: heap, index: -1} -} - -// Next moves the iterator to the next element and returns true if there was a next element in the container. -// If Next() returns true, then next element's index and value can be retrieved by Index() and Value(). -// If Next() was called for the first time, then it will point the iterator to the first element if it exists. -// Modifies the state of the iterator. -func (iterator *Iterator) Next() bool { - if iterator.index < iterator.heap.Size() { - iterator.index++ - } - return iterator.heap.withinRange(iterator.index) -} - -// Prev moves the iterator to the previous element and returns true if there was a previous element in the container. -// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value(). -// Modifies the state of the iterator. -func (iterator *Iterator) Prev() bool { - if iterator.index >= 0 { - iterator.index-- - } - return iterator.heap.withinRange(iterator.index) -} - -// Value returns the current element's value. -// Does not modify the state of the iterator. 
-func (iterator *Iterator) Value() interface{} { - value, _ := iterator.heap.list.Get(iterator.index) - return value -} - -// Index returns the current element's index. -// Does not modify the state of the iterator. -func (iterator *Iterator) Index() int { - return iterator.index -} - -// Begin resets the iterator to its initial state (one-before-first) -// Call Next() to fetch the first element if any. -func (iterator *Iterator) Begin() { - iterator.index = -1 -} - -// End moves the iterator past the last element (one-past-the-end). -// Call Prev() to fetch the last element if any. -func (iterator *Iterator) End() { - iterator.index = iterator.heap.Size() -} - -// First moves the iterator to the first element and returns true if there was a first element in the container. -// If First() returns true, then first element's index and value can be retrieved by Index() and Value(). -// Modifies the state of the iterator. -func (iterator *Iterator) First() bool { - iterator.Begin() - return iterator.Next() -} - -// Last moves the iterator to the last element and returns true if there was a last element in the container. -// If Last() returns true, then last element's index and value can be retrieved by Index() and Value(). -// Modifies the state of the iterator. -func (iterator *Iterator) Last() bool { - iterator.End() - return iterator.Prev() -} diff --git a/awsproviderlint/vendor/github.com/emirpasic/gods/trees/binaryheap/serialization.go b/awsproviderlint/vendor/github.com/emirpasic/gods/trees/binaryheap/serialization.go deleted file mode 100644 index 00d0c7719cd..00000000000 --- a/awsproviderlint/vendor/github.com/emirpasic/gods/trees/binaryheap/serialization.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (c) 2015, Emir Pasic. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package binaryheap - -import "github.com/emirpasic/gods/containers" - -func assertSerializationImplementation() { - var _ containers.JSONSerializer = (*Heap)(nil) - var _ containers.JSONDeserializer = (*Heap)(nil) -} - -// ToJSON outputs the JSON representation of the heap. -func (heap *Heap) ToJSON() ([]byte, error) { - return heap.list.ToJSON() -} - -// FromJSON populates the heap from the input JSON representation. -func (heap *Heap) FromJSON(data []byte) error { - return heap.list.FromJSON(data) -} diff --git a/awsproviderlint/vendor/github.com/emirpasic/gods/trees/trees.go b/awsproviderlint/vendor/github.com/emirpasic/gods/trees/trees.go deleted file mode 100644 index a5a7427d342..00000000000 --- a/awsproviderlint/vendor/github.com/emirpasic/gods/trees/trees.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) 2015, Emir Pasic. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package trees provides an abstract Tree interface. -// -// In computer science, a tree is a widely used abstract data type (ADT) or data structure implementing this ADT that simulates a hierarchical tree structure, with a root value and subtrees of children with a parent node, represented as a set of linked nodes. 
-// -// Reference: https://en.wikipedia.org/wiki/Tree_%28data_structure%29 -package trees - -import "github.com/emirpasic/gods/containers" - -// Tree interface that all trees implement -type Tree interface { - containers.Container - // Empty() bool - // Size() int - // Clear() - // Values() []interface{} -} diff --git a/awsproviderlint/vendor/github.com/emirpasic/gods/utils/comparator.go b/awsproviderlint/vendor/github.com/emirpasic/gods/utils/comparator.go deleted file mode 100644 index 6a9afbf3466..00000000000 --- a/awsproviderlint/vendor/github.com/emirpasic/gods/utils/comparator.go +++ /dev/null @@ -1,251 +0,0 @@ -// Copyright (c) 2015, Emir Pasic. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package utils - -import "time" - -// Comparator will make type assertion (see IntComparator for example), -// which will panic if a or b are not of the asserted type. -// -// Should return a number: -// negative , if a < b -// zero , if a == b -// positive , if a > b -type Comparator func(a, b interface{}) int - -// StringComparator provides a fast comparison on strings -func StringComparator(a, b interface{}) int { - s1 := a.(string) - s2 := b.(string) - min := len(s2) - if len(s1) < len(s2) { - min = len(s1) - } - diff := 0 - for i := 0; i < min && diff == 0; i++ { - diff = int(s1[i]) - int(s2[i]) - } - if diff == 0 { - diff = len(s1) - len(s2) - } - if diff < 0 { - return -1 - } - if diff > 0 { - return 1 - } - return 0 -} - -// IntComparator provides a basic comparison on int -func IntComparator(a, b interface{}) int { - aAsserted := a.(int) - bAsserted := b.(int) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// Int8Comparator provides a basic comparison on int8 -func Int8Comparator(a, b interface{}) int { - aAsserted := a.(int8) - bAsserted := b.(int8) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// Int16Comparator provides a basic comparison on int16 -func Int16Comparator(a, b interface{}) int { - aAsserted := a.(int16) - bAsserted := b.(int16) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// Int32Comparator provides a basic comparison on int32 -func Int32Comparator(a, b interface{}) int { - aAsserted := a.(int32) - bAsserted := b.(int32) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// Int64Comparator provides a basic comparison on int64 -func Int64Comparator(a, b interface{}) int { - aAsserted := a.(int64) - bAsserted := b.(int64) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// UIntComparator provides a basic comparison on uint -func UIntComparator(a, b interface{}) int { - aAsserted := a.(uint) - bAsserted := b.(uint) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// UInt8Comparator provides a basic comparison on uint8 -func UInt8Comparator(a, b interface{}) int { - aAsserted := a.(uint8) - bAsserted := b.(uint8) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// UInt16Comparator provides a basic comparison on uint16 -func UInt16Comparator(a, b interface{}) 
int { - aAsserted := a.(uint16) - bAsserted := b.(uint16) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// UInt32Comparator provides a basic comparison on uint32 -func UInt32Comparator(a, b interface{}) int { - aAsserted := a.(uint32) - bAsserted := b.(uint32) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// UInt64Comparator provides a basic comparison on uint64 -func UInt64Comparator(a, b interface{}) int { - aAsserted := a.(uint64) - bAsserted := b.(uint64) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// Float32Comparator provides a basic comparison on float32 -func Float32Comparator(a, b interface{}) int { - aAsserted := a.(float32) - bAsserted := b.(float32) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// Float64Comparator provides a basic comparison on float64 -func Float64Comparator(a, b interface{}) int { - aAsserted := a.(float64) - bAsserted := b.(float64) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// ByteComparator provides a basic comparison on byte -func ByteComparator(a, b interface{}) int { - aAsserted := a.(byte) - bAsserted := b.(byte) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// RuneComparator provides a basic comparison on rune -func RuneComparator(a, b interface{}) int { - aAsserted := a.(rune) - bAsserted := b.(rune) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// TimeComparator provides a basic comparison on time.Time -func TimeComparator(a, b interface{}) int { - aAsserted := a.(time.Time) - bAsserted := b.(time.Time) - - switch { - case aAsserted.After(bAsserted): - return 1 - case aAsserted.Before(bAsserted): - return -1 - default: - return 0 - } -} diff --git a/awsproviderlint/vendor/github.com/emirpasic/gods/utils/sort.go b/awsproviderlint/vendor/github.com/emirpasic/gods/utils/sort.go deleted file mode 100644 index 79ced1f5d26..00000000000 --- a/awsproviderlint/vendor/github.com/emirpasic/gods/utils/sort.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) 2015, Emir Pasic. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package utils - -import "sort" - -// Sort sorts values (in-place) with respect to the given comparator. -// -// Uses Go's sort (hybrid of quicksort for large and then insertion sort for smaller slices). 
-func Sort(values []interface{}, comparator Comparator) { - sort.Sort(sortable{values, comparator}) -} - -type sortable struct { - values []interface{} - comparator Comparator -} - -func (s sortable) Len() int { - return len(s.values) -} -func (s sortable) Swap(i, j int) { - s.values[i], s.values[j] = s.values[j], s.values[i] -} -func (s sortable) Less(i, j int) bool { - return s.comparator(s.values[i], s.values[j]) < 0 -} diff --git a/awsproviderlint/vendor/github.com/emirpasic/gods/utils/utils.go b/awsproviderlint/vendor/github.com/emirpasic/gods/utils/utils.go deleted file mode 100644 index 1ad49cbc072..00000000000 --- a/awsproviderlint/vendor/github.com/emirpasic/gods/utils/utils.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2015, Emir Pasic. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package utils provides common utility functions. -// -// Provided functionalities: -// - sorting -// - comparators -package utils - -import ( - "fmt" - "strconv" -) - -// ToString converts a value to string. -func ToString(value interface{}) string { - switch value.(type) { - case string: - return value.(string) - case int8: - return strconv.FormatInt(int64(value.(int8)), 10) - case int16: - return strconv.FormatInt(int64(value.(int16)), 10) - case int32: - return strconv.FormatInt(int64(value.(int32)), 10) - case int64: - return strconv.FormatInt(int64(value.(int64)), 10) - case uint8: - return strconv.FormatUint(uint64(value.(uint8)), 10) - case uint16: - return strconv.FormatUint(uint64(value.(uint16)), 10) - case uint32: - return strconv.FormatUint(uint64(value.(uint32)), 10) - case uint64: - return strconv.FormatUint(uint64(value.(uint64)), 10) - case float32: - return strconv.FormatFloat(float64(value.(float32)), 'g', -1, 64) - case float64: - return strconv.FormatFloat(float64(value.(float64)), 'g', -1, 64) - case bool: - return strconv.FormatBool(value.(bool)) - default: - return fmt.Sprintf("%+v", value) - } -} diff --git a/awsproviderlint/vendor/github.com/fatih/color/.travis.yml b/awsproviderlint/vendor/github.com/fatih/color/.travis.yml new file mode 100644 index 00000000000..95f8a1ff5c7 --- /dev/null +++ b/awsproviderlint/vendor/github.com/fatih/color/.travis.yml @@ -0,0 +1,5 @@ +language: go +go: + - 1.8.x + - tip + diff --git a/awsproviderlint/vendor/github.com/fatih/color/Gopkg.lock b/awsproviderlint/vendor/github.com/fatih/color/Gopkg.lock new file mode 100644 index 00000000000..7d879e9caf0 --- /dev/null +++ b/awsproviderlint/vendor/github.com/fatih/color/Gopkg.lock @@ -0,0 +1,27 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + name = "github.com/mattn/go-colorable" + packages = ["."] + revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072" + version = "v0.0.9" + +[[projects]] + name = "github.com/mattn/go-isatty" + packages = ["."] + revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39" + version = "v0.0.3" + +[[projects]] + branch = "master" + name = "golang.org/x/sys" + packages = ["unix"] + revision = "37707fdb30a5b38865cfb95e5aab41707daec7fd" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "e8a50671c3cb93ea935bf210b1cd20702876b9d9226129be581ef646d1565cdc" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/awsproviderlint/vendor/github.com/fatih/color/Gopkg.toml b/awsproviderlint/vendor/github.com/fatih/color/Gopkg.toml new file mode 100644 index 00000000000..ff1617f71da --- /dev/null +++ b/awsproviderlint/vendor/github.com/fatih/color/Gopkg.toml @@ -0,0 +1,30 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + name = "github.com/mattn/go-colorable" + version = "0.0.9" + +[[constraint]] + name = "github.com/mattn/go-isatty" + version = "0.0.3" diff --git a/awsproviderlint/vendor/github.com/fatih/color/LICENSE.md b/awsproviderlint/vendor/github.com/fatih/color/LICENSE.md new file mode 100644 index 00000000000..25fdaf639df --- /dev/null +++ b/awsproviderlint/vendor/github.com/fatih/color/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
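The fatih/color package vendored in below builds on the two mattn packages pinned in its Gopkg files: go-isatty detects whether stdout is a terminal, and go-colorable adapts ANSI escape sequences for Windows consoles. The sketch below mirrors the NoColor initialization in the color.go added further down; it is illustrative only and assumes both mattn modules are importable.

```go
package main

import (
	"fmt"
	"os"

	"github.com/mattn/go-colorable"
	"github.com/mattn/go-isatty"
)

func main() {
	// Same check the vendored color.go performs when initializing NoColor:
	// disable color for dumb terminals and for non-tty outputs (pipes, files).
	noColor := os.Getenv("TERM") == "dumb" ||
		(!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()))

	// colorable.NewColorableStdout returns a writer that interprets ANSI
	// escape sequences even on consoles that do not support them natively.
	out := colorable.NewColorableStdout()

	if noColor {
		fmt.Fprintln(out, "plain output")
		return
	}
	fmt.Fprintln(out, "\x1b[36mcyan output\x1b[0m")
}
```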
diff --git a/awsproviderlint/vendor/github.com/fatih/color/README.md b/awsproviderlint/vendor/github.com/fatih/color/README.md new file mode 100644 index 00000000000..3fc95446028 --- /dev/null +++ b/awsproviderlint/vendor/github.com/fatih/color/README.md @@ -0,0 +1,179 @@ +# Color [![GoDoc](https://godoc.org/github.com/fatih/color?status.svg)](https://godoc.org/github.com/fatih/color) [![Build Status](https://img.shields.io/travis/fatih/color.svg?style=flat-square)](https://travis-ci.org/fatih/color) + + + +Color lets you use colorized outputs in terms of [ANSI Escape +Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It +has support for Windows too! The API can be used in several ways, pick one that +suits you. + + +![Color](https://i.imgur.com/c1JI0lA.png) + + +## Install + +```bash +go get github.com/fatih/color +``` + +Note that the `vendor` folder is here for stability. Remove the folder if you +already have the dependencies in your GOPATH. + +## Examples + +### Standard colors + +```go +// Print with default helper functions +color.Cyan("Prints text in cyan.") + +// A newline will be appended automatically +color.Blue("Prints %s in blue.", "text") + +// These are using the default foreground colors +color.Red("We have red") +color.Magenta("And many others ..") + +``` + +### Mix and reuse colors + +```go +// Create a new color object +c := color.New(color.FgCyan).Add(color.Underline) +c.Println("Prints cyan text with an underline.") + +// Or just add them to New() +d := color.New(color.FgCyan, color.Bold) +d.Printf("This prints bold cyan %s\n", "too!.") + +// Mix up foreground and background colors, create new mixes! +red := color.New(color.FgRed) + +boldRed := red.Add(color.Bold) +boldRed.Println("This will print text in bold red.") + +whiteBackground := red.Add(color.BgWhite) +whiteBackground.Println("Red text with white background.") +``` + +### Use your own output (io.Writer) + +```go +// Use your own io.Writer output +color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + +blue := color.New(color.FgBlue) +blue.Fprint(writer, "This will print text in blue.") +``` + +### Custom print functions (PrintFunc) + +```go +// Create a custom print function for convenience +red := color.New(color.FgRed).PrintfFunc() +red("Warning") +red("Error: %s", err) + +// Mix up multiple attributes +notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() +notice("Don't forget this...") +``` + +### Custom fprint functions (FprintFunc) + +```go +blue := color.New(FgBlue).FprintfFunc() +blue(myWriter, "important notice: %s", stars) + +// Mix up with multiple attributes +success := color.New(color.Bold, color.FgGreen).FprintlnFunc() +success(myWriter, "Don't forget this...") +``` + +### Insert into noncolor strings (SprintFunc) + +```go +// Create SprintXxx functions to mix strings with other non-colorized strings: +yellow := color.New(color.FgYellow).SprintFunc() +red := color.New(color.FgRed).SprintFunc() +fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error")) + +info := color.New(color.FgWhite, color.BgGreen).SprintFunc() +fmt.Printf("This %s rocks!\n", info("package")) + +// Use helper functions +fmt.Println("This", color.RedString("warning"), "should be not neglected.") +fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.") + +// Windows supported too! 
Just don't forget to change the output to color.Output +fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) +``` + +### Plug into existing code + +```go +// Use handy standard colors +color.Set(color.FgYellow) + +fmt.Println("Existing text will now be in yellow") +fmt.Printf("This one %s\n", "too") + +color.Unset() // Don't forget to unset + +// You can mix up parameters +color.Set(color.FgMagenta, color.Bold) +defer color.Unset() // Use it in your function + +fmt.Println("All text will now be bold magenta.") +``` + +### Disable/Enable color + +There might be a case where you want to explicitly disable/enable color output. the +`go-isatty` package will automatically disable color output for non-tty output streams +(for example if the output were piped directly to `less`) + +`Color` has support to disable/enable colors both globally and for single color +definitions. For example suppose you have a CLI app and a `--no-color` bool flag. You +can easily disable the color output with: + +```go + +var flagNoColor = flag.Bool("no-color", false, "Disable color output") + +if *flagNoColor { + color.NoColor = true // disables colorized output +} +``` + +It also has support for single color definitions (local). You can +disable/enable color output on the fly: + +```go +c := color.New(color.FgCyan) +c.Println("Prints cyan text") + +c.DisableColor() +c.Println("This is printed without any color") + +c.EnableColor() +c.Println("This prints again cyan...") +``` + +## Todo + +* Save/Return previous values +* Evaluate fmt.Formatter interface + + +## Credits + + * [Fatih Arslan](https://github.com/fatih) + * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) + +## License + +The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details + diff --git a/awsproviderlint/vendor/github.com/fatih/color/color.go b/awsproviderlint/vendor/github.com/fatih/color/color.go new file mode 100644 index 00000000000..91c8e9f0620 --- /dev/null +++ b/awsproviderlint/vendor/github.com/fatih/color/color.go @@ -0,0 +1,603 @@ +package color + +import ( + "fmt" + "io" + "os" + "strconv" + "strings" + "sync" + + "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +var ( + // NoColor defines if the output is colorized or not. It's dynamically set to + // false or true based on the stdout's file descriptor referring to a terminal + // or not. This is a global option and affects all colors. For more control + // over each color block use the methods DisableColor() individually. + NoColor = os.Getenv("TERM") == "dumb" || + (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) + + // Output defines the standard output of the print functions. By default + // os.Stdout is used. + Output = colorable.NewColorableStdout() + + // Error defines a color supporting writer for os.Stderr. + Error = colorable.NewColorableStderr() + + // colorsCache is used to reduce the count of created Color objects and + // allows to reuse already created objects with required Attribute. + colorsCache = make(map[Attribute]*Color) + colorsCacheMu sync.Mutex // protects colorsCache +) + +// Color defines a custom color object which is defined by SGR parameters. 
+type Color struct { + params []Attribute + noColor *bool +} + +// Attribute defines a single SGR Code +type Attribute int + +const escape = "\x1b" + +// Base attributes +const ( + Reset Attribute = iota + Bold + Faint + Italic + Underline + BlinkSlow + BlinkRapid + ReverseVideo + Concealed + CrossedOut +) + +// Foreground text colors +const ( + FgBlack Attribute = iota + 30 + FgRed + FgGreen + FgYellow + FgBlue + FgMagenta + FgCyan + FgWhite +) + +// Foreground Hi-Intensity text colors +const ( + FgHiBlack Attribute = iota + 90 + FgHiRed + FgHiGreen + FgHiYellow + FgHiBlue + FgHiMagenta + FgHiCyan + FgHiWhite +) + +// Background text colors +const ( + BgBlack Attribute = iota + 40 + BgRed + BgGreen + BgYellow + BgBlue + BgMagenta + BgCyan + BgWhite +) + +// Background Hi-Intensity text colors +const ( + BgHiBlack Attribute = iota + 100 + BgHiRed + BgHiGreen + BgHiYellow + BgHiBlue + BgHiMagenta + BgHiCyan + BgHiWhite +) + +// New returns a newly created color object. +func New(value ...Attribute) *Color { + c := &Color{params: make([]Attribute, 0)} + c.Add(value...) + return c +} + +// Set sets the given parameters immediately. It will change the color of +// output with the given SGR parameters until color.Unset() is called. +func Set(p ...Attribute) *Color { + c := New(p...) + c.Set() + return c +} + +// Unset resets all escape attributes and clears the output. Usually should +// be called after Set(). +func Unset() { + if NoColor { + return + } + + fmt.Fprintf(Output, "%s[%dm", escape, Reset) +} + +// Set sets the SGR sequence. +func (c *Color) Set() *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(Output, c.format()) + return c +} + +func (c *Color) unset() { + if c.isNoColorSet() { + return + } + + Unset() +} + +func (c *Color) setWriter(w io.Writer) *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(w, c.format()) + return c +} + +func (c *Color) unsetWriter(w io.Writer) { + if c.isNoColorSet() { + return + } + + if NoColor { + return + } + + fmt.Fprintf(w, "%s[%dm", escape, Reset) +} + +// Add is used to chain SGR parameters. Use as many as parameters to combine +// and create custom color objects. Example: Add(color.FgRed, color.Underline). +func (c *Color) Add(value ...Attribute) *Color { + c.params = append(c.params, value...) + return c +} + +func (c *Color) prepend(value Attribute) { + c.params = append(c.params, 0) + copy(c.params[1:], c.params[0:]) + c.params[0] = value +} + +// Fprint formats using the default formats for its operands and writes to w. +// Spaces are added between operands when neither is a string. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprint(w, a...) +} + +// Print formats using the default formats for its operands and writes to +// standard output. Spaces are added between operands when neither is a +// string. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Print(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprint(Output, a...) +} + +// Fprintf formats according to a format specifier and writes to w. +// It returns the number of bytes written and any write error encountered. 
+// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintf(w, format, a...) +} + +// Printf formats according to a format specifier and writes to standard output. +// It returns the number of bytes written and any write error encountered. +// This is the standard fmt.Printf() method wrapped with the given color. +func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintf(Output, format, a...) +} + +// Fprintln formats using the default formats for its operands and writes to w. +// Spaces are always added between operands and a newline is appended. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintln(w, a...) +} + +// Println formats using the default formats for its operands and writes to +// standard output. Spaces are always added between operands and a newline is +// appended. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Println(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintln(Output, a...) +} + +// Sprint is just like Print, but returns a string instead of printing it. +func (c *Color) Sprint(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) +} + +// Sprintln is just like Println, but returns a string instead of printing it. +func (c *Color) Sprintln(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) +} + +// Sprintf is just like Printf, but returns a string instead of printing it. +func (c *Color) Sprintf(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) +} + +// FprintFunc returns a new function that prints the passed arguments as +// colorized with color.Fprint(). +func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprint(w, a...) + } +} + +// PrintFunc returns a new function that prints the passed arguments as +// colorized with color.Print(). +func (c *Color) PrintFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Print(a...) + } +} + +// FprintfFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintf(). +func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) { + return func(w io.Writer, format string, a ...interface{}) { + c.Fprintf(w, format, a...) + } +} + +// PrintfFunc returns a new function that prints the passed arguments as +// colorized with color.Printf(). +func (c *Color) PrintfFunc() func(format string, a ...interface{}) { + return func(format string, a ...interface{}) { + c.Printf(format, a...) + } +} + +// FprintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintln(). +func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprintln(w, a...) + } +} + +// PrintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Println(). 
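+//
+// For example (an illustrative sketch):
+//
+//	warn := New(FgYellow).PrintlnFunc()
+//	warn("disk space is low")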
+func (c *Color) PrintlnFunc() func(a ...interface{}) {
+	return func(a ...interface{}) {
+		c.Println(a...)
+	}
+}
+
+// SprintFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprint(). Useful to put into or mix into other
+// strings. Windows users should use this in conjunction with color.Output, for example:
+//
+//	put := New(FgYellow).SprintFunc()
+//	fmt.Fprintf(color.Output, "This is a %s", put("warning"))
+func (c *Color) SprintFunc() func(a ...interface{}) string {
+	return func(a ...interface{}) string {
+		return c.wrap(fmt.Sprint(a...))
+	}
+}
+
+// SprintfFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprintf(). Useful to put into or mix into other
+// strings. Windows users should use this in conjunction with color.Output.
+func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
+	return func(format string, a ...interface{}) string {
+		return c.wrap(fmt.Sprintf(format, a...))
+	}
+}
+
+// SprintlnFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprintln(). Useful to put into or mix into other
+// strings. Windows users should use this in conjunction with color.Output.
+func (c *Color) SprintlnFunc() func(a ...interface{}) string {
+	return func(a ...interface{}) string {
+		return c.wrap(fmt.Sprintln(a...))
+	}
+}
+
+// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m"
+// escape. An example output might be: "1;36" -> bold cyan.
+func (c *Color) sequence() string {
+	format := make([]string, len(c.params))
+	for i, v := range c.params {
+		format[i] = strconv.Itoa(int(v))
+	}
+
+	return strings.Join(format, ";")
+}
+
+// wrap wraps the s string with the color's attributes. The string is ready to
+// be printed.
+func (c *Color) wrap(s string) string {
+	if c.isNoColorSet() {
+		return s
+	}
+
+	return c.format() + s + c.unformat()
+}
+
+func (c *Color) format() string {
+	return fmt.Sprintf("%s[%sm", escape, c.sequence())
+}
+
+func (c *Color) unformat() string {
+	return fmt.Sprintf("%s[%dm", escape, Reset)
+}
+
+// DisableColor disables the color output. Useful when you do not want to
+// change any existing code and still want to control the output. Can be used
+// for flags like "--no-color". To enable again, use the EnableColor() method.
+func (c *Color) DisableColor() {
+	c.noColor = boolPtr(true)
+}
+
+// EnableColor enables the color output. Use it in conjunction with
+// DisableColor(). Otherwise this method has no side effects.
+func (c *Color) EnableColor() {
+	c.noColor = boolPtr(false)
+}
+
+func (c *Color) isNoColorSet() bool {
+	// first check whether the user set an explicit override
+	if c.noColor != nil {
+		return *c.noColor
+	}
+
+	// if not, return the global option, which is disabled by default
+	return NoColor
+}
+
+// Equals returns a boolean value indicating whether two colors are equal.
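+//
+// Attribute order is not significant; for example (an illustrative sketch):
+//
+//	New(FgRed, Bold).Equals(New(Bold, FgRed)) // true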
+func (c *Color) Equals(c2 *Color) bool { + if len(c.params) != len(c2.params) { + return false + } + + for _, attr := range c.params { + if !c2.attrExists(attr) { + return false + } + } + + return true +} + +func (c *Color) attrExists(a Attribute) bool { + for _, attr := range c.params { + if attr == a { + return true + } + } + + return false +} + +func boolPtr(v bool) *bool { + return &v +} + +func getCachedColor(p Attribute) *Color { + colorsCacheMu.Lock() + defer colorsCacheMu.Unlock() + + c, ok := colorsCache[p] + if !ok { + c = New(p) + colorsCache[p] = c + } + + return c +} + +func colorPrint(format string, p Attribute, a ...interface{}) { + c := getCachedColor(p) + + if !strings.HasSuffix(format, "\n") { + format += "\n" + } + + if len(a) == 0 { + c.Print(format) + } else { + c.Printf(format, a...) + } +} + +func colorString(format string, p Attribute, a ...interface{}) string { + c := getCachedColor(p) + + if len(a) == 0 { + return c.SprintFunc()(format) + } + + return c.SprintfFunc()(format, a...) +} + +// Black is a convenient helper function to print with black foreground. A +// newline is appended to format by default. +func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } + +// Red is a convenient helper function to print with red foreground. A +// newline is appended to format by default. +func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) } + +// Green is a convenient helper function to print with green foreground. A +// newline is appended to format by default. +func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } + +// Yellow is a convenient helper function to print with yellow foreground. +// A newline is appended to format by default. +func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) } + +// Blue is a convenient helper function to print with blue foreground. A +// newline is appended to format by default. +func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } + +// Magenta is a convenient helper function to print with magenta foreground. +// A newline is appended to format by default. +func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } + +// Cyan is a convenient helper function to print with cyan foreground. A +// newline is appended to format by default. +func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } + +// White is a convenient helper function to print with white foreground. A +// newline is appended to format by default. +func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } + +// BlackString is a convenient helper function to return a string with black +// foreground. +func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } + +// RedString is a convenient helper function to return a string with red +// foreground. +func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } + +// GreenString is a convenient helper function to return a string with green +// foreground. +func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } + +// YellowString is a convenient helper function to return a string with yellow +// foreground. +func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) 
} + +// BlueString is a convenient helper function to return a string with blue +// foreground. +func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } + +// MagentaString is a convenient helper function to return a string with magenta +// foreground. +func MagentaString(format string, a ...interface{}) string { + return colorString(format, FgMagenta, a...) +} + +// CyanString is a convenient helper function to return a string with cyan +// foreground. +func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } + +// WhiteString is a convenient helper function to return a string with white +// foreground. +func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } + +// HiBlack is a convenient helper function to print with hi-intensity black foreground. A +// newline is appended to format by default. +func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } + +// HiRed is a convenient helper function to print with hi-intensity red foreground. A +// newline is appended to format by default. +func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) } + +// HiGreen is a convenient helper function to print with hi-intensity green foreground. A +// newline is appended to format by default. +func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) } + +// HiYellow is a convenient helper function to print with hi-intensity yellow foreground. +// A newline is appended to format by default. +func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) } + +// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A +// newline is appended to format by default. +func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) } + +// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground. +// A newline is appended to format by default. +func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) } + +// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A +// newline is appended to format by default. +func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) } + +// HiWhite is a convenient helper function to print with hi-intensity white foreground. A +// newline is appended to format by default. +func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) } + +// HiBlackString is a convenient helper function to return a string with hi-intensity black +// foreground. +func HiBlackString(format string, a ...interface{}) string { + return colorString(format, FgHiBlack, a...) +} + +// HiRedString is a convenient helper function to return a string with hi-intensity red +// foreground. +func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) } + +// HiGreenString is a convenient helper function to return a string with hi-intensity green +// foreground. +func HiGreenString(format string, a ...interface{}) string { + return colorString(format, FgHiGreen, a...) +} + +// HiYellowString is a convenient helper function to return a string with hi-intensity yellow +// foreground. +func HiYellowString(format string, a ...interface{}) string { + return colorString(format, FgHiYellow, a...) 
+}
+
+// HiBlueString is a convenient helper function to return a string with hi-intensity blue
+// foreground.
+func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) }
+
+// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta
+// foreground.
+func HiMagentaString(format string, a ...interface{}) string {
+	return colorString(format, FgHiMagenta, a...)
+}
+
+// HiCyanString is a convenient helper function to return a string with hi-intensity cyan
+// foreground.
+func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) }
+
+// HiWhiteString is a convenient helper function to return a string with hi-intensity white
+// foreground.
+func HiWhiteString(format string, a ...interface{}) string {
+	return colorString(format, FgHiWhite, a...)
+}
diff --git a/awsproviderlint/vendor/github.com/fatih/color/doc.go b/awsproviderlint/vendor/github.com/fatih/color/doc.go
new file mode 100644
index 00000000000..cf1e96500f4
--- /dev/null
+++ b/awsproviderlint/vendor/github.com/fatih/color/doc.go
@@ -0,0 +1,133 @@
+/*
+Package color is an ANSI color package to output colorized or SGR defined
+output to the standard output. The API can be used in several ways; pick one
+that suits you.
+
+Use simple and default helper functions with predefined foreground colors:
+
+	color.Cyan("Prints text in cyan.")
+
+	// a newline will be appended automatically
+	color.Blue("Prints %s in blue.", "text")
+
+	// More default foreground colors..
+	color.Red("We have red")
+	color.Yellow("Yellow color too!")
+	color.Magenta("And many others ..")
+
+	// Hi-intensity colors
+	color.HiGreen("Bright green color.")
+	color.HiBlack("Bright black means gray..")
+	color.HiWhite("Shiny white color!")
+
+However, there are times when custom color mixes are required. Below are some
+examples to create custom color objects and use the print functions of each
+separate color object.
+
+	// Create a new color object
+	c := color.New(color.FgCyan).Add(color.Underline)
+	c.Println("Prints cyan text with an underline.")
+
+	// Or just add them to New()
+	d := color.New(color.FgCyan, color.Bold)
+	d.Printf("This prints bold cyan %s\n", "too!")
+
+	// Mix up foreground and background colors, create new mixes!
+	red := color.New(color.FgRed)
+
+	boldRed := red.Add(color.Bold)
+	boldRed.Println("This will print text in bold red.")
+
+	whiteBackground := red.Add(color.BgWhite)
+	whiteBackground.Println("Red text with White background.")
+
+	// Use your own io.Writer output
+	color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
+
+	blue := color.New(color.FgBlue)
+	blue.Fprint(myWriter, "This will print text in blue.")
+
+You can create PrintXxx functions to simplify even more:
+
+	// Create a custom print function for convenience
+	red := color.New(color.FgRed).PrintfFunc()
+	red("warning")
+	red("error: %s", err)
+
+	// Mix up multiple attributes
+	notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
+	notice("don't forget this...")
+
+You can also use FprintXxx functions to pass your own io.Writer:
+
+	blue := color.New(FgBlue).FprintfFunc()
+	blue(myWriter, "important notice: %s", stars)
+
+	// Mix up with multiple attributes
+	success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
+	success(myWriter, "don't forget this...")
+
+Or create SprintXxx functions to mix strings with other non-colorized strings:
+
+	yellow := New(FgYellow).SprintFunc()
+	red := New(FgRed).SprintFunc()
+
+	fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
+
+	info := New(FgWhite, BgGreen).SprintFunc()
+	fmt.Printf("this %s rocks!\n", info("package"))
+
+Windows support is enabled by default. All Print functions work as intended.
+However, for the color.SprintXXX functions alone, users should use fmt.FprintXXX
+and set the output to color.Output:
+
+	fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
+
+	info := New(FgWhite, BgGreen).SprintFunc()
+	fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
+
+Using it with existing code is possible. Just use the Set() method to set the
+standard output to the given parameters. That way a rewrite of existing code
+is not required.
+
+	// Use handy standard colors.
+	color.Set(color.FgYellow)
+
+	fmt.Println("Existing text will now be in yellow")
+	fmt.Printf("This one %s\n", "too")
+
+	color.Unset() // don't forget to unset
+
+	// You can mix up parameters
+	color.Set(color.FgMagenta, color.Bold)
+	defer color.Unset() // use it in your function
+
+	fmt.Println("All text will now be bold magenta.")
+
+There might be a case where you want to disable color output (for example to
+pipe the standard output of your app to somewhere else). `Color` has support to
+disable colors both globally and for single color definitions. For example,
+suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
+the color output with:
+
+	var flagNoColor = flag.Bool("no-color", false, "Disable color output")
+
+	if *flagNoColor {
+		color.NoColor = true // disables colorized output
+	}
+
+It also has support for single color definitions (local). You can
+disable/enable color output on the fly:
+
+	c := color.New(color.FgCyan)
+	c.Println("Prints cyan text")
+
+	c.DisableColor()
+	c.Println("This is printed without any color")
+
+	c.EnableColor()
+	c.Println("This prints again cyan...")
+*/
+package color
diff --git a/awsproviderlint/vendor/github.com/go-git/gcfg/LICENSE b/awsproviderlint/vendor/github.com/go-git/gcfg/LICENSE
deleted file mode 100644
index 87a5cede339..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/gcfg/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go
-Authors. All rights reserved.
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/awsproviderlint/vendor/github.com/go-git/gcfg/README b/awsproviderlint/vendor/github.com/go-git/gcfg/README deleted file mode 100644 index 1ff233a529d..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/gcfg/README +++ /dev/null @@ -1,4 +0,0 @@ -Gcfg reads INI-style configuration files into Go structs; -supports user-defined types and subsections. - -Package docs: https://godoc.org/gopkg.in/gcfg.v1 diff --git a/awsproviderlint/vendor/github.com/go-git/gcfg/doc.go b/awsproviderlint/vendor/github.com/go-git/gcfg/doc.go deleted file mode 100644 index 7bdefbf0203..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/gcfg/doc.go +++ /dev/null @@ -1,145 +0,0 @@ -// Package gcfg reads "INI-style" text-based configuration files with -// "name=value" pairs grouped into sections (gcfg files). -// -// This package is still a work in progress; see the sections below for planned -// changes. -// -// Syntax -// -// The syntax is based on that used by git config: -// http://git-scm.com/docs/git-config#_syntax . 
-// There are some (planned) differences compared to the git config format: -// - improve data portability: -// - must be encoded in UTF-8 (for now) and must not contain the 0 byte -// - include and "path" type is not supported -// (path type may be implementable as a user-defined type) -// - internationalization -// - section and variable names can contain unicode letters, unicode digits -// (as defined in http://golang.org/ref/spec#Characters ) and hyphens -// (U+002D), starting with a unicode letter -// - disallow potentially ambiguous or misleading definitions: -// - `[sec.sub]` format is not allowed (deprecated in gitconfig) -// - `[sec ""]` is not allowed -// - use `[sec]` for section name "sec" and empty subsection name -// - (planned) within a single file, definitions must be contiguous for each: -// - section: '[secA]' -> '[secB]' -> '[secA]' is an error -// - subsection: '[sec "A"]' -> '[sec "B"]' -> '[sec "A"]' is an error -// - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error -// -// Data structure -// -// The functions in this package read values into a user-defined struct. -// Each section corresponds to a struct field in the config struct, and each -// variable in a section corresponds to a data field in the section struct. -// The mapping of each section or variable name to fields is done either based -// on the "gcfg" struct tag or by matching the name of the section or variable, -// ignoring case. In the latter case, hyphens '-' in section and variable names -// correspond to underscores '_' in field names. -// Fields must be exported; to use a section or variable name starting with a -// letter that is neither upper- or lower-case, prefix the field name with 'X'. -// (See https://code.google.com/p/go/issues/detail?id=5763#c4 .) -// -// For sections with subsections, the corresponding field in config must be a -// map, rather than a struct, with string keys and pointer-to-struct values. -// Values for subsection variables are stored in the map with the subsection -// name used as the map key. -// (Note that unlike section and variable names, subsection names are case -// sensitive.) -// When using a map, and there is a section with the same section name but -// without a subsection name, its values are stored with the empty string used -// as the key. -// It is possible to provide default values for subsections in the section -// "default-" (or by setting values in the corresponding struct -// field "Default_"). -// -// The functions in this package panic if config is not a pointer to a struct, -// or when a field is not of a suitable type (either a struct or a map with -// string keys and pointer-to-struct values). -// -// Parsing of values -// -// The section structs in the config struct may contain single-valued or -// multi-valued variables. Variables of unnamed slice type (that is, a type -// starting with `[]`) are treated as multi-value; all others (including named -// slice types) are treated as single-valued variables. -// -// Single-valued variables are handled based on the type as follows. -// Unnamed pointer types (that is, types starting with `*`) are dereferenced, -// and if necessary, a new instance is allocated. -// -// For types implementing the encoding.TextUnmarshaler interface, the -// UnmarshalText method is used to set the value. Implementing this method is -// the recommended way for parsing user-defined types. 
-// -// For fields of string kind, the value string is assigned to the field, after -// unquoting and unescaping as needed. -// For fields of bool kind, the field is set to true if the value is "true", -// "yes", "on" or "1", and set to false if the value is "false", "no", "off" or -// "0", ignoring case. In addition, single-valued bool fields can be specified -// with a "blank" value (variable name without equals sign and value); in such -// case the value is set to true. -// -// Predefined integer types [u]int(|8|16|32|64) and big.Int are parsed as -// decimal or hexadecimal (if having '0x' prefix). (This is to prevent -// unintuitively handling zero-padded numbers as octal.) Other types having -// [u]int* as the underlying type, such as os.FileMode and uintptr allow -// decimal, hexadecimal, or octal values. -// Parsing mode for integer types can be overridden using the struct tag option -// ",int=mode" where mode is a combination of the 'd', 'h', and 'o' characters -// (each standing for decimal, hexadecimal, and octal, respectively.) -// -// All other types are parsed using fmt.Sscanf with the "%v" verb. -// -// For multi-valued variables, each individual value is parsed as above and -// appended to the slice. If the first value is specified as a "blank" value -// (variable name without equals sign and value), a new slice is allocated; -// that is any values previously set in the slice will be ignored. -// -// The types subpackage for provides helpers for parsing "enum-like" and integer -// types. -// -// Error handling -// -// There are 3 types of errors: -// -// - programmer errors / panics: -// - invalid configuration structure -// - data errors: -// - fatal errors: -// - invalid configuration syntax -// - warnings: -// - data that doesn't belong to any part of the config structure -// -// Programmer errors trigger panics. These are should be fixed by the programmer -// before releasing code that uses gcfg. -// -// Data errors cause gcfg to return a non-nil error value. This includes the -// case when there are extra unknown key-value definitions in the configuration -// data (extra data). -// However, in some occasions it is desirable to be able to proceed in -// situations when the only data error is that of extra data. -// These errors are handled at a different (warning) priority and can be -// filtered out programmatically. To ignore extra data warnings, wrap the -// gcfg.Read*Into invocation into a call to gcfg.FatalOnly. -// -// TODO -// -// The following is a list of changes under consideration: -// - documentation -// - self-contained syntax documentation -// - more practical examples -// - move TODOs to issue tracker (eventually) -// - syntax -// - reconsider valid escape sequences -// (gitconfig doesn't support \r in value, \t in subsection name, etc.) -// - reading / parsing gcfg files -// - define internal representation structure -// - support multiple inputs (readers, strings, files) -// - support declaring encoding (?) -// - support varying fields sets for subsections (?) -// - writing gcfg files -// - error handling -// - make error context accessible programmatically? -// - limit input size? 
-// -package gcfg // import "github.com/go-git/gcfg" diff --git a/awsproviderlint/vendor/github.com/go-git/gcfg/errors.go b/awsproviderlint/vendor/github.com/go-git/gcfg/errors.go deleted file mode 100644 index 853c76021de..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/gcfg/errors.go +++ /dev/null @@ -1,41 +0,0 @@ -package gcfg - -import ( - "gopkg.in/warnings.v0" -) - -// FatalOnly filters the results of a Read*Into invocation and returns only -// fatal errors. That is, errors (warnings) indicating data for unknown -// sections / variables is ignored. Example invocation: -// -// err := gcfg.FatalOnly(gcfg.ReadFileInto(&cfg, configFile)) -// if err != nil { -// ... -// -func FatalOnly(err error) error { - return warnings.FatalOnly(err) -} - -func isFatal(err error) bool { - _, ok := err.(extraData) - return !ok -} - -type extraData struct { - section string - subsection *string - variable *string -} - -func (e extraData) Error() string { - s := "can't store data at section \"" + e.section + "\"" - if e.subsection != nil { - s += ", subsection \"" + *e.subsection + "\"" - } - if e.variable != nil { - s += ", variable \"" + *e.variable + "\"" - } - return s -} - -var _ error = extraData{} diff --git a/awsproviderlint/vendor/github.com/go-git/gcfg/go1_0.go b/awsproviderlint/vendor/github.com/go-git/gcfg/go1_0.go deleted file mode 100644 index 6670210791d..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/gcfg/go1_0.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !go1.2 - -package gcfg - -type textUnmarshaler interface { - UnmarshalText(text []byte) error -} diff --git a/awsproviderlint/vendor/github.com/go-git/gcfg/go1_2.go b/awsproviderlint/vendor/github.com/go-git/gcfg/go1_2.go deleted file mode 100644 index 6f5843bc7cd..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/gcfg/go1_2.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.2 - -package gcfg - -import ( - "encoding" -) - -type textUnmarshaler encoding.TextUnmarshaler diff --git a/awsproviderlint/vendor/github.com/go-git/gcfg/read.go b/awsproviderlint/vendor/github.com/go-git/gcfg/read.go deleted file mode 100644 index 4dfdc5cf301..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/gcfg/read.go +++ /dev/null @@ -1,273 +0,0 @@ -package gcfg - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "strings" - - "github.com/go-git/gcfg/scanner" - "github.com/go-git/gcfg/token" - "gopkg.in/warnings.v0" -) - -var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t', 'b': '\b'} - -// no error: invalid literals should be caught by scanner -func unquote(s string) string { - u, q, esc := make([]rune, 0, len(s)), false, false - for _, c := range s { - if esc { - uc, ok := unescape[c] - switch { - case ok: - u = append(u, uc) - fallthrough - case !q && c == '\n': - esc = false - continue - } - panic("invalid escape sequence") - } - switch c { - case '"': - q = !q - case '\\': - esc = true - default: - u = append(u, c) - } - } - if q { - panic("missing end quote") - } - if esc { - panic("invalid escape sequence") - } - return string(u) -} - -func read(c *warnings.Collector, callback func(string, string, string, string, bool) error, - fset *token.FileSet, file *token.File, src []byte) error { - // - var s scanner.Scanner - var errs scanner.ErrorList - s.Init(file, src, func(p token.Position, m string) { errs.Add(p, m) }, 0) - sect, sectsub := "", "" - pos, tok, lit := s.Scan() - errfn := func(msg string) error { - return fmt.Errorf("%s: %s", fset.Position(pos), msg) - } - for { - if errs.Len() > 0 { - if err 
:= c.Collect(errs.Err()); err != nil { - return err - } - } - switch tok { - case token.EOF: - return nil - case token.EOL, token.COMMENT: - pos, tok, lit = s.Scan() - case token.LBRACK: - pos, tok, lit = s.Scan() - if errs.Len() > 0 { - if err := c.Collect(errs.Err()); err != nil { - return err - } - } - if tok != token.IDENT { - if err := c.Collect(errfn("expected section name")); err != nil { - return err - } - } - sect, sectsub = lit, "" - pos, tok, lit = s.Scan() - if errs.Len() > 0 { - if err := c.Collect(errs.Err()); err != nil { - return err - } - } - if tok == token.STRING { - sectsub = unquote(lit) - if sectsub == "" { - if err := c.Collect(errfn("empty subsection name")); err != nil { - return err - } - } - pos, tok, lit = s.Scan() - if errs.Len() > 0 { - if err := c.Collect(errs.Err()); err != nil { - return err - } - } - } - if tok != token.RBRACK { - if sectsub == "" { - if err := c.Collect(errfn("expected subsection name or right bracket")); err != nil { - return err - } - } - if err := c.Collect(errfn("expected right bracket")); err != nil { - return err - } - } - pos, tok, lit = s.Scan() - if tok != token.EOL && tok != token.EOF && tok != token.COMMENT { - if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil { - return err - } - } - // If a section/subsection header was found, ensure a - // container object is created, even if there are no - // variables further down. - err := c.Collect(callback(sect, sectsub, "", "", true)) - if err != nil { - return err - } - case token.IDENT: - if sect == "" { - if err := c.Collect(errfn("expected section header")); err != nil { - return err - } - } - n := lit - pos, tok, lit = s.Scan() - if errs.Len() > 0 { - return errs.Err() - } - blank, v := tok == token.EOF || tok == token.EOL || tok == token.COMMENT, "" - if !blank { - if tok != token.ASSIGN { - if err := c.Collect(errfn("expected '='")); err != nil { - return err - } - } - pos, tok, lit = s.Scan() - if errs.Len() > 0 { - if err := c.Collect(errs.Err()); err != nil { - return err - } - } - if tok != token.STRING { - if err := c.Collect(errfn("expected value")); err != nil { - return err - } - } - v = unquote(lit) - pos, tok, lit = s.Scan() - if errs.Len() > 0 { - if err := c.Collect(errs.Err()); err != nil { - return err - } - } - if tok != token.EOL && tok != token.EOF && tok != token.COMMENT { - if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil { - return err - } - } - } - err := c.Collect(callback(sect, sectsub, n, v, blank)) - if err != nil { - return err - } - default: - if sect == "" { - if err := c.Collect(errfn("expected section header")); err != nil { - return err - } - } - if err := c.Collect(errfn("expected section header or variable declaration")); err != nil { - return err - } - } - } - panic("never reached") -} - -func readInto(config interface{}, fset *token.FileSet, file *token.File, - src []byte) error { - // - c := warnings.NewCollector(isFatal) - firstPassCallback := func(s string, ss string, k string, v string, bv bool) error { - return set(c, config, s, ss, k, v, bv, false) - } - err := read(c, firstPassCallback, fset, file, src) - if err != nil { - return err - } - secondPassCallback := func(s string, ss string, k string, v string, bv bool) error { - return set(c, config, s, ss, k, v, bv, true) - } - err = read(c, secondPassCallback, fset, file, src) - if err != nil { - return err - } - return c.Done() -} - -// ReadWithCallback reads gcfg formatted data from reader and calls -// callback with each section and option 
found. -// -// Callback is called with section, subsection, option key, option value -// and blank value flag as arguments. -// -// When a section is found, callback is called with nil subsection, option key -// and option value. -// -// When a subsection is found, callback is called with nil option key and -// option value. -// -// If blank value flag is true, it means that the value was not set for an option -// (as opposed to set to empty string). -// -// If callback returns an error, ReadWithCallback terminates with an error too. -func ReadWithCallback(reader io.Reader, callback func(string, string, string, string, bool) error) error { - src, err := ioutil.ReadAll(reader) - if err != nil { - return err - } - - fset := token.NewFileSet() - file := fset.AddFile("", fset.Base(), len(src)) - c := warnings.NewCollector(isFatal) - - return read(c, callback, fset, file, src) -} - -// ReadInto reads gcfg formatted data from reader and sets the values into the -// corresponding fields in config. -func ReadInto(config interface{}, reader io.Reader) error { - src, err := ioutil.ReadAll(reader) - if err != nil { - return err - } - fset := token.NewFileSet() - file := fset.AddFile("", fset.Base(), len(src)) - return readInto(config, fset, file, src) -} - -// ReadStringInto reads gcfg formatted data from str and sets the values into -// the corresponding fields in config. -func ReadStringInto(config interface{}, str string) error { - r := strings.NewReader(str) - return ReadInto(config, r) -} - -// ReadFileInto reads gcfg formatted data from the file filename and sets the -// values into the corresponding fields in config. -func ReadFileInto(config interface{}, filename string) error { - f, err := os.Open(filename) - if err != nil { - return err - } - defer f.Close() - src, err := ioutil.ReadAll(f) - if err != nil { - return err - } - fset := token.NewFileSet() - file := fset.AddFile(filename, fset.Base(), len(src)) - return readInto(config, fset, file, src) -} diff --git a/awsproviderlint/vendor/github.com/go-git/gcfg/scanner/errors.go b/awsproviderlint/vendor/github.com/go-git/gcfg/scanner/errors.go deleted file mode 100644 index a6e00f5c64e..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/gcfg/scanner/errors.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package scanner - -import ( - "fmt" - "io" - "sort" -) - -import ( - "github.com/go-git/gcfg/token" -) - -// In an ErrorList, an error is represented by an *Error. -// The position Pos, if valid, points to the beginning of -// the offending token, and the error condition is described -// by Msg. -// -type Error struct { - Pos token.Position - Msg string -} - -// Error implements the error interface. -func (e Error) Error() string { - if e.Pos.Filename != "" || e.Pos.IsValid() { - // don't print "" - // TODO(gri) reconsider the semantics of Position.IsValid - return e.Pos.String() + ": " + e.Msg - } - return e.Msg -} - -// ErrorList is a list of *Errors. -// The zero value for an ErrorList is an empty ErrorList ready to use. -// -type ErrorList []*Error - -// Add adds an Error with given position and error message to an ErrorList. -func (p *ErrorList) Add(pos token.Position, msg string) { - *p = append(*p, &Error{pos, msg}) -} - -// Reset resets an ErrorList to no errors. -func (p *ErrorList) Reset() { *p = (*p)[0:0] } - -// ErrorList implements the sort Interface. 
-func (p ErrorList) Len() int { return len(p) } -func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p ErrorList) Less(i, j int) bool { - e := &p[i].Pos - f := &p[j].Pos - if e.Filename < f.Filename { - return true - } - if e.Filename == f.Filename { - return e.Offset < f.Offset - } - return false -} - -// Sort sorts an ErrorList. *Error entries are sorted by position, -// other errors are sorted by error message, and before any *Error -// entry. -// -func (p ErrorList) Sort() { - sort.Sort(p) -} - -// RemoveMultiples sorts an ErrorList and removes all but the first error per line. -func (p *ErrorList) RemoveMultiples() { - sort.Sort(p) - var last token.Position // initial last.Line is != any legal error line - i := 0 - for _, e := range *p { - if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line { - last = e.Pos - (*p)[i] = e - i++ - } - } - (*p) = (*p)[0:i] -} - -// An ErrorList implements the error interface. -func (p ErrorList) Error() string { - switch len(p) { - case 0: - return "no errors" - case 1: - return p[0].Error() - } - return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1) -} - -// Err returns an error equivalent to this error list. -// If the list is empty, Err returns nil. -func (p ErrorList) Err() error { - if len(p) == 0 { - return nil - } - return p -} - -// PrintError is a utility function that prints a list of errors to w, -// one error per line, if the err parameter is an ErrorList. Otherwise -// it prints the err string. -// -func PrintError(w io.Writer, err error) { - if list, ok := err.(ErrorList); ok { - for _, e := range list { - fmt.Fprintf(w, "%s\n", e) - } - } else if err != nil { - fmt.Fprintf(w, "%s\n", err) - } -} diff --git a/awsproviderlint/vendor/github.com/go-git/gcfg/scanner/scanner.go b/awsproviderlint/vendor/github.com/go-git/gcfg/scanner/scanner.go deleted file mode 100644 index 41aafec7589..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/gcfg/scanner/scanner.go +++ /dev/null @@ -1,342 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package scanner implements a scanner for gcfg configuration text. -// It takes a []byte as source which can then be tokenized -// through repeated calls to the Scan method. -// -// Note that the API for the scanner package may change to accommodate new -// features or implementation changes in gcfg. -// -package scanner - -import ( - "fmt" - "path/filepath" - "unicode" - "unicode/utf8" -) - -import ( - "github.com/go-git/gcfg/token" -) - -// An ErrorHandler may be provided to Scanner.Init. If a syntax error is -// encountered and a handler was installed, the handler is called with a -// position and an error message. The position points to the beginning of -// the offending token. -// -type ErrorHandler func(pos token.Position, msg string) - -// A Scanner holds the scanner's internal state while processing -// a given text. It can be allocated as part of another data -// structure but must be initialized via Init before use. 
-// -type Scanner struct { - // immutable state - file *token.File // source file handle - dir string // directory portion of file.Name() - src []byte // source - err ErrorHandler // error reporting; or nil - mode Mode // scanning mode - - // scanning state - ch rune // current character - offset int // character offset - rdOffset int // reading offset (position after current character) - lineOffset int // current line offset - nextVal bool // next token is expected to be a value - - // public state - ok to modify - ErrorCount int // number of errors encountered -} - -// Read the next Unicode char into s.ch. -// s.ch < 0 means end-of-file. -// -func (s *Scanner) next() { - if s.rdOffset < len(s.src) { - s.offset = s.rdOffset - if s.ch == '\n' { - s.lineOffset = s.offset - s.file.AddLine(s.offset) - } - r, w := rune(s.src[s.rdOffset]), 1 - switch { - case r == 0: - s.error(s.offset, "illegal character NUL") - case r >= 0x80: - // not ASCII - r, w = utf8.DecodeRune(s.src[s.rdOffset:]) - if r == utf8.RuneError && w == 1 { - s.error(s.offset, "illegal UTF-8 encoding") - } - } - s.rdOffset += w - s.ch = r - } else { - s.offset = len(s.src) - if s.ch == '\n' { - s.lineOffset = s.offset - s.file.AddLine(s.offset) - } - s.ch = -1 // eof - } -} - -// A mode value is a set of flags (or 0). -// They control scanner behavior. -// -type Mode uint - -const ( - ScanComments Mode = 1 << iota // return comments as COMMENT tokens -) - -// Init prepares the scanner s to tokenize the text src by setting the -// scanner at the beginning of src. The scanner uses the file set file -// for position information and it adds line information for each line. -// It is ok to re-use the same file when re-scanning the same file as -// line information which is already present is ignored. Init causes a -// panic if the file size does not match the src size. -// -// Calls to Scan will invoke the error handler err if they encounter a -// syntax error and err is not nil. Also, for each error encountered, -// the Scanner field ErrorCount is incremented by one. The mode parameter -// determines how comments are handled. -// -// Note that Init may call err if there is an error in the first character -// of the file. -// -func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) { - // Explicitly initialize all fields since a scanner may be reused. 
- if file.Size() != len(src) { - panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src))) - } - s.file = file - s.dir, _ = filepath.Split(file.Name()) - s.src = src - s.err = err - s.mode = mode - - s.ch = ' ' - s.offset = 0 - s.rdOffset = 0 - s.lineOffset = 0 - s.ErrorCount = 0 - s.nextVal = false - - s.next() -} - -func (s *Scanner) error(offs int, msg string) { - if s.err != nil { - s.err(s.file.Position(s.file.Pos(offs)), msg) - } - s.ErrorCount++ -} - -func (s *Scanner) scanComment() string { - // initial [;#] already consumed - offs := s.offset - 1 // position of initial [;#] - - for s.ch != '\n' && s.ch >= 0 { - s.next() - } - return string(s.src[offs:s.offset]) -} - -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= 0x80 && unicode.IsLetter(ch) -} - -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) -} - -func (s *Scanner) scanIdentifier() string { - offs := s.offset - for isLetter(s.ch) || isDigit(s.ch) || s.ch == '-' { - s.next() - } - return string(s.src[offs:s.offset]) -} - -func (s *Scanner) scanEscape(val bool) { - offs := s.offset - ch := s.ch - s.next() // always make progress - switch ch { - case '\\', '"': - // ok - case 'n', 't', 'b': - if val { - break // ok - } - fallthrough - default: - s.error(offs, "unknown escape sequence") - } -} - -func (s *Scanner) scanString() string { - // '"' opening already consumed - offs := s.offset - 1 - - for s.ch != '"' { - ch := s.ch - s.next() - if ch == '\n' || ch < 0 { - s.error(offs, "string not terminated") - break - } - if ch == '\\' { - s.scanEscape(false) - } - } - - s.next() - - return string(s.src[offs:s.offset]) -} - -func stripCR(b []byte) []byte { - c := make([]byte, len(b)) - i := 0 - for _, ch := range b { - if ch != '\r' { - c[i] = ch - i++ - } - } - return c[:i] -} - -func (s *Scanner) scanValString() string { - offs := s.offset - - hasCR := false - end := offs - inQuote := false -loop: - for inQuote || s.ch >= 0 && s.ch != '\n' && s.ch != ';' && s.ch != '#' { - ch := s.ch - s.next() - switch { - case inQuote && ch == '\\': - s.scanEscape(true) - case !inQuote && ch == '\\': - if s.ch == '\r' { - hasCR = true - s.next() - } - if s.ch != '\n' { - s.scanEscape(true) - } else { - s.next() - } - case ch == '"': - inQuote = !inQuote - case ch == '\r': - hasCR = true - case ch < 0 || inQuote && ch == '\n': - s.error(offs, "string not terminated") - break loop - } - if inQuote || !isWhiteSpace(ch) { - end = s.offset - } - } - - lit := s.src[offs:end] - if hasCR { - lit = stripCR(lit) - } - - return string(lit) -} - -func isWhiteSpace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\r' -} - -func (s *Scanner) skipWhitespace() { - for isWhiteSpace(s.ch) { - s.next() - } -} - -// Scan scans the next token and returns the token position, the token, -// and its literal string if applicable. The source end is indicated by -// token.EOF. -// -// If the returned token is a literal (token.IDENT, token.STRING) or -// token.COMMENT, the literal string has the corresponding value. -// -// If the returned token is token.ILLEGAL, the literal string is the -// offending character. -// -// In all other cases, Scan returns an empty literal string. -// -// For more tolerant parsing, Scan will return a valid token if -// possible even if a syntax error was encountered. Thus, even -// if the resulting token sequence contains no illegal tokens, -// a client may not assume that no error occurred. 
Instead it -// must check the scanner's ErrorCount or the number of calls -// of the error handler, if there was one installed. -// -// Scan adds line information to the file added to the file -// set with Init. Token positions are relative to that file -// and thus relative to the file set. -// -func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) { -scanAgain: - s.skipWhitespace() - - // current token start - pos = s.file.Pos(s.offset) - - // determine token value - switch ch := s.ch; { - case s.nextVal: - lit = s.scanValString() - tok = token.STRING - s.nextVal = false - case isLetter(ch): - lit = s.scanIdentifier() - tok = token.IDENT - default: - s.next() // always make progress - switch ch { - case -1: - tok = token.EOF - case '\n': - tok = token.EOL - case '"': - tok = token.STRING - lit = s.scanString() - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case ';', '#': - // comment - lit = s.scanComment() - if s.mode&ScanComments == 0 { - // skip comment - goto scanAgain - } - tok = token.COMMENT - case '=': - tok = token.ASSIGN - s.nextVal = true - default: - s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch)) - tok = token.ILLEGAL - lit = string(ch) - } - } - - return -} diff --git a/awsproviderlint/vendor/github.com/go-git/gcfg/set.go b/awsproviderlint/vendor/github.com/go-git/gcfg/set.go deleted file mode 100644 index e2d9278025c..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/gcfg/set.go +++ /dev/null @@ -1,332 +0,0 @@ -package gcfg - -import ( - "bytes" - "encoding/gob" - "fmt" - "math/big" - "reflect" - "strings" - "unicode" - "unicode/utf8" - - "github.com/go-git/gcfg/types" - "gopkg.in/warnings.v0" -) - -type tag struct { - ident string - intMode string -} - -func newTag(ts string) tag { - t := tag{} - s := strings.Split(ts, ",") - t.ident = s[0] - for _, tse := range s[1:] { - if strings.HasPrefix(tse, "int=") { - t.intMode = tse[len("int="):] - } - } - return t -} - -func fieldFold(v reflect.Value, name string) (reflect.Value, tag) { - var n string - r0, _ := utf8.DecodeRuneInString(name) - if unicode.IsLetter(r0) && !unicode.IsLower(r0) && !unicode.IsUpper(r0) { - n = "X" - } - n += strings.Replace(name, "-", "_", -1) - f, ok := v.Type().FieldByNameFunc(func(fieldName string) bool { - if !v.FieldByName(fieldName).CanSet() { - return false - } - f, _ := v.Type().FieldByName(fieldName) - t := newTag(f.Tag.Get("gcfg")) - if t.ident != "" { - return strings.EqualFold(t.ident, name) - } - return strings.EqualFold(n, fieldName) - }) - if !ok { - return reflect.Value{}, tag{} - } - return v.FieldByName(f.Name), newTag(f.Tag.Get("gcfg")) -} - -type setter func(destp interface{}, blank bool, val string, t tag) error - -var errUnsupportedType = fmt.Errorf("unsupported type") -var errBlankUnsupported = fmt.Errorf("blank value not supported for type") - -var setters = []setter{ - typeSetter, textUnmarshalerSetter, kindSetter, scanSetter, -} - -func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error { - dtu, ok := d.(textUnmarshaler) - if !ok { - return errUnsupportedType - } - if blank { - return errBlankUnsupported - } - return dtu.UnmarshalText([]byte(val)) -} - -func boolSetter(d interface{}, blank bool, val string, t tag) error { - if blank { - reflect.ValueOf(d).Elem().Set(reflect.ValueOf(true)) - return nil - } - b, err := types.ParseBool(val) - if err == nil { - reflect.ValueOf(d).Elem().Set(reflect.ValueOf(b)) - } - return err -} - -func intMode(mode string) types.IntMode { - var m 
types.IntMode - if strings.ContainsAny(mode, "dD") { - m |= types.Dec - } - if strings.ContainsAny(mode, "hH") { - m |= types.Hex - } - if strings.ContainsAny(mode, "oO") { - m |= types.Oct - } - return m -} - -var typeModes = map[reflect.Type]types.IntMode{ - reflect.TypeOf(int(0)): types.Dec | types.Hex, - reflect.TypeOf(int8(0)): types.Dec | types.Hex, - reflect.TypeOf(int16(0)): types.Dec | types.Hex, - reflect.TypeOf(int32(0)): types.Dec | types.Hex, - reflect.TypeOf(int64(0)): types.Dec | types.Hex, - reflect.TypeOf(uint(0)): types.Dec | types.Hex, - reflect.TypeOf(uint8(0)): types.Dec | types.Hex, - reflect.TypeOf(uint16(0)): types.Dec | types.Hex, - reflect.TypeOf(uint32(0)): types.Dec | types.Hex, - reflect.TypeOf(uint64(0)): types.Dec | types.Hex, - // use default mode (allow dec/hex/oct) for uintptr type - reflect.TypeOf(big.Int{}): types.Dec | types.Hex, -} - -func intModeDefault(t reflect.Type) types.IntMode { - m, ok := typeModes[t] - if !ok { - m = types.Dec | types.Hex | types.Oct - } - return m -} - -func intSetter(d interface{}, blank bool, val string, t tag) error { - if blank { - return errBlankUnsupported - } - mode := intMode(t.intMode) - if mode == 0 { - mode = intModeDefault(reflect.TypeOf(d).Elem()) - } - return types.ParseInt(d, val, mode) -} - -func stringSetter(d interface{}, blank bool, val string, t tag) error { - if blank { - return errBlankUnsupported - } - dsp, ok := d.(*string) - if !ok { - return errUnsupportedType - } - *dsp = val - return nil -} - -var kindSetters = map[reflect.Kind]setter{ - reflect.String: stringSetter, - reflect.Bool: boolSetter, - reflect.Int: intSetter, - reflect.Int8: intSetter, - reflect.Int16: intSetter, - reflect.Int32: intSetter, - reflect.Int64: intSetter, - reflect.Uint: intSetter, - reflect.Uint8: intSetter, - reflect.Uint16: intSetter, - reflect.Uint32: intSetter, - reflect.Uint64: intSetter, - reflect.Uintptr: intSetter, -} - -var typeSetters = map[reflect.Type]setter{ - reflect.TypeOf(big.Int{}): intSetter, -} - -func typeSetter(d interface{}, blank bool, val string, tt tag) error { - t := reflect.ValueOf(d).Type().Elem() - setter, ok := typeSetters[t] - if !ok { - return errUnsupportedType - } - return setter(d, blank, val, tt) -} - -func kindSetter(d interface{}, blank bool, val string, tt tag) error { - k := reflect.ValueOf(d).Type().Elem().Kind() - setter, ok := kindSetters[k] - if !ok { - return errUnsupportedType - } - return setter(d, blank, val, tt) -} - -func scanSetter(d interface{}, blank bool, val string, tt tag) error { - if blank { - return errBlankUnsupported - } - return types.ScanFully(d, val, 'v') -} - -func newValue(c *warnings.Collector, sect string, vCfg reflect.Value, - vType reflect.Type) (reflect.Value, error) { - // - pv := reflect.New(vType) - dfltName := "default-" + sect - dfltField, _ := fieldFold(vCfg, dfltName) - var err error - if dfltField.IsValid() { - b := bytes.NewBuffer(nil) - ge := gob.NewEncoder(b) - if err = c.Collect(ge.EncodeValue(dfltField)); err != nil { - return pv, err - } - gd := gob.NewDecoder(bytes.NewReader(b.Bytes())) - if err = c.Collect(gd.DecodeValue(pv.Elem())); err != nil { - return pv, err - } - } - return pv, nil -} - -func set(c *warnings.Collector, cfg interface{}, sect, sub, name string, - value string, blankValue bool, subsectPass bool) error { - // - vPCfg := reflect.ValueOf(cfg) - if vPCfg.Kind() != reflect.Ptr || vPCfg.Elem().Kind() != reflect.Struct { - panic(fmt.Errorf("config must be a pointer to a struct")) - } - vCfg := vPCfg.Elem() - vSect, _ := 
fieldFold(vCfg, sect) - if !vSect.IsValid() { - err := extraData{section: sect} - return c.Collect(err) - } - isSubsect := vSect.Kind() == reflect.Map - if subsectPass != isSubsect { - return nil - } - if isSubsect { - vst := vSect.Type() - if vst.Key().Kind() != reflect.String || - vst.Elem().Kind() != reflect.Ptr || - vst.Elem().Elem().Kind() != reflect.Struct { - panic(fmt.Errorf("map field for section must have string keys and "+ - " pointer-to-struct values: section %q", sect)) - } - if vSect.IsNil() { - vSect.Set(reflect.MakeMap(vst)) - } - k := reflect.ValueOf(sub) - pv := vSect.MapIndex(k) - if !pv.IsValid() { - vType := vSect.Type().Elem().Elem() - var err error - if pv, err = newValue(c, sect, vCfg, vType); err != nil { - return err - } - vSect.SetMapIndex(k, pv) - } - vSect = pv.Elem() - } else if vSect.Kind() != reflect.Struct { - panic(fmt.Errorf("field for section must be a map or a struct: "+ - "section %q", sect)) - } else if sub != "" { - err := extraData{section: sect, subsection: &sub} - return c.Collect(err) - } - // Empty name is a special value, meaning that only the - // section/subsection object is to be created, with no values set. - if name == "" { - return nil - } - vVar, t := fieldFold(vSect, name) - if !vVar.IsValid() { - var err error - if isSubsect { - err = extraData{section: sect, subsection: &sub, variable: &name} - } else { - err = extraData{section: sect, variable: &name} - } - return c.Collect(err) - } - // vVal is either single-valued var, or newly allocated value within multi-valued var - var vVal reflect.Value - // multi-value if unnamed slice type - isMulti := vVar.Type().Name() == "" && vVar.Kind() == reflect.Slice || - vVar.Type().Name() == "" && vVar.Kind() == reflect.Ptr && vVar.Type().Elem().Name() == "" && vVar.Type().Elem().Kind() == reflect.Slice - if isMulti && vVar.Kind() == reflect.Ptr { - if vVar.IsNil() { - vVar.Set(reflect.New(vVar.Type().Elem())) - } - vVar = vVar.Elem() - } - if isMulti && blankValue { - vVar.Set(reflect.Zero(vVar.Type())) - return nil - } - if isMulti { - vVal = reflect.New(vVar.Type().Elem()).Elem() - } else { - vVal = vVar - } - isDeref := vVal.Type().Name() == "" && vVal.Type().Kind() == reflect.Ptr - isNew := isDeref && vVal.IsNil() - // vAddr is address of value to set (dereferenced & allocated as needed) - var vAddr reflect.Value - switch { - case isNew: - vAddr = reflect.New(vVal.Type().Elem()) - case isDeref && !isNew: - vAddr = vVal - default: - vAddr = vVal.Addr() - } - vAddrI := vAddr.Interface() - err, ok := error(nil), false - for _, s := range setters { - err = s(vAddrI, blankValue, value, t) - if err == nil { - ok = true - break - } - if err != errUnsupportedType { - return err - } - } - if !ok { - // in case all setters returned errUnsupportedType - return err - } - if isNew { // set reference if it was dereferenced and newly allocated - vVal.Set(vAddr) - } - if isMulti { // append if multi-valued - vVar.Set(reflect.Append(vVar, vVal)) - } - return nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/gcfg/token/position.go b/awsproviderlint/vendor/github.com/go-git/gcfg/token/position.go deleted file mode 100644 index fc45c1e7693..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/gcfg/token/position.go +++ /dev/null @@ -1,435 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// TODO(gri) consider making this a separate package outside the go directory. 
- -package token - -import ( - "fmt" - "sort" - "sync" -) - -// ----------------------------------------------------------------------------- -// Positions - -// Position describes an arbitrary source position -// including the file, line, and column location. -// A Position is valid if the line number is > 0. -// -type Position struct { - Filename string // filename, if any - Offset int // offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (character count) -} - -// IsValid returns true if the position is valid. -func (pos *Position) IsValid() bool { return pos.Line > 0 } - -// String returns a string in one of several forms: -// -// file:line:column valid position with file name -// line:column valid position without file name -// file invalid position with file name -// - invalid position without file name -// -func (pos Position) String() string { - s := pos.Filename - if pos.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", pos.Line, pos.Column) - } - if s == "" { - s = "-" - } - return s -} - -// Pos is a compact encoding of a source position within a file set. -// It can be converted into a Position for a more convenient, but much -// larger, representation. -// -// The Pos value for a given file is a number in the range [base, base+size], -// where base and size are specified when adding the file to the file set via -// AddFile. -// -// To create the Pos value for a specific source offset, first add -// the respective file to the current file set (via FileSet.AddFile) -// and then call File.Pos(offset) for that file. Given a Pos value p -// for a specific file set fset, the corresponding Position value is -// obtained by calling fset.Position(p). -// -// Pos values can be compared directly with the usual comparison operators: -// If two Pos values p and q are in the same file, comparing p and q is -// equivalent to comparing the respective source file offsets. If p and q -// are in different files, p < q is true if the file implied by p was added -// to the respective file set before the file implied by q. -// -type Pos int - -// The zero value for Pos is NoPos; there is no file and line information -// associated with it, and NoPos().IsValid() is false. NoPos is always -// smaller than any other Pos value. The corresponding Position value -// for NoPos is the zero value for Position. -// -const NoPos Pos = 0 - -// IsValid returns true if the position is valid. -func (p Pos) IsValid() bool { - return p != NoPos -} - -// ----------------------------------------------------------------------------- -// File - -// A File is a handle for a file belonging to a FileSet. -// A File has a name, size, and line offset table. -// -type File struct { - set *FileSet - name string // file name as provided to AddFile - base int // Pos value range for this file is [base...base+size] - size int // file size as provided to AddFile - - // lines and infos are protected by set.mutex - lines []int - infos []lineInfo -} - -// Name returns the file name of file f as registered with AddFile. -func (f *File) Name() string { - return f.name -} - -// Base returns the base offset of file f as registered with AddFile. -func (f *File) Base() int { - return f.base -} - -// Size returns the size of file f as registered with AddFile. -func (f *File) Size() int { - return f.size -} - -// LineCount returns the number of lines in file f. 
-func (f *File) LineCount() int { - f.set.mutex.RLock() - n := len(f.lines) - f.set.mutex.RUnlock() - return n -} - -// AddLine adds the line offset for a new line. -// The line offset must be larger than the offset for the previous line -// and smaller than the file size; otherwise the line offset is ignored. -// -func (f *File) AddLine(offset int) { - f.set.mutex.Lock() - if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size { - f.lines = append(f.lines, offset) - } - f.set.mutex.Unlock() -} - -// SetLines sets the line offsets for a file and returns true if successful. -// The line offsets are the offsets of the first character of each line; -// for instance for the content "ab\nc\n" the line offsets are {0, 3}. -// An empty file has an empty line offset table. -// Each line offset must be larger than the offset for the previous line -// and smaller than the file size; otherwise SetLines fails and returns -// false. -// -func (f *File) SetLines(lines []int) bool { - // verify validity of lines table - size := f.size - for i, offset := range lines { - if i > 0 && offset <= lines[i-1] || size <= offset { - return false - } - } - - // set lines table - f.set.mutex.Lock() - f.lines = lines - f.set.mutex.Unlock() - return true -} - -// SetLinesForContent sets the line offsets for the given file content. -func (f *File) SetLinesForContent(content []byte) { - var lines []int - line := 0 - for offset, b := range content { - if line >= 0 { - lines = append(lines, line) - } - line = -1 - if b == '\n' { - line = offset + 1 - } - } - - // set lines table - f.set.mutex.Lock() - f.lines = lines - f.set.mutex.Unlock() -} - -// A lineInfo object describes alternative file and line number -// information (such as provided via a //line comment in a .go -// file) for a given file offset. -type lineInfo struct { - // fields are exported to make them accessible to gob - Offset int - Filename string - Line int -} - -// AddLineInfo adds alternative file and line number information for -// a given file offset. The offset must be larger than the offset for -// the previously added alternative line info and smaller than the -// file size; otherwise the information is ignored. -// -// AddLineInfo is typically used to register alternative position -// information for //line filename:line comments in source files. -// -func (f *File) AddLineInfo(offset int, filename string, line int) { - f.set.mutex.Lock() - if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size { - f.infos = append(f.infos, lineInfo{offset, filename, line}) - } - f.set.mutex.Unlock() -} - -// Pos returns the Pos value for the given file offset; -// the offset must be <= f.Size(). -// f.Pos(f.Offset(p)) == p. -// -func (f *File) Pos(offset int) Pos { - if offset > f.size { - panic("illegal file offset") - } - return Pos(f.base + offset) -} - -// Offset returns the offset for the given file position p; -// p must be a valid Pos value in that file. -// f.Offset(f.Pos(offset)) == offset. -// -func (f *File) Offset(p Pos) int { - if int(p) < f.base || int(p) > f.base+f.size { - panic("illegal Pos value") - } - return int(p) - f.base -} - -// Line returns the line number for the given file position p; -// p must be a Pos value in that file or NoPos. 
-// -func (f *File) Line(p Pos) int { - // TODO(gri) this can be implemented much more efficiently - return f.Position(p).Line -} - -func searchLineInfos(a []lineInfo, x int) int { - return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1 -} - -// info returns the file name, line, and column number for a file offset. -func (f *File) info(offset int) (filename string, line, column int) { - filename = f.name - if i := searchInts(f.lines, offset); i >= 0 { - line, column = i+1, offset-f.lines[i]+1 - } - if len(f.infos) > 0 { - // almost no files have extra line infos - if i := searchLineInfos(f.infos, offset); i >= 0 { - alt := &f.infos[i] - filename = alt.Filename - if i := searchInts(f.lines, alt.Offset); i >= 0 { - line += alt.Line - i - 1 - } - } - } - return -} - -func (f *File) position(p Pos) (pos Position) { - offset := int(p) - f.base - pos.Offset = offset - pos.Filename, pos.Line, pos.Column = f.info(offset) - return -} - -// Position returns the Position value for the given file position p; -// p must be a Pos value in that file or NoPos. -// -func (f *File) Position(p Pos) (pos Position) { - if p != NoPos { - if int(p) < f.base || int(p) > f.base+f.size { - panic("illegal Pos value") - } - pos = f.position(p) - } - return -} - -// ----------------------------------------------------------------------------- -// FileSet - -// A FileSet represents a set of source files. -// Methods of file sets are synchronized; multiple goroutines -// may invoke them concurrently. -// -type FileSet struct { - mutex sync.RWMutex // protects the file set - base int // base offset for the next file - files []*File // list of files in the order added to the set - last *File // cache of last file looked up -} - -// NewFileSet creates a new file set. -func NewFileSet() *FileSet { - s := new(FileSet) - s.base = 1 // 0 == NoPos - return s -} - -// Base returns the minimum base offset that must be provided to -// AddFile when adding the next file. -// -func (s *FileSet) Base() int { - s.mutex.RLock() - b := s.base - s.mutex.RUnlock() - return b - -} - -// AddFile adds a new file with a given filename, base offset, and file size -// to the file set s and returns the file. Multiple files may have the same -// name. The base offset must not be smaller than the FileSet's Base(), and -// size must not be negative. -// -// Adding the file will set the file set's Base() value to base + size + 1 -// as the minimum base value for the next file. The following relationship -// exists between a Pos value p for a given file offset offs: -// -// int(p) = base + offs -// -// with offs in the range [0, size] and thus p in the range [base, base+size]. -// For convenience, File.Pos may be used to create file-specific position -// values from a file offset. -// -func (s *FileSet) AddFile(filename string, base, size int) *File { - s.mutex.Lock() - defer s.mutex.Unlock() - if base < s.base || size < 0 { - panic("illegal base or size") - } - // base >= s.base && size >= 0 - f := &File{s, filename, base, size, []int{0}, nil} - base += size + 1 // +1 because EOF also has a position - if base < 0 { - panic("token.Pos offset overflow (> 2G of source code in file set)") - } - // add the file to the file set - s.base = base - s.files = append(s.files, f) - s.last = f - return f -} - -// Iterate calls f for the files in the file set in the order they were added -// until f returns false. 
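The base/offset arithmetic described in the `AddFile` comment can be checked with a short round trip. A sketch using only functions defined in this file, with the import path assumed from the vendor directory:

```go
package main

import (
	"fmt"

	"github.com/go-git/gcfg/token"
)

func main() {
	src := []byte("[core]\nname = x\n")

	fset := token.NewFileSet()
	// Base() is the minimum base for the next file; AddFile reserves
	// the range [base, base+size] for this file's Pos values.
	f := fset.AddFile("example.gcfg", fset.Base(), len(src))
	f.SetLinesForContent(src) // line offsets {0, 7}

	offset := 7 // first byte of "name", i.e. line 2, column 1
	p := f.Pos(offset)
	fmt.Println(fset.Position(p)) // example.gcfg:2:1

	// The compact encoding is just int(p) == base + offset.
	fmt.Println(int(p) == f.Base()+offset) // true
}
```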
-// -func (s *FileSet) Iterate(f func(*File) bool) { - for i := 0; ; i++ { - var file *File - s.mutex.RLock() - if i < len(s.files) { - file = s.files[i] - } - s.mutex.RUnlock() - if file == nil || !f(file) { - break - } - } -} - -func searchFiles(a []*File, x int) int { - return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1 -} - -func (s *FileSet) file(p Pos) *File { - // common case: p is in last file - if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size { - return f - } - // p is not in last file - search all files - if i := searchFiles(s.files, int(p)); i >= 0 { - f := s.files[i] - // f.base <= int(p) by definition of searchFiles - if int(p) <= f.base+f.size { - s.last = f - return f - } - } - return nil -} - -// File returns the file that contains the position p. -// If no such file is found (for instance for p == NoPos), -// the result is nil. -// -func (s *FileSet) File(p Pos) (f *File) { - if p != NoPos { - s.mutex.RLock() - f = s.file(p) - s.mutex.RUnlock() - } - return -} - -// Position converts a Pos in the fileset into a general Position. -func (s *FileSet) Position(p Pos) (pos Position) { - if p != NoPos { - s.mutex.RLock() - if f := s.file(p); f != nil { - pos = f.position(p) - } - s.mutex.RUnlock() - } - return -} - -// ----------------------------------------------------------------------------- -// Helper functions - -func searchInts(a []int, x int) int { - // This function body is a manually inlined version of: - // - // return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1 - // - // With better compiler optimizations, this may not be needed in the - // future, but at the moment this change improves the go/printer - // benchmark performance by ~30%. This has a direct impact on the - // speed of gofmt and thus seems worthwhile (2011-04-29). - // TODO(gri): Remove this when compilers have caught up. - i, j := 0, len(a) - for i < j { - h := i + (j-i)/2 // avoid overflow when computing h - // i ≤ h < j - if a[h] <= x { - i = h + 1 - } else { - j = h - } - } - return i - 1 -} diff --git a/awsproviderlint/vendor/github.com/go-git/gcfg/token/serialize.go b/awsproviderlint/vendor/github.com/go-git/gcfg/token/serialize.go deleted file mode 100644 index 4adc8f9e334..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/gcfg/token/serialize.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package token - -type serializedFile struct { - // fields correspond 1:1 to fields with same (lower-case) name in File - Name string - Base int - Size int - Lines []int - Infos []lineInfo -} - -type serializedFileSet struct { - Base int - Files []serializedFile -} - -// Read calls decode to deserialize a file set into s; s must not be nil. -func (s *FileSet) Read(decode func(interface{}) error) error { - var ss serializedFileSet - if err := decode(&ss); err != nil { - return err - } - - s.mutex.Lock() - s.base = ss.Base - files := make([]*File, len(ss.Files)) - for i := 0; i < len(ss.Files); i++ { - f := &ss.Files[i] - files[i] = &File{s, f.Name, f.Base, f.Size, f.Lines, f.Infos} - } - s.files = files - s.last = nil - s.mutex.Unlock() - - return nil -} - -// Write calls encode to serialize the file set s. 
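`Read` and `Write` deliberately take encode/decode callbacks rather than a concrete stream, so any method shaped like `Encode(interface{}) error` fits; `encoding/gob` is the intended match (the `lineInfo` fields are exported for exactly that reason). A sketch round-tripping a file set through gob:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"

	"github.com/go-git/gcfg/token"
)

func main() {
	fset := token.NewFileSet()
	fset.AddFile("a.gcfg", fset.Base(), 20)

	// Serialize: Write hands its serialized form to our encode callback.
	var buf bytes.Buffer
	if err := fset.Write(gob.NewEncoder(&buf).Encode); err != nil {
		panic(err)
	}

	// Deserialize into a fresh set with the matching decode callback.
	restored := token.NewFileSet()
	if err := restored.Read(gob.NewDecoder(&buf).Decode); err != nil {
		panic(err)
	}
	fmt.Println(restored.Base() == fset.Base()) // true
}
```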
-func (s *FileSet) Write(encode func(interface{}) error) error { - var ss serializedFileSet - - s.mutex.Lock() - ss.Base = s.base - files := make([]serializedFile, len(s.files)) - for i, f := range s.files { - files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos} - } - ss.Files = files - s.mutex.Unlock() - - return encode(ss) -} diff --git a/awsproviderlint/vendor/github.com/go-git/gcfg/token/token.go b/awsproviderlint/vendor/github.com/go-git/gcfg/token/token.go deleted file mode 100644 index b3c7c83fa9e..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/gcfg/token/token.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package token defines constants representing the lexical tokens of the gcfg -// configuration syntax and basic operations on tokens (printing, predicates). -// -// Note that the API for the token package may change to accommodate new -// features or implementation changes in gcfg. -// -package token - -import "strconv" - -// Token is the set of lexical tokens of the gcfg configuration syntax. -type Token int - -// The list of tokens. -const ( - // Special tokens - ILLEGAL Token = iota - EOF - COMMENT - - literal_beg - // Identifiers and basic type literals - // (these tokens stand for classes of literals) - IDENT // section-name, variable-name - STRING // "subsection-name", variable value - literal_end - - operator_beg - // Operators and delimiters - ASSIGN // = - LBRACK // [ - RBRACK // ] - EOL // \n - operator_end -) - -var tokens = [...]string{ - ILLEGAL: "ILLEGAL", - - EOF: "EOF", - COMMENT: "COMMENT", - - IDENT: "IDENT", - STRING: "STRING", - - ASSIGN: "=", - LBRACK: "[", - RBRACK: "]", - EOL: "\n", -} - -// String returns the string corresponding to the token tok. -// For operators and delimiters, the string is the actual token character -// sequence (e.g., for the token ASSIGN, the string is "="). For all other -// tokens the string corresponds to the token constant name (e.g. for the -// token IDENT, the string is "IDENT"). -// -func (tok Token) String() string { - s := "" - if 0 <= tok && tok < Token(len(tokens)) { - s = tokens[tok] - } - if s == "" { - s = "token(" + strconv.Itoa(int(tok)) + ")" - } - return s -} - -// Predicates - -// IsLiteral returns true for tokens corresponding to identifiers -// and basic type literals; it returns false otherwise. -// -func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end } - -// IsOperator returns true for tokens corresponding to operators and -// delimiters; it returns false otherwise. -// -func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end } diff --git a/awsproviderlint/vendor/github.com/go-git/gcfg/types/bool.go b/awsproviderlint/vendor/github.com/go-git/gcfg/types/bool.go deleted file mode 100644 index 8dcae0d8cfd..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/gcfg/types/bool.go +++ /dev/null @@ -1,23 +0,0 @@ -package types - -// BoolValues defines the name and value mappings for ParseBool. -var BoolValues = map[string]interface{}{ - "true": true, "yes": true, "on": true, "1": true, - "false": false, "no": false, "off": false, "0": false, -} - -var boolParser = func() *EnumParser { - ep := &EnumParser{} - ep.AddVals(BoolValues) - return ep -}() - -// ParseBool parses bool values according to the definitions in BoolValues. -// Parsing is case-insensitive. 
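Given the `BoolValues` mappings above, `ParseBool` accepts eight spellings in any case. A quick sketch:

```go
package main

import (
	"fmt"

	"github.com/go-git/gcfg/types"
)

func main() {
	// Matching is case-insensitive, so "YES" parses the same as "yes".
	for _, s := range []string{"true", "YES", "on", "0"} {
		v, err := types.ParseBool(s)
		fmt.Println(s, v, err) // true/YES/on -> true, 0 -> false, err == nil
	}

	// Spellings outside BoolValues fail with a descriptive error.
	_, err := types.ParseBool("maybe")
	fmt.Println(err != nil) // true
}
```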
-func ParseBool(s string) (bool, error) { - v, err := boolParser.Parse(s) - if err != nil { - return false, err - } - return v.(bool), nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/gcfg/types/doc.go b/awsproviderlint/vendor/github.com/go-git/gcfg/types/doc.go deleted file mode 100644 index 9f9c345f6ea..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/gcfg/types/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package types defines helpers for type conversions. -// -// The API for this package is not finalized yet. -package types diff --git a/awsproviderlint/vendor/github.com/go-git/gcfg/types/enum.go b/awsproviderlint/vendor/github.com/go-git/gcfg/types/enum.go deleted file mode 100644 index 1a0c7ef453d..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/gcfg/types/enum.go +++ /dev/null @@ -1,44 +0,0 @@ -package types - -import ( - "fmt" - "reflect" - "strings" -) - -// EnumParser parses "enum" values; i.e. a predefined set of strings to -// predefined values. -type EnumParser struct { - Type string // type name; if not set, use type of first value added - CaseMatch bool // if true, matching of strings is case-sensitive - // PrefixMatch bool - vals map[string]interface{} -} - -// AddVals adds strings and values to an EnumParser. -func (ep *EnumParser) AddVals(vals map[string]interface{}) { - if ep.vals == nil { - ep.vals = make(map[string]interface{}) - } - for k, v := range vals { - if ep.Type == "" { - ep.Type = reflect.TypeOf(v).Name() - } - if !ep.CaseMatch { - k = strings.ToLower(k) - } - ep.vals[k] = v - } -} - -// Parse parses the string and returns the value or an error. -func (ep EnumParser) Parse(s string) (interface{}, error) { - if !ep.CaseMatch { - s = strings.ToLower(s) - } - v, ok := ep.vals[s] - if !ok { - return false, fmt.Errorf("failed to parse %s %#q", ep.Type, s) - } - return v, nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/gcfg/types/int.go b/awsproviderlint/vendor/github.com/go-git/gcfg/types/int.go deleted file mode 100644 index af7e75c1250..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/gcfg/types/int.go +++ /dev/null @@ -1,86 +0,0 @@ -package types - -import ( - "fmt" - "strings" -) - -// An IntMode is a mode for parsing integer values, representing a set of -// accepted bases. -type IntMode uint8 - -// IntMode values for ParseInt; can be combined using binary or. -const ( - Dec IntMode = 1 << iota - Hex - Oct -) - -// String returns a string representation of IntMode; e.g. `IntMode(Dec|Hex)`. -func (m IntMode) String() string { - var modes []string - if m&Dec != 0 { - modes = append(modes, "Dec") - } - if m&Hex != 0 { - modes = append(modes, "Hex") - } - if m&Oct != 0 { - modes = append(modes, "Oct") - } - return "IntMode(" + strings.Join(modes, "|") + ")" -} - -var errIntAmbig = fmt.Errorf("ambiguous integer value; must include '0' prefix") - -func prefix0(val string) bool { - return strings.HasPrefix(val, "0") || strings.HasPrefix(val, "-0") -} - -func prefix0x(val string) bool { - return strings.HasPrefix(val, "0x") || strings.HasPrefix(val, "-0x") -} - -// ParseInt parses val using mode into intptr, which must be a pointer to an -// integer kind type. Non-decimal value require prefix `0` or `0x` in the cases -// when mode permits ambiguity of base; otherwise the prefix can be omitted. 
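The mode rules are easiest to see with a few concrete calls. A sketch using the `IntMode` constants defined in this file:

```go
package main

import (
	"fmt"

	"github.com/go-git/gcfg/types"
)

func parse(val string, mode types.IntMode) {
	var n int
	err := types.ParseInt(&n, val, mode)
	fmt.Printf("%-6s %-18v -> n=%d err=%v\n", val, mode, n, err)
}

func main() {
	parse("16", types.Dec|types.Hex)   // plain decimal             -> 16
	parse("0x10", types.Dec|types.Hex) // 0x prefix disambiguates   -> 16
	parse("010", types.Dec|types.Oct)  // 0 prefix selects octal    -> 8
	parse("10", types.Hex|types.Oct)   // ambiguous without prefix  -> error
}
```

The last call returns `errIntAmbig`: with both `Hex` and `Oct` permitted and no `0` prefix, there is no way to pick a base.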
-func ParseInt(intptr interface{}, val string, mode IntMode) error { - val = strings.TrimSpace(val) - verb := byte(0) - switch mode { - case Dec: - verb = 'd' - case Dec + Hex: - if prefix0x(val) { - verb = 'v' - } else { - verb = 'd' - } - case Dec + Oct: - if prefix0(val) && !prefix0x(val) { - verb = 'v' - } else { - verb = 'd' - } - case Dec + Hex + Oct: - verb = 'v' - case Hex: - if prefix0x(val) { - verb = 'v' - } else { - verb = 'x' - } - case Oct: - verb = 'o' - case Hex + Oct: - if prefix0(val) { - verb = 'v' - } else { - return errIntAmbig - } - } - if verb == 0 { - panic("unsupported mode") - } - return ScanFully(intptr, val, verb) -} diff --git a/awsproviderlint/vendor/github.com/go-git/gcfg/types/scan.go b/awsproviderlint/vendor/github.com/go-git/gcfg/types/scan.go deleted file mode 100644 index db2f6ed3caf..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/gcfg/types/scan.go +++ /dev/null @@ -1,23 +0,0 @@ -package types - -import ( - "fmt" - "io" - "reflect" -) - -// ScanFully uses fmt.Sscanf with verb to fully scan val into ptr. -func ScanFully(ptr interface{}, val string, verb byte) error { - t := reflect.ValueOf(ptr).Elem().Type() - // attempt to read extra bytes to make sure the value is consumed - var b []byte - n, err := fmt.Sscanf(val, "%"+string(verb)+"%s", ptr, &b) - switch { - case n < 1 || n == 1 && err != io.EOF: - return fmt.Errorf("failed to parse %q as %v: %v", val, t, err) - case n > 1: - return fmt.Errorf("failed to parse %q as %v: extra characters %q", val, t, string(b)) - } - // n == 1 && err == io.EOF - return nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/.gitignore b/awsproviderlint/vendor/github.com/go-git/go-billy/v5/.gitignore deleted file mode 100644 index 7aeb46699cd..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/coverage.txt -/vendor -Gopkg.lock -Gopkg.toml diff --git a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/LICENSE b/awsproviderlint/vendor/github.com/go-git/go-billy/v5/LICENSE deleted file mode 100644 index 9d60756894a..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2017 Sourced Technologies S.L. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/README.md b/awsproviderlint/vendor/github.com/go-git/go-billy/v5/README.md deleted file mode 100644 index ca58b1c8ae1..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# go-billy [![GoDoc](https://godoc.org/gopkg.in/go-git/go-billy.v5?status.svg)](https://pkg.go.dev/github.com/go-git/go-billy) [![Test](https://github.com/go-git/go-billy/workflows/Test/badge.svg)](https://github.com/go-git/go-billy/actions?query=workflow%3ATest) - -The missing interface filesystem abstraction for Go. -Billy implements an interface based on the `os` standard library, allowing to develop applications without dependency on the underlying storage. Makes it virtually free to implement mocks and testing over filesystem operations. - -Billy was born as part of [go-git/go-git](https://github.com/go-git/go-git) project. 
- -## Installation - -```go -import "github.com/go-git/go-billy/v5" // with go modules enabled (GO111MODULE=on or outside GOPATH) -import "github.com/go-git/go-billy" // with go modules disabled -``` - -## Usage - -Billy exposes filesystems using the -[`Filesystem` interface](https://pkg.go.dev/github.com/go-git/go-billy/v5?tab=doc#Filesystem). -Each filesystem implementation gives you a `New` method, whose arguments depend on -the implementation itself, that returns a new `Filesystem`. - -The following example caches in memory all readable files in a directory from any -billy's filesystem implementation. - -```go -func LoadToMemory(origin billy.Filesystem, path string) (*memory.Memory, error) { - memory := memory.New() - - files, err := origin.ReadDir("/") - if err != nil { - return nil, err - } - - for _, file := range files { - if file.IsDir() { - continue - } - - src, err := origin.Open(file.Name()) - if err != nil { - return nil, err - } - - dst, err := memory.Create(file.Name()) - if err != nil { - return nil, err - } - - if _, err = io.Copy(dst, src); err != nil { - return nil, err - } - - if err := dst.Close(); err != nil { - return nil, err - } - - if err := src.Close(); err != nil { - return nil, err - } - } - - return memory, nil -} -``` - -## Why billy? - -The library billy deals with storage systems and Billy is the name of a well-known, IKEA -bookcase. That's it. - -## License - -Apache License Version 2.0, see [LICENSE](LICENSE) diff --git a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/fs.go b/awsproviderlint/vendor/github.com/go-git/go-billy/v5/fs.go deleted file mode 100644 index a9efccdeb2f..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/fs.go +++ /dev/null @@ -1,202 +0,0 @@ -package billy - -import ( - "errors" - "io" - "os" - "time" -) - -var ( - ErrReadOnly = errors.New("read-only filesystem") - ErrNotSupported = errors.New("feature not supported") - ErrCrossedBoundary = errors.New("chroot boundary crossed") -) - -// Capability holds the supported features of a billy filesystem. This does -// not mean that the capability has to be supported by the underlying storage. -// For example, a billy filesystem may support WriteCapability but the -// storage be mounted in read only mode. -type Capability uint64 - -const ( - // WriteCapability means that the fs is writable. - WriteCapability Capability = 1 << iota - // ReadCapability means that the fs is readable. - ReadCapability - // ReadAndWriteCapability is the ability to open a file in read and write mode. - ReadAndWriteCapability - // SeekCapability means it is able to move position inside the file. - SeekCapability - // TruncateCapability means that a file can be truncated. - TruncateCapability - // LockCapability is the ability to lock a file. - LockCapability - - // DefaultCapabilities lists all capable features supported by filesystems - // without Capability interface. This list should not be changed until a - // major version is released. - DefaultCapabilities Capability = WriteCapability | ReadCapability | - ReadAndWriteCapability | SeekCapability | TruncateCapability | - LockCapability - - // AllCapabilities lists all capable features. - AllCapabilities Capability = WriteCapability | ReadCapability | - ReadAndWriteCapability | SeekCapability | TruncateCapability | - LockCapability -) - -// Filesystem abstract the operations in a storage-agnostic interface. -// Each method implementation mimics the behavior of the equivalent functions -// at the os package from the standard library. 
-type Filesystem interface { - Basic - TempFile - Dir - Symlink - Chroot -} - -// Basic abstract the basic operations in a storage-agnostic interface as -// an extension to the Basic interface. -type Basic interface { - // Create creates the named file with mode 0666 (before umask), truncating - // it if it already exists. If successful, methods on the returned File can - // be used for I/O; the associated file descriptor has mode O_RDWR. - Create(filename string) (File, error) - // Open opens the named file for reading. If successful, methods on the - // returned file can be used for reading; the associated file descriptor has - // mode O_RDONLY. - Open(filename string) (File, error) - // OpenFile is the generalized open call; most users will use Open or Create - // instead. It opens the named file with specified flag (O_RDONLY etc.) and - // perm, (0666 etc.) if applicable. If successful, methods on the returned - // File can be used for I/O. - OpenFile(filename string, flag int, perm os.FileMode) (File, error) - // Stat returns a FileInfo describing the named file. - Stat(filename string) (os.FileInfo, error) - // Rename renames (moves) oldpath to newpath. If newpath already exists and - // is not a directory, Rename replaces it. OS-specific restrictions may - // apply when oldpath and newpath are in different directories. - Rename(oldpath, newpath string) error - // Remove removes the named file or directory. - Remove(filename string) error - // Join joins any number of path elements into a single path, adding a - // Separator if necessary. Join calls filepath.Clean on the result; in - // particular, all empty strings are ignored. On Windows, the result is a - // UNC path if and only if the first path element is a UNC path. - Join(elem ...string) string -} - -type TempFile interface { - // TempFile creates a new temporary file in the directory dir with a name - // beginning with prefix, opens the file for reading and writing, and - // returns the resulting *os.File. If dir is the empty string, TempFile - // uses the default directory for temporary files (see os.TempDir). - // Multiple programs calling TempFile simultaneously will not choose the - // same file. The caller can use f.Name() to find the pathname of the file. - // It is the caller's responsibility to remove the file when no longer - // needed. - TempFile(dir, prefix string) (File, error) -} - -// Dir abstract the dir related operations in a storage-agnostic interface as -// an extension to the Basic interface. -type Dir interface { - // ReadDir reads the directory named by dirname and returns a list of - // directory entries sorted by filename. - ReadDir(path string) ([]os.FileInfo, error) - // MkdirAll creates a directory named path, along with any necessary - // parents, and returns nil, or else returns an error. The permission bits - // perm are used for all directories that MkdirAll creates. If path is/ - // already a directory, MkdirAll does nothing and returns nil. - MkdirAll(filename string, perm os.FileMode) error -} - -// Symlink abstract the symlink related operations in a storage-agnostic -// interface as an extension to the Basic interface. -type Symlink interface { - // Lstat returns a FileInfo describing the named file. If the file is a - // symbolic link, the returned FileInfo describes the symbolic link. Lstat - // makes no attempt to follow the link. - Lstat(filename string) (os.FileInfo, error) - // Symlink creates a symbolic-link from link to target. 
target may be an - // absolute or relative path, and need not refer to an existing node. - // Parent directories of link are created as necessary. - Symlink(target, link string) error - // Readlink returns the target path of link. - Readlink(link string) (string, error) -} - -// Change abstract the FileInfo change related operations in a storage-agnostic -// interface as an extension to the Basic interface -type Change interface { - // Chmod changes the mode of the named file to mode. If the file is a - // symbolic link, it changes the mode of the link's target. - Chmod(name string, mode os.FileMode) error - // Lchown changes the numeric uid and gid of the named file. If the file is - // a symbolic link, it changes the uid and gid of the link itself. - Lchown(name string, uid, gid int) error - // Chown changes the numeric uid and gid of the named file. If the file is a - // symbolic link, it changes the uid and gid of the link's target. - Chown(name string, uid, gid int) error - // Chtimes changes the access and modification times of the named file, - // similar to the Unix utime() or utimes() functions. - // - // The underlying filesystem may truncate or round the values to a less - // precise time unit. - Chtimes(name string, atime time.Time, mtime time.Time) error -} - -// Chroot abstract the chroot related operations in a storage-agnostic interface -// as an extension to the Basic interface. -type Chroot interface { - // Chroot returns a new filesystem from the same type where the new root is - // the given path. Files outside of the designated directory tree cannot be - // accessed. - Chroot(path string) (Filesystem, error) - // Root returns the root path of the filesystem. - Root() string -} - -// File represent a file, being a subset of the os.File -type File interface { - // Name returns the name of the file as presented to Open. - Name() string - io.Writer - io.Reader - io.ReaderAt - io.Seeker - io.Closer - // Lock locks the file like e.g. flock. It protects against access from - // other processes. - Lock() error - // Unlock unlocks the file. - Unlock() error - // Truncate the file. - Truncate(size int64) error -} - -// Capable interface can return the available features of a filesystem. -type Capable interface { - // Capabilities returns the capabilities of a filesystem in bit flags. - Capabilities() Capability -} - -// Capabilities returns the features supported by a filesystem. If the FS -// does not implement Capable interface it returns all features. -func Capabilities(fs Basic) Capability { - capable, ok := fs.(Capable) - if !ok { - return DefaultCapabilities - } - - return capable.Capabilities() -} - -// CapabilityCheck tests the filesystem for the provided capabilities and -// returns true in case it supports all of them. 
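Capability negotiation is a single bit test. The sketch below gates a truncate-under-lock sequence on `CapabilityCheck`, using the `osfs` implementation that appears later in this patch; the path is illustrative:

```go
package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5"
	"github.com/go-git/go-billy/v5/osfs"
)

func main() {
	fs := osfs.New("/tmp/billy-example") // illustrative base directory

	// osfs advertises DefaultCapabilities, so this passes; a read-only
	// or network-backed implementation could legitimately refuse.
	need := billy.TruncateCapability | billy.LockCapability
	if !billy.CapabilityCheck(fs, need) {
		fmt.Println("filesystem cannot truncate/lock; falling back")
		return
	}

	f, err := fs.Create("state.txt")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if err := f.Lock(); err != nil { // flock/LockFileEx under the hood
		panic(err)
	}
	defer f.Unlock()

	if err := f.Truncate(0); err != nil {
		panic(err)
	}
	fmt.Println("truncated under lock:", f.Name())
}
```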
-func CapabilityCheck(fs Basic, capabilities Capability) bool { - fsCaps := Capabilities(fs) - return fsCaps&capabilities == capabilities -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/go.mod b/awsproviderlint/vendor/github.com/go-git/go-billy/v5/go.mod deleted file mode 100644 index 78ce0af2a57..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/go.mod +++ /dev/null @@ -1,10 +0,0 @@ -module github.com/go-git/go-billy/v5 - -require ( - github.com/kr/text v0.2.0 // indirect - github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect - golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 - gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f -) - -go 1.13 diff --git a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/go.sum b/awsproviderlint/vendor/github.com/go-git/go-billy/v5/go.sum deleted file mode 100644 index cdc052bc7e5..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/go.sum +++ /dev/null @@ -1,14 +0,0 @@ -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/go-git/go-billy v1.0.0 h1:bXR6Zu3opPSg0R4dDxqaLglY4rxw7ja7wS16qSpOKL4= -github.com/go-git/go-billy v3.1.0+incompatible h1:dwrJ8G2Jt1srYgIJs+lRjA36qBY68O2Lg5idKG8ef5M= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/helper/chroot/chroot.go b/awsproviderlint/vendor/github.com/go-git/go-billy/v5/helper/chroot/chroot.go deleted file mode 100644 index 8b44e784bd7..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/helper/chroot/chroot.go +++ /dev/null @@ -1,242 +0,0 @@ -package chroot - -import ( - "os" - "path/filepath" - "strings" - - "github.com/go-git/go-billy/v5" - "github.com/go-git/go-billy/v5/helper/polyfill" -) - -// ChrootHelper is a helper to implement billy.Chroot. -type ChrootHelper struct { - underlying billy.Filesystem - base string -} - -// New creates a new filesystem wrapping up the given 'fs'. -// The created filesystem has its base in the given ChrootHelperectory of the -// underlying filesystem. 
-func New(fs billy.Basic, base string) billy.Filesystem { - return &ChrootHelper{ - underlying: polyfill.New(fs), - base: base, - } -} - -func (fs *ChrootHelper) underlyingPath(filename string) (string, error) { - if isCrossBoundaries(filename) { - return "", billy.ErrCrossedBoundary - } - - return fs.Join(fs.Root(), filename), nil -} - -func isCrossBoundaries(path string) bool { - path = filepath.ToSlash(path) - path = filepath.Clean(path) - - return strings.HasPrefix(path, ".."+string(filepath.Separator)) -} - -func (fs *ChrootHelper) Create(filename string) (billy.File, error) { - fullpath, err := fs.underlyingPath(filename) - if err != nil { - return nil, err - } - - f, err := fs.underlying.Create(fullpath) - if err != nil { - return nil, err - } - - return newFile(fs, f, filename), nil -} - -func (fs *ChrootHelper) Open(filename string) (billy.File, error) { - fullpath, err := fs.underlyingPath(filename) - if err != nil { - return nil, err - } - - f, err := fs.underlying.Open(fullpath) - if err != nil { - return nil, err - } - - return newFile(fs, f, filename), nil -} - -func (fs *ChrootHelper) OpenFile(filename string, flag int, mode os.FileMode) (billy.File, error) { - fullpath, err := fs.underlyingPath(filename) - if err != nil { - return nil, err - } - - f, err := fs.underlying.OpenFile(fullpath, flag, mode) - if err != nil { - return nil, err - } - - return newFile(fs, f, filename), nil -} - -func (fs *ChrootHelper) Stat(filename string) (os.FileInfo, error) { - fullpath, err := fs.underlyingPath(filename) - if err != nil { - return nil, err - } - - return fs.underlying.Stat(fullpath) -} - -func (fs *ChrootHelper) Rename(from, to string) error { - var err error - from, err = fs.underlyingPath(from) - if err != nil { - return err - } - - to, err = fs.underlyingPath(to) - if err != nil { - return err - } - - return fs.underlying.Rename(from, to) -} - -func (fs *ChrootHelper) Remove(path string) error { - fullpath, err := fs.underlyingPath(path) - if err != nil { - return err - } - - return fs.underlying.Remove(fullpath) -} - -func (fs *ChrootHelper) Join(elem ...string) string { - return fs.underlying.Join(elem...) 
-} - -func (fs *ChrootHelper) TempFile(dir, prefix string) (billy.File, error) { - fullpath, err := fs.underlyingPath(dir) - if err != nil { - return nil, err - } - - f, err := fs.underlying.(billy.TempFile).TempFile(fullpath, prefix) - if err != nil { - return nil, err - } - - return newFile(fs, f, fs.Join(dir, filepath.Base(f.Name()))), nil -} - -func (fs *ChrootHelper) ReadDir(path string) ([]os.FileInfo, error) { - fullpath, err := fs.underlyingPath(path) - if err != nil { - return nil, err - } - - return fs.underlying.(billy.Dir).ReadDir(fullpath) -} - -func (fs *ChrootHelper) MkdirAll(filename string, perm os.FileMode) error { - fullpath, err := fs.underlyingPath(filename) - if err != nil { - return err - } - - return fs.underlying.(billy.Dir).MkdirAll(fullpath, perm) -} - -func (fs *ChrootHelper) Lstat(filename string) (os.FileInfo, error) { - fullpath, err := fs.underlyingPath(filename) - if err != nil { - return nil, err - } - - return fs.underlying.(billy.Symlink).Lstat(fullpath) -} - -func (fs *ChrootHelper) Symlink(target, link string) error { - target = filepath.FromSlash(target) - - // only rewrite target if it's already absolute - if filepath.IsAbs(target) || strings.HasPrefix(target, string(filepath.Separator)) { - target = fs.Join(fs.Root(), target) - target = filepath.Clean(filepath.FromSlash(target)) - } - - link, err := fs.underlyingPath(link) - if err != nil { - return err - } - - return fs.underlying.(billy.Symlink).Symlink(target, link) -} - -func (fs *ChrootHelper) Readlink(link string) (string, error) { - fullpath, err := fs.underlyingPath(link) - if err != nil { - return "", err - } - - target, err := fs.underlying.(billy.Symlink).Readlink(fullpath) - if err != nil { - return "", err - } - - if !filepath.IsAbs(target) && !strings.HasPrefix(target, string(filepath.Separator)) { - return target, nil - } - - target, err = filepath.Rel(fs.base, target) - if err != nil { - return "", err - } - - return string(os.PathSeparator) + target, nil -} - -func (fs *ChrootHelper) Chroot(path string) (billy.Filesystem, error) { - fullpath, err := fs.underlyingPath(path) - if err != nil { - return nil, err - } - - return New(fs.underlying, fullpath), nil -} - -func (fs *ChrootHelper) Root() string { - return fs.base -} - -func (fs *ChrootHelper) Underlying() billy.Basic { - return fs.underlying -} - -// Capabilities implements the Capable interface. -func (fs *ChrootHelper) Capabilities() billy.Capability { - return billy.Capabilities(fs.underlying) -} - -type file struct { - billy.File - name string -} - -func newFile(fs billy.Filesystem, f billy.File, filename string) billy.File { - filename = fs.Join(fs.Root(), filename) - filename, _ = filepath.Rel(fs.Root(), filename) - - return &file{ - File: f, - name: filename, - } -} - -func (f *file) Name() string { - return f.name -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/helper/polyfill/polyfill.go b/awsproviderlint/vendor/github.com/go-git/go-billy/v5/helper/polyfill/polyfill.go deleted file mode 100644 index 1efce0e7b8f..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/helper/polyfill/polyfill.go +++ /dev/null @@ -1,105 +0,0 @@ -package polyfill - -import ( - "os" - "path/filepath" - - "github.com/go-git/go-billy/v5" -) - -// Polyfill is a helper that implements all missing method from billy.Filesystem. 
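Before moving on to the polyfill helper, note that the whole chroot helper reduces to the `underlyingPath` translation plus the boundary check. A sketch of the observable behavior (paths illustrative; `osfs` is defined later in this patch):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/go-git/go-billy/v5"
	"github.com/go-git/go-billy/v5/helper/chroot"
	"github.com/go-git/go-billy/v5/osfs"
)

func main() {
	// Files created through jail land under /tmp/jail on the real disk.
	jail := chroot.New(osfs.New("/"), "/tmp/jail")

	f, err := jail.Create("inside.txt") // really /tmp/jail/inside.txt
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Name()) // inside.txt (names stay relative to the new root)
	f.Close()

	// Escaping the base is rejected before touching the underlying fs.
	_, err = jail.Open("../etc/passwd")
	fmt.Println(errors.Is(err, billy.ErrCrossedBoundary)) // true
}
```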
-type Polyfill struct { - billy.Basic - c capabilities -} - -type capabilities struct{ tempfile, dir, symlink, chroot bool } - -// New creates a new filesystem wrapping up 'fs' the intercepts all the calls -// made and errors if fs doesn't implement any of the billy interfaces. -func New(fs billy.Basic) billy.Filesystem { - if original, ok := fs.(billy.Filesystem); ok { - return original - } - - h := &Polyfill{Basic: fs} - - _, h.c.tempfile = h.Basic.(billy.TempFile) - _, h.c.dir = h.Basic.(billy.Dir) - _, h.c.symlink = h.Basic.(billy.Symlink) - _, h.c.chroot = h.Basic.(billy.Chroot) - return h -} - -func (h *Polyfill) TempFile(dir, prefix string) (billy.File, error) { - if !h.c.tempfile { - return nil, billy.ErrNotSupported - } - - return h.Basic.(billy.TempFile).TempFile(dir, prefix) -} - -func (h *Polyfill) ReadDir(path string) ([]os.FileInfo, error) { - if !h.c.dir { - return nil, billy.ErrNotSupported - } - - return h.Basic.(billy.Dir).ReadDir(path) -} - -func (h *Polyfill) MkdirAll(filename string, perm os.FileMode) error { - if !h.c.dir { - return billy.ErrNotSupported - } - - return h.Basic.(billy.Dir).MkdirAll(filename, perm) -} - -func (h *Polyfill) Symlink(target, link string) error { - if !h.c.symlink { - return billy.ErrNotSupported - } - - return h.Basic.(billy.Symlink).Symlink(target, link) -} - -func (h *Polyfill) Readlink(link string) (string, error) { - if !h.c.symlink { - return "", billy.ErrNotSupported - } - - return h.Basic.(billy.Symlink).Readlink(link) -} - -func (h *Polyfill) Lstat(path string) (os.FileInfo, error) { - if !h.c.symlink { - return nil, billy.ErrNotSupported - } - - return h.Basic.(billy.Symlink).Lstat(path) -} - -func (h *Polyfill) Chroot(path string) (billy.Filesystem, error) { - if !h.c.chroot { - return nil, billy.ErrNotSupported - } - - return h.Basic.(billy.Chroot).Chroot(path) -} - -func (h *Polyfill) Root() string { - if !h.c.chroot { - return string(filepath.Separator) - } - - return h.Basic.(billy.Chroot).Root() -} - -func (h *Polyfill) Underlying() billy.Basic { - return h.Basic -} - -// Capabilities implements the Capable interface. -func (h *Polyfill) Capabilities() billy.Capability { - return billy.Capabilities(h.Basic) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/osfs/os.go b/awsproviderlint/vendor/github.com/go-git/go-billy/v5/osfs/os.go deleted file mode 100644 index 880389fff2f..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/osfs/os.go +++ /dev/null @@ -1,139 +0,0 @@ -// Package osfs provides a billy filesystem for the OS. -package osfs // import "github.com/go-git/go-billy/v5/osfs" - -import ( - "io/ioutil" - "os" - "path/filepath" - "sync" - - "github.com/go-git/go-billy/v5" - "github.com/go-git/go-billy/v5/helper/chroot" -) - -const ( - defaultDirectoryMode = 0755 - defaultCreateMode = 0666 -) - -// OS is a filesystem based on the os filesystem. -type OS struct{} - -// New returns a new OS filesystem. 
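Stepping back to `Polyfill` above before the OS implementation: wrapping a `billy.Basic`-only value yields a full `billy.Filesystem` whose extension methods fail cleanly instead of panicking. A sketch with a deliberately minimal stub (`basicOnly` is invented for illustration):

```go
package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/go-git/go-billy/v5"
	"github.com/go-git/go-billy/v5/helper/polyfill"
)

// basicOnly implements billy.Basic and nothing else.
type basicOnly struct{}

func (basicOnly) Create(string) (billy.File, error) { return nil, errors.New("not implemented") }
func (basicOnly) Open(string) (billy.File, error)   { return nil, errors.New("not implemented") }
func (basicOnly) OpenFile(string, int, os.FileMode) (billy.File, error) {
	return nil, errors.New("not implemented")
}
func (basicOnly) Stat(string) (os.FileInfo, error) { return nil, errors.New("not implemented") }
func (basicOnly) Rename(string, string) error      { return errors.New("not implemented") }
func (basicOnly) Remove(string) error               { return errors.New("not implemented") }
func (basicOnly) Join(elem ...string) string        { return "" }

func main() {
	fs := polyfill.New(basicOnly{}) // now a full billy.Filesystem

	// Extension methods exist but report ErrNotSupported...
	_, err := fs.ReadDir("/")
	fmt.Println(errors.Is(err, billy.ErrNotSupported)) // true

	// ...and Root falls back to the path separator ("/" on Unix).
	fmt.Println(fs.Root())
}
```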
-func New(baseDir string) billy.Filesystem { - return chroot.New(&OS{}, baseDir) -} - -func (fs *OS) Create(filename string) (billy.File, error) { - return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, defaultCreateMode) -} - -func (fs *OS) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) { - if flag&os.O_CREATE != 0 { - if err := fs.createDir(filename); err != nil { - return nil, err - } - } - - f, err := os.OpenFile(filename, flag, perm) - if err != nil { - return nil, err - } - return &file{File: f}, err -} - -func (fs *OS) createDir(fullpath string) error { - dir := filepath.Dir(fullpath) - if dir != "." { - if err := os.MkdirAll(dir, defaultDirectoryMode); err != nil { - return err - } - } - - return nil -} - -func (fs *OS) ReadDir(path string) ([]os.FileInfo, error) { - l, err := ioutil.ReadDir(path) - if err != nil { - return nil, err - } - - var s = make([]os.FileInfo, len(l)) - for i, f := range l { - s[i] = f - } - - return s, nil -} - -func (fs *OS) Rename(from, to string) error { - if err := fs.createDir(to); err != nil { - return err - } - - return rename(from, to) -} - -func (fs *OS) MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, defaultDirectoryMode) -} - -func (fs *OS) Open(filename string) (billy.File, error) { - return fs.OpenFile(filename, os.O_RDONLY, 0) -} - -func (fs *OS) Stat(filename string) (os.FileInfo, error) { - return os.Stat(filename) -} - -func (fs *OS) Remove(filename string) error { - return os.Remove(filename) -} - -func (fs *OS) TempFile(dir, prefix string) (billy.File, error) { - if err := fs.createDir(dir + string(os.PathSeparator)); err != nil { - return nil, err - } - - f, err := ioutil.TempFile(dir, prefix) - if err != nil { - return nil, err - } - return &file{File: f}, nil -} - -func (fs *OS) Join(elem ...string) string { - return filepath.Join(elem...) -} - -func (fs *OS) RemoveAll(path string) error { - return os.RemoveAll(filepath.Clean(path)) -} - -func (fs *OS) Lstat(filename string) (os.FileInfo, error) { - return os.Lstat(filepath.Clean(filename)) -} - -func (fs *OS) Symlink(target, link string) error { - if err := fs.createDir(link); err != nil { - return err - } - - return os.Symlink(target, link) -} - -func (fs *OS) Readlink(link string) (string, error) { - return os.Readlink(link) -} - -// Capabilities implements the Capable interface. -func (fs *OS) Capabilities() billy.Capability { - return billy.DefaultCapabilities -} - -// file is a wrapper for an os.File which adds support for file locking. -type file struct { - *os.File - m sync.Mutex -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go b/awsproviderlint/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go deleted file mode 100644 index fe1eb85df41..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go +++ /dev/null @@ -1,83 +0,0 @@ -package osfs - -import ( - "io" - "os" - "path/filepath" - "syscall" -) - -func (f *file) Lock() error { - // Plan 9 uses a mode bit instead of explicit lock/unlock syscalls. - // - // Per http://man.cat-v.org/plan_9/5/stat: “Exclusive use files may be open - // for I/O by only one fid at a time across all clients of the server. If a - // second open is attempted, it draws an error.” - // - // There is no obvious way to implement this function using the exclusive use bit. - // See https://golang.org/src/cmd/go/internal/lockedfile/lockedfile_plan9.go - // for how file locking is done by the go tool on Plan 9. 
- return nil -} - -func (f *file) Unlock() error { - return nil -} - -func rename(from, to string) error { - // If from and to are in different directories, copy the file - // since Plan 9 does not support cross-directory rename. - if filepath.Dir(from) != filepath.Dir(to) { - fi, err := os.Stat(from) - if err != nil { - return &os.LinkError{"rename", from, to, err} - } - if fi.Mode().IsDir() { - return &os.LinkError{"rename", from, to, syscall.EISDIR} - } - fromFile, err := os.Open(from) - if err != nil { - return &os.LinkError{"rename", from, to, err} - } - toFile, err := os.OpenFile(to, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fi.Mode()) - if err != nil { - return &os.LinkError{"rename", from, to, err} - } - _, err = io.Copy(toFile, fromFile) - if err != nil { - return &os.LinkError{"rename", from, to, err} - } - - // Copy mtime and mode from original file. - // We need only one syscall if we avoid os.Chmod and os.Chtimes. - dir := fi.Sys().(*syscall.Dir) - var d syscall.Dir - d.Null() - d.Mtime = dir.Mtime - d.Mode = dir.Mode - if err = dirwstat(to, &d); err != nil { - return &os.LinkError{"rename", from, to, err} - } - - // Remove original file. - err = os.Remove(from) - if err != nil { - return &os.LinkError{"rename", from, to, err} - } - return nil - } - return os.Rename(from, to) -} - -func dirwstat(name string, d *syscall.Dir) error { - var buf [syscall.STATFIXLEN]byte - - n, err := d.Marshal(buf[:]) - if err != nil { - return &os.PathError{"dirwstat", name, err} - } - if err = syscall.Wstat(name, buf[:n]); err != nil { - return &os.PathError{"dirwstat", name, err} - } - return nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go b/awsproviderlint/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go deleted file mode 100644 index 7645dd52e69..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !plan9,!windows - -package osfs - -import ( - "os" - - "golang.org/x/sys/unix" -) - -func (f *file) Lock() error { - f.m.Lock() - defer f.m.Unlock() - - return unix.Flock(int(f.File.Fd()), unix.LOCK_EX) -} - -func (f *file) Unlock() error { - f.m.Lock() - defer f.m.Unlock() - - return unix.Flock(int(f.File.Fd()), unix.LOCK_UN) -} - -func rename(from, to string) error { - return os.Rename(from, to) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/osfs/os_windows.go b/awsproviderlint/vendor/github.com/go-git/go-billy/v5/osfs/os_windows.go deleted file mode 100644 index 8f5caeb0edd..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/osfs/os_windows.go +++ /dev/null @@ -1,61 +0,0 @@ -// +build windows - -package osfs - -import ( - "os" - "runtime" - "unsafe" - - "golang.org/x/sys/windows" -) - -type fileInfo struct { - os.FileInfo - name string -} - -func (fi *fileInfo) Name() string { - return fi.name -} - -var ( - kernel32DLL = windows.NewLazySystemDLL("kernel32.dll") - lockFileExProc = kernel32DLL.NewProc("LockFileEx") - unlockFileProc = kernel32DLL.NewProc("UnlockFile") -) - -const ( - lockfileExclusiveLock = 0x2 -) - -func (f *file) Lock() error { - f.m.Lock() - defer f.m.Unlock() - - var overlapped windows.Overlapped - // err is always non-nil as per sys/windows semantics. 
- ret, _, err := lockFileExProc.Call(f.File.Fd(), lockfileExclusiveLock, 0, 0xFFFFFFFF, 0, - uintptr(unsafe.Pointer(&overlapped))) - runtime.KeepAlive(&overlapped) - if ret == 0 { - return err - } - return nil -} - -func (f *file) Unlock() error { - f.m.Lock() - defer f.m.Unlock() - - // err is always non-nil as per sys/windows semantics. - ret, _, err := unlockFileProc.Call(f.File.Fd(), 0, 0, 0xFFFFFFFF, 0) - if ret == 0 { - return err - } - return nil -} - -func rename(from, to string) error { - return os.Rename(from, to) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/util/glob.go b/awsproviderlint/vendor/github.com/go-git/go-billy/v5/util/glob.go deleted file mode 100644 index f7cb1de8966..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/util/glob.go +++ /dev/null @@ -1,111 +0,0 @@ -package util - -import ( - "path/filepath" - "sort" - "strings" - - "github.com/go-git/go-billy/v5" -) - -// Glob returns the names of all files matching pattern or nil -// if there is no matching file. The syntax of patterns is the same -// as in Match. The pattern may describe hierarchical names such as -// /usr/*/bin/ed (assuming the Separator is '/'). -// -// Glob ignores file system errors such as I/O errors reading directories. -// The only possible returned error is ErrBadPattern, when pattern -// is malformed. -// -// Function originally from https://golang.org/src/path/filepath/match_test.go -func Glob(fs billy.Filesystem, pattern string) (matches []string, err error) { - if !hasMeta(pattern) { - if _, err = fs.Lstat(pattern); err != nil { - return nil, nil - } - return []string{pattern}, nil - } - - dir, file := filepath.Split(pattern) - // Prevent infinite recursion. See issue 15879. - if dir == pattern { - return nil, filepath.ErrBadPattern - } - - var m []string - m, err = Glob(fs, cleanGlobPath(dir)) - if err != nil { - return - } - for _, d := range m { - matches, err = glob(fs, d, file, matches) - if err != nil { - return - } - } - return -} - -// cleanGlobPath prepares path for glob matching. -func cleanGlobPath(path string) string { - switch path { - case "": - return "." - case string(filepath.Separator): - // do nothing to the path - return path - default: - return path[0 : len(path)-1] // chop off trailing separator - } -} - -// glob searches for files matching pattern in the directory dir -// and appends them to matches. If the directory cannot be -// opened, it returns the existing matches. New matches are -// added in lexicographical order. -func glob(fs billy.Filesystem, dir, pattern string, matches []string) (m []string, e error) { - m = matches - fi, err := fs.Stat(dir) - if err != nil { - return - } - - if !fi.IsDir() { - return - } - - names, _ := readdirnames(fs, dir) - sort.Strings(names) - - for _, n := range names { - matched, err := filepath.Match(pattern, n) - if err != nil { - return m, err - } - if matched { - m = append(m, filepath.Join(dir, n)) - } - } - return -} - -// hasMeta reports whether path contains any of the magic characters -// recognized by Match. -func hasMeta(path string) bool { - // TODO(niemeyer): Should other magic characters be added here? 
- return strings.ContainsAny(path, "*?[") -} - -func readdirnames(fs billy.Filesystem, dir string) ([]string, error) { - files, err := fs.ReadDir(dir) - if err != nil { - return nil, err - } - - var names []string - for _, file := range files { - names = append(names, file.Name()) - } - - return names, nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/util/util.go b/awsproviderlint/vendor/github.com/go-git/go-billy/v5/util/util.go deleted file mode 100644 index 34c1d9e7494..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-billy/v5/util/util.go +++ /dev/null @@ -1,224 +0,0 @@ -package util - -import ( - "io" - "os" - "path/filepath" - "strconv" - "sync" - "time" - - "github.com/go-git/go-billy/v5" -) - -// RemoveAll removes path and any children it contains. It removes everything it -// can but returns the first error it encounters. If the path does not exist, -// RemoveAll returns nil (no error). -func RemoveAll(fs billy.Basic, path string) error { - fs, path = getUnderlyingAndPath(fs, path) - - if r, ok := fs.(removerAll); ok { - return r.RemoveAll(path) - } - - return removeAll(fs, path) -} - -type removerAll interface { - RemoveAll(string) error -} - -func removeAll(fs billy.Basic, path string) error { - // This implementation is adapted from os.RemoveAll. - - // Simple case: if Remove works, we're done. - err := fs.Remove(path) - if err == nil || os.IsNotExist(err) { - return nil - } - - // Otherwise, is this a directory we need to recurse into? - dir, serr := fs.Stat(path) - if serr != nil { - if os.IsNotExist(serr) { - return nil - } - - return serr - } - - if !dir.IsDir() { - // Not a directory; return the error from Remove. - return err - } - - dirfs, ok := fs.(billy.Dir) - if !ok { - return billy.ErrNotSupported - } - - // Directory. - fis, err := dirfs.ReadDir(path) - if err != nil { - if os.IsNotExist(err) { - // Race. It was deleted between the Lstat and Open. - // Return nil per RemoveAll's docs. - return nil - } - - return err - } - - // Remove contents & return first error. - err = nil - for _, fi := range fis { - cpath := fs.Join(path, fi.Name()) - err1 := removeAll(fs, cpath) - if err == nil { - err = err1 - } - } - - // Remove directory. - err1 := fs.Remove(path) - if err1 == nil || os.IsNotExist(err1) { - return nil - } - - if err == nil { - err = err1 - } - - return err - -} - -// WriteFile writes data to a file named by filename in the given filesystem. -// If the file does not exist, WriteFile creates it with permissions perm; -// otherwise WriteFile truncates it before writing. -func WriteFile(fs billy.Basic, filename string, data []byte, perm os.FileMode) error { - f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - - if err1 := f.Close(); err == nil { - err = err1 - } - - return err -} - -// Random number state. -// We generate random temporary file names so that there's a good -// chance the file doesn't exist yet - keeps the number of tries in -// TempFile to a minimum. 
-var rand uint32 -var randmu sync.Mutex - -func reseed() uint32 { - return uint32(time.Now().UnixNano() + int64(os.Getpid())) -} - -func nextSuffix() string { - randmu.Lock() - r := rand - if r == 0 { - r = reseed() - } - r = r*1664525 + 1013904223 // constants from Numerical Recipes - rand = r - randmu.Unlock() - return strconv.Itoa(int(1e9 + r%1e9))[1:] -} - -// TempFile creates a new temporary file in the directory dir with a name -// beginning with prefix, opens the file for reading and writing, and returns -// the resulting *os.File. If dir is the empty string, TempFile uses the default -// directory for temporary files (see os.TempDir). Multiple programs calling -// TempFile simultaneously will not choose the same file. The caller can use -// f.Name() to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. -func TempFile(fs billy.Basic, dir, prefix string) (f billy.File, err error) { - // This implementation is based on stdlib ioutil.TempFile. - - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+nextSuffix()) - f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - break - } - return -} - -// TempDir creates a new temporary directory in the directory dir -// with a name beginning with prefix and returns the path of the -// new directory. If dir is the empty string, TempDir uses the -// default directory for temporary files (see os.TempDir). -// Multiple programs calling TempDir simultaneously -// will not choose the same directory. It is the caller's responsibility -// to remove the directory when no longer needed. 
-func TempDir(fs billy.Dir, dir, prefix string) (name string, err error) { - // This implementation is based on stdlib ioutil.TempDir - - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - try := filepath.Join(dir, prefix+nextSuffix()) - err = fs.MkdirAll(try, 0700) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - if os.IsNotExist(err) { - if _, err := os.Stat(dir); os.IsNotExist(err) { - return "", err - } - } - if err == nil { - name = try - } - break - } - return -} - -type underlying interface { - Underlying() billy.Basic -} - -func getUnderlyingAndPath(fs billy.Basic, path string) (billy.Basic, string) { - u, ok := fs.(underlying) - if !ok { - return fs, path - } - if ch, ok := fs.(billy.Chroot); ok { - path = fs.Join(ch.Root(), path) - } - - return u.Underlying(), path -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/.gitignore b/awsproviderlint/vendor/github.com/go-git/go-git/v5/.gitignore deleted file mode 100644 index 038dd9f1ed5..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -coverage.out -*~ -coverage.txt -profile.out diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/CODE_OF_CONDUCT.md b/awsproviderlint/vendor/github.com/go-git/go-git/v5/CODE_OF_CONDUCT.md deleted file mode 100644 index a689fa3c34a..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -education, socio-economic status, nationality, personal appearance, race, -religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. 
- -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at conduct@sourced.tech. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html - -[homepage]: https://www.contributor-covenant.org - diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md b/awsproviderlint/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md deleted file mode 100644 index 2a72b501e2e..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md +++ /dev/null @@ -1,111 +0,0 @@ -Supported Capabilities -====================== - -Here is a non-comprehensive table of git commands and features whose equivalent -is supported by go-git. - -| Feature | Status | Notes | -|---------------------------------------|--------|-------| -| **config** | -| config | ✔ | Reading and modifying per-repository configuration (`.git/config`) is supported. Global configuration (`$HOME/.gitconfig`) is not. | -| **getting and creating repositories** | -| init | ✔ | Plain init and `--bare` are supported. Flags `--template`, `--separate-git-dir` and `--shared` are not. | -| clone | ✔ | Plain clone and equivalents to `--progress`, `--single-branch`, `--depth`, `--origin`, `--recurse-submodules` are supported. Others are not. | -| **basic snapshotting** | -| add | ✔ | Plain add is supported. Any other flags aren't supported | -| status | ✔ | -| commit | ✔ | -| reset | ✔ | -| rm | ✔ | -| mv | ✔ | -| **branching and merging** | -| branch | ✔ | -| checkout | ✔ | Basic usages of checkout are supported. | -| merge | ✖ | -| mergetool | ✖ | -| stash | ✖ | -| tag | ✔ | -| **sharing and updating projects** | -| fetch | ✔ | -| pull | ✔ | Only supports merges where the merge can be resolved as a fast-forward. 
| -| push | ✔ | -| remote | ✔ | -| submodule | ✔ | -| **inspection and comparison** | -| show | ✔ | -| log | ✔ | -| shortlog | (see log) | -| describe | | -| **patching** | -| apply | ✖ | -| cherry-pick | ✖ | -| diff | ✔ | Patch object with UnifiedDiff output representation | -| rebase | ✖ | -| revert | ✖ | -| **debugging** | -| bisect | ✖ | -| blame | ✔ | -| grep | ✔ | -| **email** || -| am | ✖ | -| apply | ✖ | -| format-patch | ✖ | -| send-email | ✖ | -| request-pull | ✖ | -| **external systems** | -| svn | ✖ | -| fast-import | ✖ | -| **administration** | -| clean | ✔ | -| gc | ✖ | -| fsck | ✖ | -| reflog | ✖ | -| filter-branch | ✖ | -| instaweb | ✖ | -| archive | ✖ | -| bundle | ✖ | -| prune | ✖ | -| repack | ✖ | -| **server admin** | -| daemon | | -| update-server-info | | -| **advanced** | -| notes | ✖ | -| replace | ✖ | -| worktree | ✖ | -| annotate | (see blame) | -| **gpg** | -| git-verify-commit | ✔ | -| git-verify-tag | ✔ | -| **plumbing commands** | -| cat-file | ✔ | -| check-ignore | | -| commit-tree | | -| count-objects | | -| diff-index | | -| for-each-ref | ✔ | -| hash-object | ✔ | -| ls-files | ✔ | -| merge-base | ✔ | Calculates the merge-base only between two commits, and supports `--independent` and `--is-ancestor` modifiers; Does not support `--fork-point` nor `--octopus` modifiers. | -| read-tree | | -| rev-list | ✔ | -| rev-parse | | -| show-ref | ✔ | -| symbolic-ref | ✔ | -| update-index | | -| update-ref | | -| verify-pack | | -| write-tree | | -| **protocols** | -| http(s):// (dumb) | ✖ | -| http(s):// (smart) | ✔ | -| git:// | ✔ | -| ssh:// | ✔ | -| file:// | partial | Warning: this is not pure Golang. This shells out to the `git` binary. | -| custom | ✔ | -| **other features** | -| gitignore | ✔ | -| gitattributes | ✖ | -| index version | | -| packfile version | | -| push-certs | ✖ | diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/CONTRIBUTING.md b/awsproviderlint/vendor/github.com/go-git/go-git/v5/CONTRIBUTING.md deleted file mode 100644 index fce25328a7f..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/CONTRIBUTING.md +++ /dev/null @@ -1,46 +0,0 @@ -# Contributing Guidelines - -source{d} go-git project is [Apache 2.0 licensed](LICENSE) and accepts -contributions via GitHub pull requests. This document outlines some of the -conventions on development workflow, commit message formatting, contact points, -and other resources to make it easier to get your contribution accepted. - -## Support Channels - -The official support channels, for both users and contributors, are: - -- [StackOverflow go-git tag](https://stackoverflow.com/questions/tagged/go-git) for user questions. -- GitHub [Issues](https://github.com/src-d/go-git/issues)* for bug reports and feature requests. - -*Before opening a new issue or submitting a new pull request, it's helpful to -search the project - it's likely that another user has already reported the -issue you're facing, or it's a known issue that we're already aware of. - - -## How to Contribute - -Pull Requests (PRs) are the main and exclusive way to contribute to the official go-git project. -In order for a PR to be accepted it needs to pass a list of requirements: - -- You should be able to run the same query using `git`. We don't accept features that are not implemented in the official git implementation. -- The expected behavior must match the [official git implementation](https://github.com/git/git). 
- The actual behavior must be correctly explained in natural language, providing a minimum working example in Go that reproduces it.
-- All PRs must be written in idiomatic Go, formatted according to [gofmt](https://golang.org/cmd/gofmt/), and without any warnings from [go lint](https://github.com/golang/lint) nor [go vet](https://golang.org/cmd/vet/).
-- They should in general include tests, and those shall pass.
-- If the PR is a bug fix, it has to include a suite of unit tests for the new functionality.
-- If the PR is a new feature, it has to come with a suite of unit tests that test the new functionality.
-- In any case, all the PRs have to pass the personal evaluation of at least one of the maintainers of go-git.
-
-### Format of the commit message
-
-Every commit message should describe what was changed, under which context and, if applicable, the GitHub issue it relates to:
-
-```
-plumbing: packp, Skip argument validations for unknown capabilities. Fixes #623
-```
-
-The format can be described more formally as follows:
-
-```
-<package>: <subpackage>, <what changed>. [Fixes #<issue-number>]
-```
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/LICENSE b/awsproviderlint/vendor/github.com/go-git/go-git/v5/LICENSE
deleted file mode 100644
index 8aa3d854cf7..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2018 Sourced Technologies, S.L. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/Makefile b/awsproviderlint/vendor/github.com/go-git/go-git/v5/Makefile deleted file mode 100644 index d10922fb10c..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/Makefile +++ /dev/null @@ -1,38 +0,0 @@ -# General -WORKDIR = $(PWD) - -# Go parameters -GOCMD = go -GOTEST = $(GOCMD) test - -# Git config -GIT_VERSION ?= -GIT_DIST_PATH ?= $(PWD)/.git-dist -GIT_REPOSITORY = http://github.com/git/git.git - -# Coverage -COVERAGE_REPORT = coverage.out -COVERAGE_MODE = count - -build-git: - @if [ -f $(GIT_DIST_PATH)/git ]; then \ - echo "nothing to do, using cache $(GIT_DIST_PATH)"; \ - else \ - git clone $(GIT_REPOSITORY) -b $(GIT_VERSION) --depth 1 --single-branch $(GIT_DIST_PATH); \ - cd $(GIT_DIST_PATH); \ - make configure; \ - ./configure; \ - make all; \ - fi - -test: - @echo "running against `git version`"; \ - $(GOTEST) ./... 
-
-test-coverage:
-	@echo "running against `git version`"; \
-	echo "" > $(COVERAGE_REPORT); \
-	$(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./...
-
-clean:
-	rm -rf $(GIT_DIST_PATH)
\ No newline at end of file
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/README.md b/awsproviderlint/vendor/github.com/go-git/go-git/v5/README.md
deleted file mode 100644
index ff0c9b72bae..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/README.md
+++ /dev/null
@@ -1,131 +0,0 @@
-![go-git logo](https://cdn.rawgit.com/src-d/artwork/02036484/go-git/files/go-git-github-readme-header.png)
-[![GoDoc](https://godoc.org/github.com/go-git/go-git/v5?status.svg)](https://pkg.go.dev/github.com/go-git/go-git/v5) [![Build Status](https://github.com/go-git/go-git/workflows/Test/badge.svg)](https://github.com/go-git/go-git/actions) [![Go Report Card](https://goreportcard.com/badge/github.com/go-git/go-git)](https://goreportcard.com/report/github.com/go-git/go-git)
-
-*go-git* is a highly extensible git implementation library written in **pure Go**.
-
-It can be used to manipulate git repositories at low level *(plumbing)* or high level *(porcelain)*, through an idiomatic Go API. It also supports several types of storage, such as in-memory filesystems, or custom implementations, thanks to the [`Storer`](https://pkg.go.dev/github.com/go-git/go-git/v5/plumbing/storer) interface.
-
-It has been actively developed since 2015 and is used extensively by [Keybase](https://keybase.io/blog/encrypted-git-for-everyone), [Gitea](https://gitea.io/en-us/) and [Pulumi](https://github.com/search?q=org%3Apulumi+go-git&type=Code), as well as by many other libraries and tools.
-
-Project Status
---------------
-
-After the legal issues with the [`src-d`](https://github.com/src-d) organization, the lack of updates for four months, and the requirement to make a hard fork, the project is **now back to normality**.
-
-The project is currently actively maintained by individual contributors, including several of the original authors, and is also backed by a new company, [gitsight](https://github.com/gitsight), where `go-git` is a critical component used at scale.
-
-
-Comparison with git
--------------------
-
-*go-git* aims to be fully compatible with [git](https://github.com/git/git); all the *porcelain* operations are implemented to work exactly as *git* does.
-
-*git* is a humongous project with years of development by thousands of contributors, making it challenging for *go-git* to implement all of its features. You can find a comparison of *go-git* vs *git* in the [compatibility documentation](COMPATIBILITY.md).
-
-
-Installation
-------------
-
-The recommended way to install *go-git* is:
-
-```go
-import "github.com/go-git/go-git/v5" // with go modules enabled (GO111MODULE=on or outside GOPATH)
-import "github.com/go-git/go-git" // with go modules disabled
-```
-
-
-Examples
---------
-
-> Please note that the `CheckIfError` and `Info` functions used in the examples are helpers from the [examples package](https://github.com/go-git/go-git/blob/master/_examples/common.go#L19), included only to keep the examples short.
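For orientation, a rough sketch of what those helpers do (an editor's illustration, not the actual `_examples/common.go` code):

```go
// Hypothetical stand-ins: Info echoes the git command an example mimics,
// and CheckIfError aborts the example on the first error it sees.
func Info(format string, args ...interface{}) {
	fmt.Printf(format+"\n", args...)
}

func CheckIfError(err error) {
	if err != nil {
		fmt.Fprintf(os.Stderr, "error: %s\n", err)
		os.Exit(1)
	}
}
```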
- - -### Basic example - -A basic example that mimics the standard `git clone` command - -```go -// Clone the given repository to the given directory -Info("git clone https://github.com/go-git/go-git") - -_, err := git.PlainClone("/tmp/foo", false, &git.CloneOptions{ - URL: "https://github.com/go-git/go-git", - Progress: os.Stdout, -}) - -CheckIfError(err) -``` - -Outputs: -``` -Counting objects: 4924, done. -Compressing objects: 100% (1333/1333), done. -Total 4924 (delta 530), reused 6 (delta 6), pack-reused 3533 -``` - -### In-memory example - -Cloning a repository into memory and printing the history of HEAD, just like `git log` does - - -```go -// Clones the given repository in memory, creating the remote, the local -// branches and fetching the objects, exactly as: -Info("git clone https://github.com/go-git/go-billy") - -r, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{ - URL: "https://github.com/go-git/go-billy", -}) - -CheckIfError(err) - -// Gets the HEAD history from HEAD, just like this command: -Info("git log") - -// ... retrieves the branch pointed by HEAD -ref, err := r.Head() -CheckIfError(err) - - -// ... retrieves the commit history -cIter, err := r.Log(&git.LogOptions{From: ref.Hash()}) -CheckIfError(err) - -// ... just iterates over the commits, printing it -err = cIter.ForEach(func(c *object.Commit) error { - fmt.Println(c) - return nil -}) -CheckIfError(err) -``` - -Outputs: -``` -commit ded8054fd0c3994453e9c8aacaf48d118d42991e -Author: Santiago M. Mola -Date: Sat Nov 12 21:18:41 2016 +0100 - - index: ReadFrom/WriteTo returns IndexReadError/IndexWriteError. (#9) - -commit df707095626f384ce2dc1a83b30f9a21d69b9dfc -Author: Santiago M. Mola -Date: Fri Nov 11 13:23:22 2016 +0100 - - readwriter: fix bug when writing index. (#10) - - When using ReadWriter on an existing siva file, absolute offset for - index entries was not being calculated correctly. -... -``` - -You can find this [example](_examples/log/main.go) and many others in the [examples](_examples) folder. - -Contribute ----------- - -[Contributions](https://github.com/go-git/go-git/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) are more than welcome, if you are interested please take a look to -our [Contributing Guidelines](CONTRIBUTING.md). - -License -------- -Apache License Version 2.0, see [LICENSE](LICENSE) diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/blame.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/blame.go deleted file mode 100644 index 43634b32ca6..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/blame.go +++ /dev/null @@ -1,302 +0,0 @@ -package git - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "strings" - "time" - "unicode/utf8" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/utils/diff" -) - -// BlameResult represents the result of a Blame operation. -type BlameResult struct { - // Path is the path of the File that we're blaming. - Path string - // Rev (Revision) is the hash of the specified Commit used to generate this result. - Rev plumbing.Hash - // Lines contains every line with its authorship. - Lines []*Line -} - -// Blame returns a BlameResult with the information about the last author of -// each line from file `path` at commit `c`. -func Blame(c *object.Commit, path string) (*BlameResult, error) { - // The file to blame is identified by the input arguments: - // commit and path. commit is a Commit object obtained from a Repository. 
Path - // represents a path to a specific file contained into the repository. - // - // Blaming a file is a two step process: - // - // 1. Create a linear history of the commits affecting a file. We use - // revlist.New for that. - // - // 2. Then build a graph with a node for every line in every file in - // the history of the file. - // - // Each node is assigned a commit: Start by the nodes in the first - // commit. Assign that commit as the creator of all its lines. - // - // Then jump to the nodes in the next commit, and calculate the diff - // between the two files. Newly created lines get - // assigned the new commit as its origin. Modified lines also get - // this new commit. Untouched lines retain the old commit. - // - // All this work is done in the assignOrigin function which holds all - // the internal relevant data in a "blame" struct, that is not - // exported. - // - // TODO: ways to improve the efficiency of this function: - // 1. Improve revlist - // 2. Improve how to traverse the history (example a backward traversal will - // be much more efficient) - // - // TODO: ways to improve the function in general: - // 1. Add memoization between revlist and assign. - // 2. It is using much more memory than needed, see the TODOs below. - - b := new(blame) - b.fRev = c - b.path = path - - // get all the file revisions - if err := b.fillRevs(); err != nil { - return nil, err - } - - // calculate the line tracking graph and fill in - // file contents in data. - if err := b.fillGraphAndData(); err != nil { - return nil, err - } - - file, err := b.fRev.File(b.path) - if err != nil { - return nil, err - } - finalLines, err := file.Lines() - if err != nil { - return nil, err - } - - // Each node (line) holds the commit where it was introduced or - // last modified. To achieve that we use the FORWARD algorithm - // described in Zimmermann, et al. "Mining Version Archives for - // Co-changed Lines", in proceedings of the Mining Software - // Repositories workshop, Shanghai, May 22-23, 2006. - lines, err := newLines(finalLines, b.sliceGraph(len(b.graph)-1)) - if err != nil { - return nil, err - } - - return &BlameResult{ - Path: path, - Rev: c.Hash, - Lines: lines, - }, nil -} - -// Line values represent the contents and author of a line in BlamedResult values. -type Line struct { - // Author is the email address of the last author that modified the line. - Author string - // Text is the original text of the line. - Text string - // Date is when the original text of the line was introduced - Date time.Time - // Hash is the commit hash that introduced the original line - Hash plumbing.Hash -} - -func newLine(author, text string, date time.Time, hash plumbing.Hash) *Line { - return &Line{ - Author: author, - Text: text, - Hash: hash, - Date: date, - } -} - -func newLines(contents []string, commits []*object.Commit) ([]*Line, error) { - lcontents := len(contents) - lcommits := len(commits) - - if lcontents != lcommits { - if lcontents == lcommits-1 && contents[lcontents-1] != "\n" { - contents = append(contents, "\n") - } else { - return nil, errors.New("contents and commits have different length") - } - } - - result := make([]*Line, 0, lcontents) - for i := range contents { - result = append(result, newLine( - commits[i].Author.Email, contents[i], - commits[i].Author.When, commits[i].Hash, - )) - } - - return result, nil -} - -// this struct is internally used by the blame function to hold its -// inputs, outputs and state. 
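// Editor's sketch (a standalone program, not part of the vendored file):
// driving the exported Blame API above against a repository on disk.
// PlainOpen, Head, CommitObject and Blame are real go-git v5 APIs; the
// repository path "." and the file path "README.md" are illustrative.
package main

import (
	"fmt"
	"log"

	git "github.com/go-git/go-git/v5"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		log.Fatal(err)
	}
	ref, err := repo.Head()
	if err != nil {
		log.Fatal(err)
	}
	commit, err := repo.CommitObject(ref.Hash())
	if err != nil {
		log.Fatal(err)
	}
	result, err := git.Blame(commit, "README.md")
	if err != nil {
		log.Fatal(err)
	}
	// Print one annotated line per source line, git-blame style.
	for _, line := range result.Lines {
		fmt.Printf("%s %-20s %s\n", line.Hash.String()[:8], line.Author, line.Text)
	}
}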
-type blame struct { - // the path of the file to blame - path string - // the commit of the final revision of the file to blame - fRev *object.Commit - // the chain of revisions affecting the the file to blame - revs []*object.Commit - // the contents of the file across all its revisions - data []string - // the graph of the lines in the file across all the revisions - graph [][]*object.Commit -} - -// calculate the history of a file "path", starting from commit "from", sorted by commit date. -func (b *blame) fillRevs() error { - var err error - - b.revs, err = references(b.fRev, b.path) - return err -} - -// build graph of a file from its revision history -func (b *blame) fillGraphAndData() error { - //TODO: not all commits are needed, only the current rev and the prev - b.graph = make([][]*object.Commit, len(b.revs)) - b.data = make([]string, len(b.revs)) // file contents in all the revisions - // for every revision of the file, starting with the first - // one... - for i, rev := range b.revs { - // get the contents of the file - file, err := rev.File(b.path) - if err != nil { - return nil - } - b.data[i], err = file.Contents() - if err != nil { - return err - } - nLines := countLines(b.data[i]) - // create a node for each line - b.graph[i] = make([]*object.Commit, nLines) - // assign a commit to each node - // if this is the first revision, then the node is assigned to - // this first commit. - if i == 0 { - for j := 0; j < nLines; j++ { - b.graph[i][j] = b.revs[i] - } - } else { - // if this is not the first commit, then assign to the old - // commit or to the new one, depending on what the diff - // says. - b.assignOrigin(i, i-1) - } - } - return nil -} - -// sliceGraph returns a slice of commits (one per line) for a particular -// revision of a file (0=first revision). -func (b *blame) sliceGraph(i int) []*object.Commit { - fVs := b.graph[i] - result := make([]*object.Commit, 0, len(fVs)) - for _, v := range fVs { - c := *v - result = append(result, &c) - } - return result -} - -// Assigns origin to vertexes in current (c) rev from data in its previous (p) -// revision -func (b *blame) assignOrigin(c, p int) { - // assign origin based on diff info - hunks := diff.Do(b.data[p], b.data[c]) - sl := -1 // source line - dl := -1 // destination line - for h := range hunks { - hLines := countLines(hunks[h].Text) - for hl := 0; hl < hLines; hl++ { - switch { - case hunks[h].Type == 0: - sl++ - dl++ - b.graph[c][dl] = b.graph[p][sl] - case hunks[h].Type == 1: - dl++ - b.graph[c][dl] = b.revs[c] - case hunks[h].Type == -1: - sl++ - default: - panic("unreachable") - } - } - } -} - -// GoString prints the results of a Blame using git-blame's style. -func (b *blame) GoString() string { - var buf bytes.Buffer - - file, err := b.fRev.File(b.path) - if err != nil { - panic("PrettyPrint: internal error in repo.Data") - } - contents, err := file.Contents() - if err != nil { - panic("PrettyPrint: internal error in repo.Data") - } - - lines := strings.Split(contents, "\n") - // max line number length - mlnl := len(strconv.Itoa(len(lines))) - // max author length - mal := b.maxAuthorLength() - format := fmt.Sprintf("%%s (%%-%ds %%%dd) %%s\n", - mal, mlnl) - - fVs := b.graph[len(b.graph)-1] - for ln, v := range fVs { - fmt.Fprintf(&buf, format, v.Hash.String()[:8], - prettyPrintAuthor(fVs[ln]), ln+1, lines[ln]) - } - return buf.String() -} - -// utility function to pretty print the author. 
-func prettyPrintAuthor(c *object.Commit) string {
-	return fmt.Sprintf("%s %s", c.Author.Name, c.Author.When.Format("2006-01-02"))
-}
-
-// utility function to calculate the number of runes needed
-// to print the longest author name in the blame of a file.
-func (b *blame) maxAuthorLength() int {
-	memo := make(map[plumbing.Hash]struct{}, len(b.graph)-1)
-	fVs := b.graph[len(b.graph)-1]
-	m := 0
-	for ln := range fVs {
-		if _, ok := memo[fVs[ln].Hash]; ok {
-			continue
-		}
-		memo[fVs[ln].Hash] = struct{}{}
-		m = max(m, utf8.RuneCountInString(prettyPrintAuthor(fVs[ln])))
-	}
-	return m
-}
-
-func max(a, b int) int {
-	if a > b {
-		return a
-	}
-	return b
-}
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/common.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/common.go
deleted file mode 100644
index f837a2654c1..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/common.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package git
-
-import "strings"
-
-const defaultDotGitPath = ".git"
-
-// countLines returns the number of lines in a string à la git: the newline
-// character is assumed to be '\n'. The empty string contains 0 lines. If the
-// last line of the string doesn't end with a newline, it will still be
-// considered a line.
-func countLines(s string) int {
-	if s == "" {
-		return 0
-	}
-
-	nEOL := strings.Count(s, "\n")
-	if strings.HasSuffix(s, "\n") {
-		return nEOL
-	}
-
-	return nEOL + 1
-}
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/config/branch.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/config/branch.go
deleted file mode 100644
index fe86cf542cb..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/config/branch.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package config
-
-import (
-	"errors"
-
-	"github.com/go-git/go-git/v5/plumbing"
-	format "github.com/go-git/go-git/v5/plumbing/format/config"
-)
-
-var (
-	errBranchEmptyName     = errors.New("branch config: empty name")
-	errBranchInvalidMerge  = errors.New("branch config: invalid merge")
-	errBranchInvalidRebase = errors.New("branch config: rebase must be one of 'true' or 'interactive'")
-)
-
-// Branch contains information on the
-// local branches and which remote to track
-type Branch struct {
-	// Name of branch
-	Name string
-	// Remote name of remote to track
-	Remote string
-	// Merge is the local refspec for the branch
-	Merge plumbing.ReferenceName
-	// Rebase instead of merge when pulling. Valid values are
-	// "true" and "interactive".
"false" is undocumented and - // typically represented by the non-existence of this field - Rebase string - - raw *format.Subsection -} - -// Validate validates fields of branch -func (b *Branch) Validate() error { - if b.Name == "" { - return errBranchEmptyName - } - - if b.Merge != "" && !b.Merge.IsBranch() { - return errBranchInvalidMerge - } - - if b.Rebase != "" && - b.Rebase != "true" && - b.Rebase != "interactive" && - b.Rebase != "false" { - return errBranchInvalidRebase - } - - return nil -} - -func (b *Branch) marshal() *format.Subsection { - if b.raw == nil { - b.raw = &format.Subsection{} - } - - b.raw.Name = b.Name - - if b.Remote == "" { - b.raw.RemoveOption(remoteSection) - } else { - b.raw.SetOption(remoteSection, b.Remote) - } - - if b.Merge == "" { - b.raw.RemoveOption(mergeKey) - } else { - b.raw.SetOption(mergeKey, string(b.Merge)) - } - - if b.Rebase == "" { - b.raw.RemoveOption(rebaseKey) - } else { - b.raw.SetOption(rebaseKey, b.Rebase) - } - - return b.raw -} - -func (b *Branch) unmarshal(s *format.Subsection) error { - b.raw = s - - b.Name = b.raw.Name - b.Remote = b.raw.Options.Get(remoteSection) - b.Merge = plumbing.ReferenceName(b.raw.Options.Get(mergeKey)) - b.Rebase = b.raw.Options.Get(rebaseKey) - - return b.Validate() -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/config/config.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/config/config.go deleted file mode 100644 index 7d6ab5886b1..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/config/config.go +++ /dev/null @@ -1,564 +0,0 @@ -// Package config contains the abstraction of multiple config files -package config - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strconv" - - "github.com/go-git/go-git/v5/internal/url" - format "github.com/go-git/go-git/v5/plumbing/format/config" - "github.com/mitchellh/go-homedir" -) - -const ( - // DefaultFetchRefSpec is the default refspec used for fetch. - DefaultFetchRefSpec = "+refs/heads/*:refs/remotes/%s/*" - // DefaultPushRefSpec is the default refspec used for push. - DefaultPushRefSpec = "refs/heads/*:refs/heads/*" -) - -// ConfigStorer generic storage of Config object -type ConfigStorer interface { - Config() (*Config, error) - SetConfig(*Config) error -} - -var ( - ErrInvalid = errors.New("config invalid key in remote or branch") - ErrRemoteConfigNotFound = errors.New("remote config not found") - ErrRemoteConfigEmptyURL = errors.New("remote config: empty URL") - ErrRemoteConfigEmptyName = errors.New("remote config: empty name") -) - -// Scope defines the scope of a config file, such as local, global or system. -type Scope int - -// Available ConfigScope's -const ( - LocalScope Scope = iota - GlobalScope - SystemScope -) - -// Config contains the repository configuration -// https://www.kernel.org/pub/software/scm/git/docs/git-config.html#FILES -type Config struct { - Core struct { - // IsBare if true this repository is assumed to be bare and has no - // working directory associated with it. - IsBare bool - // Worktree is the path to the root of the working tree. - Worktree string - // CommentChar is the character indicating the start of a - // comment for commands like commit and tag - CommentChar string - } - - User struct { - // Name is the personal name of the author and the commiter of a commit. - Name string - // Email is the email of the author and the commiter of a commit. 
-		Email string
-	}
-
-	Author struct {
-		// Name is the personal name of the author of a commit.
-		Name string
-		// Email is the email of the author of a commit.
-		Email string
-	}
-
-	Committer struct {
-		// Name is the personal name of the committer of a commit.
-		Name string
-		// Email is the email of the committer of a commit.
-		Email string
-	}
-
-	Pack struct {
-		// Window controls the size of the sliding window for delta
-		// compression. The default is 10. A value of 0 turns off
-		// delta compression entirely.
-		Window uint
-	}
-
-	// Remotes list of repository remotes, the key of the map is the name
-	// of the remote, should equal to RemoteConfig.Name.
-	Remotes map[string]*RemoteConfig
-	// Submodules list of repository submodules, the key of the map is the name
-	// of the submodule, should equal to Submodule.Name.
-	Submodules map[string]*Submodule
-	// Branches list of branches, the key is the branch name and should
-	// equal Branch.Name
-	Branches map[string]*Branch
-	// Raw contains the raw information of a config file. The main goal is
-	// to preserve the parsed information from the original format, to avoid
-	// dropping unsupported fields.
-	Raw *format.Config
-}

-// NewConfig returns a new empty Config.
-func NewConfig() *Config {
-	config := &Config{
-		Remotes:    make(map[string]*RemoteConfig),
-		Submodules: make(map[string]*Submodule),
-		Branches:   make(map[string]*Branch),
-		Raw:        format.New(),
-	}
-
-	config.Pack.Window = DefaultPackWindow
-
-	return config
-}
-
-// ReadConfig reads a config file from an io.Reader.
-func ReadConfig(r io.Reader) (*Config, error) {
-	b, err := ioutil.ReadAll(r)
-	if err != nil {
-		return nil, err
-	}
-
-	cfg := NewConfig()
-	if err = cfg.Unmarshal(b); err != nil {
-		return nil, err
-	}
-
-	return cfg, nil
-}
-
-// LoadConfig loads a config file from a given scope. The returned Config
-// contains exclusively information from the given scope. If no config file
-// is found for the given scope, an empty one is returned.
-func LoadConfig(scope Scope) (*Config, error) {
-	if scope == LocalScope {
-		return nil, fmt.Errorf("LocalScope should be read from a ConfigStorer.")
-	}
-
-	files, err := Paths(scope)
-	if err != nil {
-		return nil, err
-	}
-
-	for _, file := range files {
-		f, err := os.Open(file)
-		if err != nil {
-			if os.IsNotExist(err) {
-				continue
-			}
-
-			return nil, err
-		}
-
-		defer f.Close()
-		return ReadConfig(f)
-	}
-
-	return NewConfig(), nil
-}
-
-// Paths returns the config file location for a given scope.
-func Paths(scope Scope) ([]string, error) {
-	var files []string
-	switch scope {
-	case GlobalScope:
-		xdg := os.Getenv("XDG_CONFIG_HOME")
-		if xdg != "" {
-			files = append(files, filepath.Join(xdg, "git/config"))
-		}
-
-		home, err := homedir.Dir()
-		if err != nil {
-			return nil, err
-		}
-
-		files = append(files,
-			filepath.Join(home, ".gitconfig"),
-			filepath.Join(home, ".config/git/config"),
-		)
-	case SystemScope:
-		files = append(files, "/etc/gitconfig")
-	}
-
-	return files, nil
-}
-
-// Validate validates the fields and sets the default values.
-func (c *Config) Validate() error { - for name, r := range c.Remotes { - if r.Name != name { - return ErrInvalid - } - - if err := r.Validate(); err != nil { - return err - } - } - - for name, b := range c.Branches { - if b.Name != name { - return ErrInvalid - } - - if err := b.Validate(); err != nil { - return err - } - } - - return nil -} - -const ( - remoteSection = "remote" - submoduleSection = "submodule" - branchSection = "branch" - coreSection = "core" - packSection = "pack" - userSection = "user" - authorSection = "author" - committerSection = "committer" - fetchKey = "fetch" - urlKey = "url" - bareKey = "bare" - worktreeKey = "worktree" - commentCharKey = "commentChar" - windowKey = "window" - mergeKey = "merge" - rebaseKey = "rebase" - nameKey = "name" - emailKey = "email" - - // DefaultPackWindow holds the number of previous objects used to - // generate deltas. The value 10 is the same used by git command. - DefaultPackWindow = uint(10) -) - -// Unmarshal parses a git-config file and stores it. -func (c *Config) Unmarshal(b []byte) error { - r := bytes.NewBuffer(b) - d := format.NewDecoder(r) - - c.Raw = format.New() - if err := d.Decode(c.Raw); err != nil { - return err - } - - c.unmarshalCore() - c.unmarshalUser() - if err := c.unmarshalPack(); err != nil { - return err - } - unmarshalSubmodules(c.Raw, c.Submodules) - - if err := c.unmarshalBranches(); err != nil { - return err - } - - return c.unmarshalRemotes() -} - -func (c *Config) unmarshalCore() { - s := c.Raw.Section(coreSection) - if s.Options.Get(bareKey) == "true" { - c.Core.IsBare = true - } - - c.Core.Worktree = s.Options.Get(worktreeKey) - c.Core.CommentChar = s.Options.Get(commentCharKey) -} - -func (c *Config) unmarshalUser() { - s := c.Raw.Section(userSection) - c.User.Name = s.Options.Get(nameKey) - c.User.Email = s.Options.Get(emailKey) - - s = c.Raw.Section(authorSection) - c.Author.Name = s.Options.Get(nameKey) - c.Author.Email = s.Options.Get(emailKey) - - s = c.Raw.Section(committerSection) - c.Committer.Name = s.Options.Get(nameKey) - c.Committer.Email = s.Options.Get(emailKey) -} - -func (c *Config) unmarshalPack() error { - s := c.Raw.Section(packSection) - window := s.Options.Get(windowKey) - if window == "" { - c.Pack.Window = DefaultPackWindow - } else { - winUint, err := strconv.ParseUint(window, 10, 32) - if err != nil { - return err - } - c.Pack.Window = uint(winUint) - } - return nil -} - -func (c *Config) unmarshalRemotes() error { - s := c.Raw.Section(remoteSection) - for _, sub := range s.Subsections { - r := &RemoteConfig{} - if err := r.unmarshal(sub); err != nil { - return err - } - - c.Remotes[r.Name] = r - } - - return nil -} - -func unmarshalSubmodules(fc *format.Config, submodules map[string]*Submodule) { - s := fc.Section(submoduleSection) - for _, sub := range s.Subsections { - m := &Submodule{} - m.unmarshal(sub) - - if m.Validate() == ErrModuleBadPath { - continue - } - - submodules[m.Name] = m - } -} - -func (c *Config) unmarshalBranches() error { - bs := c.Raw.Section(branchSection) - for _, sub := range bs.Subsections { - b := &Branch{} - - if err := b.unmarshal(sub); err != nil { - return err - } - - c.Branches[b.Name] = b - } - return nil -} - -// Marshal returns Config encoded as a git-config file. 
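NewConfig plus the Marshal method that follows give the opposite direction. Because Marshal re-encodes through the preserved Raw tree, options the struct does not model survive a decode/encode round trip. A small sketch under the same import-path assumption (names and addresses are invented):

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/config"
)

func main() {
	cfg := config.NewConfig()
	cfg.User.Name = "Jane Doe"
	cfg.User.Email = "jane@example.org"

	out, err := cfg.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // emits [core] and [user] sections
}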
-func (c *Config) Marshal() ([]byte, error) { - c.marshalCore() - c.marshalUser() - c.marshalPack() - c.marshalRemotes() - c.marshalSubmodules() - c.marshalBranches() - - buf := bytes.NewBuffer(nil) - if err := format.NewEncoder(buf).Encode(c.Raw); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -func (c *Config) marshalCore() { - s := c.Raw.Section(coreSection) - s.SetOption(bareKey, fmt.Sprintf("%t", c.Core.IsBare)) - - if c.Core.Worktree != "" { - s.SetOption(worktreeKey, c.Core.Worktree) - } -} - -func (c *Config) marshalUser() { - s := c.Raw.Section(userSection) - if c.User.Name != "" { - s.SetOption(nameKey, c.User.Name) - } - - if c.User.Email != "" { - s.SetOption(emailKey, c.User.Email) - } - - s = c.Raw.Section(authorSection) - if c.Author.Name != "" { - s.SetOption(nameKey, c.Author.Name) - } - - if c.Author.Email != "" { - s.SetOption(emailKey, c.Author.Email) - } - - s = c.Raw.Section(committerSection) - if c.Committer.Name != "" { - s.SetOption(nameKey, c.Committer.Name) - } - - if c.Committer.Email != "" { - s.SetOption(emailKey, c.Committer.Email) - } -} - -func (c *Config) marshalPack() { - s := c.Raw.Section(packSection) - if c.Pack.Window != DefaultPackWindow { - s.SetOption(windowKey, fmt.Sprintf("%d", c.Pack.Window)) - } -} - -func (c *Config) marshalRemotes() { - s := c.Raw.Section(remoteSection) - newSubsections := make(format.Subsections, 0, len(c.Remotes)) - added := make(map[string]bool) - for _, subsection := range s.Subsections { - if remote, ok := c.Remotes[subsection.Name]; ok { - newSubsections = append(newSubsections, remote.marshal()) - added[subsection.Name] = true - } - } - - remoteNames := make([]string, 0, len(c.Remotes)) - for name := range c.Remotes { - remoteNames = append(remoteNames, name) - } - - sort.Strings(remoteNames) - - for _, name := range remoteNames { - if !added[name] { - newSubsections = append(newSubsections, c.Remotes[name].marshal()) - } - } - - s.Subsections = newSubsections -} - -func (c *Config) marshalSubmodules() { - s := c.Raw.Section(submoduleSection) - s.Subsections = make(format.Subsections, len(c.Submodules)) - - var i int - for _, r := range c.Submodules { - section := r.marshal() - // the submodule section at config is a subset of the .gitmodule file - // we should remove the non-valid options for the config file. - section.RemoveOption(pathKey) - s.Subsections[i] = section - i++ - } -} - -func (c *Config) marshalBranches() { - s := c.Raw.Section(branchSection) - newSubsections := make(format.Subsections, 0, len(c.Branches)) - added := make(map[string]bool) - for _, subsection := range s.Subsections { - if branch, ok := c.Branches[subsection.Name]; ok { - newSubsections = append(newSubsections, branch.marshal()) - added[subsection.Name] = true - } - } - - branchNames := make([]string, 0, len(c.Branches)) - for name := range c.Branches { - branchNames = append(branchNames, name) - } - - sort.Strings(branchNames) - - for _, name := range branchNames { - if !added[name] { - newSubsections = append(newSubsections, c.Branches[name].marshal()) - } - } - - s.Subsections = newSubsections -} - -// RemoteConfig contains the configuration for a given remote repository. -type RemoteConfig struct { - // Name of the remote - Name string - // URLs the URLs of a remote repository. It must be non-empty. Fetch will - // always use the first URL, while push will use all of them. 
- URLs []string - // Fetch the default set of "refspec" for fetch operation - Fetch []RefSpec - - // raw representation of the subsection, filled by marshal or unmarshal are - // called - raw *format.Subsection -} - -// Validate validates the fields and sets the default values. -func (c *RemoteConfig) Validate() error { - if c.Name == "" { - return ErrRemoteConfigEmptyName - } - - if len(c.URLs) == 0 { - return ErrRemoteConfigEmptyURL - } - - for _, r := range c.Fetch { - if err := r.Validate(); err != nil { - return err - } - } - - if len(c.Fetch) == 0 { - c.Fetch = []RefSpec{RefSpec(fmt.Sprintf(DefaultFetchRefSpec, c.Name))} - } - - return nil -} - -func (c *RemoteConfig) unmarshal(s *format.Subsection) error { - c.raw = s - - fetch := []RefSpec{} - for _, f := range c.raw.Options.GetAll(fetchKey) { - rs := RefSpec(f) - if err := rs.Validate(); err != nil { - return err - } - - fetch = append(fetch, rs) - } - - c.Name = c.raw.Name - c.URLs = append([]string(nil), c.raw.Options.GetAll(urlKey)...) - c.Fetch = fetch - - return nil -} - -func (c *RemoteConfig) marshal() *format.Subsection { - if c.raw == nil { - c.raw = &format.Subsection{} - } - - c.raw.Name = c.Name - if len(c.URLs) == 0 { - c.raw.RemoveOption(urlKey) - } else { - c.raw.SetOption(urlKey, c.URLs...) - } - - if len(c.Fetch) == 0 { - c.raw.RemoveOption(fetchKey) - } else { - var values []string - for _, rs := range c.Fetch { - values = append(values, rs.String()) - } - - c.raw.SetOption(fetchKey, values...) - } - - return c.raw -} - -func (c *RemoteConfig) IsFirstURLLocal() bool { - return url.IsLocalEndpoint(c.URLs[0]) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/config/modules.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/config/modules.go deleted file mode 100644 index 1c10aa354eb..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/config/modules.go +++ /dev/null @@ -1,139 +0,0 @@ -package config - -import ( - "bytes" - "errors" - "regexp" - - format "github.com/go-git/go-git/v5/plumbing/format/config" -) - -var ( - ErrModuleEmptyURL = errors.New("module config: empty URL") - ErrModuleEmptyPath = errors.New("module config: empty path") - ErrModuleBadPath = errors.New("submodule has an invalid path") -) - -var ( - // Matches module paths with dotdot ".." components. - dotdotPath = regexp.MustCompile(`(^|[/\\])\.\.([/\\]|$)`) -) - -// Modules defines the submodules properties, represents a .gitmodules file -// https://www.kernel.org/pub/software/scm/git/docs/gitmodules.html -type Modules struct { - // Submodules is a map of submodules being the key the name of the submodule. - Submodules map[string]*Submodule - - raw *format.Config -} - -// NewModules returns a new empty Modules -func NewModules() *Modules { - return &Modules{ - Submodules: make(map[string]*Submodule), - raw: format.New(), - } -} - -const ( - pathKey = "path" - branchKey = "branch" -) - -// Unmarshal parses a git-config file and stores it. -func (m *Modules) Unmarshal(b []byte) error { - r := bytes.NewBuffer(b) - d := format.NewDecoder(r) - - m.raw = format.New() - if err := d.Decode(m.raw); err != nil { - return err - } - - unmarshalSubmodules(m.raw, m.Submodules) - return nil -} - -// Marshal returns Modules encoded as a git-config file. 
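Note how RemoteConfig.Validate above doubles as a defaulter: empty names or URL lists are rejected, while an empty Fetch list is replaced with the remote's standard refspec. A sketch of that behavior:

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/config"
)

func main() {
	rc := &config.RemoteConfig{
		Name: "origin",
		URLs: []string{"https://github.com/go-git/go-git"},
	}

	// Validate fills Fetch from DefaultFetchRefSpec when it is empty.
	if err := rc.Validate(); err != nil {
		panic(err)
	}
	fmt.Println(rc.Fetch[0]) // +refs/heads/*:refs/remotes/origin/*
}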
-func (m *Modules) Marshal() ([]byte, error) { - s := m.raw.Section(submoduleSection) - s.Subsections = make(format.Subsections, len(m.Submodules)) - - var i int - for _, r := range m.Submodules { - s.Subsections[i] = r.marshal() - i++ - } - - buf := bytes.NewBuffer(nil) - if err := format.NewEncoder(buf).Encode(m.raw); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -// Submodule defines a submodule. -type Submodule struct { - // Name module name - Name string - // Path defines the path, relative to the top-level directory of the Git - // working tree. - Path string - // URL defines a URL from which the submodule repository can be cloned. - URL string - // Branch is a remote branch name for tracking updates in the upstream - // submodule. Optional value. - Branch string - - // raw representation of the subsection, filled by marshal or unmarshal are - // called. - raw *format.Subsection -} - -// Validate validates the fields and sets the default values. -func (m *Submodule) Validate() error { - if m.Path == "" { - return ErrModuleEmptyPath - } - - if m.URL == "" { - return ErrModuleEmptyURL - } - - if dotdotPath.MatchString(m.Path) { - return ErrModuleBadPath - } - - return nil -} - -func (m *Submodule) unmarshal(s *format.Subsection) { - m.raw = s - - m.Name = m.raw.Name - m.Path = m.raw.Option(pathKey) - m.URL = m.raw.Option(urlKey) - m.Branch = m.raw.Option(branchKey) -} - -func (m *Submodule) marshal() *format.Subsection { - if m.raw == nil { - m.raw = &format.Subsection{} - } - - m.raw.Name = m.Name - if m.raw.Name == "" { - m.raw.Name = m.Path - } - - m.raw.SetOption(pathKey, m.Path) - m.raw.SetOption(urlKey, m.URL) - - if m.Branch != "" { - m.raw.SetOption(branchKey, m.Branch) - } - - return m.raw -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/config/refspec.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/config/refspec.go deleted file mode 100644 index 4bfaa37bbb2..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/config/refspec.go +++ /dev/null @@ -1,155 +0,0 @@ -package config - -import ( - "errors" - "strings" - - "github.com/go-git/go-git/v5/plumbing" -) - -const ( - refSpecWildcard = "*" - refSpecForce = "+" - refSpecSeparator = ":" -) - -var ( - ErrRefSpecMalformedSeparator = errors.New("malformed refspec, separators are wrong") - ErrRefSpecMalformedWildcard = errors.New("malformed refspec, mismatched number of wildcards") -) - -// RefSpec is a mapping from local branches to remote references. -// The format of the refspec is an optional +, followed by :, where -// is the pattern for references on the remote side and is where -// those references will be written locally. The + tells Git to update the -// reference even if it isn’t a fast-forward. -// eg.: "+refs/heads/*:refs/remotes/origin/*" -// -// https://git-scm.com/book/en/v2/Git-Internals-The-Refspec -type RefSpec string - -// Validate validates the RefSpec -func (s RefSpec) Validate() error { - spec := string(s) - if strings.Count(spec, refSpecSeparator) != 1 { - return ErrRefSpecMalformedSeparator - } - - sep := strings.Index(spec, refSpecSeparator) - if sep == len(spec)-1 { - return ErrRefSpecMalformedSeparator - } - - ws := strings.Count(spec[0:sep], refSpecWildcard) - wd := strings.Count(spec[sep+1:], refSpecWildcard) - if ws == wd && ws < 2 && wd < 2 { - return nil - } - - return ErrRefSpecMalformedWildcard -} - -// IsForceUpdate returns if update is allowed in non fast-forward merges. 
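The RefSpec.Validate method above enforces exactly one ":" separator and a matched wildcard count on each side. To make the rules concrete:

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/config"
)

func main() {
	for _, s := range []config.RefSpec{
		"+refs/heads/*:refs/remotes/origin/*", // ok: one ':' and balanced wildcards
		"refs/heads/master:refs/heads/a:b",    // malformed separator
		"refs/heads/*:refs/remotes/origin/x",  // mismatched wildcards
	} {
		fmt.Printf("%-40s %v\n", s, s.Validate())
	}
}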
-func (s RefSpec) IsForceUpdate() bool { - return s[0] == refSpecForce[0] -} - -// IsDelete returns true if the refspec indicates a delete (empty src). -func (s RefSpec) IsDelete() bool { - return s[0] == refSpecSeparator[0] -} - -// IsExactSHA1 returns true if the source is a SHA1 hash. -func (s RefSpec) IsExactSHA1() bool { - return plumbing.IsHash(s.Src()) -} - -// Src return the src side. -func (s RefSpec) Src() string { - spec := string(s) - - var start int - if s.IsForceUpdate() { - start = 1 - } else { - start = 0 - } - - end := strings.Index(spec, refSpecSeparator) - return spec[start:end] -} - -// Match match the given plumbing.ReferenceName against the source. -func (s RefSpec) Match(n plumbing.ReferenceName) bool { - if !s.IsWildcard() { - return s.matchExact(n) - } - - return s.matchGlob(n) -} - -// IsWildcard returns true if the RefSpec contains a wildcard. -func (s RefSpec) IsWildcard() bool { - return strings.Contains(string(s), refSpecWildcard) -} - -func (s RefSpec) matchExact(n plumbing.ReferenceName) bool { - return s.Src() == n.String() -} - -func (s RefSpec) matchGlob(n plumbing.ReferenceName) bool { - src := s.Src() - name := n.String() - wildcard := strings.Index(src, refSpecWildcard) - - var prefix, suffix string - prefix = src[0:wildcard] - if len(src) > wildcard+1 { - suffix = src[wildcard+1:] - } - - return len(name) >= len(prefix)+len(suffix) && - strings.HasPrefix(name, prefix) && - strings.HasSuffix(name, suffix) -} - -// Dst returns the destination for the given remote reference. -func (s RefSpec) Dst(n plumbing.ReferenceName) plumbing.ReferenceName { - spec := string(s) - start := strings.Index(spec, refSpecSeparator) + 1 - dst := spec[start:] - src := s.Src() - - if !s.IsWildcard() { - return plumbing.ReferenceName(dst) - } - - name := n.String() - ws := strings.Index(src, refSpecWildcard) - wd := strings.Index(dst, refSpecWildcard) - match := name[ws : len(name)-(len(src)-(ws+1))] - - return plumbing.ReferenceName(dst[0:wd] + match + dst[wd+1:]) -} - -func (s RefSpec) Reverse() RefSpec { - spec := string(s) - separator := strings.Index(spec, refSpecSeparator) - - return RefSpec(spec[separator+1:] + refSpecSeparator + spec[:separator]) -} - -func (s RefSpec) String() string { - return string(s) -} - -// MatchAny returns true if any of the RefSpec match with the given ReferenceName. -func MatchAny(l []RefSpec, n plumbing.ReferenceName) bool { - for _, r := range l { - if r.Match(n) { - return true - } - } - - return false -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/doc.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/doc.go deleted file mode 100644 index 3d817fe9c8c..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// A highly extensible git implementation in pure Go. -// -// go-git aims to reach the completeness of libgit2 or jgit, nowadays covers the -// majority of the plumbing read operations and some of the main write -// operations, but lacks the main porcelain operations such as merges. -// -// It is highly extensible, we have been following the open/close principle in -// its design to facilitate extensions, mainly focusing the efforts on the -// persistence of the objects. 
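Src, Match and Dst together implement the fetch-side mapping from remote refs to local tracking refs. A sketch of a wildcard mapping:

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/config"
	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	rs := config.RefSpec("+refs/heads/*:refs/remotes/origin/*")
	ref := plumbing.ReferenceName("refs/heads/feature/x")

	if rs.Match(ref) {
		// The text captured by the src wildcard ("feature/x") is
		// substituted into the dst side.
		fmt.Println(rs.Dst(ref)) // refs/remotes/origin/feature/x
	}
}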
-package git diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/go.mod b/awsproviderlint/vendor/github.com/go-git/go-git/v5/go.mod deleted file mode 100644 index 0c9cfd2cae3..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/go.mod +++ /dev/null @@ -1,28 +0,0 @@ -module github.com/go-git/go-git/v5 - -require ( - github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 // indirect - github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 - github.com/emirpasic/gods v1.12.0 - github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect - github.com/gliderlabs/ssh v0.2.2 - github.com/go-git/gcfg v1.5.0 - github.com/go-git/go-billy/v5 v5.0.0 - github.com/go-git/go-git-fixtures/v4 v4.0.1 - github.com/google/go-cmp v0.3.0 - github.com/imdario/mergo v0.3.9 - github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 - github.com/jessevdk/go-flags v1.4.0 - github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd - github.com/mitchellh/go-homedir v1.1.0 - github.com/pkg/errors v0.8.1 // indirect - github.com/sergi/go-diff v1.1.0 - github.com/xanzy/ssh-agent v0.2.1 - golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 - golang.org/x/net v0.0.0-20200301022130-244492dfa37a - golang.org/x/text v0.3.2 - gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f - gopkg.in/warnings.v0 v0.1.2 // indirect -) - -go 1.13 diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/go.sum b/awsproviderlint/vendor/github.com/go-git/go-git/v5/go.sum deleted file mode 100644 index e14e29ae398..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/go.sum +++ /dev/null @@ -1,80 +0,0 @@ -github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= -github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= -github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= -github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= -github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= -github.com/go-git/go-billy/v5 v5.0.0 h1:7NQHvd9FVid8VL4qVUMm8XifBK+2xCoZ2lSk0agRrHM= 
-github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-git-fixtures/v4 v4.0.1 h1:q+IFMfLx200Q3scvt2hN79JsEzy4AmBTp/pqnefH+Bc= -github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= -github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= -github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY= -github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= -github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= -golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 h1:xMPOj6Pz6UipU1wXLkrtqpHbR0AVFnyPEQq/wRWz9lM= -golang.org/x/crypto 
v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/internal/revision/parser.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/internal/revision/parser.go deleted file mode 100644 index 61de386b231..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/internal/revision/parser.go +++ /dev/null @@ -1,622 +0,0 @@ -// Package revision extracts git revision from string -// More information about revision : https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html -package revision - -import ( - "bytes" - "fmt" - "io" - "regexp" - "strconv" - "time" -) - -// ErrInvalidRevision is emitted if string doesn't match valid revision -type ErrInvalidRevision struct { - s string -} - -func (e *ErrInvalidRevision) Error() string { - return "Revision invalid : " + e.s -} - -// Revisioner represents a revision component. 
-// A revision is made of multiple revision components -// obtained after parsing a revision string, -// for instance revision "master~" will be converted in -// two revision components Ref and TildePath -type Revisioner interface { -} - -// Ref represents a reference name : HEAD, master -type Ref string - -// TildePath represents ~, ~{n} -type TildePath struct { - Depth int -} - -// CaretPath represents ^, ^{n} -type CaretPath struct { - Depth int -} - -// CaretReg represents ^{/foo bar} -type CaretReg struct { - Regexp *regexp.Regexp - Negate bool -} - -// CaretType represents ^{commit} -type CaretType struct { - ObjectType string -} - -// AtReflog represents @{n} -type AtReflog struct { - Depth int -} - -// AtCheckout represents @{-n} -type AtCheckout struct { - Depth int -} - -// AtUpstream represents @{upstream}, @{u} -type AtUpstream struct { - BranchName string -} - -// AtPush represents @{push} -type AtPush struct { - BranchName string -} - -// AtDate represents @{"2006-01-02T15:04:05Z"} -type AtDate struct { - Date time.Time -} - -// ColonReg represents :/foo bar -type ColonReg struct { - Regexp *regexp.Regexp - Negate bool -} - -// ColonPath represents :./ : -type ColonPath struct { - Path string -} - -// ColonStagePath represents ::/ -type ColonStagePath struct { - Path string - Stage int -} - -// Parser represents a parser -// use to tokenize and transform to revisioner chunks -// a given string -type Parser struct { - s *scanner - currentParsedChar struct { - tok token - lit string - } - unreadLastChar bool -} - -// NewParserFromString returns a new instance of parser from a string. -func NewParserFromString(s string) *Parser { - return NewParser(bytes.NewBufferString(s)) -} - -// NewParser returns a new instance of parser. -func NewParser(r io.Reader) *Parser { - return &Parser{s: newScanner(r)} -} - -// scan returns the next token from the underlying scanner -// or the last scanned token if an unscan was requested -func (p *Parser) scan() (token, string, error) { - if p.unreadLastChar { - p.unreadLastChar = false - return p.currentParsedChar.tok, p.currentParsedChar.lit, nil - } - - tok, lit, err := p.s.scan() - - p.currentParsedChar.tok, p.currentParsedChar.lit = tok, lit - - return tok, lit, err -} - -// unscan pushes the previously read token back onto the buffer. 
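The Parse method that follows turns a revision string into a flat chain of these chunks. Since the package sits under internal/, it cannot be imported by third-party code; inside the go-git module the usage is roughly:

// Illustrative only: internal/revision is unimportable from outside go-git.
p := revision.NewParserFromString("HEAD~2^{commit}")
chunks, err := p.Parse()
if err != nil {
	panic(err)
}
// chunks is roughly []Revisioner{Ref("HEAD"), TildePath{2}, CaretType{"commit"}}
fmt.Printf("%#v\n", chunks)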
-func (p *Parser) unscan() { p.unreadLastChar = true } - -// Parse explode a revision string into revisioner chunks -func (p *Parser) Parse() ([]Revisioner, error) { - var rev Revisioner - var revs []Revisioner - var tok token - var err error - - for { - tok, _, err = p.scan() - - if err != nil { - return nil, err - } - - switch tok { - case at: - rev, err = p.parseAt() - case tilde: - rev, err = p.parseTilde() - case caret: - rev, err = p.parseCaret() - case colon: - rev, err = p.parseColon() - case eof: - err = p.validateFullRevision(&revs) - - if err != nil { - return []Revisioner{}, err - } - - return revs, nil - default: - p.unscan() - rev, err = p.parseRef() - } - - if err != nil { - return []Revisioner{}, err - } - - revs = append(revs, rev) - } -} - -// validateFullRevision ensures all revisioner chunks make a valid revision -func (p *Parser) validateFullRevision(chunks *[]Revisioner) error { - var hasReference bool - - for i, chunk := range *chunks { - switch chunk.(type) { - case Ref: - if i == 0 { - hasReference = true - } else { - return &ErrInvalidRevision{`reference must be defined once at the beginning`} - } - case AtDate: - if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { - return nil - } - - return &ErrInvalidRevision{`"@" statement is not valid, could be : @{}, @{}`} - case AtReflog: - if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { - return nil - } - - return &ErrInvalidRevision{`"@" statement is not valid, could be : @{}, @{}`} - case AtCheckout: - if len(*chunks) == 1 { - return nil - } - - return &ErrInvalidRevision{`"@" statement is not valid, could be : @{-}`} - case AtUpstream: - if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { - return nil - } - - return &ErrInvalidRevision{`"@" statement is not valid, could be : @{upstream}, @{upstream}, @{u}, @{u}`} - case AtPush: - if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { - return nil - } - - return &ErrInvalidRevision{`"@" statement is not valid, could be : @{push}, @{push}`} - case TildePath, CaretPath, CaretReg: - if !hasReference { - return &ErrInvalidRevision{`"~" or "^" statement must have a reference defined at the beginning`} - } - case ColonReg: - if len(*chunks) == 1 { - return nil - } - - return &ErrInvalidRevision{`":" statement is not valid, could be : :/`} - case ColonPath: - if i == len(*chunks)-1 && hasReference || len(*chunks) == 1 { - return nil - } - - return &ErrInvalidRevision{`":" statement is not valid, could be : :`} - case ColonStagePath: - if len(*chunks) == 1 { - return nil - } - - return &ErrInvalidRevision{`":" statement is not valid, could be : ::`} - } - } - - return nil -} - -// parseAt extract @ statements -func (p *Parser) parseAt() (Revisioner, error) { - var tok, nextTok token - var lit, nextLit string - var err error - - tok, _, err = p.scan() - - if err != nil { - return nil, err - } - - if tok != obrace { - p.unscan() - - return Ref("HEAD"), nil - } - - tok, lit, err = p.scan() - - if err != nil { - return nil, err - } - - nextTok, nextLit, err = p.scan() - - if err != nil { - return nil, err - } - - switch { - case tok == word && (lit == "u" || lit == "upstream") && nextTok == cbrace: - return AtUpstream{}, nil - case tok == word && lit == "push" && nextTok == cbrace: - return AtPush{}, nil - case tok == number && nextTok == cbrace: - n, _ := strconv.Atoi(lit) - - return AtReflog{n}, nil - case tok == minus && nextTok == number: - n, _ := strconv.Atoi(nextLit) - - t, _, err := p.scan() - - if err != nil { - return nil, err - } - - if t 
!= cbrace { - return nil, &ErrInvalidRevision{fmt.Sprintf(`missing "}" in @{-n} structure`)} - } - - return AtCheckout{n}, nil - default: - p.unscan() - - date := lit - - for { - tok, lit, err = p.scan() - - if err != nil { - return nil, err - } - - switch { - case tok == cbrace: - t, err := time.Parse("2006-01-02T15:04:05Z", date) - - if err != nil { - return nil, &ErrInvalidRevision{fmt.Sprintf(`wrong date "%s" must fit ISO-8601 format : 2006-01-02T15:04:05Z`, date)} - } - - return AtDate{t}, nil - default: - date += lit - } - } - } -} - -// parseTilde extract ~ statements -func (p *Parser) parseTilde() (Revisioner, error) { - var tok token - var lit string - var err error - - tok, lit, err = p.scan() - - if err != nil { - return nil, err - } - - switch { - case tok == number: - n, _ := strconv.Atoi(lit) - - return TildePath{n}, nil - default: - p.unscan() - return TildePath{1}, nil - } -} - -// parseCaret extract ^ statements -func (p *Parser) parseCaret() (Revisioner, error) { - var tok token - var lit string - var err error - - tok, lit, err = p.scan() - - if err != nil { - return nil, err - } - - switch { - case tok == obrace: - r, err := p.parseCaretBraces() - - if err != nil { - return nil, err - } - - return r, nil - case tok == number: - n, _ := strconv.Atoi(lit) - - if n > 2 { - return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" found must be 0, 1 or 2 after "^"`, lit)} - } - - return CaretPath{n}, nil - default: - p.unscan() - return CaretPath{1}, nil - } -} - -// parseCaretBraces extract ^{} statements -func (p *Parser) parseCaretBraces() (Revisioner, error) { - var tok, nextTok token - var lit, _ string - start := true - var re string - var negate bool - var err error - - for { - tok, lit, err = p.scan() - - if err != nil { - return nil, err - } - - nextTok, _, err = p.scan() - - if err != nil { - return nil, err - } - - switch { - case tok == word && nextTok == cbrace && (lit == "commit" || lit == "tree" || lit == "blob" || lit == "tag" || lit == "object"): - return CaretType{lit}, nil - case re == "" && tok == cbrace: - return CaretType{"tag"}, nil - case re == "" && tok == emark && nextTok == emark: - re += lit - case re == "" && tok == emark && nextTok == minus: - negate = true - case re == "" && tok == emark: - return nil, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component sequences starting with "/!" 
others than those defined are reserved`)} - case re == "" && tok == slash: - p.unscan() - case tok != slash && start: - return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" is not a valid revision suffix brace component`, lit)} - case tok != cbrace: - p.unscan() - re += lit - case tok == cbrace: - p.unscan() - - reg, err := regexp.Compile(re) - - if err != nil { - return CaretReg{}, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component, %s`, err.Error())} - } - - return CaretReg{reg, negate}, nil - } - - start = false - } -} - -// parseColon extract : statements -func (p *Parser) parseColon() (Revisioner, error) { - var tok token - var err error - - tok, _, err = p.scan() - - if err != nil { - return nil, err - } - - switch tok { - case slash: - return p.parseColonSlash() - default: - p.unscan() - return p.parseColonDefault() - } -} - -// parseColonSlash extract :/ statements -func (p *Parser) parseColonSlash() (Revisioner, error) { - var tok, nextTok token - var lit string - var re string - var negate bool - var err error - - for { - tok, lit, err = p.scan() - - if err != nil { - return nil, err - } - - nextTok, _, err = p.scan() - - if err != nil { - return nil, err - } - - switch { - case tok == emark && nextTok == emark: - re += lit - case re == "" && tok == emark && nextTok == minus: - negate = true - case re == "" && tok == emark: - return nil, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component sequences starting with "/!" others than those defined are reserved`)} - case tok == eof: - p.unscan() - reg, err := regexp.Compile(re) - - if err != nil { - return ColonReg{}, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component, %s`, err.Error())} - } - - return ColonReg{reg, negate}, nil - default: - p.unscan() - re += lit - } - } -} - -// parseColonDefault extract : statements -func (p *Parser) parseColonDefault() (Revisioner, error) { - var tok token - var lit string - var path string - var stage int - var err error - var n = -1 - - tok, lit, err = p.scan() - - if err != nil { - return nil, err - } - - nextTok, _, err := p.scan() - - if err != nil { - return nil, err - } - - if tok == number && nextTok == colon { - n, _ = strconv.Atoi(lit) - } - - switch n { - case 0, 1, 2, 3: - stage = n - default: - path += lit - p.unscan() - } - - for { - tok, lit, err = p.scan() - - if err != nil { - return nil, err - } - - switch { - case tok == eof && n == -1: - return ColonPath{path}, nil - case tok == eof: - return ColonStagePath{path, stage}, nil - default: - path += lit - } - } -} - -// parseRef extract reference name -func (p *Parser) parseRef() (Revisioner, error) { - var tok, prevTok token - var lit, buf string - var endOfRef bool - var err error - - for { - tok, lit, err = p.scan() - - if err != nil { - return nil, err - } - - switch tok { - case eof, at, colon, tilde, caret: - endOfRef = true - } - - err := p.checkRefFormat(tok, lit, prevTok, buf, endOfRef) - - if err != nil { - return "", err - } - - if endOfRef { - p.unscan() - return Ref(buf), nil - } - - buf += lit - prevTok = tok - } -} - -// checkRefFormat ensure reference name follow rules defined here : -// https://git-scm.com/docs/git-check-ref-format -func (p *Parser) checkRefFormat(token token, literal string, previousToken token, buffer string, endOfRef bool) error { - switch token { - case aslash, space, control, qmark, asterisk, obracket: - return &ErrInvalidRevision{fmt.Sprintf(`must not contains "%s"`, literal)} - } - - switch { - case (token == dot || token == slash) && buffer == "": - 
return &ErrInvalidRevision{fmt.Sprintf(`must not start with "%s"`, literal)} - case previousToken == slash && endOfRef: - return &ErrInvalidRevision{`must not end with "/"`} - case previousToken == dot && endOfRef: - return &ErrInvalidRevision{`must not end with "."`} - case token == dot && previousToken == slash: - return &ErrInvalidRevision{`must not contains "/."`} - case previousToken == dot && token == dot: - return &ErrInvalidRevision{`must not contains ".."`} - case previousToken == slash && token == slash: - return &ErrInvalidRevision{`must not contains consecutively "/"`} - case (token == slash || endOfRef) && len(buffer) > 4 && buffer[len(buffer)-5:] == ".lock": - return &ErrInvalidRevision{"cannot end with .lock"} - } - - return nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/internal/revision/scanner.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/internal/revision/scanner.go deleted file mode 100644 index c46c21b7959..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/internal/revision/scanner.go +++ /dev/null @@ -1,117 +0,0 @@ -package revision - -import ( - "bufio" - "io" - "unicode" -) - -// runeCategoryValidator takes a rune as input and -// validates it belongs to a rune category -type runeCategoryValidator func(r rune) bool - -// tokenizeExpression aggregates a series of runes matching check predicate into a single -// string and provides given tokenType as token type -func tokenizeExpression(ch rune, tokenType token, check runeCategoryValidator, r *bufio.Reader) (token, string, error) { - var data []rune - data = append(data, ch) - - for { - c, _, err := r.ReadRune() - - if c == zeroRune { - break - } - - if err != nil { - return tokenError, "", err - } - - if check(c) { - data = append(data, c) - } else { - err := r.UnreadRune() - - if err != nil { - return tokenError, "", err - } - - return tokenType, string(data), nil - } - } - - return tokenType, string(data), nil -} - -var zeroRune = rune(0) - -// scanner represents a lexical scanner. -type scanner struct { - r *bufio.Reader -} - -// newScanner returns a new instance of scanner. 
-func newScanner(r io.Reader) *scanner { - return &scanner{r: bufio.NewReader(r)} -} - -// Scan extracts tokens and their strings counterpart -// from the reader -func (s *scanner) scan() (token, string, error) { - ch, _, err := s.r.ReadRune() - - if err != nil && err != io.EOF { - return tokenError, "", err - } - - switch ch { - case zeroRune: - return eof, "", nil - case ':': - return colon, string(ch), nil - case '~': - return tilde, string(ch), nil - case '^': - return caret, string(ch), nil - case '.': - return dot, string(ch), nil - case '/': - return slash, string(ch), nil - case '{': - return obrace, string(ch), nil - case '}': - return cbrace, string(ch), nil - case '-': - return minus, string(ch), nil - case '@': - return at, string(ch), nil - case '\\': - return aslash, string(ch), nil - case '?': - return qmark, string(ch), nil - case '*': - return asterisk, string(ch), nil - case '[': - return obracket, string(ch), nil - case '!': - return emark, string(ch), nil - } - - if unicode.IsSpace(ch) { - return space, string(ch), nil - } - - if unicode.IsControl(ch) { - return control, string(ch), nil - } - - if unicode.IsLetter(ch) { - return tokenizeExpression(ch, word, unicode.IsLetter, s.r) - } - - if unicode.IsNumber(ch) { - return tokenizeExpression(ch, number, unicode.IsNumber, s.r) - } - - return tokenError, string(ch), nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/internal/revision/token.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/internal/revision/token.go deleted file mode 100644 index abc40488693..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/internal/revision/token.go +++ /dev/null @@ -1,28 +0,0 @@ -package revision - -// token represents a entity extracted from string parsing -type token int - -const ( - eof token = iota - - aslash - asterisk - at - caret - cbrace - colon - control - dot - emark - minus - number - obrace - obracket - qmark - slash - space - tilde - tokenError - word -) diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/internal/url/url.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/internal/url/url.go deleted file mode 100644 index 14cf133de8f..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/internal/url/url.go +++ /dev/null @@ -1,37 +0,0 @@ -package url - -import ( - "regexp" -) - -var ( - isSchemeRegExp = regexp.MustCompile(`^[^:]+://`) - scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P[^@]+)@)?(?P[^:\s]+):(?:(?P[0-9]{1,5})(?:\/|:))?(?P[^\\].*\/[^\\].*)$`) -) - -// MatchesScheme returns true if the given string matches a URL-like -// format scheme. -func MatchesScheme(url string) bool { - return isSchemeRegExp.MatchString(url) -} - -// MatchesScpLike returns true if the given string matches an SCP-like -// format scheme. -func MatchesScpLike(url string) bool { - return scpLikeUrlRegExp.MatchString(url) -} - -// FindScpLikeComponents returns the user, host, port and path of the -// given SCP-like URL. -func FindScpLikeComponents(url string) (user, host, port, path string) { - m := scpLikeUrlRegExp.FindStringSubmatch(url) - return m[1], m[2], m[3], m[4] -} - -// IsLocalEndpoint returns true if the given URL string specifies a -// local file endpoint. For example, on a Linux machine, -// `/home/user/src/go-git` would match as a local endpoint, but -// `https://github.com/src-d/go-git` would not. 
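These matchers classify the endpoint strings accepted for remote URLs. Like the revision parser, the package is internal, so the calls below only compile inside the go-git module:

// Illustrative only: internal/url is unimportable from outside go-git.
fmt.Println(url.MatchesScheme("https://github.com/go-git/go-git"))  // true
fmt.Println(url.MatchesScpLike("git@github.com:go-git/go-git.git")) // true

// Yields "git", "github.com", "" (no port), "go-git/go-git.git".
user, host, port, path := url.FindScpLikeComponents("git@github.com:go-git/go-git.git")
fmt.Println(user, host, port, path)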
-func IsLocalEndpoint(url string) bool { - return !MatchesScheme(url) && !MatchesScpLike(url) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/object_walker.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/object_walker.go deleted file mode 100644 index 3fcdd2999cd..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/object_walker.go +++ /dev/null @@ -1,104 +0,0 @@ -package git - -import ( - "fmt" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/filemode" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/storage" -) - -type objectWalker struct { - Storer storage.Storer - // seen is the set of objects seen in the repo. - // seen map can become huge if walking over large - // repos. Thus using struct{} as the value type. - seen map[plumbing.Hash]struct{} -} - -func newObjectWalker(s storage.Storer) *objectWalker { - return &objectWalker{s, map[plumbing.Hash]struct{}{}} -} - -// walkAllRefs walks all (hash) references from the repo. -func (p *objectWalker) walkAllRefs() error { - // Walk over all the references in the repo. - it, err := p.Storer.IterReferences() - if err != nil { - return err - } - defer it.Close() - err = it.ForEach(func(ref *plumbing.Reference) error { - // Exit this iteration early for non-hash references. - if ref.Type() != plumbing.HashReference { - return nil - } - return p.walkObjectTree(ref.Hash()) - }) - return err -} - -func (p *objectWalker) isSeen(hash plumbing.Hash) bool { - _, seen := p.seen[hash] - return seen -} - -func (p *objectWalker) add(hash plumbing.Hash) { - p.seen[hash] = struct{}{} -} - -// walkObjectTree walks over all objects and remembers references -// to them in the objectWalker. This is used instead of the revlist -// walks because memory usage is tight with huge repos. -func (p *objectWalker) walkObjectTree(hash plumbing.Hash) error { - // Check if we have already seen, and mark this object - if p.isSeen(hash) { - return nil - } - p.add(hash) - // Fetch the object. - obj, err := object.GetObject(p.Storer, hash) - if err != nil { - return fmt.Errorf("Getting object %s failed: %v", hash, err) - } - // Walk all children depending on object type. - switch obj := obj.(type) { - case *object.Commit: - err = p.walkObjectTree(obj.TreeHash) - if err != nil { - return err - } - for _, h := range obj.ParentHashes { - err = p.walkObjectTree(h) - if err != nil { - return err - } - } - case *object.Tree: - for i := range obj.Entries { - // Shortcut for blob objects: - // 'or' the lower bits of a mode and check that it - // it matches a filemode.Executable. The type information - // is in the higher bits, but this is the cleanest way - // to handle plain files with different modes. - // Other non-tree objects are somewhat rare, so they - // are not special-cased. - if obj.Entries[i].Mode|0755 == filemode.Executable { - p.add(obj.Entries[i].Hash) - continue - } - // Normal walk for sub-trees (and symlinks etc). - err = p.walkObjectTree(obj.Entries[i].Hash) - if err != nil { - return err - } - } - case *object.Tag: - return p.walkObjectTree(obj.Target) - default: - // Error out on unhandled object types. 
- return fmt.Errorf("Unknown object %X %s %T\n", obj.ID(), obj.Type(), obj) - } - return nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/options.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/options.go deleted file mode 100644 index 5367031f426..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/options.go +++ /dev/null @@ -1,551 +0,0 @@ -package git - -import ( - "errors" - "regexp" - "strings" - "time" - - "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband" - "github.com/go-git/go-git/v5/plumbing/transport" - "golang.org/x/crypto/openpgp" -) - -// SubmoduleRescursivity defines how depth will affect any submodule recursive -// operation. -type SubmoduleRescursivity uint - -const ( - // DefaultRemoteName name of the default Remote, just like git command. - DefaultRemoteName = "origin" - - // NoRecurseSubmodules disables the recursion for a submodule operation. - NoRecurseSubmodules SubmoduleRescursivity = 0 - // DefaultSubmoduleRecursionDepth allow recursion in a submodule operation. - DefaultSubmoduleRecursionDepth SubmoduleRescursivity = 10 -) - -var ( - ErrMissingURL = errors.New("URL field is required") -) - -// CloneOptions describes how a clone should be performed. -type CloneOptions struct { - // The (possibly remote) repository URL to clone from. - URL string - // Auth credentials, if required, to use with the remote repository. - Auth transport.AuthMethod - // Name of the remote to be added, by default `origin`. - RemoteName string - // Remote branch to clone. - ReferenceName plumbing.ReferenceName - // Fetch only ReferenceName if true. - SingleBranch bool - // No checkout of HEAD after clone if true. - NoCheckout bool - // Limit fetching to the specified number of commits. - Depth int - // RecurseSubmodules after the clone is created, initialize all submodules - // within, using their default settings. This option is ignored if the - // cloned repository does not have a worktree. - RecurseSubmodules SubmoduleRescursivity - // Progress is where the human readable information sent by the server is - // stored, if nil nothing is stored and the capability (if supported) - // no-progress, is sent to the server to avoid send this information. - Progress sideband.Progress - // Tags describe how the tags will be fetched from the remote repository, - // by default is AllTags. - Tags TagMode -} - -// Validate validates the fields and sets the default values. -func (o *CloneOptions) Validate() error { - if o.URL == "" { - return ErrMissingURL - } - - if o.RemoteName == "" { - o.RemoteName = DefaultRemoteName - } - - if o.ReferenceName == "" { - o.ReferenceName = plumbing.HEAD - } - - if o.Tags == InvalidTagMode { - o.Tags = AllTags - } - - return nil -} - -// PullOptions describes how a pull should be performed. -type PullOptions struct { - // Name of the remote to be pulled. If empty, uses the default. - RemoteName string - // Remote branch to clone. If empty, uses HEAD. - ReferenceName plumbing.ReferenceName - // Fetch only ReferenceName if true. - SingleBranch bool - // Limit fetching to the specified number of commits. - Depth int - // Auth credentials, if required, to use with the remote repository. - Auth transport.AuthMethod - // RecurseSubmodules controls if new commits of all populated submodules - // should be fetched too. 
- RecurseSubmodules SubmoduleRescursivity - // Progress is where the human readable information sent by the server is - // stored, if nil nothing is stored and the capability (if supported) - // no-progress, is sent to the server to avoid send this information. - Progress sideband.Progress - // Force allows the pull to update a local branch even when the remote - // branch does not descend from it. - Force bool -} - -// Validate validates the fields and sets the default values. -func (o *PullOptions) Validate() error { - if o.RemoteName == "" { - o.RemoteName = DefaultRemoteName - } - - if o.ReferenceName == "" { - o.ReferenceName = plumbing.HEAD - } - - return nil -} - -type TagMode int - -const ( - InvalidTagMode TagMode = iota - // TagFollowing any tag that points into the histories being fetched is also - // fetched. TagFollowing requires a server with `include-tag` capability - // in order to fetch the annotated tags objects. - TagFollowing - // AllTags fetch all tags from the remote (i.e., fetch remote tags - // refs/tags/* into local tags with the same name) - AllTags - //NoTags fetch no tags from the remote at all - NoTags -) - -// FetchOptions describes how a fetch should be performed -type FetchOptions struct { - // Name of the remote to fetch from. Defaults to origin. - RemoteName string - RefSpecs []config.RefSpec - // Depth limit fetching to the specified number of commits from the tip of - // each remote branch history. - Depth int - // Auth credentials, if required, to use with the remote repository. - Auth transport.AuthMethod - // Progress is where the human readable information sent by the server is - // stored, if nil nothing is stored and the capability (if supported) - // no-progress, is sent to the server to avoid send this information. - Progress sideband.Progress - // Tags describe how the tags will be fetched from the remote repository, - // by default is TagFollowing. - Tags TagMode - // Force allows the fetch to update a local branch even when the remote - // branch does not descend from it. - Force bool -} - -// Validate validates the fields and sets the default values. -func (o *FetchOptions) Validate() error { - if o.RemoteName == "" { - o.RemoteName = DefaultRemoteName - } - - if o.Tags == InvalidTagMode { - o.Tags = TagFollowing - } - - for _, r := range o.RefSpecs { - if err := r.Validate(); err != nil { - return err - } - } - - return nil -} - -// PushOptions describes how a push should be performed. -type PushOptions struct { - // RemoteName is the name of the remote to be pushed to. - RemoteName string - // RefSpecs specify what destination ref to update with what source - // object. A refspec with empty src can be used to delete a reference. - RefSpecs []config.RefSpec - // Auth credentials, if required, to use with the remote repository. - Auth transport.AuthMethod - // Progress is where the human readable information sent by the server is - // stored, if nil nothing is stored. - Progress sideband.Progress - // Prune specify that remote refs that match given RefSpecs and that do - // not exist locally will be removed. - Prune bool - // Force allows the push to update a remote branch even when the local - // branch does not descend from it. - Force bool -} - -// Validate validates the fields and sets the default values. 
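As with the config types, Validate is also the defaulting hook here: a zero-value FetchOptions comes back with RemoteName "origin" and Tags set to TagFollowing. A sketch against the public API:

package main

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
)

func main() {
	o := &git.FetchOptions{}
	if err := o.Validate(); err != nil {
		panic(err)
	}
	fmt.Println(o.RemoteName, o.Tags == git.TagFollowing) // origin true
}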
-func (o *PushOptions) Validate() error { - if o.RemoteName == "" { - o.RemoteName = DefaultRemoteName - } - - if len(o.RefSpecs) == 0 { - o.RefSpecs = []config.RefSpec{ - config.RefSpec(config.DefaultPushRefSpec), - } - } - - for _, r := range o.RefSpecs { - if err := r.Validate(); err != nil { - return err - } - } - - return nil -} - -// SubmoduleUpdateOptions describes how a submodule update should be performed. -type SubmoduleUpdateOptions struct { - // Init, if true initializes the submodules recorded in the index. - Init bool - // NoFetch tell to the update command to not fetch new objects from the - // remote site. - NoFetch bool - // RecurseSubmodules the update is performed not only in the submodules of - // the current repository but also in any nested submodules inside those - // submodules (and so on). Until the SubmoduleRescursivity is reached. - RecurseSubmodules SubmoduleRescursivity - // Auth credentials, if required, to use with the remote repository. - Auth transport.AuthMethod -} - -var ( - ErrBranchHashExclusive = errors.New("Branch and Hash are mutually exclusive") - ErrCreateRequiresBranch = errors.New("Branch is mandatory when Create is used") -) - -// CheckoutOptions describes how a checkout operation should be performed. -type CheckoutOptions struct { - // Hash is the hash of the commit to be checked out. If used, HEAD will be - // in detached mode. If Create is not used, Branch and Hash are mutually - // exclusive. - Hash plumbing.Hash - // Branch to be checked out, if Branch and Hash are empty is set to `master`. - Branch plumbing.ReferenceName - // Create a new branch named Branch and start it at Hash. - Create bool - // Force, if true when switching branches, proceed even if the index or the - // working tree differs from HEAD. This is used to throw away local changes - Force bool - // Keep, if true when switching branches, local changes (the index or the - // working tree changes) will be kept so that they can be committed to the - // target branch. Force and Keep are mutually exclusive, should not be both - // set to true. - Keep bool -} - -// Validate validates the fields and sets the default values. -func (o *CheckoutOptions) Validate() error { - if !o.Create && !o.Hash.IsZero() && o.Branch != "" { - return ErrBranchHashExclusive - } - - if o.Create && o.Branch == "" { - return ErrCreateRequiresBranch - } - - if o.Branch == "" { - o.Branch = plumbing.Master - } - - return nil -} - -// ResetMode defines the mode of a reset operation. -type ResetMode int8 - -const ( - // MixedReset resets the index but not the working tree (i.e., the changed - // files are preserved but not marked for commit) and reports what has not - // been updated. This is the default action. - MixedReset ResetMode = iota - // HardReset resets the index and working tree. Any changes to tracked files - // in the working tree are discarded. - HardReset - // MergeReset resets the index and updates the files in the working tree - // that are different between Commit and HEAD, but keeps those which are - // different between the index and working tree (i.e. which have changes - // which have not been added). - // - // If a file that is different between Commit and the index has unstaged - // changes, reset is aborted. - MergeReset - // SoftReset does not touch the index file or the working tree at all (but - // resets the head to , just like all modes do). This leaves all - // your changed files "Changes to be committed", as git status would put it. 
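CheckoutOptions encodes the branch/hash exclusivity in Validate rather than at call time. A sketch (plumbing.NewHash is public go-git API not shown in this hunk; the hash is an arbitrary example):

package main

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	// Without Create, Branch and Hash are mutually exclusive.
	bad := &git.CheckoutOptions{
		Hash:   plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"),
		Branch: "refs/heads/feature",
	}
	fmt.Println(bad.Validate()) // Branch and Hash are mutually exclusive

	// With Create, a Branch is mandatory and Hash is the start point.
	ok := &git.CheckoutOptions{Create: true, Branch: "refs/heads/feature"}
	fmt.Println(ok.Validate()) // <nil>
}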
- SoftReset
-)
-
-// ResetOptions describes how a reset operation should be performed.
-type ResetOptions struct {
- // Commit, if present, sets the current branch head (HEAD) to it.
- Commit plumbing.Hash
- // Mode controls how the reset is performed: the current branch head is
- // reset to Commit, and the index (resetting it to the tree of Commit) and
- // the working tree are possibly updated depending on Mode. If empty,
- // MixedReset is used.
- Mode ResetMode
-}
-
-// Validate validates the fields and sets the default values.
-func (o *ResetOptions) Validate(r *Repository) error {
- if o.Commit == plumbing.ZeroHash {
- ref, err := r.Head()
- if err != nil {
- return err
- }
-
- o.Commit = ref.Hash()
- }
-
- return nil
-}
-
-type LogOrder int8
-
-const (
- LogOrderDefault LogOrder = iota
- LogOrderDFS
- LogOrderDFSPost
- LogOrderBSF
- LogOrderCommitterTime
-)
-
-// LogOptions describes how a log action should be performed.
-type LogOptions struct {
- // When the From option is set the log will only contain commits
- // reachable from it. If this option is not set, HEAD will be used as
- // the default From.
- From plumbing.Hash
-
- // The default traversal algorithm is depth-first search.
- // Set Order=LogOrderCommitterTime for ordering by committer time (more
- // compatible with `git log`); set Order=LogOrderBSF for breadth-first
- // search.
- Order LogOrder
-
- // Show only those commits in which the specified file was inserted/updated.
- // It is equivalent to running `git log -- <path>`.
- // This field is kept for compatibility; it can be replaced with PathFilter.
- FileName *string
-
- // Filter commits based on the path of files that are updated. It takes a
- // file path as argument and should return true if the file is desired.
- // It can be used to implement `git log -- <path>`, where <path> is either
- // a file path, a directory path, or a regexp of file/directory paths.
- PathFilter func(string) bool
-
- // Pretend as if all the refs in refs/, along with HEAD, are listed on the
- // command line as <commit>.
- // It is equivalent to running `git log --all`.
- // If set to true, the From option will be ignored.
- All bool
-
- // Show commits more recent than a specific date.
- // It is equivalent to running `git log --since <date>` or
- // `git log --after <date>`.
- Since *time.Time
-
- // Show commits older than a specific date.
- // It is equivalent to running `git log --until <date>` or
- // `git log --before <date>`.
- Until *time.Time
-}
-
-var (
- ErrMissingAuthor = errors.New("author field is required")
-)
-
-// CommitOptions describes how a commit operation should be performed.
-type CommitOptions struct {
- // All automatically stages files that have been modified and deleted, but
- // new files you have not told Git about are not affected.
- All bool
- // Author is the author's signature of the commit. If Author is empty, the
- // Name and Email are read from the config, and time.Now is used as When.
- Author *object.Signature
- // Committer is the committer's signature of the commit. If Committer is
- // nil the Author signature is used.
- Committer *object.Signature
- // Parents are the parent commits for the new commit; by default, when
- // len(Parents) is zero, the hash of the HEAD reference is used.
- Parents []plumbing.Hash
- // SignKey denotes a key to sign the commit with. A nil value here means the
- // commit will not be signed. The private key must be present and already
- // decrypted.
- SignKey *openpgp.Entity
-}
-
-// Validate validates the fields and sets the default values.
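A hedged sketch of how these defaults behave in practice (the author name, email, and repository handle are placeholders): Worktree.Commit runs this validation, so a nil Committer falls back to Author and empty Parents fall back to HEAD:

import (
	"time"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/object"
)

func commitExample(repo *git.Repository) error {
	w, err := repo.Worktree()
	if err != nil {
		return err
	}
	// Committer is left nil on purpose: Validate copies Author into it.
	_, err = w.Commit("illustrative commit", &git.CommitOptions{
		Author: &object.Signature{
			Name:  "Example Author",
			Email: "author@example.com",
			When:  time.Now(),
		},
	})
	return err
}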
-func (o *CommitOptions) Validate(r *Repository) error { - if o.Author == nil { - if err := o.loadConfigAuthorAndCommitter(r); err != nil { - return err - } - } - - if o.Committer == nil { - o.Committer = o.Author - } - - if len(o.Parents) == 0 { - head, err := r.Head() - if err != nil && err != plumbing.ErrReferenceNotFound { - return err - } - - if head != nil { - o.Parents = []plumbing.Hash{head.Hash()} - } - } - - return nil -} - -func (o *CommitOptions) loadConfigAuthorAndCommitter(r *Repository) error { - cfg, err := r.ConfigScoped(config.SystemScope) - if err != nil { - return err - } - - if o.Author == nil && cfg.Author.Email != "" && cfg.Author.Name != "" { - o.Author = &object.Signature{ - Name: cfg.Author.Name, - Email: cfg.Author.Email, - When: time.Now(), - } - } - - if o.Committer == nil && cfg.Committer.Email != "" && cfg.Committer.Name != "" { - o.Committer = &object.Signature{ - Name: cfg.Committer.Name, - Email: cfg.Committer.Email, - When: time.Now(), - } - } - - if o.Author == nil && cfg.User.Email != "" && cfg.User.Name != "" { - o.Author = &object.Signature{ - Name: cfg.User.Name, - Email: cfg.User.Email, - When: time.Now(), - } - } - - if o.Author == nil { - return ErrMissingAuthor - } - - return nil -} - -var ( - ErrMissingName = errors.New("name field is required") - ErrMissingTagger = errors.New("tagger field is required") - ErrMissingMessage = errors.New("message field is required") -) - -// CreateTagOptions describes how a tag object should be created. -type CreateTagOptions struct { - // Tagger defines the signature of the tag creator. - Tagger *object.Signature - // Message defines the annotation of the tag. It is canonicalized during - // validation into the format expected by git - no leading whitespace and - // ending in a newline. - Message string - // SignKey denotes a key to sign the tag with. A nil value here means the tag - // will not be signed. The private key must be present and already decrypted. - SignKey *openpgp.Entity -} - -// Validate validates the fields and sets the default values. -func (o *CreateTagOptions) Validate(r *Repository, hash plumbing.Hash) error { - if o.Tagger == nil { - return ErrMissingTagger - } - - if o.Message == "" { - return ErrMissingMessage - } - - // Canonicalize the message into the expected message format. - o.Message = strings.TrimSpace(o.Message) + "\n" - - return nil -} - -// ListOptions describes how a remote list should be performed. -type ListOptions struct { - // Auth credentials, if required, to use with the remote repository. - Auth transport.AuthMethod -} - -// CleanOptions describes how a clean should be performed. -type CleanOptions struct { - Dir bool -} - -// GrepOptions describes how a grep should be performed. -type GrepOptions struct { - // Patterns are compiled Regexp objects to be matched. - Patterns []*regexp.Regexp - // InvertMatch selects non-matching lines. - InvertMatch bool - // CommitHash is the hash of the commit from which worktree should be derived. - CommitHash plumbing.Hash - // ReferenceName is the branch or tag name from which worktree should be derived. - ReferenceName plumbing.ReferenceName - // PathSpecs are compiled Regexp objects of pathspec to use in the matching. - PathSpecs []*regexp.Regexp -} - -var ( - ErrHashOrReference = errors.New("ambiguous options, only one of CommitHash or ReferenceName can be passed") -) - -// Validate validates the fields and sets the default values. 
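For context, a small sketch of how GrepOptions is typically passed to Worktree.Grep in the public v5 API (the pattern is an arbitrary example); with CommitHash and ReferenceName both unset, the validation below falls back to the repository's HEAD:

import (
	"regexp"

	git "github.com/go-git/go-git/v5"
)

func grepTODOs(w *git.Worktree) ([]git.GrepResult, error) {
	return w.Grep(&git.GrepOptions{
		Patterns: []*regexp.Regexp{regexp.MustCompile(`TODO`)},
	})
}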
-func (o *GrepOptions) Validate(w *Worktree) error {
- if !o.CommitHash.IsZero() && o.ReferenceName != "" {
- return ErrHashOrReference
- }
-
- // If neither CommitHash nor ReferenceName is provided, use the commit
- // hash of the repository's head.
- if o.CommitHash.IsZero() && o.ReferenceName == "" {
- ref, err := w.r.Head()
- if err != nil {
- return err
- }
- o.CommitHash = ref.Hash()
- }
-
- return nil
-}
-
-// PlainOpenOptions describes how opening a plain repository should be
-// performed.
-type PlainOpenOptions struct {
- // DetectDotGit defines whether parent directories should be
- // walked until a .git directory or file is found.
- DetectDotGit bool
-}
-
-// Validate validates the fields and sets the default values.
-func (o *PlainOpenOptions) Validate() error { return nil }
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/cache/buffer_lru.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/cache/buffer_lru.go
deleted file mode 100644
index acaf1952033..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/cache/buffer_lru.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package cache
-
-import (
- "container/list"
- "sync"
-)
-
-// BufferLRU implements an object cache with an LRU eviction policy and a
-// maximum size (measured in object size).
-type BufferLRU struct {
- MaxSize FileSize
-
- actualSize FileSize
- ll *list.List
- cache map[int64]*list.Element
- mut sync.Mutex
-}
-
-// NewBufferLRU creates a new BufferLRU with the given maximum size. The maximum
-// size will never be exceeded.
-func NewBufferLRU(maxSize FileSize) *BufferLRU {
- return &BufferLRU{MaxSize: maxSize}
-}
-
-// NewBufferLRUDefault creates a new BufferLRU with the default cache size.
-func NewBufferLRUDefault() *BufferLRU {
- return &BufferLRU{MaxSize: DefaultMaxSize}
-}
-
-type buffer struct {
- Key int64
- Slice []byte
-}
-
-// Put puts a buffer into the cache. If the buffer is already in the cache, it
-// will be marked as used. Otherwise, it will be inserted. Buffers might
-// be evicted to make room for the new one.
-func (c *BufferLRU) Put(key int64, slice []byte) {
- c.mut.Lock()
- defer c.mut.Unlock()
-
- if c.cache == nil {
- c.actualSize = 0
- c.cache = make(map[int64]*list.Element, 1000)
- c.ll = list.New()
- }
-
- bufSize := FileSize(len(slice))
- if ee, ok := c.cache[key]; ok {
- oldBuf := ee.Value.(buffer)
- // in this case bufSize is a delta: new size - old size
- bufSize -= FileSize(len(oldBuf.Slice))
- c.ll.MoveToFront(ee)
- ee.Value = buffer{key, slice}
- } else {
- if bufSize > c.MaxSize {
- return
- }
- ee := c.ll.PushFront(buffer{key, slice})
- c.cache[key] = ee
- }
-
- c.actualSize += bufSize
- for c.actualSize > c.MaxSize {
- last := c.ll.Back()
- lastObj := last.Value.(buffer)
- lastSize := FileSize(len(lastObj.Slice))
-
- c.ll.Remove(last)
- delete(c.cache, lastObj.Key)
- c.actualSize -= lastSize
- }
-}
-
-// Get returns a buffer by its key. It marks the buffer as used. If the buffer
-// is not in the cache, (nil, false) will be returned.
-func (c *BufferLRU) Get(key int64) ([]byte, bool) {
- c.mut.Lock()
- defer c.mut.Unlock()
-
- ee, ok := c.cache[key]
- if !ok {
- return nil, false
- }
-
- c.ll.MoveToFront(ee)
- return ee.Value.(buffer).Slice, true
-}
-
-// Clear the content of this buffer cache.
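A minimal usage sketch of this cache, built only from the exported API above (the key and sizes are arbitrary):

import "github.com/go-git/go-git/v5/plumbing/cache"

func bufferCacheExample() {
	c := cache.NewBufferLRU(16 * cache.KiByte)
	c.Put(1, []byte("packed object bytes"))
	if b, ok := c.Get(1); ok {
		_ = b // cache hit; the entry is now the most recently used
	}
	c.Clear() // drop every buffer and reset the accounted size
}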
-func (c *BufferLRU) Clear() {
- c.mut.Lock()
- defer c.mut.Unlock()
-
- c.ll = nil
- c.cache = nil
- c.actualSize = 0
-}
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/cache/common.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/cache/common.go
deleted file mode 100644
index 7b0d0c76bb3..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/cache/common.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package cache
-
-import "github.com/go-git/go-git/v5/plumbing"
-
-const (
- Byte FileSize = 1 << (iota * 10)
- KiByte
- MiByte
- GiByte
-)
-
-type FileSize int64
-
-const DefaultMaxSize FileSize = 96 * MiByte
-
-// Object is an interface to an object cache.
-type Object interface {
- // Put puts the given object into the cache. Whether this object will
- // actually be put into the cache or not is implementation specific.
- Put(o plumbing.EncodedObject)
- // Get gets an object from the cache given its hash. The second return value
- // is true if the object was returned, and false otherwise.
- Get(k plumbing.Hash) (plumbing.EncodedObject, bool)
- // Clear clears every object from the cache.
- Clear()
-}
-
-// Buffer is an interface to a buffer cache.
-type Buffer interface {
- // Put puts a buffer into the cache. If the buffer is already in the cache,
- // it will be marked as used. Otherwise, it will be inserted. Buffers might
- // be evicted to make room for the new one.
- Put(key int64, slice []byte)
- // Get returns a buffer by its key. It marks the buffer as used. If the
- // buffer is not in the cache, (nil, false) will be returned.
- Get(key int64) ([]byte, bool)
- // Clear clears every object from the cache.
- Clear()
-}
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/cache/object_lru.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/cache/object_lru.go
deleted file mode 100644
index c50d0d1e6c5..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/cache/object_lru.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package cache
-
-import (
- "container/list"
- "sync"
-
- "github.com/go-git/go-git/v5/plumbing"
-)
-
-// ObjectLRU implements an object cache with an LRU eviction policy and a
-// maximum size (measured in object size).
-type ObjectLRU struct {
- MaxSize FileSize
-
- actualSize FileSize
- ll *list.List
- cache map[interface{}]*list.Element
- mut sync.Mutex
-}
-
-// NewObjectLRU creates a new ObjectLRU with the given maximum size. The maximum
-// size will never be exceeded.
-func NewObjectLRU(maxSize FileSize) *ObjectLRU {
- return &ObjectLRU{MaxSize: maxSize}
-}
-
-// NewObjectLRUDefault creates a new ObjectLRU with the default cache size.
-func NewObjectLRUDefault() *ObjectLRU {
- return &ObjectLRU{MaxSize: DefaultMaxSize}
-}
-
-// Put puts an object into the cache. If the object is already in the cache, it
-// will be marked as used. Otherwise, it will be inserted. A single object might
-// be evicted to make room for the new object.
-func (c *ObjectLRU) Put(obj plumbing.EncodedObject) { - c.mut.Lock() - defer c.mut.Unlock() - - if c.cache == nil { - c.actualSize = 0 - c.cache = make(map[interface{}]*list.Element, 1000) - c.ll = list.New() - } - - objSize := FileSize(obj.Size()) - key := obj.Hash() - if ee, ok := c.cache[key]; ok { - oldObj := ee.Value.(plumbing.EncodedObject) - // in this case objSize is a delta: new size - old size - objSize -= FileSize(oldObj.Size()) - c.ll.MoveToFront(ee) - ee.Value = obj - } else { - if objSize > c.MaxSize { - return - } - ee := c.ll.PushFront(obj) - c.cache[key] = ee - } - - c.actualSize += objSize - for c.actualSize > c.MaxSize { - last := c.ll.Back() - if last == nil { - c.actualSize = 0 - break - } - - lastObj := last.Value.(plumbing.EncodedObject) - lastSize := FileSize(lastObj.Size()) - - c.ll.Remove(last) - delete(c.cache, lastObj.Hash()) - c.actualSize -= lastSize - } -} - -// Get returns an object by its hash. It marks the object as used. If the object -// is not in the cache, (nil, false) will be returned. -func (c *ObjectLRU) Get(k plumbing.Hash) (plumbing.EncodedObject, bool) { - c.mut.Lock() - defer c.mut.Unlock() - - ee, ok := c.cache[k] - if !ok { - return nil, false - } - - c.ll.MoveToFront(ee) - return ee.Value.(plumbing.EncodedObject), true -} - -// Clear the content of this object cache. -func (c *ObjectLRU) Clear() { - c.mut.Lock() - defer c.mut.Unlock() - - c.ll = nil - c.cache = nil - c.actualSize = 0 -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/color/color.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/color/color.go deleted file mode 100644 index 2cd74bdc1a8..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/color/color.go +++ /dev/null @@ -1,38 +0,0 @@ -package color - -// TODO read colors from a github.com/go-git/go-git/plumbing/format/config.Config struct -// TODO implement color parsing, see https://github.com/git/git/blob/v2.26.2/color.c - -// Colors. See https://github.com/git/git/blob/v2.26.2/color.h#L24-L53. 
-const (
- Normal = ""
- Reset = "\033[m"
- Bold = "\033[1m"
- Red = "\033[31m"
- Green = "\033[32m"
- Yellow = "\033[33m"
- Blue = "\033[34m"
- Magenta = "\033[35m"
- Cyan = "\033[36m"
- BoldRed = "\033[1;31m"
- BoldGreen = "\033[1;32m"
- BoldYellow = "\033[1;33m"
- BoldBlue = "\033[1;34m"
- BoldMagenta = "\033[1;35m"
- BoldCyan = "\033[1;36m"
- FaintRed = "\033[2;31m"
- FaintGreen = "\033[2;32m"
- FaintYellow = "\033[2;33m"
- FaintBlue = "\033[2;34m"
- FaintMagenta = "\033[2;35m"
- FaintCyan = "\033[2;36m"
- BgRed = "\033[41m"
- BgGreen = "\033[42m"
- BgYellow = "\033[43m"
- BgBlue = "\033[44m"
- BgMagenta = "\033[45m"
- BgCyan = "\033[46m"
- Faint = "\033[2m"
- FaintItalic = "\033[2;3m"
- Reverse = "\033[7m"
-)
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/error.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/error.go
deleted file mode 100644
index a3ebed3f6c2..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/error.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package plumbing
-
-import "fmt"
-
-type PermanentError struct {
- Err error
-}
-
-func NewPermanentError(err error) *PermanentError {
- if err == nil {
- return nil
- }
-
- return &PermanentError{Err: err}
-}
-
-func (e *PermanentError) Error() string {
- return fmt.Sprintf("permanent client error: %s", e.Err.Error())
-}
-
-type UnexpectedError struct {
- Err error
-}
-
-func NewUnexpectedError(err error) *UnexpectedError {
- if err == nil {
- return nil
- }
-
- return &UnexpectedError{Err: err}
-}
-
-func (e *UnexpectedError) Error() string {
- return fmt.Sprintf("unexpected client error: %s", e.Err.Error())
-}
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go
deleted file mode 100644
index 594984f9ed1..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package filemode
-
-import (
- "encoding/binary"
- "fmt"
- "os"
- "strconv"
-)
-
-// A FileMode represents the kind of tree entries used by git. It
-// resembles regular file system modes, although FileModes are
-// considerably simpler (there are not so many), and there are some,
-// like Submodule, that have no file system equivalent.
-type FileMode uint32
-
-const (
- // Empty is used as the FileMode of tree elements when comparing
- // trees in the following situations:
- //
- // - the mode of tree elements before their creation.
- // - the mode of tree elements after their deletion.
- // - the mode of unmerged elements when checking the index.
- //
- // Empty has no file system equivalent. As Empty is the zero value
- // of FileMode, it is also returned by New and
- // NewFromOSFileMode along with an error, when they fail.
- Empty FileMode = 0
- // Dir represents a directory.
- Dir FileMode = 0040000
- // Regular represents non-executable files. Please note this is not
- // the same as golang regular files, which include executable files.
- Regular FileMode = 0100644
- // Deprecated represents non-executable files with the group writable
- // bit set. This mode was supported by the first versions of git,
- // but it has been deprecated nowadays. This library uses them
- // internally, so you can read old packfiles, but will treat them as
- // Regulars when interfacing with the outside world. This is the
- // standard git behaviour.
- Deprecated FileMode = 0100664
- // Executable represents executable files.
- Executable FileMode = 0100755
- // Symlink represents symbolic links to files.
- Symlink FileMode = 0120000
- // Submodule represents git submodules. This mode has no file system
- // equivalent.
- Submodule FileMode = 0160000
-)
-
-// New takes the octal string representation of a FileMode and returns
-// the FileMode and a nil error. If the string can not be parsed to a
-// 32 bit unsigned octal number, it returns Empty and the parsing error.
-//
-// Example: "40000" means Dir, "100644" means Regular.
-//
-// Please note this function does not check if the returned FileMode
-// is valid in git or if it is malformed. For instance, "1" will
-// return the malformed FileMode(1) and a nil error.
-func New(s string) (FileMode, error) {
- n, err := strconv.ParseUint(s, 8, 32)
- if err != nil {
- return Empty, err
- }
-
- return FileMode(n), nil
-}
-
-// NewFromOSFileMode returns the FileMode used by git to represent
-// the provided file system modes and a nil error on success. If the
-// file system mode cannot be mapped to any valid git mode (as with
-// sockets or named pipes), it will return Empty and an error.
-//
-// Note that some git modes cannot be generated from os.FileModes, like
-// Deprecated and Submodule; while Empty will be returned, along with an
-// error, only when the method fails.
-func NewFromOSFileMode(m os.FileMode) (FileMode, error) {
- if m.IsRegular() {
- if isSetTemporary(m) {
- return Empty, fmt.Errorf("no equivalent git mode for %s", m)
- }
- if isSetCharDevice(m) {
- return Empty, fmt.Errorf("no equivalent git mode for %s", m)
- }
- if isSetUserExecutable(m) {
- return Executable, nil
- }
- return Regular, nil
- }
-
- if m.IsDir() {
- return Dir, nil
- }
-
- if isSetSymLink(m) {
- return Symlink, nil
- }
-
- return Empty, fmt.Errorf("no equivalent git mode for %s", m)
-}
-
-func isSetCharDevice(m os.FileMode) bool {
- return m&os.ModeCharDevice != 0
-}
-
-func isSetTemporary(m os.FileMode) bool {
- return m&os.ModeTemporary != 0
-}
-
-func isSetUserExecutable(m os.FileMode) bool {
- return m&0100 != 0
-}
-
-func isSetSymLink(m os.FileMode) bool {
- return m&os.ModeSymlink != 0
-}
-
-// Bytes returns a slice of 4 bytes with the mode in little endian
-// encoding.
-func (m FileMode) Bytes() []byte {
- ret := make([]byte, 4)
- binary.LittleEndian.PutUint32(ret, uint32(m))
- return ret[:]
-}
-
-// IsMalformed returns whether the FileMode should not appear in a git packfile,
-// this is: Empty and any other mode not mentioned as a constant in this
-// package.
-func (m FileMode) IsMalformed() bool {
- return m != Dir &&
- m != Regular &&
- m != Deprecated &&
- m != Executable &&
- m != Symlink &&
- m != Submodule
-}
-
-// String returns the FileMode as a string in the standard git format,
-// that is, an octal number padded with zeros to 7 digits. Malformed
-// modes are printed in that same format, for easier debugging.
-//
-// Example: Regular is "0100644", Empty is "0000000".
-func (m FileMode) String() string {
- return fmt.Sprintf("%07o", uint32(m))
-}
-
-// IsRegular returns whether the FileMode represents a regular file,
-// that is, either Regular or Deprecated. Please note that Executable
-// files are not regular, even though in the UNIX tradition they usually
-// are; see the IsFile method.
-func (m FileMode) IsRegular() bool {
- return m == Regular ||
- m == Deprecated
-}
-
-// IsFile returns whether the FileMode represents a file, that is,
-// Regular, Deprecated, Executable or Symlink.
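A short sketch of the conversions defined here (the octal strings mirror the examples in the comments; errors are ignored for brevity):

import (
	"fmt"
	"os"

	"github.com/go-git/go-git/v5/plumbing/filemode"
)

func filemodeExample() {
	m, _ := filemode.New("100644")                            // filemode.Regular
	osMode, _ := m.ToOSFileMode()                             // os.FileMode(0644)
	exec, _ := filemode.NewFromOSFileMode(os.FileMode(0755))  // filemode.Executable
	fmt.Println(m, osMode, exec.IsFile())                     // 0100644 -rw-r--r-- true
}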
-func (m FileMode) IsFile() bool {
- return m == Regular ||
- m == Deprecated ||
- m == Executable ||
- m == Symlink
-}
-
-// ToOSFileMode returns the os.FileMode to be used when creating file
-// system elements with the given git mode and a nil error on success.
-//
-// When the provided mode cannot be mapped to a valid file system mode
-// (e.g. Submodule) it returns os.FileMode(0) and an error.
-//
-// The returned file mode does not take into account the umask.
-func (m FileMode) ToOSFileMode() (os.FileMode, error) {
- switch m {
- case Dir:
- return os.ModePerm | os.ModeDir, nil
- case Submodule:
- return os.ModePerm | os.ModeDir, nil
- case Regular:
- return os.FileMode(0644), nil
- // Deprecated is no longer allowed: treated as a Regular instead
- case Deprecated:
- return os.FileMode(0644), nil
- case Executable:
- return os.FileMode(0755), nil
- case Symlink:
- return os.ModePerm | os.ModeSymlink, nil
- }
-
- return os.FileMode(0), fmt.Errorf("malformed mode (%s)", m)
-}
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/common.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/common.go
deleted file mode 100644
index 8f98ad1741e..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/common.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package config
-
-// New creates a new config instance.
-func New() *Config {
- return &Config{}
-}
-
-// Config contains all the sections, comments and includes from a config file.
-type Config struct {
- Comment *Comment
- Sections Sections
- Includes Includes
-}
-
-// Includes is a list of Includes in a config file.
-type Includes []*Include
-
-// Include is a reference to an included config file.
-type Include struct {
- Path string
- Config *Config
-}
-
-// Comment string without the prefix '#' or ';'.
-type Comment string
-
-const (
- // NoSubsection token is passed to Config.Section and Config.SetSection to
- // represent the absence of a section.
- NoSubsection = ""
-)
-
-// Section returns an existing section with the given name or creates a new one.
-func (c *Config) Section(name string) *Section {
- for i := len(c.Sections) - 1; i >= 0; i-- {
- s := c.Sections[i]
- if s.IsName(name) {
- return s
- }
- }
-
- s := &Section{Name: name}
- c.Sections = append(c.Sections, s)
- return s
-}
-
-// AddOption adds an option to a given section and subsection. Use the
-// NoSubsection constant for the subsection argument if no subsection is wanted.
-func (c *Config) AddOption(section string, subsection string, key string, value string) *Config {
- if subsection == "" {
- c.Section(section).AddOption(key, value)
- } else {
- c.Section(section).Subsection(subsection).AddOption(key, value)
- }
-
- return c
-}
-
-// SetOption sets an option to a given section and subsection. Use the
-// NoSubsection constant for the subsection argument if no subsection is wanted.
-func (c *Config) SetOption(section string, subsection string, key string, value string) *Config {
- if subsection == "" {
- c.Section(section).SetOption(key, value)
- } else {
- c.Section(section).Subsection(subsection).SetOption(key, value)
- }
-
- return c
-}
-
-// RemoveSection removes a section from a config file.
-func (c *Config) RemoveSection(name string) *Config {
- result := Sections{}
- for _, s := range c.Sections {
- if !s.IsName(name) {
- result = append(result, s)
- }
- }
-
- c.Sections = result
- return c
-}
-
-// RemoveSubsection removes a subsection from a config file.
-func (c *Config) RemoveSubsection(section string, subsection string) *Config { - for _, s := range c.Sections { - if s.IsName(section) { - result := Subsections{} - for _, ss := range s.Subsections { - if !ss.IsName(subsection) { - result = append(result, ss) - } - } - s.Subsections = result - } - } - - return c -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/decoder.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/decoder.go deleted file mode 100644 index 8e52d57f302..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/decoder.go +++ /dev/null @@ -1,37 +0,0 @@ -package config - -import ( - "io" - - "github.com/go-git/gcfg" -) - -// A Decoder reads and decodes config files from an input stream. -type Decoder struct { - io.Reader -} - -// NewDecoder returns a new decoder that reads from r. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{r} -} - -// Decode reads the whole config from its input and stores it in the -// value pointed to by config. -func (d *Decoder) Decode(config *Config) error { - cb := func(s string, ss string, k string, v string, bv bool) error { - if ss == "" && k == "" { - config.Section(s) - return nil - } - - if ss != "" && k == "" { - config.Section(s).Subsection(ss) - return nil - } - - config.AddOption(s, ss, k, v) - return nil - } - return gcfg.ReadWithCallback(d, cb) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/doc.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/doc.go deleted file mode 100644 index 3986c836581..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/doc.go +++ /dev/null @@ -1,122 +0,0 @@ -// Package config implements encoding and decoding of git config files. -// -// Configuration File -// ------------------ -// -// The Git configuration file contains a number of variables that affect -// the Git commands' behavior. The `.git/config` file in each repository -// is used to store the configuration for that repository, and -// `$HOME/.gitconfig` is used to store a per-user configuration as -// fallback values for the `.git/config` file. The file `/etc/gitconfig` -// can be used to store a system-wide default configuration. -// -// The configuration variables are used by both the Git plumbing -// and the porcelains. The variables are divided into sections, wherein -// the fully qualified variable name of the variable itself is the last -// dot-separated segment and the section name is everything before the last -// dot. The variable names are case-insensitive, allow only alphanumeric -// characters and `-`, and must start with an alphabetic character. Some -// variables may appear multiple times; we say then that the variable is -// multivalued. -// -// Syntax -// ~~~~~~ -// -// The syntax is fairly flexible and permissive; whitespaces are mostly -// ignored. The '#' and ';' characters begin comments to the end of line, -// blank lines are ignored. -// -// The file consists of sections and variables. A section begins with -// the name of the section in square brackets and continues until the next -// section begins. Section names are case-insensitive. Only alphanumeric -// characters, `-` and `.` are allowed in section names. Each variable -// must belong to some section, which means that there must be a section -// header before the first setting of a variable. -// -// Sections can be further divided into subsections. 
To begin a subsection
-// put its name in double quotes, separated by space from the section name,
-// in the section header, like in the example below:
-//
-// --------
-// [section "subsection"]
-//
-// --------
-//
-// Subsection names are case sensitive and can contain any characters except
-// newline (doublequote `"` and backslash can be included by escaping them
-// as `\"` and `\\`, respectively). Section headers cannot span multiple
-// lines. Variables may belong directly to a section or to a given subsection.
-// You can have `[section]` if you have `[section "subsection"]`, but you
-// don't need to.
-//
-// There is also a deprecated `[section.subsection]` syntax. With this
-// syntax, the subsection name is converted to lower-case and is also
-// compared case sensitively. These subsection names follow the same
-// restrictions as section names.
-//
-// All the other lines (and the remainder of the line after the section
-// header) are recognized as setting variables, in the form
-// 'name = value' (or just 'name', which is a short-hand to say that
-// the variable is the boolean "true").
-// The variable names are case-insensitive, allow only alphanumeric characters
-// and `-`, and must start with an alphabetic character.
-//
-// A line that defines a value can be continued to the next line by
-// ending it with a `\`; the backslash and the end-of-line are
-// stripped. Leading whitespaces after 'name =', the remainder of the
-// line after the first comment character '#' or ';', and trailing
-// whitespaces of the line are discarded unless they are enclosed in
-// double quotes. Internal whitespaces within the value are retained
-// verbatim.
-//
-// Inside double quotes, double quote `"` and backslash `\` characters
-// must be escaped: use `\"` for `"` and `\\` for `\`.
-//
-// The following escape sequences (besides `\"` and `\\`) are recognized:
-// `\n` for newline character (NL), `\t` for horizontal tabulation (HT, TAB)
-// and `\b` for backspace (BS). Other char escape sequences (including octal
-// escape sequences) are invalid.
-//
-// Includes
-// ~~~~~~~~
-//
-// You can include one config file from another by setting the special
-// `include.path` variable to the name of the file to be included. The
-// variable takes a pathname as its value, and is subject to tilde
-// expansion.
-//
-// The included file is expanded immediately, as if its contents had been
-// found at the location of the include directive. If the value of the
-// `include.path` variable is a relative path, the path is considered to be
-// relative to the configuration file in which the include directive was
-// found. See below for examples.
-// -// -// Example -// ~~~~~~~ -// -// # Core variables -// [core] -// ; Don't trust file modes -// filemode = false -// -// # Our diff algorithm -// [diff] -// external = /usr/local/bin/diff-wrapper -// renames = true -// -// [branch "devel"] -// remote = origin -// merge = refs/heads/devel -// -// # Proxy settings -// [core] -// gitProxy="ssh" for "kernel.org" -// gitProxy=default-proxy ; for the rest -// -// [include] -// path = /path/to/foo.inc ; include by absolute path -// path = foo ; expand "foo" relative to the current file -// path = ~/foo ; expand "foo" in your `$HOME` directory -// -package config diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/encoder.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/encoder.go deleted file mode 100644 index 4eac8968adf..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/encoder.go +++ /dev/null @@ -1,77 +0,0 @@ -package config - -import ( - "fmt" - "io" - "strings" -) - -// An Encoder writes config files to an output stream. -type Encoder struct { - w io.Writer -} - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{w} -} - -// Encode writes the config in git config format to the stream of the encoder. -func (e *Encoder) Encode(cfg *Config) error { - for _, s := range cfg.Sections { - if err := e.encodeSection(s); err != nil { - return err - } - } - - return nil -} - -func (e *Encoder) encodeSection(s *Section) error { - if len(s.Options) > 0 { - if err := e.printf("[%s]\n", s.Name); err != nil { - return err - } - - if err := e.encodeOptions(s.Options); err != nil { - return err - } - } - - for _, ss := range s.Subsections { - if err := e.encodeSubsection(s.Name, ss); err != nil { - return err - } - } - - return nil -} - -func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error { - //TODO: escape - if err := e.printf("[%s \"%s\"]\n", sectionName, s.Name); err != nil { - return err - } - - return e.encodeOptions(s.Options) -} - -func (e *Encoder) encodeOptions(opts Options) error { - for _, o := range opts { - pattern := "\t%s = %s\n" - if strings.Contains(o.Value, "\\") { - pattern = "\t%s = %q\n" - } - - if err := e.printf(pattern, o.Key, o.Value); err != nil { - return err - } - } - - return nil -} - -func (e *Encoder) printf(msg string, args ...interface{}) error { - _, err := fmt.Fprintf(e.w, msg, args...) - return err -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/option.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/option.go deleted file mode 100644 index d4775e4f0e1..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/option.go +++ /dev/null @@ -1,117 +0,0 @@ -package config - -import ( - "fmt" - "strings" -) - -// Option defines a key/value entity in a config file. -type Option struct { - // Key preserving original caseness. - // Use IsKey instead to compare key regardless of caseness. - Key string - // Original value as string, could be not normalized. - Value string -} - -type Options []*Option - -// IsKey returns true if the given key matches -// this option's key in a case-insensitive comparison. 
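Since keys compare case-insensitively and later definitions shadow earlier ones (see Get below), a small sketch of the lookup behavior (keys and values are arbitrary):

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/format/config"
)

func optionsExample() {
	opts := config.Options{
		{Key: "url", Value: "first"},
		{Key: "URL", Value: "second"}, // same key, different caseness
	}
	fmt.Println(opts.Get("url"))    // "second": the last definition wins
	fmt.Println(opts.GetAll("url")) // [first second]
}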
-func (o *Option) IsKey(key string) bool {
- return strings.ToLower(o.Key) == strings.ToLower(key)
-}
-
-func (opts Options) GoString() string {
- var strs []string
- for _, opt := range opts {
- strs = append(strs, fmt.Sprintf("%#v", opt))
- }
-
- return strings.Join(strs, ", ")
-}
-
-// Get gets the value for the given key if set,
-// otherwise it returns the empty string.
-//
-// Note that there is no difference between an unset option and an
-// option set to the empty string.
-//
-// This matches git behaviour since git v1.8.1-rc1,
-// if there are multiple definitions of a key, the
-// last one wins.
-//
-// See: http://article.gmane.org/gmane.linux.kernel/1407184
-//
-// In order to get all possible values for the same key,
-// use GetAll.
-func (opts Options) Get(key string) string {
- for i := len(opts) - 1; i >= 0; i-- {
- o := opts[i]
- if o.IsKey(key) {
- return o.Value
- }
- }
- return ""
-}
-
-// GetAll returns all possible values for the same key.
-func (opts Options) GetAll(key string) []string {
- result := []string{}
- for _, o := range opts {
- if o.IsKey(key) {
- result = append(result, o.Value)
- }
- }
- return result
-}
-
-func (opts Options) withoutOption(key string) Options {
- result := Options{}
- for _, o := range opts {
- if !o.IsKey(key) {
- result = append(result, o)
- }
- }
- return result
-}
-
-func (opts Options) withAddedOption(key string, value string) Options {
- return append(opts, &Option{key, value})
-}
-
-func (opts Options) withSettedOption(key string, values ...string) Options {
- var result Options
- var added []string
- for _, o := range opts {
- if !o.IsKey(key) {
- result = append(result, o)
- continue
- }
-
- if contains(values, o.Value) {
- added = append(added, o.Value)
- result = append(result, o)
- continue
- }
- }
-
- for _, value := range values {
- if contains(added, value) {
- continue
- }
-
- result = result.withAddedOption(key, value)
- }
-
- return result
-}
-
-func contains(haystack []string, needle string) bool {
- for _, s := range haystack {
- if s == needle {
- return true
- }
- }
-
- return false
-}
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/section.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/section.go
deleted file mode 100644
index 4a17e3b21bd..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/config/section.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package config
-
-import (
- "fmt"
- "strings"
-)
-
-// Section is the representation of a section inside git configuration files.
-// Each Section contains Options that are used by both the Git plumbing
-// and the porcelains.
-// Sections can be further divided into subsections. To begin a subsection
-// put its name in double quotes, separated by space from the section name,
-// in the section header, like in the example below:
-//
-// [section "subsection"]
-//
-// All the other lines (and the remainder of the line after the section header)
-// are recognized as option variables, in the form "name = value" (or just name,
-// which is a short-hand to say that the variable is the boolean "true").
-// The variable names are case-insensitive, allow only alphanumeric characters
-// and -, and must start with an alphabetic character:
-//
-// [section "subsection1"]
-// option1 = value1
-// option2
-// [section "subsection2"]
-// option3 = value2
-//
-type Section struct {
- Name string
- Options Options
- Subsections Subsections
-}
-
-type Subsection struct {
- Name string
- Options Options
-}
-
-type Sections []*Section
-
-func (s Sections) GoString() string {
- var strs []string
- for _, ss := range s {
- strs = append(strs, fmt.Sprintf("%#v", ss))
- }
-
- return strings.Join(strs, ", ")
-}
-
-type Subsections []*Subsection
-
-func (s Subsections) GoString() string {
- var strs []string
- for _, ss := range s {
- strs = append(strs, fmt.Sprintf("%#v", ss))
- }
-
- return strings.Join(strs, ", ")
-}
-
-// IsName checks if the name provided is equal to the Section name, case insensitive.
-func (s *Section) IsName(name string) bool {
- return strings.ToLower(s.Name) == strings.ToLower(name)
-}
-
-// Option returns the value for the specified key. An empty string is returned
-// if the key does not exist.
-func (s *Section) Option(key string) string {
- return s.Options.Get(key)
-}
-
-// AddOption adds a new Option to the Section. The updated Section is returned.
-func (s *Section) AddOption(key string, value string) *Section {
- s.Options = s.Options.withAddedOption(key, value)
- return s
-}
-
-// SetOption adds a new Option to the Section. If the option already exists, it
-// is replaced. The updated Section is returned.
-func (s *Section) SetOption(key string, value string) *Section {
- s.Options = s.Options.withSettedOption(key, value)
- return s
-}
-
-// RemoveOption removes the option with the specified key. The updated Section
-// is returned.
-func (s *Section) RemoveOption(key string) *Section {
- s.Options = s.Options.withoutOption(key)
- return s
-}
-
-// Subsection returns a Subsection from the specified Section. If the
-// Subsection does not exist, a new one is created and added to the Section.
-func (s *Section) Subsection(name string) *Subsection {
- for i := len(s.Subsections) - 1; i >= 0; i-- {
- ss := s.Subsections[i]
- if ss.IsName(name) {
- return ss
- }
- }
-
- ss := &Subsection{Name: name}
- s.Subsections = append(s.Subsections, ss)
- return ss
-}
-
-// HasSubsection checks if the Section has a Subsection with the specified name.
-func (s *Section) HasSubsection(name string) bool {
- for _, ss := range s.Subsections {
- if ss.IsName(name) {
- return true
- }
- }
-
- return false
-}
-
-// IsName checks if the name of the subsection is exactly the specified name.
-func (s *Subsection) IsName(name string) bool {
- return s.Name == name
-}
-
-// Option returns an option with the specified key. If the option does not
-// exist, an empty string will be returned.
-func (s *Subsection) Option(key string) string {
- return s.Options.Get(key)
-}
-
-// AddOption adds a new Option to the Subsection. The updated Subsection is returned.
-func (s *Subsection) AddOption(key string, value string) *Subsection {
- s.Options = s.Options.withAddedOption(key, value)
- return s
-}
-
-// SetOption adds a new Option to the Subsection. If the option already exists, it
-// is replaced. The updated Subsection is returned.
-func (s *Subsection) SetOption(key string, value ...string) *Subsection {
- s.Options = s.Options.withSettedOption(key, value...)
- return s
-}
-
-// RemoveOption removes the option with the specified key. The updated Subsection is returned.
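As a usage sketch tying sections, subsections, and options together (the section names mirror the package example above):

import (
	"os"

	"github.com/go-git/go-git/v5/plumbing/format/config"
)

func encodeExample() error {
	cfg := config.New()
	cfg.AddOption("core", config.NoSubsection, "filemode", "false")
	cfg.AddOption("branch", "devel", "remote", "origin")
	// Writes, tab-indented by the encoder:
	//   [core]
	//           filemode = false
	//   [branch "devel"]
	//           remote = origin
	return config.NewEncoder(os.Stdout).Encode(cfg)
}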
-func (s *Subsection) RemoveOption(key string) *Subsection { - s.Options = s.Options.withoutOption(key) - return s -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/colorconfig.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/colorconfig.go deleted file mode 100644 index 6fd4158462d..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/colorconfig.go +++ /dev/null @@ -1,97 +0,0 @@ -package diff - -import "github.com/go-git/go-git/v5/plumbing/color" - -// A ColorKey is a key into a ColorConfig map and also equal to the key in the -// diff.color subsection of the config. See -// https://github.com/git/git/blob/v2.26.2/diff.c#L83-L106. -type ColorKey string - -// ColorKeys. -const ( - Context ColorKey = "context" - Meta ColorKey = "meta" - Frag ColorKey = "frag" - Old ColorKey = "old" - New ColorKey = "new" - Commit ColorKey = "commit" - Whitespace ColorKey = "whitespace" - Func ColorKey = "func" - OldMoved ColorKey = "oldMoved" - OldMovedAlternative ColorKey = "oldMovedAlternative" - OldMovedDimmed ColorKey = "oldMovedDimmed" - OldMovedAlternativeDimmed ColorKey = "oldMovedAlternativeDimmed" - NewMoved ColorKey = "newMoved" - NewMovedAlternative ColorKey = "newMovedAlternative" - NewMovedDimmed ColorKey = "newMovedDimmed" - NewMovedAlternativeDimmed ColorKey = "newMovedAlternativeDimmed" - ContextDimmed ColorKey = "contextDimmed" - OldDimmed ColorKey = "oldDimmed" - NewDimmed ColorKey = "newDimmed" - ContextBold ColorKey = "contextBold" - OldBold ColorKey = "oldBold" - NewBold ColorKey = "newBold" -) - -// A ColorConfig is a color configuration. A nil or empty ColorConfig -// corresponds to no color. -type ColorConfig map[ColorKey]string - -// A ColorConfigOption sets an option on a ColorConfig. -type ColorConfigOption func(ColorConfig) - -// WithColor sets the color for key. -func WithColor(key ColorKey, color string) ColorConfigOption { - return func(cc ColorConfig) { - cc[key] = color - } -} - -// defaultColorConfig is the default color configuration. See -// https://github.com/git/git/blob/v2.26.2/diff.c#L57-L81. -var defaultColorConfig = ColorConfig{ - Context: color.Normal, - Meta: color.Bold, - Frag: color.Cyan, - Old: color.Red, - New: color.Green, - Commit: color.Yellow, - Whitespace: color.BgRed, - Func: color.Normal, - OldMoved: color.BoldMagenta, - OldMovedAlternative: color.BoldBlue, - OldMovedDimmed: color.Faint, - OldMovedAlternativeDimmed: color.FaintItalic, - NewMoved: color.BoldCyan, - NewMovedAlternative: color.BoldYellow, - NewMovedDimmed: color.Faint, - NewMovedAlternativeDimmed: color.FaintItalic, - ContextDimmed: color.Faint, - OldDimmed: color.FaintRed, - NewDimmed: color.FaintGreen, - ContextBold: color.Bold, - OldBold: color.BoldRed, - NewBold: color.BoldGreen, -} - -// NewColorConfig returns a new ColorConfig. -func NewColorConfig(options ...ColorConfigOption) ColorConfig { - cc := make(ColorConfig) - for key, value := range defaultColorConfig { - cc[key] = value - } - for _, option := range options { - option(cc) - } - return cc -} - -// Reset returns the ANSI escape sequence to reset the color with key set from -// cc. If no color was set then no reset is needed so it returns the empty -// string. 
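A small sketch of overriding one color key and emitting a colored fragment (the override value is arbitrary):

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/color"
	"github.com/go-git/go-git/v5/plumbing/format/diff"
)

func colorExample() {
	cc := diff.NewColorConfig(diff.WithColor(diff.New, color.BoldGreen))
	fmt.Print(cc[diff.New], "+added line", cc.Reset(diff.New), "\n")
}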
-func (cc ColorConfig) Reset(key ColorKey) string {
- if cc[key] == "" {
- return ""
- }
- return color.Reset
-}
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/patch.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/patch.go
deleted file mode 100644
index 39a66a1a806..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/patch.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package diff
-
-import (
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/filemode"
-)
-
-// Operation defines the operation of a diff item.
-type Operation int
-
-const (
- // Equal item represents an equals diff.
- Equal Operation = iota
- // Add item represents an insert diff.
- Add
- // Delete item represents a delete diff.
- Delete
-)
-
-// Patch represents a collection of steps to transform several files.
-type Patch interface {
- // FilePatches returns a slice of patches per file.
- FilePatches() []FilePatch
- // Message returns an optional message that can be at the top of the
- // Patch representation.
- Message() string
-}
-
-// FilePatch represents the necessary steps to transform one file to another.
-type FilePatch interface {
- // IsBinary returns true if this patch is representing a binary file.
- IsBinary() bool
- // Files returns the from and to Files, with all the necessary metadata
- // about them. If the patch creates a new file, "from" will be nil.
- // If the patch deletes a file, "to" will be nil.
- Files() (from, to File)
- // Chunks returns a slice of ordered changes to transform "from" File to
- // "to" File. If the file is a binary one, Chunks will be empty.
- Chunks() []Chunk
-}
-
-// File contains all the file metadata necessary to print some patch formats.
-type File interface {
- // Hash returns the File Hash.
- Hash() plumbing.Hash
- // Mode returns the FileMode.
- Mode() filemode.FileMode
- // Path returns the complete Path to the file, including the filename.
- Path() string
-}
-
-// Chunk represents a portion of a file transformation to another.
-type Chunk interface {
- // Content contains the portion of the file.
- Content() string
- // Type contains the Operation to do with this Chunk.
- Type() Operation
-}
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go
deleted file mode 100644
index 413984aa54d..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go
+++ /dev/null
@@ -1,376 +0,0 @@
-package diff
-
-import (
- "fmt"
- "io"
- "regexp"
- "strconv"
- "strings"
-
- "github.com/go-git/go-git/v5/plumbing"
-)
-
-// DefaultContextLines is the default number of context lines.
-const DefaultContextLines = 3
-
-var (
- splitLinesRegexp = regexp.MustCompile(`[^\n]*(\n|$)`)
-
- operationChar = map[Operation]byte{
- Add: '+',
- Delete: '-',
- Equal: ' ',
- }
-
- operationColorKey = map[Operation]ColorKey{
- Add: New,
- Delete: Old,
- Equal: Context,
- }
-)
-
-// UnifiedEncoder encodes a unified diff into the provided Writer. It does not
-// support similarity index for renames or sorting hash representations.
-type UnifiedEncoder struct {
- io.Writer
-
- // contextLines is the count of unchanged lines that will appear surrounding
- // a change.
- contextLines int
-
- // color is the color configuration. The default is no color.
- color ColorConfig -} - -// NewUnifiedEncoder returns a new UnifiedEncoder that writes to w. -func NewUnifiedEncoder(w io.Writer, contextLines int) *UnifiedEncoder { - return &UnifiedEncoder{ - Writer: w, - contextLines: contextLines, - } -} - -// SetColor sets e's color configuration and returns e. -func (e *UnifiedEncoder) SetColor(colorConfig ColorConfig) *UnifiedEncoder { - e.color = colorConfig - return e -} - -// Encode encodes patch. -func (e *UnifiedEncoder) Encode(patch Patch) error { - sb := &strings.Builder{} - - if message := patch.Message(); message != "" { - sb.WriteString(message) - if !strings.HasSuffix(message, "\n") { - sb.WriteByte('\n') - } - } - - for _, filePatch := range patch.FilePatches() { - e.writeFilePatchHeader(sb, filePatch) - g := newHunksGenerator(filePatch.Chunks(), e.contextLines) - for _, hunk := range g.Generate() { - hunk.writeTo(sb, e.color) - } - } - - _, err := e.Write([]byte(sb.String())) - return err -} - -func (e *UnifiedEncoder) writeFilePatchHeader(sb *strings.Builder, filePatch FilePatch) { - from, to := filePatch.Files() - if from == nil && to == nil { - return - } - isBinary := filePatch.IsBinary() - - var lines []string - switch { - case from != nil && to != nil: - hashEquals := from.Hash() == to.Hash() - lines = append(lines, - fmt.Sprintf("diff --git a/%s b/%s", from.Path(), to.Path()), - ) - if from.Mode() != to.Mode() { - lines = append(lines, - fmt.Sprintf("old mode %o", from.Mode()), - fmt.Sprintf("new mode %o", to.Mode()), - ) - } - if from.Path() != to.Path() { - lines = append(lines, - fmt.Sprintf("rename from %s", from.Path()), - fmt.Sprintf("rename to %s", to.Path()), - ) - } - if from.Mode() != to.Mode() && !hashEquals { - lines = append(lines, - fmt.Sprintf("index %s..%s", from.Hash(), to.Hash()), - ) - } else if !hashEquals { - lines = append(lines, - fmt.Sprintf("index %s..%s %o", from.Hash(), to.Hash(), from.Mode()), - ) - } - if !hashEquals { - lines = e.appendPathLines(lines, "a/"+from.Path(), "b/"+to.Path(), isBinary) - } - case from == nil: - lines = append(lines, - fmt.Sprintf("diff --git a/%s b/%s", to.Path(), to.Path()), - fmt.Sprintf("new file mode %o", to.Mode()), - fmt.Sprintf("index %s..%s", plumbing.ZeroHash, to.Hash()), - ) - lines = e.appendPathLines(lines, "/dev/null", "b/"+to.Path(), isBinary) - case to == nil: - lines = append(lines, - fmt.Sprintf("diff --git a/%s b/%s", from.Path(), from.Path()), - fmt.Sprintf("deleted file mode %o", from.Mode()), - fmt.Sprintf("index %s..%s", from.Hash(), plumbing.ZeroHash), - ) - lines = e.appendPathLines(lines, "a/"+from.Path(), "/dev/null", isBinary) - } - - sb.WriteString(e.color[Meta]) - sb.WriteString(lines[0]) - for _, line := range lines[1:] { - sb.WriteByte('\n') - sb.WriteString(line) - } - sb.WriteString(e.color.Reset(Meta)) - sb.WriteByte('\n') -} - -func (e *UnifiedEncoder) appendPathLines(lines []string, fromPath, toPath string, isBinary bool) []string { - if isBinary { - return append(lines, - fmt.Sprintf("Binary files %s and %s differ", fromPath, toPath), - ) - } - return append(lines, - fmt.Sprintf("--- %s", fromPath), - fmt.Sprintf("+++ %s", toPath), - ) -} - -type hunksGenerator struct { - fromLine, toLine int - ctxLines int - chunks []Chunk - current *hunk - hunks []*hunk - beforeContext, afterContext []string -} - -func newHunksGenerator(chunks []Chunk, ctxLines int) *hunksGenerator { - return &hunksGenerator{ - chunks: chunks, - ctxLines: ctxLines, - } -} - -func (g *hunksGenerator) Generate() []*hunk { - for i, chunk := range g.chunks { - lines := 
splitLines(chunk.Content()) - nLines := len(lines) - - switch chunk.Type() { - case Equal: - g.fromLine += nLines - g.toLine += nLines - g.processEqualsLines(lines, i) - case Delete: - if nLines != 0 { - g.fromLine++ - } - - g.processHunk(i, chunk.Type()) - g.fromLine += nLines - 1 - g.current.AddOp(chunk.Type(), lines...) - case Add: - if nLines != 0 { - g.toLine++ - } - g.processHunk(i, chunk.Type()) - g.toLine += nLines - 1 - g.current.AddOp(chunk.Type(), lines...) - } - - if i == len(g.chunks)-1 && g.current != nil { - g.hunks = append(g.hunks, g.current) - } - } - - return g.hunks -} - -func (g *hunksGenerator) processHunk(i int, op Operation) { - if g.current != nil { - return - } - - var ctxPrefix string - linesBefore := len(g.beforeContext) - if linesBefore > g.ctxLines { - ctxPrefix = g.beforeContext[linesBefore-g.ctxLines-1] - g.beforeContext = g.beforeContext[linesBefore-g.ctxLines:] - linesBefore = g.ctxLines - } - - g.current = &hunk{ctxPrefix: strings.TrimSuffix(ctxPrefix, "\n")} - g.current.AddOp(Equal, g.beforeContext...) - - switch op { - case Delete: - g.current.fromLine, g.current.toLine = - g.addLineNumbers(g.fromLine, g.toLine, linesBefore, i, Add) - case Add: - g.current.toLine, g.current.fromLine = - g.addLineNumbers(g.toLine, g.fromLine, linesBefore, i, Delete) - } - - g.beforeContext = nil -} - -// addLineNumbers obtains the line numbers in a new chunk. -func (g *hunksGenerator) addLineNumbers(la, lb int, linesBefore int, i int, op Operation) (cla, clb int) { - cla = la - linesBefore - // we need to search for a reference for the next diff - switch { - case linesBefore != 0 && g.ctxLines != 0: - if lb > g.ctxLines { - clb = lb - g.ctxLines + 1 - } else { - clb = 1 - } - case g.ctxLines == 0: - clb = lb - case i != len(g.chunks)-1: - next := g.chunks[i+1] - if next.Type() == op || next.Type() == Equal { - // this diff will be into this chunk - clb = lb + 1 - } - } - - return -} - -func (g *hunksGenerator) processEqualsLines(ls []string, i int) { - if g.current == nil { - g.beforeContext = append(g.beforeContext, ls...) - return - } - - g.afterContext = append(g.afterContext, ls...) - if len(g.afterContext) <= g.ctxLines*2 && i != len(g.chunks)-1 { - g.current.AddOp(Equal, g.afterContext...) - g.afterContext = nil - } else { - ctxLines := g.ctxLines - if ctxLines > len(g.afterContext) { - ctxLines = len(g.afterContext) - } - g.current.AddOp(Equal, g.afterContext[:ctxLines]...) 
- g.hunks = append(g.hunks, g.current) - - g.current = nil - g.beforeContext = g.afterContext[ctxLines:] - g.afterContext = nil - } -} - -func splitLines(s string) []string { - out := splitLinesRegexp.FindAllString(s, -1) - if out[len(out)-1] == "" { - out = out[:len(out)-1] - } - return out -} - -type hunk struct { - fromLine int - toLine int - - fromCount int - toCount int - - ctxPrefix string - ops []*op -} - -func (h *hunk) writeTo(sb *strings.Builder, color ColorConfig) { - sb.WriteString(color[Frag]) - sb.WriteString("@@ -") - - if h.fromCount == 1 { - sb.WriteString(strconv.Itoa(h.fromLine)) - } else { - sb.WriteString(strconv.Itoa(h.fromLine)) - sb.WriteByte(',') - sb.WriteString(strconv.Itoa(h.fromCount)) - } - - sb.WriteString(" +") - - if h.toCount == 1 { - sb.WriteString(strconv.Itoa(h.toLine)) - } else { - sb.WriteString(strconv.Itoa(h.toLine)) - sb.WriteByte(',') - sb.WriteString(strconv.Itoa(h.toCount)) - } - - sb.WriteString(" @@") - sb.WriteString(color.Reset(Frag)) - - if h.ctxPrefix != "" { - sb.WriteByte(' ') - sb.WriteString(color[Func]) - sb.WriteString(h.ctxPrefix) - sb.WriteString(color.Reset(Func)) - } - - sb.WriteByte('\n') - - for _, op := range h.ops { - op.writeTo(sb, color) - } -} - -func (h *hunk) AddOp(t Operation, ss ...string) { - n := len(ss) - switch t { - case Add: - h.toCount += n - case Delete: - h.fromCount += n - case Equal: - h.toCount += n - h.fromCount += n - } - - for _, s := range ss { - h.ops = append(h.ops, &op{s, t}) - } -} - -type op struct { - text string - t Operation -} - -func (o *op) writeTo(sb *strings.Builder, color ColorConfig) { - colorKey := operationColorKey[o.t] - sb.WriteString(color[colorKey]) - sb.WriteByte(operationChar[o.t]) - if strings.HasSuffix(o.text, "\n") { - sb.WriteString(strings.TrimSuffix(o.text, "\n")) - } else { - sb.WriteString(o.text + "\n\\ No newline at end of file") - } - sb.WriteString(color.Reset(colorKey)) - sb.WriteByte('\n') -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go deleted file mode 100644 index f4444bfb3ff..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go +++ /dev/null @@ -1,136 +0,0 @@ -package gitignore - -import ( - "bytes" - "io/ioutil" - "os" - "os/user" - "strings" - - "github.com/go-git/go-billy/v5" - "github.com/go-git/go-git/v5/plumbing/format/config" - gioutil "github.com/go-git/go-git/v5/utils/ioutil" -) - -const ( - commentPrefix = "#" - coreSection = "core" - eol = "\n" - excludesfile = "excludesfile" - gitDir = ".git" - gitignoreFile = ".gitignore" - gitconfigFile = ".gitconfig" - systemFile = "/etc/gitconfig" -) - -// readIgnoreFile reads a specific git ignore file. -func readIgnoreFile(fs billy.Filesystem, path []string, ignoreFile string) (ps []Pattern, err error) { - f, err := fs.Open(fs.Join(append(path, ignoreFile)...)) - if err == nil { - defer f.Close() - - if data, err := ioutil.ReadAll(f); err == nil { - for _, s := range strings.Split(string(data), eol) { - if !strings.HasPrefix(s, commentPrefix) && len(strings.TrimSpace(s)) > 0 { - ps = append(ps, ParsePattern(s, path)) - } - } - } - } else if !os.IsNotExist(err) { - return nil, err - } - - return -} - -// ReadPatterns reads gitignore patterns recursively traversing through the directory -// structure. The result is in the ascending order of priority (last higher). 
-func ReadPatterns(fs billy.Filesystem, path []string) (ps []Pattern, err error) { - ps, _ = readIgnoreFile(fs, path, gitignoreFile) - - var fis []os.FileInfo - fis, err = fs.ReadDir(fs.Join(path...)) - if err != nil { - return - } - - for _, fi := range fis { - if fi.IsDir() && fi.Name() != gitDir { - var subps []Pattern - subps, err = ReadPatterns(fs, append(path, fi.Name())) - if err != nil { - return - } - - if len(subps) > 0 { - ps = append(ps, subps...) - } - } - } - - return -} - -func loadPatterns(fs billy.Filesystem, path string) (ps []Pattern, err error) { - f, err := fs.Open(path) - if err != nil { - if os.IsNotExist(err) { - return nil, nil - } - return nil, err - } - - defer gioutil.CheckClose(f, &err) - - b, err := ioutil.ReadAll(f) - if err != nil { - return - } - - d := config.NewDecoder(bytes.NewBuffer(b)) - - raw := config.New() - if err = d.Decode(raw); err != nil { - return - } - - s := raw.Section(coreSection) - efo := s.Options.Get(excludesfile) - if efo == "" { - return nil, nil - } - - ps, err = readIgnoreFile(fs, nil, efo) - if os.IsNotExist(err) { - return nil, nil - } - - return -} - -// LoadGlobalPatterns loads gitignore patterns from from the gitignore file -// declared in a user's ~/.gitconfig file. If the ~/.gitconfig file does not -// exist the function will return nil. If the core.excludesfile property -// is not declared, the function will return nil. If the file pointed to by -// the core.excludesfile property does not exist, the function will return nil. -// -// The function assumes fs is rooted at the root filesystem. -func LoadGlobalPatterns(fs billy.Filesystem) (ps []Pattern, err error) { - usr, err := user.Current() - if err != nil { - return - } - - return loadPatterns(fs, fs.Join(usr.HomeDir, gitconfigFile)) -} - -// LoadSystemPatterns loads gitignore patterns from from the gitignore file -// declared in a system's /etc/gitconfig file. If the ~/.gitconfig file does -// not exist the function will return nil. If the core.excludesfile property -// is not declared, the function will return nil. If the file pointed to by -// the core.excludesfile property does not exist, the function will return nil. -// -// The function assumes fs is rooted at the root filesystem. -func LoadSystemPatterns(fs billy.Filesystem) (ps []Pattern, err error) { - return loadPatterns(fs, systemFile) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/doc.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/doc.go deleted file mode 100644 index eecd4baccb2..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/doc.go +++ /dev/null @@ -1,70 +0,0 @@ -// Package gitignore implements matching file system paths to gitignore patterns that -// can be automatically read from a git repository tree in the order of definition -// priorities. It support all pattern formats as specified in the original gitignore -// documentation, copied below: -// -// Pattern format -// ============== -// -// - A blank line matches no files, so it can serve as a separator for readability. -// -// - A line starting with # serves as a comment. Put a backslash ("\") in front of -// the first hash for patterns that begin with a hash. -// -// - Trailing spaces are ignored unless they are quoted with backslash ("\"). -// -// - An optional prefix "!" which negates the pattern; any matching file excluded -// by a previous pattern will become included again. 
It is not possible to -// re-include a file if a parent directory of that file is excluded. -// Git doesn’t list excluded directories for performance reasons, so -// any patterns on contained files have no effect, no matter where they are -// defined. Put a backslash ("\") in front of the first "!" for patterns -// that begin with a literal "!", for example, "\!important!.txt". -// -// - If the pattern ends with a slash, it is removed for the purpose of the -// following description, but it would only find a match with a directory. -// In other words, foo/ will match a directory foo and paths underneath it, -// but will not match a regular file or a symbolic link foo (this is consistent -// with the way how pathspec works in general in Git). -// -// - If the pattern does not contain a slash /, Git treats it as a shell glob -// pattern and checks for a match against the pathname relative to the location -// of the .gitignore file (relative to the toplevel of the work tree if not -// from a .gitignore file). -// -// - Otherwise, Git treats the pattern as a shell glob suitable for consumption -// by fnmatch(3) with the FNM_PATHNAME flag: wildcards in the pattern will -// not match a / in the pathname. For example, "Documentation/*.html" matches -// "Documentation/git.html" but not "Documentation/ppc/ppc.html" or -// "tools/perf/Documentation/perf.html". -// -// - A leading slash matches the beginning of the pathname. For example, -// "/*.c" matches "cat-file.c" but not "mozilla-sha1/sha1.c". -// -// Two consecutive asterisks ("**") in patterns matched against full pathname -// may have special meaning: -// -// - A leading "**" followed by a slash means match in all directories. -// For example, "**/foo" matches file or directory "foo" anywhere, the same as -// pattern "foo". "**/foo/bar" matches file or directory "bar" -// anywhere that is directly under directory "foo". -// -// - A trailing "/**" matches everything inside. For example, "abc/**" matches -// all files inside directory "abc", relative to the location of the -// .gitignore file, with infinite depth. -// -// - A slash followed by two consecutive asterisks then a slash matches -// zero or more directories. For example, "a/**/b" matches "a/b", "a/x/b", -// "a/x/y/b" and so on. -// -// - Other consecutive asterisks are considered invalid. -// -// Copyright and license -// ===================== -// -// Copyright (c) Oleg Sklyar, Silvertern and source{d} -// -// The package code was donated to source{d} to include, modify and develop -// further as a part of the `go-git` project, release it on the license of -// the whole project or delete it from the project. -package gitignore diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/matcher.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/matcher.go deleted file mode 100644 index bd1e9e2d4cf..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/matcher.go +++ /dev/null @@ -1,30 +0,0 @@ -package gitignore - -// Matcher defines a global multi-pattern matcher for gitignore patterns -type Matcher interface { - // Match matches patterns in the order of priorities. As soon as an inclusion or - // exclusion is found, not further matching is performed. - Match(path []string, isDir bool) bool -} - -// NewMatcher constructs a new global matcher. Patterns must be given in the order of -// increasing priority. 
That is most generic settings files first, then the content of -// the repo .gitignore, then content of .gitignore down the path or the repo and then -// the content command line arguments. -func NewMatcher(ps []Pattern) Matcher { - return &matcher{ps} -} - -type matcher struct { - patterns []Pattern -} - -func (m *matcher) Match(path []string, isDir bool) bool { - n := len(m.patterns) - for i := n - 1; i >= 0; i-- { - if match := m.patterns[i].Match(path, isDir); match > NoMatch { - return match == Exclude - } - } - return false -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go deleted file mode 100644 index 098cb502127..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go +++ /dev/null @@ -1,153 +0,0 @@ -package gitignore - -import ( - "path/filepath" - "strings" -) - -// MatchResult defines outcomes of a match, no match, exclusion or inclusion. -type MatchResult int - -const ( - // NoMatch defines the no match outcome of a match check - NoMatch MatchResult = iota - // Exclude defines an exclusion of a file as a result of a match check - Exclude - // Include defines an explicit inclusion of a file as a result of a match check - Include -) - -const ( - inclusionPrefix = "!" - zeroToManyDirs = "**" - patternDirSep = "/" -) - -// Pattern defines a single gitignore pattern. -type Pattern interface { - // Match matches the given path to the pattern. - Match(path []string, isDir bool) MatchResult -} - -type pattern struct { - domain []string - pattern []string - inclusion bool - dirOnly bool - isGlob bool -} - -// ParsePattern parses a gitignore pattern string into the Pattern structure. 
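// [Editorial sketch, not part of the deleted file] How ParsePattern and the
// matcher cooperate, using only the API defined in this package; the later,
// higher-priority negation wins because Match scans from the last pattern
// backwards:
//
//	patterns := []Pattern{
//		ParsePattern("*.log", nil),     // exclude every .log file
//		ParsePattern("!keep.log", nil), // ...but re-include keep.log
//	}
//	m := NewMatcher(patterns)
//	m.Match([]string{"build", "out.log"}, false) // true: excluded
//	m.Match([]string{"keep.log"}, false)         // false: re-included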
-func ParsePattern(p string, domain []string) Pattern { - res := pattern{domain: domain} - - if strings.HasPrefix(p, inclusionPrefix) { - res.inclusion = true - p = p[1:] - } - - if !strings.HasSuffix(p, "\\ ") { - p = strings.TrimRight(p, " ") - } - - if strings.HasSuffix(p, patternDirSep) { - res.dirOnly = true - p = p[:len(p)-1] - } - - if strings.Contains(p, patternDirSep) { - res.isGlob = true - } - - res.pattern = strings.Split(p, patternDirSep) - return &res -} - -func (p *pattern) Match(path []string, isDir bool) MatchResult { - if len(path) <= len(p.domain) { - return NoMatch - } - for i, e := range p.domain { - if path[i] != e { - return NoMatch - } - } - - path = path[len(p.domain):] - if p.isGlob && !p.globMatch(path, isDir) { - return NoMatch - } else if !p.isGlob && !p.simpleNameMatch(path, isDir) { - return NoMatch - } - - if p.inclusion { - return Include - } else { - return Exclude - } -} - -func (p *pattern) simpleNameMatch(path []string, isDir bool) bool { - for i, name := range path { - if match, err := filepath.Match(p.pattern[0], name); err != nil { - return false - } else if !match { - continue - } - if p.dirOnly && !isDir && i == len(path)-1 { - return false - } - return true - } - return false -} - -func (p *pattern) globMatch(path []string, isDir bool) bool { - matched := false - canTraverse := false - for i, pattern := range p.pattern { - if pattern == "" { - canTraverse = false - continue - } - if pattern == zeroToManyDirs { - if i == len(p.pattern)-1 { - break - } - canTraverse = true - continue - } - if strings.Contains(pattern, zeroToManyDirs) { - return false - } - if len(path) == 0 { - return false - } - if canTraverse { - canTraverse = false - for len(path) > 0 { - e := path[0] - path = path[1:] - if match, err := filepath.Match(pattern, e); err != nil { - return false - } else if match { - matched = true - break - } else if len(path) == 0 { - // if nothing left then fail - matched = false - } - } - } else { - if match, err := filepath.Match(pattern, path[0]); err != nil || !match { - return false - } - matched = true - path = path[1:] - } - } - if matched && p.dirOnly && !isDir && len(path) == 0 { - matched = false - } - return matched -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/decoder.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/decoder.go deleted file mode 100644 index 7768bd6505d..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/decoder.go +++ /dev/null @@ -1,177 +0,0 @@ -package idxfile - -import ( - "bufio" - "bytes" - "errors" - "io" - - "github.com/go-git/go-git/v5/utils/binary" -) - -var ( - // ErrUnsupportedVersion is returned by Decode when the idx file version - // is not supported. - ErrUnsupportedVersion = errors.New("Unsupported version") - // ErrMalformedIdxFile is returned by Decode when the idx file is corrupted. - ErrMalformedIdxFile = errors.New("Malformed IDX file") -) - -const ( - fanout = 256 - objectIDLength = 20 -) - -// Decoder reads and decodes idx files from an input stream. -type Decoder struct { - *bufio.Reader -} - -// NewDecoder builds a new idx stream decoder, that reads from r. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{bufio.NewReader(r)} -} - -// Decode reads from the stream and decode the content into the MemoryIndex struct. 
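// [Editorial sketch, not part of the deleted file] Typical use of this decoder,
// with the pack index file name purely illustrative:
//
//	f, err := os.Open("pack-1234.idx")
//	if err != nil {
//		// handle the error
//	}
//	defer f.Close()
//
//	idx := NewMemoryIndex()
//	if err := NewDecoder(f).Decode(idx); err != nil {
//		// ErrMalformedIdxFile, ErrUnsupportedVersion, or an I/O error
//	}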
-func (d *Decoder) Decode(idx *MemoryIndex) error { - if err := validateHeader(d); err != nil { - return err - } - - flow := []func(*MemoryIndex, io.Reader) error{ - readVersion, - readFanout, - readObjectNames, - readCRC32, - readOffsets, - readChecksums, - } - - for _, f := range flow { - if err := f(idx, d); err != nil { - return err - } - } - - return nil -} - -func validateHeader(r io.Reader) error { - var h = make([]byte, 4) - if _, err := io.ReadFull(r, h); err != nil { - return err - } - - if !bytes.Equal(h, idxHeader) { - return ErrMalformedIdxFile - } - - return nil -} - -func readVersion(idx *MemoryIndex, r io.Reader) error { - v, err := binary.ReadUint32(r) - if err != nil { - return err - } - - if v > VersionSupported { - return ErrUnsupportedVersion - } - - idx.Version = v - return nil -} - -func readFanout(idx *MemoryIndex, r io.Reader) error { - for k := 0; k < fanout; k++ { - n, err := binary.ReadUint32(r) - if err != nil { - return err - } - - idx.Fanout[k] = n - idx.FanoutMapping[k] = noMapping - } - - return nil -} - -func readObjectNames(idx *MemoryIndex, r io.Reader) error { - for k := 0; k < fanout; k++ { - var buckets uint32 - if k == 0 { - buckets = idx.Fanout[k] - } else { - buckets = idx.Fanout[k] - idx.Fanout[k-1] - } - - if buckets == 0 { - continue - } - - idx.FanoutMapping[k] = len(idx.Names) - - nameLen := int(buckets * objectIDLength) - bin := make([]byte, nameLen) - if _, err := io.ReadFull(r, bin); err != nil { - return err - } - - idx.Names = append(idx.Names, bin) - idx.Offset32 = append(idx.Offset32, make([]byte, buckets*4)) - idx.CRC32 = append(idx.CRC32, make([]byte, buckets*4)) - } - - return nil -} - -func readCRC32(idx *MemoryIndex, r io.Reader) error { - for k := 0; k < fanout; k++ { - if pos := idx.FanoutMapping[k]; pos != noMapping { - if _, err := io.ReadFull(r, idx.CRC32[pos]); err != nil { - return err - } - } - } - - return nil -} - -func readOffsets(idx *MemoryIndex, r io.Reader) error { - var o64cnt int - for k := 0; k < fanout; k++ { - if pos := idx.FanoutMapping[k]; pos != noMapping { - if _, err := io.ReadFull(r, idx.Offset32[pos]); err != nil { - return err - } - - for p := 0; p < len(idx.Offset32[pos]); p += 4 { - if idx.Offset32[pos][p]&(byte(1)<<7) > 0 { - o64cnt++ - } - } - } - } - - if o64cnt > 0 { - idx.Offset64 = make([]byte, o64cnt*8) - if _, err := io.ReadFull(r, idx.Offset64); err != nil { - return err - } - } - - return nil -} - -func readChecksums(idx *MemoryIndex, r io.Reader) error { - if _, err := io.ReadFull(r, idx.PackfileChecksum[:]); err != nil { - return err - } - - if _, err := io.ReadFull(r, idx.IdxChecksum[:]); err != nil { - return err - } - - return nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/doc.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/doc.go deleted file mode 100644 index 1e628ab4a5e..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/doc.go +++ /dev/null @@ -1,128 +0,0 @@ -// Package idxfile implements encoding and decoding of packfile idx files. -// -// == Original (version 1) pack-*.idx files have the following format: -// -// - The header consists of 256 4-byte network byte order -// integers. N-th entry of this table records the number of -// objects in the corresponding pack, the first byte of whose -// object name is less than or equal to N. This is called the -// 'first-level fan-out' table. 
-// -// - The header is followed by sorted 24-byte entries, one entry -// per object in the pack. Each entry is: -// -// 4-byte network byte order integer, recording where the -// object is stored in the packfile as the offset from the -// beginning. -// -// 20-byte object name. -// -// - The file is concluded with a trailer: -// -// A copy of the 20-byte SHA1 checksum at the end of -// corresponding packfile. -// -// 20-byte SHA1-checksum of all of the above. -// -// Pack Idx file: -// -// -- +--------------------------------+ -// fanout | fanout[0] = 2 (for example) |-. -// table +--------------------------------+ | -// | fanout[1] | | -// +--------------------------------+ | -// | fanout[2] | | -// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | -// | fanout[255] = total objects |---. -// -- +--------------------------------+ | | -// main | offset | | | -// index | object name 00XXXXXXXXXXXXXXXX | | | -// tab +--------------------------------+ | | -// | offset | | | -// | object name 00XXXXXXXXXXXXXXXX | | | -// +--------------------------------+<+ | -// .-| offset | | -// | | object name 01XXXXXXXXXXXXXXXX | | -// | +--------------------------------+ | -// | | offset | | -// | | object name 01XXXXXXXXXXXXXXXX | | -// | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | -// | | offset | | -// | | object name FFXXXXXXXXXXXXXXXX | | -// --| +--------------------------------+<--+ -// trailer | | packfile checksum | -// | +--------------------------------+ -// | | idxfile checksum | -// | +--------------------------------+ -// .---------. -// | -// Pack file entry: <+ -// -// packed object header: -// 1-byte size extension bit (MSB) -// type (next 3 bit) -// size0 (lower 4-bit) -// n-byte sizeN (as long as MSB is set, each 7-bit) -// size0..sizeN form 4+7+7+..+7 bit integer, size0 -// is the least significant part, and sizeN is the -// most significant part. -// packed object data: -// If it is not DELTA, then deflated bytes (the size above -// is the size before compression). -// If it is REF_DELTA, then -// 20-byte base object name SHA1 (the size above is the -// size of the delta data that follows). -// delta data, deflated. -// If it is OFS_DELTA, then -// n-byte offset (see below) interpreted as a negative -// offset from the type-byte of the header of the -// ofs-delta entry (the size above is the size of -// the delta data that follows). -// delta data, deflated. -// -// offset encoding: -// n bytes with MSB set in all but the last one. -// The offset is then the number constructed by -// concatenating the lower 7 bit of each byte, and -// for n >= 2 adding 2^7 + 2^14 + ... + 2^(7*(n-1)) -// to the result. -// -// == Version 2 pack-*.idx files support packs larger than 4 GiB, and -// have some other reorganizations. They have the format: -// -// - A 4-byte magic number '\377tOc' which is an unreasonable -// fanout[0] value. -// -// - A 4-byte version number (= 2) -// -// - A 256-entry fan-out table just like v1. -// -// - A table of sorted 20-byte SHA1 object names. These are -// packed together without offset values to reduce the cache -// footprint of the binary search for a specific object name. -// -// - A table of 4-byte CRC32 values of the packed object data. -// This is new in v2 so compressed data can be copied directly -// from pack to pack during repacking without undetected -// data corruption. -// -// - A table of 4-byte offset values (in network byte order). -// These are usually 31-bit pack file offsets, but large -// offsets are encoded as an index into the next table with -// the msbit set. 
-// -// - A table of 8-byte offset entries (empty for pack files less -// than 2 GiB). Pack files are organized with heavily used -// objects toward the front, so most object references should -// not need to refer to this table. -// -// - The same trailer as a v1 pack file: -// -// A copy of the 20-byte SHA1 checksum at the end of -// corresponding packfile. -// -// 20-byte SHA1-checksum of all of the above. -// -// Source: -// https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-format.txt -package idxfile diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/encoder.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/encoder.go deleted file mode 100644 index 26b2e4d6b57..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/encoder.go +++ /dev/null @@ -1,142 +0,0 @@ -package idxfile - -import ( - "crypto/sha1" - "hash" - "io" - - "github.com/go-git/go-git/v5/utils/binary" -) - -// Encoder writes MemoryIndex structs to an output stream. -type Encoder struct { - io.Writer - hash hash.Hash -} - -// NewEncoder returns a new stream encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - h := sha1.New() - mw := io.MultiWriter(w, h) - return &Encoder{mw, h} -} - -// Encode encodes an MemoryIndex to the encoder writer. -func (e *Encoder) Encode(idx *MemoryIndex) (int, error) { - flow := []func(*MemoryIndex) (int, error){ - e.encodeHeader, - e.encodeFanout, - e.encodeHashes, - e.encodeCRC32, - e.encodeOffsets, - e.encodeChecksums, - } - - sz := 0 - for _, f := range flow { - i, err := f(idx) - sz += i - - if err != nil { - return sz, err - } - } - - return sz, nil -} - -func (e *Encoder) encodeHeader(idx *MemoryIndex) (int, error) { - c, err := e.Write(idxHeader) - if err != nil { - return c, err - } - - return c + 4, binary.WriteUint32(e, idx.Version) -} - -func (e *Encoder) encodeFanout(idx *MemoryIndex) (int, error) { - for _, c := range idx.Fanout { - if err := binary.WriteUint32(e, c); err != nil { - return 0, err - } - } - - return fanout * 4, nil -} - -func (e *Encoder) encodeHashes(idx *MemoryIndex) (int, error) { - var size int - for k := 0; k < fanout; k++ { - pos := idx.FanoutMapping[k] - if pos == noMapping { - continue - } - - n, err := e.Write(idx.Names[pos]) - if err != nil { - return size, err - } - size += n - } - return size, nil -} - -func (e *Encoder) encodeCRC32(idx *MemoryIndex) (int, error) { - var size int - for k := 0; k < fanout; k++ { - pos := idx.FanoutMapping[k] - if pos == noMapping { - continue - } - - n, err := e.Write(idx.CRC32[pos]) - if err != nil { - return size, err - } - - size += n - } - - return size, nil -} - -func (e *Encoder) encodeOffsets(idx *MemoryIndex) (int, error) { - var size int - for k := 0; k < fanout; k++ { - pos := idx.FanoutMapping[k] - if pos == noMapping { - continue - } - - n, err := e.Write(idx.Offset32[pos]) - if err != nil { - return size, err - } - - size += n - } - - if len(idx.Offset64) > 0 { - n, err := e.Write(idx.Offset64) - if err != nil { - return size, err - } - - size += n - } - - return size, nil -} - -func (e *Encoder) encodeChecksums(idx *MemoryIndex) (int, error) { - if _, err := e.Write(idx.PackfileChecksum[:]); err != nil { - return 0, err - } - - copy(idx.IdxChecksum[:], e.hash.Sum(nil)[:20]) - if _, err := e.Write(idx.IdxChecksum[:]); err != nil { - return 0, err - } - - return 40, nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go 
b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go deleted file mode 100644 index 64dd8dcef4a..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go +++ /dev/null @@ -1,346 +0,0 @@ -package idxfile - -import ( - "bytes" - "io" - "sort" - - encbin "encoding/binary" - - "github.com/go-git/go-git/v5/plumbing" -) - -const ( - // VersionSupported is the only idx version supported. - VersionSupported = 2 - - noMapping = -1 -) - -var ( - idxHeader = []byte{255, 't', 'O', 'c'} -) - -// Index represents an index of a packfile. -type Index interface { - // Contains checks whether the given hash is in the index. - Contains(h plumbing.Hash) (bool, error) - // FindOffset finds the offset in the packfile for the object with - // the given hash. - FindOffset(h plumbing.Hash) (int64, error) - // FindCRC32 finds the CRC32 of the object with the given hash. - FindCRC32(h plumbing.Hash) (uint32, error) - // FindHash finds the hash for the object with the given offset. - FindHash(o int64) (plumbing.Hash, error) - // Count returns the number of entries in the index. - Count() (int64, error) - // Entries returns an iterator to retrieve all index entries. - Entries() (EntryIter, error) - // EntriesByOffset returns an iterator to retrieve all index entries ordered - // by offset. - EntriesByOffset() (EntryIter, error) -} - -// MemoryIndex is the in memory representation of an idx file. -type MemoryIndex struct { - Version uint32 - Fanout [256]uint32 - // FanoutMapping maps the position in the fanout table to the position - // in the Names, Offset32 and CRC32 slices. This improves the memory - // usage by not needing an array with unnecessary empty slots. - FanoutMapping [256]int - Names [][]byte - Offset32 [][]byte - CRC32 [][]byte - Offset64 []byte - PackfileChecksum [20]byte - IdxChecksum [20]byte - - offsetHash map[int64]plumbing.Hash - offsetHashIsFull bool -} - -var _ Index = (*MemoryIndex)(nil) - -// NewMemoryIndex returns an instance of a new MemoryIndex. -func NewMemoryIndex() *MemoryIndex { - return &MemoryIndex{} -} - -func (idx *MemoryIndex) findHashIndex(h plumbing.Hash) (int, bool) { - k := idx.FanoutMapping[h[0]] - if k == noMapping { - return 0, false - } - - if len(idx.Names) <= k { - return 0, false - } - - data := idx.Names[k] - high := uint64(len(idx.Offset32[k])) >> 2 - if high == 0 { - return 0, false - } - - low := uint64(0) - for { - mid := (low + high) >> 1 - offset := mid * objectIDLength - - cmp := bytes.Compare(h[:], data[offset:offset+objectIDLength]) - if cmp < 0 { - high = mid - } else if cmp == 0 { - return int(mid), true - } else { - low = mid + 1 - } - - if low >= high { - break - } - } - - return 0, false -} - -// Contains implements the Index interface. -func (idx *MemoryIndex) Contains(h plumbing.Hash) (bool, error) { - _, ok := idx.findHashIndex(h) - return ok, nil -} - -// FindOffset implements the Index interface. 
-func (idx *MemoryIndex) FindOffset(h plumbing.Hash) (int64, error) { - if len(idx.FanoutMapping) <= int(h[0]) { - return 0, plumbing.ErrObjectNotFound - } - - k := idx.FanoutMapping[h[0]] - i, ok := idx.findHashIndex(h) - if !ok { - return 0, plumbing.ErrObjectNotFound - } - - offset := idx.getOffset(k, i) - - if !idx.offsetHashIsFull { - // Save the offset for reverse lookup - if idx.offsetHash == nil { - idx.offsetHash = make(map[int64]plumbing.Hash) - } - idx.offsetHash[int64(offset)] = h - } - - return int64(offset), nil -} - -const isO64Mask = uint64(1) << 31 - -func (idx *MemoryIndex) getOffset(firstLevel, secondLevel int) uint64 { - offset := secondLevel << 2 - ofs := encbin.BigEndian.Uint32(idx.Offset32[firstLevel][offset : offset+4]) - - if (uint64(ofs) & isO64Mask) != 0 { - offset := 8 * (uint64(ofs) & ^isO64Mask) - n := encbin.BigEndian.Uint64(idx.Offset64[offset : offset+8]) - return n - } - - return uint64(ofs) -} - -// FindCRC32 implements the Index interface. -func (idx *MemoryIndex) FindCRC32(h plumbing.Hash) (uint32, error) { - k := idx.FanoutMapping[h[0]] - i, ok := idx.findHashIndex(h) - if !ok { - return 0, plumbing.ErrObjectNotFound - } - - return idx.getCRC32(k, i), nil -} - -func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) uint32 { - offset := secondLevel << 2 - return encbin.BigEndian.Uint32(idx.CRC32[firstLevel][offset : offset+4]) -} - -// FindHash implements the Index interface. -func (idx *MemoryIndex) FindHash(o int64) (plumbing.Hash, error) { - var hash plumbing.Hash - var ok bool - - if idx.offsetHash != nil { - if hash, ok = idx.offsetHash[o]; ok { - return hash, nil - } - } - - // Lazily generate the reverse offset/hash map if required. - if !idx.offsetHashIsFull || idx.offsetHash == nil { - if err := idx.genOffsetHash(); err != nil { - return plumbing.ZeroHash, err - } - - hash, ok = idx.offsetHash[o] - } - - if !ok { - return plumbing.ZeroHash, plumbing.ErrObjectNotFound - } - - return hash, nil -} - -// genOffsetHash generates the offset/hash mapping for reverse search. -func (idx *MemoryIndex) genOffsetHash() error { - count, err := idx.Count() - if err != nil { - return err - } - - idx.offsetHash = make(map[int64]plumbing.Hash, count) - idx.offsetHashIsFull = true - - var hash plumbing.Hash - i := uint32(0) - for firstLevel, fanoutValue := range idx.Fanout { - mappedFirstLevel := idx.FanoutMapping[firstLevel] - for secondLevel := uint32(0); i < fanoutValue; i++ { - copy(hash[:], idx.Names[mappedFirstLevel][secondLevel*objectIDLength:]) - offset := int64(idx.getOffset(mappedFirstLevel, int(secondLevel))) - idx.offsetHash[offset] = hash - secondLevel++ - } - } - - return nil -} - -// Count implements the Index interface. -func (idx *MemoryIndex) Count() (int64, error) { - return int64(idx.Fanout[fanout-1]), nil -} - -// Entries implements the Index interface. -func (idx *MemoryIndex) Entries() (EntryIter, error) { - return &idxfileEntryIter{idx, 0, 0, 0}, nil -} - -// EntriesByOffset implements the Index interface. 
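// [Editorial note, not part of the deleted file] The Offset32/Offset64
// indirection implemented by getOffset above, worked through: each 32-bit
// entry is either a plain pack offset (MSB clear) or, with the MSB set, an
// index into the 8-byte Offset64 table. For a raw entry of 0x80000003 the
// MSB is set and the low 31 bits select entry 3, so the real offset is read
// from Offset64[24:32]. Packs smaller than 2 GiB never populate the 64-bit
// table, matching the format description in doc.go.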
-func (idx *MemoryIndex) EntriesByOffset() (EntryIter, error) { - count, err := idx.Count() - if err != nil { - return nil, err - } - - iter := &idxfileEntryOffsetIter{ - entries: make(entriesByOffset, count), - } - - entries, err := idx.Entries() - if err != nil { - return nil, err - } - - for pos := 0; int64(pos) < count; pos++ { - entry, err := entries.Next() - if err != nil { - return nil, err - } - - iter.entries[pos] = entry - } - - sort.Sort(iter.entries) - - return iter, nil -} - -// EntryIter is an iterator that will return the entries in a packfile index. -type EntryIter interface { - // Next returns the next entry in the packfile index. - Next() (*Entry, error) - // Close closes the iterator. - Close() error -} - -type idxfileEntryIter struct { - idx *MemoryIndex - total int - firstLevel, secondLevel int -} - -func (i *idxfileEntryIter) Next() (*Entry, error) { - for { - if i.firstLevel >= fanout { - return nil, io.EOF - } - - if i.total >= int(i.idx.Fanout[i.firstLevel]) { - i.firstLevel++ - i.secondLevel = 0 - continue - } - - mappedFirstLevel := i.idx.FanoutMapping[i.firstLevel] - entry := new(Entry) - copy(entry.Hash[:], i.idx.Names[mappedFirstLevel][i.secondLevel*objectIDLength:]) - entry.Offset = i.idx.getOffset(mappedFirstLevel, i.secondLevel) - entry.CRC32 = i.idx.getCRC32(mappedFirstLevel, i.secondLevel) - - i.secondLevel++ - i.total++ - - return entry, nil - } -} - -func (i *idxfileEntryIter) Close() error { - i.firstLevel = fanout - return nil -} - -// Entry is the in memory representation of an object entry in the idx file. -type Entry struct { - Hash plumbing.Hash - CRC32 uint32 - Offset uint64 -} - -type idxfileEntryOffsetIter struct { - entries entriesByOffset - pos int -} - -func (i *idxfileEntryOffsetIter) Next() (*Entry, error) { - if i.pos >= len(i.entries) { - return nil, io.EOF - } - - entry := i.entries[i.pos] - i.pos++ - - return entry, nil -} - -func (i *idxfileEntryOffsetIter) Close() error { - i.pos = len(i.entries) + 1 - return nil -} - -type entriesByOffset []*Entry - -func (o entriesByOffset) Len() int { - return len(o) -} - -func (o entriesByOffset) Less(i int, j int) bool { - return o[i].Offset < o[j].Offset -} - -func (o entriesByOffset) Swap(i int, j int) { - o[i], o[j] = o[j], o[i] -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/writer.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/writer.go deleted file mode 100644 index daa160502ae..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/writer.go +++ /dev/null @@ -1,186 +0,0 @@ -package idxfile - -import ( - "bytes" - "fmt" - "math" - "sort" - "sync" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/utils/binary" -) - -// objects implements sort.Interface and uses hash as sorting key. -type objects []Entry - -// Writer implements a packfile Observer interface and is used to generate -// indexes. -type Writer struct { - m sync.Mutex - - count uint32 - checksum plumbing.Hash - objects objects - offset64 uint32 - finished bool - index *MemoryIndex - added map[plumbing.Hash]struct{} -} - -// Index returns a previously created MemoryIndex or creates a new one if -// needed. -func (w *Writer) Index() (*MemoryIndex, error) { - w.m.Lock() - defer w.m.Unlock() - - if w.index == nil { - return w.createIndex() - } - - return w.index, nil -} - -// Add appends new object data. 
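// [Editorial sketch, not part of the deleted file] Writer is normally driven
// through the packfile Observer callbacks implemented below, but the same
// flow can be exercised by hand (hashes, offsets and CRCs are made up here):
//
//	w := new(Writer)
//	w.OnHeader(2)                                 // two objects expected
//	w.OnInflatedObjectContent(h1, 12, crc1, nil)  // object at offset 12
//	w.OnInflatedObjectContent(h2, 512, crc2, nil) // object at offset 512
//	w.OnFooter(packChecksum)                      // marks the writer finished
//	idx, err := w.Index()                         // sorted, ready to encode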
-func (w *Writer) Add(h plumbing.Hash, pos uint64, crc uint32) { - w.m.Lock() - defer w.m.Unlock() - - if w.added == nil { - w.added = make(map[plumbing.Hash]struct{}) - } - - if _, ok := w.added[h]; !ok { - w.added[h] = struct{}{} - w.objects = append(w.objects, Entry{h, crc, pos}) - } - -} - -func (w *Writer) Finished() bool { - return w.finished -} - -// OnHeader implements packfile.Observer interface. -func (w *Writer) OnHeader(count uint32) error { - w.count = count - w.objects = make(objects, 0, count) - return nil -} - -// OnInflatedObjectHeader implements packfile.Observer interface. -func (w *Writer) OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error { - return nil -} - -// OnInflatedObjectContent implements packfile.Observer interface. -func (w *Writer) OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, _ []byte) error { - w.Add(h, uint64(pos), crc) - return nil -} - -// OnFooter implements packfile.Observer interface. -func (w *Writer) OnFooter(h plumbing.Hash) error { - w.checksum = h - w.finished = true - _, err := w.createIndex() - if err != nil { - return err - } - - return nil -} - -// creatIndex returns a filled MemoryIndex with the information filled by -// the observer callbacks. -func (w *Writer) createIndex() (*MemoryIndex, error) { - if !w.finished { - return nil, fmt.Errorf("the index still hasn't finished building") - } - - idx := new(MemoryIndex) - w.index = idx - - sort.Sort(w.objects) - - // unmap all fans by default - for i := range idx.FanoutMapping { - idx.FanoutMapping[i] = noMapping - } - - buf := new(bytes.Buffer) - - last := -1 - bucket := -1 - for i, o := range w.objects { - fan := o.Hash[0] - - // fill the gaps between fans - for j := last + 1; j < int(fan); j++ { - idx.Fanout[j] = uint32(i) - } - - // update the number of objects for this position - idx.Fanout[fan] = uint32(i + 1) - - // we move from one bucket to another, update counters and allocate - // memory - if last != int(fan) { - bucket++ - idx.FanoutMapping[fan] = bucket - last = int(fan) - - idx.Names = append(idx.Names, make([]byte, 0)) - idx.Offset32 = append(idx.Offset32, make([]byte, 0)) - idx.CRC32 = append(idx.CRC32, make([]byte, 0)) - } - - idx.Names[bucket] = append(idx.Names[bucket], o.Hash[:]...) - - offset := o.Offset - if offset > math.MaxInt32 { - offset = w.addOffset64(offset) - } - - buf.Truncate(0) - binary.WriteUint32(buf, uint32(offset)) - idx.Offset32[bucket] = append(idx.Offset32[bucket], buf.Bytes()...) - - buf.Truncate(0) - binary.WriteUint32(buf, o.CRC32) - idx.CRC32[bucket] = append(idx.CRC32[bucket], buf.Bytes()...) - } - - for j := last + 1; j < 256; j++ { - idx.Fanout[j] = uint32(len(w.objects)) - } - - idx.Version = VersionSupported - idx.PackfileChecksum = w.checksum - - return idx, nil -} - -func (w *Writer) addOffset64(pos uint64) uint64 { - buf := new(bytes.Buffer) - binary.WriteUint64(buf, pos) - w.index.Offset64 = append(w.index.Offset64, buf.Bytes()...) 
- - index := uint64(w.offset64 | (1 << 31)) - w.offset64++ - - return index -} - -func (o objects) Len() int { - return len(o) -} - -func (o objects) Less(i int, j int) bool { - cmp := bytes.Compare(o[i].Hash[:], o[j].Hash[:]) - return cmp < 0 -} - -func (o objects) Swap(i int, j int) { - o[i], o[j] = o[j], o[i] -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go deleted file mode 100644 index 79d0b9e1110..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go +++ /dev/null @@ -1,477 +0,0 @@ -package index - -import ( - "bufio" - "bytes" - "crypto/sha1" - "errors" - "hash" - "io" - "io/ioutil" - "strconv" - "time" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/utils/binary" -) - -var ( - // DecodeVersionSupported is the range of supported index versions - DecodeVersionSupported = struct{ Min, Max uint32 }{Min: 2, Max: 4} - - // ErrMalformedSignature is returned by Decode when the index header file is - // malformed - ErrMalformedSignature = errors.New("malformed index signature file") - // ErrInvalidChecksum is returned by Decode if the SHA1 hash mismatch with - // the read content - ErrInvalidChecksum = errors.New("invalid checksum") - - errUnknownExtension = errors.New("unknown extension") -) - -const ( - entryHeaderLength = 62 - entryExtended = 0x4000 - entryValid = 0x8000 - nameMask = 0xfff - intentToAddMask = 1 << 13 - skipWorkTreeMask = 1 << 14 -) - -// A Decoder reads and decodes index files from an input stream. -type Decoder struct { - r io.Reader - hash hash.Hash - lastEntry *Entry - - extReader *bufio.Reader -} - -// NewDecoder returns a new decoder that reads from r. -func NewDecoder(r io.Reader) *Decoder { - h := sha1.New() - return &Decoder{ - r: io.TeeReader(r, h), - hash: h, - extReader: bufio.NewReader(nil), - } -} - -// Decode reads the whole index object from its input and stores it in the -// value pointed to by idx. 
-func (d *Decoder) Decode(idx *Index) error { - var err error - idx.Version, err = validateHeader(d.r) - if err != nil { - return err - } - - entryCount, err := binary.ReadUint32(d.r) - if err != nil { - return err - } - - if err := d.readEntries(idx, int(entryCount)); err != nil { - return err - } - - return d.readExtensions(idx) -} - -func (d *Decoder) readEntries(idx *Index, count int) error { - for i := 0; i < count; i++ { - e, err := d.readEntry(idx) - if err != nil { - return err - } - - d.lastEntry = e - idx.Entries = append(idx.Entries, e) - } - - return nil -} - -func (d *Decoder) readEntry(idx *Index) (*Entry, error) { - e := &Entry{} - - var msec, mnsec, sec, nsec uint32 - var flags uint16 - - flow := []interface{}{ - &sec, &nsec, - &msec, &mnsec, - &e.Dev, - &e.Inode, - &e.Mode, - &e.UID, - &e.GID, - &e.Size, - &e.Hash, - &flags, - } - - if err := binary.Read(d.r, flow...); err != nil { - return nil, err - } - - read := entryHeaderLength - - if sec != 0 || nsec != 0 { - e.CreatedAt = time.Unix(int64(sec), int64(nsec)) - } - - if msec != 0 || mnsec != 0 { - e.ModifiedAt = time.Unix(int64(msec), int64(mnsec)) - } - - e.Stage = Stage(flags>>12) & 0x3 - - if flags&entryExtended != 0 { - extended, err := binary.ReadUint16(d.r) - if err != nil { - return nil, err - } - - read += 2 - e.IntentToAdd = extended&intentToAddMask != 0 - e.SkipWorktree = extended&skipWorkTreeMask != 0 - } - - if err := d.readEntryName(idx, e, flags); err != nil { - return nil, err - } - - return e, d.padEntry(idx, e, read) -} - -func (d *Decoder) readEntryName(idx *Index, e *Entry, flags uint16) error { - var name string - var err error - - switch idx.Version { - case 2, 3: - len := flags & nameMask - name, err = d.doReadEntryName(len) - case 4: - name, err = d.doReadEntryNameV4() - default: - return ErrUnsupportedVersion - } - - if err != nil { - return err - } - - e.Name = name - return nil -} - -func (d *Decoder) doReadEntryNameV4() (string, error) { - l, err := binary.ReadVariableWidthInt(d.r) - if err != nil { - return "", err - } - - var base string - if d.lastEntry != nil { - base = d.lastEntry.Name[:len(d.lastEntry.Name)-int(l)] - } - - name, err := binary.ReadUntil(d.r, '\x00') - if err != nil { - return "", err - } - - return base + string(name), nil -} - -func (d *Decoder) doReadEntryName(len uint16) (string, error) { - name := make([]byte, len) - _, err := io.ReadFull(d.r, name[:]) - - return string(name), err -} - -// Index entries are padded out to the next 8 byte alignment -// for historical reasons related to how C Git read the files. 
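// [Editorial note, not part of the deleted file] Worked through: an entry with
// the fixed 62-byte header and a 9-byte name occupies 71 bytes, and
// 8 - 71%8 = 1, so padEntry below skips one NUL byte to return to an 8-byte
// boundary. A size already on the boundary yields a full 8 bytes of padding,
// which keeps the entry name NUL-terminated as the index format requires.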
-func (d *Decoder) padEntry(idx *Index, e *Entry, read int) error { - if idx.Version == 4 { - return nil - } - - entrySize := read + len(e.Name) - padLen := 8 - entrySize%8 - _, err := io.CopyN(ioutil.Discard, d.r, int64(padLen)) - return err -} - -func (d *Decoder) readExtensions(idx *Index) error { - // TODO: support 'Split index' and 'Untracked cache' extensions, take in - // count that they are not supported by jgit or libgit - - var expected []byte - var err error - - var header [4]byte - for { - expected = d.hash.Sum(nil) - - var n int - if n, err = io.ReadFull(d.r, header[:]); err != nil { - if n == 0 { - err = io.EOF - } - - break - } - - err = d.readExtension(idx, header[:]) - if err != nil { - break - } - } - - if err != errUnknownExtension { - return err - } - - return d.readChecksum(expected, header) -} - -func (d *Decoder) readExtension(idx *Index, header []byte) error { - switch { - case bytes.Equal(header, treeExtSignature): - r, err := d.getExtensionReader() - if err != nil { - return err - } - - idx.Cache = &Tree{} - d := &treeExtensionDecoder{r} - if err := d.Decode(idx.Cache); err != nil { - return err - } - case bytes.Equal(header, resolveUndoExtSignature): - r, err := d.getExtensionReader() - if err != nil { - return err - } - - idx.ResolveUndo = &ResolveUndo{} - d := &resolveUndoDecoder{r} - if err := d.Decode(idx.ResolveUndo); err != nil { - return err - } - case bytes.Equal(header, endOfIndexEntryExtSignature): - r, err := d.getExtensionReader() - if err != nil { - return err - } - - idx.EndOfIndexEntry = &EndOfIndexEntry{} - d := &endOfIndexEntryDecoder{r} - if err := d.Decode(idx.EndOfIndexEntry); err != nil { - return err - } - default: - return errUnknownExtension - } - - return nil -} - -func (d *Decoder) getExtensionReader() (*bufio.Reader, error) { - len, err := binary.ReadUint32(d.r) - if err != nil { - return nil, err - } - - d.extReader.Reset(&io.LimitedReader{R: d.r, N: int64(len)}) - return d.extReader, nil -} - -func (d *Decoder) readChecksum(expected []byte, alreadyRead [4]byte) error { - var h plumbing.Hash - copy(h[:4], alreadyRead[:]) - - if _, err := io.ReadFull(d.r, h[4:]); err != nil { - return err - } - - if !bytes.Equal(h[:], expected) { - return ErrInvalidChecksum - } - - return nil -} - -func validateHeader(r io.Reader) (version uint32, err error) { - var s = make([]byte, 4) - if _, err := io.ReadFull(r, s); err != nil { - return 0, err - } - - if !bytes.Equal(s, indexSignature) { - return 0, ErrMalformedSignature - } - - version, err = binary.ReadUint32(r) - if err != nil { - return 0, err - } - - if version < DecodeVersionSupported.Min || version > DecodeVersionSupported.Max { - return 0, ErrUnsupportedVersion - } - - return -} - -type treeExtensionDecoder struct { - r *bufio.Reader -} - -func (d *treeExtensionDecoder) Decode(t *Tree) error { - for { - e, err := d.readEntry() - if err != nil { - if err == io.EOF { - return nil - } - - return err - } - - if e == nil { - continue - } - - t.Entries = append(t.Entries, *e) - } -} - -func (d *treeExtensionDecoder) readEntry() (*TreeEntry, error) { - e := &TreeEntry{} - - path, err := binary.ReadUntil(d.r, '\x00') - if err != nil { - return nil, err - } - - e.Path = string(path) - - count, err := binary.ReadUntil(d.r, ' ') - if err != nil { - return nil, err - } - - i, err := strconv.Atoi(string(count)) - if err != nil { - return nil, err - } - - // An entry can be in an invalidated state and is represented by having a - // negative number in the entry_count field. 
- if i == -1 { - return nil, nil - } - - e.Entries = i - trees, err := binary.ReadUntil(d.r, '\n') - if err != nil { - return nil, err - } - - i, err = strconv.Atoi(string(trees)) - if err != nil { - return nil, err - } - - e.Trees = i - _, err = io.ReadFull(d.r, e.Hash[:]) - - return e, nil -} - -type resolveUndoDecoder struct { - r *bufio.Reader -} - -func (d *resolveUndoDecoder) Decode(ru *ResolveUndo) error { - for { - e, err := d.readEntry() - if err != nil { - if err == io.EOF { - return nil - } - - return err - } - - ru.Entries = append(ru.Entries, *e) - } -} - -func (d *resolveUndoDecoder) readEntry() (*ResolveUndoEntry, error) { - e := &ResolveUndoEntry{ - Stages: make(map[Stage]plumbing.Hash), - } - - path, err := binary.ReadUntil(d.r, '\x00') - if err != nil { - return nil, err - } - - e.Path = string(path) - - for i := 0; i < 3; i++ { - if err := d.readStage(e, Stage(i+1)); err != nil { - return nil, err - } - } - - for s := range e.Stages { - var hash plumbing.Hash - if _, err := io.ReadFull(d.r, hash[:]); err != nil { - return nil, err - } - - e.Stages[s] = hash - } - - return e, nil -} - -func (d *resolveUndoDecoder) readStage(e *ResolveUndoEntry, s Stage) error { - ascii, err := binary.ReadUntil(d.r, '\x00') - if err != nil { - return err - } - - stage, err := strconv.ParseInt(string(ascii), 8, 64) - if err != nil { - return err - } - - if stage != 0 { - e.Stages[s] = plumbing.ZeroHash - } - - return nil -} - -type endOfIndexEntryDecoder struct { - r *bufio.Reader -} - -func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error { - var err error - e.Offset, err = binary.ReadUint32(d.r) - if err != nil { - return err - } - - _, err = io.ReadFull(d.r, e.Hash[:]) - return err -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/index/doc.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/index/doc.go deleted file mode 100644 index 39ae6ad5f91..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/index/doc.go +++ /dev/null @@ -1,360 +0,0 @@ -// Package index implements encoding and decoding of index format files. -// -// Git index format -// ================ -// -// == The Git index file has the following format -// -// All binary numbers are in network byte order. Version 2 is described -// here unless stated otherwise. -// -// - A 12-byte header consisting of -// -// 4-byte signature: -// The signature is { 'D', 'I', 'R', 'C' } (stands for "dircache") -// -// 4-byte version number: -// The current supported versions are 2, 3 and 4. -// -// 32-bit number of index entries. -// -// - A number of sorted index entries (see below). -// -// - Extensions -// -// Extensions are identified by signature. Optional extensions can -// be ignored if Git does not understand them. -// -// Git currently supports cached tree and resolve undo extensions. -// -// 4-byte extension signature. If the first byte is 'A'..'Z' the -// extension is optional and can be ignored. -// -// 32-bit size of the extension -// -// Extension data -// -// - 160-bit SHA-1 over the content of the index file before this -// checksum. -// -// == Index entry -// -// Index entries are sorted in ascending order on the name field, -// interpreted as a string of unsigned bytes (i.e. memcmp() order, no -// localization, no special casing of directory separator '/'). Entries -// with the same name are sorted by their stage field. 
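// [Editorial sketch, not part of the deleted file] The ordering described
// above, expressed as a hypothetical comparison function (memcmp order on the
// name bytes, with the stage as a tie-breaker):
//
//	func entryLess(a, b *Entry) bool {
//		if a.Name != b.Name {
//			return a.Name < b.Name // Go string order is byte-wise
//		}
//		return a.Stage < b.Stage
//	}
//
// The byName sort in encoder.go implements the common single-stage case with
// a plain string comparison.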
-// -// 32-bit ctime seconds, the last time a file's metadata changed -// this is stat(2) data -// -// 32-bit ctime nanosecond fractions -// this is stat(2) data -// -// 32-bit mtime seconds, the last time a file's data changed -// this is stat(2) data -// -// 32-bit mtime nanosecond fractions -// this is stat(2) data -// -// 32-bit dev -// this is stat(2) data -// -// 32-bit ino -// this is stat(2) data -// -// 32-bit mode, split into (high to low bits) -// -// 4-bit object type -// valid values in binary are 1000 (regular file), 1010 (symbolic link) -// and 1110 (gitlink) -// -// 3-bit unused -// -// 9-bit unix permission. Only 0755 and 0644 are valid for regular files. -// Symbolic links and gitlinks have value 0 in this field. -// -// 32-bit uid -// this is stat(2) data -// -// 32-bit gid -// this is stat(2) data -// -// 32-bit file size -// This is the on-disk size from stat(2), truncated to 32-bit. -// -// 160-bit SHA-1 for the represented object -// -// A 16-bit 'flags' field split into (high to low bits) -// -// 1-bit assume-valid flag -// -// 1-bit extended flag (must be zero in version 2) -// -// 2-bit stage (during merge) -// -// 12-bit name length if the length is less than 0xFFF; otherwise 0xFFF -// is stored in this field. -// -// (Version 3 or later) A 16-bit field, only applicable if the -// "extended flag" above is 1, split into (high to low bits). -// -// 1-bit reserved for future -// -// 1-bit skip-worktree flag (used by sparse checkout) -// -// 1-bit intent-to-add flag (used by "git add -N") -// -// 13-bit unused, must be zero -// -// Entry path name (variable length) relative to top level directory -// (without leading slash). '/' is used as path separator. The special -// path components ".", ".." and ".git" (without quotes) are disallowed. -// Trailing slash is also disallowed. -// -// The exact encoding is undefined, but the '.' and '/' characters -// are encoded in 7-bit ASCII and the encoding cannot contain a NUL -// byte (iow, this is a UNIX pathname). -// -// (Version 4) In version 4, the entry path name is prefix-compressed -// relative to the path name for the previous entry (the very first -// entry is encoded as if the path name for the previous entry is an -// empty string). At the beginning of an entry, an integer N in the -// variable width encoding (the same encoding as the offset is encoded -// for OFS_DELTA pack entries; see pack-format.txt) is stored, followed -// by a NUL-terminated string S. Removing N bytes from the end of the -// path name for the previous entry, and replacing it with the string S -// yields the path name for this entry. -// -// 1-8 nul bytes as necessary to pad the entry to a multiple of eight bytes -// while keeping the name NUL-terminated. -// -// (Version 4) In version 4, the padding after the pathname does not -// exist. -// -// Interpretation of index entries in split index mode is completely -// different. See below for details. -// -// == Extensions -// -// === Cached tree -// -// Cached tree extension contains pre-computed hashes for trees that can -// be derived from the index. It helps speed up tree object generation -// from index for a new commit. -// -// When a path is updated in index, the path must be invalidated and -// removed from tree cache. -// -// The signature for this extension is { 'T', 'R', 'E', 'E' }. 
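// [Editorial note, not part of the deleted file] A worked example of the
// 16-bit 'flags' field from the index entry layout above: for flags = 0x3007
// the assume-valid bit (bit 15) and the extended bit (bit 14) are both zero,
// the stage is (0x3007 >> 12) & 0x3 = 3, and the name length is
// 0x3007 & 0xfff = 7, exactly the masks the decoder in decoder.go applies.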
-// -// A series of entries fill the entire extension; each of which -// consists of: -// -// - NUL-terminated path component (relative to its parent directory); -// -// - ASCII decimal number of entries in the index that is covered by the -// tree this entry represents (entry_count); -// -// - A space (ASCII 32); -// -// - ASCII decimal number that represents the number of subtrees this -// tree has; -// -// - A newline (ASCII 10); and -// -// - 160-bit object name for the object that would result from writing -// this span of index as a tree. -// -// An entry can be in an invalidated state and is represented by having -// a negative number in the entry_count field. In this case, there is no -// object name and the next entry starts immediately after the newline. -// When writing an invalid entry, -1 should always be used as entry_count. -// -// The entries are written out in the top-down, depth-first order. The -// first entry represents the root level of the repository, followed by the -// first subtree--let's call this A--of the root level (with its name -// relative to the root level), followed by the first subtree of A (with -// its name relative to A), ... -// -// === Resolve undo -// -// A conflict is represented in the index as a set of higher stage entries. -// When a conflict is resolved (e.g. with "git add path"), these higher -// stage entries will be removed and a stage-0 entry with proper resolution -// is added. -// -// When these higher stage entries are removed, they are saved in the -// resolve undo extension, so that conflicts can be recreated (e.g. with -// "git checkout -m"), in case users want to redo a conflict resolution -// from scratch. -// -// The signature for this extension is { 'R', 'E', 'U', 'C' }. -// -// A series of entries fill the entire extension; each of which -// consists of: -// -// - NUL-terminated pathname the entry describes (relative to the root of -// the repository, i.e. full pathname); -// -// - Three NUL-terminated ASCII octal numbers, entry mode of entries in -// stage 1 to 3 (a missing stage is represented by "0" in this field); -// and -// -// - At most three 160-bit object names of the entry in stages from 1 to 3 -// (nothing is written for a missing stage). -// -// === Split index -// -// In split index mode, the majority of index entries could be stored -// in a separate file. This extension records the changes to be made on -// top of that to produce the final index. -// -// The signature for this extension is { 'l', 'i', 'n', 'k' }. -// -// The extension consists of: -// -// - 160-bit SHA-1 of the shared index file. The shared index file path -// is $GIT_DIR/sharedindex.. If all 160 bits are zero, the -// index does not require a shared index file. -// -// - An ewah-encoded delete bitmap, each bit represents an entry in the -// shared index. If a bit is set, its corresponding entry in the -// shared index will be removed from the final index. Note, because -// a delete operation changes index entry positions, but we do need -// original positions in replace phase, it's best to just mark -// entries for removal, then do a mass deletion after replacement. -// -// - An ewah-encoded replace bitmap, each bit represents an entry in -// the shared index. If a bit is set, its corresponding entry in the -// shared index will be replaced with an entry in this index -// file. All replaced entries are stored in sorted order in this -// index. 
The first "1" bit in the replace bitmap corresponds to the -// first index entry, the second "1" bit to the second entry and so -// on. Replaced entries may have empty path names to save space. -// -// The remaining index entries after replaced ones will be added to the -// final index. These added entries are also sorted by entry name then -// stage. -// -// == Untracked cache -// -// Untracked cache saves the untracked file list and necessary data to -// verify the cache. The signature for this extension is { 'U', 'N', -// 'T', 'R' }. -// -// The extension starts with -// -// - A sequence of NUL-terminated strings, preceded by the size of the -// sequence in variable width encoding. Each string describes the -// environment where the cache can be used. -// -// - Stat data of $GIT_DIR/info/exclude. See "Index entry" section from -// ctime field until "file size". -// -// - Stat data of plumbing.excludesfile -// -// - 32-bit dir_flags (see struct dir_struct) -// -// - 160-bit SHA-1 of $GIT_DIR/info/exclude. Null SHA-1 means the file -// does not exist. -// -// - 160-bit SHA-1 of plumbing.excludesfile. Null SHA-1 means the file does -// not exist. -// -// - NUL-terminated string of per-dir exclude file name. This usually -// is ".gitignore". -// -// - The number of following directory blocks, variable width -// encoding. If this number is zero, the extension ends here with a -// following NUL. -// -// - A number of directory blocks in depth-first-search order, each -// consists of -// -// - The number of untracked entries, variable width encoding. -// -// - The number of sub-directory blocks, variable width encoding. -// -// - The directory name terminated by NUL. -// -// - A number of untracked file/dir names terminated by NUL. -// -// The remaining data of each directory block is grouped by type: -// -// - An ewah bitmap, the n-th bit marks whether the n-th directory has -// valid untracked cache entries. -// -// - An ewah bitmap, the n-th bit records "check-only" bit of -// read_directory_recursive() for the n-th directory. -// -// - An ewah bitmap, the n-th bit indicates whether SHA-1 and stat data -// is valid for the n-th directory and exists in the next data. -// -// - An array of stat data. The n-th data corresponds with the n-th -// "one" bit in the previous ewah bitmap. -// -// - An array of SHA-1. The n-th SHA-1 corresponds with the n-th "one" bit -// in the previous ewah bitmap. -// -// - One NUL. -// -// == File System Monitor cache -// -// The file system monitor cache tracks files for which the core.fsmonitor -// hook has told us about changes. The signature for this extension is -// { 'F', 'S', 'M', 'N' }. -// -// The extension starts with -// -// - 32-bit version number: the current supported version is 1. -// -// - 64-bit time: the extension data reflects all changes through the given -// time which is stored as the nanoseconds elapsed since midnight, -// January 1, 1970. -// -// - 32-bit bitmap size: the size of the CE_FSMONITOR_VALID bitmap. -// -// - An ewah bitmap, the n-th bit indicates whether the n-th index entry -// is not CE_FSMONITOR_VALID. -// -// == End of Index Entry -// -// The End of Index Entry (EOIE) is used to locate the end of the variable -// length index entries and the beginning of the extensions. Code can take -// advantage of this to quickly locate the index extensions without having -// to parse through all of the index entries. 
-// -// Because it must be able to be loaded before the variable length cache -// entries and other index extensions, this extension must be written last. -// The signature for this extension is { 'E', 'O', 'I', 'E' }. -// -// The extension consists of: -// -// - 32-bit offset to the end of the index entries -// -// - 160-bit SHA-1 over the extension types and their sizes (but not -// their contents). E.g. if we have "TREE" extension that is N-bytes -// long, "REUC" extension that is M-bytes long, followed by "EOIE", -// then the hash would be: -// -// SHA-1("TREE" + + -// "REUC" + ) -// -// == Index Entry Offset Table -// -// The Index Entry Offset Table (IEOT) is used to help address the CPU -// cost of loading the index by enabling multi-threading the process of -// converting cache entries from the on-disk format to the in-memory format. -// The signature for this extension is { 'I', 'E', 'O', 'T' }. -// -// The extension consists of: -// -// - 32-bit version (currently 1) -// -// - A number of index offset entries each consisting of: -// -// - 32-bit offset from the beginning of the file to the first cache entry -// in this block of entries. -// -// - 32-bit count of cache entries in this blockpackage index -package index diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go deleted file mode 100644 index 00d4e7a3178..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go +++ /dev/null @@ -1,150 +0,0 @@ -package index - -import ( - "bytes" - "crypto/sha1" - "errors" - "hash" - "io" - "sort" - "time" - - "github.com/go-git/go-git/v5/utils/binary" -) - -var ( - // EncodeVersionSupported is the range of supported index versions - EncodeVersionSupported uint32 = 2 - - // ErrInvalidTimestamp is returned by Encode if a Index with a Entry with - // negative timestamp values - ErrInvalidTimestamp = errors.New("negative timestamps are not allowed") -) - -// An Encoder writes an Index to an output stream. -type Encoder struct { - w io.Writer - hash hash.Hash -} - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - h := sha1.New() - mw := io.MultiWriter(w, h) - return &Encoder{mw, h} -} - -// Encode writes the Index to the stream of the encoder. 
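// Sketch of the hash-as-you-write pattern NewEncoder sets up above
// (illustrative, stdlib "crypto/sha1", "hash", "io" only): every byte goes
// both to the destination and to a running SHA-1, so a trailing footer can
// record the checksum of everything written before it.
type checksumWriter struct {
	w io.Writer // io.MultiWriter fan-out: destination plus hasher
	h hash.Hash
}

func newChecksumWriter(w io.Writer) *checksumWriter {
	h := sha1.New()
	return &checksumWriter{w: io.MultiWriter(w, h), h: h}
}

func (c *checksumWriter) Write(p []byte) (int, error) { return c.w.Write(p) }

// footer mirrors encodeFooter below: the digest is taken first, so the fact
// that the trailer bytes also pass through the hasher is harmless.
func (c *checksumWriter) footer() error {
	_, err := c.w.Write(c.h.Sum(nil))
	return err
}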
-func (e *Encoder) Encode(idx *Index) error { - // TODO: support versions v3 and v4 - // TODO: support extensions - if idx.Version != EncodeVersionSupported { - return ErrUnsupportedVersion - } - - if err := e.encodeHeader(idx); err != nil { - return err - } - - if err := e.encodeEntries(idx); err != nil { - return err - } - - return e.encodeFooter() -} - -func (e *Encoder) encodeHeader(idx *Index) error { - return binary.Write(e.w, - indexSignature, - idx.Version, - uint32(len(idx.Entries)), - ) -} - -func (e *Encoder) encodeEntries(idx *Index) error { - sort.Sort(byName(idx.Entries)) - - for _, entry := range idx.Entries { - if err := e.encodeEntry(entry); err != nil { - return err - } - - wrote := entryHeaderLength + len(entry.Name) - if err := e.padEntry(wrote); err != nil { - return err - } - } - - return nil -} - -func (e *Encoder) encodeEntry(entry *Entry) error { - if entry.IntentToAdd || entry.SkipWorktree { - return ErrUnsupportedVersion - } - - sec, nsec, err := e.timeToUint32(&entry.CreatedAt) - if err != nil { - return err - } - - msec, mnsec, err := e.timeToUint32(&entry.ModifiedAt) - if err != nil { - return err - } - - flags := uint16(entry.Stage&0x3) << 12 - if l := len(entry.Name); l < nameMask { - flags |= uint16(l) - } else { - flags |= nameMask - } - - flow := []interface{}{ - sec, nsec, - msec, mnsec, - entry.Dev, - entry.Inode, - entry.Mode, - entry.UID, - entry.GID, - entry.Size, - entry.Hash[:], - flags, - } - - if err := binary.Write(e.w, flow...); err != nil { - return err - } - - return binary.Write(e.w, []byte(entry.Name)) -} - -func (e *Encoder) timeToUint32(t *time.Time) (uint32, uint32, error) { - if t.IsZero() { - return 0, 0, nil - } - - if t.Unix() < 0 || t.UnixNano() < 0 { - return 0, 0, ErrInvalidTimestamp - } - - return uint32(t.Unix()), uint32(t.Nanosecond()), nil -} - -func (e *Encoder) padEntry(wrote int) error { - padLen := 8 - wrote%8 - - _, err := e.w.Write(bytes.Repeat([]byte{'\x00'}, padLen)) - return err -} - -func (e *Encoder) encodeFooter() error { - return binary.Write(e.w, e.hash.Sum(nil)) -} - -type byName []*Entry - -func (l byName) Len() int { return len(l) } -func (l byName) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l byName) Less(i, j int) bool { return l[i].Name < l[j].Name } diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/index/index.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/index/index.go deleted file mode 100644 index 649416a2b44..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/index/index.go +++ /dev/null @@ -1,213 +0,0 @@ -package index - -import ( - "bytes" - "errors" - "fmt" - "path/filepath" - "time" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/filemode" -) - -var ( - // ErrUnsupportedVersion is returned by Decode when the index file version - // is not supported. - ErrUnsupportedVersion = errors.New("unsupported version") - // ErrEntryNotFound is returned by Index.Entry, if an entry is not found. 
- ErrEntryNotFound = errors.New("entry not found") - - indexSignature = []byte{'D', 'I', 'R', 'C'} - treeExtSignature = []byte{'T', 'R', 'E', 'E'} - resolveUndoExtSignature = []byte{'R', 'E', 'U', 'C'} - endOfIndexEntryExtSignature = []byte{'E', 'O', 'I', 'E'} -) - -// Stage during merge -type Stage int - -const ( - // Merged is the default stage, fully merged - Merged Stage = 1 - // AncestorMode is the base revision - AncestorMode Stage = 1 - // OurMode is the first tree revision, ours - OurMode Stage = 2 - // TheirMode is the second tree revision, theirs - TheirMode Stage = 3 -) - -// Index contains the information about which objects are currently checked out -// in the worktree, having information about the working files. Changes in -// worktree are detected using this Index. The Index is also used during merges -type Index struct { - // Version is index version - Version uint32 - // Entries collection of entries represented by this Index. The order of - // this collection is not guaranteed - Entries []*Entry - // Cache represents the 'Cached tree' extension - Cache *Tree - // ResolveUndo represents the 'Resolve undo' extension - ResolveUndo *ResolveUndo - // EndOfIndexEntry represents the 'End of Index Entry' extension - EndOfIndexEntry *EndOfIndexEntry -} - -// Add creates a new Entry and returns it. The caller should first check that -// another entry with the same path does not exist. -func (i *Index) Add(path string) *Entry { - e := &Entry{ - Name: filepath.ToSlash(path), - } - - i.Entries = append(i.Entries, e) - return e -} - -// Entry returns the entry that match the given path, if any. -func (i *Index) Entry(path string) (*Entry, error) { - path = filepath.ToSlash(path) - for _, e := range i.Entries { - if e.Name == path { - return e, nil - } - } - - return nil, ErrEntryNotFound -} - -// Remove remove the entry that match the give path and returns deleted entry. -func (i *Index) Remove(path string) (*Entry, error) { - path = filepath.ToSlash(path) - for index, e := range i.Entries { - if e.Name == path { - i.Entries = append(i.Entries[:index], i.Entries[index+1:]...) - return e, nil - } - } - - return nil, ErrEntryNotFound -} - -// Glob returns the all entries matching pattern or nil if there is no matching -// entry. The syntax of patterns is the same as in filepath.Glob. -func (i *Index) Glob(pattern string) (matches []*Entry, err error) { - pattern = filepath.ToSlash(pattern) - for _, e := range i.Entries { - m, err := match(pattern, e.Name) - if err != nil { - return nil, err - } - - if m { - matches = append(matches, e) - } - } - - return -} - -// String is equivalent to `git ls-files --stage --debug` -func (i *Index) String() string { - buf := bytes.NewBuffer(nil) - for _, e := range i.Entries { - buf.WriteString(e.String()) - } - - return buf.String() -} - -// Entry represents a single file (or stage of a file) in the cache. An entry -// represents exactly one stage of a file. If a file path is unmerged then -// multiple Entry instances may appear for the same path name. 
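// Hypothetical usage of the Index API deleted above, written against the
// upstream github.com/go-git/go-git/v5 module (path and pattern invented):
func indexExample() error {
	idx := &index.Index{Version: 2}
	idx.Add("cmd/main.go") // Add normalizes the path with filepath.ToSlash
	e, err := idx.Entry("cmd/main.go")
	if err != nil {
		return err // ErrEntryNotFound if the path is absent
	}
	e.Stage = index.Merged
	matches, err := idx.Glob("cmd/*") // same pattern syntax as filepath.Glob
	if err != nil {
		return err
	}
	_ = matches
	return nil
}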
-type Entry struct { - // Hash is the SHA1 of the represented file - Hash plumbing.Hash - // Name is the Entry path name relative to top level directory - Name string - // CreatedAt time when the tracked path was created - CreatedAt time.Time - // ModifiedAt time when the tracked path was changed - ModifiedAt time.Time - // Dev and Inode of the tracked path - Dev, Inode uint32 - // Mode of the path - Mode filemode.FileMode - // UID and GID, userid and group id of the owner - UID, GID uint32 - // Size is the length in bytes for regular files - Size uint32 - // Stage on a merge is defines what stage is representing this entry - // https://git-scm.com/book/en/v2/Git-Tools-Advanced-Merging - Stage Stage - // SkipWorktree used in sparse checkouts - // https://git-scm.com/docs/git-read-tree#_sparse_checkout - SkipWorktree bool - // IntentToAdd record only the fact that the path will be added later - // https://git-scm.com/docs/git-add ("git add -N") - IntentToAdd bool -} - -func (e Entry) String() string { - buf := bytes.NewBuffer(nil) - - fmt.Fprintf(buf, "%06o %s %d\t%s\n", e.Mode, e.Hash, e.Stage, e.Name) - fmt.Fprintf(buf, " ctime: %d:%d\n", e.CreatedAt.Unix(), e.CreatedAt.Nanosecond()) - fmt.Fprintf(buf, " mtime: %d:%d\n", e.ModifiedAt.Unix(), e.ModifiedAt.Nanosecond()) - fmt.Fprintf(buf, " dev: %d\tino: %d\n", e.Dev, e.Inode) - fmt.Fprintf(buf, " uid: %d\tgid: %d\n", e.UID, e.GID) - fmt.Fprintf(buf, " size: %d\tflags: %x\n", e.Size, 0) - - return buf.String() -} - -// Tree contains pre-computed hashes for trees that can be derived from the -// index. It helps speed up tree object generation from index for a new commit. -type Tree struct { - Entries []TreeEntry -} - -// TreeEntry entry of a cached Tree -type TreeEntry struct { - // Path component (relative to its parent directory) - Path string - // Entries is the number of entries in the index that is covered by the tree - // this entry represents. - Entries int - // Trees is the number that represents the number of subtrees this tree has - Trees int - // Hash object name for the object that would result from writing this span - // of index as a tree. - Hash plumbing.Hash -} - -// ResolveUndo is used when a conflict is resolved (e.g. with "git add path"), -// these higher stage entries are removed and a stage-0 entry with proper -// resolution is added. When these higher stage entries are removed, they are -// saved in the resolve undo extension. -type ResolveUndo struct { - Entries []ResolveUndoEntry -} - -// ResolveUndoEntry contains the information about a conflict when is resolved -type ResolveUndoEntry struct { - Path string - Stages map[Stage]plumbing.Hash -} - -// EndOfIndexEntry is the End of Index Entry (EOIE) is used to locate the end of -// the variable length index entries and the beginning of the extensions. Code -// can take advantage of this to quickly locate the index extensions without -// having to parse through all of the index entries. -// -// Because it must be able to be loaded before the variable length cache -// entries and other index extensions, this extension must be written last. -type EndOfIndexEntry struct { - // Offset to the end of the index entries - Offset uint32 - // Hash is a SHA-1 over the extension types and their sizes (but not - // their contents). 
- Hash plumbing.Hash -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/index/match.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/index/match.go deleted file mode 100644 index 2891d7d34cc..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/index/match.go +++ /dev/null @@ -1,186 +0,0 @@ -package index - -import ( - "path/filepath" - "runtime" - "unicode/utf8" -) - -// match is filepath.Match with support to match fullpath and not only filenames -// code from: -// https://github.com/golang/go/blob/39852bf4cce6927e01d0136c7843f65a801738cb/src/path/filepath/match.go#L44-L224 -func match(pattern, name string) (matched bool, err error) { -Pattern: - for len(pattern) > 0 { - var star bool - var chunk string - star, chunk, pattern = scanChunk(pattern) - - // Look for match at current position. - t, ok, err := matchChunk(chunk, name) - // if we're the last chunk, make sure we've exhausted the name - // otherwise we'll give a false result even if we could still match - // using the star - if ok && (len(t) == 0 || len(pattern) > 0) { - name = t - continue - } - if err != nil { - return false, err - } - if star { - // Look for match skipping i+1 bytes. - // Cannot skip /. - for i := 0; i < len(name); i++ { - t, ok, err := matchChunk(chunk, name[i+1:]) - if ok { - // if we're the last chunk, make sure we exhausted the name - if len(pattern) == 0 && len(t) > 0 { - continue - } - name = t - continue Pattern - } - if err != nil { - return false, err - } - } - } - return false, nil - } - return len(name) == 0, nil -} - -// scanChunk gets the next segment of pattern, which is a non-star string -// possibly preceded by a star. -func scanChunk(pattern string) (star bool, chunk, rest string) { - for len(pattern) > 0 && pattern[0] == '*' { - pattern = pattern[1:] - star = true - } - inrange := false - var i int -Scan: - for i = 0; i < len(pattern); i++ { - switch pattern[i] { - case '\\': - if runtime.GOOS != "windows" { - // error check handled in matchChunk: bad pattern. - if i+1 < len(pattern) { - i++ - } - } - case '[': - inrange = true - case ']': - inrange = false - case '*': - if !inrange { - break Scan - } - } - } - return star, pattern[0:i], pattern[i:] -} - -// matchChunk checks whether chunk matches the beginning of s. -// If so, it returns the remainder of s (after the match). -// Chunk is all single-character operators: literals, char classes, and ?. -func matchChunk(chunk, s string) (rest string, ok bool, err error) { - for len(chunk) > 0 { - if len(s) == 0 { - return - } - switch chunk[0] { - case '[': - // character class - r, n := utf8.DecodeRuneInString(s) - s = s[n:] - chunk = chunk[1:] - // We can't end right after '[', we're expecting at least - // a closing bracket and possibly a caret. 
- if len(chunk) == 0 { - err = filepath.ErrBadPattern - return - } - // possibly negated - negated := chunk[0] == '^' - if negated { - chunk = chunk[1:] - } - // parse all ranges - match := false - nrange := 0 - for { - if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 { - chunk = chunk[1:] - break - } - var lo, hi rune - if lo, chunk, err = getEsc(chunk); err != nil { - return - } - hi = lo - if chunk[0] == '-' { - if hi, chunk, err = getEsc(chunk[1:]); err != nil { - return - } - } - if lo <= r && r <= hi { - match = true - } - nrange++ - } - if match == negated { - return - } - - case '?': - _, n := utf8.DecodeRuneInString(s) - s = s[n:] - chunk = chunk[1:] - - case '\\': - if runtime.GOOS != "windows" { - chunk = chunk[1:] - if len(chunk) == 0 { - err = filepath.ErrBadPattern - return - } - } - fallthrough - - default: - if chunk[0] != s[0] { - return - } - s = s[1:] - chunk = chunk[1:] - } - } - return s, true, nil -} - -// getEsc gets a possibly-escaped character from chunk, for a character class. -func getEsc(chunk string) (r rune, nchunk string, err error) { - if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' { - err = filepath.ErrBadPattern - return - } - if chunk[0] == '\\' && runtime.GOOS != "windows" { - chunk = chunk[1:] - if len(chunk) == 0 { - err = filepath.ErrBadPattern - return - } - } - r, n := utf8.DecodeRuneInString(chunk) - if r == utf8.RuneError && n == 1 { - err = filepath.ErrBadPattern - } - nchunk = chunk[n:] - if len(nchunk) == 0 { - err = filepath.ErrBadPattern - } - return -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/doc.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/doc.go deleted file mode 100644 index a7145160ae0..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package objfile implements encoding and decoding of object files. -package objfile diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/reader.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/reader.go deleted file mode 100644 index b6b2ca06dd9..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/reader.go +++ /dev/null @@ -1,114 +0,0 @@ -package objfile - -import ( - "compress/zlib" - "errors" - "io" - "strconv" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/packfile" -) - -var ( - ErrClosed = errors.New("objfile: already closed") - ErrHeader = errors.New("objfile: invalid header") - ErrNegativeSize = errors.New("objfile: negative object size") -) - -// Reader reads and decodes compressed objfile data from a provided io.Reader. -// Reader implements io.ReadCloser. Close should be called when finished with -// the Reader. Close will not close the underlying io.Reader. -type Reader struct { - multi io.Reader - zlib io.ReadCloser - hasher plumbing.Hasher -} - -// NewReader returns a new Reader reading from r. 
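// Hypothetical round-trip through the exported objfile API above (NewReader
// here; Header, Read, Hash and Close just below). The path is invented:
func readLooseObject(path string) (plumbing.Hash, error) {
	f, err := os.Open(path)
	if err != nil {
		return plumbing.ZeroHash, err
	}
	defer f.Close()
	or, err := objfile.NewReader(f)
	if err != nil {
		return plumbing.ZeroHash, err
	}
	defer or.Close()
	if _, _, err := or.Header(); err != nil { // consumes the type/size prefix
		return plumbing.ZeroHash, err
	}
	if _, err := io.Copy(io.Discard, or); err != nil { // body is hashed as read
		return plumbing.ZeroHash, err
	}
	return or.Hash(), nil
}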
-func NewReader(r io.Reader) (*Reader, error) { - zlib, err := zlib.NewReader(r) - if err != nil { - return nil, packfile.ErrZLib.AddDetails(err.Error()) - } - - return &Reader{ - zlib: zlib, - }, nil -} - -// Header reads the type and the size of object, and prepares the reader for read -func (r *Reader) Header() (t plumbing.ObjectType, size int64, err error) { - var raw []byte - raw, err = r.readUntil(' ') - if err != nil { - return - } - - t, err = plumbing.ParseObjectType(string(raw)) - if err != nil { - return - } - - raw, err = r.readUntil(0) - if err != nil { - return - } - - size, err = strconv.ParseInt(string(raw), 10, 64) - if err != nil { - err = ErrHeader - return - } - - defer r.prepareForRead(t, size) - return -} - -// readSlice reads one byte at a time from r until it encounters delim or an -// error. -func (r *Reader) readUntil(delim byte) ([]byte, error) { - var buf [1]byte - value := make([]byte, 0, 16) - for { - if n, err := r.zlib.Read(buf[:]); err != nil && (err != io.EOF || n == 0) { - if err == io.EOF { - return nil, ErrHeader - } - return nil, err - } - - if buf[0] == delim { - return value, nil - } - - value = append(value, buf[0]) - } -} - -func (r *Reader) prepareForRead(t plumbing.ObjectType, size int64) { - r.hasher = plumbing.NewHasher(t, size) - r.multi = io.TeeReader(r.zlib, r.hasher) -} - -// Read reads len(p) bytes into p from the object data stream. It returns -// the number of bytes read (0 <= n <= len(p)) and any error encountered. Even -// if Read returns n < len(p), it may use all of p as scratch space during the -// call. -// -// If Read encounters the end of the data stream it will return err == io.EOF, -// either in the current call if n > 0 or in a subsequent call. -func (r *Reader) Read(p []byte) (n int, err error) { - return r.multi.Read(p) -} - -// Hash returns the hash of the object data stream that has been read so far. -func (r *Reader) Hash() plumbing.Hash { - return r.hasher.Sum() -} - -// Close releases any resources consumed by the Reader. Calling Close does not -// close the wrapped io.Reader originally passed to NewReader. -func (r *Reader) Close() error { - return r.zlib.Close() -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/writer.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/writer.go deleted file mode 100644 index 2a96a4370bc..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/writer.go +++ /dev/null @@ -1,109 +0,0 @@ -package objfile - -import ( - "compress/zlib" - "errors" - "io" - "strconv" - - "github.com/go-git/go-git/v5/plumbing" -) - -var ( - ErrOverflow = errors.New("objfile: declared data length exceeded (overflow)") -) - -// Writer writes and encodes data in compressed objfile format to a provided -// io.Writer. Close should be called when finished with the Writer. Close will -// not close the underlying io.Writer. -type Writer struct { - raw io.Writer - zlib io.WriteCloser - hasher plumbing.Hasher - multi io.Writer - - closed bool - pending int64 // number of unwritten bytes -} - -// NewWriter returns a new Writer writing to w. -// -// The returned Writer implements io.WriteCloser. Close should be called when -// finished with the Writer. Close will not close the underlying io.Writer. -func NewWriter(w io.Writer) *Writer { - return &Writer{ - raw: w, - zlib: zlib.NewWriter(w), - } -} - -// WriteHeader writes the type and the size and prepares to accept the object's -// contents. 
If an invalid t is provided, plumbing.ErrInvalidType is returned. If a -// negative size is provided, ErrNegativeSize is returned. -func (w *Writer) WriteHeader(t plumbing.ObjectType, size int64) error { - if !t.Valid() { - return plumbing.ErrInvalidType - } - if size < 0 { - return ErrNegativeSize - } - - b := t.Bytes() - b = append(b, ' ') - b = append(b, []byte(strconv.FormatInt(size, 10))...) - b = append(b, 0) - - defer w.prepareForWrite(t, size) - _, err := w.zlib.Write(b) - - return err -} - -func (w *Writer) prepareForWrite(t plumbing.ObjectType, size int64) { - w.pending = size - - w.hasher = plumbing.NewHasher(t, size) - w.multi = io.MultiWriter(w.zlib, w.hasher) -} - -// Write writes the object's contents. Write returns the error ErrOverflow if -// more than size bytes are written after WriteHeader. -func (w *Writer) Write(p []byte) (n int, err error) { - if w.closed { - return 0, ErrClosed - } - - overwrite := false - if int64(len(p)) > w.pending { - p = p[0:w.pending] - overwrite = true - } - - n, err = w.multi.Write(p) - w.pending -= int64(n) - if err == nil && overwrite { - err = ErrOverflow - return - } - - return -} - -// Hash returns the hash of the object data stream that has been written so far. -// It can be called before or after Close. -func (w *Writer) Hash() plumbing.Hash { - return w.hasher.Sum() // Not yet closed, return hash of data written so far -} - -// Close releases any resources consumed by the Writer. -// -// Calling Close does not close the wrapped io.Writer originally passed to -// NewWriter. -func (w *Writer) Close() error { - if err := w.zlib.Close(); err != nil { - return err - } - - w.closed = true - return nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/common.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/common.go deleted file mode 100644 index df423ad50c8..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/common.go +++ /dev/null @@ -1,78 +0,0 @@ -package packfile - -import ( - "bytes" - "compress/zlib" - "io" - "sync" - - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -var signature = []byte{'P', 'A', 'C', 'K'} - -const ( - // VersionSupported is the packfile version supported by this package - VersionSupported uint32 = 2 - - firstLengthBits = uint8(4) // the first byte into object header has 4 bits to store the length - lengthBits = uint8(7) // subsequent bytes has 7 bits to store the length - maskFirstLength = 15 // 0000 1111 - maskContinue = 0x80 // 1000 0000 - maskLength = uint8(127) // 0111 1111 - maskType = uint8(112) // 0111 0000 -) - -// UpdateObjectStorage updates the storer with the objects in the given -// packfile. -func UpdateObjectStorage(s storer.Storer, packfile io.Reader) error { - if pw, ok := s.(storer.PackfileWriter); ok { - return WritePackfileToObjectStorage(pw, packfile) - } - - p, err := NewParserWithStorage(NewScanner(packfile), s) - if err != nil { - return err - } - - _, err = p.Parse() - return err -} - -// WritePackfileToObjectStorage writes all the packfile objects into the given -// object storage. 
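// Worked sketch of the header WriteHeader assembles above: the object type,
// a space, the decimal size and a NUL precede the body inside a single zlib
// stream, so a 12-byte blob begins with "blob 12" plus a NUL. Illustrative
// helper, stdlib "strconv" only:
func looseHeader(objType string, size int64) []byte {
	b := append([]byte(objType), ' ')
	b = append(b, strconv.FormatInt(size, 10)...)
	return append(b, 0) // NUL terminates the header
}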
-func WritePackfileToObjectStorage( - sw storer.PackfileWriter, - packfile io.Reader, -) (err error) { - w, err := sw.PackfileWriter() - if err != nil { - return err - } - - defer ioutil.CheckClose(w, &err) - - var n int64 - n, err = io.Copy(w, packfile) - if err == nil && n == 0 { - return ErrEmptyPackfile - } - - return err -} - -var bufPool = sync.Pool{ - New: func() interface{} { - return bytes.NewBuffer(nil) - }, -} - -var zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01} - -var zlibReaderPool = sync.Pool{ - New: func() interface{} { - r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes)) - return r - }, -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_index.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_index.go deleted file mode 100644 index 07a61120e5a..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_index.go +++ /dev/null @@ -1,297 +0,0 @@ -package packfile - -const blksz = 16 -const maxChainLength = 64 - -// deltaIndex is a modified version of JGit's DeltaIndex adapted to our current -// design. -type deltaIndex struct { - table []int - entries []int - mask int -} - -func (idx *deltaIndex) init(buf []byte) { - scanner := newDeltaIndexScanner(buf, len(buf)) - idx.mask = scanner.mask - idx.table = scanner.table - idx.entries = make([]int, countEntries(scanner)+1) - idx.copyEntries(scanner) -} - -// findMatch returns the offset of src where the block starting at tgtOffset -// is and the length of the match. A length of 0 means there was no match. A -// length of -1 means the src length is lower than the blksz and whatever -// other positive length is the length of the match in bytes. -func (idx *deltaIndex) findMatch(src, tgt []byte, tgtOffset int) (srcOffset, l int) { - if len(tgt) < tgtOffset+s { - return 0, len(tgt) - tgtOffset - } - - if len(src) < blksz { - return 0, -1 - } - - if len(tgt) >= tgtOffset+s && len(src) >= blksz { - h := hashBlock(tgt, tgtOffset) - tIdx := h & idx.mask - eIdx := idx.table[tIdx] - if eIdx != 0 { - srcOffset = idx.entries[eIdx] - } else { - return - } - - l = matchLength(src, tgt, tgtOffset, srcOffset) - } - - return -} - -func matchLength(src, tgt []byte, otgt, osrc int) (l int) { - lensrc := len(src) - lentgt := len(tgt) - for (osrc < lensrc && otgt < lentgt) && src[osrc] == tgt[otgt] { - l++ - osrc++ - otgt++ - } - return -} - -func countEntries(scan *deltaIndexScanner) (cnt int) { - // Figure out exactly how many entries we need. As we do the - // enumeration truncate any delta chains longer than what we - // are willing to scan during encode. This keeps the encode - // logic linear in the size of the input rather than quadratic. - for i := 0; i < len(scan.table); i++ { - h := scan.table[i] - if h == 0 { - continue - } - - size := 0 - for { - size++ - if size == maxChainLength { - scan.next[h] = 0 - break - } - h = scan.next[h] - - if h == 0 { - break - } - } - cnt += size - } - - return -} - -func (idx *deltaIndex) copyEntries(scanner *deltaIndexScanner) { - // Rebuild the entries list from the scanner, positioning all - // blocks in the same hash chain next to each other. We can - // then later discard the next list, along with the scanner. 
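// The pooled-buffer pattern above (bufPool) in isolation; an illustrative
// helper showing the Get/Reset/Put discipline the diff-delta code later in
// this patch follows:
func withPooledBuffer(fn func(*bytes.Buffer)) {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset() // a recycled buffer may still hold stale bytes
	defer bufPool.Put(buf)
	fn(buf)
}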
- // - next := 1 - for i := 0; i < len(idx.table); i++ { - h := idx.table[i] - if h == 0 { - continue - } - - idx.table[i] = next - for { - idx.entries[next] = scanner.entries[h] - next++ - h = scanner.next[h] - - if h == 0 { - break - } - } - } -} - -type deltaIndexScanner struct { - table []int - entries []int - next []int - mask int - count int -} - -func newDeltaIndexScanner(buf []byte, size int) *deltaIndexScanner { - size -= size % blksz - worstCaseBlockCnt := size / blksz - if worstCaseBlockCnt < 1 { - return new(deltaIndexScanner) - } - - tableSize := tableSize(worstCaseBlockCnt) - scanner := &deltaIndexScanner{ - table: make([]int, tableSize), - mask: tableSize - 1, - entries: make([]int, worstCaseBlockCnt+1), - next: make([]int, worstCaseBlockCnt+1), - } - - scanner.scan(buf, size) - return scanner -} - -// slightly modified version of JGit's DeltaIndexScanner. We store the offset on the entries -// instead of the entries and the key, so we avoid operations to retrieve the offset later, as -// we don't use the key. -// See: https://github.com/eclipse/jgit/blob/005e5feb4ecd08c4e4d141a38b9e7942accb3212/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/pack/DeltaIndexScanner.java -func (s *deltaIndexScanner) scan(buf []byte, end int) { - lastHash := 0 - ptr := end - blksz - - for { - key := hashBlock(buf, ptr) - tIdx := key & s.mask - head := s.table[tIdx] - if head != 0 && lastHash == key { - s.entries[head] = ptr - } else { - s.count++ - eIdx := s.count - s.entries[eIdx] = ptr - s.next[eIdx] = head - s.table[tIdx] = eIdx - } - - lastHash = key - ptr -= blksz - - if 0 > ptr { - break - } - } -} - -func tableSize(worstCaseBlockCnt int) int { - shift := 32 - leadingZeros(uint32(worstCaseBlockCnt)) - sz := 1 << uint(shift-1) - if sz < worstCaseBlockCnt { - sz <<= 1 - } - return sz -} - -// use https://golang.org/pkg/math/bits/#LeadingZeros32 in the future -func leadingZeros(x uint32) (n int) { - if x >= 1<<16 { - x >>= 16 - n = 16 - } - if x >= 1<<8 { - x >>= 8 - n += 8 - } - n += int(len8tab[x]) - return 32 - n -} - -var len8tab = [256]uint8{ - 0x00, 0x01, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, - 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, - 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, - 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, - 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, - 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, - 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, - 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, - 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, - 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, - 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 
0x08, - 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, -} - -func hashBlock(raw []byte, ptr int) int { - // The first 4 steps collapse out into a 4 byte big-endian decode, - // with a larger right shift as we combined shift lefts together. - // - hash := ((uint32(raw[ptr]) & 0xff) << 24) | - ((uint32(raw[ptr+1]) & 0xff) << 16) | - ((uint32(raw[ptr+2]) & 0xff) << 8) | - (uint32(raw[ptr+3]) & 0xff) - hash ^= T[hash>>31] - - hash = ((hash << 8) | (uint32(raw[ptr+4]) & 0xff)) ^ T[hash>>23] - hash = ((hash << 8) | (uint32(raw[ptr+5]) & 0xff)) ^ T[hash>>23] - hash = ((hash << 8) | (uint32(raw[ptr+6]) & 0xff)) ^ T[hash>>23] - hash = ((hash << 8) | (uint32(raw[ptr+7]) & 0xff)) ^ T[hash>>23] - - hash = ((hash << 8) | (uint32(raw[ptr+8]) & 0xff)) ^ T[hash>>23] - hash = ((hash << 8) | (uint32(raw[ptr+9]) & 0xff)) ^ T[hash>>23] - hash = ((hash << 8) | (uint32(raw[ptr+10]) & 0xff)) ^ T[hash>>23] - hash = ((hash << 8) | (uint32(raw[ptr+11]) & 0xff)) ^ T[hash>>23] - - hash = ((hash << 8) | (uint32(raw[ptr+12]) & 0xff)) ^ T[hash>>23] - hash = ((hash << 8) | (uint32(raw[ptr+13]) & 0xff)) ^ T[hash>>23] - hash = ((hash << 8) | (uint32(raw[ptr+14]) & 0xff)) ^ T[hash>>23] - hash = ((hash << 8) | (uint32(raw[ptr+15]) & 0xff)) ^ T[hash>>23] - - return int(hash) -} - -var T = []uint32{0x00000000, 0xd4c6b32d, 0x7d4bd577, - 0xa98d665a, 0x2e5119c3, 0xfa97aaee, 0x531accb4, 0x87dc7f99, - 0x5ca23386, 0x886480ab, 0x21e9e6f1, 0xf52f55dc, 0x72f32a45, - 0xa6359968, 0x0fb8ff32, 0xdb7e4c1f, 0x6d82d421, 0xb944670c, - 0x10c90156, 0xc40fb27b, 0x43d3cde2, 0x97157ecf, 0x3e981895, - 0xea5eabb8, 0x3120e7a7, 0xe5e6548a, 0x4c6b32d0, 0x98ad81fd, - 0x1f71fe64, 0xcbb74d49, 0x623a2b13, 0xb6fc983e, 0x0fc31b6f, - 0xdb05a842, 0x7288ce18, 0xa64e7d35, 0x219202ac, 0xf554b181, - 0x5cd9d7db, 0x881f64f6, 0x536128e9, 0x87a79bc4, 0x2e2afd9e, - 0xfaec4eb3, 0x7d30312a, 0xa9f68207, 0x007be45d, 0xd4bd5770, - 0x6241cf4e, 0xb6877c63, 0x1f0a1a39, 0xcbcca914, 0x4c10d68d, - 0x98d665a0, 0x315b03fa, 0xe59db0d7, 0x3ee3fcc8, 0xea254fe5, - 0x43a829bf, 0x976e9a92, 0x10b2e50b, 0xc4745626, 0x6df9307c, - 0xb93f8351, 0x1f8636de, 0xcb4085f3, 0x62cde3a9, 0xb60b5084, - 0x31d72f1d, 0xe5119c30, 0x4c9cfa6a, 0x985a4947, 0x43240558, - 0x97e2b675, 0x3e6fd02f, 0xeaa96302, 0x6d751c9b, 0xb9b3afb6, - 0x103ec9ec, 0xc4f87ac1, 0x7204e2ff, 0xa6c251d2, 0x0f4f3788, - 0xdb8984a5, 0x5c55fb3c, 0x88934811, 0x211e2e4b, 0xf5d89d66, - 0x2ea6d179, 0xfa606254, 0x53ed040e, 0x872bb723, 0x00f7c8ba, - 0xd4317b97, 0x7dbc1dcd, 0xa97aaee0, 0x10452db1, 0xc4839e9c, - 0x6d0ef8c6, 0xb9c84beb, 0x3e143472, 0xead2875f, 0x435fe105, - 0x97995228, 0x4ce71e37, 0x9821ad1a, 0x31accb40, 0xe56a786d, - 0x62b607f4, 0xb670b4d9, 0x1ffdd283, 0xcb3b61ae, 0x7dc7f990, - 0xa9014abd, 0x008c2ce7, 0xd44a9fca, 0x5396e053, 0x8750537e, - 0x2edd3524, 0xfa1b8609, 0x2165ca16, 0xf5a3793b, 0x5c2e1f61, - 0x88e8ac4c, 0x0f34d3d5, 0xdbf260f8, 0x727f06a2, 0xa6b9b58f, - 0x3f0c6dbc, 0xebcade91, 0x4247b8cb, 0x96810be6, 0x115d747f, - 0xc59bc752, 0x6c16a108, 0xb8d01225, 0x63ae5e3a, 0xb768ed17, - 0x1ee58b4d, 0xca233860, 0x4dff47f9, 0x9939f4d4, 0x30b4928e, - 0xe47221a3, 0x528eb99d, 0x86480ab0, 0x2fc56cea, 0xfb03dfc7, - 0x7cdfa05e, 0xa8191373, 0x01947529, 0xd552c604, 0x0e2c8a1b, - 0xdaea3936, 0x73675f6c, 0xa7a1ec41, 0x207d93d8, 0xf4bb20f5, - 0x5d3646af, 0x89f0f582, 0x30cf76d3, 0xe409c5fe, 0x4d84a3a4, - 0x99421089, 0x1e9e6f10, 0xca58dc3d, 0x63d5ba67, 0xb713094a, - 0x6c6d4555, 0xb8abf678, 0x11269022, 0xc5e0230f, 0x423c5c96, - 0x96faefbb, 0x3f7789e1, 0xebb13acc, 0x5d4da2f2, 0x898b11df, - 0x20067785, 
0xf4c0c4a8, 0x731cbb31, 0xa7da081c, 0x0e576e46, - 0xda91dd6b, 0x01ef9174, 0xd5292259, 0x7ca44403, 0xa862f72e, - 0x2fbe88b7, 0xfb783b9a, 0x52f55dc0, 0x8633eeed, 0x208a5b62, - 0xf44ce84f, 0x5dc18e15, 0x89073d38, 0x0edb42a1, 0xda1df18c, - 0x739097d6, 0xa75624fb, 0x7c2868e4, 0xa8eedbc9, 0x0163bd93, - 0xd5a50ebe, 0x52797127, 0x86bfc20a, 0x2f32a450, 0xfbf4177d, - 0x4d088f43, 0x99ce3c6e, 0x30435a34, 0xe485e919, 0x63599680, - 0xb79f25ad, 0x1e1243f7, 0xcad4f0da, 0x11aabcc5, 0xc56c0fe8, - 0x6ce169b2, 0xb827da9f, 0x3ffba506, 0xeb3d162b, 0x42b07071, - 0x9676c35c, 0x2f49400d, 0xfb8ff320, 0x5202957a, 0x86c42657, - 0x011859ce, 0xd5deeae3, 0x7c538cb9, 0xa8953f94, 0x73eb738b, - 0xa72dc0a6, 0x0ea0a6fc, 0xda6615d1, 0x5dba6a48, 0x897cd965, - 0x20f1bf3f, 0xf4370c12, 0x42cb942c, 0x960d2701, 0x3f80415b, - 0xeb46f276, 0x6c9a8def, 0xb85c3ec2, 0x11d15898, 0xc517ebb5, - 0x1e69a7aa, 0xcaaf1487, 0x632272dd, 0xb7e4c1f0, 0x3038be69, - 0xe4fe0d44, 0x4d736b1e, 0x99b5d833, -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_selector.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_selector.go deleted file mode 100644 index 4b60ff39470..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_selector.go +++ /dev/null @@ -1,369 +0,0 @@ -package packfile - -import ( - "sort" - "sync" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" -) - -const ( - // deltas based on deltas, how many steps we can do. - // 50 is the default value used in JGit - maxDepth = int64(50) -) - -// applyDelta is the set of object types that we should apply deltas -var applyDelta = map[plumbing.ObjectType]bool{ - plumbing.BlobObject: true, - plumbing.TreeObject: true, -} - -type deltaSelector struct { - storer storer.EncodedObjectStorer -} - -func newDeltaSelector(s storer.EncodedObjectStorer) *deltaSelector { - return &deltaSelector{s} -} - -// ObjectsToPack creates a list of ObjectToPack from the hashes -// provided, creating deltas if it's suitable, using an specific -// internal logic. `packWindow` specifies the size of the sliding -// window used to compare objects for delta compression; 0 turns off -// delta compression entirely. 
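// Illustrative shape of the packWindow search that walk performs further
// down: each target object is compared only against the packWindow objects
// immediately before it (the list is already sorted by type and size),
// never against the whole list.
func windowPairs(n int, packWindow uint) (pairs [][2]int) {
	for i := 0; i < n; i++ {
		for j := i - 1; j >= 0 && i-j < int(packWindow); j-- {
			pairs = append(pairs, [2]int{j, i}) // candidate base j for target i
		}
	}
	return
}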
-func (dw *deltaSelector) ObjectsToPack( - hashes []plumbing.Hash, - packWindow uint, -) ([]*ObjectToPack, error) { - otp, err := dw.objectsToPack(hashes, packWindow) - if err != nil { - return nil, err - } - - if packWindow == 0 { - return otp, nil - } - - dw.sort(otp) - - var objectGroups [][]*ObjectToPack - var prev *ObjectToPack - i := -1 - for _, obj := range otp { - if prev == nil || prev.Type() != obj.Type() { - objectGroups = append(objectGroups, []*ObjectToPack{obj}) - i++ - prev = obj - } else { - objectGroups[i] = append(objectGroups[i], obj) - } - } - - var wg sync.WaitGroup - var once sync.Once - for _, objs := range objectGroups { - objs := objs - wg.Add(1) - go func() { - if walkErr := dw.walk(objs, packWindow); walkErr != nil { - once.Do(func() { - err = walkErr - }) - } - wg.Done() - }() - } - wg.Wait() - - if err != nil { - return nil, err - } - - return otp, nil -} - -func (dw *deltaSelector) objectsToPack( - hashes []plumbing.Hash, - packWindow uint, -) ([]*ObjectToPack, error) { - var objectsToPack []*ObjectToPack - for _, h := range hashes { - var o plumbing.EncodedObject - var err error - if packWindow == 0 { - o, err = dw.encodedObject(h) - } else { - o, err = dw.encodedDeltaObject(h) - } - if err != nil { - return nil, err - } - - otp := newObjectToPack(o) - if _, ok := o.(plumbing.DeltaObject); ok { - otp.CleanOriginal() - } - - objectsToPack = append(objectsToPack, otp) - } - - if packWindow == 0 { - return objectsToPack, nil - } - - if err := dw.fixAndBreakChains(objectsToPack); err != nil { - return nil, err - } - - return objectsToPack, nil -} - -func (dw *deltaSelector) encodedDeltaObject(h plumbing.Hash) (plumbing.EncodedObject, error) { - edos, ok := dw.storer.(storer.DeltaObjectStorer) - if !ok { - return dw.encodedObject(h) - } - - return edos.DeltaObject(plumbing.AnyObject, h) -} - -func (dw *deltaSelector) encodedObject(h plumbing.Hash) (plumbing.EncodedObject, error) { - return dw.storer.EncodedObject(plumbing.AnyObject, h) -} - -func (dw *deltaSelector) fixAndBreakChains(objectsToPack []*ObjectToPack) error { - m := make(map[plumbing.Hash]*ObjectToPack, len(objectsToPack)) - for _, otp := range objectsToPack { - m[otp.Hash()] = otp - } - - for _, otp := range objectsToPack { - if err := dw.fixAndBreakChainsOne(m, otp); err != nil { - return err - } - } - - return nil -} - -func (dw *deltaSelector) fixAndBreakChainsOne(objectsToPack map[plumbing.Hash]*ObjectToPack, otp *ObjectToPack) error { - if !otp.Object.Type().IsDelta() { - return nil - } - - // Initial ObjectToPack instances might have a delta assigned to Object - // but no actual base initially. Once Base is assigned to a delta, it means - // we already fixed it. - if otp.Base != nil { - return nil - } - - do, ok := otp.Object.(plumbing.DeltaObject) - if !ok { - // if this is not a DeltaObject, then we cannot retrieve its base, - // so we have to break the delta chain here. - return dw.undeltify(otp) - } - - base, ok := objectsToPack[do.BaseHash()] - if !ok { - // The base of the delta is not in our list of objects to pack, so - // we break the chain. 
- return dw.undeltify(otp) - } - - if err := dw.fixAndBreakChainsOne(objectsToPack, base); err != nil { - return err - } - - otp.SetDelta(base, otp.Object) - return nil -} - -func (dw *deltaSelector) restoreOriginal(otp *ObjectToPack) error { - if otp.Original != nil { - return nil - } - - if !otp.Object.Type().IsDelta() { - return nil - } - - obj, err := dw.encodedObject(otp.Hash()) - if err != nil { - return err - } - - otp.SetOriginal(obj) - - return nil -} - -// undeltify undeltifies an *ObjectToPack by retrieving the original object from -// the storer and resetting it. -func (dw *deltaSelector) undeltify(otp *ObjectToPack) error { - if err := dw.restoreOriginal(otp); err != nil { - return err - } - - otp.Object = otp.Original - otp.Depth = 0 - return nil -} - -func (dw *deltaSelector) sort(objectsToPack []*ObjectToPack) { - sort.Sort(byTypeAndSize(objectsToPack)) -} - -func (dw *deltaSelector) walk( - objectsToPack []*ObjectToPack, - packWindow uint, -) error { - indexMap := make(map[plumbing.Hash]*deltaIndex) - for i := 0; i < len(objectsToPack); i++ { - // Clean up the index map and reconstructed delta objects for anything - // outside our pack window, to save memory. - if i > int(packWindow) { - obj := objectsToPack[i-int(packWindow)] - - delete(indexMap, obj.Hash()) - - if obj.IsDelta() { - obj.SaveOriginalMetadata() - obj.CleanOriginal() - } - } - - target := objectsToPack[i] - - // If we already have a delta, we don't try to find a new one for this - // object. This happens when a delta is set to be reused from an existing - // packfile. - if target.IsDelta() { - continue - } - - // We only want to create deltas from specific types. - if !applyDelta[target.Type()] { - continue - } - - for j := i - 1; j >= 0 && i-j < int(packWindow); j-- { - base := objectsToPack[j] - // Objects must use only the same type as their delta base. - // Since objectsToPack is sorted by type and size, once we find - // a different type, we know we won't find more of them. - if base.Type() != target.Type() { - break - } - - if err := dw.tryToDeltify(indexMap, base, target); err != nil { - return err - } - } - } - - return nil -} - -func (dw *deltaSelector) tryToDeltify(indexMap map[plumbing.Hash]*deltaIndex, base, target *ObjectToPack) error { - // Original object might not be present if we're reusing a delta, so we - // ensure it is restored. - if err := dw.restoreOriginal(target); err != nil { - return err - } - - if err := dw.restoreOriginal(base); err != nil { - return err - } - - // If the sizes are radically different, this is a bad pairing. - if target.Size() < base.Size()>>4 { - return nil - } - - msz := dw.deltaSizeLimit( - target.Object.Size(), - base.Depth, - target.Depth, - target.IsDelta(), - ) - - // Nearly impossible to fit useful delta. - if msz <= 8 { - return nil - } - - // If we have to insert a lot to make this work, find another. 
- if base.Size()-target.Size() > msz { - return nil - } - - if _, ok := indexMap[base.Hash()]; !ok { - indexMap[base.Hash()] = new(deltaIndex) - } - - // Now we can generate the delta using originals - delta, err := getDelta(indexMap[base.Hash()], base.Original, target.Original) - if err != nil { - return err - } - - // if delta better than target - if delta.Size() < msz { - target.SetDelta(base, delta) - } - - return nil -} - -func (dw *deltaSelector) deltaSizeLimit(targetSize int64, baseDepth int, - targetDepth int, targetDelta bool) int64 { - if !targetDelta { - // Any delta should be no more than 50% of the original size - // (for text files deflate of whole form should shrink 50%). - n := targetSize >> 1 - - // Evenly distribute delta size limits over allowed depth. - // If src is non-delta (depth = 0), delta <= 50% of original. - // If src is almost at limit (9/10), delta <= 10% of original. - return n * (maxDepth - int64(baseDepth)) / maxDepth - } - - // With a delta base chosen any new delta must be "better". - // Retain the distribution described above. - d := int64(targetDepth) - n := targetSize - - // If target depth is bigger than maxDepth, this delta is not suitable to be used. - if d >= maxDepth { - return 0 - } - - // If src is whole (depth=0) and base is near limit (depth=9/10) - // any delta using src can be 10x larger and still be better. - // - // If src is near limit (depth=9/10) and base is whole (depth=0) - // a new delta dependent on src must be 1/10th the size. - return n * (maxDepth - int64(baseDepth)) / (maxDepth - d) -} - -type byTypeAndSize []*ObjectToPack - -func (a byTypeAndSize) Len() int { return len(a) } - -func (a byTypeAndSize) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -func (a byTypeAndSize) Less(i, j int) bool { - if a[i].Type() < a[j].Type() { - return false - } - - if a[i].Type() > a[j].Type() { - return true - } - - return a[i].Size() > a[j].Size() -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/diff_delta.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/diff_delta.go deleted file mode 100644 index 1951b34ef19..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/diff_delta.go +++ /dev/null @@ -1,204 +0,0 @@ -package packfile - -import ( - "bytes" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -// See https://github.com/jelmer/dulwich/blob/master/dulwich/pack.py and -// https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js -// for more info - -const ( - // Standard chunk size used to generate fingerprints - s = 16 - - // https://github.com/git/git/blob/f7466e94375b3be27f229c78873f0acf8301c0a5/diff-delta.c#L428 - // Max size of a copy operation (64KB) - maxCopySize = 64 * 1024 -) - -// GetDelta returns an EncodedObject of type OFSDeltaObject. Base and Target object, -// will be loaded into memory to be able to create the delta object. -// To generate target again, you will need the obtained object and "base" one. -// Error will be returned if base or target object cannot be read. 
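// Worked numbers for deltaSizeLimit above (maxDepth = 50): a non-delta
// 1000-byte target with a depth-0 base gets a budget of
// (1000 >> 1) * (50-0)/50 = 500 bytes, while the same target against a
// base already 45 levels deep is allowed only
// (1000 >> 1) * (50-45)/50 = 50 bytes, so deep chains must pay for
// themselves with much smaller deltas.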
-func GetDelta(base, target plumbing.EncodedObject) (plumbing.EncodedObject, error) { - return getDelta(new(deltaIndex), base, target) -} - -func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (o plumbing.EncodedObject, err error) { - br, err := base.Reader() - if err != nil { - return nil, err - } - - defer ioutil.CheckClose(br, &err) - - tr, err := target.Reader() - if err != nil { - return nil, err - } - - defer ioutil.CheckClose(tr, &err) - - bb := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(bb) - bb.Reset() - - _, err = bb.ReadFrom(br) - if err != nil { - return nil, err - } - - tb := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(tb) - tb.Reset() - - _, err = tb.ReadFrom(tr) - if err != nil { - return nil, err - } - - db := diffDelta(index, bb.Bytes(), tb.Bytes()) - delta := &plumbing.MemoryObject{} - _, err = delta.Write(db) - if err != nil { - return nil, err - } - - delta.SetSize(int64(len(db))) - delta.SetType(plumbing.OFSDeltaObject) - - return delta, nil -} - -// DiffDelta returns the delta that transforms src into tgt. -func DiffDelta(src, tgt []byte) []byte { - return diffDelta(new(deltaIndex), src, tgt) -} - -func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte { - buf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(buf) - buf.Reset() - buf.Write(deltaEncodeSize(len(src))) - buf.Write(deltaEncodeSize(len(tgt))) - - if len(index.entries) == 0 { - index.init(src) - } - - ibuf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(ibuf) - ibuf.Reset() - for i := 0; i < len(tgt); i++ { - offset, l := index.findMatch(src, tgt, i) - - if l == 0 { - // couldn't find a match, just write the current byte and continue - ibuf.WriteByte(tgt[i]) - } else if l < 0 { - // src is less than blksz, copy the rest of the target to avoid - // calls to findMatch - for ; i < len(tgt); i++ { - ibuf.WriteByte(tgt[i]) - } - } else if l < s { - // remaining target is less than blksz, copy what's left of it - // and avoid calls to findMatch - for j := i; j < i+l; j++ { - ibuf.WriteByte(tgt[j]) - } - i += l - 1 - } else { - encodeInsertOperation(ibuf, buf) - - rl := l - aOffset := offset - for rl > 0 { - if rl < maxCopySize { - buf.Write(encodeCopyOperation(aOffset, rl)) - break - } - - buf.Write(encodeCopyOperation(aOffset, maxCopySize)) - rl -= maxCopySize - aOffset += maxCopySize - } - - i += l - 1 - } - } - - encodeInsertOperation(ibuf, buf) - - // buf.Bytes() is only valid until the next modifying operation on the buffer. Copy it. - return append([]byte{}, buf.Bytes()...) 
-} - -func encodeInsertOperation(ibuf, buf *bytes.Buffer) { - if ibuf.Len() == 0 { - return - } - - b := ibuf.Bytes() - s := ibuf.Len() - o := 0 - for { - if s <= 127 { - break - } - buf.WriteByte(byte(127)) - buf.Write(b[o : o+127]) - s -= 127 - o += 127 - } - buf.WriteByte(byte(s)) - buf.Write(b[o : o+s]) - - ibuf.Reset() -} - -func deltaEncodeSize(size int) []byte { - var ret []byte - c := size & 0x7f - size >>= 7 - for { - if size == 0 { - break - } - - ret = append(ret, byte(c|0x80)) - c = size & 0x7f - size >>= 7 - } - ret = append(ret, byte(c)) - - return ret -} - -func encodeCopyOperation(offset, length int) []byte { - code := 0x80 - var opcodes []byte - - var i uint - for i = 0; i < 4; i++ { - f := 0xff << (i * 8) - if offset&f != 0 { - opcodes = append(opcodes, byte(offset&f>>(i*8))) - code |= 0x01 << i - } - } - - for i = 0; i < 3; i++ { - f := 0xff << (i * 8) - if length&f != 0 { - opcodes = append(opcodes, byte(length&f>>(i*8))) - code |= 0x10 << i - } - } - - return append([]byte{byte(code)}, opcodes...) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/doc.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/doc.go deleted file mode 100644 index 2882a7f3782..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/doc.go +++ /dev/null @@ -1,39 +0,0 @@ -// Package packfile implements encoding and decoding of packfile format. -// -// == pack-*.pack files have the following format: -// -// - A header appears at the beginning and consists of the following: -// -// 4-byte signature: -// The signature is: {'P', 'A', 'C', 'K'} -// -// 4-byte version number (network byte order): -// GIT currently accepts version number 2 or 3 but -// generates version 2 only. -// -// 4-byte number of objects contained in the pack (network byte order) -// -// Observation: we cannot have more than 4G versions ;-) and -// more than 4G objects in a pack. -// -// - The header is followed by number of object entries, each of -// which looks like this: -// -// (undeltified representation) -// n-byte type and length (3-bit type, (n-1)*7+4-bit length) -// compressed data -// -// (deltified representation) -// n-byte type and length (3-bit type, (n-1)*7+4-bit length) -// 20-byte base object name -// compressed delta data -// -// Observation: length of each object is encoded in a variable -// length format and is not constrained to 32-bit or anything. -// -// - The trailer records 20-byte SHA1 checksum of all of the above. 
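// Minimal sketch of the 12-byte header described above (illustrative,
// stdlib "bytes", "encoding/binary", "errors" only); signature, version
// and object count are all in network byte order:
func parsePackHeader(b []byte) (version, objects uint32, err error) {
	if len(b) < 12 || !bytes.Equal(b[:4], []byte("PACK")) {
		return 0, 0, errors.New("not a packfile")
	}
	return binary.BigEndian.Uint32(b[4:8]), binary.BigEndian.Uint32(b[8:12]), nil
}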
-// -// -// Source: -// https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-protocol.txt -package packfile diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/encoder.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/encoder.go deleted file mode 100644 index 5501f8861cb..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/encoder.go +++ /dev/null @@ -1,225 +0,0 @@ -package packfile - -import ( - "compress/zlib" - "crypto/sha1" - "fmt" - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/utils/binary" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -// Encoder gets the data from the storage and write it into the writer in PACK -// format -type Encoder struct { - selector *deltaSelector - w *offsetWriter - zw *zlib.Writer - hasher plumbing.Hasher - - useRefDeltas bool -} - -// NewEncoder creates a new packfile encoder using a specific Writer and -// EncodedObjectStorer. By default deltas used to generate the packfile will be -// OFSDeltaObject. To use Reference deltas, set useRefDeltas to true. -func NewEncoder(w io.Writer, s storer.EncodedObjectStorer, useRefDeltas bool) *Encoder { - h := plumbing.Hasher{ - Hash: sha1.New(), - } - mw := io.MultiWriter(w, h) - ow := newOffsetWriter(mw) - zw := zlib.NewWriter(mw) - return &Encoder{ - selector: newDeltaSelector(s), - w: ow, - zw: zw, - hasher: h, - useRefDeltas: useRefDeltas, - } -} - -// Encode creates a packfile containing all the objects referenced in -// hashes and writes it to the writer in the Encoder. `packWindow` -// specifies the size of the sliding window used to compare objects -// for delta compression; 0 turns off delta compression entirely. -func (e *Encoder) Encode( - hashes []plumbing.Hash, - packWindow uint, -) (plumbing.Hash, error) { - objects, err := e.selector.ObjectsToPack(hashes, packWindow) - if err != nil { - return plumbing.ZeroHash, err - } - - return e.encode(objects) -} - -func (e *Encoder) encode(objects []*ObjectToPack) (plumbing.Hash, error) { - if err := e.head(len(objects)); err != nil { - return plumbing.ZeroHash, err - } - - for _, o := range objects { - if err := e.entry(o); err != nil { - return plumbing.ZeroHash, err - } - } - - return e.footer() -} - -func (e *Encoder) head(numEntries int) error { - return binary.Write( - e.w, - signature, - int32(VersionSupported), - int32(numEntries), - ) -} - -func (e *Encoder) entry(o *ObjectToPack) (err error) { - if o.WantWrite() { - // A cycle exists in this delta chain. This should only occur if a - // selected object representation disappeared during writing - // (for example due to a concurrent repack) and a different base - // was chosen, forcing a cycle. Select something other than a - // delta, and write this object. 
- e.selector.restoreOriginal(o) - o.BackToOriginal() - } - - if o.IsWritten() { - return nil - } - - o.MarkWantWrite() - - if err := e.writeBaseIfDelta(o); err != nil { - return err - } - - // We need to check if we already write that object due a cyclic delta chain - if o.IsWritten() { - return nil - } - - o.Offset = e.w.Offset() - - if o.IsDelta() { - if err := e.writeDeltaHeader(o); err != nil { - return err - } - } else { - if err := e.entryHead(o.Type(), o.Size()); err != nil { - return err - } - } - - e.zw.Reset(e.w) - - defer ioutil.CheckClose(e.zw, &err) - - or, err := o.Object.Reader() - if err != nil { - return err - } - - defer ioutil.CheckClose(or, &err) - - _, err = io.Copy(e.zw, or) - if err != nil { - return err - } - - return nil -} - -func (e *Encoder) writeBaseIfDelta(o *ObjectToPack) error { - if o.IsDelta() && !o.Base.IsWritten() { - // We must write base first - return e.entry(o.Base) - } - - return nil -} - -func (e *Encoder) writeDeltaHeader(o *ObjectToPack) error { - // Write offset deltas by default - t := plumbing.OFSDeltaObject - if e.useRefDeltas { - t = plumbing.REFDeltaObject - } - - if err := e.entryHead(t, o.Object.Size()); err != nil { - return err - } - - if e.useRefDeltas { - return e.writeRefDeltaHeader(o.Base.Hash()) - } else { - return e.writeOfsDeltaHeader(o) - } -} - -func (e *Encoder) writeRefDeltaHeader(base plumbing.Hash) error { - return binary.Write(e.w, base) -} - -func (e *Encoder) writeOfsDeltaHeader(o *ObjectToPack) error { - // for OFS_DELTA, offset of the base is interpreted as negative offset - // relative to the type-byte of the header of the ofs-delta entry. - relativeOffset := o.Offset - o.Base.Offset - if relativeOffset <= 0 { - return fmt.Errorf("bad offset for OFS_DELTA entry: %d", relativeOffset) - } - - return binary.WriteVariableWidthInt(e.w, relativeOffset) -} - -func (e *Encoder) entryHead(typeNum plumbing.ObjectType, size int64) error { - t := int64(typeNum) - header := []byte{} - c := (t << firstLengthBits) | (size & maskFirstLength) - size >>= firstLengthBits - for { - if size == 0 { - break - } - header = append(header, byte(c|maskContinue)) - c = size & int64(maskLength) - size >>= lengthBits - } - - header = append(header, byte(c)) - _, err := e.w.Write(header) - - return err -} - -func (e *Encoder) footer() (plumbing.Hash, error) { - h := e.hasher.Sum() - return h, binary.Write(e.w, h) -} - -type offsetWriter struct { - w io.Writer - offset int64 -} - -func newOffsetWriter(w io.Writer) *offsetWriter { - return &offsetWriter{w: w} -} - -func (ow *offsetWriter) Write(p []byte) (n int, err error) { - n, err = ow.w.Write(p) - ow.offset += int64(n) - return n, err -} - -func (ow *offsetWriter) Offset() int64 { - return ow.offset -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/error.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/error.go deleted file mode 100644 index c0b91633131..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/error.go +++ /dev/null @@ -1,30 +0,0 @@ -package packfile - -import "fmt" - -// Error specifies errors returned during packfile parsing. -type Error struct { - reason, details string -} - -// NewError returns a new error. -func NewError(reason string) *Error { - return &Error{reason: reason} -} - -// Error returns a text representation of the error. 
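// Standalone replica of entryHead's variable-length encoding above, with a
// worked example: a blob (type 3) of size 100 becomes 0xB4 0x06, since
// (3<<4)|(100&15) = 0x34 gains the continuation bit and 100>>4 = 6 follows.
func encodeTypeAndSize(t, size int64) []byte {
	header := []byte{}
	c := (t << 4) | (size & 15) // 3-bit type next to the low 4 size bits
	size >>= 4
	for size != 0 {
		header = append(header, byte(c|0x80)) // 0x80 marks "more bytes follow"
		c = size & 127
		size >>= 7
	}
	return append(header, byte(c))
}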
-func (e *Error) Error() string { - if e.details == "" { - return e.reason - } - - return fmt.Sprintf("%s: %s", e.reason, e.details) -} - -// AddDetails adds details to an error, with additional text. -func (e *Error) AddDetails(format string, args ...interface{}) *Error { - return &Error{ - reason: e.reason, - details: fmt.Sprintf(format, args...), - } -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/fsobject.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/fsobject.go deleted file mode 100644 index c5edaf52ee3..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/fsobject.go +++ /dev/null @@ -1,116 +0,0 @@ -package packfile - -import ( - "io" - - billy "github.com/go-git/go-billy/v5" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/cache" - "github.com/go-git/go-git/v5/plumbing/format/idxfile" -) - -// FSObject is an object from the packfile on the filesystem. -type FSObject struct { - hash plumbing.Hash - h *ObjectHeader - offset int64 - size int64 - typ plumbing.ObjectType - index idxfile.Index - fs billy.Filesystem - path string - cache cache.Object -} - -// NewFSObject creates a new filesystem object. -func NewFSObject( - hash plumbing.Hash, - finalType plumbing.ObjectType, - offset int64, - contentSize int64, - index idxfile.Index, - fs billy.Filesystem, - path string, - cache cache.Object, -) *FSObject { - return &FSObject{ - hash: hash, - offset: offset, - size: contentSize, - typ: finalType, - index: index, - fs: fs, - path: path, - cache: cache, - } -} - -// Reader implements the plumbing.EncodedObject interface. -func (o *FSObject) Reader() (io.ReadCloser, error) { - obj, ok := o.cache.Get(o.hash) - if ok && obj != o { - reader, err := obj.Reader() - if err != nil { - return nil, err - } - - return reader, nil - } - - f, err := o.fs.Open(o.path) - if err != nil { - return nil, err - } - - p := NewPackfileWithCache(o.index, nil, f, o.cache) - r, err := p.getObjectContent(o.offset) - if err != nil { - _ = f.Close() - return nil, err - } - - if err := f.Close(); err != nil { - return nil, err - } - - return r, nil -} - -// SetSize implements the plumbing.EncodedObject interface. This method -// is a noop. -func (o *FSObject) SetSize(int64) {} - -// SetType implements the plumbing.EncodedObject interface. This method is -// a noop. -func (o *FSObject) SetType(plumbing.ObjectType) {} - -// Hash implements the plumbing.EncodedObject interface. -func (o *FSObject) Hash() plumbing.Hash { return o.hash } - -// Size implements the plumbing.EncodedObject interface. -func (o *FSObject) Size() int64 { return o.size } - -// Type implements the plumbing.EncodedObject interface. -func (o *FSObject) Type() plumbing.ObjectType { - return o.typ -} - -// Writer implements the plumbing.EncodedObject interface. This method always -// returns a nil writer. 
-func (o *FSObject) Writer() (io.WriteCloser, error) { - return nil, nil -} - -type objectReader struct { - io.ReadCloser - f billy.File -} - -func (r *objectReader) Close() error { - if err := r.ReadCloser.Close(); err != nil { - _ = r.f.Close() - return err - } - - return r.f.Close() -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/object_pack.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/object_pack.go deleted file mode 100644 index 8ce29ef8ba0..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/object_pack.go +++ /dev/null @@ -1,164 +0,0 @@ -package packfile - -import ( - "github.com/go-git/go-git/v5/plumbing" -) - -// ObjectToPack is a representation of an object that is going to be into a -// pack file. -type ObjectToPack struct { - // The main object to pack, it could be any object, including deltas - Object plumbing.EncodedObject - // Base is the object that a delta is based on (it could be also another delta). - // If the main object is not a delta, Base will be null - Base *ObjectToPack - // Original is the object that we can generate applying the delta to - // Base, or the same object as Object in the case of a non-delta - // object. - Original plumbing.EncodedObject - // Depth is the amount of deltas needed to resolve to obtain Original - // (delta based on delta based on ...) - Depth int - - // offset in pack when object has been already written, or 0 if it - // has not been written yet - Offset int64 - - // Information from the original object - resolvedOriginal bool - originalType plumbing.ObjectType - originalSize int64 - originalHash plumbing.Hash -} - -// newObjectToPack creates a correct ObjectToPack based on a non-delta object -func newObjectToPack(o plumbing.EncodedObject) *ObjectToPack { - return &ObjectToPack{ - Object: o, - Original: o, - } -} - -// newDeltaObjectToPack creates a correct ObjectToPack for a delta object, based on -// his base (could be another delta), the delta target (in this case called original), -// and the delta Object itself -func newDeltaObjectToPack(base *ObjectToPack, original, delta plumbing.EncodedObject) *ObjectToPack { - return &ObjectToPack{ - Object: delta, - Base: base, - Original: original, - Depth: base.Depth + 1, - } -} - -// BackToOriginal converts that ObjectToPack to a non-deltified object if it was one -func (o *ObjectToPack) BackToOriginal() { - if o.IsDelta() && o.Original != nil { - o.Object = o.Original - o.Base = nil - o.Depth = 0 - } -} - -// IsWritten returns if that ObjectToPack was -// already written into the packfile or not -func (o *ObjectToPack) IsWritten() bool { - return o.Offset > 1 -} - -// MarkWantWrite marks this ObjectToPack as WantWrite -// to avoid delta chain loops -func (o *ObjectToPack) MarkWantWrite() { - o.Offset = 1 -} - -// WantWrite checks if this ObjectToPack was marked as WantWrite before -func (o *ObjectToPack) WantWrite() bool { - return o.Offset == 1 -} - -// SetOriginal sets both Original and saves size, type and hash. 
If object -// is nil Original is set but previous resolved values are kept -func (o *ObjectToPack) SetOriginal(obj plumbing.EncodedObject) { - o.Original = obj - o.SaveOriginalMetadata() -} - -// SaveOriginalMetadata saves size, type and hash of Original object -func (o *ObjectToPack) SaveOriginalMetadata() { - if o.Original != nil { - o.originalSize = o.Original.Size() - o.originalType = o.Original.Type() - o.originalHash = o.Original.Hash() - o.resolvedOriginal = true - } -} - -// CleanOriginal sets Original to nil -func (o *ObjectToPack) CleanOriginal() { - o.Original = nil -} - -func (o *ObjectToPack) Type() plumbing.ObjectType { - if o.Original != nil { - return o.Original.Type() - } - - if o.resolvedOriginal { - return o.originalType - } - - if o.Base != nil { - return o.Base.Type() - } - - if o.Object != nil { - return o.Object.Type() - } - - panic("cannot get type") -} - -func (o *ObjectToPack) Hash() plumbing.Hash { - if o.Original != nil { - return o.Original.Hash() - } - - if o.resolvedOriginal { - return o.originalHash - } - - do, ok := o.Object.(plumbing.DeltaObject) - if ok { - return do.ActualHash() - } - - panic("cannot get hash") -} - -func (o *ObjectToPack) Size() int64 { - if o.Original != nil { - return o.Original.Size() - } - - if o.resolvedOriginal { - return o.originalSize - } - - do, ok := o.Object.(plumbing.DeltaObject) - if ok { - return do.ActualSize() - } - - panic("cannot get ObjectToPack size") -} - -func (o *ObjectToPack) IsDelta() bool { - return o.Base != nil -} - -func (o *ObjectToPack) SetDelta(base *ObjectToPack, delta plumbing.EncodedObject) { - o.Object = delta - o.Base = base - o.Depth = base.Depth + 1 -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/packfile.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/packfile.go deleted file mode 100644 index ddd7f62fce4..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/packfile.go +++ /dev/null @@ -1,565 +0,0 @@ -package packfile - -import ( - "bytes" - "io" - "os" - - billy "github.com/go-git/go-billy/v5" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/cache" - "github.com/go-git/go-git/v5/plumbing/format/idxfile" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -var ( - // ErrInvalidObject is returned by Decode when an invalid object is - // found in the packfile. - ErrInvalidObject = NewError("invalid git object") - // ErrZLib is returned by Decode when there was an error unzipping - // the packfile contents. - ErrZLib = NewError("zlib reading error") -) - -// When reading small objects from packfile it is beneficial to do so at -// once to exploit the buffered I/O. In many cases the objects are so small -// that they were already loaded to memory when the object header was -// loaded from the packfile. Wrapping in FSObject would cause this buffered -// data to be thrown away and then re-read later, with the additional -// seeking causing reloads from disk. Objects smaller than this threshold -// are now always read into memory and stored in cache instead of being -// wrapped in FSObject. -const smallObjectThreshold = 16 * 1024 - -// Packfile allows retrieving information from inside a packfile. 
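A usage sketch for the Packfile type below, matching the NewPackfile and GetByType signatures in this vendored copy; the pack/idx file names are placeholders, and the idxfile decoding calls are assumed from the sibling idxfile package:

package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/osfs"
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/format/idxfile"
	"github.com/go-git/go-git/v5/plumbing/format/packfile"
)

func main() {
	fs := osfs.New(".git/objects/pack")
	packFile, err := fs.Open("pack-0123.pack") // placeholder name
	if err != nil {
		panic(err)
	}
	idxFile, err := fs.Open("pack-0123.idx") // placeholder name
	if err != nil {
		panic(err)
	}
	idx := idxfile.NewMemoryIndex()
	if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil {
		panic(err)
	}
	p := packfile.NewPackfile(idx, fs, packFile)
	defer p.Close()

	// Count the commits in the pack via the iterator API.
	iter, err := p.GetByType(plumbing.CommitObject)
	if err != nil {
		panic(err)
	}
	defer iter.Close()
	commits := 0
	if err := iter.ForEach(func(plumbing.EncodedObject) error {
		commits++
		return nil
	}); err != nil {
		panic(err)
	}
	fmt.Println("commits in pack:", commits)
}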
-type Packfile struct { - idxfile.Index - fs billy.Filesystem - file billy.File - s *Scanner - deltaBaseCache cache.Object - offsetToType map[int64]plumbing.ObjectType -} - -// NewPackfileWithCache creates a new Packfile with the given object cache. -// If the filesystem is provided, the packfile will return FSObjects, otherwise -// it will return MemoryObjects. -func NewPackfileWithCache( - index idxfile.Index, - fs billy.Filesystem, - file billy.File, - cache cache.Object, -) *Packfile { - s := NewScanner(file) - return &Packfile{ - index, - fs, - file, - s, - cache, - make(map[int64]plumbing.ObjectType), - } -} - -// NewPackfile returns a packfile representation for the given packfile file -// and packfile idx. -// If the filesystem is provided, the packfile will return FSObjects, otherwise -// it will return MemoryObjects. -func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File) *Packfile { - return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault()) -} - -// Get retrieves the encoded object in the packfile with the given hash. -func (p *Packfile) Get(h plumbing.Hash) (plumbing.EncodedObject, error) { - offset, err := p.FindOffset(h) - if err != nil { - return nil, err - } - - return p.objectAtOffset(offset, h) -} - -// GetByOffset retrieves the encoded object from the packfile at the given -// offset. -func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) { - hash, err := p.FindHash(o) - if err != nil { - return nil, err - } - - return p.objectAtOffset(o, hash) -} - -// GetSizeByOffset retrieves the size of the encoded object from the -// packfile with the given offset. -func (p *Packfile) GetSizeByOffset(o int64) (size int64, err error) { - if _, err := p.s.SeekFromStart(o); err != nil { - if err == io.EOF || isInvalid(err) { - return 0, plumbing.ErrObjectNotFound - } - - return 0, err - } - - h, err := p.nextObjectHeader() - if err != nil { - return 0, err - } - return p.getObjectSize(h) -} - -func (p *Packfile) objectHeaderAtOffset(offset int64) (*ObjectHeader, error) { - h, err := p.s.SeekObjectHeader(offset) - p.s.pendingObject = nil - return h, err -} - -func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) { - h, err := p.s.NextObjectHeader() - p.s.pendingObject = nil - return h, err -} - -func (p *Packfile) getDeltaObjectSize(buf *bytes.Buffer) int64 { - delta := buf.Bytes() - _, delta = decodeLEB128(delta) // skip src size - sz, _ := decodeLEB128(delta) - return int64(sz) -} - -func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) { - switch h.Type { - case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject: - return h.Length, nil - case plumbing.REFDeltaObject, plumbing.OFSDeltaObject: - buf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(buf) - buf.Reset() - - if _, _, err := p.s.NextObject(buf); err != nil { - return 0, err - } - - return p.getDeltaObjectSize(buf), nil - default: - return 0, ErrInvalidObject.AddDetails("type %q", h.Type) - } -} - -func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err error) { - switch h.Type { - case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject: - return h.Type, nil - case plumbing.REFDeltaObject, plumbing.OFSDeltaObject: - var offset int64 - if h.Type == plumbing.REFDeltaObject { - offset, err = p.FindOffset(h.Reference) - if err != nil { - return - } - } else { - offset = h.OffsetReference - } - - if baseType, ok := p.offsetToType[offset]; ok { - typ = baseType - } 
else { - h, err = p.objectHeaderAtOffset(offset) - if err != nil { - return - } - - typ, err = p.getObjectType(h) - if err != nil { - return - } - } - default: - err = ErrInvalidObject.AddDetails("type %q", h.Type) - } - - p.offsetToType[h.Offset] = typ - - return -} - -func (p *Packfile) objectAtOffset(offset int64, hash plumbing.Hash) (plumbing.EncodedObject, error) { - if obj, ok := p.cacheGet(hash); ok { - return obj, nil - } - - h, err := p.objectHeaderAtOffset(offset) - if err != nil { - if err == io.EOF || isInvalid(err) { - return nil, plumbing.ErrObjectNotFound - } - return nil, err - } - - return p.getNextObject(h, hash) -} - -func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.EncodedObject, error) { - var err error - - // If we have no filesystem, we will return a MemoryObject instead - // of an FSObject. - if p.fs == nil { - return p.getNextMemoryObject(h) - } - - // If the object is small enough then read it completely into memory now since - // it is already read from disk into buffer anyway. For delta objects we want - // to perform the optimization too, but we have to be careful about applying - // small deltas on big objects. - var size int64 - if h.Length <= smallObjectThreshold { - if h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject { - return p.getNextMemoryObject(h) - } - - // For delta objects we read the delta data and apply the small object - // optimization only if the expanded version of the object still meets - // the small object threshold condition. - buf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(buf) - buf.Reset() - if _, _, err := p.s.NextObject(buf); err != nil { - return nil, err - } - - size = p.getDeltaObjectSize(buf) - if size <= smallObjectThreshold { - var obj = new(plumbing.MemoryObject) - obj.SetSize(size) - if h.Type == plumbing.REFDeltaObject { - err = p.fillREFDeltaObjectContentWithBuffer(obj, h.Reference, buf) - } else { - err = p.fillOFSDeltaObjectContentWithBuffer(obj, h.OffsetReference, buf) - } - return obj, err - } - } else { - size, err = p.getObjectSize(h) - if err != nil { - return nil, err - } - } - - typ, err := p.getObjectType(h) - if err != nil { - return nil, err - } - - p.offsetToType[h.Offset] = typ - - return NewFSObject( - hash, - typ, - h.Offset, - size, - p.Index, - p.fs, - p.file.Name(), - p.deltaBaseCache, - ), nil -} - -func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) { - h, err := p.objectHeaderAtOffset(offset) - if err != nil { - return nil, err - } - - // getObjectContent is called from FSObject, so we have to explicitly - // get memory object here to avoid recursive cycle - obj, err := p.getNextMemoryObject(h) - if err != nil { - return nil, err - } - - return obj.Reader() -} - -func (p *Packfile) getNextMemoryObject(h *ObjectHeader) (plumbing.EncodedObject, error) { - var obj = new(plumbing.MemoryObject) - obj.SetSize(h.Length) - obj.SetType(h.Type) - - var err error - switch h.Type { - case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject: - err = p.fillRegularObjectContent(obj) - case plumbing.REFDeltaObject: - err = p.fillREFDeltaObjectContent(obj, h.Reference) - case plumbing.OFSDeltaObject: - err = p.fillOFSDeltaObjectContent(obj, h.OffsetReference) - default: - err = ErrInvalidObject.AddDetails("type %q", h.Type) - } - - if err != nil { - return nil, err - } - - p.offsetToType[h.Offset] = obj.Type() - - return obj, nil -} - -func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) 
(err error) { - w, err := obj.Writer() - if err != nil { - return err - } - - defer ioutil.CheckClose(w, &err) - - _, _, err = p.s.NextObject(w) - p.cachePut(obj) - - return err -} - -func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) error { - buf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(buf) - buf.Reset() - _, _, err := p.s.NextObject(buf) - if err != nil { - return err - } - - return p.fillREFDeltaObjectContentWithBuffer(obj, ref, buf) -} - -func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, ref plumbing.Hash, buf *bytes.Buffer) error { - var err error - - base, ok := p.cacheGet(ref) - if !ok { - base, err = p.Get(ref) - if err != nil { - return err - } - } - - obj.SetType(base.Type()) - err = ApplyDelta(obj, base, buf.Bytes()) - p.cachePut(obj) - - return err -} - -func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error { - buf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(buf) - buf.Reset() - _, _, err := p.s.NextObject(buf) - if err != nil { - return err - } - - return p.fillOFSDeltaObjectContentWithBuffer(obj, offset, buf) -} - -func (p *Packfile) fillOFSDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, offset int64, buf *bytes.Buffer) error { - hash, err := p.FindHash(offset) - if err != nil { - return err - } - - base, err := p.objectAtOffset(offset, hash) - if err != nil { - return err - } - - obj.SetType(base.Type()) - err = ApplyDelta(obj, base, buf.Bytes()) - p.cachePut(obj) - - return err -} - -func (p *Packfile) cacheGet(h plumbing.Hash) (plumbing.EncodedObject, bool) { - if p.deltaBaseCache == nil { - return nil, false - } - - return p.deltaBaseCache.Get(h) -} - -func (p *Packfile) cachePut(obj plumbing.EncodedObject) { - if p.deltaBaseCache == nil { - return - } - - p.deltaBaseCache.Put(obj) -} - -// GetAll returns an iterator with all encoded objects in the packfile. -// The iterator returned is not thread-safe, it should be used in the same -// thread as the Packfile instance. -func (p *Packfile) GetAll() (storer.EncodedObjectIter, error) { - return p.GetByType(plumbing.AnyObject) -} - -// GetByType returns all the objects of the given type. -func (p *Packfile) GetByType(typ plumbing.ObjectType) (storer.EncodedObjectIter, error) { - switch typ { - case plumbing.AnyObject, - plumbing.BlobObject, - plumbing.TreeObject, - plumbing.CommitObject, - plumbing.TagObject: - entries, err := p.EntriesByOffset() - if err != nil { - return nil, err - } - - return &objectIter{ - // Easiest way to provide an object decoder is just to pass a Packfile - // instance. To not mess with the seeks, it's a new instance with a - // different scanner but the same cache and offset to hash map for - // reusing as much cache as possible. - p: p, - iter: entries, - typ: typ, - }, nil - default: - return nil, plumbing.ErrInvalidType - } -} - -// ID returns the ID of the packfile, which is the checksum at the end of it. -func (p *Packfile) ID() (plumbing.Hash, error) { - prev, err := p.file.Seek(-20, io.SeekEnd) - if err != nil { - return plumbing.ZeroHash, err - } - - var hash plumbing.Hash - if _, err := io.ReadFull(p.file, hash[:]); err != nil { - return plumbing.ZeroHash, err - } - - if _, err := p.file.Seek(prev, io.SeekStart); err != nil { - return plumbing.ZeroHash, err - } - - return hash, nil -} - -// Scanner returns the packfile's Scanner -func (p *Packfile) Scanner() *Scanner { - return p.s -} - -// Close the packfile and its resources. 
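ID() above reads the 20 trailing bytes directly; the integrity property that the encoder's footer establishes can be checked with the standard library alone. A short sketch (the path is a placeholder):

package main

import (
	"bytes"
	"crypto/sha1"
	"fmt"
	"os"
)

func main() {
	data, err := os.ReadFile("pack-0123.pack") // placeholder path
	if err != nil {
		panic(err)
	}
	// The last 20 bytes are the SHA-1 of everything before them.
	sum := sha1.Sum(data[:len(data)-20])
	fmt.Println(bytes.Equal(sum[:], data[len(data)-20:])) // true for an intact pack
}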
-func (p *Packfile) Close() error { - closer, ok := p.file.(io.Closer) - if !ok { - return nil - } - - return closer.Close() -} - -type objectIter struct { - p *Packfile - typ plumbing.ObjectType - iter idxfile.EntryIter -} - -func (i *objectIter) Next() (plumbing.EncodedObject, error) { - for { - e, err := i.iter.Next() - if err != nil { - return nil, err - } - - if i.typ != plumbing.AnyObject { - if typ, ok := i.p.offsetToType[int64(e.Offset)]; ok { - if typ != i.typ { - continue - } - } else if obj, ok := i.p.cacheGet(e.Hash); ok { - if obj.Type() != i.typ { - i.p.offsetToType[int64(e.Offset)] = obj.Type() - continue - } - return obj, nil - } else { - h, err := i.p.objectHeaderAtOffset(int64(e.Offset)) - if err != nil { - return nil, err - } - - if h.Type == plumbing.REFDeltaObject || h.Type == plumbing.OFSDeltaObject { - typ, err := i.p.getObjectType(h) - if err != nil { - return nil, err - } - if typ != i.typ { - i.p.offsetToType[int64(e.Offset)] = typ - continue - } - // getObjectType will seek in the file so we cannot use getNextObject safely - return i.p.objectAtOffset(int64(e.Offset), e.Hash) - } else { - if h.Type != i.typ { - i.p.offsetToType[int64(e.Offset)] = h.Type - continue - } - return i.p.getNextObject(h, e.Hash) - } - } - } - - obj, err := i.p.objectAtOffset(int64(e.Offset), e.Hash) - if err != nil { - return nil, err - } - - return obj, nil - } -} - -func (i *objectIter) ForEach(f func(plumbing.EncodedObject) error) error { - for { - o, err := i.Next() - if err != nil { - if err == io.EOF { - return nil - } - return err - } - - if err := f(o); err != nil { - return err - } - } -} - -func (i *objectIter) Close() { - i.iter.Close() -} - -// isInvalid checks whether an error is an os.PathError with an os.ErrInvalid -// error inside. It also checks for the windows error, which is different from -// os.ErrInvalid. -func isInvalid(err error) bool { - pe, ok := err.(*os.PathError) - if !ok { - return false - } - - errstr := pe.Err.Error() - return errstr == errInvalidUnix || errstr == errInvalidWindows -} - -// errInvalidWindows is the Windows equivalent to os.ErrInvalid -const errInvalidWindows = "The parameter is incorrect." - -var errInvalidUnix = os.ErrInvalid.Error() diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/parser.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/parser.go deleted file mode 100644 index 4b5a5708cc2..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/parser.go +++ /dev/null @@ -1,495 +0,0 @@ -package packfile - -import ( - "bytes" - "errors" - "io" - stdioutil "io/ioutil" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/cache" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -var ( - // ErrReferenceDeltaNotFound is returned when the reference delta is not - // found. - ErrReferenceDeltaNotFound = errors.New("reference delta not found") - - // ErrNotSeekableSource is returned when the source for the parser is not - // seekable and a storage was not provided, so it can't be parsed. - ErrNotSeekableSource = errors.New("parser source is not seekable and storage was not provided") - - // ErrDeltaNotCached is returned when the delta could not be found in cache. - ErrDeltaNotCached = errors.New("delta could not be found in cache") -) - -// Observer interface is implemented by index encoders. 
-type Observer interface { - // OnHeader is called when a new packfile is opened. - OnHeader(count uint32) error - // OnInflatedObjectHeader is called for each object header read. - OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error - // OnInflatedObjectContent is called for each decoded object. - OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, content []byte) error - // OnFooter is called when decoding is done. - OnFooter(h plumbing.Hash) error -} - -// Parser decodes a packfile and calls any observer associated to it. Is used -// to generate indexes. -type Parser struct { - storage storer.EncodedObjectStorer - scanner *Scanner - count uint32 - oi []*objectInfo - oiByHash map[plumbing.Hash]*objectInfo - oiByOffset map[int64]*objectInfo - hashOffset map[plumbing.Hash]int64 - checksum plumbing.Hash - - cache *cache.BufferLRU - // delta content by offset, only used if source is not seekable - deltas map[int64][]byte - - ob []Observer -} - -// NewParser creates a new Parser. The Scanner source must be seekable. -// If it's not, NewParserWithStorage should be used instead. -func NewParser(scanner *Scanner, ob ...Observer) (*Parser, error) { - return NewParserWithStorage(scanner, nil, ob...) -} - -// NewParserWithStorage creates a new Parser. The scanner source must either -// be seekable or a storage must be provided. -func NewParserWithStorage( - scanner *Scanner, - storage storer.EncodedObjectStorer, - ob ...Observer, -) (*Parser, error) { - if !scanner.IsSeekable && storage == nil { - return nil, ErrNotSeekableSource - } - - var deltas map[int64][]byte - if !scanner.IsSeekable { - deltas = make(map[int64][]byte) - } - - return &Parser{ - storage: storage, - scanner: scanner, - ob: ob, - count: 0, - cache: cache.NewBufferLRUDefault(), - deltas: deltas, - }, nil -} - -func (p *Parser) forEachObserver(f func(o Observer) error) error { - for _, o := range p.ob { - if err := f(o); err != nil { - return err - } - } - return nil -} - -func (p *Parser) onHeader(count uint32) error { - return p.forEachObserver(func(o Observer) error { - return o.OnHeader(count) - }) -} - -func (p *Parser) onInflatedObjectHeader( - t plumbing.ObjectType, - objSize int64, - pos int64, -) error { - return p.forEachObserver(func(o Observer) error { - return o.OnInflatedObjectHeader(t, objSize, pos) - }) -} - -func (p *Parser) onInflatedObjectContent( - h plumbing.Hash, - pos int64, - crc uint32, - content []byte, -) error { - return p.forEachObserver(func(o Observer) error { - return o.OnInflatedObjectContent(h, pos, crc, content) - }) -} - -func (p *Parser) onFooter(h plumbing.Hash) error { - return p.forEachObserver(func(o Observer) error { - return o.OnFooter(h) - }) -} - -// Parse start decoding phase of the packfile. 
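Observer's four callbacks fire while Parse (below) indexes a pack. A minimal sketch under the interface as vendored here: a hypothetical countingObserver is wired to a parser over the canonical empty pack, so the example is self-contained:

package main

import (
	"bytes"
	"crypto/sha1"
	"encoding/binary"
	"fmt"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/format/packfile"
)

// countingObserver tallies object headers; the other callbacks are no-ops.
type countingObserver struct{ objects int }

func (c *countingObserver) OnHeader(count uint32) error { return nil }
func (c *countingObserver) OnInflatedObjectHeader(t plumbing.ObjectType, objSize, pos int64) error {
	c.objects++
	return nil
}
func (c *countingObserver) OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, content []byte) error {
	return nil
}
func (c *countingObserver) OnFooter(h plumbing.Hash) error { return nil }

func main() {
	// Build the canonical empty pack in memory: header with zero entries,
	// then the SHA-1 trailer.
	var pack bytes.Buffer
	pack.WriteString("PACK")
	binary.Write(&pack, binary.BigEndian, uint32(2))
	binary.Write(&pack, binary.BigEndian, uint32(0))
	sum := sha1.Sum(pack.Bytes())
	pack.Write(sum[:])

	var o countingObserver
	// bytes.Reader is seekable, so no storage is needed.
	parser, err := packfile.NewParser(packfile.NewScanner(bytes.NewReader(pack.Bytes())), &o)
	if err != nil {
		panic(err)
	}
	checksum, err := parser.Parse()
	if err != nil {
		panic(err)
	}
	fmt.Println(o.objects, checksum) // 0 029d08823bd8a8eab510ad6ac75c823cfd3ed31e
}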
-func (p *Parser) Parse() (plumbing.Hash, error) { - if err := p.init(); err != nil { - return plumbing.ZeroHash, err - } - - if err := p.indexObjects(); err != nil { - return plumbing.ZeroHash, err - } - - var err error - p.checksum, err = p.scanner.Checksum() - if err != nil && err != io.EOF { - return plumbing.ZeroHash, err - } - - if err := p.resolveDeltas(); err != nil { - return plumbing.ZeroHash, err - } - - if err := p.onFooter(p.checksum); err != nil { - return plumbing.ZeroHash, err - } - - return p.checksum, nil -} - -func (p *Parser) init() error { - _, c, err := p.scanner.Header() - if err != nil { - return err - } - - if err := p.onHeader(c); err != nil { - return err - } - - p.count = c - p.oiByHash = make(map[plumbing.Hash]*objectInfo, p.count) - p.oiByOffset = make(map[int64]*objectInfo, p.count) - p.oi = make([]*objectInfo, p.count) - - return nil -} - -func (p *Parser) indexObjects() error { - buf := new(bytes.Buffer) - - for i := uint32(0); i < p.count; i++ { - buf.Reset() - - oh, err := p.scanner.NextObjectHeader() - if err != nil { - return err - } - - delta := false - var ota *objectInfo - switch t := oh.Type; t { - case plumbing.OFSDeltaObject: - delta = true - - parent, ok := p.oiByOffset[oh.OffsetReference] - if !ok { - return plumbing.ErrObjectNotFound - } - - ota = newDeltaObject(oh.Offset, oh.Length, t, parent) - parent.Children = append(parent.Children, ota) - case plumbing.REFDeltaObject: - delta = true - parent, ok := p.oiByHash[oh.Reference] - if !ok { - // can't find referenced object in this pack file - // this must be a "thin" pack. - parent = &objectInfo{ //Placeholder parent - SHA1: oh.Reference, - ExternalRef: true, // mark as an external reference that must be resolved - Type: plumbing.AnyObject, - DiskType: plumbing.AnyObject, - } - p.oiByHash[oh.Reference] = parent - } - ota = newDeltaObject(oh.Offset, oh.Length, t, parent) - parent.Children = append(parent.Children, ota) - - default: - ota = newBaseObject(oh.Offset, oh.Length, t) - } - - _, crc, err := p.scanner.NextObject(buf) - if err != nil { - return err - } - - ota.Crc32 = crc - ota.Length = oh.Length - - data := buf.Bytes() - if !delta { - sha1, err := getSHA1(ota.Type, data) - if err != nil { - return err - } - - ota.SHA1 = sha1 - p.oiByHash[ota.SHA1] = ota - } - - if p.storage != nil && !delta { - obj := new(plumbing.MemoryObject) - obj.SetSize(oh.Length) - obj.SetType(oh.Type) - if _, err := obj.Write(data); err != nil { - return err - } - - if _, err := p.storage.SetEncodedObject(obj); err != nil { - return err - } - } - - if delta && !p.scanner.IsSeekable { - p.deltas[oh.Offset] = make([]byte, len(data)) - copy(p.deltas[oh.Offset], data) - } - - p.oiByOffset[oh.Offset] = ota - p.oi[i] = ota - } - - return nil -} - -func (p *Parser) resolveDeltas() error { - buf := &bytes.Buffer{} - for _, obj := range p.oi { - buf.Reset() - err := p.get(obj, buf) - if err != nil { - return err - } - content := buf.Bytes() - - if err := p.onInflatedObjectHeader(obj.Type, obj.Length, obj.Offset); err != nil { - return err - } - - if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, content); err != nil { - return err - } - - if !obj.IsDelta() && len(obj.Children) > 0 { - for _, child := range obj.Children { - if err := p.resolveObject(stdioutil.Discard, child, content); err != nil { - return err - } - } - - // Remove the delta from the cache. 
- if obj.DiskType.IsDelta() && !p.scanner.IsSeekable { - delete(p.deltas, obj.Offset) - } - } - } - - return nil -} - -func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) { - if !o.ExternalRef { // skip cache check for placeholder parents - b, ok := p.cache.Get(o.Offset) - if ok { - _, err := buf.Write(b) - return err - } - } - - // If it's not on the cache and is not a delta we can try to find it in the - // storage, if there's one. External refs must enter here. - if p.storage != nil && !o.Type.IsDelta() { - var e plumbing.EncodedObject - e, err = p.storage.EncodedObject(plumbing.AnyObject, o.SHA1) - if err != nil { - return err - } - o.Type = e.Type() - - var r io.ReadCloser - r, err = e.Reader() - if err != nil { - return err - } - - defer ioutil.CheckClose(r, &err) - - _, err = buf.ReadFrom(io.LimitReader(r, e.Size())) - return err - } - - if o.ExternalRef { - // we were not able to resolve a ref in a thin pack - return ErrReferenceDeltaNotFound - } - - if o.DiskType.IsDelta() { - b := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(b) - b.Reset() - err := p.get(o.Parent, b) - if err != nil { - return err - } - base := b.Bytes() - - err = p.resolveObject(buf, o, base) - if err != nil { - return err - } - } else { - err := p.readData(buf, o) - if err != nil { - return err - } - } - - if len(o.Children) > 0 { - data := make([]byte, buf.Len()) - copy(data, buf.Bytes()) - p.cache.Put(o.Offset, data) - } - return nil -} - -func (p *Parser) resolveObject( - w io.Writer, - o *objectInfo, - base []byte, -) error { - if !o.DiskType.IsDelta() { - return nil - } - buf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(buf) - buf.Reset() - err := p.readData(buf, o) - if err != nil { - return err - } - data := buf.Bytes() - - data, err = applyPatchBase(o, data, base) - if err != nil { - return err - } - - if p.storage != nil { - obj := new(plumbing.MemoryObject) - obj.SetSize(o.Size()) - obj.SetType(o.Type) - if _, err := obj.Write(data); err != nil { - return err - } - - if _, err := p.storage.SetEncodedObject(obj); err != nil { - return err - } - } - _, err = w.Write(data) - return err -} - -func (p *Parser) readData(w io.Writer, o *objectInfo) error { - if !p.scanner.IsSeekable && o.DiskType.IsDelta() { - data, ok := p.deltas[o.Offset] - if !ok { - return ErrDeltaNotCached - } - _, err := w.Write(data) - return err - } - - if _, err := p.scanner.SeekObjectHeader(o.Offset); err != nil { - return err - } - - if _, _, err := p.scanner.NextObject(w); err != nil { - return err - } - return nil -} - -func applyPatchBase(ota *objectInfo, data, base []byte) ([]byte, error) { - patched, err := PatchDelta(base, data) - if err != nil { - return nil, err - } - - if ota.SHA1 == plumbing.ZeroHash { - ota.Type = ota.Parent.Type - sha1, err := getSHA1(ota.Type, patched) - if err != nil { - return nil, err - } - - ota.SHA1 = sha1 - ota.Length = int64(len(patched)) - } - - return patched, nil -} - -func getSHA1(t plumbing.ObjectType, data []byte) (plumbing.Hash, error) { - hasher := plumbing.NewHasher(t, int64(len(data))) - if _, err := hasher.Write(data); err != nil { - return plumbing.ZeroHash, err - } - - return hasher.Sum(), nil -} - -type objectInfo struct { - Offset int64 - Length int64 - Type plumbing.ObjectType - DiskType plumbing.ObjectType - ExternalRef bool // indicates this is an external reference in a thin pack file - - Crc32 uint32 - - Parent *objectInfo - Children []*objectInfo - SHA1 plumbing.Hash -} - -func newBaseObject(offset, length int64, t plumbing.ObjectType) 
*objectInfo { - return newDeltaObject(offset, length, t, nil) -} - -func newDeltaObject( - offset, length int64, - t plumbing.ObjectType, - parent *objectInfo, -) *objectInfo { - obj := &objectInfo{ - Offset: offset, - Length: length, - Type: t, - DiskType: t, - Crc32: 0, - Parent: parent, - } - - return obj -} - -func (o *objectInfo) IsDelta() bool { - return o.Type.IsDelta() -} - -func (o *objectInfo) Size() int64 { - return o.Length -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go deleted file mode 100644 index 1dc8b8b0094..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go +++ /dev/null @@ -1,253 +0,0 @@ -package packfile - -import ( - "bytes" - "errors" - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -// See https://github.com/git/git/blob/49fa3dc76179e04b0833542fa52d0f287a4955ac/delta.h -// https://github.com/git/git/blob/c2c5f6b1e479f2c38e0e01345350620944e3527f/patch-delta.c, -// and https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js -// for details about the delta format. - -const deltaSizeMin = 4 - -// ApplyDelta writes to target the result of applying the modification deltas in delta to base. -func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) { - r, err := base.Reader() - if err != nil { - return err - } - - defer ioutil.CheckClose(r, &err) - - w, err := target.Writer() - if err != nil { - return err - } - - defer ioutil.CheckClose(w, &err) - - buf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(buf) - buf.Reset() - _, err = buf.ReadFrom(r) - if err != nil { - return err - } - src := buf.Bytes() - - dst := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(dst) - dst.Reset() - err = patchDelta(dst, src, delta) - if err != nil { - return err - } - - - target.SetSize(int64(dst.Len())) - - b := byteSlicePool.Get().([]byte) - _, err = io.CopyBuffer(w, dst, b) - byteSlicePool.Put(b) - return err -} - -var ( - ErrInvalidDelta = errors.New("invalid delta") - ErrDeltaCmd = errors.New("wrong delta command") -) - -// PatchDelta returns the result of applying the modification deltas in delta to src. -// An error will be returned if delta is corrupted (ErrDeltaLen) or an action command -// is not copy from source or copy from delta (ErrDeltaCmd). 
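A worked example of the delta command stream described above, using the exported PatchDelta: two LEB128 size fields, then a single copy-from-source command. The byte values are annotated so they can be followed against the decoder below:

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/format/packfile"
)

func main() {
	src := []byte("hello")
	delta := []byte{
		0x05,       // source size 5 (LEB128)
		0x03,       // target size 3 (LEB128)
		0x91,       // 0x80|0x01|0x10: copy from source, 1 offset byte, 1 size byte
		0x01, 0x03, // offset 1, size 3 -> copies src[1:4]
	}
	out, err := packfile.PatchDelta(src, delta)
	fmt.Printf("%q %v\n", out, err) // "ell" <nil>
}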
-func PatchDelta(src, delta []byte) ([]byte, error) { - b := &bytes.Buffer{} - if err := patchDelta(b, src, delta); err != nil { - return nil, err - } - return b.Bytes(), nil -} - -func patchDelta(dst *bytes.Buffer, src, delta []byte) error { - if len(delta) < deltaSizeMin { - return ErrInvalidDelta - } - - srcSz, delta := decodeLEB128(delta) - if srcSz != uint(len(src)) { - return ErrInvalidDelta - } - - targetSz, delta := decodeLEB128(delta) - remainingTargetSz := targetSz - - var cmd byte - dst.Grow(int(targetSz)) - for { - if len(delta) == 0 { - return ErrInvalidDelta - } - - cmd = delta[0] - delta = delta[1:] - if isCopyFromSrc(cmd) { - var offset, sz uint - var err error - offset, delta, err = decodeOffset(cmd, delta) - if err != nil { - return err - } - - sz, delta, err = decodeSize(cmd, delta) - if err != nil { - return err - } - - if invalidSize(sz, targetSz) || - invalidOffsetSize(offset, sz, srcSz) { - break - } - dst.Write(src[offset:offset+sz]) - remainingTargetSz -= sz - } else if isCopyFromDelta(cmd) { - sz := uint(cmd) // cmd is the size itself - if invalidSize(sz, targetSz) { - return ErrInvalidDelta - } - - if uint(len(delta)) < sz { - return ErrInvalidDelta - } - - dst.Write(delta[0:sz]) - remainingTargetSz -= sz - delta = delta[sz:] - } else { - return ErrDeltaCmd - } - - if remainingTargetSz <= 0 { - break - } - } - - return nil -} - -// Decodes a number encoded as an unsigned LEB128 at the start of some -// binary data and returns the decoded number and the rest of the -// stream. -// -// This must be called twice on the delta data buffer, first to get the -// expected source buffer size, and again to get the target buffer size. -func decodeLEB128(input []byte) (uint, []byte) { - var num, sz uint - var b byte - for { - b = input[sz] - num |= (uint(b) & payload) << (sz * 7) // concats 7 bits chunks - sz++ - - if uint(b)&continuation == 0 || sz == uint(len(input)) { - break - } - } - - return num, input[sz:] -} - -const ( - payload = 0x7f // 0111 1111 - continuation = 0x80 // 1000 0000 -) - -func isCopyFromSrc(cmd byte) bool { - return (cmd & 0x80) != 0 -} - -func isCopyFromDelta(cmd byte) bool { - return (cmd&0x80) == 0 && cmd != 0 -} - -func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) { - var offset uint - if (cmd & 0x01) != 0 { - if len(delta) == 0 { - return 0, nil, ErrInvalidDelta - } - offset = uint(delta[0]) - delta = delta[1:] - } - if (cmd & 0x02) != 0 { - if len(delta) == 0 { - return 0, nil, ErrInvalidDelta - } - offset |= uint(delta[0]) << 8 - delta = delta[1:] - } - if (cmd & 0x04) != 0 { - if len(delta) == 0 { - return 0, nil, ErrInvalidDelta - } - offset |= uint(delta[0]) << 16 - delta = delta[1:] - } - if (cmd & 0x08) != 0 { - if len(delta) == 0 { - return 0, nil, ErrInvalidDelta - } - offset |= uint(delta[0]) << 24 - delta = delta[1:] - } - - return offset, delta, nil -} - -func decodeSize(cmd byte, delta []byte) (uint, []byte, error) { - var sz uint - if (cmd & 0x10) != 0 { - if len(delta) == 0 { - return 0, nil, ErrInvalidDelta - } - sz = uint(delta[0]) - delta = delta[1:] - } - if (cmd & 0x20) != 0 { - if len(delta) == 0 { - return 0, nil, ErrInvalidDelta - } - sz |= uint(delta[0]) << 8 - delta = delta[1:] - } - if (cmd & 0x40) != 0 { - if len(delta) == 0 { - return 0, nil, ErrInvalidDelta - } - sz |= uint(delta[0]) << 16 - delta = delta[1:] - } - if sz == 0 { - sz = 0x10000 - } - - return sz, delta, nil -} - -func invalidSize(sz, targetSz uint) bool { - return sz > targetSz -} - -func invalidOffsetSize(offset, sz, srcSz uint) bool { - 
return sumOverflows(offset, sz) ||
-		offset+sz > srcSz
-}
-
-func sumOverflows(a, b uint) bool {
-	return a+b < a
-}
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/scanner.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/scanner.go
deleted file mode 100644
index 6e6a687886a..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/scanner.go
+++ /dev/null
@@ -1,466 +0,0 @@
-package packfile
-
-import (
-	"bufio"
-	"bytes"
-	"compress/zlib"
-	"fmt"
-	"hash"
-	"hash/crc32"
-	"io"
-	stdioutil "io/ioutil"
-	"sync"
-
-	"github.com/go-git/go-git/v5/plumbing"
-	"github.com/go-git/go-git/v5/utils/binary"
-	"github.com/go-git/go-git/v5/utils/ioutil"
-)
-
-var (
-	// ErrEmptyPackfile is returned by Header when no data is found in the packfile.
-	ErrEmptyPackfile = NewError("empty packfile")
-	// ErrBadSignature is returned by Header when the signature in the packfile is incorrect.
-	ErrBadSignature = NewError("malformed pack file signature")
-	// ErrUnsupportedVersion is returned by Header when the packfile version is
-	// different than VersionSupported.
-	ErrUnsupportedVersion = NewError("unsupported packfile version")
-	// ErrSeekNotSupported is returned when seeking is not supported.
-	ErrSeekNotSupported = NewError("seek not supported")
-)
-
-// ObjectHeader contains information about an object; it is collected from
-// the bytes preceding the object's content.
-type ObjectHeader struct {
-	Type            plumbing.ObjectType
-	Offset          int64
-	Length          int64
-	Reference       plumbing.Hash
-	OffsetReference int64
-}
-
-type Scanner struct {
-	r   *scannerReader
-	crc hash.Hash32
-
-	// pendingObject is used to detect whether an object has been read, or
-	// is still waiting to be read
-	pendingObject    *ObjectHeader
-	version, objects uint32
-
-	// IsSeekable reports whether this scanner can Seek; for a Scanner to be
-	// seekable, an r implementing io.Seeker is required
-	IsSeekable bool
-}
-
-// NewScanner returns a new Scanner based on a reader; if the given reader
-// implements io.ReadSeeker, the Scanner will also be seekable.
-func NewScanner(r io.Reader) *Scanner {
-	_, ok := r.(io.ReadSeeker)
-
-	crc := crc32.NewIEEE()
-	return &Scanner{
-		r:          newScannerReader(r, crc),
-		crc:        crc,
-		IsSeekable: ok,
-	}
-}
-
-func (s *Scanner) Reset(r io.Reader) {
-	_, ok := r.(io.ReadSeeker)
-
-	s.r.Reset(r)
-	s.crc.Reset()
-	s.IsSeekable = ok
-	s.pendingObject = nil
-	s.version = 0
-	s.objects = 0
-}
-
-// Header reads the whole packfile header (signature, version and object count).
-// It returns the version and the object count and performs checks on the
-// validity of the signature and the version fields.
-func (s *Scanner) Header() (version, objects uint32, err error) {
-	if s.version != 0 {
-		return s.version, s.objects, nil
-	}
-
-	sig, err := s.readSignature()
-	if err != nil {
-		if err == io.EOF {
-			err = ErrEmptyPackfile
-		}
-
-		return
-	}
-
-	if !s.isValidSignature(sig) {
-		err = ErrBadSignature
-		return
-	}
-
-	version, err = s.readVersion()
-	s.version = version
-	if err != nil {
-		return
-	}
-
-	if !s.isSupportedVersion(version) {
-		err = ErrUnsupportedVersion.AddDetails("%d", version)
-		return
-	}
-
-	objects, err = s.readCount()
-	s.objects = objects
-	return
-}
-
-// readSignature reads and returns the signature field of the packfile.
-func (s *Scanner) readSignature() ([]byte, error) { - var sig = make([]byte, 4) - if _, err := io.ReadFull(s.r, sig); err != nil { - return []byte{}, err - } - - return sig, nil -} - -// isValidSignature returns if sig is a valid packfile signature. -func (s *Scanner) isValidSignature(sig []byte) bool { - return bytes.Equal(sig, signature) -} - -// readVersion reads and returns the version field of a packfile. -func (s *Scanner) readVersion() (uint32, error) { - return binary.ReadUint32(s.r) -} - -// isSupportedVersion returns whether version v is supported by the parser. -// The current supported version is VersionSupported, defined above. -func (s *Scanner) isSupportedVersion(v uint32) bool { - return v == VersionSupported -} - -// readCount reads and returns the count of objects field of a packfile. -func (s *Scanner) readCount() (uint32, error) { - return binary.ReadUint32(s.r) -} - -// SeekObjectHeader seeks to specified offset and returns the ObjectHeader -// for the next object in the reader -func (s *Scanner) SeekObjectHeader(offset int64) (*ObjectHeader, error) { - // if seeking we assume that you are not interested in the header - if s.version == 0 { - s.version = VersionSupported - } - - if _, err := s.r.Seek(offset, io.SeekStart); err != nil { - return nil, err - } - - h, err := s.nextObjectHeader() - if err != nil { - return nil, err - } - - h.Offset = offset - return h, nil -} - -// NextObjectHeader returns the ObjectHeader for the next object in the reader -func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) { - if err := s.doPending(); err != nil { - return nil, err - } - - offset, err := s.r.Seek(0, io.SeekCurrent) - if err != nil { - return nil, err - } - - h, err := s.nextObjectHeader() - if err != nil { - return nil, err - } - - h.Offset = offset - return h, nil -} - -// nextObjectHeader returns the ObjectHeader for the next object in the reader -// without the Offset field -func (s *Scanner) nextObjectHeader() (*ObjectHeader, error) { - s.r.Flush() - s.crc.Reset() - - h := &ObjectHeader{} - s.pendingObject = h - - var err error - h.Offset, err = s.r.Seek(0, io.SeekCurrent) - if err != nil { - return nil, err - } - - h.Type, h.Length, err = s.readObjectTypeAndLength() - if err != nil { - return nil, err - } - - switch h.Type { - case plumbing.OFSDeltaObject: - no, err := binary.ReadVariableWidthInt(s.r) - if err != nil { - return nil, err - } - - h.OffsetReference = h.Offset - no - case plumbing.REFDeltaObject: - var err error - h.Reference, err = binary.ReadHash(s.r) - if err != nil { - return nil, err - } - } - - return h, nil -} - -func (s *Scanner) doPending() error { - if s.version == 0 { - var err error - s.version, s.objects, err = s.Header() - if err != nil { - return err - } - } - - return s.discardObjectIfNeeded() -} - -func (s *Scanner) discardObjectIfNeeded() error { - if s.pendingObject == nil { - return nil - } - - h := s.pendingObject - n, _, err := s.NextObject(stdioutil.Discard) - if err != nil { - return err - } - - if n != h.Length { - return fmt.Errorf( - "error discarding object, discarded %d, expected %d", - n, h.Length, - ) - } - - return nil -} - -// ReadObjectTypeAndLength reads and returns the object type and the -// length field from an object entry in a packfile. 
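The type-and-length encoding that readObjectTypeAndLength below consumes packs a 3-bit object type and a little-endian varint length: 4 size bits in the first byte, 7 in each continuation byte, with the high bit of every byte as the continuation flag. A self-contained sketch with a worked example (the helper name is ours):

package main

import "fmt"

// decodeEntryHeader mirrors readType/readLength below, over a byte slice:
// bit 7 of each byte is the continuation flag, bits 6-4 of the first byte
// are the object type, and the remaining bits accumulate the length.
func decodeEntryHeader(b []byte) (typ byte, length uint64) {
	typ = (b[0] >> 4) & 0x07
	length = uint64(b[0] & 0x0f)
	shift := uint(4)
	for i := 0; b[i]&0x80 != 0; {
		i++
		length |= uint64(b[i]&0x7f) << shift
		shift += 7
	}
	return typ, length
}

func main() {
	// 0x9d = 1_001_1101: continuation set, type 1 (commit), low length bits 0xd.
	// 0x0e = 0_0001110: final byte, contributes 14 << 4 = 224.
	typ, length := decodeEntryHeader([]byte{0x9d, 0x0e})
	fmt.Println(typ, length) // 1 237 -> a commit object of 237 bytes
}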
-func (s *Scanner) readObjectTypeAndLength() (plumbing.ObjectType, int64, error) {
-	t, c, err := s.readType()
-	if err != nil {
-		return t, 0, err
-	}
-
-	l, err := s.readLength(c)
-
-	return t, l, err
-}
-
-func (s *Scanner) readType() (plumbing.ObjectType, byte, error) {
-	var c byte
-	var err error
-	if c, err = s.r.ReadByte(); err != nil {
-		return plumbing.ObjectType(0), 0, err
-	}
-
-	typ := parseType(c)
-
-	return typ, c, nil
-}
-
-func parseType(b byte) plumbing.ObjectType {
-	return plumbing.ObjectType((b & maskType) >> firstLengthBits)
-}
-
-// The length is encoded in the low 4 bits of the first byte and in the
-// low 7 bits of each subsequent byte. The last byte has a 0 MSB.
-func (s *Scanner) readLength(first byte) (int64, error) {
-	length := int64(first & maskFirstLength)
-
-	c := first
-	shift := firstLengthBits
-	var err error
-	for c&maskContinue > 0 {
-		if c, err = s.r.ReadByte(); err != nil {
-			return 0, err
-		}
-
-		length += int64(c&maskLength) << shift
-		shift += lengthBits
-	}
-
-	return length, nil
-}
-
-// NextObject writes the content of the next object into the writer; it returns
-// the number of bytes written, the CRC32 of the content and an error, if any.
-func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err error) {
-	s.pendingObject = nil
-	written, err = s.copyObject(w)
-
-	s.r.Flush()
-	crc32 = s.crc.Sum32()
-	s.crc.Reset()
-
-	return
-}
-
-// copyObject reads and writes a non-deltified object
-// from its zlib stream in an object entry in the packfile.
-func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {
-	zr := zlibReaderPool.Get().(io.ReadCloser)
-	defer zlibReaderPool.Put(zr)
-
-	if err = zr.(zlib.Resetter).Reset(s.r, nil); err != nil {
-		return 0, fmt.Errorf("zlib reset error: %s", err)
-	}
-
-	defer ioutil.CheckClose(zr, &err)
-	buf := byteSlicePool.Get().([]byte)
-	n, err = io.CopyBuffer(w, zr, buf)
-	byteSlicePool.Put(buf)
-	return
-}
-
-var byteSlicePool = sync.Pool{
-	New: func() interface{} {
-		return make([]byte, 32*1024)
-	},
-}
-
-// SeekFromStart sets a new offset from start; it returns the old position
-// before the change.
-func (s *Scanner) SeekFromStart(offset int64) (previous int64, err error) {
-	// if seeking we assume that you are not interested in the header
-	if s.version == 0 {
-		s.version = VersionSupported
-	}
-
-	previous, err = s.r.Seek(0, io.SeekCurrent)
-	if err != nil {
-		return -1, err
-	}
-
-	_, err = s.r.Seek(offset, io.SeekStart)
-	return previous, err
-}
-
-// Checksum returns the checksum of the packfile.
-func (s *Scanner) Checksum() (plumbing.Hash, error) {
-	err := s.discardObjectIfNeeded()
-	if err != nil {
-		return plumbing.ZeroHash, err
-	}
-
-	return binary.ReadHash(s.r)
-}
-
-// Close reads the reader until io.EOF.
-func (s *Scanner) Close() error {
-	buf := byteSlicePool.Get().([]byte)
-	_, err := io.CopyBuffer(stdioutil.Discard, s.r, buf)
-	byteSlicePool.Put(buf)
-	return err
-}
-
-// Flush is a no-op (deprecated).
-func (s *Scanner) Flush() error {
-	return nil
-}
-
-// scannerReader has the following characteristics:
-// - Provides an io.SeekReader impl for bufio.Reader, when the underlying
-//   reader supports it.
-// - Keeps track of the current read position, for when the underlying reader
-//   isn't an io.SeekReader, but we still want to know the current offset.
-// - Writes to the hash writer what it reads, with the aid of a smaller buffer.
-//   The buffer helps avoid a performance penalty for performing small writes
-//   to the crc32 hash writer.
-type scannerReader struct { - reader io.Reader - crc io.Writer - rbuf *bufio.Reader - wbuf *bufio.Writer - offset int64 -} - -func newScannerReader(r io.Reader, h io.Writer) *scannerReader { - sr := &scannerReader{ - rbuf: bufio.NewReader(nil), - wbuf: bufio.NewWriterSize(nil, 64), - crc: h, - } - sr.Reset(r) - - return sr -} - -func (r *scannerReader) Reset(reader io.Reader) { - r.reader = reader - r.rbuf.Reset(r.reader) - r.wbuf.Reset(r.crc) - - r.offset = 0 - if seeker, ok := r.reader.(io.ReadSeeker); ok { - r.offset, _ = seeker.Seek(0, io.SeekCurrent) - } -} - -func (r *scannerReader) Read(p []byte) (n int, err error) { - n, err = r.rbuf.Read(p) - - r.offset += int64(n) - if _, err := r.wbuf.Write(p[:n]); err != nil { - return n, err - } - return -} - -func (r *scannerReader) ReadByte() (b byte, err error) { - b, err = r.rbuf.ReadByte() - if err == nil { - r.offset++ - return b, r.wbuf.WriteByte(b) - } - return -} - -func (r *scannerReader) Flush() error { - return r.wbuf.Flush() -} - -// Seek seeks to a location. If the underlying reader is not an io.ReadSeeker, -// then only whence=io.SeekCurrent is supported, any other operation fails. -func (r *scannerReader) Seek(offset int64, whence int) (int64, error) { - var err error - - if seeker, ok := r.reader.(io.ReadSeeker); !ok { - if whence != io.SeekCurrent || offset != 0 { - return -1, ErrSeekNotSupported - } - } else { - if whence == io.SeekCurrent && offset == 0 { - return r.offset, nil - } - - r.offset, err = seeker.Seek(offset, whence) - r.rbuf.Reset(r.reader) - } - - return r.offset, err -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/encoder.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/encoder.go deleted file mode 100644 index 6d409795b0e..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/encoder.go +++ /dev/null @@ -1,122 +0,0 @@ -// Package pktline implements reading payloads form pkt-lines and encoding -// pkt-lines from payloads. -package pktline - -import ( - "bytes" - "errors" - "fmt" - "io" -) - -// An Encoder writes pkt-lines to an output stream. -type Encoder struct { - w io.Writer -} - -const ( - // MaxPayloadSize is the maximum payload size of a pkt-line in bytes. - MaxPayloadSize = 65516 - - // For compatibility with canonical Git implementation, accept longer pkt-lines - OversizePayloadMax = 65520 -) - -var ( - // FlushPkt are the contents of a flush-pkt pkt-line. - FlushPkt = []byte{'0', '0', '0', '0'} - // Flush is the payload to use with the Encode method to encode a flush-pkt. - Flush = []byte{} - // FlushString is the payload to use with the EncodeString method to encode a flush-pkt. - FlushString = "" - // ErrPayloadTooLong is returned by the Encode methods when any of the - // provided payloads is bigger than MaxPayloadSize. - ErrPayloadTooLong = errors.New("payload is too long") -) - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: w, - } -} - -// Flush encodes a flush-pkt to the output stream. -func (e *Encoder) Flush() error { - _, err := e.w.Write(FlushPkt) - return err -} - -// Encode encodes a pkt-line with the payload specified and write it to -// the output stream. If several payloads are specified, each of them -// will get streamed in their own pkt-lines. 
-func (e *Encoder) Encode(payloads ...[]byte) error { - for _, p := range payloads { - if err := e.encodeLine(p); err != nil { - return err - } - } - - return nil -} - -func (e *Encoder) encodeLine(p []byte) error { - if len(p) > MaxPayloadSize { - return ErrPayloadTooLong - } - - if bytes.Equal(p, Flush) { - return e.Flush() - } - - n := len(p) + 4 - if _, err := e.w.Write(asciiHex16(n)); err != nil { - return err - } - _, err := e.w.Write(p) - return err -} - -// Returns the hexadecimal ascii representation of the 16 less -// significant bits of n. The length of the returned slice will always -// be 4. Example: if n is 1234 (0x4d2), the return value will be -// []byte{'0', '4', 'd', '2'}. -func asciiHex16(n int) []byte { - var ret [4]byte - ret[0] = byteToASCIIHex(byte(n & 0xf000 >> 12)) - ret[1] = byteToASCIIHex(byte(n & 0x0f00 >> 8)) - ret[2] = byteToASCIIHex(byte(n & 0x00f0 >> 4)) - ret[3] = byteToASCIIHex(byte(n & 0x000f)) - - return ret[:] -} - -// turns a byte into its hexadecimal ascii representation. Example: -// from 11 (0xb) to 'b'. -func byteToASCIIHex(n byte) byte { - if n < 10 { - return '0' + n - } - - return 'a' - 10 + n -} - -// EncodeString works similarly as Encode but payloads are specified as strings. -func (e *Encoder) EncodeString(payloads ...string) error { - for _, p := range payloads { - if err := e.Encode([]byte(p)); err != nil { - return err - } - } - - return nil -} - -// Encodef encodes a single pkt-line with the payload formatted as -// the format specifier. The rest of the arguments will be used in -// the format string. -func (e *Encoder) Encodef(format string, a ...interface{}) error { - return e.EncodeString( - fmt.Sprintf(format, a...), - ) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go deleted file mode 100644 index 99aab46e88d..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go +++ /dev/null @@ -1,134 +0,0 @@ -package pktline - -import ( - "errors" - "io" -) - -const ( - lenSize = 4 -) - -// ErrInvalidPktLen is returned by Err() when an invalid pkt-len is found. -var ErrInvalidPktLen = errors.New("invalid pkt-len found") - -// Scanner provides a convenient interface for reading the payloads of a -// series of pkt-lines. It takes an io.Reader providing the source, -// which then can be tokenized through repeated calls to the Scan -// method. -// -// After each Scan call, the Bytes method will return the payload of the -// corresponding pkt-line on a shared buffer, which will be 65516 bytes -// or smaller. Flush pkt-lines are represented by empty byte slices. -// -// Scanning stops at EOF or the first I/O error. -type Scanner struct { - r io.Reader // The reader provided by the client - err error // Sticky error - payload []byte // Last pkt-payload - len [lenSize]byte // Last pkt-len -} - -// NewScanner returns a new Scanner to read from r. -func NewScanner(r io.Reader) *Scanner { - return &Scanner{ - r: r, - } -} - -// Err returns the first error encountered by the Scanner. -func (s *Scanner) Err() error { - return s.err -} - -// Scan advances the Scanner to the next pkt-line, whose payload will -// then be available through the Bytes method. Scanning stops at EOF -// or the first I/O error. After Scan returns false, the Err method -// will return any error that occurred during scanning, except that if -// it was io.EOF, Err will return nil. 
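The framing the Encoder above produces is simple enough to compute by hand: the total line length (payload plus the 4 hex digits themselves) as lowercase hex, then the payload; "0000" is the flush-pkt. A minimal sketch that omits the MaxPayloadSize check the Encoder performs:

package main

import "fmt"

// pktLine frames a payload as described above.
func pktLine(payload string) string {
	return fmt.Sprintf("%04x%s", len(payload)+4, payload)
}

func main() {
	fmt.Printf("%q\n", pktLine("hello\n")) // "000ahello\n"
	fmt.Printf("%q\n", "0000")             // flush-pkt: zero length, no payload
}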
-func (s *Scanner) Scan() bool { - var l int - l, s.err = s.readPayloadLen() - if s.err == io.EOF { - s.err = nil - return false - } - if s.err != nil { - return false - } - - if cap(s.payload) < l { - s.payload = make([]byte, 0, l) - } - - if _, s.err = io.ReadFull(s.r, s.payload[:l]); s.err != nil { - return false - } - s.payload = s.payload[:l] - - return true -} - -// Bytes returns the most recent payload generated by a call to Scan. -// The underlying array may point to data that will be overwritten by a -// subsequent call to Scan. It does no allocation. -func (s *Scanner) Bytes() []byte { - return s.payload -} - -// Method readPayloadLen returns the payload length by reading the -// pkt-len and subtracting the pkt-len size. -func (s *Scanner) readPayloadLen() (int, error) { - if _, err := io.ReadFull(s.r, s.len[:]); err != nil { - if err == io.ErrUnexpectedEOF { - return 0, ErrInvalidPktLen - } - - return 0, err - } - - n, err := hexDecode(s.len) - if err != nil { - return 0, err - } - - switch { - case n == 0: - return 0, nil - case n <= lenSize: - return 0, ErrInvalidPktLen - case n > OversizePayloadMax+lenSize: - return 0, ErrInvalidPktLen - default: - return n - lenSize, nil - } -} - -// Turns the hexadecimal representation of a number in a byte slice into -// a number. This function substitute strconv.ParseUint(string(buf), 16, -// 16) and/or hex.Decode, to avoid generating new strings, thus helping the -// GC. -func hexDecode(buf [lenSize]byte) (int, error) { - var ret int - for i := 0; i < lenSize; i++ { - n, err := asciiHexToByte(buf[i]) - if err != nil { - return 0, ErrInvalidPktLen - } - ret = 16*ret + int(n) - } - return ret, nil -} - -// turns the hexadecimal ascii representation of a byte into its -// numerical value. Example: from 'b' to 11 (0xb). -func asciiHexToByte(b byte) (byte, error) { - switch { - case b >= '0' && b <= '9': - return b - '0', nil - case b >= 'a' && b <= 'f': - return b - 'a' + 10, nil - default: - return 0, ErrInvalidPktLen - } -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/hash.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/hash.go deleted file mode 100644 index afc602a9ec8..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/hash.go +++ /dev/null @@ -1,83 +0,0 @@ -package plumbing - -import ( - "bytes" - "crypto/sha1" - "encoding/hex" - "hash" - "sort" - "strconv" -) - -// Hash SHA1 hashed content -type Hash [20]byte - -// ZeroHash is Hash with value zero -var ZeroHash Hash - -// ComputeHash compute the hash for a given ObjectType and content -func ComputeHash(t ObjectType, content []byte) Hash { - h := NewHasher(t, int64(len(content))) - h.Write(content) - return h.Sum() -} - -// NewHash return a new Hash from a hexadecimal hash representation -func NewHash(s string) Hash { - b, _ := hex.DecodeString(s) - - var h Hash - copy(h[:], b) - - return h -} - -func (h Hash) IsZero() bool { - var empty Hash - return h == empty -} - -func (h Hash) String() string { - return hex.EncodeToString(h[:]) -} - -type Hasher struct { - hash.Hash -} - -func NewHasher(t ObjectType, size int64) Hasher { - h := Hasher{sha1.New()} - h.Write(t.Bytes()) - h.Write([]byte(" ")) - h.Write([]byte(strconv.FormatInt(size, 10))) - h.Write([]byte{0}) - return h -} - -func (h Hasher) Sum() (hash Hash) { - copy(hash[:], h.Hash.Sum(nil)) - return -} - -// HashesSort sorts a slice of Hashes in increasing order. 
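NewHasher above seeds the SHA-1 with "<type> <size>\x00" before the content, which is exactly what makes these IDs line up with git's own. A standard-library sketch reproducing `git hash-object` for a blob:

package main

import (
	"crypto/sha1"
	"fmt"
)

func main() {
	content := []byte("hello\n")
	h := sha1.New()
	// Header: object type, a space, the decimal size, and a NUL byte.
	fmt.Fprintf(h, "blob %d\x00", len(content))
	h.Write(content)
	fmt.Printf("%x\n", h.Sum(nil)) // ce013625030ba8dba906f756967f9e9ca394464a
}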
-func HashesSort(a []Hash) { - sort.Sort(HashSlice(a)) -} - -// HashSlice attaches the methods of sort.Interface to []Hash, sorting in -// increasing order. -type HashSlice []Hash - -func (p HashSlice) Len() int { return len(p) } -func (p HashSlice) Less(i, j int) bool { return bytes.Compare(p[i][:], p[j][:]) < 0 } -func (p HashSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// IsHash returns true if the given string is a valid hash. -func IsHash(s string) bool { - if len(s) != 40 { - return false - } - - _, err := hex.DecodeString(s) - return err == nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/memory.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/memory.go deleted file mode 100644 index b8e1e1b817a..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/memory.go +++ /dev/null @@ -1,61 +0,0 @@ -package plumbing - -import ( - "bytes" - "io" - "io/ioutil" -) - -// MemoryObject on memory Object implementation -type MemoryObject struct { - t ObjectType - h Hash - cont []byte - sz int64 -} - -// Hash returns the object Hash, the hash is calculated on-the-fly the first -// time it's called, in all subsequent calls the same Hash is returned even -// if the type or the content have changed. The Hash is only generated if the -// size of the content is exactly the object size. -func (o *MemoryObject) Hash() Hash { - if o.h == ZeroHash && int64(len(o.cont)) == o.sz { - o.h = ComputeHash(o.t, o.cont) - } - - return o.h -} - -// Type return the ObjectType -func (o *MemoryObject) Type() ObjectType { return o.t } - -// SetType sets the ObjectType -func (o *MemoryObject) SetType(t ObjectType) { o.t = t } - -// Size return the size of the object -func (o *MemoryObject) Size() int64 { return o.sz } - -// SetSize set the object size, a content of the given size should be written -// afterwards -func (o *MemoryObject) SetSize(s int64) { o.sz = s } - -// Reader returns a ObjectReader used to read the object's content. -func (o *MemoryObject) Reader() (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewBuffer(o.cont)), nil -} - -// Writer returns a ObjectWriter used to write the object's content. -func (o *MemoryObject) Writer() (io.WriteCloser, error) { - return o, nil -} - -func (o *MemoryObject) Write(p []byte) (n int, err error) { - o.cont = append(o.cont, p...) - o.sz = int64(len(o.cont)) - - return len(p), nil -} - -// Close releases any resources consumed by the object when it is acting as a -// ObjectWriter. -func (o *MemoryObject) Close() error { return nil } diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object.go deleted file mode 100644 index 2655dee43e8..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object.go +++ /dev/null @@ -1,111 +0,0 @@ -// package plumbing implement the core interfaces and structs used by go-git -package plumbing - -import ( - "errors" - "io" -) - -var ( - ErrObjectNotFound = errors.New("object not found") - // ErrInvalidType is returned when an invalid object type is provided. - ErrInvalidType = errors.New("invalid object type") -) - -// Object is a generic representation of any git object -type EncodedObject interface { - Hash() Hash - Type() ObjectType - SetType(ObjectType) - Size() int64 - SetSize(int64) - Reader() (io.ReadCloser, error) - Writer() (io.WriteCloser, error) -} - -// DeltaObject is an EncodedObject representing a delta. 
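The Hasher deleted above reproduces git's object hashing: SHA-1 over the header "<type> <size>\x00" followed by the content. A sketch checking this against `git hash-object` (the expected value in the comment is quoted from memory, so treat it as illustrative):

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	// Hashes sha1("blob 6\x00hello\n"); `git hash-object` prints the
	// same value for a file containing "hello\n", which should be
	// ce013625030ba8dba906f756967f9e9ca394464a.
	h := plumbing.ComputeHash(plumbing.BlobObject, []byte("hello\n"))
	fmt.Println(h)
}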
-type DeltaObject interface { - EncodedObject - // BaseHash returns the hash of the object used as base for this delta. - BaseHash() Hash - // ActualHash returns the hash of the object after applying the delta. - ActualHash() Hash - // Size returns the size of the object after applying the delta. - ActualSize() int64 -} - -// ObjectType internal object type -// Integer values from 0 to 7 map to those exposed by git. -// AnyObject is used to represent any from 0 to 7. -type ObjectType int8 - -const ( - InvalidObject ObjectType = 0 - CommitObject ObjectType = 1 - TreeObject ObjectType = 2 - BlobObject ObjectType = 3 - TagObject ObjectType = 4 - // 5 reserved for future expansion - OFSDeltaObject ObjectType = 6 - REFDeltaObject ObjectType = 7 - - AnyObject ObjectType = -127 -) - -func (t ObjectType) String() string { - switch t { - case CommitObject: - return "commit" - case TreeObject: - return "tree" - case BlobObject: - return "blob" - case TagObject: - return "tag" - case OFSDeltaObject: - return "ofs-delta" - case REFDeltaObject: - return "ref-delta" - case AnyObject: - return "any" - default: - return "unknown" - } -} - -func (t ObjectType) Bytes() []byte { - return []byte(t.String()) -} - -// Valid returns true if t is a valid ObjectType. -func (t ObjectType) Valid() bool { - return t >= CommitObject && t <= REFDeltaObject -} - -// IsDelta returns true for any ObjectTyoe that represents a delta (i.e. -// REFDeltaObject or OFSDeltaObject). -func (t ObjectType) IsDelta() bool { - return t == REFDeltaObject || t == OFSDeltaObject -} - -// ParseObjectType parses a string representation of ObjectType. It returns an -// error on parse failure. -func ParseObjectType(value string) (typ ObjectType, err error) { - switch value { - case "commit": - typ = CommitObject - case "tree": - typ = TreeObject - case "blob": - typ = BlobObject - case "tag": - typ = TagObject - case "ofs-delta": - typ = OFSDeltaObject - case "ref-delta": - typ = REFDeltaObject - default: - err = ErrInvalidType - } - return -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/blob.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/blob.go deleted file mode 100644 index 8fb7576fa3f..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/blob.go +++ /dev/null @@ -1,144 +0,0 @@ -package object - -import ( - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -// Blob is used to store arbitrary data - it is generally a file. -type Blob struct { - // Hash of the blob. - Hash plumbing.Hash - // Size of the (uncompressed) blob. - Size int64 - - obj plumbing.EncodedObject -} - -// GetBlob gets a blob from an object storer and decodes it. -func GetBlob(s storer.EncodedObjectStorer, h plumbing.Hash) (*Blob, error) { - o, err := s.EncodedObject(plumbing.BlobObject, h) - if err != nil { - return nil, err - } - - return DecodeBlob(o) -} - -// DecodeObject decodes an encoded object into a *Blob. -func DecodeBlob(o plumbing.EncodedObject) (*Blob, error) { - b := &Blob{} - if err := b.Decode(o); err != nil { - return nil, err - } - - return b, nil -} - -// ID returns the object ID of the blob. The returned value will always match -// the current value of Blob.Hash. -// -// ID is present to fulfill the Object interface. -func (b *Blob) ID() plumbing.Hash { - return b.Hash -} - -// Type returns the type of object. It always returns plumbing.BlobObject. 
-// -// Type is present to fulfill the Object interface. -func (b *Blob) Type() plumbing.ObjectType { - return plumbing.BlobObject -} - -// Decode transforms a plumbing.EncodedObject into a Blob struct. -func (b *Blob) Decode(o plumbing.EncodedObject) error { - if o.Type() != plumbing.BlobObject { - return ErrUnsupportedObject - } - - b.Hash = o.Hash() - b.Size = o.Size() - b.obj = o - - return nil -} - -// Encode transforms a Blob into a plumbing.EncodedObject. -func (b *Blob) Encode(o plumbing.EncodedObject) (err error) { - o.SetType(plumbing.BlobObject) - - w, err := o.Writer() - if err != nil { - return err - } - - defer ioutil.CheckClose(w, &err) - - r, err := b.Reader() - if err != nil { - return err - } - - defer ioutil.CheckClose(r, &err) - - _, err = io.Copy(w, r) - return err -} - -// Reader returns a reader allow the access to the content of the blob -func (b *Blob) Reader() (io.ReadCloser, error) { - return b.obj.Reader() -} - -// BlobIter provides an iterator for a set of blobs. -type BlobIter struct { - storer.EncodedObjectIter - s storer.EncodedObjectStorer -} - -// NewBlobIter takes a storer.EncodedObjectStorer and a -// storer.EncodedObjectIter and returns a *BlobIter that iterates over all -// blobs contained in the storer.EncodedObjectIter. -// -// Any non-blob object returned by the storer.EncodedObjectIter is skipped. -func NewBlobIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *BlobIter { - return &BlobIter{iter, s} -} - -// Next moves the iterator to the next blob and returns a pointer to it. If -// there are no more blobs, it returns io.EOF. -func (iter *BlobIter) Next() (*Blob, error) { - for { - obj, err := iter.EncodedObjectIter.Next() - if err != nil { - return nil, err - } - - if obj.Type() != plumbing.BlobObject { - continue - } - - return DecodeBlob(obj) - } -} - -// ForEach call the cb function for each blob contained on this iter until -// an error happens or the end of the iter is reached. If ErrStop is sent -// the iteration is stop but no error is returned. The iterator is closed. -func (iter *BlobIter) ForEach(cb func(*Blob) error) error { - return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { - if obj.Type() != plumbing.BlobObject { - return nil - } - - b, err := DecodeBlob(obj) - if err != nil { - return err - } - - return cb(b) - }) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/change.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/change.go deleted file mode 100644 index c9d1615089f..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/change.go +++ /dev/null @@ -1,159 +0,0 @@ -package object - -import ( - "bytes" - "context" - "fmt" - "strings" - - "github.com/go-git/go-git/v5/utils/merkletrie" -) - -// Change values represent a detected change between two git trees. For -// modifications, From is the original status of the node and To is its -// final status. For insertions, From is the zero value and for -// deletions To is the zero value. -type Change struct { - From ChangeEntry - To ChangeEntry -} - -var empty ChangeEntry - -// Action returns the kind of action represented by the change, an -// insertion, a deletion or a modification. 
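The blob API deleted above exposes content through a lazily-opened Reader rather than a byte slice. A sketch of the typical read path, assuming a storer.EncodedObjectStorer obtained from an already-opened repository (catBlob is an invented name):

package example

import (
	"fmt"
	"io/ioutil"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/object"
	"github.com/go-git/go-git/v5/plumbing/storer"
)

// catBlob mimics `git cat-file blob <hash>`: it fetches the blob
// and copies its content out through the Reader shown above.
func catBlob(s storer.EncodedObjectStorer, h plumbing.Hash) error {
	b, err := object.GetBlob(s, h)
	if err != nil {
		return err
	}

	r, err := b.Reader()
	if err != nil {
		return err
	}
	defer r.Close()

	content, err := ioutil.ReadAll(r)
	if err != nil {
		return err
	}
	fmt.Printf("%s", content)
	return nil
}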
-func (c *Change) Action() (merkletrie.Action, error) { - if c.From == empty && c.To == empty { - return merkletrie.Action(0), - fmt.Errorf("malformed change: empty from and to") - } - - if c.From == empty { - return merkletrie.Insert, nil - } - - if c.To == empty { - return merkletrie.Delete, nil - } - - return merkletrie.Modify, nil -} - -// Files return the files before and after a change. -// For insertions from will be nil. For deletions to will be nil. -func (c *Change) Files() (from, to *File, err error) { - action, err := c.Action() - if err != nil { - return - } - - if action == merkletrie.Insert || action == merkletrie.Modify { - to, err = c.To.Tree.TreeEntryFile(&c.To.TreeEntry) - if !c.To.TreeEntry.Mode.IsFile() { - return nil, nil, nil - } - - if err != nil { - return - } - } - - if action == merkletrie.Delete || action == merkletrie.Modify { - from, err = c.From.Tree.TreeEntryFile(&c.From.TreeEntry) - if !c.From.TreeEntry.Mode.IsFile() { - return nil, nil, nil - } - - if err != nil { - return - } - } - - return -} - -func (c *Change) String() string { - action, err := c.Action() - if err != nil { - return fmt.Sprintf("malformed change") - } - - return fmt.Sprintf("", action, c.name()) -} - -// Patch returns a Patch with all the file changes in chunks. This -// representation can be used to create several diff outputs. -func (c *Change) Patch() (*Patch, error) { - return c.PatchContext(context.Background()) -} - -// Patch returns a Patch with all the file changes in chunks. This -// representation can be used to create several diff outputs. -// If context expires, an non-nil error will be returned -// Provided context must be non-nil -func (c *Change) PatchContext(ctx context.Context) (*Patch, error) { - return getPatchContext(ctx, "", c) -} - -func (c *Change) name() string { - if c.From != empty { - return c.From.Name - } - - return c.To.Name -} - -// ChangeEntry values represent a node that has suffered a change. -type ChangeEntry struct { - // Full path of the node using "/" as separator. - Name string - // Parent tree of the node that has changed. - Tree *Tree - // The entry of the node. - TreeEntry TreeEntry -} - -// Changes represents a collection of changes between two git trees. -// Implements sort.Interface lexicographically over the path of the -// changed files. -type Changes []*Change - -func (c Changes) Len() int { - return len(c) -} - -func (c Changes) Swap(i, j int) { - c[i], c[j] = c[j], c[i] -} - -func (c Changes) Less(i, j int) bool { - return strings.Compare(c[i].name(), c[j].name()) < 0 -} - -func (c Changes) String() string { - var buffer bytes.Buffer - buffer.WriteString("[") - comma := "" - for _, v := range c { - buffer.WriteString(comma) - buffer.WriteString(v.String()) - comma = ", " - } - buffer.WriteString("]") - - return buffer.String() -} - -// Patch returns a Patch with all the changes in chunks. This -// representation can be used to create several diff outputs. -func (c Changes) Patch() (*Patch, error) { - return c.PatchContext(context.Background()) -} - -// Patch returns a Patch with all the changes in chunks. This -// representation can be used to create several diff outputs. -// If context expires, an non-nil error will be returned -// Provided context must be non-nil -func (c Changes) PatchContext(ctx context.Context) (*Patch, error) { - return getPatchContext(ctx, "", c...) 
-} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/change_adaptor.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/change_adaptor.go deleted file mode 100644 index f701188288f..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/change_adaptor.go +++ /dev/null @@ -1,61 +0,0 @@ -package object - -import ( - "errors" - "fmt" - - "github.com/go-git/go-git/v5/utils/merkletrie" - "github.com/go-git/go-git/v5/utils/merkletrie/noder" -) - -// The following functions transform changes types form the merkletrie -// package to changes types from this package. - -func newChange(c merkletrie.Change) (*Change, error) { - ret := &Change{} - - var err error - if ret.From, err = newChangeEntry(c.From); err != nil { - return nil, fmt.Errorf("From field: %s", err) - } - - if ret.To, err = newChangeEntry(c.To); err != nil { - return nil, fmt.Errorf("To field: %s", err) - } - - return ret, nil -} - -func newChangeEntry(p noder.Path) (ChangeEntry, error) { - if p == nil { - return empty, nil - } - - asTreeNoder, ok := p.Last().(*treeNoder) - if !ok { - return ChangeEntry{}, errors.New("cannot transform non-TreeNoders") - } - - return ChangeEntry{ - Name: p.String(), - Tree: asTreeNoder.parent, - TreeEntry: TreeEntry{ - Name: asTreeNoder.name, - Mode: asTreeNoder.mode, - Hash: asTreeNoder.hash, - }, - }, nil -} - -func newChanges(src merkletrie.Changes) (Changes, error) { - ret := make(Changes, len(src)) - var err error - for i, e := range src { - ret[i], err = newChange(e) - if err != nil { - return nil, fmt.Errorf("change #%d: %s", i, err) - } - } - - return ret, nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go deleted file mode 100644 index 113cb29e54e..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go +++ /dev/null @@ -1,442 +0,0 @@ -package object - -import ( - "bufio" - "bytes" - "context" - "errors" - "fmt" - "io" - "strings" - - "golang.org/x/crypto/openpgp" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -const ( - beginpgp string = "-----BEGIN PGP SIGNATURE-----" - endpgp string = "-----END PGP SIGNATURE-----" - headerpgp string = "gpgsig" -) - -// Hash represents the hash of an object -type Hash plumbing.Hash - -// Commit points to a single tree, marking it as what the project looked like -// at a certain point in time. It contains meta-information about that point -// in time, such as a timestamp, the author of the changes since the last -// commit, a pointer to the previous commit(s), etc. -// http://shafiulazam.com/gitbook/1_the_git_object_model.html -type Commit struct { - // Hash of the commit object. - Hash plumbing.Hash - // Author is the original author of the commit. - Author Signature - // Committer is the one performing the commit, might be different from - // Author. - Committer Signature - // PGPSignature is the PGP signature of the commit. - PGPSignature string - // Message is the commit message, contains arbitrary text. - Message string - // TreeHash is the hash of the root tree of the commit. - TreeHash plumbing.Hash - // ParentHashes are the hashes of the parent commits of the commit. - ParentHashes []plumbing.Hash - - s storer.EncodedObjectStorer -} - -// GetCommit gets a commit from an object storer and decodes it. 
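change.go above encodes insert/delete/modify in which of From/To is the zero ChangeEntry, and Action resolves that into a merkletrie action. A consumer sketch (describeChanges is an illustrative name) mapping the three actions onto git-style status letters:

package example

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/object"
	"github.com/go-git/go-git/v5/utils/merkletrie"
)

// describeChanges prints one line per change, following the
// documented contract: From is zero for insertions and To is
// zero for deletions.
func describeChanges(changes object.Changes) error {
	for _, ch := range changes {
		action, err := ch.Action()
		if err != nil {
			return err
		}
		switch action {
		case merkletrie.Insert:
			fmt.Println("A", ch.To.Name)
		case merkletrie.Delete:
			fmt.Println("D", ch.From.Name)
		case merkletrie.Modify:
			fmt.Println("M", ch.To.Name)
		}
	}
	return nil
}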
-func GetCommit(s storer.EncodedObjectStorer, h plumbing.Hash) (*Commit, error) { - o, err := s.EncodedObject(plumbing.CommitObject, h) - if err != nil { - return nil, err - } - - return DecodeCommit(s, o) -} - -// DecodeCommit decodes an encoded object into a *Commit and associates it to -// the given object storer. -func DecodeCommit(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Commit, error) { - c := &Commit{s: s} - if err := c.Decode(o); err != nil { - return nil, err - } - - return c, nil -} - -// Tree returns the Tree from the commit. -func (c *Commit) Tree() (*Tree, error) { - return GetTree(c.s, c.TreeHash) -} - -// PatchContext returns the Patch between the actual commit and the provided one. -// Error will be return if context expires. Provided context must be non-nil. -// -// NOTE: Since version 5.1.0 the renames are correctly handled, the settings -// used are the recommended options DefaultDiffTreeOptions. -func (c *Commit) PatchContext(ctx context.Context, to *Commit) (*Patch, error) { - fromTree, err := c.Tree() - if err != nil { - return nil, err - } - - var toTree *Tree - if to != nil { - toTree, err = to.Tree() - if err != nil { - return nil, err - } - } - - return fromTree.PatchContext(ctx, toTree) -} - -// Patch returns the Patch between the actual commit and the provided one. -// -// NOTE: Since version 5.1.0 the renames are correctly handled, the settings -// used are the recommended options DefaultDiffTreeOptions. -func (c *Commit) Patch(to *Commit) (*Patch, error) { - return c.PatchContext(context.Background(), to) -} - -// Parents return a CommitIter to the parent Commits. -func (c *Commit) Parents() CommitIter { - return NewCommitIter(c.s, - storer.NewEncodedObjectLookupIter(c.s, plumbing.CommitObject, c.ParentHashes), - ) -} - -// NumParents returns the number of parents in a commit. -func (c *Commit) NumParents() int { - return len(c.ParentHashes) -} - -var ErrParentNotFound = errors.New("commit parent not found") - -// Parent returns the ith parent of a commit. -func (c *Commit) Parent(i int) (*Commit, error) { - if len(c.ParentHashes) == 0 || i > len(c.ParentHashes)-1 { - return nil, ErrParentNotFound - } - - return GetCommit(c.s, c.ParentHashes[i]) -} - -// File returns the file with the specified "path" in the commit and a -// nil error if the file exists. If the file does not exist, it returns -// a nil file and the ErrFileNotFound error. -func (c *Commit) File(path string) (*File, error) { - tree, err := c.Tree() - if err != nil { - return nil, err - } - - return tree.File(path) -} - -// Files returns a FileIter allowing to iterate over the Tree -func (c *Commit) Files() (*FileIter, error) { - tree, err := c.Tree() - if err != nil { - return nil, err - } - - return tree.Files(), nil -} - -// ID returns the object ID of the commit. The returned value will always match -// the current value of Commit.Hash. -// -// ID is present to fulfill the Object interface. -func (c *Commit) ID() plumbing.Hash { - return c.Hash -} - -// Type returns the type of object. It always returns plumbing.CommitObject. -// -// Type is present to fulfill the Object interface. -func (c *Commit) Type() plumbing.ObjectType { - return plumbing.CommitObject -} - -// Decode transforms a plumbing.EncodedObject into a Commit struct. 
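Tying the accessors above together, a sketch in the spirit of `git show <hash>`. Patch is directional, so diffing parent against child yields what the child introduced; Patch's String rendering comes from this package's patch.go, which is not part of this hunk:

package example

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/object"
	"github.com/go-git/go-git/v5/plumbing/storer"
)

// showCommit prints a commit's metadata and the patch it
// introduced relative to its first parent.
func showCommit(s storer.EncodedObjectStorer, h plumbing.Hash) error {
	c, err := object.GetCommit(s, h)
	if err != nil {
		return err
	}
	fmt.Printf("commit %s\nAuthor: %s\n\n%s\n", c.Hash, c.Author.String(), c.Message)

	if c.NumParents() == 0 {
		return nil // root commit: no parent to diff against
	}
	parent, err := c.Parent(0)
	if err != nil {
		return err
	}

	// parent.Patch(c) yields the changes c made on top of parent.
	patch, err := parent.Patch(c)
	if err != nil {
		return err
	}
	fmt.Println(patch)
	return nil
}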
-func (c *Commit) Decode(o plumbing.EncodedObject) (err error) { - if o.Type() != plumbing.CommitObject { - return ErrUnsupportedObject - } - - c.Hash = o.Hash() - - reader, err := o.Reader() - if err != nil { - return err - } - defer ioutil.CheckClose(reader, &err) - - r := bufPool.Get().(*bufio.Reader) - defer bufPool.Put(r) - r.Reset(reader) - - var message bool - var pgpsig bool - var msgbuf bytes.Buffer - for { - line, err := r.ReadBytes('\n') - if err != nil && err != io.EOF { - return err - } - - if pgpsig { - if len(line) > 0 && line[0] == ' ' { - line = bytes.TrimLeft(line, " ") - c.PGPSignature += string(line) - continue - } else { - pgpsig = false - } - } - - if !message { - line = bytes.TrimSpace(line) - if len(line) == 0 { - message = true - continue - } - - split := bytes.SplitN(line, []byte{' '}, 2) - - var data []byte - if len(split) == 2 { - data = split[1] - } - - switch string(split[0]) { - case "tree": - c.TreeHash = plumbing.NewHash(string(data)) - case "parent": - c.ParentHashes = append(c.ParentHashes, plumbing.NewHash(string(data))) - case "author": - c.Author.Decode(data) - case "committer": - c.Committer.Decode(data) - case headerpgp: - c.PGPSignature += string(data) + "\n" - pgpsig = true - } - } else { - msgbuf.Write(line) - } - - if err == io.EOF { - break - } - } - c.Message = msgbuf.String() - return nil -} - -// Encode transforms a Commit into a plumbing.EncodedObject. -func (b *Commit) Encode(o plumbing.EncodedObject) error { - return b.encode(o, true) -} - -// EncodeWithoutSignature export a Commit into a plumbing.EncodedObject without the signature (correspond to the payload of the PGP signature). -func (b *Commit) EncodeWithoutSignature(o plumbing.EncodedObject) error { - return b.encode(o, false) -} - -func (b *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) { - o.SetType(plumbing.CommitObject) - w, err := o.Writer() - if err != nil { - return err - } - - defer ioutil.CheckClose(w, &err) - - if _, err = fmt.Fprintf(w, "tree %s\n", b.TreeHash.String()); err != nil { - return err - } - - for _, parent := range b.ParentHashes { - if _, err = fmt.Fprintf(w, "parent %s\n", parent.String()); err != nil { - return err - } - } - - if _, err = fmt.Fprint(w, "author "); err != nil { - return err - } - - if err = b.Author.Encode(w); err != nil { - return err - } - - if _, err = fmt.Fprint(w, "\ncommitter "); err != nil { - return err - } - - if err = b.Committer.Encode(w); err != nil { - return err - } - - if b.PGPSignature != "" && includeSig { - if _, err = fmt.Fprint(w, "\n"+headerpgp+" "); err != nil { - return err - } - - // Split all the signature lines and re-write with a left padding and - // newline. Use join for this so it's clear that a newline should not be - // added after this section, as it will be added when the message is - // printed. - signature := strings.TrimSuffix(b.PGPSignature, "\n") - lines := strings.Split(signature, "\n") - if _, err = fmt.Fprint(w, strings.Join(lines, "\n ")); err != nil { - return err - } - } - - if _, err = fmt.Fprintf(w, "\n\n%s", b.Message); err != nil { - return err - } - - return err -} - -// Stats returns the stats of a commit. -func (c *Commit) Stats() (FileStats, error) { - return c.StatsContext(context.Background()) -} - -// StatsContext returns the stats of a commit. Error will be return if context -// expires. Provided context must be non-nil. 
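Decode above parses the canonical raw commit layout: one header per line up to the first blank line, then the free-form message, with armored-signature continuation lines marked by a leading space. For reference, the shape it consumes (all hashes and identities here are invented):

package example

// rawCommit sketches the layout Decode parses. "tree", "parent",
// "author", "committer" and "gpgsig" headers run until the first
// blank line; everything after it is the message.
const rawCommit = `tree 9c435a86e664be00db0d973e981425e4a3ef3f8d
parent 2275fa7d0c75d20103f90b0e1616937d5a9fc5e6
author Jane Doe <jane@example.com> 1609459200 +0000
committer Jane Doe <jane@example.com> 1609459200 +0000
gpgsig -----BEGIN PGP SIGNATURE-----
 (armored signature lines, each continued with a leading space)
 -----END PGP SIGNATURE-----

Subject line

Body of the commit message.`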
-func (c *Commit) StatsContext(ctx context.Context) (FileStats, error) { - fromTree, err := c.Tree() - if err != nil { - return nil, err - } - - toTree := &Tree{} - if c.NumParents() != 0 { - firstParent, err := c.Parents().Next() - if err != nil { - return nil, err - } - - toTree, err = firstParent.Tree() - if err != nil { - return nil, err - } - } - - patch, err := toTree.PatchContext(ctx, fromTree) - if err != nil { - return nil, err - } - - return getFileStatsFromFilePatches(patch.FilePatches()), nil -} - -func (c *Commit) String() string { - return fmt.Sprintf( - "%s %s\nAuthor: %s\nDate: %s\n\n%s\n", - plumbing.CommitObject, c.Hash, c.Author.String(), - c.Author.When.Format(DateFormat), indent(c.Message), - ) -} - -// Verify performs PGP verification of the commit with a provided armored -// keyring and returns openpgp.Entity associated with verifying key on success. -func (c *Commit) Verify(armoredKeyRing string) (*openpgp.Entity, error) { - keyRingReader := strings.NewReader(armoredKeyRing) - keyring, err := openpgp.ReadArmoredKeyRing(keyRingReader) - if err != nil { - return nil, err - } - - // Extract signature. - signature := strings.NewReader(c.PGPSignature) - - encoded := &plumbing.MemoryObject{} - // Encode commit components, excluding signature and get a reader object. - if err := c.EncodeWithoutSignature(encoded); err != nil { - return nil, err - } - er, err := encoded.Reader() - if err != nil { - return nil, err - } - - return openpgp.CheckArmoredDetachedSignature(keyring, er, signature) -} - -func indent(t string) string { - var output []string - for _, line := range strings.Split(t, "\n") { - if len(line) != 0 { - line = " " + line - } - - output = append(output, line) - } - - return strings.Join(output, "\n") -} - -// CommitIter is a generic closable interface for iterating over commits. -type CommitIter interface { - Next() (*Commit, error) - ForEach(func(*Commit) error) error - Close() -} - -// storerCommitIter provides an iterator from commits in an EncodedObjectStorer. -type storerCommitIter struct { - storer.EncodedObjectIter - s storer.EncodedObjectStorer -} - -// NewCommitIter takes a storer.EncodedObjectStorer and a -// storer.EncodedObjectIter and returns a CommitIter that iterates over all -// commits contained in the storer.EncodedObjectIter. -// -// Any non-commit object returned by the storer.EncodedObjectIter is skipped. -func NewCommitIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) CommitIter { - return &storerCommitIter{iter, s} -} - -// Next moves the iterator to the next commit and returns a pointer to it. If -// there are no more commits, it returns io.EOF. -func (iter *storerCommitIter) Next() (*Commit, error) { - obj, err := iter.EncodedObjectIter.Next() - if err != nil { - return nil, err - } - - return DecodeCommit(iter.s, obj) -} - -// ForEach call the cb function for each commit contained on this iter until -// an error appends or the end of the iter is reached. If ErrStop is sent -// the iteration is stopped but no error is returned. The iterator is closed. 
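A small consumer sketch for Stats above, in the spirit of `git show --stat`. FileStat's Name/Addition/Deletion fields live in this package's patch.go rather than in this hunk, so their use here is an assumption about that file:

package example

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/object"
)

// printStats prints a per-file change summary for a commit.
func printStats(c *object.Commit) error {
	stats, err := c.Stats()
	if err != nil {
		return err
	}
	for _, fs := range stats {
		fmt.Printf("%4d(+) %4d(-) %s\n", fs.Addition, fs.Deletion, fs.Name)
	}
	return nil
}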
-func (iter *storerCommitIter) ForEach(cb func(*Commit) error) error { - return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { - c, err := DecodeCommit(iter.s, obj) - if err != nil { - return err - } - - return cb(c) - }) -} - -func (iter *storerCommitIter) Close() { - iter.EncodedObjectIter.Close() -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker.go deleted file mode 100644 index a96b6a4cf0f..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker.go +++ /dev/null @@ -1,327 +0,0 @@ -package object - -import ( - "container/list" - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/storage" -) - -type commitPreIterator struct { - seenExternal map[plumbing.Hash]bool - seen map[plumbing.Hash]bool - stack []CommitIter - start *Commit -} - -// NewCommitPreorderIter returns a CommitIter that walks the commit history, -// starting at the given commit and visiting its parents in pre-order. -// The given callback will be called for each visited commit. Each commit will -// be visited only once. If the callback returns an error, walking will stop -// and will return the error. Other errors might be returned if the history -// cannot be traversed (e.g. missing objects). Ignore allows to skip some -// commits from being iterated. -func NewCommitPreorderIter( - c *Commit, - seenExternal map[plumbing.Hash]bool, - ignore []plumbing.Hash, -) CommitIter { - seen := make(map[plumbing.Hash]bool) - for _, h := range ignore { - seen[h] = true - } - - return &commitPreIterator{ - seenExternal: seenExternal, - seen: seen, - stack: make([]CommitIter, 0), - start: c, - } -} - -func (w *commitPreIterator) Next() (*Commit, error) { - var c *Commit - for { - if w.start != nil { - c = w.start - w.start = nil - } else { - current := len(w.stack) - 1 - if current < 0 { - return nil, io.EOF - } - - var err error - c, err = w.stack[current].Next() - if err == io.EOF { - w.stack = w.stack[:current] - continue - } - - if err != nil { - return nil, err - } - } - - if w.seen[c.Hash] || w.seenExternal[c.Hash] { - continue - } - - w.seen[c.Hash] = true - - if c.NumParents() > 0 { - w.stack = append(w.stack, filteredParentIter(c, w.seen)) - } - - return c, nil - } -} - -func filteredParentIter(c *Commit, seen map[plumbing.Hash]bool) CommitIter { - var hashes []plumbing.Hash - for _, h := range c.ParentHashes { - if !seen[h] { - hashes = append(hashes, h) - } - } - - return NewCommitIter(c.s, - storer.NewEncodedObjectLookupIter(c.s, plumbing.CommitObject, hashes), - ) -} - -func (w *commitPreIterator) ForEach(cb func(*Commit) error) error { - for { - c, err := w.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - - err = cb(c) - if err == storer.ErrStop { - break - } - if err != nil { - return err - } - } - - return nil -} - -func (w *commitPreIterator) Close() {} - -type commitPostIterator struct { - stack []*Commit - seen map[plumbing.Hash]bool -} - -// NewCommitPostorderIter returns a CommitIter that walks the commit -// history like WalkCommitHistory but in post-order. This means that after -// walking a merge commit, the merged commit will be walked before the base -// it was merged on. This can be useful if you wish to see the history in -// chronological order. Ignore allows to skip some commits from being iterated. 
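A usage sketch for the pre-order walker above, roughly `git log --oneline` (logHistory is an invented name):

package example

import (
	"fmt"
	"strings"

	"github.com/go-git/go-git/v5/plumbing/object"
)

// logHistory visits every ancestor of head exactly once, in the
// pre-order documented above, printing hash plus subject line.
func logHistory(head *object.Commit) error {
	iter := object.NewCommitPreorderIter(head, nil, nil)
	defer iter.Close()

	return iter.ForEach(func(c *object.Commit) error {
		subject := strings.SplitN(c.Message, "\n", 2)[0]
		fmt.Println(c.Hash, subject)
		return nil // return storer.ErrStop to end the walk early
	})
}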
-func NewCommitPostorderIter(c *Commit, ignore []plumbing.Hash) CommitIter { - seen := make(map[plumbing.Hash]bool) - for _, h := range ignore { - seen[h] = true - } - - return &commitPostIterator{ - stack: []*Commit{c}, - seen: seen, - } -} - -func (w *commitPostIterator) Next() (*Commit, error) { - for { - if len(w.stack) == 0 { - return nil, io.EOF - } - - c := w.stack[len(w.stack)-1] - w.stack = w.stack[:len(w.stack)-1] - - if w.seen[c.Hash] { - continue - } - - w.seen[c.Hash] = true - - return c, c.Parents().ForEach(func(p *Commit) error { - w.stack = append(w.stack, p) - return nil - }) - } -} - -func (w *commitPostIterator) ForEach(cb func(*Commit) error) error { - for { - c, err := w.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - - err = cb(c) - if err == storer.ErrStop { - break - } - if err != nil { - return err - } - } - - return nil -} - -func (w *commitPostIterator) Close() {} - -// commitAllIterator stands for commit iterator for all refs. -type commitAllIterator struct { - // currCommit points to the current commit. - currCommit *list.Element -} - -// NewCommitAllIter returns a new commit iterator for all refs. -// repoStorer is a repo Storer used to get commits and references. -// commitIterFunc is a commit iterator function, used to iterate through ref commits in chosen order -func NewCommitAllIter(repoStorer storage.Storer, commitIterFunc func(*Commit) CommitIter) (CommitIter, error) { - commitsPath := list.New() - commitsLookup := make(map[plumbing.Hash]*list.Element) - head, err := storer.ResolveReference(repoStorer, plumbing.HEAD) - if err == nil { - err = addReference(repoStorer, commitIterFunc, head, commitsPath, commitsLookup) - } - - if err != nil && err != plumbing.ErrReferenceNotFound { - return nil, err - } - - // add all references along with the HEAD - refIter, err := repoStorer.IterReferences() - if err != nil { - return nil, err - } - defer refIter.Close() - - for { - ref, err := refIter.Next() - if err == io.EOF { - break - } - - if err == plumbing.ErrReferenceNotFound { - continue - } - - if err != nil { - return nil, err - } - - if err = addReference(repoStorer, commitIterFunc, ref, commitsPath, commitsLookup); err != nil { - return nil, err - } - } - - return &commitAllIterator{commitsPath.Front()}, nil -} - -func addReference( - repoStorer storage.Storer, - commitIterFunc func(*Commit) CommitIter, - ref *plumbing.Reference, - commitsPath *list.List, - commitsLookup map[plumbing.Hash]*list.Element) error { - - _, exists := commitsLookup[ref.Hash()] - if exists { - // we already have it - skip the reference. - return nil - } - - refCommit, _ := GetCommit(repoStorer, ref.Hash()) - if refCommit == nil { - // if it's not a commit - skip it. 
- return nil - } - - var ( - refCommits []*Commit - parent *list.Element - ) - // collect all ref commits to add - commitIter := commitIterFunc(refCommit) - for c, e := commitIter.Next(); e == nil; { - parent, exists = commitsLookup[c.Hash] - if exists { - break - } - refCommits = append(refCommits, c) - c, e = commitIter.Next() - } - commitIter.Close() - - if parent == nil { - // common parent - not found - // add all commits to the path from this ref (maybe it's a HEAD and we don't have anything, yet) - for _, c := range refCommits { - parent = commitsPath.PushBack(c) - commitsLookup[c.Hash] = parent - } - } else { - // add ref's commits to the path in reverse order (from the latest) - for i := len(refCommits) - 1; i >= 0; i-- { - c := refCommits[i] - // insert before found common parent - parent = commitsPath.InsertBefore(c, parent) - commitsLookup[c.Hash] = parent - } - } - - return nil -} - -func (it *commitAllIterator) Next() (*Commit, error) { - if it.currCommit == nil { - return nil, io.EOF - } - - c := it.currCommit.Value.(*Commit) - it.currCommit = it.currCommit.Next() - - return c, nil -} - -func (it *commitAllIterator) ForEach(cb func(*Commit) error) error { - for { - c, err := it.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - - err = cb(c) - if err == storer.ErrStop { - break - } - if err != nil { - return err - } - } - - return nil -} - -func (it *commitAllIterator) Close() { - it.currCommit = nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs.go deleted file mode 100644 index 8047fa9bc0e..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs.go +++ /dev/null @@ -1,100 +0,0 @@ -package object - -import ( - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" -) - -type bfsCommitIterator struct { - seenExternal map[plumbing.Hash]bool - seen map[plumbing.Hash]bool - queue []*Commit -} - -// NewCommitIterBSF returns a CommitIter that walks the commit history, -// starting at the given commit and visiting its parents in pre-order. -// The given callback will be called for each visited commit. Each commit will -// be visited only once. If the callback returns an error, walking will stop -// and will return the error. Other errors might be returned if the history -// cannot be traversed (e.g. missing objects). Ignore allows to skip some -// commits from being iterated. 
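A sketch of NewCommitAllIter from above, paired with the committer-time walker declared in commit_walker_ctime.go further down. st is the storage.Storer of an already-opened repository, which is assumed to exist; opening one is out of scope here:

package example

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/object"
	"github.com/go-git/go-git/v5/storage"
)

// allCommits walks the commits reachable from HEAD and from every
// other reference, expanding each ref's history in committer-time
// order.
func allCommits(st storage.Storer) error {
	iter, err := object.NewCommitAllIter(st, func(c *object.Commit) object.CommitIter {
		return object.NewCommitIterCTime(c, nil, nil)
	})
	if err != nil {
		return err
	}
	defer iter.Close()

	return iter.ForEach(func(c *object.Commit) error {
		fmt.Println(c.Hash)
		return nil
	})
}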
-func NewCommitIterBSF( - c *Commit, - seenExternal map[plumbing.Hash]bool, - ignore []plumbing.Hash, -) CommitIter { - seen := make(map[plumbing.Hash]bool) - for _, h := range ignore { - seen[h] = true - } - - return &bfsCommitIterator{ - seenExternal: seenExternal, - seen: seen, - queue: []*Commit{c}, - } -} - -func (w *bfsCommitIterator) appendHash(store storer.EncodedObjectStorer, h plumbing.Hash) error { - if w.seen[h] || w.seenExternal[h] { - return nil - } - c, err := GetCommit(store, h) - if err != nil { - return err - } - w.queue = append(w.queue, c) - return nil -} - -func (w *bfsCommitIterator) Next() (*Commit, error) { - var c *Commit - for { - if len(w.queue) == 0 { - return nil, io.EOF - } - c = w.queue[0] - w.queue = w.queue[1:] - - if w.seen[c.Hash] || w.seenExternal[c.Hash] { - continue - } - - w.seen[c.Hash] = true - - for _, h := range c.ParentHashes { - err := w.appendHash(c.s, h) - if err != nil { - return nil, err - } - } - - return c, nil - } -} - -func (w *bfsCommitIterator) ForEach(cb func(*Commit) error) error { - for { - c, err := w.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - - err = cb(c) - if err == storer.ErrStop { - break - } - if err != nil { - return err - } - } - - return nil -} - -func (w *bfsCommitIterator) Close() {} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs_filtered.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs_filtered.go deleted file mode 100644 index e87c3dbbb8d..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs_filtered.go +++ /dev/null @@ -1,176 +0,0 @@ -package object - -import ( - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" -) - -// NewFilterCommitIter returns a CommitIter that walks the commit history, -// starting at the passed commit and visiting its parents in Breadth-first order. -// The commits returned by the CommitIter will validate the passed CommitFilter. -// The history won't be transversed beyond a commit if isLimit is true for it. -// Each commit will be visited only once. -// If the commit history can not be traversed, or the Close() method is called, -// the CommitIter won't return more commits. -// If no isValid is passed, all ancestors of from commit will be valid. -// If no isLimit is limit, all ancestors of all commits will be visited. -func NewFilterCommitIter( - from *Commit, - isValid *CommitFilter, - isLimit *CommitFilter, -) CommitIter { - var validFilter CommitFilter - if isValid == nil { - validFilter = func(_ *Commit) bool { - return true - } - } else { - validFilter = *isValid - } - - var limitFilter CommitFilter - if isLimit == nil { - limitFilter = func(_ *Commit) bool { - return false - } - } else { - limitFilter = *isLimit - } - - return &filterCommitIter{ - isValid: validFilter, - isLimit: limitFilter, - visited: map[plumbing.Hash]struct{}{}, - queue: []*Commit{from}, - } -} - -// CommitFilter returns a boolean for the passed Commit -type CommitFilter func(*Commit) bool - -// filterCommitIter implements CommitIter -type filterCommitIter struct { - isValid CommitFilter - isLimit CommitFilter - visited map[plumbing.Hash]struct{} - queue []*Commit - lastErr error -} - -// Next returns the next commit of the CommitIter. -// It will return io.EOF if there are no more commits to visit, -// or an error if the history could not be traversed. 
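A sketch of the isValid/isLimit contract from NewFilterCommitIter above: isValid selects what the iterator yields, isLimit prunes traversal, and a nil isLimit walks the whole history (mergeCommitsOnly is an invented name):

package example

import "github.com/go-git/go-git/v5/plumbing/object"

// mergeCommitsOnly walks the ancestors of from in breadth-first
// order but only yields merge commits (more than one parent).
func mergeCommitsOnly(from *object.Commit) object.CommitIter {
	var isMerge object.CommitFilter = func(c *object.Commit) bool {
		return c.NumParents() > 1
	}
	return object.NewFilterCommitIter(from, &isMerge, nil)
}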
-func (w *filterCommitIter) Next() (*Commit, error) { - var commit *Commit - var err error - for { - commit, err = w.popNewFromQueue() - if err != nil { - return nil, w.close(err) - } - - w.visited[commit.Hash] = struct{}{} - - if !w.isLimit(commit) { - err = w.addToQueue(commit.s, commit.ParentHashes...) - if err != nil { - return nil, w.close(err) - } - } - - if w.isValid(commit) { - return commit, nil - } - } -} - -// ForEach runs the passed callback over each Commit returned by the CommitIter -// until the callback returns an error or there is no more commits to traverse. -func (w *filterCommitIter) ForEach(cb func(*Commit) error) error { - for { - commit, err := w.Next() - if err == io.EOF { - break - } - - if err != nil { - return err - } - - if err := cb(commit); err == storer.ErrStop { - break - } else if err != nil { - return err - } - } - - return nil -} - -// Error returns the error that caused that the CommitIter is no longer returning commits -func (w *filterCommitIter) Error() error { - return w.lastErr -} - -// Close closes the CommitIter -func (w *filterCommitIter) Close() { - w.visited = map[plumbing.Hash]struct{}{} - w.queue = []*Commit{} - w.isLimit = nil - w.isValid = nil -} - -// close closes the CommitIter with an error -func (w *filterCommitIter) close(err error) error { - w.Close() - w.lastErr = err - return err -} - -// popNewFromQueue returns the first new commit from the internal fifo queue, -// or an io.EOF error if the queue is empty -func (w *filterCommitIter) popNewFromQueue() (*Commit, error) { - var first *Commit - for { - if len(w.queue) == 0 { - if w.lastErr != nil { - return nil, w.lastErr - } - - return nil, io.EOF - } - - first = w.queue[0] - w.queue = w.queue[1:] - if _, ok := w.visited[first.Hash]; ok { - continue - } - - return first, nil - } -} - -// addToQueue adds the passed commits to the internal fifo queue if they weren't seen -// or returns an error if the passed hashes could not be used to get valid commits -func (w *filterCommitIter) addToQueue( - store storer.EncodedObjectStorer, - hashes ...plumbing.Hash, -) error { - for _, hash := range hashes { - if _, ok := w.visited[hash]; ok { - continue - } - - commit, err := GetCommit(store, hash) - if err != nil { - return err - } - - w.queue = append(w.queue, commit) - } - - return nil -} - diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_ctime.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_ctime.go deleted file mode 100644 index fbddf1d238f..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_ctime.go +++ /dev/null @@ -1,103 +0,0 @@ -package object - -import ( - "io" - - "github.com/emirpasic/gods/trees/binaryheap" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" -) - -type commitIteratorByCTime struct { - seenExternal map[plumbing.Hash]bool - seen map[plumbing.Hash]bool - heap *binaryheap.Heap -} - -// NewCommitIterCTime returns a CommitIter that walks the commit history, -// starting at the given commit and visiting its parents while preserving Committer Time order. -// this appears to be the closest order to `git log` -// The given callback will be called for each visited commit. Each commit will -// be visited only once. If the callback returns an error, walking will stop -// and will return the error. Other errors might be returned if the history -// cannot be traversed (e.g. missing objects). 
Ignore allows to skip some -// commits from being iterated. -func NewCommitIterCTime( - c *Commit, - seenExternal map[plumbing.Hash]bool, - ignore []plumbing.Hash, -) CommitIter { - seen := make(map[plumbing.Hash]bool) - for _, h := range ignore { - seen[h] = true - } - - heap := binaryheap.NewWith(func(a, b interface{}) int { - if a.(*Commit).Committer.When.Before(b.(*Commit).Committer.When) { - return 1 - } - return -1 - }) - heap.Push(c) - - return &commitIteratorByCTime{ - seenExternal: seenExternal, - seen: seen, - heap: heap, - } -} - -func (w *commitIteratorByCTime) Next() (*Commit, error) { - var c *Commit - for { - cIn, ok := w.heap.Pop() - if !ok { - return nil, io.EOF - } - c = cIn.(*Commit) - - if w.seen[c.Hash] || w.seenExternal[c.Hash] { - continue - } - - w.seen[c.Hash] = true - - for _, h := range c.ParentHashes { - if w.seen[h] || w.seenExternal[h] { - continue - } - pc, err := GetCommit(c.s, h) - if err != nil { - return nil, err - } - w.heap.Push(pc) - } - - return c, nil - } -} - -func (w *commitIteratorByCTime) ForEach(cb func(*Commit) error) error { - for { - c, err := w.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - - err = cb(c) - if err == storer.ErrStop { - break - } - if err != nil { - return err - } - } - - return nil -} - -func (w *commitIteratorByCTime) Close() {} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_limit.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_limit.go deleted file mode 100644 index ac56a71c41a..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_limit.go +++ /dev/null @@ -1,65 +0,0 @@ -package object - -import ( - "io" - "time" - - "github.com/go-git/go-git/v5/plumbing/storer" -) - -type commitLimitIter struct { - sourceIter CommitIter - limitOptions LogLimitOptions -} - -type LogLimitOptions struct { - Since *time.Time - Until *time.Time -} - -func NewCommitLimitIterFromIter(commitIter CommitIter, limitOptions LogLimitOptions) CommitIter { - iterator := new(commitLimitIter) - iterator.sourceIter = commitIter - iterator.limitOptions = limitOptions - return iterator -} - -func (c *commitLimitIter) Next() (*Commit, error) { - for { - commit, err := c.sourceIter.Next() - if err != nil { - return nil, err - } - - if c.limitOptions.Since != nil && commit.Committer.When.Before(*c.limitOptions.Since) { - continue - } - if c.limitOptions.Until != nil && commit.Committer.When.After(*c.limitOptions.Until) { - continue - } - return commit, nil - } -} - -func (c *commitLimitIter) ForEach(cb func(*Commit) error) error { - for { - commit, nextErr := c.Next() - if nextErr == io.EOF { - break - } - if nextErr != nil { - return nextErr - } - err := cb(commit) - if err == storer.ErrStop { - return nil - } else if err != nil { - return err - } - } - return nil -} - -func (c *commitLimitIter) Close() { - c.sourceIter.Close() -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go deleted file mode 100644 index af6f745d21d..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go +++ /dev/null @@ -1,161 +0,0 @@ -package object - -import ( - "io" - - "github.com/go-git/go-git/v5/plumbing" - - "github.com/go-git/go-git/v5/plumbing/storer" -) - -type commitPathIter struct { - pathFilter func(string) bool - sourceIter 
CommitIter - currentCommit *Commit - checkParent bool -} - -// NewCommitPathIterFromIter returns a commit iterator which performs diffTree between -// successive trees returned from the commit iterator from the argument. The purpose of this is -// to find the commits that explain how the files that match the path came to be. -// If checkParent is true then the function double checks if potential parent (next commit in a path) -// is one of the parents in the tree (it's used by `git log --all`). -// pathFilter is a function that takes path of file as argument and returns true if we want it -func NewCommitPathIterFromIter(pathFilter func(string) bool, commitIter CommitIter, checkParent bool) CommitIter { - iterator := new(commitPathIter) - iterator.sourceIter = commitIter - iterator.pathFilter = pathFilter - iterator.checkParent = checkParent - return iterator -} - -// this function is kept for compatibilty, can be replaced with NewCommitPathIterFromIter -func NewCommitFileIterFromIter(fileName string, commitIter CommitIter, checkParent bool) CommitIter { - return NewCommitPathIterFromIter( - func(path string) bool { - return path == fileName - }, - commitIter, - checkParent, - ) -} - -func (c *commitPathIter) Next() (*Commit, error) { - if c.currentCommit == nil { - var err error - c.currentCommit, err = c.sourceIter.Next() - if err != nil { - return nil, err - } - } - commit, commitErr := c.getNextFileCommit() - - // Setting current-commit to nil to prevent unwanted states when errors are raised - if commitErr != nil { - c.currentCommit = nil - } - return commit, commitErr -} - -func (c *commitPathIter) getNextFileCommit() (*Commit, error) { - for { - // Parent-commit can be nil if the current-commit is the initial commit - parentCommit, parentCommitErr := c.sourceIter.Next() - if parentCommitErr != nil { - // If the parent-commit is beyond the initial commit, keep it nil - if parentCommitErr != io.EOF { - return nil, parentCommitErr - } - parentCommit = nil - } - - // Fetch the trees of the current and parent commits - currentTree, currTreeErr := c.currentCommit.Tree() - if currTreeErr != nil { - return nil, currTreeErr - } - - var parentTree *Tree - if parentCommit != nil { - var parentTreeErr error - parentTree, parentTreeErr = parentCommit.Tree() - if parentTreeErr != nil { - return nil, parentTreeErr - } - } - - // Find diff between current and parent trees - changes, diffErr := DiffTree(currentTree, parentTree) - if diffErr != nil { - return nil, diffErr - } - - found := c.hasFileChange(changes, parentCommit) - - // Storing the current-commit in-case a change is found, and - // Updating the current-commit for the next-iteration - prevCommit := c.currentCommit - c.currentCommit = parentCommit - - if found { - return prevCommit, nil - } - - // If not matches found and if parent-commit is beyond the initial commit, then return with EOF - if parentCommit == nil { - return nil, io.EOF - } - } -} - -func (c *commitPathIter) hasFileChange(changes Changes, parent *Commit) bool { - for _, change := range changes { - if !c.pathFilter(change.name()) { - continue - } - - // filename matches, now check if source iterator contains all commits (from all refs) - if c.checkParent { - if parent != nil && isParentHash(parent.Hash, c.currentCommit) { - return true - } - continue - } - - return true - } - - return false -} - -func isParentHash(hash plumbing.Hash, commit *Commit) bool { - for _, h := range commit.ParentHashes { - if h == hash { - return true - } - } - return false -} - -func (c 
*commitPathIter) ForEach(cb func(*Commit) error) error { - for { - commit, nextErr := c.Next() - if nextErr == io.EOF { - break - } - if nextErr != nil { - return nextErr - } - err := cb(commit) - if err == storer.ErrStop { - return nil - } else if err != nil { - return err - } - } - return nil -} - -func (c *commitPathIter) Close() { - c.sourceIter.Close() -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/common.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/common.go deleted file mode 100644 index 3591f5f0a60..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/common.go +++ /dev/null @@ -1,12 +0,0 @@ -package object - -import ( - "bufio" - "sync" -) - -var bufPool = sync.Pool{ - New: func() interface{} { - return bufio.NewReader(nil) - }, -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/difftree.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/difftree.go deleted file mode 100644 index 7c2222702ce..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/difftree.go +++ /dev/null @@ -1,98 +0,0 @@ -package object - -import ( - "bytes" - "context" - - "github.com/go-git/go-git/v5/utils/merkletrie" - "github.com/go-git/go-git/v5/utils/merkletrie/noder" -) - -// DiffTree compares the content and mode of the blobs found via two -// tree objects. -// DiffTree does not perform rename detection, use DiffTreeWithOptions -// instead to detect renames. -func DiffTree(a, b *Tree) (Changes, error) { - return DiffTreeContext(context.Background(), a, b) -} - -// DiffTreeContext compares the content and mode of the blobs found via two -// tree objects. Provided context must be non-nil. -// An error will be returned if context expires. -func DiffTreeContext(ctx context.Context, a, b *Tree) (Changes, error) { - return DiffTreeWithOptions(ctx, a, b, nil) -} - -// DiffTreeOptions are the configurable options when performing a diff tree. -type DiffTreeOptions struct { - // DetectRenames is whether the diff tree will use rename detection. - DetectRenames bool - // RenameScore is the threshold to of similarity between files to consider - // that a pair of delete and insert are a rename. The number must be - // exactly between 0 and 100. - RenameScore uint - // RenameLimit is the maximum amount of files that can be compared when - // detecting renames. The number of comparisons that have to be performed - // is equal to the number of deleted files * the number of added files. - // That means, that if 100 files were deleted and 50 files were added, 5000 - // file comparisons may be needed. So, if the rename limit is 50, the number - // of both deleted and added needs to be equal or less than 50. - // A value of 0 means no limit. - RenameLimit uint - // OnlyExactRenames performs only detection of exact renames and will not perform - // any detection of renames based on file similarity. - OnlyExactRenames bool -} - -// DefaultDiffTreeOptions are the default and recommended options for the -// diff tree. -var DefaultDiffTreeOptions = &DiffTreeOptions{ - DetectRenames: true, - RenameScore: 60, - RenameLimit: 0, - OnlyExactRenames: false, -} - -// DiffTreeWithOptions compares the content and mode of the blobs found -// via two tree objects with the given options. The provided context -// must be non-nil. -// If no options are passed, no rename detection will be performed. The -// recommended options are DefaultDiffTreeOptions. 
-// An error will be returned if the context expires. -// This function will be deprecated and removed in v6 so the default -// behaviour of DiffTree is to detect renames. -func DiffTreeWithOptions( - ctx context.Context, - a, b *Tree, - opts *DiffTreeOptions, -) (Changes, error) { - from := NewTreeRootNode(a) - to := NewTreeRootNode(b) - - hashEqual := func(a, b noder.Hasher) bool { - return bytes.Equal(a.Hash(), b.Hash()) - } - - merkletrieChanges, err := merkletrie.DiffTreeContext(ctx, from, to, hashEqual) - if err != nil { - if err == merkletrie.ErrCanceled { - return nil, ErrCanceled - } - return nil, err - } - - changes, err := newChanges(merkletrieChanges) - if err != nil { - return nil, err - } - - if opts == nil { - opts = new(DiffTreeOptions) - } - - if opts.DetectRenames { - return DetectRenames(changes, opts) - } - - return changes, nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/file.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/file.go deleted file mode 100644 index 6cc5367d8d6..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/file.go +++ /dev/null @@ -1,137 +0,0 @@ -package object - -import ( - "bytes" - "io" - "strings" - - "github.com/go-git/go-git/v5/plumbing/filemode" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/utils/binary" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -// File represents git file objects. -type File struct { - // Name is the path of the file. It might be relative to a tree, - // depending of the function that generates it. - Name string - // Mode is the file mode. - Mode filemode.FileMode - // Blob with the contents of the file. - Blob -} - -// NewFile returns a File based on the given blob object -func NewFile(name string, m filemode.FileMode, b *Blob) *File { - return &File{Name: name, Mode: m, Blob: *b} -} - -// Contents returns the contents of a file as a string. -func (f *File) Contents() (content string, err error) { - reader, err := f.Reader() - if err != nil { - return "", err - } - defer ioutil.CheckClose(reader, &err) - - buf := new(bytes.Buffer) - if _, err := buf.ReadFrom(reader); err != nil { - return "", err - } - - return buf.String(), nil -} - -// IsBinary returns if the file is binary or not -func (f *File) IsBinary() (bin bool, err error) { - reader, err := f.Reader() - if err != nil { - return false, err - } - defer ioutil.CheckClose(reader, &err) - - return binary.IsBinary(reader) -} - -// Lines returns a slice of lines from the contents of a file, stripping -// all end of line characters. If the last line is empty (does not end -// in an end of line), it is also stripped. -func (f *File) Lines() ([]string, error) { - content, err := f.Contents() - if err != nil { - return nil, err - } - - splits := strings.Split(content, "\n") - // remove the last line if it is empty - if splits[len(splits)-1] == "" { - return splits[:len(splits)-1], nil - } - - return splits, nil -} - -// FileIter provides an iterator for the files in a tree. -type FileIter struct { - s storer.EncodedObjectStorer - w TreeWalker -} - -// NewFileIter takes a storer.EncodedObjectStorer and a Tree and returns a -// *FileIter that iterates over all files contained in the tree, recursively. -func NewFileIter(s storer.EncodedObjectStorer, t *Tree) *FileIter { - return &FileIter{s: s, w: *NewTreeWalker(t, true, nil)} -} - -// Next moves the iterator to the next file and returns a pointer to it. 
If
-// there are no more files, it returns io.EOF.
-func (iter *FileIter) Next() (*File, error) {
-	for {
-		name, entry, err := iter.w.Next()
-		if err != nil {
-			return nil, err
-		}
-
-		if entry.Mode == filemode.Dir || entry.Mode == filemode.Submodule {
-			continue
-		}
-
-		blob, err := GetBlob(iter.s, entry.Hash)
-		if err != nil {
-			return nil, err
-		}
-
-		return NewFile(name, entry.Mode, blob), nil
-	}
-}
-
-// ForEach calls the cb function for each file contained in this iter until
-// an error happens or the end of the iter is reached. If plumbing.ErrStop is sent
-// the iteration is stopped but no error is returned. The iterator is closed.
-func (iter *FileIter) ForEach(cb func(*File) error) error {
-	defer iter.Close()
-
-	for {
-		f, err := iter.Next()
-		if err != nil {
-			if err == io.EOF {
-				return nil
-			}
-
-			return err
-		}
-
-		if err := cb(f); err != nil {
-			if err == storer.ErrStop {
-				return nil
-			}
-
-			return err
-		}
-	}
-}
-
-func (iter *FileIter) Close() {
-	iter.w.Close()
-}
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/merge_base.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/merge_base.go
deleted file mode 100644
index b412361d029..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/merge_base.go
+++ /dev/null
@@ -1,210 +0,0 @@
-package object
-
-import (
-	"fmt"
-	"sort"
-
-	"github.com/go-git/go-git/v5/plumbing"
-	"github.com/go-git/go-git/v5/plumbing/storer"
-)
-
-// errIsReachable is thrown when first commit is an ancestor of the second
-var errIsReachable = fmt.Errorf("first is reachable from second")
-
-// MergeBase mimics the behavior of `git merge-base actual other`, returning the
-// best common ancestor between the actual and the passed one.
-// The best common ancestors cannot be reached from other common ancestors.
-func (c *Commit) MergeBase(other *Commit) ([]*Commit, error) {
-	// use sortedByCommitDateDesc strategy
-	sorted := sortByCommitDateDesc(c, other)
-	newer := sorted[0]
-	older := sorted[1]
-
-	newerHistory, err := ancestorsIndex(older, newer)
-	if err == errIsReachable {
-		return []*Commit{older}, nil
-	}
-
-	if err != nil {
-		return nil, err
-	}
-
-	var res []*Commit
-	inNewerHistory := isInIndexCommitFilter(newerHistory)
-	resIter := NewFilterCommitIter(older, &inNewerHistory, &inNewerHistory)
-	_ = resIter.ForEach(func(commit *Commit) error {
-		res = append(res, commit)
-		return nil
-	})
-
-	return Independents(res)
-}
-
-// IsAncestor returns true if the actual commit is an ancestor of the passed one.
-// It returns an error if the history is not traversable.
-// It mimics the behavior of `git merge --is-ancestor actual other`.
-func (c *Commit) IsAncestor(other *Commit) (bool, error) {
-	found := false
-	iter := NewCommitPreorderIter(other, nil, nil)
-	err := iter.ForEach(func(comm *Commit) error {
-		if comm.Hash != c.Hash {
-			return nil
-		}
-
-		found = true
-		return storer.ErrStop
-	})
-
-	return found, err
-}
-
-// ancestorsIndex returns a map with the ancestors of the starting commit if the
-// excluded one is not one of them. It returns errIsReachable if the excluded commit
-// is an ancestor of the starting one, or another error if the history is not traversable.
-func ancestorsIndex(excluded, starting *Commit) (map[plumbing.Hash]struct{}, error) { - if excluded.Hash.String() == starting.Hash.String() { - return nil, errIsReachable - } - - startingHistory := map[plumbing.Hash]struct{}{} - startingIter := NewCommitIterBSF(starting, nil, nil) - err := startingIter.ForEach(func(commit *Commit) error { - if commit.Hash == excluded.Hash { - return errIsReachable - } - - startingHistory[commit.Hash] = struct{}{} - return nil - }) - - if err != nil { - return nil, err - } - - return startingHistory, nil -} - -// Independents returns a subset of the passed commits, that are not reachable the others -// It mimics the behavior of `git merge-base --independent commit...`. -func Independents(commits []*Commit) ([]*Commit, error) { - // use sortedByCommitDateDesc strategy - candidates := sortByCommitDateDesc(commits...) - candidates = removeDuplicated(candidates) - - seen := map[plumbing.Hash]struct{}{} - var isLimit CommitFilter = func(commit *Commit) bool { - _, ok := seen[commit.Hash] - return ok - } - - if len(candidates) < 2 { - return candidates, nil - } - - pos := 0 - for { - from := candidates[pos] - others := remove(candidates, from) - fromHistoryIter := NewFilterCommitIter(from, nil, &isLimit) - err := fromHistoryIter.ForEach(func(fromAncestor *Commit) error { - for _, other := range others { - if fromAncestor.Hash == other.Hash { - candidates = remove(candidates, other) - others = remove(others, other) - } - } - - if len(candidates) == 1 { - return storer.ErrStop - } - - seen[fromAncestor.Hash] = struct{}{} - return nil - }) - - if err != nil { - return nil, err - } - - nextPos := indexOf(candidates, from) + 1 - if nextPos >= len(candidates) { - break - } - - pos = nextPos - } - - return candidates, nil -} - -// sortByCommitDateDesc returns the passed commits, sorted by `committer.When desc` -// -// Following this strategy, it is tried to reduce the time needed when walking -// the history from one commit to reach the others. It is assumed that ancestors -// use to be committed before its descendant; -// That way `Independents(A^, A)` will be processed as being `Independents(A, A^)`; -// so starting by `A` it will be reached `A^` way sooner than walking from `A^` -// to the initial commit, and then from `A` to `A^`. 
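-//
-// A minimal usage sketch of the public entry points built on this strategy
-// (illustrative only, not part of the original file; `a` and `b` are assumed
-// to be *Commit values loaded from the same repository):
-//
-//	bases, err := a.MergeBase(b)
-//	if err != nil {
-//		// handle the error
-//	}
-//	for _, base := range bases {
-//		fmt.Println(base.Hash) // best common ancestor(s) of a and b
-//	}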
-func sortByCommitDateDesc(commits ...*Commit) []*Commit { - sorted := make([]*Commit, len(commits)) - copy(sorted, commits) - sort.Slice(sorted, func(i, j int) bool { - return sorted[i].Committer.When.After(sorted[j].Committer.When) - }) - - return sorted -} - -// indexOf returns the first position where target was found in the passed commits -func indexOf(commits []*Commit, target *Commit) int { - for i, commit := range commits { - if target.Hash == commit.Hash { - return i - } - } - - return -1 -} - -// remove returns the passed commits excluding the commit toDelete -func remove(commits []*Commit, toDelete *Commit) []*Commit { - res := make([]*Commit, len(commits)) - j := 0 - for _, commit := range commits { - if commit.Hash == toDelete.Hash { - continue - } - - res[j] = commit - j++ - } - - return res[:j] -} - -// removeDuplicated removes duplicated commits from the passed slice of commits -func removeDuplicated(commits []*Commit) []*Commit { - seen := make(map[plumbing.Hash]struct{}, len(commits)) - res := make([]*Commit, len(commits)) - j := 0 - for _, commit := range commits { - if _, ok := seen[commit.Hash]; ok { - continue - } - - seen[commit.Hash] = struct{}{} - res[j] = commit - j++ - } - - return res[:j] -} - -// isInIndexCommitFilter returns a commitFilter that returns true -// if the commit is in the passed index. -func isInIndexCommitFilter(index map[plumbing.Hash]struct{}) CommitFilter { - return func(c *Commit) bool { - _, ok := index[c.Hash] - return ok - } -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/object.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/object.go deleted file mode 100644 index 13b1e91c9c6..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/object.go +++ /dev/null @@ -1,239 +0,0 @@ -// Package object contains implementations of all Git objects and utility -// functions to work with them. -package object - -import ( - "bytes" - "errors" - "fmt" - "io" - "strconv" - "time" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" -) - -// ErrUnsupportedObject trigger when a non-supported object is being decoded. -var ErrUnsupportedObject = errors.New("unsupported object type") - -// Object is a generic representation of any git object. It is implemented by -// Commit, Tree, Blob, and Tag, and includes the functions that are common to -// them. -// -// Object is returned when an object can be of any type. It is frequently used -// with a type cast to acquire the specific type of object: -// -// func process(obj Object) { -// switch o := obj.(type) { -// case *Commit: -// // o is a Commit -// case *Tree: -// // o is a Tree -// case *Blob: -// // o is a Blob -// case *Tag: -// // o is a Tag -// } -// } -// -// This interface is intentionally different from plumbing.EncodedObject, which -// is a lower level interface used by storage implementations to read and write -// objects in its encoded form. -type Object interface { - ID() plumbing.Hash - Type() plumbing.ObjectType - Decode(plumbing.EncodedObject) error - Encode(plumbing.EncodedObject) error -} - -// GetObject gets an object from an object storer and decodes it. -func GetObject(s storer.EncodedObjectStorer, h plumbing.Hash) (Object, error) { - o, err := s.EncodedObject(plumbing.AnyObject, h) - if err != nil { - return nil, err - } - - return DecodeObject(s, o) -} - -// DecodeObject decodes an encoded object into an Object and associates it to -// the given object storer. 
-func DecodeObject(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (Object, error) { - switch o.Type() { - case plumbing.CommitObject: - return DecodeCommit(s, o) - case plumbing.TreeObject: - return DecodeTree(s, o) - case plumbing.BlobObject: - return DecodeBlob(o) - case plumbing.TagObject: - return DecodeTag(s, o) - default: - return nil, plumbing.ErrInvalidType - } -} - -// DateFormat is the format being used in the original git implementation -const DateFormat = "Mon Jan 02 15:04:05 2006 -0700" - -// Signature is used to identify who and when created a commit or tag. -type Signature struct { - // Name represents a person name. It is an arbitrary string. - Name string - // Email is an email, but it cannot be assumed to be well-formed. - Email string - // When is the timestamp of the signature. - When time.Time -} - -// Decode decodes a byte slice into a signature -func (s *Signature) Decode(b []byte) { - open := bytes.LastIndexByte(b, '<') - close := bytes.LastIndexByte(b, '>') - if open == -1 || close == -1 { - return - } - - if close < open { - return - } - - s.Name = string(bytes.Trim(b[:open], " ")) - s.Email = string(b[open+1 : close]) - - hasTime := close+2 < len(b) - if hasTime { - s.decodeTimeAndTimeZone(b[close+2:]) - } -} - -// Encode encodes a Signature into a writer. -func (s *Signature) Encode(w io.Writer) error { - if _, err := fmt.Fprintf(w, "%s <%s> ", s.Name, s.Email); err != nil { - return err - } - if err := s.encodeTimeAndTimeZone(w); err != nil { - return err - } - return nil -} - -var timeZoneLength = 5 - -func (s *Signature) decodeTimeAndTimeZone(b []byte) { - space := bytes.IndexByte(b, ' ') - if space == -1 { - space = len(b) - } - - ts, err := strconv.ParseInt(string(b[:space]), 10, 64) - if err != nil { - return - } - - s.When = time.Unix(ts, 0).In(time.UTC) - var tzStart = space + 1 - if tzStart >= len(b) || tzStart+timeZoneLength > len(b) { - return - } - - timezone := string(b[tzStart : tzStart+timeZoneLength]) - tzhours, err1 := strconv.ParseInt(timezone[0:3], 10, 64) - tzmins, err2 := strconv.ParseInt(timezone[3:], 10, 64) - if err1 != nil || err2 != nil { - return - } - if tzhours < 0 { - tzmins *= -1 - } - - tz := time.FixedZone("", int(tzhours*60*60+tzmins*60)) - - s.When = s.When.In(tz) -} - -func (s *Signature) encodeTimeAndTimeZone(w io.Writer) error { - u := s.When.Unix() - if u < 0 { - u = 0 - } - _, err := fmt.Fprintf(w, "%d %s", u, s.When.Format("-0700")) - return err -} - -func (s *Signature) String() string { - return fmt.Sprintf("%s <%s>", s.Name, s.Email) -} - -// ObjectIter provides an iterator for a set of objects. -type ObjectIter struct { - storer.EncodedObjectIter - s storer.EncodedObjectStorer -} - -// NewObjectIter takes a storer.EncodedObjectStorer and a -// storer.EncodedObjectIter and returns an *ObjectIter that iterates over all -// objects contained in the storer.EncodedObjectIter. -func NewObjectIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *ObjectIter { - return &ObjectIter{iter, s} -} - -// Next moves the iterator to the next object and returns a pointer to it. If -// there are no more objects, it returns io.EOF. 
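-//
-// A minimal usage sketch (illustrative only, not part of the original file;
-// `iter` is assumed to be an *ObjectIter obtained from a repository):
-//
-//	for {
-//		obj, err := iter.Next()
-//		if err == io.EOF {
-//			break
-//		}
-//		if err != nil {
-//			// handle the error
-//		}
-//		fmt.Println(obj.ID(), obj.Type())
-//	}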
-func (iter *ObjectIter) Next() (Object, error) { - for { - obj, err := iter.EncodedObjectIter.Next() - if err != nil { - return nil, err - } - - o, err := iter.toObject(obj) - if err == plumbing.ErrInvalidType { - continue - } - - if err != nil { - return nil, err - } - - return o, nil - } -} - -// ForEach call the cb function for each object contained on this iter until -// an error happens or the end of the iter is reached. If ErrStop is sent -// the iteration is stop but no error is returned. The iterator is closed. -func (iter *ObjectIter) ForEach(cb func(Object) error) error { - return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { - o, err := iter.toObject(obj) - if err == plumbing.ErrInvalidType { - return nil - } - - if err != nil { - return err - } - - return cb(o) - }) -} - -func (iter *ObjectIter) toObject(obj plumbing.EncodedObject) (Object, error) { - switch obj.Type() { - case plumbing.BlobObject: - blob := &Blob{} - return blob, blob.Decode(obj) - case plumbing.TreeObject: - tree := &Tree{s: iter.s} - return tree, tree.Decode(obj) - case plumbing.CommitObject: - commit := &Commit{} - return commit, commit.Decode(obj) - case plumbing.TagObject: - tag := &Tag{} - return tag, tag.Decode(obj) - default: - return nil, plumbing.ErrInvalidType - } -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go deleted file mode 100644 index 1135a40a4ab..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go +++ /dev/null @@ -1,346 +0,0 @@ -package object - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "math" - "strings" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/filemode" - fdiff "github.com/go-git/go-git/v5/plumbing/format/diff" - "github.com/go-git/go-git/v5/utils/diff" - - dmp "github.com/sergi/go-diff/diffmatchpatch" -) - -var ( - ErrCanceled = errors.New("operation canceled") -) - -func getPatch(message string, changes ...*Change) (*Patch, error) { - ctx := context.Background() - return getPatchContext(ctx, message, changes...) 
-} - -func getPatchContext(ctx context.Context, message string, changes ...*Change) (*Patch, error) { - var filePatches []fdiff.FilePatch - for _, c := range changes { - select { - case <-ctx.Done(): - return nil, ErrCanceled - default: - } - - fp, err := filePatchWithContext(ctx, c) - if err != nil { - return nil, err - } - - filePatches = append(filePatches, fp) - } - - return &Patch{message, filePatches}, nil -} - -func filePatchWithContext(ctx context.Context, c *Change) (fdiff.FilePatch, error) { - from, to, err := c.Files() - if err != nil { - return nil, err - } - fromContent, fIsBinary, err := fileContent(from) - if err != nil { - return nil, err - } - - toContent, tIsBinary, err := fileContent(to) - if err != nil { - return nil, err - } - - if fIsBinary || tIsBinary { - return &textFilePatch{from: c.From, to: c.To}, nil - } - - diffs := diff.Do(fromContent, toContent) - - var chunks []fdiff.Chunk - for _, d := range diffs { - select { - case <-ctx.Done(): - return nil, ErrCanceled - default: - } - - var op fdiff.Operation - switch d.Type { - case dmp.DiffEqual: - op = fdiff.Equal - case dmp.DiffDelete: - op = fdiff.Delete - case dmp.DiffInsert: - op = fdiff.Add - } - - chunks = append(chunks, &textChunk{d.Text, op}) - } - - return &textFilePatch{ - chunks: chunks, - from: c.From, - to: c.To, - }, nil - -} - -func filePatch(c *Change) (fdiff.FilePatch, error) { - return filePatchWithContext(context.Background(), c) -} - -func fileContent(f *File) (content string, isBinary bool, err error) { - if f == nil { - return - } - - isBinary, err = f.IsBinary() - if err != nil || isBinary { - return - } - - content, err = f.Contents() - - return -} - -// Patch is an implementation of fdiff.Patch interface -type Patch struct { - message string - filePatches []fdiff.FilePatch -} - -func (t *Patch) FilePatches() []fdiff.FilePatch { - return t.filePatches -} - -func (t *Patch) Message() string { - return t.message -} - -func (p *Patch) Encode(w io.Writer) error { - ue := fdiff.NewUnifiedEncoder(w, fdiff.DefaultContextLines) - - return ue.Encode(p) -} - -func (p *Patch) Stats() FileStats { - return getFileStatsFromFilePatches(p.FilePatches()) -} - -func (p *Patch) String() string { - buf := bytes.NewBuffer(nil) - err := p.Encode(buf) - if err != nil { - return fmt.Sprintf("malformed patch: %s", err.Error()) - } - - return buf.String() -} - -// changeEntryWrapper is an implementation of fdiff.File interface -type changeEntryWrapper struct { - ce ChangeEntry -} - -func (f *changeEntryWrapper) Hash() plumbing.Hash { - if !f.ce.TreeEntry.Mode.IsFile() { - return plumbing.ZeroHash - } - - return f.ce.TreeEntry.Hash -} - -func (f *changeEntryWrapper) Mode() filemode.FileMode { - return f.ce.TreeEntry.Mode -} -func (f *changeEntryWrapper) Path() string { - if !f.ce.TreeEntry.Mode.IsFile() { - return "" - } - - return f.ce.Name -} - -func (f *changeEntryWrapper) Empty() bool { - return !f.ce.TreeEntry.Mode.IsFile() -} - -// textFilePatch is an implementation of fdiff.FilePatch interface -type textFilePatch struct { - chunks []fdiff.Chunk - from, to ChangeEntry -} - -func (tf *textFilePatch) Files() (from fdiff.File, to fdiff.File) { - f := &changeEntryWrapper{tf.from} - t := &changeEntryWrapper{tf.to} - - if !f.Empty() { - from = f - } - - if !t.Empty() { - to = t - } - - return -} - -func (t *textFilePatch) IsBinary() bool { - return len(t.chunks) == 0 -} - -func (t *textFilePatch) Chunks() []fdiff.Chunk { - return t.chunks -} - -// textChunk is an implementation of fdiff.Chunk interface -type textChunk 
struct {
-	content string
-	op      fdiff.Operation
-}
-
-func (t *textChunk) Content() string {
-	return t.content
-}
-
-func (t *textChunk) Type() fdiff.Operation {
-	return t.op
-}
-
-// FileStat stores the status of changes in content of a file.
-type FileStat struct {
-	Name     string
-	Addition int
-	Deletion int
-}
-
-func (fs FileStat) String() string {
-	return printStat([]FileStat{fs})
-}
-
-// FileStats is a collection of FileStat.
-type FileStats []FileStat
-
-func (fileStats FileStats) String() string {
-	return printStat(fileStats)
-}
-
-func printStat(fileStats []FileStat) string {
-	padLength := float64(len(" "))
-	newlineLength := float64(len("\n"))
-	separatorLength := float64(len("|"))
-	// Soft line length limit. The text length calculation below excludes
-	// length of the change number. Adding that would take it closer to 80,
-	// but probably not more than 80, until it's a huge number.
-	lineLength := 72.0
-
-	// Get the longest filename and longest total change.
-	var longestLength float64
-	var longestTotalChange float64
-	for _, fs := range fileStats {
-		if int(longestLength) < len(fs.Name) {
-			longestLength = float64(len(fs.Name))
-		}
-		totalChange := fs.Addition + fs.Deletion
-		if int(longestTotalChange) < totalChange {
-			longestTotalChange = float64(totalChange)
-		}
-	}
-
-	// Parts of the output:
-	// <pad><filename><pad>|<pad><changeNumber><pad><+++/---><newline>
-	// example: " main.go | 10 +++++++--- "
-
-	// <pad><filename><pad>
-	leftTextLength := padLength + longestLength + padLength
-
-	// <pad><number><pad><+++++/-----><newline>
-	// Excluding number length here.
-	rightTextLength := padLength + padLength + newlineLength
-
-	totalTextArea := leftTextLength + separatorLength + rightTextLength
-	heightOfHistogram := lineLength - totalTextArea
-
-	// Scale the histogram.
-	var scaleFactor float64
-	if longestTotalChange > heightOfHistogram {
-		// Scale down to heightOfHistogram.
-		scaleFactor = longestTotalChange / heightOfHistogram
-	} else {
-		scaleFactor = 1.0
-	}
-
-	finalOutput := ""
-	for _, fs := range fileStats {
-		addn := float64(fs.Addition)
-		deln := float64(fs.Deletion)
-		adds := strings.Repeat("+", int(math.Floor(addn/scaleFactor)))
-		dels := strings.Repeat("-", int(math.Floor(deln/scaleFactor)))
-		finalOutput += fmt.Sprintf(" %s | %d %s%s\n", fs.Name, (fs.Addition + fs.Deletion), adds, dels)
-	}
-
-	return finalOutput
-}
-
-func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats {
-	var fileStats FileStats
-
-	for _, fp := range filePatches {
-		// ignore empty patches (binary files, submodule refs updates)
-		if len(fp.Chunks()) == 0 {
-			continue
-		}
-
-		cs := FileStat{}
-		from, to := fp.Files()
-		if from == nil {
-			// New File is created.
-			cs.Name = to.Path()
-		} else if to == nil {
-			// File is deleted.
-			cs.Name = from.Path()
-		} else if from.Path() != to.Path() {
-			// File is renamed. Not supported.
- // cs.Name = fmt.Sprintf("%s => %s", from.Path(), to.Path()) - } else { - cs.Name = from.Path() - } - - for _, chunk := range fp.Chunks() { - s := chunk.Content() - if len(s) == 0 { - continue - } - - switch chunk.Type() { - case fdiff.Add: - cs.Addition += strings.Count(s, "\n") - if s[len(s)-1] != '\n' { - cs.Addition++ - } - case fdiff.Delete: - cs.Deletion += strings.Count(s, "\n") - if s[len(s)-1] != '\n' { - cs.Deletion++ - } - } - } - - fileStats = append(fileStats, cs) - } - - return fileStats -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/rename.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/rename.go deleted file mode 100644 index 35af1d62d5e..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/rename.go +++ /dev/null @@ -1,813 +0,0 @@ -package object - -import ( - "errors" - "io" - "sort" - "strings" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/filemode" - "github.com/go-git/go-git/v5/utils/ioutil" - "github.com/go-git/go-git/v5/utils/merkletrie" -) - -// DetectRenames detects the renames in the given changes on two trees with -// the given options. It will return the given changes grouping additions and -// deletions into modifications when possible. -// If options is nil, the default diff tree options will be used. -func DetectRenames( - changes Changes, - opts *DiffTreeOptions, -) (Changes, error) { - if opts == nil { - opts = DefaultDiffTreeOptions - } - - detector := &renameDetector{ - renameScore: int(opts.RenameScore), - renameLimit: int(opts.RenameLimit), - onlyExact: opts.OnlyExactRenames, - } - - for _, c := range changes { - action, err := c.Action() - if err != nil { - return nil, err - } - - switch action { - case merkletrie.Insert: - detector.added = append(detector.added, c) - case merkletrie.Delete: - detector.deleted = append(detector.deleted, c) - default: - detector.modified = append(detector.modified, c) - } - } - - return detector.detect() -} - -// renameDetector will detect and resolve renames in a set of changes. -// see: https://github.com/eclipse/jgit/blob/master/org.eclipse.jgit/src/org/eclipse/jgit/diff/RenameDetector.java -type renameDetector struct { - added []*Change - deleted []*Change - modified []*Change - - renameScore int - renameLimit int - onlyExact bool -} - -// detectExactRenames detects matches files that were deleted with files that -// were added where the hash is the same on both. If there are multiple targets -// the one with the most similar path will be chosen as the rename and the -// rest as either deletions or additions. 
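-//
-// A minimal usage sketch of the exported entry point for this machinery
-// (illustrative only, not part of the original file; `oldTree` and `newTree`
-// are assumed to be *Tree values from the same repository):
-//
-//	changes, err := DiffTree(oldTree, newTree) // no rename detection here
-//	if err != nil {
-//		// handle the error
-//	}
-//	// nil options fall back to DefaultDiffTreeOptions (rename score 60).
-//	changes, err = DetectRenames(changes, nil)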
-func (d *renameDetector) detectExactRenames() { - added := groupChangesByHash(d.added) - deletes := groupChangesByHash(d.deleted) - var uniqueAdds []*Change - var nonUniqueAdds [][]*Change - var addedLeft []*Change - - for _, cs := range added { - if len(cs) == 1 { - uniqueAdds = append(uniqueAdds, cs[0]) - } else { - nonUniqueAdds = append(nonUniqueAdds, cs) - } - } - - for _, c := range uniqueAdds { - hash := changeHash(c) - deleted := deletes[hash] - - if len(deleted) == 1 { - if sameMode(c, deleted[0]) { - d.modified = append(d.modified, &Change{From: deleted[0].From, To: c.To}) - delete(deletes, hash) - } else { - addedLeft = append(addedLeft, c) - } - } else if len(deleted) > 1 { - bestMatch := bestNameMatch(c, deleted) - if bestMatch != nil && sameMode(c, bestMatch) { - d.modified = append(d.modified, &Change{From: bestMatch.From, To: c.To}) - delete(deletes, hash) - - var newDeletes = make([]*Change, 0, len(deleted)-1) - for _, d := range deleted { - if d != bestMatch { - newDeletes = append(newDeletes, d) - } - } - deletes[hash] = newDeletes - } - } else { - addedLeft = append(addedLeft, c) - } - } - - for _, added := range nonUniqueAdds { - hash := changeHash(added[0]) - deleted := deletes[hash] - - if len(deleted) == 1 { - deleted := deleted[0] - bestMatch := bestNameMatch(deleted, added) - if bestMatch != nil && sameMode(deleted, bestMatch) { - d.modified = append(d.modified, &Change{From: deleted.From, To: bestMatch.To}) - delete(deletes, hash) - - for _, c := range added { - if c != bestMatch { - addedLeft = append(addedLeft, c) - } - } - } else { - addedLeft = append(addedLeft, added...) - } - } else if len(deleted) > 1 { - maxSize := len(deleted) * len(added) - if d.renameLimit > 0 && d.renameLimit < maxSize { - maxSize = d.renameLimit - } - - matrix := make(similarityMatrix, 0, maxSize) - - for delIdx, del := range deleted { - deletedName := changeName(del) - - for addIdx, add := range added { - addedName := changeName(add) - - score := nameSimilarityScore(addedName, deletedName) - matrix = append(matrix, similarityPair{added: addIdx, deleted: delIdx, score: score}) - - if len(matrix) >= maxSize { - break - } - } - - if len(matrix) >= maxSize { - break - } - } - - sort.Stable(matrix) - - usedAdds := make(map[*Change]struct{}) - usedDeletes := make(map[*Change]struct{}) - for i := len(matrix) - 1; i >= 0; i-- { - del := deleted[matrix[i].deleted] - add := added[matrix[i].added] - - if add == nil || del == nil { - // it was already matched - continue - } - - usedAdds[add] = struct{}{} - usedDeletes[del] = struct{}{} - d.modified = append(d.modified, &Change{From: del.From, To: add.To}) - added[matrix[i].added] = nil - deleted[matrix[i].deleted] = nil - } - - for _, c := range added { - if _, ok := usedAdds[c]; !ok && c != nil { - addedLeft = append(addedLeft, c) - } - } - - var newDeletes = make([]*Change, 0, len(deleted)-len(usedDeletes)) - for _, c := range deleted { - if _, ok := usedDeletes[c]; !ok && c != nil { - newDeletes = append(newDeletes, c) - } - } - deletes[hash] = newDeletes - } else { - addedLeft = append(addedLeft, added...) - } - } - - d.added = addedLeft - d.deleted = nil - for _, dels := range deletes { - d.deleted = append(d.deleted, dels...) - } -} - -// detectContentRenames detects renames based on the similarity of the content -// in the files by building a matrix of pairs between sources and destinations -// and matching by the highest score. 
-// see: https://github.com/eclipse/jgit/blob/master/org.eclipse.jgit/src/org/eclipse/jgit/diff/SimilarityRenameDetector.java -func (d *renameDetector) detectContentRenames() error { - cnt := max(len(d.added), len(d.deleted)) - if d.renameLimit > 0 && cnt > d.renameLimit { - return nil - } - - srcs, dsts := d.deleted, d.added - matrix, err := buildSimilarityMatrix(srcs, dsts, d.renameScore) - if err != nil { - return err - } - renames := make([]*Change, 0, min(len(matrix), len(dsts))) - - // Match rename pairs on a first come, first serve basis until - // we have looked at everything that is above the minimum score. - for i := len(matrix) - 1; i >= 0; i-- { - pair := matrix[i] - src := srcs[pair.deleted] - dst := dsts[pair.added] - - if dst == nil || src == nil { - // It was already matched before - continue - } - - renames = append(renames, &Change{From: src.From, To: dst.To}) - - // Claim destination and source as matched - dsts[pair.added] = nil - srcs[pair.deleted] = nil - } - - d.modified = append(d.modified, renames...) - d.added = compactChanges(dsts) - d.deleted = compactChanges(srcs) - - return nil -} - -func (d *renameDetector) detect() (Changes, error) { - if len(d.added) > 0 && len(d.deleted) > 0 { - d.detectExactRenames() - - if !d.onlyExact { - if err := d.detectContentRenames(); err != nil { - return nil, err - } - } - } - - result := make(Changes, 0, len(d.added)+len(d.deleted)+len(d.modified)) - result = append(result, d.added...) - result = append(result, d.deleted...) - result = append(result, d.modified...) - - sort.Stable(result) - - return result, nil -} - -func bestNameMatch(change *Change, changes []*Change) *Change { - var best *Change - var bestScore int - - cname := changeName(change) - - for _, c := range changes { - score := nameSimilarityScore(cname, changeName(c)) - if score > bestScore { - bestScore = score - best = c - } - } - - return best -} - -func nameSimilarityScore(a, b string) int { - aDirLen := strings.LastIndexByte(a, '/') + 1 - bDirLen := strings.LastIndexByte(b, '/') + 1 - - dirMin := min(aDirLen, bDirLen) - dirMax := max(aDirLen, bDirLen) - - var dirScoreLtr, dirScoreRtl int - if dirMax == 0 { - dirScoreLtr = 100 - dirScoreRtl = 100 - } else { - var dirSim int - - for ; dirSim < dirMin; dirSim++ { - if a[dirSim] != b[dirSim] { - break - } - } - - dirScoreLtr = dirSim * 100 / dirMax - - if dirScoreLtr == 100 { - dirScoreRtl = 100 - } else { - for dirSim = 0; dirSim < dirMin; dirSim++ { - if a[aDirLen-1-dirSim] != b[bDirLen-1-dirSim] { - break - } - } - dirScoreRtl = dirSim * 100 / dirMax - } - } - - fileMin := min(len(a)-aDirLen, len(b)-bDirLen) - fileMax := max(len(a)-aDirLen, len(b)-bDirLen) - - fileSim := 0 - for ; fileSim < fileMin; fileSim++ { - if a[len(a)-1-fileSim] != b[len(b)-1-fileSim] { - break - } - } - fileScore := fileSim * 100 / fileMax - - return (((dirScoreLtr + dirScoreRtl) * 25) + (fileScore * 50)) / 100 -} - -func changeName(c *Change) string { - if c.To != empty { - return c.To.Name - } - return c.From.Name -} - -func changeHash(c *Change) plumbing.Hash { - if c.To != empty { - return c.To.TreeEntry.Hash - } - - return c.From.TreeEntry.Hash -} - -func changeMode(c *Change) filemode.FileMode { - if c.To != empty { - return c.To.TreeEntry.Mode - } - - return c.From.TreeEntry.Mode -} - -func sameMode(a, b *Change) bool { - return changeMode(a) == changeMode(b) -} - -func groupChangesByHash(changes []*Change) map[plumbing.Hash][]*Change { - var result = make(map[plumbing.Hash][]*Change) - for _, c := range changes { - hash := 
changeHash(c) - result[hash] = append(result[hash], c) - } - return result -} - -type similarityMatrix []similarityPair - -func (m similarityMatrix) Len() int { return len(m) } -func (m similarityMatrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (m similarityMatrix) Less(i, j int) bool { - if m[i].score == m[j].score { - if m[i].added == m[j].added { - return m[i].deleted < m[j].deleted - } - return m[i].added < m[j].added - } - return m[i].score < m[j].score -} - -type similarityPair struct { - // index of the added file - added int - // index of the deleted file - deleted int - // similarity score - score int -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func buildSimilarityMatrix(srcs, dsts []*Change, renameScore int) (similarityMatrix, error) { - // Allocate for the worst-case scenario where every pair has a score - // that we need to consider. We might not need that many. - matrix := make(similarityMatrix, 0, len(srcs)*len(dsts)) - srcSizes := make([]int64, len(srcs)) - dstSizes := make([]int64, len(dsts)) - dstTooLarge := make(map[int]bool) - - // Consider each pair of files, if the score is above the minimum - // threshold we need to record that scoring in the matrix so we can - // later find the best matches. -outerLoop: - for srcIdx, src := range srcs { - if changeMode(src) != filemode.Regular { - continue - } - - // Declare the from file and the similarity index here to be able to - // reuse it inside the inner loop. The reason to not initialize them - // here is so we can skip the initialization in case they happen to - // not be needed later. They will be initialized inside the inner - // loop if and only if they're needed and reused in subsequent passes. - var from *File - var s *similarityIndex - var err error - for dstIdx, dst := range dsts { - if changeMode(dst) != filemode.Regular { - continue - } - - if dstTooLarge[dstIdx] { - continue - } - - var to *File - srcSize := srcSizes[srcIdx] - if srcSize == 0 { - from, _, err = src.Files() - if err != nil { - return nil, err - } - srcSize = from.Size + 1 - srcSizes[srcIdx] = srcSize - } - - dstSize := dstSizes[dstIdx] - if dstSize == 0 { - _, to, err = dst.Files() - if err != nil { - return nil, err - } - dstSize = to.Size + 1 - dstSizes[dstIdx] = dstSize - } - - min, max := srcSize, dstSize - if dstSize < srcSize { - min = dstSize - max = srcSize - } - - if int(min*100/max) < renameScore { - // File sizes are too different to be a match - continue - } - - if s == nil { - s, err = fileSimilarityIndex(from) - if err != nil { - if err == errIndexFull { - continue outerLoop - } - return nil, err - } - } - - if to == nil { - _, to, err = dst.Files() - if err != nil { - return nil, err - } - } - - di, err := fileSimilarityIndex(to) - if err != nil { - if err == errIndexFull { - dstTooLarge[dstIdx] = true - } - - return nil, err - } - - contentScore := s.score(di, 10000) - // The name score returns a value between 0 and 100, so we need to - // convert it to the same range as the content score. 
- nameScore := nameSimilarityScore(src.From.Name, dst.To.Name) * 100 - score := (contentScore*99 + nameScore*1) / 10000 - - if score < renameScore { - continue - } - - matrix = append(matrix, similarityPair{added: dstIdx, deleted: srcIdx, score: score}) - } - } - - sort.Stable(matrix) - - return matrix, nil -} - -func compactChanges(changes []*Change) []*Change { - var result []*Change - for _, c := range changes { - if c != nil { - result = append(result, c) - } - } - return result -} - -const ( - keyShift = 32 - maxCountValue = (1 << keyShift) - 1 -) - -var errIndexFull = errors.New("index is full") - -// similarityIndex is an index structure of lines/blocks in one file. -// This structure can be used to compute an approximation of the similarity -// between two files. -// To save space in memory, this index uses a space efficient encoding which -// will not exceed 1MiB per instance. The index starts out at a smaller size -// (closer to 2KiB), but may grow as more distinct blocks withing the scanned -// file are discovered. -// see: https://github.com/eclipse/jgit/blob/master/org.eclipse.jgit/src/org/eclipse/jgit/diff/SimilarityIndex.java -type similarityIndex struct { - hashed uint64 - // number of non-zero entries in hashes - numHashes int - growAt int - hashes []keyCountPair - hashBits int -} - -func fileSimilarityIndex(f *File) (*similarityIndex, error) { - idx := newSimilarityIndex() - if err := idx.hash(f); err != nil { - return nil, err - } - - sort.Stable(keyCountPairs(idx.hashes)) - - return idx, nil -} - -func newSimilarityIndex() *similarityIndex { - return &similarityIndex{ - hashBits: 8, - hashes: make([]keyCountPair, 1<<8), - growAt: shouldGrowAt(8), - } -} - -func (i *similarityIndex) hash(f *File) error { - isBin, err := f.IsBinary() - if err != nil { - return err - } - - r, err := f.Reader() - if err != nil { - return err - } - - defer ioutil.CheckClose(r, &err) - - return i.hashContent(r, f.Size, isBin) -} - -func (i *similarityIndex) hashContent(r io.Reader, size int64, isBin bool) error { - var buf = make([]byte, 4096) - var ptr, cnt int - remaining := size - - for 0 < remaining { - hash := 5381 - var blockHashedCnt uint64 - - // Hash one line or block, whatever happens first - n := int64(0) - for { - if ptr == cnt { - ptr = 0 - var err error - cnt, err = io.ReadFull(r, buf) - if err != nil && err != io.ErrUnexpectedEOF { - return err - } - - if cnt == 0 { - return io.EOF - } - } - n++ - c := buf[ptr] & 0xff - ptr++ - - // Ignore CR in CRLF sequence if it's text - if !isBin && c == '\r' && ptr < cnt && buf[ptr] == '\n' { - continue - } - blockHashedCnt++ - - if c == '\n' { - break - } - - hash = (hash << 5) + hash + int(c) - - if n >= 64 || n >= remaining { - break - } - } - i.hashed += blockHashedCnt - if err := i.add(hash, blockHashedCnt); err != nil { - return err - } - remaining -= n - } - - return nil -} - -// score computes the similarity score between this index and another one. -// A region of a file is defined as a line in a text file or a fixed-size -// block in a binary file. To prepare an index, each region in the file is -// hashed; the values and counts of hashes are retained in a sorted table. -// Define the similarity fraction F as the count of matching regions between -// the two files divided between the maximum count of regions in either file. -// The similarity score is F multiplied by the maxScore constant, yielding a -// range [0, maxScore]. It is defined as maxScore for the degenerate case of -// two empty files. 
-// The similarity score is symmetrical; i.e. a.score(b) == b.score(a). -func (i *similarityIndex) score(other *similarityIndex, maxScore int) int { - var maxHashed = i.hashed - if maxHashed < other.hashed { - maxHashed = other.hashed - } - if maxHashed == 0 { - return maxScore - } - - return int(i.common(other) * uint64(maxScore) / maxHashed) -} - -func (i *similarityIndex) common(dst *similarityIndex) uint64 { - srcIdx, dstIdx := 0, 0 - if i.numHashes == 0 || dst.numHashes == 0 { - return 0 - } - - var common uint64 - srcKey, dstKey := i.hashes[srcIdx].key(), dst.hashes[dstIdx].key() - - for { - if srcKey == dstKey { - srcCnt, dstCnt := i.hashes[srcIdx].count(), dst.hashes[dstIdx].count() - if srcCnt < dstCnt { - common += srcCnt - } else { - common += dstCnt - } - - srcIdx++ - if srcIdx == len(i.hashes) { - break - } - srcKey = i.hashes[srcIdx].key() - - dstIdx++ - if dstIdx == len(dst.hashes) { - break - } - dstKey = dst.hashes[dstIdx].key() - } else if srcKey < dstKey { - // Region of src that is not in dst - srcIdx++ - if srcIdx == len(i.hashes) { - break - } - srcKey = i.hashes[srcIdx].key() - } else { - // Region of dst that is not in src - dstIdx++ - if dstIdx == len(dst.hashes) { - break - } - dstKey = dst.hashes[dstIdx].key() - } - } - - return common -} - -func (i *similarityIndex) add(key int, cnt uint64) error { - key = int(uint32(key)*0x9e370001 >> 1) - - j := i.slot(key) - for { - v := i.hashes[j] - if v == 0 { - // It's an empty slot, so we can store it here. - if i.growAt <= i.numHashes { - if err := i.grow(); err != nil { - return err - } - j = i.slot(key) - continue - } - - var err error - i.hashes[j], err = newKeyCountPair(key, cnt) - if err != nil { - return err - } - i.numHashes++ - return nil - } else if v.key() == key { - // It's the same key, so increment the counter. - var err error - i.hashes[j], err = newKeyCountPair(key, v.count()+cnt) - if err != nil { - return err - } - return nil - } else if j+1 >= len(i.hashes) { - j = 0 - } else { - j++ - } - } -} - -type keyCountPair uint64 - -func newKeyCountPair(key int, cnt uint64) (keyCountPair, error) { - if cnt > maxCountValue { - return 0, errIndexFull - } - - return keyCountPair((uint64(key) << keyShift) | cnt), nil -} - -func (p keyCountPair) key() int { - return int(p >> keyShift) -} - -func (p keyCountPair) count() uint64 { - return uint64(p) & maxCountValue -} - -func (i *similarityIndex) slot(key int) int { - // We use 31 - hashBits because the upper bit was already forced - // to be 0 and we want the remaining high bits to be used as the - // table slot. - return int(uint32(key) >> uint(31 - i.hashBits)) -} - -func shouldGrowAt(hashBits int) int { - return (1 << uint(hashBits)) * (hashBits - 3) / hashBits -} - -func (i *similarityIndex) grow() error { - if i.hashBits == 30 { - return errIndexFull - } - - old := i.hashes - - i.hashBits++ - i.growAt = shouldGrowAt(i.hashBits) - - // TODO(erizocosmico): find a way to check if it will OOM and return - // errIndexFull instead. 
-	i.hashes = make([]keyCountPair, 1<<uint(i.hashBits))
-
-	for _, v := range old {
-		if v != 0 {
-			j := i.slot(v.key())
-			for i.hashes[j] != 0 {
-				j++
-				if j >= len(i.hashes) {
-					j = 0
-				}
-			}
-			i.hashes[j] = v
-		}
-	}
-
-	return nil
-}
-
-type keyCountPairs []keyCountPair
-
-func (p keyCountPairs) Len() int           { return len(p) }
-func (p keyCountPairs) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
-func (p keyCountPairs) Less(i, j int) bool { return p[i] < p[j] }
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go
deleted file mode 100644
index 46416580455..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go
+++ /dev/null
@@ -1,357 +0,0 @@
-package object
-
-import (
-	"bufio"
-	"bytes"
-	"fmt"
-	"io"
-	stdioutil "io/ioutil"
-	"strings"
-
-	"golang.org/x/crypto/openpgp"
-
-	"github.com/go-git/go-git/v5/plumbing"
-	"github.com/go-git/go-git/v5/plumbing/storer"
-	"github.com/go-git/go-git/v5/utils/ioutil"
-)
-
-// Tag represents an annotated tag object. It points to a single git object of
-// any type, but tags typically are applied to commit or blob objects. It
-// provides a reference that associates the target with a tag name. It also
-// contains meta-information about the tag, including the tagger, tag date and
-// message.
-//
-// Note that this is not used for lightweight tags.
-//
-// https://git-scm.com/book/en/v2/Git-Internals-Git-References#Tags
-type Tag struct {
-	// Hash of the tag.
-	Hash plumbing.Hash
-	// Name of the tag.
-	Name string
-	// Tagger is the one who created the tag.
-	Tagger Signature
-	// Message is an arbitrary text message.
-	Message string
-	// PGPSignature is the PGP signature of the tag.
-	PGPSignature string
-	// TargetType is the object type of the target.
-	TargetType plumbing.ObjectType
-	// Target is the hash of the target object.
-	Target plumbing.Hash
-
-	s storer.EncodedObjectStorer
-}
-
-// GetTag gets a tag from an object storer and decodes it.
-func GetTag(s storer.EncodedObjectStorer, h plumbing.Hash) (*Tag, error) {
-	o, err := s.EncodedObject(plumbing.TagObject, h)
-	if err != nil {
-		return nil, err
-	}
-
-	return DecodeTag(s, o)
-}
-
-// DecodeTag decodes an encoded object into a *Tag and associates it to the
-// given object storer.
-func DecodeTag(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Tag, error) {
-	t := &Tag{s: s}
-	if err := t.Decode(o); err != nil {
-		return nil, err
-	}
-
-	return t, nil
-}
-
-// ID returns the object ID of the tag, not the object that the tag references.
-// The returned value will always match the current value of Tag.Hash.
-//
-// ID is present to fulfill the Object interface.
-func (t *Tag) ID() plumbing.Hash {
-	return t.Hash
-}
-
-// Type returns the type of object. It always returns plumbing.TagObject.
-//
-// Type is present to fulfill the Object interface.
-func (t *Tag) Type() plumbing.ObjectType {
-	return plumbing.TagObject
-}
-
-// Decode transforms a plumbing.EncodedObject into a Tag struct.
-func (t *Tag) Decode(o plumbing.EncodedObject) (err error) { - if o.Type() != plumbing.TagObject { - return ErrUnsupportedObject - } - - t.Hash = o.Hash() - - reader, err := o.Reader() - if err != nil { - return err - } - defer ioutil.CheckClose(reader, &err) - - r := bufPool.Get().(*bufio.Reader) - defer bufPool.Put(r) - r.Reset(reader) - for { - var line []byte - line, err = r.ReadBytes('\n') - if err != nil && err != io.EOF { - return err - } - - line = bytes.TrimSpace(line) - if len(line) == 0 { - break // Start of message - } - - split := bytes.SplitN(line, []byte{' '}, 2) - switch string(split[0]) { - case "object": - t.Target = plumbing.NewHash(string(split[1])) - case "type": - t.TargetType, err = plumbing.ParseObjectType(string(split[1])) - if err != nil { - return err - } - case "tag": - t.Name = string(split[1]) - case "tagger": - t.Tagger.Decode(split[1]) - } - - if err == io.EOF { - return nil - } - } - - data, err := stdioutil.ReadAll(r) - if err != nil { - return err - } - - var pgpsig bool - // Check if data contains PGP signature. - if bytes.Contains(data, []byte(beginpgp)) { - // Split the lines at newline. - messageAndSig := bytes.Split(data, []byte("\n")) - - for _, l := range messageAndSig { - if pgpsig { - if bytes.Contains(l, []byte(endpgp)) { - t.PGPSignature += endpgp + "\n" - break - } else { - t.PGPSignature += string(l) + "\n" - } - continue - } - - // Check if it's the beginning of a PGP signature. - if bytes.Contains(l, []byte(beginpgp)) { - t.PGPSignature += beginpgp + "\n" - pgpsig = true - continue - } - - t.Message += string(l) + "\n" - } - } else { - t.Message = string(data) - } - - return nil -} - -// Encode transforms a Tag into a plumbing.EncodedObject. -func (t *Tag) Encode(o plumbing.EncodedObject) error { - return t.encode(o, true) -} - -// EncodeWithoutSignature export a Tag into a plumbing.EncodedObject without the signature (correspond to the payload of the PGP signature). -func (t *Tag) EncodeWithoutSignature(o plumbing.EncodedObject) error { - return t.encode(o, false) -} - -func (t *Tag) encode(o plumbing.EncodedObject, includeSig bool) (err error) { - o.SetType(plumbing.TagObject) - w, err := o.Writer() - if err != nil { - return err - } - defer ioutil.CheckClose(w, &err) - - if _, err = fmt.Fprintf(w, - "object %s\ntype %s\ntag %s\ntagger ", - t.Target.String(), t.TargetType.Bytes(), t.Name); err != nil { - return err - } - - if err = t.Tagger.Encode(w); err != nil { - return err - } - - if _, err = fmt.Fprint(w, "\n\n"); err != nil { - return err - } - - if _, err = fmt.Fprint(w, t.Message); err != nil { - return err - } - - // Note that this is highly sensitive to what it sent along in the message. - // Message *always* needs to end with a newline, or else the message and the - // signature will be concatenated into a corrupt object. Since this is a - // lower-level method, we assume you know what you are doing and have already - // done the needful on the message in the caller. - if includeSig { - if _, err = fmt.Fprint(w, t.PGPSignature); err != nil { - return err - } - } - - return err -} - -// Commit returns the commit pointed to by the tag. If the tag points to a -// different type of object ErrUnsupportedObject will be returned. 
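-//
-// A minimal usage sketch (illustrative only, not part of the original file;
-// `s` is assumed to be a storer.EncodedObjectStorer and `h` the hash of an
-// annotated tag object):
-//
-//	t, err := GetTag(s, h)
-//	if err != nil {
-//		// handle the error
-//	}
-//	c, err := t.Commit()
-//	if err != nil {
-//		// ErrUnsupportedObject if the tag does not point to a commit
-//	} else {
-//		fmt.Println(t.Name, "tags commit", c.Hash)
-//	}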
-func (t *Tag) Commit() (*Commit, error) { - if t.TargetType != plumbing.CommitObject { - return nil, ErrUnsupportedObject - } - - o, err := t.s.EncodedObject(plumbing.CommitObject, t.Target) - if err != nil { - return nil, err - } - - return DecodeCommit(t.s, o) -} - -// Tree returns the tree pointed to by the tag. If the tag points to a commit -// object the tree of that commit will be returned. If the tag does not point -// to a commit or tree object ErrUnsupportedObject will be returned. -func (t *Tag) Tree() (*Tree, error) { - switch t.TargetType { - case plumbing.CommitObject: - c, err := t.Commit() - if err != nil { - return nil, err - } - - return c.Tree() - case plumbing.TreeObject: - return GetTree(t.s, t.Target) - default: - return nil, ErrUnsupportedObject - } -} - -// Blob returns the blob pointed to by the tag. If the tag points to a -// different type of object ErrUnsupportedObject will be returned. -func (t *Tag) Blob() (*Blob, error) { - if t.TargetType != plumbing.BlobObject { - return nil, ErrUnsupportedObject - } - - return GetBlob(t.s, t.Target) -} - -// Object returns the object pointed to by the tag. -func (t *Tag) Object() (Object, error) { - o, err := t.s.EncodedObject(t.TargetType, t.Target) - if err != nil { - return nil, err - } - - return DecodeObject(t.s, o) -} - -// String returns the meta information contained in the tag as a formatted -// string. -func (t *Tag) String() string { - obj, _ := t.Object() - - return fmt.Sprintf( - "%s %s\nTagger: %s\nDate: %s\n\n%s\n%s", - plumbing.TagObject, t.Name, t.Tagger.String(), t.Tagger.When.Format(DateFormat), - t.Message, objectAsString(obj), - ) -} - -// Verify performs PGP verification of the tag with a provided armored -// keyring and returns openpgp.Entity associated with verifying key on success. -func (t *Tag) Verify(armoredKeyRing string) (*openpgp.Entity, error) { - keyRingReader := strings.NewReader(armoredKeyRing) - keyring, err := openpgp.ReadArmoredKeyRing(keyRingReader) - if err != nil { - return nil, err - } - - // Extract signature. - signature := strings.NewReader(t.PGPSignature) - - encoded := &plumbing.MemoryObject{} - // Encode tag components, excluding signature and get a reader object. - if err := t.EncodeWithoutSignature(encoded); err != nil { - return nil, err - } - er, err := encoded.Reader() - if err != nil { - return nil, err - } - - return openpgp.CheckArmoredDetachedSignature(keyring, er, signature) -} - -// TagIter provides an iterator for a set of tags. -type TagIter struct { - storer.EncodedObjectIter - s storer.EncodedObjectStorer -} - -// NewTagIter takes a storer.EncodedObjectStorer and a -// storer.EncodedObjectIter and returns a *TagIter that iterates over all -// tags contained in the storer.EncodedObjectIter. -// -// Any non-tag object returned by the storer.EncodedObjectIter is skipped. -func NewTagIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *TagIter { - return &TagIter{iter, s} -} - -// Next moves the iterator to the next tag and returns a pointer to it. If -// there are no more tags, it returns io.EOF. -func (iter *TagIter) Next() (*Tag, error) { - obj, err := iter.EncodedObjectIter.Next() - if err != nil { - return nil, err - } - - return DecodeTag(iter.s, obj) -} - -// ForEach call the cb function for each tag contained on this iter until -// an error happens or the end of the iter is reached. If ErrStop is sent -// the iteration is stop but no error is returned. The iterator is closed. 
-func (iter *TagIter) ForEach(cb func(*Tag) error) error { - return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { - t, err := DecodeTag(iter.s, obj) - if err != nil { - return err - } - - return cb(t) - }) -} - -func objectAsString(obj Object) string { - switch o := obj.(type) { - case *Commit: - return o.String() - default: - return "" - } -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go deleted file mode 100644 index 5e6378ca499..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go +++ /dev/null @@ -1,525 +0,0 @@ -package object - -import ( - "bufio" - "context" - "errors" - "fmt" - "io" - "path" - "path/filepath" - "strings" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/filemode" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -const ( - maxTreeDepth = 1024 - startingStackSize = 8 -) - -// New errors defined by this package. -var ( - ErrMaxTreeDepth = errors.New("maximum tree depth exceeded") - ErrFileNotFound = errors.New("file not found") - ErrDirectoryNotFound = errors.New("directory not found") - ErrEntryNotFound = errors.New("entry not found") -) - -// Tree is basically like a directory - it references a bunch of other trees -// and/or blobs (i.e. files and sub-directories) -type Tree struct { - Entries []TreeEntry - Hash plumbing.Hash - - s storer.EncodedObjectStorer - m map[string]*TreeEntry - t map[string]*Tree // tree path cache -} - -// GetTree gets a tree from an object storer and decodes it. -func GetTree(s storer.EncodedObjectStorer, h plumbing.Hash) (*Tree, error) { - o, err := s.EncodedObject(plumbing.TreeObject, h) - if err != nil { - return nil, err - } - - return DecodeTree(s, o) -} - -// DecodeTree decodes an encoded object into a *Tree and associates it to the -// given object storer. -func DecodeTree(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Tree, error) { - t := &Tree{s: s} - if err := t.Decode(o); err != nil { - return nil, err - } - - return t, nil -} - -// TreeEntry represents a file -type TreeEntry struct { - Name string - Mode filemode.FileMode - Hash plumbing.Hash -} - -// File returns the hash of the file identified by the `path` argument. -// The path is interpreted as relative to the tree receiver. -func (t *Tree) File(path string) (*File, error) { - e, err := t.FindEntry(path) - if err != nil { - return nil, ErrFileNotFound - } - - blob, err := GetBlob(t.s, e.Hash) - if err != nil { - if err == plumbing.ErrObjectNotFound { - return nil, ErrFileNotFound - } - return nil, err - } - - return NewFile(path, e.Mode, blob), nil -} - -// Size returns the plaintext size of an object, without reading it -// into memory. -func (t *Tree) Size(path string) (int64, error) { - e, err := t.FindEntry(path) - if err != nil { - return 0, ErrEntryNotFound - } - - return t.s.EncodedObjectSize(e.Hash) -} - -// Tree returns the tree identified by the `path` argument. -// The path is interpreted as relative to the tree receiver. -func (t *Tree) Tree(path string) (*Tree, error) { - e, err := t.FindEntry(path) - if err != nil { - return nil, ErrDirectoryNotFound - } - - tree, err := GetTree(t.s, e.Hash) - if err == plumbing.ErrObjectNotFound { - return nil, ErrDirectoryNotFound - } - - return tree, err -} - -// TreeEntryFile returns the *File for a given *TreeEntry. 
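-//
-// A minimal usage sketch (illustrative only, not part of the original file;
-// `t` is assumed to be a *Tree and the path is a made-up example):
-//
-//	e, err := t.FindEntry("cmd/main.go")
-//	if err != nil {
-//		// handle the error (e.g. ErrEntryNotFound)
-//	}
-//	f, err := t.TreeEntryFile(e)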
-func (t *Tree) TreeEntryFile(e *TreeEntry) (*File, error) { - blob, err := GetBlob(t.s, e.Hash) - if err != nil { - return nil, err - } - - return NewFile(e.Name, e.Mode, blob), nil -} - -// FindEntry search a TreeEntry in this tree or any subtree. -func (t *Tree) FindEntry(path string) (*TreeEntry, error) { - if t.t == nil { - t.t = make(map[string]*Tree) - } - - pathParts := strings.Split(path, "/") - startingTree := t - pathCurrent := "" - - // search for the longest path in the tree path cache - for i := len(pathParts) - 1; i > 1; i-- { - path := filepath.Join(pathParts[:i]...) - - tree, ok := t.t[path] - if ok { - startingTree = tree - pathParts = pathParts[i:] - pathCurrent = path - - break - } - } - - var tree *Tree - var err error - for tree = startingTree; len(pathParts) > 1; pathParts = pathParts[1:] { - if tree, err = tree.dir(pathParts[0]); err != nil { - return nil, err - } - - pathCurrent = filepath.Join(pathCurrent, pathParts[0]) - t.t[pathCurrent] = tree - } - - return tree.entry(pathParts[0]) -} - -func (t *Tree) dir(baseName string) (*Tree, error) { - entry, err := t.entry(baseName) - if err != nil { - return nil, ErrDirectoryNotFound - } - - obj, err := t.s.EncodedObject(plumbing.TreeObject, entry.Hash) - if err != nil { - return nil, err - } - - tree := &Tree{s: t.s} - err = tree.Decode(obj) - - return tree, err -} - -func (t *Tree) entry(baseName string) (*TreeEntry, error) { - if t.m == nil { - t.buildMap() - } - - entry, ok := t.m[baseName] - if !ok { - return nil, ErrEntryNotFound - } - - return entry, nil -} - -// Files returns a FileIter allowing to iterate over the Tree -func (t *Tree) Files() *FileIter { - return NewFileIter(t.s, t) -} - -// ID returns the object ID of the tree. The returned value will always match -// the current value of Tree.Hash. -// -// ID is present to fulfill the Object interface. -func (t *Tree) ID() plumbing.Hash { - return t.Hash -} - -// Type returns the type of object. It always returns plumbing.TreeObject. -func (t *Tree) Type() plumbing.ObjectType { - return plumbing.TreeObject -} - -// Decode transform an plumbing.EncodedObject into a Tree struct -func (t *Tree) Decode(o plumbing.EncodedObject) (err error) { - if o.Type() != plumbing.TreeObject { - return ErrUnsupportedObject - } - - t.Hash = o.Hash() - if o.Size() == 0 { - return nil - } - - t.Entries = nil - t.m = nil - - reader, err := o.Reader() - if err != nil { - return err - } - defer ioutil.CheckClose(reader, &err) - - r := bufPool.Get().(*bufio.Reader) - defer bufPool.Put(r) - r.Reset(reader) - for { - str, err := r.ReadString(' ') - if err != nil { - if err == io.EOF { - break - } - - return err - } - str = str[:len(str)-1] // strip last byte (' ') - - mode, err := filemode.New(str) - if err != nil { - return err - } - - name, err := r.ReadString(0) - if err != nil && err != io.EOF { - return err - } - - var hash plumbing.Hash - if _, err = io.ReadFull(r, hash[:]); err != nil { - return err - } - - baseName := name[:len(name)-1] - t.Entries = append(t.Entries, TreeEntry{ - Hash: hash, - Mode: mode, - Name: baseName, - }) - } - - return nil -} - -// Encode transforms a Tree into a plumbing.EncodedObject. 
-func (t *Tree) Encode(o plumbing.EncodedObject) (err error) { - o.SetType(plumbing.TreeObject) - w, err := o.Writer() - if err != nil { - return err - } - - defer ioutil.CheckClose(w, &err) - for _, entry := range t.Entries { - if _, err = fmt.Fprintf(w, "%o %s", entry.Mode, entry.Name); err != nil { - return err - } - - if _, err = w.Write([]byte{0x00}); err != nil { - return err - } - - if _, err = w.Write(entry.Hash[:]); err != nil { - return err - } - } - - return err -} - -func (t *Tree) buildMap() { - t.m = make(map[string]*TreeEntry) - for i := 0; i < len(t.Entries); i++ { - t.m[t.Entries[i].Name] = &t.Entries[i] - } -} - -// Diff returns a list of changes between this tree and the provided one -func (t *Tree) Diff(to *Tree) (Changes, error) { - return t.DiffContext(context.Background(), to) -} - -// DiffContext returns a list of changes between this tree and the provided one -// Error will be returned if context expires. Provided context must be non nil. -// -// NOTE: Since version 5.1.0 the renames are correctly handled, the settings -// used are the recommended options DefaultDiffTreeOptions. -func (t *Tree) DiffContext(ctx context.Context, to *Tree) (Changes, error) { - return DiffTreeWithOptions(ctx, t, to, DefaultDiffTreeOptions) -} - -// Patch returns a slice of Patch objects with all the changes between trees -// in chunks. This representation can be used to create several diff outputs. -func (t *Tree) Patch(to *Tree) (*Patch, error) { - return t.PatchContext(context.Background(), to) -} - -// PatchContext returns a slice of Patch objects with all the changes between -// trees in chunks. This representation can be used to create several diff -// outputs. If context expires, an error will be returned. Provided context must -// be non-nil. -// -// NOTE: Since version 5.1.0 the renames are correctly handled, the settings -// used are the recommended options DefaultDiffTreeOptions. -func (t *Tree) PatchContext(ctx context.Context, to *Tree) (*Patch, error) { - changes, err := t.DiffContext(ctx, to) - if err != nil { - return nil, err - } - - return changes.PatchContext(ctx) -} - -// treeEntryIter facilitates iterating through the TreeEntry objects in a Tree. -type treeEntryIter struct { - t *Tree - pos int -} - -func (iter *treeEntryIter) Next() (TreeEntry, error) { - if iter.pos >= len(iter.t.Entries) { - return TreeEntry{}, io.EOF - } - iter.pos++ - return iter.t.Entries[iter.pos-1], nil -} - -// TreeWalker provides a means of walking through all of the entries in a Tree. -type TreeWalker struct { - stack []*treeEntryIter - base string - recursive bool - seen map[plumbing.Hash]bool - - s storer.EncodedObjectStorer - t *Tree -} - -// NewTreeWalker returns a new TreeWalker for the given tree. -// -// It is the caller's responsibility to call Close() when finished with the -// tree walker. -func NewTreeWalker(t *Tree, recursive bool, seen map[plumbing.Hash]bool) *TreeWalker { - stack := make([]*treeEntryIter, 0, startingStackSize) - stack = append(stack, &treeEntryIter{t, 0}) - - return &TreeWalker{ - stack: stack, - recursive: recursive, - seen: seen, - - s: t.s, - t: t, - } -} - -// Next returns the next object from the tree. Objects are returned in order -// and subtrees are included. After the last object has been returned further -// calls to Next() will return io.EOF. -// -// In the current implementation any objects which cannot be found in the -// underlying repository will be skipped automatically. It is possible that this -// may change in future versions. 
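-//
-// A minimal usage sketch (illustrative only, not part of the original file;
-// `t` is assumed to be a *Tree):
-//
-//	w := NewTreeWalker(t, true, nil)
-//	defer w.Close()
-//	for {
-//		name, entry, err := w.Next()
-//		if err == io.EOF {
-//			break
-//		}
-//		if err != nil {
-//			// handle the error
-//		}
-//		fmt.Println(name, entry.Mode, entry.Hash)
-//	}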
-func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) { - var obj *Tree - for { - current := len(w.stack) - 1 - if current < 0 { - // Nothing left on the stack so we're finished - err = io.EOF - return - } - - if current > maxTreeDepth { - // We're probably following bad data or some self-referencing tree - err = ErrMaxTreeDepth - return - } - - entry, err = w.stack[current].Next() - if err == io.EOF { - // Finished with the current tree, move back up to the parent - w.stack = w.stack[:current] - w.base, _ = path.Split(w.base) - w.base = strings.TrimSuffix(w.base, "/") - continue - } - - if err != nil { - return - } - - if w.seen[entry.Hash] { - continue - } - - if entry.Mode == filemode.Dir { - obj, err = GetTree(w.s, entry.Hash) - } - - name = simpleJoin(w.base, entry.Name) - - if err != nil { - err = io.EOF - return - } - - break - } - - if !w.recursive { - return - } - - if obj != nil { - w.stack = append(w.stack, &treeEntryIter{obj, 0}) - w.base = simpleJoin(w.base, entry.Name) - } - - return -} - -// Tree returns the tree that the tree walker most recently operated on. -func (w *TreeWalker) Tree() *Tree { - current := len(w.stack) - 1 - if w.stack[current].pos == 0 { - current-- - } - - if current < 0 { - return nil - } - - return w.stack[current].t -} - -// Close releases any resources used by the TreeWalker. -func (w *TreeWalker) Close() { - w.stack = nil -} - -// TreeIter provides an iterator for a set of trees. -type TreeIter struct { - storer.EncodedObjectIter - s storer.EncodedObjectStorer -} - -// NewTreeIter takes a storer.EncodedObjectStorer and a -// storer.EncodedObjectIter and returns a *TreeIter that iterates over all -// tree contained in the storer.EncodedObjectIter. -// -// Any non-tree object returned by the storer.EncodedObjectIter is skipped. -func NewTreeIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *TreeIter { - return &TreeIter{iter, s} -} - -// Next moves the iterator to the next tree and returns a pointer to it. If -// there are no more trees, it returns io.EOF. -func (iter *TreeIter) Next() (*Tree, error) { - for { - obj, err := iter.EncodedObjectIter.Next() - if err != nil { - return nil, err - } - - if obj.Type() != plumbing.TreeObject { - continue - } - - return DecodeTree(iter.s, obj) - } -} - -// ForEach call the cb function for each tree contained on this iter until -// an error happens or the end of the iter is reached. If ErrStop is sent -// the iteration is stop but no error is returned. The iterator is closed. 
-func (iter *TreeIter) ForEach(cb func(*Tree) error) error { - return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { - if obj.Type() != plumbing.TreeObject { - return nil - } - - t, err := DecodeTree(iter.s, obj) - if err != nil { - return err - } - - return cb(t) - }) -} - -func simpleJoin(parent, child string) string { - if len(parent) > 0 { - return parent + "/" + child - } - return child -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go deleted file mode 100644 index b4891b957c5..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go +++ /dev/null @@ -1,136 +0,0 @@ -package object - -import ( - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/filemode" - "github.com/go-git/go-git/v5/utils/merkletrie/noder" -) - -// A treenoder is a helper type that wraps git trees into merkletrie -// noders. -// -// As a merkletrie noder doesn't understand the concept of modes (e.g. -// file permissions), the treenoder includes the mode of the git tree in -// the hash, so changes in the modes will be detected as modifications -// to the file contents by the merkletrie difftree algorithm. This is -// consistent with how the "git diff-tree" command works. -type treeNoder struct { - parent *Tree // the root node is its own parent - name string // empty string for the root node - mode filemode.FileMode - hash plumbing.Hash - children []noder.Noder // memoized -} - -// NewTreeRootNode returns the root node of a Tree -func NewTreeRootNode(t *Tree) noder.Noder { - if t == nil { - return &treeNoder{} - } - - return &treeNoder{ - parent: t, - name: "", - mode: filemode.Dir, - hash: t.Hash, - } -} - -func (t *treeNoder) isRoot() bool { - return t.name == "" -} - -func (t *treeNoder) String() string { - return "treeNoder <" + t.name + ">" -} - -func (t *treeNoder) Hash() []byte { - if t.mode == filemode.Deprecated { - return append(t.hash[:], filemode.Regular.Bytes()...) - } - return append(t.hash[:], t.mode.Bytes()...) -} - -func (t *treeNoder) Name() string { - return t.name -} - -func (t *treeNoder) IsDir() bool { - return t.mode == filemode.Dir -} - -// Children will return the children of a treenoder as treenoders, -// building them from the children of the wrapped git tree. -func (t *treeNoder) Children() ([]noder.Noder, error) { - if t.mode != filemode.Dir { - return noder.NoChildren, nil - } - - // children are memoized for efficiency - if t.children != nil { - return t.children, nil - } - - // the parent of the returned children will be ourself as a tree if - // we are a not the root treenoder. The root is special as it - // is is own parent. - parent := t.parent - if !t.isRoot() { - var err error - if parent, err = t.parent.Tree(t.name); err != nil { - return nil, err - } - } - - return transformChildren(parent) -} - -// Returns the children of a tree as treenoders. -// Efficiency is key here. -func transformChildren(t *Tree) ([]noder.Noder, error) { - var err error - var e TreeEntry - - // there will be more tree entries than children in the tree, - // due to submodules and empty directories, but I think it is still - // worth it to pre-allocate the whole array now, even if sometimes - // is bigger than needed. - ret := make([]noder.Noder, 0, len(t.Entries)) - - walker := NewTreeWalker(t, false, nil) // don't recurse - // don't defer walker.Close() for efficiency reasons. 
- for { - _, e, err = walker.Next() - if err == io.EOF { - break - } - if err != nil { - walker.Close() - return nil, err - } - - ret = append(ret, &treeNoder{ - parent: t, - name: e.Name, - mode: e.Mode, - hash: e.Hash, - }) - } - walker.Close() - - return ret, nil -} - -// len(t.tree.Entries) != the number of elements walked by treewalker -// for some reason because of empty directories, submodules, etc, so we -// have to walk here. -func (t *treeNoder) NumChildren() (int, error) { - children, err := t.Children() - if err != nil { - return 0, err - } - - return len(children), nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs.go deleted file mode 100644 index 1bd724cad5b..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs.go +++ /dev/null @@ -1,211 +0,0 @@ -package packp - -import ( - "fmt" - "sort" - "strings" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/storage/memory" -) - -// AdvRefs values represent the information transmitted on an -// advertised-refs message. Values from this type are not zero-value -// safe, use the New function instead. -type AdvRefs struct { - // Prefix stores prefix payloads. - // - // When using this message over (smart) HTTP, you have to add a pktline - // before the whole thing with the following payload: - // - // '# service=$servicename" LF - // - // Moreover, some (all) git HTTP smart servers will send a flush-pkt - // just after the first pkt-line. - // - // To accommodate both situations, the Prefix field allow you to store - // any data you want to send before the actual pktlines. It will also - // be filled up with whatever is found on the line. - Prefix [][]byte - // Head stores the resolved HEAD reference if present. - // This can be present with git-upload-pack, not with git-receive-pack. - Head *plumbing.Hash - // Capabilities are the capabilities. - Capabilities *capability.List - // References are the hash references. - References map[string]plumbing.Hash - // Peeled are the peeled hash references. - Peeled map[string]plumbing.Hash - // Shallows are the shallow object ids. - Shallows []plumbing.Hash -} - -// NewAdvRefs returns a pointer to a new AdvRefs value, ready to be used. 
-func NewAdvRefs() *AdvRefs { - return &AdvRefs{ - Prefix: [][]byte{}, - Capabilities: capability.NewList(), - References: make(map[string]plumbing.Hash), - Peeled: make(map[string]plumbing.Hash), - Shallows: []plumbing.Hash{}, - } -} - -func (a *AdvRefs) AddReference(r *plumbing.Reference) error { - switch r.Type() { - case plumbing.SymbolicReference: - v := fmt.Sprintf("%s:%s", r.Name().String(), r.Target().String()) - a.Capabilities.Add(capability.SymRef, v) - case plumbing.HashReference: - a.References[r.Name().String()] = r.Hash() - default: - return plumbing.ErrInvalidType - } - - return nil -} - -func (a *AdvRefs) AllReferences() (memory.ReferenceStorage, error) { - s := memory.ReferenceStorage{} - if err := a.addRefs(s); err != nil { - return s, plumbing.NewUnexpectedError(err) - } - - return s, nil -} - -func (a *AdvRefs) addRefs(s storer.ReferenceStorer) error { - for name, hash := range a.References { - ref := plumbing.NewReferenceFromStrings(name, hash.String()) - if err := s.SetReference(ref); err != nil { - return err - } - } - - if a.supportSymrefs() { - return a.addSymbolicRefs(s) - } - - return a.resolveHead(s) -} - -// If the server does not support symrefs capability, -// we need to guess the reference where HEAD is pointing to. -// -// Git versions prior to 1.8.4.3 has an special procedure to get -// the reference where is pointing to HEAD: -// - Check if a reference called master exists. If exists and it -// has the same hash as HEAD hash, we can say that HEAD is pointing to master -// - If master does not exists or does not have the same hash as HEAD, -// order references and check in that order if that reference has the same -// hash than HEAD. If yes, set HEAD pointing to that branch hash -// - If no reference is found, throw an error -func (a *AdvRefs) resolveHead(s storer.ReferenceStorer) error { - if a.Head == nil { - return nil - } - - ref, err := s.Reference(plumbing.Master) - - // check first if HEAD is pointing to master - if err == nil { - ok, err := a.createHeadIfCorrectReference(ref, s) - if err != nil { - return err - } - - if ok { - return nil - } - } - - if err != nil && err != plumbing.ErrReferenceNotFound { - return err - } - - // From here we are trying to guess the branch that HEAD is pointing - refIter, err := s.IterReferences() - if err != nil { - return err - } - - var refNames []string - err = refIter.ForEach(func(r *plumbing.Reference) error { - refNames = append(refNames, string(r.Name())) - return nil - }) - if err != nil { - return err - } - - sort.Strings(refNames) - - var headSet bool - for _, refName := range refNames { - ref, err := s.Reference(plumbing.ReferenceName(refName)) - if err != nil { - return err - } - ok, err := a.createHeadIfCorrectReference(ref, s) - if err != nil { - return err - } - if ok { - headSet = true - break - } - } - - if !headSet { - return plumbing.ErrReferenceNotFound - } - - return nil -} - -func (a *AdvRefs) createHeadIfCorrectReference( - reference *plumbing.Reference, - s storer.ReferenceStorer) (bool, error) { - if reference.Hash() == *a.Head { - headRef := plumbing.NewSymbolicReference(plumbing.HEAD, reference.Name()) - if err := s.SetReference(headRef); err != nil { - return false, err - } - - return true, nil - } - - return false, nil -} - -func (a *AdvRefs) addSymbolicRefs(s storer.ReferenceStorer) error { - for _, symref := range a.Capabilities.Get(capability.SymRef) { - chunks := strings.Split(symref, ":") - if len(chunks) != 2 { - err := fmt.Errorf("bad number of `:` in symref value (%q)", symref) - 
return plumbing.NewUnexpectedError(err)
- }
- name := plumbing.ReferenceName(chunks[0])
- target := plumbing.ReferenceName(chunks[1])
- ref := plumbing.NewSymbolicReference(name, target)
- if err := s.SetReference(ref); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (a *AdvRefs) supportSymrefs() bool {
- return a.Capabilities.Supports(capability.SymRef)
-}
-
-// IsEmpty returns true if it doesn't contain any reference.
-func (a *AdvRefs) IsEmpty() bool {
- return a.Head == nil &&
- len(a.References) == 0 &&
- len(a.Peeled) == 0 &&
- len(a.Shallows) == 0
-}
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go
deleted file mode 100644
index 63bbe5ab16e..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go
+++ /dev/null
@@ -1,288 +0,0 @@
-package packp
-
-import (
- "bytes"
- "encoding/hex"
- "errors"
- "fmt"
- "io"
-
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/format/pktline"
-)
-
-// Decode reads the next advertised-refs message from its input and
-// stores it in the AdvRefs.
-func (a *AdvRefs) Decode(r io.Reader) error {
- d := newAdvRefsDecoder(r)
- return d.Decode(a)
-}
-
-type advRefsDecoder struct {
- s *pktline.Scanner // a pkt-line scanner from the input stream
- line []byte // current pkt-line contents, use parser.nextLine() to make it advance
- nLine int // current pkt-line number for debugging, begins at 1
- hash plumbing.Hash // last hash read
- err error // sticky error, use the parser.error() method to fill this out
- data *AdvRefs // parsed data is stored here
-}
-
-var (
- // ErrEmptyAdvRefs is returned by Decode if it gets an empty advertised
- // references message.
- ErrEmptyAdvRefs = errors.New("empty advertised-ref message")
- // ErrEmptyInput is returned by Decode if the input is empty.
- ErrEmptyInput = errors.New("empty input")
-)
-
-func newAdvRefsDecoder(r io.Reader) *advRefsDecoder {
- return &advRefsDecoder{
- s: pktline.NewScanner(r),
- }
-}
-
-func (d *advRefsDecoder) Decode(v *AdvRefs) error {
- d.data = v
-
- for state := decodePrefix; state != nil; {
- state = state(d)
- }
-
- return d.err
-}
-
-type decoderStateFn func(*advRefsDecoder) decoderStateFn
-
-// fills out the parser sticky error
-func (d *advRefsDecoder) error(format string, a ...interface{}) {
- msg := fmt.Sprintf(
- "pkt-line %d: %s", d.nLine,
- fmt.Sprintf(format, a...),
- )
-
- d.err = NewErrUnexpectedData(msg, d.line)
-}
-
-// Reads a new pkt-line from the scanner, makes its payload available as
-// d.line and increments d.nLine. A successful invocation returns true,
-// otherwise, false is returned and the sticky error is filled out
-// accordingly. Trims eols at the end of the payloads.
-func (d *advRefsDecoder) nextLine() bool {
- d.nLine++
-
- if !d.s.Scan() {
- if d.err = d.s.Err(); d.err != nil {
- return false
- }
-
- if d.nLine == 1 {
- d.err = ErrEmptyInput
- return false
- }
-
- d.error("EOF")
- return false
- }
-
- d.line = d.s.Bytes()
- d.line = bytes.TrimSuffix(d.line, eol)
-
- return true
-}
-
-// The HTTP smart prefix is often followed by a flush-pkt.
-func decodePrefix(d *advRefsDecoder) decoderStateFn { - if ok := d.nextLine(); !ok { - return nil - } - - if !isPrefix(d.line) { - return decodeFirstHash - } - - tmp := make([]byte, len(d.line)) - copy(tmp, d.line) - d.data.Prefix = append(d.data.Prefix, tmp) - if ok := d.nextLine(); !ok { - return nil - } - - if !isFlush(d.line) { - return decodeFirstHash - } - - d.data.Prefix = append(d.data.Prefix, pktline.Flush) - if ok := d.nextLine(); !ok { - return nil - } - - return decodeFirstHash -} - -func isPrefix(payload []byte) bool { - return len(payload) > 0 && payload[0] == '#' -} - -// If the first hash is zero, then a no-refs is coming. Otherwise, a -// list-of-refs is coming, and the hash will be followed by the first -// advertised ref. -func decodeFirstHash(p *advRefsDecoder) decoderStateFn { - // If the repository is empty, we receive a flush here (HTTP). - if isFlush(p.line) { - p.err = ErrEmptyAdvRefs - return nil - } - - if len(p.line) < hashSize { - p.error("cannot read hash, pkt-line too short") - return nil - } - - if _, err := hex.Decode(p.hash[:], p.line[:hashSize]); err != nil { - p.error("invalid hash text: %s", err) - return nil - } - - p.line = p.line[hashSize:] - - if p.hash.IsZero() { - return decodeSkipNoRefs - } - - return decodeFirstRef -} - -// Skips SP "capabilities^{}" NUL -func decodeSkipNoRefs(p *advRefsDecoder) decoderStateFn { - if len(p.line) < len(noHeadMark) { - p.error("too short zero-id ref") - return nil - } - - if !bytes.HasPrefix(p.line, noHeadMark) { - p.error("malformed zero-id ref") - return nil - } - - p.line = p.line[len(noHeadMark):] - - return decodeCaps -} - -// decode the refname, expects SP refname NULL -func decodeFirstRef(l *advRefsDecoder) decoderStateFn { - if len(l.line) < 3 { - l.error("line too short after hash") - return nil - } - - if !bytes.HasPrefix(l.line, sp) { - l.error("no space after hash") - return nil - } - l.line = l.line[1:] - - chunks := bytes.SplitN(l.line, null, 2) - if len(chunks) < 2 { - l.error("NULL not found") - return nil - } - ref := chunks[0] - l.line = chunks[1] - - if bytes.Equal(ref, []byte(head)) { - l.data.Head = &l.hash - } else { - l.data.References[string(ref)] = l.hash - } - - return decodeCaps -} - -func decodeCaps(p *advRefsDecoder) decoderStateFn { - if err := p.data.Capabilities.Decode(p.line); err != nil { - p.error("invalid capabilities: %s", err) - return nil - } - - return decodeOtherRefs -} - -// The refs are either tips (obj-id SP refname) or a peeled (obj-id SP refname^{}). -// If there are no refs, then there might be a shallow or flush-ptk. 
-func decodeOtherRefs(p *advRefsDecoder) decoderStateFn { - if ok := p.nextLine(); !ok { - return nil - } - - if bytes.HasPrefix(p.line, shallow) { - return decodeShallow - } - - if len(p.line) == 0 { - return nil - } - - saveTo := p.data.References - if bytes.HasSuffix(p.line, peeled) { - p.line = bytes.TrimSuffix(p.line, peeled) - saveTo = p.data.Peeled - } - - ref, hash, err := readRef(p.line) - if err != nil { - p.error("%s", err) - return nil - } - saveTo[ref] = hash - - return decodeOtherRefs -} - -// Reads a ref-name -func readRef(data []byte) (string, plumbing.Hash, error) { - chunks := bytes.Split(data, sp) - switch { - case len(chunks) == 1: - return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: no space was found") - case len(chunks) > 2: - return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: more than one space found") - default: - return string(chunks[1]), plumbing.NewHash(string(chunks[0])), nil - } -} - -// Keeps reading shallows until a flush-pkt is found -func decodeShallow(p *advRefsDecoder) decoderStateFn { - if !bytes.HasPrefix(p.line, shallow) { - p.error("malformed shallow prefix, found %q... instead", p.line[:len(shallow)]) - return nil - } - p.line = bytes.TrimPrefix(p.line, shallow) - - if len(p.line) != hashSize { - p.error(fmt.Sprintf( - "malformed shallow hash: wrong length, expected 40 bytes, read %d bytes", - len(p.line))) - return nil - } - - text := p.line[:hashSize] - var h plumbing.Hash - if _, err := hex.Decode(h[:], text); err != nil { - p.error("invalid hash text: %s", err) - return nil - } - - p.data.Shallows = append(p.data.Shallows, h) - - if ok := p.nextLine(); !ok { - return nil - } - - if len(p.line) == 0 { - return nil // successful parse of the advertised-refs message - } - - return decodeShallow -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_encode.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_encode.go deleted file mode 100644 index fb9bd883fce..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_encode.go +++ /dev/null @@ -1,176 +0,0 @@ -package packp - -import ( - "bytes" - "fmt" - "io" - "sort" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/pktline" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" -) - -// Encode writes the AdvRefs encoding to a writer. -// -// All the payloads will end with a newline character. Capabilities, -// references and shallows are written in alphabetical order, except for -// peeled references that always follow their corresponding references. 
-func (a *AdvRefs) Encode(w io.Writer) error { - e := newAdvRefsEncoder(w) - return e.Encode(a) -} - -type advRefsEncoder struct { - data *AdvRefs // data to encode - pe *pktline.Encoder // where to write the encoded data - firstRefName string // reference name to encode in the first pkt-line (HEAD if present) - firstRefHash plumbing.Hash // hash referenced to encode in the first pkt-line (HEAD if present) - sortedRefs []string // hash references to encode ordered by increasing order - err error // sticky error - -} - -func newAdvRefsEncoder(w io.Writer) *advRefsEncoder { - return &advRefsEncoder{ - pe: pktline.NewEncoder(w), - } -} - -func (e *advRefsEncoder) Encode(v *AdvRefs) error { - e.data = v - e.sortRefs() - e.setFirstRef() - - for state := encodePrefix; state != nil; { - state = state(e) - } - - return e.err -} - -func (e *advRefsEncoder) sortRefs() { - if len(e.data.References) > 0 { - refs := make([]string, 0, len(e.data.References)) - for refName := range e.data.References { - refs = append(refs, refName) - } - - sort.Strings(refs) - e.sortedRefs = refs - } -} - -func (e *advRefsEncoder) setFirstRef() { - if e.data.Head != nil { - e.firstRefName = head - e.firstRefHash = *e.data.Head - return - } - - if len(e.sortedRefs) > 0 { - refName := e.sortedRefs[0] - e.firstRefName = refName - e.firstRefHash = e.data.References[refName] - } -} - -type encoderStateFn func(*advRefsEncoder) encoderStateFn - -func encodePrefix(e *advRefsEncoder) encoderStateFn { - for _, p := range e.data.Prefix { - if bytes.Equal(p, pktline.Flush) { - if e.err = e.pe.Flush(); e.err != nil { - return nil - } - continue - } - if e.err = e.pe.Encodef("%s\n", string(p)); e.err != nil { - return nil - } - } - - return encodeFirstLine -} - -// Adds the first pkt-line payload: head hash, head ref and capabilities. -// If HEAD ref is not found, the first reference ordered in increasing order will be used. -// If there aren't HEAD neither refs, the first line will be "PKT-LINE(zero-id SP "capabilities^{}" NUL capability-list)". -// See: https://github.com/git/git/blob/master/Documentation/technical/pack-protocol.txt -// See: https://github.com/git/git/blob/master/Documentation/technical/protocol-common.txt -func encodeFirstLine(e *advRefsEncoder) encoderStateFn { - const formatFirstLine = "%s %s\x00%s\n" - var firstLine string - capabilities := formatCaps(e.data.Capabilities) - - if e.firstRefName == "" { - firstLine = fmt.Sprintf(formatFirstLine, plumbing.ZeroHash.String(), "capabilities^{}", capabilities) - } else { - firstLine = fmt.Sprintf(formatFirstLine, e.firstRefHash.String(), e.firstRefName, capabilities) - - } - - if e.err = e.pe.EncodeString(firstLine); e.err != nil { - return nil - } - - return encodeRefs -} - -func formatCaps(c *capability.List) string { - if c == nil { - return "" - } - - return c.String() -} - -// Adds the (sorted) refs: hash SP refname EOL -// and their peeled refs if any. 
-func encodeRefs(e *advRefsEncoder) encoderStateFn { - for _, r := range e.sortedRefs { - if r == e.firstRefName { - continue - } - - hash := e.data.References[r] - if e.err = e.pe.Encodef("%s %s\n", hash.String(), r); e.err != nil { - return nil - } - - if hash, ok := e.data.Peeled[r]; ok { - if e.err = e.pe.Encodef("%s %s^{}\n", hash.String(), r); e.err != nil { - return nil - } - } - } - - return encodeShallow -} - -// Adds the (sorted) shallows: "shallow" SP hash EOL -func encodeShallow(e *advRefsEncoder) encoderStateFn { - sorted := sortShallows(e.data.Shallows) - for _, hash := range sorted { - if e.err = e.pe.Encodef("shallow %s\n", hash); e.err != nil { - return nil - } - } - - return encodeFlush -} - -func sortShallows(c []plumbing.Hash) []string { - ret := []string{} - for _, h := range c { - ret = append(ret, h.String()) - } - sort.Strings(ret) - - return ret -} - -func encodeFlush(e *advRefsEncoder) encoderStateFn { - e.err = e.pe.Flush() - return nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go deleted file mode 100644 index a129781157b..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go +++ /dev/null @@ -1,252 +0,0 @@ -// Package capability defines the server and client capabilities. -package capability - -// Capability describes a server or client capability. -type Capability string - -func (n Capability) String() string { - return string(n) -} - -const ( - // MultiACK capability allows the server to return "ACK obj-id continue" as - // soon as it finds a commit that it can use as a common base, between the - // client's wants and the client's have set. - // - // By sending this early, the server can potentially head off the client - // from walking any further down that particular branch of the client's - // repository history. The client may still need to walk down other - // branches, sending have lines for those, until the server has a - // complete cut across the DAG, or the client has said "done". - // - // Without multi_ack, a client sends have lines in --date-order until - // the server has found a common base. That means the client will send - // have lines that are already known by the server to be common, because - // they overlap in time with another branch that the server hasn't found - // a common base on yet. - // - // For example suppose the client has commits in caps that the server - // doesn't and the server has commits in lower case that the client - // doesn't, as in the following diagram: - // - // +---- u ---------------------- x - // / +----- y - // / / - // a -- b -- c -- d -- E -- F - // \ - // +--- Q -- R -- S - // - // If the client wants x,y and starts out by saying have F,S, the server - // doesn't know what F,S is. Eventually the client says "have d" and - // the server sends "ACK d continue" to let the client know to stop - // walking down that line (so don't send c-b-a), but it's not done yet, - // it needs a base for x. The client keeps going with S-R-Q, until a - // gets reached, at which point the server has a clear base and it all - // ends. - // - // Without multi_ack the client would have sent that c-b-a chain anyway, - // interleaved with S-R-Q. 
- MultiACK Capability = "multi_ack" - // MultiACKDetailed is an extension of multi_ack that permits client to - // better understand the server's in-memory state. - MultiACKDetailed Capability = "multi_ack_detailed" - // NoDone should only be used with the smart HTTP protocol. If - // multi_ack_detailed and no-done are both present, then the sender is - // free to immediately send a pack following its first "ACK obj-id ready" - // message. - // - // Without no-done in the smart HTTP protocol, the server session would - // end and the client has to make another trip to send "done" before - // the server can send the pack. no-done removes the last round and - // thus slightly reduces latency. - NoDone Capability = "no-done" - // ThinPack is one with deltas which reference base objects not - // contained within the pack (but are known to exist at the receiving - // end). This can reduce the network traffic significantly, but it - // requires the receiving end to know how to "thicken" these packs by - // adding the missing bases to the pack. - // - // The upload-pack server advertises 'thin-pack' when it can generate - // and send a thin pack. A client requests the 'thin-pack' capability - // when it understands how to "thicken" it, notifying the server that - // it can receive such a pack. A client MUST NOT request the - // 'thin-pack' capability if it cannot turn a thin pack into a - // self-contained pack. - // - // Receive-pack, on the other hand, is assumed by default to be able to - // handle thin packs, but can ask the client not to use the feature by - // advertising the 'no-thin' capability. A client MUST NOT send a thin - // pack if the server advertises the 'no-thin' capability. - // - // The reasons for this asymmetry are historical. The receive-pack - // program did not exist until after the invention of thin packs, so - // historically the reference implementation of receive-pack always - // understood thin packs. Adding 'no-thin' later allowed receive-pack - // to disable the feature in a backwards-compatible manner. - ThinPack Capability = "thin-pack" - // Sideband means that server can send, and client understand multiplexed - // progress reports and error info interleaved with the packfile itself. - // - // These two options are mutually exclusive. A modern client always - // favors Sideband64k. - // - // Either mode indicates that the packfile data will be streamed broken - // up into packets of up to either 1000 bytes in the case of 'side_band', - // or 65520 bytes in the case of 'side_band_64k'. Each packet is made up - // of a leading 4-byte pkt-line length of how much data is in the packet, - // followed by a 1-byte stream code, followed by the actual data. - // - // The stream code can be one of: - // - // 1 - pack data - // 2 - progress messages - // 3 - fatal error message just before stream aborts - // - // The "side-band-64k" capability came about as a way for newer clients - // that can handle much larger packets to request packets that are - // actually crammed nearly full, while maintaining backward compatibility - // for the older clients. - // - // Further, with side-band and its up to 1000-byte messages, it's actually - // 999 bytes of payload and 1 byte for the stream code. With side-band-64k, - // same deal, you have up to 65519 bytes of data and 1 byte for the stream - // code. - // - // The client MUST send only maximum of one of "side-band" and "side- - // band-64k". Server MUST diagnose it as an error if client requests - // both. 
- Sideband Capability = "side-band" - Sideband64k Capability = "side-band-64k" - // OFSDelta server can send, and client understand PACKv2 with delta - // referring to its base by position in pack rather than by an obj-id. That - // is, they can send/read OBJ_OFS_DELTA (aka type 6) in a packfile. - OFSDelta Capability = "ofs-delta" - // Agent the server may optionally send this capability to notify the client - // that the server is running version `X`. The client may optionally return - // its own agent string by responding with an `agent=Y` capability (but it - // MUST NOT do so if the server did not mention the agent capability). The - // `X` and `Y` strings may contain any printable ASCII characters except - // space (i.e., the byte range 32 < x < 127), and are typically of the form - // "package/version" (e.g., "git/1.8.3.1"). The agent strings are purely - // informative for statistics and debugging purposes, and MUST NOT be used - // to programmatically assume the presence or absence of particular features. - Agent Capability = "agent" - // Shallow capability adds "deepen", "shallow" and "unshallow" commands to - // the fetch-pack/upload-pack protocol so clients can request shallow - // clones. - Shallow Capability = "shallow" - // DeepenSince adds "deepen-since" command to fetch-pack/upload-pack - // protocol so the client can request shallow clones that are cut at a - // specific time, instead of depth. Internally it's equivalent of doing - // "rev-list --max-age=" on the server side. "deepen-since" - // cannot be used with "deepen". - DeepenSince Capability = "deepen-since" - // DeepenNot adds "deepen-not" command to fetch-pack/upload-pack - // protocol so the client can request shallow clones that are cut at a - // specific revision, instead of depth. Internally it's equivalent of - // doing "rev-list --not " on the server side. "deepen-not" - // cannot be used with "deepen", but can be used with "deepen-since". - DeepenNot Capability = "deepen-not" - // DeepenRelative if this capability is requested by the client, the - // semantics of "deepen" command is changed. The "depth" argument is the - // depth from the current shallow boundary, instead of the depth from - // remote refs. - DeepenRelative Capability = "deepen-relative" - // NoProgress the client was started with "git clone -q" or something, and - // doesn't want that side band 2. Basically the client just says "I do not - // wish to receive stream 2 on sideband, so do not send it to me, and if - // you did, I will drop it on the floor anyway". However, the sideband - // channel 3 is still used for error responses. - NoProgress Capability = "no-progress" - // IncludeTag capability is about sending annotated tags if we are - // sending objects they point to. If we pack an object to the client, and - // a tag object points exactly at that object, we pack the tag object too. - // In general this allows a client to get all new annotated tags when it - // fetches a branch, in a single network connection. - // - // Clients MAY always send include-tag, hardcoding it into a request when - // the server advertises this capability. The decision for a client to - // request include-tag only has to do with the client's desires for tag - // data, whether or not a server had advertised objects in the - // refs/tags/* namespace. - // - // Servers MUST pack the tags if their referrant is packed and the client - // has requested include-tags. 
- // - // Clients MUST be prepared for the case where a server has ignored - // include-tag and has not actually sent tags in the pack. In such - // cases the client SHOULD issue a subsequent fetch to acquire the tags - // that include-tag would have otherwise given the client. - // - // The server SHOULD send include-tag, if it supports it, regardless - // of whether or not there are tags available. - IncludeTag Capability = "include-tag" - // ReportStatus the receive-pack process can receive a 'report-status' - // capability, which tells it that the client wants a report of what - // happened after a packfile upload and reference update. If the pushing - // client requests this capability, after unpacking and updating references - // the server will respond with whether the packfile unpacked successfully - // and if each reference was updated successfully. If any of those were not - // successful, it will send back an error message. See pack-protocol.txt - // for example messages. - ReportStatus Capability = "report-status" - // DeleteRefs If the server sends back this capability, it means that - // it is capable of accepting a zero-id value as the target - // value of a reference update. It is not sent back by the client, it - // simply informs the client that it can be sent zero-id values - // to delete references - DeleteRefs Capability = "delete-refs" - // Quiet If the receive-pack server advertises this capability, it is - // capable of silencing human-readable progress output which otherwise may - // be shown when processing the received pack. A send-pack client should - // respond with the 'quiet' capability to suppress server-side progress - // reporting if the local progress reporting is also being suppressed - // (e.g., via `push -q`, or if stderr does not go to a tty). - Quiet Capability = "quiet" - // Atomic If the server sends this capability it is capable of accepting - // atomic pushes. If the pushing client requests this capability, the server - // will update the refs in one atomic transaction. Either all refs are - // updated or none. - Atomic Capability = "atomic" - // PushOptions If the server sends this capability it is able to accept - // push options after the update commands have been sent, but before the - // packfile is streamed. If the pushing client requests this capability, - // the server will pass the options to the pre- and post- receive hooks - // that process this push request. - PushOptions Capability = "push-options" - // AllowTipSHA1InWant if the upload-pack server advertises this capability, - // fetch-pack may send "want" lines with SHA-1s that exist at the server but - // are not advertised by upload-pack. - AllowTipSHA1InWant Capability = "allow-tip-sha1-in-want" - // AllowReachableSHA1InWant if the upload-pack server advertises this - // capability, fetch-pack may send "want" lines with SHA-1s that exist at - // the server but are not advertised by upload-pack. - AllowReachableSHA1InWant Capability = "allow-reachable-sha1-in-want" - // PushCert the receive-pack server that advertises this capability is - // willing to accept a signed push certificate, and asks the to be - // included in the push certificate. A send-pack client MUST NOT - // send a push-cert packet unless the receive-pack server advertises - // this capability. - PushCert Capability = "push-cert" - // SymRef symbolic reference support for better negotiation. 
- SymRef Capability = "symref"
-)
-
-const DefaultAgent = "go-git/4.x"
-
-var known = map[Capability]bool{
- MultiACK: true, MultiACKDetailed: true, NoDone: true, ThinPack: true,
- Sideband: true, Sideband64k: true, OFSDelta: true, Agent: true,
- Shallow: true, DeepenSince: true, DeepenNot: true, DeepenRelative: true,
- NoProgress: true, IncludeTag: true, ReportStatus: true, DeleteRefs: true,
- Quiet: true, Atomic: true, PushOptions: true, AllowTipSHA1InWant: true,
- AllowReachableSHA1InWant: true, PushCert: true, SymRef: true,
-}
-
-var requiresArgument = map[Capability]bool{
- Agent: true, PushCert: true, SymRef: true,
-}
-
-var multipleArgument = map[Capability]bool{
- SymRef: true,
-}
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/list.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/list.go
deleted file mode 100644
index 96092112d21..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/list.go
+++ /dev/null
@@ -1,196 +0,0 @@
-package capability
-
-import (
- "bytes"
- "errors"
- "fmt"
- "strings"
-)
-
-var (
- // ErrArgumentsRequired is returned if no arguments are given with a
- // capability that requires arguments
- ErrArgumentsRequired = errors.New("arguments required")
- // ErrArguments is returned if arguments are given with a capability that
- // does not support arguments
- ErrArguments = errors.New("arguments not allowed")
- // ErrEmptyArgument is returned when an empty value is given
- ErrEmptyArgument = errors.New("empty argument")
- // ErrMultipleArguments is returned when multiple arguments are given to a
- // capability that does not support them
- ErrMultipleArguments = errors.New("multiple arguments not allowed")
-)
-
-// List represents a list of capabilities
-type List struct {
- m map[Capability]*entry
- sort []string
-}
-
-type entry struct {
- Name Capability
- Values []string
-}
-
-// NewList returns a new List of capabilities
-func NewList() *List {
- return &List{
- m: make(map[Capability]*entry),
- }
-}
-
-// IsEmpty returns true if the List is empty
-func (l *List) IsEmpty() bool {
- return len(l.sort) == 0
-}
-
-// Decode decodes a list of capabilities from raw into the list
-func (l *List) Decode(raw []byte) error {
- // git 1.x receive pack used to send a leading space on its
- // git-receive-pack capabilities announcement. We just trim space to be
- // tolerant to space changes in different versions.
- raw = bytes.TrimSpace(raw)
-
- if len(raw) == 0 {
- return nil
- }
-
- for _, data := range bytes.Split(raw, []byte{' '}) {
- pair := bytes.SplitN(data, []byte{'='}, 2)
-
- c := Capability(pair[0])
- if len(pair) == 1 {
- if err := l.Add(c); err != nil {
- return err
- }
-
- continue
- }
-
- if err := l.Add(c, string(pair[1])); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Get returns the values for a capability
-func (l *List) Get(capability Capability) []string {
- if _, ok := l.m[capability]; !ok {
- return nil
- }
-
- return l.m[capability].Values
-}
-
-// Set sets a capability, removing any previous values
-func (l *List) Set(capability Capability, values ...string) error {
- if _, ok := l.m[capability]; ok {
- delete(l.m, capability)
- }
-
- return l.Add(capability, values...)
-} - -// Add adds a capability, values are optional -func (l *List) Add(c Capability, values ...string) error { - if err := l.validate(c, values); err != nil { - return err - } - - if !l.Supports(c) { - l.m[c] = &entry{Name: c} - l.sort = append(l.sort, c.String()) - } - - if len(values) == 0 { - return nil - } - - if known[c] && !multipleArgument[c] && len(l.m[c].Values) > 0 { - return ErrMultipleArguments - } - - l.m[c].Values = append(l.m[c].Values, values...) - return nil -} - -func (l *List) validateNoEmptyArgs(values []string) error { - for _, v := range values { - if v == "" { - return ErrEmptyArgument - } - } - return nil -} - -func (l *List) validate(c Capability, values []string) error { - if !known[c] { - return l.validateNoEmptyArgs(values) - } - if requiresArgument[c] && len(values) == 0 { - return ErrArgumentsRequired - } - - if !requiresArgument[c] && len(values) != 0 { - return ErrArguments - } - - if !multipleArgument[c] && len(values) > 1 { - return ErrMultipleArguments - } - return l.validateNoEmptyArgs(values) -} - -// Supports returns true if capability is present -func (l *List) Supports(capability Capability) bool { - _, ok := l.m[capability] - return ok -} - -// Delete deletes a capability from the List -func (l *List) Delete(capability Capability) { - if !l.Supports(capability) { - return - } - - delete(l.m, capability) - for i, c := range l.sort { - if c != string(capability) { - continue - } - - l.sort = append(l.sort[:i], l.sort[i+1:]...) - return - } -} - -// All returns a slice with all defined capabilities. -func (l *List) All() []Capability { - var cs []Capability - for _, key := range l.sort { - cs = append(cs, Capability(key)) - } - - return cs -} - -// String generates the capabilities strings, the capabilities are sorted in -// insertion order -func (l *List) String() string { - var o []string - for _, key := range l.sort { - cap := l.m[Capability(key)] - if len(cap.Values) == 0 { - o = append(o, key) - continue - } - - for _, value := range cap.Values { - o = append(o, fmt.Sprintf("%s=%s", key, value)) - } - } - - return strings.Join(o, " ") -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/common.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/common.go deleted file mode 100644 index ab07ac8f74f..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/common.go +++ /dev/null @@ -1,70 +0,0 @@ -package packp - -import ( - "fmt" -) - -type stateFn func() stateFn - -const ( - // common - hashSize = 40 - - // advrefs - head = "HEAD" - noHead = "capabilities^{}" -) - -var ( - // common - sp = []byte(" ") - eol = []byte("\n") - eq = []byte{'='} - - // advertised-refs - null = []byte("\x00") - peeled = []byte("^{}") - noHeadMark = []byte(" capabilities^{}\x00") - - // upload-request - want = []byte("want ") - shallow = []byte("shallow ") - deepen = []byte("deepen") - deepenCommits = []byte("deepen ") - deepenSince = []byte("deepen-since ") - deepenReference = []byte("deepen-not ") - - // shallow-update - unshallow = []byte("unshallow ") - - // server-response - ack = []byte("ACK") - nak = []byte("NAK") - - // updreq - shallowNoSp = []byte("shallow") -) - -func isFlush(payload []byte) bool { - return len(payload) == 0 -} - -// ErrUnexpectedData represents an unexpected data decoding a message -type ErrUnexpectedData struct { - Msg string - Data []byte -} - -// NewErrUnexpectedData returns a new ErrUnexpectedData containing the data and -// the message 
given -func NewErrUnexpectedData(msg string, data []byte) error { - return &ErrUnexpectedData{Msg: msg, Data: data} -} - -func (err *ErrUnexpectedData) Error() string { - if len(err.Data) == 0 { - return err.Msg - } - - return fmt.Sprintf("%s (%s)", err.Msg, err.Data) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/doc.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/doc.go deleted file mode 100644 index 4950d1d6625..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/doc.go +++ /dev/null @@ -1,724 +0,0 @@ -package packp - -/* - -A nice way to trace the real data transmitted and received by git, use: - -GIT_TRACE_PACKET=true git ls-remote http://github.com/src-d/go-git -GIT_TRACE_PACKET=true git clone http://github.com/src-d/go-git - -Here follows a copy of the current protocol specification at the time of -this writing. - -(Please notice that most http git servers will add a flush-pkt after the -first pkt-line when using HTTP smart.) - - -Documentation Common to Pack and Http Protocols -=============================================== - -ABNF Notation -------------- - -ABNF notation as described by RFC 5234 is used within the protocol documents, -except the following replacement core rules are used: ----- - HEXDIG = DIGIT / "a" / "b" / "c" / "d" / "e" / "f" ----- - -We also define the following common rules: ----- - NUL = %x00 - zero-id = 40*"0" - obj-id = 40*(HEXDIGIT) - - refname = "HEAD" - refname /= "refs/" ----- - -A refname is a hierarchical octet string beginning with "refs/" and -not violating the 'git-check-ref-format' command's validation rules. -More specifically, they: - -. They can include slash `/` for hierarchical (directory) - grouping, but no slash-separated component can begin with a - dot `.`. - -. They must contain at least one `/`. This enforces the presence of a - category like `heads/`, `tags/` etc. but the actual names are not - restricted. - -. They cannot have two consecutive dots `..` anywhere. - -. They cannot have ASCII control characters (i.e. bytes whose - values are lower than \040, or \177 `DEL`), space, tilde `~`, - caret `^`, colon `:`, question-mark `?`, asterisk `*`, - or open bracket `[` anywhere. - -. They cannot end with a slash `/` or a dot `.`. - -. They cannot end with the sequence `.lock`. - -. They cannot contain a sequence `@{`. - -. They cannot contain a `\\`. - - -pkt-line Format ---------------- - -Much (but not all) of the payload is described around pkt-lines. - -A pkt-line is a variable length binary string. The first four bytes -of the line, the pkt-len, indicates the total length of the line, -in hexadecimal. The pkt-len includes the 4 bytes used to contain -the length's hexadecimal representation. - -A pkt-line MAY contain binary data, so implementors MUST ensure -pkt-line parsing/formatting routines are 8-bit clean. - -A non-binary line SHOULD BE terminated by an LF, which if present -MUST be included in the total length. Receivers MUST treat pkt-lines -with non-binary data the same whether or not they contain the trailing -LF (stripping the LF if present, and not complaining when it is -missing). - -The maximum length of a pkt-line's data component is 65516 bytes. -Implementations MUST NOT send pkt-line whose length exceeds 65520 -(65516 bytes of payload + 4 bytes of length data). - -Implementations SHOULD NOT send an empty pkt-line ("0004"). 
- -A pkt-line with a length field of 0 ("0000"), called a flush-pkt, -is a special case and MUST be handled differently than an empty -pkt-line ("0004"). - ----- - pkt-line = data-pkt / flush-pkt - - data-pkt = pkt-len pkt-payload - pkt-len = 4*(HEXDIG) - pkt-payload = (pkt-len - 4)*(OCTET) - - flush-pkt = "0000" ----- - -Examples (as C-style strings): - ----- - pkt-line actual value - --------------------------------- - "0006a\n" "a\n" - "0005a" "a" - "000bfoobar\n" "foobar\n" - "0004" "" ----- - -Packfile transfer protocols -=========================== - -Git supports transferring data in packfiles over the ssh://, git://, http:// and -file:// transports. There exist two sets of protocols, one for pushing -data from a client to a server and another for fetching data from a -server to a client. The three transports (ssh, git, file) use the same -protocol to transfer data. http is documented in http-protocol.txt. - -The processes invoked in the canonical Git implementation are 'upload-pack' -on the server side and 'fetch-pack' on the client side for fetching data; -then 'receive-pack' on the server and 'send-pack' on the client for pushing -data. The protocol functions to have a server tell a client what is -currently on the server, then for the two to negotiate the smallest amount -of data to send in order to fully update one or the other. - -pkt-line Format ---------------- - -The descriptions below build on the pkt-line format described in -protocol-common.txt. When the grammar indicate `PKT-LINE(...)`, unless -otherwise noted the usual pkt-line LF rules apply: the sender SHOULD -include a LF, but the receiver MUST NOT complain if it is not present. - -Transports ----------- -There are three transports over which the packfile protocol is -initiated. The Git transport is a simple, unauthenticated server that -takes the command (almost always 'upload-pack', though Git -servers can be configured to be globally writable, in which 'receive- -pack' initiation is also allowed) with which the client wishes to -communicate and executes it and connects it to the requesting -process. - -In the SSH transport, the client just runs the 'upload-pack' -or 'receive-pack' process on the server over the SSH protocol and then -communicates with that invoked process over the SSH connection. - -The file:// transport runs the 'upload-pack' or 'receive-pack' -process locally and communicates with it over a pipe. - -Git Transport -------------- - -The Git transport starts off by sending the command and repository -on the wire using the pkt-line format, followed by a NUL byte and a -hostname parameter, terminated by a NUL byte. - - 0032git-upload-pack /project.git\0host=myserver.com\0 - --- - git-proto-request = request-command SP pathname NUL [ host-parameter NUL ] - request-command = "git-upload-pack" / "git-receive-pack" / - "git-upload-archive" ; case sensitive - pathname = *( %x01-ff ) ; exclude NUL - host-parameter = "host=" hostname [ ":" port ] --- - -Only host-parameter is allowed in the git-proto-request. Clients -MUST NOT attempt to send additional parameters. It is used for the -git-daemon name based virtual hosting. See --interpolated-path -option to git daemon, with the %H/%CH format characters. 
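
The framing rules above are easy to get wrong at the boundaries (an empty pkt-line "0004" is not a flush-pkt "0000"). A minimal Go sketch of a pkt-line encoder reproducing the example table; pktLine and flushPkt are illustrative names, not part of the protocol or of the vendored package:

    package main

    import "fmt"

    // pktLine frames a payload as a pkt-line: a 4-digit lowercase hex
    // length (payload length plus the 4 length bytes), then the payload.
    func pktLine(payload string) string {
        return fmt.Sprintf("%04x%s", len(payload)+4, payload)
    }

    // flushPkt is the special "0000" packet; note it differs from the
    // empty pkt-line "0004", which implementations should not send.
    const flushPkt = "0000"

    func main() {
        fmt.Printf("%q\n", pktLine("a\n"))      // "0006a\n"
        fmt.Printf("%q\n", pktLine("foobar\n")) // "000bfoobar\n"
        fmt.Printf("%q\n", pktLine(""))         // "0004"
        fmt.Printf("%q\n", flushPkt)            // "0000"
    }
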
- -Basically what the Git client is doing to connect to an 'upload-pack' -process on the server side over the Git protocol is this: - - $ echo -e -n \ - "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" | - nc -v example.com 9418 - -If the server refuses the request for some reasons, it could abort -gracefully with an error message. - ----- - error-line = PKT-LINE("ERR" SP explanation-text) ----- - - -SSH Transport -------------- - -Initiating the upload-pack or receive-pack processes over SSH is -executing the binary on the server via SSH remote execution. -It is basically equivalent to running this: - - $ ssh git.example.com "git-upload-pack '/project.git'" - -For a server to support Git pushing and pulling for a given user over -SSH, that user needs to be able to execute one or both of those -commands via the SSH shell that they are provided on login. On some -systems, that shell access is limited to only being able to run those -two commands, or even just one of them. - -In an ssh:// format URI, it's absolute in the URI, so the '/' after -the host name (or port number) is sent as an argument, which is then -read by the remote git-upload-pack exactly as is, so it's effectively -an absolute path in the remote filesystem. - - git clone ssh://user@example.com/project.git - | - v - ssh user@example.com "git-upload-pack '/project.git'" - -In a "user@host:path" format URI, its relative to the user's home -directory, because the Git client will run: - - git clone user@example.com:project.git - | - v - ssh user@example.com "git-upload-pack 'project.git'" - -The exception is if a '~' is used, in which case -we execute it without the leading '/'. - - ssh://user@example.com/~alice/project.git, - | - v - ssh user@example.com "git-upload-pack '~alice/project.git'" - -A few things to remember here: - -- The "command name" is spelled with dash (e.g. git-upload-pack), but - this can be overridden by the client; - -- The repository path is always quoted with single quotes. - -Fetching Data From a Server ---------------------------- - -When one Git repository wants to get data that a second repository -has, the first can 'fetch' from the second. This operation determines -what data the server has that the client does not then streams that -data down to the client in packfile format. - - -Reference Discovery -------------------- - -When the client initially connects the server will immediately respond -with a listing of each reference it has (all branches and tags) along -with the object name that each reference currently points to. - - $ echo -e -n "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" | - nc -v example.com 9418 - 00887217a7c7e582c46cec22a130adf4b9d7d950fba0 HEAD\0multi_ack thin-pack - side-band side-band-64k ofs-delta shallow no-progress include-tag - 00441d3fcd5ced445d1abc402225c0b8a1299641f497 refs/heads/integration - 003f7217a7c7e582c46cec22a130adf4b9d7d950fba0 refs/heads/master - 003cb88d2441cac0977faf98efc80305012112238d9d refs/tags/v0.9 - 003c525128480b96c89e6418b1e40909bf6c5b2d580f refs/tags/v1.0 - 003fe92df48743b7bc7d26bcaabfddde0a1e20cae47c refs/tags/v1.0^{} - 0000 - -The returned response is a pkt-line stream describing each ref and -its current value. The stream MUST be sorted by name according to -the C locale ordering. - -If HEAD is a valid ref, HEAD MUST appear as the first advertised -ref. If HEAD is not a valid ref, HEAD MUST NOT appear in the -advertisement list at all, but other refs may still appear. 
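
To make the advertisement format concrete, here is a rough Go sketch that splits the first advertised line into object id, ref name, and capability list. parseFirstRef is a hypothetical helper; a real decoder (such as the advrefs_decode.go removed above) also validates lengths and hex digits:

    package main

    import (
        "fmt"
        "strings"
    )

    // parseFirstRef splits the payload of the first advertised ref into
    // the object id, the ref name, and the capability list that follows
    // the NUL byte. Error handling is deliberately elided.
    func parseFirstRef(payload string) (hash, name string, caps []string) {
        refPart, capPart, _ := strings.Cut(payload, "\x00")
        hash, name, _ = strings.Cut(refPart, " ")
        return hash, name, strings.Fields(capPart)
    }

    func main() {
        h, n, c := parseFirstRef(
            "7217a7c7e582c46cec22a130adf4b9d7d950fba0 HEAD\x00multi_ack thin-pack side-band")
        fmt.Println(h, n, c)
        // 7217a7c7e582c46cec22a130adf4b9d7d950fba0 HEAD [multi_ack thin-pack side-band]
    }
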
- -The stream MUST include capability declarations behind a NUL on the -first ref. The peeled value of a ref (that is "ref^{}") MUST be -immediately after the ref itself, if presented. A conforming server -MUST peel the ref if it's an annotated tag. - ----- - advertised-refs = (no-refs / list-of-refs) - *shallow - flush-pkt - - no-refs = PKT-LINE(zero-id SP "capabilities^{}" - NUL capability-list) - - list-of-refs = first-ref *other-ref - first-ref = PKT-LINE(obj-id SP refname - NUL capability-list) - - other-ref = PKT-LINE(other-tip / other-peeled) - other-tip = obj-id SP refname - other-peeled = obj-id SP refname "^{}" - - shallow = PKT-LINE("shallow" SP obj-id) - - capability-list = capability *(SP capability) - capability = 1*(LC_ALPHA / DIGIT / "-" / "_") - LC_ALPHA = %x61-7A ----- - -Server and client MUST use lowercase for obj-id, both MUST treat obj-id -as case-insensitive. - -See protocol-capabilities.txt for a list of allowed server capabilities -and descriptions. - -Packfile Negotiation --------------------- -After reference and capabilities discovery, the client can decide to -terminate the connection by sending a flush-pkt, telling the server it can -now gracefully terminate, and disconnect, when it does not need any pack -data. This can happen with the ls-remote command, and also can happen when -the client already is up-to-date. - -Otherwise, it enters the negotiation phase, where the client and -server determine what the minimal packfile necessary for transport is, -by telling the server what objects it wants, its shallow objects -(if any), and the maximum commit depth it wants (if any). The client -will also send a list of the capabilities it wants to be in effect, -out of what the server said it could do with the first 'want' line. - ----- - upload-request = want-list - *shallow-line - *1depth-request - flush-pkt - - want-list = first-want - *additional-want - - shallow-line = PKT-LINE("shallow" SP obj-id) - - depth-request = PKT-LINE("deepen" SP depth) / - PKT-LINE("deepen-since" SP timestamp) / - PKT-LINE("deepen-not" SP ref) - - first-want = PKT-LINE("want" SP obj-id SP capability-list) - additional-want = PKT-LINE("want" SP obj-id) - - depth = 1*DIGIT ----- - -Clients MUST send all the obj-ids it wants from the reference -discovery phase as 'want' lines. Clients MUST send at least one -'want' command in the request body. Clients MUST NOT mention an -obj-id in a 'want' command which did not appear in the response -obtained through ref discovery. - -The client MUST write all obj-ids which it only has shallow copies -of (meaning that it does not have the parents of a commit) as -'shallow' lines so that the server is aware of the limitations of -the client's history. - -The client now sends the maximum commit history depth it wants for -this transaction, which is the number of commits it wants from the -tip of the history, if any, as a 'deepen' line. A depth of 0 is the -same as not making a depth request. The client does not want to receive -any commits beyond this depth, nor does it want objects needed only to -complete those commits. Commits whose parents are not received as a -result are defined as shallow and marked as such in the server. This -information is sent back to the client in the next step. - -Once all the 'want's and 'shallow's (and optional 'deepen') are -transferred, clients MUST send a flush-pkt, to tell the server side -that it is done sending the list. 
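
A sketch of how a client might assemble such an upload-request, reusing the pkt-line framing shown earlier; the pkt helper and Build function are illustrative only, under the assumption that only the first want line carries the capability list:

    package uploadreq

    import (
        "fmt"
        "strings"
    )

    func pkt(payload string) string { return fmt.Sprintf("%04x%s", len(payload)+4, payload) }

    // Build assembles a want-list, optional shallow lines and an optional
    // deepen request, terminated by a flush-pkt, per the grammar above.
    func Build(wants, shallows []string, depth int, caps string) string {
        var b strings.Builder
        for i, id := range wants {
            if i == 0 {
                // first-want carries the capability list
                b.WriteString(pkt(fmt.Sprintf("want %s %s\n", id, caps)))
                continue
            }
            b.WriteString(pkt(fmt.Sprintf("want %s\n", id)))
        }
        for _, id := range shallows {
            b.WriteString(pkt(fmt.Sprintf("shallow %s\n", id)))
        }
        if depth > 0 {
            b.WriteString(pkt(fmt.Sprintf("deepen %d\n", depth)))
        }
        b.WriteString("0000") // flush-pkt: done sending the list
        return b.String()
    }
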
- -Otherwise, if the client sent a positive depth request, the server -will determine which commits will and will not be shallow and -send this information to the client. If the client did not request -a positive depth, this step is skipped. - ----- - shallow-update = *shallow-line - *unshallow-line - flush-pkt - - shallow-line = PKT-LINE("shallow" SP obj-id) - - unshallow-line = PKT-LINE("unshallow" SP obj-id) ----- - -If the client has requested a positive depth, the server will compute -the set of commits which are no deeper than the desired depth. The set -of commits start at the client's wants. - -The server writes 'shallow' lines for each -commit whose parents will not be sent as a result. The server writes -an 'unshallow' line for each commit which the client has indicated is -shallow, but is no longer shallow at the currently requested depth -(that is, its parents will now be sent). The server MUST NOT mark -as unshallow anything which the client has not indicated was shallow. - -Now the client will send a list of the obj-ids it has using 'have' -lines, so the server can make a packfile that only contains the objects -that the client needs. In multi_ack mode, the canonical implementation -will send up to 32 of these at a time, then will send a flush-pkt. The -canonical implementation will skip ahead and send the next 32 immediately, -so that there is always a block of 32 "in-flight on the wire" at a time. - ----- - upload-haves = have-list - compute-end - - have-list = *have-line - have-line = PKT-LINE("have" SP obj-id) - compute-end = flush-pkt / PKT-LINE("done") ----- - -If the server reads 'have' lines, it then will respond by ACKing any -of the obj-ids the client said it had that the server also has. The -server will ACK obj-ids differently depending on which ack mode is -chosen by the client. - -In multi_ack mode: - - * the server will respond with 'ACK obj-id continue' for any common - commits. - - * once the server has found an acceptable common base commit and is - ready to make a packfile, it will blindly ACK all 'have' obj-ids - back to the client. - - * the server will then send a 'NAK' and then wait for another response - from the client - either a 'done' or another list of 'have' lines. - -In multi_ack_detailed mode: - - * the server will differentiate the ACKs where it is signaling - that it is ready to send data with 'ACK obj-id ready' lines, and - signals the identified common commits with 'ACK obj-id common' lines. - -Without either multi_ack or multi_ack_detailed: - - * upload-pack sends "ACK obj-id" on the first common object it finds. - After that it says nothing until the client gives it a "done". - - * upload-pack sends "NAK" on a flush-pkt if no common object - has been found yet. If one has been found, and thus an ACK - was already sent, it's silent on the flush-pkt. - -After the client has gotten enough ACK responses that it can determine -that the server has enough information to send an efficient packfile -(in the canonical implementation, this is determined when it has received -enough ACKs that it can color everything left in the --date-order queue -as common with the server, or the --date-order queue is empty), or the -client determines that it wants to give up (in the canonical implementation, -this is determined when the client sends 256 'have' lines without getting -any of them ACKed by the server - meaning there is nothing in common and -the server should just send all of its objects), then the client will send -a 'done' command. 
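Before looking at how 'done' is handled, the have side of the exchange can be sketched the same way with go-git's UploadHaves type (also deleted later in this patch); the hashes are the ones reused in the fetch example below, and Encode's boolean controls whether the round is terminated with a flush-pkt.

----
package main

import (
	"os"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/protocol/packp"
)

func main() {
	// Hypothetical local tips the client advertises as 'have' lines.
	haves := packp.UploadHaves{Haves: []plumbing.Hash{
		plumbing.NewHash("7e47fe2bd8d01d481f44d7af0531bd93d3b21c01"),
		plumbing.NewHash("74730d410fcb6603ace96f1dc55ea6196122532d"),
	}}

	// Encode emits one PKT-LINE("have" SP obj-id) per hash; passing true
	// appends the flush-pkt that ends this round of negotiation.
	if err := haves.Encode(os.Stdout, true); err != nil {
		panic(err)
	}
}
----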
The 'done' command signals to the server that the client -is ready to receive its packfile data. - -However, the 256 limit *only* turns on in the canonical client -implementation if we have received at least one "ACK %s continue" -during a prior round. This helps to ensure that at least one common -ancestor is found before we give up entirely. - -Once the 'done' line is read from the client, the server will either -send a final 'ACK obj-id' or it will send a 'NAK'. 'obj-id' is the object -name of the last commit determined to be common. The server only sends -ACK after 'done' if there is at least one common base and multi_ack or -multi_ack_detailed is enabled. The server always sends NAK after 'done' -if there is no common base found. - -Then the server will start sending its packfile data. - ----- - server-response = *ack_multi ack / nak - ack_multi = PKT-LINE("ACK" SP obj-id ack_status) - ack_status = "continue" / "common" / "ready" - ack = PKT-LINE("ACK" SP obj-id) - nak = PKT-LINE("NAK") ----- - -A simple clone may look like this (with no 'have' lines): - ----- - C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \ - side-band-64k ofs-delta\n - C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n - C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n - C: 0032want 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n - C: 0032want 74730d410fcb6603ace96f1dc55ea6196122532d\n - C: 0000 - C: 0009done\n - - S: 0008NAK\n - S: [PACKFILE] ----- - -An incremental update (fetch) response might look like this: - ----- - C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \ - side-band-64k ofs-delta\n - C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n - C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n - C: 0000 - C: 0032have 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n - C: [30 more have lines] - C: 0032have 74730d410fcb6603ace96f1dc55ea6196122532d\n - C: 0000 - - S: 003aACK 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01 continue\n - S: 003aACK 74730d410fcb6603ace96f1dc55ea6196122532d continue\n - S: 0008NAK\n - - C: 0009done\n - - S: 0031ACK 74730d410fcb6603ace96f1dc55ea6196122532d\n - S: [PACKFILE] ----- - - -Packfile Data -------------- - -Now that the client and server have finished negotiation about what -the minimal amount of data that needs to be sent to the client is, the server -will construct and send the required data in packfile format. - -See pack-format.txt for what the packfile itself actually looks like. - -If 'side-band' or 'side-band-64k' capabilities have been specified by -the client, the server will send the packfile data multiplexed. - -Each packet starting with the packet-line length of the amount of data -that follows, followed by a single byte specifying the sideband the -following data is coming in on. - -In 'side-band' mode, it will send up to 999 data bytes plus 1 control -code, for a total of up to 1000 bytes in a pkt-line. In 'side-band-64k' -mode it will send up to 65519 data bytes plus 1 control code, for a -total of up to 65520 bytes in a pkt-line. - -The sideband byte will be a '1', '2' or a '3'. Sideband '1' will contain -packfile data, sideband '2' will be used for progress information that the -client will generally print to stderr and sideband '3' is used for error -information. - -If no 'side-band' capability was specified, the server will stream the -entire packfile without multiplexing. 
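To make the framing concrete, here is a minimal hand-rolled demultiplexer. This is a sketch rather than the canonical implementation: it assumes the stream is already past negotiation, ignores capability handling, and simply routes each pkt-line payload by its leading sideband byte. The sample stream built in main is fabricated.

----
package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"os"
	"strconv"
)

// demux routes each pkt-line of a multiplexed stream by its sideband byte:
// 1 = pack data, 2 = progress, 3 = fatal error. A flush-pkt ends the stream.
func demux(r io.Reader, pack, progress io.Writer) error {
	hdr := make([]byte, 4)
	for {
		if _, err := io.ReadFull(r, hdr); err != nil {
			return err
		}
		n, err := strconv.ParseUint(string(hdr), 16, 32)
		if err != nil {
			return fmt.Errorf("bad pkt-line length %q: %v", hdr, err)
		}
		if n == 0 {
			return nil // flush-pkt: end of the multiplexed stream
		}
		if n < 5 {
			return fmt.Errorf("pkt-line too short for a sideband byte: %d", n)
		}
		payload := make([]byte, n-4) // the length includes its own 4 bytes
		if _, err := io.ReadFull(r, payload); err != nil {
			return err
		}
		switch payload[0] {
		case 1:
			pack.Write(payload[1:])
		case 2:
			progress.Write(payload[1:])
		case 3:
			return errors.New("remote error: " + string(payload[1:]))
		default:
			return fmt.Errorf("unknown sideband channel %d", payload[0])
		}
	}
}

func main() {
	// Build a fake multiplexed stream: one progress line, one data packet.
	var stream bytes.Buffer
	for _, p := range []string{"\x02Counting objects: 5, done.\n", "\x01PACK"} {
		fmt.Fprintf(&stream, "%04x%s", len(p)+4, p)
	}
	stream.WriteString("0000")

	var pack bytes.Buffer
	if err := demux(&stream, &pack, os.Stderr); err != nil {
		panic(err)
	}
	fmt.Printf("received %d bytes of pack data\n", pack.Len())
}
----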
- - -Pushing Data To a Server ------------------------- - -Pushing data to a server will invoke the 'receive-pack' process on the -server, which will allow the client to tell it which references it should -update and then send all the data the server will need for those new -references to be complete. Once all the data is received and validated, -the server will then update its references to what the client specified. - -Authentication --------------- - -The protocol itself contains no authentication mechanisms. That is to be -handled by the transport, such as SSH, before the 'receive-pack' process is -invoked. If 'receive-pack' is configured over the Git transport, those -repositories will be writable by anyone who can access that port (9418) as -that transport is unauthenticated. - -Reference Discovery -------------------- - -The reference discovery phase is done nearly the same way as it is in the -fetching protocol. Each reference obj-id and name on the server is sent -in packet-line format to the client, followed by a flush-pkt. The only -real difference is that the capability listing is different - the only -possible values are 'report-status', 'delete-refs', 'ofs-delta' and -'push-options'. - -Reference Update Request and Packfile Transfer ----------------------------------------------- - -Once the client knows what references the server is at, it can send a -list of reference update requests. For each reference on the server -that it wants to update, it sends a line listing the obj-id currently on -the server, the obj-id the client would like to update it to and the name -of the reference. - -This list is followed by a flush-pkt. Then the push options are transmitted -one per packet followed by another flush-pkt. After that the packfile that -should contain all the objects that the server will need to complete the new -references will be sent. - ----- - update-request = *shallow ( command-list | push-cert ) [packfile] - - shallow = PKT-LINE("shallow" SP obj-id) - - command-list = PKT-LINE(command NUL capability-list) - *PKT-LINE(command) - flush-pkt - - command = create / delete / update - create = zero-id SP new-id SP name - delete = old-id SP zero-id SP name - update = old-id SP new-id SP name - - old-id = obj-id - new-id = obj-id - - push-cert = PKT-LINE("push-cert" NUL capability-list LF) - PKT-LINE("certificate version 0.1" LF) - PKT-LINE("pusher" SP ident LF) - PKT-LINE("pushee" SP url LF) - PKT-LINE("nonce" SP nonce LF) - PKT-LINE(LF) - *PKT-LINE(command LF) - *PKT-LINE(gpg-signature-lines LF) - PKT-LINE("push-cert-end" LF) - - packfile = "PACK" 28*(OCTET) ----- - -If the receiving end does not support delete-refs, the sending end MUST -NOT ask for delete command. - -If the receiving end does not support push-cert, the sending end -MUST NOT send a push-cert command. When a push-cert command is -sent, command-list MUST NOT be sent; the commands recorded in the -push certificate is used instead. - -The packfile MUST NOT be sent if the only command used is 'delete'. - -A packfile MUST be sent if either create or update command is used, -even if the server already has all the necessary objects. In this -case the client MUST send an empty packfile. The only time this -is likely to happen is if the client is creating -a new branch or a tag that points to an existing obj-id. 
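As a sketch, go-git's packp types (deleted later in this patch) can emit such a command-list; the obj-ids below are borrowed from the example session at the end of this section. The packfile is omitted here for brevity, although per the rules above a real update command would be followed by one, possibly empty.

----
package main

import (
	"os"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/protocol/packp"
	"github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
)

func main() {
	req := packp.NewReferenceUpdateRequest()
	req.Capabilities.Set(capability.ReportStatus)

	// An 'update' command: old-id SP new-id SP name.
	req.Commands = append(req.Commands, &packp.Command{
		Name: plumbing.ReferenceName("refs/heads/master"),
		Old:  plumbing.NewHash("74730d410fcb6603ace96f1dc55ea6196122532d"),
		New:  plumbing.NewHash("5a3f6be755bbb7deae50065988cbfa1ffa9ab68a"),
	})

	// Emits PKT-LINE(command NUL capability-list) and a flush-pkt; if
	// req.Packfile were set, Encode would stream it after the flush.
	if err := req.Encode(os.Stdout); err != nil {
		panic(err)
	}
}
----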
-
-The server will receive the packfile, unpack it, then validate, for each
-reference that is being updated, that it hasn't changed while the request
-was being processed (the obj-id is still the same as the old-id), and
-it will run any update hooks to make sure that the update is acceptable.
-If all of that is fine, the server will then update the references.
-
-Push Certificate
-----------------
-
-A push certificate begins with a set of header lines. After the
-header and an empty line, the protocol commands follow, one per
-line. Note that the trailing LF in push-cert PKT-LINEs is _not_
-optional; it must be present.
-
-Currently, the following header fields are defined:
-
-`pusher` ident::
-	Identify the GPG key in "Human Readable Name <email@address>"
-	format.
-
-`pushee` url::
-	The repository URL (anonymized, if the URL contains
-	authentication material) the user who ran `git push`
-	intended to push into.
-
-`nonce` nonce::
-	The 'nonce' string the receiving repository asked the
-	pushing user to include in the certificate, to prevent
-	replay attacks.
-
-The GPG signature lines are a detached signature for the contents
-recorded in the push certificate before the signature block begins.
-The detached signature is used to certify that the commands were
-given by the pusher, who must be the signer.
-
-Report Status
--------------
-
-After receiving the pack data from the sender, the receiver sends a
-report if the 'report-status' capability is in effect.
-It is a short listing of what happened in that update. It will first
-list the status of the packfile unpacking as either 'unpack ok' or
-'unpack [error]'. Then it will list the status for each of the references
-that it tried to update. Each line is either 'ok [refname]' if the
-update was successful, or 'ng [refname] [error]' if the update was not.
-
-----
-  report-status     = unpack-status
-                      1*(command-status)
-                      flush-pkt
-
-  unpack-status     = PKT-LINE("unpack" SP unpack-result)
-  unpack-result     = "ok" / error-msg
-
-  command-status    = command-ok / command-fail
-  command-ok        = PKT-LINE("ok" SP refname)
-  command-fail      = PKT-LINE("ng" SP refname SP error-msg)
-
-  error-msg         = 1*(OCTET) ; where not "ok"
-----
-
-Updates can be unsuccessful for a number of reasons. The reference can have
-changed since the reference discovery phase was originally sent, meaning
-someone pushed in the meantime. The reference being pushed could be a
-non-fast-forward reference, and the update hooks or configuration could be
-set to not allow that, etc. Also, some references can be updated while others
-can be rejected.
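Parsing such a report can be sketched with go-git's ReportStatus type, whose removal follows below; the pkt-lines are the ones from the example session that closes this section.

----
package main

import (
	"fmt"
	"strings"

	"github.com/go-git/go-git/v5/plumbing/protocol/packp"
)

func main() {
	// A report with one rejected ref; "0000" is the trailing flush-pkt.
	raw := "000eunpack ok\n" +
		"0018ok refs/heads/debug\n" +
		"002ang refs/heads/master non-fast-forward\n" +
		"0000"

	rs := packp.NewReportStatus()
	if err := rs.Decode(strings.NewReader(raw)); err != nil {
		panic(err)
	}

	// Error() surfaces the first unpack or per-ref failure, if any.
	fmt.Println(rs.UnpackStatus) // "ok"
	fmt.Println(rs.Error())      // command error on refs/heads/master: non-fast-forward
}
----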
- -An example client/server communication might look like this: - ----- - S: 007c74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/local\0report-status delete-refs ofs-delta\n - S: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe refs/heads/debug\n - S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/master\n - S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/team\n - S: 0000 - - C: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe 74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/debug\n - C: 003e74730d410fcb6603ace96f1dc55ea6196122532d 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a refs/heads/master\n - C: 0000 - C: [PACKDATA] - - S: 000eunpack ok\n - S: 0018ok refs/heads/debug\n - S: 002ang refs/heads/master non-fast-forward\n ----- -*/ diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/report_status.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/report_status.go deleted file mode 100644 index e2a0a108b24..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/report_status.go +++ /dev/null @@ -1,165 +0,0 @@ -package packp - -import ( - "bytes" - "fmt" - "io" - "strings" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/pktline" -) - -const ( - ok = "ok" -) - -// ReportStatus is a report status message, as used in the git-receive-pack -// process whenever the 'report-status' capability is negotiated. -type ReportStatus struct { - UnpackStatus string - CommandStatuses []*CommandStatus -} - -// NewReportStatus creates a new ReportStatus message. -func NewReportStatus() *ReportStatus { - return &ReportStatus{} -} - -// Error returns the first error if any. -func (s *ReportStatus) Error() error { - if s.UnpackStatus != ok { - return fmt.Errorf("unpack error: %s", s.UnpackStatus) - } - - for _, s := range s.CommandStatuses { - if err := s.Error(); err != nil { - return err - } - } - - return nil -} - -// Encode writes the report status to a writer. -func (s *ReportStatus) Encode(w io.Writer) error { - e := pktline.NewEncoder(w) - if err := e.Encodef("unpack %s\n", s.UnpackStatus); err != nil { - return err - } - - for _, cs := range s.CommandStatuses { - if err := cs.encode(w); err != nil { - return err - } - } - - return e.Flush() -} - -// Decode reads from the given reader and decodes a report-status message. It -// does not read more input than what is needed to fill the report status. 
-func (s *ReportStatus) Decode(r io.Reader) error { - scan := pktline.NewScanner(r) - if err := s.scanFirstLine(scan); err != nil { - return err - } - - if err := s.decodeReportStatus(scan.Bytes()); err != nil { - return err - } - - flushed := false - for scan.Scan() { - b := scan.Bytes() - if isFlush(b) { - flushed = true - break - } - - if err := s.decodeCommandStatus(b); err != nil { - return err - } - } - - if !flushed { - return fmt.Errorf("missing flush") - } - - return scan.Err() -} - -func (s *ReportStatus) scanFirstLine(scan *pktline.Scanner) error { - if scan.Scan() { - return nil - } - - if scan.Err() != nil { - return scan.Err() - } - - return io.ErrUnexpectedEOF -} - -func (s *ReportStatus) decodeReportStatus(b []byte) error { - if isFlush(b) { - return fmt.Errorf("premature flush") - } - - b = bytes.TrimSuffix(b, eol) - - line := string(b) - fields := strings.SplitN(line, " ", 2) - if len(fields) != 2 || fields[0] != "unpack" { - return fmt.Errorf("malformed unpack status: %s", line) - } - - s.UnpackStatus = fields[1] - return nil -} - -func (s *ReportStatus) decodeCommandStatus(b []byte) error { - b = bytes.TrimSuffix(b, eol) - - line := string(b) - fields := strings.SplitN(line, " ", 3) - status := ok - if len(fields) == 3 && fields[0] == "ng" { - status = fields[2] - } else if len(fields) != 2 || fields[0] != "ok" { - return fmt.Errorf("malformed command status: %s", line) - } - - cs := &CommandStatus{ - ReferenceName: plumbing.ReferenceName(fields[1]), - Status: status, - } - s.CommandStatuses = append(s.CommandStatuses, cs) - return nil -} - -// CommandStatus is the status of a reference in a report status. -// See ReportStatus struct. -type CommandStatus struct { - ReferenceName plumbing.ReferenceName - Status string -} - -// Error returns the error, if any. 
-func (s *CommandStatus) Error() error { - if s.Status == ok { - return nil - } - - return fmt.Errorf("command error on %s: %s", - s.ReferenceName.String(), s.Status) -} - -func (s *CommandStatus) encode(w io.Writer) error { - e := pktline.NewEncoder(w) - if s.Error() == nil { - return e.Encodef("ok %s\n", s.ReferenceName.String()) - } - - return e.Encodef("ng %s %s\n", s.ReferenceName.String(), s.Status) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/shallowupd.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/shallowupd.go deleted file mode 100644 index fe4fe688795..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/shallowupd.go +++ /dev/null @@ -1,92 +0,0 @@ -package packp - -import ( - "bytes" - "fmt" - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/pktline" -) - -const ( - shallowLineLen = 48 - unshallowLineLen = 50 -) - -type ShallowUpdate struct { - Shallows []plumbing.Hash - Unshallows []plumbing.Hash -} - -func (r *ShallowUpdate) Decode(reader io.Reader) error { - s := pktline.NewScanner(reader) - - for s.Scan() { - line := s.Bytes() - line = bytes.TrimSpace(line) - - var err error - switch { - case bytes.HasPrefix(line, shallow): - err = r.decodeShallowLine(line) - case bytes.HasPrefix(line, unshallow): - err = r.decodeUnshallowLine(line) - case bytes.Equal(line, pktline.Flush): - return nil - } - - if err != nil { - return err - } - } - - return s.Err() -} - -func (r *ShallowUpdate) decodeShallowLine(line []byte) error { - hash, err := r.decodeLine(line, shallow, shallowLineLen) - if err != nil { - return err - } - - r.Shallows = append(r.Shallows, hash) - return nil -} - -func (r *ShallowUpdate) decodeUnshallowLine(line []byte) error { - hash, err := r.decodeLine(line, unshallow, unshallowLineLen) - if err != nil { - return err - } - - r.Unshallows = append(r.Unshallows, hash) - return nil -} - -func (r *ShallowUpdate) decodeLine(line, prefix []byte, expLen int) (plumbing.Hash, error) { - if len(line) != expLen { - return plumbing.ZeroHash, fmt.Errorf("malformed %s%q", prefix, line) - } - - raw := string(line[expLen-40 : expLen]) - return plumbing.NewHash(raw), nil -} - -func (r *ShallowUpdate) Encode(w io.Writer) error { - e := pktline.NewEncoder(w) - - for _, h := range r.Shallows { - if err := e.Encodef("%s%s\n", shallow, h.String()); err != nil { - return err - } - } - - for _, h := range r.Unshallows { - if err := e.Encodef("%s%s\n", unshallow, h.String()); err != nil { - return err - } - } - - return e.Flush() -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/common.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/common.go deleted file mode 100644 index de5001281fd..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/common.go +++ /dev/null @@ -1,33 +0,0 @@ -package sideband - -// Type sideband type "side-band" or "side-band-64k" -type Type int8 - -const ( - // Sideband legacy sideband type up to 1000-byte messages - Sideband Type = iota - // Sideband64k sideband type up to 65519-byte messages - Sideband64k Type = iota - - // MaxPackedSize for Sideband type - MaxPackedSize = 1000 - // MaxPackedSize64k for Sideband64k type - MaxPackedSize64k = 65520 -) - -// Channel sideband channel -type Channel byte - -// WithPayload encode the payload as a message -func (ch Channel) 
WithPayload(payload []byte) []byte { - return append([]byte{byte(ch)}, payload...) -} - -const ( - // PackData packfile content - PackData Channel = 1 - // ProgressMessage progress messages - ProgressMessage Channel = 2 - // ErrorMessage fatal error message just before stream aborts - ErrorMessage Channel = 3 -) diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/demux.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/demux.go deleted file mode 100644 index 0116f962ef2..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/demux.go +++ /dev/null @@ -1,148 +0,0 @@ -package sideband - -import ( - "errors" - "fmt" - "io" - - "github.com/go-git/go-git/v5/plumbing/format/pktline" -) - -// ErrMaxPackedExceeded returned by Read, if the maximum packed size is exceeded -var ErrMaxPackedExceeded = errors.New("max. packed size exceeded") - -// Progress where the progress information is stored -type Progress interface { - io.Writer -} - -// Demuxer demultiplexes the progress reports and error info interleaved with the -// packfile itself. -// -// A sideband has three different channels the main one, called PackData, contains -// the packfile data; the ErrorMessage channel, that contains server errors; and -// the last one, ProgressMessage channel, containing information about the ongoing -// task happening in the server (optional, can be suppressed sending NoProgress -// or Quiet capabilities to the server) -// -// In order to demultiplex the data stream, method `Read` should be called to -// retrieve the PackData channel, the incoming data from the ProgressMessage is -// written at `Progress` (if any), if any message is retrieved from the -// ErrorMessage channel an error is returned and we can assume that the -// connection has been closed. -type Demuxer struct { - t Type - r io.Reader - s *pktline.Scanner - - max int - pending []byte - - // Progress is where the progress messages are stored - Progress Progress -} - -// NewDemuxer returns a new Demuxer for the given t and read from r -func NewDemuxer(t Type, r io.Reader) *Demuxer { - max := MaxPackedSize64k - if t == Sideband { - max = MaxPackedSize - } - - return &Demuxer{ - t: t, - r: r, - max: max, - s: pktline.NewScanner(r), - } -} - -// Read reads up to len(p) bytes from the PackData channel into p, an error can -// be return if an error happens when reading or if a message is sent in the -// ErrorMessage channel. 
-// -// When a ProgressMessage is read, is not copy to b, instead of this is written -// to the Progress -func (d *Demuxer) Read(b []byte) (n int, err error) { - var read, req int - - req = len(b) - for read < req { - n, err := d.doRead(b[read:req]) - read += n - - if err != nil { - return read, err - } - } - - return read, nil -} - -func (d *Demuxer) doRead(b []byte) (int, error) { - read, err := d.nextPackData() - size := len(read) - wanted := len(b) - - if size > wanted { - d.pending = read[wanted:] - } - - if wanted > size { - wanted = size - } - - size = copy(b, read[:wanted]) - return size, err -} - -func (d *Demuxer) nextPackData() ([]byte, error) { - content := d.getPending() - if len(content) != 0 { - return content, nil - } - - if !d.s.Scan() { - if err := d.s.Err(); err != nil { - return nil, err - } - - return nil, io.EOF - } - - content = d.s.Bytes() - - size := len(content) - if size == 0 { - return nil, nil - } else if size > d.max { - return nil, ErrMaxPackedExceeded - } - - switch Channel(content[0]) { - case PackData: - return content[1:], nil - case ProgressMessage: - if d.Progress != nil { - _, err := d.Progress.Write(content[1:]) - return nil, err - } - case ErrorMessage: - return nil, fmt.Errorf("unexpected error: %s", content[1:]) - default: - return nil, fmt.Errorf("unknown channel %s", content) - } - - return nil, nil -} - -func (d *Demuxer) getPending() (b []byte) { - if len(d.pending) == 0 { - return nil - } - - content := d.pending - d.pending = nil - - return content -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/doc.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/doc.go deleted file mode 100644 index c5d24295291..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/doc.go +++ /dev/null @@ -1,31 +0,0 @@ -// Package sideband implements a sideband mutiplex/demultiplexer -package sideband - -// If 'side-band' or 'side-band-64k' capabilities have been specified by -// the client, the server will send the packfile data multiplexed. -// -// Either mode indicates that the packfile data will be streamed broken -// up into packets of up to either 1000 bytes in the case of 'side_band', -// or 65520 bytes in the case of 'side_band_64k'. Each packet is made up -// of a leading 4-byte pkt-line length of how much data is in the packet, -// followed by a 1-byte stream code, followed by the actual data. -// -// The stream code can be one of: -// -// 1 - pack data -// 2 - progress messages -// 3 - fatal error message just before stream aborts -// -// The "side-band-64k" capability came about as a way for newer clients -// that can handle much larger packets to request packets that are -// actually crammed nearly full, while maintaining backward compatibility -// for the older clients. -// -// Further, with side-band and its up to 1000-byte messages, it's actually -// 999 bytes of payload and 1 byte for the stream code. With side-band-64k, -// same deal, you have up to 65519 bytes of data and 1 byte for the stream -// code. -// -// The client MUST send only maximum of one of "side-band" and "side- -// band-64k". Server MUST diagnose it as an error if client requests -// both. 
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/muxer.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/muxer.go deleted file mode 100644 index d51ac826952..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/muxer.go +++ /dev/null @@ -1,65 +0,0 @@ -package sideband - -import ( - "io" - - "github.com/go-git/go-git/v5/plumbing/format/pktline" -) - -// Muxer multiplex the packfile along with the progress messages and the error -// information. The multiplex is perform using pktline format. -type Muxer struct { - max int - e *pktline.Encoder -} - -const chLen = 1 - -// NewMuxer returns a new Muxer for the given t that writes on w. -// -// If t is equal to `Sideband` the max pack size is set to MaxPackedSize, in any -// other value is given, max pack is set to MaxPackedSize64k, that is the -// maximum length of a line in pktline format. -func NewMuxer(t Type, w io.Writer) *Muxer { - max := MaxPackedSize64k - if t == Sideband { - max = MaxPackedSize - } - - return &Muxer{ - max: max - chLen, - e: pktline.NewEncoder(w), - } -} - -// Write writes p in the PackData channel -func (m *Muxer) Write(p []byte) (int, error) { - return m.WriteChannel(PackData, p) -} - -// WriteChannel writes p in the given channel. This method can be used with any -// channel, but is recommend use it only for the ProgressMessage and -// ErrorMessage channels and use Write for the PackData channel -func (m *Muxer) WriteChannel(t Channel, p []byte) (int, error) { - wrote := 0 - size := len(p) - for wrote < size { - n, err := m.doWrite(t, p[wrote:]) - wrote += n - - if err != nil { - return wrote, err - } - } - - return wrote, nil -} - -func (m *Muxer) doWrite(ch Channel, p []byte) (int, error) { - sz := len(p) - if sz > m.max { - sz = m.max - } - - return sz, m.e.Encode(ch.WithPayload(p[:sz])) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go deleted file mode 100644 index b3a7ee804c5..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go +++ /dev/null @@ -1,127 +0,0 @@ -package packp - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/pktline" -) - -const ackLineLen = 44 - -// ServerResponse object acknowledgement from upload-pack service -type ServerResponse struct { - ACKs []plumbing.Hash -} - -// Decode decodes the response into the struct, isMultiACK should be true, if -// the request was done with multi_ack or multi_ack_detailed capabilities. -func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error { - // TODO: implement support for multi_ack or multi_ack_detailed responses - if isMultiACK { - return errors.New("multi_ack and multi_ack_detailed are not supported") - } - - s := pktline.NewScanner(reader) - - for s.Scan() { - line := s.Bytes() - - if err := r.decodeLine(line); err != nil { - return err - } - - // we need to detect when the end of a response header and the beginning - // of a packfile header happened, some requests to the git daemon - // produces a duplicate ACK header even when multi_ack is not supported. 
- stop, err := r.stopReading(reader) - if err != nil { - return err - } - - if stop { - break - } - } - - return s.Err() -} - -// stopReading detects when a valid command such as ACK or NAK is found to be -// read in the buffer without moving the read pointer. -func (r *ServerResponse) stopReading(reader *bufio.Reader) (bool, error) { - ahead, err := reader.Peek(7) - if err == io.EOF { - return true, nil - } - - if err != nil { - return false, err - } - - if len(ahead) > 4 && r.isValidCommand(ahead[0:3]) { - return false, nil - } - - if len(ahead) == 7 && r.isValidCommand(ahead[4:]) { - return false, nil - } - - return true, nil -} - -func (r *ServerResponse) isValidCommand(b []byte) bool { - commands := [][]byte{ack, nak} - for _, c := range commands { - if bytes.Equal(b, c) { - return true - } - } - - return false -} - -func (r *ServerResponse) decodeLine(line []byte) error { - if len(line) == 0 { - return fmt.Errorf("unexpected flush") - } - - if bytes.Equal(line[0:3], ack) { - return r.decodeACKLine(line) - } - - if bytes.Equal(line[0:3], nak) { - return nil - } - - return fmt.Errorf("unexpected content %q", string(line)) -} - -func (r *ServerResponse) decodeACKLine(line []byte) error { - if len(line) < ackLineLen { - return fmt.Errorf("malformed ACK %q", line) - } - - sp := bytes.Index(line, []byte(" ")) - h := plumbing.NewHash(string(line[sp+1 : sp+41])) - r.ACKs = append(r.ACKs, h) - return nil -} - -// Encode encodes the ServerResponse into a writer. -func (r *ServerResponse) Encode(w io.Writer) error { - if len(r.ACKs) > 1 { - return errors.New("multi_ack and multi_ack_detailed are not supported") - } - - e := pktline.NewEncoder(w) - if len(r.ACKs) == 0 { - return e.Encodef("%s\n", nak) - } - - return e.Encodef("%s %s\n", ack, r.ACKs[0].String()) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go deleted file mode 100644 index 44db8e4014e..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go +++ /dev/null @@ -1,168 +0,0 @@ -package packp - -import ( - "fmt" - "time" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" -) - -// UploadRequest values represent the information transmitted on a -// upload-request message. Values from this type are not zero-value -// safe, use the New function instead. -// This is a low level type, use UploadPackRequest instead. -type UploadRequest struct { - Capabilities *capability.List - Wants []plumbing.Hash - Shallows []plumbing.Hash - Depth Depth -} - -// Depth values stores the desired depth of the requested packfile: see -// DepthCommit, DepthSince and DepthReference. -type Depth interface { - isDepth() - IsZero() bool -} - -// DepthCommits values stores the maximum number of requested commits in -// the packfile. Zero means infinite. A negative value will have -// undefined consequences. -type DepthCommits int - -func (d DepthCommits) isDepth() {} - -func (d DepthCommits) IsZero() bool { - return d == 0 -} - -// DepthSince values requests only commits newer than the specified time. -type DepthSince time.Time - -func (d DepthSince) isDepth() {} - -func (d DepthSince) IsZero() bool { - return time.Time(d).IsZero() -} - -// DepthReference requests only commits not to found in the specified reference. 
-type DepthReference string - -func (d DepthReference) isDepth() {} - -func (d DepthReference) IsZero() bool { - return string(d) == "" -} - -// NewUploadRequest returns a pointer to a new UploadRequest value, ready to be -// used. It has no capabilities, wants or shallows and an infinite depth. Please -// note that to encode an upload-request it has to have at least one wanted hash. -func NewUploadRequest() *UploadRequest { - return &UploadRequest{ - Capabilities: capability.NewList(), - Wants: []plumbing.Hash{}, - Shallows: []plumbing.Hash{}, - Depth: DepthCommits(0), - } -} - -// NewUploadRequestFromCapabilities returns a pointer to a new UploadRequest -// value, the request capabilities are filled with the most optimal ones, based -// on the adv value (advertised capabilities), the UploadRequest generated it -// has no wants or shallows and an infinite depth. -func NewUploadRequestFromCapabilities(adv *capability.List) *UploadRequest { - r := NewUploadRequest() - - if adv.Supports(capability.MultiACKDetailed) { - r.Capabilities.Set(capability.MultiACKDetailed) - } else if adv.Supports(capability.MultiACK) { - r.Capabilities.Set(capability.MultiACK) - } - - if adv.Supports(capability.Sideband64k) { - r.Capabilities.Set(capability.Sideband64k) - } else if adv.Supports(capability.Sideband) { - r.Capabilities.Set(capability.Sideband) - } - - if adv.Supports(capability.ThinPack) { - r.Capabilities.Set(capability.ThinPack) - } - - if adv.Supports(capability.OFSDelta) { - r.Capabilities.Set(capability.OFSDelta) - } - - if adv.Supports(capability.Agent) { - r.Capabilities.Set(capability.Agent, capability.DefaultAgent) - } - - return r -} - -// Validate validates the content of UploadRequest, following the next rules: -// - Wants MUST have at least one reference -// - capability.Shallow MUST be present if Shallows is not empty -// - is a non-zero DepthCommits is given capability.Shallow MUST be present -// - is a DepthSince is given capability.Shallow MUST be present -// - is a DepthReference is given capability.DeepenNot MUST be present -// - MUST contain only maximum of one of capability.Sideband and capability.Sideband64k -// - MUST contain only maximum of one of capability.MultiACK and capability.MultiACKDetailed -func (r *UploadRequest) Validate() error { - if len(r.Wants) == 0 { - return fmt.Errorf("want can't be empty") - } - - if err := r.validateRequiredCapabilities(); err != nil { - return err - } - - if err := r.validateConflictCapabilities(); err != nil { - return err - } - - return nil -} - -func (r *UploadRequest) validateRequiredCapabilities() error { - msg := "missing capability %s" - - if len(r.Shallows) != 0 && !r.Capabilities.Supports(capability.Shallow) { - return fmt.Errorf(msg, capability.Shallow) - } - - switch r.Depth.(type) { - case DepthCommits: - if r.Depth != DepthCommits(0) { - if !r.Capabilities.Supports(capability.Shallow) { - return fmt.Errorf(msg, capability.Shallow) - } - } - case DepthSince: - if !r.Capabilities.Supports(capability.DeepenSince) { - return fmt.Errorf(msg, capability.DeepenSince) - } - case DepthReference: - if !r.Capabilities.Supports(capability.DeepenNot) { - return fmt.Errorf(msg, capability.DeepenNot) - } - } - - return nil -} - -func (r *UploadRequest) validateConflictCapabilities() error { - msg := "capabilities %s and %s are mutually exclusive" - if r.Capabilities.Supports(capability.Sideband) && - r.Capabilities.Supports(capability.Sideband64k) { - return fmt.Errorf(msg, capability.Sideband, capability.Sideband64k) - } - - if 
r.Capabilities.Supports(capability.MultiACK) && - r.Capabilities.Supports(capability.MultiACKDetailed) { - return fmt.Errorf(msg, capability.MultiACK, capability.MultiACKDetailed) - } - - return nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go deleted file mode 100644 index 449b729a5e0..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go +++ /dev/null @@ -1,257 +0,0 @@ -package packp - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "strconv" - "time" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/pktline" -) - -// Decode reads the next upload-request form its input and -// stores it in the UploadRequest. -func (u *UploadRequest) Decode(r io.Reader) error { - d := newUlReqDecoder(r) - return d.Decode(u) -} - -type ulReqDecoder struct { - s *pktline.Scanner // a pkt-line scanner from the input stream - line []byte // current pkt-line contents, use parser.nextLine() to make it advance - nLine int // current pkt-line number for debugging, begins at 1 - err error // sticky error, use the parser.error() method to fill this out - data *UploadRequest // parsed data is stored here -} - -func newUlReqDecoder(r io.Reader) *ulReqDecoder { - return &ulReqDecoder{ - s: pktline.NewScanner(r), - } -} - -func (d *ulReqDecoder) Decode(v *UploadRequest) error { - d.data = v - - for state := d.decodeFirstWant; state != nil; { - state = state() - } - - return d.err -} - -// fills out the parser stiky error -func (d *ulReqDecoder) error(format string, a ...interface{}) { - msg := fmt.Sprintf( - "pkt-line %d: %s", d.nLine, - fmt.Sprintf(format, a...), - ) - - d.err = NewErrUnexpectedData(msg, d.line) -} - -// Reads a new pkt-line from the scanner, makes its payload available as -// p.line and increments p.nLine. A successful invocation returns true, -// otherwise, false is returned and the sticky error is filled out -// accordingly. Trims eols at the end of the payloads. -func (d *ulReqDecoder) nextLine() bool { - d.nLine++ - - if !d.s.Scan() { - if d.err = d.s.Err(); d.err != nil { - return false - } - - d.error("EOF") - return false - } - - d.line = d.s.Bytes() - d.line = bytes.TrimSuffix(d.line, eol) - - return true -} - -// Expected format: want [ capabilities] -func (d *ulReqDecoder) decodeFirstWant() stateFn { - if ok := d.nextLine(); !ok { - return nil - } - - if !bytes.HasPrefix(d.line, want) { - d.error("missing 'want ' prefix") - return nil - } - d.line = bytes.TrimPrefix(d.line, want) - - hash, ok := d.readHash() - if !ok { - return nil - } - d.data.Wants = append(d.data.Wants, hash) - - return d.decodeCaps -} - -func (d *ulReqDecoder) readHash() (plumbing.Hash, bool) { - if len(d.line) < hashSize { - d.err = fmt.Errorf("malformed hash: %v", d.line) - return plumbing.ZeroHash, false - } - - var hash plumbing.Hash - if _, err := hex.Decode(hash[:], d.line[:hashSize]); err != nil { - d.error("invalid hash text: %s", err) - return plumbing.ZeroHash, false - } - d.line = d.line[hashSize:] - - return hash, true -} - -// Expected format: sp cap1 sp cap2 sp cap3... 
-func (d *ulReqDecoder) decodeCaps() stateFn { - d.line = bytes.TrimPrefix(d.line, sp) - if err := d.data.Capabilities.Decode(d.line); err != nil { - d.error("invalid capabilities: %s", err) - } - - return d.decodeOtherWants -} - -// Expected format: want -func (d *ulReqDecoder) decodeOtherWants() stateFn { - if ok := d.nextLine(); !ok { - return nil - } - - if bytes.HasPrefix(d.line, shallow) { - return d.decodeShallow - } - - if bytes.HasPrefix(d.line, deepen) { - return d.decodeDeepen - } - - if len(d.line) == 0 { - return nil - } - - if !bytes.HasPrefix(d.line, want) { - d.error("unexpected payload while expecting a want: %q", d.line) - return nil - } - d.line = bytes.TrimPrefix(d.line, want) - - hash, ok := d.readHash() - if !ok { - return nil - } - d.data.Wants = append(d.data.Wants, hash) - - return d.decodeOtherWants -} - -// Expected format: shallow -func (d *ulReqDecoder) decodeShallow() stateFn { - if bytes.HasPrefix(d.line, deepen) { - return d.decodeDeepen - } - - if len(d.line) == 0 { - return nil - } - - if !bytes.HasPrefix(d.line, shallow) { - d.error("unexpected payload while expecting a shallow: %q", d.line) - return nil - } - d.line = bytes.TrimPrefix(d.line, shallow) - - hash, ok := d.readHash() - if !ok { - return nil - } - d.data.Shallows = append(d.data.Shallows, hash) - - if ok := d.nextLine(); !ok { - return nil - } - - return d.decodeShallow -} - -// Expected format: deepen / deepen-since
      / deepen-not -func (d *ulReqDecoder) decodeDeepen() stateFn { - if bytes.HasPrefix(d.line, deepenCommits) { - return d.decodeDeepenCommits - } - - if bytes.HasPrefix(d.line, deepenSince) { - return d.decodeDeepenSince - } - - if bytes.HasPrefix(d.line, deepenReference) { - return d.decodeDeepenReference - } - - if len(d.line) == 0 { - return nil - } - - d.error("unexpected deepen specification: %q", d.line) - return nil -} - -func (d *ulReqDecoder) decodeDeepenCommits() stateFn { - d.line = bytes.TrimPrefix(d.line, deepenCommits) - - var n int - if n, d.err = strconv.Atoi(string(d.line)); d.err != nil { - return nil - } - if n < 0 { - d.err = fmt.Errorf("negative depth") - return nil - } - d.data.Depth = DepthCommits(n) - - return d.decodeFlush -} - -func (d *ulReqDecoder) decodeDeepenSince() stateFn { - d.line = bytes.TrimPrefix(d.line, deepenSince) - - var secs int64 - secs, d.err = strconv.ParseInt(string(d.line), 10, 64) - if d.err != nil { - return nil - } - t := time.Unix(secs, 0).UTC() - d.data.Depth = DepthSince(t) - - return d.decodeFlush -} - -func (d *ulReqDecoder) decodeDeepenReference() stateFn { - d.line = bytes.TrimPrefix(d.line, deepenReference) - - d.data.Depth = DepthReference(string(d.line)) - - return d.decodeFlush -} - -func (d *ulReqDecoder) decodeFlush() stateFn { - if ok := d.nextLine(); !ok { - return nil - } - - if len(d.line) != 0 { - d.err = fmt.Errorf("unexpected payload while expecting a flush-pkt: %q", d.line) - } - - return nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_encode.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_encode.go deleted file mode 100644 index 486307688a4..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_encode.go +++ /dev/null @@ -1,145 +0,0 @@ -package packp - -import ( - "bytes" - "fmt" - "io" - "time" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/pktline" -) - -// Encode writes the UlReq encoding of u to the stream. -// -// All the payloads will end with a newline character. Wants and -// shallows are sorted alphabetically. A depth of 0 means no depth -// request is sent. 
-func (u *UploadRequest) Encode(w io.Writer) error { - e := newUlReqEncoder(w) - return e.Encode(u) -} - -type ulReqEncoder struct { - pe *pktline.Encoder // where to write the encoded data - data *UploadRequest // the data to encode - err error // sticky error -} - -func newUlReqEncoder(w io.Writer) *ulReqEncoder { - return &ulReqEncoder{ - pe: pktline.NewEncoder(w), - } -} - -func (e *ulReqEncoder) Encode(v *UploadRequest) error { - e.data = v - - if len(v.Wants) == 0 { - return fmt.Errorf("empty wants provided") - } - - plumbing.HashesSort(e.data.Wants) - for state := e.encodeFirstWant; state != nil; { - state = state() - } - - return e.err -} - -func (e *ulReqEncoder) encodeFirstWant() stateFn { - var err error - if e.data.Capabilities.IsEmpty() { - err = e.pe.Encodef("want %s\n", e.data.Wants[0]) - } else { - err = e.pe.Encodef( - "want %s %s\n", - e.data.Wants[0], - e.data.Capabilities.String(), - ) - } - - if err != nil { - e.err = fmt.Errorf("encoding first want line: %s", err) - return nil - } - - return e.encodeAdditionalWants -} - -func (e *ulReqEncoder) encodeAdditionalWants() stateFn { - last := e.data.Wants[0] - for _, w := range e.data.Wants[1:] { - if bytes.Equal(last[:], w[:]) { - continue - } - - if err := e.pe.Encodef("want %s\n", w); err != nil { - e.err = fmt.Errorf("encoding want %q: %s", w, err) - return nil - } - - last = w - } - - return e.encodeShallows -} - -func (e *ulReqEncoder) encodeShallows() stateFn { - plumbing.HashesSort(e.data.Shallows) - - var last plumbing.Hash - for _, s := range e.data.Shallows { - if bytes.Equal(last[:], s[:]) { - continue - } - - if err := e.pe.Encodef("shallow %s\n", s); err != nil { - e.err = fmt.Errorf("encoding shallow %q: %s", s, err) - return nil - } - - last = s - } - - return e.encodeDepth -} - -func (e *ulReqEncoder) encodeDepth() stateFn { - switch depth := e.data.Depth.(type) { - case DepthCommits: - if depth != 0 { - commits := int(depth) - if err := e.pe.Encodef("deepen %d\n", commits); err != nil { - e.err = fmt.Errorf("encoding depth %d: %s", depth, err) - return nil - } - } - case DepthSince: - when := time.Time(depth).UTC() - if err := e.pe.Encodef("deepen-since %d\n", when.Unix()); err != nil { - e.err = fmt.Errorf("encoding depth %s: %s", when, err) - return nil - } - case DepthReference: - reference := string(depth) - if err := e.pe.Encodef("deepen-not %s\n", reference); err != nil { - e.err = fmt.Errorf("encoding depth %s: %s", reference, err) - return nil - } - default: - e.err = fmt.Errorf("unsupported depth type") - return nil - } - - return e.encodeFlush -} - -func (e *ulReqEncoder) encodeFlush() stateFn { - if err := e.pe.Flush(); err != nil { - e.err = fmt.Errorf("encoding flush-pkt: %s", err) - return nil - } - - return nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq.go deleted file mode 100644 index b63b0234001..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq.go +++ /dev/null @@ -1,122 +0,0 @@ -package packp - -import ( - "errors" - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband" -) - -var ( - ErrEmptyCommands = errors.New("commands cannot be empty") - ErrMalformedCommand = errors.New("malformed command") -) - -// ReferenceUpdateRequest values represent reference upload requests. 
-// Values from this type are not zero-value safe, use the New function instead. -type ReferenceUpdateRequest struct { - Capabilities *capability.List - Commands []*Command - Shallow *plumbing.Hash - // Packfile contains an optional packfile reader. - Packfile io.ReadCloser - - // Progress receives sideband progress messages from the server - Progress sideband.Progress -} - -// New returns a pointer to a new ReferenceUpdateRequest value. -func NewReferenceUpdateRequest() *ReferenceUpdateRequest { - return &ReferenceUpdateRequest{ - // TODO: Add support for push-cert - Capabilities: capability.NewList(), - Commands: nil, - } -} - -// NewReferenceUpdateRequestFromCapabilities returns a pointer to a new -// ReferenceUpdateRequest value, the request capabilities are filled with the -// most optimal ones, based on the adv value (advertised capabilities), the -// ReferenceUpdateRequest contains no commands -// -// It does set the following capabilities: -// - agent -// - report-status -// - ofs-delta -// - ref-delta -// - delete-refs -// It leaves up to the user to add the following capabilities later: -// - atomic -// - ofs-delta -// - side-band -// - side-band-64k -// - quiet -// - push-cert -func NewReferenceUpdateRequestFromCapabilities(adv *capability.List) *ReferenceUpdateRequest { - r := NewReferenceUpdateRequest() - - if adv.Supports(capability.Agent) { - r.Capabilities.Set(capability.Agent, capability.DefaultAgent) - } - - if adv.Supports(capability.ReportStatus) { - r.Capabilities.Set(capability.ReportStatus) - } - - return r -} - -func (r *ReferenceUpdateRequest) validate() error { - if len(r.Commands) == 0 { - return ErrEmptyCommands - } - - for _, c := range r.Commands { - if err := c.validate(); err != nil { - return err - } - } - - return nil -} - -type Action string - -const ( - Create Action = "create" - Update = "update" - Delete = "delete" - Invalid = "invalid" -) - -type Command struct { - Name plumbing.ReferenceName - Old plumbing.Hash - New plumbing.Hash -} - -func (c *Command) Action() Action { - if c.Old == plumbing.ZeroHash && c.New == plumbing.ZeroHash { - return Invalid - } - - if c.Old == plumbing.ZeroHash { - return Create - } - - if c.New == plumbing.ZeroHash { - return Delete - } - - return Update -} - -func (c *Command) validate() error { - if c.Action() == Invalid { - return ErrMalformedCommand - } - - return nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_decode.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_decode.go deleted file mode 100644 index 2c9843a5661..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_decode.go +++ /dev/null @@ -1,250 +0,0 @@ -package packp - -import ( - "bytes" - "encoding/hex" - "errors" - "fmt" - "io" - "io/ioutil" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/pktline" -) - -var ( - shallowLineLength = len(shallow) + hashSize - minCommandLength = hashSize*2 + 2 + 1 - minCommandAndCapsLength = minCommandLength + 1 -) - -var ( - ErrEmpty = errors.New("empty update-request message") - errNoCommands = errors.New("unexpected EOF before any command") - errMissingCapabilitiesDelimiter = errors.New("capabilities delimiter not found") -) - -func errMalformedRequest(reason string) error { - return fmt.Errorf("malformed request: %s", reason) -} - -func errInvalidHashSize(got int) error { - return fmt.Errorf("invalid hash size: expected %d, got %d", - hashSize, 
got) -} - -func errInvalidHash(err error) error { - return fmt.Errorf("invalid hash: %s", err.Error()) -} - -func errInvalidShallowLineLength(got int) error { - return errMalformedRequest(fmt.Sprintf( - "invalid shallow line length: expected %d, got %d", - shallowLineLength, got)) -} - -func errInvalidCommandCapabilitiesLineLength(got int) error { - return errMalformedRequest(fmt.Sprintf( - "invalid command and capabilities line length: expected at least %d, got %d", - minCommandAndCapsLength, got)) -} - -func errInvalidCommandLineLength(got int) error { - return errMalformedRequest(fmt.Sprintf( - "invalid command line length: expected at least %d, got %d", - minCommandLength, got)) -} - -func errInvalidShallowObjId(err error) error { - return errMalformedRequest( - fmt.Sprintf("invalid shallow object id: %s", err.Error())) -} - -func errInvalidOldObjId(err error) error { - return errMalformedRequest( - fmt.Sprintf("invalid old object id: %s", err.Error())) -} - -func errInvalidNewObjId(err error) error { - return errMalformedRequest( - fmt.Sprintf("invalid new object id: %s", err.Error())) -} - -func errMalformedCommand(err error) error { - return errMalformedRequest(fmt.Sprintf( - "malformed command: %s", err.Error())) -} - -// Decode reads the next update-request message form the reader and wr -func (req *ReferenceUpdateRequest) Decode(r io.Reader) error { - var rc io.ReadCloser - var ok bool - rc, ok = r.(io.ReadCloser) - if !ok { - rc = ioutil.NopCloser(r) - } - - d := &updReqDecoder{r: rc, s: pktline.NewScanner(r)} - return d.Decode(req) -} - -type updReqDecoder struct { - r io.ReadCloser - s *pktline.Scanner - req *ReferenceUpdateRequest -} - -func (d *updReqDecoder) Decode(req *ReferenceUpdateRequest) error { - d.req = req - funcs := []func() error{ - d.scanLine, - d.decodeShallow, - d.decodeCommandAndCapabilities, - d.decodeCommands, - d.setPackfile, - req.validate, - } - - for _, f := range funcs { - if err := f(); err != nil { - return err - } - } - - return nil -} - -func (d *updReqDecoder) scanLine() error { - if ok := d.s.Scan(); !ok { - return d.scanErrorOr(ErrEmpty) - } - - return nil -} - -func (d *updReqDecoder) decodeShallow() error { - b := d.s.Bytes() - - if !bytes.HasPrefix(b, shallowNoSp) { - return nil - } - - if len(b) != shallowLineLength { - return errInvalidShallowLineLength(len(b)) - } - - h, err := parseHash(string(b[len(shallow):])) - if err != nil { - return errInvalidShallowObjId(err) - } - - if ok := d.s.Scan(); !ok { - return d.scanErrorOr(errNoCommands) - } - - d.req.Shallow = &h - - return nil -} - -func (d *updReqDecoder) decodeCommands() error { - for { - b := d.s.Bytes() - if bytes.Equal(b, pktline.Flush) { - return nil - } - - c, err := parseCommand(b) - if err != nil { - return err - } - - d.req.Commands = append(d.req.Commands, c) - - if ok := d.s.Scan(); !ok { - return d.s.Err() - } - } -} - -func (d *updReqDecoder) decodeCommandAndCapabilities() error { - b := d.s.Bytes() - i := bytes.IndexByte(b, 0) - if i == -1 { - return errMissingCapabilitiesDelimiter - } - - if len(b) < minCommandAndCapsLength { - return errInvalidCommandCapabilitiesLineLength(len(b)) - } - - cmd, err := parseCommand(b[:i]) - if err != nil { - return err - } - - d.req.Commands = append(d.req.Commands, cmd) - - if err := d.req.Capabilities.Decode(b[i+1:]); err != nil { - return err - } - - if err := d.scanLine(); err != nil { - return err - } - - return nil -} - -func (d *updReqDecoder) setPackfile() error { - d.req.Packfile = d.r - - return nil -} - -func parseCommand(b 
[]byte) (*Command, error) { - if len(b) < minCommandLength { - return nil, errInvalidCommandLineLength(len(b)) - } - - var ( - os, ns string - n plumbing.ReferenceName - ) - if _, err := fmt.Sscanf(string(b), "%s %s %s", &os, &ns, &n); err != nil { - return nil, errMalformedCommand(err) - } - - oh, err := parseHash(os) - if err != nil { - return nil, errInvalidOldObjId(err) - } - - nh, err := parseHash(ns) - if err != nil { - return nil, errInvalidNewObjId(err) - } - - return &Command{Old: oh, New: nh, Name: n}, nil -} - -func parseHash(s string) (plumbing.Hash, error) { - if len(s) != hashSize { - return plumbing.ZeroHash, errInvalidHashSize(len(s)) - } - - if _, err := hex.DecodeString(s); err != nil { - return plumbing.ZeroHash, errInvalidHash(err) - } - - h := plumbing.NewHash(s) - return h, nil -} - -func (d *updReqDecoder) scanErrorOr(origErr error) error { - if err := d.s.Err(); err != nil { - return err - } - - return origErr -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_encode.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_encode.go deleted file mode 100644 index 6a79653f138..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_encode.go +++ /dev/null @@ -1,75 +0,0 @@ -package packp - -import ( - "fmt" - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/pktline" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" -) - -var ( - zeroHashString = plumbing.ZeroHash.String() -) - -// Encode writes the ReferenceUpdateRequest encoding to the stream. -func (r *ReferenceUpdateRequest) Encode(w io.Writer) error { - if err := r.validate(); err != nil { - return err - } - - e := pktline.NewEncoder(w) - - if err := r.encodeShallow(e, r.Shallow); err != nil { - return err - } - - if err := r.encodeCommands(e, r.Commands, r.Capabilities); err != nil { - return err - } - - if r.Packfile != nil { - if _, err := io.Copy(w, r.Packfile); err != nil { - return err - } - - return r.Packfile.Close() - } - - return nil -} - -func (r *ReferenceUpdateRequest) encodeShallow(e *pktline.Encoder, - h *plumbing.Hash) error { - - if h == nil { - return nil - } - - objId := []byte(h.String()) - return e.Encodef("%s%s", shallow, objId) -} - -func (r *ReferenceUpdateRequest) encodeCommands(e *pktline.Encoder, - cmds []*Command, cap *capability.List) error { - - if err := e.Encodef("%s\x00%s", - formatCommand(cmds[0]), cap.String()); err != nil { - return err - } - - for _, cmd := range cmds[1:] { - if err := e.Encodef(formatCommand(cmd)); err != nil { - return err - } - } - - return e.Flush() -} - -func formatCommand(cmd *Command) string { - o := cmd.Old.String() - n := cmd.New.String() - return fmt.Sprintf("%s %s %s", o, n, cmd.Name) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackreq.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackreq.go deleted file mode 100644 index de2206b3fc4..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackreq.go +++ /dev/null @@ -1,98 +0,0 @@ -package packp - -import ( - "bytes" - "fmt" - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/pktline" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" -) - -// UploadPackRequest represents a upload-pack request. 
-// Zero-value is not safe, use NewUploadPackRequest instead. -type UploadPackRequest struct { - UploadRequest - UploadHaves -} - -// NewUploadPackRequest creates a new UploadPackRequest and returns a pointer. -func NewUploadPackRequest() *UploadPackRequest { - ur := NewUploadRequest() - return &UploadPackRequest{ - UploadHaves: UploadHaves{}, - UploadRequest: *ur, - } -} - -// NewUploadPackRequestFromCapabilities creates a new UploadPackRequest and -// returns a pointer. The request capabilities are filled with the most optimal -// ones, based on the adv value (advertised capabilities), the UploadPackRequest -// it has no wants, haves or shallows and an infinite depth -func NewUploadPackRequestFromCapabilities(adv *capability.List) *UploadPackRequest { - ur := NewUploadRequestFromCapabilities(adv) - return &UploadPackRequest{ - UploadHaves: UploadHaves{}, - UploadRequest: *ur, - } -} - -// IsEmpty a request if empty if Haves are contained in the Wants, or if Wants -// length is zero -func (r *UploadPackRequest) IsEmpty() bool { - return isSubset(r.Wants, r.Haves) -} - -func isSubset(needle []plumbing.Hash, haystack []plumbing.Hash) bool { - for _, h := range needle { - found := false - for _, oh := range haystack { - if h == oh { - found = true - break - } - } - - if !found { - return false - } - } - - return true -} - -// UploadHaves is a message to signal the references that a client has in a -// upload-pack. Do not use this directly. Use UploadPackRequest request instead. -type UploadHaves struct { - Haves []plumbing.Hash -} - -// Encode encodes the UploadHaves into the Writer. If flush is true, a flush -// command will be encoded at the end of the writer content. -func (u *UploadHaves) Encode(w io.Writer, flush bool) error { - e := pktline.NewEncoder(w) - - plumbing.HashesSort(u.Haves) - - var last plumbing.Hash - for _, have := range u.Haves { - if bytes.Equal(last[:], have[:]) { - continue - } - - if err := e.Encodef("have %s\n", have); err != nil { - return fmt.Errorf("sending haves for %q: %s", have, err) - } - - last = have - } - - if flush && len(u.Haves) != 0 { - if err := e.Flush(); err != nil { - return fmt.Errorf("sending flush-pkt after haves: %s", err) - } - } - - return nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackresp.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackresp.go deleted file mode 100644 index a9a7192ea56..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackresp.go +++ /dev/null @@ -1,109 +0,0 @@ -package packp - -import ( - "errors" - "io" - - "bufio" - - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -// ErrUploadPackResponseNotDecoded is returned if Read is called without -// decoding first -var ErrUploadPackResponseNotDecoded = errors.New("upload-pack-response should be decoded") - -// UploadPackResponse contains all the information responded by the upload-pack -// service, the response implements io.ReadCloser that allows to read the -// packfile directly from it. -type UploadPackResponse struct { - ShallowUpdate - ServerResponse - - r io.ReadCloser - isShallow bool - isMultiACK bool - isOk bool -} - -// NewUploadPackResponse create a new UploadPackResponse instance, the request -// being responded by the response is required. 
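For reference, the UploadHaves.Encode method removed above writes one pkt-line per hash, followed by a flush-pkt when flush is true. A minimal sketch of what a caller would observe; the hash value and error handling are illustrative only:

package main

import (
	"bytes"
	"fmt"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/protocol/packp"
)

func main() {
	var buf bytes.Buffer
	haves := packp.UploadHaves{Haves: []plumbing.Hash{
		plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"), // example hash
	}}
	if err := haves.Encode(&buf, true); err != nil {
		panic(err)
	}
	// Each have is one pkt-line; flush=true appends a flush-pkt:
	// "0032have 6ecf0ef2c2dffb796033e5a02219af86ec6584e5\n0000"
	fmt.Printf("%q\n", buf.String())
}
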
-func NewUploadPackResponse(req *UploadPackRequest) *UploadPackResponse { - isShallow := !req.Depth.IsZero() - isMultiACK := req.Capabilities.Supports(capability.MultiACK) || - req.Capabilities.Supports(capability.MultiACKDetailed) - - return &UploadPackResponse{ - isShallow: isShallow, - isMultiACK: isMultiACK, - } -} - -// NewUploadPackResponseWithPackfile creates a new UploadPackResponse instance, -// and sets its packfile reader. -func NewUploadPackResponseWithPackfile(req *UploadPackRequest, - pf io.ReadCloser) *UploadPackResponse { - - r := NewUploadPackResponse(req) - r.r = pf - return r -} - -// Decode decodes all the responses sent by upload-pack service into the struct -// and prepares it to read the packfile using the Read method -func (r *UploadPackResponse) Decode(reader io.ReadCloser) error { - buf := bufio.NewReader(reader) - - if r.isShallow { - if err := r.ShallowUpdate.Decode(buf); err != nil { - return err - } - } - - if err := r.ServerResponse.Decode(buf, r.isMultiACK); err != nil { - return err - } - - // now the reader is ready to read the packfile content - r.r = ioutil.NewReadCloser(buf, reader) - - return nil -} - -// Encode encodes an UploadPackResponse. -func (r *UploadPackResponse) Encode(w io.Writer) (err error) { - if r.isShallow { - if err := r.ShallowUpdate.Encode(w); err != nil { - return err - } - } - - if err := r.ServerResponse.Encode(w); err != nil { - return err - } - - defer ioutil.CheckClose(r.r, &err) - _, err = io.Copy(w, r.r) - return err -} - -// Read reads the packfile data, if the request was done with any Sideband -// capability the content read should be demultiplexed. If the methods wasn't -// called before the ErrUploadPackResponseNotDecoded will be return -func (r *UploadPackResponse) Read(p []byte) (int, error) { - if r.r == nil { - return 0, ErrUploadPackResponseNotDecoded - } - - return r.r.Read(p) -} - -// Close the underlying reader, if any -func (r *UploadPackResponse) Close() error { - if r.r == nil { - return nil - } - - return r.r.Close() -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/reference.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/reference.go deleted file mode 100644 index 08e908f1f37..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/reference.go +++ /dev/null @@ -1,209 +0,0 @@ -package plumbing - -import ( - "errors" - "fmt" - "strings" -) - -const ( - refPrefix = "refs/" - refHeadPrefix = refPrefix + "heads/" - refTagPrefix = refPrefix + "tags/" - refRemotePrefix = refPrefix + "remotes/" - refNotePrefix = refPrefix + "notes/" - symrefPrefix = "ref: " -) - -// RefRevParseRules are a set of rules to parse references into short names. -// These are the same rules as used by git in shorten_unambiguous_ref. 
-// See: https://github.com/git/git/blob/e0aaa1b6532cfce93d87af9bc813fb2e7a7ce9d7/refs.c#L417 -var RefRevParseRules = []string{ - "refs/%s", - "refs/tags/%s", - "refs/heads/%s", - "refs/remotes/%s", - "refs/remotes/%s/HEAD", -} - -var ( - ErrReferenceNotFound = errors.New("reference not found") -) - -// ReferenceType reference type's -type ReferenceType int8 - -const ( - InvalidReference ReferenceType = 0 - HashReference ReferenceType = 1 - SymbolicReference ReferenceType = 2 -) - -func (r ReferenceType) String() string { - switch r { - case InvalidReference: - return "invalid-reference" - case HashReference: - return "hash-reference" - case SymbolicReference: - return "symbolic-reference" - } - - return "" -} - -// ReferenceName reference name's -type ReferenceName string - -// NewBranchReferenceName returns a reference name describing a branch based on -// his short name. -func NewBranchReferenceName(name string) ReferenceName { - return ReferenceName(refHeadPrefix + name) -} - -// NewNoteReferenceName returns a reference name describing a note based on his -// short name. -func NewNoteReferenceName(name string) ReferenceName { - return ReferenceName(refNotePrefix + name) -} - -// NewRemoteReferenceName returns a reference name describing a remote branch -// based on his short name and the remote name. -func NewRemoteReferenceName(remote, name string) ReferenceName { - return ReferenceName(refRemotePrefix + fmt.Sprintf("%s/%s", remote, name)) -} - -// NewRemoteHEADReferenceName returns a reference name describing a the HEAD -// branch of a remote. -func NewRemoteHEADReferenceName(remote string) ReferenceName { - return ReferenceName(refRemotePrefix + fmt.Sprintf("%s/%s", remote, HEAD)) -} - -// NewTagReferenceName returns a reference name describing a tag based on short -// his name. 
-func NewTagReferenceName(name string) ReferenceName { - return ReferenceName(refTagPrefix + name) -} - -// IsBranch check if a reference is a branch -func (r ReferenceName) IsBranch() bool { - return strings.HasPrefix(string(r), refHeadPrefix) -} - -// IsNote check if a reference is a note -func (r ReferenceName) IsNote() bool { - return strings.HasPrefix(string(r), refNotePrefix) -} - -// IsRemote check if a reference is a remote -func (r ReferenceName) IsRemote() bool { - return strings.HasPrefix(string(r), refRemotePrefix) -} - -// IsTag check if a reference is a tag -func (r ReferenceName) IsTag() bool { - return strings.HasPrefix(string(r), refTagPrefix) -} - -func (r ReferenceName) String() string { - return string(r) -} - -// Short returns the short name of a ReferenceName -func (r ReferenceName) Short() string { - s := string(r) - res := s - for _, format := range RefRevParseRules { - _, err := fmt.Sscanf(s, format, &res) - if err == nil { - continue - } - } - - return res -} - -const ( - HEAD ReferenceName = "HEAD" - Master ReferenceName = "refs/heads/master" -) - -// Reference is a representation of git reference -type Reference struct { - t ReferenceType - n ReferenceName - h Hash - target ReferenceName -} - -// NewReferenceFromStrings creates a reference from name and target as string, -// the resulting reference can be a SymbolicReference or a HashReference base -// on the target provided -func NewReferenceFromStrings(name, target string) *Reference { - n := ReferenceName(name) - - if strings.HasPrefix(target, symrefPrefix) { - target := ReferenceName(target[len(symrefPrefix):]) - return NewSymbolicReference(n, target) - } - - return NewHashReference(n, NewHash(target)) -} - -// NewSymbolicReference creates a new SymbolicReference reference -func NewSymbolicReference(n, target ReferenceName) *Reference { - return &Reference{ - t: SymbolicReference, - n: n, - target: target, - } -} - -// NewHashReference creates a new HashReference reference -func NewHashReference(n ReferenceName, h Hash) *Reference { - return &Reference{ - t: HashReference, - n: n, - h: h, - } -} - -// Type return the type of a reference -func (r *Reference) Type() ReferenceType { - return r.t -} - -// Name return the name of a reference -func (r *Reference) Name() ReferenceName { - return r.n -} - -// Hash return the hash of a hash reference -func (r *Reference) Hash() Hash { - return r.h -} - -// Target return the target of a symbolic reference -func (r *Reference) Target() ReferenceName { - return r.target -} - -// Strings dump a reference as a [2]string -func (r *Reference) Strings() [2]string { - var o [2]string - o[0] = r.Name().String() - - switch r.Type() { - case HashReference: - o[1] = r.Hash().String() - case SymbolicReference: - o[1] = symrefPrefix + r.Target().String() - } - - return o -} - -func (r *Reference) String() string { - s := r.Strings() - return fmt.Sprintf("%s %s", s[1], s[0]) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/revision.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/revision.go deleted file mode 100644 index 5f053b200c0..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/revision.go +++ /dev/null @@ -1,11 +0,0 @@ -package plumbing - -// Revision represents a git revision -// to get more details about git revisions -// please check git manual page : -// https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html -type Revision string - -func (r Revision) String() string { - return string(r) -} 
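Before the next removed file, a brief illustration of the reference API deleted above; a minimal sketch assuming go-git v5 import paths, with an arbitrary example hash:

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	// Symbolic reference: HEAD pointing at a branch.
	head := plumbing.NewReferenceFromStrings("HEAD", "ref: refs/heads/master")
	fmt.Println(head.Type(), head.Target()) // symbolic-reference refs/heads/master

	// Hash reference: a branch pointing at a commit (hash is illustrative).
	branch := plumbing.NewHashReference(
		plumbing.NewBranchReferenceName("master"),
		plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"),
	)
	fmt.Println(branch.Name().IsBranch()) // true
	fmt.Println(branch.Name().Short())    // master
}
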
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/revlist/revlist.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/revlist/revlist.go deleted file mode 100644 index b9109870f00..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/revlist/revlist.go +++ /dev/null @@ -1,230 +0,0 @@ -// Package revlist provides support to access the ancestors of commits, in a -// similar way as the git-rev-list command. -package revlist - -import ( - "fmt" - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/filemode" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/plumbing/storer" -) - -// Objects applies a complementary set. It gets all the hashes from all -// the reachable objects from the given objects. Ignore param are object hashes -// that we want to ignore on the result. All that objects must be accessible -// from the object storer. -func Objects( - s storer.EncodedObjectStorer, - objs, - ignore []plumbing.Hash, -) ([]plumbing.Hash, error) { - return ObjectsWithStorageForIgnores(s, s, objs, ignore) -} - -// ObjectsWithStorageForIgnores is the same as Objects, but a -// secondary storage layer can be provided, to be used to finding the -// full set of objects to be ignored while finding the reachable -// objects. This is useful when the main `s` storage layer is slow -// and/or remote, while the ignore list is available somewhere local. -func ObjectsWithStorageForIgnores( - s, ignoreStore storer.EncodedObjectStorer, - objs, - ignore []plumbing.Hash, -) ([]plumbing.Hash, error) { - ignore, err := objects(ignoreStore, ignore, nil, true) - if err != nil { - return nil, err - } - - return objects(s, objs, ignore, false) -} - -func objects( - s storer.EncodedObjectStorer, - objects, - ignore []plumbing.Hash, - allowMissingObjects bool, -) ([]plumbing.Hash, error) { - seen := hashListToSet(ignore) - result := make(map[plumbing.Hash]bool) - visited := make(map[plumbing.Hash]bool) - - walkerFunc := func(h plumbing.Hash) { - if !seen[h] { - result[h] = true - seen[h] = true - } - } - - for _, h := range objects { - if err := processObject(s, h, seen, visited, ignore, walkerFunc); err != nil { - if allowMissingObjects && err == plumbing.ErrObjectNotFound { - continue - } - - return nil, err - } - } - - return hashSetToList(result), nil -} - -// processObject obtains the object using the hash an process it depending of its type -func processObject( - s storer.EncodedObjectStorer, - h plumbing.Hash, - seen map[plumbing.Hash]bool, - visited map[plumbing.Hash]bool, - ignore []plumbing.Hash, - walkerFunc func(h plumbing.Hash), -) error { - if seen[h] { - return nil - } - - o, err := s.EncodedObject(plumbing.AnyObject, h) - if err != nil { - return err - } - - do, err := object.DecodeObject(s, o) - if err != nil { - return err - } - - switch do := do.(type) { - case *object.Commit: - return reachableObjects(do, seen, visited, ignore, walkerFunc) - case *object.Tree: - return iterateCommitTrees(seen, do, walkerFunc) - case *object.Tag: - walkerFunc(do.Hash) - return processObject(s, do.Target, seen, visited, ignore, walkerFunc) - case *object.Blob: - walkerFunc(do.Hash) - default: - return fmt.Errorf("object type not valid: %s. "+ - "Object reference: %s", o.Type(), o.Hash()) - } - - return nil -} - -// reachableObjects returns, using the callback function, all the reachable -// objects from the specified commit. 
To avoid to iterate over seen commits, -// if a commit hash is into the 'seen' set, we will not iterate all his trees -// and blobs objects. -func reachableObjects( - commit *object.Commit, - seen map[plumbing.Hash]bool, - visited map[plumbing.Hash]bool, - ignore []plumbing.Hash, - cb func(h plumbing.Hash), -) error { - i := object.NewCommitPreorderIter(commit, seen, ignore) - pending := make(map[plumbing.Hash]bool) - addPendingParents(pending, visited, commit) - for { - commit, err := i.Next() - if err == io.EOF { - break - } - - if err != nil { - return err - } - - if pending[commit.Hash] { - delete(pending, commit.Hash) - } - - addPendingParents(pending, visited, commit) - - if visited[commit.Hash] && len(pending) == 0 { - break - } - - if seen[commit.Hash] { - continue - } - - cb(commit.Hash) - - tree, err := commit.Tree() - if err != nil { - return err - } - - if err := iterateCommitTrees(seen, tree, cb); err != nil { - return err - } - } - - return nil -} - -func addPendingParents(pending, visited map[plumbing.Hash]bool, commit *object.Commit) { - for _, p := range commit.ParentHashes { - if !visited[p] { - pending[p] = true - } - } -} - -// iterateCommitTrees iterate all reachable trees from the given commit -func iterateCommitTrees( - seen map[plumbing.Hash]bool, - tree *object.Tree, - cb func(h plumbing.Hash), -) error { - if seen[tree.Hash] { - return nil - } - - cb(tree.Hash) - - treeWalker := object.NewTreeWalker(tree, true, seen) - - for { - _, e, err := treeWalker.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - - if e.Mode == filemode.Submodule { - continue - } - - if seen[e.Hash] { - continue - } - - cb(e.Hash) - } - - return nil -} - -func hashSetToList(hashes map[plumbing.Hash]bool) []plumbing.Hash { - var result []plumbing.Hash - for key := range hashes { - result = append(result, key) - } - - return result -} - -func hashListToSet(hashes []plumbing.Hash) map[plumbing.Hash]bool { - result := make(map[plumbing.Hash]bool) - for _, h := range hashes { - result[h] = true - } - - return result -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/doc.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/doc.go deleted file mode 100644 index 4d4f179c618..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package storer defines the interfaces to store objects, references, etc. 
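To illustrate the revlist API whose removal ends above: Objects performs the reachability walk described in those comments. A sketch under stated assumptions; repo is an already-opened repository, and head/old are placeholder hashes:

import (
	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/revlist"
)

// newObjects returns every object reachable from head that is not
// already reachable from old (e.g. to decide what a push must send).
func newObjects(repo *git.Repository, head, old plumbing.Hash) ([]plumbing.Hash, error) {
	return revlist.Objects(
		repo.Storer, // satisfies storer.EncodedObjectStorer
		[]plumbing.Hash{head},
		[]plumbing.Hash{old},
	)
}
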
-package storer
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/index.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/index.go
deleted file mode 100644
index 33113949b3a..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/index.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package storer
-
-import "github.com/go-git/go-git/v5/plumbing/format/index"
-
-// IndexStorer generic storage of index.Index
-type IndexStorer interface {
-	SetIndex(*index.Index) error
-	Index() (*index.Index, error)
-}
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/object.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/object.go
deleted file mode 100644
index dfe309db166..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/object.go
+++ /dev/null
@@ -1,288 +0,0 @@
-package storer
-
-import (
-	"errors"
-	"io"
-	"time"
-
-	"github.com/go-git/go-git/v5/plumbing"
-)
-
-var (
-	// ErrStop is used to stop a ForEach function in an Iter
-	ErrStop = errors.New("stop iter")
-)
-
-// EncodedObjectStorer generic storage of objects
-type EncodedObjectStorer interface {
-	// NewEncodedObject returns a new plumbing.EncodedObject, the real type
-	// of the object can be a custom implementation or the default one,
-	// plumbing.MemoryObject.
-	NewEncodedObject() plumbing.EncodedObject
-	// SetEncodedObject saves an object into the storage; the object should
-	// be created with the NewEncodedObject method, and the call should fail
-	// if the type is not supported.
-	SetEncodedObject(plumbing.EncodedObject) (plumbing.Hash, error)
-	// EncodedObject gets an object by hash with the given
-	// plumbing.ObjectType. Implementors should return
-	// (nil, plumbing.ErrObjectNotFound) if an object doesn't exist with
-	// both the given hash and object type.
-	//
-	// Valid plumbing.ObjectType values are CommitObject, BlobObject, TagObject,
-	// TreeObject and AnyObject. If plumbing.AnyObject is given, the object must
-	// be looked up regardless of its type.
-	EncodedObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error)
-	// IterEncodedObjects returns a custom EncodedObjectIter over all the
-	// objects in the storage.
-	//
-	// Valid plumbing.ObjectType values are CommitObject, BlobObject, TagObject,
-	// TreeObject and AnyObject.
-	IterEncodedObjects(plumbing.ObjectType) (EncodedObjectIter, error)
-	// HasEncodedObject returns ErrObjectNotFound if the object doesn't
-	// exist. If the object does exist, it returns nil.
-	HasEncodedObject(plumbing.Hash) error
-	// EncodedObjectSize returns the plaintext size of the encoded object.
-	EncodedObjectSize(plumbing.Hash) (int64, error)
-}
-
-// DeltaObjectStorer is an EncodedObjectStorer that can return delta
-// objects.
-type DeltaObjectStorer interface {
-	// DeltaObject is the same as EncodedObject but without resolving deltas.
-	// Deltas will be returned as plumbing.DeltaObject instances.
-	DeltaObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error)
-}
-
-// Transactioner is an optional method for ObjectStorer; it enables
-// transaction-based write and read operations in the storage
-type Transactioner interface {
-	// Begin starts a transaction.
-	Begin() Transaction
-}
-
-// LooseObjectStorer is an optional interface for managing "loose"
-// objects, i.e. those not in packfiles.
-type LooseObjectStorer interface { - // ForEachObjectHash iterates over all the (loose) object hashes - // in the repository without necessarily having to read those objects. - // Objects only inside pack files may be omitted. - // If ErrStop is sent the iteration is stop but no error is returned. - ForEachObjectHash(func(plumbing.Hash) error) error - // LooseObjectTime looks up the (m)time associated with the - // loose object (that is not in a pack file). Some - // implementations (e.g. without loose objects) - // always return an error. - LooseObjectTime(plumbing.Hash) (time.Time, error) - // DeleteLooseObject deletes a loose object if it exists. - DeleteLooseObject(plumbing.Hash) error -} - -// PackedObjectStorer is an optional interface for managing objects in -// packfiles. -type PackedObjectStorer interface { - // ObjectPacks returns hashes of object packs if the underlying - // implementation has pack files. - ObjectPacks() ([]plumbing.Hash, error) - // DeleteOldObjectPackAndIndex deletes an object pack and the corresponding index file if they exist. - // Deletion is only performed if the pack is older than the supplied time (or the time is zero). - DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error -} - -// PackfileWriter is a optional method for ObjectStorer, it enable direct write -// of packfile to the storage -type PackfileWriter interface { - // PackfileWriter returns a writer for writing a packfile to the storage - // - // If the Storer not implements PackfileWriter the objects should be written - // using the Set method. - PackfileWriter() (io.WriteCloser, error) -} - -// EncodedObjectIter is a generic closable interface for iterating over objects. -type EncodedObjectIter interface { - Next() (plumbing.EncodedObject, error) - ForEach(func(plumbing.EncodedObject) error) error - Close() -} - -// Transaction is an in-progress storage transaction. A transaction must end -// with a call to Commit or Rollback. -type Transaction interface { - SetEncodedObject(plumbing.EncodedObject) (plumbing.Hash, error) - EncodedObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error) - Commit() error - Rollback() error -} - -// EncodedObjectLookupIter implements EncodedObjectIter. It iterates over a -// series of object hashes and yields their associated objects by retrieving -// each one from object storage. The retrievals are lazy and only occur when the -// iterator moves forward with a call to Next(). -// -// The EncodedObjectLookupIter must be closed with a call to Close() when it is -// no longer needed. -type EncodedObjectLookupIter struct { - storage EncodedObjectStorer - series []plumbing.Hash - t plumbing.ObjectType - pos int -} - -// NewEncodedObjectLookupIter returns an object iterator given an object storage -// and a slice of object hashes. -func NewEncodedObjectLookupIter( - storage EncodedObjectStorer, t plumbing.ObjectType, series []plumbing.Hash) *EncodedObjectLookupIter { - return &EncodedObjectLookupIter{ - storage: storage, - series: series, - t: t, - } -} - -// Next returns the next object from the iterator. If the iterator has reached -// the end it will return io.EOF as an error. If the object can't be found in -// the object storage, it will return plumbing.ErrObjectNotFound as an error. -// If the object is retrieved successfully error will be nil. 
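A small sketch of the lookup iterator documented above, using the in-memory storage from go-git's storage/memory package; the empty blob is just a stand-in object:

import (
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/storer"
	"github.com/go-git/go-git/v5/storage/memory"
)

func lookupExample() error {
	st := memory.NewStorage()

	obj := st.NewEncodedObject() // a plumbing.MemoryObject
	obj.SetType(plumbing.BlobObject)
	h, err := st.SetEncodedObject(obj)
	if err != nil {
		return err
	}

	// The iterator holds only hashes; retrieval happens lazily in Next().
	iter := storer.NewEncodedObjectLookupIter(st, plumbing.BlobObject, []plumbing.Hash{h})
	defer iter.Close()

	_, err = iter.Next()
	return err
}
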
-func (iter *EncodedObjectLookupIter) Next() (plumbing.EncodedObject, error) { - if iter.pos >= len(iter.series) { - return nil, io.EOF - } - - hash := iter.series[iter.pos] - obj, err := iter.storage.EncodedObject(iter.t, hash) - if err == nil { - iter.pos++ - } - - return obj, err -} - -// ForEach call the cb function for each object contained on this iter until -// an error happens or the end of the iter is reached. If ErrStop is sent -// the iteration is stop but no error is returned. The iterator is closed. -func (iter *EncodedObjectLookupIter) ForEach(cb func(plumbing.EncodedObject) error) error { - return ForEachIterator(iter, cb) -} - -// Close releases any resources used by the iterator. -func (iter *EncodedObjectLookupIter) Close() { - iter.pos = len(iter.series) -} - -// EncodedObjectSliceIter implements EncodedObjectIter. It iterates over a -// series of objects stored in a slice and yields each one in turn when Next() -// is called. -// -// The EncodedObjectSliceIter must be closed with a call to Close() when it is -// no longer needed. -type EncodedObjectSliceIter struct { - series []plumbing.EncodedObject -} - -// NewEncodedObjectSliceIter returns an object iterator for the given slice of -// objects. -func NewEncodedObjectSliceIter(series []plumbing.EncodedObject) *EncodedObjectSliceIter { - return &EncodedObjectSliceIter{ - series: series, - } -} - -// Next returns the next object from the iterator. If the iterator has reached -// the end it will return io.EOF as an error. If the object is retrieved -// successfully error will be nil. -func (iter *EncodedObjectSliceIter) Next() (plumbing.EncodedObject, error) { - if len(iter.series) == 0 { - return nil, io.EOF - } - - obj := iter.series[0] - iter.series = iter.series[1:] - - return obj, nil -} - -// ForEach call the cb function for each object contained on this iter until -// an error happens or the end of the iter is reached. If ErrStop is sent -// the iteration is stop but no error is returned. The iterator is closed. -func (iter *EncodedObjectSliceIter) ForEach(cb func(plumbing.EncodedObject) error) error { - return ForEachIterator(iter, cb) -} - -// Close releases any resources used by the iterator. -func (iter *EncodedObjectSliceIter) Close() { - iter.series = []plumbing.EncodedObject{} -} - -// MultiEncodedObjectIter implements EncodedObjectIter. It iterates over several -// EncodedObjectIter, -// -// The MultiObjectIter must be closed with a call to Close() when it is no -// longer needed. -type MultiEncodedObjectIter struct { - iters []EncodedObjectIter -} - -// NewMultiEncodedObjectIter returns an object iterator for the given slice of -// EncodedObjectIters. -func NewMultiEncodedObjectIter(iters []EncodedObjectIter) EncodedObjectIter { - return &MultiEncodedObjectIter{iters: iters} -} - -// Next returns the next object from the iterator, if one iterator reach io.EOF -// is removed and the next one is used. -func (iter *MultiEncodedObjectIter) Next() (plumbing.EncodedObject, error) { - if len(iter.iters) == 0 { - return nil, io.EOF - } - - obj, err := iter.iters[0].Next() - if err == io.EOF { - iter.iters[0].Close() - iter.iters = iter.iters[1:] - return iter.Next() - } - - return obj, err -} - -// ForEach call the cb function for each object contained on this iter until -// an error happens or the end of the iter is reached. If ErrStop is sent -// the iteration is stop but no error is returned. The iterator is closed. 
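The ErrStop contract described in these ForEach comments is worth a concrete sketch; the iterator may be any storer.EncodedObjectIter:

// Stop at the first commit; ForEach swallows storer.ErrStop and
// returns nil, and the iterator is closed either way.
func firstCommitOnly(iter storer.EncodedObjectIter) error {
	return iter.ForEach(func(obj plumbing.EncodedObject) error {
		if obj.Type() == plumbing.CommitObject {
			return storer.ErrStop // ends iteration without an error
		}
		return nil
	})
}
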
-func (iter *MultiEncodedObjectIter) ForEach(cb func(plumbing.EncodedObject) error) error { - return ForEachIterator(iter, cb) -} - -// Close releases any resources used by the iterator. -func (iter *MultiEncodedObjectIter) Close() { - for _, i := range iter.iters { - i.Close() - } -} - -type bareIterator interface { - Next() (plumbing.EncodedObject, error) - Close() -} - -// ForEachIterator is a helper function to build iterators without need to -// rewrite the same ForEach function each time. -func ForEachIterator(iter bareIterator, cb func(plumbing.EncodedObject) error) error { - defer iter.Close() - for { - obj, err := iter.Next() - if err != nil { - if err == io.EOF { - return nil - } - - return err - } - - if err := cb(obj); err != nil { - if err == ErrStop { - return nil - } - - return err - } - } -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/reference.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/reference.go deleted file mode 100644 index 1d74ef3c6ac..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/reference.go +++ /dev/null @@ -1,240 +0,0 @@ -package storer - -import ( - "errors" - "io" - - "github.com/go-git/go-git/v5/plumbing" -) - -const MaxResolveRecursion = 1024 - -// ErrMaxResolveRecursion is returned by ResolveReference is MaxResolveRecursion -// is exceeded -var ErrMaxResolveRecursion = errors.New("max. recursion level reached") - -// ReferenceStorer is a generic storage of references. -type ReferenceStorer interface { - SetReference(*plumbing.Reference) error - // CheckAndSetReference sets the reference `new`, but if `old` is - // not `nil`, it first checks that the current stored value for - // `old.Name()` matches the given reference value in `old`. If - // not, it returns an error and doesn't update `new`. - CheckAndSetReference(new, old *plumbing.Reference) error - Reference(plumbing.ReferenceName) (*plumbing.Reference, error) - IterReferences() (ReferenceIter, error) - RemoveReference(plumbing.ReferenceName) error - CountLooseRefs() (int, error) - PackRefs() error -} - -// ReferenceIter is a generic closable interface for iterating over references. -type ReferenceIter interface { - Next() (*plumbing.Reference, error) - ForEach(func(*plumbing.Reference) error) error - Close() -} - -type referenceFilteredIter struct { - ff func(r *plumbing.Reference) bool - iter ReferenceIter -} - -// NewReferenceFilteredIter returns a reference iterator for the given reference -// Iterator. This iterator will iterate only references that accomplish the -// provided function. -func NewReferenceFilteredIter( - ff func(r *plumbing.Reference) bool, iter ReferenceIter) ReferenceIter { - return &referenceFilteredIter{ff, iter} -} - -// Next returns the next reference from the iterator. If the iterator has reached -// the end it will return io.EOF as an error. -func (iter *referenceFilteredIter) Next() (*plumbing.Reference, error) { - for { - r, err := iter.iter.Next() - if err != nil { - return nil, err - } - - if iter.ff(r) { - return r, nil - } - - continue - } -} - -// ForEach call the cb function for each reference contained on this iter until -// an error happens or the end of the iter is reached. If ErrStop is sent -// the iteration is stopped but no error is returned. The iterator is closed. 
-func (iter *referenceFilteredIter) ForEach(cb func(*plumbing.Reference) error) error { - defer iter.Close() - for { - r, err := iter.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - - if err := cb(r); err != nil { - if err == ErrStop { - break - } - - return err - } - } - - return nil -} - -// Close releases any resources used by the iterator. -func (iter *referenceFilteredIter) Close() { - iter.iter.Close() -} - -// ReferenceSliceIter implements ReferenceIter. It iterates over a series of -// references stored in a slice and yields each one in turn when Next() is -// called. -// -// The ReferenceSliceIter must be closed with a call to Close() when it is no -// longer needed. -type ReferenceSliceIter struct { - series []*plumbing.Reference - pos int -} - -// NewReferenceSliceIter returns a reference iterator for the given slice of -// objects. -func NewReferenceSliceIter(series []*plumbing.Reference) ReferenceIter { - return &ReferenceSliceIter{ - series: series, - } -} - -// Next returns the next reference from the iterator. If the iterator has -// reached the end it will return io.EOF as an error. -func (iter *ReferenceSliceIter) Next() (*plumbing.Reference, error) { - if iter.pos >= len(iter.series) { - return nil, io.EOF - } - - obj := iter.series[iter.pos] - iter.pos++ - return obj, nil -} - -// ForEach call the cb function for each reference contained on this iter until -// an error happens or the end of the iter is reached. If ErrStop is sent -// the iteration is stop but no error is returned. The iterator is closed. -func (iter *ReferenceSliceIter) ForEach(cb func(*plumbing.Reference) error) error { - return forEachReferenceIter(iter, cb) -} - -type bareReferenceIterator interface { - Next() (*plumbing.Reference, error) - Close() -} - -func forEachReferenceIter(iter bareReferenceIterator, cb func(*plumbing.Reference) error) error { - defer iter.Close() - for { - obj, err := iter.Next() - if err != nil { - if err == io.EOF { - return nil - } - - return err - } - - if err := cb(obj); err != nil { - if err == ErrStop { - return nil - } - - return err - } - } -} - -// Close releases any resources used by the iterator. -func (iter *ReferenceSliceIter) Close() { - iter.pos = len(iter.series) -} - -// MultiReferenceIter implements ReferenceIter. It iterates over several -// ReferenceIter, -// -// The MultiReferenceIter must be closed with a call to Close() when it is no -// longer needed. -type MultiReferenceIter struct { - iters []ReferenceIter -} - -// NewMultiReferenceIter returns an reference iterator for the given slice of -// EncodedObjectIters. -func NewMultiReferenceIter(iters []ReferenceIter) ReferenceIter { - return &MultiReferenceIter{iters: iters} -} - -// Next returns the next reference from the iterator, if one iterator reach -// io.EOF is removed and the next one is used. -func (iter *MultiReferenceIter) Next() (*plumbing.Reference, error) { - if len(iter.iters) == 0 { - return nil, io.EOF - } - - obj, err := iter.iters[0].Next() - if err == io.EOF { - iter.iters[0].Close() - iter.iters = iter.iters[1:] - return iter.Next() - } - - return obj, err -} - -// ForEach call the cb function for each reference contained on this iter until -// an error happens or the end of the iter is reached. If ErrStop is sent -// the iteration is stop but no error is returned. The iterator is closed. 
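Similarly, the filtered reference iterator above composes with any existing ReferenceIter; a sketch that yields only branch references:

func branchesOnly(refs storer.ReferenceIter) storer.ReferenceIter {
	return storer.NewReferenceFilteredIter(func(r *plumbing.Reference) bool {
		return r.Name().IsBranch() // keep refs/heads/* only
	}, refs)
}
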
-func (iter *MultiReferenceIter) ForEach(cb func(*plumbing.Reference) error) error { - return forEachReferenceIter(iter, cb) -} - -// Close releases any resources used by the iterator. -func (iter *MultiReferenceIter) Close() { - for _, i := range iter.iters { - i.Close() - } -} - -// ResolveReference resolves a SymbolicReference to a HashReference. -func ResolveReference(s ReferenceStorer, n plumbing.ReferenceName) (*plumbing.Reference, error) { - r, err := s.Reference(n) - if err != nil || r == nil { - return r, err - } - return resolveReference(s, r, 0) -} - -func resolveReference(s ReferenceStorer, r *plumbing.Reference, recursion int) (*plumbing.Reference, error) { - if r.Type() != plumbing.SymbolicReference { - return r, nil - } - - if recursion > MaxResolveRecursion { - return nil, ErrMaxResolveRecursion - } - - t, err := s.Reference(r.Target()) - if err != nil { - return nil, err - } - - recursion++ - return resolveReference(s, t, recursion) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/shallow.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/shallow.go deleted file mode 100644 index 39ef5ea5c67..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/shallow.go +++ /dev/null @@ -1,10 +0,0 @@ -package storer - -import "github.com/go-git/go-git/v5/plumbing" - -// ShallowStorer is a storage of references to shallow commits by hash, -// meaning that these commits have missing parents because of a shallow fetch. -type ShallowStorer interface { - SetShallow([]plumbing.Hash) error - Shallow() ([]plumbing.Hash, error) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/storer.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/storer.go deleted file mode 100644 index c7bc65a0c49..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/storer/storer.go +++ /dev/null @@ -1,15 +0,0 @@ -package storer - -// Storer is a basic storer for encoded objects and references. -type Storer interface { - EncodedObjectStorer - ReferenceStorer -} - -// Initializer should be implemented by storers that require to perform any -// operation when creating a new repository (i.e. git init). -type Initializer interface { - // Init performs initialization of the storer and returns the error, if - // any. - Init() error -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/client/client.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/client/client.go deleted file mode 100644 index 4f6d210e98a..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/client/client.go +++ /dev/null @@ -1,48 +0,0 @@ -// Package client contains helper function to deal with the different client -// protocols. -package client - -import ( - "fmt" - - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/plumbing/transport/file" - "github.com/go-git/go-git/v5/plumbing/transport/git" - "github.com/go-git/go-git/v5/plumbing/transport/http" - "github.com/go-git/go-git/v5/plumbing/transport/ssh" -) - -// Protocols are the protocols supported by default. -var Protocols = map[string]transport.Transport{ - "http": http.DefaultClient, - "https": http.DefaultClient, - "ssh": ssh.DefaultClient, - "git": git.DefaultClient, - "file": file.DefaultClient, -} - -// InstallProtocol adds or modifies an existing protocol. 
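The protocol registry being removed here is go-git's documented extension point for customizing transports; a sketch that swaps the default HTTPS client for one with a timeout (the timeout value is arbitrary):

import (
	nethttp "net/http"
	"time"

	"github.com/go-git/go-git/v5/plumbing/transport/client"
	githttp "github.com/go-git/go-git/v5/plumbing/transport/http"
)

func init() {
	// All later https:// fetches and pushes go through this client.
	client.InstallProtocol("https", githttp.NewClient(
		&nethttp.Client{Timeout: 30 * time.Second},
	))
}
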
-func InstallProtocol(scheme string, c transport.Transport) { - if c == nil { - delete(Protocols, scheme) - return - } - - Protocols[scheme] = c -} - -// NewClient returns the appropriate client among of the set of known protocols: -// http://, https://, ssh:// and file://. -// See `InstallProtocol` to add or modify protocols. -func NewClient(endpoint *transport.Endpoint) (transport.Transport, error) { - f, ok := Protocols[endpoint.Protocol] - if !ok { - return nil, fmt.Errorf("unsupported scheme %q", endpoint.Protocol) - } - - if f == nil { - return nil, fmt.Errorf("malformed client for scheme %q, client is defined as nil", endpoint.Protocol) - } - - return f, nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go deleted file mode 100644 index ead215557d9..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go +++ /dev/null @@ -1,274 +0,0 @@ -// Package transport includes the implementation for different transport -// protocols. -// -// `Client` can be used to fetch and send packfiles to a git server. -// The `client` package provides higher level functions to instantiate the -// appropriate `Client` based on the repository URL. -// -// go-git supports HTTP and SSH (see `Protocols`), but you can also install -// your own protocols (see the `client` package). -// -// Each protocol has its own implementation of `Client`, but you should -// generally not use them directly, use `client.NewClient` instead. -package transport - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "net/url" - "strconv" - "strings" - - giturl "github.com/go-git/go-git/v5/internal/url" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/protocol/packp" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" -) - -var ( - ErrRepositoryNotFound = errors.New("repository not found") - ErrEmptyRemoteRepository = errors.New("remote repository is empty") - ErrAuthenticationRequired = errors.New("authentication required") - ErrAuthorizationFailed = errors.New("authorization failed") - ErrEmptyUploadPackRequest = errors.New("empty git-upload-pack given") - ErrInvalidAuthMethod = errors.New("invalid auth method") - ErrAlreadyConnected = errors.New("session already established") -) - -const ( - UploadPackServiceName = "git-upload-pack" - ReceivePackServiceName = "git-receive-pack" -) - -// Transport can initiate git-upload-pack and git-receive-pack processes. -// It is implemented both by the client and the server, making this a RPC. -type Transport interface { - // NewUploadPackSession starts a git-upload-pack session for an endpoint. - NewUploadPackSession(*Endpoint, AuthMethod) (UploadPackSession, error) - // NewReceivePackSession starts a git-receive-pack session for an endpoint. - NewReceivePackSession(*Endpoint, AuthMethod) (ReceivePackSession, error) -} - -type Session interface { - // AdvertisedReferences retrieves the advertised references for a - // repository. - // If the repository does not exist, returns ErrRepositoryNotFound. - // If the repository exists, but is empty, returns ErrEmptyRemoteRepository. - AdvertisedReferences() (*packp.AdvRefs, error) - io.Closer -} - -type AuthMethod interface { - fmt.Stringer - Name() string -} - -// UploadPackSession represents a git-upload-pack session. 
-// A git-upload-pack session has two steps: reference discovery -// (AdvertisedReferences) and uploading pack (UploadPack). -type UploadPackSession interface { - Session - // UploadPack takes a git-upload-pack request and returns a response, - // including a packfile. Don't be confused by terminology, the client - // side of a git-upload-pack is called git-fetch-pack, although here - // the same interface is used to make it RPC-like. - UploadPack(context.Context, *packp.UploadPackRequest) (*packp.UploadPackResponse, error) -} - -// ReceivePackSession represents a git-receive-pack session. -// A git-receive-pack session has two steps: reference discovery -// (AdvertisedReferences) and receiving pack (ReceivePack). -// In that order. -type ReceivePackSession interface { - Session - // ReceivePack sends an update references request and a packfile - // reader and returns a ReportStatus and error. Don't be confused by - // terminology, the client side of a git-receive-pack is called - // git-send-pack, although here the same interface is used to make it - // RPC-like. - ReceivePack(context.Context, *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error) -} - -// Endpoint represents a Git URL in any supported protocol. -type Endpoint struct { - // Protocol is the protocol of the endpoint (e.g. git, https, file). - Protocol string - // User is the user. - User string - // Password is the password. - Password string - // Host is the host. - Host string - // Port is the port to connect, if 0 the default port for the given protocol - // wil be used. - Port int - // Path is the repository path. - Path string -} - -var defaultPorts = map[string]int{ - "http": 80, - "https": 443, - "git": 9418, - "ssh": 22, -} - -// String returns a string representation of the Git URL. 
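Endpoint accepts URL-style, SCP-like, and bare-path strings; a sketch of the SCP-like case and of the String round-trip implemented below:

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/transport"
)

func main() {
	ep, err := transport.NewEndpoint("git@github.com:go-git/go-git.git")
	if err != nil {
		panic(err)
	}
	// SCP-like strings parse as SSH: Protocol "ssh", User "git",
	// Host "github.com", Port 22, Path "go-git/go-git.git".
	// The default port is omitted when rendering:
	fmt.Println(ep.String()) // ssh://git@github.com/go-git/go-git.git
}
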
-func (u *Endpoint) String() string { - var buf bytes.Buffer - if u.Protocol != "" { - buf.WriteString(u.Protocol) - buf.WriteByte(':') - } - - if u.Protocol != "" || u.Host != "" || u.User != "" || u.Password != "" { - buf.WriteString("//") - - if u.User != "" || u.Password != "" { - buf.WriteString(url.PathEscape(u.User)) - if u.Password != "" { - buf.WriteByte(':') - buf.WriteString(url.PathEscape(u.Password)) - } - - buf.WriteByte('@') - } - - if u.Host != "" { - buf.WriteString(u.Host) - - if u.Port != 0 { - port, ok := defaultPorts[strings.ToLower(u.Protocol)] - if !ok || ok && port != u.Port { - fmt.Fprintf(&buf, ":%d", u.Port) - } - } - } - } - - if u.Path != "" && u.Path[0] != '/' && u.Host != "" { - buf.WriteByte('/') - } - - buf.WriteString(u.Path) - return buf.String() -} - -func NewEndpoint(endpoint string) (*Endpoint, error) { - if e, ok := parseSCPLike(endpoint); ok { - return e, nil - } - - if e, ok := parseFile(endpoint); ok { - return e, nil - } - - return parseURL(endpoint) -} - -func parseURL(endpoint string) (*Endpoint, error) { - u, err := url.Parse(endpoint) - if err != nil { - return nil, err - } - - if !u.IsAbs() { - return nil, plumbing.NewPermanentError(fmt.Errorf( - "invalid endpoint: %s", endpoint, - )) - } - - var user, pass string - if u.User != nil { - user = u.User.Username() - pass, _ = u.User.Password() - } - - return &Endpoint{ - Protocol: u.Scheme, - User: user, - Password: pass, - Host: u.Hostname(), - Port: getPort(u), - Path: getPath(u), - }, nil -} - -func getPort(u *url.URL) int { - p := u.Port() - if p == "" { - return 0 - } - - i, err := strconv.Atoi(p) - if err != nil { - return 0 - } - - return i -} - -func getPath(u *url.URL) string { - var res string = u.Path - if u.RawQuery != "" { - res += "?" + u.RawQuery - } - - if u.Fragment != "" { - res += "#" + u.Fragment - } - - return res -} - -func parseSCPLike(endpoint string) (*Endpoint, bool) { - if giturl.MatchesScheme(endpoint) || !giturl.MatchesScpLike(endpoint) { - return nil, false - } - - user, host, portStr, path := giturl.FindScpLikeComponents(endpoint) - port, err := strconv.Atoi(portStr) - if err != nil { - port = 22 - } - - return &Endpoint{ - Protocol: "ssh", - User: user, - Host: host, - Port: port, - Path: path, - }, true -} - -func parseFile(endpoint string) (*Endpoint, bool) { - if giturl.MatchesScheme(endpoint) { - return nil, false - } - - path := endpoint - return &Endpoint{ - Protocol: "file", - Path: path, - }, true -} - -// UnsupportedCapabilities are the capabilities not supported by any client -// implementation -var UnsupportedCapabilities = []capability.Capability{ - capability.MultiACK, - capability.MultiACKDetailed, - capability.ThinPack, -} - -// FilterUnsupportedCapabilities it filter out all the UnsupportedCapabilities -// from a capability.List, the intended usage is on the client implementation -// to filter the capabilities from an AdvRefs message. -func FilterUnsupportedCapabilities(list *capability.List) { - for _, c := range UnsupportedCapabilities { - list.Delete(c) - } -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go deleted file mode 100644 index f6e23652a79..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go +++ /dev/null @@ -1,156 +0,0 @@ -// Package file implements the file transport protocol. 
-package file - -import ( - "bufio" - "errors" - "io" - "os" - "os/exec" - "path/filepath" - "strings" - - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/plumbing/transport/internal/common" -) - -// DefaultClient is the default local client. -var DefaultClient = NewClient( - transport.UploadPackServiceName, - transport.ReceivePackServiceName, -) - -type runner struct { - UploadPackBin string - ReceivePackBin string -} - -// NewClient returns a new local client using the given git-upload-pack and -// git-receive-pack binaries. -func NewClient(uploadPackBin, receivePackBin string) transport.Transport { - return common.NewClient(&runner{ - UploadPackBin: uploadPackBin, - ReceivePackBin: receivePackBin, - }) -} - -func prefixExecPath(cmd string) (string, error) { - // Use `git --exec-path` to find the exec path. - execCmd := exec.Command("git", "--exec-path") - - stdout, err := execCmd.StdoutPipe() - if err != nil { - return "", err - } - stdoutBuf := bufio.NewReader(stdout) - - err = execCmd.Start() - if err != nil { - return "", err - } - - execPathBytes, isPrefix, err := stdoutBuf.ReadLine() - if err != nil { - return "", err - } - if isPrefix { - return "", errors.New("Couldn't read exec-path line all at once") - } - - err = execCmd.Wait() - if err != nil { - return "", err - } - execPath := string(execPathBytes) - execPath = strings.TrimSpace(execPath) - cmd = filepath.Join(execPath, cmd) - - // Make sure it actually exists. - _, err = exec.LookPath(cmd) - if err != nil { - return "", err - } - return cmd, nil -} - -func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod, -) (common.Command, error) { - - switch cmd { - case transport.UploadPackServiceName: - cmd = r.UploadPackBin - case transport.ReceivePackServiceName: - cmd = r.ReceivePackBin - } - - _, err := exec.LookPath(cmd) - if err != nil { - if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound { - cmd, err = prefixExecPath(cmd) - if err != nil { - return nil, err - } - } else { - return nil, err - } - } - - return &command{cmd: exec.Command(cmd, ep.Path)}, nil -} - -type command struct { - cmd *exec.Cmd - stderrCloser io.Closer - closed bool -} - -func (c *command) Start() error { - return c.cmd.Start() -} - -func (c *command) StderrPipe() (io.Reader, error) { - // Pipe returned by Command.StderrPipe has a race with Read + Command.Wait. - // We use an io.Pipe and close it after the command finishes. - r, w := io.Pipe() - c.cmd.Stderr = w - c.stderrCloser = r - return r, nil -} - -func (c *command) StdinPipe() (io.WriteCloser, error) { - return c.cmd.StdinPipe() -} - -func (c *command) StdoutPipe() (io.Reader, error) { - return c.cmd.StdoutPipe() -} - -func (c *command) Kill() error { - c.cmd.Process.Kill() - return c.Close() -} - -// Close waits for the command to exit. -func (c *command) Close() error { - if c.closed { - return nil - } - - defer func() { - c.closed = true - _ = c.stderrCloser.Close() - - }() - - err := c.cmd.Wait() - if _, ok := err.(*os.PathError); ok { - return nil - } - - // When a repository does not exist, the command exits with code 128. 
- if _, ok := err.(*exec.ExitError); ok { - return nil - } - - return err -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/server.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/server.go deleted file mode 100644 index b45d7a71c2f..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/server.go +++ /dev/null @@ -1,53 +0,0 @@ -package file - -import ( - "fmt" - "os" - - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/plumbing/transport/internal/common" - "github.com/go-git/go-git/v5/plumbing/transport/server" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -// ServeUploadPack serves a git-upload-pack request using standard output, input -// and error. This is meant to be used when implementing a git-upload-pack -// command. -func ServeUploadPack(path string) error { - ep, err := transport.NewEndpoint(path) - if err != nil { - return err - } - - // TODO: define and implement a server-side AuthMethod - s, err := server.DefaultServer.NewUploadPackSession(ep, nil) - if err != nil { - return fmt.Errorf("error creating session: %s", err) - } - - return common.ServeUploadPack(srvCmd, s) -} - -// ServeReceivePack serves a git-receive-pack request using standard output, -// input and error. This is meant to be used when implementing a -// git-receive-pack command. -func ServeReceivePack(path string) error { - ep, err := transport.NewEndpoint(path) - if err != nil { - return err - } - - // TODO: define and implement a server-side AuthMethod - s, err := server.DefaultServer.NewReceivePackSession(ep, nil) - if err != nil { - return fmt.Errorf("error creating session: %s", err) - } - - return common.ServeReceivePack(srvCmd, s) -} - -var srvCmd = common.ServerCommand{ - Stdin: os.Stdin, - Stdout: ioutil.WriteNopCloser(os.Stdout), - Stderr: os.Stderr, -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/git/common.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/git/common.go deleted file mode 100644 index 306aae261f0..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/git/common.go +++ /dev/null @@ -1,109 +0,0 @@ -// Package git implements the git transport protocol. -package git - -import ( - "fmt" - "io" - "net" - - "github.com/go-git/go-git/v5/plumbing/format/pktline" - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/plumbing/transport/internal/common" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -// DefaultClient is the default git client. 
-var DefaultClient = common.NewClient(&runner{}) - -const DefaultPort = 9418 - -type runner struct{} - -// Command returns a new Command for the given cmd in the given Endpoint -func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (common.Command, error) { - // auth not allowed since git protocol doesn't support authentication - if auth != nil { - return nil, transport.ErrInvalidAuthMethod - } - c := &command{command: cmd, endpoint: ep} - if err := c.connect(); err != nil { - return nil, err - } - return c, nil -} - -type command struct { - conn net.Conn - connected bool - command string - endpoint *transport.Endpoint -} - -// Start executes the command sending the required message to the TCP connection -func (c *command) Start() error { - cmd := endpointToCommand(c.command, c.endpoint) - - e := pktline.NewEncoder(c.conn) - return e.Encode([]byte(cmd)) -} - -func (c *command) connect() error { - if c.connected { - return transport.ErrAlreadyConnected - } - - var err error - c.conn, err = net.Dial("tcp", c.getHostWithPort()) - if err != nil { - return err - } - - c.connected = true - return nil -} - -func (c *command) getHostWithPort() string { - host := c.endpoint.Host - port := c.endpoint.Port - if port <= 0 { - port = DefaultPort - } - - return fmt.Sprintf("%s:%d", host, port) -} - -// StderrPipe git protocol doesn't have any dedicated error channel -func (c *command) StderrPipe() (io.Reader, error) { - return nil, nil -} - -// StdinPipe return the underlying connection as WriteCloser, wrapped to prevent -// call to the Close function from the connection, a command execution in git -// protocol can't be closed or killed -func (c *command) StdinPipe() (io.WriteCloser, error) { - return ioutil.WriteNopCloser(c.conn), nil -} - -// StdoutPipe return the underlying connection as Reader -func (c *command) StdoutPipe() (io.Reader, error) { - return c.conn, nil -} - -func endpointToCommand(cmd string, ep *transport.Endpoint) string { - host := ep.Host - if ep.Port != DefaultPort { - host = fmt.Sprintf("%s:%d", ep.Host, ep.Port) - } - - return fmt.Sprintf("%s %s%chost=%s%c", cmd, ep.Path, 0, host, 0) -} - -// Close closes the TCP connection and connection. -func (c *command) Close() error { - if !c.connected { - return nil - } - - c.connected = false - return c.conn.Close() -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go deleted file mode 100644 index aeedc5bb552..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go +++ /dev/null @@ -1,281 +0,0 @@ -// Package http implements the HTTP transport protocol. 
-package http - -import ( - "bytes" - "fmt" - "net" - "net/http" - "strconv" - "strings" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/protocol/packp" - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -// it requires a bytes.Buffer, because we need to know the length -func applyHeadersToRequest(req *http.Request, content *bytes.Buffer, host string, requestType string) { - req.Header.Add("User-Agent", "git/1.0") - req.Header.Add("Host", host) // host:port - - if content == nil { - req.Header.Add("Accept", "*/*") - return - } - - req.Header.Add("Accept", fmt.Sprintf("application/x-%s-result", requestType)) - req.Header.Add("Content-Type", fmt.Sprintf("application/x-%s-request", requestType)) - req.Header.Add("Content-Length", strconv.Itoa(content.Len())) -} - -const infoRefsPath = "/info/refs" - -func advertisedReferences(s *session, serviceName string) (ref *packp.AdvRefs, err error) { - url := fmt.Sprintf( - "%s%s?service=%s", - s.endpoint.String(), infoRefsPath, serviceName, - ) - - req, err := http.NewRequest(http.MethodGet, url, nil) - if err != nil { - return nil, err - } - - s.ApplyAuthToRequest(req) - applyHeadersToRequest(req, nil, s.endpoint.Host, serviceName) - res, err := s.client.Do(req) - if err != nil { - return nil, err - } - - s.ModifyEndpointIfRedirect(res) - defer ioutil.CheckClose(res.Body, &err) - - if err = NewErr(res); err != nil { - return nil, err - } - - ar := packp.NewAdvRefs() - if err = ar.Decode(res.Body); err != nil { - if err == packp.ErrEmptyAdvRefs { - err = transport.ErrEmptyRemoteRepository - } - - return nil, err - } - - transport.FilterUnsupportedCapabilities(ar.Capabilities) - s.advRefs = ar - - return ar, nil -} - -type client struct { - c *http.Client -} - -// DefaultClient is the default HTTP client, which uses `http.DefaultClient`. -var DefaultClient = NewClient(nil) - -// NewClient creates a new client with a custom net/http client. -// See `InstallProtocol` to install and override default http client. -// Unless a properly initialized client is given, it will fall back into -// `http.DefaultClient`. -// -// Note that for HTTP client cannot distinguish between private repositories and -// unexistent repositories on GitHub. So it returns `ErrAuthorizationRequired` -// for both. 
-func NewClient(c *http.Client) transport.Transport { - if c == nil { - return &client{http.DefaultClient} - } - - return &client{ - c: c, - } -} - -func (c *client) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) ( - transport.UploadPackSession, error) { - - return newUploadPackSession(c.c, ep, auth) -} - -func (c *client) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) ( - transport.ReceivePackSession, error) { - - return newReceivePackSession(c.c, ep, auth) -} - -type session struct { - auth AuthMethod - client *http.Client - endpoint *transport.Endpoint - advRefs *packp.AdvRefs -} - -func newSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (*session, error) { - s := &session{ - auth: basicAuthFromEndpoint(ep), - client: c, - endpoint: ep, - } - if auth != nil { - a, ok := auth.(AuthMethod) - if !ok { - return nil, transport.ErrInvalidAuthMethod - } - - s.auth = a - } - - return s, nil -} - -func (s *session) ApplyAuthToRequest(req *http.Request) { - if s.auth == nil { - return - } - - s.auth.SetAuth(req) -} - -func (s *session) ModifyEndpointIfRedirect(res *http.Response) { - if res.Request == nil { - return - } - - r := res.Request - if !strings.HasSuffix(r.URL.Path, infoRefsPath) { - return - } - - h, p, err := net.SplitHostPort(r.URL.Host) - if err != nil { - h = r.URL.Host - } - if p != "" { - port, err := strconv.Atoi(p) - if err == nil { - s.endpoint.Port = port - } - } - s.endpoint.Host = h - - s.endpoint.Protocol = r.URL.Scheme - s.endpoint.Path = r.URL.Path[:len(r.URL.Path)-len(infoRefsPath)] -} - -func (*session) Close() error { - return nil -} - -// AuthMethod is concrete implementation of common.AuthMethod for HTTP services -type AuthMethod interface { - transport.AuthMethod - SetAuth(r *http.Request) -} - -func basicAuthFromEndpoint(ep *transport.Endpoint) *BasicAuth { - u := ep.User - if u == "" { - return nil - } - - return &BasicAuth{u, ep.Password} -} - -// BasicAuth represent a HTTP basic auth -type BasicAuth struct { - Username, Password string -} - -func (a *BasicAuth) SetAuth(r *http.Request) { - if a == nil { - return - } - - r.SetBasicAuth(a.Username, a.Password) -} - -// Name is name of the auth -func (a *BasicAuth) Name() string { - return "http-basic-auth" -} - -func (a *BasicAuth) String() string { - masked := "*******" - if a.Password == "" { - masked = "" - } - - return fmt.Sprintf("%s - %s:%s", a.Name(), a.Username, masked) -} - -// TokenAuth implements an http.AuthMethod that can be used with http transport -// to authenticate with HTTP token authentication (also known as bearer -// authentication). -// -// IMPORTANT: If you are looking to use OAuth tokens with popular servers (e.g. -// GitHub, Bitbucket, GitLab) you should use BasicAuth instead. These servers -// use basic HTTP authentication, with the OAuth token as user or password. -// Check the documentation of your git server for details. 
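// Illustrative sketch (not part of this patch): per the note above, an OAuth
// token for GitHub-style servers goes through BasicAuth rather than
// TokenAuth; the username can be any non-empty string. Values are
// placeholders.
package main

import (
	"fmt"

	githttp "github.com/go-git/go-git/v5/plumbing/transport/http"
)

func main() {
	auth := &githttp.BasicAuth{
		Username: "git",           // anything except an empty string
		Password: "<oauth-token>", // placeholder, not a real token
	}
	fmt.Println(auth) // the String method masks the password
}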
-type TokenAuth struct { - Token string -} - -func (a *TokenAuth) SetAuth(r *http.Request) { - if a == nil { - return - } - r.Header.Add("Authorization", fmt.Sprintf("Bearer %s", a.Token)) -} - -// Name is name of the auth -func (a *TokenAuth) Name() string { - return "http-token-auth" -} - -func (a *TokenAuth) String() string { - masked := "*******" - if a.Token == "" { - masked = "" - } - return fmt.Sprintf("%s - %s", a.Name(), masked) -} - -// Err is a dedicated error to return errors based on status code -type Err struct { - Response *http.Response -} - -// NewErr returns a new Err based on a http response -func NewErr(r *http.Response) error { - if r.StatusCode >= http.StatusOK && r.StatusCode < http.StatusMultipleChoices { - return nil - } - - switch r.StatusCode { - case http.StatusUnauthorized: - return transport.ErrAuthenticationRequired - case http.StatusForbidden: - return transport.ErrAuthorizationFailed - case http.StatusNotFound: - return transport.ErrRepositoryNotFound - } - - return plumbing.NewUnexpectedError(&Err{r}) -} - -// StatusCode returns the status code of the response -func (e *Err) StatusCode() int { - return e.Response.StatusCode -} - -func (e *Err) Error() string { - return fmt.Sprintf("unexpected requesting %q status code: %d", - e.Response.Request.URL, e.Response.StatusCode, - ) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/receive_pack.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/receive_pack.go deleted file mode 100644 index 433dfcfda19..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/receive_pack.go +++ /dev/null @@ -1,106 +0,0 @@ -package http - -import ( - "bytes" - "context" - "fmt" - "io" - "net/http" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/protocol/packp" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband" - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -type rpSession struct { - *session -} - -func newReceivePackSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) { - s, err := newSession(c, ep, auth) - return &rpSession{s}, err -} - -func (s *rpSession) AdvertisedReferences() (*packp.AdvRefs, error) { - return advertisedReferences(s.session, transport.ReceivePackServiceName) -} - -func (s *rpSession) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) ( - *packp.ReportStatus, error) { - url := fmt.Sprintf( - "%s/%s", - s.endpoint.String(), transport.ReceivePackServiceName, - ) - - buf := bytes.NewBuffer(nil) - if err := req.Encode(buf); err != nil { - return nil, err - } - - res, err := s.doRequest(ctx, http.MethodPost, url, buf) - if err != nil { - return nil, err - } - - r, err := ioutil.NonEmptyReader(res.Body) - if err == ioutil.ErrEmptyReader { - return nil, nil - } - - if err != nil { - return nil, err - } - - var d *sideband.Demuxer - if req.Capabilities.Supports(capability.Sideband64k) { - d = sideband.NewDemuxer(sideband.Sideband64k, r) - } else if req.Capabilities.Supports(capability.Sideband) { - d = sideband.NewDemuxer(sideband.Sideband, r) - } - if d != nil { - d.Progress = req.Progress - r = d - } - - rc := ioutil.NewReadCloser(r, res.Body) - - report := packp.NewReportStatus() - if err := report.Decode(rc); err != nil { - return nil, err - } - - return report, 
report.Error() -} - -func (s *rpSession) doRequest( - ctx context.Context, method, url string, content *bytes.Buffer, -) (*http.Response, error) { - - var body io.Reader - if content != nil { - body = content - } - - req, err := http.NewRequest(method, url, body) - if err != nil { - return nil, plumbing.NewPermanentError(err) - } - - applyHeadersToRequest(req, content, s.endpoint.Host, transport.ReceivePackServiceName) - s.ApplyAuthToRequest(req) - - res, err := s.client.Do(req.WithContext(ctx)) - if err != nil { - return nil, plumbing.NewUnexpectedError(err) - } - - if err := NewErr(res); err != nil { - _ = res.Body.Close() - return nil, err - } - - return res, nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/upload_pack.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/upload_pack.go deleted file mode 100644 index db37089402a..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/upload_pack.go +++ /dev/null @@ -1,123 +0,0 @@ -package http - -import ( - "bytes" - "context" - "fmt" - "io" - "net/http" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/pktline" - "github.com/go-git/go-git/v5/plumbing/protocol/packp" - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/plumbing/transport/internal/common" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -type upSession struct { - *session -} - -func newUploadPackSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.UploadPackSession, error) { - s, err := newSession(c, ep, auth) - return &upSession{s}, err -} - -func (s *upSession) AdvertisedReferences() (*packp.AdvRefs, error) { - return advertisedReferences(s.session, transport.UploadPackServiceName) -} - -func (s *upSession) UploadPack( - ctx context.Context, req *packp.UploadPackRequest, -) (*packp.UploadPackResponse, error) { - - if req.IsEmpty() { - return nil, transport.ErrEmptyUploadPackRequest - } - - if err := req.Validate(); err != nil { - return nil, err - } - - url := fmt.Sprintf( - "%s/%s", - s.endpoint.String(), transport.UploadPackServiceName, - ) - - content, err := uploadPackRequestToReader(req) - if err != nil { - return nil, err - } - - res, err := s.doRequest(ctx, http.MethodPost, url, content) - if err != nil { - return nil, err - } - - r, err := ioutil.NonEmptyReader(res.Body) - if err != nil { - if err == ioutil.ErrEmptyReader || err == io.ErrUnexpectedEOF { - return nil, transport.ErrEmptyUploadPackRequest - } - - return nil, err - } - - rc := ioutil.NewReadCloser(r, res.Body) - return common.DecodeUploadPackResponse(rc, req) -} - -// Close does nothing. 
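// Illustrative sketch (not part of this patch): the two smart-HTTP requests
// the sessions above issue, a GET for the ref advertisement and a POST
// carrying the request body, with the service-specific content types. The
// base URL is a placeholder.
package main

import (
	"fmt"
	"net/http"
)

func main() {
	base := "https://example.com/repo.git"
	svc := "git-upload-pack"

	// Ref advertisement: GET <repo>/info/refs?service=git-upload-pack
	adv, _ := http.NewRequest(http.MethodGet,
		fmt.Sprintf("%s/info/refs?service=%s", base, svc), nil)
	adv.Header.Add("Accept", "*/*")

	// Pack negotiation: POST <repo>/git-upload-pack
	post, _ := http.NewRequest(http.MethodPost,
		fmt.Sprintf("%s/%s", base, svc), nil)
	post.Header.Add("Content-Type", fmt.Sprintf("application/x-%s-request", svc))
	post.Header.Add("Accept", fmt.Sprintf("application/x-%s-result", svc))

	fmt.Println(adv.URL)
	fmt.Println(post.URL)
}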
-func (s *upSession) Close() error { - return nil -} - -func (s *upSession) doRequest( - ctx context.Context, method, url string, content *bytes.Buffer, -) (*http.Response, error) { - - var body io.Reader - if content != nil { - body = content - } - - req, err := http.NewRequest(method, url, body) - if err != nil { - return nil, plumbing.NewPermanentError(err) - } - - applyHeadersToRequest(req, content, s.endpoint.Host, transport.UploadPackServiceName) - s.ApplyAuthToRequest(req) - - res, err := s.client.Do(req.WithContext(ctx)) - if err != nil { - return nil, plumbing.NewUnexpectedError(err) - } - - if err := NewErr(res); err != nil { - _ = res.Body.Close() - return nil, err - } - - return res, nil -} - -func uploadPackRequestToReader(req *packp.UploadPackRequest) (*bytes.Buffer, error) { - buf := bytes.NewBuffer(nil) - e := pktline.NewEncoder(buf) - - if err := req.UploadRequest.Encode(buf); err != nil { - return nil, fmt.Errorf("sending upload-req message: %s", err) - } - - if err := req.UploadHaves.Encode(buf, false); err != nil { - return nil, fmt.Errorf("sending haves message: %s", err) - } - - if err := e.EncodeString("done\n"); err != nil { - return nil, err - } - - return buf, nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go deleted file mode 100644 index 89432e34c14..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go +++ /dev/null @@ -1,474 +0,0 @@ -// Package common implements the git pack protocol with a pluggable transport. -// This is a low-level package to implement new transports. Use a concrete -// implementation instead (e.g. http, file, ssh). -// -// A simple example of usage can be found in the file package. -package common - -import ( - "bufio" - "context" - "errors" - "fmt" - "io" - stdioutil "io/ioutil" - "strings" - "time" - - "github.com/go-git/go-git/v5/plumbing/format/pktline" - "github.com/go-git/go-git/v5/plumbing/protocol/packp" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband" - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -const ( - readErrorSecondsTimeout = 10 -) - -var ( - ErrTimeoutExceeded = errors.New("timeout exceeded") -) - -// Commander creates Command instances. This is the main entry point for -// transport implementations. -type Commander interface { - // Command creates a new Command for the given git command and - // endpoint. cmd can be git-upload-pack or git-receive-pack. An - // error should be returned if the endpoint is not supported or the - // command cannot be created (e.g. binary does not exist, connection - // cannot be established). - Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (Command, error) -} - -// Command is used for a single command execution. -// This interface is modeled after exec.Cmd and ssh.Session in the standard -// library. -type Command interface { - // StderrPipe returns a pipe that will be connected to the command's - // standard error when the command starts. It should not be called after - // Start. - StderrPipe() (io.Reader, error) - // StdinPipe returns a pipe that will be connected to the command's - // standard input when the command starts. It should not be called after - // Start. 
The pipe should be closed when no more input is expected. - StdinPipe() (io.WriteCloser, error) - // StdoutPipe returns a pipe that will be connected to the command's - // standard output when the command starts. It should not be called after - // Start. - StdoutPipe() (io.Reader, error) - // Start starts the specified command. It does not wait for it to - // complete. - Start() error - // Close closes the command and releases any resources used by it. It - // will block until the command exits. - Close() error -} - -// CommandKiller expands the Command interface, enabling it for being killed. -type CommandKiller interface { - // Kill and close the session whatever the state it is. It will block until - // the command is terminated. - Kill() error -} - -type client struct { - cmdr Commander -} - -// NewClient creates a new client using the given Commander. -func NewClient(runner Commander) transport.Transport { - return &client{runner} -} - -// NewUploadPackSession creates a new UploadPackSession. -func (c *client) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) ( - transport.UploadPackSession, error) { - - return c.newSession(transport.UploadPackServiceName, ep, auth) -} - -// NewReceivePackSession creates a new ReceivePackSession. -func (c *client) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) ( - transport.ReceivePackSession, error) { - - return c.newSession(transport.ReceivePackServiceName, ep, auth) -} - -type session struct { - Stdin io.WriteCloser - Stdout io.Reader - Command Command - - isReceivePack bool - advRefs *packp.AdvRefs - packRun bool - finished bool - firstErrLine chan string -} - -func (c *client) newSession(s string, ep *transport.Endpoint, auth transport.AuthMethod) (*session, error) { - cmd, err := c.cmdr.Command(s, ep, auth) - if err != nil { - return nil, err - } - - stdin, err := cmd.StdinPipe() - if err != nil { - return nil, err - } - - stdout, err := cmd.StdoutPipe() - if err != nil { - return nil, err - } - - stderr, err := cmd.StderrPipe() - if err != nil { - return nil, err - } - - if err := cmd.Start(); err != nil { - return nil, err - } - - return &session{ - Stdin: stdin, - Stdout: stdout, - Command: cmd, - firstErrLine: c.listenFirstError(stderr), - isReceivePack: s == transport.ReceivePackServiceName, - }, nil -} - -func (c *client) listenFirstError(r io.Reader) chan string { - if r == nil { - return nil - } - - errLine := make(chan string, 1) - go func() { - s := bufio.NewScanner(r) - if s.Scan() { - errLine <- s.Text() - } else { - close(errLine) - } - - _, _ = io.Copy(stdioutil.Discard, r) - }() - - return errLine -} - -// AdvertisedReferences retrieves the advertised references from the server. -func (s *session) AdvertisedReferences() (*packp.AdvRefs, error) { - if s.advRefs != nil { - return s.advRefs, nil - } - - ar := packp.NewAdvRefs() - if err := ar.Decode(s.Stdout); err != nil { - if err := s.handleAdvRefDecodeError(err); err != nil { - return nil, err - } - } - - // Some servers like jGit, announce capabilities instead of returning an - // packp message with a flush. This verifies that we received a empty - // adv-refs, even it contains capabilities. 
- if !s.isReceivePack && ar.IsEmpty() { - return nil, transport.ErrEmptyRemoteRepository - } - - transport.FilterUnsupportedCapabilities(ar.Capabilities) - s.advRefs = ar - return ar, nil -} - -func (s *session) handleAdvRefDecodeError(err error) error { - // If repository is not found, we get empty stdout and server writes an - // error to stderr. - if err == packp.ErrEmptyInput { - s.finished = true - if err := s.checkNotFoundError(); err != nil { - return err - } - - return io.ErrUnexpectedEOF - } - - // For empty (but existing) repositories, we get empty advertised-references - // message. But valid. That is, it includes at least a flush. - if err == packp.ErrEmptyAdvRefs { - // Empty repositories are valid for git-receive-pack. - if s.isReceivePack { - return nil - } - - if err := s.finish(); err != nil { - return err - } - - return transport.ErrEmptyRemoteRepository - } - - // Some server sends the errors as normal content (git protocol), so when - // we try to decode it fails, we need to check the content of it, to detect - // not found errors - if uerr, ok := err.(*packp.ErrUnexpectedData); ok { - if isRepoNotFoundError(string(uerr.Data)) { - return transport.ErrRepositoryNotFound - } - } - - return err -} - -// UploadPack performs a request to the server to fetch a packfile. A reader is -// returned with the packfile content. The reader must be closed after reading. -func (s *session) UploadPack(ctx context.Context, req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) { - if req.IsEmpty() { - return nil, transport.ErrEmptyUploadPackRequest - } - - if err := req.Validate(); err != nil { - return nil, err - } - - if _, err := s.AdvertisedReferences(); err != nil { - return nil, err - } - - s.packRun = true - - in := s.StdinContext(ctx) - out := s.StdoutContext(ctx) - - if err := uploadPack(in, out, req); err != nil { - return nil, err - } - - r, err := ioutil.NonEmptyReader(out) - if err == ioutil.ErrEmptyReader { - if c, ok := s.Stdout.(io.Closer); ok { - _ = c.Close() - } - - return nil, transport.ErrEmptyUploadPackRequest - } - - if err != nil { - return nil, err - } - - rc := ioutil.NewReadCloser(r, s) - return DecodeUploadPackResponse(rc, req) -} - -func (s *session) StdinContext(ctx context.Context) io.WriteCloser { - return ioutil.NewWriteCloserOnError( - ioutil.NewContextWriteCloser(ctx, s.Stdin), - s.onError, - ) -} - -func (s *session) StdoutContext(ctx context.Context) io.Reader { - return ioutil.NewReaderOnError( - ioutil.NewContextReader(ctx, s.Stdout), - s.onError, - ) -} - -func (s *session) onError(err error) { - if k, ok := s.Command.(CommandKiller); ok { - _ = k.Kill() - } - - _ = s.Close() -} - -func (s *session) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error) { - if _, err := s.AdvertisedReferences(); err != nil { - return nil, err - } - - s.packRun = true - - w := s.StdinContext(ctx) - if err := req.Encode(w); err != nil { - return nil, err - } - - if err := w.Close(); err != nil { - return nil, err - } - - if !req.Capabilities.Supports(capability.ReportStatus) { - // If we don't have report-status, we can only - // check return value error. 
- return nil, s.Command.Close() - } - - r := s.StdoutContext(ctx) - - var d *sideband.Demuxer - if req.Capabilities.Supports(capability.Sideband64k) { - d = sideband.NewDemuxer(sideband.Sideband64k, r) - } else if req.Capabilities.Supports(capability.Sideband) { - d = sideband.NewDemuxer(sideband.Sideband, r) - } - if d != nil { - d.Progress = req.Progress - r = d - } - - report := packp.NewReportStatus() - if err := report.Decode(r); err != nil { - return nil, err - } - - if err := report.Error(); err != nil { - defer s.Close() - return report, err - } - - return report, s.Command.Close() -} - -func (s *session) finish() error { - if s.finished { - return nil - } - - s.finished = true - - // If we did not run a upload/receive-pack, we close the connection - // gracefully by sending a flush packet to the server. If the server - // operates correctly, it will exit with status 0. - if !s.packRun { - _, err := s.Stdin.Write(pktline.FlushPkt) - return err - } - - return nil -} - -func (s *session) Close() (err error) { - err = s.finish() - - defer ioutil.CheckClose(s.Command, &err) - return -} - -func (s *session) checkNotFoundError() error { - t := time.NewTicker(time.Second * readErrorSecondsTimeout) - defer t.Stop() - - select { - case <-t.C: - return ErrTimeoutExceeded - case line, ok := <-s.firstErrLine: - if !ok { - return nil - } - - if isRepoNotFoundError(line) { - return transport.ErrRepositoryNotFound - } - - return fmt.Errorf("unknown error: %s", line) - } -} - -var ( - githubRepoNotFoundErr = "ERROR: Repository not found." - bitbucketRepoNotFoundErr = "conq: repository does not exist." - localRepoNotFoundErr = "does not appear to be a git repository" - gitProtocolNotFoundErr = "ERR \n Repository not found." - gitProtocolNoSuchErr = "ERR no such repository" - gitProtocolAccessDeniedErr = "ERR access denied" - gogsAccessDeniedErr = "Gogs: Repository does not exist or you do not have access" -) - -func isRepoNotFoundError(s string) bool { - if strings.HasPrefix(s, githubRepoNotFoundErr) { - return true - } - - if strings.HasPrefix(s, bitbucketRepoNotFoundErr) { - return true - } - - if strings.HasSuffix(s, localRepoNotFoundErr) { - return true - } - - if strings.HasPrefix(s, gitProtocolNotFoundErr) { - return true - } - - if strings.HasPrefix(s, gitProtocolNoSuchErr) { - return true - } - - if strings.HasPrefix(s, gitProtocolAccessDeniedErr) { - return true - } - - if strings.HasPrefix(s, gogsAccessDeniedErr) { - return true - } - - return false -} - -var ( - nak = []byte("NAK") - eol = []byte("\n") -) - -// uploadPack implements the git-upload-pack protocol. 
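// Illustrative sketch (not part of this patch): the simplified on-the-wire
// shape of the negotiation that uploadPack below encodes via packp: wants, a
// flush-pkt, haves, then "done". Capabilities are omitted and the hashes are
// placeholders.
package main

import "fmt"

func pkt(s string) string { return fmt.Sprintf("%04x%s", len(s)+4, s) }

func main() {
	fmt.Print(
		pkt("want 1111111111111111111111111111111111111111\n"),
		"0000", // flush-pkt terminates the want section
		pkt("have 2222222222222222222222222222222222222222\n"),
		pkt("done\n"),
	)
}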
-func uploadPack(w io.WriteCloser, r io.Reader, req *packp.UploadPackRequest) error { - // TODO support multi_ack mode - // TODO support multi_ack_detailed mode - // TODO support acks for common objects - // TODO build a proper state machine for all these processing options - - if err := req.UploadRequest.Encode(w); err != nil { - return fmt.Errorf("sending upload-req message: %s", err) - } - - if err := req.UploadHaves.Encode(w, true); err != nil { - return fmt.Errorf("sending haves message: %s", err) - } - - if err := sendDone(w); err != nil { - return fmt.Errorf("sending done message: %s", err) - } - - if err := w.Close(); err != nil { - return fmt.Errorf("closing input: %s", err) - } - - return nil -} - -func sendDone(w io.Writer) error { - e := pktline.NewEncoder(w) - - return e.Encodef("done\n") -} - -// DecodeUploadPackResponse decodes r into a new packp.UploadPackResponse -func DecodeUploadPackResponse(r io.ReadCloser, req *packp.UploadPackRequest) ( - *packp.UploadPackResponse, error, -) { - res := packp.NewUploadPackResponse(req) - if err := res.Decode(r); err != nil { - return nil, fmt.Errorf("error decoding upload-pack response: %s", err) - } - - return res, nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/server.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/server.go deleted file mode 100644 index e2480848a47..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/server.go +++ /dev/null @@ -1,73 +0,0 @@ -package common - -import ( - "context" - "fmt" - "io" - - "github.com/go-git/go-git/v5/plumbing/protocol/packp" - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -// ServerCommand is used for a single server command execution. 
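// Illustrative sketch (not part of this patch): exercising the server side
// through the public server package and the MapLoader defined further below,
// backed by in-memory storage. The endpoint URL is a placeholder.
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/transport"
	"github.com/go-git/go-git/v5/plumbing/transport/server"
	"github.com/go-git/go-git/v5/storage/memory"
)

func main() {
	ep, err := transport.NewEndpoint("git://example.com/repo.git")
	if err != nil {
		panic(err)
	}

	// Map the endpoint to an empty in-memory storer.
	loader := server.MapLoader{ep.String(): memory.NewStorage()}
	srv := server.NewServer(loader)

	sess, err := srv.NewUploadPackSession(ep, nil)
	fmt.Println(sess != nil, err)
}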
-type ServerCommand struct { - Stderr io.Writer - Stdout io.WriteCloser - Stdin io.Reader -} - -func ServeUploadPack(cmd ServerCommand, s transport.UploadPackSession) (err error) { - ioutil.CheckClose(cmd.Stdout, &err) - - ar, err := s.AdvertisedReferences() - if err != nil { - return err - } - - if err := ar.Encode(cmd.Stdout); err != nil { - return err - } - - req := packp.NewUploadPackRequest() - if err := req.Decode(cmd.Stdin); err != nil { - return err - } - - var resp *packp.UploadPackResponse - resp, err = s.UploadPack(context.TODO(), req) - if err != nil { - return err - } - - return resp.Encode(cmd.Stdout) -} - -func ServeReceivePack(cmd ServerCommand, s transport.ReceivePackSession) error { - ar, err := s.AdvertisedReferences() - if err != nil { - return fmt.Errorf("internal error in advertised references: %s", err) - } - - if err := ar.Encode(cmd.Stdout); err != nil { - return fmt.Errorf("error in advertised references encoding: %s", err) - } - - req := packp.NewReferenceUpdateRequest() - if err := req.Decode(cmd.Stdin); err != nil { - return fmt.Errorf("error decoding: %s", err) - } - - rs, err := s.ReceivePack(context.TODO(), req) - if rs != nil { - if err := rs.Encode(cmd.Stdout); err != nil { - return fmt.Errorf("error in encoding report status %s", err) - } - } - - if err != nil { - return fmt.Errorf("error in receive pack: %s", err) - } - - return nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/loader.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/loader.go deleted file mode 100644 index e7e2b075e5e..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/loader.go +++ /dev/null @@ -1,64 +0,0 @@ -package server - -import ( - "github.com/go-git/go-git/v5/plumbing/cache" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/storage/filesystem" - - "github.com/go-git/go-billy/v5" - "github.com/go-git/go-billy/v5/osfs" -) - -// DefaultLoader is a filesystem loader ignoring host and resolving paths to /. -var DefaultLoader = NewFilesystemLoader(osfs.New("")) - -// Loader loads repository's storer.Storer based on an optional host and a path. -type Loader interface { - // Load loads a storer.Storer given a transport.Endpoint. - // Returns transport.ErrRepositoryNotFound if the repository does not - // exist. - Load(ep *transport.Endpoint) (storer.Storer, error) -} - -type fsLoader struct { - base billy.Filesystem -} - -// NewFilesystemLoader creates a Loader that ignores host and resolves paths -// with a given base filesystem. -func NewFilesystemLoader(base billy.Filesystem) Loader { - return &fsLoader{base} -} - -// Load looks up the endpoint's path in the base file system and returns a -// storer for it. Returns transport.ErrRepositoryNotFound if a repository does -// not exist in the given path. -func (l *fsLoader) Load(ep *transport.Endpoint) (storer.Storer, error) { - fs, err := l.base.Chroot(ep.Path) - if err != nil { - return nil, err - } - - if _, err := fs.Stat("config"); err != nil { - return nil, transport.ErrRepositoryNotFound - } - - return filesystem.NewStorage(fs, cache.NewObjectLRUDefault()), nil -} - -// MapLoader is a Loader that uses a lookup map of storer.Storer by -// transport.Endpoint. -type MapLoader map[string]storer.Storer - -// Load returns a storer.Storer for given a transport.Endpoint by looking it up -// in the map. 
Returns transport.ErrRepositoryNotFound if the endpoint does not -// exist. -func (l MapLoader) Load(ep *transport.Endpoint) (storer.Storer, error) { - s, ok := l[ep.String()] - if !ok { - return nil, transport.ErrRepositoryNotFound - } - - return s, nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/server.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/server.go deleted file mode 100644 index 727f9021507..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/server.go +++ /dev/null @@ -1,424 +0,0 @@ -// Package server implements the git server protocol. For most use cases, the -// transport-specific implementations should be used. -package server - -import ( - "context" - "errors" - "fmt" - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/packfile" - "github.com/go-git/go-git/v5/plumbing/protocol/packp" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" - "github.com/go-git/go-git/v5/plumbing/revlist" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -var DefaultServer = NewServer(DefaultLoader) - -type server struct { - loader Loader - handler *handler -} - -// NewServer returns a transport.Transport implementing a git server, -// independent of transport. Each transport must wrap this. -func NewServer(loader Loader) transport.Transport { - return &server{ - loader, - &handler{asClient: false}, - } -} - -// NewClient returns a transport.Transport implementing a client with an -// embedded server. -func NewClient(loader Loader) transport.Transport { - return &server{ - loader, - &handler{asClient: true}, - } -} - -func (s *server) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) (transport.UploadPackSession, error) { - sto, err := s.loader.Load(ep) - if err != nil { - return nil, err - } - - return s.handler.NewUploadPackSession(sto) -} - -func (s *server) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) { - sto, err := s.loader.Load(ep) - if err != nil { - return nil, err - } - - return s.handler.NewReceivePackSession(sto) -} - -type handler struct { - asClient bool -} - -func (h *handler) NewUploadPackSession(s storer.Storer) (transport.UploadPackSession, error) { - return &upSession{ - session: session{storer: s, asClient: h.asClient}, - }, nil -} - -func (h *handler) NewReceivePackSession(s storer.Storer) (transport.ReceivePackSession, error) { - return &rpSession{ - session: session{storer: s, asClient: h.asClient}, - cmdStatus: map[plumbing.ReferenceName]error{}, - }, nil -} - -type session struct { - storer storer.Storer - caps *capability.List - asClient bool -} - -func (s *session) Close() error { - return nil -} - -func (s *session) SetAuth(transport.AuthMethod) error { - //TODO: deprecate - return nil -} - -func (s *session) checkSupportedCapabilities(cl *capability.List) error { - for _, c := range cl.All() { - if !s.caps.Supports(c) { - return fmt.Errorf("unsupported capability: %s", c) - } - } - - return nil -} - -type upSession struct { - session -} - -func (s *upSession) AdvertisedReferences() (*packp.AdvRefs, error) { - ar := packp.NewAdvRefs() - - if err := s.setSupportedCapabilities(ar.Capabilities); err != nil { - return nil, err - } - - s.caps = ar.Capabilities - - if err := setReferences(s.storer, ar); err 
!= nil { - return nil, err - } - - if err := setHEAD(s.storer, ar); err != nil { - return nil, err - } - - if s.asClient && len(ar.References) == 0 { - return nil, transport.ErrEmptyRemoteRepository - } - - return ar, nil -} - -func (s *upSession) UploadPack(ctx context.Context, req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) { - if req.IsEmpty() { - return nil, transport.ErrEmptyUploadPackRequest - } - - if err := req.Validate(); err != nil { - return nil, err - } - - if s.caps == nil { - s.caps = capability.NewList() - if err := s.setSupportedCapabilities(s.caps); err != nil { - return nil, err - } - } - - if err := s.checkSupportedCapabilities(req.Capabilities); err != nil { - return nil, err - } - - s.caps = req.Capabilities - - if len(req.Shallows) > 0 { - return nil, fmt.Errorf("shallow not supported") - } - - objs, err := s.objectsToUpload(req) - if err != nil { - return nil, err - } - - pr, pw := io.Pipe() - e := packfile.NewEncoder(pw, s.storer, false) - go func() { - // TODO: plumb through a pack window. - _, err := e.Encode(objs, 10) - pw.CloseWithError(err) - }() - - return packp.NewUploadPackResponseWithPackfile(req, - ioutil.NewContextReadCloser(ctx, pr), - ), nil -} - -func (s *upSession) objectsToUpload(req *packp.UploadPackRequest) ([]plumbing.Hash, error) { - haves, err := revlist.Objects(s.storer, req.Haves, nil) - if err != nil { - return nil, err - } - - return revlist.Objects(s.storer, req.Wants, haves) -} - -func (*upSession) setSupportedCapabilities(c *capability.List) error { - if err := c.Set(capability.Agent, capability.DefaultAgent); err != nil { - return err - } - - if err := c.Set(capability.OFSDelta); err != nil { - return err - } - - return nil -} - -type rpSession struct { - session - cmdStatus map[plumbing.ReferenceName]error - firstErr error - unpackErr error -} - -func (s *rpSession) AdvertisedReferences() (*packp.AdvRefs, error) { - ar := packp.NewAdvRefs() - - if err := s.setSupportedCapabilities(ar.Capabilities); err != nil { - return nil, err - } - - s.caps = ar.Capabilities - - if err := setReferences(s.storer, ar); err != nil { - return nil, err - } - - if err := setHEAD(s.storer, ar); err != nil { - return nil, err - } - - return ar, nil -} - -var ( - ErrUpdateReference = errors.New("failed to update ref") -) - -func (s *rpSession) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error) { - if s.caps == nil { - s.caps = capability.NewList() - if err := s.setSupportedCapabilities(s.caps); err != nil { - return nil, err - } - } - - if err := s.checkSupportedCapabilities(req.Capabilities); err != nil { - return nil, err - } - - s.caps = req.Capabilities - - //TODO: Implement 'atomic' update of references. 
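// Illustrative sketch (not part of this patch): how a packp Command's
// Action() maps onto the create/delete/update switch in updateReferences
// below: a zero Old hash means Create, a zero New hash means Delete,
// otherwise Update. Ref names and the hash are placeholders.
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/protocol/packp"
)

func main() {
	h := plumbing.NewHash("0123456789012345678901234567890123456789")
	cmds := []*packp.Command{
		{Name: "refs/heads/new", Old: plumbing.ZeroHash, New: h},  // Create
		{Name: "refs/heads/gone", Old: h, New: plumbing.ZeroHash}, // Delete
		{Name: "refs/heads/main", Old: h, New: h},                 // Update
	}
	for _, c := range cmds {
		fmt.Println(c.Name, c.Action())
	}
}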
- - if req.Packfile != nil { - r := ioutil.NewContextReadCloser(ctx, req.Packfile) - if err := s.writePackfile(r); err != nil { - s.unpackErr = err - s.firstErr = err - return s.reportStatus(), err - } - } - - s.updateReferences(req) - return s.reportStatus(), s.firstErr -} - -func (s *rpSession) updateReferences(req *packp.ReferenceUpdateRequest) { - for _, cmd := range req.Commands { - exists, err := referenceExists(s.storer, cmd.Name) - if err != nil { - s.setStatus(cmd.Name, err) - continue - } - - switch cmd.Action() { - case packp.Create: - if exists { - s.setStatus(cmd.Name, ErrUpdateReference) - continue - } - - ref := plumbing.NewHashReference(cmd.Name, cmd.New) - err := s.storer.SetReference(ref) - s.setStatus(cmd.Name, err) - case packp.Delete: - if !exists { - s.setStatus(cmd.Name, ErrUpdateReference) - continue - } - - err := s.storer.RemoveReference(cmd.Name) - s.setStatus(cmd.Name, err) - case packp.Update: - if !exists { - s.setStatus(cmd.Name, ErrUpdateReference) - continue - } - - ref := plumbing.NewHashReference(cmd.Name, cmd.New) - err := s.storer.SetReference(ref) - s.setStatus(cmd.Name, err) - } - } -} - -func (s *rpSession) writePackfile(r io.ReadCloser) error { - if r == nil { - return nil - } - - if err := packfile.UpdateObjectStorage(s.storer, r); err != nil { - _ = r.Close() - return err - } - - return r.Close() -} - -func (s *rpSession) setStatus(ref plumbing.ReferenceName, err error) { - s.cmdStatus[ref] = err - if s.firstErr == nil && err != nil { - s.firstErr = err - } -} - -func (s *rpSession) reportStatus() *packp.ReportStatus { - if !s.caps.Supports(capability.ReportStatus) { - return nil - } - - rs := packp.NewReportStatus() - rs.UnpackStatus = "ok" - - if s.unpackErr != nil { - rs.UnpackStatus = s.unpackErr.Error() - } - - if s.cmdStatus == nil { - return rs - } - - for ref, err := range s.cmdStatus { - msg := "ok" - if err != nil { - msg = err.Error() - } - status := &packp.CommandStatus{ - ReferenceName: ref, - Status: msg, - } - rs.CommandStatuses = append(rs.CommandStatuses, status) - } - - return rs -} - -func (*rpSession) setSupportedCapabilities(c *capability.List) error { - if err := c.Set(capability.Agent, capability.DefaultAgent); err != nil { - return err - } - - if err := c.Set(capability.OFSDelta); err != nil { - return err - } - - if err := c.Set(capability.DeleteRefs); err != nil { - return err - } - - return c.Set(capability.ReportStatus) -} - -func setHEAD(s storer.Storer, ar *packp.AdvRefs) error { - ref, err := s.Reference(plumbing.HEAD) - if err == plumbing.ErrReferenceNotFound { - return nil - } - - if err != nil { - return err - } - - if ref.Type() == plumbing.SymbolicReference { - if err := ar.AddReference(ref); err != nil { - return nil - } - - ref, err = storer.ResolveReference(s, ref.Target()) - if err == plumbing.ErrReferenceNotFound { - return nil - } - - if err != nil { - return err - } - } - - if ref.Type() != plumbing.HashReference { - return plumbing.ErrInvalidType - } - - h := ref.Hash() - ar.Head = &h - - return nil -} - -func setReferences(s storer.Storer, ar *packp.AdvRefs) error { - //TODO: add peeled references. 
- iter, err := s.IterReferences() - if err != nil { - return err - } - - return iter.ForEach(func(ref *plumbing.Reference) error { - if ref.Type() != plumbing.HashReference { - return nil - } - - ar.References[ref.Name().String()] = ref.Hash() - return nil - }) -} - -func referenceExists(s storer.ReferenceStorer, n plumbing.ReferenceName) (bool, error) { - _, err := s.Reference(n) - if err == plumbing.ErrReferenceNotFound { - return false, nil - } - - return err == nil, err -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/auth_method.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/auth_method.go deleted file mode 100644 index b79a74e41d6..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/auth_method.go +++ /dev/null @@ -1,322 +0,0 @@ -package ssh - -import ( - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - "os" - "os/user" - "path/filepath" - - "github.com/go-git/go-git/v5/plumbing/transport" - - "github.com/mitchellh/go-homedir" - "github.com/xanzy/ssh-agent" - "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/knownhosts" -) - -const DefaultUsername = "git" - -// AuthMethod is the interface all auth methods for the ssh client -// must implement. The clientConfig method returns the ssh client -// configuration needed to establish an ssh connection. -type AuthMethod interface { - transport.AuthMethod - // ClientConfig should return a valid ssh.ClientConfig to be used to create - // a connection to the SSH server. - ClientConfig() (*ssh.ClientConfig, error) -} - -// The names of the AuthMethod implementations. To be returned by the -// Name() method. Most git servers only allow PublicKeysName and -// PublicKeysCallbackName. -const ( - KeyboardInteractiveName = "ssh-keyboard-interactive" - PasswordName = "ssh-password" - PasswordCallbackName = "ssh-password-callback" - PublicKeysName = "ssh-public-keys" - PublicKeysCallbackName = "ssh-public-key-callback" -) - -// KeyboardInteractive implements AuthMethod by using a -// prompt/response sequence controlled by the server. -type KeyboardInteractive struct { - User string - Challenge ssh.KeyboardInteractiveChallenge - HostKeyCallbackHelper -} - -func (a *KeyboardInteractive) Name() string { - return KeyboardInteractiveName -} - -func (a *KeyboardInteractive) String() string { - return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) -} - -func (a *KeyboardInteractive) ClientConfig() (*ssh.ClientConfig, error) { - return a.SetHostKeyCallback(&ssh.ClientConfig{ - User: a.User, - Auth: []ssh.AuthMethod{ - a.Challenge, - }, - }) -} - -// Password implements AuthMethod by using the given password. -type Password struct { - User string - Password string - HostKeyCallbackHelper -} - -func (a *Password) Name() string { - return PasswordName -} - -func (a *Password) String() string { - return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) -} - -func (a *Password) ClientConfig() (*ssh.ClientConfig, error) { - return a.SetHostKeyCallback(&ssh.ClientConfig{ - User: a.User, - Auth: []ssh.AuthMethod{ssh.Password(a.Password)}, - }) -} - -// PasswordCallback implements AuthMethod by using a callback -// to fetch the password. 
-type PasswordCallback struct { - User string - Callback func() (pass string, err error) - HostKeyCallbackHelper -} - -func (a *PasswordCallback) Name() string { - return PasswordCallbackName -} - -func (a *PasswordCallback) String() string { - return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) -} - -func (a *PasswordCallback) ClientConfig() (*ssh.ClientConfig, error) { - return a.SetHostKeyCallback(&ssh.ClientConfig{ - User: a.User, - Auth: []ssh.AuthMethod{ssh.PasswordCallback(a.Callback)}, - }) -} - -// PublicKeys implements AuthMethod by using the given key pairs. -type PublicKeys struct { - User string - Signer ssh.Signer - HostKeyCallbackHelper -} - -// NewPublicKeys returns a PublicKeys from a PEM encoded private key. An -// encryption password should be given if the pemBytes contains a password -// encrypted PEM block otherwise password should be empty. It supports RSA -// (PKCS#1), DSA (OpenSSL), and ECDSA private keys. -func NewPublicKeys(user string, pemBytes []byte, password string) (*PublicKeys, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("invalid PEM data") - } - if x509.IsEncryptedPEMBlock(block) { - key, err := x509.DecryptPEMBlock(block, []byte(password)) - if err != nil { - return nil, err - } - - block = &pem.Block{Type: block.Type, Bytes: key} - pemBytes = pem.EncodeToMemory(block) - } - - signer, err := ssh.ParsePrivateKey(pemBytes) - if err != nil { - return nil, err - } - - return &PublicKeys{User: user, Signer: signer}, nil -} - -// NewPublicKeysFromFile returns a PublicKeys from a file containing a PEM -// encoded private key. An encryption password should be given if the pemBytes -// contains a password encrypted PEM block otherwise password should be empty. -func NewPublicKeysFromFile(user, pemFile, password string) (*PublicKeys, error) { - bytes, err := ioutil.ReadFile(pemFile) - if err != nil { - return nil, err - } - - return NewPublicKeys(user, bytes, password) -} - -func (a *PublicKeys) Name() string { - return PublicKeysName -} - -func (a *PublicKeys) String() string { - return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) -} - -func (a *PublicKeys) ClientConfig() (*ssh.ClientConfig, error) { - return a.SetHostKeyCallback(&ssh.ClientConfig{ - User: a.User, - Auth: []ssh.AuthMethod{ssh.PublicKeys(a.Signer)}, - }) -} - -func username() (string, error) { - var username string - if user, err := user.Current(); err == nil { - username = user.Username - } else { - username = os.Getenv("USER") - } - - if username == "" { - return "", errors.New("failed to get username") - } - - return username, nil -} - -// PublicKeysCallback implements AuthMethod by asking a -// ssh.agent.Agent to act as a signer. -type PublicKeysCallback struct { - User string - Callback func() (signers []ssh.Signer, err error) - HostKeyCallbackHelper -} - -// NewSSHAgentAuth returns a PublicKeysCallback based on a SSH agent, it opens -// a pipe with the SSH agent and uses the pipe as the implementer of the public -// key callback function. 
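// Illustrative sketch (not part of this patch): loading a PEM private key
// with NewPublicKeysFromFile above. The key path is a placeholder and the
// empty password assumes an unencrypted key.
package main

import (
	"fmt"

	gitssh "github.com/go-git/go-git/v5/plumbing/transport/ssh"
)

func main() {
	keys, err := gitssh.NewPublicKeysFromFile("git", "/home/user/.ssh/id_rsa", "")
	if err != nil {
		fmt.Println("load key:", err)
		return
	}
	fmt.Println(keys.Name(), keys.User)
}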
-func NewSSHAgentAuth(u string) (*PublicKeysCallback, error) { - var err error - if u == "" { - u, err = username() - if err != nil { - return nil, err - } - } - - a, _, err := sshagent.New() - if err != nil { - return nil, fmt.Errorf("error creating SSH agent: %q", err) - } - - return &PublicKeysCallback{ - User: u, - Callback: a.Signers, - }, nil -} - -func (a *PublicKeysCallback) Name() string { - return PublicKeysCallbackName -} - -func (a *PublicKeysCallback) String() string { - return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) -} - -func (a *PublicKeysCallback) ClientConfig() (*ssh.ClientConfig, error) { - return a.SetHostKeyCallback(&ssh.ClientConfig{ - User: a.User, - Auth: []ssh.AuthMethod{ssh.PublicKeysCallback(a.Callback)}, - }) -} - -// NewKnownHostsCallback returns ssh.HostKeyCallback based on a file based on a -// known_hosts file. http://man.openbsd.org/sshd#SSH_KNOWN_HOSTS_FILE_FORMAT -// -// If list of files is empty, then it will be read from the SSH_KNOWN_HOSTS -// environment variable, example: -// /home/foo/custom_known_hosts_file:/etc/custom_known/hosts_file -// -// If SSH_KNOWN_HOSTS is not set the following file locations will be used: -// ~/.ssh/known_hosts -// /etc/ssh/ssh_known_hosts -func NewKnownHostsCallback(files ...string) (ssh.HostKeyCallback, error) { - var err error - - if len(files) == 0 { - if files, err = getDefaultKnownHostsFiles(); err != nil { - return nil, err - } - } - - if files, err = filterKnownHostsFiles(files...); err != nil { - return nil, err - } - - return knownhosts.New(files...) -} - -func getDefaultKnownHostsFiles() ([]string, error) { - files := filepath.SplitList(os.Getenv("SSH_KNOWN_HOSTS")) - if len(files) != 0 { - return files, nil - } - - homeDirPath, err := homedir.Dir() - if err != nil { - return nil, err - } - - return []string{ - filepath.Join(homeDirPath, "/.ssh/known_hosts"), - "/etc/ssh/ssh_known_hosts", - }, nil -} - -func filterKnownHostsFiles(files ...string) ([]string, error) { - var out []string - for _, file := range files { - _, err := os.Stat(file) - if err == nil { - out = append(out, file) - continue - } - - if !os.IsNotExist(err) { - return nil, err - } - } - - if len(out) == 0 { - return nil, fmt.Errorf("unable to find any valid known_hosts file, set SSH_KNOWN_HOSTS env variable") - } - - return out, nil -} - -// HostKeyCallbackHelper is a helper that provides common functionality to -// configure HostKeyCallback into a ssh.ClientConfig. -type HostKeyCallbackHelper struct { - // HostKeyCallback is the function type used for verifying server keys. - // If nil default callback will be create using NewKnownHostsCallback - // without argument. - HostKeyCallback ssh.HostKeyCallback -} - -// SetHostKeyCallback sets the field HostKeyCallback in the given cfg. If -// HostKeyCallback is empty a default callback is created using -// NewKnownHostsCallback. 
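// Illustrative sketch (not part of this patch): pinning host key
// verification to an explicit known_hosts file via NewKnownHostsCallback
// above, attached through the embedded HostKeyCallbackHelper. The file path
// is a placeholder.
package main

import (
	"fmt"

	gitssh "github.com/go-git/go-git/v5/plumbing/transport/ssh"
)

func main() {
	cb, err := gitssh.NewKnownHostsCallback("/etc/ssh/ssh_known_hosts")
	if err != nil {
		fmt.Println(err)
		return
	}

	auth, err := gitssh.NewSSHAgentAuth("git")
	if err != nil {
		fmt.Println(err) // e.g. no SSH agent available
		return
	}
	auth.HostKeyCallback = cb // promoted field from HostKeyCallbackHelper
	fmt.Println(auth)
}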
-func (m *HostKeyCallbackHelper) SetHostKeyCallback(cfg *ssh.ClientConfig) (*ssh.ClientConfig, error) { - var err error - if m.HostKeyCallback == nil { - if m.HostKeyCallback, err = NewKnownHostsCallback(); err != nil { - return cfg, err - } - } - - cfg.HostKeyCallback = m.HostKeyCallback - return cfg, nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go deleted file mode 100644 index c05ded986d8..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go +++ /dev/null @@ -1,228 +0,0 @@ -// Package ssh implements the SSH transport protocol. -package ssh - -import ( - "context" - "fmt" - "reflect" - "strconv" - - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/plumbing/transport/internal/common" - - "github.com/kevinburke/ssh_config" - "golang.org/x/crypto/ssh" - "golang.org/x/net/proxy" -) - -// DefaultClient is the default SSH client. -var DefaultClient = NewClient(nil) - -// DefaultSSHConfig is the reader used to access parameters stored in the -// system's ssh_config files. If nil all the ssh_config are ignored. -var DefaultSSHConfig sshConfig = ssh_config.DefaultUserSettings - -type sshConfig interface { - Get(alias, key string) string -} - -// NewClient creates a new SSH client with an optional *ssh.ClientConfig. -func NewClient(config *ssh.ClientConfig) transport.Transport { - return common.NewClient(&runner{config: config}) -} - -// DefaultAuthBuilder is the function used to create a default AuthMethod, when -// the user doesn't provide any. -var DefaultAuthBuilder = func(user string) (AuthMethod, error) { - return NewSSHAgentAuth(user) -} - -const DefaultPort = 22 - -type runner struct { - config *ssh.ClientConfig -} - -func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (common.Command, error) { - c := &command{command: cmd, endpoint: ep, config: r.config} - if auth != nil { - c.setAuth(auth) - } - - if err := c.connect(); err != nil { - return nil, err - } - return c, nil -} - -type command struct { - *ssh.Session - connected bool - command string - endpoint *transport.Endpoint - client *ssh.Client - auth AuthMethod - config *ssh.ClientConfig -} - -func (c *command) setAuth(auth transport.AuthMethod) error { - a, ok := auth.(AuthMethod) - if !ok { - return transport.ErrInvalidAuthMethod - } - - c.auth = a - return nil -} - -func (c *command) Start() error { - return c.Session.Start(endpointToCommand(c.command, c.endpoint)) -} - -// Close closes the SSH session and connection. -func (c *command) Close() error { - if !c.connected { - return nil - } - - c.connected = false - - //XXX: If did read the full packfile, then the session might be already - // closed. - _ = c.Session.Close() - - return c.client.Close() -} - -// connect connects to the SSH server, unless a AuthMethod was set with -// SetAuth method, by default uses an auth method based on PublicKeysCallback, -// it connects to a SSH agent, using the address stored in the SSH_AUTH_SOCK -// environment var. 
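// Illustrative sketch (not part of this patch): the ssh_config lookups that
// doGetHostWithPortFromSSHConfig below performs through DefaultSSHConfig,
// shown here with the same kevinburke/ssh_config package used directly.
package main

import (
	"fmt"

	ssh_config "github.com/kevinburke/ssh_config"
)

func main() {
	// Both return the empty string (or the ssh_config default) unless the
	// user's ssh_config files define a Host block for the alias.
	host := ssh_config.Get("github.com", "Hostname")
	port := ssh_config.Get("github.com", "Port")
	fmt.Printf("Hostname=%q Port=%q\n", host, port)
}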
-func (c *command) connect() error { - if c.connected { - return transport.ErrAlreadyConnected - } - - if c.auth == nil { - if err := c.setAuthFromEndpoint(); err != nil { - return err - } - } - - var err error - config, err := c.auth.ClientConfig() - if err != nil { - return err - } - - overrideConfig(c.config, config) - - c.client, err = dial("tcp", c.getHostWithPort(), config) - if err != nil { - return err - } - - c.Session, err = c.client.NewSession() - if err != nil { - _ = c.client.Close() - return err - } - - c.connected = true - return nil -} - -func dial(network, addr string, config *ssh.ClientConfig) (*ssh.Client, error) { - var ( - ctx = context.Background() - cancel context.CancelFunc - ) - if config.Timeout > 0 { - ctx, cancel = context.WithTimeout(ctx, config.Timeout) - } else { - ctx, cancel = context.WithCancel(ctx) - } - defer cancel() - - conn, err := proxy.Dial(ctx, network, addr) - if err != nil { - return nil, err - } - c, chans, reqs, err := ssh.NewClientConn(conn, addr, config) - if err != nil { - return nil, err - } - return ssh.NewClient(c, chans, reqs), nil -} - -func (c *command) getHostWithPort() string { - if addr, found := c.doGetHostWithPortFromSSHConfig(); found { - return addr - } - - host := c.endpoint.Host - port := c.endpoint.Port - if port <= 0 { - port = DefaultPort - } - - return fmt.Sprintf("%s:%d", host, port) -} - -func (c *command) doGetHostWithPortFromSSHConfig() (addr string, found bool) { - if DefaultSSHConfig == nil { - return - } - - host := c.endpoint.Host - port := c.endpoint.Port - - configHost := DefaultSSHConfig.Get(c.endpoint.Host, "Hostname") - if configHost != "" { - host = configHost - found = true - } - - if !found { - return - } - - configPort := DefaultSSHConfig.Get(c.endpoint.Host, "Port") - if configPort != "" { - if i, err := strconv.Atoi(configPort); err == nil { - port = i - } - } - - addr = fmt.Sprintf("%s:%d", host, port) - return -} - -func (c *command) setAuthFromEndpoint() error { - var err error - c.auth, err = DefaultAuthBuilder(c.endpoint.User) - return err -} - -func endpointToCommand(cmd string, ep *transport.Endpoint) string { - return fmt.Sprintf("%s '%s'", cmd, ep.Path) -} - -func overrideConfig(overrides *ssh.ClientConfig, c *ssh.ClientConfig) { - if overrides == nil { - return - } - - t := reflect.TypeOf(*c) - vc := reflect.ValueOf(c).Elem() - vo := reflect.ValueOf(overrides).Elem() - - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - vcf := vc.FieldByName(f.Name) - vof := vo.FieldByName(f.Name) - vcf.Set(vof) - } - - *c = vc.Interface().(ssh.ClientConfig) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/prune.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/prune.go deleted file mode 100644 index cc5907a1429..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/prune.go +++ /dev/null @@ -1,66 +0,0 @@ -package git - -import ( - "errors" - "time" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" -) - -type PruneHandler func(unreferencedObjectHash plumbing.Hash) error -type PruneOptions struct { - // OnlyObjectsOlderThan if set to non-zero value - // selects only objects older than the time provided. - OnlyObjectsOlderThan time.Time - // Handler is called on matching objects - Handler PruneHandler -} - -var ErrLooseObjectsNotSupported = errors.New("Loose objects not supported") - -// DeleteObject deletes an object from a repository. -// The type conveniently matches PruneHandler. 
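// Illustrative sketch (not part of this patch): pruning loose objects that
// are unreachable from any ref and older than an hour, using the
// PruneOptions/PruneHandler API above. The repository path is a placeholder.
package main

import (
	"fmt"
	"time"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	repo, err := git.PlainOpen(".") // placeholder path
	if err != nil {
		fmt.Println(err)
		return
	}

	err = repo.Prune(git.PruneOptions{
		OnlyObjectsOlderThan: time.Now().Add(-time.Hour),
		Handler: func(h plumbing.Hash) error {
			fmt.Println("pruning", h)
			return repo.DeleteObject(h) // matches the PruneHandler signature
		},
	})
	fmt.Println(err)
}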
-func (r *Repository) DeleteObject(hash plumbing.Hash) error {
-	los, ok := r.Storer.(storer.LooseObjectStorer)
-	if !ok {
-		return ErrLooseObjectsNotSupported
-	}
-
-	return los.DeleteLooseObject(hash)
-}
-
-func (r *Repository) Prune(opt PruneOptions) error {
-	los, ok := r.Storer.(storer.LooseObjectStorer)
-	if !ok {
-		return ErrLooseObjectsNotSupported
-	}
-
-	pw := newObjectWalker(r.Storer)
-	err := pw.walkAllRefs()
-	if err != nil {
-		return err
-	}
-	// Now walk all (loose) objects in storage.
-	return los.ForEachObjectHash(func(hash plumbing.Hash) error {
-		// Get out if we have seen this object.
-		if pw.isSeen(hash) {
-			return nil
-		}
-		// Otherwise it is a candidate for pruning.
-		// Check out for too new objects next.
-		if !opt.OnlyObjectsOlderThan.IsZero() {
-			// Errors here are non-fatal. The object may be e.g. packed.
-			// Or concurrently deleted. Skip such objects.
-			t, err := los.LooseObjectTime(hash)
-			if err != nil {
-				return nil
-			}
-			// Skip too new objects.
-			if !t.Before(opt.OnlyObjectsOlderThan) {
-				return nil
-			}
-		}
-		return opt.Handler(hash)
-	})
-}
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/references.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/references.go
deleted file mode 100644
index 6d96035afab..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/references.go
+++ /dev/null
@@ -1,264 +0,0 @@
-package git
-
-import (
-	"io"
-	"sort"
-
-	"github.com/go-git/go-git/v5/plumbing"
-	"github.com/go-git/go-git/v5/plumbing/object"
-	"github.com/go-git/go-git/v5/utils/diff"
-
-	"github.com/sergi/go-diff/diffmatchpatch"
-)
-
-// References returns a slice of Commits for the file at "path", starting from
-// the commit provided that contains the file from the provided path. The last
-// commit in the returned slice is the commit where the file was created.
-// If the provided commit does not contain the specified path, a nil slice is
-// returned. The commits are sorted in commit order, newer to older.
-//
-// Caveats:
-//
-// - Moves and copies are not currently supported.
-//
-// - Cherry-picks are not detected unless there are no commits between them and
-// therefore can appear repeated in the list. (see git patch-id for hints on how
-// to fix this).
-func references(c *object.Commit, path string) ([]*object.Commit, error) {
-	var result []*object.Commit
-	seen := make(map[plumbing.Hash]struct{})
-	if err := walkGraph(&result, &seen, c, path); err != nil {
-		return nil, err
-	}
-
-	// TODO result should be returned without ordering
-	sortCommits(result)
-
-	// for merges of identical cherry-picks
-	return removeComp(path, result, equivalent)
-}
-
-type commitSorterer struct {
-	l []*object.Commit
-}
-
-func (s commitSorterer) Len() int {
-	return len(s.l)
-}
-
-func (s commitSorterer) Less(i, j int) bool {
-	return s.l[i].Committer.When.Before(s.l[j].Committer.When) ||
-		s.l[i].Committer.When.Equal(s.l[j].Committer.When) &&
-			s.l[i].Author.When.Before(s.l[j].Author.When)
-}
-
-func (s commitSorterer) Swap(i, j int) {
-	s.l[i], s.l[j] = s.l[j], s.l[i]
-}
-
-// sortCommits sorts a commit list by commit date, from older to newer.
-func sortCommits(l []*object.Commit) {
-	s := &commitSorterer{l}
-	sort.Sort(s)
-}
-
-// Recursive traversal of the commit graph, generating a linear history of the
-// path.
-func walkGraph(result *[]*object.Commit, seen *map[plumbing.Hash]struct{}, current *object.Commit, path string) error { - // check and update seen - if _, ok := (*seen)[current.Hash]; ok { - return nil - } - (*seen)[current.Hash] = struct{}{} - - // if the path is not in the current commit, stop searching. - if _, err := current.File(path); err != nil { - return nil - } - - // optimization: don't traverse branches that does not - // contain the path. - parents, err := parentsContainingPath(path, current) - if err != nil { - return err - } - switch len(parents) { - // if the path is not found in any of its parents, the path was - // created by this commit; we must add it to the revisions list and - // stop searching. This includes the case when current is the - // initial commit. - case 0: - *result = append(*result, current) - return nil - case 1: // only one parent contains the path - // if the file contents has change, add the current commit - different, err := differentContents(path, current, parents) - if err != nil { - return err - } - if len(different) == 1 { - *result = append(*result, current) - } - // in any case, walk the parent - return walkGraph(result, seen, parents[0], path) - default: // more than one parent contains the path - // TODO: detect merges that had a conflict, because they must be - // included in the result here. - for _, p := range parents { - err := walkGraph(result, seen, p, path) - if err != nil { - return err - } - } - } - return nil -} - -func parentsContainingPath(path string, c *object.Commit) ([]*object.Commit, error) { - // TODO: benchmark this method making git.object.Commit.parent public instead of using - // an iterator - var result []*object.Commit - iter := c.Parents() - for { - parent, err := iter.Next() - if err == io.EOF { - return result, nil - } - if err != nil { - return nil, err - } - if _, err := parent.File(path); err == nil { - result = append(result, parent) - } - } -} - -// Returns an slice of the commits in "cs" that has the file "path", but with different -// contents than what can be found in "c". -func differentContents(path string, c *object.Commit, cs []*object.Commit) ([]*object.Commit, error) { - result := make([]*object.Commit, 0, len(cs)) - h, found := blobHash(path, c) - if !found { - return nil, object.ErrFileNotFound - } - for _, cx := range cs { - if hx, found := blobHash(path, cx); found && h != hx { - result = append(result, cx) - } - } - return result, nil -} - -// blobHash returns the hash of a path in a commit -func blobHash(path string, commit *object.Commit) (hash plumbing.Hash, found bool) { - file, err := commit.File(path) - if err != nil { - var empty plumbing.Hash - return empty, found - } - return file.Hash, true -} - -type contentsComparatorFn func(path string, a, b *object.Commit) (bool, error) - -// Returns a new slice of commits, with duplicates removed. Expects a -// sorted commit list. Duplication is defined according to "comp". It -// will always keep the first commit of a series of duplicated commits. -func removeComp(path string, cs []*object.Commit, comp contentsComparatorFn) ([]*object.Commit, error) { - result := make([]*object.Commit, 0, len(cs)) - if len(cs) == 0 { - return result, nil - } - result = append(result, cs[0]) - for i := 1; i < len(cs); i++ { - equals, err := comp(path, cs[i], cs[i-1]) - if err != nil { - return nil, err - } - if !equals { - result = append(result, cs[i]) - } - } - return result, nil -} - -// Equivalent commits are commits whose patch is the same. 
-func equivalent(path string, a, b *object.Commit) (bool, error) { - numParentsA := a.NumParents() - numParentsB := b.NumParents() - - // the first commit is not equivalent to anyone - // and "I think" merges can not be equivalent to anything - if numParentsA != 1 || numParentsB != 1 { - return false, nil - } - - diffsA, err := patch(a, path) - if err != nil { - return false, err - } - diffsB, err := patch(b, path) - if err != nil { - return false, err - } - - return sameDiffs(diffsA, diffsB), nil -} - -func patch(c *object.Commit, path string) ([]diffmatchpatch.Diff, error) { - // get contents of the file in the commit - file, err := c.File(path) - if err != nil { - return nil, err - } - content, err := file.Contents() - if err != nil { - return nil, err - } - - // get contents of the file in the first parent of the commit - var contentParent string - iter := c.Parents() - parent, err := iter.Next() - if err != nil { - return nil, err - } - file, err = parent.File(path) - if err != nil { - contentParent = "" - } else { - contentParent, err = file.Contents() - if err != nil { - return nil, err - } - } - - // compare the contents of parent and child - return diff.Do(content, contentParent), nil -} - -func sameDiffs(a, b []diffmatchpatch.Diff) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if !sameDiff(a[i], b[i]) { - return false - } - } - return true -} - -func sameDiff(a, b diffmatchpatch.Diff) bool { - if a.Type != b.Type { - return false - } - switch a.Type { - case 0: - return countLines(a.Text) == countLines(b.Text) - case 1, -1: - return a.Text == b.Text - default: - panic("unreachable") - } -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/remote.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/remote.go deleted file mode 100644 index e642c572911..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/remote.go +++ /dev/null @@ -1,1154 +0,0 @@ -package git - -import ( - "context" - "errors" - "fmt" - "io" - - "github.com/go-git/go-billy/v5/osfs" - "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/cache" - "github.com/go-git/go-git/v5/plumbing/format/packfile" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/plumbing/protocol/packp" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband" - "github.com/go-git/go-git/v5/plumbing/revlist" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/plumbing/transport/client" - "github.com/go-git/go-git/v5/storage" - "github.com/go-git/go-git/v5/storage/filesystem" - "github.com/go-git/go-git/v5/storage/memory" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -var ( - NoErrAlreadyUpToDate = errors.New("already up-to-date") - ErrDeleteRefNotSupported = errors.New("server does not support delete-refs") - ErrForceNeeded = errors.New("some refs were not updated") - ErrExactSHA1NotSupported = errors.New("server does not support exact SHA1 refspec") -) - -const ( - // This describes the maximum number of commits to walk when - // computing the haves to send to a server, for each ref in the - // repo containing this remote, when not using the multi-ack - // protocol. Setting this to 0 means there is no limit. - maxHavesToVisitPerRef = 100 -) - -// Remote represents a connection to a remote repository. 
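// A minimal usage sketch, assuming an already opened *Repository r with a
// configured remote named "origin":
//
//   remote, err := r.Remote("origin")
//   if err != nil {
//       // handle error
//   }
//   err = remote.Push(&git.PushOptions{})
//   if err != nil && err != git.NoErrAlreadyUpToDate {
//       // handle error
//   }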
-type Remote struct { - c *config.RemoteConfig - s storage.Storer -} - -// NewRemote creates a new Remote. -// The intended purpose is to use the Remote for tasks such as listing remote references (like using git ls-remote). -// Otherwise Remotes should be created via the use of a Repository. -func NewRemote(s storage.Storer, c *config.RemoteConfig) *Remote { - return &Remote{s: s, c: c} -} - -// Config returns the RemoteConfig object used to instantiate this Remote. -func (r *Remote) Config() *config.RemoteConfig { - return r.c -} - -func (r *Remote) String() string { - var fetch, push string - if len(r.c.URLs) > 0 { - fetch = r.c.URLs[0] - push = r.c.URLs[0] - } - - return fmt.Sprintf("%s\t%s (fetch)\n%[1]s\t%[3]s (push)", r.c.Name, fetch, push) -} - -// Push performs a push to the remote. Returns NoErrAlreadyUpToDate if the -// remote was already up-to-date. -func (r *Remote) Push(o *PushOptions) error { - return r.PushContext(context.Background(), o) -} - -// PushContext performs a push to the remote. Returns NoErrAlreadyUpToDate if -// the remote was already up-to-date. -// -// The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the -// transport operations. -func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) { - if err := o.Validate(); err != nil { - return err - } - - if o.RemoteName != r.c.Name { - return fmt.Errorf("remote names don't match: %s != %s", o.RemoteName, r.c.Name) - } - - s, err := newSendPackSession(r.c.URLs[0], o.Auth) - if err != nil { - return err - } - - defer ioutil.CheckClose(s, &err) - - ar, err := s.AdvertisedReferences() - if err != nil { - return err - } - - remoteRefs, err := ar.AllReferences() - if err != nil { - return err - } - - isDelete := false - allDelete := true - for _, rs := range o.RefSpecs { - if rs.IsDelete() { - isDelete = true - } else { - allDelete = false - } - if isDelete && !allDelete { - break - } - } - - if isDelete && !ar.Capabilities.Supports(capability.DeleteRefs) { - return ErrDeleteRefNotSupported - } - - if o.Force { - for i := 0; i < len(o.RefSpecs); i++ { - rs := &o.RefSpecs[i] - if !rs.IsForceUpdate() { - o.RefSpecs[i] = config.RefSpec("+" + rs.String()) - } - } - } - - localRefs, err := r.references() - if err != nil { - return err - } - - req, err := r.newReferenceUpdateRequest(o, localRefs, remoteRefs, ar) - if err != nil { - return err - } - - if len(req.Commands) == 0 { - return NoErrAlreadyUpToDate - } - - objects := objectsToPush(req.Commands) - - haves, err := referencesToHashes(remoteRefs) - if err != nil { - return err - } - - stop, err := r.s.Shallow() - if err != nil { - return err - } - - // if we have shallow we should include this as part of the objects that - // we are aware. - haves = append(haves, stop...) - - var hashesToPush []plumbing.Hash - // Avoid the expensive revlist operation if we're only doing deletes. - if !allDelete { - if r.c.IsFirstURLLocal() { - // If we're are pushing to a local repo, it might be much - // faster to use a local storage layer to get the commits - // to ignore, when calculating the object revlist. 
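// A filesystem storer rooted at the destination lets revlist consult the
// target repository's own objects when computing what to ignore, instead of
// deriving everything from the sender's side alone.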
- localStorer := filesystem.NewStorage( - osfs.New(r.c.URLs[0]), cache.NewObjectLRUDefault()) - hashesToPush, err = revlist.ObjectsWithStorageForIgnores( - r.s, localStorer, objects, haves) - } else { - hashesToPush, err = revlist.Objects(r.s, objects, haves) - } - if err != nil { - return err - } - } - - if len(hashesToPush) == 0 { - allDelete = true - for _, command := range req.Commands { - if command.Action() != packp.Delete { - allDelete = false - break - } - } - } - - rs, err := pushHashes(ctx, s, r.s, req, hashesToPush, r.useRefDeltas(ar), allDelete) - if err != nil { - return err - } - - if err = rs.Error(); err != nil { - return err - } - - return r.updateRemoteReferenceStorage(req, rs) -} - -func (r *Remote) useRefDeltas(ar *packp.AdvRefs) bool { - return !ar.Capabilities.Supports(capability.OFSDelta) -} - -func (r *Remote) newReferenceUpdateRequest( - o *PushOptions, - localRefs []*plumbing.Reference, - remoteRefs storer.ReferenceStorer, - ar *packp.AdvRefs, -) (*packp.ReferenceUpdateRequest, error) { - req := packp.NewReferenceUpdateRequestFromCapabilities(ar.Capabilities) - - if o.Progress != nil { - req.Progress = o.Progress - if ar.Capabilities.Supports(capability.Sideband64k) { - req.Capabilities.Set(capability.Sideband64k) - } else if ar.Capabilities.Supports(capability.Sideband) { - req.Capabilities.Set(capability.Sideband) - } - } - - if err := r.addReferencesToUpdate(o.RefSpecs, localRefs, remoteRefs, req, o.Prune); err != nil { - return nil, err - } - - return req, nil -} - -func (r *Remote) updateRemoteReferenceStorage( - req *packp.ReferenceUpdateRequest, - result *packp.ReportStatus, -) error { - - for _, spec := range r.c.Fetch { - for _, c := range req.Commands { - if !spec.Match(c.Name) { - continue - } - - local := spec.Dst(c.Name) - ref := plumbing.NewHashReference(local, c.New) - switch c.Action() { - case packp.Create, packp.Update: - if err := r.s.SetReference(ref); err != nil { - return err - } - case packp.Delete: - if err := r.s.RemoveReference(local); err != nil { - return err - } - } - } - } - - return nil -} - -// FetchContext fetches references along with the objects necessary to complete -// their histories. -// -// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are -// no changes to be fetched, or an error. -// -// The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the -// transport operations. -func (r *Remote) FetchContext(ctx context.Context, o *FetchOptions) error { - _, err := r.fetch(ctx, o) - return err -} - -// Fetch fetches references along with the objects necessary to complete their -// histories. -// -// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are -// no changes to be fetched, or an error. 
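// A minimal usage sketch, assuming remote was obtained via r.Remote("origin")
// on an open repository:
//
//   err := remote.Fetch(&git.FetchOptions{
//       RefSpecs: []config.RefSpec{"+refs/heads/*:refs/remotes/origin/*"},
//   })
//   switch err {
//   case nil:
//       // new objects and references were stored locally
//   case git.NoErrAlreadyUpToDate:
//       // nothing to fetch
//   default:
//       // handle error
//   }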
-func (r *Remote) Fetch(o *FetchOptions) error { - return r.FetchContext(context.Background(), o) -} - -func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.ReferenceStorer, err error) { - if o.RemoteName == "" { - o.RemoteName = r.c.Name - } - - if err = o.Validate(); err != nil { - return nil, err - } - - if len(o.RefSpecs) == 0 { - o.RefSpecs = r.c.Fetch - } - - s, err := newUploadPackSession(r.c.URLs[0], o.Auth) - if err != nil { - return nil, err - } - - defer ioutil.CheckClose(s, &err) - - ar, err := s.AdvertisedReferences() - if err != nil { - return nil, err - } - - req, err := r.newUploadPackRequest(o, ar) - if err != nil { - return nil, err - } - - if err := r.isSupportedRefSpec(o.RefSpecs, ar); err != nil { - return nil, err - } - - remoteRefs, err := ar.AllReferences() - if err != nil { - return nil, err - } - - localRefs, err := r.references() - if err != nil { - return nil, err - } - - refs, err := calculateRefs(o.RefSpecs, remoteRefs, o.Tags) - if err != nil { - return nil, err - } - - req.Wants, err = getWants(r.s, refs) - if len(req.Wants) > 0 { - req.Haves, err = getHaves(localRefs, remoteRefs, r.s) - if err != nil { - return nil, err - } - - if err = r.fetchPack(ctx, o, s, req); err != nil { - return nil, err - } - } - - updated, err := r.updateLocalReferenceStorage(o.RefSpecs, refs, remoteRefs, o.Tags, o.Force) - if err != nil { - return nil, err - } - - if !updated { - return remoteRefs, NoErrAlreadyUpToDate - } - - return remoteRefs, nil -} - -func newUploadPackSession(url string, auth transport.AuthMethod) (transport.UploadPackSession, error) { - c, ep, err := newClient(url) - if err != nil { - return nil, err - } - - return c.NewUploadPackSession(ep, auth) -} - -func newSendPackSession(url string, auth transport.AuthMethod) (transport.ReceivePackSession, error) { - c, ep, err := newClient(url) - if err != nil { - return nil, err - } - - return c.NewReceivePackSession(ep, auth) -} - -func newClient(url string) (transport.Transport, *transport.Endpoint, error) { - ep, err := transport.NewEndpoint(url) - if err != nil { - return nil, nil, err - } - - c, err := client.NewClient(ep) - if err != nil { - return nil, nil, err - } - - return c, ep, err -} - -func (r *Remote) fetchPack(ctx context.Context, o *FetchOptions, s transport.UploadPackSession, - req *packp.UploadPackRequest) (err error) { - - reader, err := s.UploadPack(ctx, req) - if err != nil { - return err - } - - defer ioutil.CheckClose(reader, &err) - - if err = r.updateShallow(o, reader); err != nil { - return err - } - - if err = packfile.UpdateObjectStorage(r.s, - buildSidebandIfSupported(req.Capabilities, reader, o.Progress), - ); err != nil { - return err - } - - return err -} - -func (r *Remote) addReferencesToUpdate( - refspecs []config.RefSpec, - localRefs []*plumbing.Reference, - remoteRefs storer.ReferenceStorer, - req *packp.ReferenceUpdateRequest, - prune bool, -) error { - // This references dictionary will be used to search references by name. 
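// Keying the local references by full name lets a non-wildcard refspec be
// resolved with a single map lookup, while wildcard refspecs still scan the
// localRefs slice, as addOrUpdateReferences below shows.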
- refsDict := make(map[string]*plumbing.Reference) - for _, ref := range localRefs { - refsDict[ref.Name().String()] = ref - } - - for _, rs := range refspecs { - if rs.IsDelete() { - if err := r.deleteReferences(rs, remoteRefs, refsDict, req, false); err != nil { - return err - } - } else { - err := r.addOrUpdateReferences(rs, localRefs, refsDict, remoteRefs, req) - if err != nil { - return err - } - - if prune { - if err := r.deleteReferences(rs, remoteRefs, refsDict, req, true); err != nil { - return err - } - } - } - } - - return nil -} - -func (r *Remote) addOrUpdateReferences( - rs config.RefSpec, - localRefs []*plumbing.Reference, - refsDict map[string]*plumbing.Reference, - remoteRefs storer.ReferenceStorer, - req *packp.ReferenceUpdateRequest, -) error { - // If it is not a wilcard refspec we can directly search for the reference - // in the references dictionary. - if !rs.IsWildcard() { - ref, ok := refsDict[rs.Src()] - if !ok { - return nil - } - - return r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req) - } - - for _, ref := range localRefs { - err := r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req) - if err != nil { - return err - } - } - - return nil -} - -func (r *Remote) deleteReferences(rs config.RefSpec, - remoteRefs storer.ReferenceStorer, - refsDict map[string]*plumbing.Reference, - req *packp.ReferenceUpdateRequest, - prune bool) error { - iter, err := remoteRefs.IterReferences() - if err != nil { - return err - } - - return iter.ForEach(func(ref *plumbing.Reference) error { - if ref.Type() != plumbing.HashReference { - return nil - } - - if prune { - rs := rs.Reverse() - if !rs.Match(ref.Name()) { - return nil - } - - if _, ok := refsDict[rs.Dst(ref.Name()).String()]; ok { - return nil - } - } else { - if rs.Dst("") != ref.Name() { - return nil - } - } - - cmd := &packp.Command{ - Name: ref.Name(), - Old: ref.Hash(), - New: plumbing.ZeroHash, - } - req.Commands = append(req.Commands, cmd) - return nil - }) -} - -func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec, - remoteRefs storer.ReferenceStorer, localRef *plumbing.Reference, - req *packp.ReferenceUpdateRequest) error { - - if localRef.Type() != plumbing.HashReference { - return nil - } - - if !rs.Match(localRef.Name()) { - return nil - } - - cmd := &packp.Command{ - Name: rs.Dst(localRef.Name()), - Old: plumbing.ZeroHash, - New: localRef.Hash(), - } - - remoteRef, err := remoteRefs.Reference(cmd.Name) - if err == nil { - if remoteRef.Type() != plumbing.HashReference { - //TODO: check actual git behavior here - return nil - } - - cmd.Old = remoteRef.Hash() - } else if err != plumbing.ErrReferenceNotFound { - return err - } - - if cmd.Old == cmd.New { - return nil - } - - if !rs.IsForceUpdate() { - if err := checkFastForwardUpdate(r.s, remoteRefs, cmd); err != nil { - return err - } - } - - req.Commands = append(req.Commands, cmd) - return nil -} - -func (r *Remote) references() ([]*plumbing.Reference, error) { - var localRefs []*plumbing.Reference - - iter, err := r.s.IterReferences() - if err != nil { - return nil, err - } - - for { - ref, err := iter.Next() - if err == io.EOF { - break - } - - if err != nil { - return nil, err - } - - localRefs = append(localRefs, ref) - } - - return localRefs, nil -} - -func getRemoteRefsFromStorer(remoteRefStorer storer.ReferenceStorer) ( - map[plumbing.Hash]bool, error) { - remoteRefs := map[plumbing.Hash]bool{} - iter, err := remoteRefStorer.IterReferences() - if err != nil { - return nil, err - } - err = iter.ForEach(func(ref *plumbing.Reference) 
error { - if ref.Type() != plumbing.HashReference { - return nil - } - remoteRefs[ref.Hash()] = true - return nil - }) - if err != nil { - return nil, err - } - return remoteRefs, nil -} - -// getHavesFromRef populates the given `haves` map with the given -// reference, and up to `maxHavesToVisitPerRef` ancestor commits. -func getHavesFromRef( - ref *plumbing.Reference, - remoteRefs map[plumbing.Hash]bool, - s storage.Storer, - haves map[plumbing.Hash]bool, -) error { - h := ref.Hash() - if haves[h] { - return nil - } - - // No need to load the commit if we know the remote already - // has this hash. - if remoteRefs[h] { - haves[h] = true - return nil - } - - commit, err := object.GetCommit(s, h) - if err != nil { - // Ignore the error if this isn't a commit. - haves[ref.Hash()] = true - return nil - } - - // Until go-git supports proper commit negotiation during an - // upload pack request, include up to `maxHavesToVisitPerRef` - // commits from the history of each ref. - walker := object.NewCommitPreorderIter(commit, haves, nil) - toVisit := maxHavesToVisitPerRef - return walker.ForEach(func(c *object.Commit) error { - haves[c.Hash] = true - toVisit-- - // If toVisit starts out at 0 (indicating there is no - // max), then it will be negative here and we won't stop - // early. - if toVisit == 0 || remoteRefs[c.Hash] { - return storer.ErrStop - } - return nil - }) -} - -func getHaves( - localRefs []*plumbing.Reference, - remoteRefStorer storer.ReferenceStorer, - s storage.Storer, -) ([]plumbing.Hash, error) { - haves := map[plumbing.Hash]bool{} - - // Build a map of all the remote references, to avoid loading too - // many parent commits for references we know don't need to be - // transferred. - remoteRefs, err := getRemoteRefsFromStorer(remoteRefStorer) - if err != nil { - return nil, err - } - - for _, ref := range localRefs { - if haves[ref.Hash()] { - continue - } - - if ref.Type() != plumbing.HashReference { - continue - } - - err = getHavesFromRef(ref, remoteRefs, s, haves) - if err != nil { - return nil, err - } - } - - var result []plumbing.Hash - for h := range haves { - result = append(result, h) - } - - return result, nil -} - -const refspecAllTags = "+refs/tags/*:refs/tags/*" - -func calculateRefs( - spec []config.RefSpec, - remoteRefs storer.ReferenceStorer, - tagMode TagMode, -) (memory.ReferenceStorage, error) { - if tagMode == AllTags { - spec = append(spec, refspecAllTags) - } - - refs := make(memory.ReferenceStorage) - for _, s := range spec { - if err := doCalculateRefs(s, remoteRefs, refs); err != nil { - return nil, err - } - } - - return refs, nil -} - -func doCalculateRefs( - s config.RefSpec, - remoteRefs storer.ReferenceStorer, - refs memory.ReferenceStorage, -) error { - iter, err := remoteRefs.IterReferences() - if err != nil { - return err - } - - if s.IsExactSHA1() { - ref := plumbing.NewHashReference(s.Dst(""), plumbing.NewHash(s.Src())) - return refs.SetReference(ref) - } - - var matched bool - err = iter.ForEach(func(ref *plumbing.Reference) error { - if !s.Match(ref.Name()) { - return nil - } - - if ref.Type() == plumbing.SymbolicReference { - target, err := storer.ResolveReference(remoteRefs, ref.Name()) - if err != nil { - return err - } - - ref = plumbing.NewHashReference(ref.Name(), target.Hash()) - } - - if ref.Type() != plumbing.HashReference { - return nil - } - - matched = true - if err := refs.SetReference(ref); err != nil { - return err - } - - if !s.IsWildcard() { - return storer.ErrStop - } - - return nil - }) - - if !matched && !s.IsWildcard() 
{ - return fmt.Errorf("couldn't find remote ref %q", s.Src()) - } - - return err -} - -func getWants(localStorer storage.Storer, refs memory.ReferenceStorage) ([]plumbing.Hash, error) { - wants := map[plumbing.Hash]bool{} - for _, ref := range refs { - hash := ref.Hash() - exists, err := objectExists(localStorer, ref.Hash()) - if err != nil { - return nil, err - } - - if !exists { - wants[hash] = true - } - } - - var result []plumbing.Hash - for h := range wants { - result = append(result, h) - } - - return result, nil -} - -func objectExists(s storer.EncodedObjectStorer, h plumbing.Hash) (bool, error) { - _, err := s.EncodedObject(plumbing.AnyObject, h) - if err == plumbing.ErrObjectNotFound { - return false, nil - } - - return true, err -} - -func checkFastForwardUpdate(s storer.EncodedObjectStorer, remoteRefs storer.ReferenceStorer, cmd *packp.Command) error { - if cmd.Old == plumbing.ZeroHash { - _, err := remoteRefs.Reference(cmd.Name) - if err == plumbing.ErrReferenceNotFound { - return nil - } - - if err != nil { - return err - } - - return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String()) - } - - ff, err := isFastForward(s, cmd.Old, cmd.New) - if err != nil { - return err - } - - if !ff { - return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String()) - } - - return nil -} - -func isFastForward(s storer.EncodedObjectStorer, old, new plumbing.Hash) (bool, error) { - c, err := object.GetCommit(s, new) - if err != nil { - return false, err - } - - found := false - iter := object.NewCommitPreorderIter(c, nil, nil) - err = iter.ForEach(func(c *object.Commit) error { - if c.Hash != old { - return nil - } - - found = true - return storer.ErrStop - }) - return found, err -} - -func (r *Remote) newUploadPackRequest(o *FetchOptions, - ar *packp.AdvRefs) (*packp.UploadPackRequest, error) { - - req := packp.NewUploadPackRequestFromCapabilities(ar.Capabilities) - - if o.Depth != 0 { - req.Depth = packp.DepthCommits(o.Depth) - if err := req.Capabilities.Set(capability.Shallow); err != nil { - return nil, err - } - } - - if o.Progress == nil && ar.Capabilities.Supports(capability.NoProgress) { - if err := req.Capabilities.Set(capability.NoProgress); err != nil { - return nil, err - } - } - - isWildcard := true - for _, s := range o.RefSpecs { - if !s.IsWildcard() { - isWildcard = false - break - } - } - - if isWildcard && o.Tags == TagFollowing && ar.Capabilities.Supports(capability.IncludeTag) { - if err := req.Capabilities.Set(capability.IncludeTag); err != nil { - return nil, err - } - } - - return req, nil -} - -func (r *Remote) isSupportedRefSpec(refs []config.RefSpec, ar *packp.AdvRefs) error { - var containsIsExact bool - for _, ref := range refs { - if ref.IsExactSHA1() { - containsIsExact = true - } - } - - if !containsIsExact { - return nil - } - - if ar.Capabilities.Supports(capability.AllowReachableSHA1InWant) || - ar.Capabilities.Supports(capability.AllowTipSHA1InWant) { - return nil - } - - return ErrExactSHA1NotSupported -} - -func buildSidebandIfSupported(l *capability.List, reader io.Reader, p sideband.Progress) io.Reader { - var t sideband.Type - - switch { - case l.Supports(capability.Sideband): - t = sideband.Sideband - case l.Supports(capability.Sideband64k): - t = sideband.Sideband64k - default: - return reader - } - - d := sideband.NewDemuxer(t, reader) - d.Progress = p - - return d -} - -func (r *Remote) updateLocalReferenceStorage( - specs []config.RefSpec, - fetchedRefs, remoteRefs memory.ReferenceStorage, - tagMode TagMode, - force bool, -) (updated 
bool, err error) { - isWildcard := true - forceNeeded := false - - for _, spec := range specs { - if !spec.IsWildcard() { - isWildcard = false - } - - for _, ref := range fetchedRefs { - if !spec.Match(ref.Name()) && !spec.IsExactSHA1() { - continue - } - - if ref.Type() != plumbing.HashReference { - continue - } - - localName := spec.Dst(ref.Name()) - old, _ := storer.ResolveReference(r.s, localName) - new := plumbing.NewHashReference(localName, ref.Hash()) - - // If the ref exists locally as a branch and force is not specified, - // only update if the new ref is an ancestor of the old - if old != nil && old.Name().IsBranch() && !force && !spec.IsForceUpdate() { - ff, err := isFastForward(r.s, old.Hash(), new.Hash()) - if err != nil { - return updated, err - } - - if !ff { - forceNeeded = true - continue - } - } - - refUpdated, err := checkAndUpdateReferenceStorerIfNeeded(r.s, new, old) - if err != nil { - return updated, err - } - - if refUpdated { - updated = true - } - } - } - - if tagMode == NoTags { - return updated, nil - } - - tags := fetchedRefs - if isWildcard { - tags = remoteRefs - } - tagUpdated, err := r.buildFetchedTags(tags) - if err != nil { - return updated, err - } - - if tagUpdated { - updated = true - } - - if forceNeeded { - err = ErrForceNeeded - } - - return -} - -func (r *Remote) buildFetchedTags(refs memory.ReferenceStorage) (updated bool, err error) { - for _, ref := range refs { - if !ref.Name().IsTag() { - continue - } - - _, err := r.s.EncodedObject(plumbing.AnyObject, ref.Hash()) - if err == plumbing.ErrObjectNotFound { - continue - } - - if err != nil { - return false, err - } - - refUpdated, err := updateReferenceStorerIfNeeded(r.s, ref) - if err != nil { - return updated, err - } - - if refUpdated { - updated = true - } - } - - return -} - -// List the references on the remote repository. 
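// A minimal sketch of the ls-remote style usage described on NewRemote above,
// listing references without a local repository (the URL is illustrative):
//
//   rem := git.NewRemote(memory.NewStorage(), &config.RemoteConfig{
//       Name: "origin",
//       URLs: []string{"https://github.com/go-git/go-git"},
//   })
//   refs, err := rem.List(&git.ListOptions{})
//   if err != nil {
//       // handle error
//   }
//   for _, ref := range refs {
//       fmt.Println(ref)
//   }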
-func (r *Remote) List(o *ListOptions) (rfs []*plumbing.Reference, err error) { - s, err := newUploadPackSession(r.c.URLs[0], o.Auth) - if err != nil { - return nil, err - } - - defer ioutil.CheckClose(s, &err) - - ar, err := s.AdvertisedReferences() - if err != nil { - return nil, err - } - - allRefs, err := ar.AllReferences() - if err != nil { - return nil, err - } - - refs, err := allRefs.IterReferences() - if err != nil { - return nil, err - } - - var resultRefs []*plumbing.Reference - refs.ForEach(func(ref *plumbing.Reference) error { - resultRefs = append(resultRefs, ref) - return nil - }) - - return resultRefs, nil -} - -func objectsToPush(commands []*packp.Command) []plumbing.Hash { - var objects []plumbing.Hash - for _, cmd := range commands { - if cmd.New == plumbing.ZeroHash { - continue - } - - objects = append(objects, cmd.New) - } - return objects -} - -func referencesToHashes(refs storer.ReferenceStorer) ([]plumbing.Hash, error) { - iter, err := refs.IterReferences() - if err != nil { - return nil, err - } - - var hs []plumbing.Hash - err = iter.ForEach(func(ref *plumbing.Reference) error { - if ref.Type() != plumbing.HashReference { - return nil - } - - hs = append(hs, ref.Hash()) - return nil - }) - if err != nil { - return nil, err - } - - return hs, nil -} - -func pushHashes( - ctx context.Context, - sess transport.ReceivePackSession, - s storage.Storer, - req *packp.ReferenceUpdateRequest, - hs []plumbing.Hash, - useRefDeltas bool, - allDelete bool, -) (*packp.ReportStatus, error) { - - rd, wr := io.Pipe() - - config, err := s.Config() - if err != nil { - return nil, err - } - - // Set buffer size to 1 so the error message can be written when - // ReceivePack fails. Otherwise the goroutine will be blocked writing - // to the channel. 
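// The packfile itself is streamed: the encoder goroutine below writes into
// wr while ReceivePack drains rd from the other end of the io.Pipe, so the
// pack never has to be materialized in memory as a whole.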
- done := make(chan error, 1) - - if !allDelete { - req.Packfile = rd - go func() { - e := packfile.NewEncoder(wr, s, useRefDeltas) - if _, err := e.Encode(hs, config.Pack.Window); err != nil { - done <- wr.CloseWithError(err) - return - } - - done <- wr.Close() - }() - } else { - close(done) - } - - rs, err := sess.ReceivePack(ctx, req) - if err != nil { - // close the pipe to unlock encode write - _ = rd.Close() - return nil, err - } - - if err := <-done; err != nil { - return nil, err - } - - return rs, nil -} - -func (r *Remote) updateShallow(o *FetchOptions, resp *packp.UploadPackResponse) error { - if o.Depth == 0 || len(resp.Shallows) == 0 { - return nil - } - - shallows, err := r.s.Shallow() - if err != nil { - return err - } - -outer: - for _, s := range resp.Shallows { - for _, oldS := range shallows { - if s == oldS { - continue outer - } - } - shallows = append(shallows, s) - } - - return r.s.SetShallow(shallows) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/repository.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/repository.go deleted file mode 100644 index 47318d113bb..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/repository.go +++ /dev/null @@ -1,1614 +0,0 @@ -package git - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - stdioutil "io/ioutil" - "os" - "path" - "path/filepath" - "strings" - "time" - - "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/internal/revision" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/cache" - "github.com/go-git/go-git/v5/plumbing/format/packfile" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/storage" - "github.com/go-git/go-git/v5/storage/filesystem" - "github.com/go-git/go-git/v5/utils/ioutil" - "github.com/imdario/mergo" - "golang.org/x/crypto/openpgp" - - "github.com/go-git/go-billy/v5" - "github.com/go-git/go-billy/v5/osfs" -) - -// GitDirName this is a special folder where all the git stuff is. 
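// In a repository whose storage lives outside the worktree it can instead be
// a plain file containing a "gitdir: <path>" pointer, as written by
// createDotGitFile below.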
-const GitDirName = ".git" - -var ( - // ErrBranchExists an error stating the specified branch already exists - ErrBranchExists = errors.New("branch already exists") - // ErrBranchNotFound an error stating the specified branch does not exist - ErrBranchNotFound = errors.New("branch not found") - // ErrTagExists an error stating the specified tag already exists - ErrTagExists = errors.New("tag already exists") - // ErrTagNotFound an error stating the specified tag does not exist - ErrTagNotFound = errors.New("tag not found") - // ErrFetching is returned when the packfile could not be downloaded - ErrFetching = errors.New("unable to fetch packfile") - - ErrInvalidReference = errors.New("invalid reference, should be a tag or a branch") - ErrRepositoryNotExists = errors.New("repository does not exist") - ErrRepositoryAlreadyExists = errors.New("repository already exists") - ErrRemoteNotFound = errors.New("remote not found") - ErrRemoteExists = errors.New("remote already exists") - ErrAnonymousRemoteName = errors.New("anonymous remote name must be 'anonymous'") - ErrWorktreeNotProvided = errors.New("worktree should be provided") - ErrIsBareRepository = errors.New("worktree not available in a bare repository") - ErrUnableToResolveCommit = errors.New("unable to resolve commit") - ErrPackedObjectsNotSupported = errors.New("Packed objects not supported") -) - -// Repository represents a git repository -type Repository struct { - Storer storage.Storer - - r map[string]*Remote - wt billy.Filesystem -} - -// Init creates an empty git repository, based on the given Storer and worktree. -// The worktree Filesystem is optional, if nil a bare repository is created. If -// the given storer is not empty ErrRepositoryAlreadyExists is returned -func Init(s storage.Storer, worktree billy.Filesystem) (*Repository, error) { - if err := initStorer(s); err != nil { - return nil, err - } - - r := newRepository(s, worktree) - _, err := r.Reference(plumbing.HEAD, false) - switch err { - case plumbing.ErrReferenceNotFound: - case nil: - return nil, ErrRepositoryAlreadyExists - default: - return nil, err - } - - h := plumbing.NewSymbolicReference(plumbing.HEAD, plumbing.Master) - if err := s.SetReference(h); err != nil { - return nil, err - } - - if worktree == nil { - r.setIsBare(true) - return r, nil - } - - return r, setWorktreeAndStoragePaths(r, worktree) -} - -func initStorer(s storer.Storer) error { - i, ok := s.(storer.Initializer) - if !ok { - return nil - } - - return i.Init() -} - -func setWorktreeAndStoragePaths(r *Repository, worktree billy.Filesystem) error { - type fsBased interface { - Filesystem() billy.Filesystem - } - - // .git file is only created if the storage is file based and the file - // system is osfs.OS - fs, isFSBased := r.Storer.(fsBased) - if !isFSBased { - return nil - } - - if err := createDotGitFile(worktree, fs.Filesystem()); err != nil { - return err - } - - return setConfigWorktree(r, worktree, fs.Filesystem()) -} - -func createDotGitFile(worktree, storage billy.Filesystem) error { - path, err := filepath.Rel(worktree.Root(), storage.Root()) - if err != nil { - path = storage.Root() - } - - if path == GitDirName { - // not needed, since the folder is the default place - return nil - } - - f, err := worktree.Create(GitDirName) - if err != nil { - return err - } - - defer f.Close() - _, err = fmt.Fprintf(f, "gitdir: %s\n", path) - return err -} - -func setConfigWorktree(r *Repository, worktree, storage billy.Filesystem) error { - path, err := filepath.Rel(storage.Root(), 
worktree.Root()) - if err != nil { - path = worktree.Root() - } - - if path == ".." { - // not needed, since the folder is the default place - return nil - } - - cfg, err := r.Config() - if err != nil { - return err - } - - cfg.Core.Worktree = path - return r.Storer.SetConfig(cfg) -} - -// Open opens a git repository using the given Storer and worktree filesystem, -// if the given storer is complete empty ErrRepositoryNotExists is returned. -// The worktree can be nil when the repository being opened is bare, if the -// repository is a normal one (not bare) and worktree is nil the err -// ErrWorktreeNotProvided is returned -func Open(s storage.Storer, worktree billy.Filesystem) (*Repository, error) { - _, err := s.Reference(plumbing.HEAD) - if err == plumbing.ErrReferenceNotFound { - return nil, ErrRepositoryNotExists - } - - if err != nil { - return nil, err - } - - return newRepository(s, worktree), nil -} - -// Clone a repository into the given Storer and worktree Filesystem with the -// given options, if worktree is nil a bare repository is created. If the given -// storer is not empty ErrRepositoryAlreadyExists is returned. -// -// The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the -// transport operations. -func Clone(s storage.Storer, worktree billy.Filesystem, o *CloneOptions) (*Repository, error) { - return CloneContext(context.Background(), s, worktree, o) -} - -// CloneContext a repository into the given Storer and worktree Filesystem with -// the given options, if worktree is nil a bare repository is created. If the -// given storer is not empty ErrRepositoryAlreadyExists is returned. -// -// The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the -// transport operations. -func CloneContext( - ctx context.Context, s storage.Storer, worktree billy.Filesystem, o *CloneOptions, -) (*Repository, error) { - r, err := Init(s, worktree) - if err != nil { - return nil, err - } - - return r, r.clone(ctx, o) -} - -// PlainInit create an empty git repository at the given path. isBare defines -// if the repository will have worktree (non-bare) or not (bare), if the path -// is not empty ErrRepositoryAlreadyExists is returned. -func PlainInit(path string, isBare bool) (*Repository, error) { - var wt, dot billy.Filesystem - - if isBare { - dot = osfs.New(path) - } else { - wt = osfs.New(path) - dot, _ = wt.Chroot(GitDirName) - } - - s := filesystem.NewStorage(dot, cache.NewObjectLRUDefault()) - - return Init(s, wt) -} - -// PlainOpen opens a git repository from the given path. It detects if the -// repository is bare or a normal one. If the path doesn't contain a valid -// repository ErrRepositoryNotExists is returned -func PlainOpen(path string) (*Repository, error) { - return PlainOpenWithOptions(path, &PlainOpenOptions{}) -} - -// PlainOpenWithOptions opens a git repository from the given path with specific -// options. See PlainOpen for more info. 
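// A minimal usage sketch, assuming the given path lies somewhere inside a
// working tree (the path itself is illustrative):
//
//   r, err := git.PlainOpenWithOptions("/path/inside/repo", &git.PlainOpenOptions{
//       DetectDotGit: true,
//   })
//   if err != nil {
//       // handle error, e.g. git.ErrRepositoryNotExists
//   }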
-func PlainOpenWithOptions(path string, o *PlainOpenOptions) (*Repository, error) { - dot, wt, err := dotGitToOSFilesystems(path, o.DetectDotGit) - if err != nil { - return nil, err - } - - if _, err := dot.Stat(""); err != nil { - if os.IsNotExist(err) { - return nil, ErrRepositoryNotExists - } - - return nil, err - } - - s := filesystem.NewStorage(dot, cache.NewObjectLRUDefault()) - - return Open(s, wt) -} - -func dotGitToOSFilesystems(path string, detect bool) (dot, wt billy.Filesystem, err error) { - if path, err = filepath.Abs(path); err != nil { - return nil, nil, err - } - var fs billy.Filesystem - var fi os.FileInfo - for { - fs = osfs.New(path) - fi, err = fs.Stat(GitDirName) - if err == nil { - // no error; stop - break - } - if !os.IsNotExist(err) { - // unknown error; stop - return nil, nil, err - } - if detect { - // try its parent as long as we haven't reached - // the root dir - if dir := filepath.Dir(path); dir != path { - path = dir - continue - } - } - // not detecting via parent dirs and the dir does not exist; - // stop - return fs, nil, nil - } - - if fi.IsDir() { - dot, err = fs.Chroot(GitDirName) - return dot, fs, err - } - - dot, err = dotGitFileToOSFilesystem(path, fs) - if err != nil { - return nil, nil, err - } - - return dot, fs, nil -} - -func dotGitFileToOSFilesystem(path string, fs billy.Filesystem) (bfs billy.Filesystem, err error) { - f, err := fs.Open(GitDirName) - if err != nil { - return nil, err - } - defer ioutil.CheckClose(f, &err) - - b, err := stdioutil.ReadAll(f) - if err != nil { - return nil, err - } - - line := string(b) - const prefix = "gitdir: " - if !strings.HasPrefix(line, prefix) { - return nil, fmt.Errorf(".git file has no %s prefix", prefix) - } - - gitdir := strings.Split(line[len(prefix):], "\n")[0] - gitdir = strings.TrimSpace(gitdir) - if filepath.IsAbs(gitdir) { - return osfs.New(gitdir), nil - } - - return osfs.New(fs.Join(path, gitdir)), nil -} - -// PlainClone a repository into the path with the given options, isBare defines -// if the new repository will be bare or normal. If the path is not empty -// ErrRepositoryAlreadyExists is returned. -// -// TODO(mcuadros): move isBare to CloneOptions in v5 -func PlainClone(path string, isBare bool, o *CloneOptions) (*Repository, error) { - return PlainCloneContext(context.Background(), path, isBare, o) -} - -// PlainCloneContext a repository into the path with the given options, isBare -// defines if the new repository will be bare or normal. If the path is not empty -// ErrRepositoryAlreadyExists is returned. -// -// The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the -// transport operations. 
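// A minimal usage sketch with a timeout (URL and destination path are
// illustrative):
//
//   ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
//   defer cancel()
//   r, err := git.PlainCloneContext(ctx, "/tmp/example", false, &git.CloneOptions{
//       URL: "https://github.com/go-git/go-git",
//   })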
-// -// TODO(mcuadros): move isBare to CloneOptions in v5 -// TODO(smola): refuse upfront to clone on a non-empty directory in v5, see #1027 -func PlainCloneContext(ctx context.Context, path string, isBare bool, o *CloneOptions) (*Repository, error) { - cleanup, cleanupParent, err := checkIfCleanupIsNeeded(path) - if err != nil { - return nil, err - } - - r, err := PlainInit(path, isBare) - if err != nil { - return nil, err - } - - err = r.clone(ctx, o) - if err != nil && err != ErrRepositoryAlreadyExists { - if cleanup { - cleanUpDir(path, cleanupParent) - } - } - - return r, err -} - -func newRepository(s storage.Storer, worktree billy.Filesystem) *Repository { - return &Repository{ - Storer: s, - wt: worktree, - r: make(map[string]*Remote), - } -} - -func checkIfCleanupIsNeeded(path string) (cleanup bool, cleanParent bool, err error) { - fi, err := os.Stat(path) - if err != nil { - if os.IsNotExist(err) { - return true, true, nil - } - - return false, false, err - } - - if !fi.IsDir() { - return false, false, fmt.Errorf("path is not a directory: %s", path) - } - - f, err := os.Open(path) - if err != nil { - return false, false, err - } - - defer ioutil.CheckClose(f, &err) - - _, err = f.Readdirnames(1) - if err == io.EOF { - return true, false, nil - } - - if err != nil { - return false, false, err - } - - return false, false, nil -} - -func cleanUpDir(path string, all bool) error { - if all { - return os.RemoveAll(path) - } - - f, err := os.Open(path) - if err != nil { - return err - } - - defer ioutil.CheckClose(f, &err) - - names, err := f.Readdirnames(-1) - if err != nil { - return err - } - - for _, name := range names { - if err := os.RemoveAll(filepath.Join(path, name)); err != nil { - return err - } - } - - return err -} - -// Config return the repository config. In a filesystem backed repository this -// means read the `.git/config`. -func (r *Repository) Config() (*config.Config, error) { - return r.Storer.Config() -} - -// SetConfig marshall and writes the repository config. In a filesystem backed -// repository this means write the `.git/config`. This function should be called -// with the result of `Repository.Config` and never with the output of -// `Repository.ConfigScoped`. -func (r *Repository) SetConfig(cfg *config.Config) error { - return r.Storer.SetConfig(cfg) -} - -// ConfigScoped returns the repository config, merged with requested scope and -// lower. For example if, config.GlobalScope is given the local and global config -// are returned merged in one config value. 
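// A minimal usage sketch, assuming an already opened *Repository r:
//
//   cfg, err := r.ConfigScoped(config.GlobalScope)
//   if err != nil {
//       // handle error
//   }
//   fmt.Println(cfg.User.Email)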
-func (r *Repository) ConfigScoped(scope config.Scope) (*config.Config, error) { - // TODO(mcuadros): v6, add this as ConfigOptions.Scoped - - var err error - system := config.NewConfig() - if scope >= config.SystemScope { - system, err = config.LoadConfig(config.SystemScope) - if err != nil { - return nil, err - } - } - - global := config.NewConfig() - if scope >= config.GlobalScope { - global, err = config.LoadConfig(config.GlobalScope) - if err != nil { - return nil, err - } - } - - local, err := r.Storer.Config() - if err != nil { - return nil, err - } - - _ = mergo.Merge(global, system) - _ = mergo.Merge(local, global) - return local, nil -} - -// Remote return a remote if exists -func (r *Repository) Remote(name string) (*Remote, error) { - cfg, err := r.Config() - if err != nil { - return nil, err - } - - c, ok := cfg.Remotes[name] - if !ok { - return nil, ErrRemoteNotFound - } - - return NewRemote(r.Storer, c), nil -} - -// Remotes returns a list with all the remotes -func (r *Repository) Remotes() ([]*Remote, error) { - cfg, err := r.Config() - if err != nil { - return nil, err - } - - remotes := make([]*Remote, len(cfg.Remotes)) - - var i int - for _, c := range cfg.Remotes { - remotes[i] = NewRemote(r.Storer, c) - i++ - } - - return remotes, nil -} - -// CreateRemote creates a new remote -func (r *Repository) CreateRemote(c *config.RemoteConfig) (*Remote, error) { - if err := c.Validate(); err != nil { - return nil, err - } - - remote := NewRemote(r.Storer, c) - - cfg, err := r.Config() - if err != nil { - return nil, err - } - - if _, ok := cfg.Remotes[c.Name]; ok { - return nil, ErrRemoteExists - } - - cfg.Remotes[c.Name] = c - return remote, r.Storer.SetConfig(cfg) -} - -// CreateRemoteAnonymous creates a new anonymous remote. c.Name must be "anonymous". -// It's used like 'git fetch git@github.com:src-d/go-git.git master:master'. -func (r *Repository) CreateRemoteAnonymous(c *config.RemoteConfig) (*Remote, error) { - if err := c.Validate(); err != nil { - return nil, err - } - - if c.Name != "anonymous" { - return nil, ErrAnonymousRemoteName - } - - remote := NewRemote(r.Storer, c) - - return remote, nil -} - -// DeleteRemote delete a remote from the repository and delete the config -func (r *Repository) DeleteRemote(name string) error { - cfg, err := r.Config() - if err != nil { - return err - } - - if _, ok := cfg.Remotes[name]; !ok { - return ErrRemoteNotFound - } - - delete(cfg.Remotes, name) - return r.Storer.SetConfig(cfg) -} - -// Branch return a Branch if exists -func (r *Repository) Branch(name string) (*config.Branch, error) { - cfg, err := r.Config() - if err != nil { - return nil, err - } - - b, ok := cfg.Branches[name] - if !ok { - return nil, ErrBranchNotFound - } - - return b, nil -} - -// CreateBranch creates a new Branch -func (r *Repository) CreateBranch(c *config.Branch) error { - if err := c.Validate(); err != nil { - return err - } - - cfg, err := r.Config() - if err != nil { - return err - } - - if _, ok := cfg.Branches[c.Name]; ok { - return ErrBranchExists - } - - cfg.Branches[c.Name] = c - return r.Storer.SetConfig(cfg) -} - -// DeleteBranch delete a Branch from the repository and delete the config -func (r *Repository) DeleteBranch(name string) error { - cfg, err := r.Config() - if err != nil { - return err - } - - if _, ok := cfg.Branches[name]; !ok { - return ErrBranchNotFound - } - - delete(cfg.Branches, name) - return r.Storer.SetConfig(cfg) -} - -// CreateTag creates a tag. 
If opts is included, the tag is an annotated tag, -// otherwise a lightweight tag is created. -func (r *Repository) CreateTag(name string, hash plumbing.Hash, opts *CreateTagOptions) (*plumbing.Reference, error) { - rname := plumbing.ReferenceName(path.Join("refs", "tags", name)) - - _, err := r.Storer.Reference(rname) - switch err { - case nil: - // Tag exists, this is an error - return nil, ErrTagExists - case plumbing.ErrReferenceNotFound: - // Tag missing, available for creation, pass this - default: - // Some other error - return nil, err - } - - var target plumbing.Hash - if opts != nil { - target, err = r.createTagObject(name, hash, opts) - if err != nil { - return nil, err - } - } else { - target = hash - } - - ref := plumbing.NewHashReference(rname, target) - if err = r.Storer.SetReference(ref); err != nil { - return nil, err - } - - return ref, nil -} - -func (r *Repository) createTagObject(name string, hash plumbing.Hash, opts *CreateTagOptions) (plumbing.Hash, error) { - if err := opts.Validate(r, hash); err != nil { - return plumbing.ZeroHash, err - } - - rawobj, err := object.GetObject(r.Storer, hash) - if err != nil { - return plumbing.ZeroHash, err - } - - tag := &object.Tag{ - Name: name, - Tagger: *opts.Tagger, - Message: opts.Message, - TargetType: rawobj.Type(), - Target: hash, - } - - if opts.SignKey != nil { - sig, err := r.buildTagSignature(tag, opts.SignKey) - if err != nil { - return plumbing.ZeroHash, err - } - - tag.PGPSignature = sig - } - - obj := r.Storer.NewEncodedObject() - if err := tag.Encode(obj); err != nil { - return plumbing.ZeroHash, err - } - - return r.Storer.SetEncodedObject(obj) -} - -func (r *Repository) buildTagSignature(tag *object.Tag, signKey *openpgp.Entity) (string, error) { - encoded := &plumbing.MemoryObject{} - if err := tag.Encode(encoded); err != nil { - return "", err - } - - rdr, err := encoded.Reader() - if err != nil { - return "", err - } - - var b bytes.Buffer - if err := openpgp.ArmoredDetachSign(&b, signKey, rdr, nil); err != nil { - return "", err - } - - return b.String(), nil -} - -// Tag returns a tag from the repository. -// -// If you want to check to see if the tag is an annotated tag, you can call -// TagObject on the hash of the reference in ForEach: -// -// ref, err := r.Tag("v0.1.0") -// if err != nil { -// // Handle error -// } -// -// obj, err := r.TagObject(ref.Hash()) -// switch err { -// case nil: -// // Tag object present -// case plumbing.ErrObjectNotFound: -// // Not a tag object -// default: -// // Some other error -// } -// -func (r *Repository) Tag(name string) (*plumbing.Reference, error) { - ref, err := r.Reference(plumbing.ReferenceName(path.Join("refs", "tags", name)), false) - if err != nil { - if err == plumbing.ErrReferenceNotFound { - // Return a friendly error for this one, versus just ReferenceNotFound. - return nil, ErrTagNotFound - } - - return nil, err - } - - return ref, nil -} - -// DeleteTag deletes a tag from the repository. 
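// As the body below shows, only the reference under refs/tags is removed; an
// annotated tag object, if one exists, stays in object storage.
//
//   if err := r.DeleteTag("v0.1.0"); err != nil {
//       // handle error, e.g. git.ErrTagNotFound
//   }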
-func (r *Repository) DeleteTag(name string) error { - _, err := r.Tag(name) - if err != nil { - return err - } - - return r.Storer.RemoveReference(plumbing.ReferenceName(path.Join("refs", "tags", name))) -} - -func (r *Repository) resolveToCommitHash(h plumbing.Hash) (plumbing.Hash, error) { - obj, err := r.Storer.EncodedObject(plumbing.AnyObject, h) - if err != nil { - return plumbing.ZeroHash, err - } - switch obj.Type() { - case plumbing.TagObject: - t, err := object.DecodeTag(r.Storer, obj) - if err != nil { - return plumbing.ZeroHash, err - } - return r.resolveToCommitHash(t.Target) - case plumbing.CommitObject: - return h, nil - default: - return plumbing.ZeroHash, ErrUnableToResolveCommit - } -} - -// Clone clones a remote repository -func (r *Repository) clone(ctx context.Context, o *CloneOptions) error { - if err := o.Validate(); err != nil { - return err - } - - c := &config.RemoteConfig{ - Name: o.RemoteName, - URLs: []string{o.URL}, - Fetch: r.cloneRefSpec(o), - } - - if _, err := r.CreateRemote(c); err != nil { - return err - } - - ref, err := r.fetchAndUpdateReferences(ctx, &FetchOptions{ - RefSpecs: c.Fetch, - Depth: o.Depth, - Auth: o.Auth, - Progress: o.Progress, - Tags: o.Tags, - RemoteName: o.RemoteName, - }, o.ReferenceName) - if err != nil { - return err - } - - if r.wt != nil && !o.NoCheckout { - w, err := r.Worktree() - if err != nil { - return err - } - - head, err := r.Head() - if err != nil { - return err - } - - if err := w.Reset(&ResetOptions{ - Mode: MergeReset, - Commit: head.Hash(), - }); err != nil { - return err - } - - if o.RecurseSubmodules != NoRecurseSubmodules { - if err := w.updateSubmodules(&SubmoduleUpdateOptions{ - RecurseSubmodules: o.RecurseSubmodules, - Auth: o.Auth, - }); err != nil { - return err - } - } - } - - if err := r.updateRemoteConfigIfNeeded(o, c, ref); err != nil { - return err - } - - if ref.Name().IsBranch() { - branchRef := ref.Name() - branchName := strings.Split(string(branchRef), "refs/heads/")[1] - - b := &config.Branch{ - Name: branchName, - Merge: branchRef, - } - if o.RemoteName == "" { - b.Remote = "origin" - } else { - b.Remote = o.RemoteName - } - if err := r.CreateBranch(b); err != nil { - return err - } - } - - return nil -} - -const ( - refspecTag = "+refs/tags/%s:refs/tags/%[1]s" - refspecSingleBranch = "+refs/heads/%s:refs/remotes/%s/%[1]s" - refspecSingleBranchHEAD = "+HEAD:refs/remotes/%s/HEAD" -) - -func (r *Repository) cloneRefSpec(o *CloneOptions) []config.RefSpec { - switch { - case o.ReferenceName.IsTag(): - return []config.RefSpec{ - config.RefSpec(fmt.Sprintf(refspecTag, o.ReferenceName.Short())), - } - case o.SingleBranch && o.ReferenceName == plumbing.HEAD: - return []config.RefSpec{ - config.RefSpec(fmt.Sprintf(refspecSingleBranchHEAD, o.RemoteName)), - config.RefSpec(fmt.Sprintf(refspecSingleBranch, plumbing.Master.Short(), o.RemoteName)), - } - case o.SingleBranch: - return []config.RefSpec{ - config.RefSpec(fmt.Sprintf(refspecSingleBranch, o.ReferenceName.Short(), o.RemoteName)), - } - default: - return []config.RefSpec{ - config.RefSpec(fmt.Sprintf(config.DefaultFetchRefSpec, o.RemoteName)), - } - } -} - -func (r *Repository) setIsBare(isBare bool) error { - cfg, err := r.Config() - if err != nil { - return err - } - - cfg.Core.IsBare = isBare - return r.Storer.SetConfig(cfg) -} - -func (r *Repository) updateRemoteConfigIfNeeded(o *CloneOptions, c *config.RemoteConfig, head *plumbing.Reference) error { - if !o.SingleBranch { - return nil - } - - c.Fetch = r.cloneRefSpec(o) - - cfg, err := r.Config() 
- if err != nil { - return err - } - - cfg.Remotes[c.Name] = c - return r.Storer.SetConfig(cfg) -} - -func (r *Repository) fetchAndUpdateReferences( - ctx context.Context, o *FetchOptions, ref plumbing.ReferenceName, -) (*plumbing.Reference, error) { - - if err := o.Validate(); err != nil { - return nil, err - } - - remote, err := r.Remote(o.RemoteName) - if err != nil { - return nil, err - } - - objsUpdated := true - remoteRefs, err := remote.fetch(ctx, o) - if err == NoErrAlreadyUpToDate { - objsUpdated = false - } else if err == packfile.ErrEmptyPackfile { - return nil, ErrFetching - } else if err != nil { - return nil, err - } - - resolvedRef, err := storer.ResolveReference(remoteRefs, ref) - if err != nil { - return nil, err - } - - refsUpdated, err := r.updateReferences(remote.c.Fetch, resolvedRef) - if err != nil { - return nil, err - } - - if !objsUpdated && !refsUpdated { - return nil, NoErrAlreadyUpToDate - } - - return resolvedRef, nil -} - -func (r *Repository) updateReferences(spec []config.RefSpec, - resolvedRef *plumbing.Reference) (updated bool, err error) { - - if !resolvedRef.Name().IsBranch() { - // Detached HEAD mode - h, err := r.resolveToCommitHash(resolvedRef.Hash()) - if err != nil { - return false, err - } - head := plumbing.NewHashReference(plumbing.HEAD, h) - return updateReferenceStorerIfNeeded(r.Storer, head) - } - - refs := []*plumbing.Reference{ - // Create local reference for the resolved ref - resolvedRef, - // Create local symbolic HEAD - plumbing.NewSymbolicReference(plumbing.HEAD, resolvedRef.Name()), - } - - refs = append(refs, r.calculateRemoteHeadReference(spec, resolvedRef)...) - - for _, ref := range refs { - u, err := updateReferenceStorerIfNeeded(r.Storer, ref) - if err != nil { - return updated, err - } - - if u { - updated = true - } - } - - return -} - -func (r *Repository) calculateRemoteHeadReference(spec []config.RefSpec, - resolvedHead *plumbing.Reference) []*plumbing.Reference { - - var refs []*plumbing.Reference - - // Create resolved HEAD reference with remote prefix if it does not - // exist. This is needed when using single branch and HEAD. - for _, rs := range spec { - name := resolvedHead.Name() - if !rs.Match(name) { - continue - } - - name = rs.Dst(name) - _, err := r.Storer.Reference(name) - if err == plumbing.ErrReferenceNotFound { - refs = append(refs, plumbing.NewHashReference(name, resolvedHead.Hash())) - } - } - - return refs -} - -func checkAndUpdateReferenceStorerIfNeeded( - s storer.ReferenceStorer, r, old *plumbing.Reference) ( - updated bool, err error) { - p, err := s.Reference(r.Name()) - if err != nil && err != plumbing.ErrReferenceNotFound { - return false, err - } - - // we use the string method to compare references, is the easiest way - if err == plumbing.ErrReferenceNotFound || r.String() != p.String() { - if err := s.CheckAndSetReference(r, old); err != nil { - return false, err - } - - return true, nil - } - - return false, nil -} - -func updateReferenceStorerIfNeeded( - s storer.ReferenceStorer, r *plumbing.Reference) (updated bool, err error) { - return checkAndUpdateReferenceStorerIfNeeded(s, r, nil) -} - -// Fetch fetches references along with the objects necessary to complete -// their histories, from the remote named as FetchOptions.RemoteName. -// -// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are -// no changes to be fetched, or an error. 
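// A minimal usage sketch:
//
//   err := r.Fetch(&git.FetchOptions{RemoteName: "origin"})
//   if err != nil && err != git.NoErrAlreadyUpToDate {
//       // handle error
//   }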
-func (r *Repository) Fetch(o *FetchOptions) error { - return r.FetchContext(context.Background(), o) -} - -// FetchContext fetches references along with the objects necessary to complete -// their histories, from the remote named as FetchOptions.RemoteName. -// -// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are -// no changes to be fetched, or an error. -// -// The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the -// transport operations. -func (r *Repository) FetchContext(ctx context.Context, o *FetchOptions) error { - if err := o.Validate(); err != nil { - return err - } - - remote, err := r.Remote(o.RemoteName) - if err != nil { - return err - } - - return remote.FetchContext(ctx, o) -} - -// Push performs a push to the remote. Returns NoErrAlreadyUpToDate if -// the remote was already up-to-date, from the remote named as -// FetchOptions.RemoteName. -func (r *Repository) Push(o *PushOptions) error { - return r.PushContext(context.Background(), o) -} - -// PushContext performs a push to the remote. Returns NoErrAlreadyUpToDate if -// the remote was already up-to-date, from the remote named as -// FetchOptions.RemoteName. -// -// The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the -// transport operations. -func (r *Repository) PushContext(ctx context.Context, o *PushOptions) error { - if err := o.Validate(); err != nil { - return err - } - - remote, err := r.Remote(o.RemoteName) - if err != nil { - return err - } - - return remote.PushContext(ctx, o) -} - -// Log returns the commit history from the given LogOptions. -func (r *Repository) Log(o *LogOptions) (object.CommitIter, error) { - fn := commitIterFunc(o.Order) - if fn == nil { - return nil, fmt.Errorf("invalid Order=%v", o.Order) - } - - var ( - it object.CommitIter - err error - ) - if o.All { - it, err = r.logAll(fn) - } else { - it, err = r.log(o.From, fn) - } - - if err != nil { - return nil, err - } - - if o.FileName != nil { - // for `git log --all` also check parent (if the next commit comes from the real parent) - it = r.logWithFile(*o.FileName, it, o.All) - } - if o.PathFilter != nil { - it = r.logWithPathFilter(o.PathFilter, it, o.All) - } - - if o.Since != nil || o.Until != nil { - limitOptions := object.LogLimitOptions{Since: o.Since, Until: o.Until} - it = r.logWithLimit(it, limitOptions) - } - - return it, nil -} - -func (r *Repository) log(from plumbing.Hash, commitIterFunc func(*object.Commit) object.CommitIter) (object.CommitIter, error) { - h := from - if from == plumbing.ZeroHash { - head, err := r.Head() - if err != nil { - return nil, err - } - - h = head.Hash() - } - - commit, err := r.CommitObject(h) - if err != nil { - return nil, err - } - return commitIterFunc(commit), nil -} - -func (r *Repository) logAll(commitIterFunc func(*object.Commit) object.CommitIter) (object.CommitIter, error) { - return object.NewCommitAllIter(r.Storer, commitIterFunc) -} - -func (*Repository) logWithFile(fileName string, commitIter object.CommitIter, checkParent bool) object.CommitIter { - return object.NewCommitPathIterFromIter( - func(path string) bool { - return path == fileName - }, - commitIter, - checkParent, - ) -} - -func (*Repository) logWithPathFilter(pathFilter func(string) bool, commitIter object.CommitIter, checkParent bool) object.CommitIter { - return 
object.NewCommitPathIterFromIter( - pathFilter, - commitIter, - checkParent, - ) -} - -func (*Repository) logWithLimit(commitIter object.CommitIter, limitOptions object.LogLimitOptions) object.CommitIter { - return object.NewCommitLimitIterFromIter(commitIter, limitOptions) -} - -func commitIterFunc(order LogOrder) func(c *object.Commit) object.CommitIter { - switch order { - case LogOrderDefault: - return func(c *object.Commit) object.CommitIter { - return object.NewCommitPreorderIter(c, nil, nil) - } - case LogOrderDFS: - return func(c *object.Commit) object.CommitIter { - return object.NewCommitPreorderIter(c, nil, nil) - } - case LogOrderDFSPost: - return func(c *object.Commit) object.CommitIter { - return object.NewCommitPostorderIter(c, nil) - } - case LogOrderBSF: - return func(c *object.Commit) object.CommitIter { - return object.NewCommitIterBSF(c, nil, nil) - } - case LogOrderCommitterTime: - return func(c *object.Commit) object.CommitIter { - return object.NewCommitIterCTime(c, nil, nil) - } - } - return nil -} - -// Tags returns all the tag References in a repository. -// -// If you want to check to see if the tag is an annotated tag, you can call -// TagObject on the hash Reference passed in through ForEach: -// -// iter, err := r.Tags() -// if err != nil { -// // Handle error -// } -// -// if err := iter.ForEach(func (ref *plumbing.Reference) error { -// obj, err := r.TagObject(ref.Hash()) -// switch err { -// case nil: -// // Tag object present -// case plumbing.ErrObjectNotFound: -// // Not a tag object -// default: -// // Some other error -// return err -// } -// }); err != nil { -// // Handle outer iterator error -// } -// -func (r *Repository) Tags() (storer.ReferenceIter, error) { - refIter, err := r.Storer.IterReferences() - if err != nil { - return nil, err - } - - return storer.NewReferenceFilteredIter( - func(r *plumbing.Reference) bool { - return r.Name().IsTag() - }, refIter), nil -} - -// Branches returns all the References that are Branches. -func (r *Repository) Branches() (storer.ReferenceIter, error) { - refIter, err := r.Storer.IterReferences() - if err != nil { - return nil, err - } - - return storer.NewReferenceFilteredIter( - func(r *plumbing.Reference) bool { - return r.Name().IsBranch() - }, refIter), nil -} - -// Notes returns all the References that are notes. For more information: -// https://git-scm.com/docs/git-notes -func (r *Repository) Notes() (storer.ReferenceIter, error) { - refIter, err := r.Storer.IterReferences() - if err != nil { - return nil, err - } - - return storer.NewReferenceFilteredIter( - func(r *plumbing.Reference) bool { - return r.Name().IsNote() - }, refIter), nil -} - -// TreeObject return a Tree with the given hash. If not found -// plumbing.ErrObjectNotFound is returned -func (r *Repository) TreeObject(h plumbing.Hash) (*object.Tree, error) { - return object.GetTree(r.Storer, h) -} - -// TreeObjects returns an unsorted TreeIter with all the trees in the repository -func (r *Repository) TreeObjects() (*object.TreeIter, error) { - iter, err := r.Storer.IterEncodedObjects(plumbing.TreeObject) - if err != nil { - return nil, err - } - - return object.NewTreeIter(r.Storer, iter), nil -} - -// CommitObject return a Commit with the given hash. If not found -// plumbing.ErrObjectNotFound is returned. -func (r *Repository) CommitObject(h plumbing.Hash) (*object.Commit, error) { - return object.GetCommit(r.Storer, h) -} - -// CommitObjects returns an unsorted CommitIter with all the commits in the repository. 
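CommitObjects, documented above and defined next, walks the object database in storage order rather than following history from a tip. A short sketch of draining the iterator (the repository path is illustrative; ForEach closes the iterator when it returns):

    package main

    import (
        "fmt"

        git "github.com/go-git/go-git/v5"
        "github.com/go-git/go-git/v5/plumbing/object"
    )

    func main() {
        r, err := git.PlainOpen("/tmp/repo") // illustrative path
        if err != nil {
            panic(err)
        }

        iter, err := r.CommitObjects()
        if err != nil {
            panic(err)
        }

        // Order is storage order, not history order.
        err = iter.ForEach(func(c *object.Commit) error {
            fmt.Println(c.Hash, c.Author.When.Format("2006-01-02"))
            return nil
        })
        if err != nil {
            panic(err)
        }
    }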
-func (r *Repository) CommitObjects() (object.CommitIter, error) { - iter, err := r.Storer.IterEncodedObjects(plumbing.CommitObject) - if err != nil { - return nil, err - } - - return object.NewCommitIter(r.Storer, iter), nil -} - -// BlobObject returns a Blob with the given hash. If not found -// plumbing.ErrObjectNotFound is returned. -func (r *Repository) BlobObject(h plumbing.Hash) (*object.Blob, error) { - return object.GetBlob(r.Storer, h) -} - -// BlobObjects returns an unsorted BlobIter with all the blobs in the repository. -func (r *Repository) BlobObjects() (*object.BlobIter, error) { - iter, err := r.Storer.IterEncodedObjects(plumbing.BlobObject) - if err != nil { - return nil, err - } - - return object.NewBlobIter(r.Storer, iter), nil -} - -// TagObject returns a Tag with the given hash. If not found -// plumbing.ErrObjectNotFound is returned. This method only returns -// annotated Tags, no lightweight Tags. -func (r *Repository) TagObject(h plumbing.Hash) (*object.Tag, error) { - return object.GetTag(r.Storer, h) -} - -// TagObjects returns a unsorted TagIter that can step through all of the annotated -// tags in the repository. -func (r *Repository) TagObjects() (*object.TagIter, error) { - iter, err := r.Storer.IterEncodedObjects(plumbing.TagObject) - if err != nil { - return nil, err - } - - return object.NewTagIter(r.Storer, iter), nil -} - -// Object returns an Object with the given hash. If not found -// plumbing.ErrObjectNotFound is returned. -func (r *Repository) Object(t plumbing.ObjectType, h plumbing.Hash) (object.Object, error) { - obj, err := r.Storer.EncodedObject(t, h) - if err != nil { - return nil, err - } - - return object.DecodeObject(r.Storer, obj) -} - -// Objects returns an unsorted ObjectIter with all the objects in the repository. -func (r *Repository) Objects() (*object.ObjectIter, error) { - iter, err := r.Storer.IterEncodedObjects(plumbing.AnyObject) - if err != nil { - return nil, err - } - - return object.NewObjectIter(r.Storer, iter), nil -} - -// Head returns the reference where HEAD is pointing to. -func (r *Repository) Head() (*plumbing.Reference, error) { - return storer.ResolveReference(r.Storer, plumbing.HEAD) -} - -// Reference returns the reference for a given reference name. If resolved is -// true, any symbolic reference will be resolved. -func (r *Repository) Reference(name plumbing.ReferenceName, resolved bool) ( - *plumbing.Reference, error) { - - if resolved { - return storer.ResolveReference(r.Storer, name) - } - - return r.Storer.Reference(name) -} - -// References returns an unsorted ReferenceIter for all references. -func (r *Repository) References() (storer.ReferenceIter, error) { - return r.Storer.IterReferences() -} - -// Worktree returns a worktree based on the given fs, if nil the default -// worktree will be used. -func (r *Repository) Worktree() (*Worktree, error) { - if r.wt == nil { - return nil, ErrIsBareRepository - } - - return &Worktree{r: r, Filesystem: r.wt}, nil -} - -// ResolveRevision resolves revision to corresponding hash. It will always -// resolve to a commit hash, not a tree or annotated tag. 
-// -// Implemented resolvers : HEAD, branch, tag, heads/branch, refs/heads/branch, -// refs/tags/tag, refs/remotes/origin/branch, refs/remotes/origin/HEAD, tilde and caret (HEAD~1, master~^, tag~2, ref/heads/master~1, ...), selection by text (HEAD^{/fix nasty bug}) -func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, error) { - p := revision.NewParserFromString(string(rev)) - - items, err := p.Parse() - - if err != nil { - return nil, err - } - - var commit *object.Commit - - for _, item := range items { - switch item.(type) { - case revision.Ref: - revisionRef := item.(revision.Ref) - - var tryHashes []plumbing.Hash - - maybeHash := plumbing.NewHash(string(revisionRef)) - - if !maybeHash.IsZero() { - tryHashes = append(tryHashes, maybeHash) - } - - for _, rule := range append([]string{"%s"}, plumbing.RefRevParseRules...) { - ref, err := storer.ResolveReference(r.Storer, plumbing.ReferenceName(fmt.Sprintf(rule, revisionRef))) - - if err == nil { - tryHashes = append(tryHashes, ref.Hash()) - break - } - } - - // in ambiguous cases, `git rev-parse` will emit a warning, but - // will always return the oid in preference to a ref; we don't have - // the ability to emit a warning here, so (for speed purposes) - // don't bother to detect the ambiguity either, just return in the - // priority that git would. - gotOne := false - for _, hash := range tryHashes { - commitObj, err := r.CommitObject(hash) - if err == nil { - commit = commitObj - gotOne = true - break - } - - tagObj, err := r.TagObject(hash) - if err == nil { - // If the tag target lookup fails here, this most likely - // represents some sort of repo corruption, so let the - // error bubble up. - tagCommit, err := tagObj.Commit() - if err != nil { - return &plumbing.ZeroHash, err - } - commit = tagCommit - gotOne = true - break - } - } - - if !gotOne { - return &plumbing.ZeroHash, plumbing.ErrReferenceNotFound - } - - case revision.CaretPath: - depth := item.(revision.CaretPath).Depth - - if depth == 0 { - break - } - - iter := commit.Parents() - - c, err := iter.Next() - - if err != nil { - return &plumbing.ZeroHash, err - } - - if depth == 1 { - commit = c - - break - } - - c, err = iter.Next() - - if err != nil { - return &plumbing.ZeroHash, err - } - - commit = c - case revision.TildePath: - for i := 0; i < item.(revision.TildePath).Depth; i++ { - c, err := commit.Parents().Next() - - if err != nil { - return &plumbing.ZeroHash, err - } - - commit = c - } - case revision.CaretReg: - history := object.NewCommitPreorderIter(commit, nil, nil) - - re := item.(revision.CaretReg).Regexp - negate := item.(revision.CaretReg).Negate - - var c *object.Commit - - err := history.ForEach(func(hc *object.Commit) error { - if !negate && re.MatchString(hc.Message) { - c = hc - return storer.ErrStop - } - - if negate && !re.MatchString(hc.Message) { - c = hc - return storer.ErrStop - } - - return nil - }) - if err != nil { - return &plumbing.ZeroHash, err - } - - if c == nil { - return &plumbing.ZeroHash, fmt.Errorf(`No commit message match regexp : "%s"`, re.String()) - } - - commit = c - } - } - - return &commit.Hash, nil -} - -type RepackConfig struct { - // UseRefDeltas configures whether packfile encoder will use reference deltas. - // By default OFSDeltaObject is used. - UseRefDeltas bool - // OnlyDeletePacksOlderThan if set to non-zero value - // selects only objects older than the time provided. 
- OnlyDeletePacksOlderThan time.Time -} - -func (r *Repository) RepackObjects(cfg *RepackConfig) (err error) { - pos, ok := r.Storer.(storer.PackedObjectStorer) - if !ok { - return ErrPackedObjectsNotSupported - } - - // Get the existing object packs. - hs, err := pos.ObjectPacks() - if err != nil { - return err - } - - // Create a new pack. - nh, err := r.createNewObjectPack(cfg) - if err != nil { - return err - } - - // Delete old packs. - for _, h := range hs { - // Skip if new hash is the same as an old one. - if h == nh { - continue - } - err = pos.DeleteOldObjectPackAndIndex(h, cfg.OnlyDeletePacksOlderThan) - if err != nil { - return err - } - } - - return nil -} - -// createNewObjectPack is a helper for RepackObjects taking care -// of creating a new pack. It is used so the the PackfileWriter -// deferred close has the right scope. -func (r *Repository) createNewObjectPack(cfg *RepackConfig) (h plumbing.Hash, err error) { - ow := newObjectWalker(r.Storer) - err = ow.walkAllRefs() - if err != nil { - return h, err - } - objs := make([]plumbing.Hash, 0, len(ow.seen)) - for h := range ow.seen { - objs = append(objs, h) - } - pfw, ok := r.Storer.(storer.PackfileWriter) - if !ok { - return h, fmt.Errorf("Repository storer is not a storer.PackfileWriter") - } - wc, err := pfw.PackfileWriter() - if err != nil { - return h, err - } - defer ioutil.CheckClose(wc, &err) - scfg, err := r.Config() - if err != nil { - return h, err - } - enc := packfile.NewEncoder(wc, r.Storer, cfg.UseRefDeltas) - h, err = enc.Encode(objs, scfg.Pack.Window) - if err != nil { - return h, err - } - - // Delete the packed, loose objects. - if los, ok := r.Storer.(storer.LooseObjectStorer); ok { - err = los.ForEachObjectHash(func(hash plumbing.Hash) error { - if ow.isSeen(hash) { - err = los.DeleteLooseObject(hash) - if err != nil { - return err - } - } - return nil - }) - if err != nil { - return h, err - } - } - - return h, err -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/status.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/status.go deleted file mode 100644 index 7f18e02278b..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/status.go +++ /dev/null @@ -1,79 +0,0 @@ -package git - -import ( - "bytes" - "fmt" - "path/filepath" -) - -// Status represents the current status of a Worktree. -// The key of the map is the path of the file. -type Status map[string]*FileStatus - -// File returns the FileStatus for a given path, if the FileStatus doesn't -// exists a new FileStatus is added to the map using the path as key. -func (s Status) File(path string) *FileStatus { - if _, ok := (s)[path]; !ok { - s[path] = &FileStatus{Worktree: Untracked, Staging: Untracked} - } - - return s[path] -} - -// IsUntracked checks if file for given path is 'Untracked' -func (s Status) IsUntracked(path string) bool { - stat, ok := (s)[filepath.ToSlash(path)] - return ok && stat.Worktree == Untracked -} - -// IsClean returns true if all the files are in Unmodified status. 
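IsClean, documented above and defined next, reduces the whole Status map to a single boolean. A small sketch of how it is typically reached from a repository, assuming the public go-git v5 API (the path is illustrative):

    package main

    import (
        "fmt"

        git "github.com/go-git/go-git/v5"
    )

    func main() {
        r, err := git.PlainOpen("/tmp/repo") // illustrative path
        if err != nil {
            panic(err)
        }

        w, err := r.Worktree() // fails with ErrIsBareRepository on bare repos
        if err != nil {
            panic(err)
        }

        s, err := w.Status()
        if err != nil {
            panic(err)
        }

        if s.IsClean() {
            fmt.Println("working tree clean")
        } else {
            fmt.Print(s) // short-format output, e.g. "?? new-file.txt"
        }
    }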
-func (s Status) IsClean() bool { - for _, status := range s { - if status.Worktree != Unmodified || status.Staging != Unmodified { - return false - } - } - - return true -} - -func (s Status) String() string { - buf := bytes.NewBuffer(nil) - for path, status := range s { - if status.Staging == Unmodified && status.Worktree == Unmodified { - continue - } - - if status.Staging == Renamed { - path = fmt.Sprintf("%s -> %s", path, status.Extra) - } - - fmt.Fprintf(buf, "%c%c %s\n", status.Staging, status.Worktree, path) - } - - return buf.String() -} - -// FileStatus contains the status of a file in the worktree -type FileStatus struct { - // Staging is the status of a file in the staging area - Staging StatusCode - // Worktree is the status of a file in the worktree - Worktree StatusCode - // Extra contains extra information, such as the previous name in a rename - Extra string -} - -// StatusCode status code of a file in the Worktree -type StatusCode byte - -const ( - Unmodified StatusCode = ' ' - Untracked StatusCode = '?' - Modified StatusCode = 'M' - Added StatusCode = 'A' - Deleted StatusCode = 'D' - Renamed StatusCode = 'R' - Copied StatusCode = 'C' - UpdatedButUnmerged StatusCode = 'U' -) diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/config.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/config.go deleted file mode 100644 index 78a646465a2..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/config.go +++ /dev/null @@ -1,48 +0,0 @@ -package filesystem - -import ( - "os" - - "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/storage/filesystem/dotgit" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -type ConfigStorage struct { - dir *dotgit.DotGit -} - -func (c *ConfigStorage) Config() (conf *config.Config, err error) { - f, err := c.dir.Config() - if err != nil { - if os.IsNotExist(err) { - return config.NewConfig(), nil - } - - return nil, err - } - - defer ioutil.CheckClose(f, &err) - return config.ReadConfig(f) -} - -func (c *ConfigStorage) SetConfig(cfg *config.Config) (err error) { - if err = cfg.Validate(); err != nil { - return err - } - - f, err := c.dir.ConfigWriter() - if err != nil { - return err - } - - defer ioutil.CheckClose(f, &err) - - b, err := cfg.Marshal() - if err != nil { - return err - } - - _, err = f.Write(b) - return err -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/deltaobject.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/deltaobject.go deleted file mode 100644 index 6ab2cdf38aa..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/deltaobject.go +++ /dev/null @@ -1,37 +0,0 @@ -package filesystem - -import ( - "github.com/go-git/go-git/v5/plumbing" -) - -type deltaObject struct { - plumbing.EncodedObject - base plumbing.Hash - hash plumbing.Hash - size int64 -} - -func newDeltaObject( - obj plumbing.EncodedObject, - hash plumbing.Hash, - base plumbing.Hash, - size int64) plumbing.DeltaObject { - return &deltaObject{ - EncodedObject: obj, - hash: hash, - base: base, - size: size, - } -} - -func (o *deltaObject) BaseHash() plumbing.Hash { - return o.base -} - -func (o *deltaObject) ActualSize() int64 { - return o.size -} - -func (o *deltaObject) ActualHash() plumbing.Hash { - return o.hash -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go 
b/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go deleted file mode 100644 index 83c76830719..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go +++ /dev/null @@ -1,1111 +0,0 @@ -// https://github.com/git/git/blob/master/Documentation/gitrepository-layout.txt -package dotgit - -import ( - "bufio" - "errors" - "fmt" - "io" - stdioutil "io/ioutil" - "os" - "path/filepath" - "strings" - "time" - - "github.com/go-git/go-billy/v5/osfs" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/storage" - "github.com/go-git/go-git/v5/utils/ioutil" - - "github.com/go-git/go-billy/v5" -) - -const ( - suffix = ".git" - packedRefsPath = "packed-refs" - configPath = "config" - indexPath = "index" - shallowPath = "shallow" - modulePath = "modules" - objectsPath = "objects" - packPath = "pack" - refsPath = "refs" - - tmpPackedRefsPrefix = "._packed-refs" - - packPrefix = "pack-" - packExt = ".pack" - idxExt = ".idx" -) - -var ( - // ErrNotFound is returned by New when the path is not found. - ErrNotFound = errors.New("path not found") - // ErrIdxNotFound is returned by Idxfile when the idx file is not found - ErrIdxNotFound = errors.New("idx file not found") - // ErrPackfileNotFound is returned by Packfile when the packfile is not found - ErrPackfileNotFound = errors.New("packfile not found") - // ErrConfigNotFound is returned by Config when the config is not found - ErrConfigNotFound = errors.New("config file not found") - // ErrPackedRefsDuplicatedRef is returned when a duplicated reference is - // found in the packed-ref file. This is usually the case for corrupted git - // repositories. - ErrPackedRefsDuplicatedRef = errors.New("duplicated ref found in packed-ref file") - // ErrPackedRefsBadFormat is returned when the packed-ref file corrupt. - ErrPackedRefsBadFormat = errors.New("malformed packed-ref") - // ErrSymRefTargetNotFound is returned when a symbolic reference is - // targeting a non-existing object. This usually means the repository - // is corrupt. - ErrSymRefTargetNotFound = errors.New("symbolic reference target not found") - // ErrIsDir is returned when a reference file is attempting to be read, - // but the path specified is a directory. - ErrIsDir = errors.New("reference path is a directory") -) - -// Options holds configuration for the storage. -type Options struct { - // ExclusiveAccess means that the filesystem is not modified externally - // while the repo is open. - ExclusiveAccess bool - // KeepDescriptors makes the file descriptors to be reused but they will - // need to be manually closed calling Close(). - KeepDescriptors bool -} - -// The DotGit type represents a local git repository on disk. This -// type is not zero-value-safe, use the New function to initialize it. -type DotGit struct { - options Options - fs billy.Filesystem - - // incoming object directory information - incomingChecked bool - incomingDirName string - - objectList []plumbing.Hash - objectMap map[plumbing.Hash]struct{} - packList []plumbing.Hash - packMap map[plumbing.Hash]struct{} - - files map[plumbing.Hash]billy.File -} - -// New returns a DotGit value ready to be used. The path argument must -// be the absolute path of a git repository directory (e.g. -// "/foo/bar/.git"). -func New(fs billy.Filesystem) *DotGit { - return NewWithOptions(fs, Options{}) -} - -// NewWithOptions sets non default configuration options. -// See New for complete help. 
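NewWithOptions, documented above and defined next, is the entry point for tuning DotGit behaviour. A sketch that opens an existing .git directory with KeepDescriptors enabled, using only names that appear in this file (the path is illustrative; with this option the caller owns Close):

    package main

    import (
        "fmt"

        "github.com/go-git/go-billy/v5/osfs"
        "github.com/go-git/go-git/v5/storage/filesystem/dotgit"
    )

    func main() {
        fs := osfs.New("/tmp/repo/.git") // illustrative path

        // KeepDescriptors trades open file handles for fewer open/close
        // cycles; descriptors must then be released with Close.
        d := dotgit.NewWithOptions(fs, dotgit.Options{KeepDescriptors: true})
        defer d.Close()

        refs, err := d.Refs()
        if err != nil {
            panic(err)
        }
        for _, ref := range refs {
            fmt.Println(ref)
        }
    }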
-func NewWithOptions(fs billy.Filesystem, o Options) *DotGit { - return &DotGit{ - options: o, - fs: fs, - } -} - -// Initialize creates all the folder scaffolding. -func (d *DotGit) Initialize() error { - mustExists := []string{ - d.fs.Join("objects", "info"), - d.fs.Join("objects", "pack"), - d.fs.Join("refs", "heads"), - d.fs.Join("refs", "tags"), - } - - for _, path := range mustExists { - _, err := d.fs.Stat(path) - if err == nil { - continue - } - - if !os.IsNotExist(err) { - return err - } - - if err := d.fs.MkdirAll(path, os.ModeDir|os.ModePerm); err != nil { - return err - } - } - - return nil -} - -// Close closes all opened files. -func (d *DotGit) Close() error { - var firstError error - if d.files != nil { - for _, f := range d.files { - err := f.Close() - if err != nil && firstError == nil { - firstError = err - continue - } - } - - d.files = nil - } - - if firstError != nil { - return firstError - } - - return nil -} - -// ConfigWriter returns a file pointer for write to the config file -func (d *DotGit) ConfigWriter() (billy.File, error) { - return d.fs.Create(configPath) -} - -// Config returns a file pointer for read to the config file -func (d *DotGit) Config() (billy.File, error) { - return d.fs.Open(configPath) -} - -// IndexWriter returns a file pointer for write to the index file -func (d *DotGit) IndexWriter() (billy.File, error) { - return d.fs.Create(indexPath) -} - -// Index returns a file pointer for read to the index file -func (d *DotGit) Index() (billy.File, error) { - return d.fs.Open(indexPath) -} - -// ShallowWriter returns a file pointer for write to the shallow file -func (d *DotGit) ShallowWriter() (billy.File, error) { - return d.fs.Create(shallowPath) -} - -// Shallow returns a file pointer for read to the shallow file -func (d *DotGit) Shallow() (billy.File, error) { - f, err := d.fs.Open(shallowPath) - if err != nil { - if os.IsNotExist(err) { - return nil, nil - } - - return nil, err - } - - return f, nil -} - -// NewObjectPack return a writer for a new packfile, it saves the packfile to -// disk and also generates and save the index for the given packfile. -func (d *DotGit) NewObjectPack() (*PackWriter, error) { - d.cleanPackList() - return newPackWrite(d.fs) -} - -// ObjectPacks returns the list of availables packfiles -func (d *DotGit) ObjectPacks() ([]plumbing.Hash, error) { - if !d.options.ExclusiveAccess { - return d.objectPacks() - } - - err := d.genPackList() - if err != nil { - return nil, err - } - - return d.packList, nil -} - -func (d *DotGit) objectPacks() ([]plumbing.Hash, error) { - packDir := d.fs.Join(objectsPath, packPath) - files, err := d.fs.ReadDir(packDir) - if err != nil { - if os.IsNotExist(err) { - return nil, nil - } - - return nil, err - } - - var packs []plumbing.Hash - for _, f := range files { - n := f.Name() - if !strings.HasSuffix(n, packExt) || !strings.HasPrefix(n, packPrefix) { - continue - } - - h := plumbing.NewHash(n[5 : len(n)-5]) //pack-(hash).pack - if h.IsZero() { - // Ignore files with badly-formatted names. 
- continue - } - packs = append(packs, h) - } - - return packs, nil -} - -func (d *DotGit) objectPackPath(hash plumbing.Hash, extension string) string { - return d.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s.%s", hash.String(), extension)) -} - -func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.File, error) { - if d.options.KeepDescriptors && extension == "pack" { - if d.files == nil { - d.files = make(map[plumbing.Hash]billy.File) - } - - f, ok := d.files[hash] - if ok { - return f, nil - } - } - - err := d.hasPack(hash) - if err != nil { - return nil, err - } - - path := d.objectPackPath(hash, extension) - pack, err := d.fs.Open(path) - if err != nil { - if os.IsNotExist(err) { - return nil, ErrPackfileNotFound - } - - return nil, err - } - - if d.options.KeepDescriptors && extension == "pack" { - d.files[hash] = pack - } - - return pack, nil -} - -// ObjectPack returns a fs.File of the given packfile -func (d *DotGit) ObjectPack(hash plumbing.Hash) (billy.File, error) { - err := d.hasPack(hash) - if err != nil { - return nil, err - } - - return d.objectPackOpen(hash, `pack`) -} - -// ObjectPackIdx returns a fs.File of the index file for a given packfile -func (d *DotGit) ObjectPackIdx(hash plumbing.Hash) (billy.File, error) { - err := d.hasPack(hash) - if err != nil { - return nil, err - } - - return d.objectPackOpen(hash, `idx`) -} - -func (d *DotGit) DeleteOldObjectPackAndIndex(hash plumbing.Hash, t time.Time) error { - d.cleanPackList() - - path := d.objectPackPath(hash, `pack`) - if !t.IsZero() { - fi, err := d.fs.Stat(path) - if err != nil { - return err - } - // too new, skip deletion. - if !fi.ModTime().Before(t) { - return nil - } - } - err := d.fs.Remove(path) - if err != nil { - return err - } - return d.fs.Remove(d.objectPackPath(hash, `idx`)) -} - -// NewObject return a writer for a new object file. -func (d *DotGit) NewObject() (*ObjectWriter, error) { - d.cleanObjectList() - - return newObjectWriter(d.fs) -} - -// Objects returns a slice with the hashes of objects found under the -// .git/objects/ directory. -func (d *DotGit) Objects() ([]plumbing.Hash, error) { - if d.options.ExclusiveAccess { - err := d.genObjectList() - if err != nil { - return nil, err - } - - return d.objectList, nil - } - - var objects []plumbing.Hash - err := d.ForEachObjectHash(func(hash plumbing.Hash) error { - objects = append(objects, hash) - return nil - }) - if err != nil { - return nil, err - } - return objects, nil -} - -// ForEachObjectHash iterates over the hashes of objects found under the -// .git/objects/ directory and executes the provided function. -func (d *DotGit) ForEachObjectHash(fun func(plumbing.Hash) error) error { - if !d.options.ExclusiveAccess { - return d.forEachObjectHash(fun) - } - - err := d.genObjectList() - if err != nil { - return err - } - - for _, h := range d.objectList { - err := fun(h) - if err != nil { - return err - } - } - - return nil -} - -func (d *DotGit) forEachObjectHash(fun func(plumbing.Hash) error) error { - files, err := d.fs.ReadDir(objectsPath) - if err != nil { - if os.IsNotExist(err) { - return nil - } - - return err - } - - for _, f := range files { - if f.IsDir() && len(f.Name()) == 2 && isHex(f.Name()) { - base := f.Name() - d, err := d.fs.ReadDir(d.fs.Join(objectsPath, base)) - if err != nil { - return err - } - - for _, o := range d { - h := plumbing.NewHash(base + o.Name()) - if h.IsZero() { - // Ignore files with badly-formatted names. 
- continue - } - err = fun(h) - if err != nil { - return err - } - } - } - } - - return nil -} - -func (d *DotGit) cleanObjectList() { - d.objectMap = nil - d.objectList = nil -} - -func (d *DotGit) genObjectList() error { - if d.objectMap != nil { - return nil - } - - d.objectMap = make(map[plumbing.Hash]struct{}) - return d.forEachObjectHash(func(h plumbing.Hash) error { - d.objectList = append(d.objectList, h) - d.objectMap[h] = struct{}{} - - return nil - }) -} - -func (d *DotGit) hasObject(h plumbing.Hash) error { - if !d.options.ExclusiveAccess { - return nil - } - - err := d.genObjectList() - if err != nil { - return err - } - - _, ok := d.objectMap[h] - if !ok { - return plumbing.ErrObjectNotFound - } - - return nil -} - -func (d *DotGit) cleanPackList() { - d.packMap = nil - d.packList = nil -} - -func (d *DotGit) genPackList() error { - if d.packMap != nil { - return nil - } - - op, err := d.objectPacks() - if err != nil { - return err - } - - d.packMap = make(map[plumbing.Hash]struct{}) - d.packList = nil - - for _, h := range op { - d.packList = append(d.packList, h) - d.packMap[h] = struct{}{} - } - - return nil -} - -func (d *DotGit) hasPack(h plumbing.Hash) error { - if !d.options.ExclusiveAccess { - return nil - } - - err := d.genPackList() - if err != nil { - return err - } - - _, ok := d.packMap[h] - if !ok { - return ErrPackfileNotFound - } - - return nil -} - -func (d *DotGit) objectPath(h plumbing.Hash) string { - hash := h.String() - return d.fs.Join(objectsPath, hash[0:2], hash[2:40]) -} - -// incomingObjectPath is intended to add support for a git pre-receive hook -// to be written it adds support for go-git to find objects in an "incoming" -// directory, so that the library can be used to write a pre-receive hook -// that deals with the incoming objects. -// -// More on git hooks found here : https://git-scm.com/docs/githooks -// More on 'quarantine'/incoming directory here: -// https://git-scm.com/docs/git-receive-pack -func (d *DotGit) incomingObjectPath(h plumbing.Hash) string { - hString := h.String() - - if d.incomingDirName == "" { - return d.fs.Join(objectsPath, hString[0:2], hString[2:40]) - } - - return d.fs.Join(objectsPath, d.incomingDirName, hString[0:2], hString[2:40]) -} - -// hasIncomingObjects searches for an incoming directory and keeps its name -// so it doesn't have to be found each time an object is accessed. 
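Both objectPath and incomingObjectPath above shard a 40-character hex hash the same way: a two-character directory plus a 38-character file name, the standard loose-object layout. A self-contained sketch of that computation (the hash and helper name are illustrative):

    package main

    import (
        "fmt"
        "path/filepath"
    )

    // objectPathFor mirrors objectPath above: "89e5..." becomes
    // "objects/89/e5..." (separator is OS-specific with filepath.Join).
    func objectPathFor(hash string) string {
        return filepath.Join("objects", hash[0:2], hash[2:40])
    }

    func main() {
        fmt.Println(objectPathFor("89e5a3e7d8d4a8e0e5f1b9cfb8c3a1d2e4f60718"))
    }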
-func (d *DotGit) hasIncomingObjects() bool { - if !d.incomingChecked { - directoryContents, err := d.fs.ReadDir(objectsPath) - if err == nil { - for _, file := range directoryContents { - if strings.HasPrefix(file.Name(), "incoming-") && file.IsDir() { - d.incomingDirName = file.Name() - } - } - } - - d.incomingChecked = true - } - - return d.incomingDirName != "" -} - -// Object returns a fs.File pointing the object file, if exists -func (d *DotGit) Object(h plumbing.Hash) (billy.File, error) { - err := d.hasObject(h) - if err != nil { - return nil, err - } - - obj1, err1 := d.fs.Open(d.objectPath(h)) - if os.IsNotExist(err1) && d.hasIncomingObjects() { - obj2, err2 := d.fs.Open(d.incomingObjectPath(h)) - if err2 != nil { - return obj1, err1 - } - return obj2, err2 - } - return obj1, err1 -} - -// ObjectStat returns a os.FileInfo pointing the object file, if exists -func (d *DotGit) ObjectStat(h plumbing.Hash) (os.FileInfo, error) { - err := d.hasObject(h) - if err != nil { - return nil, err - } - - obj1, err1 := d.fs.Stat(d.objectPath(h)) - if os.IsNotExist(err1) && d.hasIncomingObjects() { - obj2, err2 := d.fs.Stat(d.incomingObjectPath(h)) - if err2 != nil { - return obj1, err1 - } - return obj2, err2 - } - return obj1, err1 -} - -// ObjectDelete removes the object file, if exists -func (d *DotGit) ObjectDelete(h plumbing.Hash) error { - d.cleanObjectList() - - err1 := d.fs.Remove(d.objectPath(h)) - if os.IsNotExist(err1) && d.hasIncomingObjects() { - err2 := d.fs.Remove(d.incomingObjectPath(h)) - if err2 != nil { - return err1 - } - return err2 - } - return err1 -} - -func (d *DotGit) readReferenceFrom(rd io.Reader, name string) (ref *plumbing.Reference, err error) { - b, err := stdioutil.ReadAll(rd) - if err != nil { - return nil, err - } - - line := strings.TrimSpace(string(b)) - return plumbing.NewReferenceFromStrings(name, line), nil -} - -func (d *DotGit) checkReferenceAndTruncate(f billy.File, old *plumbing.Reference) error { - if old == nil { - return nil - } - ref, err := d.readReferenceFrom(f, old.Name().String()) - if err != nil { - return err - } - if ref.Hash() != old.Hash() { - return storage.ErrReferenceHasChanged - } - _, err = f.Seek(0, io.SeekStart) - if err != nil { - return err - } - return f.Truncate(0) -} - -func (d *DotGit) SetRef(r, old *plumbing.Reference) error { - var content string - switch r.Type() { - case plumbing.SymbolicReference: - content = fmt.Sprintf("ref: %s\n", r.Target()) - case plumbing.HashReference: - content = fmt.Sprintln(r.Hash().String()) - } - - fileName := r.Name().String() - - return d.setRef(fileName, content, old) -} - -// Refs scans the git directory collecting references, which it returns. -// Symbolic references are resolved and included in the output. -func (d *DotGit) Refs() ([]*plumbing.Reference, error) { - var refs []*plumbing.Reference - var seen = make(map[plumbing.ReferenceName]bool) - if err := d.addRefsFromRefDir(&refs, seen); err != nil { - return nil, err - } - - if err := d.addRefsFromPackedRefs(&refs, seen); err != nil { - return nil, err - } - - if err := d.addRefFromHEAD(&refs); err != nil { - return nil, err - } - - return refs, nil -} - -// Ref returns the reference for a given reference name. 
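SetRef above serializes a reference into one of two one-line encodings before handing it to setRef: "ref: <target>\n" for a symbolic reference, or the 40-character hex hash plus newline for a hash reference. A standalone sketch producing both (the hash is illustrative):

    package main

    import (
        "fmt"

        "github.com/go-git/go-git/v5/plumbing"
    )

    func main() {
        sym := plumbing.NewSymbolicReference(plumbing.HEAD, "refs/heads/master")
        hash := plumbing.NewReferenceFromStrings("refs/heads/master",
            "89e5a3e7d8d4a8e0e5f1b9cfb8c3a1d2e4f60718") // illustrative hash

        fmt.Printf("ref: %s\n", sym.Target()) // symbolic encoding
        fmt.Println(hash.Hash())              // hash encoding
    }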
-func (d *DotGit) Ref(name plumbing.ReferenceName) (*plumbing.Reference, error) { - ref, err := d.readReferenceFile(".", name.String()) - if err == nil { - return ref, nil - } - - return d.packedRef(name) -} - -func (d *DotGit) findPackedRefsInFile(f billy.File) ([]*plumbing.Reference, error) { - s := bufio.NewScanner(f) - var refs []*plumbing.Reference - for s.Scan() { - ref, err := d.processLine(s.Text()) - if err != nil { - return nil, err - } - - if ref != nil { - refs = append(refs, ref) - } - } - - return refs, s.Err() -} - -func (d *DotGit) findPackedRefs() (r []*plumbing.Reference, err error) { - f, err := d.fs.Open(packedRefsPath) - if err != nil { - if os.IsNotExist(err) { - return nil, nil - } - return nil, err - } - - defer ioutil.CheckClose(f, &err) - return d.findPackedRefsInFile(f) -} - -func (d *DotGit) packedRef(name plumbing.ReferenceName) (*plumbing.Reference, error) { - refs, err := d.findPackedRefs() - if err != nil { - return nil, err - } - - for _, ref := range refs { - if ref.Name() == name { - return ref, nil - } - } - - return nil, plumbing.ErrReferenceNotFound -} - -// RemoveRef removes a reference by name. -func (d *DotGit) RemoveRef(name plumbing.ReferenceName) error { - path := d.fs.Join(".", name.String()) - _, err := d.fs.Stat(path) - if err == nil { - err = d.fs.Remove(path) - // Drop down to remove it from the packed refs file, too. - } - - if err != nil && !os.IsNotExist(err) { - return err - } - - return d.rewritePackedRefsWithoutRef(name) -} - -func (d *DotGit) addRefsFromPackedRefs(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) (err error) { - packedRefs, err := d.findPackedRefs() - if err != nil { - return err - } - - for _, ref := range packedRefs { - if !seen[ref.Name()] { - *refs = append(*refs, ref) - seen[ref.Name()] = true - } - } - return nil -} - -func (d *DotGit) addRefsFromPackedRefsFile(refs *[]*plumbing.Reference, f billy.File, seen map[plumbing.ReferenceName]bool) (err error) { - packedRefs, err := d.findPackedRefsInFile(f) - if err != nil { - return err - } - - for _, ref := range packedRefs { - if !seen[ref.Name()] { - *refs = append(*refs, ref) - seen[ref.Name()] = true - } - } - return nil -} - -func (d *DotGit) openAndLockPackedRefs(doCreate bool) ( - pr billy.File, err error) { - var f billy.File - defer func() { - if err != nil && f != nil { - ioutil.CheckClose(f, &err) - } - }() - - // File mode is retrieved from a constant defined in the target specific - // files (dotgit_rewrite_packed_refs_*). Some modes are not available - // in all filesystems. - openFlags := d.openAndLockPackedRefsMode() - if doCreate { - openFlags |= os.O_CREATE - } - - // Keep trying to open and lock the file until we're sure the file - // didn't change between the open and the lock. - for { - f, err = d.fs.OpenFile(packedRefsPath, openFlags, 0600) - if err != nil { - if os.IsNotExist(err) && !doCreate { - return nil, nil - } - - return nil, err - } - fi, err := d.fs.Stat(packedRefsPath) - if err != nil { - return nil, err - } - mtime := fi.ModTime() - - err = f.Lock() - if err != nil { - return nil, err - } - - fi, err = d.fs.Stat(packedRefsPath) - if err != nil { - return nil, err - } - if mtime.Equal(fi.ModTime()) { - break - } - // The file has changed since we opened it. Close and retry. 
- err = f.Close() - if err != nil { - return nil, err - } - } - return f, nil -} - -func (d *DotGit) rewritePackedRefsWithoutRef(name plumbing.ReferenceName) (err error) { - pr, err := d.openAndLockPackedRefs(false) - if err != nil { - return err - } - if pr == nil { - return nil - } - defer ioutil.CheckClose(pr, &err) - - // Creating the temp file in the same directory as the target file - // improves our chances for rename operation to be atomic. - tmp, err := d.fs.TempFile("", tmpPackedRefsPrefix) - if err != nil { - return err - } - tmpName := tmp.Name() - defer func() { - ioutil.CheckClose(tmp, &err) - _ = d.fs.Remove(tmpName) // don't check err, we might have renamed it - }() - - s := bufio.NewScanner(pr) - found := false - for s.Scan() { - line := s.Text() - ref, err := d.processLine(line) - if err != nil { - return err - } - - if ref != nil && ref.Name() == name { - found = true - continue - } - - if _, err := fmt.Fprintln(tmp, line); err != nil { - return err - } - } - - if err := s.Err(); err != nil { - return err - } - - if !found { - return nil - } - - return d.rewritePackedRefsWhileLocked(tmp, pr) -} - -// process lines from a packed-refs file -func (d *DotGit) processLine(line string) (*plumbing.Reference, error) { - if len(line) == 0 { - return nil, nil - } - - switch line[0] { - case '#': // comment - ignore - return nil, nil - case '^': // annotated tag commit of the previous line - ignore - return nil, nil - default: - ws := strings.Split(line, " ") // hash then ref - if len(ws) != 2 { - return nil, ErrPackedRefsBadFormat - } - - return plumbing.NewReferenceFromStrings(ws[1], ws[0]), nil - } -} - -func (d *DotGit) addRefsFromRefDir(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) error { - return d.walkReferencesTree(refs, []string{refsPath}, seen) -} - -func (d *DotGit) walkReferencesTree(refs *[]*plumbing.Reference, relPath []string, seen map[plumbing.ReferenceName]bool) error { - files, err := d.fs.ReadDir(d.fs.Join(relPath...)) - if err != nil { - if os.IsNotExist(err) { - return nil - } - - return err - } - - for _, f := range files { - newRelPath := append(append([]string(nil), relPath...), f.Name()) - if f.IsDir() { - if err = d.walkReferencesTree(refs, newRelPath, seen); err != nil { - return err - } - - continue - } - - ref, err := d.readReferenceFile(".", strings.Join(newRelPath, "/")) - if err != nil { - return err - } - - if ref != nil && !seen[ref.Name()] { - *refs = append(*refs, ref) - seen[ref.Name()] = true - } - } - - return nil -} - -func (d *DotGit) addRefFromHEAD(refs *[]*plumbing.Reference) error { - ref, err := d.readReferenceFile(".", "HEAD") - if err != nil { - if os.IsNotExist(err) { - return nil - } - - return err - } - - *refs = append(*refs, ref) - return nil -} - -func (d *DotGit) readReferenceFile(path, name string) (ref *plumbing.Reference, err error) { - path = d.fs.Join(path, d.fs.Join(strings.Split(name, "/")...)) - st, err := d.fs.Stat(path) - if err != nil { - return nil, err - } - if st.IsDir() { - return nil, ErrIsDir - } - - f, err := d.fs.Open(path) - if err != nil { - return nil, err - } - defer ioutil.CheckClose(f, &err) - - return d.readReferenceFrom(f, name) -} - -func (d *DotGit) CountLooseRefs() (int, error) { - var refs []*plumbing.Reference - var seen = make(map[plumbing.ReferenceName]bool) - if err := d.addRefsFromRefDir(&refs, seen); err != nil { - return 0, err - } - - return len(refs), nil -} - -// PackRefs packs all loose refs into the packed-refs file. 
-// -// This implementation only works under the assumption that the view -// of the file system won't be updated during this operation. This -// strategy would not work on a general file system though, without -// locking each loose reference and checking it again before deleting -// the file, because otherwise an updated reference could sneak in and -// then be deleted by the packed-refs process. Alternatively, every -// ref update could also lock packed-refs, so only one lock is -// required during ref-packing. But that would worsen performance in -// the common case. -// -// TODO: add an "all" boolean like the `git pack-refs --all` flag. -// When `all` is false, it would only pack refs that have already been -// packed, plus all tags. -func (d *DotGit) PackRefs() (err error) { - // Lock packed-refs, and create it if it doesn't exist yet. - f, err := d.openAndLockPackedRefs(true) - if err != nil { - return err - } - defer ioutil.CheckClose(f, &err) - - // Gather all refs using addRefsFromRefDir and addRefsFromPackedRefs. - var refs []*plumbing.Reference - seen := make(map[plumbing.ReferenceName]bool) - if err = d.addRefsFromRefDir(&refs, seen); err != nil { - return err - } - if len(refs) == 0 { - // Nothing to do! - return nil - } - numLooseRefs := len(refs) - if err = d.addRefsFromPackedRefsFile(&refs, f, seen); err != nil { - return err - } - - // Write them all to a new temp packed-refs file. - tmp, err := d.fs.TempFile("", tmpPackedRefsPrefix) - if err != nil { - return err - } - tmpName := tmp.Name() - defer func() { - ioutil.CheckClose(tmp, &err) - _ = d.fs.Remove(tmpName) // don't check err, we might have renamed it - }() - - w := bufio.NewWriter(tmp) - for _, ref := range refs { - _, err = w.WriteString(ref.String() + "\n") - if err != nil { - return err - } - } - err = w.Flush() - if err != nil { - return err - } - - // Rename the temp packed-refs file. - err = d.rewritePackedRefsWhileLocked(tmp, f) - if err != nil { - return err - } - - // Delete all the loose refs, while still holding the packed-refs - // lock. - for _, ref := range refs[:numLooseRefs] { - path := d.fs.Join(".", ref.Name().String()) - err = d.fs.Remove(path) - if err != nil && !os.IsNotExist(err) { - return err - } - } - - return nil -} - -// Module return a billy.Filesystem pointing to the module folder -func (d *DotGit) Module(name string) (billy.Filesystem, error) { - return d.fs.Chroot(d.fs.Join(modulePath, name)) -} - -// Alternates returns DotGit(s) based off paths in objects/info/alternates if -// available. This can be used to checks if it's a shared repository. -func (d *DotGit) Alternates() ([]*DotGit, error) { - altpath := d.fs.Join("objects", "info", "alternates") - f, err := d.fs.Open(altpath) - if err != nil { - return nil, err - } - defer f.Close() - - var alternates []*DotGit - - // Read alternate paths line-by-line and create DotGit objects. - scanner := bufio.NewScanner(f) - for scanner.Scan() { - path := scanner.Text() - if !filepath.IsAbs(path) { - // For relative paths, we can perform an internal conversion to - // slash so that they work cross-platform. - slashPath := filepath.ToSlash(path) - // If the path is not absolute, it must be relative to object - // database (.git/objects/info). - // https://www.kernel.org/pub/software/scm/git/docs/gitrepository-layout.html - // Hence, derive a path relative to DotGit's root. - // "../../../reponame/.git/" -> "../../reponame/.git" - // Remove the first ../ - relpath := filepath.Join(strings.Split(slashPath, "/")[1:]...) 
- normalPath := filepath.FromSlash(relpath) - path = filepath.Join(d.fs.Root(), normalPath) - } - fs := osfs.New(filepath.Dir(path)) - alternates = append(alternates, New(fs)) - } - - if err = scanner.Err(); err != nil { - return nil, err - } - - return alternates, nil -} - -// Fs returns the underlying filesystem of the DotGit folder. -func (d *DotGit) Fs() billy.Filesystem { - return d.fs -} - -func isHex(s string) bool { - for _, b := range []byte(s) { - if isNum(b) { - continue - } - if isHexAlpha(b) { - continue - } - - return false - } - - return true -} - -func isNum(b byte) bool { - return b >= '0' && b <= '9' -} - -func isHexAlpha(b byte) bool { - return b >= 'a' && b <= 'f' || b >= 'A' && b <= 'F' -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go deleted file mode 100644 index 43263eadfa0..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go +++ /dev/null @@ -1,81 +0,0 @@ -package dotgit - -import ( - "io" - "os" - "runtime" - - "github.com/go-git/go-billy/v5" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -func (d *DotGit) openAndLockPackedRefsMode() int { - if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) { - return os.O_RDWR - } - - return os.O_RDONLY -} - -func (d *DotGit) rewritePackedRefsWhileLocked( - tmp billy.File, pr billy.File) error { - // Try plain rename. If we aren't using the bare Windows filesystem as the - // storage layer, we might be able to get away with a rename over a locked - // file. - err := d.fs.Rename(tmp.Name(), pr.Name()) - if err == nil { - return nil - } - - // If we are in a filesystem that does not support rename (e.g. sivafs) - // a full copy is done. - if err == billy.ErrNotSupported { - return d.copyNewFile(tmp, pr) - } - - if runtime.GOOS != "windows" { - return err - } - - // Otherwise, Windows doesn't let us rename over a locked file, so - // we have to do a straight copy. Unfortunately this could result - // in a partially-written file if the process fails before the - // copy completes. 
-	return d.copyToExistingFile(tmp, pr)
-}
-
-func (d *DotGit) copyToExistingFile(tmp, pr billy.File) error {
-	_, err := pr.Seek(0, io.SeekStart)
-	if err != nil {
-		return err
-	}
-	err = pr.Truncate(0)
-	if err != nil {
-		return err
-	}
-	_, err = tmp.Seek(0, io.SeekStart)
-	if err != nil {
-		return err
-	}
-	_, err = io.Copy(pr, tmp)
-
-	return err
-}
-
-func (d *DotGit) copyNewFile(tmp billy.File, pr billy.File) (err error) {
-	prWrite, err := d.fs.Create(pr.Name())
-	if err != nil {
-		return err
-	}
-
-	defer ioutil.CheckClose(prWrite, &err)
-
-	_, err = tmp.Seek(0, io.SeekStart)
-	if err != nil {
-		return err
-	}
-
-	_, err = io.Copy(prWrite, tmp)
-
-	return err
-}
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_setref.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_setref.go
deleted file mode 100644
index c057f5c4865..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_setref.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package dotgit
-
-import (
-	"fmt"
-	"os"
-
-	"github.com/go-git/go-git/v5/plumbing"
-	"github.com/go-git/go-git/v5/utils/ioutil"
-
-	"github.com/go-git/go-billy/v5"
-)
-
-func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) (err error) {
-	if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) {
-		return d.setRefRwfs(fileName, content, old)
-	}
-
-	return d.setRefNorwfs(fileName, content, old)
-}
-
-func (d *DotGit) setRefRwfs(fileName, content string, old *plumbing.Reference) (err error) {
-	// If we are not checking an old ref, just truncate the file.
-	mode := os.O_RDWR | os.O_CREATE
-	if old == nil {
-		mode |= os.O_TRUNC
-	}
-
-	f, err := d.fs.OpenFile(fileName, mode, 0666)
-	if err != nil {
-		return err
-	}
-
-	defer ioutil.CheckClose(f, &err)
-
-	// Lock is unlocked by the deferred Close above. This is because Unlock
-	// does not imply a fsync and thus there would be a race between
-	// Unlock+Close and other concurrent writers. Adding Sync to go-billy
-	// could work, but this is better (and avoids superfluous syncs).
-	err = f.Lock()
-	if err != nil {
-		return err
-	}
-
-	// Calling this is a no-op when old is nil.
-	err = d.checkReferenceAndTruncate(f, old)
-	if err != nil {
-		return err
-	}
-
-	_, err = f.Write([]byte(content))
-	return err
-}
-
-// There are some filesystems that don't support opening files in RDWR mode.
-// On these filesystems the standard SetRef function cannot be used, as it
-// reads the reference file to check that it has not been modified before
-// updating it.
-
-// This version of the function writes the reference without those extra
-// checks, making it compatible with these simpler filesystems. This is
-// usually not a problem, as they should be accessed by only one process at
-// a time.
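The dispatch in setRef above hinges on go-billy's capability probe. A minimal sketch of the same check against an in-memory filesystem (memfs is used purely for illustration; the printed strings are not go-git output):

    package main

    import (
        "fmt"

        "github.com/go-git/go-billy/v5"
        "github.com/go-git/go-billy/v5/memfs"
    )

    func main() {
        fs := memfs.New() // in-memory filesystem, for illustration only

        // The same probe setRef uses to pick a write strategy.
        if billy.CapabilityCheck(fs, billy.ReadAndWriteCapability) {
            fmt.Println("O_RDWR supported: verify the old ref before writing")
        } else {
            fmt.Println("no O_RDWR: fall back to the plain-write path")
        }
    }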
-func (d *DotGit) setRefNorwfs(fileName, content string, old *plumbing.Reference) error { - _, err := d.fs.Stat(fileName) - if err == nil && old != nil { - fRead, err := d.fs.Open(fileName) - if err != nil { - return err - } - - ref, err := d.readReferenceFrom(fRead, old.Name().String()) - fRead.Close() - - if err != nil { - return err - } - - if ref.Hash() != old.Hash() { - return fmt.Errorf("reference has changed concurrently") - } - } - - f, err := d.fs.Create(fileName) - if err != nil { - return err - } - - defer f.Close() - - _, err = f.Write([]byte(content)) - return err -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/writers.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/writers.go deleted file mode 100644 index e2ede938cce..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/writers.go +++ /dev/null @@ -1,284 +0,0 @@ -package dotgit - -import ( - "fmt" - "io" - "sync/atomic" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/idxfile" - "github.com/go-git/go-git/v5/plumbing/format/objfile" - "github.com/go-git/go-git/v5/plumbing/format/packfile" - - "github.com/go-git/go-billy/v5" -) - -// PackWriter is a io.Writer that generates the packfile index simultaneously, -// a packfile.Decoder is used with a file reader to read the file being written -// this operation is synchronized with the write operations. -// The packfile is written in a temp file, when Close is called this file -// is renamed/moved (depends on the Filesystem implementation) to the final -// location, if the PackWriter is not used, nothing is written -type PackWriter struct { - Notify func(plumbing.Hash, *idxfile.Writer) - - fs billy.Filesystem - fr, fw billy.File - synced *syncedReader - checksum plumbing.Hash - parser *packfile.Parser - writer *idxfile.Writer - result chan error -} - -func newPackWrite(fs billy.Filesystem) (*PackWriter, error) { - fw, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_pack_") - if err != nil { - return nil, err - } - - fr, err := fs.Open(fw.Name()) - if err != nil { - return nil, err - } - - writer := &PackWriter{ - fs: fs, - fw: fw, - fr: fr, - synced: newSyncedReader(fw, fr), - result: make(chan error), - } - - go writer.buildIndex() - return writer, nil -} - -func (w *PackWriter) buildIndex() { - s := packfile.NewScanner(w.synced) - w.writer = new(idxfile.Writer) - var err error - w.parser, err = packfile.NewParser(s, w.writer) - if err != nil { - w.result <- err - return - } - - checksum, err := w.parser.Parse() - if err != nil { - w.result <- err - return - } - - w.checksum = checksum - w.result <- err -} - -// waitBuildIndex waits until buildIndex function finishes, this can terminate -// with a packfile.ErrEmptyPackfile, this means that nothing was written so we -// ignore the error -func (w *PackWriter) waitBuildIndex() error { - err := <-w.result - if err == packfile.ErrEmptyPackfile { - return nil - } - - return err -} - -func (w *PackWriter) Write(p []byte) (int, error) { - return w.synced.Write(p) -} - -// Close closes all the file descriptors and save the final packfile, if nothing -// was written, the tempfiles are deleted without writing a packfile. 
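Close, documented above and defined next, finalizes the temporary packfile by renaming it into place (see save further below). A generic, simplified sketch of that write-to-temp-then-rename pattern, independent of go-git (the function name and payload are illustrative; the real PackWriter also builds the idx concurrently):

    package main

    import (
        "os"
        "path/filepath"
    )

    // writeAtomically writes into a temp file in the destination directory,
    // then renames it into place so readers never see a half-written file.
    func writeAtomically(dir, name string, data []byte) error {
        tmp, err := os.CreateTemp(dir, "tmp_pack_")
        if err != nil {
            return err
        }
        if _, err := tmp.Write(data); err != nil {
            tmp.Close()
            os.Remove(tmp.Name())
            return err
        }
        if err := tmp.Close(); err != nil {
            os.Remove(tmp.Name())
            return err
        }
        return os.Rename(tmp.Name(), filepath.Join(dir, name))
    }

    func main() {
        if err := writeAtomically(os.TempDir(), "pack-demo.pack", []byte("PACK")); err != nil {
            panic(err)
        }
    }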
-func (w *PackWriter) Close() error { - defer func() { - if w.Notify != nil && w.writer != nil && w.writer.Finished() { - w.Notify(w.checksum, w.writer) - } - - close(w.result) - }() - - if err := w.synced.Close(); err != nil { - return err - } - - if err := w.waitBuildIndex(); err != nil { - return err - } - - if err := w.fr.Close(); err != nil { - return err - } - - if err := w.fw.Close(); err != nil { - return err - } - - if w.writer == nil || !w.writer.Finished() { - return w.clean() - } - - return w.save() -} - -func (w *PackWriter) clean() error { - return w.fs.Remove(w.fw.Name()) -} - -func (w *PackWriter) save() error { - base := w.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s", w.checksum)) - idx, err := w.fs.Create(fmt.Sprintf("%s.idx", base)) - if err != nil { - return err - } - - if err := w.encodeIdx(idx); err != nil { - return err - } - - if err := idx.Close(); err != nil { - return err - } - - return w.fs.Rename(w.fw.Name(), fmt.Sprintf("%s.pack", base)) -} - -func (w *PackWriter) encodeIdx(writer io.Writer) error { - idx, err := w.writer.Index() - if err != nil { - return err - } - - e := idxfile.NewEncoder(writer) - _, err = e.Encode(idx) - return err -} - -type syncedReader struct { - w io.Writer - r io.ReadSeeker - - blocked, done uint32 - written, read uint64 - news chan bool -} - -func newSyncedReader(w io.Writer, r io.ReadSeeker) *syncedReader { - return &syncedReader{ - w: w, - r: r, - news: make(chan bool), - } -} - -func (s *syncedReader) Write(p []byte) (n int, err error) { - defer func() { - written := atomic.AddUint64(&s.written, uint64(n)) - read := atomic.LoadUint64(&s.read) - if written > read { - s.wake() - } - }() - - n, err = s.w.Write(p) - return -} - -func (s *syncedReader) Read(p []byte) (n int, err error) { - defer func() { atomic.AddUint64(&s.read, uint64(n)) }() - - for { - s.sleep() - n, err = s.r.Read(p) - if err == io.EOF && !s.isDone() && n == 0 { - continue - } - - break - } - - return -} - -func (s *syncedReader) isDone() bool { - return atomic.LoadUint32(&s.done) == 1 -} - -func (s *syncedReader) isBlocked() bool { - return atomic.LoadUint32(&s.blocked) == 1 -} - -func (s *syncedReader) wake() { - if s.isBlocked() { - atomic.StoreUint32(&s.blocked, 0) - s.news <- true - } -} - -func (s *syncedReader) sleep() { - read := atomic.LoadUint64(&s.read) - written := atomic.LoadUint64(&s.written) - if read >= written { - atomic.StoreUint32(&s.blocked, 1) - <-s.news - } - -} - -func (s *syncedReader) Seek(offset int64, whence int) (int64, error) { - if whence == io.SeekCurrent { - return s.r.Seek(offset, whence) - } - - p, err := s.r.Seek(offset, whence) - atomic.StoreUint64(&s.read, uint64(p)) - - return p, err -} - -func (s *syncedReader) Close() error { - atomic.StoreUint32(&s.done, 1) - close(s.news) - return nil -} - -type ObjectWriter struct { - objfile.Writer - fs billy.Filesystem - f billy.File -} - -func newObjectWriter(fs billy.Filesystem) (*ObjectWriter, error) { - f, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_obj_") - if err != nil { - return nil, err - } - - return &ObjectWriter{ - Writer: (*objfile.NewWriter(f)), - fs: fs, - f: f, - }, nil -} - -func (w *ObjectWriter) Close() error { - if err := w.Writer.Close(); err != nil { - return err - } - - if err := w.f.Close(); err != nil { - return err - } - - return w.save() -} - -func (w *ObjectWriter) save() error { - hash := w.Hash().String() - file := w.fs.Join(objectsPath, hash[0:2], hash[2:40]) - - return w.fs.Rename(w.f.Name(), file) -} diff --git 
a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/index.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/index.go deleted file mode 100644 index a19176f83db..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/index.go +++ /dev/null @@ -1,54 +0,0 @@ -package filesystem - -import ( - "bufio" - "os" - - "github.com/go-git/go-git/v5/plumbing/format/index" - "github.com/go-git/go-git/v5/storage/filesystem/dotgit" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -type IndexStorage struct { - dir *dotgit.DotGit -} - -func (s *IndexStorage) SetIndex(idx *index.Index) (err error) { - f, err := s.dir.IndexWriter() - if err != nil { - return err - } - - defer ioutil.CheckClose(f, &err) - bw := bufio.NewWriter(f) - defer func() { - if e := bw.Flush(); err == nil && e != nil { - err = e - } - }() - - e := index.NewEncoder(bw) - err = e.Encode(idx) - return err -} - -func (s *IndexStorage) Index() (i *index.Index, err error) { - idx := &index.Index{ - Version: 2, - } - - f, err := s.dir.Index() - if err != nil { - if os.IsNotExist(err) { - return idx, nil - } - - return nil, err - } - - defer ioutil.CheckClose(f, &err) - - d := index.NewDecoder(bufio.NewReader(f)) - err = d.Decode(idx) - return idx, err -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/module.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/module.go deleted file mode 100644 index 20336c11846..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/module.go +++ /dev/null @@ -1,20 +0,0 @@ -package filesystem - -import ( - "github.com/go-git/go-git/v5/plumbing/cache" - "github.com/go-git/go-git/v5/storage" - "github.com/go-git/go-git/v5/storage/filesystem/dotgit" -) - -type ModuleStorage struct { - dir *dotgit.DotGit -} - -func (s *ModuleStorage) Module(name string) (storage.Storer, error) { - fs, err := s.dir.Module(name) - if err != nil { - return nil, err - } - - return NewStorage(fs, cache.NewObjectLRUDefault()), nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go deleted file mode 100644 index 74371743902..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go +++ /dev/null @@ -1,817 +0,0 @@ -package filesystem - -import ( - "io" - "os" - "time" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/cache" - "github.com/go-git/go-git/v5/plumbing/format/idxfile" - "github.com/go-git/go-git/v5/plumbing/format/objfile" - "github.com/go-git/go-git/v5/plumbing/format/packfile" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/storage/filesystem/dotgit" - "github.com/go-git/go-git/v5/utils/ioutil" - - "github.com/go-git/go-billy/v5" -) - -type ObjectStorage struct { - options Options - - // objectCache is an object cache uses to cache delta's bases and also recently - // loaded loose objects - objectCache cache.Object - - dir *dotgit.DotGit - index map[plumbing.Hash]idxfile.Index - - packList []plumbing.Hash - packListIdx int - packfiles map[plumbing.Hash]*packfile.Packfile -} - -// NewObjectStorage creates a new ObjectStorage with the given .git directory and cache. 
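NewObjectStorage, documented above and defined next, is normally reached through filesystem.NewStorage, which wires DotGit, the index, module and object storage together around a shared cache (as ModuleStorage above does). A usage sketch assuming the public go-git v5 API (both paths are illustrative):

    package main

    import (
        "fmt"

        "github.com/go-git/go-billy/v5/osfs"
        git "github.com/go-git/go-git/v5"
        "github.com/go-git/go-git/v5/plumbing/cache"
        "github.com/go-git/go-git/v5/storage/filesystem"
    )

    func main() {
        dot := osfs.New("/tmp/repo/.git") // illustrative paths
        wt := osfs.New("/tmp/repo")

        // Filesystem-backed storage with the default LRU object cache.
        st := filesystem.NewStorage(dot, cache.NewObjectLRUDefault())

        r, err := git.Open(st, wt)
        if err != nil {
            panic(err)
        }

        head, err := r.Head()
        if err != nil {
            panic(err)
        }
        fmt.Println(head)
    }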
-func NewObjectStorage(dir *dotgit.DotGit, objectCache cache.Object) *ObjectStorage { - return NewObjectStorageWithOptions(dir, objectCache, Options{}) -} - -// NewObjectStorageWithOptions creates a new ObjectStorage with the given .git directory, cache and extra options -func NewObjectStorageWithOptions(dir *dotgit.DotGit, objectCache cache.Object, ops Options) *ObjectStorage { - return &ObjectStorage{ - options: ops, - objectCache: objectCache, - dir: dir, - } -} - -func (s *ObjectStorage) requireIndex() error { - if s.index != nil { - return nil - } - - s.index = make(map[plumbing.Hash]idxfile.Index) - packs, err := s.dir.ObjectPacks() - if err != nil { - return err - } - - for _, h := range packs { - if err := s.loadIdxFile(h); err != nil { - return err - } - } - - return nil -} - -// Reindex indexes again all packfiles. Useful if git changed packfiles externally -func (s *ObjectStorage) Reindex() { - s.index = nil -} - -func (s *ObjectStorage) loadIdxFile(h plumbing.Hash) (err error) { - f, err := s.dir.ObjectPackIdx(h) - if err != nil { - return err - } - - defer ioutil.CheckClose(f, &err) - - idxf := idxfile.NewMemoryIndex() - d := idxfile.NewDecoder(f) - if err = d.Decode(idxf); err != nil { - return err - } - - s.index[h] = idxf - return err -} - -func (s *ObjectStorage) NewEncodedObject() plumbing.EncodedObject { - return &plumbing.MemoryObject{} -} - -func (s *ObjectStorage) PackfileWriter() (io.WriteCloser, error) { - if err := s.requireIndex(); err != nil { - return nil, err - } - - w, err := s.dir.NewObjectPack() - if err != nil { - return nil, err - } - - w.Notify = func(h plumbing.Hash, writer *idxfile.Writer) { - index, err := writer.Index() - if err == nil { - s.index[h] = index - } - } - - return w, nil -} - -// SetEncodedObject adds a new object to the storage. -func (s *ObjectStorage) SetEncodedObject(o plumbing.EncodedObject) (h plumbing.Hash, err error) { - if o.Type() == plumbing.OFSDeltaObject || o.Type() == plumbing.REFDeltaObject { - return plumbing.ZeroHash, plumbing.ErrInvalidType - } - - ow, err := s.dir.NewObject() - if err != nil { - return plumbing.ZeroHash, err - } - - defer ioutil.CheckClose(ow, &err) - - or, err := o.Reader() - if err != nil { - return plumbing.ZeroHash, err - } - - defer ioutil.CheckClose(or, &err) - - if err = ow.WriteHeader(o.Type(), o.Size()); err != nil { - return plumbing.ZeroHash, err - } - - if _, err = io.Copy(ow, or); err != nil { - return plumbing.ZeroHash, err - } - - return o.Hash(), err -} - -// HasEncodedObject returns nil if the object exists, without actually -// reading the object data from storage. -func (s *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) { - // Check unpacked objects - f, err := s.dir.Object(h) - if err != nil { - if !os.IsNotExist(err) { - return err - } - // Fall through to check packed objects. - } else { - defer ioutil.CheckClose(f, &err) - return nil - } - - // Check packed objects. 
- if err := s.requireIndex(); err != nil { - return err - } - _, _, offset := s.findObjectInPackfile(h) - if offset == -1 { - return plumbing.ErrObjectNotFound - } - return nil -} - -func (s *ObjectStorage) encodedObjectSizeFromUnpacked(h plumbing.Hash) ( - size int64, err error) { - f, err := s.dir.Object(h) - if err != nil { - if os.IsNotExist(err) { - return 0, plumbing.ErrObjectNotFound - } - - return 0, err - } - - r, err := objfile.NewReader(f) - if err != nil { - return 0, err - } - defer ioutil.CheckClose(r, &err) - - _, size, err = r.Header() - return size, err -} - -func (s *ObjectStorage) packfile(idx idxfile.Index, pack plumbing.Hash) (*packfile.Packfile, error) { - if p := s.packfileFromCache(pack); p != nil { - return p, nil - } - - f, err := s.dir.ObjectPack(pack) - if err != nil { - return nil, err - } - - var p *packfile.Packfile - if s.objectCache != nil { - p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache) - } else { - p = packfile.NewPackfile(idx, s.dir.Fs(), f) - } - - return p, s.storePackfileInCache(pack, p) -} - -func (s *ObjectStorage) packfileFromCache(hash plumbing.Hash) *packfile.Packfile { - if s.packfiles == nil { - if s.options.KeepDescriptors { - s.packfiles = make(map[plumbing.Hash]*packfile.Packfile) - } else if s.options.MaxOpenDescriptors > 0 { - s.packList = make([]plumbing.Hash, s.options.MaxOpenDescriptors) - s.packfiles = make(map[plumbing.Hash]*packfile.Packfile, s.options.MaxOpenDescriptors) - } - } - - return s.packfiles[hash] -} - -func (s *ObjectStorage) storePackfileInCache(hash plumbing.Hash, p *packfile.Packfile) error { - if s.options.KeepDescriptors { - s.packfiles[hash] = p - return nil - } - - if s.options.MaxOpenDescriptors <= 0 { - return nil - } - - // start over as the limit of packList is hit - if s.packListIdx >= len(s.packList) { - s.packListIdx = 0 - } - - // close the existing packfile if open - if next := s.packList[s.packListIdx]; !next.IsZero() { - open := s.packfiles[next] - delete(s.packfiles, next) - if open != nil { - if err := open.Close(); err != nil { - return err - } - } - } - - // cache newly open packfile - s.packList[s.packListIdx] = hash - s.packfiles[hash] = p - s.packListIdx++ - - return nil -} - -func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) ( - size int64, err error) { - if err := s.requireIndex(); err != nil { - return 0, err - } - - pack, _, offset := s.findObjectInPackfile(h) - if offset == -1 { - return 0, plumbing.ErrObjectNotFound - } - - idx := s.index[pack] - hash, err := idx.FindHash(offset) - if err == nil { - obj, ok := s.objectCache.Get(hash) - if ok { - return obj.Size(), nil - } - } else if err != nil && err != plumbing.ErrObjectNotFound { - return 0, err - } - - p, err := s.packfile(idx, pack) - if err != nil { - return 0, err - } - - if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 { - defer ioutil.CheckClose(p, &err) - } - - return p.GetSizeByOffset(offset) -} - -// EncodedObjectSize returns the plaintext size of the given object, -// without actually reading the full object data from storage. -func (s *ObjectStorage) EncodedObjectSize(h plumbing.Hash) ( - size int64, err error) { - size, err = s.encodedObjectSizeFromUnpacked(h) - if err != nil && err != plumbing.ErrObjectNotFound { - return 0, err - } else if err == nil { - return size, nil - } - - return s.encodedObjectSizeFromPackfile(h) -} - -// EncodedObject returns the object with the given hash, by searching for it in -// the packfile and the git object directories. 
-func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) { - var obj plumbing.EncodedObject - var err error - - if s.index != nil { - obj, err = s.getFromPackfile(h, false) - if err == plumbing.ErrObjectNotFound { - obj, err = s.getFromUnpacked(h) - } - } else { - obj, err = s.getFromUnpacked(h) - if err == plumbing.ErrObjectNotFound { - obj, err = s.getFromPackfile(h, false) - } - } - - // If the error is still object not found, check if it's a shared object - // repository. - if err == plumbing.ErrObjectNotFound { - dotgits, e := s.dir.Alternates() - if e == nil { - // Create a new object storage with the DotGit(s) and check for the - // required hash object. Skip when not found. - for _, dg := range dotgits { - o := NewObjectStorage(dg, s.objectCache) - enobj, enerr := o.EncodedObject(t, h) - if enerr != nil { - continue - } - return enobj, nil - } - } - } - - if err != nil { - return nil, err - } - - if plumbing.AnyObject != t && obj.Type() != t { - return nil, plumbing.ErrObjectNotFound - } - - return obj, nil -} - -// DeltaObject returns the object with the given hash, by searching for -// it in the packfile and the git object directories. -func (s *ObjectStorage) DeltaObject(t plumbing.ObjectType, - h plumbing.Hash) (plumbing.EncodedObject, error) { - obj, err := s.getFromUnpacked(h) - if err == plumbing.ErrObjectNotFound { - obj, err = s.getFromPackfile(h, true) - } - - if err != nil { - return nil, err - } - - if plumbing.AnyObject != t && obj.Type() != t { - return nil, plumbing.ErrObjectNotFound - } - - return obj, nil -} - -func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedObject, err error) { - f, err := s.dir.Object(h) - if err != nil { - if os.IsNotExist(err) { - return nil, plumbing.ErrObjectNotFound - } - - return nil, err - } - defer ioutil.CheckClose(f, &err) - - if cacheObj, found := s.objectCache.Get(h); found { - return cacheObj, nil - } - - obj = s.NewEncodedObject() - r, err := objfile.NewReader(f) - if err != nil { - return nil, err - } - - defer ioutil.CheckClose(r, &err) - - t, size, err := r.Header() - if err != nil { - return nil, err - } - - obj.SetType(t) - obj.SetSize(size) - w, err := obj.Writer() - if err != nil { - return nil, err - } - - defer ioutil.CheckClose(w, &err) - - s.objectCache.Put(obj) - - _, err = io.Copy(w, r) - return obj, err -} - -// Get returns the object with the given hash, by searching for it in -// the packfile. 
-func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) ( - plumbing.EncodedObject, error) { - - if err := s.requireIndex(); err != nil { - return nil, err - } - - pack, hash, offset := s.findObjectInPackfile(h) - if offset == -1 { - return nil, plumbing.ErrObjectNotFound - } - - idx := s.index[pack] - p, err := s.packfile(idx, pack) - if err != nil { - return nil, err - } - - if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 { - defer ioutil.CheckClose(p, &err) - } - - if canBeDelta { - return s.decodeDeltaObjectAt(p, offset, hash) - } - - return s.decodeObjectAt(p, offset) -} - -func (s *ObjectStorage) decodeObjectAt( - p *packfile.Packfile, - offset int64, -) (plumbing.EncodedObject, error) { - hash, err := p.FindHash(offset) - if err == nil { - obj, ok := s.objectCache.Get(hash) - if ok { - return obj, nil - } - } - - if err != nil && err != plumbing.ErrObjectNotFound { - return nil, err - } - - return p.GetByOffset(offset) -} - -func (s *ObjectStorage) decodeDeltaObjectAt( - p *packfile.Packfile, - offset int64, - hash plumbing.Hash, -) (plumbing.EncodedObject, error) { - scan := p.Scanner() - header, err := scan.SeekObjectHeader(offset) - if err != nil { - return nil, err - } - - var ( - base plumbing.Hash - ) - - switch header.Type { - case plumbing.REFDeltaObject: - base = header.Reference - case plumbing.OFSDeltaObject: - base, err = p.FindHash(header.OffsetReference) - if err != nil { - return nil, err - } - default: - return s.decodeObjectAt(p, offset) - } - - obj := &plumbing.MemoryObject{} - obj.SetType(header.Type) - w, err := obj.Writer() - if err != nil { - return nil, err - } - - if _, _, err := scan.NextObject(w); err != nil { - return nil, err - } - - return newDeltaObject(obj, hash, base, header.Length), nil -} - -func (s *ObjectStorage) findObjectInPackfile(h plumbing.Hash) (plumbing.Hash, plumbing.Hash, int64) { - for packfile, index := range s.index { - offset, err := index.FindOffset(h) - if err == nil { - return packfile, h, offset - } - } - - return plumbing.ZeroHash, plumbing.ZeroHash, -1 -} - -// IterEncodedObjects returns an iterator for all the objects in the packfile -// with the given type. -func (s *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.EncodedObjectIter, error) { - objects, err := s.dir.Objects() - if err != nil { - return nil, err - } - - seen := make(map[plumbing.Hash]struct{}) - var iters []storer.EncodedObjectIter - if len(objects) != 0 { - iters = append(iters, &objectsIter{s: s, t: t, h: objects}) - seen = hashListAsMap(objects) - } - - packi, err := s.buildPackfileIters(t, seen) - if err != nil { - return nil, err - } - - iters = append(iters, packi) - return storer.NewMultiEncodedObjectIter(iters), nil -} - -func (s *ObjectStorage) buildPackfileIters( - t plumbing.ObjectType, - seen map[plumbing.Hash]struct{}, -) (storer.EncodedObjectIter, error) { - if err := s.requireIndex(); err != nil { - return nil, err - } - - packs, err := s.dir.ObjectPacks() - if err != nil { - return nil, err - } - return &lazyPackfilesIter{ - hashes: packs, - open: func(h plumbing.Hash) (storer.EncodedObjectIter, error) { - pack, err := s.dir.ObjectPack(h) - if err != nil { - return nil, err - } - return newPackfileIter( - s.dir.Fs(), pack, t, seen, s.index[h], - s.objectCache, s.options.KeepDescriptors, - ) - }, - }, nil -} - -// Close closes all opened files. 
-func (s *ObjectStorage) Close() error { - var firstError error - if s.options.KeepDescriptors || s.options.MaxOpenDescriptors > 0 { - for _, packfile := range s.packfiles { - err := packfile.Close() - if firstError == nil && err != nil { - firstError = err - } - } - } - - s.packfiles = nil - s.dir.Close() - - return firstError -} - -type lazyPackfilesIter struct { - hashes []plumbing.Hash - open func(h plumbing.Hash) (storer.EncodedObjectIter, error) - cur storer.EncodedObjectIter -} - -func (it *lazyPackfilesIter) Next() (plumbing.EncodedObject, error) { - for { - if it.cur == nil { - if len(it.hashes) == 0 { - return nil, io.EOF - } - h := it.hashes[0] - it.hashes = it.hashes[1:] - - sub, err := it.open(h) - if err == io.EOF { - continue - } else if err != nil { - return nil, err - } - it.cur = sub - } - ob, err := it.cur.Next() - if err == io.EOF { - it.cur.Close() - it.cur = nil - continue - } else if err != nil { - return nil, err - } - return ob, nil - } -} - -func (it *lazyPackfilesIter) ForEach(cb func(plumbing.EncodedObject) error) error { - return storer.ForEachIterator(it, cb) -} - -func (it *lazyPackfilesIter) Close() { - if it.cur != nil { - it.cur.Close() - it.cur = nil - } - it.hashes = nil -} - -type packfileIter struct { - pack billy.File - iter storer.EncodedObjectIter - seen map[plumbing.Hash]struct{} - - // tells whether the pack file should be left open after iteration or not - keepPack bool -} - -// NewPackfileIter returns a new EncodedObjectIter for the provided packfile -// and object type. Packfile and index file will be closed after they're -// used. If keepPack is true the packfile won't be closed after the iteration -// finished. -func NewPackfileIter( - fs billy.Filesystem, - f billy.File, - idxFile billy.File, - t plumbing.ObjectType, - keepPack bool, -) (storer.EncodedObjectIter, error) { - idx := idxfile.NewMemoryIndex() - if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil { - return nil, err - } - - if err := idxFile.Close(); err != nil { - return nil, err - } - - seen := make(map[plumbing.Hash]struct{}) - return newPackfileIter(fs, f, t, seen, idx, nil, keepPack) -} - -func newPackfileIter( - fs billy.Filesystem, - f billy.File, - t plumbing.ObjectType, - seen map[plumbing.Hash]struct{}, - index idxfile.Index, - cache cache.Object, - keepPack bool, -) (storer.EncodedObjectIter, error) { - var p *packfile.Packfile - if cache != nil { - p = packfile.NewPackfileWithCache(index, fs, f, cache) - } else { - p = packfile.NewPackfile(index, fs, f) - } - - iter, err := p.GetByType(t) - if err != nil { - return nil, err - } - - return &packfileIter{ - pack: f, - iter: iter, - seen: seen, - keepPack: keepPack, - }, nil -} - -func (iter *packfileIter) Next() (plumbing.EncodedObject, error) { - for { - obj, err := iter.iter.Next() - if err != nil { - return nil, err - } - - if _, ok := iter.seen[obj.Hash()]; ok { - continue - } - - return obj, nil - } -} - -func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error { - for { - o, err := iter.Next() - if err != nil { - if err == io.EOF { - iter.Close() - return nil - } - return err - } - - if err := cb(o); err != nil { - return err - } - } -} - -func (iter *packfileIter) Close() { - iter.iter.Close() - if !iter.keepPack { - _ = iter.pack.Close() - } -} - -type objectsIter struct { - s *ObjectStorage - t plumbing.ObjectType - h []plumbing.Hash -} - -func (iter *objectsIter) Next() (plumbing.EncodedObject, error) { - if len(iter.h) == 0 { - return nil, io.EOF - } - - obj, err := 
iter.s.getFromUnpacked(iter.h[0]) - iter.h = iter.h[1:] - - if err != nil { - return nil, err - } - - if iter.t != plumbing.AnyObject && iter.t != obj.Type() { - return iter.Next() - } - - return obj, err -} - -func (iter *objectsIter) ForEach(cb func(plumbing.EncodedObject) error) error { - for { - o, err := iter.Next() - if err != nil { - if err == io.EOF { - return nil - } - return err - } - - if err := cb(o); err != nil { - return err - } - } -} - -func (iter *objectsIter) Close() { - iter.h = []plumbing.Hash{} -} - -func hashListAsMap(l []plumbing.Hash) map[plumbing.Hash]struct{} { - m := make(map[plumbing.Hash]struct{}, len(l)) - for _, h := range l { - m[h] = struct{}{} - } - return m -} - -func (s *ObjectStorage) ForEachObjectHash(fun func(plumbing.Hash) error) error { - err := s.dir.ForEachObjectHash(fun) - if err == storer.ErrStop { - return nil - } - return err -} - -func (s *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) { - fi, err := s.dir.ObjectStat(hash) - if err != nil { - return time.Time{}, err - } - return fi.ModTime(), nil -} - -func (s *ObjectStorage) DeleteLooseObject(hash plumbing.Hash) error { - return s.dir.ObjectDelete(hash) -} - -func (s *ObjectStorage) ObjectPacks() ([]plumbing.Hash, error) { - return s.dir.ObjectPacks() -} - -func (s *ObjectStorage) DeleteOldObjectPackAndIndex(h plumbing.Hash, t time.Time) error { - return s.dir.DeleteOldObjectPackAndIndex(h, t) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/reference.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/reference.go deleted file mode 100644 index aabcd7308d6..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/reference.go +++ /dev/null @@ -1,44 +0,0 @@ -package filesystem - -import ( - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/storage/filesystem/dotgit" -) - -type ReferenceStorage struct { - dir *dotgit.DotGit -} - -func (r *ReferenceStorage) SetReference(ref *plumbing.Reference) error { - return r.dir.SetRef(ref, nil) -} - -func (r *ReferenceStorage) CheckAndSetReference(ref, old *plumbing.Reference) error { - return r.dir.SetRef(ref, old) -} - -func (r *ReferenceStorage) Reference(n plumbing.ReferenceName) (*plumbing.Reference, error) { - return r.dir.Ref(n) -} - -func (r *ReferenceStorage) IterReferences() (storer.ReferenceIter, error) { - refs, err := r.dir.Refs() - if err != nil { - return nil, err - } - - return storer.NewReferenceSliceIter(refs), nil -} - -func (r *ReferenceStorage) RemoveReference(n plumbing.ReferenceName) error { - return r.dir.RemoveRef(n) -} - -func (r *ReferenceStorage) CountLooseRefs() (int, error) { - return r.dir.CountLooseRefs() -} - -func (r *ReferenceStorage) PackRefs() error { - return r.dir.PackRefs() -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/shallow.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/shallow.go deleted file mode 100644 index afb600cf2da..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/shallow.go +++ /dev/null @@ -1,54 +0,0 @@ -package filesystem - -import ( - "bufio" - "fmt" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/storage/filesystem/dotgit" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -// ShallowStorage is where the shallow commits are stored, an internal type to -// manipulate the shallow file -type 
ShallowStorage struct { - dir *dotgit.DotGit -} - -// SetShallow saves the shallows in the shallow file in the .git folder as one -// commit per line, represented by a 40-byte hexadecimal object terminated by a -// newline. -func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error { - f, err := s.dir.ShallowWriter() - if err != nil { - return err - } - - defer ioutil.CheckClose(f, &err) - for _, h := range commits { - if _, err := fmt.Fprintf(f, "%s\n", h); err != nil { - return err - } - } - - return err -} - -// Shallow returns the shallow commits, reading from the shallow file in .git -func (s *ShallowStorage) Shallow() ([]plumbing.Hash, error) { - f, err := s.dir.Shallow() - if f == nil || err != nil { - return nil, err - } - - defer ioutil.CheckClose(f, &err) - - var hash []plumbing.Hash - - scn := bufio.NewScanner(f) - for scn.Scan() { - hash = append(hash, plumbing.NewHash(scn.Text())) - } - - return hash, scn.Err() -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/storage.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/storage.go deleted file mode 100644 index 8b69b27b00a..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/filesystem/storage.go +++ /dev/null @@ -1,73 +0,0 @@ -// Package filesystem is a storage backend based on filesystems -package filesystem - -import ( - "github.com/go-git/go-git/v5/plumbing/cache" - "github.com/go-git/go-git/v5/storage/filesystem/dotgit" - - "github.com/go-git/go-billy/v5" -) - -// Storage is an implementation of git.Storer that stores data on disk in the -// standard git format (that is, the .git directory). Zero values of this type -// are not safe to use, see the NewStorage function below. -type Storage struct { - fs billy.Filesystem - dir *dotgit.DotGit - - ObjectStorage - ReferenceStorage - IndexStorage - ShallowStorage - ConfigStorage - ModuleStorage -} - -// Options holds configuration for the storage. -type Options struct { - // ExclusiveAccess means that the filesystem is not modified externally - // while the repo is open. - ExclusiveAccess bool - // KeepDescriptors makes the file descriptors be reused, but they will - // need to be manually closed by calling Close(). - KeepDescriptors bool - // MaxOpenDescriptors is the max number of file descriptors to keep - // open. If KeepDescriptors is true, all file descriptors will remain open. - MaxOpenDescriptors int -} - -// NewStorage returns a new Storage backed by a given `fs.Filesystem` and cache. -func NewStorage(fs billy.Filesystem, cache cache.Object) *Storage { - return NewStorageWithOptions(fs, cache, Options{}) -} - -// NewStorageWithOptions returns a new Storage with extra options, -// backed by a given `fs.Filesystem` and cache. 
-func NewStorageWithOptions(fs billy.Filesystem, cache cache.Object, ops Options) *Storage { - dirOps := dotgit.Options{ - ExclusiveAccess: ops.ExclusiveAccess, - } - dir := dotgit.NewWithOptions(fs, dirOps) - - return &Storage{ - fs: fs, - dir: dir, - - ObjectStorage: *NewObjectStorageWithOptions(dir, cache, ops), - ReferenceStorage: ReferenceStorage{dir: dir}, - IndexStorage: IndexStorage{dir: dir}, - ShallowStorage: ShallowStorage{dir: dir}, - ConfigStorage: ConfigStorage{dir: dir}, - ModuleStorage: ModuleStorage{dir: dir}, - } -} - -// Filesystem returns the underlying filesystem -func (s *Storage) Filesystem() billy.Filesystem { - return s.fs -} - -// Init initializes the .git directory -func (s *Storage) Init() error { - return s.dir.Initialize() -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/memory/storage.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/memory/storage.go deleted file mode 100644 index fdf8fcfc67e..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/memory/storage.go +++ /dev/null @@ -1,320 +0,0 @@ -// Package memory is a storage backend based on memory -package memory - -import ( - "fmt" - "time" - - "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/index" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/storage" -) - -var ErrUnsupportedObjectType = fmt.Errorf("unsupported object type") - -// Storage is an implementation of git.Storer that stores data in memory, being -// ephemeral. The use of this storage should be done in controlled environments, -// since the representation in memory of some repository can fill the machine -// memory. On the other hand, this storage has the best performance. 
-type Storage struct { - ConfigStorage - ObjectStorage - ShallowStorage - IndexStorage - ReferenceStorage - ModuleStorage -} - -// NewStorage returns a new Storage based on memory -func NewStorage() *Storage { - return &Storage{ - ReferenceStorage: make(ReferenceStorage), - ConfigStorage: ConfigStorage{}, - ShallowStorage: ShallowStorage{}, - ObjectStorage: ObjectStorage{ - Objects: make(map[plumbing.Hash]plumbing.EncodedObject), - Commits: make(map[plumbing.Hash]plumbing.EncodedObject), - Trees: make(map[plumbing.Hash]plumbing.EncodedObject), - Blobs: make(map[plumbing.Hash]plumbing.EncodedObject), - Tags: make(map[plumbing.Hash]plumbing.EncodedObject), - }, - ModuleStorage: make(ModuleStorage), - } -} - -type ConfigStorage struct { - config *config.Config -} - -func (c *ConfigStorage) SetConfig(cfg *config.Config) error { - if err := cfg.Validate(); err != nil { - return err - } - - c.config = cfg - return nil -} - -func (c *ConfigStorage) Config() (*config.Config, error) { - if c.config == nil { - c.config = config.NewConfig() - } - - return c.config, nil -} - -type IndexStorage struct { - index *index.Index -} - -func (c *IndexStorage) SetIndex(idx *index.Index) error { - c.index = idx - return nil -} - -func (c *IndexStorage) Index() (*index.Index, error) { - if c.index == nil { - c.index = &index.Index{Version: 2} - } - - return c.index, nil -} - -type ObjectStorage struct { - Objects map[plumbing.Hash]plumbing.EncodedObject - Commits map[plumbing.Hash]plumbing.EncodedObject - Trees map[plumbing.Hash]plumbing.EncodedObject - Blobs map[plumbing.Hash]plumbing.EncodedObject - Tags map[plumbing.Hash]plumbing.EncodedObject -} - -func (o *ObjectStorage) NewEncodedObject() plumbing.EncodedObject { - return &plumbing.MemoryObject{} -} - -func (o *ObjectStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) { - h := obj.Hash() - o.Objects[h] = obj - - switch obj.Type() { - case plumbing.CommitObject: - o.Commits[h] = o.Objects[h] - case plumbing.TreeObject: - o.Trees[h] = o.Objects[h] - case plumbing.BlobObject: - o.Blobs[h] = o.Objects[h] - case plumbing.TagObject: - o.Tags[h] = o.Objects[h] - default: - return h, ErrUnsupportedObjectType - } - - return h, nil -} - -func (o *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) { - if _, ok := o.Objects[h]; !ok { - return plumbing.ErrObjectNotFound - } - return nil -} - -func (o *ObjectStorage) EncodedObjectSize(h plumbing.Hash) ( - size int64, err error) { - obj, ok := o.Objects[h] - if !ok { - return 0, plumbing.ErrObjectNotFound - } - - return obj.Size(), nil -} - -func (o *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) { - obj, ok := o.Objects[h] - if !ok || (plumbing.AnyObject != t && obj.Type() != t) { - return nil, plumbing.ErrObjectNotFound - } - - return obj, nil -} - -func (o *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.EncodedObjectIter, error) { - var series []plumbing.EncodedObject - switch t { - case plumbing.AnyObject: - series = flattenObjectMap(o.Objects) - case plumbing.CommitObject: - series = flattenObjectMap(o.Commits) - case plumbing.TreeObject: - series = flattenObjectMap(o.Trees) - case plumbing.BlobObject: - series = flattenObjectMap(o.Blobs) - case plumbing.TagObject: - series = flattenObjectMap(o.Tags) - } - - return storer.NewEncodedObjectSliceIter(series), nil -} - -func flattenObjectMap(m map[plumbing.Hash]plumbing.EncodedObject) []plumbing.EncodedObject { - objects := make([]plumbing.EncodedObject, 0, len(m)) 
- for _, obj := range m { - objects = append(objects, obj) - } - return objects -} - -func (o *ObjectStorage) Begin() storer.Transaction { - return &TxObjectStorage{ - Storage: o, - Objects: make(map[plumbing.Hash]plumbing.EncodedObject), - } -} - -func (o *ObjectStorage) ForEachObjectHash(fun func(plumbing.Hash) error) error { - for h := range o.Objects { - err := fun(h) - if err != nil { - if err == storer.ErrStop { - return nil - } - return err - } - } - return nil -} - -func (o *ObjectStorage) ObjectPacks() ([]plumbing.Hash, error) { - return nil, nil -} -func (o *ObjectStorage) DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error { - return nil -} - -var errNotSupported = fmt.Errorf("Not supported") - -func (s *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) { - return time.Time{}, errNotSupported -} -func (s *ObjectStorage) DeleteLooseObject(plumbing.Hash) error { - return errNotSupported -} - -type TxObjectStorage struct { - Storage *ObjectStorage - Objects map[plumbing.Hash]plumbing.EncodedObject -} - -func (tx *TxObjectStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) { - h := obj.Hash() - tx.Objects[h] = obj - - return h, nil -} - -func (tx *TxObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) { - obj, ok := tx.Objects[h] - if !ok || (plumbing.AnyObject != t && obj.Type() != t) { - return nil, plumbing.ErrObjectNotFound - } - - return obj, nil -} - -func (tx *TxObjectStorage) Commit() error { - for h, obj := range tx.Objects { - delete(tx.Objects, h) - if _, err := tx.Storage.SetEncodedObject(obj); err != nil { - return err - } - } - - return nil -} - -func (tx *TxObjectStorage) Rollback() error { - tx.Objects = make(map[plumbing.Hash]plumbing.EncodedObject) - return nil -} - -type ReferenceStorage map[plumbing.ReferenceName]*plumbing.Reference - -func (r ReferenceStorage) SetReference(ref *plumbing.Reference) error { - if ref != nil { - r[ref.Name()] = ref - } - - return nil -} - -func (r ReferenceStorage) CheckAndSetReference(ref, old *plumbing.Reference) error { - if ref == nil { - return nil - } - - if old != nil { - tmp := r[ref.Name()] - if tmp != nil && tmp.Hash() != old.Hash() { - return storage.ErrReferenceHasChanged - } - } - r[ref.Name()] = ref - return nil -} - -func (r ReferenceStorage) Reference(n plumbing.ReferenceName) (*plumbing.Reference, error) { - ref, ok := r[n] - if !ok { - return nil, plumbing.ErrReferenceNotFound - } - - return ref, nil -} - -func (r ReferenceStorage) IterReferences() (storer.ReferenceIter, error) { - var refs []*plumbing.Reference - for _, ref := range r { - refs = append(refs, ref) - } - - return storer.NewReferenceSliceIter(refs), nil -} - -func (r ReferenceStorage) CountLooseRefs() (int, error) { - return len(r), nil -} - -func (r ReferenceStorage) PackRefs() error { - return nil -} - -func (r ReferenceStorage) RemoveReference(n plumbing.ReferenceName) error { - delete(r, n) - return nil -} - -type ShallowStorage []plumbing.Hash - -func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error { - *s = commits - return nil -} - -func (s ShallowStorage) Shallow() ([]plumbing.Hash, error) { - return s, nil -} - -type ModuleStorage map[string]*Storage - -func (s ModuleStorage) Module(name string) (storage.Storer, error) { - if m, ok := s[name]; ok { - return m, nil - } - - m := NewStorage() - s[name] = m - - return m, nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/storer.go 
b/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/storer.go deleted file mode 100644 index 4800ac7ba07..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/storage/storer.go +++ /dev/null @@ -1,30 +0,0 @@ -package storage - -import ( - "errors" - - "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/plumbing/storer" -) - -var ErrReferenceHasChanged = errors.New("reference has changed concurrently") - -// Storer is a generic storage of objects, references and any information -// related to a particular repository. The package github.com/go-git/go-git/v5/storage -// contains two implementations: a filesystem-based implementation (such as `.git`) -// and an ephemeral in-memory implementation -type Storer interface { - storer.EncodedObjectStorer - storer.ReferenceStorer - storer.ShallowStorer - storer.IndexStorer - config.ConfigStorer - ModuleStorer -} - -// ModuleStorer allows interacting with the modules' Storers -type ModuleStorer interface { - // Module returns a Storer representing a submodule; if it does not exist, a - // new empty Storer is returned - Module(name string) (Storer, error) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/submodule.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/submodule.go deleted file mode 100644 index dff26b0d80b..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/submodule.go +++ /dev/null @@ -1,357 +0,0 @@ -package git - -import ( - "bytes" - "context" - "errors" - "fmt" - - "github.com/go-git/go-billy/v5" - "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/index" -) - -var ( - ErrSubmoduleAlreadyInitialized = errors.New("submodule already initialized") - ErrSubmoduleNotInitialized = errors.New("submodule not initialized") -) - -// Submodule allows you to keep another Git repository in a -// subdirectory of your repository. -type Submodule struct { - // initialized defines if a submodule was already initialized. - initialized bool - - c *config.Submodule - w *Worktree -} - -// Config returns the submodule config -func (s *Submodule) Config() *config.Submodule { - return s.c -} - -// Init initializes the submodule, reading the recorded Entry in the index for -// the given submodule -func (s *Submodule) Init() error { - cfg, err := s.w.r.Config() - if err != nil { - return err - } - - _, ok := cfg.Submodules[s.c.Name] - if ok { - return ErrSubmoduleAlreadyInitialized - } - - s.initialized = true - - cfg.Submodules[s.c.Name] = s.c - return s.w.r.Storer.SetConfig(cfg) -} - -// Status returns the status of the submodule. 
-func (s *Submodule) Status() (*SubmoduleStatus, error) { - idx, err := s.w.r.Storer.Index() - if err != nil { - return nil, err - } - - return s.status(idx) -} - -func (s *Submodule) status(idx *index.Index) (*SubmoduleStatus, error) { - status := &SubmoduleStatus{ - Path: s.c.Path, - } - - e, err := idx.Entry(s.c.Path) - if err != nil && err != index.ErrEntryNotFound { - return nil, err - } - - if e != nil { - status.Expected = e.Hash - } - - if !s.initialized { - return status, nil - } - - r, err := s.Repository() - if err != nil { - return nil, err - } - - head, err := r.Head() - if err == nil { - status.Current = head.Hash() - } - - if err != nil && err == plumbing.ErrReferenceNotFound { - err = nil - } - - return status, err -} - -// Repository returns the Repository represented by this submodule -func (s *Submodule) Repository() (*Repository, error) { - if !s.initialized { - return nil, ErrSubmoduleNotInitialized - } - - storer, err := s.w.r.Storer.Module(s.c.Name) - if err != nil { - return nil, err - } - - _, err = storer.Reference(plumbing.HEAD) - if err != nil && err != plumbing.ErrReferenceNotFound { - return nil, err - } - - var exists bool - if err == nil { - exists = true - } - - var worktree billy.Filesystem - if worktree, err = s.w.Filesystem.Chroot(s.c.Path); err != nil { - return nil, err - } - - if exists { - return Open(storer, worktree) - } - - r, err := Init(storer, worktree) - if err != nil { - return nil, err - } - - _, err = r.CreateRemote(&config.RemoteConfig{ - Name: DefaultRemoteName, - URLs: []string{s.c.URL}, - }) - - return r, err -} - -// Update updates the registered submodule to match what the superproject expects; the -// submodule should be initialized first by calling the Init method or by setting -// SubmoduleUpdateOptions.Init to true -func (s *Submodule) Update(o *SubmoduleUpdateOptions) error { - return s.UpdateContext(context.Background(), o) -} - -// UpdateContext updates the registered submodule to match what the superproject -// expects; the submodule should be initialized first by calling the Init method or -// by setting SubmoduleUpdateOptions.Init to true. -// -// The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects the -// transport operations. 
-func (s *Submodule) UpdateContext(ctx context.Context, o *SubmoduleUpdateOptions) error { - return s.update(ctx, o, plumbing.ZeroHash) -} - -func (s *Submodule) update(ctx context.Context, o *SubmoduleUpdateOptions, forceHash plumbing.Hash) error { - if !s.initialized && !o.Init { - return ErrSubmoduleNotInitialized - } - - if !s.initialized && o.Init { - if err := s.Init(); err != nil { - return err - } - } - - idx, err := s.w.r.Storer.Index() - if err != nil { - return err - } - - hash := forceHash - if hash.IsZero() { - e, err := idx.Entry(s.c.Path) - if err != nil { - return err - } - - hash = e.Hash - } - - r, err := s.Repository() - if err != nil { - return err - } - - if err := s.fetchAndCheckout(ctx, r, o, hash); err != nil { - return err - } - - return s.doRecursiveUpdate(r, o) -} - -func (s *Submodule) doRecursiveUpdate(r *Repository, o *SubmoduleUpdateOptions) error { - if o.RecurseSubmodules == NoRecurseSubmodules { - return nil - } - - w, err := r.Worktree() - if err != nil { - return err - } - - l, err := w.Submodules() - if err != nil { - return err - } - - new := &SubmoduleUpdateOptions{} - *new = *o - - new.RecurseSubmodules-- - return l.Update(new) -} - -func (s *Submodule) fetchAndCheckout( - ctx context.Context, r *Repository, o *SubmoduleUpdateOptions, hash plumbing.Hash, -) error { - if !o.NoFetch { - err := r.FetchContext(ctx, &FetchOptions{Auth: o.Auth}) - if err != nil && err != NoErrAlreadyUpToDate { - return err - } - } - - w, err := r.Worktree() - if err != nil { - return err - } - - if err := w.Checkout(&CheckoutOptions{Hash: hash}); err != nil { - return err - } - - head := plumbing.NewHashReference(plumbing.HEAD, hash) - return r.Storer.SetReference(head) -} - -// Submodules is a list of several submodules from the same repository. -type Submodules []*Submodule - -// Init initializes the submodules in this list. -func (s Submodules) Init() error { - for _, sub := range s { - if err := sub.Init(); err != nil { - return err - } - } - - return nil -} - -// Update updates all the submodules in this list. -func (s Submodules) Update(o *SubmoduleUpdateOptions) error { - return s.UpdateContext(context.Background(), o) -} - -// UpdateContext updates all the submodules in this list. -// -// The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects the -// transport operations. -func (s Submodules) UpdateContext(ctx context.Context, o *SubmoduleUpdateOptions) error { - for _, sub := range s { - if err := sub.UpdateContext(ctx, o); err != nil { - return err - } - } - - return nil -} - -// Status returns the status of the submodules. 
-func (s Submodules) Status() (SubmodulesStatus, error) { - var list SubmodulesStatus - - var r *Repository - for _, sub := range s { - if r == nil { - r = sub.w.r - } - - idx, err := r.Storer.Index() - if err != nil { - return nil, err - } - - status, err := sub.status(idx) - if err != nil { - return nil, err - } - - list = append(list, status) - } - - return list, nil -} - -// SubmodulesStatus contains the status for all submodules in the worktree -type SubmodulesStatus []*SubmoduleStatus - -// String is equivalent to `git submodule status` -func (s SubmodulesStatus) String() string { - buf := bytes.NewBuffer(nil) - for _, sub := range s { - fmt.Fprintln(buf, sub) - } - - return buf.String() -} - -// SubmoduleStatus contains the status for a submodule in the worktree -type SubmoduleStatus struct { - Path string - Current plumbing.Hash - Expected plumbing.Hash - Branch plumbing.ReferenceName -} - -// IsClean returns true if the HEAD of the submodule equals the expected commit -func (s *SubmoduleStatus) IsClean() bool { - return s.Current == s.Expected -} - -// String is equivalent to `git submodule status ` -// -// This will print the SHA-1 of the currently checked out commit for a -// submodule, along with the submodule path and the output of git describe for -// the SHA-1. Each SHA-1 will be prefixed with - if the submodule is not -// initialized, + if the currently checked out submodule commit does not match -// the SHA-1 found in the index of the containing repository. -func (s *SubmoduleStatus) String() string { - var extra string - var status = ' ' - - if s.Current.IsZero() { - status = '-' - } else if !s.IsClean() { - status = '+' - } - - if len(s.Branch) != 0 { - extra = string(s.Branch[5:]) - } else if !s.Current.IsZero() { - extra = s.Current.String()[:7] - } - - if extra != "" { - extra = fmt.Sprintf(" (%s)", extra) - } - - return fmt.Sprintf("%c%s %s%s", status, s.Expected, s.Path, extra) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/binary/read.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/binary/read.go deleted file mode 100644 index a14d48db9c7..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/binary/read.go +++ /dev/null @@ -1,180 +0,0 @@ -// Package binary implements syntax-sugar functions on top of the standard -// library binary package -package binary - -import ( - "bufio" - "encoding/binary" - "io" - - "github.com/go-git/go-git/v5/plumbing" -) - -// Read reads structured binary data from r into data. Bytes are read and -// decoded in BigEndian order -// https://golang.org/pkg/encoding/binary/#Read -func Read(r io.Reader, data ...interface{}) error { - for _, v := range data { - if err := binary.Read(r, binary.BigEndian, v); err != nil { - return err - } - } - - return nil -} - -// ReadUntil reads from r until delim is found -func ReadUntil(r io.Reader, delim byte) ([]byte, error) { - if bufr, ok := r.(*bufio.Reader); ok { - return ReadUntilFromBufioReader(bufr, delim) - } - - var buf [1]byte - value := make([]byte, 0, 16) - for { - if _, err := io.ReadFull(r, buf[:]); err != nil { - if err == io.EOF { - return nil, err - } - - return nil, err - } - - if buf[0] == delim { - return value, nil - } - - value = append(value, buf[0]) - } -} - -// ReadUntilFromBufioReader is like bufio.ReadBytes but drops the delimiter -// from the result. 
-func ReadUntilFromBufioReader(r *bufio.Reader, delim byte) ([]byte, error) { - value, err := r.ReadBytes(delim) - if err != nil || len(value) == 0 { - return nil, err - } - - return value[:len(value)-1], nil -} - -// ReadVariableWidthInt reads and returns an int in Git VLQ special format: -// -// Ordinary VLQ has some redundancies, example: the number 358 can be -// encoded as the 2-octet VLQ 0x8166 or the 3-octet VLQ 0x808166 or the -// 4-octet VLQ 0x80808166 and so forth. -// -// To avoid these redundancies, the VLQ format used in Git removes this -// prepending redundancy and extends the representable range of shorter -// VLQs by adding an offset to VLQs of 2 or more octets in such a way -// that the lowest possible value for such an (N+1)-octet VLQ becomes -// exactly one more than the maximum possible value for an N-octet VLQ. -// In particular, since a 1-octet VLQ can store a maximum value of 127, -// the minimum 2-octet VLQ (0x8000) is assigned the value 128 instead of -// 0. Conversely, the maximum value of such a 2-octet VLQ (0xff7f) is -// 16511 instead of just 16383. Similarly, the minimum 3-octet VLQ -// (0x808000) has a value of 16512 instead of zero, which means -// that the maximum 3-octet VLQ (0xffff7f) is 2113663 instead of -// just 2097151. And so forth. -// -// This is how the offset is saved in C: -// -// dheader[pos] = ofs & 127; -// while (ofs >>= 7) -// dheader[--pos] = 128 | (--ofs & 127); -// -func ReadVariableWidthInt(r io.Reader) (int64, error) { - var c byte - if err := Read(r, &c); err != nil { - return 0, err - } - - var v = int64(c & maskLength) - for c&maskContinue > 0 { - v++ - if err := Read(r, &c); err != nil { - return 0, err - } - - v = (v << lengthBits) + int64(c&maskLength) - } - - return v, nil -} - -const ( - maskContinue = uint8(128) // 1000 0000 - maskLength = uint8(127) // 0111 1111 - lengthBits = uint8(7) // subsequent bytes have 7 bits to store the length -) - -// ReadUint64 reads 8 bytes and returns them as a BigEndian uint64 -func ReadUint64(r io.Reader) (uint64, error) { - var v uint64 - if err := binary.Read(r, binary.BigEndian, &v); err != nil { - return 0, err - } - - return v, nil -} - -// ReadUint32 reads 4 bytes and returns them as a BigEndian uint32 -func ReadUint32(r io.Reader) (uint32, error) { - var v uint32 - if err := binary.Read(r, binary.BigEndian, &v); err != nil { - return 0, err - } - - return v, nil -} - -// ReadUint16 reads 2 bytes and returns them as a BigEndian uint16 -func ReadUint16(r io.Reader) (uint16, error) { - var v uint16 - if err := binary.Read(r, binary.BigEndian, &v); err != nil { - return 0, err - } - - return v, nil -} - -// ReadHash reads a plumbing.Hash from r -func ReadHash(r io.Reader) (plumbing.Hash, error) { - var h plumbing.Hash - if err := binary.Read(r, binary.BigEndian, h[:]); err != nil { - return plumbing.ZeroHash, err - } - - return h, nil -} - -const sniffLen = 8000 - -// IsBinary detects if data is a binary value based on: -// http://git.kernel.org/cgit/git/git.git/tree/xdiff-interface.c?id=HEAD#n198 -func IsBinary(r io.Reader) (bool, error) { - reader := bufio.NewReader(r) - c := 0 - for { - if c == sniffLen { - break - } - - b, err := reader.ReadByte() - if err == io.EOF { - break - } - if err != nil { - return false, err - } - - if b == byte(0) { - return true, nil - } - - c++ - } - - return false, nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/binary/write.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/binary/write.go deleted file mode 100644 
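As a worked illustration of the VLQ decoding loop in ReadVariableWidthInt above (an editorial sketch, not part of the vendored package; decodeGitVLQ is a hypothetical stand-alone copy of the same loop), the two bytes 0x81 0x66 decode to 358: the first byte contributes 1, the continuation step increments it to 2, and the final step computes (2<<7) + 0x66 = 256 + 102 = 358.

package main

import (
	"bytes"
	"fmt"
	"io"
)

// decodeGitVLQ mirrors the loop of ReadVariableWidthInt: on every
// continuation byte the accumulated value is incremented before the
// shift, which removes the redundancy of ordinary VLQ described above.
func decodeGitVLQ(r io.ByteReader) (int64, error) {
	c, err := r.ReadByte()
	if err != nil {
		return 0, err
	}
	v := int64(c & 0x7f)
	for c&0x80 > 0 {
		v++ // the offset that distinguishes Git's VLQ from ordinary VLQ
		if c, err = r.ReadByte(); err != nil {
			return 0, err
		}
		v = (v << 7) + int64(c&0x7f)
	}
	return v, nil
}

func main() {
	v, _ := decodeGitVLQ(bytes.NewReader([]byte{0x81, 0x66}))
	fmt.Println(v) // prints 358
}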
index c08c73a06b2..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/binary/write.go +++ /dev/null @@ -1,50 +0,0 @@ -package binary - -import ( - "encoding/binary" - "io" -) - -// Write writes the binary representation of data into w, using BigEndian order -// https://golang.org/pkg/encoding/binary/#Write -func Write(w io.Writer, data ...interface{}) error { - for _, v := range data { - if err := binary.Write(w, binary.BigEndian, v); err != nil { - return err - } - } - - return nil -} - -func WriteVariableWidthInt(w io.Writer, n int64) error { - buf := []byte{byte(n & 0x7f)} - n >>= 7 - for n != 0 { - n-- - buf = append([]byte{0x80 | (byte(n & 0x7f))}, buf...) - n >>= 7 - } - - _, err := w.Write(buf) - - return err -} - -// WriteUint64 writes the binary representation of a uint64 into w, in BigEndian -// order -func WriteUint64(w io.Writer, value uint64) error { - return binary.Write(w, binary.BigEndian, value) -} - -// WriteUint32 writes the binary representation of a uint32 into w, in BigEndian -// order -func WriteUint32(w io.Writer, value uint32) error { - return binary.Write(w, binary.BigEndian, value) -} - -// WriteUint16 writes the binary representation of a uint16 into w, in BigEndian -// order -func WriteUint16(w io.Writer, value uint16) error { - return binary.Write(w, binary.BigEndian, value) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/diff/diff.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/diff/diff.go deleted file mode 100644 index 6142ed05155..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/diff/diff.go +++ /dev/null @@ -1,61 +0,0 @@ -// Package diff implements line oriented diffs, similar to the ancient -// Unix diff command. -// -// The current implementation is just a wrapper around Sergi's -// go-diff/diffmatchpatch library, which is a go port of Neil -// Fraser's google-diff-match-patch code -package diff - -import ( - "bytes" - "time" - - "github.com/sergi/go-diff/diffmatchpatch" -) - -// Do computes the (line oriented) modifications needed to turn the src -// string into the dst string. The underlying algorithm is Myers, -// its complexity is O(N*d) where N is min(lines(src), lines(dst)) and d -// is the size of the diff. -func Do(src, dst string) (diffs []diffmatchpatch.Diff) { - // the default timeout is time.Second which may be too small under heavy load - return DoWithTimeout(src, dst, time.Hour) -} - -// DoWithTimeout computes the (line oriented) modifications needed to turn the src -// string into the dst string. The `timeout` argument specifies the maximum -// amount of time it is allowed to spend in this function. If the timeout -// is exceeded, the parts of the strings which were not considered are turned into -// a bulk delete+insert and the half-baked suboptimal result is returned at once. -// The underlying algorithm is Myers, its complexity is O(N*d) where N is -// min(lines(src), lines(dst)) and d is the size of the diff. -func DoWithTimeout(src, dst string, timeout time.Duration) (diffs []diffmatchpatch.Diff) { - dmp := diffmatchpatch.New() - dmp.DiffTimeout = timeout - wSrc, wDst, warray := dmp.DiffLinesToRunes(src, dst) - diffs = dmp.DiffMainRunes(wSrc, wDst, false) - diffs = dmp.DiffCharsToLines(diffs, warray) - return diffs -} - -// Dst computes and returns the destination text. 
-func Dst(diffs []diffmatchpatch.Diff) string { - var text bytes.Buffer - for _, d := range diffs { - if d.Type != diffmatchpatch.DiffDelete { - text.WriteString(d.Text) - } - } - return text.String() -} - -// Src computes and returns the source text. -func Src(diffs []diffmatchpatch.Diff) string { - var text bytes.Buffer - for _, d := range diffs { - if d.Type != diffmatchpatch.DiffInsert { - text.WriteString(d.Text) - } - } - return text.String() -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/ioutil/common.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/ioutil/common.go deleted file mode 100644 index e9dcbfe49bf..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/ioutil/common.go +++ /dev/null @@ -1,170 +0,0 @@ -// Package ioutil implements some I/O utility functions. -package ioutil - -import ( - "bufio" - "context" - "errors" - "io" - - "github.com/jbenet/go-context/io" -) - -type readPeeker interface { - io.Reader - Peek(int) ([]byte, error) -} - -var ( - ErrEmptyReader = errors.New("reader is empty") -) - -// NonEmptyReader takes a reader and returns it if it is not empty, or -// `ErrEmptyReader` if it is empty. If there is an error when reading the first -// byte of the given reader, it will be propagated. -func NonEmptyReader(r io.Reader) (io.Reader, error) { - pr, ok := r.(readPeeker) - if !ok { - pr = bufio.NewReader(r) - } - - _, err := pr.Peek(1) - if err == io.EOF { - return nil, ErrEmptyReader - } - - if err != nil { - return nil, err - } - - return pr, nil -} - -type readCloser struct { - io.Reader - closer io.Closer -} - -func (r *readCloser) Close() error { - return r.closer.Close() -} - -// NewReadCloser creates an `io.ReadCloser` with the given `io.Reader` and -// `io.Closer`. -func NewReadCloser(r io.Reader, c io.Closer) io.ReadCloser { - return &readCloser{Reader: r, closer: c} -} - -type writeCloser struct { - io.Writer - closer io.Closer -} - -func (r *writeCloser) Close() error { - return r.closer.Close() -} - -// NewWriteCloser creates an `io.WriteCloser` with the given `io.Writer` and -// `io.Closer`. -func NewWriteCloser(w io.Writer, c io.Closer) io.WriteCloser { - return &writeCloser{Writer: w, closer: c} -} - -type writeNopCloser struct { - io.Writer -} - -func (writeNopCloser) Close() error { return nil } - -// WriteNopCloser returns a WriteCloser with a no-op Close method wrapping -// the provided Writer w. -func WriteNopCloser(w io.Writer) io.WriteCloser { - return writeNopCloser{w} -} - -// CheckClose calls Close on the given io.Closer. If the given *error points to -// nil, it will be assigned the error returned by Close. Otherwise, any error -// returned by Close will be ignored. CheckClose is usually called with defer. -func CheckClose(c io.Closer, err *error) { - if cerr := c.Close(); cerr != nil && *err == nil { - *err = cerr - } -} - -// NewContextWriter wraps a writer to make it respect the given Context. -// If there is a blocking write, the returned Writer will return whenever the -// context is cancelled (the return values are n=0 and err=ctx.Err()). -func NewContextWriter(ctx context.Context, w io.Writer) io.Writer { - return ctxio.NewWriter(ctx, w) -} - -// NewContextReader wraps a reader to make it respect the given Context. -// If there is a blocking read, the returned Reader will return whenever the -// context is cancelled (the return values are n=0 and err=ctx.Err()). 
-func NewContextReader(ctx context.Context, r io.Reader) io.Reader { - return ctxio.NewReader(ctx, r) -} - -// NewContextWriteCloser as NewContextWriter but with io.Closer interface. -func NewContextWriteCloser(ctx context.Context, w io.WriteCloser) io.WriteCloser { - ctxw := ctxio.NewWriter(ctx, w) - return NewWriteCloser(ctxw, w) -} - -// NewContextReadCloser as NewContextReader but with io.Closer interface. -func NewContextReadCloser(ctx context.Context, r io.ReadCloser) io.ReadCloser { - ctxr := ctxio.NewReader(ctx, r) - return NewReadCloser(ctxr, r) -} - -type readerOnError struct { - io.Reader - notify func(error) -} - -// NewReaderOnError returns an io.Reader that calls the notify function when an -// unexpected (!io.EOF) error happens, after the Read function is called. -func NewReaderOnError(r io.Reader, notify func(error)) io.Reader { - return &readerOnError{r, notify} -} - -// NewReadCloserOnError returns an io.ReadCloser that calls the notify function -// when an unexpected (!io.EOF) error happens, after the Read function is called. -func NewReadCloserOnError(r io.ReadCloser, notify func(error)) io.ReadCloser { - return NewReadCloser(NewReaderOnError(r, notify), r) -} - -func (r *readerOnError) Read(buf []byte) (n int, err error) { - n, err = r.Reader.Read(buf) - if err != nil && err != io.EOF { - r.notify(err) - } - - return -} - -type writerOnError struct { - io.Writer - notify func(error) -} - -// NewWriterOnError returns an io.Writer that calls the notify function when an -// unexpected (!io.EOF) error happens, after the Write function is called. -func NewWriterOnError(w io.Writer, notify func(error)) io.Writer { - return &writerOnError{w, notify} -} - -// NewWriteCloserOnError returns an io.WriteCloser that calls the notify function -// when an unexpected (!io.EOF) error happens, after the Write function is called. -func NewWriteCloserOnError(w io.WriteCloser, notify func(error)) io.WriteCloser { - return NewWriteCloser(NewWriterOnError(w, notify), w) -} - -func (r *writerOnError) Write(p []byte) (n int, err error) { - n, err = r.Writer.Write(p) - if err != nil && err != io.EOF { - r.notify(err) - } - - return -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/change.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/change.go deleted file mode 100644 index cc6dc890716..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/change.go +++ /dev/null @@ -1,149 +0,0 @@ -package merkletrie - -import ( - "fmt" - "io" - - "github.com/go-git/go-git/v5/utils/merkletrie/noder" -) - -// Action values represent the kind of things a Change can represent: -// insertions, deletions or modifications of files. -type Action int - -// The set of possible actions in a change. -const ( - _ Action = iota - Insert - Delete - Modify -) - -// String returns the action as a human readable text. -func (a Action) String() string { - switch a { - case Insert: - return "Insert" - case Delete: - return "Delete" - case Modify: - return "Modify" - default: - panic(fmt.Sprintf("unsupported action: %d", a)) - } -} - -// A Change value represents how a noder has changed between two merkletries. -type Change struct { - // The noder before the change or nil if it was inserted. - From noder.Path - // The noder after the change or nil if it was deleted. - To noder.Path -} - -// Action is a convenience method that returns what Action c represents. 
-func (c *Change) Action() (Action, error) { - if c.From == nil && c.To == nil { - return Action(0), fmt.Errorf("malformed change: nil from and to") - } - if c.From == nil { - return Insert, nil - } - if c.To == nil { - return Delete, nil - } - - return Modify, nil -} - -// NewInsert returns a new Change representing the insertion of n. -func NewInsert(n noder.Path) Change { return Change{To: n} } - -// NewDelete returns a new Change representing the deletion of n. -func NewDelete(n noder.Path) Change { return Change{From: n} } - -// NewModify returns a new Change representing that a has been modified and -// it is now b. -func NewModify(a, b noder.Path) Change { - return Change{ - From: a, - To: b, - } -} - -// String returns a single change in human readable form, using the -// format: '<' + action + space + path + '>'. The contents of the file -// before or after the change are not included in this format. -// -// Example: inserting a file at the path a/b/c.txt will return "<Insert a/b/c.txt>". -func (c Change) String() string { - action, err := c.Action() - if err != nil { - panic(err) - } - - var path string - if action == Delete { - path = c.From.String() - } else { - path = c.To.String() - } - - return fmt.Sprintf("<%s %s>", action, path) -} - -// Changes is a list of changes between two merkletries. -type Changes []Change - -// NewChanges returns an empty list of changes. -func NewChanges() Changes { - return Changes{} -} - -// Add adds the change c to the list of changes. -func (l *Changes) Add(c Change) { - *l = append(*l, c) -} - -// AddRecursiveInsert adds the required changes to insert all the -// file-like noders found in root, recursively. -func (l *Changes) AddRecursiveInsert(root noder.Path) error { - return l.addRecursive(root, NewInsert) -} - -// AddRecursiveDelete adds the required changes to delete all the -// file-like noders found in root, recursively. -func (l *Changes) AddRecursiveDelete(root noder.Path) error { - return l.addRecursive(root, NewDelete) -} - -type noderToChangeFn func(noder.Path) Change // NewInsert or NewDelete - -func (l *Changes) addRecursive(root noder.Path, ctor noderToChangeFn) error { - if !root.IsDir() { - l.Add(ctor(root)) - return nil - } - - i, err := NewIterFromPath(root) - if err != nil { - return err - } - - var current noder.Path - for { - if current, err = i.Step(); err != nil { - if err == io.EOF { - break - } - return err - } - if current.IsDir() { - continue - } - l.Add(ctor(current)) - } - - return nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go deleted file mode 100644 index bd084b2ab03..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go +++ /dev/null @@ -1,428 +0,0 @@ -package merkletrie - -// The focus of this difftree implementation is to save time by -// skipping whole directories if their hash is the same in both -// trees. -// -// The diff algorithm implemented here is based on the doubleiter -// type defined in this same package; we will iterate over both -// trees at the same time, while comparing the current noders in -// each iterator. Depending on how they differ we will output the -// corresponding changes and move the iterators further over both -// trees. -// -// The table below shows all the possible comparison results, along -// with what changes we should produce and how to advance the -// iterators. 
-// -// The table is implemented by the switches in this function, -// diffTwoNodes, diffTwoNodesSameName and diffTwoDirs. -// -// Many Bothans died to bring us this information, make sure you -// understand the table before modifying this code. - -// # Cases -// -// When comparing noders in both trees you will find yourself in -// one of 169 possible cases, but if we ignore moves, we can -// simplify a lot the search space into the following table: -// -// - "-": nothing, no file or directory -// - a<>: an empty file named "a". -// - a<1>: a file named "a", with "1" as its contents. -// - a<2>: a file named "a", with "2" as its contents. -// - a(): an empty dir named "a". -// - a(...): a dir named "a", with some files and/or dirs inside (possibly -// empty). -// - a(;;;): a dir named "a", with some other files and/or dirs inside -// (possibly empty), which different from the ones in "a(...)". -// -// \ to - a<> a<1> a<2> a() a(...) a(;;;) -// from \ -// - 00 01 02 03 04 05 06 -// a<> 10 11 12 13 14 15 16 -// a<1> 20 21 22 23 24 25 26 -// a<2> 30 31 32 33 34 35 36 -// a() 40 41 42 43 44 45 46 -// a(...) 50 51 52 53 54 55 56 -// a(;;;) 60 61 62 63 64 65 66 -// -// Every (from, to) combination in the table is a special case, but -// some of them can be merged into some more general cases, for -// instance 11 and 22 can be merged into the general case: both -// noders are equal. -// -// Here is a full list of all the cases that are similar and how to -// merge them together into more general cases. Each general case -// is labeled with an uppercase letter for further reference, and it -// is followed by the pseudocode of the checks you have to perfrom -// on both noders to see if you are in such a case, the actions to -// perform (i.e. what changes to output) and how to advance the -// iterators of each tree to continue the comparison process. -// -// ## A. Impossible: 00 -// -// ## B. Same thing on both sides: 11, 22, 33, 44, 55, 66 -// - check: `SameName() && SameHash()` -// - action: do nothing. -// - advance: `FromNext(); ToNext()` -// -// ### C. To was created: 01, 02, 03, 04, 05, 06 -// - check: `DifferentName() && ToBeforeFrom()` -// - action: insertRecursively(to) -// - advance: `ToNext()` -// -// ### D. From was deleted: 10, 20, 30, 40, 50, 60 -// - check: `DifferentName() && FromBeforeTo()` -// - action: `DeleteRecursively(from)` -// - advance: `FromNext()` -// -// ### E. Empty file to file with contents: 12, 13 -// - check: `SameName() && DifferentHash() && FromIsFile() && -// ToIsFile() && FromIsEmpty()` -// - action: `modifyFile(from, to)` -// - advance: `FromNext()` or `FromStep()` -// -// ### E'. file with contents to empty file: 21, 31 -// - check: `SameName() && DifferentHash() && FromIsFile() && -// ToIsFile() && ToIsEmpty()` -// - action: `modifyFile(from, to)` -// - advance: `FromNext()` or `FromStep()` -// -// ### F. empty file to empty dir with the same name: 14 -// - check: `SameName() && FromIsFile() && FromIsEmpty() && -// ToIsDir() && ToIsEmpty()` -// - action: `DeleteFile(from); InsertEmptyDir(to)` -// - advance: `FromNext(); ToNext()` -// -// ### F'. empty dir to empty file of the same name: 41 -// - check: `SameName() && FromIsDir() && FromIsEmpty && -// ToIsFile() && ToIsEmpty()` -// - action: `DeleteEmptyDir(from); InsertFile(to)` -// - advance: `FromNext(); ToNext()` or step for any of them. -// -// ### G. 
empty file to non-empty dir of the same name: 15, 16 -// - check: `SameName() && FromIsFile() && ToIsDir() && -// FromIsEmpty() && ToIsNotEmpty()` -// - action: `DeleteFile(from); InsertDirRecursively(to)` -// - advance: `FromNext(); ToNext()` -// -// ### G'. non-empty dir to empty file of the same name: 51, 61 -// - check: `SameName() && FromIsDir() && FromIsNotEmpty() && -// ToIsFile() && FromIsEmpty()` -// - action: `DeleteDirRecursively(from); InsertFile(to)` -// - advance: `FromNext(); ToNext()` -// -// ### H. modify file contents: 23, 32 -// - check: `SameName() && FromIsFile() && ToIsFile() && -// FromIsNotEmpty() && ToIsNotEmpty()` -// - action: `ModifyFile(from, to)` -// - advance: `FromNext(); ToNext()` -// -// ### I. file with contents to empty dir: 24, 34 -// - check: `SameName() && DifferentHash() && FromIsFile() && -// FromIsNotEmpty() && ToIsDir() && ToIsEmpty()` -// - action: `DeleteFile(from); InsertEmptyDir(to)` -// - advance: `FromNext(); ToNext()` -// -// ### I'. empty dir to file with contents: 42, 43 -// - check: `SameName() && DifferentHash() && FromIsDir() && -// FromIsEmpty() && ToIsFile() && ToIsEmpty()` -// - action: `DeleteDir(from); InsertFile(to)` -// - advance: `FromNext(); ToNext()` -// -// ### J. file with contents to dir with contents: 25, 26, 35, 36 -// - check: `SameName() && DifferentHash() && FromIsFile() && -// FromIsNotEmpty() && ToIsDir() && ToIsNotEmpty()` -// - action: `DeleteFile(from); InsertDirRecursively(to)` -// - advance: `FromNext(); ToNext()` -// -// ### J'. dir with contents to file with contents: 52, 62, 53, 63 -// - check: `SameName() && DifferentHash() && FromIsDir() && -// FromIsNotEmpty() && ToIsFile() && ToIsNotEmpty()` -// - action: `DeleteDirRecursively(from); InsertFile(to)` -// - advance: `FromNext(); ToNext()` -// -// ### K. empty dir to dir with contents: 45, 46 -// - check: `SameName() && DifferentHash() && FromIsDir() && -// FromIsEmpty() && ToIsDir() && ToIsNotEmpty()` -// - action: `InsertChildrenRecursively(to)` -// - advance: `FromNext(); ToNext()` -// -// ### K'. dir with contents to empty dir: 54, 64 -// - check: `SameName() && DifferentHash() && FromIsDir() && -// FromIsEmpty() && ToIsDir() && ToIsNotEmpty()` -// - action: `DeleteChildrenRecursively(from)` -// - advance: `FromNext(); ToNext()` -// -// ### L. dir with contents to dir with different contents: 56, 65 -// - check: `SameName() && DifferentHash() && FromIsDir() && -// FromIsNotEmpty() && ToIsDir() && ToIsNotEmpty()` -// - action: nothing -// - advance: `FromStep(); ToStep()` -// -// - -// All these cases can be further simplified by a truth table -// reduction process, in which we gather similar checks together to -// make the final code easier to read and understand. -// -// The first 6 columns are the outputs of the checks to perform on -// both noders. I have labeled them 1 to 6, this is what they mean: -// -// 1: SameName() -// 2: SameHash() -// 3: FromIsDir() -// 4: ToIsDir() -// 5: FromIsEmpty() -// 6: ToIsEmpty() -// -// The from and to columns are a fsnoder example of the elements -// that you will find on each tree under the specified comparison -// results (columns 1 to 6). -// -// The type column identifies the case we are into, from the list above. -// -// The type' column identifies the new set of reduced cases, using -// lowercase letters, and they are explained after the table. -// -// The last column is the set of actions and advances for each case. -// -// "---" means impossible except in case of hash collision. 
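// Editorial worked example (not part of the vendored file): suppose the
// from tree holds a file a<1> and the to tree holds a dir with contents
// a(...). Then SameName() is true, SameHash() is false, FromIsDir() is
// false and ToIsDir() is true, which is case J (type' f) in the table
// below: emit delete(from) and insert(to) recursively, then advance both
// iterators with Next (NN).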
-// -// advance meaning: -// - NN: from.Next(); to.Next() -// - SS: from.Step(); to.Step() -// -// 1 2 3 4 5 6 | from | to |type|type'|action ; advance -// ------------+--------+--------+----+------------------------------------ -// 0 0 0 0 0 0 | | | | | if !SameName() { -// . | | | | | if FromBeforeTo() { -// . | | | D | d | delete(from); from.Next() -// . | | | | | } else { -// . | | | C | c | insert(to); to.Next() -// . | | | | | } -// 0 1 1 1 1 1 | | | | | } -// 1 0 0 0 0 0 | a<1> | a<2> | H | e | modify(from, to); NN -// 1 0 0 0 0 1 | a<1> | a<> | E' | e | modify(from, to); NN -// 1 0 0 0 1 0 | a<> | a<1> | E | e | modify(from, to); NN -// 1 0 0 0 1 1 | ---- | ---- | | e | -// 1 0 0 1 0 0 | a<1> | a(...) | J | f | delete(from); insert(to); NN -// 1 0 0 1 0 1 | a<1> | a() | I | f | delete(from); insert(to); NN -// 1 0 0 1 1 0 | a<> | a(...) | G | f | delete(from); insert(to); NN -// 1 0 0 1 1 1 | a<> | a() | F | f | delete(from); insert(to); NN -// 1 0 1 0 0 0 | a(...) | a<1> | J' | f | delete(from); insert(to); NN -// 1 0 1 0 0 1 | a(...) | a<> | G' | f | delete(from); insert(to); NN -// 1 0 1 0 1 0 | a() | a<1> | I' | f | delete(from); insert(to); NN -// 1 0 1 0 1 1 | a() | a<> | F' | f | delete(from); insert(to); NN -// 1 0 1 1 0 0 | a(...) | a(;;;) | L | g | nothing; SS -// 1 0 1 1 0 1 | a(...) | a() | K' | h | deleteChildren(from); NN -// 1 0 1 1 1 0 | a() | a(...) | K | i | insertChildren(to); NN -// 1 0 1 1 1 1 | ---- | ---- | | | -// 1 1 0 0 0 0 | a<1> | a<1> | B | b | nothing; NN -// 1 1 0 0 0 1 | ---- | ---- | | b | -// 1 1 0 0 1 0 | ---- | ---- | | b | -// 1 1 0 0 1 1 | a<> | a<> | B | b | nothing; NN -// 1 1 0 1 0 0 | ---- | ---- | | b | -// 1 1 0 1 0 1 | ---- | ---- | | b | -// 1 1 0 1 1 0 | ---- | ---- | | b | -// 1 1 0 1 1 1 | ---- | ---- | | b | -// 1 1 1 0 0 0 | ---- | ---- | | b | -// 1 1 1 0 0 1 | ---- | ---- | | b | -// 1 1 1 0 1 0 | ---- | ---- | | b | -// 1 1 1 0 1 1 | ---- | ---- | | b | -// 1 1 1 1 0 0 | a(...) | a(...) | B | b | nothing; NN -// 1 1 1 1 0 1 | ---- | ---- | | b | -// 1 1 1 1 1 0 | ---- | ---- | | b | -// 1 1 1 1 1 1 | a() | a() | B | b | nothing; NN -// -// c and d: -// if !SameName() -// d if FromBeforeTo() -// c else -// b: SameName) && sameHash() -// e: SameName() && !sameHash() && BothAreFiles() -// f: SameName() && !sameHash() && FileAndDir() -// g: SameName() && !sameHash() && BothAreDirs() && NoneIsEmpty -// i: SameName() && !sameHash() && BothAreDirs() && FromIsEmpty -// h: else of i - -import ( - "context" - "errors" - "fmt" - - "github.com/go-git/go-git/v5/utils/merkletrie/noder" -) - -var ( - // ErrCanceled is returned whenever the operation is canceled. - ErrCanceled = errors.New("operation canceled") -) - -// DiffTree calculates the list of changes between two merkletries. It -// uses the provided hashEqual callback to compare noders. -func DiffTree( - fromTree, - toTree noder.Noder, - hashEqual noder.Equal, -) (Changes, error) { - return DiffTreeContext(context.Background(), fromTree, toTree, hashEqual) -} - -// DiffTreeContext calculates the list of changes between two merkletries. It -// uses the provided hashEqual callback to compare noders. 
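// Editorial usage sketch for DiffTree above (fromTree, toTree and
// hashEqual are assumed to exist; they are not part of the vendored file):
//
//	changes, err := merkletrie.DiffTree(fromTree, toTree, hashEqual)
//	if err != nil {
//		return err
//	}
//	for _, ch := range changes {
//		action, _ := ch.Action() // Insert, Delete or Modify
//		fmt.Printf("%s %s\n", action, ch)
//	}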
-// Error will be returned if context expires -// Provided context must be non nil -func DiffTreeContext(ctx context.Context, fromTree, toTree noder.Noder, - hashEqual noder.Equal) (Changes, error) { - ret := NewChanges() - - ii, err := newDoubleIter(fromTree, toTree, hashEqual) - if err != nil { - return nil, err - } - - for { - select { - case <-ctx.Done(): - return nil, ErrCanceled - default: - } - - from := ii.from.current - to := ii.to.current - - switch r := ii.remaining(); r { - case noMoreNoders: - return ret, nil - case onlyFromRemains: - if err = ret.AddRecursiveDelete(from); err != nil { - return nil, err - } - if err = ii.nextFrom(); err != nil { - return nil, err - } - case onlyToRemains: - if err = ret.AddRecursiveInsert(to); err != nil { - return nil, err - } - if err = ii.nextTo(); err != nil { - return nil, err - } - case bothHaveNodes: - if err = diffNodes(&ret, ii); err != nil { - return nil, err - } - default: - panic(fmt.Sprintf("unknown remaining value: %d", r)) - } - } -} - -func diffNodes(changes *Changes, ii *doubleIter) error { - from := ii.from.current - to := ii.to.current - var err error - - // compare their full paths as strings - switch from.Compare(to) { - case -1: - if err = changes.AddRecursiveDelete(from); err != nil { - return err - } - if err = ii.nextFrom(); err != nil { - return err - } - case 1: - if err = changes.AddRecursiveInsert(to); err != nil { - return err - } - if err = ii.nextTo(); err != nil { - return err - } - default: - if err := diffNodesSameName(changes, ii); err != nil { - return err - } - } - - return nil -} - -func diffNodesSameName(changes *Changes, ii *doubleIter) error { - from := ii.from.current - to := ii.to.current - - status, err := ii.compare() - if err != nil { - return err - } - - switch { - case status.sameHash: - // do nothing - if err = ii.nextBoth(); err != nil { - return err - } - case status.bothAreFiles: - changes.Add(NewModify(from, to)) - if err = ii.nextBoth(); err != nil { - return err - } - case status.fileAndDir: - if err = changes.AddRecursiveDelete(from); err != nil { - return err - } - if err = changes.AddRecursiveInsert(to); err != nil { - return err - } - if err = ii.nextBoth(); err != nil { - return err - } - case status.bothAreDirs: - if err = diffDirs(changes, ii); err != nil { - return err - } - default: - return fmt.Errorf("bad status from double iterator") - } - - return nil -} - -func diffDirs(changes *Changes, ii *doubleIter) error { - from := ii.from.current - to := ii.to.current - - status, err := ii.compare() - if err != nil { - return err - } - - switch { - case status.fromIsEmptyDir: - if err = changes.AddRecursiveInsert(to); err != nil { - return err - } - if err = ii.nextBoth(); err != nil { - return err - } - case status.toIsEmptyDir: - if err = changes.AddRecursiveDelete(from); err != nil { - return err - } - if err = ii.nextBoth(); err != nil { - return err - } - case !status.fromIsEmptyDir && !status.toIsEmptyDir: - // do nothing - if err = ii.stepBoth(); err != nil { - return err - } - default: - return fmt.Errorf("both dirs are empty but has different hash") - } - - return nil -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doc.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doc.go deleted file mode 100644 index 5204024ad4f..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doc.go +++ /dev/null @@ -1,34 +0,0 @@ -/* -Package merkletrie provides support for n-ary trees that are at the same 
-time Merkle trees and Radix trees (tries).
-
-Git trees are Radix n-ary trees by virtue of the names of their
-tree entries. At the same time, git trees are Merkle trees thanks to
-their hashes.
-
-This package defines Merkle tries as nodes that should have:
-
-- a hash: the Merkle part of the Merkle trie
-
-- a key: the Radix part of the Merkle trie
-
-The Merkle hash condition is not enforced by this package though. This
-means that the hash of a node doesn't have to take into account the
-hashes of its children, which is good for testing purposes.
-
-Nodes in the Merkle trie are abstracted by the Noder interface. The
-intended use is that git trees implement this interface, either
-directly or using a simple wrapper.
-
-This package provides an iterator for merkletries that can skip whole
-directory-like noders and an efficient merkletrie comparison algorithm.
-
-When comparing git trees, the simple approach of alphabetically sorting
-their elements and comparing the resulting lists is too slow as it
-depends linearly on the number of files in the trees: when a directory
-has lots of files but none of them has been modified, this approach is
-very expensive. We can do better by pruning whole directories that
-have not changed, just by looking at their hashes. This package
-provides the tools to do exactly that.
-*/
-package merkletrie
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doubleiter.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doubleiter.go
deleted file mode 100644
index 4a4341b3875..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doubleiter.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package merkletrie
-
-import (
-	"fmt"
-	"io"
-
-	"github.com/go-git/go-git/v5/utils/merkletrie/noder"
-)
-
-// A doubleIter is a convenience type to keep track of the current
-// noders in two merkletries that are going to be iterated in parallel.
-// It has methods for:
-//
-// - iterating over the merkletries, both at the same time or
-// individually: nextFrom, nextTo, nextBoth, stepBoth
-//
-// - checking if there are noders left in one or both of them with the
-// remaining method and its associated returned type.
-//
-// - comparing the current noders of both merkletries in several ways,
-// with the compare method and its associated returned type.
-type doubleIter struct {
-	from struct {
-		iter    *Iter
-		current noder.Path // nil if no more nodes
-	}
-	to struct {
-		iter    *Iter
-		current noder.Path // nil if no more nodes
-	}
-	hashEqual noder.Equal
-}
-
-// newDoubleIter returns a new doubleIter for the merkletries "from" and
-// "to". The hashEqual callback function will be used by the doubleIter
-// to compare the hash of the noders in the merkletries. The doubleIter
-// will be initialized to the first elements in each merkletrie, if any.
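// Editorial sketch (not part of the vendored file): a typical noder.Equal
// callback only compares the precomputed hashes, which is what makes
// skipping unchanged directories cheap; assumes the "bytes" package:
//
//	var hashEqual noder.Equal = func(a, b noder.Hasher) bool {
//		return bytes.Equal(a.Hash(), b.Hash())
//	}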
-func newDoubleIter(from, to noder.Noder, hashEqual noder.Equal) ( - *doubleIter, error) { - var ii doubleIter - var err error - - if ii.from.iter, err = NewIter(from); err != nil { - return nil, fmt.Errorf("from: %s", err) - } - if ii.from.current, err = ii.from.iter.Next(); turnEOFIntoNil(err) != nil { - return nil, fmt.Errorf("from: %s", err) - } - - if ii.to.iter, err = NewIter(to); err != nil { - return nil, fmt.Errorf("to: %s", err) - } - if ii.to.current, err = ii.to.iter.Next(); turnEOFIntoNil(err) != nil { - return nil, fmt.Errorf("to: %s", err) - } - - ii.hashEqual = hashEqual - - return &ii, nil -} - -func turnEOFIntoNil(e error) error { - if e != nil && e != io.EOF { - return e - } - return nil -} - -// NextBoth makes d advance to the next noder in both merkletries. If -// any of them is a directory, it skips its contents. -func (d *doubleIter) nextBoth() error { - if err := d.nextFrom(); err != nil { - return err - } - if err := d.nextTo(); err != nil { - return err - } - - return nil -} - -// NextFrom makes d advance to the next noder in the "from" merkletrie, -// skipping its contents if it is a directory. -func (d *doubleIter) nextFrom() (err error) { - d.from.current, err = d.from.iter.Next() - return turnEOFIntoNil(err) -} - -// NextTo makes d advance to the next noder in the "to" merkletrie, -// skipping its contents if it is a directory. -func (d *doubleIter) nextTo() (err error) { - d.to.current, err = d.to.iter.Next() - return turnEOFIntoNil(err) -} - -// StepBoth makes d advance to the next noder in both merkletries, -// getting deeper into directories if that is the case. -func (d *doubleIter) stepBoth() (err error) { - if d.from.current, err = d.from.iter.Step(); turnEOFIntoNil(err) != nil { - return err - } - if d.to.current, err = d.to.iter.Step(); turnEOFIntoNil(err) != nil { - return err - } - return nil -} - -// Remaining returns if there are no more noders in the tree, if both -// have noders or if one of them doesn't. -func (d *doubleIter) remaining() remaining { - if d.from.current == nil && d.to.current == nil { - return noMoreNoders - } - - if d.from.current == nil && d.to.current != nil { - return onlyToRemains - } - - if d.from.current != nil && d.to.current == nil { - return onlyFromRemains - } - - return bothHaveNodes -} - -// Remaining values tells you whether both trees still have noders, or -// only one of them or none of them. -type remaining int - -const ( - noMoreNoders remaining = iota - onlyToRemains - onlyFromRemains - bothHaveNodes -) - -// Compare returns the comparison between the current elements in the -// merkletries. -func (d *doubleIter) compare() (s comparison, err error) { - s.sameHash = d.hashEqual(d.from.current, d.to.current) - - fromIsDir := d.from.current.IsDir() - toIsDir := d.to.current.IsDir() - - s.bothAreDirs = fromIsDir && toIsDir - s.bothAreFiles = !fromIsDir && !toIsDir - s.fileAndDir = !s.bothAreDirs && !s.bothAreFiles - - fromNumChildren, err := d.from.current.NumChildren() - if err != nil { - return comparison{}, fmt.Errorf("from: %s", err) - } - - toNumChildren, err := d.to.current.NumChildren() - if err != nil { - return comparison{}, fmt.Errorf("to: %s", err) - } - - s.fromIsEmptyDir = fromIsDir && fromNumChildren == 0 - s.toIsEmptyDir = toIsDir && toNumChildren == 0 - - return -} - -// Answers to a lot of questions you can ask about how to noders are -// equal or different. -type comparison struct { - // the following are only valid if both nodes have the same name - // (i.e. 
nameComparison == 0) - - // Do both nodes have the same hash? - sameHash bool - // Are both nodes files? - bothAreFiles bool - - // the following are only valid if any of the noders are dirs, - // this is, if !bothAreFiles - - // Is one a file and the other a dir? - fileAndDir bool - // Are both nodes dirs? - bothAreDirs bool - // Is the from node an empty dir? - fromIsEmptyDir bool - // Is the to Node an empty dir? - toIsEmptyDir bool -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go deleted file mode 100644 index 165bd42fcf5..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go +++ /dev/null @@ -1,196 +0,0 @@ -package filesystem - -import ( - "io" - "os" - "path" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/filemode" - "github.com/go-git/go-git/v5/utils/merkletrie/noder" - - "github.com/go-git/go-billy/v5" -) - -var ignore = map[string]bool{ - ".git": true, -} - -// The node represents a file or a directory in a billy.Filesystem. It -// implements the interface noder.Noder of merkletrie package. -// -// This implementation implements a "standard" hash method being able to be -// compared with any other noder.Noder implementation inside of go-git. -type node struct { - fs billy.Filesystem - submodules map[string]plumbing.Hash - - path string - hash []byte - children []noder.Noder - isDir bool -} - -// NewRootNode returns the root node based on a given billy.Filesystem. -// -// In order to provide the submodule hash status, a map[string]plumbing.Hash -// should be provided where the key is the path of the submodule and the commit -// of the submodule HEAD -func NewRootNode( - fs billy.Filesystem, - submodules map[string]plumbing.Hash, -) noder.Noder { - return &node{fs: fs, submodules: submodules, isDir: true} -} - -// Hash the hash of a filesystem is the result of concatenating the computed -// plumbing.Hash of the file as a Blob and its plumbing.FileMode; that way the -// difftree algorithm will detect changes in the contents of files and also in -// their mode. 
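// Editorial layout sketch (not part of the vendored file; contents and
// mode are placeholders): the key is the 20-byte SHA-1 blob hash followed
// by the 4-byte file mode, 24 bytes in total:
//
//	h := plumbing.ComputeHash(plumbing.BlobObject, contents) // 20 bytes
//	key := append(h[:], mode.Bytes()...)                     // + 4 bytes = 24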
-// -// The hash of a directory is always a 24-bytes slice of zero values -func (n *node) Hash() []byte { - return n.hash -} - -func (n *node) Name() string { - return path.Base(n.path) -} - -func (n *node) IsDir() bool { - return n.isDir -} - -func (n *node) Children() ([]noder.Noder, error) { - if err := n.calculateChildren(); err != nil { - return nil, err - } - - return n.children, nil -} - -func (n *node) NumChildren() (int, error) { - if err := n.calculateChildren(); err != nil { - return -1, err - } - - return len(n.children), nil -} - -func (n *node) calculateChildren() error { - if !n.IsDir() { - return nil - } - - if len(n.children) != 0 { - return nil - } - - files, err := n.fs.ReadDir(n.path) - if err != nil { - if os.IsNotExist(err) { - return nil - } - - return nil - } - - for _, file := range files { - if _, ok := ignore[file.Name()]; ok { - continue - } - - c, err := n.newChildNode(file) - if err != nil { - return err - } - - n.children = append(n.children, c) - } - - return nil -} - -func (n *node) newChildNode(file os.FileInfo) (*node, error) { - path := path.Join(n.path, file.Name()) - - hash, err := n.calculateHash(path, file) - if err != nil { - return nil, err - } - - node := &node{ - fs: n.fs, - submodules: n.submodules, - - path: path, - hash: hash, - isDir: file.IsDir(), - } - - if hash, isSubmodule := n.submodules[path]; isSubmodule { - node.hash = append(hash[:], filemode.Submodule.Bytes()...) - node.isDir = false - } - - return node, nil -} - -func (n *node) calculateHash(path string, file os.FileInfo) ([]byte, error) { - if file.IsDir() { - return make([]byte, 24), nil - } - - var hash plumbing.Hash - var err error - if file.Mode()&os.ModeSymlink != 0 { - hash, err = n.doCalculateHashForSymlink(path, file) - } else { - hash, err = n.doCalculateHashForRegular(path, file) - } - - if err != nil { - return nil, err - } - - mode, err := filemode.NewFromOSFileMode(file.Mode()) - if err != nil { - return nil, err - } - - return append(hash[:], mode.Bytes()...), nil -} - -func (n *node) doCalculateHashForRegular(path string, file os.FileInfo) (plumbing.Hash, error) { - f, err := n.fs.Open(path) - if err != nil { - return plumbing.ZeroHash, err - } - - defer f.Close() - - h := plumbing.NewHasher(plumbing.BlobObject, file.Size()) - if _, err := io.Copy(h, f); err != nil { - return plumbing.ZeroHash, err - } - - return h.Sum(), nil -} - -func (n *node) doCalculateHashForSymlink(path string, file os.FileInfo) (plumbing.Hash, error) { - target, err := n.fs.Readlink(path) - if err != nil { - return plumbing.ZeroHash, err - } - - h := plumbing.NewHasher(plumbing.BlobObject, file.Size()) - if _, err := h.Write([]byte(target)); err != nil { - return plumbing.ZeroHash, err - } - - return h.Sum(), nil -} - -func (n *node) String() string { - return n.path -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/index/node.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/index/node.go deleted file mode 100644 index d05b0c694de..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/index/node.go +++ /dev/null @@ -1,90 +0,0 @@ -package index - -import ( - "path" - "strings" - - "github.com/go-git/go-git/v5/plumbing/format/index" - "github.com/go-git/go-git/v5/utils/merkletrie/noder" -) - -// The node represents a index.Entry or a directory inferred from the path -// of all entries. It implements the interface noder.Noder of merkletrie -// package. 
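// Editorial illustration (not part of the vendored file): given index
// entries "a/b.txt" and "a/c.txt", NewRootNode below infers one directory
// node and hangs both file nodes from it:
//
//	"" (root)
//	└── "a"            directory, inferred from the entry paths
//	    ├── "a/b.txt"  file, carries its index.Entry
//	    └── "a/c.txt"  file, carries its index.Entry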
-//
-// This implementation implements a "standard" hash method so it can be
-// compared with any other noder.Noder implementation inside go-git.
-type node struct {
-	path     string
-	entry    *index.Entry
-	children []noder.Noder
-	isDir    bool
-}
-
-// NewRootNode returns the root node of a computed tree from an index.Index.
-func NewRootNode(idx *index.Index) noder.Noder {
-	const rootNode = ""
-
-	m := map[string]*node{rootNode: {isDir: true}}
-
-	for _, e := range idx.Entries {
-		parts := strings.Split(e.Name, string("/"))
-
-		var fullpath string
-		for _, part := range parts {
-			parent := fullpath
-			fullpath = path.Join(fullpath, part)
-
-			if _, ok := m[fullpath]; ok {
-				continue
-			}
-
-			n := &node{path: fullpath}
-			if fullpath == e.Name {
-				n.entry = e
-			} else {
-				n.isDir = true
-			}
-
-			m[n.path] = n
-			m[parent].children = append(m[parent].children, n)
-		}
-	}
-
-	return m[rootNode]
-}
-
-func (n *node) String() string {
-	return n.path
-}
-
-// Hash returns the hash of the node as a 24-byte slice: the result of
-// concatenating the computed plumbing.Hash of the file as a Blob and its
-// plumbing.FileMode; that way the difftree algorithm will detect changes
-// in the contents of files and also in their mode.
-//
-// If the node is computed and not based on an index.Entry, the hash is
-// equal to a 24-byte slice of zero values.
-func (n *node) Hash() []byte {
-	if n.entry == nil {
-		return make([]byte, 24)
-	}
-
-	return append(n.entry.Hash[:], n.entry.Mode.Bytes()...)
-}
-
-func (n *node) Name() string {
-	return path.Base(n.path)
-}
-
-func (n *node) IsDir() bool {
-	return n.isDir
-}
-
-func (n *node) Children() ([]noder.Noder, error) {
-	return n.children, nil
-}
-
-func (n *node) NumChildren() (int, error) {
-	return len(n.children), nil
-}
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/internal/frame/frame.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/internal/frame/frame.go
deleted file mode 100644
index 131878a1c7a..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/internal/frame/frame.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package frame
-
-import (
-	"bytes"
-	"fmt"
-	"sort"
-	"strings"
-
-	"github.com/go-git/go-git/v5/utils/merkletrie/noder"
-)
-
-// A Frame is a collection of siblings in a trie, sorted alphabetically
-// by name.
-type Frame struct {
-	// siblings, sorted in reverse alphabetical order by name
-	stack []noder.Noder
-}
-
-type byName []noder.Noder
-
-func (a byName) Len() int      { return len(a) }
-func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a byName) Less(i, j int) bool {
-	return strings.Compare(a[i].Name(), a[j].Name()) < 0
-}
-
-// New returns a frame with the children of the provided node.
-func New(n noder.Noder) (*Frame, error) {
-	children, err := n.Children()
-	if err != nil {
-		return nil, err
-	}
-
-	sort.Sort(sort.Reverse(byName(children)))
-	return &Frame{
-		stack: children,
-	}, nil
-}
-
-// String returns the quoted names of the noders in the frame, sorted in
-// alphabetical order by name, surrounded by square brackets and
-// separated by commas.
-//
-// Examples:
-//	[]
-//	["a", "b"]
func (f *Frame) String() string {
-	var buf bytes.Buffer
-	_ = buf.WriteByte('[')
-
-	sep := ""
-	for i := f.Len() - 1; i >= 0; i-- {
-		_, _ = buf.WriteString(sep)
-		sep = ", "
-		_, _ = buf.WriteString(fmt.Sprintf("%q", f.stack[i].Name()))
-	}
-
-	_ = buf.WriteByte(']')
-
-	return buf.String()
-}
-
-// First returns, but does not extract, the noder with the alphabetically
-// smaller name in the frame, and true if the frame was not empty.
-// Otherwise it returns nil and false.
-func (f *Frame) First() (noder.Noder, bool) {
-	if f.Len() == 0 {
-		return nil, false
-	}
-
-	top := f.Len() - 1
-
-	return f.stack[top], true
-}
-
-// Drop extracts the noder with the alphabetically smaller name in the
-// frame, or does nothing if the frame was empty.
-func (f *Frame) Drop() {
-	if f.Len() == 0 {
-		return
-	}
-
-	top := f.Len() - 1
-	f.stack[top] = nil
-	f.stack = f.stack[:top]
-}
-
-// Len returns the number of noders in the frame.
-func (f *Frame) Len() int {
-	return len(f.stack)
-}
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/iter.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/iter.go
deleted file mode 100644
index d75afec4643..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/iter.go
+++ /dev/null
@@ -1,216 +0,0 @@
-package merkletrie
-
-import (
-	"fmt"
-	"io"
-
-	"github.com/go-git/go-git/v5/utils/merkletrie/internal/frame"
-	"github.com/go-git/go-git/v5/utils/merkletrie/noder"
-)
-
-// Iter is an iterator for merkletries (only the trie part of the
-// merkletrie is relevant here, it does not use the Hasher interface).
-//
-// The iteration is performed in depth-first pre-order. Entries at each
-// depth are traversed in (case-sensitive) alphabetical order.
-//
-// This is the kind of traversal you will expect when listing ordinary
-// files and directories recursively, for example:
-//
-//	  Trie          Traversal order
-//	  ----          ---------------
-//	    .
-//	  / | \         c
-//	 /  |  \        d/
-//	d   c   z  ===> d/a
-//	/ \             d/b
-//	b  a            z
-//
-//
-// This iterator is somewhat special, as you can choose to skip whole
-// "directories" when iterating:
-//
-// - The Step method will iterate normally.
-//
-// - The Next method will not descend deeper into the tree.
-//
-// For example, if the iterator is at `d/`, the Step method will return
-// `d/a` while Next would have returned `z` instead (skipping `d/`
-// and its descendants). The names of these two methods are based on
-// the well-known "next" and "step" operations, quite common in
-// debuggers, like gdb.
-//
-// The paths returned by the iterator will be relative, if the iterator
-// was created from a single node, or absolute, if the iterator was
-// created from the path to the node (the path will be prefixed to all
-// returned paths).
-type Iter struct {
-	// Tells if the iteration has started.
-	hasStarted bool
-	// The top of this stack has the current node and its siblings. The
-	// rest of the stack keeps the ancestors of the current node and
-	// their corresponding siblings. The current element is always the
-	// top element of the top frame.
-	//
-	// When "step"ping into a node, its children are pushed as a new
-	// frame.
-	//
-	// When "next"ing past a node, the current element is dropped by
-	// popping the top frame.
-	frameStack []*frame.Frame
-	// The base path used to turn the relative paths used internally by
-	// the iterator into absolute paths used by external applications.
- // For relative iterator this will be nil. - base noder.Path -} - -// NewIter returns a new relative iterator using the provider noder as -// its unnamed root. When iterating, all returned paths will be -// relative to node. -func NewIter(n noder.Noder) (*Iter, error) { - return newIter(n, nil) -} - -// NewIterFromPath returns a new absolute iterator from the noder at the -// end of the path p. When iterating, all returned paths will be -// absolute, using the root of the path p as their root. -func NewIterFromPath(p noder.Path) (*Iter, error) { - return newIter(p, p) // Path implements Noder -} - -func newIter(root noder.Noder, base noder.Path) (*Iter, error) { - ret := &Iter{ - base: base, - } - - if root == nil { - return ret, nil - } - - frame, err := frame.New(root) - if err != nil { - return nil, err - } - ret.push(frame) - - return ret, nil -} - -func (iter *Iter) top() (*frame.Frame, bool) { - if len(iter.frameStack) == 0 { - return nil, false - } - top := len(iter.frameStack) - 1 - - return iter.frameStack[top], true -} - -func (iter *Iter) push(f *frame.Frame) { - iter.frameStack = append(iter.frameStack, f) -} - -const ( - doDescend = true - dontDescend = false -) - -// Next returns the path of the next node without descending deeper into -// the trie and nil. If there are no more entries in the trie it -// returns nil and io.EOF. In case of error, it will return nil and the -// error. -func (iter *Iter) Next() (noder.Path, error) { - return iter.advance(dontDescend) -} - -// Step returns the path to the next node in the trie, descending deeper -// into it if needed, and nil. If there are no more nodes in the trie, -// it returns nil and io.EOF. In case of error, it will return nil and -// the error. -func (iter *Iter) Step() (noder.Path, error) { - return iter.advance(doDescend) -} - -// Advances the iterator in the desired direction: descend or -// dontDescend. -// -// Returns the new current element and a nil error on success. If there -// are no more elements in the trie below the base, it returns nil, and -// io.EOF. Returns nil and an error in case of errors. -func (iter *Iter) advance(wantDescend bool) (noder.Path, error) { - current, err := iter.current() - if err != nil { - return nil, err - } - - // The first time we just return the current node. - if !iter.hasStarted { - iter.hasStarted = true - return current, nil - } - - // Advances means getting a next current node, either its first child or - // its next sibling, depending if we must descend or not. - numChildren, err := current.NumChildren() - if err != nil { - return nil, err - } - - mustDescend := numChildren != 0 && wantDescend - if mustDescend { - // descend: add a new frame with the current's children. - frame, err := frame.New(current) - if err != nil { - return nil, err - } - iter.push(frame) - } else { - // don't descend: just drop the current node - iter.drop() - } - - return iter.current() -} - -// Returns the path to the current node, adding the base if there was -// one, and a nil error. If there were no noders left, it returns nil -// and io.EOF. If an error occurred, it returns nil and the error. -func (iter *Iter) current() (noder.Path, error) { - if topFrame, ok := iter.top(); !ok { - return nil, io.EOF - } else if _, ok := topFrame.First(); !ok { - return nil, io.EOF - } - - ret := make(noder.Path, 0, len(iter.base)+len(iter.frameStack)) - - // concat the base... - ret = append(ret, iter.base...) - // ... 
and the current node and all its ancestors - for i, f := range iter.frameStack { - t, ok := f.First() - if !ok { - panic(fmt.Sprintf("frame %d is empty", i)) - } - ret = append(ret, t) - } - - return ret, nil -} - -// removes the current node if any, and all the frames that become empty as a -// consequence of this action. -func (iter *Iter) drop() { - frame, ok := iter.top() - if !ok { - return - } - - frame.Drop() - // if the frame is empty, remove it and its parent, recursively - if frame.Len() == 0 { - top := len(iter.frameStack) - 1 - iter.frameStack[top] = nil - iter.frameStack = iter.frameStack[:top] - iter.drop() - } -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/noder.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/noder.go deleted file mode 100644 index d6b3de4adaf..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/noder.go +++ /dev/null @@ -1,59 +0,0 @@ -// Package noder provide an interface for defining nodes in a -// merkletrie, their hashes and their paths (a noders and its -// ancestors). -// -// The hasher interface is easy to implement naively by elements that -// already have a hash, like git blobs and trees. More sophisticated -// implementations can implement the Equal function in exotic ways -// though: for instance, comparing the modification time of directories -// in a filesystem. -package noder - -import "fmt" - -// Hasher interface is implemented by types that can tell you -// their hash. -type Hasher interface { - Hash() []byte -} - -// Equal functions take two hashers and return if they are equal. -// -// These functions are expected to be faster than reflect.Equal or -// reflect.DeepEqual because they can compare just the hash of the -// objects, instead of their contents, so they are expected to be O(1). -type Equal func(a, b Hasher) bool - -// The Noder interface is implemented by the elements of a Merkle Trie. -// -// There are two types of elements in a Merkle Trie: -// -// - file-like nodes: they cannot have children. -// -// - directory-like nodes: they can have 0 or more children and their -// hash is calculated by combining their children hashes. -type Noder interface { - Hasher - fmt.Stringer // for testing purposes - // Name returns the name of an element (relative, not its full - // path). - Name() string - // IsDir returns true if the element is a directory-like node or - // false if it is a file-like node. - IsDir() bool - // Children returns the children of the element. Note that empty - // directory-like noders and file-like noders will both return - // NoChildren. - Children() ([]Noder, error) - // NumChildren returns the number of children this element has. - // - // This method is an optimization: the number of children is easily - // calculated as the length of the value returned by the Children - // method (above); yet, some implementations will be able to - // implement NumChildren in O(1) while Children is usually more - // complex. - NumChildren() (int, error) -} - -// NoChildren represents the children of a noder without children. 
-var NoChildren = []Noder{} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/path.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/path.go deleted file mode 100644 index 1c7ef54eebc..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/path.go +++ /dev/null @@ -1,90 +0,0 @@ -package noder - -import ( - "bytes" - "strings" -) - -// Path values represent a noder and its ancestors. The root goes first -// and the actual final noder the path is referring to will be the last. -// -// A path implements the Noder interface, redirecting all the interface -// calls to its final noder. -// -// Paths build from an empty Noder slice are not valid paths and should -// not be used. -type Path []Noder - -// String returns the full path of the final noder as a string, using -// "/" as the separator. -func (p Path) String() string { - var buf bytes.Buffer - sep := "" - for _, e := range p { - _, _ = buf.WriteString(sep) - sep = "/" - _, _ = buf.WriteString(e.Name()) - } - - return buf.String() -} - -// Last returns the final noder in the path. -func (p Path) Last() Noder { - return p[len(p)-1] -} - -// Hash returns the hash of the final noder of the path. -func (p Path) Hash() []byte { - return p.Last().Hash() -} - -// Name returns the name of the final noder of the path. -func (p Path) Name() string { - return p.Last().Name() -} - -// IsDir returns if the final noder of the path is a directory-like -// noder. -func (p Path) IsDir() bool { - return p.Last().IsDir() -} - -// Children returns the children of the final noder in the path. -func (p Path) Children() ([]Noder, error) { - return p.Last().Children() -} - -// NumChildren returns the number of children the final noder of the -// path has. -func (p Path) NumChildren() (int, error) { - return p.Last().NumChildren() -} - -// Compare returns -1, 0 or 1 if the path p is smaller, equal or bigger -// than other, in "directory order"; for example: -// -// "a" < "b" -// "a/b/c/d/z" < "b" -// "a/b/a" > "a/b" -func (p Path) Compare(other Path) int { - i := 0 - for { - switch { - case len(other) == len(p) && i == len(p): - return 0 - case i == len(other): - return 1 - case i == len(p): - return -1 - default: - // We do *not* normalize Unicode here. CGit doesn't. 
-			// https://github.com/src-d/go-git/issues/1057
-			cmp := strings.Compare(p[i].Name(), other[i].Name())
-			if cmp != 0 {
-				return cmp
-			}
-		}
-		i++
-	}
-}
diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree.go
deleted file mode 100644
index 7f394d484c6..00000000000
--- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree.go
+++ /dev/null
@@ -1,954 +0,0 @@
-package git
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"io"
-	stdioutil "io/ioutil"
-	"os"
-	"path/filepath"
-	"strings"
-	"sync"
-
-	"github.com/go-git/go-git/v5/config"
-	"github.com/go-git/go-git/v5/plumbing"
-	"github.com/go-git/go-git/v5/plumbing/filemode"
-	"github.com/go-git/go-git/v5/plumbing/format/gitignore"
-	"github.com/go-git/go-git/v5/plumbing/format/index"
-	"github.com/go-git/go-git/v5/plumbing/object"
-	"github.com/go-git/go-git/v5/plumbing/storer"
-	"github.com/go-git/go-git/v5/utils/ioutil"
-	"github.com/go-git/go-git/v5/utils/merkletrie"
-
-	"github.com/go-git/go-billy/v5"
-	"github.com/go-git/go-billy/v5/util"
-)
-
-var (
-	ErrWorktreeNotClean     = errors.New("worktree is not clean")
-	ErrSubmoduleNotFound    = errors.New("submodule not found")
-	ErrUnstagedChanges      = errors.New("worktree contains unstaged changes")
-	ErrGitModulesSymlink    = errors.New(gitmodulesFile + " is a symlink")
-	ErrNonFastForwardUpdate = errors.New("non-fast-forward update")
-)
-
-// Worktree represents a git worktree.
-type Worktree struct {
-	// Filesystem is the underlying filesystem.
-	Filesystem billy.Filesystem
-	// Excludes are external excludes not found in the repository .gitignore.
-	Excludes []gitignore.Pattern
-
-	r *Repository
-}
-
-// Pull incorporates changes from a remote repository into the current branch.
-// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there
-// are no changes to be fetched, or an error.
-//
-// Pull only supports merges where the changes can be resolved as a
-// fast-forward.
-func (w *Worktree) Pull(o *PullOptions) error {
-	return w.PullContext(context.Background(), o)
-}
-
-// PullContext incorporates changes from a remote repository into the current
-// branch. Returns nil if the operation is successful, NoErrAlreadyUpToDate if
-// there are no changes to be fetched, or an error.
-//
-// Pull only supports merges where the changes can be resolved as a
-// fast-forward.
-//
-// The provided Context must be non-nil. If the context expires before the
-// operation is complete, an error is returned. The context only affects the
-// transport operations.
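// Editorial usage sketch (the remote name and timeout are assumptions,
// not part of the vendored file):
//
//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//	defer cancel()
//	err := wt.PullContext(ctx, &git.PullOptions{RemoteName: "origin"})
//	if err != nil && err != git.NoErrAlreadyUpToDate {
//		return err
//	}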
-func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error { - if err := o.Validate(); err != nil { - return err - } - - remote, err := w.r.Remote(o.RemoteName) - if err != nil { - return err - } - - fetchHead, err := remote.fetch(ctx, &FetchOptions{ - RemoteName: o.RemoteName, - Depth: o.Depth, - Auth: o.Auth, - Progress: o.Progress, - Force: o.Force, - }) - - updated := true - if err == NoErrAlreadyUpToDate { - updated = false - } else if err != nil { - return err - } - - ref, err := storer.ResolveReference(fetchHead, o.ReferenceName) - if err != nil { - return err - } - - head, err := w.r.Head() - if err == nil { - if !updated && head.Hash() == ref.Hash() { - return NoErrAlreadyUpToDate - } - - ff, err := isFastForward(w.r.Storer, head.Hash(), ref.Hash()) - if err != nil { - return err - } - - if !ff { - return ErrNonFastForwardUpdate - } - } - - if err != nil && err != plumbing.ErrReferenceNotFound { - return err - } - - if err := w.updateHEAD(ref.Hash()); err != nil { - return err - } - - if err := w.Reset(&ResetOptions{ - Mode: MergeReset, - Commit: ref.Hash(), - }); err != nil { - return err - } - - if o.RecurseSubmodules != NoRecurseSubmodules { - return w.updateSubmodules(&SubmoduleUpdateOptions{ - RecurseSubmodules: o.RecurseSubmodules, - Auth: o.Auth, - }) - } - - return nil -} - -func (w *Worktree) updateSubmodules(o *SubmoduleUpdateOptions) error { - s, err := w.Submodules() - if err != nil { - return err - } - o.Init = true - return s.Update(o) -} - -// Checkout switch branches or restore working tree files. -func (w *Worktree) Checkout(opts *CheckoutOptions) error { - if err := opts.Validate(); err != nil { - return err - } - - if opts.Create { - if err := w.createBranch(opts); err != nil { - return err - } - } - - c, err := w.getCommitFromCheckoutOptions(opts) - if err != nil { - return err - } - - ro := &ResetOptions{Commit: c, Mode: MergeReset} - if opts.Force { - ro.Mode = HardReset - } else if opts.Keep { - ro.Mode = SoftReset - } - - if !opts.Hash.IsZero() && !opts.Create { - err = w.setHEADToCommit(opts.Hash) - } else { - err = w.setHEADToBranch(opts.Branch, c) - } - - if err != nil { - return err - } - - return w.Reset(ro) -} -func (w *Worktree) createBranch(opts *CheckoutOptions) error { - _, err := w.r.Storer.Reference(opts.Branch) - if err == nil { - return fmt.Errorf("a branch named %q already exists", opts.Branch) - } - - if err != plumbing.ErrReferenceNotFound { - return err - } - - if opts.Hash.IsZero() { - ref, err := w.r.Head() - if err != nil { - return err - } - - opts.Hash = ref.Hash() - } - - return w.r.Storer.SetReference( - plumbing.NewHashReference(opts.Branch, opts.Hash), - ) -} - -func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing.Hash, error) { - if !opts.Hash.IsZero() { - return opts.Hash, nil - } - - b, err := w.r.Reference(opts.Branch, true) - if err != nil { - return plumbing.ZeroHash, err - } - - if !b.Name().IsTag() { - return b.Hash(), nil - } - - o, err := w.r.Object(plumbing.AnyObject, b.Hash()) - if err != nil { - return plumbing.ZeroHash, err - } - - switch o := o.(type) { - case *object.Tag: - if o.TargetType != plumbing.CommitObject { - return plumbing.ZeroHash, fmt.Errorf("unsupported tag object target %q", o.TargetType) - } - - return o.Target, nil - case *object.Commit: - return o.Hash, nil - } - - return plumbing.ZeroHash, fmt.Errorf("unsupported tag target %q", o.Type()) -} - -func (w *Worktree) setHEADToCommit(commit plumbing.Hash) error { - head := 
plumbing.NewHashReference(plumbing.HEAD, commit) - return w.r.Storer.SetReference(head) -} - -func (w *Worktree) setHEADToBranch(branch plumbing.ReferenceName, commit plumbing.Hash) error { - target, err := w.r.Storer.Reference(branch) - if err != nil { - return err - } - - var head *plumbing.Reference - if target.Name().IsBranch() { - head = plumbing.NewSymbolicReference(plumbing.HEAD, target.Name()) - } else { - head = plumbing.NewHashReference(plumbing.HEAD, commit) - } - - return w.r.Storer.SetReference(head) -} - -// Reset the worktree to a specified state. -func (w *Worktree) Reset(opts *ResetOptions) error { - if err := opts.Validate(w.r); err != nil { - return err - } - - if opts.Mode == MergeReset { - unstaged, err := w.containsUnstagedChanges() - if err != nil { - return err - } - - if unstaged { - return ErrUnstagedChanges - } - } - - if err := w.setHEADCommit(opts.Commit); err != nil { - return err - } - - if opts.Mode == SoftReset { - return nil - } - - t, err := w.getTreeFromCommitHash(opts.Commit) - if err != nil { - return err - } - - if opts.Mode == MixedReset || opts.Mode == MergeReset || opts.Mode == HardReset { - if err := w.resetIndex(t); err != nil { - return err - } - } - - if opts.Mode == MergeReset || opts.Mode == HardReset { - if err := w.resetWorktree(t); err != nil { - return err - } - } - - return nil -} - -func (w *Worktree) resetIndex(t *object.Tree) error { - idx, err := w.r.Storer.Index() - if err != nil { - return err - } - b := newIndexBuilder(idx) - - changes, err := w.diffTreeWithStaging(t, true) - if err != nil { - return err - } - - for _, ch := range changes { - a, err := ch.Action() - if err != nil { - return err - } - - var name string - var e *object.TreeEntry - - switch a { - case merkletrie.Modify, merkletrie.Insert: - name = ch.To.String() - e, err = t.FindEntry(name) - if err != nil { - return err - } - case merkletrie.Delete: - name = ch.From.String() - } - - b.Remove(name) - if e == nil { - continue - } - - b.Add(&index.Entry{ - Name: name, - Hash: e.Hash, - Mode: e.Mode, - }) - - } - - b.Write(idx) - return w.r.Storer.SetIndex(idx) -} - -func (w *Worktree) resetWorktree(t *object.Tree) error { - changes, err := w.diffStagingWithWorktree(true) - if err != nil { - return err - } - - idx, err := w.r.Storer.Index() - if err != nil { - return err - } - b := newIndexBuilder(idx) - - for _, ch := range changes { - if err := w.checkoutChange(ch, t, b); err != nil { - return err - } - } - - b.Write(idx) - return w.r.Storer.SetIndex(idx) -} - -func (w *Worktree) checkoutChange(ch merkletrie.Change, t *object.Tree, idx *indexBuilder) error { - a, err := ch.Action() - if err != nil { - return err - } - - var e *object.TreeEntry - var name string - var isSubmodule bool - - switch a { - case merkletrie.Modify, merkletrie.Insert: - name = ch.To.String() - e, err = t.FindEntry(name) - if err != nil { - return err - } - - isSubmodule = e.Mode == filemode.Submodule - case merkletrie.Delete: - return rmFileAndDirIfEmpty(w.Filesystem, ch.From.String()) - } - - if isSubmodule { - return w.checkoutChangeSubmodule(name, a, e, idx) - } - - return w.checkoutChangeRegularFile(name, a, t, e, idx) -} - -func (w *Worktree) containsUnstagedChanges() (bool, error) { - ch, err := w.diffStagingWithWorktree(false) - if err != nil { - return false, err - } - - for _, c := range ch { - a, err := c.Action() - if err != nil { - return false, err - } - - if a == merkletrie.Insert { - continue - } - - return true, nil - } - - return false, nil -} - -func (w *Worktree) 
setHEADCommit(commit plumbing.Hash) error { - head, err := w.r.Reference(plumbing.HEAD, false) - if err != nil { - return err - } - - if head.Type() == plumbing.HashReference { - head = plumbing.NewHashReference(plumbing.HEAD, commit) - return w.r.Storer.SetReference(head) - } - - branch, err := w.r.Reference(head.Target(), false) - if err != nil { - return err - } - - if !branch.Name().IsBranch() { - return fmt.Errorf("invalid HEAD target should be a branch, found %s", branch.Type()) - } - - branch = plumbing.NewHashReference(branch.Name(), commit) - return w.r.Storer.SetReference(branch) -} - -func (w *Worktree) checkoutChangeSubmodule(name string, - a merkletrie.Action, - e *object.TreeEntry, - idx *indexBuilder, -) error { - switch a { - case merkletrie.Modify: - sub, err := w.Submodule(name) - if err != nil { - return err - } - - if !sub.initialized { - return nil - } - - return w.addIndexFromTreeEntry(name, e, idx) - case merkletrie.Insert: - mode, err := e.Mode.ToOSFileMode() - if err != nil { - return err - } - - if err := w.Filesystem.MkdirAll(name, mode); err != nil { - return err - } - - return w.addIndexFromTreeEntry(name, e, idx) - } - - return nil -} - -func (w *Worktree) checkoutChangeRegularFile(name string, - a merkletrie.Action, - t *object.Tree, - e *object.TreeEntry, - idx *indexBuilder, -) error { - switch a { - case merkletrie.Modify: - idx.Remove(name) - - // to apply perm changes the file is deleted, billy doesn't implement - // chmod - if err := w.Filesystem.Remove(name); err != nil { - return err - } - - fallthrough - case merkletrie.Insert: - f, err := t.File(name) - if err != nil { - return err - } - - if err := w.checkoutFile(f); err != nil { - return err - } - - return w.addIndexFromFile(name, e.Hash, idx) - } - - return nil -} - -var copyBufferPool = sync.Pool{ - New: func() interface{} { - return make([]byte, 32*1024) - }, -} - -func (w *Worktree) checkoutFile(f *object.File) (err error) { - mode, err := f.Mode.ToOSFileMode() - if err != nil { - return - } - - if mode&os.ModeSymlink != 0 { - return w.checkoutFileSymlink(f) - } - - from, err := f.Reader() - if err != nil { - return - } - - defer ioutil.CheckClose(from, &err) - - to, err := w.Filesystem.OpenFile(f.Name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode.Perm()) - if err != nil { - return - } - - defer ioutil.CheckClose(to, &err) - buf := copyBufferPool.Get().([]byte) - _, err = io.CopyBuffer(to, from, buf) - copyBufferPool.Put(buf) - return -} - -func (w *Worktree) checkoutFileSymlink(f *object.File) (err error) { - from, err := f.Reader() - if err != nil { - return - } - - defer ioutil.CheckClose(from, &err) - - bytes, err := stdioutil.ReadAll(from) - if err != nil { - return - } - - err = w.Filesystem.Symlink(string(bytes), f.Name) - - // On windows, this might fail. - // Follow Git on Windows behavior by writing the link as it is. 
- if err != nil && isSymlinkWindowsNonAdmin(err) { - mode, _ := f.Mode.ToOSFileMode() - - to, err := w.Filesystem.OpenFile(f.Name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode.Perm()) - if err != nil { - return err - } - - defer ioutil.CheckClose(to, &err) - - _, err = to.Write(bytes) - return err - } - return -} - -func (w *Worktree) addIndexFromTreeEntry(name string, f *object.TreeEntry, idx *indexBuilder) error { - idx.Remove(name) - idx.Add(&index.Entry{ - Hash: f.Hash, - Name: name, - Mode: filemode.Submodule, - }) - return nil -} - -func (w *Worktree) addIndexFromFile(name string, h plumbing.Hash, idx *indexBuilder) error { - idx.Remove(name) - fi, err := w.Filesystem.Lstat(name) - if err != nil { - return err - } - - mode, err := filemode.NewFromOSFileMode(fi.Mode()) - if err != nil { - return err - } - - e := &index.Entry{ - Hash: h, - Name: name, - Mode: mode, - ModifiedAt: fi.ModTime(), - Size: uint32(fi.Size()), - } - - // if the FileInfo.Sys() comes from os the ctime, dev, inode, uid and gid - // can be retrieved, otherwise this doesn't apply - if fillSystemInfo != nil { - fillSystemInfo(e, fi.Sys()) - } - idx.Add(e) - return nil -} - -func (w *Worktree) getTreeFromCommitHash(commit plumbing.Hash) (*object.Tree, error) { - c, err := w.r.CommitObject(commit) - if err != nil { - return nil, err - } - - return c.Tree() -} - -var fillSystemInfo func(e *index.Entry, sys interface{}) - -const gitmodulesFile = ".gitmodules" - -// Submodule returns the submodule with the given name -func (w *Worktree) Submodule(name string) (*Submodule, error) { - l, err := w.Submodules() - if err != nil { - return nil, err - } - - for _, m := range l { - if m.Config().Name == name { - return m, nil - } - } - - return nil, ErrSubmoduleNotFound -} - -// Submodules returns all the available submodules -func (w *Worktree) Submodules() (Submodules, error) { - l := make(Submodules, 0) - m, err := w.readGitmodulesFile() - if err != nil || m == nil { - return l, err - } - - c, err := w.r.Config() - if err != nil { - return nil, err - } - - for _, s := range m.Submodules { - l = append(l, w.newSubmodule(s, c.Submodules[s.Name])) - } - - return l, nil -} - -func (w *Worktree) newSubmodule(fromModules, fromConfig *config.Submodule) *Submodule { - m := &Submodule{w: w} - m.initialized = fromConfig != nil - - if !m.initialized { - m.c = fromModules - return m - } - - m.c = fromConfig - m.c.Path = fromModules.Path - return m -} - -func (w *Worktree) isSymlink(path string) bool { - if s, err := w.Filesystem.Lstat(path); err == nil { - return s.Mode()&os.ModeSymlink != 0 - } - return false -} - -func (w *Worktree) readGitmodulesFile() (*config.Modules, error) { - if w.isSymlink(gitmodulesFile) { - return nil, ErrGitModulesSymlink - } - - f, err := w.Filesystem.Open(gitmodulesFile) - if err != nil { - if os.IsNotExist(err) { - return nil, nil - } - - return nil, err - } - - defer f.Close() - input, err := stdioutil.ReadAll(f) - if err != nil { - return nil, err - } - - m := config.NewModules() - return m, m.Unmarshal(input) -} - -// Clean the worktree by removing untracked files. -// An empty dir could be removed - this is what `git clean -f -d .` does. 
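// Editorial usage sketch (not part of the vendored file): with Dir set,
// empty directories are removed as well, mirroring `git clean -f -d`:
//
//	if err := wt.Clean(&git.CleanOptions{Dir: true}); err != nil {
//		return err
//	}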
-func (w *Worktree) Clean(opts *CleanOptions) error { - s, err := w.Status() - if err != nil { - return err - } - - root := "" - files, err := w.Filesystem.ReadDir(root) - if err != nil { - return err - } - return w.doClean(s, opts, root, files) -} - -func (w *Worktree) doClean(status Status, opts *CleanOptions, dir string, files []os.FileInfo) error { - for _, fi := range files { - if fi.Name() == GitDirName { - continue - } - - // relative path under the root - path := filepath.Join(dir, fi.Name()) - if fi.IsDir() { - if !opts.Dir { - continue - } - - subfiles, err := w.Filesystem.ReadDir(path) - if err != nil { - return err - } - err = w.doClean(status, opts, path, subfiles) - if err != nil { - return err - } - } else { - if status.IsUntracked(path) { - if err := w.Filesystem.Remove(path); err != nil { - return err - } - } - } - } - - if opts.Dir { - return doCleanDirectories(w.Filesystem, dir) - } - return nil -} - -// GrepResult is structure of a grep result. -type GrepResult struct { - // FileName is the name of file which contains match. - FileName string - // LineNumber is the line number of a file at which a match was found. - LineNumber int - // Content is the content of the file at the matching line. - Content string - // TreeName is the name of the tree (reference name/commit hash) at - // which the match was performed. - TreeName string -} - -func (gr GrepResult) String() string { - return fmt.Sprintf("%s:%s:%d:%s", gr.TreeName, gr.FileName, gr.LineNumber, gr.Content) -} - -// Grep performs grep on a worktree. -func (w *Worktree) Grep(opts *GrepOptions) ([]GrepResult, error) { - if err := opts.Validate(w); err != nil { - return nil, err - } - - // Obtain commit hash from options (CommitHash or ReferenceName). - var commitHash plumbing.Hash - // treeName contains the value of TreeName in GrepResult. - var treeName string - - if opts.ReferenceName != "" { - ref, err := w.r.Reference(opts.ReferenceName, true) - if err != nil { - return nil, err - } - commitHash = ref.Hash() - treeName = opts.ReferenceName.String() - } else if !opts.CommitHash.IsZero() { - commitHash = opts.CommitHash - treeName = opts.CommitHash.String() - } - - // Obtain a tree from the commit hash and get a tracked files iterator from - // the tree. - tree, err := w.getTreeFromCommitHash(commitHash) - if err != nil { - return nil, err - } - fileiter := tree.Files() - - return findMatchInFiles(fileiter, treeName, opts) -} - -// findMatchInFiles takes a FileIter, worktree name and GrepOptions, and -// returns a slice of GrepResult containing the result of regex pattern matching -// in content of all the files. -func findMatchInFiles(fileiter *object.FileIter, treeName string, opts *GrepOptions) ([]GrepResult, error) { - var results []GrepResult - - err := fileiter.ForEach(func(file *object.File) error { - var fileInPathSpec bool - - // When no pathspecs are provided, search all the files. - if len(opts.PathSpecs) == 0 { - fileInPathSpec = true - } - - // Check if the file name matches with the pathspec. Break out of the - // loop once a match is found. - for _, pathSpec := range opts.PathSpecs { - if pathSpec != nil && pathSpec.MatchString(file.Name) { - fileInPathSpec = true - break - } - } - - // If the file does not match with any of the pathspec, skip it. - if !fileInPathSpec { - return nil - } - - grepResults, err := findMatchInFile(file, treeName, opts) - if err != nil { - return err - } - results = append(results, grepResults...) 
- - return nil - }) - - return results, err -} - -// findMatchInFile takes a single File, worktree name and GrepOptions, -// and returns a slice of GrepResult containing the result of regex pattern -// matching in the given file. -func findMatchInFile(file *object.File, treeName string, opts *GrepOptions) ([]GrepResult, error) { - var grepResults []GrepResult - - content, err := file.Contents() - if err != nil { - return grepResults, err - } - - // Split the file content and parse line-by-line. - contentByLine := strings.Split(content, "\n") - for lineNum, cnt := range contentByLine { - addToResult := false - - // Match the patterns and content. Break out of the loop once a - // match is found. - for _, pattern := range opts.Patterns { - if pattern != nil && pattern.MatchString(cnt) { - // Add to result only if invert match is not enabled. - if !opts.InvertMatch { - addToResult = true - break - } - } else if opts.InvertMatch { - // If matching fails, and invert match is enabled, add to - // results. - addToResult = true - break - } - } - - if addToResult { - grepResults = append(grepResults, GrepResult{ - FileName: file.Name, - LineNumber: lineNum + 1, - Content: cnt, - TreeName: treeName, - }) - } - } - - return grepResults, nil -} - -func rmFileAndDirIfEmpty(fs billy.Filesystem, name string) error { - if err := util.RemoveAll(fs, name); err != nil { - return err - } - - dir := filepath.Dir(name) - return doCleanDirectories(fs, dir) -} - -// doCleanDirectories removes empty subdirs (without files) -func doCleanDirectories(fs billy.Filesystem, dir string) error { - files, err := fs.ReadDir(dir) - if err != nil { - return err - } - if len(files) == 0 { - return fs.Remove(dir) - } - return nil -} - -type indexBuilder struct { - entries map[string]*index.Entry -} - -func newIndexBuilder(idx *index.Index) *indexBuilder { - entries := make(map[string]*index.Entry, len(idx.Entries)) - for _, e := range idx.Entries { - entries[e.Name] = e - } - return &indexBuilder{ - entries: entries, - } -} - -func (b *indexBuilder) Write(idx *index.Index) { - idx.Entries = idx.Entries[:0] - for _, e := range b.entries { - idx.Entries = append(idx.Entries, e) - } -} - -func (b *indexBuilder) Add(e *index.Entry) { - b.entries[e.Name] = e -} - -func (b *indexBuilder) Remove(name string) { - delete(b.entries, filepath.ToSlash(name)) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_bsd.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_bsd.go deleted file mode 100644 index d4ea327588f..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_bsd.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build darwin freebsd netbsd - -package git - -import ( - "syscall" - "time" - - "github.com/go-git/go-git/v5/plumbing/format/index" -) - -func init() { - fillSystemInfo = func(e *index.Entry, sys interface{}) { - if os, ok := sys.(*syscall.Stat_t); ok { - e.CreatedAt = time.Unix(int64(os.Atimespec.Sec), int64(os.Atimespec.Nsec)) - e.Dev = uint32(os.Dev) - e.Inode = uint32(os.Ino) - e.GID = os.Gid - e.UID = os.Uid - } - } -} - -func isSymlinkWindowsNonAdmin(err error) bool { - return false -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_commit.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_commit.go deleted file mode 100644 index 63eb2e86765..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_commit.go +++ /dev/null @@ -1,228 +0,0 @@ -package git - -import ( - "bytes" - "path" - "sort" - "strings" - - 
"golang.org/x/crypto/openpgp" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/filemode" - "github.com/go-git/go-git/v5/plumbing/format/index" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/storage" - - "github.com/go-git/go-billy/v5" -) - -// Commit stores the current contents of the index in a new commit along with -// a log message from the user describing the changes. -func (w *Worktree) Commit(msg string, opts *CommitOptions) (plumbing.Hash, error) { - if err := opts.Validate(w.r); err != nil { - return plumbing.ZeroHash, err - } - - if opts.All { - if err := w.autoAddModifiedAndDeleted(); err != nil { - return plumbing.ZeroHash, err - } - } - - idx, err := w.r.Storer.Index() - if err != nil { - return plumbing.ZeroHash, err - } - - h := &buildTreeHelper{ - fs: w.Filesystem, - s: w.r.Storer, - } - - tree, err := h.BuildTree(idx) - if err != nil { - return plumbing.ZeroHash, err - } - - commit, err := w.buildCommitObject(msg, opts, tree) - if err != nil { - return plumbing.ZeroHash, err - } - - return commit, w.updateHEAD(commit) -} - -func (w *Worktree) autoAddModifiedAndDeleted() error { - s, err := w.Status() - if err != nil { - return err - } - - for path, fs := range s { - if fs.Worktree != Modified && fs.Worktree != Deleted { - continue - } - - if _, err := w.Add(path); err != nil { - return err - } - } - - return nil -} - -func (w *Worktree) updateHEAD(commit plumbing.Hash) error { - head, err := w.r.Storer.Reference(plumbing.HEAD) - if err != nil { - return err - } - - name := plumbing.HEAD - if head.Type() != plumbing.HashReference { - name = head.Target() - } - - ref := plumbing.NewHashReference(name, commit) - return w.r.Storer.SetReference(ref) -} - -func (w *Worktree) buildCommitObject(msg string, opts *CommitOptions, tree plumbing.Hash) (plumbing.Hash, error) { - commit := &object.Commit{ - Author: *opts.Author, - Committer: *opts.Committer, - Message: msg, - TreeHash: tree, - ParentHashes: opts.Parents, - } - - if opts.SignKey != nil { - sig, err := w.buildCommitSignature(commit, opts.SignKey) - if err != nil { - return plumbing.ZeroHash, err - } - commit.PGPSignature = sig - } - - obj := w.r.Storer.NewEncodedObject() - if err := commit.Encode(obj); err != nil { - return plumbing.ZeroHash, err - } - return w.r.Storer.SetEncodedObject(obj) -} - -func (w *Worktree) buildCommitSignature(commit *object.Commit, signKey *openpgp.Entity) (string, error) { - encoded := &plumbing.MemoryObject{} - if err := commit.Encode(encoded); err != nil { - return "", err - } - r, err := encoded.Reader() - if err != nil { - return "", err - } - var b bytes.Buffer - if err := openpgp.ArmoredDetachSign(&b, signKey, r, nil); err != nil { - return "", err - } - return b.String(), nil -} - -// buildTreeHelper converts a given index.Index file into multiple git objects -// reading the blobs from the given filesystem and creating the trees from the -// index structure. The created objects are pushed to a given Storer. -type buildTreeHelper struct { - fs billy.Filesystem - s storage.Storer - - trees map[string]*object.Tree - entries map[string]*object.TreeEntry -} - -// BuildTree builds the tree objects and push its to the storer, the hash -// of the root tree is returned. 
-func (h *buildTreeHelper) BuildTree(idx *index.Index) (plumbing.Hash, error) { - const rootNode = "" - h.trees = map[string]*object.Tree{rootNode: {}} - h.entries = map[string]*object.TreeEntry{} - - for _, e := range idx.Entries { - if err := h.commitIndexEntry(e); err != nil { - return plumbing.ZeroHash, err - } - } - - return h.copyTreeToStorageRecursive(rootNode, h.trees[rootNode]) -} - -func (h *buildTreeHelper) commitIndexEntry(e *index.Entry) error { - parts := strings.Split(e.Name, "/") - - var fullpath string - for _, part := range parts { - parent := fullpath - fullpath = path.Join(fullpath, part) - - h.doBuildTree(e, parent, fullpath) - } - - return nil -} - -func (h *buildTreeHelper) doBuildTree(e *index.Entry, parent, fullpath string) { - if _, ok := h.trees[fullpath]; ok { - return - } - - if _, ok := h.entries[fullpath]; ok { - return - } - - te := object.TreeEntry{Name: path.Base(fullpath)} - - if fullpath == e.Name { - te.Mode = e.Mode - te.Hash = e.Hash - } else { - te.Mode = filemode.Dir - h.trees[fullpath] = &object.Tree{} - } - - h.trees[parent].Entries = append(h.trees[parent].Entries, te) -} - -type sortableEntries []object.TreeEntry - -func (sortableEntries) sortName(te object.TreeEntry) string { - if te.Mode == filemode.Dir { - return te.Name + "/" - } - return te.Name -} -func (se sortableEntries) Len() int { return len(se) } -func (se sortableEntries) Less(i int, j int) bool { return se.sortName(se[i]) < se.sortName(se[j]) } -func (se sortableEntries) Swap(i int, j int) { se[i], se[j] = se[j], se[i] } - -func (h *buildTreeHelper) copyTreeToStorageRecursive(parent string, t *object.Tree) (plumbing.Hash, error) { - sort.Sort(sortableEntries(t.Entries)) - for i, e := range t.Entries { - if e.Mode != filemode.Dir && !e.Hash.IsZero() { - continue - } - - path := path.Join(parent, e.Name) - - var err error - e.Hash, err = h.copyTreeToStorageRecursive(path, h.trees[path]) - if err != nil { - return plumbing.ZeroHash, err - } - - t.Entries[i] = e - } - - o := h.s.NewEncodedObject() - if err := t.Encode(o); err != nil { - return plumbing.ZeroHash, err - } - - return h.s.SetEncodedObject(o) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_linux.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_linux.go deleted file mode 100644 index cf0db25242d..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_linux.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build linux - -package git - -import ( - "syscall" - "time" - - "github.com/go-git/go-git/v5/plumbing/format/index" -) - -func init() { - fillSystemInfo = func(e *index.Entry, sys interface{}) { - if os, ok := sys.(*syscall.Stat_t); ok { - e.CreatedAt = time.Unix(int64(os.Ctim.Sec), int64(os.Ctim.Nsec)) - e.Dev = uint32(os.Dev) - e.Inode = uint32(os.Ino) - e.GID = os.Gid - e.UID = os.Uid - } - } -} - -func isSymlinkWindowsNonAdmin(err error) bool { - return false -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_plan9.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_plan9.go deleted file mode 100644 index 8cedf71a32a..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_plan9.go +++ /dev/null @@ -1,31 +0,0 @@ -package git - -import ( - "syscall" - "time" - - "github.com/go-git/go-git/v5/plumbing/format/index" -) - -func init() { - fillSystemInfo = func(e *index.Entry, sys interface{}) { - if os, ok := sys.(*syscall.Dir); ok { - // Plan 9 doesn't have a CreatedAt field. 
- e.CreatedAt = time.Unix(int64(os.Mtime), 0) - - e.Dev = uint32(os.Dev) - - // Plan 9 has no Inode. - // ext2srv(4) appears to store Inode in Qid.Path. - e.Inode = uint32(os.Qid.Path) - - // Plan 9 has string UID/GID - e.GID = 0 - e.UID = 0 - } - } -} - -func isSymlinkWindowsNonAdmin(err error) bool { - return true -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_status.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_status.go deleted file mode 100644 index 1542f5e6adf..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_status.go +++ /dev/null @@ -1,660 +0,0 @@ -package git - -import ( - "bytes" - "errors" - "io" - "os" - "path" - "path/filepath" - - "github.com/go-git/go-billy/v5/util" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/filemode" - "github.com/go-git/go-git/v5/plumbing/format/gitignore" - "github.com/go-git/go-git/v5/plumbing/format/index" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/utils/ioutil" - "github.com/go-git/go-git/v5/utils/merkletrie" - "github.com/go-git/go-git/v5/utils/merkletrie/filesystem" - mindex "github.com/go-git/go-git/v5/utils/merkletrie/index" - "github.com/go-git/go-git/v5/utils/merkletrie/noder" -) - -var ( - // ErrDestinationExists in an Move operation means that the target exists on - // the worktree. - ErrDestinationExists = errors.New("destination exists") - // ErrGlobNoMatches in an AddGlob if the glob pattern does not match any - // files in the worktree. - ErrGlobNoMatches = errors.New("glob pattern did not match any files") -) - -// Status returns the working tree status. -func (w *Worktree) Status() (Status, error) { - var hash plumbing.Hash - - ref, err := w.r.Head() - if err != nil && err != plumbing.ErrReferenceNotFound { - return nil, err - } - - if err == nil { - hash = ref.Hash() - } - - return w.status(hash) -} - -func (w *Worktree) status(commit plumbing.Hash) (Status, error) { - s := make(Status) - - left, err := w.diffCommitWithStaging(commit, false) - if err != nil { - return nil, err - } - - for _, ch := range left { - a, err := ch.Action() - if err != nil { - return nil, err - } - - fs := s.File(nameFromAction(&ch)) - fs.Worktree = Unmodified - - switch a { - case merkletrie.Delete: - s.File(ch.From.String()).Staging = Deleted - case merkletrie.Insert: - s.File(ch.To.String()).Staging = Added - case merkletrie.Modify: - s.File(ch.To.String()).Staging = Modified - } - } - - right, err := w.diffStagingWithWorktree(false) - if err != nil { - return nil, err - } - - for _, ch := range right { - a, err := ch.Action() - if err != nil { - return nil, err - } - - fs := s.File(nameFromAction(&ch)) - if fs.Staging == Untracked { - fs.Staging = Unmodified - } - - switch a { - case merkletrie.Delete: - fs.Worktree = Deleted - case merkletrie.Insert: - fs.Worktree = Untracked - fs.Staging = Untracked - case merkletrie.Modify: - fs.Worktree = Modified - } - } - - return s, nil -} - -func nameFromAction(ch *merkletrie.Change) string { - name := ch.To.String() - if name == "" { - return ch.From.String() - } - - return name -} - -func (w *Worktree) diffStagingWithWorktree(reverse bool) (merkletrie.Changes, error) { - idx, err := w.r.Storer.Index() - if err != nil { - return nil, err - } - - from := mindex.NewRootNode(idx) - submodules, err := w.getSubmodulesStatus() - if err != nil { - return nil, err - } - - to := filesystem.NewRootNode(w.Filesystem, submodules) - - var c merkletrie.Changes - if reverse { - c, 
err = merkletrie.DiffTree(to, from, diffTreeIsEquals) - } else { - c, err = merkletrie.DiffTree(from, to, diffTreeIsEquals) - } - - if err != nil { - return nil, err - } - - return w.excludeIgnoredChanges(c), nil -} - -func (w *Worktree) excludeIgnoredChanges(changes merkletrie.Changes) merkletrie.Changes { - patterns, err := gitignore.ReadPatterns(w.Filesystem, nil) - if err != nil { - return changes - } - - patterns = append(patterns, w.Excludes...) - - if len(patterns) == 0 { - return changes - } - - m := gitignore.NewMatcher(patterns) - - var res merkletrie.Changes - for _, ch := range changes { - var path []string - for _, n := range ch.To { - path = append(path, n.Name()) - } - if len(path) == 0 { - for _, n := range ch.From { - path = append(path, n.Name()) - } - } - if len(path) != 0 { - isDir := (len(ch.To) > 0 && ch.To.IsDir()) || (len(ch.From) > 0 && ch.From.IsDir()) - if m.Match(path, isDir) { - continue - } - } - res = append(res, ch) - } - return res -} - -func (w *Worktree) getSubmodulesStatus() (map[string]plumbing.Hash, error) { - o := map[string]plumbing.Hash{} - - sub, err := w.Submodules() - if err != nil { - return nil, err - } - - status, err := sub.Status() - if err != nil { - return nil, err - } - - for _, s := range status { - if s.Current.IsZero() { - o[s.Path] = s.Expected - continue - } - - o[s.Path] = s.Current - } - - return o, nil -} - -func (w *Worktree) diffCommitWithStaging(commit plumbing.Hash, reverse bool) (merkletrie.Changes, error) { - var t *object.Tree - if !commit.IsZero() { - c, err := w.r.CommitObject(commit) - if err != nil { - return nil, err - } - - t, err = c.Tree() - if err != nil { - return nil, err - } - } - - return w.diffTreeWithStaging(t, reverse) -} - -func (w *Worktree) diffTreeWithStaging(t *object.Tree, reverse bool) (merkletrie.Changes, error) { - var from noder.Noder - if t != nil { - from = object.NewTreeRootNode(t) - } - - idx, err := w.r.Storer.Index() - if err != nil { - return nil, err - } - - to := mindex.NewRootNode(idx) - - if reverse { - return merkletrie.DiffTree(to, from, diffTreeIsEquals) - } - - return merkletrie.DiffTree(from, to, diffTreeIsEquals) -} - -var emptyNoderHash = make([]byte, 24) - -// diffTreeIsEquals is an implementation of noder.Equals, used to compare -// noder.Noder values; it compares both the content and the length of the hashes. -// -// Since some of the noder.Noder implementations don't compute a hash for -// some directories, if either of the hashes is a 24-byte slice of zero values -// the comparison is not done and the hashes are taken as different. -func diffTreeIsEquals(a, b noder.Hasher) bool { - hashA := a.Hash() - hashB := b.Hash() - - if bytes.Equal(hashA, emptyNoderHash) || bytes.Equal(hashB, emptyNoderHash) { - return false - } - - return bytes.Equal(hashA, hashB) -} - -// Add adds the file contents of a file in the worktree to the index. If the -// file is already staged in the index, no error is returned. If a file deleted -// from the Workspace is given, the file is removed from the index. If a -// directory is given, it adds the files and all its sub-directories recursively in -// the worktree to the index. If any of the files is already staged in the index, -// no error is returned. When path is a file, the blob.Hash is returned. -func (w *Worktree) Add(path string) (plumbing.Hash, error) { - // TODO(mcuadros): remove plumbing.Hash from signature at v5.
- s, err := w.Status() - if err != nil { - return plumbing.ZeroHash, err - } - - idx, err := w.r.Storer.Index() - if err != nil { - return plumbing.ZeroHash, err - } - - var h plumbing.Hash - var added bool - - fi, err := w.Filesystem.Lstat(path) - if err != nil || !fi.IsDir() { - added, h, err = w.doAddFile(idx, s, path) - } else { - added, err = w.doAddDirectory(idx, s, path) - } - - if err != nil { - return h, err - } - - if !added { - return h, nil - } - - return h, w.r.Storer.SetIndex(idx) -} - -func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string) (added bool, err error) { - files, err := w.Filesystem.ReadDir(directory) - if err != nil { - return false, err - } - - for _, file := range files { - name := path.Join(directory, file.Name()) - - var a bool - if file.IsDir() { - if file.Name() == GitDirName { - // ignore special git directory - continue - } - a, err = w.doAddDirectory(idx, s, name) - } else { - a, _, err = w.doAddFile(idx, s, name) - } - - if err != nil { - return - } - - if !added && a { - added = true - } - } - - return -} - -// AddGlob adds all paths, matching pattern, to the index. If pattern matches a -// directory path, all directory contents are added to the index recursively. No -// error is returned if all matching paths are already staged in index. -func (w *Worktree) AddGlob(pattern string) error { - files, err := util.Glob(w.Filesystem, pattern) - if err != nil { - return err - } - - if len(files) == 0 { - return ErrGlobNoMatches - } - - s, err := w.Status() - if err != nil { - return err - } - - idx, err := w.r.Storer.Index() - if err != nil { - return err - } - - var saveIndex bool - for _, file := range files { - fi, err := w.Filesystem.Lstat(file) - if err != nil { - return err - } - - var added bool - if fi.IsDir() { - added, err = w.doAddDirectory(idx, s, file) - } else { - added, _, err = w.doAddFile(idx, s, file) - } - - if err != nil { - return err - } - - if !saveIndex && added { - saveIndex = true - } - } - - if saveIndex { - return w.r.Storer.SetIndex(idx) - } - - return nil -} - -// doAddFile create a new blob from path and update the index, added is true if -// the file added is different from the index. 
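A short sketch of the two public entry points layered over `doAddDirectory`/`doAddFile`; the paths are placeholders and `wt` is a `*git.Worktree` as in the earlier sketches:

```go
package main

import git "github.com/go-git/go-git/v5"

// stageAll contrasts Add on a single path with AddGlob on a pattern;
// both leave already-staged files untouched without error.
func stageAll(wt *git.Worktree) error {
	// Add stages one file, or a directory recursively.
	if _, err := wt.Add("main.go"); err != nil {
		return err
	}
	// AddGlob stages every match and reports ErrGlobNoMatches otherwise.
	if err := wt.AddGlob("docs/*"); err != nil && err != git.ErrGlobNoMatches {
		return err
	}
	return nil
}
```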
-func (w *Worktree) doAddFile(idx *index.Index, s Status, path string) (added bool, h plumbing.Hash, err error) { - if s.File(path).Worktree == Unmodified { - return false, h, nil - } - - h, err = w.copyFileToStorage(path) - if err != nil { - if os.IsNotExist(err) { - added = true - h, err = w.deleteFromIndex(idx, path) - } - - return - } - - if err := w.addOrUpdateFileToIndex(idx, path, h); err != nil { - return false, h, err - } - - return true, h, err -} - -func (w *Worktree) copyFileToStorage(path string) (hash plumbing.Hash, err error) { - fi, err := w.Filesystem.Lstat(path) - if err != nil { - return plumbing.ZeroHash, err - } - - obj := w.r.Storer.NewEncodedObject() - obj.SetType(plumbing.BlobObject) - obj.SetSize(fi.Size()) - - writer, err := obj.Writer() - if err != nil { - return plumbing.ZeroHash, err - } - - defer ioutil.CheckClose(writer, &err) - - if fi.Mode()&os.ModeSymlink != 0 { - err = w.fillEncodedObjectFromSymlink(writer, path, fi) - } else { - err = w.fillEncodedObjectFromFile(writer, path, fi) - } - - if err != nil { - return plumbing.ZeroHash, err - } - - return w.r.Storer.SetEncodedObject(obj) -} - -func (w *Worktree) fillEncodedObjectFromFile(dst io.Writer, path string, fi os.FileInfo) (err error) { - src, err := w.Filesystem.Open(path) - if err != nil { - return err - } - - defer ioutil.CheckClose(src, &err) - - if _, err := io.Copy(dst, src); err != nil { - return err - } - - return err -} - -func (w *Worktree) fillEncodedObjectFromSymlink(dst io.Writer, path string, fi os.FileInfo) error { - target, err := w.Filesystem.Readlink(path) - if err != nil { - return err - } - - _, err = dst.Write([]byte(target)) - return err -} - -func (w *Worktree) addOrUpdateFileToIndex(idx *index.Index, filename string, h plumbing.Hash) error { - e, err := idx.Entry(filename) - if err != nil && err != index.ErrEntryNotFound { - return err - } - - if err == index.ErrEntryNotFound { - return w.doAddFileToIndex(idx, filename, h) - } - - return w.doUpdateFileToIndex(e, filename, h) -} - -func (w *Worktree) doAddFileToIndex(idx *index.Index, filename string, h plumbing.Hash) error { - return w.doUpdateFileToIndex(idx.Add(filename), filename, h) -} - -func (w *Worktree) doUpdateFileToIndex(e *index.Entry, filename string, h plumbing.Hash) error { - info, err := w.Filesystem.Lstat(filename) - if err != nil { - return err - } - - e.Hash = h - e.ModifiedAt = info.ModTime() - e.Mode, err = filemode.NewFromOSFileMode(info.Mode()) - if err != nil { - return err - } - - if e.Mode.IsRegular() { - e.Size = uint32(info.Size()) - } - - fillSystemInfo(e, info.Sys()) - return nil -} - -// Remove removes files from the working tree and from the index. -func (w *Worktree) Remove(path string) (plumbing.Hash, error) { - // TODO(mcuadros): remove plumbing.Hash from signature at v5. 
- idx, err := w.r.Storer.Index() - if err != nil { - return plumbing.ZeroHash, err - } - - var h plumbing.Hash - - fi, err := w.Filesystem.Lstat(path) - if err != nil || !fi.IsDir() { - h, err = w.doRemoveFile(idx, path) - } else { - _, err = w.doRemoveDirectory(idx, path) - } - if err != nil { - return h, err - } - - return h, w.r.Storer.SetIndex(idx) -} - -func (w *Worktree) doRemoveDirectory(idx *index.Index, directory string) (removed bool, err error) { - files, err := w.Filesystem.ReadDir(directory) - if err != nil { - return false, err - } - - for _, file := range files { - name := path.Join(directory, file.Name()) - - var r bool - if file.IsDir() { - r, err = w.doRemoveDirectory(idx, name) - } else { - _, err = w.doRemoveFile(idx, name) - if err == index.ErrEntryNotFound { - err = nil - } - } - - if err != nil { - return - } - - if !removed && r { - removed = true - } - } - - err = w.removeEmptyDirectory(directory) - return -} - -func (w *Worktree) removeEmptyDirectory(path string) error { - files, err := w.Filesystem.ReadDir(path) - if err != nil { - return err - } - - if len(files) != 0 { - return nil - } - - return w.Filesystem.Remove(path) -} - -func (w *Worktree) doRemoveFile(idx *index.Index, path string) (plumbing.Hash, error) { - hash, err := w.deleteFromIndex(idx, path) - if err != nil { - return plumbing.ZeroHash, err - } - - return hash, w.deleteFromFilesystem(path) -} - -func (w *Worktree) deleteFromIndex(idx *index.Index, path string) (plumbing.Hash, error) { - e, err := idx.Remove(path) - if err != nil { - return plumbing.ZeroHash, err - } - - return e.Hash, nil -} - -func (w *Worktree) deleteFromFilesystem(path string) error { - err := w.Filesystem.Remove(path) - if os.IsNotExist(err) { - return nil - } - - return err -} - -// RemoveGlob removes all paths, matching pattern, from the index. If pattern -// matches a directory path, all directory contents are removed from the index -// recursively. -func (w *Worktree) RemoveGlob(pattern string) error { - idx, err := w.r.Storer.Index() - if err != nil { - return err - } - - entries, err := idx.Glob(pattern) - if err != nil { - return err - } - - for _, e := range entries { - file := filepath.FromSlash(e.Name) - if _, err := w.Filesystem.Lstat(file); err != nil && !os.IsNotExist(err) { - return err - } - - if _, err := w.doRemoveFile(idx, file); err != nil { - return err - } - - dir, _ := filepath.Split(file) - if err := w.removeEmptyDirectory(dir); err != nil { - return err - } - } - - return w.r.Storer.SetIndex(idx) -} - -// Move moves or rename a file in the worktree and the index, directories are -// not supported. 
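The removal helpers above and the `Move` implementation below keep the index and the filesystem in step; a hedged usage sketch, with placeholder paths:

```go
package main

import git "github.com/go-git/go-git/v5"

// untrackAndRename removes one file from the index and worktree, then
// renames another while keeping the index in sync.
func untrackAndRename(wt *git.Worktree) error {
	// Remove deletes the file from both the working tree and the index.
	if _, err := wt.Remove("old.txt"); err != nil {
		return err
	}
	// Move renames a file; it fails with ErrDestinationExists if the
	// target path is already present.
	if _, err := wt.Move("notes.md", "docs/notes.md"); err != nil {
		return err
	}
	return nil
}
```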
-func (w *Worktree) Move(from, to string) (plumbing.Hash, error) { - // TODO(mcuadros): support directories and/or implement support for glob - if _, err := w.Filesystem.Lstat(from); err != nil { - return plumbing.ZeroHash, err - } - - if _, err := w.Filesystem.Lstat(to); err == nil { - return plumbing.ZeroHash, ErrDestinationExists - } - - idx, err := w.r.Storer.Index() - if err != nil { - return plumbing.ZeroHash, err - } - - hash, err := w.deleteFromIndex(idx, from) - if err != nil { - return plumbing.ZeroHash, err - } - - if err := w.Filesystem.Rename(from, to); err != nil { - return hash, err - } - - if err := w.addOrUpdateFileToIndex(idx, to, hash); err != nil { - return hash, err - } - - return hash, w.r.Storer.SetIndex(idx) -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_unix_other.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_unix_other.go deleted file mode 100644 index f45966be961..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_unix_other.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build openbsd dragonfly solaris - -package git - -import ( - "syscall" - "time" - - "github.com/go-git/go-git/v5/plumbing/format/index" -) - -func init() { - fillSystemInfo = func(e *index.Entry, sys interface{}) { - if os, ok := sys.(*syscall.Stat_t); ok { - e.CreatedAt = time.Unix(int64(os.Atim.Sec), int64(os.Atim.Nsec)) - e.Dev = uint32(os.Dev) - e.Inode = uint32(os.Ino) - e.GID = os.Gid - e.UID = os.Uid - } - } -} - -func isSymlinkWindowsNonAdmin(err error) bool { - return false -} diff --git a/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_windows.go b/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_windows.go deleted file mode 100644 index 1928f9712e9..00000000000 --- a/awsproviderlint/vendor/github.com/go-git/go-git/v5/worktree_windows.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build windows - -package git - -import ( - "os" - "syscall" - "time" - - "github.com/go-git/go-git/v5/plumbing/format/index" -) - -func init() { - fillSystemInfo = func(e *index.Entry, sys interface{}) { - if os, ok := sys.(*syscall.Win32FileAttributeData); ok { - seconds := os.CreationTime.Nanoseconds() / 1000000000 - nanoseconds := os.CreationTime.Nanoseconds() - seconds*1000000000 - e.CreatedAt = time.Unix(seconds, nanoseconds) - } - } -} - -func isSymlinkWindowsNonAdmin(err error) bool { - const ERROR_PRIVILEGE_NOT_HELD syscall.Errno = 1314 - - if err != nil { - if errLink, ok := err.(*os.LinkError); ok { - if errNo, ok := errLink.Err.(syscall.Errno); ok { - return errNo == ERROR_PRIVILEGE_NOT_HELD - } - } - } - - return false -} diff --git a/awsproviderlint/vendor/github.com/hashicorp/go-hclog/README.md b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/README.md index 9b6845e9887..5d56f4b59c3 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/go-hclog/README.md +++ b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/README.md @@ -132,7 +132,7 @@ Alternatively, you may configure the system-wide logger: ```go // log the standard logger from 'import "log"' -log.SetOutput(appLogger.Writer(&hclog.StandardLoggerOptions{InferLevels: true})) +log.SetOutput(appLogger.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true})) log.SetPrefix("") log.SetFlags(0) diff --git a/awsproviderlint/vendor/github.com/hashicorp/go-hclog/colorize_unix.go b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/colorize_unix.go new file mode 100644 index 00000000000..44aa9bf2c62 --- /dev/null +++ 
b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/colorize_unix.go @@ -0,0 +1,27 @@ +// +build !windows + +package hclog + +import ( + "github.com/mattn/go-isatty" +) + +// setColorization will mutate the values of this logger +// to appropriately configure colorization options. It provides +// a wrapper to the output stream on Windows systems. +func (l *intLogger) setColorization(opts *LoggerOptions) { + switch opts.Color { + case ColorOff: + fallthrough + case ForceColor: + return + case AutoColor: + fi := l.checkWriterIsFile() + isUnixTerm := isatty.IsTerminal(fi.Fd()) + isCygwinTerm := isatty.IsCygwinTerminal(fi.Fd()) + isTerm := isUnixTerm || isCygwinTerm + if !isTerm { + l.writer.color = ColorOff + } + } +} diff --git a/awsproviderlint/vendor/github.com/hashicorp/go-hclog/colorize_windows.go b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/colorize_windows.go new file mode 100644 index 00000000000..23486b6d74f --- /dev/null +++ b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/colorize_windows.go @@ -0,0 +1,33 @@ +// +build windows + +package hclog + +import ( + "os" + + colorable "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +// setColorization will mutate the values of this logger +// to appropriately configure colorization options. It provides +// a wrapper to the output stream on Windows systems. +func (l *intLogger) setColorization(opts *LoggerOptions) { + switch opts.Color { + case ColorOff: + return + case ForceColor: + fi := l.checkWriterIsFile() + l.writer.w = colorable.NewColorable(fi) + case AutoColor: + fi := l.checkWriterIsFile() + isUnixTerm := isatty.IsTerminal(os.Stdout.Fd()) + isCygwinTerm := isatty.IsCygwinTerminal(os.Stdout.Fd()) + isTerm := isUnixTerm || isCygwinTerm + if !isTerm { + l.writer.color = ColorOff + return + } + l.writer.w = colorable.NewColorable(fi) + } +} diff --git a/awsproviderlint/vendor/github.com/hashicorp/go-hclog/exclude.go b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/exclude.go new file mode 100644 index 00000000000..cfd4307a803 --- /dev/null +++ b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/exclude.go @@ -0,0 +1,71 @@ +package hclog + +import ( + "regexp" + "strings" +) + +// ExcludeByMessage provides a simple way to build a list of log messages that +// can be queried and matched. This is meant to be used with the Exclude +// option on LoggerOptions to suppress log messages. This does not hold any mutexes +// within itself, so normal usage is to Add entries at setup and none after +// Exclude is in use. Exclude is called with a mutex held within +// the Logger, so it does not need a mutex of its own. Example usage: +// +// f := new(ExcludeByMessage) +// f.Add("Noisy log message text") +// appLogger.Exclude = f.Exclude +type ExcludeByMessage struct { + messages map[string]struct{} +} + +// Add a message to be filtered. Do not call this once Exclude is in use, +// due to concurrency issues. +func (f *ExcludeByMessage) Add(msg string) { + if f.messages == nil { + f.messages = make(map[string]struct{}) + } + + f.messages[msg] = struct{}{} +} + +// Exclude returns true if the given message should be excluded +func (f *ExcludeByMessage) Exclude(level Level, msg string, args ...interface{}) bool { + _, ok := f.messages[msg] + return ok +} + +// ExcludeByPrefix is a simple type to match a message string that has a common prefix. +type ExcludeByPrefix string + +// Exclude matches any message that starts with the prefix.
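Before the prefix matcher's implementation below, a hedged sketch of wiring these filters into a logger through `LoggerOptions.Exclude`; the logger name and messages are illustrative:

```go
package main

import "github.com/hashicorp/go-hclog"

func main() {
	// Suppress one exact message plus anything with a noisy prefix.
	var byMsg hclog.ExcludeByMessage
	byMsg.Add("connection reset by peer")

	filters := hclog.ExcludeFuncs{
		byMsg.Exclude,
		hclog.ExcludeByPrefix("heartbeat: ").Exclude,
	}

	logger := hclog.New(&hclog.LoggerOptions{
		Name:    "app",
		Exclude: filters.Exclude,
	})

	logger.Info("heartbeat: ok")            // filtered
	logger.Info("connection reset by peer") // filtered
	logger.Info("serving requests")         // emitted
}
```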
+func (p ExcludeByPrefix) Exclude(level Level, msg string, args ...interface{}) bool { + return strings.HasPrefix(msg, string(p)) +} + +// ExcludeByRegexp takes a regexp and uses it to match a log message string. If it matches, +// the log entry is excluded. +type ExcludeByRegexp struct { + Regexp *regexp.Regexp +} + +// Exclude the log message if the message string matches the regexp +func (e ExcludeByRegexp) Exclude(level Level, msg string, args ...interface{}) bool { + return e.Regexp.MatchString(msg) +} + +// ExcludeFuncs is a slice of functions that will be called to see if a log entry +// should be filtered or not. It stops calling functions once at least one returns +// true. +type ExcludeFuncs []func(level Level, msg string, args ...interface{}) bool + +// Exclude calls each function until one of them returns true +func (ff ExcludeFuncs) Exclude(level Level, msg string, args ...interface{}) bool { + for _, f := range ff { + if f(level, msg, args...) { + return true + } + } + + return false +} diff --git a/awsproviderlint/vendor/github.com/hashicorp/go-hclog/global.go b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/global.go index 3efc54c1290..22ebc57d877 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/go-hclog/global.go +++ b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/global.go @@ -20,6 +20,13 @@ var ( // Default returns a globally held logger. This can be a good starting // place, and then you can use .With() and .Name() to create sub-loggers // to be used in more specific contexts. +// The value of the Default logger can be set via SetDefault() or by +// changing the options in DefaultOptions. +// +// This method is goroutine safe, returning a global from memory, but +// care should be taken if SetDefault() is called at random times +// in the program as that may result in race conditions and an unexpected +// Logger being returned. func Default() Logger { protect.Do(func() { // If SetDefault was used before Default() was called, we need to @@ -41,6 +48,13 @@ func L() Logger { // to the one given. This allows packages to use the default logger // and have higher level packages change it to match the execution // environment. It returns any old default if there is one. +// +// NOTE: This is expected to be called early in the program to set up +// a default logger. As such, it does not attempt to make itself +// not racy with regard to the value of the default logger. Ergo +// if it is called in goroutines, you may experience race conditions +// with other goroutines retrieving the default logger. Basically, +// don't do that.
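A minimal sketch of the intended call pattern: install the default once at startup, then read it anywhere through the global accessor (the name is a placeholder):

```go
package main

import "github.com/hashicorp/go-hclog"

func main() {
	// Install the process-wide default once, early in startup,
	// before any goroutines call Default()/L().
	hclog.SetDefault(hclog.New(&hclog.LoggerOptions{
		Name:  "myapp", // placeholder name
		Level: hclog.Debug,
	}))

	// Any package can then log through the global accessor.
	hclog.L().Debug("cache warmed", "entries", 128)
}
```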
func SetDefault(log Logger) Logger { old := def def = log diff --git a/awsproviderlint/vendor/github.com/hashicorp/go-hclog/go.mod b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/go.mod index 0d079a65444..b6698c0836f 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/go-hclog/go.mod +++ b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/go.mod @@ -2,6 +2,11 @@ module github.com/hashicorp/go-hclog require ( github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fatih/color v1.7.0 + github.com/mattn/go-colorable v0.1.4 + github.com/mattn/go-isatty v0.0.10 github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/testify v1.2.2 ) + +go 1.13 diff --git a/awsproviderlint/vendor/github.com/hashicorp/go-hclog/go.sum b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/go.sum index e03ee77d9e3..3a656dfd9c9 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/go-hclog/go.sum +++ b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/go.sum @@ -1,6 +1,18 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/awsproviderlint/vendor/github.com/hashicorp/go-hclog/interceptlogger.go b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/interceptlogger.go new file mode 100644 index 00000000000..d8e2e76fc38 --- /dev/null +++ b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/interceptlogger.go @@ -0,0 +1,235 @@ +package hclog + +import ( + "io" + "log" + "sync" + "sync/atomic" +) + +var _ Logger = &interceptLogger{} + +type interceptLogger struct { + Logger + + mu *sync.Mutex + sinkCount *int32 + Sinks map[SinkAdapter]struct{} +} + +func NewInterceptLogger(opts *LoggerOptions) InterceptLogger { + intercept := &interceptLogger{ + Logger: New(opts), + mu: new(sync.Mutex), + sinkCount: new(int32), + Sinks: make(map[SinkAdapter]struct{}), + } + + atomic.StoreInt32(intercept.sinkCount, 0) + + return intercept +} + +func (i *interceptLogger) Log(level Level, msg string, args ...interface{}) { + 
i.Logger.Log(level, msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), level, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at TRACE level to log and sinks +func (i *interceptLogger) Trace(msg string, args ...interface{}) { + i.Logger.Trace(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Trace, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at DEBUG level to log and sinks +func (i *interceptLogger) Debug(msg string, args ...interface{}) { + i.Logger.Debug(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Debug, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at INFO level to log and sinks +func (i *interceptLogger) Info(msg string, args ...interface{}) { + i.Logger.Info(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Info, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at WARN level to log and sinks +func (i *interceptLogger) Warn(msg string, args ...interface{}) { + i.Logger.Warn(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Warn, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at ERROR level to log and sinks +func (i *interceptLogger) Error(msg string, args ...interface{}) { + i.Logger.Error(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Error, msg, i.retrieveImplied(args...)...) + } +} + +func (i *interceptLogger) retrieveImplied(args ...interface{}) []interface{} { + top := i.Logger.ImpliedArgs() + + cp := make([]interface{}, len(top)+len(args)) + copy(cp, top) + copy(cp[len(top):], args) + + return cp +} + +// Create a new sub-Logger with a name descending from the current name. +// This is used to create a subsystem specific Logger. +// Registered sinks will subscribe to these messages as well. +func (i *interceptLogger) Named(name string) Logger { + return i.NamedIntercept(name) +} + +// Create a new sub-Logger with an explicit name. This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. Registered sinks will subscribe +// to these messages as well. +func (i *interceptLogger) ResetNamed(name string) Logger { + return i.ResetNamedIntercept(name) +} + +// Create a new sub-Logger with a name descending from the current name. +// This is used to create a subsystem specific Logger. +// Registered sinks will subscribe to these messages as well. +func (i *interceptLogger) NamedIntercept(name string) InterceptLogger { + var sub interceptLogger + + sub = *i + sub.Logger = i.Logger.Named(name) + return &sub +} + +// Create a new sub-Logger with an explicit name. This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. Registered sinks will subscribe +// to these messages as well.
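Before the remaining intercept methods below, a hedged sketch of the pattern they enable: a root logger held at INFO with a DEBUG sink registered alongside it (the file path is a placeholder):

```go
package main

import (
	"os"

	"github.com/hashicorp/go-hclog"
)

func main() {
	// Root logger stays at INFO on stderr.
	root := hclog.NewInterceptLogger(&hclog.LoggerOptions{
		Name:  "app",
		Level: hclog.Info,
	})

	// A sink that captures everything down to DEBUG into a file.
	f, err := os.Create("debug.log") // placeholder path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	sink := hclog.NewSinkAdapter(&hclog.LoggerOptions{
		Level:  hclog.Debug,
		Output: f,
	})
	root.RegisterSink(sink)
	defer root.DeregisterSink(sink)

	root.Debug("sink only")           // below the root level, still reaches the sink
	root.Info("stderr and debug.log") // emitted by both
}
```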
+func (i *interceptLogger) ResetNamedIntercept(name string) InterceptLogger { + var sub interceptLogger + + sub = *i + sub.Logger = i.Logger.ResetNamed(name) + return &sub +} + +// Return a sub-Logger for which every emitted log message will contain +// the given key/value pairs. This is used to create a context specific +// Logger. +func (i *interceptLogger) With(args ...interface{}) Logger { + var sub interceptLogger + + sub = *i + + sub.Logger = i.Logger.With(args...) + + return &sub +} + +// RegisterSink attaches a SinkAdapter to interceptLoggers sinks. +func (i *interceptLogger) RegisterSink(sink SinkAdapter) { + i.mu.Lock() + defer i.mu.Unlock() + + i.Sinks[sink] = struct{}{} + + atomic.AddInt32(i.sinkCount, 1) +} + +// DeregisterSink removes a SinkAdapter from interceptLoggers sinks. +func (i *interceptLogger) DeregisterSink(sink SinkAdapter) { + i.mu.Lock() + defer i.mu.Unlock() + + delete(i.Sinks, sink) + + atomic.AddInt32(i.sinkCount, -1) +} + +func (i *interceptLogger) StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger { + return i.StandardLogger(opts) +} + +func (i *interceptLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + if opts == nil { + opts = &StandardLoggerOptions{} + } + + return log.New(i.StandardWriter(opts), "", 0) +} + +func (i *interceptLogger) StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer { + return i.StandardWriter(opts) +} + +func (i *interceptLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { + return &stdlogAdapter{ + log: i, + inferLevels: opts.InferLevels, + forceLevel: opts.ForceLevel, + } +} + +func (i *interceptLogger) ResetOutput(opts *LoggerOptions) error { + if or, ok := i.Logger.(OutputResettable); ok { + return or.ResetOutput(opts) + } else { + return nil + } +} + +func (i *interceptLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error { + if or, ok := i.Logger.(OutputResettable); ok { + return or.ResetOutputWithFlush(opts, flushable) + } else { + return nil + } +} diff --git a/awsproviderlint/vendor/github.com/hashicorp/go-hclog/intlogger.go b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/intlogger.go index 219656c4cb3..f961ed91913 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/go-hclog/intlogger.go +++ b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -4,9 +4,11 @@ import ( "bytes" "encoding" "encoding/json" + "errors" "fmt" "io" "log" + "os" "reflect" "runtime" "sort" @@ -15,6 +17,8 @@ import ( "sync" "sync/atomic" "time" + + "github.com/fatih/color" ) // TimeFormat to use for logging. This is a version of RFC3339 that contains @@ -32,6 +36,14 @@ var ( Warn: "[WARN] ", Error: "[ERROR]", } + + _levelToColor = map[Level]*color.Color{ + Debug: color.New(color.FgHiWhite), + Trace: color.New(color.FgHiGreen), + Info: color.New(color.FgHiBlue), + Warn: color.New(color.FgHiYellow), + Error: color.New(color.FgHiRed), + } ) // Make sure that intLogger is a Logger @@ -45,17 +57,32 @@ type intLogger struct { name string timeFormat string - // This is a pointer so that it's shared by any derived loggers, since + // This is an interface so that it's shared by any derived loggers, since // those derived loggers share the bufio.Writer as well. - mutex *sync.Mutex + mutex Locker writer *writer level *int32 implied []interface{} + + exclude func(level Level, msg string, args ...interface{}) bool + + // create subloggers with their own level setting + independentLevels bool } // New returns a configured logger. 
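Ahead of the `New` implementation below, a small sketch of constructing a logger with the options this change introduces; the name and level strings are illustrative:

```go
package main

import "github.com/hashicorp/go-hclog"

func main() {
	logger := hclog.New(&hclog.LoggerOptions{
		Name:       "worker",                       // placeholder subsystem name
		Level:      hclog.LevelFromString("debug"), // "off" is now recognized as well
		JSONFormat: false,
		Color:      hclog.AutoColor, // colorize only when output is a terminal
	})
	logger.Debug("starting", "queue", "default")
}
```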
func New(opts *LoggerOptions) Logger { + return newLogger(opts) +} + +// NewSinkAdapter returns a SinkAdapter with configured settings +// defined by LoggerOptions +func NewSinkAdapter(opts *LoggerOptions) SinkAdapter { + return newLogger(opts) +} + +func newLogger(opts *LoggerOptions) *intLogger { if opts == nil { opts = &LoggerOptions{} } @@ -76,16 +103,22 @@ func New(opts *LoggerOptions) Logger { } l := &intLogger{ - json: opts.JSONFormat, - caller: opts.IncludeLocation, - name: opts.Name, - timeFormat: TimeFormat, - mutex: mutex, - writer: newWriter(output), - level: new(int32), + json: opts.JSONFormat, + caller: opts.IncludeLocation, + name: opts.Name, + timeFormat: TimeFormat, + mutex: mutex, + writer: newWriter(output, opts.Color), + level: new(int32), + exclude: opts.Exclude, + independentLevels: opts.IndependentLevels, } - if opts.TimeFormat != "" { + l.setColorization(opts) + + if opts.DisableTime { + l.timeFormat = "" + } else if opts.TimeFormat != "" { l.timeFormat = opts.TimeFormat } @@ -96,7 +129,7 @@ func New(opts *LoggerOptions) Logger { // Log a message and a set of key/value pairs if the given level is at // or more severe that the threshold configured in the Logger. -func (l *intLogger) Log(level Level, msg string, args ...interface{}) { +func (l *intLogger) log(name string, level Level, msg string, args ...interface{}) { if level < Level(atomic.LoadInt32(l.level)) { return } @@ -106,10 +139,14 @@ func (l *intLogger) Log(level Level, msg string, args ...interface{}) { l.mutex.Lock() defer l.mutex.Unlock() + if l.exclude != nil && l.exclude(level, msg, args...) { + return + } + if l.json { - l.logJSON(t, level, msg, args...) + l.logJSON(t, name, level, msg, args...) } else { - l.log(t, level, msg, args...) + l.logPlain(t, name, level, msg, args...) 
} l.writer.Flush(level) @@ -145,9 +182,11 @@ func trimCallerPath(path string) string { } // Non-JSON logging format function -func (l *intLogger) log(t time.Time, level Level, msg string, args ...interface{}) { - l.writer.WriteString(t.Format(l.timeFormat)) - l.writer.WriteByte(' ') +func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) { + if len(l.timeFormat) > 0 { + l.writer.WriteString(t.Format(l.timeFormat)) + l.writer.WriteByte(' ') + } s, ok := _levelToBracket[level] if ok { @@ -156,8 +195,17 @@ func (l *intLogger) log(t time.Time, level Level, msg string, args ...interface{ l.writer.WriteString("[?????]") } + offset := 3 if l.caller { - if _, file, line, ok := runtime.Caller(3); ok { + // Check if the caller is inside our package and inside + // a logger implementation file + if _, file, _, ok := runtime.Caller(3); ok { + if strings.HasSuffix(file, "intlogger.go") || strings.HasSuffix(file, "interceptlogger.go") { + offset = 4 + } + } + + if _, file, line, ok := runtime.Caller(offset); ok { l.writer.WriteByte(' ') l.writer.WriteString(trimCallerPath(file)) l.writer.WriteByte(':') @@ -168,8 +216,8 @@ func (l *intLogger) log(t time.Time, level Level, msg string, args ...interface{ l.writer.WriteByte(' ') - if l.name != "" { - l.writer.WriteString(l.name) + if name != "" { + l.writer.WriteString(name) l.writer.WriteString(": ") } @@ -186,7 +234,8 @@ func (l *intLogger) log(t time.Time, level Level, msg string, args ...interface{ args = args[:len(args)-1] stacktrace = cs } else { - args = append(args, "") + extra := args[len(args)-1] + args = append(args[:len(args)-1], MissingKey, extra) } } @@ -222,6 +271,12 @@ func (l *intLogger) log(t time.Time, level Level, msg string, args ...interface{ val = strconv.FormatUint(uint64(st), 10) case uint8: val = strconv.FormatUint(uint64(st), 10) + case Hex: + val = "0x" + strconv.FormatUint(uint64(st), 16) + case Octal: + val = "0" + strconv.FormatUint(uint64(st), 8) + case Binary: + val = "0b" + strconv.FormatUint(uint64(st), 2) case CapturedStacktrace: stacktrace = st continue FOR @@ -238,7 +293,12 @@ func (l *intLogger) log(t time.Time, level Level, msg string, args ...interface{ } l.writer.WriteByte(' ') - l.writer.WriteString(args[i].(string)) + switch st := args[i].(type) { + case string: + l.writer.WriteString(st) + default: + l.writer.WriteString(fmt.Sprintf("%s", st)) + } l.writer.WriteByte('=') if !raw && strings.ContainsAny(val, " \t\n\r") { @@ -255,6 +315,7 @@ func (l *intLogger) log(t time.Time, level Level, msg string, args ...interface{ if stacktrace != "" { l.writer.WriteString(string(stacktrace)) + l.writer.WriteString("\n") } } @@ -298,8 +359,8 @@ func (l *intLogger) renderSlice(v reflect.Value) string { } // JSON logging function -func (l *intLogger) logJSON(t time.Time, level Level, msg string, args ...interface{}) { - vals := l.jsonMapEntry(t, level, msg) +func (l *intLogger) logJSON(t time.Time, name string, level Level, msg string, args ...interface{}) { + vals := l.jsonMapEntry(t, name, level, msg) args = append(l.implied, args...) 
if args != nil && len(args) > 0 { @@ -309,16 +370,12 @@ func (l *intLogger) logJSON(t time.Time, level Level, msg string, args ...interf args = args[:len(args)-1] vals["stacktrace"] = cs } else { - args = append(args, "") + extra := args[len(args)-1] + args = append(args[:len(args)-1], MissingKey, extra) } } for i := 0; i < len(args); i = i + 2 { - if _, ok := args[i].(string); !ok { - // As this is the logging function not much we can do here - // without injecting into logs... - continue - } val := args[i+1] switch sv := val.(type) { case error: @@ -334,14 +391,22 @@ func (l *intLogger) logJSON(t time.Time, level Level, msg string, args ...interf val = fmt.Sprintf(sv[0].(string), sv[1:]...) } - vals[args[i].(string)] = val + var key string + + switch st := args[i].(type) { + case string: + key = st + default: + key = fmt.Sprintf("%s", st) + } + vals[key] = val } } err := json.NewEncoder(l.writer).Encode(vals) if err != nil { if _, ok := err.(*json.UnsupportedTypeError); ok { - plainVal := l.jsonMapEntry(t, level, msg) + plainVal := l.jsonMapEntry(t, name, level, msg) plainVal["@warn"] = errJsonUnsupportedTypeMsg json.NewEncoder(l.writer).Encode(plainVal) @@ -349,7 +414,7 @@ func (l *intLogger) logJSON(t time.Time, level Level, msg string, args ...interf } } -func (l intLogger) jsonMapEntry(t time.Time, level Level, msg string) map[string]interface{} { +func (l intLogger) jsonMapEntry(t time.Time, name string, level Level, msg string) map[string]interface{} { vals := map[string]interface{}{ "@message": msg, "@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"), @@ -373,8 +438,8 @@ func (l intLogger) jsonMapEntry(t time.Time, level Level, msg string) map[string vals["@level"] = levelStr - if l.name != "" { - vals["@module"] = l.name + if name != "" { + vals["@module"] = name } if l.caller { @@ -385,29 +450,34 @@ func (l intLogger) jsonMapEntry(t time.Time, level Level, msg string) map[string return vals } +// Emit the message and args at the provided level +func (l *intLogger) Log(level Level, msg string, args ...interface{}) { + l.log(l.Name(), level, msg, args...) +} + // Emit the message and args at DEBUG level func (l *intLogger) Debug(msg string, args ...interface{}) { - l.Log(Debug, msg, args...) + l.log(l.Name(), Debug, msg, args...) } // Emit the message and args at TRACE level func (l *intLogger) Trace(msg string, args ...interface{}) { - l.Log(Trace, msg, args...) + l.log(l.Name(), Trace, msg, args...) } // Emit the message and args at INFO level func (l *intLogger) Info(msg string, args ...interface{}) { - l.Log(Info, msg, args...) + l.log(l.Name(), Info, msg, args...) } // Emit the message and args at WARN level func (l *intLogger) Warn(msg string, args ...interface{}) { - l.Log(Warn, msg, args...) + l.log(l.Name(), Warn, msg, args...) } // Emit the message and args at ERROR level func (l *intLogger) Error(msg string, args ...interface{}) { - l.Log(Error, msg, args...) + l.log(l.Name(), Error, msg, args...) } // Indicate that the logger would emit TRACE level logs @@ -435,15 +505,20 @@ func (l *intLogger) IsError() bool { return Level(atomic.LoadInt32(l.level)) <= Error } +const MissingKey = "EXTRA_VALUE_AT_END" + // Return a sub-Logger for which every emitted log message will contain // the given key/value pairs. This is used to create a context specific // Logger. 
func (l *intLogger) With(args ...interface{}) Logger { + var extra interface{} + if len(args)%2 != 0 { - panic("With() call requires paired arguments") + extra = args[len(args)-1] + args = args[:len(args)-1] } - sl := *l + sl := l.copy() result := make(map[string]interface{}, len(l.implied)+len(args)) keys := make([]string, 0, len(l.implied)+len(args)) @@ -473,13 +548,17 @@ func (l *intLogger) With(args ...interface{}) Logger { sl.implied = append(sl.implied, result[k]) } - return &sl + if extra != nil { + sl.implied = append(sl.implied, MissingKey, extra) + } + + return sl } // Create a new sub-Logger that a name decending from the current name. // This is used to create a subsystem specific Logger. func (l *intLogger) Named(name string) Logger { - sl := *l + sl := l.copy() if sl.name != "" { sl.name = sl.name + "." + name @@ -487,18 +566,53 @@ func (l *intLogger) Named(name string) Logger { sl.name = name } - return &sl + return sl } // Create a new sub-Logger with an explicit name. This ignores the current // name. This is used to create a standalone logger that doesn't fall // within the normal hierarchy. func (l *intLogger) ResetNamed(name string) Logger { - sl := *l + sl := l.copy() sl.name = name - return &sl + return sl +} + +func (l *intLogger) ResetOutput(opts *LoggerOptions) error { + if opts.Output == nil { + return errors.New("given output is nil") + } + + l.mutex.Lock() + defer l.mutex.Unlock() + + return l.resetOutput(opts) +} + +func (l *intLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error { + if opts.Output == nil { + return errors.New("given output is nil") + } + if flushable == nil { + return errors.New("flushable is nil") + } + + l.mutex.Lock() + defer l.mutex.Unlock() + + if err := flushable.Flush(); err != nil { + return err + } + + return l.resetOutput(opts) +} + +func (l *intLogger) resetOutput(opts *LoggerOptions) error { + l.writer = newWriter(opts.Output, opts.Color) + l.setColorization(opts) + return nil } // Update the logging level on-the-fly. This will affect all subloggers as @@ -525,3 +639,41 @@ func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { forceLevel: opts.ForceLevel, } } + +// checks if the underlying io.Writer is a file, and +// panics if not. For use by colorization. +func (l *intLogger) checkWriterIsFile() *os.File { + fi, ok := l.writer.w.(*os.File) + if !ok { + panic("Cannot enable coloring of non-file Writers") + } + return fi +} + +// Accept implements the SinkAdapter interface +func (i *intLogger) Accept(name string, level Level, msg string, args ...interface{}) { + i.log(name, level, msg, args...) 
+} + +// ImpliedArgs returns the loggers implied args +func (i *intLogger) ImpliedArgs() []interface{} { + return i.implied +} + +// Name returns the loggers name +func (i *intLogger) Name() string { + return i.name +} + +// copy returns a shallow copy of the intLogger, replacing the level pointer +// when necessary +func (l *intLogger) copy() *intLogger { + sl := *l + + if l.independentLevels { + sl.level = new(int32) + *sl.level = *l.level + } + + return &sl +} diff --git a/awsproviderlint/vendor/github.com/hashicorp/go-hclog/logger.go b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/logger.go index 080ed799966..83eafc152c6 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/go-hclog/logger.go +++ b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/logger.go @@ -5,7 +5,6 @@ import ( "log" "os" "strings" - "sync" ) var ( @@ -39,6 +38,9 @@ const ( // Error information about unrecoverable events. Error Level = 5 + + // Off disables all logging output. + Off Level = 6 ) // Format is a simple convience type for when formatting is required. When @@ -53,6 +55,33 @@ func Fmt(str string, args ...interface{}) Format { return append(Format{str}, args...) } +// A simple shortcut to format numbers in hex when displayed with the normal +// text output. For example: L.Info("header value", Hex(17)) +type Hex int + +// A simple shortcut to format numbers in octal when displayed with the normal +// text output. For example: L.Info("perms", Octal(17)) +type Octal int + +// A simple shortcut to format numbers in binary when displayed with the normal +// text output. For example: L.Info("bits", Binary(17)) +type Binary int + +// ColorOption expresses how the output should be colored, if at all. +type ColorOption uint8 + +const ( + // ColorOff is the default coloration, and does not + // inject color codes into the io.Writer. + ColorOff ColorOption = iota + // AutoColor checks if the io.Writer is a tty, + // and if so enables coloring. + AutoColor + // ForceColor will enable coloring, regardless of whether + // the io.Writer is a tty or not. + ForceColor +) + // LevelFromString returns a Level type for the named log level, or "NoLevel" if // the level string is invalid. This facilitates setting the log level via // config or environment variable by name in a predictable way. @@ -70,16 +99,42 @@ func LevelFromString(levelStr string) Level { return Warn case "error": return Error + case "off": + return Off default: return NoLevel } } +func (l Level) String() string { + switch l { + case Trace: + return "trace" + case Debug: + return "debug" + case Info: + return "info" + case Warn: + return "warn" + case Error: + return "error" + case NoLevel: + return "none" + case Off: + return "off" + default: + return "unknown" + } +} + // Logger describes the interface that must be implemeted by all loggers. type Logger interface { // Args are alternating key, val pairs // keys must be strings // vals can be any type, but display is implementation specific + // Emit a message and key/value pairs at a provided log level + Log(level Level, msg string, args ...interface{}) + // Emit a message and key/value pairs at the TRACE level Trace(msg string, args ...interface{}) @@ -111,9 +166,15 @@ type Logger interface { // Indicate if ERROR logs would be emitted. 
This and the other Is* guards
 	IsError() bool
 
+	// ImpliedArgs returns With key/value pairs
+	ImpliedArgs() []interface{}
+
 	// Creates a sublogger that will always have the given key/value pairs
 	With(args ...interface{}) Logger
 
+	// Returns the Name of the logger
+	Name() string
+
 	// Create a logger that will prepend the name string on the front of all messages.
 	// If the logger already has a name, the new value will be appended to the current
 	// name. That way, a major subsystem can use this to decorate all its own logs
 	// without losing context.
@@ -125,7 +186,8 @@ type Logger interface {
 	// the current name as well.
 	ResetNamed(name string) Logger
 
-	// Updates the level. This should affect all sub-loggers as well. If an
+	// Updates the level. This should affect all related loggers as well,
+	// unless they were created with IndependentLevels. If an
 	// implementation cannot update the level on the fly, it should no-op.
 	SetLevel(level Level)
 
@@ -162,8 +224,10 @@ type LoggerOptions struct {
 	// Where to write the logs to. Defaults to os.Stderr if nil
 	Output io.Writer
 
-	// An optional mutex pointer in case Output is shared
-	Mutex *sync.Mutex
+	// An optional Locker in case Output is shared. This can be a sync.Mutex or
+	// a NoopLocker if the caller wants control over output, e.g. for batching
+	// log lines.
+	Mutex Locker
 
 	// Control if the output should be in JSON.
 	JSONFormat bool
@@ -173,4 +237,105 @@ type LoggerOptions struct {
 
 	// The time format to use instead of the default
 	TimeFormat string
+
+	// Control whether or not to display the time at all. This is required
+	// because setting TimeFormat to empty assumes the default format.
+	DisableTime bool
+
+	// Color the output. On Windows, colored logs are only available for io.Writers that
+	// are concretely instances of *os.File.
+	Color ColorOption
+
+	// A function which is called with the log information and, if it returns
+	// true, the value should not be logged.
+	// This is useful when interacting with a system that you wish to suppress the log
+	// message for (because it's too noisy, etc)
+	Exclude func(level Level, msg string, args ...interface{}) bool
+
+	// IndependentLevels causes subloggers to be created with an independent
+	// copy of this logger's level. This means that using SetLevel on this
+	// logger will not affect any subloggers, and SetLevel on any subloggers
+	// will not affect the parent or sibling loggers.
+	IndependentLevels bool
 }
+
+// InterceptLogger describes the interface for using a logger
+// that can register different output sinks.
+// This is useful for sending lower level log messages
+// to a different output while keeping the root logger
+// at a higher one.
+type InterceptLogger interface {
+	// Logger is the root logger for an InterceptLogger
+	Logger
+
+	// RegisterSink adds a SinkAdapter to the InterceptLogger
+	RegisterSink(sink SinkAdapter)
+
+	// DeregisterSink removes a SinkAdapter from the InterceptLogger
+	DeregisterSink(sink SinkAdapter)
+
+	// Create an InterceptLogger that will prepend the name string on the front of all messages.
+	// If the logger already has a name, the new value will be appended to the current
+	// name. That way, a major subsystem can use this to decorate all its own logs
+	// without losing context.
+	NamedIntercept(name string) InterceptLogger
+
+	// Create an InterceptLogger that will prepend the name string on the front of all messages.
+	// This sets the name of the logger to the value directly, unlike Named, which honors
+	// the current name as well.
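// NOTE(editor): illustrative sketch, not part of the vendored diff. It
// exercises the new LoggerOptions fields documented above; the sublogger
// behavior follows from the IndependentLevels doc comment and the copy()
// helper in intlogger.go.
//
//	package main
//
//	import (
//		"os"
//
//		"github.com/hashicorp/go-hclog"
//	)
//
//	func main() {
//		root := hclog.New(&hclog.LoggerOptions{
//			Name:              "app",
//			Level:             hclog.Info,
//			Output:            os.Stderr,
//			Mutex:             &hclog.NoopLocker{}, // caller coordinates output itself
//			DisableTime:       true,
//			Color:             hclog.AutoColor,
//			IndependentLevels: true,
//		})
//
//		sub := root.Named("worker") // gets its own copy of the level
//		root.SetLevel(hclog.Error)
//
//		sub.Info("still emitted: root's SetLevel did not affect the sublogger")
//	}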
+ ResetNamedIntercept(name string) InterceptLogger + + // Deprecated: use StandardLogger + StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger + + // Deprecated: use StandardWriter + StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer +} + +// SinkAdapter describes the interface that must be implemented +// in order to Register a new sink to an InterceptLogger +type SinkAdapter interface { + Accept(name string, level Level, msg string, args ...interface{}) +} + +// Flushable represents a method for flushing an output buffer. It can be used +// if Resetting the log to use a new output, in order to flush the writes to +// the existing output beforehand. +type Flushable interface { + Flush() error +} + +// OutputResettable provides ways to swap the output in use at runtime +type OutputResettable interface { + // ResetOutput swaps the current output writer with the one given in the + // opts. Color options given in opts will be used for the new output. + ResetOutput(opts *LoggerOptions) error + + // ResetOutputWithFlush swaps the current output writer with the one given + // in the opts, first calling Flush on the given Flushable. Color options + // given in opts will be used for the new output. + ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error +} + +// Locker is used for locking output. If not set when creating a logger, a +// sync.Mutex will be used internally. +type Locker interface { + // Lock is called when the output is going to be changed or written to + Lock() + + // Unlock is called when the operation that called Lock() completes + Unlock() +} + +// NoopLocker implements locker but does nothing. This is useful if the client +// wants tight control over locking, in order to provide grouping of log +// entries or other functionality. 
+type NoopLocker struct{} + +// Lock does nothing +func (n NoopLocker) Lock() {} + +// Unlock does nothing +func (n NoopLocker) Unlock() {} + +var _ Locker = (*NoopLocker)(nil) diff --git a/awsproviderlint/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/nulllogger.go index 7ad6b351eb8..bc14f770807 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/go-hclog/nulllogger.go +++ b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/nulllogger.go @@ -15,6 +15,8 @@ func NewNullLogger() Logger { type nullLogger struct{} +func (l *nullLogger) Log(level Level, msg string, args ...interface{}) {} + func (l *nullLogger) Trace(msg string, args ...interface{}) {} func (l *nullLogger) Debug(msg string, args ...interface{}) {} @@ -35,8 +37,12 @@ func (l *nullLogger) IsWarn() bool { return false } func (l *nullLogger) IsError() bool { return false } +func (l *nullLogger) ImpliedArgs() []interface{} { return []interface{}{} } + func (l *nullLogger) With(args ...interface{}) Logger { return l } +func (l *nullLogger) Name() string { return "" } + func (l *nullLogger) Named(name string) Logger { return l } func (l *nullLogger) ResetNamed(name string) Logger { return l } diff --git a/awsproviderlint/vendor/github.com/hashicorp/go-hclog/stdlog.go b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/stdlog.go index 044a4696088..f35d875d327 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/go-hclog/stdlog.go +++ b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/stdlog.go @@ -2,6 +2,7 @@ package hclog import ( "bytes" + "log" "strings" ) @@ -25,36 +26,10 @@ func (s *stdlogAdapter) Write(data []byte) (int, error) { _, str := s.pickLevel(str) // Log at the forced level - switch s.forceLevel { - case Trace: - s.log.Trace(str) - case Debug: - s.log.Debug(str) - case Info: - s.log.Info(str) - case Warn: - s.log.Warn(str) - case Error: - s.log.Error(str) - default: - s.log.Info(str) - } + s.dispatch(str, s.forceLevel) } else if s.inferLevels { level, str := s.pickLevel(str) - switch level { - case Trace: - s.log.Trace(str) - case Debug: - s.log.Debug(str) - case Info: - s.log.Info(str) - case Warn: - s.log.Warn(str) - case Error: - s.log.Error(str) - default: - s.log.Info(str) - } + s.dispatch(str, level) } else { s.log.Info(str) } @@ -62,6 +37,23 @@ func (s *stdlogAdapter) Write(data []byte) (int, error) { return len(data), nil } +func (s *stdlogAdapter) dispatch(str string, level Level) { + switch level { + case Trace: + s.log.Trace(str) + case Debug: + s.log.Debug(str) + case Info: + s.log.Info(str) + case Warn: + s.log.Warn(str) + case Error: + s.log.Error(str) + default: + s.log.Info(str) + } +} + // Detect, based on conventions, what log level this is. 
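// NOTE(editor): illustrative sketch, not part of the vendored diff. It drives
// the stdlogAdapter refactored above through hclog's public API: a standard
// library *log.Logger writes into hclog, and pickLevel/dispatch route each
// line by its "[LEVEL]" prefix.
//
//	package main
//
//	import "github.com/hashicorp/go-hclog"
//
//	func main() {
//		appLogger := hclog.New(&hclog.LoggerOptions{Name: "app", Level: hclog.Debug})
//
//		std := appLogger.StandardLogger(&hclog.StandardLoggerOptions{
//			InferLevels: true,
//		})
//
//		std.Println("[WARN] disk space low") // dispatched as Warn
//		std.Println("no prefix here")        // falls through to Info
//	}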
func (s *stdlogAdapter) pickLevel(str string) (Level, string) { switch { @@ -81,3 +73,23 @@ func (s *stdlogAdapter) pickLevel(str string) (Level, string) { return Info, str } } + +type logWriter struct { + l *log.Logger +} + +func (l *logWriter) Write(b []byte) (int, error) { + l.l.Println(string(bytes.TrimRight(b, " \n\t"))) + return len(b), nil +} + +// Takes a standard library logger and returns a Logger that will write to it +func FromStandardLogger(l *log.Logger, opts *LoggerOptions) Logger { + var dl LoggerOptions = *opts + + // Use the time format that log.Logger uses + dl.DisableTime = true + dl.Output = &logWriter{l} + + return New(&dl) +} diff --git a/awsproviderlint/vendor/github.com/hashicorp/go-hclog/writer.go b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/writer.go index 7e8ec729da8..421a1f06c0b 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/go-hclog/writer.go +++ b/awsproviderlint/vendor/github.com/hashicorp/go-hclog/writer.go @@ -6,19 +6,27 @@ import ( ) type writer struct { - b bytes.Buffer - w io.Writer + b bytes.Buffer + w io.Writer + color ColorOption } -func newWriter(w io.Writer) *writer { - return &writer{w: w} +func newWriter(w io.Writer, color ColorOption) *writer { + return &writer{w: w, color: color} } func (w *writer) Flush(level Level) (err error) { + var unwritten = w.b.Bytes() + + if w.color != ColorOff { + color := _levelToColor[level] + unwritten = []byte(color.Sprintf("%s", unwritten)) + } + if lw, ok := w.w.(LevelWriter); ok { - _, err = lw.LevelWrite(level, w.b.Bytes()) + _, err = lw.LevelWrite(level, unwritten) } else { - _, err = w.w.Write(w.b.Bytes()) + _, err = w.w.Write(unwritten) } w.b.Reset() return err diff --git a/awsproviderlint/vendor/github.com/hashicorp/go-plugin/go.mod b/awsproviderlint/vendor/github.com/hashicorp/go-plugin/go.mod index f0115b782a1..4e182e6258f 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/go-plugin/go.mod +++ b/awsproviderlint/vendor/github.com/hashicorp/go-plugin/go.mod @@ -4,7 +4,7 @@ go 1.13 require ( github.com/golang/protobuf v1.3.4 - github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd + github.com/hashicorp/go-hclog v0.14.1 github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb github.com/jhump/protoreflect v1.6.0 github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 diff --git a/awsproviderlint/vendor/github.com/hashicorp/go-plugin/go.sum b/awsproviderlint/vendor/github.com/hashicorp/go-plugin/go.sum index 5d497615f5d..56062044ee4 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/go-plugin/go.sum +++ b/awsproviderlint/vendor/github.com/hashicorp/go-plugin/go.sum @@ -4,8 +4,12 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod 
h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -17,12 +21,17 @@ github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd h1:rNuUHR+CvK1IS89MMtcF0EpcVMZtjKfPRp4MEmt/aTs= -github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 h1:7GoSOOW2jpsfkntVKaS2rAr1TJqfcxotyaUcuxoZSzg= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= @@ -31,6 +40,7 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -52,6 +62,9 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/awsproviderlint/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go b/awsproviderlint/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go index 6231a9fd625..a582181505f 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go +++ b/awsproviderlint/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go @@ -136,12 +136,12 @@ func (c *grpcStdioClient) Run(stdout, stderr io.Writer) { status.Code(err) == codes.Canceled || status.Code(err) == codes.Unimplemented || err == context.Canceled { - c.log.Warn("received EOF, stopping recv loop", "err", err) + c.log.Debug("received EOF, stopping recv loop", "err", err) return } c.log.Error("error receiving data", "err", err) - continue + return } // Determine our output writer based on channel diff --git a/awsproviderlint/vendor/github.com/hashicorp/go-plugin/server.go b/awsproviderlint/vendor/github.com/hashicorp/go-plugin/server.go index 002d6080d4f..80f0ac396a4 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/go-plugin/server.go +++ b/awsproviderlint/vendor/github.com/hashicorp/go-plugin/server.go @@ -9,7 +9,6 @@ import ( "fmt" "io" "io/ioutil" - "log" "net" "os" "os/signal" @@ -260,9 +259,6 @@ func Serve(opts *ServeConfig) { // start with default version in the handshake config protoVersion, protoType, pluginSet := protocolVersion(opts) - // Logging goes to the original stderr - log.SetOutput(os.Stderr) - logger := opts.Logger if logger == nil { // internal logger to os.Stderr diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go index 31eccf81d3c..c528de8387f 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go @@ -1,6 +1,6 @@ package version -const version = "0.10.0" +const version = "0.12.0" // ModuleVersion returns the current version of the github.com/hashicorp/terraform-exec Go module. // This is a function to allow for future possible enhancement using debug.BuildInfo. 
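A hedged sketch of how the InterceptLogger and SinkAdapter additions from the go-hclog files above fit together: the root logger stays at a higher level while a registered sink captures everything. This assumes the NewInterceptLogger and NewSinkAdapter constructors that ship with go-hclog v0.14.1; they are not shown in this diff.

package main

import (
	"os"

	"github.com/hashicorp/go-hclog"
)

func main() {
	// Root logger emits at Info and above.
	root := hclog.NewInterceptLogger(&hclog.LoggerOptions{
		Name:  "core",
		Level: hclog.Info,
	})

	// A sink that captures everything down to Trace into a file.
	f, err := os.Create("debug.log")
	if err != nil {
		panic(err)
	}
	sink := hclog.NewSinkAdapter(&hclog.LoggerOptions{
		Level:  hclog.Trace,
		Output: f,
	})
	root.RegisterSink(sink)
	defer root.DeregisterSink(sink)

	// Hidden on the root output, but delivered to the sink via Accept.
	root.Debug("handshake payload", "bytes", 512)
}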
diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go index 52a0d012ac4..3b9005a6430 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go @@ -16,6 +16,7 @@ import ( const ( checkpointDisableEnvVar = "CHECKPOINT_DISABLE" + cliArgsEnvVar = "TF_CLI_ARGS" logEnvVar = "TF_LOG" inputEnvVar = "TF_INPUT" automationEnvVar = "TF_IN_AUTOMATION" @@ -26,10 +27,12 @@ const ( disablePluginTLSEnvVar = "TF_DISABLE_PLUGIN_TLS" skipProviderVerifyEnvVar = "TF_SKIP_PROVIDER_VERIFY" - varEnvVarPrefix = "TF_VAR_" + varEnvVarPrefix = "TF_VAR_" + cliArgEnvVarPrefix = "TF_CLI_ARGS_" ) var prohibitedEnvVars = []string{ + cliArgsEnvVar, inputEnvVar, automationEnvVar, logPathEnvVar, @@ -41,6 +44,48 @@ var prohibitedEnvVars = []string{ skipProviderVerifyEnvVar, } +var prohibitedEnvVarPrefixes = []string{ + varEnvVarPrefix, + cliArgEnvVarPrefix, +} + +func manualEnvVars(env map[string]string, cb func(k string)) { + for k := range env { + for _, p := range prohibitedEnvVars { + if p == k { + cb(k) + goto NextEnvVar + } + } + for _, prefix := range prohibitedEnvVarPrefixes { + if strings.HasPrefix(k, prefix) { + cb(k) + goto NextEnvVar + } + } + NextEnvVar: + } +} + +// ProhibitedEnv returns a slice of environment variable keys that are not allowed +// to be set manually from the passed environment. +func ProhibitedEnv(env map[string]string) []string { + var p []string + manualEnvVars(env, func(k string) { + p = append(p, k) + }) + return p +} + +// CleanEnv removes any prohibited environment variables from an environment map. +func CleanEnv(dirty map[string]string) map[string]string { + clean := dirty + manualEnvVars(clean, func(k string) { + delete(clean, k) + }) + return clean +} + func envMap(environ []string) map[string]string { env := map[string]string{} for _, ev := range environ { @@ -144,7 +189,9 @@ func (tf *Terraform) runTerraformCmdJSON(cmd *exec.Cmd, v interface{}) error { return err } - return json.Unmarshal(outbuf.Bytes(), v) + dec := json.NewDecoder(&outbuf) + dec.UseNumber() + return dec.Decode(v) } func (tf *Terraform) runTerraformCmd(cmd *exec.Cmd) error { diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go index 241fe7158ff..241e6517bd9 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go @@ -11,7 +11,7 @@ import ( var ( // The "Required variable not set:" case is for 0.11 missingVarErrRegexp = regexp.MustCompile(`Error: No value for required variable|Error: Required variable not set:`) - missingVarNameRegexp = regexp.MustCompile(`The root module input variable "(.+)" is not set, and has no default|Error: Required variable not set: (.+)`) + missingVarNameRegexp = regexp.MustCompile(`The root module input variable\s"(.+)"\sis\snot\sset,\sand\shas\sno\sdefault|Error: Required variable not set: (.+)`) usageRegexp = regexp.MustCompile(`Too many command line arguments|^Usage: .*Options:.*|Error: Invalid -\d+ option`) @@ -26,10 +26,12 @@ var ( tfVersionMismatchErrRegexp = regexp.MustCompile(`Error: The currently running version of Terraform doesn't meet the|Error: Unsupported Terraform Core version`) tfVersionMismatchConstraintRegexp = 
regexp.MustCompile(`required_version = "(.+)"|Required version: (.+)\b`) + configInvalidErrRegexp = regexp.MustCompile(`There are some problems with the configuration, described below.`) ) func (tf *Terraform) parseError(err error, stderr string) error { - if _, ok := err.(*exec.ExitError); !ok { + ee, ok := err.(*exec.ExitError) + if !ok { return err } @@ -86,10 +88,25 @@ func (tf *Terraform) parseError(err error, stderr string) error { if len(submatches) == 2 { return &ErrWorkspaceExists{submatches[1]} } + case configInvalidErrRegexp.MatchString(stderr): + return &ErrConfigInvalid{stderr: stderr} + } + errString := strings.TrimSpace(stderr) + if errString == "" { + // if stderr is empty, return the ExitError directly, as it will have a better message + return ee } return errors.New(stderr) } +type ErrConfigInvalid struct { + stderr string +} + +func (e *ErrConfigInvalid) Error() string { + return "configuration is invalid" +} + type ErrNoSuitableBinary struct { err error } diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/fmt.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/fmt.go new file mode 100644 index 00000000000..de30890a697 --- /dev/null +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/fmt.go @@ -0,0 +1,160 @@ +package tfexec + +import ( + "bytes" + "context" + "fmt" + "io" + "os/exec" + "path/filepath" + "strings" +) + +type formatConfig struct { + recursive bool + dir string +} + +var defaultFormatConfig = formatConfig{ + recursive: false, +} + +type FormatOption interface { + configureFormat(*formatConfig) +} + +func (opt *RecursiveOption) configureFormat(conf *formatConfig) { + conf.recursive = opt.recursive +} + +func (opt *DirOption) configureFormat(conf *formatConfig) { + conf.dir = opt.path +} + +// FormatString formats a passed string, given a path to Terraform. +func FormatString(ctx context.Context, execPath string, content string) (string, error) { + tf, err := NewTerraform(filepath.Dir(execPath), execPath) + if err != nil { + return "", err + } + + return tf.FormatString(ctx, content) +} + +// FormatString formats a passed string. +func (tf *Terraform) FormatString(ctx context.Context, content string) (string, error) { + in := strings.NewReader(content) + var outBuf bytes.Buffer + err := tf.Format(ctx, in, &outBuf) + if err != nil { + return "", err + } + return outBuf.String(), nil +} + +// Format performs formatting on the unformatted io.Reader (as stdin to the CLI) and returns +// the formatted result on the formatted io.Writer. +func (tf *Terraform) Format(ctx context.Context, unformatted io.Reader, formatted io.Writer) error { + cmd, err := tf.formatCmd(ctx, nil, Dir("-")) + if err != nil { + return err + } + + cmd.Stdin = unformatted + cmd.Stdout = mergeWriters(cmd.Stdout, formatted) + + return tf.runTerraformCmd(cmd) +} + +// FormatWrite attempts to format and modify all config files in the working or selected (via DirOption) directory. +func (tf *Terraform) FormatWrite(ctx context.Context, opts ...FormatOption) error { + for _, o := range opts { + switch o := o.(type) { + case *DirOption: + if o.path == "-" { + return fmt.Errorf("a path of \"-\" is not supported for this method, please use FormatString") + } + } + } + + cmd, err := tf.formatCmd(ctx, []string{"-write=true", "-list=false", "-diff=false"}, opts...) 
+ if err != nil { + return err + } + + return tf.runTerraformCmd(cmd) +} + +// FormatCheck returns true if the config files in the working or selected (via DirOption) directory are already formatted. +func (tf *Terraform) FormatCheck(ctx context.Context, opts ...FormatOption) (bool, []string, error) { + for _, o := range opts { + switch o := o.(type) { + case *DirOption: + if o.path == "-" { + return false, nil, fmt.Errorf("a path of \"-\" is not supported for this method, please use FormatString") + } + } + } + + cmd, err := tf.formatCmd(ctx, []string{"-write=false", "-list=true", "-diff=false", "-check=true"}, opts...) + if err != nil { + return false, nil, err + } + + var outBuf bytes.Buffer + cmd.Stdout = mergeWriters(cmd.Stdout, &outBuf) + + err = tf.runTerraformCmd(cmd) + if err == nil { + return true, nil, nil + } + if cmd.ProcessState.ExitCode() == 3 { + // unformatted, parse the file list + + files := []string{} + lines := strings.Split(strings.Replace(outBuf.String(), "\r\n", "\n", -1), "\n") + for _, l := range lines { + l = strings.TrimSpace(l) + if l == "" { + continue + } + files = append(files, l) + } + + return false, files, nil + } + return false, nil, err +} + +func (tf *Terraform) formatCmd(ctx context.Context, args []string, opts ...FormatOption) (*exec.Cmd, error) { + err := tf.compatible(ctx, tf0_7_7, nil) + if err != nil { + return nil, fmt.Errorf("fmt was first introduced in Terraform 0.7.7: %w", err) + } + + c := defaultFormatConfig + + for _, o := range opts { + switch o.(type) { + case *RecursiveOption: + err := tf.compatible(ctx, tf0_12_0, nil) + if err != nil { + return nil, fmt.Errorf("-recursive was added to fmt in Terraform 0.12: %w", err) + } + } + + o.configureFormat(&c) + } + + args = append([]string{"fmt", "-no-color"}, args...) + + if c.recursive { + args = append(args, "-recursive") + } + + if c.dir != "" { + args = append(args, c.dir) + } + + return tf.buildTerraformCmd(ctx, nil, args...), nil +} diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go index 4497f7f2773..6d20869bd46 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go @@ -34,6 +34,15 @@ func BackendConfig(backendConfig string) *BackendConfigOption { return &BackendConfigOption{backendConfig} } +type BackupOutOption struct { + path string +} + +// BackupOutOption represents the -backup-out flag. +func BackupOut(path string) *BackupOutOption { + return &BackupOutOption{path} +} + // BackupOption represents the -backup flag. type BackupOption struct { path string @@ -99,6 +108,23 @@ func Destroy(destroy bool) *DestroyFlagOption { return &DestroyFlagOption{destroy} } +type DryRunOption struct { + dryRun bool +} + +// DryRun represents the -dry-run flag. 
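// NOTE(editor): illustrative sketch, not part of the vendored diff. It uses
// the tfexec fmt helpers added above; the working directory and terraform
// binary path are hypothetical.
//
//	package main
//
//	import (
//		"context"
//		"fmt"
//		"log"
//
//		"github.com/hashicorp/terraform-exec/tfexec"
//	)
//
//	func main() {
//		ctx := context.Background()
//		tf, err := tfexec.NewTerraform("/tmp/work", "/usr/local/bin/terraform")
//		if err != nil {
//			log.Fatal(err)
//		}
//
//		// Format a snippet in memory (runs `terraform fmt -` under the hood).
//		out, err := tf.FormatString(ctx, `resource "null_resource" "a" {   }`)
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Print(out)
//
//		// Check a whole tree; fmt's exit code 3 becomes (false, files, nil).
//		ok, files, err := tf.FormatCheck(ctx, tfexec.Recursive(true))
//		if err != nil {
//			log.Fatal(err)
//		}
//		if !ok {
//			fmt.Println("needs formatting:", files)
//		}
//	}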
+func DryRun(dryRun bool) *DryRunOption { + return &DryRunOption{dryRun} +} + +type ForceOption struct { + force bool +} + +func Force(force bool) *ForceOption { + return &ForceOption{force} +} + type ForceCopyOption struct { forceCopy bool } @@ -217,6 +243,14 @@ func Reconfigure(reconfigure bool) *ReconfigureOption { return &ReconfigureOption{reconfigure} } +type RecursiveOption struct { + recursive bool +} + +func Recursive(r bool) *RecursiveOption { + return &RecursiveOption{r} +} + type RefreshOption struct { refresh bool } diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/show.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/show.go index e2f52870443..dbb58f04022 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/show.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/show.go @@ -49,6 +49,7 @@ func (tf *Terraform) Show(ctx context.Context, opts ...ShowOption) (*tfjson.Stat showCmd := tf.showCmd(ctx, true, mergeEnv) var ret tfjson.State + ret.UseJSONNumber(true) err = tf.runTerraformCmdJSON(showCmd, &ret) if err != nil { return nil, err @@ -91,6 +92,7 @@ func (tf *Terraform) ShowStateFile(ctx context.Context, statePath string, opts . showCmd := tf.showCmd(ctx, true, mergeEnv, statePath) var ret tfjson.State + ret.UseJSONNumber(true) err = tf.runTerraformCmdJSON(showCmd, &ret) if err != nil { return nil, err diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/state_mv.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/state_mv.go new file mode 100644 index 00000000000..1646e52cd80 --- /dev/null +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/state_mv.go @@ -0,0 +1,105 @@ +package tfexec + +import ( + "context" + "os/exec" + "strconv" +) + +type stateMvConfig struct { + backup string + backupOut string + dryRun bool + lock bool + lockTimeout string + state string + stateOut string +} + +var defaultStateMvOptions = stateMvConfig{ + lock: true, + lockTimeout: "0s", +} + +// StateMvCmdOption represents options used in the Refresh method. +type StateMvCmdOption interface { + configureStateMv(*stateMvConfig) +} + +func (opt *BackupOption) configureStateMv(conf *stateMvConfig) { + conf.backup = opt.path +} + +func (opt *BackupOutOption) configureStateMv(conf *stateMvConfig) { + conf.backupOut = opt.path +} + +func (opt *DryRunOption) configureStateMv(conf *stateMvConfig) { + conf.dryRun = opt.dryRun +} + +func (opt *LockOption) configureStateMv(conf *stateMvConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureStateMv(conf *stateMvConfig) { + conf.lockTimeout = opt.timeout +} + +func (opt *StateOption) configureStateMv(conf *stateMvConfig) { + conf.state = opt.path +} + +func (opt *StateOutOption) configureStateMv(conf *stateMvConfig) { + conf.stateOut = opt.path +} + +// StateMv represents the terraform state mv subcommand. +func (tf *Terraform) StateMv(ctx context.Context, source string, destination string, opts ...StateMvCmdOption) error { + cmd, err := tf.stateMvCmd(ctx, source, destination, opts...) 
+ if err != nil { + return err + } + return tf.runTerraformCmd(cmd) +} + +func (tf *Terraform) stateMvCmd(ctx context.Context, source string, destination string, opts ...StateMvCmdOption) (*exec.Cmd, error) { + c := defaultStateMvOptions + + for _, o := range opts { + o.configureStateMv(&c) + } + + args := []string{"state", "mv", "-no-color"} + + // string opts: only pass if set + if c.backup != "" { + args = append(args, "-backup="+c.backup) + } + if c.backupOut != "" { + args = append(args, "-backup-out="+c.backupOut) + } + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + if c.state != "" { + args = append(args, "-state="+c.state) + } + if c.stateOut != "" { + args = append(args, "-state-out="+c.stateOut) + } + + // boolean and numerical opts: always pass + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + + // unary flags: pass if true + if c.dryRun { + args = append(args, "-dry-run") + } + + // positional arguments + args = append(args, source) + args = append(args, destination) + + return tf.buildTerraformCmd(ctx, nil, args...), nil +} diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go index beeb82639c1..804402b47f4 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go @@ -7,7 +7,6 @@ import ( "io/ioutil" "log" "os" - "strings" "sync" "github.com/hashicorp/go-version" @@ -85,15 +84,10 @@ func NewTerraform(workingDir string, execPath string) (*Terraform, error) { // from os.Environ. Attempting to set environment variables that should be managed manually will // result in ErrManualEnvVar being returned. func (tf *Terraform) SetEnv(env map[string]string) error { - for k := range env { - if strings.HasPrefix(k, varEnvVarPrefix) { - return fmt.Errorf("variables should be passed using the Var option: %w", &ErrManualEnvVar{k}) - } - for _, p := range prohibitedEnvVars { - if p == k { - return &ErrManualEnvVar{k} - } - } + prohibited := ProhibitedEnv(env) + if len(prohibited) > 0 { + // just error on the first instance + return &ErrManualEnvVar{prohibited[0]} } tf.env = env diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade012.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade012.go new file mode 100644 index 00000000000..5af05b7e623 --- /dev/null +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade012.go @@ -0,0 +1,80 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" +) + +type upgrade012Config struct { + dir string + force bool + + reattachInfo ReattachInfo +} + +var defaultUpgrade012Options = upgrade012Config{ + force: false, +} + +// Upgrade012Option represents options used in the Destroy method. +type Upgrade012Option interface { + configureUpgrade012(*upgrade012Config) +} + +func (opt *DirOption) configureUpgrade012(conf *upgrade012Config) { + conf.dir = opt.path +} + +func (opt *ForceOption) configureUpgrade012(conf *upgrade012Config) { + conf.force = opt.force +} + +func (opt *ReattachOption) configureUpgrade012(conf *upgrade012Config) { + conf.reattachInfo = opt.info +} + +// Upgrade012 represents the terraform 0.12upgrade subcommand. +func (tf *Terraform) Upgrade012(ctx context.Context, opts ...Upgrade012Option) error { + cmd, err := tf.upgrade012Cmd(ctx, opts...) 
+ if err != nil { + return err + } + return tf.runTerraformCmd(cmd) +} + +func (tf *Terraform) upgrade012Cmd(ctx context.Context, opts ...Upgrade012Option) (*exec.Cmd, error) { + err := tf.compatible(ctx, tf0_12_0, tf0_13_0) + if err != nil { + return nil, fmt.Errorf("terraform 0.12upgrade is only supported in 0.12 releases: %w", err) + } + + c := defaultUpgrade012Options + + for _, o := range opts { + o.configureUpgrade012(&c) + } + + args := []string{"0.12upgrade", "-no-color", "-yes"} + + // boolean opts: only pass if set + if c.force { + args = append(args, "-force") + } + + // optional positional argument + if c.dir != "" { + args = append(args, c.dir) + } + + mergeEnv := map[string]string{} + if c.reattachInfo != nil { + reattachStr, err := c.reattachInfo.marshalString() + if err != nil { + return nil, err + } + mergeEnv[reattachEnvVar] = reattachStr + } + + return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil +} diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/validate.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/validate.go new file mode 100644 index 00000000000..cac9d6b8479 --- /dev/null +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/validate.go @@ -0,0 +1,43 @@ +package tfexec + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + + tfjson "github.com/hashicorp/terraform-json" +) + +// Validate represents the validate subcommand to the Terraform CLI. The -json +// flag support was added in 0.12.0, so this will not work on earlier versions. +func (tf *Terraform) Validate(ctx context.Context) (*tfjson.ValidateOutput, error) { + err := tf.compatible(ctx, tf0_12_0, nil) + if err != nil { + return nil, fmt.Errorf("terraform validate -json was added in 0.12.0: %w", err) + } + + cmd := tf.buildTerraformCmd(ctx, nil, "validate", "-no-color", "-json") + + var outbuf = bytes.Buffer{} + cmd.Stdout = &outbuf + + err = tf.runTerraformCmd(cmd) + // TODO: this command should not exit 1 if you pass -json as its hard to differentiate other errors + if err != nil && cmd.ProcessState.ExitCode() != 1 { + return nil, err + } + + var out tfjson.ValidateOutput + jsonErr := json.Unmarshal(outbuf.Bytes(), &out) + if jsonErr != nil { + // the original call was possibly bad, if it has an error, actually just return that + if err != nil { + return nil, err + } + + return nil, jsonErr + } + + return &out, nil +} diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go index ae1dd5fa2a8..6f3f13959da 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go @@ -12,6 +12,7 @@ import ( ) var ( + tf0_7_7 = version.Must(version.NewVersion("0.7.7")) tf0_12_0 = version.Must(version.NewVersion("0.12.0")) tf0_13_0 = version.Must(version.NewVersion("0.13.0")) ) diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfinstall/git_ref.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfinstall/git_ref.go deleted file mode 100644 index 60c01783270..00000000000 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfinstall/git_ref.go +++ /dev/null @@ -1,84 +0,0 @@ -package tfinstall - -import ( - "context" - "fmt" - "io/ioutil" - "log" - "os/exec" - "runtime" - - "github.com/go-git/go-git/v5" - "github.com/go-git/go-git/v5/plumbing" -) - -type GitRefOption struct { - 
installDir string - repoURL string - ref string -} - -var _ ExecPathFinder = &GitRefOption{} - -func GitRef(ref, repo, installDir string) *GitRefOption { - return &GitRefOption{ - installDir: installDir, - repoURL: repo, - ref: ref, - } -} - -func (opt *GitRefOption) ExecPath(ctx context.Context) (string, error) { - installDir, err := ensureInstallDir(opt.installDir) - if err != nil { - return "", err - } - - ref := plumbing.ReferenceName(opt.ref) - if opt.ref == "" { - ref = plumbing.ReferenceName("refs/heads/master") - } - - repoURL := opt.repoURL - if repoURL == "" { - repoURL = "https://github.com/hashicorp/terraform.git" - } - - _, err = git.PlainClone(installDir, false, &git.CloneOptions{ - URL: repoURL, - ReferenceName: ref, - - Depth: 1, - Tags: git.NoTags, - }) - if err != nil { - return "", fmt.Errorf("unable to clone %q: %w", repoURL, err) - } - - var binName string - { - // TODO: maybe there is a better way to make sure this filename is available? - // I guess we could locate it in a different dir, or nest the git underneath - // the root tmp dir, etc. - binPattern := "terraform" - if runtime.GOOS == "windows" { - binPattern = "terraform*.exe" - } - binFile, err := ioutil.TempFile(installDir, binPattern) - if err != nil { - return "", fmt.Errorf("unable to create bin file: %w", err) - } - binName = binFile.Name() - binFile.Close() - } - - cmd := exec.CommandContext(ctx, "go", "build", "-mod", "vendor", "-o", binName) - cmd.Dir = installDir - out, err := cmd.CombinedOutput() - log.Print(string(out)) - if err != nil { - return "", fmt.Errorf("unable to build Terraform: %w\n%s", err, out) - } - - return binName, nil -} diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-json/Makefile b/awsproviderlint/vendor/github.com/hashicorp/terraform-json/Makefile index 2d3f9a162dc..a279462b82a 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-json/Makefile +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-json/Makefile @@ -5,7 +5,7 @@ test: tools gotestsum --format=short-verbose $(TEST) $(TESTARGS) generate: - cd test-fixtures && make generate + cd testdata && make generate modules: go mod download && go mod verify diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-json/config.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-json/config.go index 3b8be511eef..36e53e6e755 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-json/config.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-json/config.go @@ -181,4 +181,8 @@ type ModuleCall struct { // The version constraint for modules that come from the registry. VersionConstraint string `json:"version_constraint,omitempty"` + + // The explicit resource dependencies for the "depends_on" value. + // As it must be a slice of references, Expression is not used. 
+	DependsOn []string `json:"depends_on,omitempty"`
 }
diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-json/go.mod b/awsproviderlint/vendor/github.com/hashicorp/terraform-json/go.mod
index 0e7ab50f354..415bf18bf3b 100644
--- a/awsproviderlint/vendor/github.com/hashicorp/terraform-json/go.mod
+++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-json/go.mod
@@ -4,5 +4,6 @@ go 1.13
 
 require (
 	github.com/davecgh/go-spew v1.1.1
+	github.com/google/go-cmp v0.3.1
 	github.com/zclconf/go-cty v1.2.1
 )
diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-json/schemas.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-json/schemas.go
index 5dd430a5553..e688c1728ed 100644
--- a/awsproviderlint/vendor/github.com/hashicorp/terraform-json/schemas.go
+++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-json/schemas.go
@@ -88,7 +88,7 @@ type SchemaDescriptionKind string
 
 const (
 	// SchemaDescriptionKindPlain indicates a string in plain text format.
-	SchemaDescriptionKindPlain SchemaDescriptionKind = "plaintext"
+	SchemaDescriptionKindPlain SchemaDescriptionKind = "plain"
 
 	// SchemaDescriptionKindMarkdown indicates a Markdown string and may need to be
 	// processed prior to presentation.
diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-json/state.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-json/state.go
index a320bc8271d..e1a9149c159 100644
--- a/awsproviderlint/vendor/github.com/hashicorp/terraform-json/state.go
+++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-json/state.go
@@ -1,6 +1,7 @@
 package tfjson
 
 import (
+	"bytes"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -12,6 +13,12 @@ const StateFormatVersion = "0.1"
 
 // State is the top-level representation of a Terraform state.
 type State struct {
+	// useJSONNumber opts into the behavior of calling
+	// json.Decoder.UseNumber prior to decoding the state, which turns
+	// numbers into json.Numbers instead of float64s. Set it using
+	// State.UseJSONNumber.
+	useJSONNumber bool
+
 	// The version of the state format. This should always match the
 	// StateFormatVersion constant in this package, or else an
 	// unmarshal will be unstable.
@@ -24,6 +31,14 @@
 	Values *StateValues `json:"values,omitempty"`
 }
 
+// UseJSONNumber controls whether the State will be decoded using the
+// json.Number behavior or the float64 behavior. When b is true, the State will
+// represent numbers in StateOutputs as json.Numbers. When b is false, the
+// State will represent numbers in StateOutputs as float64s.
+func (s *State) UseJSONNumber(b bool) {
+	s.useJSONNumber = b
+}
+
 // Validate checks to ensure that the state is present, and the
 // version matches the version supported by this library.
func (s *State) Validate() error { @@ -46,7 +61,11 @@ func (s *State) UnmarshalJSON(b []byte) error { type rawState State var state rawState - err := json.Unmarshal(b, &state) + dec := json.NewDecoder(bytes.NewReader(b)) + if s.useJSONNumber { + dec.UseNumber() + } + err := dec.Decode(&state) if err != nil { return err } diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-json/validate.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-json/validate.go new file mode 100644 index 00000000000..7de13ad51e9 --- /dev/null +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-json/validate.go @@ -0,0 +1,33 @@ +package tfjson + +// Pos represents a position in a config file +type Pos struct { + Line int `json:"line"` + Column int `json:"column"` + Byte int `json:"byte"` +} + +// Range represents a range of bytes between two positions +type Range struct { + Filename string `json:"filename"` + Start Pos `json:"start"` + End Pos `json:"end"` +} + +// Diagnostic represents information to be presented to a user about an +// error or anomaly in parsing or evaluating configuration +type Diagnostic struct { + Severity string `json:"severity,omitempty"` + Summary string `json:"summary,omitempty"` + Detail string `json:"detail,omitempty"` + Range *Range `json:"range,omitempty"` +} + +// ValidateOutput represents JSON output from terraform validate +// (available from 0.12 onwards) +type ValidateOutput struct { + Valid bool `json:"valid"` + ErrorCount int `json:"error_count"` + WarningCount int `json:"warning_count"` + Diagnostics []Diagnostic `json:"diagnostics"` +} diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-json/version.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-json/version.go new file mode 100644 index 00000000000..16f0a853e34 --- /dev/null +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-json/version.go @@ -0,0 +1,11 @@ +package tfjson + +// VersionOutput represents output from the version -json command +// added in v0.13 +type VersionOutput struct { + Version string `json:"terraform_version"` + Revision string `json:"terraform_revision"` + Platform string `json:"platform,omitempty"` + ProviderSelections map[string]string `json:"provider_selections"` + Outdated bool `json:"terraform_outdated"` +} diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/helpers.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/helpers.go index 8037b1301e5..d43eab3249f 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/helpers.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/helpers.go @@ -10,6 +10,9 @@ import "fmt" // return diag.FromErr(err) // } func FromErr(err error) Diagnostics { + if err == nil { + return nil + } return Diagnostics{ Diagnostic{ Severity: Error, diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/error.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/error.go index 7ee21614b9f..b406cbaea45 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/error.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/error.go @@ -26,6 +26,10 @@ func (e *NotFoundError) Error() string { return "couldn't find resource" } +func (e *NotFoundError) Unwrap() error { + return e.LastError +} + // UnexpectedStateError is returned when Refresh returns a state 
that's neither in Target nor Pending type UnexpectedStateError struct { LastError error @@ -42,6 +46,10 @@ func (e *UnexpectedStateError) Error() string { ) } +func (e *UnexpectedStateError) Unwrap() error { + return e.LastError +} + // TimeoutError is returned when WaitForState times out type TimeoutError struct { LastError error @@ -77,3 +85,7 @@ func (e *TimeoutError) Error() string { return fmt.Sprintf("timeout while waiting for %s%s", expectedState, suffix) } + +func (e *TimeoutError) Unwrap() error { + return e.LastError +} diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/json.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/json.go new file mode 100644 index 00000000000..345abf71995 --- /dev/null +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/json.go @@ -0,0 +1,12 @@ +package resource + +import ( + "bytes" + "encoding/json" +) + +func unmarshalJSON(data []byte, v interface{}) error { + dec := json.NewDecoder(bytes.NewReader(data)) + dec.UseNumber() + return dec.Decode(v) +} diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go index e247c647fc7..bef3a442f19 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go @@ -12,7 +12,6 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/terraform-exec/tfexec" "github.com/hashicorp/terraform-plugin-go/tfprotov5" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest" "github.com/hashicorp/terraform-plugin-sdk/v2/plugin" @@ -36,6 +35,9 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, // plugins. os.Setenv("PLUGIN_PROTOCOL_VERSIONS", "5") + // Terraform doesn't need to reach out to Checkpoint during testing. + wd.Setenv("CHECKPOINT_DISABLE", "1") + // Terraform 0.12.X and 0.13.X+ treat namespaceless providers // differently in terms of what namespace they default to. 
So we're // going to set both variations, as we don't know which version of @@ -83,6 +85,7 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, Level: hclog.Trace, Output: ioutil.Discard, }), + NoLogOutputOverride: true, } // let's actually start the provider server @@ -100,10 +103,6 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, }, } - // plugin.DebugServe hijacks our log output location, so let's - // reset it - logging.SetOutput(t) - // when the provider exits, remove one from the waitgroup // so we can track when everything is done go func(c <-chan struct{}) { @@ -162,6 +161,7 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, Level: hclog.Trace, Output: ioutil.Discard, }), + NoLogOutputOverride: true, } // let's actually start the provider server @@ -179,10 +179,6 @@ func runProviderCommand(t testing.T, f func() error, wd *plugintest.WorkingDir, }, } - // plugin.DebugServe hijacks our log output location, so let's - // reset it - logging.SetOutput(t) - // when the provider exits, remove one from the waitgroup // so we can track when everything is done go func(c <-chan struct{}) { diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go index 42c7b64081c..7f8c225291e 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go @@ -1,6 +1,7 @@ package resource import ( + "encoding/json" "fmt" "strconv" @@ -70,11 +71,11 @@ func shimOutputState(so *tfjson.StateOutput) (*terraform.OutputState, error) { elements[i] = el.(bool) } os.Value = elements - // unmarshalled number from JSON will always be float64 - case float64: + // unmarshalled number from JSON will always be json.Number + case json.Number: elements := make([]interface{}, len(v)) for i, el := range v { - elements[i] = el.(float64) + elements[i] = el.(json.Number) } os.Value = elements case []interface{}: @@ -93,10 +94,10 @@ func shimOutputState(so *tfjson.StateOutput) (*terraform.OutputState, error) { os.Type = "string" os.Value = strconv.FormatBool(v) return os, nil - // unmarshalled number from JSON will always be float64 - case float64: + // unmarshalled number from JSON will always be json.Number + case json.Number: os.Type = "string" - os.Value = strconv.FormatFloat(v, 'f', -1, 64) + os.Value = v.String() return os, nil } @@ -155,8 +156,13 @@ func shimResourceStateKey(res *tfjson.StateResource) (string, error) { var index int switch idx := res.Index.(type) { - case float64: - index = int(idx) + case json.Number: + i, err := idx.Int64() + if err != nil { + return "", fmt.Errorf("unexpected index value (%q) for %q, ", + idx, res.Address) + } + index = int(i) default: return "", fmt.Errorf("unexpected index type (%T) for %q, "+ "for_each is not supported", res.Index, res.Address) @@ -256,8 +262,8 @@ func (sf *shimmedFlatmap) AddEntry(key string, value interface{}) error { return nil case bool: sf.m[key] = strconv.FormatBool(el) - case float64: - sf.m[key] = strconv.FormatFloat(el, 'f', -1, 64) + case json.Number: + sf.m[key] = el.String() case string: sf.m[key] = el case map[string]interface{}: diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go 
b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go index 30952460b45..c9d90dd2159 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go @@ -114,7 +114,7 @@ func runNewTest(t testing.T, c TestCase, helper *plugintest.Helper) { t.Fatalf("Step %d/%d error running import, expected an error with pattern (%s), no match on: %s", i+1, len(c.Steps), step.ExpectError.String(), err) } } else { - if c.ErrorCheck != nil { + if err != nil && c.ErrorCheck != nil { err = c.ErrorCheck(err) } if err != nil { @@ -134,7 +134,7 @@ func runNewTest(t testing.T, c TestCase, helper *plugintest.Helper) { t.Fatalf("Step %d/%d, expected an error with pattern, no match on: %s", i+1, len(c.Steps), err) } } else { - if c.ErrorCheck != nil { + if err != nil && c.ErrorCheck != nil { err = c.ErrorCheck(err) } if err != nil { diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go index fdd322e2daa..0e218f60217 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go @@ -1,6 +1,7 @@ package resource import ( + "errors" "fmt" tfjson "github.com/hashicorp/terraform-json" @@ -196,7 +197,7 @@ func testStepNewConfig(t testing.T, c TestCase, wd *plugintest.WorkingDir, step } return fmt.Errorf("After applying this test step and performing a `terraform refresh`, the plan was not empty.\nstdout\n\n%s", stdout) } else if step.ExpectNonEmptyPlan && planIsEmpty(plan) { - return fmt.Errorf("Expected a non-empty plan, but got an empty plan!") + return errors.New("Expected a non-empty plan, but got an empty plan") } // ID-ONLY REFRESH diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/wait.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/wait.go index b625feae0ea..6b7bdae8c96 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/wait.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/wait.go @@ -78,6 +78,10 @@ type RetryError struct { Retryable bool } +func (e *RetryError) Unwrap() error { + return e.Err +} + // RetryableError is a helper to create a RetryError that's retryable from a // given error. To prevent logic errors, will return an error when passed a // nil error. diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/grpc_provider.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/grpc_provider.go index 354c94e0716..d9c824d26d8 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/grpc_provider.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/grpc_provider.go @@ -282,7 +282,11 @@ func (s *GRPCProviderServer) UpgradeResourceState(ctx context.Context, req *tfpr } // if there's a JSON state, we need to decode it. 
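	// NOTE(editor): aside, not part of the vendored diff. The UseJSONNumber
	// branch below exists because encoding/json decodes JSON numbers into
	// float64 by default, which silently rounds integers above 2^53:
	//
	//	var m map[string]interface{}
	//	json.Unmarshal([]byte(`{"id": 9007199254740993}`), &m)
	//	// m["id"] == float64(9007199254740992) -- precision lost
	//
	// A json.Decoder with UseNumber() instead keeps the raw digits as a
	// json.Number, letting state upgraders convert them losslessly.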
case len(req.RawState.JSON) > 0: - err = json.Unmarshal(req.RawState.JSON, &jsonMap) + if res.UseJSONNumber { + err = unmarshalJSON(req.RawState.JSON, &jsonMap) + } else { + err = json.Unmarshal(req.RawState.JSON, &jsonMap) + } if err != nil { resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) return resp, nil @@ -405,7 +409,7 @@ func (s *GRPCProviderServer) upgradeFlatmapState(ctx context.Context, version in return nil, 0, err } - jsonMap, err := StateValueToJSONMap(newConfigVal, schemaType) + jsonMap, err := stateValueToJSONMap(newConfigVal, schemaType, res.UseJSONNumber) return jsonMap, upgradedVersion, err } diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/json.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/json.go new file mode 100644 index 00000000000..265099a6b6f --- /dev/null +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/json.go @@ -0,0 +1,12 @@ +package schema + +import ( + "bytes" + "encoding/json" +) + +func unmarshalJSON(data []byte, v interface{}) error { + dec := json.NewDecoder(bytes.NewReader(data)) + dec.UseNumber() + return dec.Decode(v) +} diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go index f41964a5890..2f5663d357e 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go @@ -191,6 +191,16 @@ type Resource struct { // other user facing usage. It can be plain-text or markdown depending on the // global DescriptionKind setting. Description string + + // UseJSONNumber should be set when state upgraders will expect + // json.Numbers instead of float64s for numbers. This is added as a + // toggle for backwards compatibility for type assertions, but should + // be used in all new resources to avoid bugs with sufficiently large + // user input. + // + // See github.com/hashicorp/terraform-plugin-sdk/issues/655 for more + // details. + UseJSONNumber bool } // ShimInstanceStateFromValue converts a cty.Value to a diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go index 24befc9d7fa..26e736bf5a0 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go @@ -212,8 +212,6 @@ type Schema struct { // // ValidateFunc is honored only when the schema's Type is set to TypeInt, // TypeFloat, TypeString, TypeBool, or TypeMap. It is ignored for all other types. 
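The new `unmarshalJSON` helper and the `UseJSONNumber` toggle exist because `encoding/json` decodes numbers into `float64` by default, which silently loses precision for integers above 2^53. A self-contained sketch of the difference; the raw state document here is made up for illustration:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical raw state: 2^53 + 1 is not representable as a float64.
	raw := []byte(`{"id": 9007199254740993}`)

	// Default decoding: numbers become float64 and precision is lost.
	var asFloat map[string]interface{}
	if err := json.Unmarshal(raw, &asFloat); err != nil {
		panic(err)
	}
	fmt.Printf("%.0f\n", asFloat["id"].(float64)) // 9007199254740992

	// What the new unmarshalJSON helper does: decode with UseNumber so
	// numbers arrive as json.Number and keep their exact representation.
	var asNumber map[string]interface{}
	dec := json.NewDecoder(bytes.NewReader(raw))
	dec.UseNumber()
	if err := dec.Decode(&asNumber); err != nil {
		panic(err)
	}
	fmt.Println(asNumber["id"].(json.Number)) // 9007199254740993
}
```

Resources that opt in with `UseJSONNumber: true` will accordingly see `json.Number` rather than `float64` in their state upgraders and should type-assert against it, as the `state_shim.go` changes above do on the test-harness side.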
- // - // Deprecated: please use ValidateDiagFunc ValidateFunc SchemaValidateFunc // ValidateDiagFunc allows individual fields to define arbitrary validation diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/shims.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/shims.go index 40a5abc0db8..e1575a0f891 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/shims.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/shims.go @@ -77,14 +77,24 @@ func ApplyDiff(base cty.Value, d *terraform.InstanceDiff, schema *configschema.B // StateValueToJSONMap converts a cty.Value to generic JSON map via the cty JSON // encoding. func StateValueToJSONMap(val cty.Value, ty cty.Type) (map[string]interface{}, error) { + return stateValueToJSONMap(val, ty, false) +} + +func stateValueToJSONMap(val cty.Value, ty cty.Type, useJSONNumber bool) (map[string]interface{}, error) { js, err := ctyjson.Marshal(val, ty) if err != nil { return nil, err } var m map[string]interface{} - if err := json.Unmarshal(js, &m); err != nil { - return nil, err + if useJSONNumber { + if err := unmarshalJSON(js, &m); err != nil { + return nil, err + } + } else { + if err := json.Unmarshal(js, &m); err != nil { + return nil, err + } } return m, nil diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/meta.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/meta.go index 344f8ba1fee..f1376c2d394 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/meta.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/meta.go @@ -4,6 +4,8 @@ import ( "fmt" "reflect" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -57,3 +59,30 @@ func Any(validators ...schema.SchemaValidateFunc) schema.SchemaValidateFunc { return allWarnings, allErrors } } + +// ToDiagFunc is a wrapper for legacy schema.SchemaValidateFunc +// converting it to schema.SchemaValidateDiagFunc +func ToDiagFunc(validator schema.SchemaValidateFunc) schema.SchemaValidateDiagFunc { + return func(i interface{}, p cty.Path) diag.Diagnostics { + var diags diag.Diagnostics + + attr := p[len(p)-1].(cty.GetAttrStep) + ws, es := validator(i, attr.Name) + + for _, w := range ws { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: w, + AttributePath: p, + }) + } + for _, e := range es { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: e.Error(), + AttributePath: p, + }) + } + return diags + } +} diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/testing.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/testing.go index c76a4ed999f..596c5754a48 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/testing.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation/testing.go @@ -5,6 +5,8 @@ import ( testing "github.com/mitchellh/go-testing-interface" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -14,20 +16,15 @@ type testCase struct { 
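`ToDiagFunc` bridges the two validator interfaces: it runs a legacy `SchemaValidateFunc` and converts the returned warnings and errors into `diag.Diagnostics` that carry the attribute path. A usage sketch that exercises the wrapper directly the way the SDK would; the attribute name and allowed values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

func main() {
	// Wrap a legacy SchemaValidateFunc as a diag-based validator.
	f := validation.ToDiagFunc(validation.StringInSlice([]string{"a", "b", "c"}, true))

	// The SDK invokes diag validators with the attribute's cty.Path; the
	// wrapper uses the final GetAttrStep as the legacy "key" argument.
	p := cty.Path{cty.GetAttrStep{Name: "mode"}}

	diags := f("bogus", p)
	fmt.Println(diags.HasError()) // true: "bogus" is not in the allowed set
}
```

In a resource schema the same wrapper would typically be assigned to a field's `ValidateDiagFunc`, letting existing validators like `validation.StringInSlice` be reused unchanged.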
expectedErr *regexp.Regexp } +type diagTestCase struct { + val interface{} + f schema.SchemaValidateDiagFunc + expectedErr *regexp.Regexp +} + func runTestCases(t testing.T, cases []testCase) { t.Helper() - matchErr := func(errs []error, r *regexp.Regexp) bool { - // err must match one provided - for _, err := range errs { - if r.MatchString(err.Error()) { - return true - } - } - - return false - } - for i, tc := range cases { _, errs := tc.f(tc.val, "test_property") @@ -39,8 +36,50 @@ func runTestCases(t testing.T, cases []testCase) { t.Fatalf("expected test case %d to produce no errors, got %v", i, errs) } - if !matchErr(errs, tc.expectedErr) { + if !matchAnyError(errs, tc.expectedErr) { t.Fatalf("expected test case %d to produce error matching \"%s\", got %v", i, tc.expectedErr, errs) } } } + +func matchAnyError(errs []error, r *regexp.Regexp) bool { + // err must match one provided + for _, err := range errs { + if r.MatchString(err.Error()) { + return true + } + } + return false +} + +func runDiagTestCases(t testing.T, cases []diagTestCase) { + t.Helper() + + for i, tc := range cases { + p := cty.Path{ + cty.GetAttrStep{Name: "test_property"}, + } + diags := tc.f(tc.val, p) + + if !diags.HasError() && tc.expectedErr == nil { + continue + } + + if diags.HasError() && tc.expectedErr == nil { + t.Fatalf("expected test case %d to produce no errors, got %v", i, diags) + } + + if !matchAnyDiagSummary(diags, tc.expectedErr) { + t.Fatalf("expected test case %d to produce error matching \"%s\", got %v", i, tc.expectedErr, diags) + } + } +} + +func matchAnyDiagSummary(ds diag.Diagnostics, r *regexp.Regexp) bool { + for _, d := range ds { + if r.MatchString(d.Summary) { + return true + } + } + return false +} diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go index c248b87c1c9..97353430aaf 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go @@ -11,7 +11,7 @@ import ( ) // The main version number that is being run at the moment. -var SDKVersion = "2.3.0" +var SDKVersion = "2.4.0" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go index 11ecb85bf0c..baaab2d1d1c 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go @@ -1,12 +1,14 @@ package plugin import ( + "log" + hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" "google.golang.org/grpc" "github.com/hashicorp/terraform-plugin-go/tfprotov5" - "github.com/hashicorp/terraform-plugin-go/tfprotov5/server" + tf5server "github.com/hashicorp/terraform-plugin-go/tfprotov5/server" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -43,11 +45,31 @@ type ServeOpts struct { // plugin's lifecycle and communicate connection information. See the // go-plugin GoDoc for more information. TestConfig *plugin.ServeTestConfig + + // Set NoLogOutputOverride to not override the log output with an hclog + // adapter. 
This should only be used when running the plugin in + // acceptance tests. + NoLogOutputOverride bool } // Serve serves a plugin. This function never returns and should be the final // function called in the main function of the plugin. func Serve(opts *ServeOpts) { + if !opts.NoLogOutputOverride { + // In order to allow go-plugin to correctly pass log-levels through to + // terraform, we need to use an hclog.Logger with JSON output. We can + // inject this into the std `log` package here, so existing providers will + // make use of it automatically. + logger := hclog.New(&hclog.LoggerOptions{ + // We send all output to terraform. Go-plugin will take the output and + // pass it through another hclog.Logger on the client side where it can + // be filtered. + Level: hclog.Trace, + JSONFormat: true, + }) + log.SetOutput(logger.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true})) + } + // since the plugins may not yet be aware of the new protocol, we // automatically wrap the plugins in the grpc shims. if opts.GRPCProviderFunc == nil && opts.ProviderFunc != nil { diff --git a/awsproviderlint/vendor/github.com/imdario/mergo/.deepsource.toml b/awsproviderlint/vendor/github.com/imdario/mergo/.deepsource.toml deleted file mode 100644 index 8a0681af855..00000000000 --- a/awsproviderlint/vendor/github.com/imdario/mergo/.deepsource.toml +++ /dev/null @@ -1,12 +0,0 @@ -version = 1 - -test_patterns = [ - "*_test.go" -] - -[[analyzers]] -name = "go" -enabled = true - - [analyzers.meta] - import_path = "github.com/imdario/mergo" \ No newline at end of file diff --git a/awsproviderlint/vendor/github.com/imdario/mergo/.gitignore b/awsproviderlint/vendor/github.com/imdario/mergo/.gitignore deleted file mode 100644 index 529c3412ba9..00000000000 --- a/awsproviderlint/vendor/github.com/imdario/mergo/.gitignore +++ /dev/null @@ -1,33 +0,0 @@ -#### joe made this: http://goel.io/joe - -#### go #### -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ - -#### vim #### -# Swap -[._]*.s[a-v][a-z] -[._]*.sw[a-p] -[._]s[a-v][a-z] -[._]sw[a-p] - -# Session -Session.vim - -# Temporary -.netrwhist -*~ -# Auto-generated tag files -tags diff --git a/awsproviderlint/vendor/github.com/imdario/mergo/.travis.yml b/awsproviderlint/vendor/github.com/imdario/mergo/.travis.yml deleted file mode 100644 index dad29725f86..00000000000 --- a/awsproviderlint/vendor/github.com/imdario/mergo/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -install: - - go get -t - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls -script: - - go test -race -v ./... 
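Stepping back to the `serve.go` change above: when `NoLogOutputOverride` is unset, `Serve` now routes the standard `log` package through a JSON-formatted `hclog.Logger`, so go-plugin on the Terraform side can parse log levels out of provider output and filter it. A minimal standalone sketch of that rewiring:

```go
package main

import (
	"log"

	hclog "github.com/hashicorp/go-hclog"
)

// A sketch of the log rewiring Serve performs when NoLogOutputOverride is
// false: std-library log output is funneled through an hclog.Logger with
// JSON output so the client side can filter it by level.
func main() {
	logger := hclog.New(&hclog.LoggerOptions{
		Level:      hclog.Trace,
		JSONFormat: true,
	})
	log.SetOutput(logger.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true}))

	// A bracketed level prefix lets InferLevels classify the message.
	log.Println("[DEBUG] provider configured")
}
```

The acceptance-test harness sets `NoLogOutputOverride: true` in the `runProviderCommand` hunks earlier precisely to skip this override, since the test framework manages its own log output.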
-after_script: - - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/awsproviderlint/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/awsproviderlint/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md deleted file mode 100644 index 469b44907a0..00000000000 --- a/awsproviderlint/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,46 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/awsproviderlint/vendor/github.com/imdario/mergo/LICENSE b/awsproviderlint/vendor/github.com/imdario/mergo/LICENSE deleted file mode 100644 index 686680298da..00000000000 --- a/awsproviderlint/vendor/github.com/imdario/mergo/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/awsproviderlint/vendor/github.com/imdario/mergo/README.md b/awsproviderlint/vendor/github.com/imdario/mergo/README.md deleted file mode 100644 index 02fc81e0626..00000000000 --- a/awsproviderlint/vendor/github.com/imdario/mergo/README.md +++ /dev/null @@ -1,238 +0,0 @@ -# Mergo - -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. - -## Status - -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). 
- -[![GoDoc][3]][4] -[![GoCard][5]][6] -[![Build Status][1]][2] -[![Coverage Status][7]][8] -[![Sourcegraph][9]][10] -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield) - -[1]: https://travis-ci.org/imdario/mergo.png -[2]: https://travis-ci.org/imdario/mergo -[3]: https://godoc.org/github.com/imdario/mergo?status.svg -[4]: https://godoc.org/github.com/imdario/mergo -[5]: https://goreportcard.com/badge/imdario/mergo -[6]: https://goreportcard.com/report/github.com/imdario/mergo -[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master -[8]: https://coveralls.io/github/imdario/mergo?branch=master -[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg -[10]: https://sourcegraph.com/github.com/imdario/mergo?badge - -### Latest release - -[Release v0.3.7](https://github.com/imdario/mergo/releases/tag/v0.3.7). - -### Important note - -Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code. - -If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0). - -### Donations - -If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes: - -Buy Me a Coffee at ko-fi.com -[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) -[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo) -Donate using Liberapay - -### Mergo in the wild - -- [moby/moby](https://github.com/moby/moby) -- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) -- [vmware/dispatch](https://github.com/vmware/dispatch) -- [Shopify/themekit](https://github.com/Shopify/themekit) -- [imdario/zas](https://github.com/imdario/zas) -- [matcornic/hermes](https://github.com/matcornic/hermes) -- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) -- [kataras/iris](https://github.com/kataras/iris) -- [michaelsauter/crane](https://github.com/michaelsauter/crane) -- [go-task/task](https://github.com/go-task/task) -- [sensu/uchiwa](https://github.com/sensu/uchiwa) -- [ory/hydra](https://github.com/ory/hydra) -- [sisatech/vcli](https://github.com/sisatech/vcli) -- [dairycart/dairycart](https://github.com/dairycart/dairycart) -- [projectcalico/felix](https://github.com/projectcalico/felix) -- [resin-os/balena](https://github.com/resin-os/balena) -- [go-kivik/kivik](https://github.com/go-kivik/kivik) -- [Telefonica/govice](https://github.com/Telefonica/govice) -- [supergiant/supergiant](supergiant/supergiant) -- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) -- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) -- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) -- [EagerIO/Stout](https://github.com/EagerIO/Stout) -- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) -- [russross/canvasassignments](https://github.com/russross/canvasassignments) -- 
[rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) -- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) -- [divshot/gitling](https://github.com/divshot/gitling) -- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) -- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) -- [elwinar/rambler](https://github.com/elwinar/rambler) -- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) -- [jfbus/impressionist](https://github.com/jfbus/impressionist) -- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) -- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) -- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) -- [thoas/picfit](https://github.com/thoas/picfit) -- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) -- [jnuthong/item_search](https://github.com/jnuthong/item_search) -- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) - -## Installation - - go get github.com/imdario/mergo - - // use in your .go code - import ( - "github.com/imdario/mergo" - ) - -## Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - -```go -if err := mergo.Merge(&dst, src); err != nil { - // ... -} -``` - -Also, you can merge overwriting values using the transformer `WithOverride`. - -```go -if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... -} -``` - -Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. - -```go -if err := mergo.Map(&dst, srcMap); err != nil { - // ... -} -``` - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. - -More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo). - -### Nice example - -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" -) - -type Foo struct { - A string - B int64 -} - -func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} -} -``` - -Note: if test are failing due missing package, please execute: - - go get gopkg.in/yaml.v2 - -### Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? 
- -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" - "reflect" - "time" -) - -type timeTransfomer struct { -} - -func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil -} - -type Snapshot struct { - Time time.Time - // ... -} - -func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } -} -``` - - -## Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) - -## About - -Written by [Dario Castañé](http://dario.im). - -## Top Contributors - -[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0) -[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1) -[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2) -[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3) -[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4) -[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5) -[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6) -[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7) - - -## License - -[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). - - -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/awsproviderlint/vendor/github.com/imdario/mergo/doc.go b/awsproviderlint/vendor/github.com/imdario/mergo/doc.go deleted file mode 100644 index 6e9aa7baf35..00000000000 --- a/awsproviderlint/vendor/github.com/imdario/mergo/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package mergo merges same-type structs and maps by setting default values in zero-value fields. - -Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Usage - -From my own work-in-progress project: - - type networkConfig struct { - Protocol string - Address string - ServerType string `json: "server_type"` - Port uint16 - } - - type FssnConfig struct { - Network networkConfig - } - - var fssnDefault = FssnConfig { - networkConfig { - "tcp", - "127.0.0.1", - "http", - 31560, - }, - } - - // Inside a function [...] 
- - if err := mergo.Merge(&config, fssnDefault); err != nil { - log.Fatal(err) - } - - // More code [...] - -*/ -package mergo diff --git a/awsproviderlint/vendor/github.com/imdario/mergo/map.go b/awsproviderlint/vendor/github.com/imdario/mergo/map.go deleted file mode 100644 index d83258b4dda..00000000000 --- a/awsproviderlint/vendor/github.com/imdario/mergo/map.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2014 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "fmt" - "reflect" - "unicode" - "unicode/utf8" -) - -func changeInitialCase(s string, mapper func(rune) rune) string { - if s == "" { - return s - } - r, n := utf8.DecodeRuneInString(s) - return string(mapper(r)) + s[n:] -} - -func isExported(field reflect.StructField) bool { - r, _ := utf8.DecodeRuneInString(field.Name) - return r >= 'A' && r <= 'Z' -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... - visited[h] = &visit{addr, typ, seen} - } - zeroValue := reflect.Value{} - switch dst.Kind() { - case reflect.Map: - dstMap := dst.Interface().(map[string]interface{}) - for i, n := 0, src.NumField(); i < n; i++ { - srcType := src.Type() - field := srcType.Field(i) - if !isExported(field) { - continue - } - fieldName := field.Name - fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { - dstMap[fieldName] = src.Field(i).Interface() - } - } - case reflect.Ptr: - if dst.IsNil() { - v := reflect.New(dst.Type().Elem()) - dst.Set(v) - } - dst = dst.Elem() - fallthrough - case reflect.Struct: - srcMap := src.Interface().(map[string]interface{}) - for key := range srcMap { - config.overwriteWithEmptyValue = true - srcValue := srcMap[key] - fieldName := changeInitialCase(key, unicode.ToUpper) - dstElement := dst.FieldByName(fieldName) - if dstElement == zeroValue { - // We discard it because the field doesn't exist. - continue - } - srcElement := reflect.ValueOf(srcValue) - dstKind := dstElement.Kind() - srcKind := srcElement.Kind() - if srcKind == reflect.Ptr && dstKind != reflect.Ptr { - srcElement = srcElement.Elem() - srcKind = reflect.TypeOf(srcElement.Interface()).Kind() - } else if dstKind == reflect.Ptr { - // Can this work? I guess it can't. 
- if srcKind != reflect.Ptr && srcElement.CanAddr() { - srcPtr := srcElement.Addr() - srcElement = reflect.ValueOf(srcPtr) - srcKind = reflect.Ptr - } - } - - if !srcElement.IsValid() { - continue - } - if srcKind == dstKind { - if _, err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { - if _, err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if srcKind == reflect.Map { - if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else { - return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) - } - } - } - return -} - -// Map sets fields' values in dst from src. -// src can be a map with string keys or a struct. dst must be the opposite: -// if src is a map, dst must be a valid pointer to struct. If src is a struct, -// dst must be map[string]interface{}. -// It won't merge unexported (private) fields and will do recursively -// any exported field. -// If dst is a map, keys will be src fields' names in lower camel case. -// Missing key in src that doesn't match a field in dst will be skipped. This -// doesn't apply if dst is a map. -// This is separated method from Merge because it is cleaner and it keeps sane -// semantics: merging equal types, mapping different (restricted) types. -func Map(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, opts...) -} - -// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by -// non-empty src attribute values. -// Deprecated: Use Map(…) with WithOverride -func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, append(opts, WithOverride)...) -} - -func _map(dst, src interface{}, opts ...func(*Config)) error { - var ( - vDst, vSrc reflect.Value - err error - ) - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - // To be friction-less, we redirect equal-type arguments - // to deepMerge. Only because arguments can be anything. - if vSrc.Kind() == vDst.Kind() { - _, err := deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) - return err - } - switch vSrc.Kind() { - case reflect.Struct: - if vDst.Kind() != reflect.Map { - return ErrExpectedMapAsDestination - } - case reflect.Map: - if vDst.Kind() != reflect.Struct { - return ErrExpectedStructAsDestination - } - default: - return ErrNotSupported - } - return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} diff --git a/awsproviderlint/vendor/github.com/imdario/mergo/merge.go b/awsproviderlint/vendor/github.com/imdario/mergo/merge.go deleted file mode 100644 index 3332c9c2a7a..00000000000 --- a/awsproviderlint/vendor/github.com/imdario/mergo/merge.go +++ /dev/null @@ -1,338 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. 
- -package mergo - -import ( - "fmt" - "reflect" - "unsafe" -) - -func hasExportedField(dst reflect.Value) (exported bool) { - for i, n := 0, dst.NumField(); i < n; i++ { - field := dst.Type().Field(i) - if isExportedComponent(&field) { - return true - } - } - return -} - -func isExportedComponent(field *reflect.StructField) bool { - name := field.Name - pkgPath := field.PkgPath - if len(pkgPath) > 0 { - return false - } - c := name[0] - if 'a' <= c && c <= 'z' || c == '_' { - return false - } - return true -} - -type Config struct { - Overwrite bool - AppendSlice bool - TypeCheck bool - Transformers Transformers - overwriteWithEmptyValue bool - overwriteSliceWithEmptyValue bool -} - -type Transformers interface { - Transformer(reflect.Type) func(dst, src reflect.Value) error -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (dst reflect.Value, err error) { - dst = dstIn - overwrite := config.Overwrite - typeCheck := config.TypeCheck - overwriteWithEmptySrc := config.overwriteWithEmptyValue - overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue - - if !src.IsValid() { - return - } - - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return dst, nil - } - } - // Remember, remember... - visited[h] = &visit{addr, typ, seen} - } - - if config.Transformers != nil && !isEmptyValue(dst) { - if fn := config.Transformers.Transformer(dst.Type()); fn != nil { - err = fn(dst, src) - return - } - } - - if dst.IsValid() && src.IsValid() && src.Type() != dst.Type() { - err = fmt.Errorf("cannot append two different types (%s, %s)", src.Kind(), dst.Kind()) - return - } - - switch dst.Kind() { - case reflect.Struct: - if hasExportedField(dst) { - dstCp := reflect.New(dst.Type()).Elem() - for i, n := 0, dst.NumField(); i < n; i++ { - dstField := dst.Field(i) - structField := dst.Type().Field(i) - // copy un-exported struct fields - if !isExportedComponent(&structField) { - rf := dstCp.Field(i) - rf = reflect.NewAt(rf.Type(), unsafe.Pointer(rf.UnsafeAddr())).Elem() //nolint:gosec - dstRF := dst.Field(i) - if !dst.Field(i).CanAddr() { - continue - } - - dstRF = reflect.NewAt(dstRF.Type(), unsafe.Pointer(dstRF.UnsafeAddr())).Elem() //nolint:gosec - rf.Set(dstRF) - continue - } - dstField, err = deepMerge(dstField, src.Field(i), visited, depth+1, config) - if err != nil { - return - } - dstCp.Field(i).Set(dstField) - } - - if dst.CanSet() { - dst.Set(dstCp) - } else { - dst = dstCp - } - return - } else { - if (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { - dst = src - } - } - - case reflect.Map: - if dst.IsNil() && !src.IsNil() { - if dst.CanSet() { - dst.Set(reflect.MakeMap(dst.Type())) - } else { - dst = src - return - } - } - for _, key := range src.MapKeys() { - srcElement := src.MapIndex(key) - dstElement := dst.MapIndex(key) - if !srcElement.IsValid() { - continue - } - if dst.MapIndex(key).IsValid() { - k := dstElement.Interface() - dstElement = reflect.ValueOf(k) - } - if isReflectNil(srcElement) { - if overwrite || isReflectNil(dstElement) { - dst.SetMapIndex(key, srcElement) - } - continue - } - if !srcElement.CanInterface() { - continue - } - - if 
srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - if dstElement.IsValid() { - dstElement = reflect.ValueOf(dstElement.Interface()) - } - } - dstElement, err = deepMerge(dstElement, srcElement, visited, depth+1, config) - if err != nil { - return - } - dst.SetMapIndex(key, dstElement) - - } - case reflect.Slice: - newSlice := dst - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { - if typeCheck && src.Type() != dst.Type() { - return dst, fmt.Errorf("cannot override two slices with different type (%s, %s)", src.Type(), dst.Type()) - } - newSlice = src - } else if config.AppendSlice { - if typeCheck && src.Type() != dst.Type() { - err = fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) - return - } - newSlice = reflect.AppendSlice(dst, src) - } - if dst.CanSet() { - dst.Set(newSlice) - } else { - dst = newSlice - } - case reflect.Ptr, reflect.Interface: - if isReflectNil(src) { - break - } - - if dst.Kind() != reflect.Ptr && src.Type().AssignableTo(dst.Type()) { - if dst.IsNil() || overwrite { - if overwrite || isEmptyValue(dst) { - if dst.CanSet() { - dst.Set(src) - } else { - dst = src - } - } - } - break - } - - if src.Kind() != reflect.Interface { - if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { - dst.Set(src) - } - } else if src.Kind() == reflect.Ptr { - if dst, err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - dst = dst.Addr() - } else if dst.Elem().Type() == src.Type() { - if dst, err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { - return - } - } else { - return dst, ErrDifferentArgumentsTypes - } - break - } - if dst.IsNil() || overwrite { - if (overwrite || isEmptyValue(dst)) && (overwriteWithEmptySrc || !isEmptyValue(src)) { - if dst.CanSet() { - dst.Set(src) - } else { - dst = src - } - } - } else if _, err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - default: - overwriteFull := (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) - if overwriteFull { - if dst.CanSet() { - dst.Set(src) - } else { - dst = src - } - } - } - - return -} - -// Merge will fill any empty for value type attributes on the dst struct using corresponding -// src attributes if they themselves are not empty. dst and src must be valid same-type structs -// and dst must be a pointer to struct. -// It won't merge unexported (private) fields and will do recursively any exported field. -func Merge(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, opts...) -} - -// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by -// non-empty src attribute values. -// Deprecated: use Merge(…) with WithOverride -func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, append(opts, WithOverride)...) -} - -// WithTransformers adds transformers to merge, allowing to customize the merging of some types. -func WithTransformers(transformers Transformers) func(*Config) { - return func(config *Config) { - config.Transformers = transformers - } -} - -// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. 
-func WithOverride(config *Config) { - config.Overwrite = true -} - -// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. -func WithOverwriteWithEmptyValue(config *Config) { - config.overwriteWithEmptyValue = true -} - -// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice. -func WithOverrideEmptySlice(config *Config) { - config.overwriteSliceWithEmptyValue = true -} - -// WithAppendSlice will make merge append slices instead of overwriting it. -func WithAppendSlice(config *Config) { - config.AppendSlice = true -} - -// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride). -func WithTypeCheck(config *Config) { - config.TypeCheck = true -} - -func merge(dst, src interface{}, opts ...func(*Config)) error { - var ( - vDst, vSrc reflect.Value - err error - ) - - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - if !vDst.CanSet() { - return fmt.Errorf("cannot set dst, needs reference") - } - if vDst.Type() != vSrc.Type() { - return ErrDifferentArgumentsTypes - } - _, err = deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) - return err -} - -// IsReflectNil is the reflect value provided nil -func isReflectNil(v reflect.Value) bool { - k := v.Kind() - switch k { - case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr: - // Both interface and slice are nil if first word is 0. - // Both are always bigger than a word; assume flagIndir. - return v.IsNil() - default: - return false - } -} diff --git a/awsproviderlint/vendor/github.com/imdario/mergo/mergo.go b/awsproviderlint/vendor/github.com/imdario/mergo/mergo.go deleted file mode 100644 index a82fea2fdcc..00000000000 --- a/awsproviderlint/vendor/github.com/imdario/mergo/mergo.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "errors" - "reflect" -) - -// Errors reported by Mergo when it finds invalid arguments. -var ( - ErrNilArguments = errors.New("src and dst must not be nil") - ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs and maps are supported") - ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") - ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") -) - -// During deepMerge, must keep track of checks that are -// in progress. The comparison algorithm assumes that all -// checks in progress are true when it reencounters them. -// Visited are stored in a map indexed by 17 * a1 + a2; -type visit struct { - ptr uintptr - typ reflect.Type - next *visit -} - -// From src/pkg/encoding/json/encode.go. 
-func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - if v.IsNil() { - return true - } - return isEmptyValue(v.Elem()) - case reflect.Func: - return v.IsNil() - case reflect.Invalid: - return true - } - return false -} - -func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { - if dst == nil || src == nil { - err = ErrNilArguments - return - } - vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { - err = ErrNotSupported - return - } - vSrc = reflect.ValueOf(src) - // We check if vSrc is a pointer to dereference it. - if vSrc.Kind() == reflect.Ptr { - vSrc = vSrc.Elem() - } - return -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... - visited[h] = &visit{addr, typ, seen} - } - return // TODO refactor -} diff --git a/awsproviderlint/vendor/github.com/jbenet/go-context/io/ctxio.go b/awsproviderlint/vendor/github.com/jbenet/go-context/io/ctxio.go deleted file mode 100644 index b4f2454235a..00000000000 --- a/awsproviderlint/vendor/github.com/jbenet/go-context/io/ctxio.go +++ /dev/null @@ -1,120 +0,0 @@ -// Package ctxio provides io.Reader and io.Writer wrappers that -// respect context.Contexts. Use these at the interface between -// your context code and your io. -// -// WARNING: read the code. see how writes and reads will continue -// until you cancel the io. Maybe this package should provide -// versions of io.ReadCloser and io.WriteCloser that automatically -// call .Close when the context expires. But for now -- since in my -// use cases I have long-lived connections with ephemeral io wrappers -// -- this has yet to be a need. -package ctxio - -import ( - "io" - - context "golang.org/x/net/context" -) - -type ioret struct { - n int - err error -} - -type Writer interface { - io.Writer -} - -type ctxWriter struct { - w io.Writer - ctx context.Context -} - -// NewWriter wraps a writer to make it respect given Context. -// If there is a blocking write, the returned Writer will return -// whenever the context is cancelled (the return values are n=0 -// and err=ctx.Err().) -// -// Note well: this wrapper DOES NOT ACTUALLY cancel the underlying -// write-- there is no way to do that with the standard go io -// interface. So the read and write _will_ happen or hang. So, use -// this sparingly, make sure to cancel the read or write as necesary -// (e.g. closing a connection whose context is up, etc.) -// -// Furthermore, in order to protect your memory from being read -// _after_ you've cancelled the context, this io.Writer will -// first make a **copy** of the buffer. 
-func NewWriter(ctx context.Context, w io.Writer) *ctxWriter { - if ctx == nil { - ctx = context.Background() - } - return &ctxWriter{ctx: ctx, w: w} -} - -func (w *ctxWriter) Write(buf []byte) (int, error) { - buf2 := make([]byte, len(buf)) - copy(buf2, buf) - - c := make(chan ioret, 1) - - go func() { - n, err := w.w.Write(buf2) - c <- ioret{n, err} - close(c) - }() - - select { - case r := <-c: - return r.n, r.err - case <-w.ctx.Done(): - return 0, w.ctx.Err() - } -} - -type Reader interface { - io.Reader -} - -type ctxReader struct { - r io.Reader - ctx context.Context -} - -// NewReader wraps a reader to make it respect given Context. -// If there is a blocking read, the returned Reader will return -// whenever the context is cancelled (the return values are n=0 -// and err=ctx.Err().) -// -// Note well: this wrapper DOES NOT ACTUALLY cancel the underlying -// write-- there is no way to do that with the standard go io -// interface. So the read and write _will_ happen or hang. So, use -// this sparingly, make sure to cancel the read or write as necesary -// (e.g. closing a connection whose context is up, etc.) -// -// Furthermore, in order to protect your memory from being read -// _before_ you've cancelled the context, this io.Reader will -// allocate a buffer of the same size, and **copy** into the client's -// if the read succeeds in time. -func NewReader(ctx context.Context, r io.Reader) *ctxReader { - return &ctxReader{ctx: ctx, r: r} -} - -func (r *ctxReader) Read(buf []byte) (int, error) { - buf2 := make([]byte, len(buf)) - - c := make(chan ioret, 1) - - go func() { - n, err := r.r.Read(buf2) - c <- ioret{n, err} - close(c) - }() - - select { - case ret := <-c: - copy(buf, buf2) - return ret.n, ret.err - case <-r.ctx.Done(): - return 0, r.ctx.Err() - } -} diff --git a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/.gitattributes b/awsproviderlint/vendor/github.com/kevinburke/ssh_config/.gitattributes deleted file mode 100644 index 44db5818894..00000000000 --- a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -testdata/dos-lines eol=crlf diff --git a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/.gitignore b/awsproviderlint/vendor/github.com/kevinburke/ssh_config/.gitignore deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/.mailmap b/awsproviderlint/vendor/github.com/kevinburke/ssh_config/.mailmap deleted file mode 100644 index 253406b1cc6..00000000000 --- a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/.mailmap +++ /dev/null @@ -1 +0,0 @@ -Kevin Burke Kevin Burke diff --git a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/.travis.yml b/awsproviderlint/vendor/github.com/kevinburke/ssh_config/.travis.yml deleted file mode 100644 index 4306f30f854..00000000000 --- a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -go_import_path: github.com/kevinburke/ssh_config - -language: go - -go: - - 1.11.x - - 1.12.x - - master - -before_script: - - go get -u ./... 
- -script: - - make race-test diff --git a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt b/awsproviderlint/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt deleted file mode 100644 index cd3379400dc..00000000000 --- a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt +++ /dev/null @@ -1,5 +0,0 @@ -Eugene Terentev -Kevin Burke -Mark Nevill -Sergey Lukjanov -Wayne Ashley Berry diff --git a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/LICENSE b/awsproviderlint/vendor/github.com/kevinburke/ssh_config/LICENSE deleted file mode 100644 index b9a770ac2a9..00000000000 --- a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/LICENSE +++ /dev/null @@ -1,49 +0,0 @@ -Copyright (c) 2017 Kevin Burke. - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - -=================== - -The lexer and parser borrow heavily from github.com/pelletier/go-toml. The -license for that project is copied below. - -The MIT License (MIT) - -Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/Makefile b/awsproviderlint/vendor/github.com/kevinburke/ssh_config/Makefile deleted file mode 100644 index a1880d18e17..00000000000 --- a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/Makefile +++ /dev/null @@ -1,30 +0,0 @@ -BUMP_VERSION := $(GOPATH)/bin/bump_version -STATICCHECK := $(GOPATH)/bin/staticcheck -WRITE_MAILMAP := $(GOPATH)/bin/write_mailmap - -$(STATICCHECK): - go get honnef.co/go/tools/cmd/staticcheck - -lint: $(STATICCHECK) - go vet ./... - $(STATICCHECK) - -test: lint - @# the timeout helps guard against infinite recursion - go test -timeout=250ms ./... - -race-test: lint - go test -timeout=500ms -race ./... - -$(BUMP_VERSION): - go get -u github.com/kevinburke/bump_version - -release: test | $(BUMP_VERSION) - $(BUMP_VERSION) minor config.go - -force: ; - -AUTHORS.txt: force | $(WRITE_MAILMAP) - $(WRITE_MAILMAP) > AUTHORS.txt - -authors: AUTHORS.txt diff --git a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/README.md b/awsproviderlint/vendor/github.com/kevinburke/ssh_config/README.md deleted file mode 100644 index 52cc1eac4d5..00000000000 --- a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/README.md +++ /dev/null @@ -1,81 +0,0 @@ -# ssh_config - -This is a Go parser for `ssh_config` files. Importantly, this parser attempts -to preserve comments in a given file, so you can manipulate a `ssh_config` file -from a program, if your heart desires. - -It's designed to be used with the excellent -[x/crypto/ssh](https://golang.org/x/crypto/ssh) package, which handles SSH -negotiation but isn't very easy to configure. - -The `ssh_config` `Get()` and `GetStrict()` functions will attempt to read values -from `$HOME/.ssh/config` and fall back to `/etc/ssh/ssh_config`. The first -argument is the host name to match on, and the second argument is the key you -want to retrieve. - -```go -port := ssh_config.Get("myhost", "Port") -``` - -You can also load a config file and read values from it. - -```go -var config = ` -Host *.test - Compression yes -` - -cfg, err := ssh_config.Decode(strings.NewReader(config)) -fmt.Println(cfg.Get("example.test", "Port")) -``` - -Some SSH arguments have default values - for example, the default value for -`KeyboardAuthentication` is `"yes"`. If you call Get(), and no value for the -given Host/keyword pair exists in the config, we'll return a default for the -keyword if one exists. - -### Manipulating SSH config files - -Here's how you can manipulate an SSH config file, and then write it back to -disk. - -```go -f, _ := os.Open(filepath.Join(os.Getenv("HOME"), ".ssh", "config")) -cfg, _ := ssh_config.Decode(f) -for _, host := range cfg.Hosts { - fmt.Println("patterns:", host.Patterns) - for _, node := range host.Nodes { - // Manipulate the nodes as you see fit, or use a type switch to - // distinguish between Empty, KV, and Include nodes. - fmt.Println(node.String()) - } -} - -// Print the config to stdout: -fmt.Println(cfg.String()) -``` - -## Spec compliance - -Wherever possible we try to implement the specification as documented in -the `ssh_config` manpage. Unimplemented features should be present in the -[issues][issues] list. - -Notably, the `Match` directive is currently unsupported. - -[issues]: https://github.com/kevinburke/ssh_config/issues - -## Errata - -This is the second [comment-preserving configuration parser][blog] I've written, after -[an /etc/hosts parser][hostsfile]. Eventually, I will write one for every Linux -file format. 
- -[blog]: https://kev.inburke.com/kevin/more-comment-preserving-configuration-parsers/ -[hostsfile]: https://github.com/kevinburke/hostsfile - -## Donating - -Donations free up time to make improvements to the library, and respond to -bug reports. You can send donations via Paypal's "Send Money" feature to -kev@inburke.com. Donations are not tax deductible in the USA. diff --git a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/config.go b/awsproviderlint/vendor/github.com/kevinburke/ssh_config/config.go deleted file mode 100644 index 136f0c35c67..00000000000 --- a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/config.go +++ /dev/null @@ -1,649 +0,0 @@ -// Package ssh_config provides tools for manipulating SSH config files. -// -// Importantly, this parser attempts to preserve comments in a given file, so -// you can manipulate a `ssh_config` file from a program, if your heart desires. -// -// The Get() and GetStrict() functions will attempt to read values from -// $HOME/.ssh/config, falling back to /etc/ssh/ssh_config. The first argument is -// the host name to match on ("example.com"), and the second argument is the key -// you want to retrieve ("Port"). The keywords are case insensitive. -// -// port := ssh_config.Get("myhost", "Port") -// -// You can also manipulate an SSH config file and then print it or write it back -// to disk. -// -// f, _ := os.Open(filepath.Join(os.Getenv("HOME"), ".ssh", "config")) -// cfg, _ := ssh_config.Decode(f) -// for _, host := range cfg.Hosts { -// fmt.Println("patterns:", host.Patterns) -// for _, node := range host.Nodes { -// fmt.Println(node.String()) -// } -// } -// -// // Write the cfg back to disk: -// fmt.Println(cfg.String()) -// -// BUG: the Match directive is currently unsupported; parsing a config with -// a Match directive will trigger an error. -package ssh_config - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - osuser "os/user" - "path/filepath" - "regexp" - "runtime" - "strings" - "sync" -) - -const version = "1.0" - -var _ = version - -type configFinder func() string - -// UserSettings checks ~/.ssh and /etc/ssh for configuration files. The config -// files are parsed and cached the first time Get() or GetStrict() is called. -type UserSettings struct { - IgnoreErrors bool - systemConfig *Config - systemConfigFinder configFinder - userConfig *Config - userConfigFinder configFinder - loadConfigs sync.Once - onceErr error -} - -func homedir() string { - user, err := osuser.Current() - if err == nil { - return user.HomeDir - } else { - return os.Getenv("HOME") - } -} - -func userConfigFinder() string { - return filepath.Join(homedir(), ".ssh", "config") -} - -// DefaultUserSettings is the default UserSettings and is used by Get and -// GetStrict. It checks both $HOME/.ssh/config and /etc/ssh/ssh_config for keys, -// and it will return parse errors (if any) instead of swallowing them. -var DefaultUserSettings = &UserSettings{ - IgnoreErrors: false, - systemConfigFinder: systemConfigFinder, - userConfigFinder: userConfigFinder, -} - -func systemConfigFinder() string { - return filepath.Join("/", "etc", "ssh", "ssh_config") -} - -func findVal(c *Config, alias, key string) (string, error) { - if c == nil { - return "", nil - } - val, err := c.Get(alias, key) - if err != nil || val == "" { - return "", err - } - if err := validate(key, val); err != nil { - return "", err - } - return val, nil -} - -// Get finds the first value for key within a declaration that matches the -// alias. 
Get returns the empty string if no value was found, or if IgnoreErrors -// is false and we could not parse the configuration file. Use GetStrict to -// disambiguate the latter cases. -// -// The match for key is case insensitive. -// -// Get is a wrapper around DefaultUserSettings.Get. -func Get(alias, key string) string { - return DefaultUserSettings.Get(alias, key) -} - -// GetStrict finds the first value for key within a declaration that matches the -// alias. If key has a default value and no matching configuration is found, the -// default will be returned. For more information on default values and the way -// patterns are matched, see the manpage for ssh_config. -// -// error will be non-nil if and only if a user's configuration file or the -// system configuration file could not be parsed, and u.IgnoreErrors is false. -// -// GetStrict is a wrapper around DefaultUserSettings.GetStrict. -func GetStrict(alias, key string) (string, error) { - return DefaultUserSettings.GetStrict(alias, key) -} - -// Get finds the first value for key within a declaration that matches the -// alias. Get returns the empty string if no value was found, or if IgnoreErrors -// is false and we could not parse the configuration file. Use GetStrict to -// disambiguate the latter cases. -// -// The match for key is case insensitive. -func (u *UserSettings) Get(alias, key string) string { - val, err := u.GetStrict(alias, key) - if err != nil { - return "" - } - return val -} - -// GetStrict finds the first value for key within a declaration that matches the -// alias. If key has a default value and no matching configuration is found, the -// default will be returned. For more information on default values and the way -// patterns are matched, see the manpage for ssh_config. -// -// error will be non-nil if and only if a user's configuration file or the -// system configuration file could not be parsed, and u.IgnoreErrors is false. -func (u *UserSettings) GetStrict(alias, key string) (string, error) { - u.loadConfigs.Do(func() { - // can't parse user file, that's ok. 
- var filename string - if u.userConfigFinder == nil { - filename = userConfigFinder() - } else { - filename = u.userConfigFinder() - } - var err error - u.userConfig, err = parseFile(filename) - //lint:ignore S1002 I prefer it this way - if err != nil && os.IsNotExist(err) == false { - u.onceErr = err - return - } - if u.systemConfigFinder == nil { - filename = systemConfigFinder() - } else { - filename = u.systemConfigFinder() - } - u.systemConfig, err = parseFile(filename) - //lint:ignore S1002 I prefer it this way - if err != nil && os.IsNotExist(err) == false { - u.onceErr = err - return - } - }) - //lint:ignore S1002 I prefer it this way - if u.onceErr != nil && u.IgnoreErrors == false { - return "", u.onceErr - } - val, err := findVal(u.userConfig, alias, key) - if err != nil || val != "" { - return val, err - } - val2, err2 := findVal(u.systemConfig, alias, key) - if err2 != nil || val2 != "" { - return val2, err2 - } - return Default(key), nil -} - -func parseFile(filename string) (*Config, error) { - return parseWithDepth(filename, 0) -} - -func parseWithDepth(filename string, depth uint8) (*Config, error) { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - return decodeBytes(b, isSystem(filename), depth) -} - -func isSystem(filename string) bool { - // TODO: not sure this is the best way to detect a system repo - return strings.HasPrefix(filepath.Clean(filename), "/etc/ssh") -} - -// Decode reads r into a Config, or returns an error if r could not be parsed as -// an SSH config file. -func Decode(r io.Reader) (*Config, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - return decodeBytes(b, false, 0) -} - -func decodeBytes(b []byte, system bool, depth uint8) (c *Config, err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - if e, ok := r.(error); ok && e == ErrDepthExceeded { - err = e - return - } - err = errors.New(r.(string)) - } - }() - - c = parseSSH(lexSSH(b), system, depth) - return c, err -} - -// Config represents an SSH config file. -type Config struct { - // A list of hosts to match against. The file begins with an implicit - // "Host *" declaration matching all hosts. - Hosts []*Host - depth uint8 - position Position -} - -// Get finds the first value in the configuration that matches the alias and -// contains key. Get returns the empty string if no value was found, or if the -// Config contains an invalid conditional Include value. -// -// The match for key is case insensitive. -func (c *Config) Get(alias, key string) (string, error) { - lowerKey := strings.ToLower(key) - for _, host := range c.Hosts { - if !host.Matches(alias) { - continue - } - for _, node := range host.Nodes { - switch t := node.(type) { - case *Empty: - continue - case *KV: - // "keys are case insensitive" per the spec - lkey := strings.ToLower(t.Key) - if lkey == "match" { - panic("can't handle Match directives") - } - if lkey == lowerKey { - return t.Value, nil - } - case *Include: - val := t.Get(alias, key) - if val != "" { - return val, nil - } - default: - return "", fmt.Errorf("unknown Node type %v", t) - } - } - } - return "", nil -} - -// String returns a string representation of the Config file. 
-func (c Config) String() string { - return marshal(c).String() -} - -func (c Config) MarshalText() ([]byte, error) { - return marshal(c).Bytes(), nil -} - -func marshal(c Config) *bytes.Buffer { - var buf bytes.Buffer - for i := range c.Hosts { - buf.WriteString(c.Hosts[i].String()) - } - return &buf -} - -// Pattern is a pattern in a Host declaration. Patterns are read-only values; -// create a new one with NewPattern(). -type Pattern struct { - str string // Its appearance in the file, not the value that gets compiled. - regex *regexp.Regexp - not bool // True if this is a negated match -} - -// String prints the string representation of the pattern. -func (p Pattern) String() string { - return p.str -} - -// Copied from regexp.go with * and ? removed. -var specialBytes = []byte(`\.+()|[]{}^$`) - -func special(b byte) bool { - return bytes.IndexByte(specialBytes, b) >= 0 -} - -// NewPattern creates a new Pattern for matching hosts. NewPattern("*") creates -// a Pattern that matches all hosts. -// -// From the manpage, a pattern consists of zero or more non-whitespace -// characters, `*' (a wildcard that matches zero or more characters), or `?' (a -// wildcard that matches exactly one character). For example, to specify a set -// of declarations for any host in the ".co.uk" set of domains, the following -// pattern could be used: -// -// Host *.co.uk -// -// The following pattern would match any host in the 192.168.0.[0-9] network range: -// -// Host 192.168.0.? -func NewPattern(s string) (*Pattern, error) { - if s == "" { - return nil, errors.New("ssh_config: empty pattern") - } - negated := false - if s[0] == '!' { - negated = true - s = s[1:] - } - var buf bytes.Buffer - buf.WriteByte('^') - for i := 0; i < len(s); i++ { - // A byte loop is correct because all metacharacters are ASCII. - switch b := s[i]; b { - case '*': - buf.WriteString(".*") - case '?': - buf.WriteString(".?") - default: - // borrowing from QuoteMeta here. - if special(b) { - buf.WriteByte('\\') - } - buf.WriteByte(b) - } - } - buf.WriteByte('$') - r, err := regexp.Compile(buf.String()) - if err != nil { - return nil, err - } - return &Pattern{str: s, regex: r, not: negated}, nil -} - -// Host describes a Host directive and the keywords that follow it. -type Host struct { - // A list of host patterns that should match this host. - Patterns []*Pattern - // A Node is either a key/value pair or a comment line. - Nodes []Node - // EOLComment is the comment (if any) terminating the Host line. - EOLComment string - hasEquals bool - leadingSpace int // TODO: handle spaces vs tabs here. - // The file starts with an implicit "Host *" declaration. - implicit bool -} - -// Matches returns true if the Host matches for the given alias. For -// a description of the rules that provide a match, see the manpage for -// ssh_config. -func (h *Host) Matches(alias string) bool { - found := false - for i := range h.Patterns { - if h.Patterns[i].regex.MatchString(alias) { - if h.Patterns[i].not { - // Negated match. "A pattern entry may be negated by prefixing - // it with an exclamation mark (`!'). If a negated entry is - // matched, then the Host entry is ignored, regardless of - // whether any other patterns on the line match. Negated matches - // are therefore useful to provide exceptions for wildcard - // matches." - return false - } - found = true - } - } - return found -} - -// String prints h as it would appear in a config file. Minor tweaks may be -// present in the whitespace in the printed file. 
-func (h *Host) String() string { - var buf bytes.Buffer - //lint:ignore S1002 I prefer to write it this way - if h.implicit == false { - buf.WriteString(strings.Repeat(" ", int(h.leadingSpace))) - buf.WriteString("Host") - if h.hasEquals { - buf.WriteString(" = ") - } else { - buf.WriteString(" ") - } - for i, pat := range h.Patterns { - buf.WriteString(pat.String()) - if i < len(h.Patterns)-1 { - buf.WriteString(" ") - } - } - if h.EOLComment != "" { - buf.WriteString(" #") - buf.WriteString(h.EOLComment) - } - buf.WriteByte('\n') - } - for i := range h.Nodes { - buf.WriteString(h.Nodes[i].String()) - buf.WriteByte('\n') - } - return buf.String() -} - -// Node represents a line in a Config. -type Node interface { - Pos() Position - String() string -} - -// KV is a line in the config file that contains a key, a value, and possibly -// a comment. -type KV struct { - Key string - Value string - Comment string - hasEquals bool - leadingSpace int // Space before the key. TODO handle spaces vs tabs. - position Position -} - -// Pos returns k's Position. -func (k *KV) Pos() Position { - return k.position -} - -// String prints k as it was parsed in the config file. There may be slight -// changes to the whitespace between values. -func (k *KV) String() string { - if k == nil { - return "" - } - equals := " " - if k.hasEquals { - equals = " = " - } - line := fmt.Sprintf("%s%s%s%s", strings.Repeat(" ", int(k.leadingSpace)), k.Key, equals, k.Value) - if k.Comment != "" { - line += " #" + k.Comment - } - return line -} - -// Empty is a line in the config file that contains only whitespace or comments. -type Empty struct { - Comment string - leadingSpace int // TODO handle spaces vs tabs. - position Position -} - -// Pos returns e's Position. -func (e *Empty) Pos() Position { - return e.position -} - -// String prints e as it was parsed in the config file. -func (e *Empty) String() string { - if e == nil { - return "" - } - if e.Comment == "" { - return "" - } - return fmt.Sprintf("%s#%s", strings.Repeat(" ", int(e.leadingSpace)), e.Comment) -} - -// Include holds the result of an Include directive, including the config files -// that have been parsed as part of that directive. At most 5 levels of Include -// statements will be parsed. -type Include struct { - // Comment is the contents of any comment at the end of the Include - // statement. - Comment string - // an include directive can include several different files, and wildcards - directives []string - - mu sync.Mutex - // 1:1 mapping between matches and keys in files array; matches preserves - // ordering - matches []string - // actual filenames are listed here - files map[string]*Config - leadingSpace int - position Position - depth uint8 - hasEquals bool -} - -const maxRecurseDepth = 5 - -// ErrDepthExceeded is returned if too many Include directives are parsed. -// Usually this indicates a recursive loop (an Include directive pointing to the -// file it contains). -var ErrDepthExceeded = errors.New("ssh_config: max recurse depth exceeded") - -func removeDups(arr []string) []string { - // Use map to record duplicates as we find them. - encountered := make(map[string]bool, len(arr)) - result := make([]string, 0) - - for v := range arr { - //lint:ignore S1002 I prefer it this way - if encountered[arr[v]] == false { - encountered[arr[v]] = true - result = append(result, arr[v]) - } - } - return result -} - -// NewInclude creates a new Include with a list of file globs to include. -// Configuration files are parsed greedily (e.g. 
as soon as this function runs). -// Any error encountered while parsing nested configuration files will be -// returned. -func NewInclude(directives []string, hasEquals bool, pos Position, comment string, system bool, depth uint8) (*Include, error) { - if depth > maxRecurseDepth { - return nil, ErrDepthExceeded - } - inc := &Include{ - Comment: comment, - directives: directives, - files: make(map[string]*Config), - position: pos, - leadingSpace: pos.Col - 1, - depth: depth, - hasEquals: hasEquals, - } - // no need for inc.mu.Lock() since nothing else can access this inc - matches := make([]string, 0) - for i := range directives { - var path string - if filepath.IsAbs(directives[i]) { - path = directives[i] - } else if system { - path = filepath.Join("/etc/ssh", directives[i]) - } else { - path = filepath.Join(homedir(), ".ssh", directives[i]) - } - theseMatches, err := filepath.Glob(path) - if err != nil { - return nil, err - } - matches = append(matches, theseMatches...) - } - matches = removeDups(matches) - inc.matches = matches - for i := range matches { - config, err := parseWithDepth(matches[i], depth) - if err != nil { - return nil, err - } - inc.files[matches[i]] = config - } - return inc, nil -} - -// Pos returns the position of the Include directive in the larger file. -func (i *Include) Pos() Position { - return i.position -} - -// Get finds the first value in the Include statement matching the alias and the -// given key. -func (inc *Include) Get(alias, key string) string { - inc.mu.Lock() - defer inc.mu.Unlock() - // TODO: we search files in any order which is not correct - for i := range inc.matches { - cfg := inc.files[inc.matches[i]] - if cfg == nil { - panic("nil cfg") - } - val, err := cfg.Get(alias, key) - if err == nil && val != "" { - return val - } - } - return "" -} - -// String prints out a string representation of this Include directive. Note -// included Config files are not printed as part of this representation. 
-func (inc *Include) String() string { - equals := " " - if inc.hasEquals { - equals = " = " - } - line := fmt.Sprintf("%sInclude%s%s", strings.Repeat(" ", int(inc.leadingSpace)), equals, strings.Join(inc.directives, " ")) - if inc.Comment != "" { - line += " #" + inc.Comment - } - return line -} - -var matchAll *Pattern - -func init() { - var err error - matchAll, err = NewPattern("*") - if err != nil { - panic(err) - } -} - -func newConfig() *Config { - return &Config{ - Hosts: []*Host{ - &Host{ - implicit: true, - Patterns: []*Pattern{matchAll}, - Nodes: make([]Node, 0), - }, - }, - depth: 0, - } -} diff --git a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/lexer.go b/awsproviderlint/vendor/github.com/kevinburke/ssh_config/lexer.go deleted file mode 100644 index 11680b4c74d..00000000000 --- a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/lexer.go +++ /dev/null @@ -1,240 +0,0 @@ -package ssh_config - -import ( - "bytes" -) - -// Define state functions -type sshLexStateFn func() sshLexStateFn - -type sshLexer struct { - inputIdx int - input []rune // Textual source - - buffer []rune // Runes composing the current token - tokens chan token - line int - col int - endbufferLine int - endbufferCol int -} - -func (s *sshLexer) lexComment(previousState sshLexStateFn) sshLexStateFn { - return func() sshLexStateFn { - growingString := "" - for next := s.peek(); next != '\n' && next != eof; next = s.peek() { - if next == '\r' && s.follow("\r\n") { - break - } - growingString += string(next) - s.next() - } - s.emitWithValue(tokenComment, growingString) - s.skip() - return previousState - } -} - -// lex the space after an equals sign in a function -func (s *sshLexer) lexRspace() sshLexStateFn { - for { - next := s.peek() - if !isSpace(next) { - break - } - s.skip() - } - return s.lexRvalue -} - -func (s *sshLexer) lexEquals() sshLexStateFn { - for { - next := s.peek() - if next == '=' { - s.emit(tokenEquals) - s.skip() - return s.lexRspace - } - // TODO error handling here; newline eof etc. 
- if !isSpace(next) { - break - } - s.skip() - } - return s.lexRvalue -} - -func (s *sshLexer) lexKey() sshLexStateFn { - growingString := "" - - for r := s.peek(); isKeyChar(r); r = s.peek() { - // simplified a lot here - if isSpace(r) || r == '=' { - s.emitWithValue(tokenKey, growingString) - s.skip() - return s.lexEquals - } - growingString += string(r) - s.next() - } - s.emitWithValue(tokenKey, growingString) - return s.lexEquals -} - -func (s *sshLexer) lexRvalue() sshLexStateFn { - growingString := "" - for { - next := s.peek() - switch next { - case '\r': - if s.follow("\r\n") { - s.emitWithValue(tokenString, growingString) - s.skip() - return s.lexVoid - } - case '\n': - s.emitWithValue(tokenString, growingString) - s.skip() - return s.lexVoid - case '#': - s.emitWithValue(tokenString, growingString) - s.skip() - return s.lexComment(s.lexVoid) - case eof: - s.next() - } - if next == eof { - break - } - growingString += string(next) - s.next() - } - s.emit(tokenEOF) - return nil -} - -func (s *sshLexer) read() rune { - r := s.peek() - if r == '\n' { - s.endbufferLine++ - s.endbufferCol = 1 - } else { - s.endbufferCol++ - } - s.inputIdx++ - return r -} - -func (s *sshLexer) next() rune { - r := s.read() - - if r != eof { - s.buffer = append(s.buffer, r) - } - return r -} - -func (s *sshLexer) lexVoid() sshLexStateFn { - for { - next := s.peek() - switch next { - case '#': - s.skip() - return s.lexComment(s.lexVoid) - case '\r': - fallthrough - case '\n': - s.emit(tokenEmptyLine) - s.skip() - continue - } - - if isSpace(next) { - s.skip() - } - - if isKeyStartChar(next) { - return s.lexKey - } - - // removed IsKeyStartChar and lexKey. probably will need to readd - - if next == eof { - s.next() - break - } - } - - s.emit(tokenEOF) - return nil -} - -func (s *sshLexer) ignore() { - s.buffer = make([]rune, 0) - s.line = s.endbufferLine - s.col = s.endbufferCol -} - -func (s *sshLexer) skip() { - s.next() - s.ignore() -} - -func (s *sshLexer) emit(t tokenType) { - s.emitWithValue(t, string(s.buffer)) -} - -func (s *sshLexer) emitWithValue(t tokenType, value string) { - tok := token{ - Position: Position{s.line, s.col}, - typ: t, - val: value, - } - s.tokens <- tok - s.ignore() -} - -func (s *sshLexer) peek() rune { - if s.inputIdx >= len(s.input) { - return eof - } - - r := s.input[s.inputIdx] - return r -} - -func (s *sshLexer) follow(next string) bool { - inputIdx := s.inputIdx - for _, expectedRune := range next { - if inputIdx >= len(s.input) { - return false - } - r := s.input[inputIdx] - inputIdx++ - if expectedRune != r { - return false - } - } - return true -} - -func (s *sshLexer) run() { - for state := s.lexVoid; state != nil; { - state = state() - } - close(s.tokens) -} - -func lexSSH(input []byte) chan token { - runes := bytes.Runes(input) - l := &sshLexer{ - input: runes, - tokens: make(chan token), - line: 1, - col: 1, - endbufferLine: 1, - endbufferCol: 1, - } - go l.run() - return l.tokens -} diff --git a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/parser.go b/awsproviderlint/vendor/github.com/kevinburke/ssh_config/parser.go deleted file mode 100644 index 36c42055f54..00000000000 --- a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/parser.go +++ /dev/null @@ -1,191 +0,0 @@ -package ssh_config - -import ( - "fmt" - "strings" -) - -type sshParser struct { - flow chan token - config *Config - tokensBuffer []token - currentTable []string - seenTableKeys []string - // /etc/ssh parser or local parser - used to find the default for relative - // filepaths in 
the Include directive - system bool - depth uint8 -} - -type sshParserStateFn func() sshParserStateFn - -// Formats and panics an error message based on a token -func (p *sshParser) raiseErrorf(tok *token, msg string, args ...interface{}) { - // TODO this format is ugly - panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...)) -} - -func (p *sshParser) raiseError(tok *token, err error) { - if err == ErrDepthExceeded { - panic(err) - } - // TODO this format is ugly - panic(tok.Position.String() + ": " + err.Error()) -} - -func (p *sshParser) run() { - for state := p.parseStart; state != nil; { - state = state() - } -} - -func (p *sshParser) peek() *token { - if len(p.tokensBuffer) != 0 { - return &(p.tokensBuffer[0]) - } - - tok, ok := <-p.flow - if !ok { - return nil - } - p.tokensBuffer = append(p.tokensBuffer, tok) - return &tok -} - -func (p *sshParser) getToken() *token { - if len(p.tokensBuffer) != 0 { - tok := p.tokensBuffer[0] - p.tokensBuffer = p.tokensBuffer[1:] - return &tok - } - tok, ok := <-p.flow - if !ok { - return nil - } - return &tok -} - -func (p *sshParser) parseStart() sshParserStateFn { - tok := p.peek() - - // end of stream, parsing is finished - if tok == nil { - return nil - } - - switch tok.typ { - case tokenComment, tokenEmptyLine: - return p.parseComment - case tokenKey: - return p.parseKV - case tokenEOF: - return nil - default: - p.raiseErrorf(tok, fmt.Sprintf("unexpected token %q\n", tok)) - } - return nil -} - -func (p *sshParser) parseKV() sshParserStateFn { - key := p.getToken() - hasEquals := false - val := p.getToken() - if val.typ == tokenEquals { - hasEquals = true - val = p.getToken() - } - comment := "" - tok := p.peek() - if tok == nil { - tok = &token{typ: tokenEOF} - } - if tok.typ == tokenComment && tok.Position.Line == val.Position.Line { - tok = p.getToken() - comment = tok.val - } - if strings.ToLower(key.val) == "match" { - // https://github.com/kevinburke/ssh_config/issues/6 - p.raiseErrorf(val, "ssh_config: Match directive parsing is unsupported") - return nil - } - if strings.ToLower(key.val) == "host" { - strPatterns := strings.Split(val.val, " ") - patterns := make([]*Pattern, 0) - for i := range strPatterns { - if strPatterns[i] == "" { - continue - } - pat, err := NewPattern(strPatterns[i]) - if err != nil { - p.raiseErrorf(val, "Invalid host pattern: %v", err) - return nil - } - patterns = append(patterns, pat) - } - p.config.Hosts = append(p.config.Hosts, &Host{ - Patterns: patterns, - Nodes: make([]Node, 0), - EOLComment: comment, - hasEquals: hasEquals, - }) - return p.parseStart - } - lastHost := p.config.Hosts[len(p.config.Hosts)-1] - if strings.ToLower(key.val) == "include" { - inc, err := NewInclude(strings.Split(val.val, " "), hasEquals, key.Position, comment, p.system, p.depth+1) - if err == ErrDepthExceeded { - p.raiseError(val, err) - return nil - } - if err != nil { - p.raiseErrorf(val, "Error parsing Include directive: %v", err) - return nil - } - lastHost.Nodes = append(lastHost.Nodes, inc) - return p.parseStart - } - kv := &KV{ - Key: key.val, - Value: val.val, - Comment: comment, - hasEquals: hasEquals, - leadingSpace: key.Position.Col - 1, - position: key.Position, - } - lastHost.Nodes = append(lastHost.Nodes, kv) - return p.parseStart -} - -func (p *sshParser) parseComment() sshParserStateFn { - comment := p.getToken() - lastHost := p.config.Hosts[len(p.config.Hosts)-1] - lastHost.Nodes = append(lastHost.Nodes, &Empty{ - Comment: comment.val, - // account for the "#" as well - leadingSpace: 
comment.Position.Col - 2, - position: comment.Position, - }) - return p.parseStart -} - -func parseSSH(flow chan token, system bool, depth uint8) *Config { - // Ensure we consume tokens to completion even if parser exits early - defer func() { - for range flow { - } - }() - - result := newConfig() - result.position = Position{1, 1} - parser := &sshParser{ - flow: flow, - config: result, - tokensBuffer: make([]token, 0), - currentTable: make([]string, 0), - seenTableKeys: make([]string, 0), - system: system, - depth: depth, - } - parser.run() - return result -} diff --git a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/position.go b/awsproviderlint/vendor/github.com/kevinburke/ssh_config/position.go deleted file mode 100644 index e0b5e3fb33c..00000000000 --- a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/position.go +++ /dev/null @@ -1,25 +0,0 @@ -package ssh_config - -import "fmt" - -// Position of a document element within a SSH document. -// -// Line and Col are both 1-indexed positions for the element's line number and -// column number, respectively. Values of zero or less will cause Invalid(), -// to return true. -type Position struct { - Line int // line within the document - Col int // column within the line -} - -// String representation of the position. -// Displays 1-indexed line and column numbers. -func (p Position) String() string { - return fmt.Sprintf("(%d, %d)", p.Line, p.Col) -} - -// Invalid returns whether or not the position is valid (i.e. with negative or -// null values) -func (p Position) Invalid() bool { - return p.Line <= 0 || p.Col <= 0 -} diff --git a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/token.go b/awsproviderlint/vendor/github.com/kevinburke/ssh_config/token.go deleted file mode 100644 index a0ecbb2bb7d..00000000000 --- a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/token.go +++ /dev/null @@ -1,49 +0,0 @@ -package ssh_config - -import "fmt" - -type token struct { - Position - typ tokenType - val string -} - -func (t token) String() string { - switch t.typ { - case tokenEOF: - return "EOF" - } - return fmt.Sprintf("%q", t.val) -} - -type tokenType int - -const ( - eof = -(iota + 1) -) - -const ( - tokenError tokenType = iota - tokenEOF - tokenEmptyLine - tokenComment - tokenKey - tokenEquals - tokenString -) - -func isSpace(r rune) bool { - return r == ' ' || r == '\t' -} - -func isKeyStartChar(r rune) bool { - return !(isSpace(r) || r == '\r' || r == '\n' || r == eof) -} - -// I'm not sure that this is correct -func isKeyChar(r rune) bool { - // Keys start with the first character that isn't whitespace or [ and end - // with the last non-whitespace character before the equals sign. Keys - // cannot contain a # character." - return !(r == '\r' || r == '\n' || r == eof || r == '=') -} diff --git a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/validators.go b/awsproviderlint/vendor/github.com/kevinburke/ssh_config/validators.go deleted file mode 100644 index 29fab6a9d2f..00000000000 --- a/awsproviderlint/vendor/github.com/kevinburke/ssh_config/validators.go +++ /dev/null @@ -1,162 +0,0 @@ -package ssh_config - -import ( - "fmt" - "strconv" - "strings" -) - -// Default returns the default value for the given keyword, for example "22" if -// the keyword is "Port". Default returns the empty string if the keyword has no -// default, or if the keyword is unknown. Keyword matching is case-insensitive. -// -// Default values are provided by OpenSSH_7.4p1 on a Mac. 
-func Default(keyword string) string { - return defaults[strings.ToLower(keyword)] -} - -// Arguments where the value must be "yes" or "no" and *only* yes or no. -var yesnos = map[string]bool{ - strings.ToLower("BatchMode"): true, - strings.ToLower("CanonicalizeFallbackLocal"): true, - strings.ToLower("ChallengeResponseAuthentication"): true, - strings.ToLower("CheckHostIP"): true, - strings.ToLower("ClearAllForwardings"): true, - strings.ToLower("Compression"): true, - strings.ToLower("EnableSSHKeysign"): true, - strings.ToLower("ExitOnForwardFailure"): true, - strings.ToLower("ForwardAgent"): true, - strings.ToLower("ForwardX11"): true, - strings.ToLower("ForwardX11Trusted"): true, - strings.ToLower("GatewayPorts"): true, - strings.ToLower("GSSAPIAuthentication"): true, - strings.ToLower("GSSAPIDelegateCredentials"): true, - strings.ToLower("HostbasedAuthentication"): true, - strings.ToLower("IdentitiesOnly"): true, - strings.ToLower("KbdInteractiveAuthentication"): true, - strings.ToLower("NoHostAuthenticationForLocalhost"): true, - strings.ToLower("PasswordAuthentication"): true, - strings.ToLower("PermitLocalCommand"): true, - strings.ToLower("PubkeyAuthentication"): true, - strings.ToLower("RhostsRSAAuthentication"): true, - strings.ToLower("RSAAuthentication"): true, - strings.ToLower("StreamLocalBindUnlink"): true, - strings.ToLower("TCPKeepAlive"): true, - strings.ToLower("UseKeychain"): true, - strings.ToLower("UsePrivilegedPort"): true, - strings.ToLower("VisualHostKey"): true, -} - -var uints = map[string]bool{ - strings.ToLower("CanonicalizeMaxDots"): true, - strings.ToLower("CompressionLevel"): true, // 1 to 9 - strings.ToLower("ConnectionAttempts"): true, - strings.ToLower("ConnectTimeout"): true, - strings.ToLower("NumberOfPasswordPrompts"): true, - strings.ToLower("Port"): true, - strings.ToLower("ServerAliveCountMax"): true, - strings.ToLower("ServerAliveInterval"): true, -} - -func mustBeYesOrNo(lkey string) bool { - return yesnos[lkey] -} - -func mustBeUint(lkey string) bool { - return uints[lkey] -} - -func validate(key, val string) error { - lkey := strings.ToLower(key) - if mustBeYesOrNo(lkey) && (val != "yes" && val != "no") { - return fmt.Errorf("ssh_config: value for key %q must be 'yes' or 'no', got %q", key, val) - } - if mustBeUint(lkey) { - _, err := strconv.ParseUint(val, 10, 64) - if err != nil { - return fmt.Errorf("ssh_config: %v", err) - } - } - return nil -} - -var defaults = map[string]string{ - strings.ToLower("AddKeysToAgent"): "no", - strings.ToLower("AddressFamily"): "any", - strings.ToLower("BatchMode"): "no", - strings.ToLower("CanonicalizeFallbackLocal"): "yes", - strings.ToLower("CanonicalizeHostname"): "no", - strings.ToLower("CanonicalizeMaxDots"): "1", - strings.ToLower("ChallengeResponseAuthentication"): "yes", - strings.ToLower("CheckHostIP"): "yes", - // TODO is this still the correct cipher - strings.ToLower("Cipher"): "3des", - strings.ToLower("Ciphers"): "chacha20-poly1305@openssh.com,aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,aes128-cbc,aes192-cbc,aes256-cbc", - strings.ToLower("ClearAllForwardings"): "no", - strings.ToLower("Compression"): "no", - strings.ToLower("CompressionLevel"): "6", - strings.ToLower("ConnectionAttempts"): "1", - strings.ToLower("ControlMaster"): "no", - strings.ToLower("EnableSSHKeysign"): "no", - strings.ToLower("EscapeChar"): "~", - strings.ToLower("ExitOnForwardFailure"): "no", - strings.ToLower("FingerprintHash"): "sha256", - strings.ToLower("ForwardAgent"): "no", - 
strings.ToLower("ForwardX11"): "no", - strings.ToLower("ForwardX11Timeout"): "20m", - strings.ToLower("ForwardX11Trusted"): "no", - strings.ToLower("GatewayPorts"): "no", - strings.ToLower("GlobalKnownHostsFile"): "/etc/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts2", - strings.ToLower("GSSAPIAuthentication"): "no", - strings.ToLower("GSSAPIDelegateCredentials"): "no", - strings.ToLower("HashKnownHosts"): "no", - strings.ToLower("HostbasedAuthentication"): "no", - - strings.ToLower("HostbasedKeyTypes"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa", - strings.ToLower("HostKeyAlgorithms"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa", - // HostName has a dynamic default (the value passed at the command line). - - strings.ToLower("IdentitiesOnly"): "no", - strings.ToLower("IdentityFile"): "~/.ssh/identity", - - // IPQoS has a dynamic default based on interactive or non-interactive - // sessions. - - strings.ToLower("KbdInteractiveAuthentication"): "yes", - - strings.ToLower("KexAlgorithms"): "curve25519-sha256,curve25519-sha256@libssh.org,ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group-exchange-sha1,diffie-hellman-group14-sha1", - strings.ToLower("LogLevel"): "INFO", - strings.ToLower("MACs"): "umac-64-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,hmac-sha1-etm@openssh.com,umac-64@openssh.com,umac-128@openssh.com,hmac-sha2-256,hmac-sha2-512,hmac-sha1", - - strings.ToLower("NoHostAuthenticationForLocalhost"): "no", - strings.ToLower("NumberOfPasswordPrompts"): "3", - strings.ToLower("PasswordAuthentication"): "yes", - strings.ToLower("PermitLocalCommand"): "no", - strings.ToLower("Port"): "22", - - strings.ToLower("PreferredAuthentications"): "gssapi-with-mic,hostbased,publickey,keyboard-interactive,password", - strings.ToLower("Protocol"): "2", - strings.ToLower("ProxyUseFdpass"): "no", - strings.ToLower("PubkeyAcceptedKeyTypes"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa", - strings.ToLower("PubkeyAuthentication"): "yes", - strings.ToLower("RekeyLimit"): "default none", - strings.ToLower("RhostsRSAAuthentication"): "no", - strings.ToLower("RSAAuthentication"): "yes", - - strings.ToLower("ServerAliveCountMax"): "3", - strings.ToLower("ServerAliveInterval"): "0", - strings.ToLower("StreamLocalBindMask"): "0177", - strings.ToLower("StreamLocalBindUnlink"): "no", - strings.ToLower("StrictHostKeyChecking"): "ask", - strings.ToLower("TCPKeepAlive"): "yes", - strings.ToLower("Tunnel"): "no", - strings.ToLower("TunnelDevice"): "any:any", - strings.ToLower("UpdateHostKeys"): "no", - strings.ToLower("UseKeychain"): "no", - strings.ToLower("UsePrivilegedPort"): "no", - - strings.ToLower("UserKnownHostsFile"): "~/.ssh/known_hosts ~/.ssh/known_hosts2", - strings.ToLower("VerifyHostKeyDNS"): "no", - 
strings.ToLower("VisualHostKey"): "no", - strings.ToLower("XAuthLocation"): "/usr/X11R6/bin/xauth", -} diff --git a/awsproviderlint/vendor/github.com/mattn/go-colorable/.travis.yml b/awsproviderlint/vendor/github.com/mattn/go-colorable/.travis.yml new file mode 100644 index 00000000000..98db8f060bd --- /dev/null +++ b/awsproviderlint/vendor/github.com/mattn/go-colorable/.travis.yml @@ -0,0 +1,9 @@ +language: go +go: + - tip + +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -repotoken xnXqRGwgW3SXIguzxf90ZSK1GPYZPaGrw diff --git a/awsproviderlint/vendor/github.com/jbenet/go-context/LICENSE b/awsproviderlint/vendor/github.com/mattn/go-colorable/LICENSE similarity index 88% rename from awsproviderlint/vendor/github.com/jbenet/go-context/LICENSE rename to awsproviderlint/vendor/github.com/mattn/go-colorable/LICENSE index c7386b3c940..91b5cef30eb 100644 --- a/awsproviderlint/vendor/github.com/jbenet/go-context/LICENSE +++ b/awsproviderlint/vendor/github.com/mattn/go-colorable/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2014 Juan Batiz-Benet +Copyright (c) 2016 Yasuhiro Matsumoto Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -9,13 +9,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/awsproviderlint/vendor/github.com/mattn/go-colorable/README.md b/awsproviderlint/vendor/github.com/mattn/go-colorable/README.md new file mode 100644 index 00000000000..56729a92ca6 --- /dev/null +++ b/awsproviderlint/vendor/github.com/mattn/go-colorable/README.md @@ -0,0 +1,48 @@ +# go-colorable + +[![Godoc Reference](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) +[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable) +[![Coverage Status](https://coveralls.io/repos/github/mattn/go-colorable/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-colorable?branch=master) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable) + +Colorable writer for windows. + +For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.) +This package is possible to handle escape sequence for ansi color on windows. + +## Too Bad! 
+ +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png) + + +## So Good! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) + +## Usage + +```go +logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) +logrus.SetOutput(colorable.NewColorableStdout()) + +logrus.Info("succeeded") +logrus.Warn("not correct") +logrus.Error("something error") +logrus.Fatal("panic") +``` + +You can compile above code on non-windows OSs. + +## Installation + +``` +$ go get github.com/mattn/go-colorable +``` + +# License + +MIT + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/awsproviderlint/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/awsproviderlint/vendor/github.com/mattn/go-colorable/colorable_appengine.go new file mode 100644 index 00000000000..0b0aef83700 --- /dev/null +++ b/awsproviderlint/vendor/github.com/mattn/go-colorable/colorable_appengine.go @@ -0,0 +1,29 @@ +// +build appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable returns new instance of Writer which handles escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} diff --git a/awsproviderlint/vendor/github.com/mattn/go-colorable/colorable_others.go b/awsproviderlint/vendor/github.com/mattn/go-colorable/colorable_others.go new file mode 100644 index 00000000000..3fb771dcca2 --- /dev/null +++ b/awsproviderlint/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -0,0 +1,30 @@ +// +build !windows +// +build !appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable returns new instance of Writer which handles escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. 
+func NewColorableStderr() io.Writer { + return os.Stderr +} diff --git a/awsproviderlint/vendor/github.com/mattn/go-colorable/colorable_windows.go b/awsproviderlint/vendor/github.com/mattn/go-colorable/colorable_windows.go new file mode 100644 index 00000000000..1bd628f25c0 --- /dev/null +++ b/awsproviderlint/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -0,0 +1,1005 @@ +// +build windows +// +build !appengine + +package colorable + +import ( + "bytes" + "io" + "math" + "os" + "strconv" + "strings" + "syscall" + "unsafe" + + "github.com/mattn/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) +) + +const ( + genericRead = 0x80000000 + genericWrite = 0x40000000 +) + +const ( + consoleTextmodeBuffer = 0x1 +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type consoleCursorInfo struct { + size dword + visible int32 +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") + procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") + procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") + procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") +) + +// Writer provides colorable Writer to the console +type Writer struct { + out io.Writer + handle syscall.Handle + althandle syscall.Handle + oldattr word + oldpos coord + rest bytes.Buffer +} + +// NewColorable returns new instance of Writer which handles escape sequence from File. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} + } + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. 
+func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 
209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +// `\033]0;TITLESTR\007` +func doTitleSequence(er *bytes.Reader) error { + var c byte + var err error + + c, err = er.ReadByte() + if err != nil { + return err + } + if c != '0' && c != '2' { + return nil + } + c, err = er.ReadByte() + if err != nil { + return err + } + if c != ';' { + return nil + } + title := make([]byte, 0, 80) + for { + c, err = er.ReadByte() + if err != nil { + return err + } + if c == 0x07 || c == '\n' { + break + } + title = append(title, c) + } + if len(title) > 0 { + title8, err := syscall.UTF16PtrFromString(string(title)) + if err == nil { + procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8))) + } + } + return nil +} + +// returns Atoi(s) unless s == "" in which case it returns def +func atoiWithDefault(s string, def int) (int, error) { + if s == "" { + return def, nil + } + return strconv.Atoi(s) +} + +// Write writes data on console +func (w *Writer) Write(data []byte) (n int, err error) { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + handle := w.handle + + var er *bytes.Reader + if w.rest.Len() > 0 { + var rest bytes.Buffer + w.rest.WriteTo(&rest) + w.rest.Reset() + rest.Write(data) + er = bytes.NewReader(rest.Bytes()) + } else { + er = bytes.NewReader(data) + } + var bw [1]byte +loop: + for { + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + + switch c2 { + case '>': + continue + case ']': + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 { + break loop + } + er = bytes.NewReader(w.rest.Bytes()[2:]) + err := doTitleSequence(er) + if err != nil { + break loop + } + w.rest.Reset() + continue + // https://github.com/mattn/go-colorable/issues/27 + case '7': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + continue + case '8': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + continue + case 0x5b: + // execute part after switch + default: + continue + } + + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + + var buf bytes.Buffer + var m byte + for i, c := range w.rest.Bytes()[2:] { + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + er = bytes.NewReader(w.rest.Bytes()[2+i+1:]) + w.rest.Reset() + break + } + buf.Write([]byte(string(c))) + } + if m == 0 { + break loop + } + + switch m { + case 'A': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), 
uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + if csbi.cursorPosition.x < 0 { + csbi.cursorPosition.x = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n < 1 { + n = 1 + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n - 1) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H', 'f': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + if buf.Len() > 0 { + token := strings.Split(buf.String(), ";") + switch len(token) { + case 1: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + csbi.cursorPosition.y = short(n1 - 1) + case 2: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2 - 1) + csbi.cursorPosition.y = short(n1 - 1) + } + } else { + csbi.cursorPosition.y = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + var count, written dword + var cursor coord + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.window.top-csbi.cursorPosition.y)*dword(csbi.size.x) + case 
2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var count, written dword + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'X': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var written dword + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i++ { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case 1 <= n && n <= 5: + attr |= foregroundIntensity + case n == 7: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case n == 22 || n == 25: + attr |= foregroundIntensity + case n == 27: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 30 <= n && n <= 37: + attr &= backgroundMask + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. 
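For orientation, the extended-color forms that this case (and the matching `case n == 48` below) parses are `ESC[38;5;Nm` for the xterm-256 palette and `ESC[38;2;R;G;Bm` for 24-bit color. A minimal sketch that just emits the raw sequences, assuming a VT-capable terminal rather than going through this Writer:

```go
package main

import "fmt"

func main() {
	// 38 selects the foreground, 48 the background; 0 resets attributes.
	fmt.Println("\x1b[38;5;208m256-color palette entry 208\x1b[0m")
	fmt.Println("\x1b[38;2;255;135;0mthe same orange as 24-bit RGB\x1b[0m")
	fmt.Println("\x1b[48;5;19mdefault text on a dark-blue background\x1b[0m")
}
```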
+ if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= foregroundRed + } + if g > 127 { + attr |= foregroundGreen + } + if b > 127 { + attr |= foregroundBlue + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr &= foregroundMask + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= backgroundRed + } + if g > 127 { + attr |= backgroundGreen + } + if b > 127 { + attr |= backgroundBlue + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. + attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr)) + } + } + case 'h': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle == 0 { + h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0) + w.althandle = syscall.Handle(h) + if w.althandle != 0 { + handle = w.althandle + } + } + } + case 'l': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle != 0 { + syscall.CloseHandle(w.althandle) + w.althandle = 0 + handle = w.handle + } + } + case 's': + 
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + case 'u': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + } + } + + return len(data), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + {0x000000, false, false, false, false}, + {0x000080, false, false, true, false}, + {0x008000, false, true, false, false}, + {0x008080, false, true, true, false}, + {0x800000, true, false, false, false}, + {0x800080, true, false, true, false}, + {0x808000, true, true, false, false}, + {0xc0c0c0, true, true, true, false}, + {0x808080, false, false, false, true}, + {0x0000ff, false, false, true, true}, + {0x00ff00, false, true, false, true}, + {0x00ffff, false, true, true, true}, + {0xff0000, true, false, false, true}, + {0xff00ff, true, false, true, true}, + {0xffff00, true, true, false, true}, + {0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} diff --git a/awsproviderlint/vendor/github.com/mattn/go-colorable/go.mod b/awsproviderlint/vendor/github.com/mattn/go-colorable/go.mod new file mode 100644 index 00000000000..ef3ca9d4c31 --- /dev/null +++ b/awsproviderlint/vendor/github.com/mattn/go-colorable/go.mod @@ -0,0 +1,3 @@ +module github.com/mattn/go-colorable + +require 
github.com/mattn/go-isatty v0.0.8 diff --git a/awsproviderlint/vendor/github.com/mattn/go-colorable/go.sum b/awsproviderlint/vendor/github.com/mattn/go-colorable/go.sum new file mode 100644 index 00000000000..2c12960ec73 --- /dev/null +++ b/awsproviderlint/vendor/github.com/mattn/go-colorable/go.sum @@ -0,0 +1,4 @@ +github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/awsproviderlint/vendor/github.com/mattn/go-colorable/noncolorable.go b/awsproviderlint/vendor/github.com/mattn/go-colorable/noncolorable.go new file mode 100644 index 00000000000..95f2c6be257 --- /dev/null +++ b/awsproviderlint/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -0,0 +1,55 @@ +package colorable + +import ( + "bytes" + "io" +) + +// NonColorable holds writer but removes escape sequence. +type NonColorable struct { + out io.Writer +} + +// NewNonColorable returns new instance of Writer which removes escape sequence from Writer. +func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +// Write writes data on console +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewReader(data) + var bw [1]byte +loop: + for { + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + if c2 != 0x5b { + continue + } + + var buf bytes.Buffer + for { + c, err := er.ReadByte() + if err != nil { + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + buf.Write([]byte(string(c))) + } + } + + return len(data), nil +} diff --git a/awsproviderlint/vendor/github.com/mattn/go-isatty/.travis.yml b/awsproviderlint/vendor/github.com/mattn/go-isatty/.travis.yml new file mode 100644 index 00000000000..5597e026ddf --- /dev/null +++ b/awsproviderlint/vendor/github.com/mattn/go-isatty/.travis.yml @@ -0,0 +1,13 @@ +language: go +go: + - tip + +os: + - linux + - osx + +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -repotoken 3gHdORO5k5ziZcWMBxnd9LrMZaJs8m9x5 diff --git a/awsproviderlint/vendor/github.com/mattn/go-isatty/LICENSE b/awsproviderlint/vendor/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 00000000000..65dc692b6b1 --- /dev/null +++ b/awsproviderlint/vendor/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/awsproviderlint/vendor/github.com/mattn/go-isatty/README.md b/awsproviderlint/vendor/github.com/mattn/go-isatty/README.md new file mode 100644 index 00000000000..1e69004bb03 --- /dev/null +++ b/awsproviderlint/vendor/github.com/mattn/go-isatty/README.md @@ -0,0 +1,50 @@ +# go-isatty + +[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) +[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty) +[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) + +isatty for golang + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Println("Is Terminal") + } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) { + fmt.Println("Is Cygwin/MSYS2 Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} +``` + +## Installation + +``` +$ go get github.com/mattn/go-isatty +``` + +## License + +MIT + +## Author + +Yasuhiro Matsumoto (a.k.a mattn) + +## Thanks + +* k-takata: base idea for IsCygwinTerminal + + https://github.com/k-takata/go-iscygpty diff --git a/awsproviderlint/vendor/github.com/mattn/go-isatty/doc.go b/awsproviderlint/vendor/github.com/mattn/go-isatty/doc.go new file mode 100644 index 00000000000..17d4f90ebcc --- /dev/null +++ b/awsproviderlint/vendor/github.com/mattn/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements interface to isatty +package isatty diff --git a/awsproviderlint/vendor/github.com/mattn/go-isatty/go.mod b/awsproviderlint/vendor/github.com/mattn/go-isatty/go.mod new file mode 100644 index 00000000000..a8ddf404fc1 --- /dev/null +++ b/awsproviderlint/vendor/github.com/mattn/go-isatty/go.mod @@ -0,0 +1,5 @@ +module github.com/mattn/go-isatty + +require golang.org/x/sys v0.0.0-20191008105621-543471e840be + +go 1.14 diff --git a/awsproviderlint/vendor/github.com/mattn/go-isatty/go.sum b/awsproviderlint/vendor/github.com/mattn/go-isatty/go.sum new file mode 100644 index 00000000000..c141fc53a95 --- /dev/null +++ b/awsproviderlint/vendor/github.com/mattn/go-isatty/go.sum @@ -0,0 +1,4 @@ +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_android.go b/awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_android.go new file mode 100644 index 00000000000..d3567cb5bf2 --- /dev/null +++ 
b/awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_android.go @@ -0,0 +1,23 @@ +// +build android + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TCGETS + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_bsd.go new file mode 100644 index 00000000000..07e93039dbe --- /dev/null +++ b/awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -0,0 +1,24 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TIOCGETA + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_others.go b/awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_others.go new file mode 100644 index 00000000000..ff714a37615 --- /dev/null +++ b/awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -0,0 +1,15 @@ +// +build appengine js nacl + +package isatty + +// IsTerminal returns true if the file descriptor is terminal which +// is always false on js and appengine classic which is a sandboxed PaaS. +func IsTerminal(fd uintptr) bool { + return false +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_plan9.go new file mode 100644 index 00000000000..bc0a70920f4 --- /dev/null +++ b/awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_plan9.go @@ -0,0 +1,22 @@ +// +build plan9 + +package isatty + +import ( + "syscall" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + path, err := syscall.Fd2path(fd) + if err != nil { + return false + } + return path == "/dev/cons" || path == "/mnt/term/dev/cons" +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_solaris.go new file mode 100644 index 00000000000..bdd5c79a07f --- /dev/null +++ b/awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -0,0 +1,22 @@ +// +build solaris +// +build !appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +func IsTerminal(fd uintptr) bool { + var termio unix.Termio + err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_tcgets.go new file mode 100644 index 00000000000..453b025d0df --- /dev/null +++ b/awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -0,0 +1,19 @@ +// +build linux aix +// +build !appengine +// +build !android + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_windows.go b/awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_windows.go new file mode 100644 index 00000000000..1fa86915405 --- /dev/null +++ b/awsproviderlint/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -0,0 +1,125 @@ +// +build windows +// +build !appengine + +package isatty + +import ( + "errors" + "strings" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + objectNameInfo uintptr = 1 + fileNameInfo = 2 + fileTypePipe = 3 +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + ntdll = syscall.NewLazyDLL("ntdll.dll") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = kernel32.NewProc("GetFileType") + procNtQueryObject = ntdll.NewProc("NtQueryObject") +) + +func init() { + // Check if GetFileInformationByHandleEx is available. + if procGetFileInformationByHandleEx.Find() != nil { + procGetFileInformationByHandleEx = nil + } +} + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// Check pipe name is used for cygwin/msys2 pty. 
+// Cygwin/MSYS2 PTY has a name like: +// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master +func isCygwinPipeName(name string) bool { + token := strings.Split(name, "-") + if len(token) < 5 { + return false + } + + if token[0] != `\msys` && + token[0] != `\cygwin` && + token[0] != `\Device\NamedPipe\msys` && + token[0] != `\Device\NamedPipe\cygwin` { + return false + } + + if token[1] == "" { + return false + } + + if !strings.HasPrefix(token[2], "pty") { + return false + } + + if token[3] != `from` && token[3] != `to` { + return false + } + + if token[4] != "master" { + return false + } + + return true +} + +// getFileNameByHandle uses the undocumented ntdll NtQueryObject call to get the full file name from a file handle. +// GetFileInformationByHandleEx is not available before Windows Vista, and some users are still on Windows XP, +// so this is a workaround for them; it also works on systems from Windows Vista to 10. +// See https://stackoverflow.com/a/18792477 for details. +func getFileNameByHandle(fd uintptr) (string, error) { + if procNtQueryObject == nil { + return "", errors.New("ntdll.dll: NtQueryObject not supported") + } + + var buf [4 + syscall.MAX_PATH]uint16 + var result int + r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5, + fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0) + if r != 0 { + return "", e + } + return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil +} + +// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2 +// terminal. +func IsCygwinTerminal(fd uintptr) bool { + if procGetFileInformationByHandleEx == nil { + name, err := getFileNameByHandle(fd) + if err != nil { + return false + } + return isCygwinPipeName(name) + } + + // Cygwin/msys's pty is a pipe. + ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) + if ft != fileTypePipe || e != 0 { + return false + } + + var buf [2 + syscall.MAX_PATH]uint16 + r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), + 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), + uintptr(len(buf)*2), 0, 0) + if r == 0 || e != 0 { + return false + } + + l := *(*uint32)(unsafe.Pointer(&buf)) + return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) +} diff --git a/awsproviderlint/vendor/github.com/sergi/go-diff/AUTHORS b/awsproviderlint/vendor/github.com/sergi/go-diff/AUTHORS deleted file mode 100644 index 2d7bb2bf572..00000000000 --- a/awsproviderlint/vendor/github.com/sergi/go-diff/AUTHORS +++ /dev/null @@ -1,25 +0,0 @@ -# This is the official list of go-diff authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. - -Danny Yoo -James Kolb -Jonathan Amsterdam -Markus Zimmermann -Matt Kovars -Örjan Persson -Osman Masood -Robert Carlsen -Rory Flynn -Sergi Mansilla -Shatrugna Sadhu -Shawn Smith -Stas Maksimov -Tor Arvid Lund -Zac Bergquist diff --git a/awsproviderlint/vendor/github.com/sergi/go-diff/CONTRIBUTORS b/awsproviderlint/vendor/github.com/sergi/go-diff/CONTRIBUTORS deleted file mode 100644 index 369e3d55190..00000000000 --- a/awsproviderlint/vendor/github.com/sergi/go-diff/CONTRIBUTORS +++ /dev/null @@ -1,32 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the go-diff -# repository. 
-# -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, ACME Inc. employees would be listed here -# but not in AUTHORS, because ACME Inc. would hold the copyright. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file. -# -# Names should be added to this file like so: -# Name -# -# Please keep the list sorted. - -Danny Yoo -James Kolb -Jonathan Amsterdam -Markus Zimmermann -Matt Kovars -Örjan Persson -Osman Masood -Robert Carlsen -Rory Flynn -Sergi Mansilla -Shatrugna Sadhu -Shawn Smith -Stas Maksimov -Tor Arvid Lund -Zac Bergquist diff --git a/awsproviderlint/vendor/github.com/sergi/go-diff/LICENSE b/awsproviderlint/vendor/github.com/sergi/go-diff/LICENSE deleted file mode 100644 index 937942c2b2c..00000000000 --- a/awsproviderlint/vendor/github.com/sergi/go-diff/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2012-2016 The go-diff Authors. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a -copy of this software and associated documentation files (the "Software"), -to deal in the Software without restriction, including without limitation -the rights to use, copy, modify, merge, publish, distribute, sublicense, -and/or sell copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. - diff --git a/awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go b/awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go deleted file mode 100644 index cb25b437575..00000000000 --- a/awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go +++ /dev/null @@ -1,1345 +0,0 @@ -// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. -// https://github.com/sergi/go-diff -// See the included LICENSE file for license details. -// -// go-diff is a Go implementation of Google's Diff, Match, and Patch library -// Original library is Copyright (c) 2006 Google Inc. -// http://code.google.com/p/google-diff-match-patch/ - -package diffmatchpatch - -import ( - "bytes" - "errors" - "fmt" - "html" - "math" - "net/url" - "regexp" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// Operation defines the operation of a diff item. -type Operation int8 - -//go:generate stringer -type=Operation -trimprefix=Diff - -const ( - // DiffDelete item represents a delete diff. - DiffDelete Operation = -1 - // DiffInsert item represents an insert diff. - DiffInsert Operation = 1 - // DiffEqual item represents an equal diff. - DiffEqual Operation = 0 -) - -// Diff represents one diff operation -type Diff struct { - Type Operation - Text string -} - -// splice removes amount elements from slice at index index, replacing them with elements. 
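Before the in-place implementation below, it may help to see the same operation written the simple, allocating way. This sketch is illustrative only; the `spliceInts` name and the `[]int` element type are mine, not the library's:

```go
package main

import "fmt"

// spliceInts removes amount elements of s at index and inserts
// elements in their place, building a fresh slice via append.
func spliceInts(s []int, index, amount int, elements ...int) []int {
	out := make([]int, 0, len(s)-amount+len(elements))
	out = append(out, s[:index]...)
	out = append(out, elements...)
	out = append(out, s[index+amount:]...)
	return out
}

func main() {
	fmt.Println(spliceInts([]int{1, 2, 3, 4}, 1, 2, 9)) // [1 9 4]
}
```

The vendored version avoids the extra allocation by shifting elements within the existing backing array, which matters because splice sits on the hot path of the cleanup passes.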
-func splice(slice []Diff, index int, amount int, elements ...Diff) []Diff { - if len(elements) == amount { - // Easy case: overwrite the relevant items. - copy(slice[index:], elements) - return slice - } - if len(elements) < amount { - // Fewer new items than old. - // Copy in the new items. - copy(slice[index:], elements) - // Shift the remaining items left. - copy(slice[index+len(elements):], slice[index+amount:]) - // Calculate the new end of the slice. - end := len(slice) - amount + len(elements) - // Zero stranded elements at end so that they can be garbage collected. - tail := slice[end:] - for i := range tail { - tail[i] = Diff{} - } - return slice[:end] - } - // More new items than old. - // Make room in slice for new elements. - // There's probably an even more efficient way to do this, - // but this is simple and clear. - need := len(slice) - amount + len(elements) - for len(slice) < need { - slice = append(slice, Diff{}) - } - // Shift slice elements right to make room for new elements. - copy(slice[index+len(elements):], slice[index+amount:]) - // Copy in new elements. - copy(slice[index:], elements) - return slice -} - -// DiffMain finds the differences between two texts. -// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. -func (dmp *DiffMatchPatch) DiffMain(text1, text2 string, checklines bool) []Diff { - return dmp.DiffMainRunes([]rune(text1), []rune(text2), checklines) -} - -// DiffMainRunes finds the differences between two rune sequences. -// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. -func (dmp *DiffMatchPatch) DiffMainRunes(text1, text2 []rune, checklines bool) []Diff { - var deadline time.Time - if dmp.DiffTimeout > 0 { - deadline = time.Now().Add(dmp.DiffTimeout) - } - return dmp.diffMainRunes(text1, text2, checklines, deadline) -} - -func (dmp *DiffMatchPatch) diffMainRunes(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { - if runesEqual(text1, text2) { - var diffs []Diff - if len(text1) > 0 { - diffs = append(diffs, Diff{DiffEqual, string(text1)}) - } - return diffs - } - // Trim off common prefix (speedup). - commonlength := commonPrefixLength(text1, text2) - commonprefix := text1[:commonlength] - text1 = text1[commonlength:] - text2 = text2[commonlength:] - - // Trim off common suffix (speedup). - commonlength = commonSuffixLength(text1, text2) - commonsuffix := text1[len(text1)-commonlength:] - text1 = text1[:len(text1)-commonlength] - text2 = text2[:len(text2)-commonlength] - - // Compute the diff on the middle block. - diffs := dmp.diffCompute(text1, text2, checklines, deadline) - - // Restore the prefix and suffix. - if len(commonprefix) != 0 { - diffs = append([]Diff{Diff{DiffEqual, string(commonprefix)}}, diffs...) - } - if len(commonsuffix) != 0 { - diffs = append(diffs, Diff{DiffEqual, string(commonsuffix)}) - } - - return dmp.DiffCleanupMerge(diffs) -} - -// diffCompute finds the differences between two rune slices. Assumes that the texts do not have any common prefix or suffix. -func (dmp *DiffMatchPatch) diffCompute(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { - diffs := []Diff{} - if len(text1) == 0 { - // Just add some text (speedup). - return append(diffs, Diff{DiffInsert, string(text2)}) - } else if len(text2) == 0 { - // Just delete some text (speedup). 
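Typical use of the public entry point defined above; this assumes the package's `New` constructor, which is defined elsewhere in the package and not shown in this hunk:

```go
package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	// checklines=false requests a pure character-level diff.
	diffs := dmp.DiffMain("good dog", "bad dog", false)
	for _, d := range diffs {
		fmt.Printf("%v %q\n", d.Type, d.Text)
	}
}
```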
- return append(diffs, Diff{DiffDelete, string(text1)}) - } - - var longtext, shorttext []rune - if len(text1) > len(text2) { - longtext = text1 - shorttext = text2 - } else { - longtext = text2 - shorttext = text1 - } - - if i := runesIndex(longtext, shorttext); i != -1 { - op := DiffInsert - // Swap insertions for deletions if diff is reversed. - if len(text1) > len(text2) { - op = DiffDelete - } - // Shorter text is inside the longer text (speedup). - return []Diff{ - Diff{op, string(longtext[:i])}, - Diff{DiffEqual, string(shorttext)}, - Diff{op, string(longtext[i+len(shorttext):])}, - } - } else if len(shorttext) == 1 { - // Single character string. - // After the previous speedup, the character can't be an equality. - return []Diff{ - Diff{DiffDelete, string(text1)}, - Diff{DiffInsert, string(text2)}, - } - // Check to see if the problem can be split in two. - } else if hm := dmp.diffHalfMatch(text1, text2); hm != nil { - // A half-match was found, sort out the return data. - text1A := hm[0] - text1B := hm[1] - text2A := hm[2] - text2B := hm[3] - midCommon := hm[4] - // Send both pairs off for separate processing. - diffsA := dmp.diffMainRunes(text1A, text2A, checklines, deadline) - diffsB := dmp.diffMainRunes(text1B, text2B, checklines, deadline) - // Merge the results. - diffs := diffsA - diffs = append(diffs, Diff{DiffEqual, string(midCommon)}) - diffs = append(diffs, diffsB...) - return diffs - } else if checklines && len(text1) > 100 && len(text2) > 100 { - return dmp.diffLineMode(text1, text2, deadline) - } - return dmp.diffBisect(text1, text2, deadline) -} - -// diffLineMode does a quick line-level diff on both []runes, then rediff the parts for greater accuracy. This speedup can produce non-minimal diffs. -func (dmp *DiffMatchPatch) diffLineMode(text1, text2 []rune, deadline time.Time) []Diff { - // Scan the text on a line-by-line basis first. - text1, text2, linearray := dmp.diffLinesToRunes(text1, text2) - - diffs := dmp.diffMainRunes(text1, text2, false, deadline) - - // Convert the diff back to original text. - diffs = dmp.DiffCharsToLines(diffs, linearray) - // Eliminate freak matches (e.g. blank lines) - diffs = dmp.DiffCleanupSemantic(diffs) - - // Rediff any replacement blocks, this time character-by-character. - // Add a dummy entry at the end. - diffs = append(diffs, Diff{DiffEqual, ""}) - - pointer := 0 - countDelete := 0 - countInsert := 0 - - // NOTE: Rune slices are slower than using strings in this case. - textDelete := "" - textInsert := "" - - for pointer < len(diffs) { - switch diffs[pointer].Type { - case DiffInsert: - countInsert++ - textInsert += diffs[pointer].Text - case DiffDelete: - countDelete++ - textDelete += diffs[pointer].Text - case DiffEqual: - // Upon reaching an equality, check for prior redundancies. - if countDelete >= 1 && countInsert >= 1 { - // Delete the offending records and add the merged ones. - diffs = splice(diffs, pointer-countDelete-countInsert, - countDelete+countInsert) - - pointer = pointer - countDelete - countInsert - a := dmp.diffMainRunes([]rune(textDelete), []rune(textInsert), false, deadline) - for j := len(a) - 1; j >= 0; j-- { - diffs = splice(diffs, pointer, 0, a[j]) - } - pointer = pointer + len(a) - } - - countInsert = 0 - countDelete = 0 - textDelete = "" - textInsert = "" - } - pointer++ - } - - return diffs[:len(diffs)-1] // Remove the dummy entry at the end. -} - -// DiffBisect finds the 'middle snake' of a diff, split the problem in two and return the recursively constructed diff. 
-// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. -// See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations. -func (dmp *DiffMatchPatch) DiffBisect(text1, text2 string, deadline time.Time) []Diff { - // Unused in this code, but retained for interface compatibility. - return dmp.diffBisect([]rune(text1), []rune(text2), deadline) -} - -// diffBisect finds the 'middle snake' of a diff, splits the problem in two and returns the recursively constructed diff. -// See Myers's 1986 paper: An O(ND) Difference Algorithm and Its Variations. -func (dmp *DiffMatchPatch) diffBisect(runes1, runes2 []rune, deadline time.Time) []Diff { - // Cache the text lengths to prevent multiple calls. - runes1Len, runes2Len := len(runes1), len(runes2) - - maxD := (runes1Len + runes2Len + 1) / 2 - vOffset := maxD - vLength := 2 * maxD - - v1 := make([]int, vLength) - v2 := make([]int, vLength) - for i := range v1 { - v1[i] = -1 - v2[i] = -1 - } - v1[vOffset+1] = 0 - v2[vOffset+1] = 0 - - delta := runes1Len - runes2Len - // If the total number of characters is odd, then the front path will collide with the reverse path. - front := (delta%2 != 0) - // Offsets for start and end of k loop. Prevents mapping of space beyond the grid. - k1start := 0 - k1end := 0 - k2start := 0 - k2end := 0 - for d := 0; d < maxD; d++ { - // Bail out if deadline is reached. - if !deadline.IsZero() && d%16 == 0 && time.Now().After(deadline) { - break - } - - // Walk the front path one step. - for k1 := -d + k1start; k1 <= d-k1end; k1 += 2 { - k1Offset := vOffset + k1 - var x1 int - - if k1 == -d || (k1 != d && v1[k1Offset-1] < v1[k1Offset+1]) { - x1 = v1[k1Offset+1] - } else { - x1 = v1[k1Offset-1] + 1 - } - - y1 := x1 - k1 - for x1 < runes1Len && y1 < runes2Len { - if runes1[x1] != runes2[y1] { - break - } - x1++ - y1++ - } - v1[k1Offset] = x1 - if x1 > runes1Len { - // Ran off the right of the graph. - k1end += 2 - } else if y1 > runes2Len { - // Ran off the bottom of the graph. - k1start += 2 - } else if front { - k2Offset := vOffset + delta - k1 - if k2Offset >= 0 && k2Offset < vLength && v2[k2Offset] != -1 { - // Mirror x2 onto top-left coordinate system. - x2 := runes1Len - v2[k2Offset] - if x1 >= x2 { - // Overlap detected. - return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) - } - } - } - } - // Walk the reverse path one step. - for k2 := -d + k2start; k2 <= d-k2end; k2 += 2 { - k2Offset := vOffset + k2 - var x2 int - if k2 == -d || (k2 != d && v2[k2Offset-1] < v2[k2Offset+1]) { - x2 = v2[k2Offset+1] - } else { - x2 = v2[k2Offset-1] + 1 - } - var y2 = x2 - k2 - for x2 < runes1Len && y2 < runes2Len { - if runes1[runes1Len-x2-1] != runes2[runes2Len-y2-1] { - break - } - x2++ - y2++ - } - v2[k2Offset] = x2 - if x2 > runes1Len { - // Ran off the left of the graph. - k2end += 2 - } else if y2 > runes2Len { - // Ran off the top of the graph. - k2start += 2 - } else if !front { - k1Offset := vOffset + delta - k2 - if k1Offset >= 0 && k1Offset < vLength && v1[k1Offset] != -1 { - x1 := v1[k1Offset] - y1 := vOffset + x1 - k1Offset - // Mirror x2 onto top-left coordinate system. - x2 = runes1Len - x2 - if x1 >= x2 { - // Overlap detected. - return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) - } - } - } - } - } - // Diff took too long and hit the deadline or number of diffs equals number of characters, no commonality at all. 
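The comment above is the crux of the deadline handling: once the timeout expires, the bisect stops refining and falls back to the coarse delete-everything/insert-everything pair returned just below. A hedged sketch of opting into that trade-off from calling code, via the `DiffTimeout` field referenced earlier in this file:

```go
package main

import (
	"fmt"
	"time"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	// Bound latency at the cost of diff quality: after 50ms the
	// bisect gives up and returns a coarser (but still valid) diff.
	dmp.DiffTimeout = 50 * time.Millisecond
	diffs := dmp.DiffMain("the quick brown fox", "the slow brown cat", false)
	fmt.Println(len(diffs))
}
```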
- return []Diff{ - Diff{DiffDelete, string(runes1)}, - Diff{DiffInsert, string(runes2)}, - } -} - -func (dmp *DiffMatchPatch) diffBisectSplit(runes1, runes2 []rune, x, y int, - deadline time.Time) []Diff { - runes1a := runes1[:x] - runes2a := runes2[:y] - runes1b := runes1[x:] - runes2b := runes2[y:] - - // Compute both diffs serially. - diffs := dmp.diffMainRunes(runes1a, runes2a, false, deadline) - diffsb := dmp.diffMainRunes(runes1b, runes2b, false, deadline) - - return append(diffs, diffsb...) -} - -// DiffLinesToChars splits two texts into a list of strings, and educes the texts to a string of hashes where each Unicode character represents one line. -// It's slightly faster to call DiffLinesToRunes first, followed by DiffMainRunes. -func (dmp *DiffMatchPatch) DiffLinesToChars(text1, text2 string) (string, string, []string) { - chars1, chars2, lineArray := dmp.DiffLinesToRunes(text1, text2) - return string(chars1), string(chars2), lineArray -} - -// DiffLinesToRunes splits two texts into a list of runes. Each rune represents one line. -func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune, []string) { - // '\x00' is a valid character, but various debuggers don't like it. So we'll insert a junk entry to avoid generating a null character. - lineArray := []string{""} // e.g. lineArray[4] == 'Hello\n' - lineHash := map[string]int{} // e.g. lineHash['Hello\n'] == 4 - - chars1 := dmp.diffLinesToRunesMunge(text1, &lineArray, lineHash) - chars2 := dmp.diffLinesToRunesMunge(text2, &lineArray, lineHash) - - return chars1, chars2, lineArray -} - -func (dmp *DiffMatchPatch) diffLinesToRunes(text1, text2 []rune) ([]rune, []rune, []string) { - return dmp.DiffLinesToRunes(string(text1), string(text2)) -} - -// diffLinesToRunesMunge splits a text into an array of strings, and reduces the texts to a []rune where each Unicode character represents one line. -// We use strings instead of []runes as input mainly because you can't use []rune as a map key. -func (dmp *DiffMatchPatch) diffLinesToRunesMunge(text string, lineArray *[]string, lineHash map[string]int) []rune { - // Walk the text, pulling out a substring for each line. text.split('\n') would would temporarily double our memory footprint. Modifying text would create many large strings to garbage collect. - lineStart := 0 - lineEnd := -1 - runes := []rune{} - - for lineEnd < len(text)-1 { - lineEnd = indexOf(text, "\n", lineStart) - - if lineEnd == -1 { - lineEnd = len(text) - 1 - } - - line := text[lineStart : lineEnd+1] - lineStart = lineEnd + 1 - lineValue, ok := lineHash[line] - - if ok { - runes = append(runes, rune(lineValue)) - } else { - *lineArray = append(*lineArray, line) - lineHash[line] = len(*lineArray) - 1 - runes = append(runes, rune(len(*lineArray)-1)) - } - } - - return runes -} - -// DiffCharsToLines rehydrates the text in a diff from a string of line hashes to real lines of text. -func (dmp *DiffMatchPatch) DiffCharsToLines(diffs []Diff, lineArray []string) []Diff { - hydrated := make([]Diff, 0, len(diffs)) - for _, aDiff := range diffs { - chars := aDiff.Text - text := make([]string, len(chars)) - - for i, r := range chars { - text[i] = lineArray[r] - } - - aDiff.Text = strings.Join(text, "") - hydrated = append(hydrated, aDiff) - } - return hydrated -} - -// DiffCommonPrefix determines the common prefix length of two strings. -func (dmp *DiffMatchPatch) DiffCommonPrefix(text1, text2 string) int { - // Unused in this code, but retained for interface compatibility. 
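The three line-oriented functions above combine into the usual line-mode recipe: map each distinct line to a rune, diff the short rune strings, then rehydrate the runes back into lines. A small sketch of the round trip:

```go
package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	a := "alpha\nbeta\ngamma\n"
	b := "alpha\nbeta\ndelta\n"
	c1, c2, lines := dmp.DiffLinesToChars(a, b)
	diffs := dmp.DiffMain(c1, c2, false) // diff the per-line runes
	diffs = dmp.DiffCharsToLines(diffs, lines)
	for _, d := range diffs {
		fmt.Printf("%v %q\n", d.Type, d.Text)
	}
}
```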
- return commonPrefixLength([]rune(text1), []rune(text2)) -} - -// DiffCommonSuffix determines the common suffix length of two strings. -func (dmp *DiffMatchPatch) DiffCommonSuffix(text1, text2 string) int { - // Unused in this code, but retained for interface compatibility. - return commonSuffixLength([]rune(text1), []rune(text2)) -} - -// commonPrefixLength returns the length of the common prefix of two rune slices. -func commonPrefixLength(text1, text2 []rune) int { - // Linear search. See comment in commonSuffixLength. - n := 0 - for ; n < len(text1) && n < len(text2); n++ { - if text1[n] != text2[n] { - return n - } - } - return n -} - -// commonSuffixLength returns the length of the common suffix of two rune slices. -func commonSuffixLength(text1, text2 []rune) int { - // Use linear search rather than the binary search discussed at https://neil.fraser.name/news/2007/10/09/. - // See discussion at https://github.com/sergi/go-diff/issues/54. - i1 := len(text1) - i2 := len(text2) - for n := 0; ; n++ { - i1-- - i2-- - if i1 < 0 || i2 < 0 || text1[i1] != text2[i2] { - return n - } - } -} - -// DiffCommonOverlap determines if the suffix of one string is the prefix of another. -func (dmp *DiffMatchPatch) DiffCommonOverlap(text1 string, text2 string) int { - // Cache the text lengths to prevent multiple calls. - text1Length := len(text1) - text2Length := len(text2) - // Eliminate the null case. - if text1Length == 0 || text2Length == 0 { - return 0 - } - // Truncate the longer string. - if text1Length > text2Length { - text1 = text1[text1Length-text2Length:] - } else if text1Length < text2Length { - text2 = text2[0:text1Length] - } - textLength := int(math.Min(float64(text1Length), float64(text2Length))) - // Quick check for the worst case. - if text1 == text2 { - return textLength - } - - // Start by looking for a single character match and increase length until no match is found. Performance analysis: http://neil.fraser.name/news/2010/11/04/ - best := 0 - length := 1 - for { - pattern := text1[textLength-length:] - found := strings.Index(text2, pattern) - if found == -1 { - break - } - length += found - if found == 0 || text1[textLength-length:] == text2[0:length] { - best = length - length++ - } - } - - return best -} - -// DiffHalfMatch checks whether the two texts share a substring which is at least half the length of the longer text. This speedup can produce non-minimal diffs. -func (dmp *DiffMatchPatch) DiffHalfMatch(text1, text2 string) []string { - // Unused in this code, but retained for interface compatibility. - runeSlices := dmp.diffHalfMatch([]rune(text1), []rune(text2)) - if runeSlices == nil { - return nil - } - - result := make([]string, len(runeSlices)) - for i, r := range runeSlices { - result[i] = string(r) - } - return result -} - -func (dmp *DiffMatchPatch) diffHalfMatch(text1, text2 []rune) [][]rune { - if dmp.DiffTimeout <= 0 { - // Don't risk returning a non-optimal diff if we have unlimited time. - return nil - } - - var longtext, shorttext []rune - if len(text1) > len(text2) { - longtext = text1 - shorttext = text2 - } else { - longtext = text2 - shorttext = text1 - } - - if len(longtext) < 4 || len(shorttext)*2 < len(longtext) { - return nil // Pointless. - } - - // First check if the second quarter is the seed for a half-match. - hm1 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+3)/4)) - - // Check again based on the third quarter. 
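Concrete values may make these helpers easier to check; the expected results in the comments are computed by hand from the definitions above:

```go
package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	fmt.Println(dmp.DiffCommonPrefix("flow", "flower"))    // 4 ("flow")
	fmt.Println(dmp.DiffCommonSuffix("rafting", "ring"))   // 3 ("ing")
	fmt.Println(dmp.DiffCommonOverlap("abcxxx", "xxxdef")) // 3 ("xxx")
	fmt.Println(dmp.DiffCommonOverlap("abc", "def"))       // 0
}
```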
- hm2 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+1)/2)) - - hm := [][]rune{} - if hm1 == nil && hm2 == nil { - return nil - } else if hm2 == nil { - hm = hm1 - } else if hm1 == nil { - hm = hm2 - } else { - // Both matched. Select the longest. - if len(hm1[4]) > len(hm2[4]) { - hm = hm1 - } else { - hm = hm2 - } - } - - // A half-match was found, sort out the return data. - if len(text1) > len(text2) { - return hm - } - - return [][]rune{hm[2], hm[3], hm[0], hm[1], hm[4]} -} - -// diffHalfMatchI checks if a substring of shorttext exist within longtext such that the substring is at least half the length of longtext? -// Returns a slice containing the prefix of longtext, the suffix of longtext, the prefix of shorttext, the suffix of shorttext and the common middle, or null if there was no match. -func (dmp *DiffMatchPatch) diffHalfMatchI(l, s []rune, i int) [][]rune { - var bestCommonA []rune - var bestCommonB []rune - var bestCommonLen int - var bestLongtextA []rune - var bestLongtextB []rune - var bestShorttextA []rune - var bestShorttextB []rune - - // Start with a 1/4 length substring at position i as a seed. - seed := l[i : i+len(l)/4] - - for j := runesIndexOf(s, seed, 0); j != -1; j = runesIndexOf(s, seed, j+1) { - prefixLength := commonPrefixLength(l[i:], s[j:]) - suffixLength := commonSuffixLength(l[:i], s[:j]) - - if bestCommonLen < suffixLength+prefixLength { - bestCommonA = s[j-suffixLength : j] - bestCommonB = s[j : j+prefixLength] - bestCommonLen = len(bestCommonA) + len(bestCommonB) - bestLongtextA = l[:i-suffixLength] - bestLongtextB = l[i+prefixLength:] - bestShorttextA = s[:j-suffixLength] - bestShorttextB = s[j+prefixLength:] - } - } - - if bestCommonLen*2 < len(l) { - return nil - } - - return [][]rune{ - bestLongtextA, - bestLongtextB, - bestShorttextA, - bestShorttextB, - append(bestCommonA, bestCommonB...), - } -} - -// DiffCleanupSemantic reduces the number of edits by eliminating semantically trivial equalities. -func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff { - changes := false - // Stack of indices where equalities are found. - equalities := make([]int, 0, len(diffs)) - - var lastequality string - // Always equal to diffs[equalities[equalitiesLength - 1]][1] - var pointer int // Index of current position. - // Number of characters that changed prior to the equality. - var lengthInsertions1, lengthDeletions1 int - // Number of characters that changed after the equality. - var lengthInsertions2, lengthDeletions2 int - - for pointer < len(diffs) { - if diffs[pointer].Type == DiffEqual { - // Equality found. - equalities = append(equalities, pointer) - lengthInsertions1 = lengthInsertions2 - lengthDeletions1 = lengthDeletions2 - lengthInsertions2 = 0 - lengthDeletions2 = 0 - lastequality = diffs[pointer].Text - } else { - // An insertion or deletion. - - if diffs[pointer].Type == DiffInsert { - lengthInsertions2 += len(diffs[pointer].Text) - } else { - lengthDeletions2 += len(diffs[pointer].Text) - } - // Eliminate an equality that is smaller or equal to the edits on both sides of it. - difference1 := int(math.Max(float64(lengthInsertions1), float64(lengthDeletions1))) - difference2 := int(math.Max(float64(lengthInsertions2), float64(lengthDeletions2))) - if len(lastequality) > 0 && - (len(lastequality) <= difference1) && - (len(lastequality) <= difference2) { - // Duplicate record. 
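In practice this pass is applied to a raw diff to trade minimality for readability. A short sketch using the classic demonstration pair from the upstream documentation:

```go
package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	diffs := dmp.DiffMain("mouse", "sofas", false)
	// The raw diff interleaves tiny equalities ("o", "s"); the semantic
	// cleanup collapses them into one delete plus one insert.
	fmt.Println(dmp.DiffCleanupSemantic(diffs))
}
```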
- insPoint := equalities[len(equalities)-1] - diffs = splice(diffs, insPoint, 0, Diff{DiffDelete, lastequality}) - - // Change second copy to insert. - diffs[insPoint+1].Type = DiffInsert - // Throw away the equality we just deleted. - equalities = equalities[:len(equalities)-1] - - if len(equalities) > 0 { - equalities = equalities[:len(equalities)-1] - } - pointer = -1 - if len(equalities) > 0 { - pointer = equalities[len(equalities)-1] - } - - lengthInsertions1 = 0 // Reset the counters. - lengthDeletions1 = 0 - lengthInsertions2 = 0 - lengthDeletions2 = 0 - lastequality = "" - changes = true - } - } - pointer++ - } - - // Normalize the diff. - if changes { - diffs = dmp.DiffCleanupMerge(diffs) - } - diffs = dmp.DiffCleanupSemanticLossless(diffs) - // Find any overlaps between deletions and insertions. - // e.g: abcxxxxxxdef - // -> abcxxxdef - // e.g: xxxabcdefxxx - // -> defxxxabc - // Only extract an overlap if it is as big as the edit ahead or behind it. - pointer = 1 - for pointer < len(diffs) { - if diffs[pointer-1].Type == DiffDelete && - diffs[pointer].Type == DiffInsert { - deletion := diffs[pointer-1].Text - insertion := diffs[pointer].Text - overlapLength1 := dmp.DiffCommonOverlap(deletion, insertion) - overlapLength2 := dmp.DiffCommonOverlap(insertion, deletion) - if overlapLength1 >= overlapLength2 { - if float64(overlapLength1) >= float64(len(deletion))/2 || - float64(overlapLength1) >= float64(len(insertion))/2 { - - // Overlap found. Insert an equality and trim the surrounding edits. - diffs = splice(diffs, pointer, 0, Diff{DiffEqual, insertion[:overlapLength1]}) - diffs[pointer-1].Text = - deletion[0 : len(deletion)-overlapLength1] - diffs[pointer+1].Text = insertion[overlapLength1:] - pointer++ - } - } else { - if float64(overlapLength2) >= float64(len(deletion))/2 || - float64(overlapLength2) >= float64(len(insertion))/2 { - // Reverse overlap found. Insert an equality and swap and trim the surrounding edits. - overlap := Diff{DiffEqual, deletion[:overlapLength2]} - diffs = splice(diffs, pointer, 0, overlap) - diffs[pointer-1].Type = DiffInsert - diffs[pointer-1].Text = insertion[0 : len(insertion)-overlapLength2] - diffs[pointer+1].Type = DiffDelete - diffs[pointer+1].Text = deletion[overlapLength2:] - pointer++ - } - } - pointer++ - } - pointer++ - } - - return diffs -} - -// Define some regex patterns for matching boundaries. -var ( - nonAlphaNumericRegex = regexp.MustCompile(`[^a-zA-Z0-9]`) - whitespaceRegex = regexp.MustCompile(`\s`) - linebreakRegex = regexp.MustCompile(`[\r\n]`) - blanklineEndRegex = regexp.MustCompile(`\n\r?\n$`) - blanklineStartRegex = regexp.MustCompile(`^\r?\n\r?\n`) -) - -// diffCleanupSemanticScore computes a score representing whether the internal boundary falls on logical boundaries. -// Scores range from 6 (best) to 0 (worst). Closure, but does not reference any external variables. -func diffCleanupSemanticScore(one, two string) int { - if len(one) == 0 || len(two) == 0 { - // Edges are the best. - return 6 - } - - // Each port of this function behaves slightly differently due to subtle differences in each language's definition of things like 'whitespace'. Since this function's purpose is largely cosmetic, the choice has been made to use each language's native features rather than force total conformity. 
- rune1, _ := utf8.DecodeLastRuneInString(one) - rune2, _ := utf8.DecodeRuneInString(two) - char1 := string(rune1) - char2 := string(rune2) - - nonAlphaNumeric1 := nonAlphaNumericRegex.MatchString(char1) - nonAlphaNumeric2 := nonAlphaNumericRegex.MatchString(char2) - whitespace1 := nonAlphaNumeric1 && whitespaceRegex.MatchString(char1) - whitespace2 := nonAlphaNumeric2 && whitespaceRegex.MatchString(char2) - lineBreak1 := whitespace1 && linebreakRegex.MatchString(char1) - lineBreak2 := whitespace2 && linebreakRegex.MatchString(char2) - blankLine1 := lineBreak1 && blanklineEndRegex.MatchString(one) - blankLine2 := lineBreak2 && blanklineEndRegex.MatchString(two) - - if blankLine1 || blankLine2 { - // Five points for blank lines. - return 5 - } else if lineBreak1 || lineBreak2 { - // Four points for line breaks. - return 4 - } else if nonAlphaNumeric1 && !whitespace1 && whitespace2 { - // Three points for end of sentences. - return 3 - } else if whitespace1 || whitespace2 { - // Two points for whitespace. - return 2 - } else if nonAlphaNumeric1 || nonAlphaNumeric2 { - // One point for non-alphanumeric. - return 1 - } - return 0 -} - -// DiffCleanupSemanticLossless looks for single edits surrounded on both sides by equalities which can be shifted sideways to align the edit to a word boundary. -// E.g: The cat came. -> The cat came. -func (dmp *DiffMatchPatch) DiffCleanupSemanticLossless(diffs []Diff) []Diff { - pointer := 1 - - // Intentionally ignore the first and last element (don't need checking). - for pointer < len(diffs)-1 { - if diffs[pointer-1].Type == DiffEqual && - diffs[pointer+1].Type == DiffEqual { - - // This is a single edit surrounded by equalities. - equality1 := diffs[pointer-1].Text - edit := diffs[pointer].Text - equality2 := diffs[pointer+1].Text - - // First, shift the edit as far left as possible. - commonOffset := dmp.DiffCommonSuffix(equality1, edit) - if commonOffset > 0 { - commonString := edit[len(edit)-commonOffset:] - equality1 = equality1[0 : len(equality1)-commonOffset] - edit = commonString + edit[:len(edit)-commonOffset] - equality2 = commonString + equality2 - } - - // Second, step character by character right, looking for the best fit. - bestEquality1 := equality1 - bestEdit := edit - bestEquality2 := equality2 - bestScore := diffCleanupSemanticScore(equality1, edit) + - diffCleanupSemanticScore(edit, equality2) - - for len(edit) != 0 && len(equality2) != 0 { - _, sz := utf8.DecodeRuneInString(edit) - if len(equality2) < sz || edit[:sz] != equality2[:sz] { - break - } - equality1 += edit[:sz] - edit = edit[sz:] + equality2[:sz] - equality2 = equality2[sz:] - score := diffCleanupSemanticScore(equality1, edit) + - diffCleanupSemanticScore(edit, equality2) - // The >= encourages trailing rather than leading whitespace on edits. - if score >= bestScore { - bestScore = score - bestEquality1 = equality1 - bestEdit = edit - bestEquality2 = equality2 - } - } - - if diffs[pointer-1].Text != bestEquality1 { - // We have an improvement, save it back to the diff. - if len(bestEquality1) != 0 { - diffs[pointer-1].Text = bestEquality1 - } else { - diffs = splice(diffs, pointer-1, 1) - pointer-- - } - - diffs[pointer].Text = bestEdit - if len(bestEquality2) != 0 { - diffs[pointer+1].Text = bestEquality2 - } else { - diffs = append(diffs[:pointer+1], diffs[pointer+2:]...) - pointer-- - } - } - } - pointer++ - } - - return diffs -} - -// DiffCleanupEfficiency reduces the number of edits by eliminating operationally trivial equalities. 
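Usage mirrors the semantic pass, with `DiffEditCost` (a field on `DiffMatchPatch`, defined elsewhere in the package) controlling how short an equality must be before it is cheaper to dissolve it into the surrounding edits:

```go
package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	dmp.DiffEditCost = 4 // believed to be the upstream default
	diffs := dmp.DiffMain("ab12cd34ef", "xy12uv34gh", false)
	fmt.Println(dmp.DiffCleanupEfficiency(diffs))
}
```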
-
-// DiffCleanupEfficiency reduces the number of edits by eliminating operationally trivial equalities.
-func (dmp *DiffMatchPatch) DiffCleanupEfficiency(diffs []Diff) []Diff {
-	changes := false
-	// Stack of indices where equalities are found.
-	type equality struct {
-		data int
-		next *equality
-	}
-	var equalities *equality
-	// Always equal to equalities[equalitiesLength-1][1]
-	lastequality := ""
-	pointer := 0 // Index of current position.
-	// Is there an insertion operation before the last equality.
-	preIns := false
-	// Is there a deletion operation before the last equality.
-	preDel := false
-	// Is there an insertion operation after the last equality.
-	postIns := false
-	// Is there a deletion operation after the last equality.
-	postDel := false
-	for pointer < len(diffs) {
-		if diffs[pointer].Type == DiffEqual { // Equality found.
-			if len(diffs[pointer].Text) < dmp.DiffEditCost &&
-				(postIns || postDel) {
-				// Candidate found.
-				equalities = &equality{
-					data: pointer,
-					next: equalities,
-				}
-				preIns = postIns
-				preDel = postDel
-				lastequality = diffs[pointer].Text
-			} else {
-				// Not a candidate, and can never become one.
-				equalities = nil
-				lastequality = ""
-			}
-			postIns = false
-			postDel = false
-		} else { // An insertion or deletion.
-			if diffs[pointer].Type == DiffDelete {
-				postDel = true
-			} else {
-				postIns = true
-			}
-
-			// Five types to be split:
-			// <ins>A</ins><del>B</del>XY<ins>C</ins><del>D</del>
-			// <ins>A</ins>X<ins>C</ins><del>D</del>
-			// <ins>A</ins><del>B</del>X<ins>C</ins>
-			// <ins>A</ins>X<ins>C</ins><del>D</del>
-			// <ins>A</ins><del>B</del>X<del>C</del>
-			var sumPres int
-			if preIns {
-				sumPres++
-			}
-			if preDel {
-				sumPres++
-			}
-			if postIns {
-				sumPres++
-			}
-			if postDel {
-				sumPres++
-			}
-			if len(lastequality) > 0 &&
-				((preIns && preDel && postIns && postDel) ||
-					((len(lastequality) < dmp.DiffEditCost/2) && sumPres == 3)) {
-
-				insPoint := equalities.data
-
-				// Duplicate record.
-				diffs = splice(diffs, insPoint, 0, Diff{DiffDelete, lastequality})
-
-				// Change second copy to insert.
-				diffs[insPoint+1].Type = DiffInsert
-				// Throw away the equality we just deleted.
-				equalities = equalities.next
-				lastequality = ""
-
-				if preIns && preDel {
-					// No changes made which could affect previous entry, keep going.
-					postIns = true
-					postDel = true
-					equalities = nil
-				} else {
-					if equalities != nil {
-						equalities = equalities.next
-					}
-					if equalities != nil {
-						pointer = equalities.data
-					} else {
-						pointer = -1
-					}
-					postIns = false
-					postDel = false
-				}
-				changes = true
-			}
-		}
-		pointer++
-	}
-
-	if changes {
-		diffs = dmp.DiffCleanupMerge(diffs)
-	}
-
-	return diffs
-}
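A hedged sketch of how DiffEditCost interacts with this pass; the inputs are made up, and the exact folding behaviour depends on the diff produced:

```go
package main

import "github.com/sergi/go-diff/diffmatchpatch"

func main() {
	dmp := diffmatchpatch.New()
	// Raising DiffEditCost makes this pass more aggressive: equalities
	// shorter than the edit cost get folded into the surrounding edits.
	dmp.DiffEditCost = 8
	diffs := dmp.DiffMain("abxyzcd", "12wxyz34", false)
	diffs = dmp.DiffCleanupEfficiency(diffs)
	_ = diffs
}
```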
-
-// DiffCleanupMerge reorders and merges like edit sections. Merge equalities.
-// Any edit section can move as long as it doesn't cross an equality.
-func (dmp *DiffMatchPatch) DiffCleanupMerge(diffs []Diff) []Diff {
-	// Add a dummy entry at the end.
-	diffs = append(diffs, Diff{DiffEqual, ""})
-	pointer := 0
-	countDelete := 0
-	countInsert := 0
-	commonlength := 0
-	textDelete := []rune(nil)
-	textInsert := []rune(nil)
-
-	for pointer < len(diffs) {
-		switch diffs[pointer].Type {
-		case DiffInsert:
-			countInsert++
-			textInsert = append(textInsert, []rune(diffs[pointer].Text)...)
-			pointer++
-			break
-		case DiffDelete:
-			countDelete++
-			textDelete = append(textDelete, []rune(diffs[pointer].Text)...)
-			pointer++
-			break
-		case DiffEqual:
-			// Upon reaching an equality, check for prior redundancies.
-			if countDelete+countInsert > 1 {
-				if countDelete != 0 && countInsert != 0 {
-					// Factor out any common prefixes.
-					commonlength = commonPrefixLength(textInsert, textDelete)
-					if commonlength != 0 {
-						x := pointer - countDelete - countInsert
-						if x > 0 && diffs[x-1].Type == DiffEqual {
-							diffs[x-1].Text += string(textInsert[:commonlength])
-						} else {
-							diffs = append([]Diff{Diff{DiffEqual, string(textInsert[:commonlength])}}, diffs...)
-							pointer++
-						}
-						textInsert = textInsert[commonlength:]
-						textDelete = textDelete[commonlength:]
-					}
-					// Factor out any common suffixes.
-					commonlength = commonSuffixLength(textInsert, textDelete)
-					if commonlength != 0 {
-						insertIndex := len(textInsert) - commonlength
-						deleteIndex := len(textDelete) - commonlength
-						diffs[pointer].Text = string(textInsert[insertIndex:]) + diffs[pointer].Text
-						textInsert = textInsert[:insertIndex]
-						textDelete = textDelete[:deleteIndex]
-					}
-				}
-				// Delete the offending records and add the merged ones.
-				if countDelete == 0 {
-					diffs = splice(diffs, pointer-countInsert,
-						countDelete+countInsert,
-						Diff{DiffInsert, string(textInsert)})
-				} else if countInsert == 0 {
-					diffs = splice(diffs, pointer-countDelete,
-						countDelete+countInsert,
-						Diff{DiffDelete, string(textDelete)})
-				} else {
-					diffs = splice(diffs, pointer-countDelete-countInsert,
-						countDelete+countInsert,
-						Diff{DiffDelete, string(textDelete)},
-						Diff{DiffInsert, string(textInsert)})
-				}
-
-				pointer = pointer - countDelete - countInsert + 1
-				if countDelete != 0 {
-					pointer++
-				}
-				if countInsert != 0 {
-					pointer++
-				}
-			} else if pointer != 0 && diffs[pointer-1].Type == DiffEqual {
-				// Merge this equality with the previous one.
-				diffs[pointer-1].Text += diffs[pointer].Text
-				diffs = append(diffs[:pointer], diffs[pointer+1:]...)
-			} else {
-				pointer++
-			}
-			countInsert = 0
-			countDelete = 0
-			textDelete = nil
-			textInsert = nil
-			break
-		}
-	}
-
-	if len(diffs[len(diffs)-1].Text) == 0 {
-		diffs = diffs[0 : len(diffs)-1] // Remove the dummy entry at the end.
-	}
-
-	// Second pass: look for single edits surrounded on both sides by equalities which can be shifted sideways to eliminate an equality. E.g: A<ins>BA</ins>C -> <ins>AB</ins>AC
-	changes := false
-	pointer = 1
-	// Intentionally ignore the first and last element (don't need checking).
-	for pointer < (len(diffs) - 1) {
-		if diffs[pointer-1].Type == DiffEqual &&
-			diffs[pointer+1].Type == DiffEqual {
-			// This is a single edit surrounded by equalities.
-			if strings.HasSuffix(diffs[pointer].Text, diffs[pointer-1].Text) {
-				// Shift the edit over the previous equality.
-				diffs[pointer].Text = diffs[pointer-1].Text +
-					diffs[pointer].Text[:len(diffs[pointer].Text)-len(diffs[pointer-1].Text)]
-				diffs[pointer+1].Text = diffs[pointer-1].Text + diffs[pointer+1].Text
-				diffs = splice(diffs, pointer-1, 1)
-				changes = true
-			} else if strings.HasPrefix(diffs[pointer].Text, diffs[pointer+1].Text) {
-				// Shift the edit over the next equality.
-				diffs[pointer-1].Text += diffs[pointer+1].Text
-				diffs[pointer].Text =
-					diffs[pointer].Text[len(diffs[pointer+1].Text):] + diffs[pointer+1].Text
-				diffs = splice(diffs, pointer+1, 1)
-				changes = true
-			}
-		}
-		pointer++
-	}
-
-	// If shifts were made, the diff needs reordering and another shift sweep.
-	if changes {
-		diffs = dmp.DiffCleanupMerge(diffs)
-	}
-
-	return diffs
-}
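A quick sketch of the prefix-factoring behaviour; the two Diff values are invented for illustration:

```go
package main

import "github.com/sergi/go-diff/diffmatchpatch"

func main() {
	dmp := diffmatchpatch.New()
	diffs := []diffmatchpatch.Diff{
		{Type: diffmatchpatch.DiffDelete, Text: "abc"},
		{Type: diffmatchpatch.DiffInsert, Text: "abcd"},
	}
	// The shared prefix "abc" is factored out into an equality,
	// leaving just an insertion of "d".
	diffs = dmp.DiffCleanupMerge(diffs)
	_ = diffs
}
```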
-
-// DiffXIndex returns the equivalent location in s2.
-func (dmp *DiffMatchPatch) DiffXIndex(diffs []Diff, loc int) int {
-	chars1 := 0
-	chars2 := 0
-	lastChars1 := 0
-	lastChars2 := 0
-	lastDiff := Diff{}
-	for i := 0; i < len(diffs); i++ {
-		aDiff := diffs[i]
-		if aDiff.Type != DiffInsert {
-			// Equality or deletion.
-			chars1 += len(aDiff.Text)
-		}
-		if aDiff.Type != DiffDelete {
-			// Equality or insertion.
-			chars2 += len(aDiff.Text)
-		}
-		if chars1 > loc {
-			// Overshot the location.
-			lastDiff = aDiff
-			break
-		}
-		lastChars1 = chars1
-		lastChars2 = chars2
-	}
-	if lastDiff.Type == DiffDelete {
-		// The location was deleted.
-		return lastChars2
-	}
-	// Add the remaining character length.
-	return lastChars2 + (loc - lastChars1)
-}
-
-// DiffPrettyHtml converts a []Diff into a pretty HTML report.
-// It is intended as an example from which to write one's own display functions.
-func (dmp *DiffMatchPatch) DiffPrettyHtml(diffs []Diff) string {
-	var buff bytes.Buffer
-	for _, diff := range diffs {
-		text := strings.Replace(html.EscapeString(diff.Text), "\n", "&para;<br>", -1)
-		switch diff.Type {
-		case DiffInsert:
-			_, _ = buff.WriteString("<ins style=\"background:#e6ffe6;\">")
-			_, _ = buff.WriteString(text)
-			_, _ = buff.WriteString("</ins>")
-		case DiffDelete:
-			_, _ = buff.WriteString("<del style=\"background:#ffe6e6;\">")
-			_, _ = buff.WriteString(text)
-			_, _ = buff.WriteString("</del>")
-		case DiffEqual:
-			_, _ = buff.WriteString("<span>")
-			_, _ = buff.WriteString(text)
-			_, _ = buff.WriteString("</span>")
-		}
-	}
-	return buff.String()
-}
-
-// DiffPrettyText converts a []Diff into a colored text report.
-func (dmp *DiffMatchPatch) DiffPrettyText(diffs []Diff) string {
-	var buff bytes.Buffer
-	for _, diff := range diffs {
-		text := diff.Text
-
-		switch diff.Type {
-		case DiffInsert:
-			_, _ = buff.WriteString("\x1b[32m")
-			_, _ = buff.WriteString(text)
-			_, _ = buff.WriteString("\x1b[0m")
-		case DiffDelete:
-			_, _ = buff.WriteString("\x1b[31m")
-			_, _ = buff.WriteString(text)
-			_, _ = buff.WriteString("\x1b[0m")
-		case DiffEqual:
-			_, _ = buff.WriteString(text)
-		}
-	}
-
-	return buff.String()
-}
-
-// DiffText1 computes and returns the source text (all equalities and deletions).
-func (dmp *DiffMatchPatch) DiffText1(diffs []Diff) string {
-	//StringBuilder text = new StringBuilder()
-	var text bytes.Buffer
-
-	for _, aDiff := range diffs {
-		if aDiff.Type != DiffInsert {
-			_, _ = text.WriteString(aDiff.Text)
-		}
-	}
-	return text.String()
-}
-
-// DiffText2 computes and returns the destination text (all equalities and insertions).
-func (dmp *DiffMatchPatch) DiffText2(diffs []Diff) string {
-	var text bytes.Buffer
-
-	for _, aDiff := range diffs {
-		if aDiff.Type != DiffDelete {
-			_, _ = text.WriteString(aDiff.Text)
-		}
-	}
-	return text.String()
-}
-
-// DiffLevenshtein computes the Levenshtein distance that is the number of inserted, deleted or substituted characters.
-func (dmp *DiffMatchPatch) DiffLevenshtein(diffs []Diff) int {
-	levenshtein := 0
-	insertions := 0
-	deletions := 0
-
-	for _, aDiff := range diffs {
-		switch aDiff.Type {
-		case DiffInsert:
-			insertions += utf8.RuneCountInString(aDiff.Text)
-		case DiffDelete:
-			deletions += utf8.RuneCountInString(aDiff.Text)
-		case DiffEqual:
-			// A deletion and an insertion is one substitution.
-			levenshtein += max(insertions, deletions)
-			insertions = 0
-			deletions = 0
-		}
-	}
-
-	levenshtein += max(insertions, deletions)
-	return levenshtein
-}
-
-// DiffToDelta crushes the diff into an encoded string which describes the operations required to transform text1 into text2.
-// E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'. Operations are tab-separated. Inserted text is escaped using %xx notation.
-func (dmp *DiffMatchPatch) DiffToDelta(diffs []Diff) string {
-	var text bytes.Buffer
-	for _, aDiff := range diffs {
-		switch aDiff.Type {
-		case DiffInsert:
-			_, _ = text.WriteString("+")
-			_, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1))
-			_, _ = text.WriteString("\t")
-			break
-		case DiffDelete:
-			_, _ = text.WriteString("-")
-			_, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text)))
-			_, _ = text.WriteString("\t")
-			break
-		case DiffEqual:
-			_, _ = text.WriteString("=")
-			_, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text)))
-			_, _ = text.WriteString("\t")
-			break
-		}
-	}
-	delta := text.String()
-	if len(delta) != 0 {
-		// Strip off trailing tab character.
-		delta = delta[0 : utf8.RuneCountInString(delta)-1]
-		delta = unescaper.Replace(delta)
-	}
-	return delta
-}
-
-// DiffFromDelta, given the original text1 and an encoded string which describes the operations required to transform text1 into text2, computes the full diff.
-func (dmp *DiffMatchPatch) DiffFromDelta(text1 string, delta string) (diffs []Diff, err error) {
-	i := 0
-	runes := []rune(text1)
-
-	for _, token := range strings.Split(delta, "\t") {
-		if len(token) == 0 {
-			// Blank tokens are ok (from a trailing \t).
-			continue
-		}
-
-		// Each token begins with a one character parameter which specifies the operation of this token (delete, insert, equality).
-		param := token[1:]
-
-		switch op := token[0]; op {
-		case '+':
-			// Decode would Diff all "+" to " "
-			param = strings.Replace(param, "+", "%2b", -1)
-			param, err = url.QueryUnescape(param)
-			if err != nil {
-				return nil, err
-			}
-			if !utf8.ValidString(param) {
-				return nil, fmt.Errorf("invalid UTF-8 token: %q", param)
-			}
-
-			diffs = append(diffs, Diff{DiffInsert, param})
-		case '=', '-':
-			n, err := strconv.ParseInt(param, 10, 0)
-			if err != nil {
-				return nil, err
-			} else if n < 0 {
-				return nil, errors.New("Negative number in DiffFromDelta: " + param)
-			}
-
-			i += int(n)
-			// Break out if we are out of bounds, go1.6 can't handle this very well
-			if i > len(runes) {
-				break
-			}
-			// Remember that string slicing is by byte - we want by rune here.
-			text := string(runes[i-int(n) : i])
-
-			if op == '=' {
-				diffs = append(diffs, Diff{DiffEqual, text})
-			} else {
-				diffs = append(diffs, Diff{DiffDelete, text})
-			}
-		default:
-			// Anything else is an error.
-			return nil, errors.New("Invalid diff operation in DiffFromDelta: " + string(token[0]))
-		}
-	}
-
-	if i != len(runes) {
-		return nil, fmt.Errorf("Delta length (%v) is different from source text length (%v)", i, len(text1))
-	}
-
-	return diffs, nil
-}
diff --git a/awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go b/awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go
deleted file mode 100644
index d3acc32ce13..00000000000
--- a/awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
-// https://github.com/sergi/go-diff
-// See the included LICENSE file for license details.
-//
-// go-diff is a Go implementation of Google's Diff, Match, and Patch library
-// Original library is Copyright (c) 2006 Google Inc.
-// http://code.google.com/p/google-diff-match-patch/
-
-// Package diffmatchpatch offers robust algorithms to perform the operations required for synchronizing plain text.
-package diffmatchpatch
-
-import (
-	"time"
-)
-
-// DiffMatchPatch holds the configuration for diff-match-patch operations.
-type DiffMatchPatch struct {
-	// Number of seconds to map a diff before giving up (0 for infinity).
-	DiffTimeout time.Duration
-	// Cost of an empty edit operation in terms of edit characters.
-	DiffEditCost int
-	// How far to search for a match (0 = exact location, 1000+ = broad match). A match this many characters away from the expected location will add 1.0 to the score (0.0 is a perfect match).
-	MatchDistance int
-	// When deleting a large block of text (over ~64 characters), how close do the contents have to be to match the expected contents. (0.0 = perfection, 1.0 = very loose). Note that MatchThreshold controls how closely the end points of a delete need to match.
-	PatchDeleteThreshold float64
-	// Chunk size for context length.
-	PatchMargin int
-	// The number of bits in an int.
-	MatchMaxBits int
-	// At what point is no match declared (0.0 = perfection, 1.0 = very loose).
-	MatchThreshold float64
-}
-
-// New creates a new DiffMatchPatch object with default parameters.
-func New() *DiffMatchPatch {
-	// Defaults.
-	return &DiffMatchPatch{
-		DiffTimeout:          time.Second,
-		DiffEditCost:         4,
-		MatchThreshold:       0.5,
-		MatchDistance:        1000,
-		PatchDeleteThreshold: 0.5,
-		PatchMargin:          4,
-		MatchMaxBits:         32,
-	}
-}
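The defaults above are tunable per workload. A short sketch, assuming nothing beyond the exported fields just shown:

```go
package main

import (
	"time"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	// Allow longer diff computations and demand stricter fuzzy matches
	// than the defaults (1s timeout, 0.5 threshold).
	dmp.DiffTimeout = 5 * time.Second
	dmp.MatchThreshold = 0.3
	_ = dmp
}
```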
diff --git a/awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go b/awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go
deleted file mode 100644
index 17374e109fe..00000000000
--- a/awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
-// https://github.com/sergi/go-diff
-// See the included LICENSE file for license details.
-//
-// go-diff is a Go implementation of Google's Diff, Match, and Patch library
-// Original library is Copyright (c) 2006 Google Inc.
-// http://code.google.com/p/google-diff-match-patch/
-
-package diffmatchpatch
-
-import (
-	"math"
-)
-
-// MatchMain locates the best instance of 'pattern' in 'text' near 'loc'.
-// Returns -1 if no match found.
-func (dmp *DiffMatchPatch) MatchMain(text, pattern string, loc int) int {
-	// Check for null inputs not needed since null can't be passed in C#.
-
-	loc = int(math.Max(0, math.Min(float64(loc), float64(len(text)))))
-	if text == pattern {
-		// Shortcut (potentially not guaranteed by the algorithm)
-		return 0
-	} else if len(text) == 0 {
-		// Nothing to match.
-		return -1
-	} else if loc+len(pattern) <= len(text) && text[loc:loc+len(pattern)] == pattern {
-		// Perfect match at the perfect spot! (Includes case of null pattern)
-		return loc
-	}
-	// Do a fuzzy compare.
-	return dmp.MatchBitap(text, pattern, loc)
-}
-
-// MatchBitap locates the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm.
-// Returns -1 if no match was found.
-func (dmp *DiffMatchPatch) MatchBitap(text, pattern string, loc int) int {
-	// Initialise the alphabet.
-	s := dmp.MatchAlphabet(pattern)
-
-	// Highest score beyond which we give up.
-	scoreThreshold := dmp.MatchThreshold
-	// Is there a nearby exact match? (speedup)
-	bestLoc := indexOf(text, pattern, loc)
-	if bestLoc != -1 {
-		scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc,
-			pattern), scoreThreshold)
-		// What about in the other direction? (speedup)
-		bestLoc = lastIndexOf(text, pattern, loc+len(pattern))
-		if bestLoc != -1 {
-			scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc,
-				pattern), scoreThreshold)
-		}
-	}
-
-	// Initialise the bit arrays.
-	matchmask := 1 << uint((len(pattern) - 1))
-	bestLoc = -1
-
-	var binMin, binMid int
-	binMax := len(pattern) + len(text)
-	lastRd := []int{}
-	for d := 0; d < len(pattern); d++ {
-		// Scan for the best match; each iteration allows for one more error. Run a binary search to determine how far from 'loc' we can stray at this error level.
-		binMin = 0
-		binMid = binMax
-		for binMin < binMid {
-			if dmp.matchBitapScore(d, loc+binMid, loc, pattern) <= scoreThreshold {
-				binMin = binMid
-			} else {
-				binMax = binMid
-			}
-			binMid = (binMax-binMin)/2 + binMin
-		}
-		// Use the result from this iteration as the maximum for the next.
-		binMax = binMid
-		start := int(math.Max(1, float64(loc-binMid+1)))
-		finish := int(math.Min(float64(loc+binMid), float64(len(text))) + float64(len(pattern)))
-
-		rd := make([]int, finish+2)
-		rd[finish+1] = (1 << uint(d)) - 1
-
-		for j := finish; j >= start; j-- {
-			var charMatch int
-			if len(text) <= j-1 {
-				// Out of range.
-				charMatch = 0
-			} else if _, ok := s[text[j-1]]; !ok {
-				charMatch = 0
-			} else {
-				charMatch = s[text[j-1]]
-			}
-
-			if d == 0 {
-				// First pass: exact match.
-				rd[j] = ((rd[j+1] << 1) | 1) & charMatch
-			} else {
-				// Subsequent passes: fuzzy match.
-				rd[j] = ((rd[j+1]<<1)|1)&charMatch | (((lastRd[j+1] | lastRd[j]) << 1) | 1) | lastRd[j+1]
-			}
-			if (rd[j] & matchmask) != 0 {
-				score := dmp.matchBitapScore(d, j-1, loc, pattern)
-				// This match will almost certainly be better than any existing match. But check anyway.
-				if score <= scoreThreshold {
-					// Told you so.
-					scoreThreshold = score
-					bestLoc = j - 1
-					if bestLoc > loc {
-						// When passing loc, don't exceed our current distance from loc.
-						start = int(math.Max(1, float64(2*loc-bestLoc)))
-					} else {
-						// Already passed loc, downhill from here on in.
-						break
-					}
-				}
-			}
-		}
-		if dmp.matchBitapScore(d+1, loc, loc, pattern) > scoreThreshold {
-			// No hope for a (better) match at greater error levels.
-			break
-		}
-		lastRd = rd
-	}
-	return bestLoc
-}
-
-// matchBitapScore computes and returns the score for a match with e errors and x location.
-func (dmp *DiffMatchPatch) matchBitapScore(e, x, loc int, pattern string) float64 {
-	accuracy := float64(e) / float64(len(pattern))
-	proximity := math.Abs(float64(loc - x))
-	if dmp.MatchDistance == 0 {
-		// Dodge divide by zero error.
-		if proximity == 0 {
-			return accuracy
-		}
-
-		return 1.0
-	}
-	return accuracy + (proximity / float64(dmp.MatchDistance))
-}
-
-// MatchAlphabet initialises the alphabet for the Bitap algorithm.
-func (dmp *DiffMatchPatch) MatchAlphabet(pattern string) map[byte]int {
-	s := map[byte]int{}
-	charPattern := []byte(pattern)
-	for _, c := range charPattern {
-		_, ok := s[c]
-		if !ok {
-			s[c] = 0
-		}
-	}
-	i := 0
-
-	for _, c := range charPattern {
-		value := s[c] | int(uint(1)<<uint((len(pattern)-i-1)))
-		s[c] = value
-		i++
-	}
-	return s
-}
diff --git a/awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go b/awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go
deleted file mode 100644
--- a/awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
-// https://github.com/sergi/go-diff
-// See the included LICENSE file for license details.
-//
-// go-diff is a Go implementation of Google's Diff, Match, and Patch library
-// Original library is Copyright (c) 2006 Google Inc.
-// http://code.google.com/p/google-diff-match-patch/
-
-package diffmatchpatch
-
-func min(x, y int) int {
-	if x < y {
-		return x
-	}
-	return y
-}
-
-func max(x, y int) int {
-	if x > y {
-		return x
-	}
-	return y
-}
diff --git a/awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/operation_string.go b/awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/operation_string.go
deleted file mode 100644
index 533ec0da7b3..00000000000
--- a/awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/operation_string.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Code generated by "stringer -type=Operation -trimprefix=Diff"; DO NOT EDIT.
-
-package diffmatchpatch
-
-import "fmt"
-
-const _Operation_name = "DeleteEqualInsert"
-
-var _Operation_index = [...]uint8{0, 6, 11, 17}
-
-func (i Operation) String() string {
-	i -= -1
-	if i < 0 || i >= Operation(len(_Operation_index)-1) {
-		return fmt.Sprintf("Operation(%d)", i+-1)
-	}
-	return _Operation_name[_Operation_index[i]:_Operation_index[i+1]]
-}
diff --git a/awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go b/awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go
deleted file mode 100644
index 223c43c4268..00000000000
--- a/awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go
+++ /dev/null
@@ -1,556 +0,0 @@
-// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
-// https://github.com/sergi/go-diff
-// See the included LICENSE file for license details.
-// -// go-diff is a Go implementation of Google's Diff, Match, and Patch library -// Original library is Copyright (c) 2006 Google Inc. -// http://code.google.com/p/google-diff-match-patch/ - -package diffmatchpatch - -import ( - "bytes" - "errors" - "math" - "net/url" - "regexp" - "strconv" - "strings" -) - -// Patch represents one patch operation. -type Patch struct { - diffs []Diff - Start1 int - Start2 int - Length1 int - Length2 int -} - -// String emulates GNU diff's format. -// Header: @@ -382,8 +481,9 @@ -// Indices are printed as 1-based, not 0-based. -func (p *Patch) String() string { - var coords1, coords2 string - - if p.Length1 == 0 { - coords1 = strconv.Itoa(p.Start1) + ",0" - } else if p.Length1 == 1 { - coords1 = strconv.Itoa(p.Start1 + 1) - } else { - coords1 = strconv.Itoa(p.Start1+1) + "," + strconv.Itoa(p.Length1) - } - - if p.Length2 == 0 { - coords2 = strconv.Itoa(p.Start2) + ",0" - } else if p.Length2 == 1 { - coords2 = strconv.Itoa(p.Start2 + 1) - } else { - coords2 = strconv.Itoa(p.Start2+1) + "," + strconv.Itoa(p.Length2) - } - - var text bytes.Buffer - _, _ = text.WriteString("@@ -" + coords1 + " +" + coords2 + " @@\n") - - // Escape the body of the patch with %xx notation. - for _, aDiff := range p.diffs { - switch aDiff.Type { - case DiffInsert: - _, _ = text.WriteString("+") - case DiffDelete: - _, _ = text.WriteString("-") - case DiffEqual: - _, _ = text.WriteString(" ") - } - - _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) - _, _ = text.WriteString("\n") - } - - return unescaper.Replace(text.String()) -} - -// PatchAddContext increases the context until it is unique, but doesn't let the pattern expand beyond MatchMaxBits. -func (dmp *DiffMatchPatch) PatchAddContext(patch Patch, text string) Patch { - if len(text) == 0 { - return patch - } - - pattern := text[patch.Start2 : patch.Start2+patch.Length1] - padding := 0 - - // Look for the first and last matches of pattern in text. If two different matches are found, increase the pattern length. - for strings.Index(text, pattern) != strings.LastIndex(text, pattern) && - len(pattern) < dmp.MatchMaxBits-2*dmp.PatchMargin { - padding += dmp.PatchMargin - maxStart := max(0, patch.Start2-padding) - minEnd := min(len(text), patch.Start2+patch.Length1+padding) - pattern = text[maxStart:minEnd] - } - // Add one chunk for good luck. - padding += dmp.PatchMargin - - // Add the prefix. - prefix := text[max(0, patch.Start2-padding):patch.Start2] - if len(prefix) != 0 { - patch.diffs = append([]Diff{Diff{DiffEqual, prefix}}, patch.diffs...) - } - // Add the suffix. - suffix := text[patch.Start2+patch.Length1 : min(len(text), patch.Start2+patch.Length1+padding)] - if len(suffix) != 0 { - patch.diffs = append(patch.diffs, Diff{DiffEqual, suffix}) - } - - // Roll back the start points. - patch.Start1 -= len(prefix) - patch.Start2 -= len(prefix) - // Extend the lengths. - patch.Length1 += len(prefix) + len(suffix) - patch.Length2 += len(prefix) + len(suffix) - - return patch -} - -// PatchMake computes a list of patches. 
-func (dmp *DiffMatchPatch) PatchMake(opt ...interface{}) []Patch {
-	if len(opt) == 1 {
-		diffs, _ := opt[0].([]Diff)
-		text1 := dmp.DiffText1(diffs)
-		return dmp.PatchMake(text1, diffs)
-	} else if len(opt) == 2 {
-		text1 := opt[0].(string)
-		switch t := opt[1].(type) {
-		case string:
-			diffs := dmp.DiffMain(text1, t, true)
-			if len(diffs) > 2 {
-				diffs = dmp.DiffCleanupSemantic(diffs)
-				diffs = dmp.DiffCleanupEfficiency(diffs)
-			}
-			return dmp.PatchMake(text1, diffs)
-		case []Diff:
-			return dmp.patchMake2(text1, t)
-		}
-	} else if len(opt) == 3 {
-		return dmp.PatchMake(opt[0], opt[2])
-	}
-	return []Patch{}
-}
-
-// patchMake2 computes a list of patches to turn text1 into text2.
-// text2 is not provided, diffs are the delta between text1 and text2.
-func (dmp *DiffMatchPatch) patchMake2(text1 string, diffs []Diff) []Patch {
-	// Check for null inputs not needed since null can't be passed in C#.
-	patches := []Patch{}
-	if len(diffs) == 0 {
-		return patches // Get rid of the null case.
-	}
-
-	patch := Patch{}
-	charCount1 := 0 // Number of characters into the text1 string.
-	charCount2 := 0 // Number of characters into the text2 string.
-	// Start with text1 (prepatchText) and apply the diffs until we arrive at text2 (postpatchText). We recreate the patches one by one to determine context info.
-	prepatchText := text1
-	postpatchText := text1
-
-	for i, aDiff := range diffs {
-		if len(patch.diffs) == 0 && aDiff.Type != DiffEqual {
-			// A new patch starts here.
-			patch.Start1 = charCount1
-			patch.Start2 = charCount2
-		}
-
-		switch aDiff.Type {
-		case DiffInsert:
-			patch.diffs = append(patch.diffs, aDiff)
-			patch.Length2 += len(aDiff.Text)
-			postpatchText = postpatchText[:charCount2] +
-				aDiff.Text + postpatchText[charCount2:]
-		case DiffDelete:
-			patch.Length1 += len(aDiff.Text)
-			patch.diffs = append(patch.diffs, aDiff)
-			postpatchText = postpatchText[:charCount2] + postpatchText[charCount2+len(aDiff.Text):]
-		case DiffEqual:
-			if len(aDiff.Text) <= 2*dmp.PatchMargin &&
-				len(patch.diffs) != 0 && i != len(diffs)-1 {
-				// Small equality inside a patch.
-				patch.diffs = append(patch.diffs, aDiff)
-				patch.Length1 += len(aDiff.Text)
-				patch.Length2 += len(aDiff.Text)
-			}
-			if len(aDiff.Text) >= 2*dmp.PatchMargin {
-				// Time for a new patch.
-				if len(patch.diffs) != 0 {
-					patch = dmp.PatchAddContext(patch, prepatchText)
-					patches = append(patches, patch)
-					patch = Patch{}
-					// Unlike Unidiff, our patch lists have a rolling context. http://code.google.com/p/google-diff-match-patch/wiki/Unidiff Update prepatch text & pos to reflect the application of the just completed patch.
-					prepatchText = postpatchText
-					charCount1 = charCount2
-				}
-			}
-		}
-
-		// Update the current character count.
-		if aDiff.Type != DiffInsert {
-			charCount1 += len(aDiff.Text)
-		}
-		if aDiff.Type != DiffDelete {
-			charCount2 += len(aDiff.Text)
-		}
-	}
-
-	// Pick up the leftover patch if not empty.
-	if len(patch.diffs) != 0 {
-		patch = dmp.PatchAddContext(patch, prepatchText)
-		patches = append(patches, patch)
-	}
-
-	return patches
-}
-
-// PatchDeepCopy returns an array that is identical to a given array of patches.
-func (dmp *DiffMatchPatch) PatchDeepCopy(patches []Patch) []Patch { - patchesCopy := []Patch{} - for _, aPatch := range patches { - patchCopy := Patch{} - for _, aDiff := range aPatch.diffs { - patchCopy.diffs = append(patchCopy.diffs, Diff{ - aDiff.Type, - aDiff.Text, - }) - } - patchCopy.Start1 = aPatch.Start1 - patchCopy.Start2 = aPatch.Start2 - patchCopy.Length1 = aPatch.Length1 - patchCopy.Length2 = aPatch.Length2 - patchesCopy = append(patchesCopy, patchCopy) - } - return patchesCopy -} - -// PatchApply merges a set of patches onto the text. Returns a patched text, as well as an array of true/false values indicating which patches were applied. -func (dmp *DiffMatchPatch) PatchApply(patches []Patch, text string) (string, []bool) { - if len(patches) == 0 { - return text, []bool{} - } - - // Deep copy the patches so that no changes are made to originals. - patches = dmp.PatchDeepCopy(patches) - - nullPadding := dmp.PatchAddPadding(patches) - text = nullPadding + text + nullPadding - patches = dmp.PatchSplitMax(patches) - - x := 0 - // delta keeps track of the offset between the expected and actual location of the previous patch. If there are patches expected at positions 10 and 20, but the first patch was found at 12, delta is 2 and the second patch has an effective expected position of 22. - delta := 0 - results := make([]bool, len(patches)) - for _, aPatch := range patches { - expectedLoc := aPatch.Start2 + delta - text1 := dmp.DiffText1(aPatch.diffs) - var startLoc int - endLoc := -1 - if len(text1) > dmp.MatchMaxBits { - // PatchSplitMax will only provide an oversized pattern in the case of a monster delete. - startLoc = dmp.MatchMain(text, text1[:dmp.MatchMaxBits], expectedLoc) - if startLoc != -1 { - endLoc = dmp.MatchMain(text, - text1[len(text1)-dmp.MatchMaxBits:], expectedLoc+len(text1)-dmp.MatchMaxBits) - if endLoc == -1 || startLoc >= endLoc { - // Can't find valid trailing context. Drop this patch. - startLoc = -1 - } - } - } else { - startLoc = dmp.MatchMain(text, text1, expectedLoc) - } - if startLoc == -1 { - // No match found. :( - results[x] = false - // Subtract the delta for this failed patch from subsequent patches. - delta -= aPatch.Length2 - aPatch.Length1 - } else { - // Found a match. :) - results[x] = true - delta = startLoc - expectedLoc - var text2 string - if endLoc == -1 { - text2 = text[startLoc:int(math.Min(float64(startLoc+len(text1)), float64(len(text))))] - } else { - text2 = text[startLoc:int(math.Min(float64(endLoc+dmp.MatchMaxBits), float64(len(text))))] - } - if text1 == text2 { - // Perfect match, just shove the Replacement text in. - text = text[:startLoc] + dmp.DiffText2(aPatch.diffs) + text[startLoc+len(text1):] - } else { - // Imperfect match. Run a diff to get a framework of equivalent indices. - diffs := dmp.DiffMain(text1, text2, false) - if len(text1) > dmp.MatchMaxBits && float64(dmp.DiffLevenshtein(diffs))/float64(len(text1)) > dmp.PatchDeleteThreshold { - // The end points match, but the content is unacceptably bad. 
- results[x] = false - } else { - diffs = dmp.DiffCleanupSemanticLossless(diffs) - index1 := 0 - for _, aDiff := range aPatch.diffs { - if aDiff.Type != DiffEqual { - index2 := dmp.DiffXIndex(diffs, index1) - if aDiff.Type == DiffInsert { - // Insertion - text = text[:startLoc+index2] + aDiff.Text + text[startLoc+index2:] - } else if aDiff.Type == DiffDelete { - // Deletion - startIndex := startLoc + index2 - text = text[:startIndex] + - text[startIndex+dmp.DiffXIndex(diffs, index1+len(aDiff.Text))-index2:] - } - } - if aDiff.Type != DiffDelete { - index1 += len(aDiff.Text) - } - } - } - } - } - x++ - } - // Strip the padding off. - text = text[len(nullPadding) : len(nullPadding)+(len(text)-2*len(nullPadding))] - return text, results -} - -// PatchAddPadding adds some padding on text start and end so that edges can match something. -// Intended to be called only from within patchApply. -func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string { - paddingLength := dmp.PatchMargin - nullPadding := "" - for x := 1; x <= paddingLength; x++ { - nullPadding += string(x) - } - - // Bump all the patches forward. - for i := range patches { - patches[i].Start1 += paddingLength - patches[i].Start2 += paddingLength - } - - // Add some padding on start of first diff. - if len(patches[0].diffs) == 0 || patches[0].diffs[0].Type != DiffEqual { - // Add nullPadding equality. - patches[0].diffs = append([]Diff{Diff{DiffEqual, nullPadding}}, patches[0].diffs...) - patches[0].Start1 -= paddingLength // Should be 0. - patches[0].Start2 -= paddingLength // Should be 0. - patches[0].Length1 += paddingLength - patches[0].Length2 += paddingLength - } else if paddingLength > len(patches[0].diffs[0].Text) { - // Grow first equality. - extraLength := paddingLength - len(patches[0].diffs[0].Text) - patches[0].diffs[0].Text = nullPadding[len(patches[0].diffs[0].Text):] + patches[0].diffs[0].Text - patches[0].Start1 -= extraLength - patches[0].Start2 -= extraLength - patches[0].Length1 += extraLength - patches[0].Length2 += extraLength - } - - // Add some padding on end of last diff. - last := len(patches) - 1 - if len(patches[last].diffs) == 0 || patches[last].diffs[len(patches[last].diffs)-1].Type != DiffEqual { - // Add nullPadding equality. - patches[last].diffs = append(patches[last].diffs, Diff{DiffEqual, nullPadding}) - patches[last].Length1 += paddingLength - patches[last].Length2 += paddingLength - } else if paddingLength > len(patches[last].diffs[len(patches[last].diffs)-1].Text) { - // Grow last equality. - lastDiff := patches[last].diffs[len(patches[last].diffs)-1] - extraLength := paddingLength - len(lastDiff.Text) - patches[last].diffs[len(patches[last].diffs)-1].Text += nullPadding[:extraLength] - patches[last].Length1 += extraLength - patches[last].Length2 += extraLength - } - - return nullPadding -} - -// PatchSplitMax looks through the patches and breaks up any which are longer than the maximum limit of the match algorithm. -// Intended to be called only from within patchApply. -func (dmp *DiffMatchPatch) PatchSplitMax(patches []Patch) []Patch { - patchSize := dmp.MatchMaxBits - for x := 0; x < len(patches); x++ { - if patches[x].Length1 <= patchSize { - continue - } - bigpatch := patches[x] - // Remove the big old patch. - patches = append(patches[:x], patches[x+1:]...) - x-- - - Start1 := bigpatch.Start1 - Start2 := bigpatch.Start2 - precontext := "" - for len(bigpatch.diffs) != 0 { - // Create one of several smaller patches. 
- patch := Patch{} - empty := true - patch.Start1 = Start1 - len(precontext) - patch.Start2 = Start2 - len(precontext) - if len(precontext) != 0 { - patch.Length1 = len(precontext) - patch.Length2 = len(precontext) - patch.diffs = append(patch.diffs, Diff{DiffEqual, precontext}) - } - for len(bigpatch.diffs) != 0 && patch.Length1 < patchSize-dmp.PatchMargin { - diffType := bigpatch.diffs[0].Type - diffText := bigpatch.diffs[0].Text - if diffType == DiffInsert { - // Insertions are harmless. - patch.Length2 += len(diffText) - Start2 += len(diffText) - patch.diffs = append(patch.diffs, bigpatch.diffs[0]) - bigpatch.diffs = bigpatch.diffs[1:] - empty = false - } else if diffType == DiffDelete && len(patch.diffs) == 1 && patch.diffs[0].Type == DiffEqual && len(diffText) > 2*patchSize { - // This is a large deletion. Let it pass in one chunk. - patch.Length1 += len(diffText) - Start1 += len(diffText) - empty = false - patch.diffs = append(patch.diffs, Diff{diffType, diffText}) - bigpatch.diffs = bigpatch.diffs[1:] - } else { - // Deletion or equality. Only take as much as we can stomach. - diffText = diffText[:min(len(diffText), patchSize-patch.Length1-dmp.PatchMargin)] - - patch.Length1 += len(diffText) - Start1 += len(diffText) - if diffType == DiffEqual { - patch.Length2 += len(diffText) - Start2 += len(diffText) - } else { - empty = false - } - patch.diffs = append(patch.diffs, Diff{diffType, diffText}) - if diffText == bigpatch.diffs[0].Text { - bigpatch.diffs = bigpatch.diffs[1:] - } else { - bigpatch.diffs[0].Text = - bigpatch.diffs[0].Text[len(diffText):] - } - } - } - // Compute the head context for the next patch. - precontext = dmp.DiffText2(patch.diffs) - precontext = precontext[max(0, len(precontext)-dmp.PatchMargin):] - - postcontext := "" - // Append the end context for this patch. - if len(dmp.DiffText1(bigpatch.diffs)) > dmp.PatchMargin { - postcontext = dmp.DiffText1(bigpatch.diffs)[:dmp.PatchMargin] - } else { - postcontext = dmp.DiffText1(bigpatch.diffs) - } - - if len(postcontext) != 0 { - patch.Length1 += len(postcontext) - patch.Length2 += len(postcontext) - if len(patch.diffs) != 0 && patch.diffs[len(patch.diffs)-1].Type == DiffEqual { - patch.diffs[len(patch.diffs)-1].Text += postcontext - } else { - patch.diffs = append(patch.diffs, Diff{DiffEqual, postcontext}) - } - } - if !empty { - x++ - patches = append(patches[:x], append([]Patch{patch}, patches[x:]...)...) - } - } - } - return patches -} - -// PatchToText takes a list of patches and returns a textual representation. -func (dmp *DiffMatchPatch) PatchToText(patches []Patch) string { - var text bytes.Buffer - for _, aPatch := range patches { - _, _ = text.WriteString(aPatch.String()) - } - return text.String() -} - -// PatchFromText parses a textual representation of patches and returns a List of Patch objects. 
-func (dmp *DiffMatchPatch) PatchFromText(textline string) ([]Patch, error) { - patches := []Patch{} - if len(textline) == 0 { - return patches, nil - } - text := strings.Split(textline, "\n") - textPointer := 0 - patchHeader := regexp.MustCompile("^@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@$") - - var patch Patch - var sign uint8 - var line string - for textPointer < len(text) { - - if !patchHeader.MatchString(text[textPointer]) { - return patches, errors.New("Invalid patch string: " + text[textPointer]) - } - - patch = Patch{} - m := patchHeader.FindStringSubmatch(text[textPointer]) - - patch.Start1, _ = strconv.Atoi(m[1]) - if len(m[2]) == 0 { - patch.Start1-- - patch.Length1 = 1 - } else if m[2] == "0" { - patch.Length1 = 0 - } else { - patch.Start1-- - patch.Length1, _ = strconv.Atoi(m[2]) - } - - patch.Start2, _ = strconv.Atoi(m[3]) - - if len(m[4]) == 0 { - patch.Start2-- - patch.Length2 = 1 - } else if m[4] == "0" { - patch.Length2 = 0 - } else { - patch.Start2-- - patch.Length2, _ = strconv.Atoi(m[4]) - } - textPointer++ - - for textPointer < len(text) { - if len(text[textPointer]) > 0 { - sign = text[textPointer][0] - } else { - textPointer++ - continue - } - - line = text[textPointer][1:] - line = strings.Replace(line, "+", "%2b", -1) - line, _ = url.QueryUnescape(line) - if sign == '-' { - // Deletion. - patch.diffs = append(patch.diffs, Diff{DiffDelete, line}) - } else if sign == '+' { - // Insertion. - patch.diffs = append(patch.diffs, Diff{DiffInsert, line}) - } else if sign == ' ' { - // Minor equality. - patch.diffs = append(patch.diffs, Diff{DiffEqual, line}) - } else if sign == '@' { - // Start of next patch. - break - } else { - // WTF? - return patches, errors.New("Invalid patch mode '" + string(sign) + "' in: " + string(line)) - } - textPointer++ - } - - patches = append(patches, patch) - } - return patches, nil -} diff --git a/awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go b/awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go deleted file mode 100644 index 265f29cc7e5..00000000000 --- a/awsproviderlint/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. -// https://github.com/sergi/go-diff -// See the included LICENSE file for license details. -// -// go-diff is a Go implementation of Google's Diff, Match, and Patch library -// Original library is Copyright (c) 2006 Google Inc. -// http://code.google.com/p/google-diff-match-patch/ - -package diffmatchpatch - -import ( - "strings" - "unicode/utf8" -) - -// unescaper unescapes selected chars for compatibility with JavaScript's encodeURI. -// In speed critical applications this could be dropped since the receiving application will certainly decode these fine. Note that this function is case-sensitive. Thus "%3F" would not be unescaped. But this is ok because it is only called with the output of HttpUtility.UrlEncode which returns lowercase hex. Example: "%3f" -> "?", "%24" -> "$", etc. -var unescaper = strings.NewReplacer( - "%21", "!", "%7E", "~", "%27", "'", - "%28", "(", "%29", ")", "%3B", ";", - "%2F", "/", "%3F", "?", "%3A", ":", - "%40", "@", "%26", "&", "%3D", "=", - "%2B", "+", "%24", "$", "%2C", ",", "%23", "#", "%2A", "*") - -// indexOf returns the first index of pattern in str, starting at str[i]. 
-func indexOf(str string, pattern string, i int) int { - if i > len(str)-1 { - return -1 - } - if i <= 0 { - return strings.Index(str, pattern) - } - ind := strings.Index(str[i:], pattern) - if ind == -1 { - return -1 - } - return ind + i -} - -// lastIndexOf returns the last index of pattern in str, starting at str[i]. -func lastIndexOf(str string, pattern string, i int) int { - if i < 0 { - return -1 - } - if i >= len(str) { - return strings.LastIndex(str, pattern) - } - _, size := utf8.DecodeRuneInString(str[i:]) - return strings.LastIndex(str[:i+size], pattern) -} - -// runesIndexOf returns the index of pattern in target, starting at target[i]. -func runesIndexOf(target, pattern []rune, i int) int { - if i > len(target)-1 { - return -1 - } - if i <= 0 { - return runesIndex(target, pattern) - } - ind := runesIndex(target[i:], pattern) - if ind == -1 { - return -1 - } - return ind + i -} - -func runesEqual(r1, r2 []rune) bool { - if len(r1) != len(r2) { - return false - } - for i, c := range r1 { - if c != r2[i] { - return false - } - } - return true -} - -// runesIndex is the equivalent of strings.Index for rune slices. -func runesIndex(r1, r2 []rune) int { - last := len(r1) - len(r2) - for i := 0; i <= last; i++ { - if runesEqual(r1[i:i+len(r2)], r2) { - return i - } - } - return -1 -} diff --git a/awsproviderlint/vendor/github.com/xanzy/ssh-agent/.gitignore b/awsproviderlint/vendor/github.com/xanzy/ssh-agent/.gitignore deleted file mode 100644 index daf913b1b34..00000000000 --- a/awsproviderlint/vendor/github.com/xanzy/ssh-agent/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/awsproviderlint/vendor/github.com/xanzy/ssh-agent/LICENSE b/awsproviderlint/vendor/github.com/xanzy/ssh-agent/LICENSE deleted file mode 100644 index 8f71f43fee3..00000000000 --- a/awsproviderlint/vendor/github.com/xanzy/ssh-agent/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/awsproviderlint/vendor/github.com/xanzy/ssh-agent/README.md b/awsproviderlint/vendor/github.com/xanzy/ssh-agent/README.md deleted file mode 100644 index d93af40a0c2..00000000000 --- a/awsproviderlint/vendor/github.com/xanzy/ssh-agent/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# ssh-agent - -Create a new [agent.Agent](https://godoc.org/golang.org/x/crypto/ssh/agent#Agent) on any type of OS (so including Windows) from any [Go](https://golang.org) application. - -## Limitations - -When compiled for Windows, it will only support [Pageant](http://the.earth.li/~sgtatham/putty/0.66/htmldoc/Chapter9.html#pageant) as the SSH authentication agent. - -## Credits - -Big thanks to [Давид Мзареулян (David Mzareulyan)](https://github.com/davidmz) for creating the [go-pageant](https://github.com/davidmz/go-pageant) package! 
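A minimal usage sketch for this package, assuming only the `New` API shown in the sources deleted below and a running agent; the key-listing call is the standard `agent.Agent` interface from golang.org/x/crypto:

```go
package main

import (
	"fmt"
	"log"

	sshagent "github.com/xanzy/ssh-agent"
)

func main() {
	// New picks the right transport for the OS: a unix socket via
	// SSH_AUTH_SOCK, or Pageant's shared-memory protocol on Windows.
	ag, conn, err := sshagent.New()
	if err != nil {
		log.Fatal(err)
	}
	if conn != nil {
		defer conn.Close()
	}
	keys, err := ag.List()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("agent holds %d key(s)\n", len(keys))
}
```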
-
-## Issues
-
-If you have an issue: report it on the [issue tracker](https://github.com/xanzy/ssh-agent/issues)
-
-## Author
-
-Sander van Harmelen
-
-## License
-
-The files `pageant_windows.go` and `sshagent_windows.go` have their own license (see file headers). The rest of this package is licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at <http://www.apache.org/licenses/LICENSE-2.0>
diff --git a/awsproviderlint/vendor/github.com/xanzy/ssh-agent/go.mod b/awsproviderlint/vendor/github.com/xanzy/ssh-agent/go.mod
deleted file mode 100644
index 6664c4888e2..00000000000
--- a/awsproviderlint/vendor/github.com/xanzy/ssh-agent/go.mod
+++ /dev/null
@@ -1,6 +0,0 @@
-module github.com/xanzy/ssh-agent
-
-require (
-	golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2
-	golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0 // indirect
-)
diff --git a/awsproviderlint/vendor/github.com/xanzy/ssh-agent/go.sum b/awsproviderlint/vendor/github.com/xanzy/ssh-agent/go.sum
deleted file mode 100644
index a9a0016921b..00000000000
--- a/awsproviderlint/vendor/github.com/xanzy/ssh-agent/go.sum
+++ /dev/null
@@ -1,4 +0,0 @@
-golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2 h1:NwxKRvbkH5MsNkvOtPZi3/3kmI8CAzs3mtv+GLQMkNo=
-golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0 h1:bzeyCHgoAyjZjAhvTpks+qM7sdlh4cCSitmXeCEO3B4=
-golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/awsproviderlint/vendor/github.com/xanzy/ssh-agent/pageant_windows.go b/awsproviderlint/vendor/github.com/xanzy/ssh-agent/pageant_windows.go
deleted file mode 100644
index 62956079663..00000000000
--- a/awsproviderlint/vendor/github.com/xanzy/ssh-agent/pageant_windows.go
+++ /dev/null
@@ -1,146 +0,0 @@
-//
-// Copyright (c) 2014 David Mzareulyan
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy of this software
-// and associated documentation files (the "Software"), to deal in the Software without restriction,
-// including without limitation the rights to use, copy, modify, merge, publish, distribute,
-// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
-// is furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all copies or substantial
-// portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
-// BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
-// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-// - -// +build windows - -package sshagent - -// see https://github.com/Yasushi/putty/blob/master/windows/winpgntc.c#L155 -// see https://github.com/paramiko/paramiko/blob/master/paramiko/win_pageant.py - -import ( - "encoding/binary" - "errors" - "fmt" - "sync" - "syscall" - "unsafe" -) - -// Maximum size of message can be sent to pageant -const MaxMessageLen = 8192 - -var ( - ErrPageantNotFound = errors.New("pageant process not found") - ErrSendMessage = errors.New("error sending message") - - ErrMessageTooLong = errors.New("message too long") - ErrInvalidMessageFormat = errors.New("invalid message format") - ErrResponseTooLong = errors.New("response too long") -) - -const ( - agentCopydataID = 0x804e50ba - wmCopydata = 74 -) - -type copyData struct { - dwData uintptr - cbData uint32 - lpData unsafe.Pointer -} - -var ( - lock sync.Mutex - - winFindWindow = winAPI("user32.dll", "FindWindowW") - winGetCurrentThreadID = winAPI("kernel32.dll", "GetCurrentThreadId") - winSendMessage = winAPI("user32.dll", "SendMessageW") -) - -func winAPI(dllName, funcName string) func(...uintptr) (uintptr, uintptr, error) { - proc := syscall.MustLoadDLL(dllName).MustFindProc(funcName) - return func(a ...uintptr) (uintptr, uintptr, error) { return proc.Call(a...) } -} - -// Available returns true if Pageant is running -func Available() bool { return pageantWindow() != 0 } - -// Query sends message msg to Pageant and returns response or error. -// 'msg' is raw agent request with length prefix -// Response is raw agent response with length prefix -func query(msg []byte) ([]byte, error) { - if len(msg) > MaxMessageLen { - return nil, ErrMessageTooLong - } - - msgLen := binary.BigEndian.Uint32(msg[:4]) - if len(msg) != int(msgLen)+4 { - return nil, ErrInvalidMessageFormat - } - - lock.Lock() - defer lock.Unlock() - - paWin := pageantWindow() - - if paWin == 0 { - return nil, ErrPageantNotFound - } - - thID, _, _ := winGetCurrentThreadID() - mapName := fmt.Sprintf("PageantRequest%08x", thID) - pMapName, _ := syscall.UTF16PtrFromString(mapName) - - mmap, err := syscall.CreateFileMapping(syscall.InvalidHandle, nil, syscall.PAGE_READWRITE, 0, MaxMessageLen+4, pMapName) - if err != nil { - return nil, err - } - defer syscall.CloseHandle(mmap) - - ptr, err := syscall.MapViewOfFile(mmap, syscall.FILE_MAP_WRITE, 0, 0, 0) - if err != nil { - return nil, err - } - defer syscall.UnmapViewOfFile(ptr) - - mmSlice := (*(*[MaxMessageLen]byte)(unsafe.Pointer(ptr)))[:] - - copy(mmSlice, msg) - - mapNameBytesZ := append([]byte(mapName), 0) - - cds := copyData{ - dwData: agentCopydataID, - cbData: uint32(len(mapNameBytesZ)), - lpData: unsafe.Pointer(&(mapNameBytesZ[0])), - } - - resp, _, _ := winSendMessage(paWin, wmCopydata, 0, uintptr(unsafe.Pointer(&cds))) - - if resp == 0 { - return nil, ErrSendMessage - } - - respLen := binary.BigEndian.Uint32(mmSlice[:4]) - if respLen > MaxMessageLen-4 { - return nil, ErrResponseTooLong - } - - respData := make([]byte, respLen+4) - copy(respData, mmSlice) - - return respData, nil -} - -func pageantWindow() uintptr { - nameP, _ := syscall.UTF16PtrFromString("Pageant") - h, _, _ := winFindWindow(uintptr(unsafe.Pointer(nameP)), uintptr(unsafe.Pointer(nameP))) - return h -} diff --git a/awsproviderlint/vendor/github.com/xanzy/ssh-agent/sshagent.go b/awsproviderlint/vendor/github.com/xanzy/ssh-agent/sshagent.go deleted file mode 100644 index 259fea2b63d..00000000000 --- a/awsproviderlint/vendor/github.com/xanzy/ssh-agent/sshagent.go +++ /dev/null @@ -1,49 +0,0 @@ -// -// Copyright 2015, Sander 
van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// +build !windows - -package sshagent - -import ( - "errors" - "fmt" - "net" - "os" - - "golang.org/x/crypto/ssh/agent" -) - -// New returns a new agent.Agent that uses a unix socket -func New() (agent.Agent, net.Conn, error) { - if !Available() { - return nil, nil, errors.New("SSH agent requested but SSH_AUTH_SOCK not-specified") - } - - sshAuthSock := os.Getenv("SSH_AUTH_SOCK") - - conn, err := net.Dial("unix", sshAuthSock) - if err != nil { - return nil, nil, fmt.Errorf("Error connecting to SSH_AUTH_SOCK: %v", err) - } - - return agent.NewClient(conn), conn, nil -} - -// Available returns true is a auth socket is defined -func Available() bool { - return os.Getenv("SSH_AUTH_SOCK") != "" -} diff --git a/awsproviderlint/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go b/awsproviderlint/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go deleted file mode 100644 index c46710e88e4..00000000000 --- a/awsproviderlint/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go +++ /dev/null @@ -1,80 +0,0 @@ -// -// Copyright (c) 2014 David Mzareulyan -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of this software -// and associated documentation files (the "Software"), to deal in the Software without restriction, -// including without limitation the rights to use, copy, modify, merge, publish, distribute, -// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all copies or substantial -// portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING -// BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -// - -// +build windows - -package sshagent - -import ( - "errors" - "io" - "net" - "sync" - - "golang.org/x/crypto/ssh/agent" -) - -// New returns a new agent.Agent and the (custom) connection it uses -// to communicate with a running pagent.exe instance (see README.md) -func New() (agent.Agent, net.Conn, error) { - if !Available() { - return nil, nil, errors.New("SSH agent requested but Pageant not running") - } - - return agent.NewClient(&conn{}), nil, nil -} - -type conn struct { - sync.Mutex - buf []byte -} - -func (c *conn) Close() { - c.Lock() - defer c.Unlock() - c.buf = nil -} - -func (c *conn) Write(p []byte) (int, error) { - c.Lock() - defer c.Unlock() - - resp, err := query(p) - if err != nil { - return 0, err - } - - c.buf = append(c.buf, resp...) 
- - return len(p), nil -} - -func (c *conn) Read(p []byte) (int, error) { - c.Lock() - defer c.Unlock() - - if len(c.buf) == 0 { - return 0, io.EOF - } - - n := copy(p, c.buf) - c.buf = c.buf[n:] - - return n, nil -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/blowfish/block.go b/awsproviderlint/vendor/golang.org/x/crypto/blowfish/block.go deleted file mode 100644 index 9d80f19521b..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/blowfish/block.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blowfish - -// getNextWord returns the next big-endian uint32 value from the byte slice -// at the given position in a circular manner, updating the position. -func getNextWord(b []byte, pos *int) uint32 { - var w uint32 - j := *pos - for i := 0; i < 4; i++ { - w = w<<8 | uint32(b[j]) - j++ - if j >= len(b) { - j = 0 - } - } - *pos = j - return w -} - -// ExpandKey performs a key expansion on the given *Cipher. Specifically, it -// performs the Blowfish algorithm's key schedule which sets up the *Cipher's -// pi and substitution tables for calls to Encrypt. This is used, primarily, -// by the bcrypt package to reuse the Blowfish key schedule during its -// set up. It's unlikely that you need to use this directly. -func ExpandKey(key []byte, c *Cipher) { - j := 0 - for i := 0; i < 18; i++ { - // Using inlined getNextWord for performance. - var d uint32 - for k := 0; k < 4; k++ { - d = d<<8 | uint32(key[j]) - j++ - if j >= len(key) { - j = 0 - } - } - c.p[i] ^= d - } - - var l, r uint32 - for i := 0; i < 18; i += 2 { - l, r = encryptBlock(l, r, c) - c.p[i], c.p[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s0[i], c.s0[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s1[i], c.s1[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s2[i], c.s2[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s3[i], c.s3[i+1] = l, r - } -} - -// This is similar to ExpandKey, but folds the salt during the key -// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero -// salt passed in, reusing ExpandKey turns out to be a place of inefficiency -// and specializing it here is useful. 
-func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { - j := 0 - for i := 0; i < 18; i++ { - c.p[i] ^= getNextWord(key, &j) - } - - j = 0 - var l, r uint32 - for i := 0; i < 18; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.p[i], c.p[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s0[i], c.s0[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s1[i], c.s1[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s2[i], c.s2[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s3[i], c.s3[i+1] = l, r - } -} - -func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { - xl, xr := l, r - xl ^= c.p[0] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] - xr ^= c.p[17] - return xr, xl -} - -func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { - xl, xr := l, r - xl ^= c.p[17] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] - xr ^= 
((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] - xr ^= c.p[0] - return xr, xl -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/blowfish/cipher.go b/awsproviderlint/vendor/golang.org/x/crypto/blowfish/cipher.go deleted file mode 100644 index 213bf204afe..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/blowfish/cipher.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. -// -// Blowfish is a legacy cipher and its short block size makes it vulnerable to -// birthday bound attacks (see https://sweet32.info). It should only be used -// where compatibility with legacy systems, not security, is the goal. -// -// Deprecated: any new system should use AES (from crypto/aes, if necessary in -// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from -// golang.org/x/crypto/chacha20poly1305). -package blowfish // import "golang.org/x/crypto/blowfish" - -// The code is a port of Bruce Schneier's C implementation. -// See https://www.schneier.com/blowfish.html. - -import "strconv" - -// The Blowfish block size in bytes. -const BlockSize = 8 - -// A Cipher is an instance of Blowfish encryption using a particular key. -type Cipher struct { - p [18]uint32 - s0, s1, s2, s3 [256]uint32 -} - -type KeySizeError int - -func (k KeySizeError) Error() string { - return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k)) -} - -// NewCipher creates and returns a Cipher. -// The key argument should be the Blowfish key, from 1 to 56 bytes. -func NewCipher(key []byte) (*Cipher, error) { - var result Cipher - if k := len(key); k < 1 || k > 56 { - return nil, KeySizeError(k) - } - initCipher(&result) - ExpandKey(key, &result) - return &result, nil -} - -// NewSaltedCipher creates a returns a Cipher that folds a salt into its key -// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is -// sufficient and desirable. For bcrypt compatibility, the key can be over 56 -// bytes. -func NewSaltedCipher(key, salt []byte) (*Cipher, error) { - if len(salt) == 0 { - return NewCipher(key) - } - var result Cipher - if k := len(key); k < 1 { - return nil, KeySizeError(k) - } - initCipher(&result) - expandKeyWithSalt(key, salt, &result) - return &result, nil -} - -// BlockSize returns the Blowfish block size, 8 bytes. -// It is necessary to satisfy the Block interface in the -// package "crypto/cipher". -func (c *Cipher) BlockSize() int { return BlockSize } - -// Encrypt encrypts the 8-byte buffer src using the key k -// and stores the result in dst. 
-// Note that for amounts of data larger than a block, -// it is not safe to just call Encrypt on successive blocks; -// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). -func (c *Cipher) Encrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - l, r = encryptBlock(l, r, c) - dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) - dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) -} - -// Decrypt decrypts the 8-byte buffer src using the key k -// and stores the result in dst. -func (c *Cipher) Decrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - l, r = decryptBlock(l, r, c) - dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) - dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) -} - -func initCipher(c *Cipher) { - copy(c.p[0:], p[0:]) - copy(c.s0[0:], s0[0:]) - copy(c.s1[0:], s1[0:]) - copy(c.s2[0:], s2[0:]) - copy(c.s3[0:], s3[0:]) -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/blowfish/const.go b/awsproviderlint/vendor/golang.org/x/crypto/blowfish/const.go deleted file mode 100644 index d04077595ab..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/blowfish/const.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// The startup permutation array and substitution boxes. -// They are the hexadecimal digits of PI; see: -// https://www.schneier.com/code/constants.txt. 
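The `Encrypt` documentation above warns against calling the raw block operation on successive blocks; a minimal sketch of the suggested alternative, wrapping the cipher in `crypto/cipher`'s CBC mode (the key, IV handling, and message here are illustrative, not a hardened design):

```go
package main

import (
	"crypto/cipher"
	"crypto/rand"
	"fmt"
	"io"

	"golang.org/x/crypto/blowfish"
)

func main() {
	key := []byte("illustrative key") // NewCipher accepts 1 to 56 bytes
	block, err := blowfish.NewCipher(key)
	if err != nil {
		panic(err)
	}

	// CBC requires the input length to be a multiple of the 8-byte block size.
	plaintext := []byte("16 byte messages")
	iv := make([]byte, blowfish.BlockSize)
	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
		panic(err)
	}

	ciphertext := make([]byte, len(plaintext))
	cipher.NewCBCEncrypter(block, iv).CryptBlocks(ciphertext, plaintext)

	recovered := make([]byte, len(ciphertext))
	cipher.NewCBCDecrypter(block, iv).CryptBlocks(recovered, ciphertext)
	fmt.Printf("%s\n", recovered) // prints the original plaintext
}
```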
- -package blowfish - -var s0 = [256]uint32{ - 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, - 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, - 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, - 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, - 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, - 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, - 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, - 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, - 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, - 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, - 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, - 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, - 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, - 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, - 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, - 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, - 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, - 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, - 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, - 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, - 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, - 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, - 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, - 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, - 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, - 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, - 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, - 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, - 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, - 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, - 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, - 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, - 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, - 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, - 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, - 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, - 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, - 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, - 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, - 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, - 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, - 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, - 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, -} - -var s1 = [256]uint32{ - 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, - 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, - 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, - 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, - 0x4cdd2086, 0x8470eb26, 
0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, - 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, - 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, - 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, - 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, - 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, - 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, - 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, - 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, - 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, - 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, - 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, - 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, - 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, - 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, - 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, - 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, - 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, - 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, - 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, - 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, - 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, - 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, - 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, - 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, - 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, - 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, - 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, - 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, - 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, - 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, - 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, - 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, - 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, - 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, - 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, - 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, - 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, - 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, -} - -var s2 = [256]uint32{ - 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, - 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, - 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, - 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, - 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, - 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, - 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, - 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, - 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, - 0x6841e7f7, 
0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, - 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, - 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, - 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, - 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, - 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, - 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, - 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, - 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, - 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, - 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, - 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, - 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, - 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, - 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, - 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, - 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, - 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, - 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, - 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, - 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, - 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, - 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, - 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, - 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, - 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, - 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, - 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, - 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, - 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, - 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, - 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, - 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, - 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, -} - -var s3 = [256]uint32{ - 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, - 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, - 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, - 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, - 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, - 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, - 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, - 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, - 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, - 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, - 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, - 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, - 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, - 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, - 
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, - 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, - 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, - 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, - 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, - 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, - 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, - 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, - 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, - 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, - 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, - 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, - 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, - 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, - 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, - 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, - 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, - 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, - 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, - 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, - 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, - 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, - 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, - 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, - 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, - 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, - 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, - 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, - 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, -} - -var p = [18]uint32{ - 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, - 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, - 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go b/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go deleted file mode 100644 index b799e440b4a..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.11,!gccgo,!purego - -package chacha20 - -const bufSize = 256 - -//go:noescape -func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) - -func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { - xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter) -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s b/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s deleted file mode 100644 index 891481539a1..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.11,!gccgo,!purego - -#include "textflag.h" - -#define NUM_ROUNDS 10 - -// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) -TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 - MOVD dst+0(FP), R1 - MOVD src+24(FP), R2 - MOVD src_len+32(FP), R3 - MOVD key+48(FP), R4 - MOVD nonce+56(FP), R6 - MOVD counter+64(FP), R7 - - MOVD $·constants(SB), R10 - MOVD $·incRotMatrix(SB), R11 - - MOVW (R7), R20 - - AND $~255, R3, R13 - ADD R2, R13, R12 // R12 for block end - AND $255, R3, R13 -loop: - MOVD $NUM_ROUNDS, R21 - VLD1 (R11), [V30.S4, V31.S4] - - // load contants - // VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4] - WORD $0x4D60E940 - - // load keys - // VLD4R 16(R4), [V4.S4, V5.S4, V6.S4, V7.S4] - WORD $0x4DFFE884 - // VLD4R 16(R4), [V8.S4, V9.S4, V10.S4, V11.S4] - WORD $0x4DFFE888 - SUB $32, R4 - - // load counter + nonce - // VLD1R (R7), [V12.S4] - WORD $0x4D40C8EC - - // VLD3R (R6), [V13.S4, V14.S4, V15.S4] - WORD $0x4D40E8CD - - // update counter - VADD V30.S4, V12.S4, V12.S4 - -chacha: - // V0..V3 += V4..V7 - // V12..V15 <<<= ((V12..V15 XOR V0..V3), 16) - VADD V0.S4, V4.S4, V0.S4 - VADD V1.S4, V5.S4, V1.S4 - VADD V2.S4, V6.S4, V2.S4 - VADD V3.S4, V7.S4, V3.S4 - VEOR V12.B16, V0.B16, V12.B16 - VEOR V13.B16, V1.B16, V13.B16 - VEOR V14.B16, V2.B16, V14.B16 - VEOR V15.B16, V3.B16, V15.B16 - VREV32 V12.H8, V12.H8 - VREV32 V13.H8, V13.H8 - VREV32 V14.H8, V14.H8 - VREV32 V15.H8, V15.H8 - // V8..V11 += V12..V15 - // V4..V7 <<<= ((V4..V7 XOR V8..V11), 12) - VADD V8.S4, V12.S4, V8.S4 - VADD V9.S4, V13.S4, V9.S4 - VADD V10.S4, V14.S4, V10.S4 - VADD V11.S4, V15.S4, V11.S4 - VEOR V8.B16, V4.B16, V16.B16 - VEOR V9.B16, V5.B16, V17.B16 - VEOR V10.B16, V6.B16, V18.B16 - VEOR V11.B16, V7.B16, V19.B16 - VSHL $12, V16.S4, V4.S4 - VSHL $12, V17.S4, V5.S4 - VSHL $12, V18.S4, V6.S4 - VSHL $12, V19.S4, V7.S4 - VSRI $20, V16.S4, V4.S4 - VSRI $20, V17.S4, V5.S4 - VSRI $20, V18.S4, V6.S4 - VSRI $20, V19.S4, V7.S4 - - // V0..V3 += V4..V7 - // V12..V15 <<<= ((V12..V15 XOR V0..V3), 8) - VADD V0.S4, V4.S4, V0.S4 - VADD V1.S4, V5.S4, V1.S4 - VADD V2.S4, V6.S4, V2.S4 - VADD V3.S4, V7.S4, V3.S4 - VEOR V12.B16, V0.B16, V12.B16 - VEOR V13.B16, V1.B16, V13.B16 - VEOR V14.B16, V2.B16, V14.B16 - VEOR V15.B16, V3.B16, V15.B16 - VTBL V31.B16, [V12.B16], V12.B16 - VTBL V31.B16, [V13.B16], V13.B16 - VTBL V31.B16, [V14.B16], V14.B16 - VTBL V31.B16, [V15.B16], V15.B16 - - // V8..V11 += V12..V15 - // V4..V7 <<<= ((V4..V7 XOR V8..V11), 7) - VADD V12.S4, V8.S4, V8.S4 - VADD V13.S4, V9.S4, V9.S4 - VADD V14.S4, V10.S4, V10.S4 - VADD V15.S4, V11.S4, V11.S4 - VEOR V8.B16, V4.B16, V16.B16 - VEOR V9.B16, V5.B16, V17.B16 - VEOR V10.B16, V6.B16, V18.B16 - VEOR V11.B16, V7.B16, V19.B16 - VSHL $7, V16.S4, V4.S4 - VSHL $7, V17.S4, V5.S4 - VSHL $7, V18.S4, V6.S4 - VSHL $7, V19.S4, V7.S4 - VSRI $25, V16.S4, V4.S4 - VSRI $25, V17.S4, V5.S4 - VSRI $25, V18.S4, V6.S4 - VSRI $25, V19.S4, V7.S4 - - // V0..V3 += V5..V7, V4 - // V15,V12-V14 <<<= ((V15,V12-V14 XOR V0..V3), 16) - VADD V0.S4, V5.S4, V0.S4 - VADD V1.S4, V6.S4, V1.S4 - VADD V2.S4, V7.S4, V2.S4 - VADD V3.S4, V4.S4, V3.S4 - VEOR V15.B16, V0.B16, V15.B16 - VEOR V12.B16, V1.B16, V12.B16 - VEOR V13.B16, V2.B16, V13.B16 - VEOR V14.B16, V3.B16, V14.B16 - VREV32 V12.H8, V12.H8 - VREV32 V13.H8, V13.H8 - VREV32 V14.H8, V14.H8 - VREV32 V15.H8, V15.H8 - - // V10 += V15; V5 <<<= ((V10 XOR V5), 12) - // ... 
- VADD V15.S4, V10.S4, V10.S4 - VADD V12.S4, V11.S4, V11.S4 - VADD V13.S4, V8.S4, V8.S4 - VADD V14.S4, V9.S4, V9.S4 - VEOR V10.B16, V5.B16, V16.B16 - VEOR V11.B16, V6.B16, V17.B16 - VEOR V8.B16, V7.B16, V18.B16 - VEOR V9.B16, V4.B16, V19.B16 - VSHL $12, V16.S4, V5.S4 - VSHL $12, V17.S4, V6.S4 - VSHL $12, V18.S4, V7.S4 - VSHL $12, V19.S4, V4.S4 - VSRI $20, V16.S4, V5.S4 - VSRI $20, V17.S4, V6.S4 - VSRI $20, V18.S4, V7.S4 - VSRI $20, V19.S4, V4.S4 - - // V0 += V5; V15 <<<= ((V0 XOR V15), 8) - // ... - VADD V5.S4, V0.S4, V0.S4 - VADD V6.S4, V1.S4, V1.S4 - VADD V7.S4, V2.S4, V2.S4 - VADD V4.S4, V3.S4, V3.S4 - VEOR V0.B16, V15.B16, V15.B16 - VEOR V1.B16, V12.B16, V12.B16 - VEOR V2.B16, V13.B16, V13.B16 - VEOR V3.B16, V14.B16, V14.B16 - VTBL V31.B16, [V12.B16], V12.B16 - VTBL V31.B16, [V13.B16], V13.B16 - VTBL V31.B16, [V14.B16], V14.B16 - VTBL V31.B16, [V15.B16], V15.B16 - - // V10 += V15; V5 <<<= ((V10 XOR V5), 7) - // ... - VADD V15.S4, V10.S4, V10.S4 - VADD V12.S4, V11.S4, V11.S4 - VADD V13.S4, V8.S4, V8.S4 - VADD V14.S4, V9.S4, V9.S4 - VEOR V10.B16, V5.B16, V16.B16 - VEOR V11.B16, V6.B16, V17.B16 - VEOR V8.B16, V7.B16, V18.B16 - VEOR V9.B16, V4.B16, V19.B16 - VSHL $7, V16.S4, V5.S4 - VSHL $7, V17.S4, V6.S4 - VSHL $7, V18.S4, V7.S4 - VSHL $7, V19.S4, V4.S4 - VSRI $25, V16.S4, V5.S4 - VSRI $25, V17.S4, V6.S4 - VSRI $25, V18.S4, V7.S4 - VSRI $25, V19.S4, V4.S4 - - SUB $1, R21 - CBNZ R21, chacha - - // VLD4R (R10), [V16.S4, V17.S4, V18.S4, V19.S4] - WORD $0x4D60E950 - - // VLD4R 16(R4), [V20.S4, V21.S4, V22.S4, V23.S4] - WORD $0x4DFFE894 - VADD V30.S4, V12.S4, V12.S4 - VADD V16.S4, V0.S4, V0.S4 - VADD V17.S4, V1.S4, V1.S4 - VADD V18.S4, V2.S4, V2.S4 - VADD V19.S4, V3.S4, V3.S4 - // VLD4R 16(R4), [V24.S4, V25.S4, V26.S4, V27.S4] - WORD $0x4DFFE898 - // restore R4 - SUB $32, R4 - - // load counter + nonce - // VLD1R (R7), [V28.S4] - WORD $0x4D40C8FC - // VLD3R (R6), [V29.S4, V30.S4, V31.S4] - WORD $0x4D40E8DD - - VADD V20.S4, V4.S4, V4.S4 - VADD V21.S4, V5.S4, V5.S4 - VADD V22.S4, V6.S4, V6.S4 - VADD V23.S4, V7.S4, V7.S4 - VADD V24.S4, V8.S4, V8.S4 - VADD V25.S4, V9.S4, V9.S4 - VADD V26.S4, V10.S4, V10.S4 - VADD V27.S4, V11.S4, V11.S4 - VADD V28.S4, V12.S4, V12.S4 - VADD V29.S4, V13.S4, V13.S4 - VADD V30.S4, V14.S4, V14.S4 - VADD V31.S4, V15.S4, V15.S4 - - VZIP1 V1.S4, V0.S4, V16.S4 - VZIP2 V1.S4, V0.S4, V17.S4 - VZIP1 V3.S4, V2.S4, V18.S4 - VZIP2 V3.S4, V2.S4, V19.S4 - VZIP1 V5.S4, V4.S4, V20.S4 - VZIP2 V5.S4, V4.S4, V21.S4 - VZIP1 V7.S4, V6.S4, V22.S4 - VZIP2 V7.S4, V6.S4, V23.S4 - VZIP1 V9.S4, V8.S4, V24.S4 - VZIP2 V9.S4, V8.S4, V25.S4 - VZIP1 V11.S4, V10.S4, V26.S4 - VZIP2 V11.S4, V10.S4, V27.S4 - VZIP1 V13.S4, V12.S4, V28.S4 - VZIP2 V13.S4, V12.S4, V29.S4 - VZIP1 V15.S4, V14.S4, V30.S4 - VZIP2 V15.S4, V14.S4, V31.S4 - VZIP1 V18.D2, V16.D2, V0.D2 - VZIP2 V18.D2, V16.D2, V4.D2 - VZIP1 V19.D2, V17.D2, V8.D2 - VZIP2 V19.D2, V17.D2, V12.D2 - VLD1.P 64(R2), [V16.B16, V17.B16, V18.B16, V19.B16] - - VZIP1 V22.D2, V20.D2, V1.D2 - VZIP2 V22.D2, V20.D2, V5.D2 - VZIP1 V23.D2, V21.D2, V9.D2 - VZIP2 V23.D2, V21.D2, V13.D2 - VLD1.P 64(R2), [V20.B16, V21.B16, V22.B16, V23.B16] - VZIP1 V26.D2, V24.D2, V2.D2 - VZIP2 V26.D2, V24.D2, V6.D2 - VZIP1 V27.D2, V25.D2, V10.D2 - VZIP2 V27.D2, V25.D2, V14.D2 - VLD1.P 64(R2), [V24.B16, V25.B16, V26.B16, V27.B16] - VZIP1 V30.D2, V28.D2, V3.D2 - VZIP2 V30.D2, V28.D2, V7.D2 - VZIP1 V31.D2, V29.D2, V11.D2 - VZIP2 V31.D2, V29.D2, V15.D2 - VLD1.P 64(R2), [V28.B16, V29.B16, V30.B16, V31.B16] - VEOR V0.B16, V16.B16, V16.B16 - VEOR V1.B16, V17.B16, V17.B16 - VEOR V2.B16, 
V18.B16, V18.B16 - VEOR V3.B16, V19.B16, V19.B16 - VST1.P [V16.B16, V17.B16, V18.B16, V19.B16], 64(R1) - VEOR V4.B16, V20.B16, V20.B16 - VEOR V5.B16, V21.B16, V21.B16 - VEOR V6.B16, V22.B16, V22.B16 - VEOR V7.B16, V23.B16, V23.B16 - VST1.P [V20.B16, V21.B16, V22.B16, V23.B16], 64(R1) - VEOR V8.B16, V24.B16, V24.B16 - VEOR V9.B16, V25.B16, V25.B16 - VEOR V10.B16, V26.B16, V26.B16 - VEOR V11.B16, V27.B16, V27.B16 - VST1.P [V24.B16, V25.B16, V26.B16, V27.B16], 64(R1) - VEOR V12.B16, V28.B16, V28.B16 - VEOR V13.B16, V29.B16, V29.B16 - VEOR V14.B16, V30.B16, V30.B16 - VEOR V15.B16, V31.B16, V31.B16 - VST1.P [V28.B16, V29.B16, V30.B16, V31.B16], 64(R1) - - ADD $4, R20 - MOVW R20, (R7) // update counter - - CMP R2, R12 - BGT loop - - RET - - -DATA ·constants+0x00(SB)/4, $0x61707865 -DATA ·constants+0x04(SB)/4, $0x3320646e -DATA ·constants+0x08(SB)/4, $0x79622d32 -DATA ·constants+0x0c(SB)/4, $0x6b206574 -GLOBL ·constants(SB), NOPTR|RODATA, $32 - -DATA ·incRotMatrix+0x00(SB)/4, $0x00000000 -DATA ·incRotMatrix+0x04(SB)/4, $0x00000001 -DATA ·incRotMatrix+0x08(SB)/4, $0x00000002 -DATA ·incRotMatrix+0x0c(SB)/4, $0x00000003 -DATA ·incRotMatrix+0x10(SB)/4, $0x02010003 -DATA ·incRotMatrix+0x14(SB)/4, $0x06050407 -DATA ·incRotMatrix+0x18(SB)/4, $0x0A09080B -DATA ·incRotMatrix+0x1c(SB)/4, $0x0E0D0C0F -GLOBL ·incRotMatrix(SB), NOPTR|RODATA, $32 diff --git a/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_generic.go b/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_generic.go deleted file mode 100644 index a2ecf5c325b..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_generic.go +++ /dev/null @@ -1,398 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package chacha20 implements the ChaCha20 and XChaCha20 encryption algorithms -// as specified in RFC 8439 and draft-irtf-cfrg-xchacha-01. -package chacha20 - -import ( - "crypto/cipher" - "encoding/binary" - "errors" - "math/bits" - - "golang.org/x/crypto/internal/subtle" -) - -const ( - // KeySize is the size of the key used by this cipher, in bytes. - KeySize = 32 - - // NonceSize is the size of the nonce used with the standard variant of this - // cipher, in bytes. - // - // Note that this is too short to be safely generated at random if the same - // key is reused more than 2³² times. - NonceSize = 12 - - // NonceSizeX is the size of the nonce used with the XChaCha20 variant of - // this cipher, in bytes. - NonceSizeX = 24 -) - -// Cipher is a stateful instance of ChaCha20 or XChaCha20 using a particular key -// and nonce. A *Cipher implements the cipher.Stream interface. -type Cipher struct { - // The ChaCha20 state is 16 words: 4 constant, 8 of key, 1 of counter - // (incremented after each block), and 3 of nonce. - key [8]uint32 - counter uint32 - nonce [3]uint32 - - // The last len bytes of buf are leftover key stream bytes from the previous - // XORKeyStream invocation. The size of buf depends on how many blocks are - // computed at a time by xorKeyStreamBlocks. - buf [bufSize]byte - len int - - // overflow is set when the counter overflowed, no more blocks can be - // generated, and the next XORKeyStream call should panic. - overflow bool - - // The counter-independent results of the first round are cached after they - // are computed the first time. 
- precompDone bool - p1, p5, p9, p13 uint32 - p2, p6, p10, p14 uint32 - p3, p7, p11, p15 uint32 -} - -var _ cipher.Stream = (*Cipher)(nil) - -// NewUnauthenticatedCipher creates a new ChaCha20 stream cipher with the given -// 32 bytes key and a 12 or 24 bytes nonce. If a nonce of 24 bytes is provided, -// the XChaCha20 construction will be used. It returns an error if key or nonce -// have any other length. -// -// Note that ChaCha20, like all stream ciphers, is not authenticated and allows -// attackers to silently tamper with the plaintext. For this reason, it is more -// appropriate as a building block than as a standalone encryption mechanism. -// Instead, consider using package golang.org/x/crypto/chacha20poly1305. -func NewUnauthenticatedCipher(key, nonce []byte) (*Cipher, error) { - // This function is split into a wrapper so that the Cipher allocation will - // be inlined, and depending on how the caller uses the return value, won't - // escape to the heap. - c := &Cipher{} - return newUnauthenticatedCipher(c, key, nonce) -} - -func newUnauthenticatedCipher(c *Cipher, key, nonce []byte) (*Cipher, error) { - if len(key) != KeySize { - return nil, errors.New("chacha20: wrong key size") - } - if len(nonce) == NonceSizeX { - // XChaCha20 uses the ChaCha20 core to mix 16 bytes of the nonce into a - // derived key, allowing it to operate on a nonce of 24 bytes. See - // draft-irtf-cfrg-xchacha-01, Section 2.3. - key, _ = HChaCha20(key, nonce[0:16]) - cNonce := make([]byte, NonceSize) - copy(cNonce[4:12], nonce[16:24]) - nonce = cNonce - } else if len(nonce) != NonceSize { - return nil, errors.New("chacha20: wrong nonce size") - } - - key, nonce = key[:KeySize], nonce[:NonceSize] // bounds check elimination hint - c.key = [8]uint32{ - binary.LittleEndian.Uint32(key[0:4]), - binary.LittleEndian.Uint32(key[4:8]), - binary.LittleEndian.Uint32(key[8:12]), - binary.LittleEndian.Uint32(key[12:16]), - binary.LittleEndian.Uint32(key[16:20]), - binary.LittleEndian.Uint32(key[20:24]), - binary.LittleEndian.Uint32(key[24:28]), - binary.LittleEndian.Uint32(key[28:32]), - } - c.nonce = [3]uint32{ - binary.LittleEndian.Uint32(nonce[0:4]), - binary.LittleEndian.Uint32(nonce[4:8]), - binary.LittleEndian.Uint32(nonce[8:12]), - } - return c, nil -} - -// The constant first 4 words of the ChaCha20 state. -const ( - j0 uint32 = 0x61707865 // expa - j1 uint32 = 0x3320646e // nd 3 - j2 uint32 = 0x79622d32 // 2-by - j3 uint32 = 0x6b206574 // te k -) - -const blockSize = 64 - -// quarterRound is the core of ChaCha20. It shuffles the bits of 4 state words. -// It's executed 4 times for each of the 20 ChaCha20 rounds, operating on all 16 -// words each round, in columnar or diagonal groups of 4 at a time. -func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) { - a += b - d ^= a - d = bits.RotateLeft32(d, 16) - c += d - b ^= c - b = bits.RotateLeft32(b, 12) - a += b - d ^= a - d = bits.RotateLeft32(d, 8) - c += d - b ^= c - b = bits.RotateLeft32(b, 7) - return a, b, c, d -} - -// SetCounter sets the Cipher counter. The next invocation of XORKeyStream will -// behave as if (64 * counter) bytes had been encrypted so far. -// -// To prevent accidental counter reuse, SetCounter panics if counter is less -// than the current value. -// -// Note that the execution time of XORKeyStream is not independent of the -// counter value. -func (s *Cipher) SetCounter(counter uint32) { - // Internally, s may buffer multiple blocks, which complicates this - // implementation slightly. 
When checking whether the counter has rolled - // back, we must use both s.counter and s.len to determine how many blocks - // we have already output. - outputCounter := s.counter - uint32(s.len)/blockSize - if s.overflow || counter < outputCounter { - panic("chacha20: SetCounter attempted to rollback counter") - } - - // In the general case, we set the new counter value and reset s.len to 0, - // causing the next call to XORKeyStream to refill the buffer. However, if - // we're advancing within the existing buffer, we can save work by simply - // setting s.len. - if counter < s.counter { - s.len = int(s.counter-counter) * blockSize - } else { - s.counter = counter - s.len = 0 - } -} - -// XORKeyStream XORs each byte in the given slice with a byte from the -// cipher's key stream. Dst and src must overlap entirely or not at all. -// -// If len(dst) < len(src), XORKeyStream will panic. It is acceptable -// to pass a dst bigger than src, and in that case, XORKeyStream will -// only update dst[:len(src)] and will not touch the rest of dst. -// -// Multiple calls to XORKeyStream behave as if the concatenation of -// the src buffers was passed in a single run. That is, Cipher -// maintains state and does not reset at each XORKeyStream call. -func (s *Cipher) XORKeyStream(dst, src []byte) { - if len(src) == 0 { - return - } - if len(dst) < len(src) { - panic("chacha20: output smaller than input") - } - dst = dst[:len(src)] - if subtle.InexactOverlap(dst, src) { - panic("chacha20: invalid buffer overlap") - } - - // First, drain any remaining key stream from a previous XORKeyStream. - if s.len != 0 { - keyStream := s.buf[bufSize-s.len:] - if len(src) < len(keyStream) { - keyStream = keyStream[:len(src)] - } - _ = src[len(keyStream)-1] // bounds check elimination hint - for i, b := range keyStream { - dst[i] = src[i] ^ b - } - s.len -= len(keyStream) - dst, src = dst[len(keyStream):], src[len(keyStream):] - } - if len(src) == 0 { - return - } - - // If we'd need to let the counter overflow and keep generating output, - // panic immediately. If instead we'd only reach the last block, remember - // not to generate any more output after the buffer is drained. - numBlocks := (uint64(len(src)) + blockSize - 1) / blockSize - if s.overflow || uint64(s.counter)+numBlocks > 1<<32 { - panic("chacha20: counter overflow") - } else if uint64(s.counter)+numBlocks == 1<<32 { - s.overflow = true - } - - // xorKeyStreamBlocks implementations expect input lengths that are a - // multiple of bufSize. Platform-specific ones process multiple blocks at a - // time, so have bufSizes that are a multiple of blockSize. - - full := len(src) - len(src)%bufSize - if full > 0 { - s.xorKeyStreamBlocks(dst[:full], src[:full]) - } - dst, src = dst[full:], src[full:] - - // If using a multi-block xorKeyStreamBlocks would overflow, use the generic - // one that does one block at a time. - const blocksPerBuf = bufSize / blockSize - if uint64(s.counter)+blocksPerBuf > 1<<32 { - s.buf = [bufSize]byte{} - numBlocks := (len(src) + blockSize - 1) / blockSize - buf := s.buf[bufSize-numBlocks*blockSize:] - copy(buf, src) - s.xorKeyStreamBlocksGeneric(buf, buf) - s.len = len(buf) - copy(dst, buf) - return - } - - // If we have a partial (multi-)block, pad it for xorKeyStreamBlocks, and - // keep the leftover keystream for the next XORKeyStream invocation. 
- if len(src) > 0 { - s.buf = [bufSize]byte{} - copy(s.buf[:], src) - s.xorKeyStreamBlocks(s.buf[:], s.buf[:]) - s.len = bufSize - copy(dst, s.buf[:]) - } -} - -func (s *Cipher) xorKeyStreamBlocksGeneric(dst, src []byte) { - if len(dst) != len(src) || len(dst)%blockSize != 0 { - panic("chacha20: internal error: wrong dst and/or src length") - } - - // To generate each block of key stream, the initial cipher state - // (represented below) is passed through 20 rounds of shuffling, - // alternatively applying quarterRounds by columns (like 1, 5, 9, 13) - // or by diagonals (like 1, 6, 11, 12). - // - // 0:cccccccc 1:cccccccc 2:cccccccc 3:cccccccc - // 4:kkkkkkkk 5:kkkkkkkk 6:kkkkkkkk 7:kkkkkkkk - // 8:kkkkkkkk 9:kkkkkkkk 10:kkkkkkkk 11:kkkkkkkk - // 12:bbbbbbbb 13:nnnnnnnn 14:nnnnnnnn 15:nnnnnnnn - // - // c=constant k=key b=blockcount n=nonce - var ( - c0, c1, c2, c3 = j0, j1, j2, j3 - c4, c5, c6, c7 = s.key[0], s.key[1], s.key[2], s.key[3] - c8, c9, c10, c11 = s.key[4], s.key[5], s.key[6], s.key[7] - _, c13, c14, c15 = s.counter, s.nonce[0], s.nonce[1], s.nonce[2] - ) - - // Three quarters of the first round don't depend on the counter, so we can - // calculate them here, and reuse them for multiple blocks in the loop, and - // for future XORKeyStream invocations. - if !s.precompDone { - s.p1, s.p5, s.p9, s.p13 = quarterRound(c1, c5, c9, c13) - s.p2, s.p6, s.p10, s.p14 = quarterRound(c2, c6, c10, c14) - s.p3, s.p7, s.p11, s.p15 = quarterRound(c3, c7, c11, c15) - s.precompDone = true - } - - // A condition of len(src) > 0 would be sufficient, but this also - // acts as a bounds check elimination hint. - for len(src) >= 64 && len(dst) >= 64 { - // The remainder of the first column round. - fcr0, fcr4, fcr8, fcr12 := quarterRound(c0, c4, c8, s.counter) - - // The second diagonal round. - x0, x5, x10, x15 := quarterRound(fcr0, s.p5, s.p10, s.p15) - x1, x6, x11, x12 := quarterRound(s.p1, s.p6, s.p11, fcr12) - x2, x7, x8, x13 := quarterRound(s.p2, s.p7, fcr8, s.p13) - x3, x4, x9, x14 := quarterRound(s.p3, fcr4, s.p9, s.p14) - - // The remaining 18 rounds. - for i := 0; i < 9; i++ { - // Column round. - x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) - x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) - x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) - x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) - - // Diagonal round. - x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) - x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) - x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) - x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) - } - - // Add back the initial state to generate the key stream, then - // XOR the key stream with the source and write out the result. - addXor(dst[0:4], src[0:4], x0, c0) - addXor(dst[4:8], src[4:8], x1, c1) - addXor(dst[8:12], src[8:12], x2, c2) - addXor(dst[12:16], src[12:16], x3, c3) - addXor(dst[16:20], src[16:20], x4, c4) - addXor(dst[20:24], src[20:24], x5, c5) - addXor(dst[24:28], src[24:28], x6, c6) - addXor(dst[28:32], src[28:32], x7, c7) - addXor(dst[32:36], src[32:36], x8, c8) - addXor(dst[36:40], src[36:40], x9, c9) - addXor(dst[40:44], src[40:44], x10, c10) - addXor(dst[44:48], src[44:48], x11, c11) - addXor(dst[48:52], src[48:52], x12, s.counter) - addXor(dst[52:56], src[52:56], x13, c13) - addXor(dst[56:60], src[56:60], x14, c14) - addXor(dst[60:64], src[60:64], x15, c15) - - s.counter += 1 - - src, dst = src[blockSize:], dst[blockSize:] - } -} - -// HChaCha20 uses the ChaCha20 core to generate a derived key from a 32 bytes -// key and a 16 bytes nonce. 
It returns an error if key or nonce have any other -// length. It is used as part of the XChaCha20 construction. -func HChaCha20(key, nonce []byte) ([]byte, error) { - // This function is split into a wrapper so that the slice allocation will - // be inlined, and depending on how the caller uses the return value, won't - // escape to the heap. - out := make([]byte, 32) - return hChaCha20(out, key, nonce) -} - -func hChaCha20(out, key, nonce []byte) ([]byte, error) { - if len(key) != KeySize { - return nil, errors.New("chacha20: wrong HChaCha20 key size") - } - if len(nonce) != 16 { - return nil, errors.New("chacha20: wrong HChaCha20 nonce size") - } - - x0, x1, x2, x3 := j0, j1, j2, j3 - x4 := binary.LittleEndian.Uint32(key[0:4]) - x5 := binary.LittleEndian.Uint32(key[4:8]) - x6 := binary.LittleEndian.Uint32(key[8:12]) - x7 := binary.LittleEndian.Uint32(key[12:16]) - x8 := binary.LittleEndian.Uint32(key[16:20]) - x9 := binary.LittleEndian.Uint32(key[20:24]) - x10 := binary.LittleEndian.Uint32(key[24:28]) - x11 := binary.LittleEndian.Uint32(key[28:32]) - x12 := binary.LittleEndian.Uint32(nonce[0:4]) - x13 := binary.LittleEndian.Uint32(nonce[4:8]) - x14 := binary.LittleEndian.Uint32(nonce[8:12]) - x15 := binary.LittleEndian.Uint32(nonce[12:16]) - - for i := 0; i < 10; i++ { - // Diagonal round. - x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) - x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) - x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) - x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) - - // Column round. - x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) - x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) - x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) - x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) - } - - _ = out[31] // bounds check elimination hint - binary.LittleEndian.PutUint32(out[0:4], x0) - binary.LittleEndian.PutUint32(out[4:8], x1) - binary.LittleEndian.PutUint32(out[8:12], x2) - binary.LittleEndian.PutUint32(out[12:16], x3) - binary.LittleEndian.PutUint32(out[16:20], x12) - binary.LittleEndian.PutUint32(out[20:24], x13) - binary.LittleEndian.PutUint32(out[24:28], x14) - binary.LittleEndian.PutUint32(out[28:32], x15) - return out, nil -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go b/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go deleted file mode 100644 index 4635307b8f2..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !arm64,!s390x,!ppc64le arm64,!go1.11 gccgo purego - -package chacha20 - -const bufSize = blockSize - -func (s *Cipher) xorKeyStreamBlocks(dst, src []byte) { - s.xorKeyStreamBlocksGeneric(dst, src) -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go b/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go deleted file mode 100644 index b7993303415..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !gccgo,!purego - -package chacha20 - -const bufSize = 256 - -//go:noescape -func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) - -func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { - chaCha20_ctr32_vsx(&dst[0], &src[0], len(src), &c.key, &c.counter) -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s b/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s deleted file mode 100644 index 23c60216430..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s +++ /dev/null @@ -1,449 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on CRYPTOGAMS code with the following comment: -// # ==================================================================== -// # Written by Andy Polyakov for the OpenSSL -// # project. The module is, however, dual licensed under OpenSSL and -// # CRYPTOGAMS licenses depending on where you obtain it. For further -// # details see http://www.openssl.org/~appro/cryptogams/. -// # ==================================================================== - -// Code for the perl script that generates the ppc64 assembler -// can be found in the cryptogams repository at the link below. It is based on -// the original from openssl. - -// https://github.com/dot-asm/cryptogams/commit/a60f5b50ed908e91 - -// The differences in this and the original implementation are -// due to the calling conventions and initialization of constants. - -// +build !gccgo,!purego - -#include "textflag.h" - -#define OUT R3 -#define INP R4 -#define LEN R5 -#define KEY R6 -#define CNT R7 -#define TMP R15 - -#define CONSTBASE R16 -#define BLOCKS R17 - -DATA consts<>+0x00(SB)/8, $0x3320646e61707865 -DATA consts<>+0x08(SB)/8, $0x6b20657479622d32 -DATA consts<>+0x10(SB)/8, $0x0000000000000001 -DATA consts<>+0x18(SB)/8, $0x0000000000000000 -DATA consts<>+0x20(SB)/8, $0x0000000000000004 -DATA consts<>+0x28(SB)/8, $0x0000000000000000 -DATA consts<>+0x30(SB)/8, $0x0a0b08090e0f0c0d -DATA consts<>+0x38(SB)/8, $0x0203000106070405 -DATA consts<>+0x40(SB)/8, $0x090a0b080d0e0f0c -DATA consts<>+0x48(SB)/8, $0x0102030005060704 -DATA consts<>+0x50(SB)/8, $0x6170786561707865 -DATA consts<>+0x58(SB)/8, $0x6170786561707865 -DATA consts<>+0x60(SB)/8, $0x3320646e3320646e -DATA consts<>+0x68(SB)/8, $0x3320646e3320646e -DATA consts<>+0x70(SB)/8, $0x79622d3279622d32 -DATA consts<>+0x78(SB)/8, $0x79622d3279622d32 -DATA consts<>+0x80(SB)/8, $0x6b2065746b206574 -DATA consts<>+0x88(SB)/8, $0x6b2065746b206574 -DATA consts<>+0x90(SB)/8, $0x0000000100000000 -DATA consts<>+0x98(SB)/8, $0x0000000300000002 -GLOBL consts<>(SB), RODATA, $0xa0 - -//func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) -TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 - MOVD out+0(FP), OUT - MOVD inp+8(FP), INP - MOVD len+16(FP), LEN - MOVD key+24(FP), KEY - MOVD counter+32(FP), CNT - - // Addressing for constants - MOVD $consts<>+0x00(SB), CONSTBASE - MOVD $16, R8 - MOVD $32, R9 - MOVD $48, R10 - MOVD $64, R11 - SRD $6, LEN, BLOCKS - // V16 - LXVW4X (CONSTBASE)(R0), VS48 - ADD $80,CONSTBASE - - // Load key into V17,V18 - LXVW4X (KEY)(R0), VS49 - LXVW4X (KEY)(R8), VS50 - - // Load CNT, NONCE into V19 - LXVW4X (CNT)(R0), VS51 - - // Clear V27 - VXOR V27, V27, V27 - - // V28 - LXVW4X (CONSTBASE)(R11), VS60 - - // splat slot from V19 -> V26 - VSPLTW $0, V19, V26 - - VSLDOI $4, V19, V27, 
V19 - VSLDOI $12, V27, V19, V19 - - VADDUWM V26, V28, V26 - - MOVD $10, R14 - MOVD R14, CTR - -loop_outer_vsx: - // V0, V1, V2, V3 - LXVW4X (R0)(CONSTBASE), VS32 - LXVW4X (R8)(CONSTBASE), VS33 - LXVW4X (R9)(CONSTBASE), VS34 - LXVW4X (R10)(CONSTBASE), VS35 - - // splat values from V17, V18 into V4-V11 - VSPLTW $0, V17, V4 - VSPLTW $1, V17, V5 - VSPLTW $2, V17, V6 - VSPLTW $3, V17, V7 - VSPLTW $0, V18, V8 - VSPLTW $1, V18, V9 - VSPLTW $2, V18, V10 - VSPLTW $3, V18, V11 - - // VOR - VOR V26, V26, V12 - - // splat values from V19 -> V13, V14, V15 - VSPLTW $1, V19, V13 - VSPLTW $2, V19, V14 - VSPLTW $3, V19, V15 - - // splat const values - VSPLTISW $-16, V27 - VSPLTISW $12, V28 - VSPLTISW $8, V29 - VSPLTISW $7, V30 - -loop_vsx: - VADDUWM V0, V4, V0 - VADDUWM V1, V5, V1 - VADDUWM V2, V6, V2 - VADDUWM V3, V7, V3 - - VXOR V12, V0, V12 - VXOR V13, V1, V13 - VXOR V14, V2, V14 - VXOR V15, V3, V15 - - VRLW V12, V27, V12 - VRLW V13, V27, V13 - VRLW V14, V27, V14 - VRLW V15, V27, V15 - - VADDUWM V8, V12, V8 - VADDUWM V9, V13, V9 - VADDUWM V10, V14, V10 - VADDUWM V11, V15, V11 - - VXOR V4, V8, V4 - VXOR V5, V9, V5 - VXOR V6, V10, V6 - VXOR V7, V11, V7 - - VRLW V4, V28, V4 - VRLW V5, V28, V5 - VRLW V6, V28, V6 - VRLW V7, V28, V7 - - VADDUWM V0, V4, V0 - VADDUWM V1, V5, V1 - VADDUWM V2, V6, V2 - VADDUWM V3, V7, V3 - - VXOR V12, V0, V12 - VXOR V13, V1, V13 - VXOR V14, V2, V14 - VXOR V15, V3, V15 - - VRLW V12, V29, V12 - VRLW V13, V29, V13 - VRLW V14, V29, V14 - VRLW V15, V29, V15 - - VADDUWM V8, V12, V8 - VADDUWM V9, V13, V9 - VADDUWM V10, V14, V10 - VADDUWM V11, V15, V11 - - VXOR V4, V8, V4 - VXOR V5, V9, V5 - VXOR V6, V10, V6 - VXOR V7, V11, V7 - - VRLW V4, V30, V4 - VRLW V5, V30, V5 - VRLW V6, V30, V6 - VRLW V7, V30, V7 - - VADDUWM V0, V5, V0 - VADDUWM V1, V6, V1 - VADDUWM V2, V7, V2 - VADDUWM V3, V4, V3 - - VXOR V15, V0, V15 - VXOR V12, V1, V12 - VXOR V13, V2, V13 - VXOR V14, V3, V14 - - VRLW V15, V27, V15 - VRLW V12, V27, V12 - VRLW V13, V27, V13 - VRLW V14, V27, V14 - - VADDUWM V10, V15, V10 - VADDUWM V11, V12, V11 - VADDUWM V8, V13, V8 - VADDUWM V9, V14, V9 - - VXOR V5, V10, V5 - VXOR V6, V11, V6 - VXOR V7, V8, V7 - VXOR V4, V9, V4 - - VRLW V5, V28, V5 - VRLW V6, V28, V6 - VRLW V7, V28, V7 - VRLW V4, V28, V4 - - VADDUWM V0, V5, V0 - VADDUWM V1, V6, V1 - VADDUWM V2, V7, V2 - VADDUWM V3, V4, V3 - - VXOR V15, V0, V15 - VXOR V12, V1, V12 - VXOR V13, V2, V13 - VXOR V14, V3, V14 - - VRLW V15, V29, V15 - VRLW V12, V29, V12 - VRLW V13, V29, V13 - VRLW V14, V29, V14 - - VADDUWM V10, V15, V10 - VADDUWM V11, V12, V11 - VADDUWM V8, V13, V8 - VADDUWM V9, V14, V9 - - VXOR V5, V10, V5 - VXOR V6, V11, V6 - VXOR V7, V8, V7 - VXOR V4, V9, V4 - - VRLW V5, V30, V5 - VRLW V6, V30, V6 - VRLW V7, V30, V7 - VRLW V4, V30, V4 - BC 16, LT, loop_vsx - - VADDUWM V12, V26, V12 - - WORD $0x13600F8C // VMRGEW V0, V1, V27 - WORD $0x13821F8C // VMRGEW V2, V3, V28 - - WORD $0x10000E8C // VMRGOW V0, V1, V0 - WORD $0x10421E8C // VMRGOW V2, V3, V2 - - WORD $0x13A42F8C // VMRGEW V4, V5, V29 - WORD $0x13C63F8C // VMRGEW V6, V7, V30 - - XXPERMDI VS32, VS34, $0, VS33 - XXPERMDI VS32, VS34, $3, VS35 - XXPERMDI VS59, VS60, $0, VS32 - XXPERMDI VS59, VS60, $3, VS34 - - WORD $0x10842E8C // VMRGOW V4, V5, V4 - WORD $0x10C63E8C // VMRGOW V6, V7, V6 - - WORD $0x13684F8C // VMRGEW V8, V9, V27 - WORD $0x138A5F8C // VMRGEW V10, V11, V28 - - XXPERMDI VS36, VS38, $0, VS37 - XXPERMDI VS36, VS38, $3, VS39 - XXPERMDI VS61, VS62, $0, VS36 - XXPERMDI VS61, VS62, $3, VS38 - - WORD $0x11084E8C // VMRGOW V8, V9, V8 - WORD $0x114A5E8C // VMRGOW V10, V11, V10 - - 
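For orientation: each VADDUWM/VXOR/VRLW triplet in loop_vsx above is the four-lane vector form of one step of the scalar ChaCha quarter round, with the rotate amounts splatted into V27..V30 beforehand (VSPLTISW $-16 yields a left-rotate of 16, since rotate counts are taken mod 32; the others are 12, 8 and 7). A scalar sketch of the round being vectorized, with the RFC 7539 test vector:

```go
package main

import (
	"fmt"
	"math/bits"
)

// quarterRound is the scalar ChaCha quarter round (RFC 7539, section 2.1).
// The VSX loop above runs this add/xor/rotate sequence on four keystream
// blocks at once, one block per vector lane.
func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
	a += b
	d = bits.RotateLeft32(d^a, 16)
	c += d
	b = bits.RotateLeft32(b^c, 12)
	a += b
	d = bits.RotateLeft32(d^a, 8)
	c += d
	b = bits.RotateLeft32(b^c, 7)
	return a, b, c, d
}

func main() {
	// Test vector from RFC 7539, section 2.1.1.
	a, b, c, d := quarterRound(0x11111111, 0x01020304, 0x9b8d6f43, 0x01234567)
	fmt.Printf("%08x %08x %08x %08x\n", a, b, c, d)
	// Expected: ea2a92f4 cb1cf8ce 4581472e 5881c4bb
}
```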
WORD $0x13AC6F8C // VMRGEW V12, V13, V29 - WORD $0x13CE7F8C // VMRGEW V14, V15, V30 - - XXPERMDI VS40, VS42, $0, VS41 - XXPERMDI VS40, VS42, $3, VS43 - XXPERMDI VS59, VS60, $0, VS40 - XXPERMDI VS59, VS60, $3, VS42 - - WORD $0x118C6E8C // VMRGOW V12, V13, V12 - WORD $0x11CE7E8C // VMRGOW V14, V15, V14 - - VSPLTISW $4, V27 - VADDUWM V26, V27, V26 - - XXPERMDI VS44, VS46, $0, VS45 - XXPERMDI VS44, VS46, $3, VS47 - XXPERMDI VS61, VS62, $0, VS44 - XXPERMDI VS61, VS62, $3, VS46 - - VADDUWM V0, V16, V0 - VADDUWM V4, V17, V4 - VADDUWM V8, V18, V8 - VADDUWM V12, V19, V12 - - CMPU LEN, $64 - BLT tail_vsx - - // Bottom of loop - LXVW4X (INP)(R0), VS59 - LXVW4X (INP)(R8), VS60 - LXVW4X (INP)(R9), VS61 - LXVW4X (INP)(R10), VS62 - - VXOR V27, V0, V27 - VXOR V28, V4, V28 - VXOR V29, V8, V29 - VXOR V30, V12, V30 - - STXVW4X VS59, (OUT)(R0) - STXVW4X VS60, (OUT)(R8) - ADD $64, INP - STXVW4X VS61, (OUT)(R9) - ADD $-64, LEN - STXVW4X VS62, (OUT)(R10) - ADD $64, OUT - BEQ done_vsx - - VADDUWM V1, V16, V0 - VADDUWM V5, V17, V4 - VADDUWM V9, V18, V8 - VADDUWM V13, V19, V12 - - CMPU LEN, $64 - BLT tail_vsx - - LXVW4X (INP)(R0), VS59 - LXVW4X (INP)(R8), VS60 - LXVW4X (INP)(R9), VS61 - LXVW4X (INP)(R10), VS62 - VXOR V27, V0, V27 - - VXOR V28, V4, V28 - VXOR V29, V8, V29 - VXOR V30, V12, V30 - - STXVW4X VS59, (OUT)(R0) - STXVW4X VS60, (OUT)(R8) - ADD $64, INP - STXVW4X VS61, (OUT)(R9) - ADD $-64, LEN - STXVW4X VS62, (OUT)(R10) - ADD $64, OUT - BEQ done_vsx - - VADDUWM V2, V16, V0 - VADDUWM V6, V17, V4 - VADDUWM V10, V18, V8 - VADDUWM V14, V19, V12 - - CMPU LEN, $64 - BLT tail_vsx - - LXVW4X (INP)(R0), VS59 - LXVW4X (INP)(R8), VS60 - LXVW4X (INP)(R9), VS61 - LXVW4X (INP)(R10), VS62 - - VXOR V27, V0, V27 - VXOR V28, V4, V28 - VXOR V29, V8, V29 - VXOR V30, V12, V30 - - STXVW4X VS59, (OUT)(R0) - STXVW4X VS60, (OUT)(R8) - ADD $64, INP - STXVW4X VS61, (OUT)(R9) - ADD $-64, LEN - STXVW4X VS62, (OUT)(R10) - ADD $64, OUT - BEQ done_vsx - - VADDUWM V3, V16, V0 - VADDUWM V7, V17, V4 - VADDUWM V11, V18, V8 - VADDUWM V15, V19, V12 - - CMPU LEN, $64 - BLT tail_vsx - - LXVW4X (INP)(R0), VS59 - LXVW4X (INP)(R8), VS60 - LXVW4X (INP)(R9), VS61 - LXVW4X (INP)(R10), VS62 - - VXOR V27, V0, V27 - VXOR V28, V4, V28 - VXOR V29, V8, V29 - VXOR V30, V12, V30 - - STXVW4X VS59, (OUT)(R0) - STXVW4X VS60, (OUT)(R8) - ADD $64, INP - STXVW4X VS61, (OUT)(R9) - ADD $-64, LEN - STXVW4X VS62, (OUT)(R10) - ADD $64, OUT - - MOVD $10, R14 - MOVD R14, CTR - BNE loop_outer_vsx - -done_vsx: - // Increment counter by number of 64 byte blocks - MOVD (CNT), R14 - ADD BLOCKS, R14 - MOVD R14, (CNT) - RET - -tail_vsx: - ADD $32, R1, R11 - MOVD LEN, CTR - - // Save values on stack to copy from - STXVW4X VS32, (R11)(R0) - STXVW4X VS36, (R11)(R8) - STXVW4X VS40, (R11)(R9) - STXVW4X VS44, (R11)(R10) - ADD $-1, R11, R12 - ADD $-1, INP - ADD $-1, OUT - -looptail_vsx: - // Copying the result to OUT - // in bytes. - MOVBZU 1(R12), KEY - MOVBZU 1(INP), TMP - XOR KEY, TMP, KEY - MOVBU KEY, 1(OUT) - BC 16, LT, looptail_vsx - - // Clear the stack values - STXVW4X VS48, (R11)(R0) - STXVW4X VS48, (R11)(R8) - STXVW4X VS48, (R11)(R9) - STXVW4X VS48, (R11)(R10) - BR done_vsx diff --git a/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go b/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go deleted file mode 100644 index a9244bdf4db..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !gccgo,!purego - -package chacha20 - -import "golang.org/x/sys/cpu" - -var haveAsm = cpu.S390X.HasVX - -const bufSize = 256 - -// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only -// be called when the vector facility is available. Implementation in asm_s390x.s. -//go:noescape -func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) - -func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { - if cpu.S390X.HasVX { - xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter) - } else { - c.xorKeyStreamBlocksGeneric(dst, src) - } -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s b/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s deleted file mode 100644 index 89c658c410b..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !gccgo,!purego - -#include "go_asm.h" -#include "textflag.h" - -// This is an implementation of the ChaCha20 encryption algorithm as -// specified in RFC 7539. It uses vector instructions to compute -// 4 keystream blocks in parallel (256 bytes) which are then XORed -// with the bytes in the input slice. - -GLOBL ·constants<>(SB), RODATA|NOPTR, $32 -// BSWAP: swap bytes in each 4-byte element -DATA ·constants<>+0x00(SB)/4, $0x03020100 -DATA ·constants<>+0x04(SB)/4, $0x07060504 -DATA ·constants<>+0x08(SB)/4, $0x0b0a0908 -DATA ·constants<>+0x0c(SB)/4, $0x0f0e0d0c -// J0: [j0, j1, j2, j3] -DATA ·constants<>+0x10(SB)/4, $0x61707865 -DATA ·constants<>+0x14(SB)/4, $0x3320646e -DATA ·constants<>+0x18(SB)/4, $0x79622d32 -DATA ·constants<>+0x1c(SB)/4, $0x6b206574 - -#define BSWAP V5 -#define J0 V6 -#define KEY0 V7 -#define KEY1 V8 -#define NONCE V9 -#define CTR V10 -#define M0 V11 -#define M1 V12 -#define M2 V13 -#define M3 V14 -#define INC V15 -#define X0 V16 -#define X1 V17 -#define X2 V18 -#define X3 V19 -#define X4 V20 -#define X5 V21 -#define X6 V22 -#define X7 V23 -#define X8 V24 -#define X9 V25 -#define X10 V26 -#define X11 V27 -#define X12 V28 -#define X13 V29 -#define X14 V30 -#define X15 V31 - -#define NUM_ROUNDS 20 - -#define ROUND4(a0, a1, a2, a3, b0, b1, b2, b3, c0, c1, c2, c3, d0, d1, d2, d3) \ - VAF a1, a0, a0 \ - VAF b1, b0, b0 \ - VAF c1, c0, c0 \ - VAF d1, d0, d0 \ - VX a0, a2, a2 \ - VX b0, b2, b2 \ - VX c0, c2, c2 \ - VX d0, d2, d2 \ - VERLLF $16, a2, a2 \ - VERLLF $16, b2, b2 \ - VERLLF $16, c2, c2 \ - VERLLF $16, d2, d2 \ - VAF a2, a3, a3 \ - VAF b2, b3, b3 \ - VAF c2, c3, c3 \ - VAF d2, d3, d3 \ - VX a3, a1, a1 \ - VX b3, b1, b1 \ - VX c3, c1, c1 \ - VX d3, d1, d1 \ - VERLLF $12, a1, a1 \ - VERLLF $12, b1, b1 \ - VERLLF $12, c1, c1 \ - VERLLF $12, d1, d1 \ - VAF a1, a0, a0 \ - VAF b1, b0, b0 \ - VAF c1, c0, c0 \ - VAF d1, d0, d0 \ - VX a0, a2, a2 \ - VX b0, b2, b2 \ - VX c0, c2, c2 \ - VX d0, d2, d2 \ - VERLLF $8, a2, a2 \ - VERLLF $8, b2, b2 \ - VERLLF $8, c2, c2 \ - VERLLF $8, d2, d2 \ - VAF a2, a3, a3 \ - VAF b2, b3, b3 \ - VAF c2, c3, c3 \ - VAF d2, d3, d3 \ - VX a3, a1, a1 \ - VX b3, b1, b1 \ - VX c3, c1, c1 \ - VX d3, d1, d1 \ - VERLLF $7, a1, a1 \ - VERLLF $7, b1, b1 \ - VERLLF $7, c1, c1 \ - VERLLF $7, d1, d1 - -#define PERMUTE(mask, v0, v1, v2, v3) \ - VPERM v0, v0, mask, v0 \ - VPERM v1, v1, 
mask, v1 \ - VPERM v2, v2, mask, v2 \ - VPERM v3, v3, mask, v3 - -#define ADDV(x, v0, v1, v2, v3) \ - VAF x, v0, v0 \ - VAF x, v1, v1 \ - VAF x, v2, v2 \ - VAF x, v3, v3 - -#define XORV(off, dst, src, v0, v1, v2, v3) \ - VLM off(src), M0, M3 \ - PERMUTE(BSWAP, v0, v1, v2, v3) \ - VX v0, M0, M0 \ - VX v1, M1, M1 \ - VX v2, M2, M2 \ - VX v3, M3, M3 \ - VSTM M0, M3, off(dst) - -#define SHUFFLE(a, b, c, d, t, u, v, w) \ - VMRHF a, c, t \ // t = {a[0], c[0], a[1], c[1]} - VMRHF b, d, u \ // u = {b[0], d[0], b[1], d[1]} - VMRLF a, c, v \ // v = {a[2], c[2], a[3], c[3]} - VMRLF b, d, w \ // w = {b[2], d[2], b[3], d[3]} - VMRHF t, u, a \ // a = {a[0], b[0], c[0], d[0]} - VMRLF t, u, b \ // b = {a[1], b[1], c[1], d[1]} - VMRHF v, w, c \ // c = {a[2], b[2], c[2], d[2]} - VMRLF v, w, d // d = {a[3], b[3], c[3], d[3]} - -// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) -TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 - MOVD $·constants<>(SB), R1 - MOVD dst+0(FP), R2 // R2=&dst[0] - LMG src+24(FP), R3, R4 // R3=&src[0] R4=len(src) - MOVD key+48(FP), R5 // R5=key - MOVD nonce+56(FP), R6 // R6=nonce - MOVD counter+64(FP), R7 // R7=counter - - // load BSWAP and J0 - VLM (R1), BSWAP, J0 - - // setup - MOVD $95, R0 - VLM (R5), KEY0, KEY1 - VLL R0, (R6), NONCE - VZERO M0 - VLEIB $7, $32, M0 - VSRLB M0, NONCE, NONCE - - // initialize counter values - VLREPF (R7), CTR - VZERO INC - VLEIF $1, $1, INC - VLEIF $2, $2, INC - VLEIF $3, $3, INC - VAF INC, CTR, CTR - VREPIF $4, INC - -chacha: - VREPF $0, J0, X0 - VREPF $1, J0, X1 - VREPF $2, J0, X2 - VREPF $3, J0, X3 - VREPF $0, KEY0, X4 - VREPF $1, KEY0, X5 - VREPF $2, KEY0, X6 - VREPF $3, KEY0, X7 - VREPF $0, KEY1, X8 - VREPF $1, KEY1, X9 - VREPF $2, KEY1, X10 - VREPF $3, KEY1, X11 - VLR CTR, X12 - VREPF $1, NONCE, X13 - VREPF $2, NONCE, X14 - VREPF $3, NONCE, X15 - - MOVD $(NUM_ROUNDS/2), R1 - -loop: - ROUND4(X0, X4, X12, X8, X1, X5, X13, X9, X2, X6, X14, X10, X3, X7, X15, X11) - ROUND4(X0, X5, X15, X10, X1, X6, X12, X11, X2, X7, X13, X8, X3, X4, X14, X9) - - ADD $-1, R1 - BNE loop - - // decrement length - ADD $-256, R4 - - // rearrange vectors - SHUFFLE(X0, X1, X2, X3, M0, M1, M2, M3) - ADDV(J0, X0, X1, X2, X3) - SHUFFLE(X4, X5, X6, X7, M0, M1, M2, M3) - ADDV(KEY0, X4, X5, X6, X7) - SHUFFLE(X8, X9, X10, X11, M0, M1, M2, M3) - ADDV(KEY1, X8, X9, X10, X11) - VAF CTR, X12, X12 - SHUFFLE(X12, X13, X14, X15, M0, M1, M2, M3) - ADDV(NONCE, X12, X13, X14, X15) - - // increment counters - VAF INC, CTR, CTR - - // xor keystream with plaintext - XORV(0*64, R2, R3, X0, X4, X8, X12) - XORV(1*64, R2, R3, X1, X5, X9, X13) - XORV(2*64, R2, R3, X2, X6, X10, X14) - XORV(3*64, R2, R3, X3, X7, X11, X15) - - // increment pointers - MOVD $256(R2), R2 - MOVD $256(R3), R3 - - CMPBNE R4, $0, chacha - - VSTEF $0, CTR, (R7) - RET diff --git a/awsproviderlint/vendor/golang.org/x/crypto/chacha20/xor.go b/awsproviderlint/vendor/golang.org/x/crypto/chacha20/xor.go deleted file mode 100644 index c2d04851e0d..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/chacha20/xor.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package chacha20 - -import "runtime" - -// Platforms that have fast unaligned 32-bit little endian accesses. 
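A note on the s390x kernel above before moving on: like the ppc64le one, it processes four 64-byte blocks per pass (bufSize = 256), with the CTR lanes holding counters ctr..ctr+3 (the INC vector), NUM_ROUNDS = 20 run as ten column/diagonal double rounds, a feed-forward add of the initial state (ADDV), and a byte-order fixup (PERMUTE with the BSWAP mask) before XORing into the message. A serial, self-contained sketch of what one lane computes; chachaBlock is illustrative, not the package's internal API:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// chachaBlock computes one 64-byte ChaCha20 keystream block (RFC 7539).
func chachaBlock(key [8]uint32, counter uint32, nonce [3]uint32) [64]byte {
	qr := func(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
		a += b
		d = bits.RotateLeft32(d^a, 16)
		c += d
		b = bits.RotateLeft32(b^c, 12)
		a += b
		d = bits.RotateLeft32(d^a, 8)
		c += d
		b = bits.RotateLeft32(b^c, 7)
		return a, b, c, d
	}
	// Initial state: 4 constants ("expand 32-byte k"), 8 key words, 1
	// counter word, 3 nonce words. J0/KEY0/KEY1/CTR/NONCE above hold
	// exactly these values, splatted across the four vector lanes.
	var s [16]uint32
	s[0], s[1], s[2], s[3] = 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574
	copy(s[4:12], key[:])
	s[12] = counter
	s[13], s[14], s[15] = nonce[0], nonce[1], nonce[2]

	x := s
	for i := 0; i < 10; i++ { // NUM_ROUNDS = 20, as 10 double rounds
		x[0], x[4], x[8], x[12] = qr(x[0], x[4], x[8], x[12])
		x[1], x[5], x[9], x[13] = qr(x[1], x[5], x[9], x[13])
		x[2], x[6], x[10], x[14] = qr(x[2], x[6], x[10], x[14])
		x[3], x[7], x[11], x[15] = qr(x[3], x[7], x[11], x[15])
		x[0], x[5], x[10], x[15] = qr(x[0], x[5], x[10], x[15])
		x[1], x[6], x[11], x[12] = qr(x[1], x[6], x[11], x[12])
		x[2], x[7], x[8], x[13] = qr(x[2], x[7], x[8], x[13])
		x[3], x[4], x[9], x[14] = qr(x[3], x[4], x[9], x[14])
	}
	var out [64]byte
	for i := range x {
		binary.LittleEndian.PutUint32(out[4*i:], x[i]+s[i]) // the ADDV feed-forward
	}
	return out
}

func main() {
	var key [8]uint32
	var nonce [3]uint32
	block := chachaBlock(key, 0, nonce)
	fmt.Printf("%x\n", block[:8]) // 76b8e0ada0f13d90: the all-zero test vector
}
```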
-const unaligned = runtime.GOARCH == "386" || - runtime.GOARCH == "amd64" || - runtime.GOARCH == "arm64" || - runtime.GOARCH == "ppc64le" || - runtime.GOARCH == "s390x" - -// addXor reads a little endian uint32 from src, XORs it with (a + b) and -// places the result in little endian byte order in dst. -func addXor(dst, src []byte, a, b uint32) { - _, _ = src[3], dst[3] // bounds check elimination hint - if unaligned { - // The compiler should optimize this code into - // 32-bit unaligned little endian loads and stores. - // TODO: delete once the compiler does a reliably - // good job with the generic code below. - // See issue #25111 for more details. - v := uint32(src[0]) - v |= uint32(src[1]) << 8 - v |= uint32(src[2]) << 16 - v |= uint32(src[3]) << 24 - v ^= a + b - dst[0] = byte(v) - dst[1] = byte(v >> 8) - dst[2] = byte(v >> 16) - dst[3] = byte(v >> 24) - } else { - a += b - dst[0] = src[0] ^ byte(a) - dst[1] = src[1] ^ byte(a>>8) - dst[2] = src[2] ^ byte(a>>16) - dst[3] = src[3] ^ byte(a>>24) - } -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/curve25519/curve25519.go b/awsproviderlint/vendor/golang.org/x/crypto/curve25519/curve25519.go deleted file mode 100644 index 4b9a655d1b5..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/curve25519/curve25519.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package curve25519 provides an implementation of the X25519 function, which -// performs scalar multiplication on the elliptic curve known as Curve25519. -// See RFC 7748. -package curve25519 // import "golang.org/x/crypto/curve25519" - -import ( - "crypto/subtle" - "fmt" -) - -// ScalarMult sets dst to the product scalar * point. -// -// Deprecated: when provided a low-order point, ScalarMult will set dst to all -// zeroes, irrespective of the scalar. Instead, use the X25519 function, which -// will return an error. -func ScalarMult(dst, scalar, point *[32]byte) { - scalarMult(dst, scalar, point) -} - -// ScalarBaseMult sets dst to the product scalar * base where base is the -// standard generator. -// -// It is recommended to use the X25519 function with Basepoint instead, as -// copying into fixed size arrays can lead to unexpected bugs. -func ScalarBaseMult(dst, scalar *[32]byte) { - ScalarMult(dst, scalar, &basePoint) -} - -const ( - // ScalarSize is the size of the scalar input to X25519. - ScalarSize = 32 - // PointSize is the size of the point input to X25519. - PointSize = 32 -) - -// Basepoint is the canonical Curve25519 generator. -var Basepoint []byte - -var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} - -func init() { Basepoint = basePoint[:] } - -func checkBasepoint() { - if subtle.ConstantTimeCompare(Basepoint, []byte{ - 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }) != 1 { - panic("curve25519: global Basepoint value was modified") - } -} - -// X25519 returns the result of the scalar multiplication (scalar * point), -// according to RFC 7748, Section 5. scalar, point and the return value are -// slices of 32 bytes. -// -// scalar can be generated at random, for example with crypto/rand. point should -// be either Basepoint or the output of another X25519 call. 
-// -// If point is Basepoint (but not if it's a different slice with the same -// contents) a precomputed implementation might be used for performance. -func X25519(scalar, point []byte) ([]byte, error) { - // Outline the body of function, to let the allocation be inlined in the - // caller, and possibly avoid escaping to the heap. - var dst [32]byte - return x25519(&dst, scalar, point) -} - -func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { - var in [32]byte - if l := len(scalar); l != 32 { - return nil, fmt.Errorf("bad scalar length: %d, expected %d", l, 32) - } - if l := len(point); l != 32 { - return nil, fmt.Errorf("bad point length: %d, expected %d", l, 32) - } - copy(in[:], scalar) - if &point[0] == &Basepoint[0] { - checkBasepoint() - ScalarBaseMult(dst, &in) - } else { - var base, zero [32]byte - copy(base[:], point) - ScalarMult(dst, &in, &base) - if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 { - return nil, fmt.Errorf("bad input point: low order point") - } - } - return dst[:], nil -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.go b/awsproviderlint/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.go deleted file mode 100644 index 5120b779b9b..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build amd64,!gccgo,!appengine,!purego - -package curve25519 - -// These functions are implemented in the .s files. The names of the functions -// in the rest of the file are also taken from the SUPERCOP sources to help -// people following along. - -//go:noescape - -func cswap(inout *[5]uint64, v uint64) - -//go:noescape - -func ladderstep(inout *[5][5]uint64) - -//go:noescape - -func freeze(inout *[5]uint64) - -//go:noescape - -func mul(dest, a, b *[5]uint64) - -//go:noescape - -func square(out, in *[5]uint64) - -// mladder uses a Montgomery ladder to calculate (xr/zr) *= s. -func mladder(xr, zr *[5]uint64, s *[32]byte) { - var work [5][5]uint64 - - work[0] = *xr - setint(&work[1], 1) - setint(&work[2], 0) - work[3] = *xr - setint(&work[4], 1) - - j := uint(6) - var prevbit byte - - for i := 31; i >= 0; i-- { - for j < 8 { - bit := ((*s)[i] >> j) & 1 - swap := bit ^ prevbit - prevbit = bit - cswap(&work[1], uint64(swap)) - ladderstep(&work) - j-- - } - j = 7 - } - - *xr = work[1] - *zr = work[2] -} - -func scalarMult(out, in, base *[32]byte) { - var e [32]byte - copy(e[:], (*in)[:]) - e[0] &= 248 - e[31] &= 127 - e[31] |= 64 - - var t, z [5]uint64 - unpack(&t, base) - mladder(&t, &z, &e) - invert(&z, &z) - mul(&t, &t, &z) - pack(out, &t) -} - -func setint(r *[5]uint64, v uint64) { - r[0] = v - r[1] = 0 - r[2] = 0 - r[3] = 0 - r[4] = 0 -} - -// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian -// order. 
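A usage note on the backend above: scalarMult clamps the scalar (e[0] &= 248; e[31] &= 127; e[31] |= 64) before running the Montgomery ladder in mladder, so any 32 random bytes are an acceptable private key. A Diffie-Hellman round trip through the exported API, which dispatches to this assembly on amd64:

```go
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	// Private keys: 32 random bytes each; clamping happens internally.
	alice := make([]byte, curve25519.ScalarSize)
	bob := make([]byte, curve25519.ScalarSize)
	if _, err := rand.Read(alice); err != nil {
		panic(err)
	}
	if _, err := rand.Read(bob); err != nil {
		panic(err)
	}

	// Public keys: scalar * basepoint (x = 9).
	alicePub, err := curve25519.X25519(alice, curve25519.Basepoint)
	if err != nil {
		panic(err)
	}
	bobPub, err := curve25519.X25519(bob, curve25519.Basepoint)
	if err != nil {
		panic(err)
	}

	// Both sides derive the same 32-byte shared secret.
	s1, err := curve25519.X25519(alice, bobPub)
	if err != nil {
		panic(err)
	}
	s2, err := curve25519.X25519(bob, alicePub)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(s1, s2)) // true
}
```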
-func unpack(r *[5]uint64, x *[32]byte) { - r[0] = uint64(x[0]) | - uint64(x[1])<<8 | - uint64(x[2])<<16 | - uint64(x[3])<<24 | - uint64(x[4])<<32 | - uint64(x[5])<<40 | - uint64(x[6]&7)<<48 - - r[1] = uint64(x[6])>>3 | - uint64(x[7])<<5 | - uint64(x[8])<<13 | - uint64(x[9])<<21 | - uint64(x[10])<<29 | - uint64(x[11])<<37 | - uint64(x[12]&63)<<45 - - r[2] = uint64(x[12])>>6 | - uint64(x[13])<<2 | - uint64(x[14])<<10 | - uint64(x[15])<<18 | - uint64(x[16])<<26 | - uint64(x[17])<<34 | - uint64(x[18])<<42 | - uint64(x[19]&1)<<50 - - r[3] = uint64(x[19])>>1 | - uint64(x[20])<<7 | - uint64(x[21])<<15 | - uint64(x[22])<<23 | - uint64(x[23])<<31 | - uint64(x[24])<<39 | - uint64(x[25]&15)<<47 - - r[4] = uint64(x[25])>>4 | - uint64(x[26])<<4 | - uint64(x[27])<<12 | - uint64(x[28])<<20 | - uint64(x[29])<<28 | - uint64(x[30])<<36 | - uint64(x[31]&127)<<44 -} - -// pack sets out = x where out is the usual, little-endian form of the 5, -// 51-bit limbs in x. -func pack(out *[32]byte, x *[5]uint64) { - t := *x - freeze(&t) - - out[0] = byte(t[0]) - out[1] = byte(t[0] >> 8) - out[2] = byte(t[0] >> 16) - out[3] = byte(t[0] >> 24) - out[4] = byte(t[0] >> 32) - out[5] = byte(t[0] >> 40) - out[6] = byte(t[0] >> 48) - - out[6] ^= byte(t[1]<<3) & 0xf8 - out[7] = byte(t[1] >> 5) - out[8] = byte(t[1] >> 13) - out[9] = byte(t[1] >> 21) - out[10] = byte(t[1] >> 29) - out[11] = byte(t[1] >> 37) - out[12] = byte(t[1] >> 45) - - out[12] ^= byte(t[2]<<6) & 0xc0 - out[13] = byte(t[2] >> 2) - out[14] = byte(t[2] >> 10) - out[15] = byte(t[2] >> 18) - out[16] = byte(t[2] >> 26) - out[17] = byte(t[2] >> 34) - out[18] = byte(t[2] >> 42) - out[19] = byte(t[2] >> 50) - - out[19] ^= byte(t[3]<<1) & 0xfe - out[20] = byte(t[3] >> 7) - out[21] = byte(t[3] >> 15) - out[22] = byte(t[3] >> 23) - out[23] = byte(t[3] >> 31) - out[24] = byte(t[3] >> 39) - out[25] = byte(t[3] >> 47) - - out[25] ^= byte(t[4]<<4) & 0xf0 - out[26] = byte(t[4] >> 4) - out[27] = byte(t[4] >> 12) - out[28] = byte(t[4] >> 20) - out[29] = byte(t[4] >> 28) - out[30] = byte(t[4] >> 36) - out[31] = byte(t[4] >> 44) -} - -// invert calculates r = x^-1 mod p using Fermat's little theorem. 
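unpack and pack above convert between the 32-byte encoding and a radix-2^51 form: five uint64 limbs r[0..4] representing r[0] + 2^51 r[1] + 2^102 r[2] + 2^153 r[3] + 2^204 r[4], leaving 13 spare bits per limb for carries. A math/big cross-check of that recomposition (limbsToInt is a hypothetical helper, not package code):

```go
package main

import (
	"fmt"
	"math/big"
)

// limbsToInt recomposes five 51-bit limbs into the integer they represent.
func limbsToInt(r [5]uint64) *big.Int {
	v := new(big.Int)
	for i := len(r) - 1; i >= 0; i-- {
		v.Lsh(v, 51)
		v.Add(v, new(big.Int).SetUint64(r[i]))
	}
	return v
}

func main() {
	// All limbs at their 51-bit maximum recompose to 2^255 - 1.
	max := [5]uint64{1<<51 - 1, 1<<51 - 1, 1<<51 - 1, 1<<51 - 1, 1<<51 - 1}
	fmt.Println(limbsToInt(max).BitLen()) // 255
}
```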
-func invert(r *[5]uint64, x *[5]uint64) { - var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64 - - square(&z2, x) /* 2 */ - square(&t, &z2) /* 4 */ - square(&t, &t) /* 8 */ - mul(&z9, &t, x) /* 9 */ - mul(&z11, &z9, &z2) /* 11 */ - square(&t, &z11) /* 22 */ - mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */ - - square(&t, &z2_5_0) /* 2^6 - 2^1 */ - for i := 1; i < 5; i++ { /* 2^20 - 2^10 */ - square(&t, &t) - } - mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */ - - square(&t, &z2_10_0) /* 2^11 - 2^1 */ - for i := 1; i < 10; i++ { /* 2^20 - 2^10 */ - square(&t, &t) - } - mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */ - - square(&t, &z2_20_0) /* 2^21 - 2^1 */ - for i := 1; i < 20; i++ { /* 2^40 - 2^20 */ - square(&t, &t) - } - mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */ - - square(&t, &t) /* 2^41 - 2^1 */ - for i := 1; i < 10; i++ { /* 2^50 - 2^10 */ - square(&t, &t) - } - mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */ - - square(&t, &z2_50_0) /* 2^51 - 2^1 */ - for i := 1; i < 50; i++ { /* 2^100 - 2^50 */ - square(&t, &t) - } - mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */ - - square(&t, &z2_100_0) /* 2^101 - 2^1 */ - for i := 1; i < 100; i++ { /* 2^200 - 2^100 */ - square(&t, &t) - } - mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */ - - square(&t, &t) /* 2^201 - 2^1 */ - for i := 1; i < 50; i++ { /* 2^250 - 2^50 */ - square(&t, &t) - } - mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */ - - square(&t, &t) /* 2^251 - 2^1 */ - square(&t, &t) /* 2^252 - 2^2 */ - square(&t, &t) /* 2^253 - 2^3 */ - - square(&t, &t) /* 2^254 - 2^4 */ - - square(&t, &t) /* 2^255 - 2^5 */ - mul(r, &t, &z11) /* 2^255 - 21 */ -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.s b/awsproviderlint/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.s deleted file mode 100644 index 0250c888592..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.s +++ /dev/null @@ -1,1793 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine,!purego - -#define REDMASK51 0x0007FFFFFFFFFFFF - -// These constants cannot be encoded in non-MOVQ immediates. -// We access them directly from memory instead. 
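Two checks worth recording here. First, invert above raises to p-2 for p = 2^255 - 19, so by Fermat's little theorem it agrees with a generic modular inverse, while running a fixed sequence of squarings and multiplications and therefore staying constant-time. Second, the DATA constants defined just below encode 121666·2^13 (the Montgomery curve constant a24 = 121666, pre-scaled for ladderstep's multiply-then-shift) and a redundant radix-2^51 spelling of 2p (limb values 2^52-38 and 2^52-2), which is added before limb-wise subtraction so no limb underflows. Both facts can be verified with math/big (variable-time, verification only, never for live keys):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	p := new(big.Int).Sub(new(big.Int).Lsh(one, 255), big.NewInt(19))

	// Fermat: x^(p-2) == x^-1 (mod p) for prime p and x != 0 (mod p).
	x := big.NewInt(123456789)
	fermat := new(big.Int).Exp(x, new(big.Int).Sub(p, big.NewInt(2)), p)
	fmt.Println(fermat.Cmp(new(big.Int).ModInverse(x, p)) == 0) // true

	// ·_121666_213 is 121666 << 13.
	fmt.Println(121666<<13 == 996687872) // true

	// ·_2P0 = 2^52-38 and ·_2P1234 = 2^52-2 recompose to exactly 2p.
	twoP := new(big.Int)
	limbs := []uint64{1<<52 - 38, 1<<52 - 2, 1<<52 - 2, 1<<52 - 2, 1<<52 - 2}
	for i := len(limbs) - 1; i >= 0; i-- {
		twoP.Lsh(twoP, 51).Add(twoP, new(big.Int).SetUint64(limbs[i]))
	}
	fmt.Println(twoP.Cmp(new(big.Int).Lsh(p, 1)) == 0) // true
}
```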
- -DATA ·_121666_213(SB)/8, $996687872 -GLOBL ·_121666_213(SB), 8, $8 - -DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA -GLOBL ·_2P0(SB), 8, $8 - -DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE -GLOBL ·_2P1234(SB), 8, $8 - -// func freeze(inout *[5]uint64) -TEXT ·freeze(SB),7,$0-8 - MOVQ inout+0(FP), DI - - MOVQ 0(DI),SI - MOVQ 8(DI),DX - MOVQ 16(DI),CX - MOVQ 24(DI),R8 - MOVQ 32(DI),R9 - MOVQ $REDMASK51,AX - MOVQ AX,R10 - SUBQ $18,R10 - MOVQ $3,R11 -REDUCELOOP: - MOVQ SI,R12 - SHRQ $51,R12 - ANDQ AX,SI - ADDQ R12,DX - MOVQ DX,R12 - SHRQ $51,R12 - ANDQ AX,DX - ADDQ R12,CX - MOVQ CX,R12 - SHRQ $51,R12 - ANDQ AX,CX - ADDQ R12,R8 - MOVQ R8,R12 - SHRQ $51,R12 - ANDQ AX,R8 - ADDQ R12,R9 - MOVQ R9,R12 - SHRQ $51,R12 - ANDQ AX,R9 - IMUL3Q $19,R12,R12 - ADDQ R12,SI - SUBQ $1,R11 - JA REDUCELOOP - MOVQ $1,R12 - CMPQ R10,SI - CMOVQLT R11,R12 - CMPQ AX,DX - CMOVQNE R11,R12 - CMPQ AX,CX - CMOVQNE R11,R12 - CMPQ AX,R8 - CMOVQNE R11,R12 - CMPQ AX,R9 - CMOVQNE R11,R12 - NEGQ R12 - ANDQ R12,AX - ANDQ R12,R10 - SUBQ R10,SI - SUBQ AX,DX - SUBQ AX,CX - SUBQ AX,R8 - SUBQ AX,R9 - MOVQ SI,0(DI) - MOVQ DX,8(DI) - MOVQ CX,16(DI) - MOVQ R8,24(DI) - MOVQ R9,32(DI) - RET - -// func ladderstep(inout *[5][5]uint64) -TEXT ·ladderstep(SB),0,$296-8 - MOVQ inout+0(FP),DI - - MOVQ 40(DI),SI - MOVQ 48(DI),DX - MOVQ 56(DI),CX - MOVQ 64(DI),R8 - MOVQ 72(DI),R9 - MOVQ SI,AX - MOVQ DX,R10 - MOVQ CX,R11 - MOVQ R8,R12 - MOVQ R9,R13 - ADDQ ·_2P0(SB),AX - ADDQ ·_2P1234(SB),R10 - ADDQ ·_2P1234(SB),R11 - ADDQ ·_2P1234(SB),R12 - ADDQ ·_2P1234(SB),R13 - ADDQ 80(DI),SI - ADDQ 88(DI),DX - ADDQ 96(DI),CX - ADDQ 104(DI),R8 - ADDQ 112(DI),R9 - SUBQ 80(DI),AX - SUBQ 88(DI),R10 - SUBQ 96(DI),R11 - SUBQ 104(DI),R12 - SUBQ 112(DI),R13 - MOVQ SI,0(SP) - MOVQ DX,8(SP) - MOVQ CX,16(SP) - MOVQ R8,24(SP) - MOVQ R9,32(SP) - MOVQ AX,40(SP) - MOVQ R10,48(SP) - MOVQ R11,56(SP) - MOVQ R12,64(SP) - MOVQ R13,72(SP) - MOVQ 40(SP),AX - MULQ 40(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 40(SP),AX - SHLQ $1,AX - MULQ 48(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 40(SP),AX - SHLQ $1,AX - MULQ 56(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 40(SP),AX - SHLQ $1,AX - MULQ 64(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 40(SP),AX - SHLQ $1,AX - MULQ 72(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 48(SP),AX - MULQ 48(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 48(SP),AX - SHLQ $1,AX - MULQ 56(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 48(SP),AX - SHLQ $1,AX - MULQ 64(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 48(SP),DX - IMUL3Q $38,DX,AX - MULQ 72(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 56(SP),AX - MULQ 56(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 56(SP),DX - IMUL3Q $38,DX,AX - MULQ 64(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 56(SP),DX - IMUL3Q $38,DX,AX - MULQ 72(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 64(SP),DX - IMUL3Q $19,DX,AX - MULQ 64(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 64(SP),DX - IMUL3Q $38,DX,AX - MULQ 72(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 72(SP),DX - IMUL3Q $19,DX,AX - MULQ 72(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,80(SP) - MOVQ R8,88(SP) - MOVQ R9,96(SP) - MOVQ AX,104(SP) - 
MOVQ R10,112(SP) - MOVQ 0(SP),AX - MULQ 0(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 0(SP),AX - SHLQ $1,AX - MULQ 8(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 0(SP),AX - SHLQ $1,AX - MULQ 16(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 0(SP),AX - SHLQ $1,AX - MULQ 24(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 0(SP),AX - SHLQ $1,AX - MULQ 32(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 8(SP),AX - MULQ 8(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - SHLQ $1,AX - MULQ 16(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 8(SP),AX - SHLQ $1,AX - MULQ 24(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SP),DX - IMUL3Q $38,DX,AX - MULQ 32(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 16(SP),AX - MULQ 16(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 16(SP),DX - IMUL3Q $38,DX,AX - MULQ 24(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 16(SP),DX - IMUL3Q $38,DX,AX - MULQ 32(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 24(SP),DX - IMUL3Q $19,DX,AX - MULQ 24(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 24(SP),DX - IMUL3Q $38,DX,AX - MULQ 32(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 32(SP),DX - IMUL3Q $19,DX,AX - MULQ 32(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,120(SP) - MOVQ R8,128(SP) - MOVQ R9,136(SP) - MOVQ AX,144(SP) - MOVQ R10,152(SP) - MOVQ SI,SI - MOVQ R8,DX - MOVQ R9,CX - MOVQ AX,R8 - MOVQ R10,R9 - ADDQ ·_2P0(SB),SI - ADDQ ·_2P1234(SB),DX - ADDQ ·_2P1234(SB),CX - ADDQ ·_2P1234(SB),R8 - ADDQ ·_2P1234(SB),R9 - SUBQ 80(SP),SI - SUBQ 88(SP),DX - SUBQ 96(SP),CX - SUBQ 104(SP),R8 - SUBQ 112(SP),R9 - MOVQ SI,160(SP) - MOVQ DX,168(SP) - MOVQ CX,176(SP) - MOVQ R8,184(SP) - MOVQ R9,192(SP) - MOVQ 120(DI),SI - MOVQ 128(DI),DX - MOVQ 136(DI),CX - MOVQ 144(DI),R8 - MOVQ 152(DI),R9 - MOVQ SI,AX - MOVQ DX,R10 - MOVQ CX,R11 - MOVQ R8,R12 - MOVQ R9,R13 - ADDQ ·_2P0(SB),AX - ADDQ ·_2P1234(SB),R10 - ADDQ ·_2P1234(SB),R11 - ADDQ ·_2P1234(SB),R12 - ADDQ ·_2P1234(SB),R13 - ADDQ 160(DI),SI - ADDQ 168(DI),DX - ADDQ 176(DI),CX - ADDQ 184(DI),R8 - ADDQ 192(DI),R9 - SUBQ 160(DI),AX - SUBQ 168(DI),R10 - SUBQ 176(DI),R11 - SUBQ 184(DI),R12 - SUBQ 192(DI),R13 - MOVQ SI,200(SP) - MOVQ DX,208(SP) - MOVQ CX,216(SP) - MOVQ R8,224(SP) - MOVQ R9,232(SP) - MOVQ AX,240(SP) - MOVQ R10,248(SP) - MOVQ R11,256(SP) - MOVQ R12,264(SP) - MOVQ R13,272(SP) - MOVQ 224(SP),SI - IMUL3Q $19,SI,AX - MOVQ AX,280(SP) - MULQ 56(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 232(SP),DX - IMUL3Q $19,DX,AX - MOVQ AX,288(SP) - MULQ 48(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 200(SP),AX - MULQ 40(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 200(SP),AX - MULQ 48(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 200(SP),AX - MULQ 56(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 200(SP),AX - MULQ 64(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 200(SP),AX - MULQ 72(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 208(SP),AX - MULQ 40(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 208(SP),AX - MULQ 48(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 208(SP),AX - MULQ 56(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 208(SP),AX - MULQ 64(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 208(SP),DX - IMUL3Q $19,DX,AX - MULQ 72(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 216(SP),AX - MULQ 40(SP) - 
ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 216(SP),AX - MULQ 48(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 216(SP),AX - MULQ 56(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 216(SP),DX - IMUL3Q $19,DX,AX - MULQ 64(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 216(SP),DX - IMUL3Q $19,DX,AX - MULQ 72(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 224(SP),AX - MULQ 40(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 224(SP),AX - MULQ 48(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 280(SP),AX - MULQ 64(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 280(SP),AX - MULQ 72(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 232(SP),AX - MULQ 40(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 288(SP),AX - MULQ 56(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 288(SP),AX - MULQ 64(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 288(SP),AX - MULQ 72(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,40(SP) - MOVQ R8,48(SP) - MOVQ R9,56(SP) - MOVQ AX,64(SP) - MOVQ R10,72(SP) - MOVQ 264(SP),SI - IMUL3Q $19,SI,AX - MOVQ AX,200(SP) - MULQ 16(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 272(SP),DX - IMUL3Q $19,DX,AX - MOVQ AX,208(SP) - MULQ 8(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 240(SP),AX - MULQ 0(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 240(SP),AX - MULQ 8(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 240(SP),AX - MULQ 16(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 240(SP),AX - MULQ 24(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 240(SP),AX - MULQ 32(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 248(SP),AX - MULQ 0(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 248(SP),AX - MULQ 8(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 248(SP),AX - MULQ 16(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 248(SP),AX - MULQ 24(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 248(SP),DX - IMUL3Q $19,DX,AX - MULQ 32(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 256(SP),AX - MULQ 0(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 256(SP),AX - MULQ 8(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 256(SP),AX - MULQ 16(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 256(SP),DX - IMUL3Q $19,DX,AX - MULQ 24(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 256(SP),DX - IMUL3Q $19,DX,AX - MULQ 32(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 264(SP),AX - MULQ 0(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 264(SP),AX - MULQ 8(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 200(SP),AX - MULQ 24(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 200(SP),AX - MULQ 32(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 272(SP),AX - MULQ 0(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 208(SP),AX - MULQ 16(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 208(SP),AX - MULQ 24(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 208(SP),AX - MULQ 32(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX 
- ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,DX - MOVQ R8,CX - MOVQ R9,R11 - MOVQ AX,R12 - MOVQ R10,R13 - ADDQ ·_2P0(SB),DX - ADDQ ·_2P1234(SB),CX - ADDQ ·_2P1234(SB),R11 - ADDQ ·_2P1234(SB),R12 - ADDQ ·_2P1234(SB),R13 - ADDQ 40(SP),SI - ADDQ 48(SP),R8 - ADDQ 56(SP),R9 - ADDQ 64(SP),AX - ADDQ 72(SP),R10 - SUBQ 40(SP),DX - SUBQ 48(SP),CX - SUBQ 56(SP),R11 - SUBQ 64(SP),R12 - SUBQ 72(SP),R13 - MOVQ SI,120(DI) - MOVQ R8,128(DI) - MOVQ R9,136(DI) - MOVQ AX,144(DI) - MOVQ R10,152(DI) - MOVQ DX,160(DI) - MOVQ CX,168(DI) - MOVQ R11,176(DI) - MOVQ R12,184(DI) - MOVQ R13,192(DI) - MOVQ 120(DI),AX - MULQ 120(DI) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 120(DI),AX - SHLQ $1,AX - MULQ 128(DI) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 120(DI),AX - SHLQ $1,AX - MULQ 136(DI) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 120(DI),AX - SHLQ $1,AX - MULQ 144(DI) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 120(DI),AX - SHLQ $1,AX - MULQ 152(DI) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 128(DI),AX - MULQ 128(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 128(DI),AX - SHLQ $1,AX - MULQ 136(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 128(DI),AX - SHLQ $1,AX - MULQ 144(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 128(DI),DX - IMUL3Q $38,DX,AX - MULQ 152(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 136(DI),AX - MULQ 136(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 136(DI),DX - IMUL3Q $38,DX,AX - MULQ 144(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 136(DI),DX - IMUL3Q $38,DX,AX - MULQ 152(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 144(DI),DX - IMUL3Q $19,DX,AX - MULQ 144(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 144(DI),DX - IMUL3Q $38,DX,AX - MULQ 152(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 152(DI),DX - IMUL3Q $19,DX,AX - MULQ 152(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,120(DI) - MOVQ R8,128(DI) - MOVQ R9,136(DI) - MOVQ AX,144(DI) - MOVQ R10,152(DI) - MOVQ 160(DI),AX - MULQ 160(DI) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 168(DI) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 176(DI) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 184(DI) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 192(DI) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 168(DI),AX - MULQ 168(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 168(DI),AX - SHLQ $1,AX - MULQ 176(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 168(DI),AX - SHLQ $1,AX - MULQ 184(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 168(DI),DX - IMUL3Q $38,DX,AX - MULQ 192(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),AX - MULQ 176(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 176(DI),DX - IMUL3Q $38,DX,AX - MULQ 184(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),DX - IMUL3Q $38,DX,AX - MULQ 192(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 184(DI),DX - IMUL3Q $19,DX,AX - MULQ 184(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 184(DI),DX - IMUL3Q $38,DX,AX - MULQ 192(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 192(DI),DX - IMUL3Q $19,DX,AX - MULQ 192(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - 
SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,160(DI) - MOVQ R8,168(DI) - MOVQ R9,176(DI) - MOVQ AX,184(DI) - MOVQ R10,192(DI) - MOVQ 184(DI),SI - IMUL3Q $19,SI,AX - MOVQ AX,0(SP) - MULQ 16(DI) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 192(DI),DX - IMUL3Q $19,DX,AX - MOVQ AX,8(SP) - MULQ 8(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 160(DI),AX - MULQ 0(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 160(DI),AX - MULQ 8(DI) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 160(DI),AX - MULQ 16(DI) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 160(DI),AX - MULQ 24(DI) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 160(DI),AX - MULQ 32(DI) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 168(DI),AX - MULQ 0(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 168(DI),AX - MULQ 8(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 168(DI),AX - MULQ 16(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 168(DI),AX - MULQ 24(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 168(DI),DX - IMUL3Q $19,DX,AX - MULQ 32(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),AX - MULQ 0(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 176(DI),AX - MULQ 8(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 176(DI),AX - MULQ 16(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 176(DI),DX - IMUL3Q $19,DX,AX - MULQ 24(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),DX - IMUL3Q $19,DX,AX - MULQ 32(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 184(DI),AX - MULQ 0(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 184(DI),AX - MULQ 8(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 0(SP),AX - MULQ 24(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SP),AX - MULQ 32(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 192(DI),AX - MULQ 0(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SP),AX - MULQ 16(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 8(SP),AX - MULQ 24(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - MULQ 32(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,160(DI) - MOVQ R8,168(DI) - MOVQ R9,176(DI) - MOVQ AX,184(DI) - MOVQ R10,192(DI) - MOVQ 144(SP),SI - IMUL3Q $19,SI,AX - MOVQ AX,0(SP) - MULQ 96(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 152(SP),DX - IMUL3Q $19,DX,AX - MOVQ AX,8(SP) - MULQ 88(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 120(SP),AX - MULQ 80(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 120(SP),AX - MULQ 88(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 120(SP),AX - MULQ 96(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 120(SP),AX - MULQ 104(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 120(SP),AX - MULQ 112(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 128(SP),AX - MULQ 80(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 128(SP),AX - MULQ 88(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 128(SP),AX - MULQ 96(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 128(SP),AX - MULQ 104(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 128(SP),DX - IMUL3Q $19,DX,AX - MULQ 112(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 
136(SP),AX - MULQ 80(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 136(SP),AX - MULQ 88(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 136(SP),AX - MULQ 96(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 136(SP),DX - IMUL3Q $19,DX,AX - MULQ 104(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 136(SP),DX - IMUL3Q $19,DX,AX - MULQ 112(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 144(SP),AX - MULQ 80(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 144(SP),AX - MULQ 88(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 0(SP),AX - MULQ 104(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SP),AX - MULQ 112(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 152(SP),AX - MULQ 80(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SP),AX - MULQ 96(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 8(SP),AX - MULQ 104(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - MULQ 112(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,40(DI) - MOVQ R8,48(DI) - MOVQ R9,56(DI) - MOVQ AX,64(DI) - MOVQ R10,72(DI) - MOVQ 160(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - MOVQ AX,SI - MOVQ DX,CX - MOVQ 168(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,CX - MOVQ DX,R8 - MOVQ 176(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,R8 - MOVQ DX,R9 - MOVQ 184(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,R9 - MOVQ DX,R10 - MOVQ 192(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,R10 - IMUL3Q $19,DX,DX - ADDQ DX,SI - ADDQ 80(SP),SI - ADDQ 88(SP),CX - ADDQ 96(SP),R8 - ADDQ 104(SP),R9 - ADDQ 112(SP),R10 - MOVQ SI,80(DI) - MOVQ CX,88(DI) - MOVQ R8,96(DI) - MOVQ R9,104(DI) - MOVQ R10,112(DI) - MOVQ 104(DI),SI - IMUL3Q $19,SI,AX - MOVQ AX,0(SP) - MULQ 176(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 112(DI),DX - IMUL3Q $19,DX,AX - MOVQ AX,8(SP) - MULQ 168(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 80(DI),AX - MULQ 160(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 80(DI),AX - MULQ 168(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 80(DI),AX - MULQ 176(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 80(DI),AX - MULQ 184(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 80(DI),AX - MULQ 192(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 88(DI),AX - MULQ 160(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 88(DI),AX - MULQ 168(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 88(DI),AX - MULQ 176(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 88(DI),AX - MULQ 184(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 88(DI),DX - IMUL3Q $19,DX,AX - MULQ 192(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 96(DI),AX - MULQ 160(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 96(DI),AX - MULQ 168(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 96(DI),AX - MULQ 176(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 96(DI),DX - IMUL3Q $19,DX,AX - MULQ 184(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 96(DI),DX - IMUL3Q $19,DX,AX - MULQ 192(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 104(DI),AX - MULQ 160(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 104(DI),AX - MULQ 168(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 0(SP),AX - MULQ 184(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SP),AX - MULQ 192(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 112(DI),AX - MULQ 160(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SP),AX - MULQ 176(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 
8(SP),AX - MULQ 184(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - MULQ 192(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,80(DI) - MOVQ R8,88(DI) - MOVQ R9,96(DI) - MOVQ AX,104(DI) - MOVQ R10,112(DI) - RET - -// func cswap(inout *[4][5]uint64, v uint64) -TEXT ·cswap(SB),7,$0 - MOVQ inout+0(FP),DI - MOVQ v+8(FP),SI - - SUBQ $1, SI - NOTQ SI - MOVQ SI, X15 - PSHUFD $0x44, X15, X15 - - MOVOU 0(DI), X0 - MOVOU 16(DI), X2 - MOVOU 32(DI), X4 - MOVOU 48(DI), X6 - MOVOU 64(DI), X8 - MOVOU 80(DI), X1 - MOVOU 96(DI), X3 - MOVOU 112(DI), X5 - MOVOU 128(DI), X7 - MOVOU 144(DI), X9 - - MOVO X1, X10 - MOVO X3, X11 - MOVO X5, X12 - MOVO X7, X13 - MOVO X9, X14 - - PXOR X0, X10 - PXOR X2, X11 - PXOR X4, X12 - PXOR X6, X13 - PXOR X8, X14 - PAND X15, X10 - PAND X15, X11 - PAND X15, X12 - PAND X15, X13 - PAND X15, X14 - PXOR X10, X0 - PXOR X10, X1 - PXOR X11, X2 - PXOR X11, X3 - PXOR X12, X4 - PXOR X12, X5 - PXOR X13, X6 - PXOR X13, X7 - PXOR X14, X8 - PXOR X14, X9 - - MOVOU X0, 0(DI) - MOVOU X2, 16(DI) - MOVOU X4, 32(DI) - MOVOU X6, 48(DI) - MOVOU X8, 64(DI) - MOVOU X1, 80(DI) - MOVOU X3, 96(DI) - MOVOU X5, 112(DI) - MOVOU X7, 128(DI) - MOVOU X9, 144(DI) - RET - -// func mul(dest, a, b *[5]uint64) -TEXT ·mul(SB),0,$16-24 - MOVQ dest+0(FP), DI - MOVQ a+8(FP), SI - MOVQ b+16(FP), DX - - MOVQ DX,CX - MOVQ 24(SI),DX - IMUL3Q $19,DX,AX - MOVQ AX,0(SP) - MULQ 16(CX) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 32(SI),DX - IMUL3Q $19,DX,AX - MOVQ AX,8(SP) - MULQ 8(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SI),AX - MULQ 0(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SI),AX - MULQ 8(CX) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 0(SI),AX - MULQ 16(CX) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 0(SI),AX - MULQ 24(CX) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 0(SI),AX - MULQ 32(CX) - MOVQ AX,BX - MOVQ DX,BP - MOVQ 8(SI),AX - MULQ 0(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SI),AX - MULQ 8(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 8(SI),AX - MULQ 16(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SI),AX - MULQ 24(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 8(SI),DX - IMUL3Q $19,DX,AX - MULQ 32(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 16(SI),AX - MULQ 0(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 16(SI),AX - MULQ 8(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 16(SI),AX - MULQ 16(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 16(SI),DX - IMUL3Q $19,DX,AX - MULQ 24(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 16(SI),DX - IMUL3Q $19,DX,AX - MULQ 32(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 24(SI),AX - MULQ 0(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 24(SI),AX - MULQ 8(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 0(SP),AX - MULQ 24(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 0(SP),AX - MULQ 32(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 32(SI),AX - MULQ 0(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 8(SP),AX - MULQ 16(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - MULQ 24(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 8(SP),AX - MULQ 32(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ $REDMASK51,SI - SHLQ $13,R8,R9 - ANDQ SI,R8 - SHLQ $13,R10,R11 - ANDQ SI,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ SI,R12 - ADDQ 
R11,R12 - SHLQ $13,R14,R15 - ANDQ SI,R14 - ADDQ R13,R14 - SHLQ $13,BX,BP - ANDQ SI,BX - ADDQ R15,BX - IMUL3Q $19,BP,DX - ADDQ DX,R8 - MOVQ R8,DX - SHRQ $51,DX - ADDQ R10,DX - MOVQ DX,CX - SHRQ $51,DX - ANDQ SI,R8 - ADDQ R12,DX - MOVQ DX,R9 - SHRQ $51,DX - ANDQ SI,CX - ADDQ R14,DX - MOVQ DX,AX - SHRQ $51,DX - ANDQ SI,R9 - ADDQ BX,DX - MOVQ DX,R10 - SHRQ $51,DX - ANDQ SI,AX - IMUL3Q $19,DX,DX - ADDQ DX,R8 - ANDQ SI,R10 - MOVQ R8,0(DI) - MOVQ CX,8(DI) - MOVQ R9,16(DI) - MOVQ AX,24(DI) - MOVQ R10,32(DI) - RET - -// func square(out, in *[5]uint64) -TEXT ·square(SB),7,$0-16 - MOVQ out+0(FP), DI - MOVQ in+8(FP), SI - - MOVQ 0(SI),AX - MULQ 0(SI) - MOVQ AX,CX - MOVQ DX,R8 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 8(SI) - MOVQ AX,R9 - MOVQ DX,R10 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 16(SI) - MOVQ AX,R11 - MOVQ DX,R12 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 24(SI) - MOVQ AX,R13 - MOVQ DX,R14 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 32(SI) - MOVQ AX,R15 - MOVQ DX,BX - MOVQ 8(SI),AX - MULQ 8(SI) - ADDQ AX,R11 - ADCQ DX,R12 - MOVQ 8(SI),AX - SHLQ $1,AX - MULQ 16(SI) - ADDQ AX,R13 - ADCQ DX,R14 - MOVQ 8(SI),AX - SHLQ $1,AX - MULQ 24(SI) - ADDQ AX,R15 - ADCQ DX,BX - MOVQ 8(SI),DX - IMUL3Q $38,DX,AX - MULQ 32(SI) - ADDQ AX,CX - ADCQ DX,R8 - MOVQ 16(SI),AX - MULQ 16(SI) - ADDQ AX,R15 - ADCQ DX,BX - MOVQ 16(SI),DX - IMUL3Q $38,DX,AX - MULQ 24(SI) - ADDQ AX,CX - ADCQ DX,R8 - MOVQ 16(SI),DX - IMUL3Q $38,DX,AX - MULQ 32(SI) - ADDQ AX,R9 - ADCQ DX,R10 - MOVQ 24(SI),DX - IMUL3Q $19,DX,AX - MULQ 24(SI) - ADDQ AX,R9 - ADCQ DX,R10 - MOVQ 24(SI),DX - IMUL3Q $38,DX,AX - MULQ 32(SI) - ADDQ AX,R11 - ADCQ DX,R12 - MOVQ 32(SI),DX - IMUL3Q $19,DX,AX - MULQ 32(SI) - ADDQ AX,R13 - ADCQ DX,R14 - MOVQ $REDMASK51,SI - SHLQ $13,CX,R8 - ANDQ SI,CX - SHLQ $13,R9,R10 - ANDQ SI,R9 - ADDQ R8,R9 - SHLQ $13,R11,R12 - ANDQ SI,R11 - ADDQ R10,R11 - SHLQ $13,R13,R14 - ANDQ SI,R13 - ADDQ R12,R13 - SHLQ $13,R15,BX - ANDQ SI,R15 - ADDQ R14,R15 - IMUL3Q $19,BX,DX - ADDQ DX,CX - MOVQ CX,DX - SHRQ $51,DX - ADDQ R9,DX - ANDQ SI,CX - MOVQ DX,R8 - SHRQ $51,DX - ADDQ R11,DX - ANDQ SI,R8 - MOVQ DX,R9 - SHRQ $51,DX - ADDQ R13,DX - ANDQ SI,R9 - MOVQ DX,AX - SHRQ $51,DX - ADDQ R15,DX - ANDQ SI,AX - MOVQ DX,R10 - SHRQ $51,DX - IMUL3Q $19,DX,DX - ADDQ DX,CX - ANDQ SI,R10 - MOVQ CX,0(DI) - MOVQ R8,8(DI) - MOVQ R9,16(DI) - MOVQ AX,24(DI) - MOVQ R10,32(DI) - RET diff --git a/awsproviderlint/vendor/golang.org/x/crypto/curve25519/curve25519_generic.go b/awsproviderlint/vendor/golang.org/x/crypto/curve25519/curve25519_generic.go deleted file mode 100644 index c43b13fc83e..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/curve25519/curve25519_generic.go +++ /dev/null @@ -1,828 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package curve25519 - -import "encoding/binary" - -// This code is a port of the public domain, "ref10" implementation of -// curve25519 from SUPERCOP 20130419 by D. J. Bernstein. - -// fieldElement represents an element of the field GF(2^255 - 19). An element -// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 -// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on -// context. 
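To make the limb layout described above concrete: the generic backend's limbs alternate 26 and 25 bits, so limb i begins at bit ceil(25.5·i), reproducing the offsets 0, 26, 51, 77, 102, 128, 153, 179, 204 and 230 listed in the comment. A one-line check:

```go
package main

import "fmt"

func main() {
	// Bit offset of limb i in the radix-2^25.5 representation:
	// ceil(25.5*i) == (51*i + 1) / 2 in integer arithmetic.
	for i := 0; i < 10; i++ {
		fmt.Printf("t[%d] starts at bit %d\n", i, (51*i+1)/2)
	}
}
```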
-type fieldElement [10]int32 - -func feZero(fe *fieldElement) { - for i := range fe { - fe[i] = 0 - } -} - -func feOne(fe *fieldElement) { - feZero(fe) - fe[0] = 1 -} - -func feAdd(dst, a, b *fieldElement) { - for i := range dst { - dst[i] = a[i] + b[i] - } -} - -func feSub(dst, a, b *fieldElement) { - for i := range dst { - dst[i] = a[i] - b[i] - } -} - -func feCopy(dst, src *fieldElement) { - for i := range dst { - dst[i] = src[i] - } -} - -// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0. -// -// Preconditions: b in {0,1}. -func feCSwap(f, g *fieldElement, b int32) { - b = -b - for i := range f { - t := b & (f[i] ^ g[i]) - f[i] ^= t - g[i] ^= t - } -} - -// load3 reads a 24-bit, little-endian value from in. -func load3(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - return r -} - -// load4 reads a 32-bit, little-endian value from in. -func load4(in []byte) int64 { - return int64(binary.LittleEndian.Uint32(in)) -} - -func feFromBytes(dst *fieldElement, src *[32]byte) { - h0 := load4(src[:]) - h1 := load3(src[4:]) << 6 - h2 := load3(src[7:]) << 5 - h3 := load3(src[10:]) << 3 - h4 := load3(src[13:]) << 2 - h5 := load4(src[16:]) - h6 := load3(src[20:]) << 7 - h7 := load3(src[23:]) << 5 - h8 := load3(src[26:]) << 4 - h9 := (load3(src[29:]) & 0x7fffff) << 2 - - var carry [10]int64 - carry[9] = (h9 + 1<<24) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - carry[1] = (h1 + 1<<24) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[3] = (h3 + 1<<24) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[5] = (h5 + 1<<24) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - carry[7] = (h7 + 1<<24) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[0] = (h0 + 1<<25) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[2] = (h2 + 1<<25) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[4] = (h4 + 1<<25) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[6] = (h6 + 1<<25) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - carry[8] = (h8 + 1<<25) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - dst[0] = int32(h0) - dst[1] = int32(h1) - dst[2] = int32(h2) - dst[3] = int32(h3) - dst[4] = int32(h4) - dst[5] = int32(h5) - dst[6] = int32(h6) - dst[7] = int32(h7) - dst[8] = int32(h8) - dst[9] = int32(h9) -} - -// feToBytes marshals h to s. -// Preconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Write p=2^255-19; q=floor(h/p). -// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). -// -// Proof: -// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. -// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. -// -// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). -// Then 0<y<1. -// -// Write r=h-pq. -// Have 0<=r<=p-1=2^255-20. -// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1. -// -// Write x=r+19(2^-255)r+y. -// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q. -// -// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1)) -// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. -func feToBytes(s *[32]byte, h *fieldElement) { - var carry [10]int32 - - q := (19*h[9] + (1 << 24)) >> 25 - q = (h[0] + q) >> 26 - q = (h[1] + q) >> 25 - q = (h[2] + q) >> 26 - q = (h[3] + q) >> 25 - q = (h[4] + q) >> 26 - q = (h[5] + q) >> 25 - q = (h[6] + q) >> 26 - q = (h[7] + q) >> 25 - q = (h[8] + q) >> 26 - q = (h[9] + q) >> 25 - - // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. - h[0] += 19 * q - // Goal: Output h-2^255 q, which is between 0 and 2^255-20. 
- - carry[0] = h[0] >> 26 - h[1] += carry[0] - h[0] -= carry[0] << 26 - carry[1] = h[1] >> 25 - h[2] += carry[1] - h[1] -= carry[1] << 25 - carry[2] = h[2] >> 26 - h[3] += carry[2] - h[2] -= carry[2] << 26 - carry[3] = h[3] >> 25 - h[4] += carry[3] - h[3] -= carry[3] << 25 - carry[4] = h[4] >> 26 - h[5] += carry[4] - h[4] -= carry[4] << 26 - carry[5] = h[5] >> 25 - h[6] += carry[5] - h[5] -= carry[5] << 25 - carry[6] = h[6] >> 26 - h[7] += carry[6] - h[6] -= carry[6] << 26 - carry[7] = h[7] >> 25 - h[8] += carry[7] - h[7] -= carry[7] << 25 - carry[8] = h[8] >> 26 - h[9] += carry[8] - h[8] -= carry[8] << 26 - carry[9] = h[9] >> 25 - h[9] -= carry[9] << 25 - // h10 = carry9 - - // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. - // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; - // evidently 2^255 h10-2^255 q = 0. - // Goal: Output h[0]+...+2^230 h[9]. - - s[0] = byte(h[0] >> 0) - s[1] = byte(h[0] >> 8) - s[2] = byte(h[0] >> 16) - s[3] = byte((h[0] >> 24) | (h[1] << 2)) - s[4] = byte(h[1] >> 6) - s[5] = byte(h[1] >> 14) - s[6] = byte((h[1] >> 22) | (h[2] << 3)) - s[7] = byte(h[2] >> 5) - s[8] = byte(h[2] >> 13) - s[9] = byte((h[2] >> 21) | (h[3] << 5)) - s[10] = byte(h[3] >> 3) - s[11] = byte(h[3] >> 11) - s[12] = byte((h[3] >> 19) | (h[4] << 6)) - s[13] = byte(h[4] >> 2) - s[14] = byte(h[4] >> 10) - s[15] = byte(h[4] >> 18) - s[16] = byte(h[5] >> 0) - s[17] = byte(h[5] >> 8) - s[18] = byte(h[5] >> 16) - s[19] = byte((h[5] >> 24) | (h[6] << 1)) - s[20] = byte(h[6] >> 7) - s[21] = byte(h[6] >> 15) - s[22] = byte((h[6] >> 23) | (h[7] << 3)) - s[23] = byte(h[7] >> 5) - s[24] = byte(h[7] >> 13) - s[25] = byte((h[7] >> 21) | (h[8] << 4)) - s[26] = byte(h[8] >> 4) - s[27] = byte(h[8] >> 12) - s[28] = byte((h[8] >> 20) | (h[9] << 6)) - s[29] = byte(h[9] >> 2) - s[30] = byte(h[9] >> 10) - s[31] = byte(h[9] >> 18) -} - -// feMul calculates h = f * g -// Can overlap h with f or g. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Notes on implementation strategy: -// -// Using schoolbook multiplication. -// Karatsuba would save a little in some cost models. -// -// Most multiplications by 2 and 19 are 32-bit precomputations; -// cheaper than 64-bit postcomputations. -// -// There is one remaining multiplication by 19 in the carry chain; -// one *19 precomputation can be merged into this, -// but the resulting data flow is considerably less clean. -// -// There are 12 carries below. -// 10 of them are 2-way parallelizable and vectorizable. -// Can get away with 11 carries, but then data flow is much deeper. -// -// With tighter constraints on inputs can squeeze carries into int32. 
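One more identity underpins the strategy notes above: 2^255 ≡ 19 (mod p), so any product term landing at weight 2^(255+k) folds back as 19 times the same term at weight 2^k. That is why feMul below premultiplies the high limbs of g by 19 (g1_19 .. g9_19), and by 38 = 2·19 where a doubled odd-limb cross term is folded at the same time. A math/big sketch of the identity:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	r := new(big.Int).Mod(new(big.Int).Lsh(big.NewInt(1), 255), p)
	fmt.Println(r) // 19: overflow past bit 255 re-enters at weight 1, times 19
}
```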
-func feMul(h, f, g *fieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - g0 := g[0] - g1 := g[1] - g2 := g[2] - g3 := g[3] - g4 := g[4] - g5 := g[5] - g6 := g[6] - g7 := g[7] - g8 := g[8] - g9 := g[9] - g1_19 := 19 * g1 // 1.4*2^29 - g2_19 := 19 * g2 // 1.4*2^30; still ok - g3_19 := 19 * g3 - g4_19 := 19 * g4 - g5_19 := 19 * g5 - g6_19 := 19 * g6 - g7_19 := 19 * g7 - g8_19 := 19 * g8 - g9_19 := 19 * g9 - f1_2 := 2 * f1 - f3_2 := 2 * f3 - f5_2 := 2 * f5 - f7_2 := 2 * f7 - f9_2 := 2 * f9 - f0g0 := int64(f0) * int64(g0) - f0g1 := int64(f0) * int64(g1) - f0g2 := int64(f0) * int64(g2) - f0g3 := int64(f0) * int64(g3) - f0g4 := int64(f0) * int64(g4) - f0g5 := int64(f0) * int64(g5) - f0g6 := int64(f0) * int64(g6) - f0g7 := int64(f0) * int64(g7) - f0g8 := int64(f0) * int64(g8) - f0g9 := int64(f0) * int64(g9) - f1g0 := int64(f1) * int64(g0) - f1g1_2 := int64(f1_2) * int64(g1) - f1g2 := int64(f1) * int64(g2) - f1g3_2 := int64(f1_2) * int64(g3) - f1g4 := int64(f1) * int64(g4) - f1g5_2 := int64(f1_2) * int64(g5) - f1g6 := int64(f1) * int64(g6) - f1g7_2 := int64(f1_2) * int64(g7) - f1g8 := int64(f1) * int64(g8) - f1g9_38 := int64(f1_2) * int64(g9_19) - f2g0 := int64(f2) * int64(g0) - f2g1 := int64(f2) * int64(g1) - f2g2 := int64(f2) * int64(g2) - f2g3 := int64(f2) * int64(g3) - f2g4 := int64(f2) * int64(g4) - f2g5 := int64(f2) * int64(g5) - f2g6 := int64(f2) * int64(g6) - f2g7 := int64(f2) * int64(g7) - f2g8_19 := int64(f2) * int64(g8_19) - f2g9_19 := int64(f2) * int64(g9_19) - f3g0 := int64(f3) * int64(g0) - f3g1_2 := int64(f3_2) * int64(g1) - f3g2 := int64(f3) * int64(g2) - f3g3_2 := int64(f3_2) * int64(g3) - f3g4 := int64(f3) * int64(g4) - f3g5_2 := int64(f3_2) * int64(g5) - f3g6 := int64(f3) * int64(g6) - f3g7_38 := int64(f3_2) * int64(g7_19) - f3g8_19 := int64(f3) * int64(g8_19) - f3g9_38 := int64(f3_2) * int64(g9_19) - f4g0 := int64(f4) * int64(g0) - f4g1 := int64(f4) * int64(g1) - f4g2 := int64(f4) * int64(g2) - f4g3 := int64(f4) * int64(g3) - f4g4 := int64(f4) * int64(g4) - f4g5 := int64(f4) * int64(g5) - f4g6_19 := int64(f4) * int64(g6_19) - f4g7_19 := int64(f4) * int64(g7_19) - f4g8_19 := int64(f4) * int64(g8_19) - f4g9_19 := int64(f4) * int64(g9_19) - f5g0 := int64(f5) * int64(g0) - f5g1_2 := int64(f5_2) * int64(g1) - f5g2 := int64(f5) * int64(g2) - f5g3_2 := int64(f5_2) * int64(g3) - f5g4 := int64(f5) * int64(g4) - f5g5_38 := int64(f5_2) * int64(g5_19) - f5g6_19 := int64(f5) * int64(g6_19) - f5g7_38 := int64(f5_2) * int64(g7_19) - f5g8_19 := int64(f5) * int64(g8_19) - f5g9_38 := int64(f5_2) * int64(g9_19) - f6g0 := int64(f6) * int64(g0) - f6g1 := int64(f6) * int64(g1) - f6g2 := int64(f6) * int64(g2) - f6g3 := int64(f6) * int64(g3) - f6g4_19 := int64(f6) * int64(g4_19) - f6g5_19 := int64(f6) * int64(g5_19) - f6g6_19 := int64(f6) * int64(g6_19) - f6g7_19 := int64(f6) * int64(g7_19) - f6g8_19 := int64(f6) * int64(g8_19) - f6g9_19 := int64(f6) * int64(g9_19) - f7g0 := int64(f7) * int64(g0) - f7g1_2 := int64(f7_2) * int64(g1) - f7g2 := int64(f7) * int64(g2) - f7g3_38 := int64(f7_2) * int64(g3_19) - f7g4_19 := int64(f7) * int64(g4_19) - f7g5_38 := int64(f7_2) * int64(g5_19) - f7g6_19 := int64(f7) * int64(g6_19) - f7g7_38 := int64(f7_2) * int64(g7_19) - f7g8_19 := int64(f7) * int64(g8_19) - f7g9_38 := int64(f7_2) * int64(g9_19) - f8g0 := int64(f8) * int64(g0) - f8g1 := int64(f8) * int64(g1) - f8g2_19 := int64(f8) * int64(g2_19) - f8g3_19 := int64(f8) * int64(g3_19) - f8g4_19 := int64(f8) * int64(g4_19) - 
f8g5_19 := int64(f8) * int64(g5_19) - f8g6_19 := int64(f8) * int64(g6_19) - f8g7_19 := int64(f8) * int64(g7_19) - f8g8_19 := int64(f8) * int64(g8_19) - f8g9_19 := int64(f8) * int64(g9_19) - f9g0 := int64(f9) * int64(g0) - f9g1_38 := int64(f9_2) * int64(g1_19) - f9g2_19 := int64(f9) * int64(g2_19) - f9g3_38 := int64(f9_2) * int64(g3_19) - f9g4_19 := int64(f9) * int64(g4_19) - f9g5_38 := int64(f9_2) * int64(g5_19) - f9g6_19 := int64(f9) * int64(g6_19) - f9g7_38 := int64(f9_2) * int64(g7_19) - f9g8_19 := int64(f9) * int64(g8_19) - f9g9_38 := int64(f9_2) * int64(g9_19) - h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 - h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 - h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 - h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 - h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 - h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 - h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 - h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 - h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 - h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 - var carry [10]int64 - - // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) - // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 - // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) - // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - // |h0| <= 2^25 - // |h4| <= 2^25 - // |h1| <= 1.51*2^58 - // |h5| <= 1.51*2^58 - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - // |h1| <= 2^24; from now on fits into int32 - // |h5| <= 2^24; from now on fits into int32 - // |h2| <= 1.21*2^59 - // |h6| <= 1.21*2^59 - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - // |h2| <= 2^25; from now on fits into int32 unchanged - // |h6| <= 2^25; from now on fits into int32 unchanged - // |h3| <= 1.51*2^58 - // |h7| <= 1.51*2^58 - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - // |h3| <= 2^24; from now on fits into int32 unchanged - // |h7| <= 2^24; from now on fits into int32 unchanged - // |h4| <= 1.52*2^33 - // |h8| <= 1.52*2^33 - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - // |h4| <= 2^25; from now on fits into int32 unchanged - // |h8| <= 2^25; from now on fits into int32 unchanged - // |h5| <= 1.01*2^24 - // |h9| <= 1.51*2^58 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - // |h9| <= 2^24; from now on fits into int32 unchanged - // |h0| <= 1.8*2^37 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - // |h0| <= 2^25; from now on fits into int32 unchanged - // |h1| <= 
1.01*2^24 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// feSquare calculates h = f*f. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func feSquare(h, f *fieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - f0_2 := 2 * f0 - f1_2 := 2 * f1 - f2_2 := 2 * f2 - f3_2 := 2 * f3 - f4_2 := 2 * f4 - f5_2 := 2 * f5 - f6_2 := 2 * f6 - f7_2 := 2 * f7 - f5_38 := 38 * f5 // 1.31*2^30 - f6_19 := 19 * f6 // 1.31*2^30 - f7_38 := 38 * f7 // 1.31*2^30 - f8_19 := 19 * f8 // 1.31*2^30 - f9_38 := 38 * f9 // 1.31*2^30 - f0f0 := int64(f0) * int64(f0) - f0f1_2 := int64(f0_2) * int64(f1) - f0f2_2 := int64(f0_2) * int64(f2) - f0f3_2 := int64(f0_2) * int64(f3) - f0f4_2 := int64(f0_2) * int64(f4) - f0f5_2 := int64(f0_2) * int64(f5) - f0f6_2 := int64(f0_2) * int64(f6) - f0f7_2 := int64(f0_2) * int64(f7) - f0f8_2 := int64(f0_2) * int64(f8) - f0f9_2 := int64(f0_2) * int64(f9) - f1f1_2 := int64(f1_2) * int64(f1) - f1f2_2 := int64(f1_2) * int64(f2) - f1f3_4 := int64(f1_2) * int64(f3_2) - f1f4_2 := int64(f1_2) * int64(f4) - f1f5_4 := int64(f1_2) * int64(f5_2) - f1f6_2 := int64(f1_2) * int64(f6) - f1f7_4 := int64(f1_2) * int64(f7_2) - f1f8_2 := int64(f1_2) * int64(f8) - f1f9_76 := int64(f1_2) * int64(f9_38) - f2f2 := int64(f2) * int64(f2) - f2f3_2 := int64(f2_2) * int64(f3) - f2f4_2 := int64(f2_2) * int64(f4) - f2f5_2 := int64(f2_2) * int64(f5) - f2f6_2 := int64(f2_2) * int64(f6) - f2f7_2 := int64(f2_2) * int64(f7) - f2f8_38 := int64(f2_2) * int64(f8_19) - f2f9_38 := int64(f2) * int64(f9_38) - f3f3_2 := int64(f3_2) * int64(f3) - f3f4_2 := int64(f3_2) * int64(f4) - f3f5_4 := int64(f3_2) * int64(f5_2) - f3f6_2 := int64(f3_2) * int64(f6) - f3f7_76 := int64(f3_2) * int64(f7_38) - f3f8_38 := int64(f3_2) * int64(f8_19) - f3f9_76 := int64(f3_2) * int64(f9_38) - f4f4 := int64(f4) * int64(f4) - f4f5_2 := int64(f4_2) * int64(f5) - f4f6_38 := int64(f4_2) * int64(f6_19) - f4f7_38 := int64(f4) * int64(f7_38) - f4f8_38 := int64(f4_2) * int64(f8_19) - f4f9_38 := int64(f4) * int64(f9_38) - f5f5_38 := int64(f5) * int64(f5_38) - f5f6_38 := int64(f5_2) * int64(f6_19) - f5f7_76 := int64(f5_2) * int64(f7_38) - f5f8_38 := int64(f5_2) * int64(f8_19) - f5f9_76 := int64(f5_2) * int64(f9_38) - f6f6_19 := int64(f6) * int64(f6_19) - f6f7_38 := int64(f6) * int64(f7_38) - f6f8_38 := int64(f6_2) * int64(f8_19) - f6f9_38 := int64(f6) * int64(f9_38) - f7f7_38 := int64(f7) * int64(f7_38) - f7f8_38 := int64(f7_2) * int64(f8_19) - f7f9_76 := int64(f7_2) * int64(f9_38) - f8f8_19 := int64(f8) * int64(f8_19) - f8f9_38 := int64(f8) * int64(f9_38) - f9f9_38 := int64(f9) * int64(f9_38) - h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 - h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 - h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 - h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 - h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 - h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 - h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 - h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 - h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 - h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 - var carry [10]int64 - - carry[0] 
= (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// feMul121666 calculates h = f * 121666. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func feMul121666(h, f *fieldElement) { - h0 := int64(f[0]) * 121666 - h1 := int64(f[1]) * 121666 - h2 := int64(f[2]) * 121666 - h3 := int64(f[3]) * 121666 - h4 := int64(f[4]) * 121666 - h5 := int64(f[5]) * 121666 - h6 := int64(f[6]) * 121666 - h7 := int64(f[7]) * 121666 - h8 := int64(f[8]) * 121666 - h9 := int64(f[9]) * 121666 - var carry [10]int64 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// feInvert sets out = z^-1. 
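Inversion works by Fermat's little theorem: p = 2^255 - 19 is prime, so z^(p-2) = z^-1 (mod p) for nonzero z, and the fixed chain below evaluates that exponent with 254 squarings and 11 field multiplications. A standalone check of the underlying identity, using math/big purely for illustration:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^255 - 19
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	z := big.NewInt(9) // any nonzero field element
	// z^(p-2) mod p is the multiplicative inverse of z.
	inv := new(big.Int).Exp(z, new(big.Int).Sub(p, big.NewInt(2)), p)
	got := new(big.Int).Mul(z, inv)
	fmt.Println(got.Mod(got, p)) // prints: 1
}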
-func feInvert(out, z *fieldElement) { - var t0, t1, t2, t3 fieldElement - var i int - - feSquare(&t0, z) - for i = 1; i < 1; i++ { - feSquare(&t0, &t0) - } - feSquare(&t1, &t0) - for i = 1; i < 2; i++ { - feSquare(&t1, &t1) - } - feMul(&t1, z, &t1) - feMul(&t0, &t0, &t1) - feSquare(&t2, &t0) - for i = 1; i < 1; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t1, &t2) - feSquare(&t2, &t1) - for i = 1; i < 5; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t2, &t1) - feSquare(&t2, &t1) - for i = 1; i < 10; i++ { - feSquare(&t2, &t2) - } - feMul(&t2, &t2, &t1) - feSquare(&t3, &t2) - for i = 1; i < 20; i++ { - feSquare(&t3, &t3) - } - feMul(&t2, &t3, &t2) - feSquare(&t2, &t2) - for i = 1; i < 10; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t2, &t1) - feSquare(&t2, &t1) - for i = 1; i < 50; i++ { - feSquare(&t2, &t2) - } - feMul(&t2, &t2, &t1) - feSquare(&t3, &t2) - for i = 1; i < 100; i++ { - feSquare(&t3, &t3) - } - feMul(&t2, &t3, &t2) - feSquare(&t2, &t2) - for i = 1; i < 50; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t2, &t1) - feSquare(&t1, &t1) - for i = 1; i < 5; i++ { - feSquare(&t1, &t1) - } - feMul(out, &t1, &t0) -} - -func scalarMultGeneric(out, in, base *[32]byte) { - var e [32]byte - - copy(e[:], in[:]) - e[0] &= 248 - e[31] &= 127 - e[31] |= 64 - - var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement - feFromBytes(&x1, base) - feOne(&x2) - feCopy(&x3, &x1) - feOne(&z3) - - swap := int32(0) - for pos := 254; pos >= 0; pos-- { - b := e[pos/8] >> uint(pos&7) - b &= 1 - swap ^= int32(b) - feCSwap(&x2, &x3, swap) - feCSwap(&z2, &z3, swap) - swap = int32(b) - - feSub(&tmp0, &x3, &z3) - feSub(&tmp1, &x2, &z2) - feAdd(&x2, &x2, &z2) - feAdd(&z2, &x3, &z3) - feMul(&z3, &tmp0, &x2) - feMul(&z2, &z2, &tmp1) - feSquare(&tmp0, &tmp1) - feSquare(&tmp1, &x2) - feAdd(&x3, &z3, &z2) - feSub(&z2, &z3, &z2) - feMul(&x2, &tmp1, &tmp0) - feSub(&tmp1, &tmp1, &tmp0) - feSquare(&z2, &z2) - feMul121666(&z3, &tmp1) - feSquare(&x3, &x3) - feAdd(&tmp0, &tmp0, &z3) - feMul(&z3, &x1, &z2) - feMul(&z2, &tmp1, &tmp0) - } - - feCSwap(&x2, &x3, swap) - feCSwap(&z2, &z3, swap) - - feInvert(&z2, &z2) - feMul(&x2, &x2, &z2) - feToBytes(out, &x2) -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go b/awsproviderlint/vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go deleted file mode 100644 index 047d49afc27..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 gccgo appengine purego - -package curve25519 - -func scalarMult(out, in, base *[32]byte) { - scalarMultGeneric(out, in, base) -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ed25519/ed25519.go b/awsproviderlint/vendor/golang.org/x/crypto/ed25519/ed25519.go deleted file mode 100644 index c7f8c7e64ec..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ed25519/ed25519.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// In Go 1.13, the ed25519 package was promoted to the standard library as -// crypto/ed25519, and this package became a wrapper for the standard library one. -// -// +build !go1.13 - -// Package ed25519 implements the Ed25519 signature algorithm. See -// https://ed25519.cr.yp.to/. 
-// -// These functions are also compatible with the “Ed25519” function defined in -// RFC 8032. However, unlike RFC 8032's formulation, this package's private key -// representation includes a public key suffix to make multiple signing -// operations with the same key more efficient. This package refers to the RFC -// 8032 private key as the “seed”. -package ed25519 - -// This code is a port of the public domain, “ref10” implementation of ed25519 -// from SUPERCOP. - -import ( - "bytes" - "crypto" - cryptorand "crypto/rand" - "crypto/sha512" - "errors" - "io" - "strconv" - - "golang.org/x/crypto/ed25519/internal/edwards25519" -) - -const ( - // PublicKeySize is the size, in bytes, of public keys as used in this package. - PublicKeySize = 32 - // PrivateKeySize is the size, in bytes, of private keys as used in this package. - PrivateKeySize = 64 - // SignatureSize is the size, in bytes, of signatures generated and verified by this package. - SignatureSize = 64 - // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. - SeedSize = 32 -) - -// PublicKey is the type of Ed25519 public keys. -type PublicKey []byte - -// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. -type PrivateKey []byte - -// Public returns the PublicKey corresponding to priv. -func (priv PrivateKey) Public() crypto.PublicKey { - publicKey := make([]byte, PublicKeySize) - copy(publicKey, priv[32:]) - return PublicKey(publicKey) -} - -// Seed returns the private key seed corresponding to priv. It is provided for -// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds -// in this package. -func (priv PrivateKey) Seed() []byte { - seed := make([]byte, SeedSize) - copy(seed, priv[:32]) - return seed -} - -// Sign signs the given message with priv. -// Ed25519 performs two passes over messages to be signed and therefore cannot -// handle pre-hashed messages. Thus opts.HashFunc() must return zero to -// indicate the message hasn't been hashed. This can be achieved by passing -// crypto.Hash(0) as the value for opts. -func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) { - if opts.HashFunc() != crypto.Hash(0) { - return nil, errors.New("ed25519: cannot sign hashed message") - } - - return Sign(priv, message), nil -} - -// GenerateKey generates a public/private key pair using entropy from rand. -// If rand is nil, crypto/rand.Reader will be used. -func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { - if rand == nil { - rand = cryptorand.Reader - } - - seed := make([]byte, SeedSize) - if _, err := io.ReadFull(rand, seed); err != nil { - return nil, nil, err - } - - privateKey := NewKeyFromSeed(seed) - publicKey := make([]byte, PublicKeySize) - copy(publicKey, privateKey[32:]) - - return publicKey, privateKey, nil -} - -// NewKeyFromSeed calculates a private key from a seed. It will panic if -// len(seed) is not SeedSize. This function is provided for interoperability -// with RFC 8032. RFC 8032's private keys correspond to seeds in this -// package. 
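A minimal usage sketch of this API; the same calls work verbatim against the standard library's crypto/ed25519, which replaces this vendored copy from Go 1.13 on:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(nil) // nil selects crypto/rand.Reader
	if err != nil {
		panic(err)
	}
	// A private key is fully determined by its 32-byte RFC 8032 seed.
	rebuilt := ed25519.NewKeyFromSeed(priv.Seed())
	fmt.Println(bytes.Equal(priv, rebuilt)) // prints: true

	msg := []byte("attached message")
	sig := ed25519.Sign(priv, msg)
	fmt.Println(ed25519.Verify(pub, msg, sig)) // prints: true
}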
-func NewKeyFromSeed(seed []byte) PrivateKey { - if l := len(seed); l != SeedSize { - panic("ed25519: bad seed length: " + strconv.Itoa(l)) - } - - digest := sha512.Sum512(seed) - digest[0] &= 248 - digest[31] &= 127 - digest[31] |= 64 - - var A edwards25519.ExtendedGroupElement - var hBytes [32]byte - copy(hBytes[:], digest[:]) - edwards25519.GeScalarMultBase(&A, &hBytes) - var publicKeyBytes [32]byte - A.ToBytes(&publicKeyBytes) - - privateKey := make([]byte, PrivateKeySize) - copy(privateKey, seed) - copy(privateKey[32:], publicKeyBytes[:]) - - return privateKey -} - -// Sign signs the message with privateKey and returns a signature. It will -// panic if len(privateKey) is not PrivateKeySize. -func Sign(privateKey PrivateKey, message []byte) []byte { - if l := len(privateKey); l != PrivateKeySize { - panic("ed25519: bad private key length: " + strconv.Itoa(l)) - } - - h := sha512.New() - h.Write(privateKey[:32]) - - var digest1, messageDigest, hramDigest [64]byte - var expandedSecretKey [32]byte - h.Sum(digest1[:0]) - copy(expandedSecretKey[:], digest1[:]) - expandedSecretKey[0] &= 248 - expandedSecretKey[31] &= 63 - expandedSecretKey[31] |= 64 - - h.Reset() - h.Write(digest1[32:]) - h.Write(message) - h.Sum(messageDigest[:0]) - - var messageDigestReduced [32]byte - edwards25519.ScReduce(&messageDigestReduced, &messageDigest) - var R edwards25519.ExtendedGroupElement - edwards25519.GeScalarMultBase(&R, &messageDigestReduced) - - var encodedR [32]byte - R.ToBytes(&encodedR) - - h.Reset() - h.Write(encodedR[:]) - h.Write(privateKey[32:]) - h.Write(message) - h.Sum(hramDigest[:0]) - var hramDigestReduced [32]byte - edwards25519.ScReduce(&hramDigestReduced, &hramDigest) - - var s [32]byte - edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced) - - signature := make([]byte, SignatureSize) - copy(signature[:], encodedR[:]) - copy(signature[32:], s[:]) - - return signature -} - -// Verify reports whether sig is a valid signature of message by publicKey. It -// will panic if len(publicKey) is not PublicKeySize. -func Verify(publicKey PublicKey, message, sig []byte) bool { - if l := len(publicKey); l != PublicKeySize { - panic("ed25519: bad public key length: " + strconv.Itoa(l)) - } - - if len(sig) != SignatureSize || sig[63]&224 != 0 { - return false - } - - var A edwards25519.ExtendedGroupElement - var publicKeyBytes [32]byte - copy(publicKeyBytes[:], publicKey) - if !A.FromBytes(&publicKeyBytes) { - return false - } - edwards25519.FeNeg(&A.X, &A.X) - edwards25519.FeNeg(&A.T, &A.T) - - h := sha512.New() - h.Write(sig[:32]) - h.Write(publicKey[:]) - h.Write(message) - var digest [64]byte - h.Sum(digest[:0]) - - var hReduced [32]byte - edwards25519.ScReduce(&hReduced, &digest) - - var R edwards25519.ProjectiveGroupElement - var s [32]byte - copy(s[:], sig[32:]) - - // https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in - // the range [0, order) in order to prevent signature malleability. - if !edwards25519.ScMinimal(&s) { - return false - } - - edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s) - - var checkR [32]byte - R.ToBytes(&checkR) - return bytes.Equal(sig[:32], checkR[:]) -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go b/awsproviderlint/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go deleted file mode 100644 index d1448d8d220..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2019 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.13 - -// Package ed25519 implements the Ed25519 signature algorithm. See -// https://ed25519.cr.yp.to/. -// -// These functions are also compatible with the “Ed25519” function defined in -// RFC 8032. However, unlike RFC 8032's formulation, this package's private key -// representation includes a public key suffix to make multiple signing -// operations with the same key more efficient. This package refers to the RFC -// 8032 private key as the “seed”. -// -// Beginning with Go 1.13, the functionality of this package was moved to the -// standard library as crypto/ed25519. This package only acts as a compatibility -// wrapper. -package ed25519 - -import ( - "crypto/ed25519" - "io" -) - -const ( - // PublicKeySize is the size, in bytes, of public keys as used in this package. - PublicKeySize = 32 - // PrivateKeySize is the size, in bytes, of private keys as used in this package. - PrivateKeySize = 64 - // SignatureSize is the size, in bytes, of signatures generated and verified by this package. - SignatureSize = 64 - // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. - SeedSize = 32 -) - -// PublicKey is the type of Ed25519 public keys. -// -// This type is an alias for crypto/ed25519's PublicKey type. -// See the crypto/ed25519 package for the methods on this type. -type PublicKey = ed25519.PublicKey - -// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. -// -// This type is an alias for crypto/ed25519's PrivateKey type. -// See the crypto/ed25519 package for the methods on this type. -type PrivateKey = ed25519.PrivateKey - -// GenerateKey generates a public/private key pair using entropy from rand. -// If rand is nil, crypto/rand.Reader will be used. -func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { - return ed25519.GenerateKey(rand) -} - -// NewKeyFromSeed calculates a private key from a seed. It will panic if -// len(seed) is not SeedSize. This function is provided for interoperability -// with RFC 8032. RFC 8032's private keys correspond to seeds in this -// package. -func NewKeyFromSeed(seed []byte) PrivateKey { - return ed25519.NewKeyFromSeed(seed) -} - -// Sign signs the message with privateKey and returns a signature. It will -// panic if len(privateKey) is not PrivateKeySize. -func Sign(privateKey PrivateKey, message []byte) []byte { - return ed25519.Sign(privateKey, message) -} - -// Verify reports whether sig is a valid signature of message by publicKey. It -// will panic if len(publicKey) is not PublicKeySize. -func Verify(publicKey PublicKey, message, sig []byte) bool { - return ed25519.Verify(publicKey, message, sig) -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go b/awsproviderlint/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go deleted file mode 100644 index e39f086c1d8..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go +++ /dev/null @@ -1,1422 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package edwards25519 - -// These values are from the public domain, “ref10” implementation of ed25519 -// from SUPERCOP. - -// d is a constant in the Edwards curve equation. 
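Concretely, d = -121665/121666 mod p, where p = 2^255 - 19 and the curve is -x^2 + y^2 = 1 + d*x^2*y^2. Each FieldElement below holds ten signed limbs in radix 2^25.5, limb i carrying weight 2^ceil(25.5*i). A standalone sketch that decodes the limbs and compares them against that fraction; the limb weights are an assumption about this representation, stated explicitly in the code:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	// The ten limbs of d, with assumed weights 2^0, 2^26, 2^51, ..., 2^230.
	limbs := []int64{-10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116}
	shifts := []uint{0, 26, 51, 77, 102, 128, 153, 179, 204, 230}
	d := new(big.Int)
	for i, l := range limbs {
		d.Add(d, new(big.Int).Lsh(big.NewInt(l), shifts[i]))
	}
	d.Mod(d, p)
	// want = -121665 / 121666 mod p
	want := new(big.Int).ModInverse(big.NewInt(121666), p)
	want.Mul(want, big.NewInt(-121665)).Mod(want, p)
	fmt.Println(d.Cmp(want) == 0) // expected: true
}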
-var d = FieldElement{ - -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116, -} - -// d2 is 2*d. -var d2 = FieldElement{ - -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199, -} - -// SqrtM1 is the square-root of -1 in the field. -var SqrtM1 = FieldElement{ - -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482, -} - -// A is a constant in the Montgomery-form of curve25519. -var A = FieldElement{ - 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, -} - -// bi contains precomputed multiples of the base-point. See the Ed25519 paper -// for a discussion about how these values are used. -var bi = [8]PreComputedGroupElement{ - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, - FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, - FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, - }, - { - FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436}, - FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, - FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, - }, - { - FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, - FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, - FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, - }, - { - FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, - FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, - FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, 
-2735503, -13812022, -16236442, -32461234, -12290683}, - }, -} - -// base contains precomputed multiples of the base-point. See the Ed25519 paper -// for a discussion about how these values are used. -var base = [32][8]PreComputedGroupElement{ - { - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, - FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, - FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, - FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, - FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, - FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, - FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, - FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, - FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, - }, - }, - { - { - FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, - FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, - FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, - }, - { - FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, 
-26619106, 14544525, -17477504, 982639}, - FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, - FieldElement{5793303, 16271923, -24131614, -10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, - }, - { - FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568}, - FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, - FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, - }, - { - FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, - FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, - FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, - }, - { - FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, - FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, - FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, - }, - { - FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, - FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, - FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, - }, - { - FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, - FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, - FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, - }, - { - FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, - FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, - FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, - }, - }, - { - { - FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, - FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, - FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, - }, - { - FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, - FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, - FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, - }, - { - FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, - FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, - FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, - }, - { - FieldElement{-16160072, 5472695, 31895588, 4744994, 
8823515, 10365685, -27224800, 9448613, -28774454, 366295}, - FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, - FieldElement{8678025, 2694440, -6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, - }, - { - FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026}, - FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, - FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, - }, - { - FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, - FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, - FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, - }, - { - FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995}, - FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, - FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, - }, - { - FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, - FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, - FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, - }, - }, - { - { - FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, - FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, - FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, - }, - { - FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, - FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, - FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, - }, - { - FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, - FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, - FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099}, - }, - { - FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, - FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, - FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930}, - }, - { - FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, - FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, - FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, - }, - { - FieldElement{24474287, 4968103, 22267082, 4407354, 
24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, - FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, - FieldElement{-28635013, 13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, - }, - { - FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, - FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, - FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, - }, - { - FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, - FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, - FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, - }, - }, - { - { - FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, - FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, - FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, - }, - { - FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, - FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, - FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, - }, - { - FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, - FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, - FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, - }, - { - FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, - FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, - FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, - }, - { - FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, - FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, - FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384}, - }, - { - FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, - FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, - FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708}, - }, - { - FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, - FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, - FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387}, - }, - { - 
FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, - FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 18151676, 13417686}, - FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, - }, - }, - { - { - FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, - FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, - FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, - }, - { - FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, - FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, - FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, - }, - { - FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, - FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, - FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, - }, - { - FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, - FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, - FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, - }, - { - FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, - FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, - FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, - }, - { - FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, - FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, - FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, - }, - { - FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, - FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949}, - FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, - }, - { - FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, - FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, - FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396}, - }, - }, - { - { - FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, - FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, - FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 
17271490, 12349094, 26939669, -3752294}, - }, - { - FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887}, - FieldElement{22263325, -6560050, 3984570, -11174646, -15114008, -566785, 28311253, 5358056, -23319780, 541964}, - FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195}, - }, - { - FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, - FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, - FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, - }, - { - FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, - FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, - FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, - }, - { - FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, - FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, - FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, - }, - { - FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, - FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490}, - FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, - }, - { - FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, - FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, - FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, - }, - { - FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, - FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048}, - FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, - }, - }, - { - { - FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, - FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, - FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, - }, - { - FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, - FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441}, - FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, - }, - { - FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, - FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, - FieldElement{-21751364, -16730916, 1351763, 
-803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, - }, - { - FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, - FieldElement{-9442290, 6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, - FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035}, - }, - { - FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, - FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, - FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, - }, - { - FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, - FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, - FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, - }, - { - FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, - FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, - FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, - }, - { - FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, - FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, - FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, - }, - }, - { - { - FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, - FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, - FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, - }, - { - FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, - FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, - FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, - }, - { - FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, - FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389}, - FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, - }, - { - FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, - FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880}, - FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, - }, - { - FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, - FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, - FieldElement{20093277, 9920966, 
-11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, - }, - { - FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, - FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, - FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, - }, - { - FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, - FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, - FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, - }, - { - FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, - FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, - FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, - }, - }, - { - { - FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912}, - FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, - FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, - }, - { - FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, - FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, - FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, - }, - { - FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, - FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, - FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, - }, - { - FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, - FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, - FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, - }, - { - FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646}, - FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425}, - FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, - }, - { - FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, - FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, - FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462}, - }, - { - FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, - FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, 
-19802075, -3034702}, - FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797}, - }, - { - FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 13160960, 19708896, 5415497, -7360503, -4109293}, - FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, - FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, - }, - }, - { - { - FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, - FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, - FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, - }, - { - FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, - FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, - FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, - }, - { - FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992}, - FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, - FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, - }, - { - FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, - FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, - FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, - }, - { - FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, - FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, - FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, - }, - { - FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, - FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, - FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, - }, - { - FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, - FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, - FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, - }, - { - FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750}, - FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, - FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, - }, - }, - { - { - FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, - FieldElement{-22597207, -7012665, 8587003, -8257861, 
4084309, -12970062, 361726, 2610596, -23921530, -11455195}, - FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086}, - }, - { - FieldElement{-13994457, 16616821, 14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, - FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518}, - FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, - }, - { - FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, - FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, - FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, - }, - { - FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, - FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, - FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, - }, - { - FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, - FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, - FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, - }, - { - FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, - FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, - FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844}, - }, - { - FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, - FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048}, - FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, - }, - { - FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, - FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, - FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, - }, - }, - { - { - FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468}, - FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, - FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, - }, - { - FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066}, - FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, - FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, - }, - { - FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, - FieldElement{29004207, -7867081, 28661402, -640412, 
-12794003, -7943086, 31863255, -4135540, -278050, -15759279}, - FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, - }, - { - FieldElement{27081650, 3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, - FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714}, - FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, - }, - { - FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464}, - FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, - FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, - }, - { - FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, - FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, - FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, - }, - { - FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, - FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, - FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, - }, - { - FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, - FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, - FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, - }, - }, - { - { - FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, - FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, - FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, - }, - { - FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861}, - FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, - FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, - }, - { - FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815}, - FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, - FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, - }, - { - FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574}, - FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, - FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, - }, - { - FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, - FieldElement{-10349955, 
-14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, - FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, - }, - { - FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953}, - FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218}, - FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265}, - }, - { - FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073}, - FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325}, - FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798}, - }, - { - FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870}, - FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863}, - FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927}, - }, - }, - { - { - FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267}, - FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663}, - FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862}, - }, - { - FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673}, - FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943}, - FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020}, - }, - { - FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238}, - FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064}, - FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795}, - }, - { - FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052}, - FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904}, - FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531}, - }, - { - FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979}, - FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841}, - FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431}, - }, - { - FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324}, - FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940}, - FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320}, - }, - { - FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, 
-27434397, 9900184}, - FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114}, - FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, -25488601, 15413635, 9524356, -7018878}, - }, - { - FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784}, - FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091}, - FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585}, - }, - }, - { - { - FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208}, - FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864}, - FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661}, - }, - { - FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233}, - FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212}, - FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525}, - }, - { - FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068}, - FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397}, - FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988}, - }, - { - FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889}, - FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038}, - FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697}, - }, - { - FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875}, - FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905}, - FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656}, - }, - { - FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818}, - FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714}, - FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203}, - }, - { - FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931}, - FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024}, - FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084}, - }, - { - FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204}, - FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817}, - FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667}, - }, - }, - { - { - FieldElement{11374242, 12660715, 17861383, -12540833, 
10935568, 1099227, -13886076, -9091740, -27727044, 11358504}, - FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768}, - FieldElement{-19096303, 341147, -6197485, -239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255}, - }, - { - FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790}, - FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438}, - FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333}, - }, - { - FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971}, - FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905}, - FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409}, - }, - { - FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409}, - FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499}, - FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363}, - }, - { - FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664}, - FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324}, - FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940}, - }, - { - FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990}, - FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914}, - FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290}, - }, - { - FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257}, - FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433}, - FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236}, - }, - { - FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045}, - FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093}, - FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347}, - }, - }, - { - { - FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191}, - FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507}, - FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906}, - }, - { - FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018}, - FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109}, - FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926}, - }, - { - FieldElement{-24593607, -16138885, 
-8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528}, - FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625}, - FieldElement{-32720583, -16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286}, - }, - { - FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033}, - FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866}, - FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896}, - }, - { - FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075}, - FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347}, - FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437}, - }, - { - FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165}, - FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588}, - FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193}, - }, - { - FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017}, - FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883}, - FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961}, - }, - { - FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043}, - FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663}, - FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362}, - }, - }, - { - { - FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860}, - FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466}, - FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063}, - }, - { - FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997}, - FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295}, - FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369}, - }, - { - FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385}, - FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109}, - FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906}, - }, - { - FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424}, - FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185}, - FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962}, - }, - { - FieldElement{-7737563, -5869402, 
-14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325}, - FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593}, - FieldElement{696102, 13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404}, - }, - { - FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644}, - FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801}, - FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804}, - }, - { - FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884}, - FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577}, - FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849}, - }, - { - FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473}, - FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644}, - FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319}, - }, - }, - { - { - FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599}, - FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768}, - FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084}, - }, - { - FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328}, - FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369}, - FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920}, - }, - { - FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815}, - FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025}, - FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397}, - }, - { - FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448}, - FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981}, - FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165}, - }, - { - FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501}, - FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073}, - FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861}, - }, - { - FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845}, - FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211}, - FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870}, - }, - { - 
FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096}, - FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803}, - FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168}, - }, - { - FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965}, - FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505}, - FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598}, - }, - }, - { - { - FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782}, - FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900}, - FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479}, - }, - { - FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208}, - FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232}, - FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719}, - }, - { - FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271}, - FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326}, - FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132}, - }, - { - FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300}, - FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570}, - FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670}, - }, - { - FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994}, - FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913}, - FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317}, - }, - { - FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730}, - FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096}, - FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078}, - }, - { - FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411}, - FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905}, - FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654}, - }, - { - FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870}, - FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498}, - FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579}, 
- }, - }, - { - { - FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677}, - FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, -8982069, 4429647}, - FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743}, - }, - { - FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468}, - FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375}, - FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155}, - }, - { - FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725}, - FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612}, - FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943}, - }, - { - FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944}, - FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928}, - FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406}, - }, - { - FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139}, - FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963}, - FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693}, - }, - { - FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734}, - FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680}, - FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410}, - }, - { - FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931}, - FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654}, - FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710}, - }, - { - FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180}, - FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684}, - FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895}, - }, - }, - { - { - FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501}, - FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413}, - FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880}, - }, - { - FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874}, - FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962}, - FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 
14905060, 22680049, 13906969, -15933690, 3797899}, - }, - { - FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152}, - FieldElement{9209270, -15135055, -13256557, -6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063}, - FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080}, - }, - { - FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146}, - FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183}, - FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133}, - }, - { - FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421}, - FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622}, - FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197}, - }, - { - FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663}, - FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753}, - FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755}, - }, - { - FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862}, - FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118}, - FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171}, - }, - { - FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380}, - FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824}, - FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270}, - }, - }, - { - { - FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438}, - FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584}, - FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562}, - }, - { - FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471}, - FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610}, - FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269}, - }, - { - FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650}, - FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369}, - FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461}, - }, - { - FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462}, - FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793}, - FieldElement{-2848395, 9953421, 
11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218}, - }, - { - FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226}, - FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019}, - FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037}, - }, - { - FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171}, - FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132}, - FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841}, - }, - { - FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181}, - FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210}, - FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040}, - }, - { - FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935}, - FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105}, - FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814}, - }, - }, - { - { - FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852}, - FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581}, - FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646}, - }, - { - FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844}, - FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025}, - FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453}, - }, - { - FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068}, - FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192}, - FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921}, - }, - { - FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259}, - FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426}, - FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072}, - }, - { - FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305}, - FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832}, - FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943}, - }, - { - FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011}, - FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 
12876623, -2112447}, - FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494}, - }, - { - FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, -13017745, 17558842, -7872890, 23896954, -4314245}, - FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859}, - FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915}, - }, - { - FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707}, - FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848}, - FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224}, - }, - }, - { - { - FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391}, - FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215}, - FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101}, - }, - { - FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713}, - FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849}, - FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930}, - }, - { - FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940}, - FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031}, - FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404}, - }, - { - FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243}, - FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116}, - FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525}, - }, - { - FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509}, - FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883}, - FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865}, - }, - { - FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660}, - FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273}, - FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138}, - }, - { - FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560}, - FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135}, - FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941}, - }, - { - FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739}, - FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, 
-9745295, 23357533, -15217008, 26908270, 12150756}, - FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819}, - }, - }, - { - { - FieldElement{-6898905, 9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347}, - FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028}, - FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075}, - }, - { - FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799}, - FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609}, - FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817}, - }, - { - FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989}, - FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523}, - FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278}, - }, - { - FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045}, - FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377}, - FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480}, - }, - { - FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016}, - FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426}, - FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525}, - }, - { - FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396}, - FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080}, - FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892}, - }, - { - FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275}, - FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074}, - FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140}, - }, - { - FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717}, - FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101}, - FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127}, - }, - }, - { - { - FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632}, - FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415}, - FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160}, - }, - { - FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876}, - FieldElement{22648901, 
1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625}, - FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478}, - }, - { - FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164}, - FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595}, - FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248}, - }, - { - FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858}, - FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193}, - FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184}, - }, - { - FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942}, - FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635}, - FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948}, - }, - { - FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935}, - FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415}, - FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416}, - }, - { - FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018}, - FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778}, - FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659}, - }, - { - FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385}, - FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503}, - FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329}, - }, - }, - { - { - FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056}, - FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838}, - FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948}, - }, - { - FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, - FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118}, - FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, - }, - { - FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, - FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, - FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, - }, - { - FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 
2557221, 1768301, 15373193}, - FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910}, - FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, -11939093, 22597931, 7176455, -18585478, 13365930}, - }, - { - FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667}, - FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, - FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, - }, - { - FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, - FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, - FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, - }, - { - FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, - FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, - FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, - }, - { - FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, - FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, - FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, - }, - }, - { - { - FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, - FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, - FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, - }, - { - FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, - FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, - FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066}, - }, - { - FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, - FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, - FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053}, - }, - { - FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, - FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, - FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, - }, - { - FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, - FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, - FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, - }, - { - FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 
9328700, 29955601, -11678310}, - FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, - FieldElement{-32615849, 338663, -25195611, 2510422, -29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, - }, - { - FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101}, - FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, - FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, - }, - { - FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, - FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, - FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, - }, - }, - { - { - FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, - FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, - FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, - }, - { - FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, - FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, - FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, - }, - { - FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, - FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, - FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, - }, - { - FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, - FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, - FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, - }, - { - FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, - FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, - FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718}, - }, - { - FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, - FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, - FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, - }, - { - FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, - FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, - FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, - }, - { - FieldElement{-5037580, -13028295, -2970559, 
-3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, - FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, - FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, - }, - }, - { - { - FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, - FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, - FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, - }, - { - FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, - FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, - FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, - }, - { - FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, - FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, - FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, - }, - { - FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, - FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, - FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, - }, - { - FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, - FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, - FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, - }, - { - FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, - FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, - FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, - }, - { - FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, - FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, - FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, - }, - { - FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, - FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, - FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, - }, - }, -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/awsproviderlint/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go deleted file mode 100644 index fd03c252af4..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go +++ /dev/null @@ 
-1,1793 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package edwards25519 - -import "encoding/binary" - -// This code is a port of the public domain, “ref10” implementation of ed25519 -// from SUPERCOP. - -// FieldElement represents an element of the field GF(2^255 - 19). An element -// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 -// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on -// context. -type FieldElement [10]int32 - -var zero FieldElement - -func FeZero(fe *FieldElement) { - copy(fe[:], zero[:]) -} - -func FeOne(fe *FieldElement) { - FeZero(fe) - fe[0] = 1 -} - -func FeAdd(dst, a, b *FieldElement) { - dst[0] = a[0] + b[0] - dst[1] = a[1] + b[1] - dst[2] = a[2] + b[2] - dst[3] = a[3] + b[3] - dst[4] = a[4] + b[4] - dst[5] = a[5] + b[5] - dst[6] = a[6] + b[6] - dst[7] = a[7] + b[7] - dst[8] = a[8] + b[8] - dst[9] = a[9] + b[9] -} - -func FeSub(dst, a, b *FieldElement) { - dst[0] = a[0] - b[0] - dst[1] = a[1] - b[1] - dst[2] = a[2] - b[2] - dst[3] = a[3] - b[3] - dst[4] = a[4] - b[4] - dst[5] = a[5] - b[5] - dst[6] = a[6] - b[6] - dst[7] = a[7] - b[7] - dst[8] = a[8] - b[8] - dst[9] = a[9] - b[9] -} - -func FeCopy(dst, src *FieldElement) { - copy(dst[:], src[:]) -} - -// Replace (f,g) with (g,g) if b == 1; -// replace (f,g) with (f,g) if b == 0. -// -// Preconditions: b in {0,1}. -func FeCMove(f, g *FieldElement, b int32) { - b = -b - f[0] ^= b & (f[0] ^ g[0]) - f[1] ^= b & (f[1] ^ g[1]) - f[2] ^= b & (f[2] ^ g[2]) - f[3] ^= b & (f[3] ^ g[3]) - f[4] ^= b & (f[4] ^ g[4]) - f[5] ^= b & (f[5] ^ g[5]) - f[6] ^= b & (f[6] ^ g[6]) - f[7] ^= b & (f[7] ^ g[7]) - f[8] ^= b & (f[8] ^ g[8]) - f[9] ^= b & (f[9] ^ g[9]) -} - -func load3(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - return r -} - -func load4(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - r |= int64(in[3]) << 24 - return r -} - -func FeFromBytes(dst *FieldElement, src *[32]byte) { - h0 := load4(src[:]) - h1 := load3(src[4:]) << 6 - h2 := load3(src[7:]) << 5 - h3 := load3(src[10:]) << 3 - h4 := load3(src[13:]) << 2 - h5 := load4(src[16:]) - h6 := load3(src[20:]) << 7 - h7 := load3(src[23:]) << 5 - h8 := load3(src[26:]) << 4 - h9 := (load3(src[29:]) & 8388607) << 2 - - FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -// FeToBytes marshals h to s. -// Preconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Write p=2^255-19; q=floor(h/p). -// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). -// -// Proof: -// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. -// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. -// -// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). -// Then 0<y<1. -// -// Write r=h-pq. -// Have 0<=r<=p-1=2^255-20. -// -// Write x=r+19(2^-255)r+y. -// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q. -// -// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1)) -// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. -func FeToBytes(s *[32]byte, h *FieldElement) { - var carry [10]int32 - - q := (19*h[9] + (1 << 24)) >> 25 - q = (h[0] + q) >> 26 - q = (h[1] + q) >> 25 - q = (h[2] + q) >> 26 - q = (h[3] + q) >> 25 - q = (h[4] + q) >> 26 - q = (h[5] + q) >> 25 - q = (h[6] + q) >> 26 - q = (h[7] + q) >> 25 - q = (h[8] + q) >> 26 - q = (h[9] + q) >> 25 - - // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. - h[0] += 19 * q - // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
- - carry[0] = h[0] >> 26 - h[1] += carry[0] - h[0] -= carry[0] << 26 - carry[1] = h[1] >> 25 - h[2] += carry[1] - h[1] -= carry[1] << 25 - carry[2] = h[2] >> 26 - h[3] += carry[2] - h[2] -= carry[2] << 26 - carry[3] = h[3] >> 25 - h[4] += carry[3] - h[3] -= carry[3] << 25 - carry[4] = h[4] >> 26 - h[5] += carry[4] - h[4] -= carry[4] << 26 - carry[5] = h[5] >> 25 - h[6] += carry[5] - h[5] -= carry[5] << 25 - carry[6] = h[6] >> 26 - h[7] += carry[6] - h[6] -= carry[6] << 26 - carry[7] = h[7] >> 25 - h[8] += carry[7] - h[7] -= carry[7] << 25 - carry[8] = h[8] >> 26 - h[9] += carry[8] - h[8] -= carry[8] << 26 - carry[9] = h[9] >> 25 - h[9] -= carry[9] << 25 - // h10 = carry9 - - // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. - // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; - // evidently 2^255 h10-2^255 q = 0. - // Goal: Output h[0]+...+2^230 h[9]. - - s[0] = byte(h[0] >> 0) - s[1] = byte(h[0] >> 8) - s[2] = byte(h[0] >> 16) - s[3] = byte((h[0] >> 24) | (h[1] << 2)) - s[4] = byte(h[1] >> 6) - s[5] = byte(h[1] >> 14) - s[6] = byte((h[1] >> 22) | (h[2] << 3)) - s[7] = byte(h[2] >> 5) - s[8] = byte(h[2] >> 13) - s[9] = byte((h[2] >> 21) | (h[3] << 5)) - s[10] = byte(h[3] >> 3) - s[11] = byte(h[3] >> 11) - s[12] = byte((h[3] >> 19) | (h[4] << 6)) - s[13] = byte(h[4] >> 2) - s[14] = byte(h[4] >> 10) - s[15] = byte(h[4] >> 18) - s[16] = byte(h[5] >> 0) - s[17] = byte(h[5] >> 8) - s[18] = byte(h[5] >> 16) - s[19] = byte((h[5] >> 24) | (h[6] << 1)) - s[20] = byte(h[6] >> 7) - s[21] = byte(h[6] >> 15) - s[22] = byte((h[6] >> 23) | (h[7] << 3)) - s[23] = byte(h[7] >> 5) - s[24] = byte(h[7] >> 13) - s[25] = byte((h[7] >> 21) | (h[8] << 4)) - s[26] = byte(h[8] >> 4) - s[27] = byte(h[8] >> 12) - s[28] = byte((h[8] >> 20) | (h[9] << 6)) - s[29] = byte(h[9] >> 2) - s[30] = byte(h[9] >> 10) - s[31] = byte(h[9] >> 18) -} - -func FeIsNegative(f *FieldElement) byte { - var s [32]byte - FeToBytes(&s, f) - return s[0] & 1 -} - -func FeIsNonZero(f *FieldElement) int32 { - var s [32]byte - FeToBytes(&s, f) - var x uint8 - for _, b := range s { - x |= b - } - x |= x >> 4 - x |= x >> 2 - x |= x >> 1 - return int32(x & 1) -} - -// FeNeg sets h = -f -// -// Preconditions: -// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func FeNeg(h, f *FieldElement) { - h[0] = -f[0] - h[1] = -f[1] - h[2] = -f[2] - h[3] = -f[3] - h[4] = -f[4] - h[5] = -f[5] - h[6] = -f[6] - h[7] = -f[7] - h[8] = -f[8] - h[9] = -f[9] -} - -func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { - var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64 - - /* - |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) - i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 - |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) - i.e. 
|h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 - */ - - c0 = (h0 + (1 << 25)) >> 26 - h1 += c0 - h0 -= c0 << 26 - c4 = (h4 + (1 << 25)) >> 26 - h5 += c4 - h4 -= c4 << 26 - /* |h0| <= 2^25 */ - /* |h4| <= 2^25 */ - /* |h1| <= 1.51*2^58 */ - /* |h5| <= 1.51*2^58 */ - - c1 = (h1 + (1 << 24)) >> 25 - h2 += c1 - h1 -= c1 << 25 - c5 = (h5 + (1 << 24)) >> 25 - h6 += c5 - h5 -= c5 << 25 - /* |h1| <= 2^24; from now on fits into int32 */ - /* |h5| <= 2^24; from now on fits into int32 */ - /* |h2| <= 1.21*2^59 */ - /* |h6| <= 1.21*2^59 */ - - c2 = (h2 + (1 << 25)) >> 26 - h3 += c2 - h2 -= c2 << 26 - c6 = (h6 + (1 << 25)) >> 26 - h7 += c6 - h6 -= c6 << 26 - /* |h2| <= 2^25; from now on fits into int32 unchanged */ - /* |h6| <= 2^25; from now on fits into int32 unchanged */ - /* |h3| <= 1.51*2^58 */ - /* |h7| <= 1.51*2^58 */ - - c3 = (h3 + (1 << 24)) >> 25 - h4 += c3 - h3 -= c3 << 25 - c7 = (h7 + (1 << 24)) >> 25 - h8 += c7 - h7 -= c7 << 25 - /* |h3| <= 2^24; from now on fits into int32 unchanged */ - /* |h7| <= 2^24; from now on fits into int32 unchanged */ - /* |h4| <= 1.52*2^33 */ - /* |h8| <= 1.52*2^33 */ - - c4 = (h4 + (1 << 25)) >> 26 - h5 += c4 - h4 -= c4 << 26 - c8 = (h8 + (1 << 25)) >> 26 - h9 += c8 - h8 -= c8 << 26 - /* |h4| <= 2^25; from now on fits into int32 unchanged */ - /* |h8| <= 2^25; from now on fits into int32 unchanged */ - /* |h5| <= 1.01*2^24 */ - /* |h9| <= 1.51*2^58 */ - - c9 = (h9 + (1 << 24)) >> 25 - h0 += c9 * 19 - h9 -= c9 << 25 - /* |h9| <= 2^24; from now on fits into int32 unchanged */ - /* |h0| <= 1.8*2^37 */ - - c0 = (h0 + (1 << 25)) >> 26 - h1 += c0 - h0 -= c0 << 26 - /* |h0| <= 2^25; from now on fits into int32 unchanged */ - /* |h1| <= 1.01*2^24 */ - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// FeMul calculates h = f * g -// Can overlap h with f or g. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Notes on implementation strategy: -// -// Using schoolbook multiplication. -// Karatsuba would save a little in some cost models. -// -// Most multiplications by 2 and 19 are 32-bit precomputations; -// cheaper than 64-bit postcomputations. -// -// There is one remaining multiplication by 19 in the carry chain; -// one *19 precomputation can be merged into this, -// but the resulting data flow is considerably less clean. -// -// There are 12 carries below. -// 10 of them are 2-way parallelizable and vectorizable. -// Can get away with 11 carries, but then data flow is much deeper. -// -// With tighter constraints on inputs, can squeeze carries into int32. 
-func FeMul(h, f, g *FieldElement) { - f0 := int64(f[0]) - f1 := int64(f[1]) - f2 := int64(f[2]) - f3 := int64(f[3]) - f4 := int64(f[4]) - f5 := int64(f[5]) - f6 := int64(f[6]) - f7 := int64(f[7]) - f8 := int64(f[8]) - f9 := int64(f[9]) - - f1_2 := int64(2 * f[1]) - f3_2 := int64(2 * f[3]) - f5_2 := int64(2 * f[5]) - f7_2 := int64(2 * f[7]) - f9_2 := int64(2 * f[9]) - - g0 := int64(g[0]) - g1 := int64(g[1]) - g2 := int64(g[2]) - g3 := int64(g[3]) - g4 := int64(g[4]) - g5 := int64(g[5]) - g6 := int64(g[6]) - g7 := int64(g[7]) - g8 := int64(g[8]) - g9 := int64(g[9]) - - g1_19 := int64(19 * g[1]) /* 1.4*2^29 */ - g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */ - g3_19 := int64(19 * g[3]) - g4_19 := int64(19 * g[4]) - g5_19 := int64(19 * g[5]) - g6_19 := int64(19 * g[6]) - g7_19 := int64(19 * g[7]) - g8_19 := int64(19 * g[8]) - g9_19 := int64(19 * g[9]) - - h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19 - h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19 - h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19 - h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19 - h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19 - h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19 - h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19 - h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19 - h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19 - h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0 - - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { - f0 := int64(f[0]) - f1 := int64(f[1]) - f2 := int64(f[2]) - f3 := int64(f[3]) - f4 := int64(f[4]) - f5 := int64(f[5]) - f6 := int64(f[6]) - f7 := int64(f[7]) - f8 := int64(f[8]) - f9 := int64(f[9]) - f0_2 := int64(2 * f[0]) - f1_2 := int64(2 * f[1]) - f2_2 := int64(2 * f[2]) - f3_2 := int64(2 * f[3]) - f4_2 := int64(2 * f[4]) - f5_2 := int64(2 * f[5]) - f6_2 := int64(2 * f[6]) - f7_2 := int64(2 * f[7]) - f5_38 := 38 * f5 // 1.31*2^30 - f6_19 := 19 * f6 // 1.31*2^30 - f7_38 := 38 * f7 // 1.31*2^30 - f8_19 := 19 * f8 // 1.31*2^30 - f9_38 := 38 * f9 // 1.31*2^30 - - h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38 - h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19 - h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19 - h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38 - h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38 - h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19 - h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19 - h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38 - h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38 - h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5 - - return -} - -// FeSquare calculates h = f*f. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. 
-func FeSquare(h, f *FieldElement) { - h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -// FeSquare2 sets h = 2 * f * f -// -// Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. -// See fe_mul.c for discussion of implementation strategy. -func FeSquare2(h, f *FieldElement) { - h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) - - h0 += h0 - h1 += h1 - h2 += h2 - h3 += h3 - h4 += h4 - h5 += h5 - h6 += h6 - h7 += h7 - h8 += h8 - h9 += h9 - - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -func FeInvert(out, z *FieldElement) { - var t0, t1, t2, t3 FieldElement - var i int - - FeSquare(&t0, z) // 2^1 - FeSquare(&t1, &t0) // 2^2 - for i = 1; i < 2; i++ { // 2^3 - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) // 2^3 + 2^0 - FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 - FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 - FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 - FeSquare(&t2, &t1) // 5,4,3,2,1 - for i = 1; i < 5; i++ { // 9,8,7,6,5 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 - FeSquare(&t2, &t1) // 10..1 - for i = 1; i < 10; i++ { // 19..10 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 19..0 - FeSquare(&t3, &t2) // 20..1 - for i = 1; i < 20; i++ { // 39..20 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 39..0 - FeSquare(&t2, &t2) // 40..1 - for i = 1; i < 10; i++ { // 49..10 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 49..0 - FeSquare(&t2, &t1) // 50..1 - for i = 1; i < 50; i++ { // 99..50 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 99..0 - FeSquare(&t3, &t2) // 100..1 - for i = 1; i < 100; i++ { // 199..100 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 199..0 - FeSquare(&t2, &t2) // 200..1 - for i = 1; i < 50; i++ { // 249..50 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 249..0 - FeSquare(&t1, &t1) // 250..1 - for i = 1; i < 5; i++ { // 254..5 - FeSquare(&t1, &t1) - } - FeMul(out, &t1, &t0) // 254..5,3,1,0 -} - -func fePow22523(out, z *FieldElement) { - var t0, t1, t2 FieldElement - var i int - - FeSquare(&t0, z) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeSquare(&t1, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) - FeMul(&t0, &t0, &t1) - FeSquare(&t0, &t0) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 5; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 20; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 100; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t0, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t0, &t0) - } - FeMul(out, &t0, z) -} - -// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * -// y^2 where d = -121665/121666. 
-// -// Several representations are used: -// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z -// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT -// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T -// PreComputedGroupElement: (y+x,y-x,2dxy) - -type ProjectiveGroupElement struct { - X, Y, Z FieldElement -} - -type ExtendedGroupElement struct { - X, Y, Z, T FieldElement -} - -type CompletedGroupElement struct { - X, Y, Z, T FieldElement -} - -type PreComputedGroupElement struct { - yPlusX, yMinusX, xy2d FieldElement -} - -type CachedGroupElement struct { - yPlusX, yMinusX, Z, T2d FieldElement -} - -func (p *ProjectiveGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) -} - -func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { - var t0 FieldElement - - FeSquare(&r.X, &p.X) - FeSquare(&r.Z, &p.Y) - FeSquare2(&r.T, &p.Z) - FeAdd(&r.Y, &p.X, &p.Y) - FeSquare(&t0, &r.Y) - FeAdd(&r.Y, &r.Z, &r.X) - FeSub(&r.Z, &r.Z, &r.X) - FeSub(&r.X, &t0, &r.Y) - FeSub(&r.T, &r.T, &r.Z) -} - -func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) - FeZero(&p.T) -} - -func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { - var q ProjectiveGroupElement - p.ToProjective(&q) - q.Double(r) -} - -func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { - FeAdd(&r.yPlusX, &p.Y, &p.X) - FeSub(&r.yMinusX, &p.Y, &p.X) - FeCopy(&r.Z, &p.Z) - FeMul(&r.T2d, &p.T, &d2) -} - -func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeCopy(&r.X, &p.X) - FeCopy(&r.Y, &p.Y) - FeCopy(&r.Z, &p.Z) -} - -func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { - var u, v, v3, vxx, check FieldElement - - FeFromBytes(&p.Y, s) - FeOne(&p.Z) - FeSquare(&u, &p.Y) - FeMul(&v, &u, &d) - FeSub(&u, &u, &p.Z) // y = y^2-1 - FeAdd(&v, &v, &p.Z) // v = dy^2+1 - - FeSquare(&v3, &v) - FeMul(&v3, &v3, &v) // v3 = v^3 - FeSquare(&p.X, &v3) - FeMul(&p.X, &p.X, &v) - FeMul(&p.X, &p.X, &u) // x = uv^7 - - fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) - FeMul(&p.X, &p.X, &v3) - FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) - - var tmpX, tmp2 [32]byte - - FeSquare(&vxx, &p.X) - FeMul(&vxx, &vxx, &v) - FeSub(&check, &vxx, &u) // vx^2-u - if FeIsNonZero(&check) == 1 { - FeAdd(&check, &vxx, &u) // vx^2+u - if FeIsNonZero(&check) == 1 { - return false - } - FeMul(&p.X, &p.X, &SqrtM1) - - FeToBytes(&tmpX, &p.X) - for i, v := range tmpX { - tmp2[31-i] = v - } - } - - if FeIsNegative(&p.X) != (s[31] >> 7) { - FeNeg(&p.X, &p.X) - } - - FeMul(&p.T, &p.X, &p.Y) - return true -} - -func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) -} - -func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) - FeMul(&r.T, &p.X, &p.Y) -} - -func (p *PreComputedGroupElement) Zero() { - FeOne(&p.yPlusX) - FeOne(&p.yMinusX) - FeZero(&p.xy2d) -} - -func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q 
*CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func slide(r *[256]int8, a *[32]byte) { - for i := range r { - r[i] = int8(1 & (a[i>>3] >> uint(i&7))) - } - - for i := range r { - if r[i] != 0 { - for b := 1; b <= 6 && i+b < 256; b++ { - if r[i+b] != 0 { - if r[i]+(r[i+b]<<uint(b)) <= 15 { - r[i] += r[i+b] << uint(b) - r[i+b] = 0 - } else if r[i]-(r[i+b]<<uint(b)) >= -15 { - r[i] -= r[i+b] << uint(b) - for k := i + b; k < 256; k++ { - if r[k] == 0 { - r[k] = 1 - break - } - r[k] = 0 - } - } else { - break - } - } - } - } - } -} - -// GeDoubleScalarMultVartime sets r = a*A + b*B -// where a = a[0]+256*a[1]+...+256^31 a[31]. -// and b = b[0]+256*b[1]+...+256^31 b[31]. -// B is the Ed25519 base point (x,4/5) with x positive. -func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { - var aSlide, bSlide [256]int8 - var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A - var t CompletedGroupElement - var u, A2 ExtendedGroupElement - var i int - - slide(&aSlide, a) - slide(&bSlide, b) - - A.ToCached(&Ai[0]) - A.Double(&t) - t.ToExtended(&A2) - - for i := 0; i < 7; i++ { - geAdd(&t, &A2, &Ai[i]) - t.ToExtended(&u) - u.ToCached(&Ai[i+1]) - } - - r.Zero() - - for i = 255; i >= 0; i-- { - if aSlide[i] != 0 || bSlide[i] != 0 { - break - } - } - - for ; i >= 0; i-- { - r.Double(&t) - - if aSlide[i] > 0 { - t.ToExtended(&u) - geAdd(&t, &u, &Ai[aSlide[i]/2]) - } else if aSlide[i] < 0 { - t.ToExtended(&u) - geSub(&t, &u, &Ai[(-aSlide[i])/2]) - } - - if bSlide[i] > 0 { - t.ToExtended(&u) - geMixedAdd(&t, &u, &bi[bSlide[i]/2]) - } else if bSlide[i] < 0 { - t.ToExtended(&u) - geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) - } - - t.ToProjective(r) - } -} - -// equal returns 1 if b == c and 0 otherwise, assuming that b and c are -// non-negative. -func equal(b, c int32) int32 { - x := uint32(b ^ c) - x-- - return int32(x >> 31) -} - -// negative returns 1 if b < 0 and 0 otherwise. 
-func negative(b int32) int32 { - return (b >> 31) & 1 -} - -func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { - FeCMove(&t.yPlusX, &u.yPlusX, b) - FeCMove(&t.yMinusX, &u.yMinusX, b) - FeCMove(&t.xy2d, &u.xy2d, b) -} - -func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { - var minusT PreComputedGroupElement - bNegative := negative(b) - bAbs := b - (((-bNegative) & b) << 1) - - t.Zero() - for i := int32(0); i < 8; i++ { - PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) - } - FeCopy(&minusT.yPlusX, &t.yMinusX) - FeCopy(&minusT.yMinusX, &t.yPlusX) - FeNeg(&minusT.xy2d, &t.xy2d) - PreComputedGroupElementCMove(t, &minusT, bNegative) -} - -// GeScalarMultBase computes h = a*B, where -// a = a[0]+256*a[1]+...+256^31 a[31] -// B is the Ed25519 base point (x,4/5) with x positive. -// -// Preconditions: -// a[31] <= 127 -func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { - var e [64]int8 - - for i, v := range a { - e[2*i] = int8(v & 15) - e[2*i+1] = int8((v >> 4) & 15) - } - - // each e[i] is between 0 and 15 and e[63] is between 0 and 7. - - carry := int8(0) - for i := 0; i < 63; i++ { - e[i] += carry - carry = (e[i] + 8) >> 4 - e[i] -= carry << 4 - } - e[63] += carry - // each e[i] is between -8 and 8. - - h.Zero() - var t PreComputedGroupElement - var r CompletedGroupElement - for i := int32(1); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } - - var s ProjectiveGroupElement - - h.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToExtended(h) - - for i := int32(0); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } -} - -// The scalars are GF(2^252 + 27742317777372353535851937790883648493). - -// Input: -// a[0]+256*a[1]+...+256^31*a[31] = a -// b[0]+256*b[1]+...+256^31*b[31] = b -// c[0]+256*c[1]+...+256^31*c[31] = c -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l -// where l = 2^252 + 27742317777372353535851937790883648493. 
-func ScMulAdd(s, a, b, c *[32]byte) { - a0 := 2097151 & load3(a[:]) - a1 := 2097151 & (load4(a[2:]) >> 5) - a2 := 2097151 & (load3(a[5:]) >> 2) - a3 := 2097151 & (load4(a[7:]) >> 7) - a4 := 2097151 & (load4(a[10:]) >> 4) - a5 := 2097151 & (load3(a[13:]) >> 1) - a6 := 2097151 & (load4(a[15:]) >> 6) - a7 := 2097151 & (load3(a[18:]) >> 3) - a8 := 2097151 & load3(a[21:]) - a9 := 2097151 & (load4(a[23:]) >> 5) - a10 := 2097151 & (load3(a[26:]) >> 2) - a11 := (load4(a[28:]) >> 7) - b0 := 2097151 & load3(b[:]) - b1 := 2097151 & (load4(b[2:]) >> 5) - b2 := 2097151 & (load3(b[5:]) >> 2) - b3 := 2097151 & (load4(b[7:]) >> 7) - b4 := 2097151 & (load4(b[10:]) >> 4) - b5 := 2097151 & (load3(b[13:]) >> 1) - b6 := 2097151 & (load4(b[15:]) >> 6) - b7 := 2097151 & (load3(b[18:]) >> 3) - b8 := 2097151 & load3(b[21:]) - b9 := 2097151 & (load4(b[23:]) >> 5) - b10 := 2097151 & (load3(b[26:]) >> 2) - b11 := (load4(b[28:]) >> 7) - c0 := 2097151 & load3(c[:]) - c1 := 2097151 & (load4(c[2:]) >> 5) - c2 := 2097151 & (load3(c[5:]) >> 2) - c3 := 2097151 & (load4(c[7:]) >> 7) - c4 := 2097151 & (load4(c[10:]) >> 4) - c5 := 2097151 & (load3(c[13:]) >> 1) - c6 := 2097151 & (load4(c[15:]) >> 6) - c7 := 2097151 & (load3(c[18:]) >> 3) - c8 := 2097151 & load3(c[21:]) - c9 := 2097151 & (load4(c[23:]) >> 5) - c10 := 2097151 & (load3(c[26:]) >> 2) - c11 := (load4(c[28:]) >> 7) - var carry [23]int64 - - s0 := c0 + a0*b0 - s1 := c1 + a0*b1 + a1*b0 - s2 := c2 + a0*b2 + a1*b1 + a2*b0 - s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 - s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 - s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 - s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 - s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 - s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 - s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 - s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 - s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 - s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 - s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 - s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 - s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 - s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 - s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 - s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 - s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 - s20 := a9*b11 + a10*b10 + a11*b9 - s21 := a10*b11 + a11*b10 - s22 := a11 * b11 - s23 := int64(0) - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - carry[18] = (s18 + (1 
<< 20)) >> 21 - s19 += carry[18] - s18 -= carry[18] << 21 - carry[20] = (s20 + (1 << 20)) >> 21 - s21 += carry[20] - s20 -= carry[20] << 21 - carry[22] = (s22 + (1 << 20)) >> 21 - s23 += carry[22] - s22 -= carry[22] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - carry[17] = (s17 + (1 << 20)) >> 21 - s18 += carry[17] - s17 -= carry[17] << 21 - carry[19] = (s19 + (1 << 20)) >> 21 - s20 += carry[19] - s19 -= carry[19] << 21 - carry[21] = (s21 + (1 << 20)) >> 21 - s22 += carry[21] - s21 -= carry[21] << 21 - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 - s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 
+= s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - s[0] = byte(s0 >> 0) - s[1] = byte(s0 >> 8) - s[2] = byte((s0 >> 16) | (s1 << 5)) - s[3] = byte(s1 >> 3) - s[4] = byte(s1 >> 11) - s[5] = byte((s1 >> 19) | (s2 << 2)) - s[6] = byte(s2 >> 6) - s[7] = byte((s2 >> 14) | (s3 << 7)) - s[8] = byte(s3 >> 1) - s[9] = byte(s3 >> 9) - s[10] = byte((s3 >> 17) | (s4 << 4)) - s[11] = byte(s4 >> 4) - s[12] = byte(s4 >> 12) - s[13] = byte((s4 >> 20) | (s5 << 1)) - s[14] = byte(s5 >> 7) - s[15] = byte((s5 >> 15) | (s6 << 6)) - s[16] = byte(s6 >> 2) - s[17] = byte(s6 >> 10) - s[18] = byte((s6 >> 18) | (s7 << 3)) - s[19] = byte(s7 >> 5) - s[20] = byte(s7 >> 13) - s[21] = byte(s8 >> 0) - s[22] = byte(s8 >> 8) - s[23] = byte((s8 >> 16) | (s9 << 5)) - s[24] = byte(s9 >> 3) - s[25] = byte(s9 >> 11) - s[26] = byte((s9 >> 19) | (s10 << 2)) - s[27] 
= byte(s10 >> 6) - s[28] = byte((s10 >> 14) | (s11 << 7)) - s[29] = byte(s11 >> 1) - s[30] = byte(s11 >> 9) - s[31] = byte(s11 >> 17) -} - -// Input: -// s[0]+256*s[1]+...+256^63*s[63] = s -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = s mod l -// where l = 2^252 + 27742317777372353535851937790883648493. -func ScReduce(out *[32]byte, s *[64]byte) { - s0 := 2097151 & load3(s[:]) - s1 := 2097151 & (load4(s[2:]) >> 5) - s2 := 2097151 & (load3(s[5:]) >> 2) - s3 := 2097151 & (load4(s[7:]) >> 7) - s4 := 2097151 & (load4(s[10:]) >> 4) - s5 := 2097151 & (load3(s[13:]) >> 1) - s6 := 2097151 & (load4(s[15:]) >> 6) - s7 := 2097151 & (load3(s[18:]) >> 3) - s8 := 2097151 & load3(s[21:]) - s9 := 2097151 & (load4(s[23:]) >> 5) - s10 := 2097151 & (load3(s[26:]) >> 2) - s11 := 2097151 & (load4(s[28:]) >> 7) - s12 := 2097151 & (load4(s[31:]) >> 4) - s13 := 2097151 & (load3(s[34:]) >> 1) - s14 := 2097151 & (load4(s[36:]) >> 6) - s15 := 2097151 & (load3(s[39:]) >> 3) - s16 := 2097151 & load3(s[42:]) - s17 := 2097151 & (load4(s[44:]) >> 5) - s18 := 2097151 & (load3(s[47:]) >> 2) - s19 := 2097151 & (load4(s[49:]) >> 7) - s20 := 2097151 & (load4(s[52:]) >> 4) - s21 := 2097151 & (load3(s[55:]) >> 1) - s22 := 2097151 & (load4(s[57:]) >> 6) - s23 := (load4(s[60:]) >> 3) - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - var carry [17]int64 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 
- s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - out[0] = byte(s0 >> 0) - out[1] = byte(s0 >> 8) - out[2] = byte((s0 >> 16) | (s1 << 5)) - out[3] = byte(s1 >> 3) - out[4] = byte(s1 >> 11) - out[5] = byte((s1 >> 19) | (s2 << 2)) - out[6] = byte(s2 >> 6) - out[7] = byte((s2 >> 14) | (s3 << 7)) - out[8] = byte(s3 >> 1) - out[9] = byte(s3 >> 9) - out[10] = byte((s3 >> 17) | (s4 << 4)) - out[11] = byte(s4 >> 4) - out[12] = byte(s4 >> 12) - out[13] = byte((s4 >> 20) | (s5 << 1)) - out[14] = byte(s5 >> 7) - out[15] = byte((s5 >> 15) | (s6 << 6)) - out[16] = byte(s6 >> 2) - out[17] = byte(s6 >> 10) - out[18] = byte((s6 >> 18) | (s7 << 
3)) - out[19] = byte(s7 >> 5) - out[20] = byte(s7 >> 13) - out[21] = byte(s8 >> 0) - out[22] = byte(s8 >> 8) - out[23] = byte((s8 >> 16) | (s9 << 5)) - out[24] = byte(s9 >> 3) - out[25] = byte(s9 >> 11) - out[26] = byte((s9 >> 19) | (s10 << 2)) - out[27] = byte(s10 >> 6) - out[28] = byte((s10 >> 14) | (s11 << 7)) - out[29] = byte(s11 >> 1) - out[30] = byte(s11 >> 9) - out[31] = byte(s11 >> 17) -} - -// order is the order of Curve25519 in little-endian form. -var order = [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000} - -// ScMinimal returns true if the given scalar is less than the order of the -// curve. -func ScMinimal(scalar *[32]byte) bool { - for i := 3; ; i-- { - v := binary.LittleEndian.Uint64(scalar[i*8:]) - if v > order[i] { - return false - } else if v < order[i] { - break - } else if i == 0 { - return false - } - } - - return true -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/internal/subtle/aliasing.go b/awsproviderlint/vendor/golang.org/x/crypto/internal/subtle/aliasing.go deleted file mode 100644 index f38797bfa1b..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/internal/subtle/aliasing.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine - -// Package subtle implements functions that are often useful in cryptographic -// code but require careful thought to use correctly. -package subtle // import "golang.org/x/crypto/internal/subtle" - -import "unsafe" - -// AnyOverlap reports whether x and y share memory at any (not necessarily -// corresponding) index. The memory beyond the slice length is ignored. -func AnyOverlap(x, y []byte) bool { - return len(x) > 0 && len(y) > 0 && - uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) && - uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1])) -} - -// InexactOverlap reports whether x and y share memory at any non-corresponding -// index. The memory beyond the slice length is ignored. Note that x and y can -// have different lengths and still not have any inexact overlap. -// -// InexactOverlap can be used to implement the requirements of the crypto/cipher -// AEAD, Block, BlockMode and Stream interfaces. -func InexactOverlap(x, y []byte) bool { - if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { - return false - } - return AnyOverlap(x, y) -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go b/awsproviderlint/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go deleted file mode 100644 index 0cc4a8a642c..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine - -// Package subtle implements functions that are often useful in cryptographic -// code but require careful thought to use correctly. -package subtle // import "golang.org/x/crypto/internal/subtle" - -// This is the Google App Engine standard variant based on reflect -// because the unsafe package and cgo are disallowed. - -import "reflect" - -// AnyOverlap reports whether x and y share memory at any (not necessarily -// corresponding) index. The memory beyond the slice length is ignored. 
-func AnyOverlap(x, y []byte) bool { - return len(x) > 0 && len(y) > 0 && - reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() && - reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer() -} - -// InexactOverlap reports whether x and y share memory at any non-corresponding -// index. The memory beyond the slice length is ignored. Note that x and y can -// have different lengths and still not have any inexact overlap. -// -// InexactOverlap can be used to implement the requirements of the crypto/cipher -// AEAD, Block, BlockMode and Stream interfaces. -func InexactOverlap(x, y []byte) bool { - if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { - return false - } - return AnyOverlap(x, y) -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/poly1305/bits_compat.go b/awsproviderlint/vendor/golang.org/x/crypto/poly1305/bits_compat.go deleted file mode 100644 index 157a69f61bd..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/poly1305/bits_compat.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.13 - -package poly1305 - -// Generic fallbacks for the math/bits intrinsics, copied from -// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sum64 had -// variable time fallbacks until Go 1.13. - -func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { - sum = x + y + carry - carryOut = ((x & y) | ((x | y) &^ sum)) >> 63 - return -} - -func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { - diff = x - y - borrow - borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63 - return -} - -func bitsMul64(x, y uint64) (hi, lo uint64) { - const mask32 = 1<<32 - 1 - x0 := x & mask32 - x1 := x >> 32 - y0 := y & mask32 - y1 := y >> 32 - w0 := x0 * y0 - t := x1*y0 + w0>>32 - w1 := t & mask32 - w2 := t >> 32 - w1 += x0 * y1 - hi = x1*y1 + w2 + w1>>32 - lo = x * y - return -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go b/awsproviderlint/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go deleted file mode 100644 index a0a185f0fc7..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.13 - -package poly1305 - -import "math/bits" - -func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { - return bits.Add64(x, y, carry) -} - -func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { - return bits.Sub64(x, y, borrow) -} - -func bitsMul64(x, y uint64) (hi, lo uint64) { - return bits.Mul64(x, y) -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/poly1305/mac_noasm.go b/awsproviderlint/vendor/golang.org/x/crypto/poly1305/mac_noasm.go deleted file mode 100644 index d118f30ed56..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/poly1305/mac_noasm.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !amd64,!ppc64le,!s390x gccgo purego - -package poly1305 - -type mac struct{ macGeneric } diff --git a/awsproviderlint/vendor/golang.org/x/crypto/poly1305/poly1305.go b/awsproviderlint/vendor/golang.org/x/crypto/poly1305/poly1305.go deleted file mode 100644 index 9d7a6af09fe..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/poly1305/poly1305.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package poly1305 implements Poly1305 one-time message authentication code as -// specified in https://cr.yp.to/mac/poly1305-20050329.pdf. -// -// Poly1305 is a fast, one-time authentication function. It is infeasible for an -// attacker to generate an authenticator for a message without the key. However, a -// key must only be used for a single message. Authenticating two different -// messages with the same key allows an attacker to forge authenticators for other -// messages with the same key. -// -// Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was -// used with a fixed key in order to generate one-time keys from an nonce. -// However, in this package AES isn't used and the one-time key is specified -// directly. -package poly1305 // import "golang.org/x/crypto/poly1305" - -import "crypto/subtle" - -// TagSize is the size, in bytes, of a poly1305 authenticator. -const TagSize = 16 - -// Sum generates an authenticator for msg using a one-time key and puts the -// 16-byte result into out. Authenticating two different messages with the same -// key allows an attacker to forge messages at will. -func Sum(out *[16]byte, m []byte, key *[32]byte) { - h := New(key) - h.Write(m) - h.Sum(out[:0]) -} - -// Verify returns true if mac is a valid authenticator for m with the given key. -func Verify(mac *[16]byte, m []byte, key *[32]byte) bool { - var tmp [16]byte - Sum(&tmp, m, key) - return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1 -} - -// New returns a new MAC computing an authentication -// tag of all data written to it with the given key. -// This allows writing the message progressively instead -// of passing it as a single slice. Common users should use -// the Sum function instead. -// -// The key must be unique for each message, as authenticating -// two different messages with the same key allows an attacker -// to forge messages at will. -func New(key *[32]byte) *MAC { - m := &MAC{} - initialize(key, &m.macState) - return m -} - -// MAC is an io.Writer computing an authentication tag -// of the data written to it. -// -// MAC cannot be used like common hash.Hash implementations, -// because using a poly1305 key twice breaks its security. -// Therefore writing data to a running MAC after calling -// Sum or Verify causes it to panic. -type MAC struct { - mac // platform-dependent implementation - - finalized bool -} - -// Size returns the number of bytes Sum will return. -func (h *MAC) Size() int { return TagSize } - -// Write adds more data to the running message authentication code. -// It never returns an error. -// -// It must not be called after the first call of Sum or Verify. -func (h *MAC) Write(p []byte) (n int, err error) { - if h.finalized { - panic("poly1305: write to MAC after Sum or Verify") - } - return h.mac.Write(p) -} - -// Sum computes the authenticator of all data written to the -// message authentication code. 
-func (h *MAC) Sum(b []byte) []byte { - var mac [TagSize]byte - h.mac.Sum(&mac) - h.finalized = true - return append(b, mac[:]...) -} - -// Verify returns whether the authenticator of all data written to -// the message authentication code matches the expected value. -func (h *MAC) Verify(expected []byte) bool { - var mac [TagSize]byte - h.mac.Sum(&mac) - h.finalized = true - return subtle.ConstantTimeCompare(expected, mac[:]) == 1 -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_amd64.go deleted file mode 100644 index 99e5a1d50ef..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_amd64.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !gccgo,!purego - -package poly1305 - -//go:noescape -func update(state *macState, msg []byte) - -// mac is a wrapper for macGeneric that redirects calls that would have gone to -// updateGeneric to update. -// -// Its Write and Sum methods are otherwise identical to the macGeneric ones, but -// using function pointers would carry a major performance cost. -type mac struct{ macGeneric } - -func (h *mac) Write(p []byte) (int, error) { - nn := len(p) - if h.offset > 0 { - n := copy(h.buffer[h.offset:], p) - if h.offset+n < TagSize { - h.offset += n - return nn, nil - } - p = p[n:] - h.offset = 0 - update(&h.macState, h.buffer[:]) - } - if n := len(p) - (len(p) % TagSize); n > 0 { - update(&h.macState, p[:n]) - p = p[n:] - } - if len(p) > 0 { - h.offset += copy(h.buffer[h.offset:], p) - } - return nn, nil -} - -func (h *mac) Sum(out *[16]byte) { - state := h.macState - if h.offset > 0 { - update(&state, h.buffer[:h.offset]) - } - finalize(out, &state.h, &state.s) -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_amd64.s b/awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_amd64.s deleted file mode 100644 index 8d394a212ee..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_amd64.s +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !gccgo,!purego - -#include "textflag.h" - -#define POLY1305_ADD(msg, h0, h1, h2) \ - ADDQ 0(msg), h0; \ - ADCQ 8(msg), h1; \ - ADCQ $1, h2; \ - LEAQ 16(msg), msg - -#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \ - MOVQ r0, AX; \ - MULQ h0; \ - MOVQ AX, t0; \ - MOVQ DX, t1; \ - MOVQ r0, AX; \ - MULQ h1; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ r0, t2; \ - IMULQ h2, t2; \ - ADDQ DX, t2; \ - \ - MOVQ r1, AX; \ - MULQ h0; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ DX, h0; \ - MOVQ r1, t3; \ - IMULQ h2, t3; \ - MOVQ r1, AX; \ - MULQ h1; \ - ADDQ AX, t2; \ - ADCQ DX, t3; \ - ADDQ h0, t2; \ - ADCQ $0, t3; \ - \ - MOVQ t0, h0; \ - MOVQ t1, h1; \ - MOVQ t2, h2; \ - ANDQ $3, h2; \ - MOVQ t2, t0; \ - ANDQ $0xFFFFFFFFFFFFFFFC, t0; \ - ADDQ t0, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2; \ - SHRQ $2, t3, t2; \ - SHRQ $2, t3; \ - ADDQ t2, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2 - -// func update(state *[7]uint64, msg []byte) -TEXT ·update(SB), $0-32 - MOVQ state+0(FP), DI - MOVQ msg_base+8(FP), SI - MOVQ msg_len+16(FP), R15 - - MOVQ 0(DI), R8 // h0 - MOVQ 8(DI), R9 // h1 - MOVQ 16(DI), R10 // h2 - MOVQ 24(DI), R11 // r0 - MOVQ 32(DI), R12 // r1 - - CMPQ R15, $16 - JB bytes_between_0_and_15 - -loop: - POLY1305_ADD(SI, R8, R9, R10) - -multiply: - POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14) - SUBQ $16, R15 - CMPQ R15, $16 - JAE loop - -bytes_between_0_and_15: - TESTQ R15, R15 - JZ done - MOVQ $1, BX - XORQ CX, CX - XORQ R13, R13 - ADDQ R15, SI - -flush_buffer: - SHLQ $8, BX, CX - SHLQ $8, BX - MOVB -1(SI), R13 - XORQ R13, BX - DECQ SI - DECQ R15 - JNZ flush_buffer - - ADDQ BX, R8 - ADCQ CX, R9 - ADCQ $0, R10 - MOVQ $16, R15 - JMP multiply - -done: - MOVQ R8, 0(DI) - MOVQ R9, 8(DI) - MOVQ R10, 16(DI) - RET diff --git a/awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_generic.go b/awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_generic.go deleted file mode 100644 index c942a65904f..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_generic.go +++ /dev/null @@ -1,310 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file provides the generic implementation of Sum and MAC. Other files -// might provide optimized assembly implementations of some of this code. - -package poly1305 - -import "encoding/binary" - -// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag -// for a 64 bytes message is approximately -// -// s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r mod 2¹³⁰ - 5 -// -// for some secret r and s. It can be computed sequentially like -// -// for len(msg) > 0: -// h += read(msg, 16) -// h *= r -// h %= 2¹³⁰ - 5 -// return h + s -// -// All the complexity is about doing performant constant-time math on numbers -// larger than any available numeric type. - -func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) { - h := newMACGeneric(key) - h.Write(msg) - h.Sum(out) -} - -func newMACGeneric(key *[32]byte) macGeneric { - m := macGeneric{} - initialize(key, &m.macState) - return m -} - -// macState holds numbers in saturated 64-bit little-endian limbs. That is, -// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸. -type macState struct { - // h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but - // can grow larger during and after rounds. It must, however, remain below - // 2 * (2¹³⁰ - 5). 
- h [3]uint64 - // r and s are the private key components. - r [2]uint64 - s [2]uint64 -} - -type macGeneric struct { - macState - - buffer [TagSize]byte - offset int -} - -// Write splits the incoming message into TagSize chunks, and passes them to -// update. It buffers incomplete chunks. -func (h *macGeneric) Write(p []byte) (int, error) { - nn := len(p) - if h.offset > 0 { - n := copy(h.buffer[h.offset:], p) - if h.offset+n < TagSize { - h.offset += n - return nn, nil - } - p = p[n:] - h.offset = 0 - updateGeneric(&h.macState, h.buffer[:]) - } - if n := len(p) - (len(p) % TagSize); n > 0 { - updateGeneric(&h.macState, p[:n]) - p = p[n:] - } - if len(p) > 0 { - h.offset += copy(h.buffer[h.offset:], p) - } - return nn, nil -} - -// Sum flushes the last incomplete chunk from the buffer, if any, and generates -// the MAC output. It does not modify its state, in order to allow for multiple -// calls to Sum, even if no Write is allowed after Sum. -func (h *macGeneric) Sum(out *[TagSize]byte) { - state := h.macState - if h.offset > 0 { - updateGeneric(&state, h.buffer[:h.offset]) - } - finalize(out, &state.h, &state.s) -} - -// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It -// clears some bits of the secret coefficient to make it possible to implement -// multiplication more efficiently. -const ( - rMask0 = 0x0FFFFFFC0FFFFFFF - rMask1 = 0x0FFFFFFC0FFFFFFC -) - -// initialize loads the 256-bit key into the two 128-bit secret values r and s. -func initialize(key *[32]byte, m *macState) { - m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0 - m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1 - m.s[0] = binary.LittleEndian.Uint64(key[16:24]) - m.s[1] = binary.LittleEndian.Uint64(key[24:32]) -} - -// uint128 holds a 128-bit number as two 64-bit limbs, for use with the -// bits.Mul64 and bits.Add64 intrinsics. -type uint128 struct { - lo, hi uint64 -} - -func mul64(a, b uint64) uint128 { - hi, lo := bitsMul64(a, b) - return uint128{lo, hi} -} - -func add128(a, b uint128) uint128 { - lo, c := bitsAdd64(a.lo, b.lo, 0) - hi, c := bitsAdd64(a.hi, b.hi, c) - if c != 0 { - panic("poly1305: unexpected overflow") - } - return uint128{lo, hi} -} - -func shiftRightBy2(a uint128) uint128 { - a.lo = a.lo>>2 | (a.hi&3)<<62 - a.hi = a.hi >> 2 - return a -} - -// updateGeneric absorbs msg into the state.h accumulator. For each chunk m of -// 128 bits of message, it computes -// -// h₊ = (h + m) * r mod 2¹³⁰ - 5 -// -// If the msg length is not a multiple of TagSize, it assumes the last -// incomplete chunk is the final one. -func updateGeneric(state *macState, msg []byte) { - h0, h1, h2 := state.h[0], state.h[1], state.h[2] - r0, r1 := state.r[0], state.r[1] - - for len(msg) > 0 { - var c uint64 - - // For the first step, h + m, we use a chain of bits.Add64 intrinsics. - // The resulting value of h might exceed 2¹³⁰ - 5, but will be partially - // reduced at the end of the multiplication below. - // - // The spec requires us to set a bit just above the message size, not to - // hide leading zeroes. For full chunks, that's 1 << 128, so we can just - // add 1 to the most significant (2¹²⁸) limb, h2. 
- if len(msg) >= TagSize { - h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) - h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) - h2 += c + 1 - - msg = msg[TagSize:] - } else { - var buf [TagSize]byte - copy(buf[:], msg) - buf[len(msg)] = 1 - - h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) - h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) - h2 += c - - msg = nil - } - - // Multiplication of big number limbs is similar to elementary school - // columnar multiplication. Instead of digits, there are 64-bit limbs. - // - // We are multiplying a 3 limbs number, h, by a 2 limbs number, r. - // - // h2 h1 h0 x - // r1 r0 = - // ---------------- - // h2r0 h1r0 h0r0 <-- individual 128-bit products - // + h2r1 h1r1 h0r1 - // ------------------------ - // m3 m2 m1 m0 <-- result in 128-bit overlapping limbs - // ------------------------ - // m3.hi m2.hi m1.hi m0.hi <-- carry propagation - // + m3.lo m2.lo m1.lo m0.lo - // ------------------------------- - // t4 t3 t2 t1 t0 <-- final result in 64-bit limbs - // - // The main difference from pen-and-paper multiplication is that we do - // carry propagation in a separate step, as if we wrote two digit sums - // at first (the 128-bit limbs), and then carried the tens all at once. - - h0r0 := mul64(h0, r0) - h1r0 := mul64(h1, r0) - h2r0 := mul64(h2, r0) - h0r1 := mul64(h0, r1) - h1r1 := mul64(h1, r1) - h2r1 := mul64(h2, r1) - - // Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their - // top 4 bits cleared by rMask{0,1}, we know that their product is not going - // to overflow 64 bits, so we can ignore the high part of the products. - // - // This also means that the product doesn't have a fifth limb (t4). - if h2r0.hi != 0 { - panic("poly1305: unexpected overflow") - } - if h2r1.hi != 0 { - panic("poly1305: unexpected overflow") - } - - m0 := h0r0 - m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again - m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1. - m3 := h2r1 - - t0 := m0.lo - t1, c := bitsAdd64(m1.lo, m0.hi, 0) - t2, c := bitsAdd64(m2.lo, m1.hi, c) - t3, _ := bitsAdd64(m3.lo, m2.hi, c) - - // Now we have the result as 4 64-bit limbs, and we need to reduce it - // modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do - // a cheap partial reduction according to the reduction identity - // - // c * 2¹³⁰ + n = c * 5 + n mod 2¹³⁰ - 5 - // - // because 2¹³⁰ = 5 mod 2¹³⁰ - 5. Partial reduction since the result is - // likely to be larger than 2¹³⁰ - 5, but still small enough to fit the - // assumptions we make about h in the rest of the code. - // - // See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23 - - // We split the final result at the 2¹³⁰ mark into h and cc, the carry. - // Note that the carry bits are effectively shifted left by 2, in other - // words, cc = c * 4 for the c in the reduction identity. - h0, h1, h2 = t0, t1, t2&maskLow2Bits - cc := uint128{t2 & maskNotLow2Bits, t3} - - // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c. 
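The reduction identity that comment chain relies on, c·2¹³⁰ + n ≡ 5·c + n (mod 2¹³⁰ - 5), is easy to sanity-check outside the constant-time code. A standalone math/big sketch with arbitrary values for the carry c and low part n:

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        // p = 2¹³⁰ - 5, the Poly1305 prime.
        p := new(big.Int).Lsh(big.NewInt(1), 130)
        p.Sub(p, big.NewInt(5))

        // Arbitrary carry c and low part n < 2¹³⁰.
        c := big.NewInt(123456789)
        n := new(big.Int).Lsh(big.NewInt(99), 100)

        // lhs = (c·2¹³⁰ + n) mod p
        lhs := new(big.Int).Lsh(c, 130)
        lhs.Add(lhs, n)
        lhs.Mod(lhs, p)

        // rhs = (5·c + n) mod p
        rhs := new(big.Int).Mul(c, big.NewInt(5))
        rhs.Add(rhs, n)
        rhs.Mod(rhs, p)

        fmt.Println(lhs.Cmp(rhs) == 0) // true, because 2¹³⁰ ≡ 5 (mod 2¹³⁰ - 5)
    }

This is why the code can fold the carry back in as 4c + c (cc plus cc>>2) instead of performing any division.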
- - h0, c = bitsAdd64(h0, cc.lo, 0) - h1, c = bitsAdd64(h1, cc.hi, c) - h2 += c - - cc = shiftRightBy2(cc) - - h0, c = bitsAdd64(h0, cc.lo, 0) - h1, c = bitsAdd64(h1, cc.hi, c) - h2 += c - - // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most - // - // 5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1 - } - - state.h[0], state.h[1], state.h[2] = h0, h1, h2 -} - -const ( - maskLow2Bits uint64 = 0x0000000000000003 - maskNotLow2Bits uint64 = ^maskLow2Bits -) - -// select64 returns x if v == 1 and y if v == 0, in constant time. -func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y } - -// [p0, p1, p2] is 2¹³⁰ - 5 in little endian order. -const ( - p0 = 0xFFFFFFFFFFFFFFFB - p1 = 0xFFFFFFFFFFFFFFFF - p2 = 0x0000000000000003 -) - -// finalize completes the modular reduction of h and computes -// -// out = h + s mod 2¹²⁸ -// -func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { - h0, h1, h2 := h[0], h[1], h[2] - - // After the partial reduction in updateGeneric, h might be more than - // 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction - // in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the - // result if the subtraction underflows, and t otherwise. - - hMinusP0, b := bitsSub64(h0, p0, 0) - hMinusP1, b := bitsSub64(h1, p1, b) - _, b = bitsSub64(h2, p2, b) - - // h = h if h < p else h - p - h0 = select64(b, h0, hMinusP0) - h1 = select64(b, h1, hMinusP1) - - // Finally, we compute the last Poly1305 step - // - // tag = h + s mod 2¹²⁸ - // - // by just doing a wide addition with the 128 low bits of h and discarding - // the overflow. - h0, c := bitsAdd64(h0, s[0], 0) - h1, _ = bitsAdd64(h1, s[1], c) - - binary.LittleEndian.PutUint64(out[0:8], h0) - binary.LittleEndian.PutUint64(out[8:16], h1) -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go b/awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go deleted file mode 100644 index 2e7a120b192..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !gccgo,!purego - -package poly1305 - -//go:noescape -func update(state *macState, msg []byte) - -// mac is a wrapper for macGeneric that redirects calls that would have gone to -// updateGeneric to update. -// -// Its Write and Sum methods are otherwise identical to the macGeneric ones, but -// using function pointers would carry a major performance cost. 
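The select64 helper deleted above is the usual branch-free select; a quick standalone check of the masking trick, with arbitrary values:

    package main

    import "fmt"

    // select64 returns x if v == 1 and y if v == 0, in constant time:
    // v-1 is all zeros when v == 1 and all ones when v == 0.
    func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y }

    func main() {
        fmt.Println(select64(1, 0xAAAA, 0xBBBB) == 0xAAAA) // true
        fmt.Println(select64(0, 0xAAAA, 0xBBBB) == 0xBBBB) // true
    }

Because the mask is either all zeros or all ones, finalize can pick between h and h-p without a data-dependent branch.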
-type mac struct{ macGeneric } - -func (h *mac) Write(p []byte) (int, error) { - nn := len(p) - if h.offset > 0 { - n := copy(h.buffer[h.offset:], p) - if h.offset+n < TagSize { - h.offset += n - return nn, nil - } - p = p[n:] - h.offset = 0 - update(&h.macState, h.buffer[:]) - } - if n := len(p) - (len(p) % TagSize); n > 0 { - update(&h.macState, p[:n]) - p = p[n:] - } - if len(p) > 0 { - h.offset += copy(h.buffer[h.offset:], p) - } - return nn, nil -} - -func (h *mac) Sum(out *[16]byte) { - state := h.macState - if h.offset > 0 { - update(&state, h.buffer[:h.offset]) - } - finalize(out, &state.h, &state.s) -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s b/awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s deleted file mode 100644 index 4e028138796..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !gccgo,!purego - -#include "textflag.h" - -// This was ported from the amd64 implementation. - -#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \ - MOVD (msg), t0; \ - MOVD 8(msg), t1; \ - MOVD $1, t2; \ - ADDC t0, h0, h0; \ - ADDE t1, h1, h1; \ - ADDE t2, h2; \ - ADD $16, msg - -#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ - MULLD r0, h0, t0; \ - MULLD r0, h1, t4; \ - MULHDU r0, h0, t1; \ - MULHDU r0, h1, t5; \ - ADDC t4, t1, t1; \ - MULLD r0, h2, t2; \ - ADDZE t5; \ - MULHDU r1, h0, t4; \ - MULLD r1, h0, h0; \ - ADD t5, t2, t2; \ - ADDC h0, t1, t1; \ - MULLD h2, r1, t3; \ - ADDZE t4, h0; \ - MULHDU r1, h1, t5; \ - MULLD r1, h1, t4; \ - ADDC t4, t2, t2; \ - ADDE t5, t3, t3; \ - ADDC h0, t2, t2; \ - MOVD $-4, t4; \ - MOVD t0, h0; \ - MOVD t1, h1; \ - ADDZE t3; \ - ANDCC $3, t2, h2; \ - AND t2, t4, t0; \ - ADDC t0, h0, h0; \ - ADDE t3, h1, h1; \ - SLD $62, t3, t4; \ - SRD $2, t2; \ - ADDZE h2; \ - OR t4, t2, t2; \ - SRD $2, t3; \ - ADDC t2, h0, h0; \ - ADDE t3, h1, h1; \ - ADDZE h2 - -DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF -DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC -GLOBL ·poly1305Mask<>(SB), RODATA, $16 - -// func update(state *[7]uint64, msg []byte) -TEXT ·update(SB), $0-32 - MOVD state+0(FP), R3 - MOVD msg_base+8(FP), R4 - MOVD msg_len+16(FP), R5 - - MOVD 0(R3), R8 // h0 - MOVD 8(R3), R9 // h1 - MOVD 16(R3), R10 // h2 - MOVD 24(R3), R11 // r0 - MOVD 32(R3), R12 // r1 - - CMP R5, $16 - BLT bytes_between_0_and_15 - -loop: - POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) - -multiply: - POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) - ADD $-16, R5 - CMP R5, $16 - BGE loop - -bytes_between_0_and_15: - CMP $0, R5 - BEQ done - MOVD $0, R16 // h0 - MOVD $0, R17 // h1 - -flush_buffer: - CMP R5, $8 - BLE just1 - - MOVD $8, R21 - SUB R21, R5, R21 - - // Greater than 8 -- load the rightmost remaining bytes in msg - // and put into R17 (h1) - MOVD (R4)(R21), R17 - MOVD $16, R22 - - // Find the offset to those bytes - SUB R5, R22, R22 - SLD $3, R22 - - // Shift to get only the bytes in msg - SRD R22, R17, R17 - - // Put 1 at high end - MOVD $1, R23 - SLD $3, R21 - SLD R21, R23, R23 - OR R23, R17, R17 - - // Remainder is 8 - MOVD $8, R5 - -just1: - CMP R5, $8 - BLT less8 - - // Exactly 8 - MOVD (R4), R16 - - CMP $0, R17 - - // Check if we've already set R17; if not - // set 1 to indicate end of msg. 
- BNE carry - MOVD $1, R17 - BR carry - -less8: - MOVD $0, R16 // h0 - MOVD $0, R22 // shift count - CMP R5, $4 - BLT less4 - MOVWZ (R4), R16 - ADD $4, R4 - ADD $-4, R5 - MOVD $32, R22 - -less4: - CMP R5, $2 - BLT less2 - MOVHZ (R4), R21 - SLD R22, R21, R21 - OR R16, R21, R16 - ADD $16, R22 - ADD $-2, R5 - ADD $2, R4 - -less2: - CMP $0, R5 - BEQ insert1 - MOVBZ (R4), R21 - SLD R22, R21, R21 - OR R16, R21, R16 - ADD $8, R22 - -insert1: - // Insert 1 at end of msg - MOVD $1, R21 - SLD R22, R21, R21 - OR R16, R21, R16 - -carry: - // Add new values to h0, h1, h2 - ADDC R16, R8 - ADDE R17, R9 - ADDE $0, R10 - MOVD $16, R5 - ADD R5, R4 - BR multiply - -done: - // Save h0, h1, h2 in state - MOVD R8, 0(R3) - MOVD R9, 8(R3) - MOVD R10, 16(R3) - RET diff --git a/awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_s390x.go b/awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_s390x.go deleted file mode 100644 index 958fedc0790..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_s390x.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !gccgo,!purego - -package poly1305 - -import ( - "golang.org/x/sys/cpu" -) - -// updateVX is an assembly implementation of Poly1305 that uses vector -// instructions. It must only be called if the vector facility (vx) is -// available. -//go:noescape -func updateVX(state *macState, msg []byte) - -// mac is a replacement for macGeneric that uses a larger buffer and redirects -// calls that would have gone to updateGeneric to updateVX if the vector -// facility is installed. -// -// A larger buffer is required for good performance because the vector -// implementation has a higher fixed cost per call than the generic -// implementation. -type mac struct { - macState - - buffer [16 * TagSize]byte // size must be a multiple of block size (16) - offset int -} - -func (h *mac) Write(p []byte) (int, error) { - nn := len(p) - if h.offset > 0 { - n := copy(h.buffer[h.offset:], p) - if h.offset+n < len(h.buffer) { - h.offset += n - return nn, nil - } - p = p[n:] - h.offset = 0 - if cpu.S390X.HasVX { - updateVX(&h.macState, h.buffer[:]) - } else { - updateGeneric(&h.macState, h.buffer[:]) - } - } - - tail := len(p) % len(h.buffer) // number of bytes to copy into buffer - body := len(p) - tail // number of bytes to process now - if body > 0 { - if cpu.S390X.HasVX { - updateVX(&h.macState, p[:body]) - } else { - updateGeneric(&h.macState, p[:body]) - } - } - h.offset = copy(h.buffer[:], p[body:]) // copy tail bytes - can be 0 - return nn, nil -} - -func (h *mac) Sum(out *[TagSize]byte) { - state := h.macState - remainder := h.buffer[:h.offset] - - // Use the generic implementation if we have 2 or fewer blocks left - // to sum. The vector implementation has a higher startup time. - if cpu.S390X.HasVX && len(remainder) > 2*TagSize { - updateVX(&state, remainder) - } else if len(remainder) > 0 { - updateGeneric(&state, remainder) - } - finalize(out, &state.h, &state.s) -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_s390x.s b/awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_s390x.s deleted file mode 100644 index 0fa9ee6e0bf..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/poly1305/sum_s390x.s +++ /dev/null @@ -1,503 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !gccgo,!purego - -#include "textflag.h" - -// This implementation of Poly1305 uses the vector facility (vx) -// to process up to 2 blocks (32 bytes) per iteration using an -// algorithm based on the one described in: -// -// NEON crypto, Daniel J. Bernstein & Peter Schwabe -// https://cryptojedi.org/papers/neoncrypto-20120320.pdf -// -// This algorithm uses 5 26-bit limbs to represent a 130-bit -// value. These limbs are, for the most part, zero extended and -// placed into 64-bit vector register elements. Each vector -// register is 128-bits wide and so holds 2 of these elements. -// Using 26-bit limbs allows us plenty of headroom to accomodate -// accumulations before and after multiplication without -// overflowing either 32-bits (before multiplication) or 64-bits -// (after multiplication). -// -// In order to parallelise the operations required to calculate -// the sum we use two separate accumulators and then sum those -// in an extra final step. For compatibility with the generic -// implementation we perform this summation at the end of every -// updateVX call. -// -// To use two accumulators we must multiply the message blocks -// by r² rather than r. Only the final message block should be -// multiplied by r. -// -// Example: -// -// We want to calculate the sum (h) for a 64 byte message (m): -// -// h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r -// -// To do this we split the calculation into the even indices -// and odd indices of the message. These form our SIMD 'lanes': -// -// h = m[ 0:16]r⁴ + m[32:48]r² + <- lane 0 -// m[16:32]r³ + m[48:64]r <- lane 1 -// -// To calculate this iteratively we refactor so that both lanes -// are written in terms of r² and r: -// -// h = (m[ 0:16]r² + m[32:48])r² + <- lane 0 -// (m[16:32]r² + m[48:64])r <- lane 1 -// ^ ^ -// | coefficients for second iteration -// coefficients for first iteration -// -// So in this case we would have two iterations. In the first -// both lanes are multiplied by r². In the second only the -// first lane is multiplied by r² and the second lane is -// instead multiplied by r. This gives use the odd and even -// powers of r that we need from the original equation. -// -// Notation: -// -// h - accumulator -// r - key -// m - message -// -// [a, b] - SIMD register holding two 64-bit values -// [a, b, c, d] - SIMD register holding four 32-bit values -// xᵢ[n] - limb n of variable x with bit width i -// -// Limbs are expressed in little endian order, so for 26-bit -// limbs x₂₆[4] will be the most significant limb and x₂₆[0] -// will be the least significant limb. 
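The two-lane refactoring described in the comment block above can be verified numerically. A math/big sketch with arbitrary stand-ins for r and the four message blocks (the real code also clamps r and adds 2¹²⁸ to each block, both omitted here for brevity):

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        // p = 2¹³⁰ - 5; r and the blocks are arbitrary stand-ins.
        p := new(big.Int).Lsh(big.NewInt(1), 130)
        p.Sub(p, big.NewInt(5))
        r := big.NewInt(987654321)
        m := []*big.Int{big.NewInt(11), big.NewInt(22), big.NewInt(33), big.NewInt(44)}

        mul := func(a, b *big.Int) *big.Int { return new(big.Int).Mod(new(big.Int).Mul(a, b), p) }
        add := func(a, b *big.Int) *big.Int { return new(big.Int).Mod(new(big.Int).Add(a, b), p) }

        // Sequential form: h = (h + mᵢ)·r per block.
        h := big.NewInt(0)
        for _, mi := range m {
            h = mul(add(h, mi), r)
        }

        // Two-lane form: both lanes iterate with r², then lane 0 is
        // finished by r² and lane 1 by r, as the comment describes.
        r2 := mul(r, r)
        lane0 := mul(add(mul(m[0], r2), m[2]), r2)
        lane1 := mul(add(mul(m[1], r2), m[3]), r)

        fmt.Println(h.Cmp(add(lane0, lane1)) == 0) // true
    }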
- -// masking constants -#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits -#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits - -// expansion constants (see EXPAND macro) -#define EX0 V2 -#define EX1 V3 -#define EX2 V4 - -// key (r², r or 1 depending on context) -#define R_0 V5 -#define R_1 V6 -#define R_2 V7 -#define R_3 V8 -#define R_4 V9 - -// precalculated coefficients (5r², 5r or 0 depending on context) -#define R5_1 V10 -#define R5_2 V11 -#define R5_3 V12 -#define R5_4 V13 - -// message block (m) -#define M_0 V14 -#define M_1 V15 -#define M_2 V16 -#define M_3 V17 -#define M_4 V18 - -// accumulator (h) -#define H_0 V19 -#define H_1 V20 -#define H_2 V21 -#define H_3 V22 -#define H_4 V23 - -// temporary registers (for short-lived values) -#define T_0 V24 -#define T_1 V25 -#define T_2 V26 -#define T_3 V27 -#define T_4 V28 - -GLOBL ·constants<>(SB), RODATA, $0x30 -// EX0 -DATA ·constants<>+0x00(SB)/8, $0x0006050403020100 -DATA ·constants<>+0x08(SB)/8, $0x1016151413121110 -// EX1 -DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706 -DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716 -// EX2 -DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d -DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d - -// MULTIPLY multiplies each lane of f and g, partially reduced -// modulo 2¹³⁰ - 5. The result, h, consists of partial products -// in each lane that need to be reduced further to produce the -// final result. -// -// h₁₃₀ = (f₁₃₀g₁₃₀) % 2¹³⁰ + (5f₁₃₀g₁₃₀) / 2¹³⁰ -// -// Note that the multiplication by 5 of the high bits is -// achieved by precalculating the multiplication of four of the -// g coefficients by 5. These are g51-g54. -#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \ - VMLOF f0, g0, h0 \ - VMLOF f0, g3, h3 \ - VMLOF f0, g1, h1 \ - VMLOF f0, g4, h4 \ - VMLOF f0, g2, h2 \ - VMLOF f1, g54, T_0 \ - VMLOF f1, g2, T_3 \ - VMLOF f1, g0, T_1 \ - VMLOF f1, g3, T_4 \ - VMLOF f1, g1, T_2 \ - VMALOF f2, g53, h0, h0 \ - VMALOF f2, g1, h3, h3 \ - VMALOF f2, g54, h1, h1 \ - VMALOF f2, g2, h4, h4 \ - VMALOF f2, g0, h2, h2 \ - VMALOF f3, g52, T_0, T_0 \ - VMALOF f3, g0, T_3, T_3 \ - VMALOF f3, g53, T_1, T_1 \ - VMALOF f3, g1, T_4, T_4 \ - VMALOF f3, g54, T_2, T_2 \ - VMALOF f4, g51, h0, h0 \ - VMALOF f4, g54, h3, h3 \ - VMALOF f4, g52, h1, h1 \ - VMALOF f4, g0, h4, h4 \ - VMALOF f4, g53, h2, h2 \ - VAG T_0, h0, h0 \ - VAG T_3, h3, h3 \ - VAG T_1, h1, h1 \ - VAG T_4, h4, h4 \ - VAG T_2, h2, h2 - -// REDUCE performs the following carry operations in four -// stages, as specified in Bernstein & Schwabe: -// -// 1: h₂₆[0]->h₂₆[1] h₂₆[3]->h₂₆[4] -// 2: h₂₆[1]->h₂₆[2] h₂₆[4]->h₂₆[0] -// 3: h₂₆[0]->h₂₆[1] h₂₆[2]->h₂₆[3] -// 4: h₂₆[3]->h₂₆[4] -// -// The result is that all of the limbs are limited to 26-bits -// except for h₂₆[1] and h₂₆[4] which are limited to 27-bits. -// -// Note that although each limb is aligned at 26-bit intervals -// they may contain values that exceed 2²⁶ - 1, hence the need -// to carry the excess bits in each limb. 
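To make the carry stages of the REDUCE macro that follows concrete, here is the limb-to-limb carry written in plain Go. Only the plain carry between 26-bit limbs is shown; the multiply-by-5 fold of the top carry back into limb 0 (the VESLG/VAG steps) is omitted:

    package main

    import "fmt"

    func main() {
        // Move everything above bit 25 of each limb into the next limb up.
        const mask26 = (1 << 26) - 1
        limbs := []uint64{0x5ffffff, 0x3, 0x0, 0x0, 0x0} // limb 0 overflows 26 bits
        for i := 0; i < len(limbs)-1; i++ {
            carry := limbs[i] >> 26
            limbs[i] &= mask26
            limbs[i+1] += carry
        }
        fmt.Printf("%#x\n", limbs) // [0x1ffffff 0x4 0x0 0x0 0x0]
    }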
-#define REDUCE(h0, h1, h2, h3, h4) \ - VESRLG $26, h0, T_0 \ - VESRLG $26, h3, T_1 \ - VN MOD26, h0, h0 \ - VN MOD26, h3, h3 \ - VAG T_0, h1, h1 \ - VAG T_1, h4, h4 \ - VESRLG $26, h1, T_2 \ - VESRLG $26, h4, T_3 \ - VN MOD26, h1, h1 \ - VN MOD26, h4, h4 \ - VESLG $2, T_3, T_4 \ - VAG T_3, T_4, T_4 \ - VAG T_2, h2, h2 \ - VAG T_4, h0, h0 \ - VESRLG $26, h2, T_0 \ - VESRLG $26, h0, T_1 \ - VN MOD26, h2, h2 \ - VN MOD26, h0, h0 \ - VAG T_0, h3, h3 \ - VAG T_1, h1, h1 \ - VESRLG $26, h3, T_2 \ - VN MOD26, h3, h3 \ - VAG T_2, h4, h4 - -// EXPAND splits the 128-bit little-endian values in0 and in1 -// into 26-bit big-endian limbs and places the results into -// the first and second lane of d₂₆[0:4] respectively. -// -// The EX0, EX1 and EX2 constants are arrays of byte indices -// for permutation. The permutation both reverses the bytes -// in the input and ensures the bytes are copied into the -// destination limb ready to be shifted into their final -// position. -#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \ - VPERM in0, in1, EX0, d0 \ - VPERM in0, in1, EX1, d2 \ - VPERM in0, in1, EX2, d4 \ - VESRLG $26, d0, d1 \ - VESRLG $30, d2, d3 \ - VESRLG $4, d2, d2 \ - VN MOD26, d0, d0 \ // [in0₂₆[0], in1₂₆[0]] - VN MOD26, d3, d3 \ // [in0₂₆[3], in1₂₆[3]] - VN MOD26, d1, d1 \ // [in0₂₆[1], in1₂₆[1]] - VN MOD24, d4, d4 \ // [in0₂₆[4], in1₂₆[4]] - VN MOD26, d2, d2 // [in0₂₆[2], in1₂₆[2]] - -// func updateVX(state *macState, msg []byte) -TEXT ·updateVX(SB), NOSPLIT, $0 - MOVD state+0(FP), R1 - LMG msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len - - // load EX0, EX1 and EX2 - MOVD $·constants<>(SB), R5 - VLM (R5), EX0, EX2 - - // generate masks - VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff] - VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff] - - // load h (accumulator) and r (key) from state - VZERO T_1 // [0, 0] - VL 0(R1), T_0 // [h₆₄[0], h₆₄[1]] - VLEG $0, 16(R1), T_1 // [h₆₄[2], 0] - VL 24(R1), T_2 // [r₆₄[0], r₆₄[1]] - VPDI $0, T_0, T_2, T_3 // [h₆₄[0], r₆₄[0]] - VPDI $5, T_0, T_2, T_4 // [h₆₄[1], r₆₄[1]] - - // unpack h and r into 26-bit limbs - // note: h₆₄[2] may have the low 3 bits set, so h₂₆[4] is a 27-bit value - VN MOD26, T_3, H_0 // [h₂₆[0], r₂₆[0]] - VZERO H_1 // [0, 0] - VZERO H_3 // [0, 0] - VGMG $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out - VESLG $24, T_1, T_1 // [h₆₄[2]<<24, 0] - VERIMG $-26&63, T_3, MOD26, H_1 // [h₂₆[1], r₂₆[1]] - VESRLG $+52&63, T_3, H_2 // [h₂₆[2], r₂₆[2]] - low 12 bits only - VERIMG $-14&63, T_4, MOD26, H_3 // [h₂₆[1], r₂₆[1]] - VESRLG $40, T_4, H_4 // [h₂₆[4], r₂₆[4]] - low 24 bits only - VERIMG $+12&63, T_4, T_0, H_2 // [h₂₆[2], r₂₆[2]] - complete - VO T_1, H_4, H_4 // [h₂₆[4], r₂₆[4]] - complete - - // replicate r across all 4 vector elements - VREPF $3, H_0, R_0 // [r₂₆[0], r₂₆[0], r₂₆[0], r₂₆[0]] - VREPF $3, H_1, R_1 // [r₂₆[1], r₂₆[1], r₂₆[1], r₂₆[1]] - VREPF $3, H_2, R_2 // [r₂₆[2], r₂₆[2], r₂₆[2], r₂₆[2]] - VREPF $3, H_3, R_3 // [r₂₆[3], r₂₆[3], r₂₆[3], r₂₆[3]] - VREPF $3, H_4, R_4 // [r₂₆[4], r₂₆[4], r₂₆[4], r₂₆[4]] - - // zero out lane 1 of h - VLEIG $1, $0, H_0 // [h₂₆[0], 0] - VLEIG $1, $0, H_1 // [h₂₆[1], 0] - VLEIG $1, $0, H_2 // [h₂₆[2], 0] - VLEIG $1, $0, H_3 // [h₂₆[3], 0] - VLEIG $1, $0, H_4 // [h₂₆[4], 0] - - // calculate 5r (ignore least significant limb) - VREPIF $5, T_0 - VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r₂₆[1], 5r₂₆[1], 5r₂₆[1]] - VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r₂₆[2], 5r₂₆[2], 5r₂₆[2]] - VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r₂₆[3], 5r₂₆[3], 5r₂₆[3]] - VMLF T_0, R_4, R5_4 // 
[5r₂₆[4], 5r₂₆[4], 5r₂₆[4], 5r₂₆[4]] - - // skip r² calculation if we are only calculating one block - CMPBLE R3, $16, skip - - // calculate r² - MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4) - REDUCE(M_0, M_1, M_2, M_3, M_4) - VGBM $0x0f0f, T_0 - VERIMG $0, M_0, T_0, R_0 // [r₂₆[0], r²₂₆[0], r₂₆[0], r²₂₆[0]] - VERIMG $0, M_1, T_0, R_1 // [r₂₆[1], r²₂₆[1], r₂₆[1], r²₂₆[1]] - VERIMG $0, M_2, T_0, R_2 // [r₂₆[2], r²₂₆[2], r₂₆[2], r²₂₆[2]] - VERIMG $0, M_3, T_0, R_3 // [r₂₆[3], r²₂₆[3], r₂₆[3], r²₂₆[3]] - VERIMG $0, M_4, T_0, R_4 // [r₂₆[4], r²₂₆[4], r₂₆[4], r²₂₆[4]] - - // calculate 5r² (ignore least significant limb) - VREPIF $5, T_0 - VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r²₂₆[1], 5r₂₆[1], 5r²₂₆[1]] - VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r²₂₆[2], 5r₂₆[2], 5r²₂₆[2]] - VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r²₂₆[3], 5r₂₆[3], 5r²₂₆[3]] - VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r²₂₆[4], 5r₂₆[4], 5r²₂₆[4]] - -loop: - CMPBLE R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients - - // load next 2 blocks from message - VLM (R2), T_0, T_1 - - // update message slice - SUB $32, R3 - MOVD $32(R2), R2 - - // unpack message blocks into 26-bit big-endian limbs - EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) - - // add 2¹²⁸ to each message block value - VLEIB $4, $1, M_4 - VLEIB $12, $1, M_4 - -multiply: - // accumulate the incoming message - VAG H_0, M_0, M_0 - VAG H_3, M_3, M_3 - VAG H_1, M_1, M_1 - VAG H_4, M_4, M_4 - VAG H_2, M_2, M_2 - - // multiply the accumulator by the key coefficient - MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4) - - // carry and partially reduce the partial products - REDUCE(H_0, H_1, H_2, H_3, H_4) - - CMPBNE R3, $0, loop - -finish: - // sum lane 0 and lane 1 and put the result in lane 1 - VZERO T_0 - VSUMQG H_0, T_0, H_0 - VSUMQG H_3, T_0, H_3 - VSUMQG H_1, T_0, H_1 - VSUMQG H_4, T_0, H_4 - VSUMQG H_2, T_0, H_2 - - // reduce again after summation - // TODO(mundaym): there might be a more efficient way to do this - // now that we only have 1 active lane. For example, we could - // simultaneously pack the values as we reduce them. - REDUCE(H_0, H_1, H_2, H_3, H_4) - - // carry h[1] through to h[4] so that only h[4] can exceed 2²⁶ - 1 - // TODO(mundaym): in testing this final carry was unnecessary. - // Needs a proof before it can be removed though. - VESRLG $26, H_1, T_1 - VN MOD26, H_1, H_1 - VAQ T_1, H_2, H_2 - VESRLG $26, H_2, T_2 - VN MOD26, H_2, H_2 - VAQ T_2, H_3, H_3 - VESRLG $26, H_3, T_3 - VN MOD26, H_3, H_3 - VAQ T_3, H_4, H_4 - - // h is now < 2(2¹³⁰ - 5) - // Pack each lane in h₂₆[0:4] into h₁₂₈[0:1]. - VESLG $26, H_1, H_1 - VESLG $26, H_3, H_3 - VO H_0, H_1, H_0 - VO H_2, H_3, H_2 - VESLG $4, H_2, H_2 - VLEIB $7, $48, H_1 - VSLB H_1, H_2, H_2 - VO H_0, H_2, H_0 - VLEIB $7, $104, H_1 - VSLB H_1, H_4, H_3 - VO H_3, H_0, H_0 - VLEIB $7, $24, H_1 - VSRLB H_1, H_4, H_1 - - // update state - VSTEG $1, H_0, 0(R1) - VSTEG $0, H_0, 8(R1) - VSTEG $1, H_1, 16(R1) - RET - -b2: // 2 or fewer blocks remaining - CMPBLE R3, $16, b1 - - // Load the 2 remaining blocks (17-32 bytes remaining). - MOVD $-17(R3), R0 // index of final byte to load modulo 16 - VL (R2), T_0 // load full 16 byte block - VLL R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes - - // The Poly1305 algorithm requires that a 1 bit be appended to - // each message block. 
If the final block is less than 16 bytes - // long then it is easiest to insert the 1 before the message - // block is split into 26-bit limbs. If, on the other hand, the - // final message block is 16 bytes long then we append the 1 bit - // after expansion as normal. - MOVBZ $1, R0 - MOVD $-16(R3), R3 // index of byte in last block to insert 1 at (could be 16) - CMPBEQ R3, $16, 2(PC) // skip the insertion if the final block is 16 bytes long - VLVGB R3, R0, T_1 // insert 1 into the byte at index R3 - - // Split both blocks into 26-bit limbs in the appropriate lanes. - EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) - - // Append a 1 byte to the end of the second to last block. - VLEIB $4, $1, M_4 - - // Append a 1 byte to the end of the last block only if it is a - // full 16 byte block. - CMPBNE R3, $16, 2(PC) - VLEIB $12, $1, M_4 - - // Finally, set up the coefficients for the final multiplication. - // We have previously saved r and 5r in the 32-bit even indexes - // of the R_[0-4] and R5_[1-4] coefficient registers. - // - // We want lane 0 to be multiplied by r² so that can be kept the - // same. We want lane 1 to be multiplied by r so we need to move - // the saved r value into the 32-bit odd index in lane 1 by - // rotating the 64-bit lane by 32. - VGBM $0x00ff, T_0 // [0, 0xffffffffffffffff] - mask lane 1 only - VERIMG $32, R_0, T_0, R_0 // [_, r²₂₆[0], _, r₂₆[0]] - VERIMG $32, R_1, T_0, R_1 // [_, r²₂₆[1], _, r₂₆[1]] - VERIMG $32, R_2, T_0, R_2 // [_, r²₂₆[2], _, r₂₆[2]] - VERIMG $32, R_3, T_0, R_3 // [_, r²₂₆[3], _, r₂₆[3]] - VERIMG $32, R_4, T_0, R_4 // [_, r²₂₆[4], _, r₂₆[4]] - VERIMG $32, R5_1, T_0, R5_1 // [_, 5r²₂₆[1], _, 5r₂₆[1]] - VERIMG $32, R5_2, T_0, R5_2 // [_, 5r²₂₆[2], _, 5r₂₆[2]] - VERIMG $32, R5_3, T_0, R5_3 // [_, 5r²₂₆[3], _, 5r₂₆[3]] - VERIMG $32, R5_4, T_0, R5_4 // [_, 5r²₂₆[4], _, 5r₂₆[4]] - - MOVD $0, R3 - BR multiply - -skip: - CMPBEQ R3, $0, finish - -b1: // 1 block remaining - - // Load the final block (1-16 bytes). This will be placed into - // lane 0. - MOVD $-1(R3), R0 - VLL R0, (R2), T_0 // pad to 16 bytes with zeros - - // The Poly1305 algorithm requires that a 1 bit be appended to - // each message block. If the final block is less than 16 bytes - // long then it is easiest to insert the 1 before the message - // block is split into 26-bit limbs. If, on the other hand, the - // final message block is 16 bytes long then we append the 1 bit - // after expansion as normal. - MOVBZ $1, R0 - CMPBEQ R3, $16, 2(PC) - VLVGB R3, R0, T_0 - - // Set the message block in lane 1 to the value 0 so that it - // can be accumulated without affecting the final result. - VZERO T_1 - - // Split the final message block into 26-bit limbs in lane 0. - // Lane 1 will be contain 0. - EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) - - // Append a 1 byte to the end of the last block only if it is a - // full 16 byte block. - CMPBNE R3, $16, 2(PC) - VLEIB $4, $1, M_4 - - // We have previously saved r and 5r in the 32-bit even indexes - // of the R_[0-4] and R5_[1-4] coefficient registers. - // - // We want lane 0 to be multiplied by r so we need to move the - // saved r value into the 32-bit odd index in lane 0. We want - // lane 1 to be set to the value 1. This makes multiplication - // a no-op. We do this by setting lane 1 in every register to 0 - // and then just setting the 32-bit index 3 in R_0 to 1. 
- VZERO T_0 - MOVD $0, R0 - MOVD $0x10111213, R12 - VLVGP R12, R0, T_1 // [_, 0x10111213, _, 0x00000000] - VPERM T_0, R_0, T_1, R_0 // [_, r₂₆[0], _, 0] - VPERM T_0, R_1, T_1, R_1 // [_, r₂₆[1], _, 0] - VPERM T_0, R_2, T_1, R_2 // [_, r₂₆[2], _, 0] - VPERM T_0, R_3, T_1, R_3 // [_, r₂₆[3], _, 0] - VPERM T_0, R_4, T_1, R_4 // [_, r₂₆[4], _, 0] - VPERM T_0, R5_1, T_1, R5_1 // [_, 5r₂₆[1], _, 0] - VPERM T_0, R5_2, T_1, R5_2 // [_, 5r₂₆[2], _, 0] - VPERM T_0, R5_3, T_1, R5_3 // [_, 5r₂₆[3], _, 0] - VPERM T_0, R5_4, T_1, R5_4 // [_, 5r₂₆[4], _, 0] - - // Set the value of lane 1 to be 1. - VLEIF $3, $1, R_0 // [_, r₂₆[0], _, 1] - - MOVD $0, R3 - BR multiply diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/agent/client.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/agent/client.go deleted file mode 100644 index b909471cc06..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/agent/client.go +++ /dev/null @@ -1,813 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package agent implements the ssh-agent protocol, and provides both -// a client and a server. The client can talk to a standard ssh-agent -// that uses UNIX sockets, and one could implement an alternative -// ssh-agent process using the sample server. -// -// References: -// [PROTOCOL.agent]: https://tools.ietf.org/html/draft-miller-ssh-agent-00 -package agent // import "golang.org/x/crypto/ssh/agent" - -import ( - "bytes" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "encoding/base64" - "encoding/binary" - "errors" - "fmt" - "io" - "math/big" - "sync" - - "crypto" - "golang.org/x/crypto/ed25519" - "golang.org/x/crypto/ssh" -) - -// SignatureFlags represent additional flags that can be passed to the signature -// requests an defined in [PROTOCOL.agent] section 4.5.1. -type SignatureFlags uint32 - -// SignatureFlag values as defined in [PROTOCOL.agent] section 5.3. -const ( - SignatureFlagReserved SignatureFlags = 1 << iota - SignatureFlagRsaSha256 - SignatureFlagRsaSha512 -) - -// Agent represents the capabilities of an ssh-agent. -type Agent interface { - // List returns the identities known to the agent. - List() ([]*Key, error) - - // Sign has the agent sign the data using a protocol 2 key as defined - // in [PROTOCOL.agent] section 2.6.2. - Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) - - // Add adds a private key to the agent. - Add(key AddedKey) error - - // Remove removes all identities with the given public key. - Remove(key ssh.PublicKey) error - - // RemoveAll removes all identities. - RemoveAll() error - - // Lock locks the agent. Sign and Remove will fail, and List will empty an empty list. - Lock(passphrase []byte) error - - // Unlock undoes the effect of Lock - Unlock(passphrase []byte) error - - // Signers returns signers for all the known keys. - Signers() ([]ssh.Signer, error) -} - -type ExtendedAgent interface { - Agent - - // SignWithFlags signs like Sign, but allows for additional flags to be sent/received - SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFlags) (*ssh.Signature, error) - - // Extension processes a custom extension request. Standard-compliant agents are not - // required to support any extensions, but this method allows agents to implement - // vendor-specific methods or add experimental features. See [PROTOCOL.agent] section 4.7. 
- // If agent extensions are unsupported entirely this method MUST return an - // ErrExtensionUnsupported error. Similarly, if just the specific extensionType in - // the request is unsupported by the agent then ErrExtensionUnsupported MUST be - // returned. - // - // In the case of success, since [PROTOCOL.agent] section 4.7 specifies that the contents - // of the response are unspecified (including the type of the message), the complete - // response will be returned as a []byte slice, including the "type" byte of the message. - Extension(extensionType string, contents []byte) ([]byte, error) -} - -// ConstraintExtension describes an optional constraint defined by users. -type ConstraintExtension struct { - // ExtensionName consist of a UTF-8 string suffixed by the - // implementation domain following the naming scheme defined - // in Section 4.2 of [RFC4251], e.g. "foo@example.com". - ExtensionName string - // ExtensionDetails contains the actual content of the extended - // constraint. - ExtensionDetails []byte -} - -// AddedKey describes an SSH key to be added to an Agent. -type AddedKey struct { - // PrivateKey must be a *rsa.PrivateKey, *dsa.PrivateKey, - // ed25519.PrivateKey or *ecdsa.PrivateKey, which will be inserted into the - // agent. - PrivateKey interface{} - // Certificate, if not nil, is communicated to the agent and will be - // stored with the key. - Certificate *ssh.Certificate - // Comment is an optional, free-form string. - Comment string - // LifetimeSecs, if not zero, is the number of seconds that the - // agent will store the key for. - LifetimeSecs uint32 - // ConfirmBeforeUse, if true, requests that the agent confirm with the - // user before each use of this key. - ConfirmBeforeUse bool - // ConstraintExtensions are the experimental or private-use constraints - // defined by users. - ConstraintExtensions []ConstraintExtension -} - -// See [PROTOCOL.agent], section 3. -const ( - agentRequestV1Identities = 1 - agentRemoveAllV1Identities = 9 - - // 3.2 Requests from client to agent for protocol 2 key operations - agentAddIdentity = 17 - agentRemoveIdentity = 18 - agentRemoveAllIdentities = 19 - agentAddIDConstrained = 25 - - // 3.3 Key-type independent requests from client to agent - agentAddSmartcardKey = 20 - agentRemoveSmartcardKey = 21 - agentLock = 22 - agentUnlock = 23 - agentAddSmartcardKeyConstrained = 26 - - // 3.7 Key constraint identifiers - agentConstrainLifetime = 1 - agentConstrainConfirm = 2 - agentConstrainExtension = 3 -) - -// maxAgentResponseBytes is the maximum agent reply size that is accepted. This -// is a sanity check, not a limit in the spec. -const maxAgentResponseBytes = 16 << 20 - -// Agent messages: -// These structures mirror the wire format of the corresponding ssh agent -// messages found in [PROTOCOL.agent]. - -// 3.4 Generic replies from agent to client -const agentFailure = 5 - -type failureAgentMsg struct{} - -const agentSuccess = 6 - -type successAgentMsg struct{} - -// See [PROTOCOL.agent], section 2.5.2. -const agentRequestIdentities = 11 - -type requestIdentitiesAgentMsg struct{} - -// See [PROTOCOL.agent], section 2.5.2. -const agentIdentitiesAnswer = 12 - -type identitiesAnswerAgentMsg struct { - NumKeys uint32 `sshtype:"12"` - Keys []byte `ssh:"rest"` -} - -// See [PROTOCOL.agent], section 2.6.2. -const agentSignRequest = 13 - -type signRequestAgentMsg struct { - KeyBlob []byte `sshtype:"13"` - Data []byte - Flags uint32 -} - -// See [PROTOCOL.agent], section 2.6.2. 
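All of these messages ride on the framing that callRaw implements further down: a 4-byte big-endian length prefix followed by the body, whose first byte is the message type. A standalone sketch encoding a SSH_AGENTC_REQUEST_IDENTITIES frame (type 11, agentRequestIdentities above):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        // Body of a request: first byte is the message type.
        body := []byte{11}

        // Frame: 4-byte big-endian length prefix, then the body.
        frame := make([]byte, 4+len(body))
        binary.BigEndian.PutUint32(frame, uint32(len(body)))
        copy(frame[4:], body)

        fmt.Printf("% x\n", frame) // 00 00 00 01 0b
    }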
- -// 3.6 Replies from agent to client for protocol 2 key operations -const agentSignResponse = 14 - -type signResponseAgentMsg struct { - SigBlob []byte `sshtype:"14"` -} - -type publicKey struct { - Format string - Rest []byte `ssh:"rest"` -} - -// 3.7 Key constraint identifiers -type constrainLifetimeAgentMsg struct { - LifetimeSecs uint32 `sshtype:"1"` -} - -type constrainExtensionAgentMsg struct { - ExtensionName string `sshtype:"3"` - ExtensionDetails []byte - - // Rest is a field used for parsing, not part of message - Rest []byte `ssh:"rest"` -} - -// See [PROTOCOL.agent], section 4.7 -const agentExtension = 27 -const agentExtensionFailure = 28 - -// ErrExtensionUnsupported indicates that an extension defined in -// [PROTOCOL.agent] section 4.7 is unsupported by the agent. Specifically this -// error indicates that the agent returned a standard SSH_AGENT_FAILURE message -// as the result of a SSH_AGENTC_EXTENSION request. Note that the protocol -// specification (and therefore this error) does not distinguish between a -// specific extension being unsupported and extensions being unsupported entirely. -var ErrExtensionUnsupported = errors.New("agent: extension unsupported") - -type extensionAgentMsg struct { - ExtensionType string `sshtype:"27"` - Contents []byte -} - -// Key represents a protocol 2 public key as defined in -// [PROTOCOL.agent], section 2.5.2. -type Key struct { - Format string - Blob []byte - Comment string -} - -func clientErr(err error) error { - return fmt.Errorf("agent: client error: %v", err) -} - -// String returns the storage form of an agent key with the format, base64 -// encoded serialized key, and the comment if it is not empty. -func (k *Key) String() string { - s := string(k.Format) + " " + base64.StdEncoding.EncodeToString(k.Blob) - - if k.Comment != "" { - s += " " + k.Comment - } - - return s -} - -// Type returns the public key type. -func (k *Key) Type() string { - return k.Format -} - -// Marshal returns key blob to satisfy the ssh.PublicKey interface. -func (k *Key) Marshal() []byte { - return k.Blob -} - -// Verify satisfies the ssh.PublicKey interface. -func (k *Key) Verify(data []byte, sig *ssh.Signature) error { - pubKey, err := ssh.ParsePublicKey(k.Blob) - if err != nil { - return fmt.Errorf("agent: bad public key: %v", err) - } - return pubKey.Verify(data, sig) -} - -type wireKey struct { - Format string - Rest []byte `ssh:"rest"` -} - -func parseKey(in []byte) (out *Key, rest []byte, err error) { - var record struct { - Blob []byte - Comment string - Rest []byte `ssh:"rest"` - } - - if err := ssh.Unmarshal(in, &record); err != nil { - return nil, nil, err - } - - var wk wireKey - if err := ssh.Unmarshal(record.Blob, &wk); err != nil { - return nil, nil, err - } - - return &Key{ - Format: wk.Format, - Blob: record.Blob, - Comment: record.Comment, - }, record.Rest, nil -} - -// client is a client for an ssh-agent process. -type client struct { - // conn is typically a *net.UnixConn - conn io.ReadWriter - // mu is used to prevent concurrent access to the agent - mu sync.Mutex -} - -// NewClient returns an Agent that talks to an ssh-agent process over -// the given connection. -func NewClient(rw io.ReadWriter) ExtendedAgent { - return &client{conn: rw} -} - -// call sends an RPC to the agent. On success, the reply is -// unmarshaled into reply and replyType is set to the first byte of -// the reply, which contains the type of the message. 
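A minimal sketch of using this client against a real agent, assuming a standard ssh-agent is reachable via the conventional SSH_AUTH_SOCK UNIX socket (error handling kept crude for brevity):

    package main

    import (
        "fmt"
        "net"
        "os"

        "golang.org/x/crypto/ssh/agent"
    )

    func main() {
        // A standard ssh-agent listens on a UNIX socket named by SSH_AUTH_SOCK.
        conn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
        if err != nil {
            panic(err)
        }
        defer conn.Close()

        ag := agent.NewClient(conn)
        keys, err := ag.List()
        if err != nil {
            panic(err)
        }
        for _, k := range keys {
            fmt.Println(k.String()) // "<format> <base64 blob> <comment>"
        }
    }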
-func (c *client) call(req []byte) (reply interface{}, err error) { - buf, err := c.callRaw(req) - if err != nil { - return nil, err - } - reply, err = unmarshal(buf) - if err != nil { - return nil, clientErr(err) - } - return reply, nil -} - -// callRaw sends an RPC to the agent. On success, the raw -// bytes of the response are returned; no unmarshalling is -// performed on the response. -func (c *client) callRaw(req []byte) (reply []byte, err error) { - c.mu.Lock() - defer c.mu.Unlock() - - msg := make([]byte, 4+len(req)) - binary.BigEndian.PutUint32(msg, uint32(len(req))) - copy(msg[4:], req) - if _, err = c.conn.Write(msg); err != nil { - return nil, clientErr(err) - } - - var respSizeBuf [4]byte - if _, err = io.ReadFull(c.conn, respSizeBuf[:]); err != nil { - return nil, clientErr(err) - } - respSize := binary.BigEndian.Uint32(respSizeBuf[:]) - if respSize > maxAgentResponseBytes { - return nil, clientErr(errors.New("response too large")) - } - - buf := make([]byte, respSize) - if _, err = io.ReadFull(c.conn, buf); err != nil { - return nil, clientErr(err) - } - return buf, nil -} - -func (c *client) simpleCall(req []byte) error { - resp, err := c.call(req) - if err != nil { - return err - } - if _, ok := resp.(*successAgentMsg); ok { - return nil - } - return errors.New("agent: failure") -} - -func (c *client) RemoveAll() error { - return c.simpleCall([]byte{agentRemoveAllIdentities}) -} - -func (c *client) Remove(key ssh.PublicKey) error { - req := ssh.Marshal(&agentRemoveIdentityMsg{ - KeyBlob: key.Marshal(), - }) - return c.simpleCall(req) -} - -func (c *client) Lock(passphrase []byte) error { - req := ssh.Marshal(&agentLockMsg{ - Passphrase: passphrase, - }) - return c.simpleCall(req) -} - -func (c *client) Unlock(passphrase []byte) error { - req := ssh.Marshal(&agentUnlockMsg{ - Passphrase: passphrase, - }) - return c.simpleCall(req) -} - -// List returns the identities known to the agent. -func (c *client) List() ([]*Key, error) { - // see [PROTOCOL.agent] section 2.5.2. - req := []byte{agentRequestIdentities} - - msg, err := c.call(req) - if err != nil { - return nil, err - } - - switch msg := msg.(type) { - case *identitiesAnswerAgentMsg: - if msg.NumKeys > maxAgentResponseBytes/8 { - return nil, errors.New("agent: too many keys in agent reply") - } - keys := make([]*Key, msg.NumKeys) - data := msg.Keys - for i := uint32(0); i < msg.NumKeys; i++ { - var key *Key - var err error - if key, data, err = parseKey(data); err != nil { - return nil, err - } - keys[i] = key - } - return keys, nil - case *failureAgentMsg: - return nil, errors.New("agent: failed to list keys") - } - panic("unreachable") -} - -// Sign has the agent sign the data using a protocol 2 key as defined -// in [PROTOCOL.agent] section 2.6.2. 
-func (c *client) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) { - return c.SignWithFlags(key, data, 0) -} - -func (c *client) SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFlags) (*ssh.Signature, error) { - req := ssh.Marshal(signRequestAgentMsg{ - KeyBlob: key.Marshal(), - Data: data, - Flags: uint32(flags), - }) - - msg, err := c.call(req) - if err != nil { - return nil, err - } - - switch msg := msg.(type) { - case *signResponseAgentMsg: - var sig ssh.Signature - if err := ssh.Unmarshal(msg.SigBlob, &sig); err != nil { - return nil, err - } - - return &sig, nil - case *failureAgentMsg: - return nil, errors.New("agent: failed to sign challenge") - } - panic("unreachable") -} - -// unmarshal parses an agent message in packet, returning the parsed -// form and the message type of packet. -func unmarshal(packet []byte) (interface{}, error) { - if len(packet) < 1 { - return nil, errors.New("agent: empty packet") - } - var msg interface{} - switch packet[0] { - case agentFailure: - return new(failureAgentMsg), nil - case agentSuccess: - return new(successAgentMsg), nil - case agentIdentitiesAnswer: - msg = new(identitiesAnswerAgentMsg) - case agentSignResponse: - msg = new(signResponseAgentMsg) - case agentV1IdentitiesAnswer: - msg = new(agentV1IdentityMsg) - default: - return nil, fmt.Errorf("agent: unknown type tag %d", packet[0]) - } - if err := ssh.Unmarshal(packet, msg); err != nil { - return nil, err - } - return msg, nil -} - -type rsaKeyMsg struct { - Type string `sshtype:"17|25"` - N *big.Int - E *big.Int - D *big.Int - Iqmp *big.Int // IQMP = Inverse Q Mod P - P *big.Int - Q *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type dsaKeyMsg struct { - Type string `sshtype:"17|25"` - P *big.Int - Q *big.Int - G *big.Int - Y *big.Int - X *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type ecdsaKeyMsg struct { - Type string `sshtype:"17|25"` - Curve string - KeyBytes []byte - D *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type ed25519KeyMsg struct { - Type string `sshtype:"17|25"` - Pub []byte - Priv []byte - Comments string - Constraints []byte `ssh:"rest"` -} - -// Insert adds a private key to the agent. -func (c *client) insertKey(s interface{}, comment string, constraints []byte) error { - var req []byte - switch k := s.(type) { - case *rsa.PrivateKey: - if len(k.Primes) != 2 { - return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes)) - } - k.Precompute() - req = ssh.Marshal(rsaKeyMsg{ - Type: ssh.KeyAlgoRSA, - N: k.N, - E: big.NewInt(int64(k.E)), - D: k.D, - Iqmp: k.Precomputed.Qinv, - P: k.Primes[0], - Q: k.Primes[1], - Comments: comment, - Constraints: constraints, - }) - case *dsa.PrivateKey: - req = ssh.Marshal(dsaKeyMsg{ - Type: ssh.KeyAlgoDSA, - P: k.P, - Q: k.Q, - G: k.G, - Y: k.Y, - X: k.X, - Comments: comment, - Constraints: constraints, - }) - case *ecdsa.PrivateKey: - nistID := fmt.Sprintf("nistp%d", k.Params().BitSize) - req = ssh.Marshal(ecdsaKeyMsg{ - Type: "ecdsa-sha2-" + nistID, - Curve: nistID, - KeyBytes: elliptic.Marshal(k.Curve, k.X, k.Y), - D: k.D, - Comments: comment, - Constraints: constraints, - }) - case ed25519.PrivateKey: - req = ssh.Marshal(ed25519KeyMsg{ - Type: ssh.KeyAlgoED25519, - Pub: []byte(k)[32:], - Priv: []byte(k), - Comments: comment, - Constraints: constraints, - }) - // This function originally supported only *ed25519.PrivateKey, however the - // general idiom is to pass ed25519.PrivateKey by value, not by pointer. 
- // We still support the pointer variant for backwards compatibility. - case *ed25519.PrivateKey: - req = ssh.Marshal(ed25519KeyMsg{ - Type: ssh.KeyAlgoED25519, - Pub: []byte(*k)[32:], - Priv: []byte(*k), - Comments: comment, - Constraints: constraints, - }) - default: - return fmt.Errorf("agent: unsupported key type %T", s) - } - - // if constraints are present then the message type needs to be changed. - if len(constraints) != 0 { - req[0] = agentAddIDConstrained - } - - resp, err := c.call(req) - if err != nil { - return err - } - if _, ok := resp.(*successAgentMsg); ok { - return nil - } - return errors.New("agent: failure") -} - -type rsaCertMsg struct { - Type string `sshtype:"17|25"` - CertBytes []byte - D *big.Int - Iqmp *big.Int // IQMP = Inverse Q Mod P - P *big.Int - Q *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type dsaCertMsg struct { - Type string `sshtype:"17|25"` - CertBytes []byte - X *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type ecdsaCertMsg struct { - Type string `sshtype:"17|25"` - CertBytes []byte - D *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type ed25519CertMsg struct { - Type string `sshtype:"17|25"` - CertBytes []byte - Pub []byte - Priv []byte - Comments string - Constraints []byte `ssh:"rest"` -} - -// Add adds a private key to the agent. If a certificate is given, -// that certificate is added instead as public key. -func (c *client) Add(key AddedKey) error { - var constraints []byte - - if secs := key.LifetimeSecs; secs != 0 { - constraints = append(constraints, ssh.Marshal(constrainLifetimeAgentMsg{secs})...) - } - - if key.ConfirmBeforeUse { - constraints = append(constraints, agentConstrainConfirm) - } - - cert := key.Certificate - if cert == nil { - return c.insertKey(key.PrivateKey, key.Comment, constraints) - } - return c.insertCert(key.PrivateKey, cert, key.Comment, constraints) -} - -func (c *client) insertCert(s interface{}, cert *ssh.Certificate, comment string, constraints []byte) error { - var req []byte - switch k := s.(type) { - case *rsa.PrivateKey: - if len(k.Primes) != 2 { - return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes)) - } - k.Precompute() - req = ssh.Marshal(rsaCertMsg{ - Type: cert.Type(), - CertBytes: cert.Marshal(), - D: k.D, - Iqmp: k.Precomputed.Qinv, - P: k.Primes[0], - Q: k.Primes[1], - Comments: comment, - Constraints: constraints, - }) - case *dsa.PrivateKey: - req = ssh.Marshal(dsaCertMsg{ - Type: cert.Type(), - CertBytes: cert.Marshal(), - X: k.X, - Comments: comment, - Constraints: constraints, - }) - case *ecdsa.PrivateKey: - req = ssh.Marshal(ecdsaCertMsg{ - Type: cert.Type(), - CertBytes: cert.Marshal(), - D: k.D, - Comments: comment, - Constraints: constraints, - }) - case ed25519.PrivateKey: - req = ssh.Marshal(ed25519CertMsg{ - Type: cert.Type(), - CertBytes: cert.Marshal(), - Pub: []byte(k)[32:], - Priv: []byte(k), - Comments: comment, - Constraints: constraints, - }) - // This function originally supported only *ed25519.PrivateKey, however the - // general idiom is to pass ed25519.PrivateKey by value, not by pointer. - // We still support the pointer variant for backwards compatibility. 
- case *ed25519.PrivateKey: - req = ssh.Marshal(ed25519CertMsg{ - Type: cert.Type(), - CertBytes: cert.Marshal(), - Pub: []byte(*k)[32:], - Priv: []byte(*k), - Comments: comment, - Constraints: constraints, - }) - default: - return fmt.Errorf("agent: unsupported key type %T", s) - } - - // if constraints are present then the message type needs to be changed. - if len(constraints) != 0 { - req[0] = agentAddIDConstrained - } - - signer, err := ssh.NewSignerFromKey(s) - if err != nil { - return err - } - if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { - return errors.New("agent: signer and cert have different public key") - } - - resp, err := c.call(req) - if err != nil { - return err - } - if _, ok := resp.(*successAgentMsg); ok { - return nil - } - return errors.New("agent: failure") -} - -// Signers provides a callback for client authentication. -func (c *client) Signers() ([]ssh.Signer, error) { - keys, err := c.List() - if err != nil { - return nil, err - } - - var result []ssh.Signer - for _, k := range keys { - result = append(result, &agentKeyringSigner{c, k}) - } - return result, nil -} - -type agentKeyringSigner struct { - agent *client - pub ssh.PublicKey -} - -func (s *agentKeyringSigner) PublicKey() ssh.PublicKey { - return s.pub -} - -func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature, error) { - // The agent has its own entropy source, so the rand argument is ignored. - return s.agent.Sign(s.pub, data) -} - -func (s *agentKeyringSigner) SignWithOpts(rand io.Reader, data []byte, opts crypto.SignerOpts) (*ssh.Signature, error) { - var flags SignatureFlags - if opts != nil { - switch opts.HashFunc() { - case crypto.SHA256: - flags = SignatureFlagRsaSha256 - case crypto.SHA512: - flags = SignatureFlagRsaSha512 - } - } - return s.agent.SignWithFlags(s.pub, data, flags) -} - -// Calls an extension method. It is up to the agent implementation as to whether or not -// any particular extension is supported and may always return an error. Because the -// type of the response is up to the implementation, this returns the bytes of the -// response and does not attempt any type of unmarshalling. -func (c *client) Extension(extensionType string, contents []byte) ([]byte, error) { - req := ssh.Marshal(extensionAgentMsg{ - ExtensionType: extensionType, - Contents: contents, - }) - buf, err := c.callRaw(req) - if err != nil { - return nil, err - } - if len(buf) == 0 { - return nil, errors.New("agent: failure; empty response") - } - // [PROTOCOL.agent] section 4.7 indicates that an SSH_AGENT_FAILURE message - // represents an agent that does not support the extension - if buf[0] == agentFailure { - return nil, ErrExtensionUnsupported - } - if buf[0] == agentExtensionFailure { - return nil, errors.New("agent: generic extension failure") - } - - return buf, nil -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/agent/forward.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/agent/forward.go deleted file mode 100644 index fd24ba900d2..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/agent/forward.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package agent - -import ( - "errors" - "io" - "net" - "sync" - - "golang.org/x/crypto/ssh" -) - -// RequestAgentForwarding sets up agent forwarding for the session. 
-// ForwardToAgent or ForwardToRemote should be called to route -// the authentication requests. -func RequestAgentForwarding(session *ssh.Session) error { - ok, err := session.SendRequest("auth-agent-req@openssh.com", true, nil) - if err != nil { - return err - } - if !ok { - return errors.New("forwarding request denied") - } - return nil -} - -// ForwardToAgent routes authentication requests to the given keyring. -func ForwardToAgent(client *ssh.Client, keyring Agent) error { - channels := client.HandleChannelOpen(channelType) - if channels == nil { - return errors.New("agent: already have handler for " + channelType) - } - - go func() { - for ch := range channels { - channel, reqs, err := ch.Accept() - if err != nil { - continue - } - go ssh.DiscardRequests(reqs) - go func() { - ServeAgent(keyring, channel) - channel.Close() - }() - } - }() - return nil -} - -const channelType = "auth-agent@openssh.com" - -// ForwardToRemote routes authentication requests to the ssh-agent -// process serving on the given unix socket. -func ForwardToRemote(client *ssh.Client, addr string) error { - channels := client.HandleChannelOpen(channelType) - if channels == nil { - return errors.New("agent: already have handler for " + channelType) - } - conn, err := net.Dial("unix", addr) - if err != nil { - return err - } - conn.Close() - - go func() { - for ch := range channels { - channel, reqs, err := ch.Accept() - if err != nil { - continue - } - go ssh.DiscardRequests(reqs) - go forwardUnixSocket(channel, addr) - } - }() - return nil -} - -func forwardUnixSocket(channel ssh.Channel, addr string) { - conn, err := net.Dial("unix", addr) - if err != nil { - return - } - - var wg sync.WaitGroup - wg.Add(2) - go func() { - io.Copy(conn, channel) - conn.(*net.UnixConn).CloseWrite() - wg.Done() - }() - go func() { - io.Copy(channel, conn) - channel.CloseWrite() - wg.Done() - }() - - wg.Wait() - conn.Close() - channel.Close() -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/agent/keyring.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/agent/keyring.go deleted file mode 100644 index c9d97943071..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/agent/keyring.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package agent - -import ( - "bytes" - "crypto/rand" - "crypto/subtle" - "errors" - "fmt" - "sync" - "time" - - "golang.org/x/crypto/ssh" -) - -type privKey struct { - signer ssh.Signer - comment string - expire *time.Time -} - -type keyring struct { - mu sync.Mutex - keys []privKey - - locked bool - passphrase []byte -} - -var errLocked = errors.New("agent: locked") - -// NewKeyring returns an Agent that holds keys in memory. It is safe -// for concurrent use by multiple goroutines. -func NewKeyring() Agent { - return &keyring{} -} - -// RemoveAll removes all identities. -func (r *keyring) RemoveAll() error { - r.mu.Lock() - defer r.mu.Unlock() - if r.locked { - return errLocked - } - - r.keys = nil - return nil -} - -// removeLocked does the actual key removal. The caller must already be holding the -// keyring mutex. 
-func (r *keyring) removeLocked(want []byte) error {
-	found := false
-	for i := 0; i < len(r.keys); {
-		if bytes.Equal(r.keys[i].signer.PublicKey().Marshal(), want) {
-			found = true
-			r.keys[i] = r.keys[len(r.keys)-1]
-			r.keys = r.keys[:len(r.keys)-1]
-			continue
-		} else {
-			i++
-		}
-	}
-
-	if !found {
-		return errors.New("agent: key not found")
-	}
-	return nil
-}
-
-// Remove removes all identities with the given public key.
-func (r *keyring) Remove(key ssh.PublicKey) error {
-	r.mu.Lock()
-	defer r.mu.Unlock()
-	if r.locked {
-		return errLocked
-	}
-
-	return r.removeLocked(key.Marshal())
-}
-
-// Lock locks the agent. Sign and Remove will fail, and List will return an empty list.
-func (r *keyring) Lock(passphrase []byte) error {
-	r.mu.Lock()
-	defer r.mu.Unlock()
-	if r.locked {
-		return errLocked
-	}
-
-	r.locked = true
-	r.passphrase = passphrase
-	return nil
-}
-
-// Unlock undoes the effect of Lock.
-func (r *keyring) Unlock(passphrase []byte) error {
-	r.mu.Lock()
-	defer r.mu.Unlock()
-	if !r.locked {
-		return errors.New("agent: not locked")
-	}
-	if 1 != subtle.ConstantTimeCompare(passphrase, r.passphrase) {
-		return fmt.Errorf("agent: incorrect passphrase")
-	}
-
-	r.locked = false
-	r.passphrase = nil
-	return nil
-}
-
-// expireKeysLocked removes expired keys from the keyring. If a key was added
-// with a lifetimesecs constraint and seconds >= lifetimesecs seconds have
-// elapsed, it is removed. The caller *must* be holding the keyring mutex.
-func (r *keyring) expireKeysLocked() {
-	for _, k := range r.keys {
-		if k.expire != nil && time.Now().After(*k.expire) {
-			r.removeLocked(k.signer.PublicKey().Marshal())
-		}
-	}
-}
-
-// List returns the identities known to the agent.
-func (r *keyring) List() ([]*Key, error) {
-	r.mu.Lock()
-	defer r.mu.Unlock()
-	if r.locked {
-		// section 2.7: locked agents return empty.
-		return nil, nil
-	}
-
-	r.expireKeysLocked()
-	var ids []*Key
-	for _, k := range r.keys {
-		pub := k.signer.PublicKey()
-		ids = append(ids, &Key{
-			Format:  pub.Type(),
-			Blob:    pub.Marshal(),
-			Comment: k.comment})
-	}
-	return ids, nil
-}
-
-// Add adds a private key to the keyring. If a certificate
-// is given, that certificate is added as public key. Note that
-// any constraints given are ignored.
-func (r *keyring) Add(key AddedKey) error {
-	r.mu.Lock()
-	defer r.mu.Unlock()
-	if r.locked {
-		return errLocked
-	}
-	signer, err := ssh.NewSignerFromKey(key.PrivateKey)
-
-	if err != nil {
-		return err
-	}
-
-	if cert := key.Certificate; cert != nil {
-		signer, err = ssh.NewCertSigner(cert, signer)
-		if err != nil {
-			return err
-		}
-	}
-
-	p := privKey{
-		signer:  signer,
-		comment: key.Comment,
-	}
-
-	if key.LifetimeSecs > 0 {
-		t := time.Now().Add(time.Duration(key.LifetimeSecs) * time.Second)
-		p.expire = &t
-	}
-
-	r.keys = append(r.keys, p)
-
-	return nil
-}
-
-// Sign returns a signature for the data.
-func (r *keyring) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) {
-	return r.SignWithFlags(key, data, 0)
-}
-
-func (r *keyring) SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFlags) (*ssh.Signature, error) {
-	r.mu.Lock()
-	defer r.mu.Unlock()
-	if r.locked {
-		return nil, errLocked
-	}
-
-	r.expireKeysLocked()
-	wanted := key.Marshal()
-	for _, k := range r.keys {
-		if bytes.Equal(k.signer.PublicKey().Marshal(), wanted) {
-			if flags == 0 {
-				return k.signer.Sign(rand.Reader, data)
-			} else {
-				if algorithmSigner, ok := k.signer.(ssh.AlgorithmSigner); !ok {
-					return nil, fmt.Errorf("agent: signature does not support non-default signature algorithm: %T", k.signer)
-				} else {
-					var algorithm string
-					switch flags {
-					case SignatureFlagRsaSha256:
-						algorithm = ssh.SigAlgoRSASHA2256
-					case SignatureFlagRsaSha512:
-						algorithm = ssh.SigAlgoRSASHA2512
-					default:
-						return nil, fmt.Errorf("agent: unsupported signature flags: %d", flags)
-					}
-					return algorithmSigner.SignWithAlgorithm(rand.Reader, data, algorithm)
-				}
-			}
-		}
-	}
-	return nil, errors.New("not found")
-}
-
-// Signers returns signers for all the known keys.
-func (r *keyring) Signers() ([]ssh.Signer, error) {
-	r.mu.Lock()
-	defer r.mu.Unlock()
-	if r.locked {
-		return nil, errLocked
-	}
-
-	r.expireKeysLocked()
-	s := make([]ssh.Signer, 0, len(r.keys))
-	for _, k := range r.keys {
-		s = append(s, k.signer)
-	}
-	return s, nil
-}
-
-// The keyring does not support any extensions.
-func (r *keyring) Extension(extensionType string, contents []byte) ([]byte, error) {
-	return nil, ErrExtensionUnsupported
-}
diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/agent/server.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/agent/server.go
deleted file mode 100644
index 6e7a1e02f27..00000000000
--- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/agent/server.go
+++ /dev/null
@@ -1,570 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package agent
-
-import (
-	"crypto/dsa"
-	"crypto/ecdsa"
-	"crypto/elliptic"
-	"crypto/rsa"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-	"log"
-	"math/big"
-
-	"golang.org/x/crypto/ed25519"
-	"golang.org/x/crypto/ssh"
-)
-
-// Server wraps an Agent and uses it to implement the agent side of
-// the SSH-agent wire protocol.
-type server struct {
-	agent Agent
-}
-
-func (s *server) processRequestBytes(reqData []byte) []byte {
-	rep, err := s.processRequest(reqData)
-	if err != nil {
-		if err != errLocked {
-			// TODO(hanwen): provide better logging interface?
-			log.Printf("agent %d: %v", reqData[0], err)
-		}
-		return []byte{agentFailure}
-	}
-
-	if err == nil && rep == nil {
-		return []byte{agentSuccess}
-	}
-
-	return ssh.Marshal(rep)
-}
-
-func marshalKey(k *Key) []byte {
-	var record struct {
-		Blob    []byte
-		Comment string
-	}
-	record.Blob = k.Marshal()
-	record.Comment = k.Comment
-
-	return ssh.Marshal(&record)
-}
-
-// See [PROTOCOL.agent], section 2.5.1.
-const agentV1IdentitiesAnswer = 2 - -type agentV1IdentityMsg struct { - Numkeys uint32 `sshtype:"2"` -} - -type agentRemoveIdentityMsg struct { - KeyBlob []byte `sshtype:"18"` -} - -type agentLockMsg struct { - Passphrase []byte `sshtype:"22"` -} - -type agentUnlockMsg struct { - Passphrase []byte `sshtype:"23"` -} - -func (s *server) processRequest(data []byte) (interface{}, error) { - switch data[0] { - case agentRequestV1Identities: - return &agentV1IdentityMsg{0}, nil - - case agentRemoveAllV1Identities: - return nil, nil - - case agentRemoveIdentity: - var req agentRemoveIdentityMsg - if err := ssh.Unmarshal(data, &req); err != nil { - return nil, err - } - - var wk wireKey - if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil { - return nil, err - } - - return nil, s.agent.Remove(&Key{Format: wk.Format, Blob: req.KeyBlob}) - - case agentRemoveAllIdentities: - return nil, s.agent.RemoveAll() - - case agentLock: - var req agentLockMsg - if err := ssh.Unmarshal(data, &req); err != nil { - return nil, err - } - - return nil, s.agent.Lock(req.Passphrase) - - case agentUnlock: - var req agentUnlockMsg - if err := ssh.Unmarshal(data, &req); err != nil { - return nil, err - } - return nil, s.agent.Unlock(req.Passphrase) - - case agentSignRequest: - var req signRequestAgentMsg - if err := ssh.Unmarshal(data, &req); err != nil { - return nil, err - } - - var wk wireKey - if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil { - return nil, err - } - - k := &Key{ - Format: wk.Format, - Blob: req.KeyBlob, - } - - var sig *ssh.Signature - var err error - if extendedAgent, ok := s.agent.(ExtendedAgent); ok { - sig, err = extendedAgent.SignWithFlags(k, req.Data, SignatureFlags(req.Flags)) - } else { - sig, err = s.agent.Sign(k, req.Data) - } - - if err != nil { - return nil, err - } - return &signResponseAgentMsg{SigBlob: ssh.Marshal(sig)}, nil - - case agentRequestIdentities: - keys, err := s.agent.List() - if err != nil { - return nil, err - } - - rep := identitiesAnswerAgentMsg{ - NumKeys: uint32(len(keys)), - } - for _, k := range keys { - rep.Keys = append(rep.Keys, marshalKey(k)...) - } - return rep, nil - - case agentAddIDConstrained, agentAddIdentity: - return nil, s.insertIdentity(data) - - case agentExtension: - // Return a stub object where the whole contents of the response gets marshaled. - var responseStub struct { - Rest []byte `ssh:"rest"` - } - - if extendedAgent, ok := s.agent.(ExtendedAgent); !ok { - // If this agent doesn't implement extensions, [PROTOCOL.agent] section 4.7 - // requires that we return a standard SSH_AGENT_FAILURE message. - responseStub.Rest = []byte{agentFailure} - } else { - var req extensionAgentMsg - if err := ssh.Unmarshal(data, &req); err != nil { - return nil, err - } - res, err := extendedAgent.Extension(req.ExtensionType, req.Contents) - if err != nil { - // If agent extensions are unsupported, return a standard SSH_AGENT_FAILURE - // message as required by [PROTOCOL.agent] section 4.7. - if err == ErrExtensionUnsupported { - responseStub.Rest = []byte{agentFailure} - } else { - // As the result of any other error processing an extension request, - // [PROTOCOL.agent] section 4.7 requires that we return a - // SSH_AGENT_EXTENSION_FAILURE code. 
- responseStub.Rest = []byte{agentExtensionFailure} - } - } else { - if len(res) == 0 { - return nil, nil - } - responseStub.Rest = res - } - } - - return responseStub, nil - } - - return nil, fmt.Errorf("unknown opcode %d", data[0]) -} - -func parseConstraints(constraints []byte) (lifetimeSecs uint32, confirmBeforeUse bool, extensions []ConstraintExtension, err error) { - for len(constraints) != 0 { - switch constraints[0] { - case agentConstrainLifetime: - lifetimeSecs = binary.BigEndian.Uint32(constraints[1:5]) - constraints = constraints[5:] - case agentConstrainConfirm: - confirmBeforeUse = true - constraints = constraints[1:] - case agentConstrainExtension: - var msg constrainExtensionAgentMsg - if err = ssh.Unmarshal(constraints, &msg); err != nil { - return 0, false, nil, err - } - extensions = append(extensions, ConstraintExtension{ - ExtensionName: msg.ExtensionName, - ExtensionDetails: msg.ExtensionDetails, - }) - constraints = msg.Rest - default: - return 0, false, nil, fmt.Errorf("unknown constraint type: %d", constraints[0]) - } - } - return -} - -func setConstraints(key *AddedKey, constraintBytes []byte) error { - lifetimeSecs, confirmBeforeUse, constraintExtensions, err := parseConstraints(constraintBytes) - if err != nil { - return err - } - - key.LifetimeSecs = lifetimeSecs - key.ConfirmBeforeUse = confirmBeforeUse - key.ConstraintExtensions = constraintExtensions - return nil -} - -func parseRSAKey(req []byte) (*AddedKey, error) { - var k rsaKeyMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - if k.E.BitLen() > 30 { - return nil, errors.New("agent: RSA public exponent too large") - } - priv := &rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - E: int(k.E.Int64()), - N: k.N, - }, - D: k.D, - Primes: []*big.Int{k.P, k.Q}, - } - priv.Precompute() - - addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseEd25519Key(req []byte) (*AddedKey, error) { - var k ed25519KeyMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - priv := ed25519.PrivateKey(k.Priv) - - addedKey := &AddedKey{PrivateKey: &priv, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseDSAKey(req []byte) (*AddedKey, error) { - var k dsaKeyMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - priv := &dsa.PrivateKey{ - PublicKey: dsa.PublicKey{ - Parameters: dsa.Parameters{ - P: k.P, - Q: k.Q, - G: k.G, - }, - Y: k.Y, - }, - X: k.X, - } - - addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func unmarshalECDSA(curveName string, keyBytes []byte, privScalar *big.Int) (priv *ecdsa.PrivateKey, err error) { - priv = &ecdsa.PrivateKey{ - D: privScalar, - } - - switch curveName { - case "nistp256": - priv.Curve = elliptic.P256() - case "nistp384": - priv.Curve = elliptic.P384() - case "nistp521": - priv.Curve = elliptic.P521() - default: - return nil, fmt.Errorf("agent: unknown curve %q", curveName) - } - - priv.X, priv.Y = elliptic.Unmarshal(priv.Curve, keyBytes) - if priv.X == nil || priv.Y == nil { - return nil, errors.New("agent: point not on curve") - } - - return priv, nil -} - -func parseEd25519Cert(req []byte) (*AddedKey, error) { - var k ed25519CertMsg - if err := ssh.Unmarshal(req, &k); err != 
nil { - return nil, err - } - pubKey, err := ssh.ParsePublicKey(k.CertBytes) - if err != nil { - return nil, err - } - priv := ed25519.PrivateKey(k.Priv) - cert, ok := pubKey.(*ssh.Certificate) - if !ok { - return nil, errors.New("agent: bad ED25519 certificate") - } - - addedKey := &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseECDSAKey(req []byte) (*AddedKey, error) { - var k ecdsaKeyMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - - priv, err := unmarshalECDSA(k.Curve, k.KeyBytes, k.D) - if err != nil { - return nil, err - } - - addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseRSACert(req []byte) (*AddedKey, error) { - var k rsaCertMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - - pubKey, err := ssh.ParsePublicKey(k.CertBytes) - if err != nil { - return nil, err - } - - cert, ok := pubKey.(*ssh.Certificate) - if !ok { - return nil, errors.New("agent: bad RSA certificate") - } - - // An RSA publickey as marshaled by rsaPublicKey.Marshal() in keys.go - var rsaPub struct { - Name string - E *big.Int - N *big.Int - } - if err := ssh.Unmarshal(cert.Key.Marshal(), &rsaPub); err != nil { - return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err) - } - - if rsaPub.E.BitLen() > 30 { - return nil, errors.New("agent: RSA public exponent too large") - } - - priv := rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - E: int(rsaPub.E.Int64()), - N: rsaPub.N, - }, - D: k.D, - Primes: []*big.Int{k.Q, k.P}, - } - priv.Precompute() - - addedKey := &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseDSACert(req []byte) (*AddedKey, error) { - var k dsaCertMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - pubKey, err := ssh.ParsePublicKey(k.CertBytes) - if err != nil { - return nil, err - } - cert, ok := pubKey.(*ssh.Certificate) - if !ok { - return nil, errors.New("agent: bad DSA certificate") - } - - // A DSA publickey as marshaled by dsaPublicKey.Marshal() in keys.go - var w struct { - Name string - P, Q, G, Y *big.Int - } - if err := ssh.Unmarshal(cert.Key.Marshal(), &w); err != nil { - return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err) - } - - priv := &dsa.PrivateKey{ - PublicKey: dsa.PublicKey{ - Parameters: dsa.Parameters{ - P: w.P, - Q: w.Q, - G: w.G, - }, - Y: w.Y, - }, - X: k.X, - } - - addedKey := &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseECDSACert(req []byte) (*AddedKey, error) { - var k ecdsaCertMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - - pubKey, err := ssh.ParsePublicKey(k.CertBytes) - if err != nil { - return nil, err - } - cert, ok := pubKey.(*ssh.Certificate) - if !ok { - return nil, errors.New("agent: bad ECDSA certificate") - } - - // An ECDSA publickey as marshaled by ecdsaPublicKey.Marshal() in keys.go - var ecdsaPub struct { - Name string - ID string - Key []byte - } - if err := ssh.Unmarshal(cert.Key.Marshal(), &ecdsaPub); err != nil { - return nil, 
err - } - - priv, err := unmarshalECDSA(ecdsaPub.ID, ecdsaPub.Key, k.D) - if err != nil { - return nil, err - } - - addedKey := &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func (s *server) insertIdentity(req []byte) error { - var record struct { - Type string `sshtype:"17|25"` - Rest []byte `ssh:"rest"` - } - - if err := ssh.Unmarshal(req, &record); err != nil { - return err - } - - var addedKey *AddedKey - var err error - - switch record.Type { - case ssh.KeyAlgoRSA: - addedKey, err = parseRSAKey(req) - case ssh.KeyAlgoDSA: - addedKey, err = parseDSAKey(req) - case ssh.KeyAlgoECDSA256, ssh.KeyAlgoECDSA384, ssh.KeyAlgoECDSA521: - addedKey, err = parseECDSAKey(req) - case ssh.KeyAlgoED25519: - addedKey, err = parseEd25519Key(req) - case ssh.CertAlgoRSAv01: - addedKey, err = parseRSACert(req) - case ssh.CertAlgoDSAv01: - addedKey, err = parseDSACert(req) - case ssh.CertAlgoECDSA256v01, ssh.CertAlgoECDSA384v01, ssh.CertAlgoECDSA521v01: - addedKey, err = parseECDSACert(req) - case ssh.CertAlgoED25519v01: - addedKey, err = parseEd25519Cert(req) - default: - return fmt.Errorf("agent: not implemented: %q", record.Type) - } - - if err != nil { - return err - } - return s.agent.Add(*addedKey) -} - -// ServeAgent serves the agent protocol on the given connection. It -// returns when an I/O error occurs. -func ServeAgent(agent Agent, c io.ReadWriter) error { - s := &server{agent} - - var length [4]byte - for { - if _, err := io.ReadFull(c, length[:]); err != nil { - return err - } - l := binary.BigEndian.Uint32(length[:]) - if l == 0 { - return fmt.Errorf("agent: request size is 0") - } - if l > maxAgentResponseBytes { - // We also cap requests. - return fmt.Errorf("agent: request too large: %d", l) - } - - req := make([]byte, l) - if _, err := io.ReadFull(c, req); err != nil { - return err - } - - repData := s.processRequestBytes(req) - if len(repData) > maxAgentResponseBytes { - return fmt.Errorf("agent: reply too large: %d bytes", len(repData)) - } - - binary.BigEndian.PutUint32(length[:], uint32(len(repData))) - if _, err := c.Write(length[:]); err != nil { - return err - } - if _, err := c.Write(repData); err != nil { - return err - } - } -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/buffer.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/buffer.go deleted file mode 100644 index 1ab07d078db..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/buffer.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "io" - "sync" -) - -// buffer provides a linked list buffer for data exchange -// between producer and consumer. Theoretically the buffer is -// of unlimited capacity as it does no allocation of its own. -type buffer struct { - // protects concurrent access to head, tail and closed - *sync.Cond - - head *element // the buffer that will be read first - tail *element // the buffer that will be read last - - closed bool -} - -// An element represents a single link in a linked list. -type element struct { - buf []byte - next *element -} - -// newBuffer returns an empty buffer that is not closed. 
-func newBuffer() *buffer { - e := new(element) - b := &buffer{ - Cond: newCond(), - head: e, - tail: e, - } - return b -} - -// write makes buf available for Read to receive. -// buf must not be modified after the call to write. -func (b *buffer) write(buf []byte) { - b.Cond.L.Lock() - e := &element{buf: buf} - b.tail.next = e - b.tail = e - b.Cond.Signal() - b.Cond.L.Unlock() -} - -// eof closes the buffer. Reads from the buffer once all -// the data has been consumed will receive io.EOF. -func (b *buffer) eof() { - b.Cond.L.Lock() - b.closed = true - b.Cond.Signal() - b.Cond.L.Unlock() -} - -// Read reads data from the internal buffer in buf. Reads will block -// if no data is available, or until the buffer is closed. -func (b *buffer) Read(buf []byte) (n int, err error) { - b.Cond.L.Lock() - defer b.Cond.L.Unlock() - - for len(buf) > 0 { - // if there is data in b.head, copy it - if len(b.head.buf) > 0 { - r := copy(buf, b.head.buf) - buf, b.head.buf = buf[r:], b.head.buf[r:] - n += r - continue - } - // if there is a next buffer, make it the head - if len(b.head.buf) == 0 && b.head != b.tail { - b.head = b.head.next - continue - } - - // if at least one byte has been copied, return - if n > 0 { - break - } - - // if nothing was read, and there is nothing outstanding - // check to see if the buffer is closed. - if b.closed { - err = io.EOF - break - } - // out of buffers, wait for producer - b.Cond.Wait() - } - return -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/certs.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/certs.go deleted file mode 100644 index 916c840b698..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/certs.go +++ /dev/null @@ -1,546 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "sort" - "time" -) - -// These constants from [PROTOCOL.certkeys] represent the algorithm names -// for certificate types supported by this package. -const ( - CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" - CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" - CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" - CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" - CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" - CertAlgoSKECDSA256v01 = "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com" - CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" - CertAlgoSKED25519v01 = "sk-ssh-ed25519-cert-v01@openssh.com" -) - -// Certificate types distinguish between host and user -// certificates. The values can be set in the CertType field of -// Certificate. -const ( - UserCert = 1 - HostCert = 2 -) - -// Signature represents a cryptographic signature. -type Signature struct { - Format string - Blob []byte - Rest []byte `ssh:"rest"` -} - -// CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that -// a certificate does not expire. -const CertTimeInfinity = 1<<64 - 1 - -// An Certificate represents an OpenSSH certificate as defined in -// [PROTOCOL.certkeys]?rev=1.8. The Certificate type implements the -// PublicKey interface, so it can be unmarshaled using -// ParsePublicKey. 
-type Certificate struct { - Nonce []byte - Key PublicKey - Serial uint64 - CertType uint32 - KeyId string - ValidPrincipals []string - ValidAfter uint64 - ValidBefore uint64 - Permissions - Reserved []byte - SignatureKey PublicKey - Signature *Signature -} - -// genericCertData holds the key-independent part of the certificate data. -// Overall, certificates contain an nonce, public key fields and -// key-independent fields. -type genericCertData struct { - Serial uint64 - CertType uint32 - KeyId string - ValidPrincipals []byte - ValidAfter uint64 - ValidBefore uint64 - CriticalOptions []byte - Extensions []byte - Reserved []byte - SignatureKey []byte - Signature []byte -} - -func marshalStringList(namelist []string) []byte { - var to []byte - for _, name := range namelist { - s := struct{ N string }{name} - to = append(to, Marshal(&s)...) - } - return to -} - -type optionsTuple struct { - Key string - Value []byte -} - -type optionsTupleValue struct { - Value string -} - -// serialize a map of critical options or extensions -// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, -// we need two length prefixes for a non-empty string value -func marshalTuples(tups map[string]string) []byte { - keys := make([]string, 0, len(tups)) - for key := range tups { - keys = append(keys, key) - } - sort.Strings(keys) - - var ret []byte - for _, key := range keys { - s := optionsTuple{Key: key} - if value := tups[key]; len(value) > 0 { - s.Value = Marshal(&optionsTupleValue{value}) - } - ret = append(ret, Marshal(&s)...) - } - return ret -} - -// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, -// we need two length prefixes for a non-empty option value -func parseTuples(in []byte) (map[string]string, error) { - tups := map[string]string{} - var lastKey string - var haveLastKey bool - - for len(in) > 0 { - var key, val, extra []byte - var ok bool - - if key, in, ok = parseString(in); !ok { - return nil, errShortRead - } - keyStr := string(key) - // according to [PROTOCOL.certkeys], the names must be in - // lexical order. 
- if haveLastKey && keyStr <= lastKey { - return nil, fmt.Errorf("ssh: certificate options are not in lexical order") - } - lastKey, haveLastKey = keyStr, true - // the next field is a data field, which if non-empty has a string embedded - if val, in, ok = parseString(in); !ok { - return nil, errShortRead - } - if len(val) > 0 { - val, extra, ok = parseString(val) - if !ok { - return nil, errShortRead - } - if len(extra) > 0 { - return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value") - } - tups[keyStr] = string(val) - } else { - tups[keyStr] = "" - } - } - return tups, nil -} - -func parseCert(in []byte, privAlgo string) (*Certificate, error) { - nonce, rest, ok := parseString(in) - if !ok { - return nil, errShortRead - } - - key, rest, err := parsePubKey(rest, privAlgo) - if err != nil { - return nil, err - } - - var g genericCertData - if err := Unmarshal(rest, &g); err != nil { - return nil, err - } - - c := &Certificate{ - Nonce: nonce, - Key: key, - Serial: g.Serial, - CertType: g.CertType, - KeyId: g.KeyId, - ValidAfter: g.ValidAfter, - ValidBefore: g.ValidBefore, - } - - for principals := g.ValidPrincipals; len(principals) > 0; { - principal, rest, ok := parseString(principals) - if !ok { - return nil, errShortRead - } - c.ValidPrincipals = append(c.ValidPrincipals, string(principal)) - principals = rest - } - - c.CriticalOptions, err = parseTuples(g.CriticalOptions) - if err != nil { - return nil, err - } - c.Extensions, err = parseTuples(g.Extensions) - if err != nil { - return nil, err - } - c.Reserved = g.Reserved - k, err := ParsePublicKey(g.SignatureKey) - if err != nil { - return nil, err - } - - c.SignatureKey = k - c.Signature, rest, ok = parseSignatureBody(g.Signature) - if !ok || len(rest) > 0 { - return nil, errors.New("ssh: signature parse error") - } - - return c, nil -} - -type openSSHCertSigner struct { - pub *Certificate - signer Signer -} - -type algorithmOpenSSHCertSigner struct { - *openSSHCertSigner - algorithmSigner AlgorithmSigner -} - -// NewCertSigner returns a Signer that signs with the given Certificate, whose -// private key is held by signer. It returns an error if the public key in cert -// doesn't match the key used by signer. -func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { - if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { - return nil, errors.New("ssh: signer and cert have different public key") - } - - if algorithmSigner, ok := signer.(AlgorithmSigner); ok { - return &algorithmOpenSSHCertSigner{ - &openSSHCertSigner{cert, signer}, algorithmSigner}, nil - } else { - return &openSSHCertSigner{cert, signer}, nil - } -} - -func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.signer.Sign(rand, data) -} - -func (s *openSSHCertSigner) PublicKey() PublicKey { - return s.pub -} - -func (s *algorithmOpenSSHCertSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - return s.algorithmSigner.SignWithAlgorithm(rand, data, algorithm) -} - -const sourceAddressCriticalOption = "source-address" - -// CertChecker does the work of verifying a certificate. Its methods -// can be plugged into ClientConfig.HostKeyCallback and -// ServerConfig.PublicKeyCallback. For the CertChecker to work, -// minimally, the IsAuthority callback should be set. -type CertChecker struct { - // SupportedCriticalOptions lists the CriticalOptions that the - // server application layer understands. 
These are only used - // for user certificates. - SupportedCriticalOptions []string - - // IsUserAuthority should return true if the key is recognized as an - // authority for the given user certificate. This allows for - // certificates to be signed by other certificates. This must be set - // if this CertChecker will be checking user certificates. - IsUserAuthority func(auth PublicKey) bool - - // IsHostAuthority should report whether the key is recognized as - // an authority for this host. This allows for certificates to be - // signed by other keys, and for those other keys to only be valid - // signers for particular hostnames. This must be set if this - // CertChecker will be checking host certificates. - IsHostAuthority func(auth PublicKey, address string) bool - - // Clock is used for verifying time stamps. If nil, time.Now - // is used. - Clock func() time.Time - - // UserKeyFallback is called when CertChecker.Authenticate encounters a - // public key that is not a certificate. It must implement validation - // of user keys or else, if nil, all such keys are rejected. - UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) - - // HostKeyFallback is called when CertChecker.CheckHostKey encounters a - // public key that is not a certificate. It must implement host key - // validation or else, if nil, all such keys are rejected. - HostKeyFallback HostKeyCallback - - // IsRevoked is called for each certificate so that revocation checking - // can be implemented. It should return true if the given certificate - // is revoked and false otherwise. If nil, no certificates are - // considered to have been revoked. - IsRevoked func(cert *Certificate) bool -} - -// CheckHostKey checks a host key certificate. This method can be -// plugged into ClientConfig.HostKeyCallback. -func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error { - cert, ok := key.(*Certificate) - if !ok { - if c.HostKeyFallback != nil { - return c.HostKeyFallback(addr, remote, key) - } - return errors.New("ssh: non-certificate host key") - } - if cert.CertType != HostCert { - return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType) - } - if !c.IsHostAuthority(cert.SignatureKey, addr) { - return fmt.Errorf("ssh: no authorities for hostname: %v", addr) - } - - hostname, _, err := net.SplitHostPort(addr) - if err != nil { - return err - } - - // Pass hostname only as principal for host certificates (consistent with OpenSSH) - return c.CheckCert(hostname, cert) -} - -// Authenticate checks a user certificate. Authenticate can be used as -// a value for ServerConfig.PublicKeyCallback. -func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) { - cert, ok := pubKey.(*Certificate) - if !ok { - if c.UserKeyFallback != nil { - return c.UserKeyFallback(conn, pubKey) - } - return nil, errors.New("ssh: normal key pairs not accepted") - } - - if cert.CertType != UserCert { - return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType) - } - if !c.IsUserAuthority(cert.SignatureKey) { - return nil, fmt.Errorf("ssh: certificate signed by unrecognized authority") - } - - if err := c.CheckCert(conn.User(), cert); err != nil { - return nil, err - } - - return &cert.Permissions, nil -} - -// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and -// the signature of the certificate. 
-func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { - if c.IsRevoked != nil && c.IsRevoked(cert) { - return fmt.Errorf("ssh: certificate serial %d revoked", cert.Serial) - } - - for opt := range cert.CriticalOptions { - // sourceAddressCriticalOption will be enforced by - // serverAuthenticate - if opt == sourceAddressCriticalOption { - continue - } - - found := false - for _, supp := range c.SupportedCriticalOptions { - if supp == opt { - found = true - break - } - } - if !found { - return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt) - } - } - - if len(cert.ValidPrincipals) > 0 { - // By default, certs are valid for all users/hosts. - found := false - for _, p := range cert.ValidPrincipals { - if p == principal { - found = true - break - } - } - if !found { - return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals) - } - } - - clock := c.Clock - if clock == nil { - clock = time.Now - } - - unixNow := clock().Unix() - if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) { - return fmt.Errorf("ssh: cert is not yet valid") - } - if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) { - return fmt.Errorf("ssh: cert has expired") - } - if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil { - return fmt.Errorf("ssh: certificate signature does not verify") - } - - return nil -} - -// SignCert signs the certificate with an authority, setting the Nonce, -// SignatureKey, and Signature fields. -func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { - c.Nonce = make([]byte, 32) - if _, err := io.ReadFull(rand, c.Nonce); err != nil { - return err - } - c.SignatureKey = authority.PublicKey() - - sig, err := authority.Sign(rand, c.bytesForSigning()) - if err != nil { - return err - } - c.Signature = sig - return nil -} - -var certAlgoNames = map[string]string{ - KeyAlgoRSA: CertAlgoRSAv01, - KeyAlgoDSA: CertAlgoDSAv01, - KeyAlgoECDSA256: CertAlgoECDSA256v01, - KeyAlgoECDSA384: CertAlgoECDSA384v01, - KeyAlgoECDSA521: CertAlgoECDSA521v01, - KeyAlgoSKECDSA256: CertAlgoSKECDSA256v01, - KeyAlgoED25519: CertAlgoED25519v01, - KeyAlgoSKED25519: CertAlgoSKED25519v01, -} - -// certToPrivAlgo returns the underlying algorithm for a certificate algorithm. -// Panics if a non-certificate algorithm is passed. -func certToPrivAlgo(algo string) string { - for privAlgo, pubAlgo := range certAlgoNames { - if pubAlgo == algo { - return privAlgo - } - } - panic("unknown cert algorithm") -} - -func (cert *Certificate) bytesForSigning() []byte { - c2 := *cert - c2.Signature = nil - out := c2.Marshal() - // Drop trailing signature length. - return out[:len(out)-4] -} - -// Marshal serializes c into OpenSSH's wire format. It is part of the -// PublicKey interface. 
-func (c *Certificate) Marshal() []byte { - generic := genericCertData{ - Serial: c.Serial, - CertType: c.CertType, - KeyId: c.KeyId, - ValidPrincipals: marshalStringList(c.ValidPrincipals), - ValidAfter: uint64(c.ValidAfter), - ValidBefore: uint64(c.ValidBefore), - CriticalOptions: marshalTuples(c.CriticalOptions), - Extensions: marshalTuples(c.Extensions), - Reserved: c.Reserved, - SignatureKey: c.SignatureKey.Marshal(), - } - if c.Signature != nil { - generic.Signature = Marshal(c.Signature) - } - genericBytes := Marshal(&generic) - keyBytes := c.Key.Marshal() - _, keyBytes, _ = parseString(keyBytes) - prefix := Marshal(&struct { - Name string - Nonce []byte - Key []byte `ssh:"rest"` - }{c.Type(), c.Nonce, keyBytes}) - - result := make([]byte, 0, len(prefix)+len(genericBytes)) - result = append(result, prefix...) - result = append(result, genericBytes...) - return result -} - -// Type returns the key name. It is part of the PublicKey interface. -func (c *Certificate) Type() string { - algo, ok := certAlgoNames[c.Key.Type()] - if !ok { - panic("unknown cert key type " + c.Key.Type()) - } - return algo -} - -// Verify verifies a signature against the certificate's public -// key. It is part of the PublicKey interface. -func (c *Certificate) Verify(data []byte, sig *Signature) error { - return c.Key.Verify(data, sig) -} - -func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) { - format, in, ok := parseString(in) - if !ok { - return - } - - out = &Signature{ - Format: string(format), - } - - if out.Blob, in, ok = parseString(in); !ok { - return - } - - switch out.Format { - case KeyAlgoSKECDSA256, CertAlgoSKECDSA256v01, KeyAlgoSKED25519, CertAlgoSKED25519v01: - out.Rest = in - return out, nil, ok - } - - return out, in, ok -} - -func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) { - sigBytes, rest, ok := parseString(in) - if !ok { - return - } - - out, trailing, ok := parseSignatureBody(sigBytes) - if !ok || len(trailing) > 0 { - return nil, nil, false - } - return -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/channel.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/channel.go deleted file mode 100644 index c0834c00dfe..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/channel.go +++ /dev/null @@ -1,633 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "log" - "sync" -) - -const ( - minPacketLength = 9 - // channelMaxPacket contains the maximum number of bytes that will be - // sent in a single packet. As per RFC 4253, section 6.1, 32k is also - // the minimum. - channelMaxPacket = 1 << 15 - // We follow OpenSSH here. - channelWindowSize = 64 * channelMaxPacket -) - -// NewChannel represents an incoming request to a channel. It must either be -// accepted for use by calling Accept, or rejected by calling Reject. -type NewChannel interface { - // Accept accepts the channel creation request. It returns the Channel - // and a Go channel containing SSH requests. The Go channel must be - // serviced otherwise the Channel will hang. - Accept() (Channel, <-chan *Request, error) - - // Reject rejects the channel creation request. After calling - // this, no other methods on the Channel may be called. 
- Reject(reason RejectionReason, message string) error - - // ChannelType returns the type of the channel, as supplied by the - // client. - ChannelType() string - - // ExtraData returns the arbitrary payload for this channel, as supplied - // by the client. This data is specific to the channel type. - ExtraData() []byte -} - -// A Channel is an ordered, reliable, flow-controlled, duplex stream -// that is multiplexed over an SSH connection. -type Channel interface { - // Read reads up to len(data) bytes from the channel. - Read(data []byte) (int, error) - - // Write writes len(data) bytes to the channel. - Write(data []byte) (int, error) - - // Close signals end of channel use. No data may be sent after this - // call. - Close() error - - // CloseWrite signals the end of sending in-band - // data. Requests may still be sent, and the other side may - // still send data - CloseWrite() error - - // SendRequest sends a channel request. If wantReply is true, - // it will wait for a reply and return the result as a - // boolean, otherwise the return value will be false. Channel - // requests are out-of-band messages so they may be sent even - // if the data stream is closed or blocked by flow control. - // If the channel is closed before a reply is returned, io.EOF - // is returned. - SendRequest(name string, wantReply bool, payload []byte) (bool, error) - - // Stderr returns an io.ReadWriter that writes to this channel - // with the extended data type set to stderr. Stderr may - // safely be read and written from a different goroutine than - // Read and Write respectively. - Stderr() io.ReadWriter -} - -// Request is a request sent outside of the normal stream of -// data. Requests can either be specific to an SSH channel, or they -// can be global. -type Request struct { - Type string - WantReply bool - Payload []byte - - ch *channel - mux *mux -} - -// Reply sends a response to a request. It must be called for all requests -// where WantReply is true and is a no-op otherwise. The payload argument is -// ignored for replies to channel-specific requests. -func (r *Request) Reply(ok bool, payload []byte) error { - if !r.WantReply { - return nil - } - - if r.ch == nil { - return r.mux.ackRequest(ok, payload) - } - - return r.ch.ackRequest(ok) -} - -// RejectionReason is an enumeration used when rejecting channel creation -// requests. See RFC 4254, section 5.1. -type RejectionReason uint32 - -const ( - Prohibited RejectionReason = iota + 1 - ConnectionFailed - UnknownChannelType - ResourceShortage -) - -// String converts the rejection reason to human readable form. -func (r RejectionReason) String() string { - switch r { - case Prohibited: - return "administratively prohibited" - case ConnectionFailed: - return "connect failed" - case UnknownChannelType: - return "unknown channel type" - case ResourceShortage: - return "resource shortage" - } - return fmt.Sprintf("unknown reason %d", int(r)) -} - -func min(a uint32, b int) uint32 { - if a < uint32(b) { - return a - } - return uint32(b) -} - -type channelDirection uint8 - -const ( - channelInbound channelDirection = iota - channelOutbound -) - -// channel is an implementation of the Channel interface that works -// with the mux class. -type channel struct { - // R/O after creation - chanType string - extraData []byte - localId, remoteId uint32 - - // maxIncomingPayload and maxRemotePayload are the maximum - // payload sizes of normal and extended data packets for - // receiving and sending, respectively. 
The wire packet will - // be 9 or 13 bytes larger (excluding encryption overhead). - maxIncomingPayload uint32 - maxRemotePayload uint32 - - mux *mux - - // decided is set to true if an accept or reject message has been sent - // (for outbound channels) or received (for inbound channels). - decided bool - - // direction contains either channelOutbound, for channels created - // locally, or channelInbound, for channels created by the peer. - direction channelDirection - - // Pending internal channel messages. - msg chan interface{} - - // Since requests have no ID, there can be only one request - // with WantReply=true outstanding. This lock is held by a - // goroutine that has such an outgoing request pending. - sentRequestMu sync.Mutex - - incomingRequests chan *Request - - sentEOF bool - - // thread-safe data - remoteWin window - pending *buffer - extPending *buffer - - // windowMu protects myWindow, the flow-control window. - windowMu sync.Mutex - myWindow uint32 - - // writeMu serializes calls to mux.conn.writePacket() and - // protects sentClose and packetPool. This mutex must be - // different from windowMu, as writePacket can block if there - // is a key exchange pending. - writeMu sync.Mutex - sentClose bool - - // packetPool has a buffer for each extended channel ID to - // save allocations during writes. - packetPool map[uint32][]byte -} - -// writePacket sends a packet. If the packet is a channel close, it updates -// sentClose. This method takes the lock c.writeMu. -func (ch *channel) writePacket(packet []byte) error { - ch.writeMu.Lock() - if ch.sentClose { - ch.writeMu.Unlock() - return io.EOF - } - ch.sentClose = (packet[0] == msgChannelClose) - err := ch.mux.conn.writePacket(packet) - ch.writeMu.Unlock() - return err -} - -func (ch *channel) sendMessage(msg interface{}) error { - if debugMux { - log.Printf("send(%d): %#v", ch.mux.chanList.offset, msg) - } - - p := Marshal(msg) - binary.BigEndian.PutUint32(p[1:], ch.remoteId) - return ch.writePacket(p) -} - -// WriteExtended writes data to a specific extended stream. These streams are -// used, for example, for stderr. -func (ch *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) { - if ch.sentEOF { - return 0, io.EOF - } - // 1 byte message type, 4 bytes remoteId, 4 bytes data length - opCode := byte(msgChannelData) - headerLength := uint32(9) - if extendedCode > 0 { - headerLength += 4 - opCode = msgChannelExtendedData - } - - ch.writeMu.Lock() - packet := ch.packetPool[extendedCode] - // We don't remove the buffer from packetPool, so - // WriteExtended calls from different goroutines will be - // flagged as errors by the race detector. 
- ch.writeMu.Unlock() - - for len(data) > 0 { - space := min(ch.maxRemotePayload, len(data)) - if space, err = ch.remoteWin.reserve(space); err != nil { - return n, err - } - if want := headerLength + space; uint32(cap(packet)) < want { - packet = make([]byte, want) - } else { - packet = packet[:want] - } - - todo := data[:space] - - packet[0] = opCode - binary.BigEndian.PutUint32(packet[1:], ch.remoteId) - if extendedCode > 0 { - binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode)) - } - binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo))) - copy(packet[headerLength:], todo) - if err = ch.writePacket(packet); err != nil { - return n, err - } - - n += len(todo) - data = data[len(todo):] - } - - ch.writeMu.Lock() - ch.packetPool[extendedCode] = packet - ch.writeMu.Unlock() - - return n, err -} - -func (ch *channel) handleData(packet []byte) error { - headerLen := 9 - isExtendedData := packet[0] == msgChannelExtendedData - if isExtendedData { - headerLen = 13 - } - if len(packet) < headerLen { - // malformed data packet - return parseError(packet[0]) - } - - var extended uint32 - if isExtendedData { - extended = binary.BigEndian.Uint32(packet[5:]) - } - - length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen]) - if length == 0 { - return nil - } - if length > ch.maxIncomingPayload { - // TODO(hanwen): should send Disconnect? - return errors.New("ssh: incoming packet exceeds maximum payload size") - } - - data := packet[headerLen:] - if length != uint32(len(data)) { - return errors.New("ssh: wrong packet length") - } - - ch.windowMu.Lock() - if ch.myWindow < length { - ch.windowMu.Unlock() - // TODO(hanwen): should send Disconnect with reason? - return errors.New("ssh: remote side wrote too much") - } - ch.myWindow -= length - ch.windowMu.Unlock() - - if extended == 1 { - ch.extPending.write(data) - } else if extended > 0 { - // discard other extended data. - } else { - ch.pending.write(data) - } - return nil -} - -func (c *channel) adjustWindow(n uint32) error { - c.windowMu.Lock() - // Since myWindow is managed on our side, and can never exceed - // the initial window setting, we don't worry about overflow. - c.myWindow += uint32(n) - c.windowMu.Unlock() - return c.sendMessage(windowAdjustMsg{ - AdditionalBytes: uint32(n), - }) -} - -func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) { - switch extended { - case 1: - n, err = c.extPending.Read(data) - case 0: - n, err = c.pending.Read(data) - default: - return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended) - } - - if n > 0 { - err = c.adjustWindow(uint32(n)) - // sendWindowAdjust can return io.EOF if the remote - // peer has closed the connection, however we want to - // defer forwarding io.EOF to the caller of Read until - // the buffer has been drained. - if n > 0 && err == io.EOF { - err = nil - } - } - - return n, err -} - -func (c *channel) close() { - c.pending.eof() - c.extPending.eof() - close(c.msg) - close(c.incomingRequests) - c.writeMu.Lock() - // This is not necessary for a normal channel teardown, but if - // there was another error, it is. - c.sentClose = true - c.writeMu.Unlock() - // Unblock writers. - c.remoteWin.close() -} - -// responseMessageReceived is called when a success or failure message is -// received on a channel to check that such a message is reasonable for the -// given channel. 
-func (ch *channel) responseMessageReceived() error { - if ch.direction == channelInbound { - return errors.New("ssh: channel response message received on inbound channel") - } - if ch.decided { - return errors.New("ssh: duplicate response received for channel") - } - ch.decided = true - return nil -} - -func (ch *channel) handlePacket(packet []byte) error { - switch packet[0] { - case msgChannelData, msgChannelExtendedData: - return ch.handleData(packet) - case msgChannelClose: - ch.sendMessage(channelCloseMsg{PeersID: ch.remoteId}) - ch.mux.chanList.remove(ch.localId) - ch.close() - return nil - case msgChannelEOF: - // RFC 4254 is mute on how EOF affects dataExt messages but - // it is logical to signal EOF at the same time. - ch.extPending.eof() - ch.pending.eof() - return nil - } - - decoded, err := decode(packet) - if err != nil { - return err - } - - switch msg := decoded.(type) { - case *channelOpenFailureMsg: - if err := ch.responseMessageReceived(); err != nil { - return err - } - ch.mux.chanList.remove(msg.PeersID) - ch.msg <- msg - case *channelOpenConfirmMsg: - if err := ch.responseMessageReceived(); err != nil { - return err - } - if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { - return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize) - } - ch.remoteId = msg.MyID - ch.maxRemotePayload = msg.MaxPacketSize - ch.remoteWin.add(msg.MyWindow) - ch.msg <- msg - case *windowAdjustMsg: - if !ch.remoteWin.add(msg.AdditionalBytes) { - return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes) - } - case *channelRequestMsg: - req := Request{ - Type: msg.Request, - WantReply: msg.WantReply, - Payload: msg.RequestSpecificData, - ch: ch, - } - - ch.incomingRequests <- &req - default: - ch.msg <- msg - } - return nil -} - -func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel { - ch := &channel{ - remoteWin: window{Cond: newCond()}, - myWindow: channelWindowSize, - pending: newBuffer(), - extPending: newBuffer(), - direction: direction, - incomingRequests: make(chan *Request, chanSize), - msg: make(chan interface{}, chanSize), - chanType: chanType, - extraData: extraData, - mux: m, - packetPool: make(map[uint32][]byte), - } - ch.localId = m.chanList.add(ch) - return ch -} - -var errUndecided = errors.New("ssh: must Accept or Reject channel") -var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once") - -type extChannel struct { - code uint32 - ch *channel -} - -func (e *extChannel) Write(data []byte) (n int, err error) { - return e.ch.WriteExtended(data, e.code) -} - -func (e *extChannel) Read(data []byte) (n int, err error) { - return e.ch.ReadExtended(data, e.code) -} - -func (ch *channel) Accept() (Channel, <-chan *Request, error) { - if ch.decided { - return nil, nil, errDecidedAlready - } - ch.maxIncomingPayload = channelMaxPacket - confirm := channelOpenConfirmMsg{ - PeersID: ch.remoteId, - MyID: ch.localId, - MyWindow: ch.myWindow, - MaxPacketSize: ch.maxIncomingPayload, - } - ch.decided = true - if err := ch.sendMessage(confirm); err != nil { - return nil, nil, err - } - - return ch, ch.incomingRequests, nil -} - -func (ch *channel) Reject(reason RejectionReason, message string) error { - if ch.decided { - return errDecidedAlready - } - reject := channelOpenFailureMsg{ - PeersID: ch.remoteId, - Reason: reason, - Message: message, - Language: "en", - } - ch.decided = true - return ch.sendMessage(reject) -} - -func (ch *channel) Read(data []byte) (int, 
error) { - if !ch.decided { - return 0, errUndecided - } - return ch.ReadExtended(data, 0) -} - -func (ch *channel) Write(data []byte) (int, error) { - if !ch.decided { - return 0, errUndecided - } - return ch.WriteExtended(data, 0) -} - -func (ch *channel) CloseWrite() error { - if !ch.decided { - return errUndecided - } - ch.sentEOF = true - return ch.sendMessage(channelEOFMsg{ - PeersID: ch.remoteId}) -} - -func (ch *channel) Close() error { - if !ch.decided { - return errUndecided - } - - return ch.sendMessage(channelCloseMsg{ - PeersID: ch.remoteId}) -} - -// Extended returns an io.ReadWriter that sends and receives data on the given, -// SSH extended stream. Such streams are used, for example, for stderr. -func (ch *channel) Extended(code uint32) io.ReadWriter { - if !ch.decided { - return nil - } - return &extChannel{code, ch} -} - -func (ch *channel) Stderr() io.ReadWriter { - return ch.Extended(1) -} - -func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { - if !ch.decided { - return false, errUndecided - } - - if wantReply { - ch.sentRequestMu.Lock() - defer ch.sentRequestMu.Unlock() - } - - msg := channelRequestMsg{ - PeersID: ch.remoteId, - Request: name, - WantReply: wantReply, - RequestSpecificData: payload, - } - - if err := ch.sendMessage(msg); err != nil { - return false, err - } - - if wantReply { - m, ok := (<-ch.msg) - if !ok { - return false, io.EOF - } - switch m.(type) { - case *channelRequestFailureMsg: - return false, nil - case *channelRequestSuccessMsg: - return true, nil - default: - return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m) - } - } - - return false, nil -} - -// ackRequest either sends an ack or nack to the channel request. -func (ch *channel) ackRequest(ok bool) error { - if !ch.decided { - return errUndecided - } - - var msg interface{} - if !ok { - msg = channelRequestFailureMsg{ - PeersID: ch.remoteId, - } - } else { - msg = channelRequestSuccessMsg{ - PeersID: ch.remoteId, - } - } - return ch.sendMessage(msg) -} - -func (ch *channel) ChannelType() string { - return ch.chanType -} - -func (ch *channel) ExtraData() []byte { - return ch.extraData -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/cipher.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/cipher.go deleted file mode 100644 index 8bd6b3daff5..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/cipher.go +++ /dev/null @@ -1,781 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/rc4" - "crypto/subtle" - "encoding/binary" - "errors" - "fmt" - "hash" - "io" - "io/ioutil" - - "golang.org/x/crypto/chacha20" - "golang.org/x/crypto/poly1305" -) - -const ( - packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher. - - // RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations - // MUST be able to process (plus a few more kilobytes for padding and mac). The RFC - // indicates implementations SHOULD be able to handle larger packet sizes, but then - // waffles on about reasonable limits. - // - // OpenSSH caps their maxPacket at 256kB so we choose to do - // the same. maxPacket is also used to ensure that uint32 - // length fields do not overflow, so it should remain well - // below 4G. 
- maxPacket = 256 * 1024 -) - -// noneCipher implements cipher.Stream and provides no encryption. It is used -// by the transport before the first key-exchange. -type noneCipher struct{} - -func (c noneCipher) XORKeyStream(dst, src []byte) { - copy(dst, src) -} - -func newAESCTR(key, iv []byte) (cipher.Stream, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - return cipher.NewCTR(c, iv), nil -} - -func newRC4(key, iv []byte) (cipher.Stream, error) { - return rc4.NewCipher(key) -} - -type cipherMode struct { - keySize int - ivSize int - create func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) -} - -func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, error)) func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - return func(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - stream, err := createFunc(key, iv) - if err != nil { - return nil, err - } - - var streamDump []byte - if skip > 0 { - streamDump = make([]byte, 512) - } - - for remainingToDump := skip; remainingToDump > 0; { - dumpThisTime := remainingToDump - if dumpThisTime > len(streamDump) { - dumpThisTime = len(streamDump) - } - stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime]) - remainingToDump -= dumpThisTime - } - - mac := macModes[algs.MAC].new(macKey) - return &streamPacketCipher{ - mac: mac, - etm: macModes[algs.MAC].etm, - macResult: make([]byte, mac.Size()), - cipher: stream, - }, nil - } -} - -// cipherModes documents properties of supported ciphers. Ciphers not included -// are not supported and will not be negotiated, even if explicitly requested in -// ClientConfig.Crypto.Ciphers. -var cipherModes = map[string]*cipherMode{ - // Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms - // are defined in the order specified in the RFC. - "aes128-ctr": {16, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - "aes192-ctr": {24, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - "aes256-ctr": {32, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - - // Ciphers from RFC4345, which introduces security-improved arcfour ciphers. - // They are defined in the order specified in the RFC. - "arcfour128": {16, 0, streamCipherMode(1536, newRC4)}, - "arcfour256": {32, 0, streamCipherMode(1536, newRC4)}, - - // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. - // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and - // RC4) has problems with weak keys, and should be used with caution." - // RFC4345 introduces improved versions of Arcfour. - "arcfour": {16, 0, streamCipherMode(0, newRC4)}, - - // AEAD ciphers - gcmCipherID: {16, 12, newGCMCipher}, - chacha20Poly1305ID: {64, 0, newChaCha20Cipher}, - - // CBC mode is insecure and so is not included in the default config. - // (See https://www.ieee-security.org/TC/SP2013/papers/4977a526.pdf). If absolutely - // needed, it's possible to specify a custom Config to enable it. - // You should expect that an active attacker can recover plaintext if - // you do. - aes128cbcID: {16, aes.BlockSize, newAESCBCCipher}, - - // 3des-cbc is insecure and is not included in the default - // config. - tripledescbcID: {24, des.BlockSize, newTripleDESCBCCipher}, -} - -// prefixLen is the length of the packet prefix that contains the packet length -// and number of padding bytes. -const prefixLen = 5 - -// streamPacketCipher is a packetCipher using a stream cipher. 
-type streamPacketCipher struct { - mac hash.Hash - cipher cipher.Stream - etm bool - - // The following members are to avoid per-packet allocations. - prefix [prefixLen]byte - seqNumBytes [4]byte - padding [2 * packetSizeMultiple]byte - packetData []byte - macResult []byte -} - -// readCipherPacket reads and decrypt a single packet from the reader argument. -func (s *streamPacketCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - if _, err := io.ReadFull(r, s.prefix[:]); err != nil { - return nil, err - } - - var encryptedPaddingLength [1]byte - if s.mac != nil && s.etm { - copy(encryptedPaddingLength[:], s.prefix[4:5]) - s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) - } else { - s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) - } - - length := binary.BigEndian.Uint32(s.prefix[0:4]) - paddingLength := uint32(s.prefix[4]) - - var macSize uint32 - if s.mac != nil { - s.mac.Reset() - binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) - s.mac.Write(s.seqNumBytes[:]) - if s.etm { - s.mac.Write(s.prefix[:4]) - s.mac.Write(encryptedPaddingLength[:]) - } else { - s.mac.Write(s.prefix[:]) - } - macSize = uint32(s.mac.Size()) - } - - if length <= paddingLength+1 { - return nil, errors.New("ssh: invalid packet length, packet too small") - } - - if length > maxPacket { - return nil, errors.New("ssh: invalid packet length, packet too large") - } - - // the maxPacket check above ensures that length-1+macSize - // does not overflow. - if uint32(cap(s.packetData)) < length-1+macSize { - s.packetData = make([]byte, length-1+macSize) - } else { - s.packetData = s.packetData[:length-1+macSize] - } - - if _, err := io.ReadFull(r, s.packetData); err != nil { - return nil, err - } - mac := s.packetData[length-1:] - data := s.packetData[:length-1] - - if s.mac != nil && s.etm { - s.mac.Write(data) - } - - s.cipher.XORKeyStream(data, data) - - if s.mac != nil { - if !s.etm { - s.mac.Write(data) - } - s.macResult = s.mac.Sum(s.macResult[:0]) - if subtle.ConstantTimeCompare(s.macResult, mac) != 1 { - return nil, errors.New("ssh: MAC failure") - } - } - - return s.packetData[:length-paddingLength-1], nil -} - -// writeCipherPacket encrypts and sends a packet of data to the writer argument -func (s *streamPacketCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - if len(packet) > maxPacket { - return errors.New("ssh: packet too large") - } - - aadlen := 0 - if s.mac != nil && s.etm { - // packet length is not encrypted for EtM modes - aadlen = 4 - } - - paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple - if paddingLength < 4 { - paddingLength += packetSizeMultiple - } - - length := len(packet) + 1 + paddingLength - binary.BigEndian.PutUint32(s.prefix[:], uint32(length)) - s.prefix[4] = byte(paddingLength) - padding := s.padding[:paddingLength] - if _, err := io.ReadFull(rand, padding); err != nil { - return err - } - - if s.mac != nil { - s.mac.Reset() - binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) - s.mac.Write(s.seqNumBytes[:]) - - if s.etm { - // For EtM algorithms, the packet length must stay unencrypted, - // but the following data (padding length) must be encrypted - s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) - } - - s.mac.Write(s.prefix[:]) - - if !s.etm { - // For non-EtM algorithms, the algorithm is applied on unencrypted data - s.mac.Write(packet) - s.mac.Write(padding) - } - } - - if !(s.mac != nil && s.etm) { - // For EtM algorithms, the padding length has already been encrypted - 
// and the packet length must remain unencrypted - s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) - } - - s.cipher.XORKeyStream(packet, packet) - s.cipher.XORKeyStream(padding, padding) - - if s.mac != nil && s.etm { - // For EtM algorithms, packet and padding must be encrypted - s.mac.Write(packet) - s.mac.Write(padding) - } - - if _, err := w.Write(s.prefix[:]); err != nil { - return err - } - if _, err := w.Write(packet); err != nil { - return err - } - if _, err := w.Write(padding); err != nil { - return err - } - - if s.mac != nil { - s.macResult = s.mac.Sum(s.macResult[:0]) - if _, err := w.Write(s.macResult); err != nil { - return err - } - } - - return nil -} - -type gcmCipher struct { - aead cipher.AEAD - prefix [4]byte - iv []byte - buf []byte -} - -func newGCMCipher(key, iv, unusedMacKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - aead, err := cipher.NewGCM(c) - if err != nil { - return nil, err - } - - return &gcmCipher{ - aead: aead, - iv: iv, - }, nil -} - -const gcmTagSize = 16 - -func (c *gcmCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - // Pad out to multiple of 16 bytes. This is different from the - // stream cipher because that encrypts the length too. - padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple) - if padding < 4 { - padding += packetSizeMultiple - } - - length := uint32(len(packet) + int(padding) + 1) - binary.BigEndian.PutUint32(c.prefix[:], length) - if _, err := w.Write(c.prefix[:]); err != nil { - return err - } - - if cap(c.buf) < int(length) { - c.buf = make([]byte, length) - } else { - c.buf = c.buf[:length] - } - - c.buf[0] = padding - copy(c.buf[1:], packet) - if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil { - return err - } - c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:]) - if _, err := w.Write(c.buf); err != nil { - return err - } - c.incIV() - - return nil -} - -func (c *gcmCipher) incIV() { - for i := 4 + 7; i >= 4; i-- { - c.iv[i]++ - if c.iv[i] != 0 { - break - } - } -} - -func (c *gcmCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - if _, err := io.ReadFull(r, c.prefix[:]); err != nil { - return nil, err - } - length := binary.BigEndian.Uint32(c.prefix[:]) - if length > maxPacket { - return nil, errors.New("ssh: max packet length exceeded") - } - - if cap(c.buf) < int(length+gcmTagSize) { - c.buf = make([]byte, length+gcmTagSize) - } else { - c.buf = c.buf[:length+gcmTagSize] - } - - if _, err := io.ReadFull(r, c.buf); err != nil { - return nil, err - } - - plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:]) - if err != nil { - return nil, err - } - c.incIV() - - padding := plain[0] - if padding < 4 { - // padding is a byte, so it automatically satisfies - // the maximum size, which is 255. - return nil, fmt.Errorf("ssh: illegal padding %d", padding) - } - - if int(padding+1) >= len(plain) { - return nil, fmt.Errorf("ssh: padding %d too large", padding) - } - plain = plain[1 : length-uint32(padding)] - return plain, nil -} - -// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1 -type cbcCipher struct { - mac hash.Hash - macSize uint32 - decrypter cipher.BlockMode - encrypter cipher.BlockMode - - // The following members are to avoid per-packet allocations. 
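The writeCipherPacket implementations above all apply the same RFC 4253, section 6 padding rule: prefix plus payload plus padding (minus any unencrypted AAD bytes) must be a multiple of the cipher's packet-size multiple, with at least 4 padding bytes. A small worked sketch of just that arithmetic; the helper and its inputs are illustrative, not from this file:

package main

import "fmt"

// paddingLength mirrors the computation used above: prefixLen (5) plus
// payload plus padding, minus aadLen, must be a multiple of `multiple`,
// and the padding must be at least 4 bytes.
func paddingLength(payloadLen, aadLen, multiple int) int {
	p := multiple - (5+payloadLen-aadLen)%multiple
	if p < 4 {
		p += multiple
	}
	return p
}

func main() {
	fmt.Println(paddingLength(32, 0, 16)) // AES-CTR style: 11, so 5+32+11 = 48
	fmt.Println(paddingLength(32, 4, 16)) // EtM, length left unencrypted: 15
}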
- seqNumBytes [4]byte - packetData []byte - macResult []byte - - // Amount of data we should still read to hide which - // verification error triggered. - oracleCamouflage uint32 -} - -func newCBCCipher(c cipher.Block, key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - cbc := &cbcCipher{ - mac: macModes[algs.MAC].new(macKey), - decrypter: cipher.NewCBCDecrypter(c, iv), - encrypter: cipher.NewCBCEncrypter(c, iv), - packetData: make([]byte, 1024), - } - if cbc.mac != nil { - cbc.macSize = uint32(cbc.mac.Size()) - } - - return cbc, nil -} - -func newAESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - cbc, err := newCBCCipher(c, key, iv, macKey, algs) - if err != nil { - return nil, err - } - - return cbc, nil -} - -func newTripleDESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - c, err := des.NewTripleDESCipher(key) - if err != nil { - return nil, err - } - - cbc, err := newCBCCipher(c, key, iv, macKey, algs) - if err != nil { - return nil, err - } - - return cbc, nil -} - -func maxUInt32(a, b int) uint32 { - if a > b { - return uint32(a) - } - return uint32(b) -} - -const ( - cbcMinPacketSizeMultiple = 8 - cbcMinPacketSize = 16 - cbcMinPaddingSize = 4 -) - -// cbcError represents a verification error that may leak information. -type cbcError string - -func (e cbcError) Error() string { return string(e) } - -func (c *cbcCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - p, err := c.readCipherPacketLeaky(seqNum, r) - if err != nil { - if _, ok := err.(cbcError); ok { - // Verification error: read a fixed amount of - // data, to make distinguishing between - // failing MAC and failing length check more - // difficult. - io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage)) - } - } - return p, err -} - -func (c *cbcCipher) readCipherPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) { - blockSize := c.decrypter.BlockSize() - - // Read the header, which will include some of the subsequent data in the - // case of block ciphers - this is copied back to the payload later. - // How many bytes of payload/padding will be read with this first read. - firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize) - firstBlock := c.packetData[:firstBlockLength] - if _, err := io.ReadFull(r, firstBlock); err != nil { - return nil, err - } - - c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength - - c.decrypter.CryptBlocks(firstBlock, firstBlock) - length := binary.BigEndian.Uint32(firstBlock[:4]) - if length > maxPacket { - return nil, cbcError("ssh: packet too large") - } - if length+4 < maxUInt32(cbcMinPacketSize, blockSize) { - // The minimum size of a packet is 16 (or the cipher block size, whichever - // is larger) bytes. - return nil, cbcError("ssh: packet too small") - } - // The length of the packet (including the length field but not the MAC) must - // be a multiple of the block size or 8, whichever is larger. 
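The CBC construction above is only reachable when a caller opts in, as the cipherModes comment noted earlier. A hedged sketch of that opt-in through the public Config; the host, user and password are placeholders, and InsecureIgnoreHostKey is for illustration only:

package main

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// CBC mode is insecure; listing it is strictly for interoperability
	// with legacy peers that offer nothing better.
	cfg := &ssh.ClientConfig{
		Config: ssh.Config{
			Ciphers: []string{"aes128-cbc", "aes128-ctr"},
		},
		User:            "demo",
		Auth:            []ssh.AuthMethod{ssh.Password("secret")},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // illustration only
	}
	client, err := ssh.Dial("tcp", "legacy.example.com:22", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}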
- if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 { - return nil, cbcError("ssh: invalid packet length multiple") - } - - paddingLength := uint32(firstBlock[4]) - if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 { - return nil, cbcError("ssh: invalid packet length") - } - - // Positions within the c.packetData buffer: - macStart := 4 + length - paddingStart := macStart - paddingLength - - // Entire packet size, starting before length, ending at end of mac. - entirePacketSize := macStart + c.macSize - - // Ensure c.packetData is large enough for the entire packet data. - if uint32(cap(c.packetData)) < entirePacketSize { - // Still need to upsize and copy, but this should be rare at runtime, only - // on upsizing the packetData buffer. - c.packetData = make([]byte, entirePacketSize) - copy(c.packetData, firstBlock) - } else { - c.packetData = c.packetData[:entirePacketSize] - } - - n, err := io.ReadFull(r, c.packetData[firstBlockLength:]) - if err != nil { - return nil, err - } - c.oracleCamouflage -= uint32(n) - - remainingCrypted := c.packetData[firstBlockLength:macStart] - c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted) - - mac := c.packetData[macStart:] - if c.mac != nil { - c.mac.Reset() - binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) - c.mac.Write(c.seqNumBytes[:]) - c.mac.Write(c.packetData[:macStart]) - c.macResult = c.mac.Sum(c.macResult[:0]) - if subtle.ConstantTimeCompare(c.macResult, mac) != 1 { - return nil, cbcError("ssh: MAC failure") - } - } - - return c.packetData[prefixLen:paddingStart], nil -} - -func (c *cbcCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize()) - - // Length of encrypted portion of the packet (header, payload, padding). - // Enforce minimum padding and packet size. - encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize) - // Enforce block size. - encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize - - length := encLength - 4 - paddingLength := int(length) - (1 + len(packet)) - - // Overall buffer contains: header, payload, padding, mac. - // Space for the MAC is reserved in the capacity but not the slice length. - bufferSize := encLength + c.macSize - if uint32(cap(c.packetData)) < bufferSize { - c.packetData = make([]byte, encLength, bufferSize) - } else { - c.packetData = c.packetData[:encLength] - } - - p := c.packetData - - // Packet header. - binary.BigEndian.PutUint32(p, length) - p = p[4:] - p[0] = byte(paddingLength) - - // Payload. - p = p[1:] - copy(p, packet) - - // Padding. - p = p[len(packet):] - if _, err := io.ReadFull(rand, p); err != nil { - return err - } - - if c.mac != nil { - c.mac.Reset() - binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) - c.mac.Write(c.seqNumBytes[:]) - c.mac.Write(c.packetData) - // The MAC is now appended into the capacity reserved for it earlier. 
- c.packetData = c.mac.Sum(c.packetData) - } - - c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength]) - - if _, err := w.Write(c.packetData); err != nil { - return err - } - - return nil -} - -const chacha20Poly1305ID = "chacha20-poly1305@openssh.com" - -// chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com -// AEAD, which is described here: -// -// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00 -// -// the methods here also implement padding, which RFC4253 Section 6 -// also requires of stream ciphers. -type chacha20Poly1305Cipher struct { - lengthKey [32]byte - contentKey [32]byte - buf []byte -} - -func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { - if len(key) != 64 { - panic(len(key)) - } - - c := &chacha20Poly1305Cipher{ - buf: make([]byte, 256), - } - - copy(c.contentKey[:], key[:32]) - copy(c.lengthKey[:], key[32:]) - return c, nil -} - -func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - nonce := make([]byte, 12) - binary.BigEndian.PutUint32(nonce[8:], seqNum) - s, err := chacha20.NewUnauthenticatedCipher(c.contentKey[:], nonce) - if err != nil { - return nil, err - } - var polyKey, discardBuf [32]byte - s.XORKeyStream(polyKey[:], polyKey[:]) - s.XORKeyStream(discardBuf[:], discardBuf[:]) // skip the next 32 bytes - - encryptedLength := c.buf[:4] - if _, err := io.ReadFull(r, encryptedLength); err != nil { - return nil, err - } - - var lenBytes [4]byte - ls, err := chacha20.NewUnauthenticatedCipher(c.lengthKey[:], nonce) - if err != nil { - return nil, err - } - ls.XORKeyStream(lenBytes[:], encryptedLength) - - length := binary.BigEndian.Uint32(lenBytes[:]) - if length > maxPacket { - return nil, errors.New("ssh: invalid packet length, packet too large") - } - - contentEnd := 4 + length - packetEnd := contentEnd + poly1305.TagSize - if uint32(cap(c.buf)) < packetEnd { - c.buf = make([]byte, packetEnd) - copy(c.buf[:], encryptedLength) - } else { - c.buf = c.buf[:packetEnd] - } - - if _, err := io.ReadFull(r, c.buf[4:packetEnd]); err != nil { - return nil, err - } - - var mac [poly1305.TagSize]byte - copy(mac[:], c.buf[contentEnd:packetEnd]) - if !poly1305.Verify(&mac, c.buf[:contentEnd], &polyKey) { - return nil, errors.New("ssh: MAC failure") - } - - plain := c.buf[4:contentEnd] - s.XORKeyStream(plain, plain) - - padding := plain[0] - if padding < 4 { - // padding is a byte, so it automatically satisfies - // the maximum size, which is 255. - return nil, fmt.Errorf("ssh: illegal padding %d", padding) - } - - if int(padding)+1 >= len(plain) { - return nil, fmt.Errorf("ssh: padding %d too large", padding) - } - - plain = plain[1 : len(plain)-int(padding)] - - return plain, nil -} - -func (c *chacha20Poly1305Cipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, payload []byte) error { - nonce := make([]byte, 12) - binary.BigEndian.PutUint32(nonce[8:], seqNum) - s, err := chacha20.NewUnauthenticatedCipher(c.contentKey[:], nonce) - if err != nil { - return err - } - var polyKey, discardBuf [32]byte - s.XORKeyStream(polyKey[:], polyKey[:]) - s.XORKeyStream(discardBuf[:], discardBuf[:]) // skip the next 32 bytes - - // There is no blocksize, so fall back to multiple of 8 byte - // padding, as described in RFC 4253, Sec 6. 
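The lengthKey/contentKey split and the sequence-number nonce above are the core of the OpenSSH ChaCha20-Poly1305 construction. A standalone sketch of the per-packet Poly1305 key derivation using only the public chacha20 package; the zero key in main is demo input:

package main

import (
	"encoding/binary"
	"fmt"

	"golang.org/x/crypto/chacha20"
)

// polyKeyFor sketches the setup used above: the 96-bit nonce carries the
// packet sequence number in its low 32 bits, the first 32 keystream bytes
// become the one-time Poly1305 key, and the next 32 are discarded.
func polyKeyFor(contentKey [32]byte, seqNum uint32) ([32]byte, error) {
	var polyKey, discard [32]byte
	nonce := make([]byte, 12)
	binary.BigEndian.PutUint32(nonce[8:], seqNum)

	s, err := chacha20.NewUnauthenticatedCipher(contentKey[:], nonce)
	if err != nil {
		return polyKey, err
	}
	s.XORKeyStream(polyKey[:], polyKey[:])
	s.XORKeyStream(discard[:], discard[:]) // advance past the reserved block
	return polyKey, nil
}

func main() {
	var key [32]byte // zero key, demo only
	pk, _ := polyKeyFor(key, 7)
	fmt.Printf("%x\n", pk[:8])
}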
- const packetSizeMultiple = 8 - - padding := packetSizeMultiple - (1+len(payload))%packetSizeMultiple - if padding < 4 { - padding += packetSizeMultiple - } - - // size (4 bytes), padding (1), payload, padding, tag. - totalLength := 4 + 1 + len(payload) + padding + poly1305.TagSize - if cap(c.buf) < totalLength { - c.buf = make([]byte, totalLength) - } else { - c.buf = c.buf[:totalLength] - } - - binary.BigEndian.PutUint32(c.buf, uint32(1+len(payload)+padding)) - ls, err := chacha20.NewUnauthenticatedCipher(c.lengthKey[:], nonce) - if err != nil { - return err - } - ls.XORKeyStream(c.buf, c.buf[:4]) - c.buf[4] = byte(padding) - copy(c.buf[5:], payload) - packetEnd := 5 + len(payload) + padding - if _, err := io.ReadFull(rand, c.buf[5+len(payload):packetEnd]); err != nil { - return err - } - - s.XORKeyStream(c.buf[4:], c.buf[4:packetEnd]) - - var mac [poly1305.TagSize]byte - poly1305.Sum(&mac, c.buf[:packetEnd], &polyKey) - - copy(c.buf[packetEnd:], mac[:]) - - if _, err := w.Write(c.buf); err != nil { - return err - } - return nil -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/client.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/client.go deleted file mode 100644 index 7b00bff1caa..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/client.go +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "net" - "os" - "sync" - "time" -) - -// Client implements a traditional SSH client that supports shells, -// subprocesses, TCP port/streamlocal forwarding and tunneled dialing. -type Client struct { - Conn - - handleForwardsOnce sync.Once // guards calling (*Client).handleForwards - - forwards forwardList // forwarded tcpip connections from the remote side - mu sync.Mutex - channelHandlers map[string]chan NewChannel -} - -// HandleChannelOpen returns a channel on which NewChannel requests -// for the given type are sent. If the type already is being handled, -// nil is returned. The channel is closed when the connection is closed. -func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel { - c.mu.Lock() - defer c.mu.Unlock() - if c.channelHandlers == nil { - // The SSH channel has been closed. - c := make(chan NewChannel) - close(c) - return c - } - - ch := c.channelHandlers[channelType] - if ch != nil { - return nil - } - - ch = make(chan NewChannel, chanSize) - c.channelHandlers[channelType] = ch - return ch -} - -// NewClient creates a Client on top of the given connection. -func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client { - conn := &Client{ - Conn: c, - channelHandlers: make(map[string]chan NewChannel, 1), - } - - go conn.handleGlobalRequests(reqs) - go conn.handleChannelOpens(chans) - go func() { - conn.Wait() - conn.forwards.closeAll() - }() - return conn -} - -// NewClientConn establishes an authenticated SSH connection using c -// as the underlying transport. The Request and NewChannel channels -// must be serviced or the connection will hang. 
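NewClientConn exists precisely for transports that ssh.Dial cannot produce, such as a connection already tunneled through a proxy. A sketch of the pattern the comment above describes, with a placeholder address and credentials:

package main

import (
	"log"
	"net"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Any pre-established net.Conn works here, e.g. one from a proxy dialer.
	raw, err := net.Dial("tcp", "example.com:22")
	if err != nil {
		log.Fatal(err)
	}
	conf := &ssh.ClientConfig{
		User:            "demo",
		Auth:            []ssh.AuthMethod{ssh.Password("secret")},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // demo only
	}
	c, chans, reqs, err := ssh.NewClientConn(raw, "example.com:22", conf)
	if err != nil {
		log.Fatal(err)
	}
	// NewClient services the channel and request streams, which must not
	// be left unread.
	client := ssh.NewClient(c, chans, reqs)
	defer client.Close()
}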
-func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) { - fullConf := *config - fullConf.SetDefaults() - if fullConf.HostKeyCallback == nil { - c.Close() - return nil, nil, nil, errors.New("ssh: must specify HostKeyCallback") - } - - conn := &connection{ - sshConn: sshConn{conn: c}, - } - - if err := conn.clientHandshake(addr, &fullConf); err != nil { - c.Close() - return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err) - } - conn.mux = newMux(conn.transport) - return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil -} - -// clientHandshake performs the client side key exchange. See RFC 4253 Section -// 7. -func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error { - if config.ClientVersion != "" { - c.clientVersion = []byte(config.ClientVersion) - } else { - c.clientVersion = []byte(packageVersion) - } - var err error - c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion) - if err != nil { - return err - } - - c.transport = newClientTransport( - newTransport(c.sshConn.conn, config.Rand, true /* is client */), - c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr()) - if err := c.transport.waitSession(); err != nil { - return err - } - - c.sessionID = c.transport.getSessionID() - return c.clientAuthenticate(config) -} - -// verifyHostKeySignature verifies the host key obtained in the key -// exchange. -func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error { - sig, rest, ok := parseSignatureBody(result.Signature) - if len(rest) > 0 || !ok { - return errors.New("ssh: signature parse error") - } - - return hostKey.Verify(result.H, sig) -} - -// NewSession opens a new Session for this client. (A session is a remote -// execution of a program.) -func (c *Client) NewSession() (*Session, error) { - ch, in, err := c.OpenChannel("session", nil) - if err != nil { - return nil, err - } - return newSession(ch, in) -} - -func (c *Client) handleGlobalRequests(incoming <-chan *Request) { - for r := range incoming { - // This handles keepalive messages and matches - // the behaviour of OpenSSH. - r.Reply(false, nil) - } -} - -// handleChannelOpens channel open messages from the remote side. -func (c *Client) handleChannelOpens(in <-chan NewChannel) { - for ch := range in { - c.mu.Lock() - handler := c.channelHandlers[ch.ChannelType()] - c.mu.Unlock() - - if handler != nil { - handler <- ch - } else { - ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType())) - } - } - - c.mu.Lock() - for _, ch := range c.channelHandlers { - close(ch) - } - c.channelHandlers = nil - c.mu.Unlock() -} - -// Dial starts a client connection to the given SSH server. It is a -// convenience function that connects to the given network address, -// initiates the SSH handshake, and then sets up a Client. For access -// to incoming channels and requests, use net.Dial with NewClientConn -// instead. -func Dial(network, addr string, config *ClientConfig) (*Client, error) { - conn, err := net.DialTimeout(network, addr, config.Timeout) - if err != nil { - return nil, err - } - c, chans, reqs, err := NewClientConn(conn, addr, config) - if err != nil { - return nil, err - } - return NewClient(c, chans, reqs), nil -} - -// HostKeyCallback is the function type used for verifying server -// keys. A HostKeyCallback must return nil if the host key is OK, or -// an error to reject it. 
It receives the hostname as passed to Dial -// or NewClientConn. The remote address is the RemoteAddr of the -// net.Conn underlying the SSH connection. -type HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error - -// BannerCallback is the function type used for treat the banner sent by -// the server. A BannerCallback receives the message sent by the remote server. -type BannerCallback func(message string) error - -// A ClientConfig structure is used to configure a Client. It must not be -// modified after having been passed to an SSH function. -type ClientConfig struct { - // Config contains configuration that is shared between clients and - // servers. - Config - - // User contains the username to authenticate as. - User string - - // Auth contains possible authentication methods to use with the - // server. Only the first instance of a particular RFC 4252 method will - // be used during authentication. - Auth []AuthMethod - - // HostKeyCallback is called during the cryptographic - // handshake to validate the server's host key. The client - // configuration must supply this callback for the connection - // to succeed. The functions InsecureIgnoreHostKey or - // FixedHostKey can be used for simplistic host key checks. - HostKeyCallback HostKeyCallback - - // BannerCallback is called during the SSH dance to display a custom - // server's message. The client configuration can supply this callback to - // handle it as wished. The function BannerDisplayStderr can be used for - // simplistic display on Stderr. - BannerCallback BannerCallback - - // ClientVersion contains the version identification string that will - // be used for the connection. If empty, a reasonable default is used. - ClientVersion string - - // HostKeyAlgorithms lists the key types that the client will - // accept from the server as host key, in order of - // preference. If empty, a reasonable default is used. Any - // string returned from PublicKey.Type method may be used, or - // any of the CertAlgoXxxx and KeyAlgoXxxx constants. - HostKeyAlgorithms []string - - // Timeout is the maximum amount of time for the TCP connection to establish. - // - // A Timeout of zero means no timeout. - Timeout time.Duration -} - -// InsecureIgnoreHostKey returns a function that can be used for -// ClientConfig.HostKeyCallback to accept any host key. It should -// not be used for production code. -func InsecureIgnoreHostKey() HostKeyCallback { - return func(hostname string, remote net.Addr, key PublicKey) error { - return nil - } -} - -type fixedHostKey struct { - key PublicKey -} - -func (f *fixedHostKey) check(hostname string, remote net.Addr, key PublicKey) error { - if f.key == nil { - return fmt.Errorf("ssh: required host key was nil") - } - if !bytes.Equal(key.Marshal(), f.key.Marshal()) { - return fmt.Errorf("ssh: host key mismatch") - } - return nil -} - -// FixedHostKey returns a function for use in -// ClientConfig.HostKeyCallback to accept only a specific host key. -func FixedHostKey(key PublicKey) HostKeyCallback { - hk := &fixedHostKey{key} - return hk.check -} - -// BannerDisplayStderr returns a function that can be used for -// ClientConfig.BannerCallback to display banners on os.Stderr. 
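In practice the callback is usually one of the two helpers declared above. A sketch pinning a single known server key with FixedHostKey; the authorized_keys line is a placeholder to be replaced with the server's real public key:

package main

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Replace with the server's actual public key line.
	const hostKeyLine = "ssh-ed25519 AAAA... example.com"

	pub, _, _, _, err := ssh.ParseAuthorizedKey([]byte(hostKeyLine))
	if err != nil {
		log.Fatal(err)
	}
	cfg := &ssh.ClientConfig{
		User:            "demo",
		Auth:            []ssh.AuthMethod{ssh.Password("secret")},
		HostKeyCallback: ssh.FixedHostKey(pub),
	}
	_ = cfg
}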
-func BannerDisplayStderr() BannerCallback { - return func(banner string) error { - _, err := os.Stderr.WriteString(banner) - - return err - } -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/client_auth.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/client_auth.go deleted file mode 100644 index f3265655eec..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/client_auth.go +++ /dev/null @@ -1,641 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" -) - -type authResult int - -const ( - authFailure authResult = iota - authPartialSuccess - authSuccess -) - -// clientAuthenticate authenticates with the remote server. See RFC 4252. -func (c *connection) clientAuthenticate(config *ClientConfig) error { - // initiate user auth session - if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil { - return err - } - packet, err := c.transport.readPacket() - if err != nil { - return err - } - var serviceAccept serviceAcceptMsg - if err := Unmarshal(packet, &serviceAccept); err != nil { - return err - } - - // during the authentication phase the client first attempts the "none" method - // then any untried methods suggested by the server. - var tried []string - var lastMethods []string - - sessionID := c.transport.getSessionID() - for auth := AuthMethod(new(noneAuth)); auth != nil; { - ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand) - if err != nil { - return err - } - if ok == authSuccess { - // success - return nil - } else if ok == authFailure { - if m := auth.method(); !contains(tried, m) { - tried = append(tried, m) - } - } - if methods == nil { - methods = lastMethods - } - lastMethods = methods - - auth = nil - - findNext: - for _, a := range config.Auth { - candidateMethod := a.method() - if contains(tried, candidateMethod) { - continue - } - for _, meth := range methods { - if meth == candidateMethod { - auth = a - break findNext - } - } - } - } - return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried) -} - -func contains(list []string, e string) bool { - for _, s := range list { - if s == e { - return true - } - } - return false -} - -// An AuthMethod represents an instance of an RFC 4252 authentication method. -type AuthMethod interface { - // auth authenticates user over transport t. - // Returns true if authentication is successful. - // If authentication is not successful, a []string of alternative - // method names is returned. If the slice is nil, it will be ignored - // and the previous set of possible methods will be reused. - auth(session []byte, user string, p packetConn, rand io.Reader) (authResult, []string, error) - - // method returns the RFC 4252 method name. - method() string -} - -// "none" authentication, RFC 4252 section 5.2. -type noneAuth int - -func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { - if err := c.writePacket(Marshal(&userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: "none", - })); err != nil { - return authFailure, nil, err - } - - return handleAuthResponse(c) -} - -func (n *noneAuth) method() string { - return "none" -} - -// passwordCallback is an AuthMethod that fetches the password through -// a function call, e.g. by prompting the user. 
-type passwordCallback func() (password string, err error) - -func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { - type passwordAuthMsg struct { - User string `sshtype:"50"` - Service string - Method string - Reply bool - Password string - } - - pw, err := cb() - // REVIEW NOTE: is there a need to support skipping a password attempt? - // The program may only find out that the user doesn't have a password - // when prompting. - if err != nil { - return authFailure, nil, err - } - - if err := c.writePacket(Marshal(&passwordAuthMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - Reply: false, - Password: pw, - })); err != nil { - return authFailure, nil, err - } - - return handleAuthResponse(c) -} - -func (cb passwordCallback) method() string { - return "password" -} - -// Password returns an AuthMethod using the given password. -func Password(secret string) AuthMethod { - return passwordCallback(func() (string, error) { return secret, nil }) -} - -// PasswordCallback returns an AuthMethod that uses a callback for -// fetching a password. -func PasswordCallback(prompt func() (secret string, err error)) AuthMethod { - return passwordCallback(prompt) -} - -type publickeyAuthMsg struct { - User string `sshtype:"50"` - Service string - Method string - // HasSig indicates to the receiver packet that the auth request is signed and - // should be used for authentication of the request. - HasSig bool - Algoname string - PubKey []byte - // Sig is tagged with "rest" so Marshal will exclude it during - // validateKey - Sig []byte `ssh:"rest"` -} - -// publicKeyCallback is an AuthMethod that uses a set of key -// pairs for authentication. -type publicKeyCallback func() ([]Signer, error) - -func (cb publicKeyCallback) method() string { - return "publickey" -} - -func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { - // Authentication is performed by sending an enquiry to test if a key is - // acceptable to the remote. If the key is acceptable, the client will - // attempt to authenticate with the valid key. If not the client will repeat - // the process with the remaining keys. - - signers, err := cb() - if err != nil { - return authFailure, nil, err - } - var methods []string - for _, signer := range signers { - ok, err := validateKey(signer.PublicKey(), user, c) - if err != nil { - return authFailure, nil, err - } - if !ok { - continue - } - - pub := signer.PublicKey() - pubKey := pub.Marshal() - sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - }, []byte(pub.Type()), pubKey)) - if err != nil { - return authFailure, nil, err - } - - // manually wrap the serialized signature in a string - s := Marshal(sign) - sig := make([]byte, stringLength(len(s))) - marshalString(sig, s) - msg := publickeyAuthMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - HasSig: true, - Algoname: pub.Type(), - PubKey: pubKey, - Sig: sig, - } - p := Marshal(&msg) - if err := c.writePacket(p); err != nil { - return authFailure, nil, err - } - var success authResult - success, methods, err = handleAuthResponse(c) - if err != nil { - return authFailure, nil, err - } - - // If authentication succeeds or the list of available methods does not - // contain the "publickey" method, do not attempt to authenticate with any - // other keys. 
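The enquiry-then-sign flow above is normally driven by a Signer loaded from disk. A sketch of the usual setup; the key path is a placeholder:

package main

import (
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	pem, err := ioutil.ReadFile("/home/demo/.ssh/id_ed25519") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.ParsePrivateKey(pem)
	if err != nil {
		log.Fatal(err)
	}
	cfg := &ssh.ClientConfig{
		User:            "demo",
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // demo only
	}
	_ = cfg
}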
According to RFC 4252 Section 7, the latter can occur when - // additional authentication methods are required. - if success == authSuccess || !containsMethod(methods, cb.method()) { - return success, methods, err - } - } - - return authFailure, methods, nil -} - -func containsMethod(methods []string, method string) bool { - for _, m := range methods { - if m == method { - return true - } - } - - return false -} - -// validateKey validates the key provided is acceptable to the server. -func validateKey(key PublicKey, user string, c packetConn) (bool, error) { - pubKey := key.Marshal() - msg := publickeyAuthMsg{ - User: user, - Service: serviceSSH, - Method: "publickey", - HasSig: false, - Algoname: key.Type(), - PubKey: pubKey, - } - if err := c.writePacket(Marshal(&msg)); err != nil { - return false, err - } - - return confirmKeyAck(key, c) -} - -func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { - pubKey := key.Marshal() - algoname := key.Type() - - for { - packet, err := c.readPacket() - if err != nil { - return false, err - } - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return false, err - } - case msgUserAuthPubKeyOk: - var msg userAuthPubKeyOkMsg - if err := Unmarshal(packet, &msg); err != nil { - return false, err - } - if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) { - return false, nil - } - return true, nil - case msgUserAuthFailure: - return false, nil - default: - return false, unexpectedMessageError(msgUserAuthSuccess, packet[0]) - } - } -} - -// PublicKeys returns an AuthMethod that uses the given key -// pairs. -func PublicKeys(signers ...Signer) AuthMethod { - return publicKeyCallback(func() ([]Signer, error) { return signers, nil }) -} - -// PublicKeysCallback returns an AuthMethod that runs the given -// function to obtain a list of key pairs. -func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod { - return publicKeyCallback(getSigners) -} - -// handleAuthResponse returns whether the preceding authentication request succeeded -// along with a list of remaining authentication methods to try next and -// an error if an unexpected response was received. -func handleAuthResponse(c packetConn) (authResult, []string, error) { - for { - packet, err := c.readPacket() - if err != nil { - return authFailure, nil, err - } - - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return authFailure, nil, err - } - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - if msg.PartialSuccess { - return authPartialSuccess, msg.Methods, nil - } - return authFailure, msg.Methods, nil - case msgUserAuthSuccess: - return authSuccess, nil, nil - default: - return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) - } - } -} - -func handleBannerResponse(c packetConn, packet []byte) error { - var msg userAuthBannerMsg - if err := Unmarshal(packet, &msg); err != nil { - return err - } - - transport, ok := c.(*handshakeTransport) - if !ok { - return nil - } - - if transport.bannerCallback != nil { - return transport.bannerCallback(msg.Message) - } - - return nil -} - -// KeyboardInteractiveChallenge should print questions, optionally -// disabling echoing (e.g. for passwords), and return all the answers. -// Challenge may be called multiple times in a single session. 
After -// successful authentication, the server may send a challenge with no -// questions, for which the user and instruction messages should be -// printed. RFC 4256 section 3.3 details how the UI should behave for -// both CLI and GUI environments. -type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error) - -// KeyboardInteractive returns an AuthMethod using a prompt/response -// sequence controlled by the server. -func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod { - return challenge -} - -func (cb KeyboardInteractiveChallenge) method() string { - return "keyboard-interactive" -} - -func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { - type initiateMsg struct { - User string `sshtype:"50"` - Service string - Method string - Language string - Submethods string - } - - if err := c.writePacket(Marshal(&initiateMsg{ - User: user, - Service: serviceSSH, - Method: "keyboard-interactive", - })); err != nil { - return authFailure, nil, err - } - - for { - packet, err := c.readPacket() - if err != nil { - return authFailure, nil, err - } - - // like handleAuthResponse, but with less options. - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return authFailure, nil, err - } - continue - case msgUserAuthInfoRequest: - // OK - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - if msg.PartialSuccess { - return authPartialSuccess, msg.Methods, nil - } - return authFailure, msg.Methods, nil - case msgUserAuthSuccess: - return authSuccess, nil, nil - default: - return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) - } - - var msg userAuthInfoRequestMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - - // Manually unpack the prompt/echo pairs. 
- rest := msg.Prompts - var prompts []string - var echos []bool - for i := 0; i < int(msg.NumPrompts); i++ { - prompt, r, ok := parseString(rest) - if !ok || len(r) == 0 { - return authFailure, nil, errors.New("ssh: prompt format error") - } - prompts = append(prompts, string(prompt)) - echos = append(echos, r[0] != 0) - rest = r[1:] - } - - if len(rest) != 0 { - return authFailure, nil, errors.New("ssh: extra data following keyboard-interactive pairs") - } - - answers, err := cb(msg.User, msg.Instruction, prompts, echos) - if err != nil { - return authFailure, nil, err - } - - if len(answers) != len(prompts) { - return authFailure, nil, errors.New("ssh: not enough answers from keyboard-interactive callback") - } - responseLength := 1 + 4 - for _, a := range answers { - responseLength += stringLength(len(a)) - } - serialized := make([]byte, responseLength) - p := serialized - p[0] = msgUserAuthInfoResponse - p = p[1:] - p = marshalUint32(p, uint32(len(answers))) - for _, a := range answers { - p = marshalString(p, []byte(a)) - } - - if err := c.writePacket(serialized); err != nil { - return authFailure, nil, err - } - } -} - -type retryableAuthMethod struct { - authMethod AuthMethod - maxTries int -} - -func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok authResult, methods []string, err error) { - for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ { - ok, methods, err = r.authMethod.auth(session, user, c, rand) - if ok != authFailure || err != nil { // either success, partial success or error terminate - return ok, methods, err - } - } - return ok, methods, err -} - -func (r *retryableAuthMethod) method() string { - return r.authMethod.method() -} - -// RetryableAuthMethod is a decorator for other auth methods enabling them to -// be retried up to maxTries before considering that AuthMethod itself failed. -// If maxTries is <= 0, will retry indefinitely -// -// This is useful for interactive clients using challenge/response type -// authentication (e.g. Keyboard-Interactive, Password, etc) where the user -// could mistype their response resulting in the server issuing a -// SSH_MSG_USERAUTH_FAILURE (rfc4252 #8 [password] and rfc4256 #3.4 -// [keyboard-interactive]); Without this decorator, the non-retryable -// AuthMethod would be removed from future consideration, and never tried again -// (and so the user would never be able to retry their entry). -func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod { - return &retryableAuthMethod{authMethod: auth, maxTries: maxTries} -} - -// GSSAPIWithMICAuthMethod is an AuthMethod with "gssapi-with-mic" authentication. -// See RFC 4462 section 3 -// gssAPIClient is implementation of the GSSAPIClient interface, see the definition of the interface for details. -// target is the server host you want to log in to. -func GSSAPIWithMICAuthMethod(gssAPIClient GSSAPIClient, target string) AuthMethod { - if gssAPIClient == nil { - panic("gss-api client must be not nil with enable gssapi-with-mic") - } - return &gssAPIWithMICCallback{gssAPIClient: gssAPIClient, target: target} -} - -type gssAPIWithMICCallback struct { - gssAPIClient GSSAPIClient - target string -} - -func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { - m := &userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: g.method(), - } - // The GSS-API authentication method is initiated when the client sends an SSH_MSG_USERAUTH_REQUEST. 
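RetryableAuthMethod above pairs naturally with keyboard-interactive, where a typo would otherwise burn the only attempt. A terminal-driven sketch; stdin handling is simplified and echos is ignored, so this version would also echo passwords:

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"

	"golang.org/x/crypto/ssh"
)

func main() {
	challenge := func(user, instruction string, questions []string, echos []bool) ([]string, error) {
		r := bufio.NewReader(os.Stdin)
		if instruction != "" {
			fmt.Println(instruction)
		}
		answers := make([]string, len(questions))
		for i, q := range questions {
			fmt.Print(q)
			line, err := r.ReadString('\n')
			if err != nil {
				return nil, err
			}
			answers[i] = strings.TrimRight(line, "\r\n")
		}
		return answers, nil
	}
	// Allow up to three attempts before the method is considered failed.
	auth := ssh.RetryableAuthMethod(ssh.KeyboardInteractive(challenge), 3)
	_ = auth
}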
- // See RFC 4462 section 3.2. - m.Payload = appendU32(m.Payload, 1) - m.Payload = appendString(m.Payload, string(krb5OID)) - if err := c.writePacket(Marshal(m)); err != nil { - return authFailure, nil, err - } - // The server responds to the SSH_MSG_USERAUTH_REQUEST with either an - // SSH_MSG_USERAUTH_FAILURE if none of the mechanisms are supported or - // with an SSH_MSG_USERAUTH_GSSAPI_RESPONSE. - // See RFC 4462 section 3.3. - // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication,so I don't want to check - // selected mech if it is valid. - packet, err := c.readPacket() - if err != nil { - return authFailure, nil, err - } - userAuthGSSAPIResp := &userAuthGSSAPIResponse{} - if err := Unmarshal(packet, userAuthGSSAPIResp); err != nil { - return authFailure, nil, err - } - // Start the loop into the exchange token. - // See RFC 4462 section 3.4. - var token []byte - defer g.gssAPIClient.DeleteSecContext() - for { - // Initiates the establishment of a security context between the application and a remote peer. - nextToken, needContinue, err := g.gssAPIClient.InitSecContext("host@"+g.target, token, false) - if err != nil { - return authFailure, nil, err - } - if len(nextToken) > 0 { - if err := c.writePacket(Marshal(&userAuthGSSAPIToken{ - Token: nextToken, - })); err != nil { - return authFailure, nil, err - } - } - if !needContinue { - break - } - packet, err = c.readPacket() - if err != nil { - return authFailure, nil, err - } - switch packet[0] { - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - if msg.PartialSuccess { - return authPartialSuccess, msg.Methods, nil - } - return authFailure, msg.Methods, nil - case msgUserAuthGSSAPIError: - userAuthGSSAPIErrorResp := &userAuthGSSAPIError{} - if err := Unmarshal(packet, userAuthGSSAPIErrorResp); err != nil { - return authFailure, nil, err - } - return authFailure, nil, fmt.Errorf("GSS-API Error:\n"+ - "Major Status: %d\n"+ - "Minor Status: %d\n"+ - "Error Message: %s\n", userAuthGSSAPIErrorResp.MajorStatus, userAuthGSSAPIErrorResp.MinorStatus, - userAuthGSSAPIErrorResp.Message) - case msgUserAuthGSSAPIToken: - userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} - if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { - return authFailure, nil, err - } - token = userAuthGSSAPITokenReq.Token - } - } - // Binding Encryption Keys. - // See RFC 4462 section 3.5. - micField := buildMIC(string(session), user, "ssh-connection", "gssapi-with-mic") - micToken, err := g.gssAPIClient.GetMIC(micField) - if err != nil { - return authFailure, nil, err - } - if err := c.writePacket(Marshal(&userAuthGSSAPIMIC{ - MIC: micToken, - })); err != nil { - return authFailure, nil, err - } - return handleAuthResponse(c) -} - -func (g *gssAPIWithMICCallback) method() string { - return "gssapi-with-mic" -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/common.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/common.go deleted file mode 100644 index 290382d059e..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/common.go +++ /dev/null @@ -1,404 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto" - "crypto/rand" - "fmt" - "io" - "math" - "sync" - - _ "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" -) - -// These are string constants in the SSH protocol. 
-const ( - compressionNone = "none" - serviceUserAuth = "ssh-userauth" - serviceSSH = "ssh-connection" -) - -// supportedCiphers lists ciphers we support but might not recommend. -var supportedCiphers = []string{ - "aes128-ctr", "aes192-ctr", "aes256-ctr", - "aes128-gcm@openssh.com", - chacha20Poly1305ID, - "arcfour256", "arcfour128", "arcfour", - aes128cbcID, - tripledescbcID, -} - -// preferredCiphers specifies the default preference for ciphers. -var preferredCiphers = []string{ - "aes128-gcm@openssh.com", - chacha20Poly1305ID, - "aes128-ctr", "aes192-ctr", "aes256-ctr", -} - -// supportedKexAlgos specifies the supported key-exchange algorithms in -// preference order. -var supportedKexAlgos = []string{ - kexAlgoCurve25519SHA256, - // P384 and P521 are not constant-time yet, but since we don't - // reuse ephemeral keys, using them for ECDH should be OK. - kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA1, kexAlgoDH1SHA1, -} - -// serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden -// for the server half. -var serverForbiddenKexAlgos = map[string]struct{}{ - kexAlgoDHGEXSHA1: {}, // server half implementation is only minimal to satisfy the automated tests - kexAlgoDHGEXSHA256: {}, // server half implementation is only minimal to satisfy the automated tests -} - -// preferredKexAlgos specifies the default preference for key-exchange algorithms -// in preference order. -var preferredKexAlgos = []string{ - kexAlgoCurve25519SHA256, - kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA1, -} - -// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods -// of authenticating servers) in preference order. -var supportedHostKeyAlgos = []string{ - CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, - CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, - - KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, - KeyAlgoRSA, KeyAlgoDSA, - - KeyAlgoED25519, -} - -// supportedMACs specifies a default set of MAC algorithms in preference order. -// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed -// because they have reached the end of their useful life. -var supportedMACs = []string{ - "hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96", -} - -var supportedCompressions = []string{compressionNone} - -// hashFuncs keeps the mapping of supported algorithms to their respective -// hashes needed for signature verification. -var hashFuncs = map[string]crypto.Hash{ - KeyAlgoRSA: crypto.SHA1, - KeyAlgoDSA: crypto.SHA1, - KeyAlgoECDSA256: crypto.SHA256, - KeyAlgoECDSA384: crypto.SHA384, - KeyAlgoECDSA521: crypto.SHA512, - CertAlgoRSAv01: crypto.SHA1, - CertAlgoDSAv01: crypto.SHA1, - CertAlgoECDSA256v01: crypto.SHA256, - CertAlgoECDSA384v01: crypto.SHA384, - CertAlgoECDSA521v01: crypto.SHA512, -} - -// unexpectedMessageError results when the SSH message that we received didn't -// match what we wanted. -func unexpectedMessageError(expected, got uint8) error { - return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected) -} - -// parseError results from a malformed SSH message. 
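The preference tables above are what SetDefaults falls back to; callers can narrow them per connection. A sketch restricting a client to a modern subset, where the algorithm names must match entries in these tables exactly and the user is a placeholder:

package main

import "golang.org/x/crypto/ssh"

func hardenedConfig() *ssh.ClientConfig {
	return &ssh.ClientConfig{
		Config: ssh.Config{
			Ciphers:      []string{"chacha20-poly1305@openssh.com", "aes128-gcm@openssh.com"},
			KeyExchanges: []string{"curve25519-sha256@libssh.org"},
			MACs:         []string{"hmac-sha2-256-etm@openssh.com"},
		},
		User:              "demo",
		HostKeyCallback:   ssh.InsecureIgnoreHostKey(), // demo only
		HostKeyAlgorithms: []string{ssh.KeyAlgoED25519, ssh.KeyAlgoECDSA256},
	}
}

func main() { _ = hardenedConfig() }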
-func parseError(tag uint8) error {
-	return fmt.Errorf("ssh: parse error in message type %d", tag)
-}
-
-func findCommon(what string, client []string, server []string) (common string, err error) {
-	for _, c := range client {
-		for _, s := range server {
-			if c == s {
-				return c, nil
-			}
-		}
-	}
-	return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server)
-}
-
-// directionAlgorithms records algorithm choices in one direction (either read or write).
-type directionAlgorithms struct {
-	Cipher      string
-	MAC         string
-	Compression string
-}
-
-// rekeyBytes returns the rekeying interval in bytes.
-func (a *directionAlgorithms) rekeyBytes() int64 {
-	// According to RFC4344 block ciphers should rekey after
-	// 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is
-	// 128.
-	switch a.Cipher {
-	case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcmCipherID, aes128cbcID:
-		return 16 * (1 << 32)
-
-	}
-
-	// For others, stick with RFC4253 recommendation to rekey after 1 Gb of data.
-	return 1 << 30
-}
-
-type algorithms struct {
-	kex     string
-	hostKey string
-	w       directionAlgorithms
-	r       directionAlgorithms
-}
-
-func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) {
-	result := &algorithms{}
-
-	result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos)
-	if err != nil {
-		return
-	}
-
-	result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos)
-	if err != nil {
-		return
-	}
-
-	stoc, ctos := &result.w, &result.r
-	if isClient {
-		ctos, stoc = stoc, ctos
-	}
-
-	ctos.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer)
-	if err != nil {
-		return
-	}
-
-	stoc.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient)
-	if err != nil {
-		return
-	}
-
-	ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer)
-	if err != nil {
-		return
-	}
-
-	stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient)
-	if err != nil {
-		return
-	}
-
-	ctos.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer)
-	if err != nil {
-		return
-	}
-
-	stoc.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient)
-	if err != nil {
-		return
-	}
-
-	return result, nil
-}
-
-// If RekeyThreshold is too small, we can't make any progress sending
-// data.
-const minRekeyThreshold uint64 = 256
-
-// Config contains configuration data common to both ServerConfig and
-// ClientConfig.
-type Config struct {
-	// Rand provides the source of entropy for cryptographic
-	// primitives. If Rand is nil, the cryptographic random reader
-	// in package crypto/rand will be used.
-	Rand io.Reader
-
-	// The maximum number of bytes sent or received after which a
-	// new key is negotiated. It must be at least 256. If
-	// unspecified, a size suitable for the chosen cipher is used.
-	RekeyThreshold uint64
-
-	// The allowed key exchange algorithms. If unspecified then a
-	// default set of algorithms is used.
-	KeyExchanges []string
-
-	// The allowed cipher algorithms. If unspecified then a sensible
-	// default is used.
- Ciphers []string - - // The allowed MAC algorithms. If unspecified then a sensible default - // is used. - MACs []string -} - -// SetDefaults sets sensible values for unset fields in config. This is -// exported for testing: Configs passed to SSH functions are copied and have -// default values set automatically. -func (c *Config) SetDefaults() { - if c.Rand == nil { - c.Rand = rand.Reader - } - if c.Ciphers == nil { - c.Ciphers = preferredCiphers - } - var ciphers []string - for _, c := range c.Ciphers { - if cipherModes[c] != nil { - // reject the cipher if we have no cipherModes definition - ciphers = append(ciphers, c) - } - } - c.Ciphers = ciphers - - if c.KeyExchanges == nil { - c.KeyExchanges = preferredKexAlgos - } - - if c.MACs == nil { - c.MACs = supportedMACs - } - - if c.RekeyThreshold == 0 { - // cipher specific default - } else if c.RekeyThreshold < minRekeyThreshold { - c.RekeyThreshold = minRekeyThreshold - } else if c.RekeyThreshold >= math.MaxInt64 { - // Avoid weirdness if somebody uses -1 as a threshold. - c.RekeyThreshold = math.MaxInt64 - } -} - -// buildDataSignedForAuth returns the data that is signed in order to prove -// possession of a private key. See RFC 4252, section 7. -func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte { - data := struct { - Session []byte - Type byte - User string - Service string - Method string - Sign bool - Algo []byte - PubKey []byte - }{ - sessionID, - msgUserAuthRequest, - req.User, - req.Service, - req.Method, - true, - algo, - pubKey, - } - return Marshal(data) -} - -func appendU16(buf []byte, n uint16) []byte { - return append(buf, byte(n>>8), byte(n)) -} - -func appendU32(buf []byte, n uint32) []byte { - return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) -} - -func appendU64(buf []byte, n uint64) []byte { - return append(buf, - byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), - byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) -} - -func appendInt(buf []byte, n int) []byte { - return appendU32(buf, uint32(n)) -} - -func appendString(buf []byte, s string) []byte { - buf = appendU32(buf, uint32(len(s))) - buf = append(buf, s...) - return buf -} - -func appendBool(buf []byte, b bool) []byte { - if b { - return append(buf, 1) - } - return append(buf, 0) -} - -// newCond is a helper to hide the fact that there is no usable zero -// value for sync.Cond. -func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) } - -// window represents the buffer available to clients -// wishing to write to a channel. -type window struct { - *sync.Cond - win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1 - writeWaiters int - closed bool -} - -// add adds win to the amount of window available -// for consumers. -func (w *window) add(win uint32) bool { - // a zero sized window adjust is a noop. - if win == 0 { - return true - } - w.L.Lock() - if w.win+win < win { - w.L.Unlock() - return false - } - w.win += win - // It is unusual that multiple goroutines would be attempting to reserve - // window space, but not guaranteed. Use broadcast to notify all waiters - // that additional window is available. - w.Broadcast() - w.L.Unlock() - return true -} - -// close sets the window to closed, so all reservations fail -// immediately. -func (w *window) close() { - w.L.Lock() - w.closed = true - w.Broadcast() - w.L.Unlock() -} - -// reserve reserves win from the available window capacity. -// If no capacity remains, reserve will block. reserve may -// return less than requested. 
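The appendString and appendU32 helpers above implement the RFC 4251, section 5 wire encoding. A worked standalone example of the same framing; sshString is a hypothetical mirror of appendString, not code from this file:

package main

import (
	"encoding/binary"
	"fmt"
)

// sshString mirrors appendString above: a big-endian uint32 length,
// followed by the raw bytes.
func sshString(buf []byte, s string) []byte {
	var l [4]byte
	binary.BigEndian.PutUint32(l[:], uint32(len(s)))
	return append(append(buf, l[:]...), s...)
}

func main() {
	fmt.Printf("% x\n", sshString(nil, "ssh-userauth"))
	// Output: 00 00 00 0c 73 73 68 2d 75 73 65 72 61 75 74 68
}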
-func (w *window) reserve(win uint32) (uint32, error) {
-	var err error
-	w.L.Lock()
-	w.writeWaiters++
-	w.Broadcast()
-	for w.win == 0 && !w.closed {
-		w.Wait()
-	}
-	w.writeWaiters--
-	if w.win < win {
-		win = w.win
-	}
-	w.win -= win
-	if w.closed {
-		err = io.EOF
-	}
-	w.L.Unlock()
-	return win, err
-}
-
-// waitWriterBlocked waits until some goroutine is blocked for further
-// writes. It is used in tests only.
-func (w *window) waitWriterBlocked() {
-	w.Cond.L.Lock()
-	for w.writeWaiters == 0 {
-		w.Cond.Wait()
-	}
-	w.Cond.L.Unlock()
-}
diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/connection.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/connection.go
deleted file mode 100644
index fd6b0681b51..00000000000
--- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/connection.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
-	"fmt"
-	"net"
-)
-
-// OpenChannelError is returned if the other side rejects an
-// OpenChannel request.
-type OpenChannelError struct {
-	Reason  RejectionReason
-	Message string
-}
-
-func (e *OpenChannelError) Error() string {
-	return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message)
-}
-
-// ConnMetadata holds metadata for the connection.
-type ConnMetadata interface {
-	// User returns the user ID for this connection.
-	User() string
-
-	// SessionID returns the session hash, also denoted by H.
-	SessionID() []byte
-
-	// ClientVersion returns the client's version string as hashed
-	// into the session ID.
-	ClientVersion() []byte
-
-	// ServerVersion returns the server's version string as hashed
-	// into the session ID.
-	ServerVersion() []byte
-
-	// RemoteAddr returns the remote address for this connection.
-	RemoteAddr() net.Addr
-
-	// LocalAddr returns the local address for this connection.
-	LocalAddr() net.Addr
-}
-
-// Conn represents an SSH connection for both server and client roles.
-// Conn is the basis for implementing an application layer, such
-// as ClientConn, which implements the traditional shell access for
-// clients.
-type Conn interface {
-	ConnMetadata
-
-	// SendRequest sends a global request, and returns the
-	// reply. If wantReply is true, it returns the response status
-	// and payload. See also RFC4254, section 4.
-	SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error)
-
-	// OpenChannel tries to open a channel. If the request is
-	// rejected, it returns *OpenChannelError. On success it returns
-	// the SSH Channel and a Go channel for incoming, out-of-band
-	// requests. The Go channel must be serviced, or the
-	// connection will hang.
-	OpenChannel(name string, data []byte) (Channel, <-chan *Request, error)
-
-	// Close closes the underlying network connection.
-	Close() error
-
-	// Wait blocks until the connection has shut down, and returns the
-	// error causing the shutdown.
-	Wait() error
-
-	// TODO(hanwen): consider exposing:
-	//   RequestKeyChange
-	//   Disconnect
-}
-
-// DiscardRequests consumes and rejects all requests from the
-// passed-in channel.
-func DiscardRequests(in <-chan *Request) {
-	for req := range in {
-		if req.WantReply {
-			req.Reply(false, nil)
-		}
-	}
-}
-
-// A connection represents an incoming connection.
-type connection struct {
-	transport *handshakeTransport
-	sshConn
-
-	// The connection protocol.
- *mux -} - -func (c *connection) Close() error { - return c.sshConn.conn.Close() -} - -// sshconn provides net.Conn metadata, but disallows direct reads and -// writes. -type sshConn struct { - conn net.Conn - - user string - sessionID []byte - clientVersion []byte - serverVersion []byte -} - -func dup(src []byte) []byte { - dst := make([]byte, len(src)) - copy(dst, src) - return dst -} - -func (c *sshConn) User() string { - return c.user -} - -func (c *sshConn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -func (c *sshConn) Close() error { - return c.conn.Close() -} - -func (c *sshConn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -func (c *sshConn) SessionID() []byte { - return dup(c.sessionID) -} - -func (c *sshConn) ClientVersion() []byte { - return dup(c.clientVersion) -} - -func (c *sshConn) ServerVersion() []byte { - return dup(c.serverVersion) -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/doc.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/doc.go deleted file mode 100644 index 67b7322c058..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package ssh implements an SSH client and server. - -SSH is a transport security protocol, an authentication protocol and a -family of application protocols. The most typical application level -protocol is a remote shell and this is specifically implemented. However, -the multiplexed nature of SSH is exposed to users that wish to support -others. - -References: - [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD - [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 - -This package does not fall under the stability promise of the Go language itself, -so its API may be changed when pressing needs arise. -*/ -package ssh // import "golang.org/x/crypto/ssh" diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/handshake.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/handshake.go deleted file mode 100644 index 2b10b05a498..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/handshake.go +++ /dev/null @@ -1,647 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto/rand" - "errors" - "fmt" - "io" - "log" - "net" - "sync" -) - -// debugHandshake, if set, prints messages sent and received. Key -// exchange messages are printed as if DH were used, so the debug -// messages are wrong when using ECDH. -const debugHandshake = false - -// chanSize sets the amount of buffering SSH connections. This is -// primarily for testing: setting chanSize=0 uncovers deadlocks more -// quickly. -const chanSize = 16 - -// keyingTransport is a packet based transport that supports key -// changes. It need not be thread-safe. It should pass through -// msgNewKeys in both directions. -type keyingTransport interface { - packetConn - - // prepareKeyChange sets up a key change. The key change for a - // direction will be effected if a msgNewKeys message is sent - // or received. 
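Stepping back to the package documentation deleted above: for readers tracking what this vendored copy provided, the typical client-side use of the package looks roughly like the following sketch (the host, user and password are placeholders, and a real HostKeyCallback should verify the host key instead of ignoring it):

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/crypto/ssh"
    )

    func main() {
        config := &ssh.ClientConfig{
            User:            "demo",
            Auth:            []ssh.AuthMethod{ssh.Password("secret")},
            HostKeyCallback: ssh.InsecureIgnoreHostKey(), // never do this outside examples
        }
        client, err := ssh.Dial("tcp", "host.example.com:22", config)
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()

        session, err := client.NewSession()
        if err != nil {
            log.Fatal(err)
        }
        defer session.Close()

        out, err := session.Output("uptime") // run a command, collect stdout
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%s", out)
    }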
- prepareKeyChange(*algorithms, *kexResult) error -} - -// handshakeTransport implements rekeying on top of a keyingTransport -// and offers a thread-safe writePacket() interface. -type handshakeTransport struct { - conn keyingTransport - config *Config - - serverVersion []byte - clientVersion []byte - - // hostKeys is non-empty if we are the server. In that case, - // it contains all host keys that can be used to sign the - // connection. - hostKeys []Signer - - // hostKeyAlgorithms is non-empty if we are the client. In that case, - // we accept these key types from the server as host key. - hostKeyAlgorithms []string - - // On read error, incoming is closed, and readError is set. - incoming chan []byte - readError error - - mu sync.Mutex - writeError error - sentInitPacket []byte - sentInitMsg *kexInitMsg - pendingPackets [][]byte // Used when a key exchange is in progress. - - // If the read loop wants to schedule a kex, it pings this - // channel, and the write loop will send out a kex - // message. - requestKex chan struct{} - - // If the other side requests or confirms a kex, its kexInit - // packet is sent here for the write loop to find it. - startKex chan *pendingKex - - // data for host key checking - hostKeyCallback HostKeyCallback - dialAddress string - remoteAddr net.Addr - - // bannerCallback is non-empty if we are the client and it has been set in - // ClientConfig. In that case it is called during the user authentication - // dance to handle a custom server's message. - bannerCallback BannerCallback - - // Algorithms agreed in the last key exchange. - algorithms *algorithms - - readPacketsLeft uint32 - readBytesLeft int64 - - writePacketsLeft uint32 - writeBytesLeft int64 - - // The session ID or nil if first kex did not complete yet. - sessionID []byte -} - -type pendingKex struct { - otherInit []byte - done chan error -} - -func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport { - t := &handshakeTransport{ - conn: conn, - serverVersion: serverVersion, - clientVersion: clientVersion, - incoming: make(chan []byte, chanSize), - requestKex: make(chan struct{}, 1), - startKex: make(chan *pendingKex, 1), - - config: config, - } - t.resetReadThresholds() - t.resetWriteThresholds() - - // We always start with a mandatory key exchange. - t.requestKex <- struct{}{} - return t -} - -func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport { - t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) - t.dialAddress = dialAddr - t.remoteAddr = addr - t.hostKeyCallback = config.HostKeyCallback - t.bannerCallback = config.BannerCallback - if config.HostKeyAlgorithms != nil { - t.hostKeyAlgorithms = config.HostKeyAlgorithms - } else { - t.hostKeyAlgorithms = supportedHostKeyAlgos - } - go t.readLoop() - go t.kexLoop() - return t -} - -func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport { - t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) - t.hostKeys = config.hostKeys - go t.readLoop() - go t.kexLoop() - return t -} - -func (t *handshakeTransport) getSessionID() []byte { - return t.sessionID -} - -// waitSession waits for the session to be established. This should be -// the first thing to call after instantiating handshakeTransport. 
-func (t *handshakeTransport) waitSession() error { - p, err := t.readPacket() - if err != nil { - return err - } - if p[0] != msgNewKeys { - return fmt.Errorf("ssh: first packet should be msgNewKeys") - } - - return nil -} - -func (t *handshakeTransport) id() string { - if len(t.hostKeys) > 0 { - return "server" - } - return "client" -} - -func (t *handshakeTransport) printPacket(p []byte, write bool) { - action := "got" - if write { - action = "sent" - } - - if p[0] == msgChannelData || p[0] == msgChannelExtendedData { - log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p)) - } else { - msg, err := decode(p) - log.Printf("%s %s %T %v (%v)", t.id(), action, msg, msg, err) - } -} - -func (t *handshakeTransport) readPacket() ([]byte, error) { - p, ok := <-t.incoming - if !ok { - return nil, t.readError - } - return p, nil -} - -func (t *handshakeTransport) readLoop() { - first := true - for { - p, err := t.readOnePacket(first) - first = false - if err != nil { - t.readError = err - close(t.incoming) - break - } - if p[0] == msgIgnore || p[0] == msgDebug { - continue - } - t.incoming <- p - } - - // Stop writers too. - t.recordWriteError(t.readError) - - // Unblock the writer should it wait for this. - close(t.startKex) - - // Don't close t.requestKex; it's also written to from writePacket. -} - -func (t *handshakeTransport) pushPacket(p []byte) error { - if debugHandshake { - t.printPacket(p, true) - } - return t.conn.writePacket(p) -} - -func (t *handshakeTransport) getWriteError() error { - t.mu.Lock() - defer t.mu.Unlock() - return t.writeError -} - -func (t *handshakeTransport) recordWriteError(err error) { - t.mu.Lock() - defer t.mu.Unlock() - if t.writeError == nil && err != nil { - t.writeError = err - } -} - -func (t *handshakeTransport) requestKeyExchange() { - select { - case t.requestKex <- struct{}{}: - default: - // something already requested a kex, so do nothing. - } -} - -func (t *handshakeTransport) resetWriteThresholds() { - t.writePacketsLeft = packetRekeyThreshold - if t.config.RekeyThreshold > 0 { - t.writeBytesLeft = int64(t.config.RekeyThreshold) - } else if t.algorithms != nil { - t.writeBytesLeft = t.algorithms.w.rekeyBytes() - } else { - t.writeBytesLeft = 1 << 30 - } -} - -func (t *handshakeTransport) kexLoop() { - -write: - for t.getWriteError() == nil { - var request *pendingKex - var sent bool - - for request == nil || !sent { - var ok bool - select { - case request, ok = <-t.startKex: - if !ok { - break write - } - case <-t.requestKex: - break - } - - if !sent { - if err := t.sendKexInit(); err != nil { - t.recordWriteError(err) - break - } - sent = true - } - } - - if err := t.getWriteError(); err != nil { - if request != nil { - request.done <- err - } - break - } - - // We're not servicing t.requestKex, but that is OK: - // we never block on sending to t.requestKex. - - // We're not servicing t.startKex, but the remote end - // has just sent us a kexInitMsg, so it can't send - // another key change request, until we close the done - // channel on the pendingKex request. - - err := t.enterKeyExchange(request.otherInit) - - t.mu.Lock() - t.writeError = err - t.sentInitPacket = nil - t.sentInitMsg = nil - - t.resetWriteThresholds() - - // we have completed the key exchange. Since the - // reader is still blocked, it is safe to clear out - // the requestKex channel. 
This avoids the situation - // where: 1) we consumed our own request for the - // initial kex, and 2) the kex from the remote side - // caused another send on the requestKex channel, - clear: - for { - select { - case <-t.requestKex: - // - default: - break clear - } - } - - request.done <- t.writeError - - // kex finished. Push packets that we received while - // the kex was in progress. Don't look at t.startKex - // and don't increment writtenSinceKex: if we trigger - // another kex while we are still busy with the last - // one, things will become very confusing. - for _, p := range t.pendingPackets { - t.writeError = t.pushPacket(p) - if t.writeError != nil { - break - } - } - t.pendingPackets = t.pendingPackets[:0] - t.mu.Unlock() - } - - // drain startKex channel. We don't service t.requestKex - // because nobody does blocking sends there. - go func() { - for init := range t.startKex { - init.done <- t.writeError - } - }() - - // Unblock reader. - t.conn.Close() -} - -// The protocol uses uint32 for packet counters, so we can't let them -// reach 1<<32. We will actually read and write more packets than -// this, though: the other side may send more packets, and after we -// hit this limit on writing we will send a few more packets for the -// key exchange itself. -const packetRekeyThreshold = (1 << 31) - -func (t *handshakeTransport) resetReadThresholds() { - t.readPacketsLeft = packetRekeyThreshold - if t.config.RekeyThreshold > 0 { - t.readBytesLeft = int64(t.config.RekeyThreshold) - } else if t.algorithms != nil { - t.readBytesLeft = t.algorithms.r.rekeyBytes() - } else { - t.readBytesLeft = 1 << 30 - } -} - -func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) { - p, err := t.conn.readPacket() - if err != nil { - return nil, err - } - - if t.readPacketsLeft > 0 { - t.readPacketsLeft-- - } else { - t.requestKeyExchange() - } - - if t.readBytesLeft > 0 { - t.readBytesLeft -= int64(len(p)) - } else { - t.requestKeyExchange() - } - - if debugHandshake { - t.printPacket(p, false) - } - - if first && p[0] != msgKexInit { - return nil, fmt.Errorf("ssh: first packet should be msgKexInit") - } - - if p[0] != msgKexInit { - return p, nil - } - - firstKex := t.sessionID == nil - - kex := pendingKex{ - done: make(chan error, 1), - otherInit: p, - } - t.startKex <- &kex - err = <-kex.done - - if debugHandshake { - log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err) - } - - if err != nil { - return nil, err - } - - t.resetReadThresholds() - - // By default, a key exchange is hidden from higher layers by - // translating it into msgIgnore. - successPacket := []byte{msgIgnore} - if firstKex { - // sendKexInit() for the first kex waits for - // msgNewKeys so the authentication process is - // guaranteed to happen over an encrypted transport. - successPacket = []byte{msgNewKeys} - } - - return successPacket, nil -} - -// sendKexInit sends a key change message. -func (t *handshakeTransport) sendKexInit() error { - t.mu.Lock() - defer t.mu.Unlock() - if t.sentInitMsg != nil { - // kexInits may be sent either in response to the other side, - // or because our side wants to initiate a key change, so we - // may have already sent a kexInit. In that case, don't send a - // second kexInit. 
- return nil - } - - msg := &kexInitMsg{ - KexAlgos: t.config.KeyExchanges, - CiphersClientServer: t.config.Ciphers, - CiphersServerClient: t.config.Ciphers, - MACsClientServer: t.config.MACs, - MACsServerClient: t.config.MACs, - CompressionClientServer: supportedCompressions, - CompressionServerClient: supportedCompressions, - } - io.ReadFull(rand.Reader, msg.Cookie[:]) - - if len(t.hostKeys) > 0 { - for _, k := range t.hostKeys { - msg.ServerHostKeyAlgos = append( - msg.ServerHostKeyAlgos, k.PublicKey().Type()) - } - } else { - msg.ServerHostKeyAlgos = t.hostKeyAlgorithms - } - packet := Marshal(msg) - - // writePacket destroys the contents, so save a copy. - packetCopy := make([]byte, len(packet)) - copy(packetCopy, packet) - - if err := t.pushPacket(packetCopy); err != nil { - return err - } - - t.sentInitMsg = msg - t.sentInitPacket = packet - - return nil -} - -func (t *handshakeTransport) writePacket(p []byte) error { - switch p[0] { - case msgKexInit: - return errors.New("ssh: only handshakeTransport can send kexInit") - case msgNewKeys: - return errors.New("ssh: only handshakeTransport can send newKeys") - } - - t.mu.Lock() - defer t.mu.Unlock() - if t.writeError != nil { - return t.writeError - } - - if t.sentInitMsg != nil { - // Copy the packet so the writer can reuse the buffer. - cp := make([]byte, len(p)) - copy(cp, p) - t.pendingPackets = append(t.pendingPackets, cp) - return nil - } - - if t.writeBytesLeft > 0 { - t.writeBytesLeft -= int64(len(p)) - } else { - t.requestKeyExchange() - } - - if t.writePacketsLeft > 0 { - t.writePacketsLeft-- - } else { - t.requestKeyExchange() - } - - if err := t.pushPacket(p); err != nil { - t.writeError = err - } - - return nil -} - -func (t *handshakeTransport) Close() error { - return t.conn.Close() -} - -func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { - if debugHandshake { - log.Printf("%s entered key exchange", t.id()) - } - - otherInit := &kexInitMsg{} - if err := Unmarshal(otherInitPacket, otherInit); err != nil { - return err - } - - magics := handshakeMagics{ - clientVersion: t.clientVersion, - serverVersion: t.serverVersion, - clientKexInit: otherInitPacket, - serverKexInit: t.sentInitPacket, - } - - clientInit := otherInit - serverInit := t.sentInitMsg - isClient := len(t.hostKeys) == 0 - if isClient { - clientInit, serverInit = serverInit, clientInit - - magics.clientKexInit = t.sentInitPacket - magics.serverKexInit = otherInitPacket - } - - var err error - t.algorithms, err = findAgreedAlgorithms(isClient, clientInit, serverInit) - if err != nil { - return err - } - - // We don't send FirstKexFollows, but we handle receiving it. - // - // RFC 4253 section 7 defines the kex and the agreement method for - // first_kex_packet_follows. It states that the guessed packet - // should be ignored if the "kex algorithm and/or the host - // key algorithm is guessed wrong (server and client have - // different preferred algorithm), or if any of the other - // algorithms cannot be agreed upon". The other algorithms have - // already been checked above so the kex algorithm and host key - // algorithm are checked here. - if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) { - // other side sent a kex message for the wrong algorithm, - // which we have to ignore. 
-	if _, err := t.conn.readPacket(); err != nil {
-		return err
-	}
-
-	kex, ok := kexAlgoMap[t.algorithms.kex]
-	if !ok {
-		return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex)
-	}
-
-	var result *kexResult
-	if len(t.hostKeys) > 0 {
-		result, err = t.server(kex, t.algorithms, &magics)
-	} else {
-		result, err = t.client(kex, t.algorithms, &magics)
-	}
-
-	if err != nil {
-		return err
-	}
-
-	if t.sessionID == nil {
-		t.sessionID = result.H
-	}
-	result.SessionID = t.sessionID
-
-	if err := t.conn.prepareKeyChange(t.algorithms, result); err != nil {
-		return err
-	}
-	if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil {
-		return err
-	}
-	if packet, err := t.conn.readPacket(); err != nil {
-		return err
-	} else if packet[0] != msgNewKeys {
-		return unexpectedMessageError(msgNewKeys, packet[0])
-	}
-
-	return nil
-}
-
-func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
-	var hostKey Signer
-	for _, k := range t.hostKeys {
-		if algs.hostKey == k.PublicKey().Type() {
-			hostKey = k
-		}
-	}
-
-	r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey)
-	return r, err
-}
-
-func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
-	result, err := kex.Client(t.conn, t.config.Rand, magics)
-	if err != nil {
-		return nil, err
-	}
-
-	hostKey, err := ParsePublicKey(result.HostKey)
-	if err != nil {
-		return nil, err
-	}
-
-	if err := verifyHostKeySignature(hostKey, result); err != nil {
-		return nil, err
-	}
-
-	err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey)
-	if err != nil {
-		return nil, err
-	}
-
-	return result, nil
-}
diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go
deleted file mode 100644
index af81d266546..00000000000
--- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package bcrypt_pbkdf implements bcrypt_pbkdf(3) from OpenBSD.
-//
-// See https://flak.tedunangst.com/post/bcrypt-pbkdf and
-// https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/lib/libutil/bcrypt_pbkdf.c.
-package bcrypt_pbkdf
-
-import (
-	"crypto/sha512"
-	"errors"
-	"golang.org/x/crypto/blowfish"
-)
-
-const blockSize = 32
-
-// Key derives a key from the password, salt and rounds count, returning a
-// []byte of length keyLen that can be used as a cryptographic key.
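A usage sketch for Key (note the package is internal to golang.org/x/crypto/ssh, so only the ssh package itself can import it; the salt and round count here are illustrative):

    salt := []byte("0123456789abcdef") // normally random and stored with the key
    key, err := bcrypt_pbkdf.Key([]byte("passphrase"), salt, 16, 32)
    if err != nil {
        log.Fatal(err)
    }
    _ = key // 32 bytes, e.g. an AES-256 key for OpenSSH private key encryption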
-func Key(password, salt []byte, rounds, keyLen int) ([]byte, error) { - if rounds < 1 { - return nil, errors.New("bcrypt_pbkdf: number of rounds is too small") - } - if len(password) == 0 { - return nil, errors.New("bcrypt_pbkdf: empty password") - } - if len(salt) == 0 || len(salt) > 1<<20 { - return nil, errors.New("bcrypt_pbkdf: bad salt length") - } - if keyLen > 1024 { - return nil, errors.New("bcrypt_pbkdf: keyLen is too large") - } - - numBlocks := (keyLen + blockSize - 1) / blockSize - key := make([]byte, numBlocks*blockSize) - - h := sha512.New() - h.Write(password) - shapass := h.Sum(nil) - - shasalt := make([]byte, 0, sha512.Size) - cnt, tmp := make([]byte, 4), make([]byte, blockSize) - for block := 1; block <= numBlocks; block++ { - h.Reset() - h.Write(salt) - cnt[0] = byte(block >> 24) - cnt[1] = byte(block >> 16) - cnt[2] = byte(block >> 8) - cnt[3] = byte(block) - h.Write(cnt) - bcryptHash(tmp, shapass, h.Sum(shasalt)) - - out := make([]byte, blockSize) - copy(out, tmp) - for i := 2; i <= rounds; i++ { - h.Reset() - h.Write(tmp) - bcryptHash(tmp, shapass, h.Sum(shasalt)) - for j := 0; j < len(out); j++ { - out[j] ^= tmp[j] - } - } - - for i, v := range out { - key[i*numBlocks+(block-1)] = v - } - } - return key[:keyLen], nil -} - -var magic = []byte("OxychromaticBlowfishSwatDynamite") - -func bcryptHash(out, shapass, shasalt []byte) { - c, err := blowfish.NewSaltedCipher(shapass, shasalt) - if err != nil { - panic(err) - } - for i := 0; i < 64; i++ { - blowfish.ExpandKey(shasalt, c) - blowfish.ExpandKey(shapass, c) - } - copy(out, magic) - for i := 0; i < 32; i += 8 { - for j := 0; j < 64; j++ { - c.Encrypt(out[i:i+8], out[i:i+8]) - } - } - // Swap bytes due to different endianness. - for i := 0; i < 32; i += 4 { - out[i+3], out[i+2], out[i+1], out[i] = out[i], out[i+1], out[i+2], out[i+3] - } -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/kex.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/kex.go deleted file mode 100644 index 7eedb209fa7..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/kex.go +++ /dev/null @@ -1,789 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/subtle" - "encoding/binary" - "errors" - "fmt" - "io" - "math/big" - - "golang.org/x/crypto/curve25519" -) - -const ( - kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" - kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" - kexAlgoECDH256 = "ecdh-sha2-nistp256" - kexAlgoECDH384 = "ecdh-sha2-nistp384" - kexAlgoECDH521 = "ecdh-sha2-nistp521" - kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org" - - // For the following kex only the client half contains a production - // ready implementation. The server half only consists of a minimal - // implementation to satisfy the automated tests. - kexAlgoDHGEXSHA1 = "diffie-hellman-group-exchange-sha1" - kexAlgoDHGEXSHA256 = "diffie-hellman-group-exchange-sha256" -) - -// kexResult captures the outcome of a key exchange. -type kexResult struct { - // Session hash. See also RFC 4253, section 8. - H []byte - - // Shared secret. See also RFC 4253, section 8. - K []byte - - // Host key as hashed into H. - HostKey []byte - - // Signature of H. - Signature []byte - - // A cryptographic hash function that matches the security - // level of the key exchange algorithm. 
It is used for - // calculating H, and for deriving keys from H and K. - Hash crypto.Hash - - // The session ID, which is the first H computed. This is used - // to derive key material inside the transport. - SessionID []byte -} - -// handshakeMagics contains data that is always included in the -// session hash. -type handshakeMagics struct { - clientVersion, serverVersion []byte - clientKexInit, serverKexInit []byte -} - -func (m *handshakeMagics) write(w io.Writer) { - writeString(w, m.clientVersion) - writeString(w, m.serverVersion) - writeString(w, m.clientKexInit) - writeString(w, m.serverKexInit) -} - -// kexAlgorithm abstracts different key exchange algorithms. -type kexAlgorithm interface { - // Server runs server-side key agreement, signing the result - // with a hostkey. - Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error) - - // Client runs the client-side key agreement. Caller is - // responsible for verifying the host key signature. - Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) -} - -// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement. -type dhGroup struct { - g, p, pMinus1 *big.Int -} - -func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { - if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 { - return nil, errors.New("ssh: DH parameter out of bounds") - } - return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil -} - -func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { - hashFunc := crypto.SHA1 - - var x *big.Int - for { - var err error - if x, err = rand.Int(randSource, group.pMinus1); err != nil { - return nil, err - } - if x.Sign() > 0 { - break - } - } - - X := new(big.Int).Exp(group.g, x, group.p) - kexDHInit := kexDHInitMsg{ - X: X, - } - if err := c.writePacket(Marshal(&kexDHInit)); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexDHReply kexDHReplyMsg - if err = Unmarshal(packet, &kexDHReply); err != nil { - return nil, err - } - - ki, err := group.diffieHellman(kexDHReply.Y, x) - if err != nil { - return nil, err - } - - h := hashFunc.New() - magics.write(h) - writeString(h, kexDHReply.HostKey) - writeInt(h, X) - writeInt(h, kexDHReply.Y) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: kexDHReply.HostKey, - Signature: kexDHReply.Signature, - Hash: crypto.SHA1, - }, nil -} - -func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - hashFunc := crypto.SHA1 - packet, err := c.readPacket() - if err != nil { - return - } - var kexDHInit kexDHInitMsg - if err = Unmarshal(packet, &kexDHInit); err != nil { - return - } - - var y *big.Int - for { - if y, err = rand.Int(randSource, group.pMinus1); err != nil { - return - } - if y.Sign() > 0 { - break - } - } - - Y := new(big.Int).Exp(group.g, y, group.p) - ki, err := group.diffieHellman(kexDHInit.X, y) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := hashFunc.New() - magics.write(h) - writeString(h, hostKeyBytes) - writeInt(h, kexDHInit.X) - writeInt(h, Y) - - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // 
own key-specific hash algorithm. - sig, err := signAndMarshal(priv, randSource, H) - if err != nil { - return nil, err - } - - kexDHReply := kexDHReplyMsg{ - HostKey: hostKeyBytes, - Y: Y, - Signature: sig, - } - packet = Marshal(&kexDHReply) - - err = c.writePacket(packet) - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: crypto.SHA1, - }, err -} - -// ecdh performs Elliptic Curve Diffie-Hellman key exchange as -// described in RFC 5656, section 4. -type ecdh struct { - curve elliptic.Curve -} - -func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { - ephKey, err := ecdsa.GenerateKey(kex.curve, rand) - if err != nil { - return nil, err - } - - kexInit := kexECDHInitMsg{ - ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y), - } - - serialized := Marshal(&kexInit) - if err := c.writePacket(serialized); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var reply kexECDHReplyMsg - if err = Unmarshal(packet, &reply); err != nil { - return nil, err - } - - x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey) - if err != nil { - return nil, err - } - - // generate shared secret - secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes()) - - h := ecHash(kex.curve).New() - magics.write(h) - writeString(h, reply.HostKey) - writeString(h, kexInit.ClientPubKey) - writeString(h, reply.EphemeralPubKey) - K := make([]byte, intLength(secret)) - marshalInt(K, secret) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: reply.HostKey, - Signature: reply.Signature, - Hash: ecHash(kex.curve), - }, nil -} - -// unmarshalECKey parses and checks an EC key. -func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) { - x, y = elliptic.Unmarshal(curve, pubkey) - if x == nil { - return nil, nil, errors.New("ssh: elliptic.Unmarshal failure") - } - if !validateECPublicKey(curve, x, y) { - return nil, nil, errors.New("ssh: public key not on curve") - } - return x, y, nil -} - -// validateECPublicKey checks that the point is a valid public key for -// the given curve. See [SEC1], 3.2.2 -func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { - if x.Sign() == 0 && y.Sign() == 0 { - return false - } - - if x.Cmp(curve.Params().P) >= 0 { - return false - } - - if y.Cmp(curve.Params().P) >= 0 { - return false - } - - if !curve.IsOnCurve(x, y) { - return false - } - - // We don't check if N * PubKey == 0, since - // - // - the NIST curves have cofactor = 1, so this is implicit. - // (We don't foresee an implementation that supports non NIST - // curves) - // - // - for ephemeral keys, we don't need to worry about small - // subgroup attacks. - return true -} - -func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexECDHInit kexECDHInitMsg - if err = Unmarshal(packet, &kexECDHInit); err != nil { - return nil, err - } - - clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey) - if err != nil { - return nil, err - } - - // We could cache this key across multiple users/multiple - // connection attempts, but the benefit is small. OpenSSH - // generates a new key for each incoming connection. 
- ephKey, err := ecdsa.GenerateKey(kex.curve, rand) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y) - - // generate shared secret - secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes()) - - h := ecHash(kex.curve).New() - magics.write(h) - writeString(h, hostKeyBytes) - writeString(h, kexECDHInit.ClientPubKey) - writeString(h, serializedEphKey) - - K := make([]byte, intLength(secret)) - marshalInt(K, secret) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, rand, H) - if err != nil { - return nil, err - } - - reply := kexECDHReplyMsg{ - EphemeralPubKey: serializedEphKey, - HostKey: hostKeyBytes, - Signature: sig, - } - - serialized := Marshal(&reply) - if err := c.writePacket(serialized); err != nil { - return nil, err - } - - return &kexResult{ - H: H, - K: K, - HostKey: reply.HostKey, - Signature: sig, - Hash: ecHash(kex.curve), - }, nil -} - -var kexAlgoMap = map[string]kexAlgorithm{} - -func init() { - // This is the group called diffie-hellman-group1-sha1 in RFC - // 4253 and Oakley Group 2 in RFC 2409. - p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) - kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), - } - - // This is the group called diffie-hellman-group14-sha1 in RFC - // 4253 and Oakley Group 14 in RFC 3526. - p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - - kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), - } - - kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} - kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} - kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} - kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} - kexAlgoMap[kexAlgoDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1} - kexAlgoMap[kexAlgoDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256} -} - -// curve25519sha256 implements the curve25519-sha256@libssh.org key -// agreement protocol, as described in -// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt -type curve25519sha256 struct{} - -type curve25519KeyPair struct { - priv [32]byte - pub [32]byte -} - -func (kp *curve25519KeyPair) generate(rand io.Reader) error { - if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { - return err - } - curve25519.ScalarBaseMult(&kp.pub, &kp.priv) - return nil -} - -// curve25519Zeros is just an array of 32 zero bytes so that we have something -// convenient to compare against in order to reject curve25519 points with the -// wrong order. 
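Both the client and server halves below apply the same rejection test; in isolation it reads as the following sketch, where priv and peerPub stand for the local private scalar and the peer's 32-byte public value:

    var secret [32]byte
    curve25519.ScalarMult(&secret, &priv, &peerPub)
    if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
        // An all-zero shared secret means the peer sent a point of
        // small order, so the key exchange must be aborted.
        return errors.New("ssh: peer's curve25519 public value has wrong order")
    }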
-var curve25519Zeros [32]byte - -func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { - var kp curve25519KeyPair - if err := kp.generate(rand); err != nil { - return nil, err - } - if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var reply kexECDHReplyMsg - if err = Unmarshal(packet, &reply); err != nil { - return nil, err - } - if len(reply.EphemeralPubKey) != 32 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong length") - } - - var servPub, secret [32]byte - copy(servPub[:], reply.EphemeralPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &servPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") - } - - h := crypto.SHA256.New() - magics.write(h) - writeString(h, reply.HostKey) - writeString(h, kp.pub[:]) - writeString(h, reply.EphemeralPubKey) - - ki := new(big.Int).SetBytes(secret[:]) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: reply.HostKey, - Signature: reply.Signature, - Hash: crypto.SHA256, - }, nil -} - -func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return - } - var kexInit kexECDHInitMsg - if err = Unmarshal(packet, &kexInit); err != nil { - return - } - - if len(kexInit.ClientPubKey) != 32 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong length") - } - - var kp curve25519KeyPair - if err := kp.generate(rand); err != nil { - return nil, err - } - - var clientPub, secret [32]byte - copy(clientPub[:], kexInit.ClientPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &clientPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := crypto.SHA256.New() - magics.write(h) - writeString(h, hostKeyBytes) - writeString(h, kexInit.ClientPubKey) - writeString(h, kp.pub[:]) - - ki := new(big.Int).SetBytes(secret[:]) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - H := h.Sum(nil) - - sig, err := signAndMarshal(priv, rand, H) - if err != nil { - return nil, err - } - - reply := kexECDHReplyMsg{ - EphemeralPubKey: kp.pub[:], - HostKey: hostKeyBytes, - Signature: sig, - } - if err := c.writePacket(Marshal(&reply)); err != nil { - return nil, err - } - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: crypto.SHA256, - }, nil -} - -// dhGEXSHA implements the diffie-hellman-group-exchange-sha1 and -// diffie-hellman-group-exchange-sha256 key agreement protocols, -// as described in RFC 4419 -type dhGEXSHA struct { - g, p *big.Int - hashFunc crypto.Hash -} - -const numMRTests = 64 - -const ( - dhGroupExchangeMinimumBits = 2048 - dhGroupExchangePreferredBits = 2048 - dhGroupExchangeMaximumBits = 8192 -) - -func (gex *dhGEXSHA) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { - if theirPublic.Sign() <= 0 || theirPublic.Cmp(gex.p) >= 0 { - return nil, fmt.Errorf("ssh: DH parameter out of bounds") - } - return new(big.Int).Exp(theirPublic, myPrivate, gex.p), nil -} - -func (gex dhGEXSHA) Client(c packetConn, randSource 
io.Reader, magics *handshakeMagics) (*kexResult, error) {
-	// Send GexRequest
-	kexDHGexRequest := kexDHGexRequestMsg{
-		MinBits:      dhGroupExchangeMinimumBits,
-		PreferedBits: dhGroupExchangePreferredBits,
-		MaxBits:      dhGroupExchangeMaximumBits,
-	}
-	if err := c.writePacket(Marshal(&kexDHGexRequest)); err != nil {
-		return nil, err
-	}
-
-	// Receive GexGroup
-	packet, err := c.readPacket()
-	if err != nil {
-		return nil, err
-	}
-
-	var kexDHGexGroup kexDHGexGroupMsg
-	if err = Unmarshal(packet, &kexDHGexGroup); err != nil {
-		return nil, err
-	}
-
-	// reject if p's bit length < dhGroupExchangeMinimumBits or > dhGroupExchangeMaximumBits
-	if kexDHGexGroup.P.BitLen() < dhGroupExchangeMinimumBits || kexDHGexGroup.P.BitLen() > dhGroupExchangeMaximumBits {
-		return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", kexDHGexGroup.P.BitLen())
-	}
-
-	gex.p = kexDHGexGroup.P
-	gex.g = kexDHGexGroup.G
-
-	// Check if p is safe by verifying that p and (p-1)/2 are primes
-	one := big.NewInt(1)
-	var pHalf = &big.Int{}
-	pHalf.Rsh(gex.p, 1)
-	if !gex.p.ProbablyPrime(numMRTests) || !pHalf.ProbablyPrime(numMRTests) {
-		return nil, fmt.Errorf("ssh: server provided gex p is not safe")
-	}
-
-	// Check if g is safe by verifying that 1 < g < p-1
-	var pMinusOne = &big.Int{}
-	pMinusOne.Sub(gex.p, one)
-	if gex.g.Cmp(one) != 1 || gex.g.Cmp(pMinusOne) != -1 {
-		return nil, fmt.Errorf("ssh: server provided gex g is not safe")
-	}
-
-	// Send GexInit
-	x, err := rand.Int(randSource, pHalf)
-	if err != nil {
-		return nil, err
-	}
-	X := new(big.Int).Exp(gex.g, x, gex.p)
-	kexDHGexInit := kexDHGexInitMsg{
-		X: X,
-	}
-	if err := c.writePacket(Marshal(&kexDHGexInit)); err != nil {
-		return nil, err
-	}
-
-	// Receive GexReply
-	packet, err = c.readPacket()
-	if err != nil {
-		return nil, err
-	}
-
-	var kexDHGexReply kexDHGexReplyMsg
-	if err = Unmarshal(packet, &kexDHGexReply); err != nil {
-		return nil, err
-	}
-
-	kInt, err := gex.diffieHellman(kexDHGexReply.Y, x)
-	if err != nil {
-		return nil, err
-	}
-
-	// Check if k is safe by verifying that 1 < k < p-1
-	if kInt.Cmp(one) != 1 || kInt.Cmp(pMinusOne) != -1 {
-		return nil, fmt.Errorf("ssh: derived k is not safe")
-	}
-
-	h := gex.hashFunc.New()
-	magics.write(h)
-	writeString(h, kexDHGexReply.HostKey)
-	binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits))
-	binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits))
-	binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits))
-	writeInt(h, gex.p)
-	writeInt(h, gex.g)
-	writeInt(h, X)
-	writeInt(h, kexDHGexReply.Y)
-	K := make([]byte, intLength(kInt))
-	marshalInt(K, kInt)
-	h.Write(K)
-
-	return &kexResult{
-		H:         h.Sum(nil),
-		K:         K,
-		HostKey:   kexDHGexReply.HostKey,
-		Signature: kexDHGexReply.Signature,
-		Hash:      gex.hashFunc,
-	}, nil
-}
-
-// Server half implementation of the Diffie-Hellman Key Exchange with SHA1 and SHA256.
-//
-// This is a minimal implementation to satisfy the automated tests.
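Before the minimal Server half below, note that the group checks in the Client half above carry the security weight of RFC 4419; distilled, they amount to this sketch, where p and g are the server-supplied group parameters:

    one := big.NewInt(1)
    pHalf := new(big.Int).Rsh(p, 1)
    pMinusOne := new(big.Int).Sub(p, one)

    // p must be a safe prime: p and (p-1)/2 both prime.
    if !p.ProbablyPrime(64) || !pHalf.ProbablyPrime(64) {
        return errors.New("ssh: gex p is not safe")
    }
    // g must satisfy 1 < g < p-1.
    if g.Cmp(one) != 1 || g.Cmp(pMinusOne) != -1 {
        return errors.New("ssh: gex g is not safe")
    }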
-func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - // Receive GexRequest - packet, err := c.readPacket() - if err != nil { - return - } - var kexDHGexRequest kexDHGexRequestMsg - if err = Unmarshal(packet, &kexDHGexRequest); err != nil { - return - } - - // smoosh the user's preferred size into our own limits - if kexDHGexRequest.PreferedBits > dhGroupExchangeMaximumBits { - kexDHGexRequest.PreferedBits = dhGroupExchangeMaximumBits - } - if kexDHGexRequest.PreferedBits < dhGroupExchangeMinimumBits { - kexDHGexRequest.PreferedBits = dhGroupExchangeMinimumBits - } - // fix min/max if they're inconsistent. technically, we could just pout - // and hang up, but there's no harm in giving them the benefit of the - // doubt and just picking a bitsize for them. - if kexDHGexRequest.MinBits > kexDHGexRequest.PreferedBits { - kexDHGexRequest.MinBits = kexDHGexRequest.PreferedBits - } - if kexDHGexRequest.MaxBits < kexDHGexRequest.PreferedBits { - kexDHGexRequest.MaxBits = kexDHGexRequest.PreferedBits - } - - // Send GexGroup - // This is the group called diffie-hellman-group14-sha1 in RFC - // 4253 and Oakley Group 14 in RFC 3526. - p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - gex.p = p - gex.g = big.NewInt(2) - - kexDHGexGroup := kexDHGexGroupMsg{ - P: gex.p, - G: gex.g, - } - if err := c.writePacket(Marshal(&kexDHGexGroup)); err != nil { - return nil, err - } - - // Receive GexInit - packet, err = c.readPacket() - if err != nil { - return - } - var kexDHGexInit kexDHGexInitMsg - if err = Unmarshal(packet, &kexDHGexInit); err != nil { - return - } - - var pHalf = &big.Int{} - pHalf.Rsh(gex.p, 1) - - y, err := rand.Int(randSource, pHalf) - if err != nil { - return - } - - Y := new(big.Int).Exp(gex.g, y, gex.p) - kInt, err := gex.diffieHellman(kexDHGexInit.X, y) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := gex.hashFunc.New() - magics.write(h) - writeString(h, hostKeyBytes) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) - writeInt(h, gex.p) - writeInt(h, gex.g) - writeInt(h, kexDHGexInit.X) - writeInt(h, Y) - - K := make([]byte, intLength(kInt)) - marshalInt(K, kInt) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. 
- sig, err := signAndMarshal(priv, randSource, H) - if err != nil { - return nil, err - } - - kexDHGexReply := kexDHGexReplyMsg{ - HostKey: hostKeyBytes, - Y: Y, - Signature: sig, - } - packet = Marshal(&kexDHGexReply) - - err = c.writePacket(packet) - - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: gex.hashFunc, - }, err -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/keys.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/keys.go deleted file mode 100644 index 31f26349a05..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/keys.go +++ /dev/null @@ -1,1474 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "crypto" - "crypto/aes" - "crypto/cipher" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/md5" - "crypto/rsa" - "crypto/sha256" - "crypto/x509" - "encoding/asn1" - "encoding/base64" - "encoding/hex" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" - "strings" - - "golang.org/x/crypto/ed25519" - "golang.org/x/crypto/ssh/internal/bcrypt_pbkdf" -) - -// These constants represent the algorithm names for key types supported by this -// package. -const ( - KeyAlgoRSA = "ssh-rsa" - KeyAlgoDSA = "ssh-dss" - KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" - KeyAlgoSKECDSA256 = "sk-ecdsa-sha2-nistp256@openssh.com" - KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" - KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" - KeyAlgoED25519 = "ssh-ed25519" - KeyAlgoSKED25519 = "sk-ssh-ed25519@openssh.com" -) - -// These constants represent non-default signature algorithms that are supported -// as algorithm parameters to AlgorithmSigner.SignWithAlgorithm methods. See -// [PROTOCOL.agent] section 4.5.1 and -// https://tools.ietf.org/html/draft-ietf-curdle-rsa-sha2-10 -const ( - SigAlgoRSA = "ssh-rsa" - SigAlgoRSASHA2256 = "rsa-sha2-256" - SigAlgoRSASHA2512 = "rsa-sha2-512" -) - -// parsePubKey parses a public key of the given algorithm. -// Use ParsePublicKey for keys with prepended algorithm. -func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) { - switch algo { - case KeyAlgoRSA: - return parseRSA(in) - case KeyAlgoDSA: - return parseDSA(in) - case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: - return parseECDSA(in) - case KeyAlgoSKECDSA256: - return parseSKECDSA(in) - case KeyAlgoED25519: - return parseED25519(in) - case KeyAlgoSKED25519: - return parseSKEd25519(in) - case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: - cert, err := parseCert(in, certToPrivAlgo(algo)) - if err != nil { - return nil, nil, err - } - return cert, nil, nil - } - return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo) -} - -// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format -// (see sshd(8) manual page) once the options and key type fields have been -// removed. 
-func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) {
-	in = bytes.TrimSpace(in)
-
-	i := bytes.IndexAny(in, " \t")
-	if i == -1 {
-		i = len(in)
-	}
-	base64Key := in[:i]
-
-	key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key)))
-	n, err := base64.StdEncoding.Decode(key, base64Key)
-	if err != nil {
-		return nil, "", err
-	}
-	key = key[:n]
-	out, err = ParsePublicKey(key)
-	if err != nil {
-		return nil, "", err
-	}
-	comment = string(bytes.TrimSpace(in[i:]))
-	return out, comment, nil
-}
-
-// ParseKnownHosts parses an entry in the format of the known_hosts file.
-//
-// The known_hosts format is documented in the sshd(8) manual page. This
-// function will parse a single entry from in. On successful return, marker
-// will contain the optional marker value (i.e. "cert-authority" or "revoked")
-// or else be empty, hosts will contain the hosts that this entry matches,
-// pubKey will contain the public key and comment will contain any trailing
-// comment at the end of the line. See the sshd(8) manual page for the various
-// forms that a host string can take.
-//
-// The unparsed remainder of the input will be returned in rest. This function
-// can be called repeatedly to parse multiple entries.
-//
-// If no entries were found in the input then err will be io.EOF. Otherwise a
-// non-nil err value indicates a parse error.
-func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) {
-	for len(in) > 0 {
-		end := bytes.IndexByte(in, '\n')
-		if end != -1 {
-			rest = in[end+1:]
-			in = in[:end]
-		} else {
-			rest = nil
-		}
-
-		end = bytes.IndexByte(in, '\r')
-		if end != -1 {
-			in = in[:end]
-		}
-
-		in = bytes.TrimSpace(in)
-		if len(in) == 0 || in[0] == '#' {
-			in = rest
-			continue
-		}
-
-		i := bytes.IndexAny(in, " \t")
-		if i == -1 {
-			in = rest
-			continue
-		}
-
-		// Strip out the beginning of the known_hosts key.
-		// This is either an optional marker or a (set of) hostname(s).
-		keyFields := bytes.Fields(in)
-		if len(keyFields) < 3 || len(keyFields) > 5 {
-			return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data")
-		}
-
-		// keyFields[0] is either "@cert-authority", "@revoked" or a comma separated
-		// list of hosts
-		marker := ""
-		if keyFields[0][0] == '@' {
-			marker = string(keyFields[0][1:])
-			keyFields = keyFields[1:]
-		}
-
-		hosts := string(keyFields[0])
-		// keyFields[1] contains the key type (e.g. "ssh-rsa").
-		// However, that information is duplicated inside the
-		// base64-encoded key and so is ignored here.
-
-		key := bytes.Join(keyFields[2:], []byte(" "))
-		if pubKey, comment, err = parseAuthorizedKey(key); err != nil {
-			return "", nil, nil, "", nil, err
-		}
-
-		return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil
-	}
-
-	return "", nil, nil, "", nil, io.EOF
-}
-
-// ParseAuthorizedKey parses a public key from an authorized_keys
-// file used in OpenSSH according to the sshd(8) manual page.
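A typical loop over an authorized_keys file using the exported function (a sketch; the path is illustrative):

    package main

    import (
        "fmt"
        "io/ioutil"
        "log"

        "golang.org/x/crypto/ssh"
    )

    func main() {
        data, err := ioutil.ReadFile("/home/demo/.ssh/authorized_keys")
        if err != nil {
            log.Fatal(err)
        }
        for rest := data; len(rest) > 0; {
            pub, comment, options, more, err := ssh.ParseAuthorizedKey(rest)
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(pub.Type(), comment, options)
            rest = more
        }
    }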
-func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { - for len(in) > 0 { - end := bytes.IndexByte(in, '\n') - if end != -1 { - rest = in[end+1:] - in = in[:end] - } else { - rest = nil - } - - end = bytes.IndexByte(in, '\r') - if end != -1 { - in = in[:end] - } - - in = bytes.TrimSpace(in) - if len(in) == 0 || in[0] == '#' { - in = rest - continue - } - - i := bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { - return out, comment, options, rest, nil - } - - // No key type recognised. Maybe there's an options field at - // the beginning. - var b byte - inQuote := false - var candidateOptions []string - optionStart := 0 - for i, b = range in { - isEnd := !inQuote && (b == ' ' || b == '\t') - if (b == ',' && !inQuote) || isEnd { - if i-optionStart > 0 { - candidateOptions = append(candidateOptions, string(in[optionStart:i])) - } - optionStart = i + 1 - } - if isEnd { - break - } - if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) { - inQuote = !inQuote - } - } - for i < len(in) && (in[i] == ' ' || in[i] == '\t') { - i++ - } - if i == len(in) { - // Invalid line: unmatched quote - in = rest - continue - } - - in = in[i:] - i = bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { - options = candidateOptions - return out, comment, options, rest, nil - } - - in = rest - continue - } - - return nil, "", nil, nil, errors.New("ssh: no key found") -} - -// ParsePublicKey parses an SSH public key formatted for use in -// the SSH wire protocol according to RFC 4253, section 6.6. -func ParsePublicKey(in []byte) (out PublicKey, err error) { - algo, in, ok := parseString(in) - if !ok { - return nil, errShortRead - } - var rest []byte - out, rest, err = parsePubKey(in, string(algo)) - if len(rest) > 0 { - return nil, errors.New("ssh: trailing junk in public key") - } - - return out, err -} - -// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH -// authorized_keys file. The return value ends with newline. -func MarshalAuthorizedKey(key PublicKey) []byte { - b := &bytes.Buffer{} - b.WriteString(key.Type()) - b.WriteByte(' ') - e := base64.NewEncoder(base64.StdEncoding, b) - e.Write(key.Marshal()) - e.Close() - b.WriteByte('\n') - return b.Bytes() -} - -// PublicKey is an abstraction of different types of public keys. -type PublicKey interface { - // Type returns the key's type, e.g. "ssh-rsa". - Type() string - - // Marshal returns the serialized key data in SSH wire format, - // with the name prefix. To unmarshal the returned data, use - // the ParsePublicKey function. - Marshal() []byte - - // Verify that sig is a signature on the given data using this - // key. This function will hash the data appropriately first. - Verify(data []byte, sig *Signature) error -} - -// CryptoPublicKey, if implemented by a PublicKey, -// returns the underlying crypto.PublicKey form of the key. -type CryptoPublicKey interface { - CryptoPublicKey() crypto.PublicKey -} - -// A Signer can create signatures that verify against a public key. -type Signer interface { - // PublicKey returns an associated PublicKey instance. - PublicKey() PublicKey - - // Sign returns raw signature for the given data. This method - // will apply the hash specified for the keytype to the data. 
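In practice a Signer is usually obtained from ParsePrivateKey, and its output verifies against the matching PublicKey; a sketch, assuming pemBytes holds a PEM-encoded private key and data is the payload to sign:

    signer, err := ssh.ParsePrivateKey(pemBytes)
    if err != nil {
        log.Fatal(err)
    }
    sig, err := signer.Sign(rand.Reader, data) // crypto/rand
    if err != nil {
        log.Fatal(err)
    }
    if err := signer.PublicKey().Verify(data, sig); err != nil {
        log.Fatal(err)
    }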
-	Sign(rand io.Reader, data []byte) (*Signature, error)
-}
-
-// An AlgorithmSigner is a Signer that also supports specifying a specific
-// algorithm to use for signing.
-type AlgorithmSigner interface {
-	Signer
-
-	// SignWithAlgorithm is like Signer.Sign, but allows specification of a
-	// non-default signing algorithm. See the SigAlgo* constants in this
-	// package for signature algorithms supported by this package. Callers may
-	// pass an empty string for the algorithm in which case the AlgorithmSigner
-	// will use its default algorithm.
-	SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error)
-}
-
-type rsaPublicKey rsa.PublicKey
-
-func (r *rsaPublicKey) Type() string {
-	return "ssh-rsa"
-}
-
-// parseRSA parses an RSA key according to RFC 4253, section 6.6.
-func parseRSA(in []byte) (out PublicKey, rest []byte, err error) {
-	var w struct {
-		E    *big.Int
-		N    *big.Int
-		Rest []byte `ssh:"rest"`
-	}
-	if err := Unmarshal(in, &w); err != nil {
-		return nil, nil, err
-	}
-
-	if w.E.BitLen() > 24 {
-		return nil, nil, errors.New("ssh: exponent too large")
-	}
-	e := w.E.Int64()
-	if e < 3 || e&1 == 0 {
-		return nil, nil, errors.New("ssh: incorrect exponent")
-	}
-
-	var key rsa.PublicKey
-	key.E = int(e)
-	key.N = w.N
-	return (*rsaPublicKey)(&key), w.Rest, nil
-}
-
-func (r *rsaPublicKey) Marshal() []byte {
-	e := new(big.Int).SetInt64(int64(r.E))
-	// RSA publickey struct layout should match the struct used by
-	// parseRSACert in the x/crypto/ssh/agent package.
-	wirekey := struct {
-		Name string
-		E    *big.Int
-		N    *big.Int
-	}{
-		KeyAlgoRSA,
-		e,
-		r.N,
-	}
-	return Marshal(&wirekey)
-}
-
-func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error {
-	var hash crypto.Hash
-	switch sig.Format {
-	case SigAlgoRSA:
-		hash = crypto.SHA1
-	case SigAlgoRSASHA2256:
-		hash = crypto.SHA256
-	case SigAlgoRSASHA2512:
-		hash = crypto.SHA512
-	default:
-		return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type())
-	}
-	h := hash.New()
-	h.Write(data)
-	digest := h.Sum(nil)
-	return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), hash, digest, sig.Blob)
-}
-
-func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
-	return (*rsa.PublicKey)(r)
-}
-
-type dsaPublicKey dsa.PublicKey
-
-func (k *dsaPublicKey) Type() string {
-	return "ssh-dss"
-}
-
-func checkDSAParams(param *dsa.Parameters) error {
-	// SSH specifies FIPS 186-2, which only provided a single size
-	// (1024 bits) DSA key. FIPS 186-3 allows for larger key
-	// sizes, which would confuse SSH.
-	if l := param.P.BitLen(); l != 1024 {
-		return fmt.Errorf("ssh: unsupported DSA key size %d", l)
-	}
-
-	return nil
-}
-
-// parseDSA parses a DSA key according to RFC 4253, section 6.6.
-func parseDSA(in []byte) (out PublicKey, rest []byte, err error) {
-	var w struct {
-		P, Q, G, Y *big.Int
-		Rest       []byte `ssh:"rest"`
-	}
-	if err := Unmarshal(in, &w); err != nil {
-		return nil, nil, err
-	}
-
-	param := dsa.Parameters{
-		P: w.P,
-		Q: w.Q,
-		G: w.G,
-	}
-	if err := checkDSAParams(&param); err != nil {
-		return nil, nil, err
-	}
-
-	key := &dsaPublicKey{
-		Parameters: param,
-		Y:          w.Y,
-	}
-	return key, w.Rest, nil
-}
-
-func (k *dsaPublicKey) Marshal() []byte {
-	// DSA publickey struct layout should match the struct used by
-	// parseDSACert in the x/crypto/ssh/agent package.
- w := struct { - Name string - P, Q, G, Y *big.Int - }{ - k.Type(), - k.P, - k.Q, - k.G, - k.Y, - } - - return Marshal(&w) -} - -func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - h := crypto.SHA1.New() - h.Write(data) - digest := h.Sum(nil) - - // Per RFC 4253, section 6.6, - // The value for 'dss_signature_blob' is encoded as a string containing - // r, followed by s (which are 160-bit integers, without lengths or - // padding, unsigned, and in network byte order). - // For DSS purposes, sig.Blob should be exactly 40 bytes in length. - if len(sig.Blob) != 40 { - return errors.New("ssh: DSA signature parse error") - } - r := new(big.Int).SetBytes(sig.Blob[:20]) - s := new(big.Int).SetBytes(sig.Blob[20:]) - if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*dsa.PublicKey)(k) -} - -type dsaPrivateKey struct { - *dsa.PrivateKey -} - -func (k *dsaPrivateKey) PublicKey() PublicKey { - return (*dsaPublicKey)(&k.PrivateKey.PublicKey) -} - -func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { - return k.SignWithAlgorithm(rand, data, "") -} - -func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - if algorithm != "" && algorithm != k.PublicKey().Type() { - return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) - } - - h := crypto.SHA1.New() - h.Write(data) - digest := h.Sum(nil) - r, s, err := dsa.Sign(rand, k.PrivateKey, digest) - if err != nil { - return nil, err - } - - sig := make([]byte, 40) - rb := r.Bytes() - sb := s.Bytes() - - copy(sig[20-len(rb):20], rb) - copy(sig[40-len(sb):], sb) - - return &Signature{ - Format: k.PublicKey().Type(), - Blob: sig, - }, nil -} - -type ecdsaPublicKey ecdsa.PublicKey - -func (k *ecdsaPublicKey) Type() string { - return "ecdsa-sha2-" + k.nistID() -} - -func (k *ecdsaPublicKey) nistID() string { - switch k.Params().BitSize { - case 256: - return "nistp256" - case 384: - return "nistp384" - case 521: - return "nistp521" - } - panic("ssh: unsupported ecdsa key size") -} - -type ed25519PublicKey ed25519.PublicKey - -func (k ed25519PublicKey) Type() string { - return KeyAlgoED25519 -} - -func parseED25519(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - KeyBytes []byte - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - if l := len(w.KeyBytes); l != ed25519.PublicKeySize { - return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l) - } - - return ed25519PublicKey(w.KeyBytes), w.Rest, nil -} - -func (k ed25519PublicKey) Marshal() []byte { - w := struct { - Name string - KeyBytes []byte - }{ - KeyAlgoED25519, - []byte(k), - } - return Marshal(&w) -} - -func (k ed25519PublicKey) Verify(b []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - if l := len(k); l != ed25519.PublicKeySize { - return fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l) - } - - if ok := ed25519.Verify(ed25519.PublicKey(k), b, sig.Blob); !ok { - return errors.New("ssh: signature did not verify") - } - - return nil -} - -func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey { - return 
ed25519.PublicKey(k) -} - -func supportedEllipticCurve(curve elliptic.Curve) bool { - return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() -} - -// ecHash returns the hash to match the given elliptic curve, see RFC -// 5656, section 6.2.1 -func ecHash(curve elliptic.Curve) crypto.Hash { - bitSize := curve.Params().BitSize - switch { - case bitSize <= 256: - return crypto.SHA256 - case bitSize <= 384: - return crypto.SHA384 - } - return crypto.SHA512 -} - -// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. -func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - Curve string - KeyBytes []byte - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := new(ecdsa.PublicKey) - - switch w.Curve { - case "nistp256": - key.Curve = elliptic.P256() - case "nistp384": - key.Curve = elliptic.P384() - case "nistp521": - key.Curve = elliptic.P521() - default: - return nil, nil, errors.New("ssh: unsupported curve") - } - - key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) - if key.X == nil || key.Y == nil { - return nil, nil, errors.New("ssh: invalid curve point") - } - return (*ecdsaPublicKey)(key), w.Rest, nil -} - -func (k *ecdsaPublicKey) Marshal() []byte { - // See RFC 5656, section 3.1. - keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) - // ECDSA publickey struct layout should match the struct used by - // parseECDSACert in the x/crypto/ssh/agent package. - w := struct { - Name string - ID string - Key []byte - }{ - k.Type(), - k.nistID(), - keyBytes, - } - - return Marshal(&w) -} - -func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - - h := ecHash(k.Curve).New() - h.Write(data) - digest := h.Sum(nil) - - // Per RFC 5656, section 3.1.2, - // The ecdsa_signature_blob value has the following specific encoding: - // mpint r - // mpint s - var ecSig struct { - R *big.Int - S *big.Int - } - - if err := Unmarshal(sig.Blob, &ecSig); err != nil { - return err - } - - if ecdsa.Verify((*ecdsa.PublicKey)(k), digest, ecSig.R, ecSig.S) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*ecdsa.PublicKey)(k) -} - -// skFields holds the additional fields present in U2F/FIDO2 signatures. -// See openssh/PROTOCOL.u2f 'SSH U2F Signatures' for details. -type skFields struct { - // Flags contains U2F/FIDO2 flags such as 'user present' - Flags byte - // Counter is a monotonic signature counter which can be - // used to detect concurrent use of a private key, should - // it be extracted from hardware. - Counter uint32 -} - -type skECDSAPublicKey struct { - // application is a URL-like string, typically "ssh:" for SSH. - // see openssh/PROTOCOL.u2f for details. 
- application string - ecdsa.PublicKey -} - -func (k *skECDSAPublicKey) Type() string { - return KeyAlgoSKECDSA256 -} - -func (k *skECDSAPublicKey) nistID() string { - return "nistp256" -} - -func parseSKECDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - Curve string - KeyBytes []byte - Application string - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := new(skECDSAPublicKey) - key.application = w.Application - - if w.Curve != "nistp256" { - return nil, nil, errors.New("ssh: unsupported curve") - } - key.Curve = elliptic.P256() - - key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) - if key.X == nil || key.Y == nil { - return nil, nil, errors.New("ssh: invalid curve point") - } - - return key, w.Rest, nil -} - -func (k *skECDSAPublicKey) Marshal() []byte { - // See RFC 5656, section 3.1. - keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) - w := struct { - Name string - ID string - Key []byte - Application string - }{ - k.Type(), - k.nistID(), - keyBytes, - k.application, - } - - return Marshal(&w) -} - -func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - - h := ecHash(k.Curve).New() - h.Write([]byte(k.application)) - appDigest := h.Sum(nil) - - h.Reset() - h.Write(data) - dataDigest := h.Sum(nil) - - var ecSig struct { - R *big.Int - S *big.Int - } - if err := Unmarshal(sig.Blob, &ecSig); err != nil { - return err - } - - var skf skFields - if err := Unmarshal(sig.Rest, &skf); err != nil { - return err - } - - blob := struct { - ApplicationDigest []byte `ssh:"rest"` - Flags byte - Counter uint32 - MessageDigest []byte `ssh:"rest"` - }{ - appDigest, - skf.Flags, - skf.Counter, - dataDigest, - } - - original := Marshal(blob) - - h.Reset() - h.Write(original) - digest := h.Sum(nil) - - if ecdsa.Verify((*ecdsa.PublicKey)(&k.PublicKey), digest, ecSig.R, ecSig.S) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -type skEd25519PublicKey struct { - // application is a URL-like string, typically "ssh:" for SSH. - // see openssh/PROTOCOL.u2f for details. 
- application string - ed25519.PublicKey -} - -func (k *skEd25519PublicKey) Type() string { - return KeyAlgoSKED25519 -} - -func parseSKEd25519(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - KeyBytes []byte - Application string - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - if l := len(w.KeyBytes); l != ed25519.PublicKeySize { - return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l) - } - - key := new(skEd25519PublicKey) - key.application = w.Application - key.PublicKey = ed25519.PublicKey(w.KeyBytes) - - return key, w.Rest, nil -} - -func (k *skEd25519PublicKey) Marshal() []byte { - w := struct { - Name string - KeyBytes []byte - Application string - }{ - KeyAlgoSKED25519, - []byte(k.PublicKey), - k.application, - } - return Marshal(&w) -} - -func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - if l := len(k.PublicKey); l != ed25519.PublicKeySize { - return fmt.Errorf("invalid size %d for Ed25519 public key", l) - } - - h := sha256.New() - h.Write([]byte(k.application)) - appDigest := h.Sum(nil) - - h.Reset() - h.Write(data) - dataDigest := h.Sum(nil) - - var edSig struct { - Signature []byte `ssh:"rest"` - } - - if err := Unmarshal(sig.Blob, &edSig); err != nil { - return err - } - - var skf skFields - if err := Unmarshal(sig.Rest, &skf); err != nil { - return err - } - - blob := struct { - ApplicationDigest []byte `ssh:"rest"` - Flags byte - Counter uint32 - MessageDigest []byte `ssh:"rest"` - }{ - appDigest, - skf.Flags, - skf.Counter, - dataDigest, - } - - original := Marshal(blob) - - if ok := ed25519.Verify(k.PublicKey, original, edSig.Signature); !ok { - return errors.New("ssh: signature did not verify") - } - - return nil -} - -// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, -// *ecdsa.PrivateKey or any other crypto.Signer and returns a -// corresponding Signer instance. ECDSA keys must use P-256, P-384 or -// P-521. DSA keys must use parameter size L1024N160. -func NewSignerFromKey(key interface{}) (Signer, error) { - switch key := key.(type) { - case crypto.Signer: - return NewSignerFromSigner(key) - case *dsa.PrivateKey: - return newDSAPrivateKey(key) - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } -} - -func newDSAPrivateKey(key *dsa.PrivateKey) (Signer, error) { - if err := checkDSAParams(&key.PublicKey.Parameters); err != nil { - return nil, err - } - - return &dsaPrivateKey{key}, nil -} - -type wrappedSigner struct { - signer crypto.Signer - pubKey PublicKey -} - -// NewSignerFromSigner takes any crypto.Signer implementation and -// returns a corresponding Signer interface. This can be used, for -// example, with keys kept in hardware modules. 
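Both security-key Verify methods above rebuild the same signed message: the digest of the application string, the authenticator's flags and counter from sig.Rest, then the digest of the session data. A sketch of that layout using the exported Marshal; the flag and counter values are illustrative:

package main

import (
	"crypto/sha256"
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	// What the authenticator signs for sk-ssh-ed25519@openssh.com:
	// SHA-256(application) || flags || counter || SHA-256(data),
	// mirroring the blob built in skEd25519PublicKey.Verify above.
	appDigest := sha256.Sum256([]byte("ssh:"))
	dataDigest := sha256.Sum256([]byte("session payload"))

	signed := ssh.Marshal(struct {
		ApplicationDigest []byte `ssh:"rest"`
		Flags             byte   // e.g. 0x01 = user present
		Counter           uint32
		MessageDigest     []byte `ssh:"rest"`
	}{appDigest[:], 0x01, 42, dataDigest[:]})

	fmt.Printf("%d-byte message for ed25519.Verify\n", len(signed))
}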
-func NewSignerFromSigner(signer crypto.Signer) (Signer, error) { - pubKey, err := NewPublicKey(signer.Public()) - if err != nil { - return nil, err - } - - return &wrappedSigner{signer, pubKey}, nil -} - -func (s *wrappedSigner) PublicKey() PublicKey { - return s.pubKey -} - -func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.SignWithAlgorithm(rand, data, "") -} - -func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - var hashFunc crypto.Hash - - if _, ok := s.pubKey.(*rsaPublicKey); ok { - // RSA keys support a few hash functions determined by the requested signature algorithm - switch algorithm { - case "", SigAlgoRSA: - algorithm = SigAlgoRSA - hashFunc = crypto.SHA1 - case SigAlgoRSASHA2256: - hashFunc = crypto.SHA256 - case SigAlgoRSASHA2512: - hashFunc = crypto.SHA512 - default: - return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) - } - } else { - // The only supported algorithm for all other key types is the same as the type of the key - if algorithm == "" { - algorithm = s.pubKey.Type() - } else if algorithm != s.pubKey.Type() { - return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) - } - - switch key := s.pubKey.(type) { - case *dsaPublicKey: - hashFunc = crypto.SHA1 - case *ecdsaPublicKey: - hashFunc = ecHash(key.Curve) - case ed25519PublicKey: - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } - } - - var digest []byte - if hashFunc != 0 { - h := hashFunc.New() - h.Write(data) - digest = h.Sum(nil) - } else { - digest = data - } - - signature, err := s.signer.Sign(rand, digest, hashFunc) - if err != nil { - return nil, err - } - - // crypto.Signer.Sign is expected to return an ASN.1-encoded signature - // for ECDSA and DSA, but that's not the encoding expected by SSH, so - // re-encode. - switch s.pubKey.(type) { - case *ecdsaPublicKey, *dsaPublicKey: - type asn1Signature struct { - R, S *big.Int - } - asn1Sig := new(asn1Signature) - _, err := asn1.Unmarshal(signature, asn1Sig) - if err != nil { - return nil, err - } - - switch s.pubKey.(type) { - case *ecdsaPublicKey: - signature = Marshal(asn1Sig) - - case *dsaPublicKey: - signature = make([]byte, 40) - r := asn1Sig.R.Bytes() - s := asn1Sig.S.Bytes() - copy(signature[20-len(r):20], r) - copy(signature[40-len(s):40], s) - } - } - - return &Signature{ - Format: algorithm, - Blob: signature, - }, nil -} - -// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, -// or ed25519.PublicKey returns a corresponding PublicKey instance. -// ECDSA keys must use P-256, P-384 or P-521. -func NewPublicKey(key interface{}) (PublicKey, error) { - switch key := key.(type) { - case *rsa.PublicKey: - return (*rsaPublicKey)(key), nil - case *ecdsa.PublicKey: - if !supportedEllipticCurve(key.Curve) { - return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported") - } - return (*ecdsaPublicKey)(key), nil - case *dsa.PublicKey: - return (*dsaPublicKey)(key), nil - case ed25519.PublicKey: - if l := len(key); l != ed25519.PublicKeySize { - return nil, fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l) - } - return ed25519PublicKey(key), nil - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } -} - -// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports -// the same keys as ParseRawPrivateKey. If the private key is encrypted, it -// will return a PassphraseMissingError. 
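NewSignerFromSigner is the hook for opaque keys (PKCS#11 tokens, TPMs, cloud KMS): anything implementing crypto.Signer gets wrapped, and the ASN.1-to-SSH signature re-encoding above is applied for ECDSA and DSA. A sketch with a software ECDSA key standing in for a hardware handle, plus CryptoPublicKey for the reverse conversion:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Stand-in for a hardware-backed crypto.Signer.
	priv, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.NewSignerFromSigner(priv)
	if err != nil {
		log.Fatal(err)
	}
	// P-384 implies SHA-384 via ecHash, per RFC 5656.
	sig, err := signer.Sign(rand.Reader, []byte("payload"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sig.Format) // ecdsa-sha2-nistp384

	// Back from ssh.PublicKey to the crypto/ecdsa form.
	if ck, ok := signer.PublicKey().(ssh.CryptoPublicKey); ok {
		ec := ck.CryptoPublicKey().(*ecdsa.PublicKey)
		fmt.Println(ec.Curve.Params().Name) // P-384
	}
}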
-func ParsePrivateKey(pemBytes []byte) (Signer, error) { - key, err := ParseRawPrivateKey(pemBytes) - if err != nil { - return nil, err - } - - return NewSignerFromKey(key) -} - -// ParsePrivateKeyWithPassphrase returns a Signer from a PEM encoded private -// key and passphrase. It supports the same keys as -// ParseRawPrivateKeyWithPassphrase. -func ParsePrivateKeyWithPassphrase(pemBytes, passphrase []byte) (Signer, error) { - key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase) - if err != nil { - return nil, err - } - - return NewSignerFromKey(key) -} - -// encryptedBlock tells whether a private key is -// encrypted by examining its Proc-Type header -// for a mention of ENCRYPTED -// according to RFC 1421 Section 4.6.1.1. -func encryptedBlock(block *pem.Block) bool { - return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED") -} - -// A PassphraseMissingError indicates that parsing this private key requires a -// passphrase. Use ParsePrivateKeyWithPassphrase. -type PassphraseMissingError struct { - // PublicKey will be set if the private key format includes an unencrypted - // public key along with the encrypted private key. - PublicKey PublicKey -} - -func (*PassphraseMissingError) Error() string { - return "ssh: this private key is passphrase protected" -} - -// ParseRawPrivateKey returns a private key from a PEM encoded private key. It -// supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys. If the -// private key is encrypted, it will return a PassphraseMissingError. -func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("ssh: no key found") - } - - if encryptedBlock(block) { - return nil, &PassphraseMissingError{} - } - - switch block.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(block.Bytes) - // RFC5208 - https://tools.ietf.org/html/rfc5208 - case "PRIVATE KEY": - return x509.ParsePKCS8PrivateKey(block.Bytes) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(block.Bytes) - case "DSA PRIVATE KEY": - return ParseDSAPrivateKey(block.Bytes) - case "OPENSSH PRIVATE KEY": - return parseOpenSSHPrivateKey(block.Bytes, unencryptedOpenSSHKey) - default: - return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) - } -} - -// ParseRawPrivateKeyWithPassphrase returns a private key decrypted with -// passphrase from a PEM encoded private key. If the passphrase is wrong, it -// will return x509.IncorrectPasswordError. 
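The intended calling pattern is to try ParsePrivateKey first and fall back to the passphrase variant when a PassphraseMissingError comes back. A sketch; the file path and passphrase are placeholders:

package main

import (
	"errors"
	"fmt"
	"log"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	pemBytes, err := os.ReadFile("id_ed25519") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.ParsePrivateKey(pemBytes)
	var missing *ssh.PassphraseMissingError
	if errors.As(err, &missing) {
		// Encrypted key: retry with the passphrase. For new-format OpenSSH
		// keys, missing.PublicKey may already be populated.
		signer, err = ssh.ParsePrivateKeyWithPassphrase(pemBytes, []byte("hunter2"))
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(signer.PublicKey().Type())
}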
-func ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase []byte) (interface{}, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("ssh: no key found") - } - - if block.Type == "OPENSSH PRIVATE KEY" { - return parseOpenSSHPrivateKey(block.Bytes, passphraseProtectedOpenSSHKey(passphrase)) - } - - if !encryptedBlock(block) || !x509.IsEncryptedPEMBlock(block) { - return nil, errors.New("ssh: not an encrypted key") - } - - buf, err := x509.DecryptPEMBlock(block, passphrase) - if err != nil { - if err == x509.IncorrectPasswordError { - return nil, err - } - return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err) - } - - switch block.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(buf) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(buf) - case "DSA PRIVATE KEY": - return ParseDSAPrivateKey(buf) - default: - return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) - } -} - -// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as -// specified by the OpenSSL DSA man page. -func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) { - var k struct { - Version int - P *big.Int - Q *big.Int - G *big.Int - Pub *big.Int - Priv *big.Int - } - rest, err := asn1.Unmarshal(der, &k) - if err != nil { - return nil, errors.New("ssh: failed to parse DSA key: " + err.Error()) - } - if len(rest) > 0 { - return nil, errors.New("ssh: garbage after DSA key") - } - - return &dsa.PrivateKey{ - PublicKey: dsa.PublicKey{ - Parameters: dsa.Parameters{ - P: k.P, - Q: k.Q, - G: k.G, - }, - Y: k.Pub, - }, - X: k.Priv, - }, nil -} - -func unencryptedOpenSSHKey(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) { - if kdfName != "none" || cipherName != "none" { - return nil, &PassphraseMissingError{} - } - if kdfOpts != "" { - return nil, errors.New("ssh: invalid openssh private key") - } - return privKeyBlock, nil -} - -func passphraseProtectedOpenSSHKey(passphrase []byte) openSSHDecryptFunc { - return func(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) { - if kdfName == "none" || cipherName == "none" { - return nil, errors.New("ssh: key is not password protected") - } - if kdfName != "bcrypt" { - return nil, fmt.Errorf("ssh: unknown KDF %q, only supports %q", kdfName, "bcrypt") - } - - var opts struct { - Salt string - Rounds uint32 - } - if err := Unmarshal([]byte(kdfOpts), &opts); err != nil { - return nil, err - } - - k, err := bcrypt_pbkdf.Key(passphrase, []byte(opts.Salt), int(opts.Rounds), 32+16) - if err != nil { - return nil, err - } - key, iv := k[:32], k[32:] - - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - switch cipherName { - case "aes256-ctr": - ctr := cipher.NewCTR(c, iv) - ctr.XORKeyStream(privKeyBlock, privKeyBlock) - case "aes256-cbc": - if len(privKeyBlock)%c.BlockSize() != 0 { - return nil, fmt.Errorf("ssh: invalid encrypted private key length, not a multiple of the block size") - } - cbc := cipher.NewCBCDecrypter(c, iv) - cbc.CryptBlocks(privKeyBlock, privKeyBlock) - default: - return nil, fmt.Errorf("ssh: unknown cipher %q, only supports %q or %q", cipherName, "aes256-ctr", "aes256-cbc") - } - - return privKeyBlock, nil - } -} - -type openSSHDecryptFunc func(CipherName, KdfName, KdfOpts string, PrivKeyBlock []byte) ([]byte, error) - -// parseOpenSSHPrivateKey parses an OpenSSH private key, using the decrypt -// function to unwrap the encrypted portion. 
unencryptedOpenSSHKey can be used -// as the decrypt function to parse an unencrypted private key. See -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key. -func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.PrivateKey, error) { - const magic = "openssh-key-v1\x00" - if len(key) < len(magic) || string(key[:len(magic)]) != magic { - return nil, errors.New("ssh: invalid openssh private key format") - } - remaining := key[len(magic):] - - var w struct { - CipherName string - KdfName string - KdfOpts string - NumKeys uint32 - PubKey []byte - PrivKeyBlock []byte - } - - if err := Unmarshal(remaining, &w); err != nil { - return nil, err - } - if w.NumKeys != 1 { - // We only support single key files, and so does OpenSSH. - // https://github.com/openssh/openssh-portable/blob/4103a3ec7/sshkey.c#L4171 - return nil, errors.New("ssh: multi-key files are not supported") - } - - privKeyBlock, err := decrypt(w.CipherName, w.KdfName, w.KdfOpts, w.PrivKeyBlock) - if err != nil { - if err, ok := err.(*PassphraseMissingError); ok { - pub, errPub := ParsePublicKey(w.PubKey) - if errPub != nil { - return nil, fmt.Errorf("ssh: failed to parse embedded public key: %v", errPub) - } - err.PublicKey = pub - } - return nil, err - } - - pk1 := struct { - Check1 uint32 - Check2 uint32 - Keytype string - Rest []byte `ssh:"rest"` - }{} - - if err := Unmarshal(privKeyBlock, &pk1); err != nil || pk1.Check1 != pk1.Check2 { - if w.CipherName != "none" { - return nil, x509.IncorrectPasswordError - } - return nil, errors.New("ssh: malformed OpenSSH key") - } - - switch pk1.Keytype { - case KeyAlgoRSA: - // https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L2760-L2773 - key := struct { - N *big.Int - E *big.Int - D *big.Int - Iqmp *big.Int - P *big.Int - Q *big.Int - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - if err := checkOpenSSHKeyPadding(key.Pad); err != nil { - return nil, err - } - - pk := &rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - N: key.N, - E: int(key.E.Int64()), - }, - D: key.D, - Primes: []*big.Int{key.P, key.Q}, - } - - if err := pk.Validate(); err != nil { - return nil, err - } - - pk.Precompute() - - return pk, nil - case KeyAlgoED25519: - key := struct { - Pub []byte - Priv []byte - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - if len(key.Priv) != ed25519.PrivateKeySize { - return nil, errors.New("ssh: private key unexpected length") - } - - if err := checkOpenSSHKeyPadding(key.Pad); err != nil { - return nil, err - } - - pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize)) - copy(pk, key.Priv) - return &pk, nil - case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: - key := struct { - Curve string - Pub []byte - D *big.Int - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - if err := checkOpenSSHKeyPadding(key.Pad); err != nil { - return nil, err - } - - var curve elliptic.Curve - switch key.Curve { - case "nistp256": - curve = elliptic.P256() - case "nistp384": - curve = elliptic.P384() - case "nistp521": - curve = elliptic.P521() - default: - return nil, errors.New("ssh: unhandled elliptic curve: " + key.Curve) - } - - X, Y := elliptic.Unmarshal(curve, key.Pub) - if X == nil || Y == nil { - return nil, errors.New("ssh: failed to unmarshal public key") - } - - if key.D.Cmp(curve.Params().N) >= 0 
{ - return nil, errors.New("ssh: scalar is out of range") - } - - x, y := curve.ScalarBaseMult(key.D.Bytes()) - if x.Cmp(X) != 0 || y.Cmp(Y) != 0 { - return nil, errors.New("ssh: public key does not match private key") - } - - return &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - Curve: curve, - X: X, - Y: Y, - }, - D: key.D, - }, nil - default: - return nil, errors.New("ssh: unhandled key type") - } -} - -func checkOpenSSHKeyPadding(pad []byte) error { - for i, b := range pad { - if int(b) != i+1 { - return errors.New("ssh: padding not as expected") - } - } - return nil -} - -// FingerprintLegacyMD5 returns the user presentation of the key's -// fingerprint as described by RFC 4716 section 4. -func FingerprintLegacyMD5(pubKey PublicKey) string { - md5sum := md5.Sum(pubKey.Marshal()) - hexarray := make([]string, len(md5sum)) - for i, c := range md5sum { - hexarray[i] = hex.EncodeToString([]byte{c}) - } - return strings.Join(hexarray, ":") -} - -// FingerprintSHA256 returns the user presentation of the key's -// fingerprint as unpadded base64 encoded sha256 hash. -// This format was introduced from OpenSSH 6.8. -// https://www.openssh.com/txt/release-6.8 -// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding) -func FingerprintSHA256(pubKey PublicKey) string { - sha256sum := sha256.Sum256(pubKey.Marshal()) - hash := base64.RawStdEncoding.EncodeToString(sha256sum[:]) - return "SHA256:" + hash -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go deleted file mode 100644 index 260cfe58c65..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go +++ /dev/null @@ -1,540 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package knownhosts implements a parser for the OpenSSH known_hosts -// host key database, and provides utility functions for writing -// OpenSSH compliant known_hosts files. -package knownhosts - -import ( - "bufio" - "bytes" - "crypto/hmac" - "crypto/rand" - "crypto/sha1" - "encoding/base64" - "errors" - "fmt" - "io" - "net" - "os" - "strings" - - "golang.org/x/crypto/ssh" -) - -// See the sshd manpage -// (http://man.openbsd.org/sshd#SSH_KNOWN_HOSTS_FILE_FORMAT) for -// background. - -type addr struct{ host, port string } - -func (a *addr) String() string { - h := a.host - if strings.Contains(h, ":") { - h = "[" + h + "]" - } - return h + ":" + a.port -} - -type matcher interface { - match(addr) bool -} - -type hostPattern struct { - negate bool - addr addr -} - -func (p *hostPattern) String() string { - n := "" - if p.negate { - n = "!" 
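The two fingerprint helpers above match ssh-keygen's output formats, FingerprintSHA256 being the modern default and FingerprintLegacyMD5 the old colon-separated hex. A quick sketch:

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	pub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	sshPub, err := ssh.NewPublicKey(pub)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ssh.FingerprintSHA256(sshPub))    // SHA256:..., as printed by ssh-keygen -lf
	fmt.Println(ssh.FingerprintLegacyMD5(sshPub)) // aa:bb:cc:...
}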
- } - - return n + p.addr.String() -} - -type hostPatterns []hostPattern - -func (ps hostPatterns) match(a addr) bool { - matched := false - for _, p := range ps { - if !p.match(a) { - continue - } - if p.negate { - return false - } - matched = true - } - return matched -} - -// See -// https://android.googlesource.com/platform/external/openssh/+/ab28f5495c85297e7a597c1ba62e996416da7c7e/addrmatch.c -// The matching of * has no regard for separators, unlike filesystem globs -func wildcardMatch(pat []byte, str []byte) bool { - for { - if len(pat) == 0 { - return len(str) == 0 - } - if len(str) == 0 { - return false - } - - if pat[0] == '*' { - if len(pat) == 1 { - return true - } - - for j := range str { - if wildcardMatch(pat[1:], str[j:]) { - return true - } - } - return false - } - - if pat[0] == '?' || pat[0] == str[0] { - pat = pat[1:] - str = str[1:] - } else { - return false - } - } -} - -func (p *hostPattern) match(a addr) bool { - return wildcardMatch([]byte(p.addr.host), []byte(a.host)) && p.addr.port == a.port -} - -type keyDBLine struct { - cert bool - matcher matcher - knownKey KnownKey -} - -func serialize(k ssh.PublicKey) string { - return k.Type() + " " + base64.StdEncoding.EncodeToString(k.Marshal()) -} - -func (l *keyDBLine) match(a addr) bool { - return l.matcher.match(a) -} - -type hostKeyDB struct { - // Serialized version of revoked keys - revoked map[string]*KnownKey - lines []keyDBLine -} - -func newHostKeyDB() *hostKeyDB { - db := &hostKeyDB{ - revoked: make(map[string]*KnownKey), - } - - return db -} - -func keyEq(a, b ssh.PublicKey) bool { - return bytes.Equal(a.Marshal(), b.Marshal()) -} - -// IsAuthorityForHost can be used as a callback in ssh.CertChecker -func (db *hostKeyDB) IsHostAuthority(remote ssh.PublicKey, address string) bool { - h, p, err := net.SplitHostPort(address) - if err != nil { - return false - } - a := addr{host: h, port: p} - - for _, l := range db.lines { - if l.cert && keyEq(l.knownKey.Key, remote) && l.match(a) { - return true - } - } - return false -} - -// IsRevoked can be used as a callback in ssh.CertChecker -func (db *hostKeyDB) IsRevoked(key *ssh.Certificate) bool { - _, ok := db.revoked[string(key.Marshal())] - return ok -} - -const markerCert = "@cert-authority" -const markerRevoked = "@revoked" - -func nextWord(line []byte) (string, []byte) { - i := bytes.IndexAny(line, "\t ") - if i == -1 { - return string(line), nil - } - - return string(line[:i]), bytes.TrimSpace(line[i:]) -} - -func parseLine(line []byte) (marker, host string, key ssh.PublicKey, err error) { - if w, next := nextWord(line); w == markerCert || w == markerRevoked { - marker = w - line = next - } - - host, line = nextWord(line) - if len(line) == 0 { - return "", "", nil, errors.New("knownhosts: missing host pattern") - } - - // ignore the keytype as it's in the key blob anyway. 
- _, line = nextWord(line) - if len(line) == 0 { - return "", "", nil, errors.New("knownhosts: missing key type pattern") - } - - keyBlob, _ := nextWord(line) - - keyBytes, err := base64.StdEncoding.DecodeString(keyBlob) - if err != nil { - return "", "", nil, err - } - key, err = ssh.ParsePublicKey(keyBytes) - if err != nil { - return "", "", nil, err - } - - return marker, host, key, nil -} - -func (db *hostKeyDB) parseLine(line []byte, filename string, linenum int) error { - marker, pattern, key, err := parseLine(line) - if err != nil { - return err - } - - if marker == markerRevoked { - db.revoked[string(key.Marshal())] = &KnownKey{ - Key: key, - Filename: filename, - Line: linenum, - } - - return nil - } - - entry := keyDBLine{ - cert: marker == markerCert, - knownKey: KnownKey{ - Filename: filename, - Line: linenum, - Key: key, - }, - } - - if pattern[0] == '|' { - entry.matcher, err = newHashedHost(pattern) - } else { - entry.matcher, err = newHostnameMatcher(pattern) - } - - if err != nil { - return err - } - - db.lines = append(db.lines, entry) - return nil -} - -func newHostnameMatcher(pattern string) (matcher, error) { - var hps hostPatterns - for _, p := range strings.Split(pattern, ",") { - if len(p) == 0 { - continue - } - - var a addr - var negate bool - if p[0] == '!' { - negate = true - p = p[1:] - } - - if len(p) == 0 { - return nil, errors.New("knownhosts: negation without following hostname") - } - - var err error - if p[0] == '[' { - a.host, a.port, err = net.SplitHostPort(p) - if err != nil { - return nil, err - } - } else { - a.host, a.port, err = net.SplitHostPort(p) - if err != nil { - a.host = p - a.port = "22" - } - } - hps = append(hps, hostPattern{ - negate: negate, - addr: a, - }) - } - return hps, nil -} - -// KnownKey represents a key declared in a known_hosts file. -type KnownKey struct { - Key ssh.PublicKey - Filename string - Line int -} - -func (k *KnownKey) String() string { - return fmt.Sprintf("%s:%d: %s", k.Filename, k.Line, serialize(k.Key)) -} - -// KeyError is returned if we did not find the key in the host key -// database, or there was a mismatch. Typically, in batch -// applications, this should be interpreted as failure. Interactive -// applications can offer an interactive prompt to the user. -type KeyError struct { - // Want holds the accepted host keys. For each key algorithm, - // there can be one hostkey. If Want is empty, the host is - // unknown. If Want is non-empty, there was a mismatch, which - // can signify a MITM attack. - Want []KnownKey -} - -func (u *KeyError) Error() string { - if len(u.Want) == 0 { - return "knownhosts: key is unknown" - } - return "knownhosts: key mismatch" -} - -// RevokedError is returned if we found a key that was revoked. -type RevokedError struct { - Revoked KnownKey -} - -func (r *RevokedError) Error() string { - return "knownhosts: key is revoked" -} - -// check checks a key against the host database. This should not be -// used for verifying certificates. -func (db *hostKeyDB) check(address string, remote net.Addr, remoteKey ssh.PublicKey) error { - if revoked := db.revoked[string(remoteKey.Marshal())]; revoked != nil { - return &RevokedError{Revoked: *revoked} - } - - host, port, err := net.SplitHostPort(remote.String()) - if err != nil { - return fmt.Errorf("knownhosts: SplitHostPort(%s): %v", remote, err) - } - - hostToCheck := addr{host, port} - if address != "" { - // Give preference to the hostname if available. 
- host, port, err := net.SplitHostPort(address) - if err != nil { - return fmt.Errorf("knownhosts: SplitHostPort(%s): %v", address, err) - } - - hostToCheck = addr{host, port} - } - - return db.checkAddr(hostToCheck, remoteKey) -} - -// checkAddr checks if we can find the given public key for the -// given address. If we only find an entry for the IP address, -// or only the hostname, then this still succeeds. -func (db *hostKeyDB) checkAddr(a addr, remoteKey ssh.PublicKey) error { - // TODO(hanwen): are these the right semantics? What if there - // is just a key for the IP address, but not for the - // hostname? - - // Algorithm => key. - knownKeys := map[string]KnownKey{} - for _, l := range db.lines { - if l.match(a) { - typ := l.knownKey.Key.Type() - if _, ok := knownKeys[typ]; !ok { - knownKeys[typ] = l.knownKey - } - } - } - - keyErr := &KeyError{} - for _, v := range knownKeys { - keyErr.Want = append(keyErr.Want, v) - } - - // Unknown remote host. - if len(knownKeys) == 0 { - return keyErr - } - - // If the remote host starts using a different, unknown key type, we - // also interpret that as a mismatch. - if known, ok := knownKeys[remoteKey.Type()]; !ok || !keyEq(known.Key, remoteKey) { - return keyErr - } - - return nil -} - -// The Read function parses file contents. -func (db *hostKeyDB) Read(r io.Reader, filename string) error { - scanner := bufio.NewScanner(r) - - lineNum := 0 - for scanner.Scan() { - lineNum++ - line := scanner.Bytes() - line = bytes.TrimSpace(line) - if len(line) == 0 || line[0] == '#' { - continue - } - - if err := db.parseLine(line, filename, lineNum); err != nil { - return fmt.Errorf("knownhosts: %s:%d: %v", filename, lineNum, err) - } - } - return scanner.Err() -} - -// New creates a host key callback from the given OpenSSH host key -// files. The returned callback is for use in -// ssh.ClientConfig.HostKeyCallback. By preference, the key check -// operates on the hostname if available, i.e. if a server changes its -// IP address, the host key check will still succeed, even though a -// record of the new IP address is not available. -func New(files ...string) (ssh.HostKeyCallback, error) { - db := newHostKeyDB() - for _, fn := range files { - f, err := os.Open(fn) - if err != nil { - return nil, err - } - defer f.Close() - if err := db.Read(f, fn); err != nil { - return nil, err - } - } - - var certChecker ssh.CertChecker - certChecker.IsHostAuthority = db.IsHostAuthority - certChecker.IsRevoked = db.IsRevoked - certChecker.HostKeyFallback = db.check - - return certChecker.CheckHostKey, nil -} - -// Normalize normalizes an address into the form used in known_hosts -func Normalize(address string) string { - host, port, err := net.SplitHostPort(address) - if err != nil { - host = address - port = "22" - } - entry := host - if port != "22" { - entry = "[" + entry + "]:" + port - } else if strings.Contains(host, ":") && !strings.HasPrefix(host, "[") { - entry = "[" + entry + "]" - } - return entry -} - -// Line returns a line to add append to the known_hosts files. -func Line(addresses []string, key ssh.PublicKey) string { - var trimmed []string - for _, a := range addresses { - trimmed = append(trimmed, Normalize(a)) - } - - return strings.Join(trimmed, ",") + " " + serialize(key) -} - -// HashHostname hashes the given hostname. The hostname is not -// normalized before hashing. -func HashHostname(hostname string) string { - // TODO(hanwen): check if we can safely normalize this always. 
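In practice the package is consumed through New, whose callback plugs straight into ssh.ClientConfig; Normalize and Line help when appending newly trusted hosts to the file. A sketch with illustrative paths and addresses:

package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/knownhosts"
)

func main() {
	// Build a HostKeyCallback from one or more known_hosts files.
	cb, err := knownhosts.New("/home/user/.ssh/known_hosts") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	cfg := &ssh.ClientConfig{
		User:            "user",
		Auth:            []ssh.AuthMethod{ /* e.g. ssh.PublicKeys(signer) */ },
		HostKeyCallback: cb,
	}
	_ = cfg // pass to ssh.Dial("tcp", "example.com:22", cfg)

	// Normalize is what Line applies to each address before serializing.
	fmt.Println(knownhosts.Normalize("example.com:2222")) // [example.com]:2222
	fmt.Println(knownhosts.Normalize("example.com:22"))   // example.com
}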
- salt := make([]byte, sha1.Size) - - _, err := rand.Read(salt) - if err != nil { - panic(fmt.Sprintf("crypto/rand failure %v", err)) - } - - hash := hashHost(hostname, salt) - return encodeHash(sha1HashType, salt, hash) -} - -func decodeHash(encoded string) (hashType string, salt, hash []byte, err error) { - if len(encoded) == 0 || encoded[0] != '|' { - err = errors.New("knownhosts: hashed host must start with '|'") - return - } - components := strings.Split(encoded, "|") - if len(components) != 4 { - err = fmt.Errorf("knownhosts: got %d components, want 3", len(components)) - return - } - - hashType = components[1] - if salt, err = base64.StdEncoding.DecodeString(components[2]); err != nil { - return - } - if hash, err = base64.StdEncoding.DecodeString(components[3]); err != nil { - return - } - return -} - -func encodeHash(typ string, salt []byte, hash []byte) string { - return strings.Join([]string{"", - typ, - base64.StdEncoding.EncodeToString(salt), - base64.StdEncoding.EncodeToString(hash), - }, "|") -} - -// See https://android.googlesource.com/platform/external/openssh/+/ab28f5495c85297e7a597c1ba62e996416da7c7e/hostfile.c#120 -func hashHost(hostname string, salt []byte) []byte { - mac := hmac.New(sha1.New, salt) - mac.Write([]byte(hostname)) - return mac.Sum(nil) -} - -type hashedHost struct { - salt []byte - hash []byte -} - -const sha1HashType = "1" - -func newHashedHost(encoded string) (*hashedHost, error) { - typ, salt, hash, err := decodeHash(encoded) - if err != nil { - return nil, err - } - - // The type field seems for future algorithm agility, but it's - // actually hardcoded in openssh currently, see - // https://android.googlesource.com/platform/external/openssh/+/ab28f5495c85297e7a597c1ba62e996416da7c7e/hostfile.c#120 - if typ != sha1HashType { - return nil, fmt.Errorf("knownhosts: got hash type %s, must be '1'", typ) - } - - return &hashedHost{salt: salt, hash: hash}, nil -} - -func (h *hashedHost) match(a addr) bool { - return bytes.Equal(hashHost(Normalize(a.String()), h.salt), h.hash) -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/mac.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/mac.go deleted file mode 100644 index c07a06285e6..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/mac.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -// Message authentication support - -import ( - "crypto/hmac" - "crypto/sha1" - "crypto/sha256" - "hash" -) - -type macMode struct { - keySize int - etm bool - new func(key []byte) hash.Hash -} - -// truncatingMAC wraps around a hash.Hash and truncates the output digest to -// a given size. 
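A hashed known_hosts pattern is the string "|1|" + base64(salt) + "|" + base64(HMAC-SHA1(salt, host)), exactly what encodeHash and hashHost produce above. A sketch that inlines HashHostname; the hostname is illustrative and should already be in Normalize()d form:

package main

import (
	"crypto/hmac"
	"crypto/rand"
	"crypto/sha1"
	"encoding/base64"
	"fmt"
	"log"
)

func main() {
	host := "example.com"
	salt := make([]byte, sha1.Size)
	if _, err := rand.Read(salt); err != nil {
		log.Fatal(err)
	}
	mac := hmac.New(sha1.New, salt)
	mac.Write([]byte(host))
	fmt.Printf("|1|%s|%s\n",
		base64.StdEncoding.EncodeToString(salt),
		base64.StdEncoding.EncodeToString(mac.Sum(nil)))
}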
-type truncatingMAC struct { - length int - hmac hash.Hash -} - -func (t truncatingMAC) Write(data []byte) (int, error) { - return t.hmac.Write(data) -} - -func (t truncatingMAC) Sum(in []byte) []byte { - out := t.hmac.Sum(in) - return out[:len(in)+t.length] -} - -func (t truncatingMAC) Reset() { - t.hmac.Reset() -} - -func (t truncatingMAC) Size() int { - return t.length -} - -func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } - -var macModes = map[string]*macMode{ - "hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash { - return hmac.New(sha256.New, key) - }}, - "hmac-sha2-256": {32, false, func(key []byte) hash.Hash { - return hmac.New(sha256.New, key) - }}, - "hmac-sha1": {20, false, func(key []byte) hash.Hash { - return hmac.New(sha1.New, key) - }}, - "hmac-sha1-96": {20, false, func(key []byte) hash.Hash { - return truncatingMAC{12, hmac.New(sha1.New, key)} - }}, -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/messages.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/messages.go deleted file mode 100644 index ac41a4168bf..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/messages.go +++ /dev/null @@ -1,866 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math/big" - "reflect" - "strconv" - "strings" -) - -// These are SSH message type numbers. They are scattered around several -// documents but many were taken from [SSH-PARAMETERS]. -const ( - msgIgnore = 2 - msgUnimplemented = 3 - msgDebug = 4 - msgNewKeys = 21 -) - -// SSH messages: -// -// These structures mirror the wire format of the corresponding SSH messages. -// They are marshaled using reflection with the marshal and unmarshal functions -// in this file. The only wrinkle is that a final member of type []byte with a -// ssh tag of "rest" receives the remainder of a packet when unmarshaling. - -// See RFC 4253, section 11.1. -const msgDisconnect = 1 - -// disconnectMsg is the message that signals a disconnect. It is also -// the error type returned from mux.Wait() -type disconnectMsg struct { - Reason uint32 `sshtype:"1"` - Message string - Language string -} - -func (d *disconnectMsg) Error() string { - return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message) -} - -// See RFC 4253, section 7.1. -const msgKexInit = 20 - -type kexInitMsg struct { - Cookie [16]byte `sshtype:"20"` - KexAlgos []string - ServerHostKeyAlgos []string - CiphersClientServer []string - CiphersServerClient []string - MACsClientServer []string - MACsServerClient []string - CompressionClientServer []string - CompressionServerClient []string - LanguagesClientServer []string - LanguagesServerClient []string - FirstKexFollows bool - Reserved uint32 -} - -// See RFC 4253, section 8. - -// Diffie-Helman -const msgKexDHInit = 30 - -type kexDHInitMsg struct { - X *big.Int `sshtype:"30"` -} - -const msgKexECDHInit = 30 - -type kexECDHInitMsg struct { - ClientPubKey []byte `sshtype:"30"` -} - -const msgKexECDHReply = 31 - -type kexECDHReplyMsg struct { - HostKey []byte `sshtype:"31"` - EphemeralPubKey []byte - Signature []byte -} - -const msgKexDHReply = 31 - -type kexDHReplyMsg struct { - HostKey []byte `sshtype:"31"` - Y *big.Int - Signature []byte -} - -// See RFC 4419, section 5. 
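The only non-obvious entry in the macModes table above is hmac-sha1-96: a full HMAC-SHA1 truncated to its first 12 bytes, which is all the truncatingMAC wrapper does. A standalone sketch:

package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"fmt"
)

func main() {
	key := []byte("example 20-byte key.")
	mac := hmac.New(sha1.New, key)
	mac.Write([]byte("packet payload"))
	full := mac.Sum(nil)
	fmt.Printf("hmac-sha1:    %x\n", full)
	fmt.Printf("hmac-sha1-96: %x\n", full[:12]) // truncatingMAC with length 12
}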
-const msgKexDHGexGroup = 31 - -type kexDHGexGroupMsg struct { - P *big.Int `sshtype:"31"` - G *big.Int -} - -const msgKexDHGexInit = 32 - -type kexDHGexInitMsg struct { - X *big.Int `sshtype:"32"` -} - -const msgKexDHGexReply = 33 - -type kexDHGexReplyMsg struct { - HostKey []byte `sshtype:"33"` - Y *big.Int - Signature []byte -} - -const msgKexDHGexRequest = 34 - -type kexDHGexRequestMsg struct { - MinBits uint32 `sshtype:"34"` - PreferedBits uint32 - MaxBits uint32 -} - -// See RFC 4253, section 10. -const msgServiceRequest = 5 - -type serviceRequestMsg struct { - Service string `sshtype:"5"` -} - -// See RFC 4253, section 10. -const msgServiceAccept = 6 - -type serviceAcceptMsg struct { - Service string `sshtype:"6"` -} - -// See RFC 4252, section 5. -const msgUserAuthRequest = 50 - -type userAuthRequestMsg struct { - User string `sshtype:"50"` - Service string - Method string - Payload []byte `ssh:"rest"` -} - -// Used for debug printouts of packets. -type userAuthSuccessMsg struct { -} - -// See RFC 4252, section 5.1 -const msgUserAuthFailure = 51 - -type userAuthFailureMsg struct { - Methods []string `sshtype:"51"` - PartialSuccess bool -} - -// See RFC 4252, section 5.1 -const msgUserAuthSuccess = 52 - -// See RFC 4252, section 5.4 -const msgUserAuthBanner = 53 - -type userAuthBannerMsg struct { - Message string `sshtype:"53"` - // unused, but required to allow message parsing - Language string -} - -// See RFC 4256, section 3.2 -const msgUserAuthInfoRequest = 60 -const msgUserAuthInfoResponse = 61 - -type userAuthInfoRequestMsg struct { - User string `sshtype:"60"` - Instruction string - DeprecatedLanguage string - NumPrompts uint32 - Prompts []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpen = 90 - -type channelOpenMsg struct { - ChanType string `sshtype:"90"` - PeersID uint32 - PeersWindow uint32 - MaxPacketSize uint32 - TypeSpecificData []byte `ssh:"rest"` -} - -const msgChannelExtendedData = 95 -const msgChannelData = 94 - -// Used for debug print outs of packets. -type channelDataMsg struct { - PeersID uint32 `sshtype:"94"` - Length uint32 - Rest []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpenConfirm = 91 - -type channelOpenConfirmMsg struct { - PeersID uint32 `sshtype:"91"` - MyID uint32 - MyWindow uint32 - MaxPacketSize uint32 - TypeSpecificData []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpenFailure = 92 - -type channelOpenFailureMsg struct { - PeersID uint32 `sshtype:"92"` - Reason RejectionReason - Message string - Language string -} - -const msgChannelRequest = 98 - -type channelRequestMsg struct { - PeersID uint32 `sshtype:"98"` - Request string - WantReply bool - RequestSpecificData []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.4. -const msgChannelSuccess = 99 - -type channelRequestSuccessMsg struct { - PeersID uint32 `sshtype:"99"` -} - -// See RFC 4254, section 5.4. 
-const msgChannelFailure = 100 - -type channelRequestFailureMsg struct { - PeersID uint32 `sshtype:"100"` -} - -// See RFC 4254, section 5.3 -const msgChannelClose = 97 - -type channelCloseMsg struct { - PeersID uint32 `sshtype:"97"` -} - -// See RFC 4254, section 5.3 -const msgChannelEOF = 96 - -type channelEOFMsg struct { - PeersID uint32 `sshtype:"96"` -} - -// See RFC 4254, section 4 -const msgGlobalRequest = 80 - -type globalRequestMsg struct { - Type string `sshtype:"80"` - WantReply bool - Data []byte `ssh:"rest"` -} - -// See RFC 4254, section 4 -const msgRequestSuccess = 81 - -type globalRequestSuccessMsg struct { - Data []byte `ssh:"rest" sshtype:"81"` -} - -// See RFC 4254, section 4 -const msgRequestFailure = 82 - -type globalRequestFailureMsg struct { - Data []byte `ssh:"rest" sshtype:"82"` -} - -// See RFC 4254, section 5.2 -const msgChannelWindowAdjust = 93 - -type windowAdjustMsg struct { - PeersID uint32 `sshtype:"93"` - AdditionalBytes uint32 -} - -// See RFC 4252, section 7 -const msgUserAuthPubKeyOk = 60 - -type userAuthPubKeyOkMsg struct { - Algo string `sshtype:"60"` - PubKey []byte -} - -// See RFC 4462, section 3 -const msgUserAuthGSSAPIResponse = 60 - -type userAuthGSSAPIResponse struct { - SupportMech []byte `sshtype:"60"` -} - -const msgUserAuthGSSAPIToken = 61 - -type userAuthGSSAPIToken struct { - Token []byte `sshtype:"61"` -} - -const msgUserAuthGSSAPIMIC = 66 - -type userAuthGSSAPIMIC struct { - MIC []byte `sshtype:"66"` -} - -// See RFC 4462, section 3.9 -const msgUserAuthGSSAPIErrTok = 64 - -type userAuthGSSAPIErrTok struct { - ErrorToken []byte `sshtype:"64"` -} - -// See RFC 4462, section 3.8 -const msgUserAuthGSSAPIError = 65 - -type userAuthGSSAPIError struct { - MajorStatus uint32 `sshtype:"65"` - MinorStatus uint32 - Message string - LanguageTag string -} - -// typeTags returns the possible type bytes for the given reflect.Type, which -// should be a struct. The possible values are separated by a '|' character. -func typeTags(structType reflect.Type) (tags []byte) { - tagStr := structType.Field(0).Tag.Get("sshtype") - - for _, tag := range strings.Split(tagStr, "|") { - i, err := strconv.Atoi(tag) - if err == nil { - tags = append(tags, byte(i)) - } - } - - return tags -} - -func fieldError(t reflect.Type, field int, problem string) error { - if problem != "" { - problem = ": " + problem - } - return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem) -} - -var errShortRead = errors.New("ssh: short read") - -// Unmarshal parses data in SSH wire format into a structure. The out -// argument should be a pointer to struct. If the first member of the -// struct has the "sshtype" tag set to a '|'-separated set of numbers -// in decimal, the packet must start with one of those numbers. In -// case of error, Unmarshal returns a ParseError or -// UnexpectedMessageError. 
-func Unmarshal(data []byte, out interface{}) error { - v := reflect.ValueOf(out).Elem() - structType := v.Type() - expectedTypes := typeTags(structType) - - var expectedType byte - if len(expectedTypes) > 0 { - expectedType = expectedTypes[0] - } - - if len(data) == 0 { - return parseError(expectedType) - } - - if len(expectedTypes) > 0 { - goodType := false - for _, e := range expectedTypes { - if e > 0 && data[0] == e { - goodType = true - break - } - } - if !goodType { - return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes) - } - data = data[1:] - } - - var ok bool - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - t := field.Type() - switch t.Kind() { - case reflect.Bool: - if len(data) < 1 { - return errShortRead - } - field.SetBool(data[0] != 0) - data = data[1:] - case reflect.Array: - if t.Elem().Kind() != reflect.Uint8 { - return fieldError(structType, i, "array of unsupported type") - } - if len(data) < t.Len() { - return errShortRead - } - for j, n := 0, t.Len(); j < n; j++ { - field.Index(j).Set(reflect.ValueOf(data[j])) - } - data = data[t.Len():] - case reflect.Uint64: - var u64 uint64 - if u64, data, ok = parseUint64(data); !ok { - return errShortRead - } - field.SetUint(u64) - case reflect.Uint32: - var u32 uint32 - if u32, data, ok = parseUint32(data); !ok { - return errShortRead - } - field.SetUint(uint64(u32)) - case reflect.Uint8: - if len(data) < 1 { - return errShortRead - } - field.SetUint(uint64(data[0])) - data = data[1:] - case reflect.String: - var s []byte - if s, data, ok = parseString(data); !ok { - return fieldError(structType, i, "") - } - field.SetString(string(s)) - case reflect.Slice: - switch t.Elem().Kind() { - case reflect.Uint8: - if structType.Field(i).Tag.Get("ssh") == "rest" { - field.Set(reflect.ValueOf(data)) - data = nil - } else { - var s []byte - if s, data, ok = parseString(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(s)) - } - case reflect.String: - var nl []string - if nl, data, ok = parseNameList(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(nl)) - default: - return fieldError(structType, i, "slice of unsupported type") - } - case reflect.Ptr: - if t == bigIntType { - var n *big.Int - if n, data, ok = parseInt(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(n)) - } else { - return fieldError(structType, i, "pointer to unsupported type") - } - default: - return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t)) - } - } - - if len(data) != 0 { - return parseError(expectedType) - } - - return nil -} - -// Marshal serializes the message in msg to SSH wire format. The msg -// argument should be a struct or pointer to struct. If the first -// member has the "sshtype" tag set to a number in decimal, that -// number is prepended to the result. If the last of member has the -// "ssh" tag set to "rest", its contents are appended to the output. 
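Marshal and Unmarshal are exported, so the same reflection-based codec works for user-defined messages. A round-trip sketch; message number 192 is a made-up local extension, not an assigned SSH message type:

package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

// pingMsg follows the conventions above: the sshtype tag prepends and
// validates the message byte, and a trailing `ssh:"rest"` field soaks up
// the remainder of the packet.
type pingMsg struct {
	Seq     uint32 `sshtype:"192"`
	Payload []byte `ssh:"rest"`
}

func main() {
	wire := ssh.Marshal(pingMsg{Seq: 7, Payload: []byte("hi")})
	var got pingMsg
	if err := ssh.Unmarshal(wire, &got); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("% x -> %+v\n", wire, got)
}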
-func Marshal(msg interface{}) []byte { - out := make([]byte, 0, 64) - return marshalStruct(out, msg) -} - -func marshalStruct(out []byte, msg interface{}) []byte { - v := reflect.Indirect(reflect.ValueOf(msg)) - msgTypes := typeTags(v.Type()) - if len(msgTypes) > 0 { - out = append(out, msgTypes[0]) - } - - for i, n := 0, v.NumField(); i < n; i++ { - field := v.Field(i) - switch t := field.Type(); t.Kind() { - case reflect.Bool: - var v uint8 - if field.Bool() { - v = 1 - } - out = append(out, v) - case reflect.Array: - if t.Elem().Kind() != reflect.Uint8 { - panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface())) - } - for j, l := 0, t.Len(); j < l; j++ { - out = append(out, uint8(field.Index(j).Uint())) - } - case reflect.Uint32: - out = appendU32(out, uint32(field.Uint())) - case reflect.Uint64: - out = appendU64(out, uint64(field.Uint())) - case reflect.Uint8: - out = append(out, uint8(field.Uint())) - case reflect.String: - s := field.String() - out = appendInt(out, len(s)) - out = append(out, s...) - case reflect.Slice: - switch t.Elem().Kind() { - case reflect.Uint8: - if v.Type().Field(i).Tag.Get("ssh") != "rest" { - out = appendInt(out, field.Len()) - } - out = append(out, field.Bytes()...) - case reflect.String: - offset := len(out) - out = appendU32(out, 0) - if n := field.Len(); n > 0 { - for j := 0; j < n; j++ { - f := field.Index(j) - if j != 0 { - out = append(out, ',') - } - out = append(out, f.String()...) - } - // overwrite length value - binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4)) - } - default: - panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface())) - } - case reflect.Ptr: - if t == bigIntType { - var n *big.Int - nValue := reflect.ValueOf(&n) - nValue.Elem().Set(field) - needed := intLength(n) - oldLength := len(out) - - if cap(out)-len(out) < needed { - newOut := make([]byte, len(out), 2*(len(out)+needed)) - copy(newOut, out) - out = newOut - } - out = out[:oldLength+needed] - marshalInt(out[oldLength:], n) - } else { - panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface())) - } - } - } - - return out -} - -var bigOne = big.NewInt(1) - -func parseString(in []byte) (out, rest []byte, ok bool) { - if len(in) < 4 { - return - } - length := binary.BigEndian.Uint32(in) - in = in[4:] - if uint32(len(in)) < length { - return - } - out = in[:length] - rest = in[length:] - ok = true - return -} - -var ( - comma = []byte{','} - emptyNameList = []string{} -) - -func parseNameList(in []byte) (out []string, rest []byte, ok bool) { - contents, rest, ok := parseString(in) - if !ok { - return - } - if len(contents) == 0 { - out = emptyNameList - return - } - parts := bytes.Split(contents, comma) - out = make([]string, len(parts)) - for i, part := range parts { - out[i] = string(part) - } - return -} - -func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) { - contents, rest, ok := parseString(in) - if !ok { - return - } - out = new(big.Int) - - if len(contents) > 0 && contents[0]&0x80 == 0x80 { - // This is a negative number - notBytes := make([]byte, len(contents)) - for i := range notBytes { - notBytes[i] = ^contents[i] - } - out.SetBytes(notBytes) - out.Add(out, bigOne) - out.Neg(out) - } else { - // Positive number - out.SetBytes(contents) - } - ok = true - return -} - -func parseUint32(in []byte) (uint32, []byte, bool) { - if len(in) < 4 { - return 0, nil, false - } - return binary.BigEndian.Uint32(in), in[4:], true -} - -func parseUint64(in []byte) (uint64, []byte, 
bool) { - if len(in) < 8 { - return 0, nil, false - } - return binary.BigEndian.Uint64(in), in[8:], true -} - -func intLength(n *big.Int) int { - length := 4 /* length bytes */ - if n.Sign() < 0 { - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bitLen := nMinus1.BitLen() - if bitLen%8 == 0 { - // The number will need 0xff padding - length++ - } - length += (bitLen + 7) / 8 - } else if n.Sign() == 0 { - // A zero is the zero length string - } else { - bitLen := n.BitLen() - if bitLen%8 == 0 { - // The number will need 0x00 padding - length++ - } - length += (bitLen + 7) / 8 - } - - return length -} - -func marshalUint32(to []byte, n uint32) []byte { - binary.BigEndian.PutUint32(to, n) - return to[4:] -} - -func marshalUint64(to []byte, n uint64) []byte { - binary.BigEndian.PutUint64(to, n) - return to[8:] -} - -func marshalInt(to []byte, n *big.Int) []byte { - lengthBytes := to - to = to[4:] - length := 0 - - if n.Sign() < 0 { - // A negative number has to be converted to two's-complement - // form. So we'll subtract 1 and invert. If the - // most-significant-bit isn't set then we'll need to pad the - // beginning with 0xff in order to keep the number negative. - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bytes := nMinus1.Bytes() - for i := range bytes { - bytes[i] ^= 0xff - } - if len(bytes) == 0 || bytes[0]&0x80 == 0 { - to[0] = 0xff - to = to[1:] - length++ - } - nBytes := copy(to, bytes) - to = to[nBytes:] - length += nBytes - } else if n.Sign() == 0 { - // A zero is the zero length string - } else { - bytes := n.Bytes() - if len(bytes) > 0 && bytes[0]&0x80 != 0 { - // We'll have to pad this with a 0x00 in order to - // stop it looking like a negative number. - to[0] = 0 - to = to[1:] - length++ - } - nBytes := copy(to, bytes) - to = to[nBytes:] - length += nBytes - } - - lengthBytes[0] = byte(length >> 24) - lengthBytes[1] = byte(length >> 16) - lengthBytes[2] = byte(length >> 8) - lengthBytes[3] = byte(length) - return to -} - -func writeInt(w io.Writer, n *big.Int) { - length := intLength(n) - buf := make([]byte, length) - marshalInt(buf, n) - w.Write(buf) -} - -func writeString(w io.Writer, s []byte) { - var lengthBytes [4]byte - lengthBytes[0] = byte(len(s) >> 24) - lengthBytes[1] = byte(len(s) >> 16) - lengthBytes[2] = byte(len(s) >> 8) - lengthBytes[3] = byte(len(s)) - w.Write(lengthBytes[:]) - w.Write(s) -} - -func stringLength(n int) int { - return 4 + n -} - -func marshalString(to []byte, s []byte) []byte { - to[0] = byte(len(s) >> 24) - to[1] = byte(len(s) >> 16) - to[2] = byte(len(s) >> 8) - to[3] = byte(len(s)) - to = to[4:] - copy(to, s) - return to[len(s):] -} - -var bigIntType = reflect.TypeOf((*big.Int)(nil)) - -// Decode a packet into its corresponding message. 
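intLength and marshalInt implement RFC 4251 mpints: big-endian two's complement with an extra pad byte whenever the leading bit would otherwise flip the sign. A sketch of the edge cases via the exported Marshal:

package main

import (
	"fmt"
	"math/big"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Expected encodings (4-byte length, then value bytes):
	//    0 -> 00000000            (zero is the empty string)
	//  127 -> 00000001 7f
	//  128 -> 00000002 0080       (0x00 pad keeps it positive)
	//   -1 -> 00000001 ff
	for _, n := range []int64{0, 127, 128, -1} {
		wire := ssh.Marshal(struct{ N *big.Int }{big.NewInt(n)})
		fmt.Printf("%4d -> % x\n", n, wire)
	}
}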
-func decode(packet []byte) (interface{}, error) { - var msg interface{} - switch packet[0] { - case msgDisconnect: - msg = new(disconnectMsg) - case msgServiceRequest: - msg = new(serviceRequestMsg) - case msgServiceAccept: - msg = new(serviceAcceptMsg) - case msgKexInit: - msg = new(kexInitMsg) - case msgKexDHInit: - msg = new(kexDHInitMsg) - case msgKexDHReply: - msg = new(kexDHReplyMsg) - case msgUserAuthRequest: - msg = new(userAuthRequestMsg) - case msgUserAuthSuccess: - return new(userAuthSuccessMsg), nil - case msgUserAuthFailure: - msg = new(userAuthFailureMsg) - case msgUserAuthPubKeyOk: - msg = new(userAuthPubKeyOkMsg) - case msgGlobalRequest: - msg = new(globalRequestMsg) - case msgRequestSuccess: - msg = new(globalRequestSuccessMsg) - case msgRequestFailure: - msg = new(globalRequestFailureMsg) - case msgChannelOpen: - msg = new(channelOpenMsg) - case msgChannelData: - msg = new(channelDataMsg) - case msgChannelOpenConfirm: - msg = new(channelOpenConfirmMsg) - case msgChannelOpenFailure: - msg = new(channelOpenFailureMsg) - case msgChannelWindowAdjust: - msg = new(windowAdjustMsg) - case msgChannelEOF: - msg = new(channelEOFMsg) - case msgChannelClose: - msg = new(channelCloseMsg) - case msgChannelRequest: - msg = new(channelRequestMsg) - case msgChannelSuccess: - msg = new(channelRequestSuccessMsg) - case msgChannelFailure: - msg = new(channelRequestFailureMsg) - case msgUserAuthGSSAPIToken: - msg = new(userAuthGSSAPIToken) - case msgUserAuthGSSAPIMIC: - msg = new(userAuthGSSAPIMIC) - case msgUserAuthGSSAPIErrTok: - msg = new(userAuthGSSAPIErrTok) - case msgUserAuthGSSAPIError: - msg = new(userAuthGSSAPIError) - default: - return nil, unexpectedMessageError(0, packet[0]) - } - if err := Unmarshal(packet, msg); err != nil { - return nil, err - } - return msg, nil -} - -var packetTypeNames = map[byte]string{ - msgDisconnect: "disconnectMsg", - msgServiceRequest: "serviceRequestMsg", - msgServiceAccept: "serviceAcceptMsg", - msgKexInit: "kexInitMsg", - msgKexDHInit: "kexDHInitMsg", - msgKexDHReply: "kexDHReplyMsg", - msgUserAuthRequest: "userAuthRequestMsg", - msgUserAuthSuccess: "userAuthSuccessMsg", - msgUserAuthFailure: "userAuthFailureMsg", - msgUserAuthPubKeyOk: "userAuthPubKeyOkMsg", - msgGlobalRequest: "globalRequestMsg", - msgRequestSuccess: "globalRequestSuccessMsg", - msgRequestFailure: "globalRequestFailureMsg", - msgChannelOpen: "channelOpenMsg", - msgChannelData: "channelDataMsg", - msgChannelOpenConfirm: "channelOpenConfirmMsg", - msgChannelOpenFailure: "channelOpenFailureMsg", - msgChannelWindowAdjust: "windowAdjustMsg", - msgChannelEOF: "channelEOFMsg", - msgChannelClose: "channelCloseMsg", - msgChannelRequest: "channelRequestMsg", - msgChannelSuccess: "channelRequestSuccessMsg", - msgChannelFailure: "channelRequestFailureMsg", -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/mux.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/mux.go deleted file mode 100644 index 9654c01869a..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/mux.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/binary" - "fmt" - "io" - "log" - "sync" - "sync/atomic" -) - -// debugMux, if set, causes messages in the connection protocol to be -// logged. -const debugMux = false - -// chanList is a thread safe channel list. 
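-// A sketch of the id round trip it implements (in-package, assuming a
-// *channel ch):
-//
-//	var cl chanList
-//	id := cl.add(ch)       // allocate a local id for ch
-//	same := cl.getChan(id) // the peer echoes id back; same == ch
-//	cl.remove(id)          // free the slot when the channel closes
-//	_ = same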
-type chanList struct { - // protects concurrent access to chans - sync.Mutex - - // chans are indexed by the local id of the channel, which the - // other side should send in the PeersId field. - chans []*channel - - // This is a debugging aid: it offsets all IDs by this - // amount. This helps distinguish otherwise identical - // server/client muxes - offset uint32 -} - -// Assigns a channel ID to the given channel. -func (c *chanList) add(ch *channel) uint32 { - c.Lock() - defer c.Unlock() - for i := range c.chans { - if c.chans[i] == nil { - c.chans[i] = ch - return uint32(i) + c.offset - } - } - c.chans = append(c.chans, ch) - return uint32(len(c.chans)-1) + c.offset -} - -// getChan returns the channel for the given ID. -func (c *chanList) getChan(id uint32) *channel { - id -= c.offset - - c.Lock() - defer c.Unlock() - if id < uint32(len(c.chans)) { - return c.chans[id] - } - return nil -} - -func (c *chanList) remove(id uint32) { - id -= c.offset - c.Lock() - if id < uint32(len(c.chans)) { - c.chans[id] = nil - } - c.Unlock() -} - -// dropAll forgets all channels it knows, returning them in a slice. -func (c *chanList) dropAll() []*channel { - c.Lock() - defer c.Unlock() - var r []*channel - - for _, ch := range c.chans { - if ch == nil { - continue - } - r = append(r, ch) - } - c.chans = nil - return r -} - -// mux represents the state for the SSH connection protocol, which -// multiplexes many channels onto a single packet transport. -type mux struct { - conn packetConn - chanList chanList - - incomingChannels chan NewChannel - - globalSentMu sync.Mutex - globalResponses chan interface{} - incomingRequests chan *Request - - errCond *sync.Cond - err error -} - -// When debugging, each new chanList instantiation has a different -// offset. -var globalOff uint32 - -func (m *mux) Wait() error { - m.errCond.L.Lock() - defer m.errCond.L.Unlock() - for m.err == nil { - m.errCond.Wait() - } - return m.err -} - -// newMux returns a mux that runs over the given connection. -func newMux(p packetConn) *mux { - m := &mux{ - conn: p, - incomingChannels: make(chan NewChannel, chanSize), - globalResponses: make(chan interface{}, 1), - incomingRequests: make(chan *Request, chanSize), - errCond: newCond(), - } - if debugMux { - m.chanList.offset = atomic.AddUint32(&globalOff, 1) - } - - go m.loop() - return m -} - -func (m *mux) sendMessage(msg interface{}) error { - p := Marshal(msg) - if debugMux { - log.Printf("send global(%d): %#v", m.chanList.offset, msg) - } - return m.conn.writePacket(p) -} - -func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) { - if wantReply { - m.globalSentMu.Lock() - defer m.globalSentMu.Unlock() - } - - if err := m.sendMessage(globalRequestMsg{ - Type: name, - WantReply: wantReply, - Data: payload, - }); err != nil { - return false, nil, err - } - - if !wantReply { - return false, nil, nil - } - - msg, ok := <-m.globalResponses - if !ok { - return false, nil, io.EOF - } - switch msg := msg.(type) { - case *globalRequestFailureMsg: - return false, msg.Data, nil - case *globalRequestSuccessMsg: - return true, msg.Data, nil - default: - return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg) - } -} - -// ackRequest must be called after processing a global request that -// has WantReply set. 
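-// On the consumer side this pairs with the public Request API; a typical
-// handler is a sketch like the following, assuming reqs <-chan *Request
-// delivered by the mux:
-//
-//	for req := range reqs {
-//		if req.WantReply {
-//			req.Reply(false, nil) // decline politely, as for a keepalive
-//		}
-//	}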
-func (m *mux) ackRequest(ok bool, data []byte) error { - if ok { - return m.sendMessage(globalRequestSuccessMsg{Data: data}) - } - return m.sendMessage(globalRequestFailureMsg{Data: data}) -} - -func (m *mux) Close() error { - return m.conn.Close() -} - -// loop runs the connection machine. It will process packets until an -// error is encountered. To synchronize on loop exit, use mux.Wait. -func (m *mux) loop() { - var err error - for err == nil { - err = m.onePacket() - } - - for _, ch := range m.chanList.dropAll() { - ch.close() - } - - close(m.incomingChannels) - close(m.incomingRequests) - close(m.globalResponses) - - m.conn.Close() - - m.errCond.L.Lock() - m.err = err - m.errCond.Broadcast() - m.errCond.L.Unlock() - - if debugMux { - log.Println("loop exit", err) - } -} - -// onePacket reads and processes one packet. -func (m *mux) onePacket() error { - packet, err := m.conn.readPacket() - if err != nil { - return err - } - - if debugMux { - if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData { - log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet)) - } else { - p, _ := decode(packet) - log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet)) - } - } - - switch packet[0] { - case msgChannelOpen: - return m.handleChannelOpen(packet) - case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: - return m.handleGlobalPacket(packet) - } - - // assume a channel packet. - if len(packet) < 5 { - return parseError(packet[0]) - } - id := binary.BigEndian.Uint32(packet[1:]) - ch := m.chanList.getChan(id) - if ch == nil { - return m.handleUnknownChannelPacket(id, packet) - } - - return ch.handlePacket(packet) -} - -func (m *mux) handleGlobalPacket(packet []byte) error { - msg, err := decode(packet) - if err != nil { - return err - } - - switch msg := msg.(type) { - case *globalRequestMsg: - m.incomingRequests <- &Request{ - Type: msg.Type, - WantReply: msg.WantReply, - Payload: msg.Data, - mux: m, - } - case *globalRequestSuccessMsg, *globalRequestFailureMsg: - m.globalResponses <- msg - default: - panic(fmt.Sprintf("not a global message %#v", msg)) - } - - return nil -} - -// handleChannelOpen schedules a channel to be Accept()ed. 
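-// The scheduled channel surfaces on the NewChannel queue; a typical
-// server-side consumer (sketch only, handleSession is hypothetical):
-//
-//	for newCh := range chans {
-//		if newCh.ChannelType() != "session" {
-//			newCh.Reject(UnknownChannelType, "unsupported channel type")
-//			continue
-//		}
-//		ch, reqs, err := newCh.Accept()
-//		if err != nil {
-//			continue
-//		}
-//		go handleSession(ch, reqs)
-//	}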
-func (m *mux) handleChannelOpen(packet []byte) error { - var msg channelOpenMsg - if err := Unmarshal(packet, &msg); err != nil { - return err - } - - if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { - failMsg := channelOpenFailureMsg{ - PeersID: msg.PeersID, - Reason: ConnectionFailed, - Message: "invalid request", - Language: "en_US.UTF-8", - } - return m.sendMessage(failMsg) - } - - c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData) - c.remoteId = msg.PeersID - c.maxRemotePayload = msg.MaxPacketSize - c.remoteWin.add(msg.PeersWindow) - m.incomingChannels <- c - return nil -} - -func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) { - ch, err := m.openChannel(chanType, extra) - if err != nil { - return nil, nil, err - } - - return ch, ch.incomingRequests, nil -} - -func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) { - ch := m.newChannel(chanType, channelOutbound, extra) - - ch.maxIncomingPayload = channelMaxPacket - - open := channelOpenMsg{ - ChanType: chanType, - PeersWindow: ch.myWindow, - MaxPacketSize: ch.maxIncomingPayload, - TypeSpecificData: extra, - PeersID: ch.localId, - } - if err := m.sendMessage(open); err != nil { - return nil, err - } - - switch msg := (<-ch.msg).(type) { - case *channelOpenConfirmMsg: - return ch, nil - case *channelOpenFailureMsg: - return nil, &OpenChannelError{msg.Reason, msg.Message} - default: - return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg) - } -} - -func (m *mux) handleUnknownChannelPacket(id uint32, packet []byte) error { - msg, err := decode(packet) - if err != nil { - return err - } - - switch msg := msg.(type) { - // RFC 4254 section 5.4 says unrecognized channel requests should - // receive a failure response. - case *channelRequestMsg: - if msg.WantReply { - return m.sendMessage(channelRequestFailureMsg{ - PeersID: msg.PeersID, - }) - } - return nil - default: - return fmt.Errorf("ssh: invalid channel %d", id) - } -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/server.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/server.go deleted file mode 100644 index 7d42a8c88d2..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/server.go +++ /dev/null @@ -1,716 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "strings" -) - -// The Permissions type holds fine-grained permissions that are -// specific to a user or a specific authentication method for a user. -// The Permissions value for a successful authentication attempt is -// available in ServerConn, so it can be used to pass information from -// the user-authentication phase to the application layer. -type Permissions struct { - // CriticalOptions indicate restrictions to the default - // permissions, and are typically used in conjunction with - // user certificates. The standard for SSH certificates - // defines "force-command" (only allow the given command to - // execute) and "source-address" (only allow connections from - // the given address). The SSH package currently only enforces - // the "source-address" critical option. It is up to server - // implementations to enforce other critical options, such as - // "force-command", by checking them after the SSH handshake - // is successful. 
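-	// For example, a user certificate restricted to one network might
-	// surface here as (illustrative values only):
-	//
-	//	perms.CriticalOptions["source-address"] == "203.0.113.0/24"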
In general, SSH servers should reject - // connections that specify critical options that are unknown - // or not supported. - CriticalOptions map[string]string - - // Extensions are extra functionality that the server may - // offer on authenticated connections. Lack of support for an - // extension does not preclude authenticating a user. Common - // extensions are "permit-agent-forwarding", - // "permit-X11-forwarding". The Go SSH library currently does - // not act on any extension, and it is up to server - // implementations to honor them. Extensions can be used to - // pass data from the authentication callbacks to the server - // application layer. - Extensions map[string]string -} - -type GSSAPIWithMICConfig struct { - // AllowLogin, must be set, is called when gssapi-with-mic - // authentication is selected (RFC 4462 section 3). The srcName is from the - // results of the GSS-API authentication. The format is username@DOMAIN. - // GSSAPI just guarantees to the server who the user is, but not if they can log in, and with what permissions. - // This callback is called after the user identity is established with GSSAPI to decide if the user can login with - // which permissions. If the user is allowed to login, it should return a nil error. - AllowLogin func(conn ConnMetadata, srcName string) (*Permissions, error) - - // Server must be set. It's the implementation - // of the GSSAPIServer interface. See GSSAPIServer interface for details. - Server GSSAPIServer -} - -// ServerConfig holds server specific configuration data. -type ServerConfig struct { - // Config contains configuration shared between client and server. - Config - - hostKeys []Signer - - // NoClientAuth is true if clients are allowed to connect without - // authenticating. - NoClientAuth bool - - // MaxAuthTries specifies the maximum number of authentication attempts - // permitted per connection. If set to a negative number, the number of - // attempts are unlimited. If set to zero, the number of attempts are limited - // to 6. - MaxAuthTries int - - // PasswordCallback, if non-nil, is called when a user - // attempts to authenticate using a password. - PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) - - // PublicKeyCallback, if non-nil, is called when a client - // offers a public key for authentication. It must return a nil error - // if the given public key can be used to authenticate the - // given user. For example, see CertChecker.Authenticate. A - // call to this function does not guarantee that the key - // offered is in fact used to authenticate. To record any data - // depending on the public key, store it inside a - // Permissions.Extensions entry. - PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) - - // KeyboardInteractiveCallback, if non-nil, is called when - // keyboard-interactive authentication is selected (RFC - // 4256). The client object's Challenge function should be - // used to query the user. The callback may offer multiple - // Challenge rounds. To avoid information leaks, the client - // should be presented a challenge even if the user is - // unknown. - KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) - - // AuthLogCallback, if non-nil, is called to log all authentication - // attempts. - AuthLogCallback func(conn ConnMetadata, method string, err error) - - // ServerVersion is the version identification string to announce in - // the public handshake. 
- // If empty, a reasonable default is used. - // Note that RFC 4253 section 4.2 requires that this string start with - // "SSH-2.0-". - ServerVersion string - - // BannerCallback, if present, is called and the return string is sent to - // the client after key exchange completed but before authentication. - BannerCallback func(conn ConnMetadata) string - - // GSSAPIWithMICConfig includes gssapi server and callback, which if both non-nil, is used - // when gssapi-with-mic authentication is selected (RFC 4462 section 3). - GSSAPIWithMICConfig *GSSAPIWithMICConfig -} - -// AddHostKey adds a private key as a host key. If an existing host -// key exists with the same algorithm, it is overwritten. Each server -// config must have at least one host key. -func (s *ServerConfig) AddHostKey(key Signer) { - for i, k := range s.hostKeys { - if k.PublicKey().Type() == key.PublicKey().Type() { - s.hostKeys[i] = key - return - } - } - - s.hostKeys = append(s.hostKeys, key) -} - -// cachedPubKey contains the results of querying whether a public key is -// acceptable for a user. -type cachedPubKey struct { - user string - pubKeyData []byte - result error - perms *Permissions -} - -const maxCachedPubKeys = 16 - -// pubKeyCache caches tests for public keys. Since SSH clients -// will query whether a public key is acceptable before attempting to -// authenticate with it, we end up with duplicate queries for public -// key validity. The cache only applies to a single ServerConn. -type pubKeyCache struct { - keys []cachedPubKey -} - -// get returns the result for a given user/algo/key tuple. -func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) { - for _, k := range c.keys { - if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) { - return k, true - } - } - return cachedPubKey{}, false -} - -// add adds the given tuple to the cache. -func (c *pubKeyCache) add(candidate cachedPubKey) { - if len(c.keys) < maxCachedPubKeys { - c.keys = append(c.keys, candidate) - } -} - -// ServerConn is an authenticated SSH connection, as seen from the -// server -type ServerConn struct { - Conn - - // If the succeeding authentication callback returned a - // non-nil Permissions pointer, it is stored here. - Permissions *Permissions -} - -// NewServerConn starts a new SSH server with c as the underlying -// transport. It starts with a handshake and, if the handshake is -// unsuccessful, it closes the connection and returns an error. The -// Request and NewChannel channels must be serviced, or the connection -// will hang. -// -// The returned error may be of type *ServerAuthError for -// authentication errors. -func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) { - fullConf := *config - fullConf.SetDefaults() - if fullConf.MaxAuthTries == 0 { - fullConf.MaxAuthTries = 6 - } - // Check if the config contains any unsupported key exchanges - for _, kex := range fullConf.KeyExchanges { - if _, ok := serverForbiddenKexAlgos[kex]; ok { - return nil, nil, nil, fmt.Errorf("ssh: unsupported key exchange %s for server", kex) - } - } - - s := &connection{ - sshConn: sshConn{conn: c}, - } - perms, err := s.serverHandshake(&fullConf) - if err != nil { - c.Close() - return nil, nil, nil, err - } - return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil -} - -// signAndMarshal signs the data with the appropriate algorithm, -// and serializes the result in SSH wire format. 
-func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) { - sig, err := k.Sign(rand, data) - if err != nil { - return nil, err - } - - return Marshal(sig), nil -} - -// handshake performs key exchange and user authentication. -func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) { - if len(config.hostKeys) == 0 { - return nil, errors.New("ssh: server has no host keys") - } - - if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && - config.KeyboardInteractiveCallback == nil && (config.GSSAPIWithMICConfig == nil || - config.GSSAPIWithMICConfig.AllowLogin == nil || config.GSSAPIWithMICConfig.Server == nil) { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") - } - - if config.ServerVersion != "" { - s.serverVersion = []byte(config.ServerVersion) - } else { - s.serverVersion = []byte(packageVersion) - } - var err error - s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion) - if err != nil { - return nil, err - } - - tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */) - s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config) - - if err := s.transport.waitSession(); err != nil { - return nil, err - } - - // We just did the key change, so the session ID is established. - s.sessionID = s.transport.getSessionID() - - var packet []byte - if packet, err = s.transport.readPacket(); err != nil { - return nil, err - } - - var serviceRequest serviceRequestMsg - if err = Unmarshal(packet, &serviceRequest); err != nil { - return nil, err - } - if serviceRequest.Service != serviceUserAuth { - return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating") - } - serviceAccept := serviceAcceptMsg{ - Service: serviceUserAuth, - } - if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil { - return nil, err - } - - perms, err := s.serverAuthenticate(config) - if err != nil { - return nil, err - } - s.mux = newMux(s.transport) - return perms, err -} - -func isAcceptableAlgo(algo string) bool { - switch algo { - case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519, - CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: - return true - } - return false -} - -func checkSourceAddress(addr net.Addr, sourceAddrs string) error { - if addr == nil { - return errors.New("ssh: no address known for client, but source-address match required") - } - - tcpAddr, ok := addr.(*net.TCPAddr) - if !ok { - return fmt.Errorf("ssh: remote address %v is not an TCP address when checking source-address match", addr) - } - - for _, sourceAddr := range strings.Split(sourceAddrs, ",") { - if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil { - if allowedIP.Equal(tcpAddr.IP) { - return nil - } - } else { - _, ipNet, err := net.ParseCIDR(sourceAddr) - if err != nil { - return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err) - } - - if ipNet.Contains(tcpAddr.IP) { - return nil - } - } - } - - return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr) -} - -func gssExchangeToken(gssapiConfig *GSSAPIWithMICConfig, firstToken []byte, s *connection, - sessionID []byte, userAuthReq userAuthRequestMsg) (authErr error, perms 
*Permissions, err error) { - gssAPIServer := gssapiConfig.Server - defer gssAPIServer.DeleteSecContext() - var srcName string - for { - var ( - outToken []byte - needContinue bool - ) - outToken, srcName, needContinue, err = gssAPIServer.AcceptSecContext(firstToken) - if err != nil { - return err, nil, nil - } - if len(outToken) != 0 { - if err := s.transport.writePacket(Marshal(&userAuthGSSAPIToken{ - Token: outToken, - })); err != nil { - return nil, nil, err - } - } - if !needContinue { - break - } - packet, err := s.transport.readPacket() - if err != nil { - return nil, nil, err - } - userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} - if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { - return nil, nil, err - } - } - packet, err := s.transport.readPacket() - if err != nil { - return nil, nil, err - } - userAuthGSSAPIMICReq := &userAuthGSSAPIMIC{} - if err := Unmarshal(packet, userAuthGSSAPIMICReq); err != nil { - return nil, nil, err - } - mic := buildMIC(string(sessionID), userAuthReq.User, userAuthReq.Service, userAuthReq.Method) - if err := gssAPIServer.VerifyMIC(mic, userAuthGSSAPIMICReq.MIC); err != nil { - return err, nil, nil - } - perms, authErr = gssapiConfig.AllowLogin(s, srcName) - return authErr, perms, nil -} - -// ServerAuthError represents server authentication errors and is -// sometimes returned by NewServerConn. It appends any authentication -// errors that may occur, and is returned if all of the authentication -// methods provided by the user failed to authenticate. -type ServerAuthError struct { - // Errors contains authentication errors returned by the authentication - // callback methods. The first entry is typically ErrNoAuth. - Errors []error -} - -func (l ServerAuthError) Error() string { - var errs []string - for _, err := range l.Errors { - errs = append(errs, err.Error()) - } - return "[" + strings.Join(errs, ", ") + "]" -} - -// ErrNoAuth is the error value returned if no -// authentication method has been passed yet. This happens as a normal -// part of the authentication loop, since the client first tries -// 'none' authentication to discover available methods. -// It is returned in ServerAuthError.Errors from NewServerConn. 
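-// A sketch of surfacing the collected errors after a failed handshake,
-// assuming a net.Conn c and a *ServerConfig config:
-//
-//	_, _, _, err := NewServerConn(c, config)
-//	var authErr *ServerAuthError
-//	if errors.As(err, &authErr) {
-//		for _, e := range authErr.Errors {
-//			fmt.Println("auth attempt:", e)
-//		}
-//	}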
-var ErrNoAuth = errors.New("ssh: no auth passed yet") - -func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { - sessionID := s.transport.getSessionID() - var cache pubKeyCache - var perms *Permissions - - authFailures := 0 - var authErrs []error - var displayedBanner bool - -userAuthLoop: - for { - if authFailures >= config.MaxAuthTries && config.MaxAuthTries > 0 { - discMsg := &disconnectMsg{ - Reason: 2, - Message: "too many authentication failures", - } - - if err := s.transport.writePacket(Marshal(discMsg)); err != nil { - return nil, err - } - - return nil, discMsg - } - - var userAuthReq userAuthRequestMsg - if packet, err := s.transport.readPacket(); err != nil { - if err == io.EOF { - return nil, &ServerAuthError{Errors: authErrs} - } - return nil, err - } else if err = Unmarshal(packet, &userAuthReq); err != nil { - return nil, err - } - - if userAuthReq.Service != serviceSSH { - return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) - } - - s.user = userAuthReq.User - - if !displayedBanner && config.BannerCallback != nil { - displayedBanner = true - msg := config.BannerCallback(s) - if msg != "" { - bannerMsg := &userAuthBannerMsg{ - Message: msg, - } - if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { - return nil, err - } - } - } - - perms = nil - authErr := ErrNoAuth - - switch userAuthReq.Method { - case "none": - if config.NoClientAuth { - authErr = nil - } - - // allow initial attempt of 'none' without penalty - if authFailures == 0 { - authFailures-- - } - case "password": - if config.PasswordCallback == nil { - authErr = errors.New("ssh: password auth not configured") - break - } - payload := userAuthReq.Payload - if len(payload) < 1 || payload[0] != 0 { - return nil, parseError(msgUserAuthRequest) - } - payload = payload[1:] - password, payload, ok := parseString(payload) - if !ok || len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - perms, authErr = config.PasswordCallback(s, password) - case "keyboard-interactive": - if config.KeyboardInteractiveCallback == nil { - authErr = errors.New("ssh: keyboard-interactive auth not configured") - break - } - - prompter := &sshClientKeyboardInteractive{s} - perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge) - case "publickey": - if config.PublicKeyCallback == nil { - authErr = errors.New("ssh: publickey auth not configured") - break - } - payload := userAuthReq.Payload - if len(payload) < 1 { - return nil, parseError(msgUserAuthRequest) - } - isQuery := payload[0] == 0 - payload = payload[1:] - algoBytes, payload, ok := parseString(payload) - if !ok { - return nil, parseError(msgUserAuthRequest) - } - algo := string(algoBytes) - if !isAcceptableAlgo(algo) { - authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) - break - } - - pubKeyData, payload, ok := parseString(payload) - if !ok { - return nil, parseError(msgUserAuthRequest) - } - - pubKey, err := ParsePublicKey(pubKeyData) - if err != nil { - return nil, err - } - - candidate, ok := cache.get(s.user, pubKeyData) - if !ok { - candidate.user = s.user - candidate.pubKeyData = pubKeyData - candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey) - if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { - candidate.result = checkSourceAddress( - s.RemoteAddr(), - 
candidate.perms.CriticalOptions[sourceAddressCriticalOption]) - } - cache.add(candidate) - } - - if isQuery { - // The client can query if the given public key - // would be okay. - - if len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - if candidate.result == nil { - okMsg := userAuthPubKeyOkMsg{ - Algo: algo, - PubKey: pubKeyData, - } - if err = s.transport.writePacket(Marshal(&okMsg)); err != nil { - return nil, err - } - continue userAuthLoop - } - authErr = candidate.result - } else { - sig, payload, ok := parseSignature(payload) - if !ok || len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - // Ensure the public key algo and signature algo - // are supported. Compare the private key - // algorithm name that corresponds to algo with - // sig.Format. This is usually the same, but - // for certs, the names differ. - if !isAcceptableAlgo(sig.Format) { - authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) - break - } - signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData) - - if err := pubKey.Verify(signedData, sig); err != nil { - return nil, err - } - - authErr = candidate.result - perms = candidate.perms - } - case "gssapi-with-mic": - gssapiConfig := config.GSSAPIWithMICConfig - userAuthRequestGSSAPI, err := parseGSSAPIPayload(userAuthReq.Payload) - if err != nil { - return nil, parseError(msgUserAuthRequest) - } - // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication. - if userAuthRequestGSSAPI.N == 0 { - authErr = fmt.Errorf("ssh: Mechanism negotiation is not supported") - break - } - var i uint32 - present := false - for i = 0; i < userAuthRequestGSSAPI.N; i++ { - if userAuthRequestGSSAPI.OIDS[i].Equal(krb5Mesh) { - present = true - break - } - } - if !present { - authErr = fmt.Errorf("ssh: GSSAPI authentication must use the Kerberos V5 mechanism") - break - } - // Initial server response, see RFC 4462 section 3.3. - if err := s.transport.writePacket(Marshal(&userAuthGSSAPIResponse{ - SupportMech: krb5OID, - })); err != nil { - return nil, err - } - // Exchange token, see RFC 4462 section 3.4. 
- packet, err := s.transport.readPacket() - if err != nil { - return nil, err - } - userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} - if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { - return nil, err - } - authErr, perms, err = gssExchangeToken(gssapiConfig, userAuthGSSAPITokenReq.Token, s, sessionID, - userAuthReq) - if err != nil { - return nil, err - } - default: - authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method) - } - - authErrs = append(authErrs, authErr) - - if config.AuthLogCallback != nil { - config.AuthLogCallback(s, userAuthReq.Method, authErr) - } - - if authErr == nil { - break userAuthLoop - } - - authFailures++ - - var failureMsg userAuthFailureMsg - if config.PasswordCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "password") - } - if config.PublicKeyCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "publickey") - } - if config.KeyboardInteractiveCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") - } - if config.GSSAPIWithMICConfig != nil && config.GSSAPIWithMICConfig.Server != nil && - config.GSSAPIWithMICConfig.AllowLogin != nil { - failureMsg.Methods = append(failureMsg.Methods, "gssapi-with-mic") - } - - if len(failureMsg.Methods) == 0 { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") - } - - if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil { - return nil, err - } - } - - if err := s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil { - return nil, err - } - return perms, nil -} - -// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by -// asking the client on the other side of a ServerConn. -type sshClientKeyboardInteractive struct { - *connection -} - -func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) { - if len(questions) != len(echos) { - return nil, errors.New("ssh: echos and questions must have equal length") - } - - var prompts []byte - for i := range questions { - prompts = appendString(prompts, questions[i]) - prompts = appendBool(prompts, echos[i]) - } - - if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ - Instruction: instruction, - NumPrompts: uint32(len(questions)), - Prompts: prompts, - })); err != nil { - return nil, err - } - - packet, err := c.transport.readPacket() - if err != nil { - return nil, err - } - if packet[0] != msgUserAuthInfoResponse { - return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0]) - } - packet = packet[1:] - - n, packet, ok := parseUint32(packet) - if !ok || int(n) != len(questions) { - return nil, parseError(msgUserAuthInfoResponse) - } - - for i := uint32(0); i < n; i++ { - ans, rest, ok := parseString(packet) - if !ok { - return nil, parseError(msgUserAuthInfoResponse) - } - - answers = append(answers, string(ans)) - packet = rest - } - if len(packet) != 0 { - return nil, errors.New("ssh: junk at end of message") - } - - return answers, nil -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/session.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/session.go deleted file mode 100644 index d3321f6b784..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/session.go +++ /dev/null @@ -1,647 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ssh - -// Session implements an interactive session described in -// "RFC 4254, section 6". - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "sync" -) - -type Signal string - -// POSIX signals as listed in RFC 4254 Section 6.10. -const ( - SIGABRT Signal = "ABRT" - SIGALRM Signal = "ALRM" - SIGFPE Signal = "FPE" - SIGHUP Signal = "HUP" - SIGILL Signal = "ILL" - SIGINT Signal = "INT" - SIGKILL Signal = "KILL" - SIGPIPE Signal = "PIPE" - SIGQUIT Signal = "QUIT" - SIGSEGV Signal = "SEGV" - SIGTERM Signal = "TERM" - SIGUSR1 Signal = "USR1" - SIGUSR2 Signal = "USR2" -) - -var signals = map[Signal]int{ - SIGABRT: 6, - SIGALRM: 14, - SIGFPE: 8, - SIGHUP: 1, - SIGILL: 4, - SIGINT: 2, - SIGKILL: 9, - SIGPIPE: 13, - SIGQUIT: 3, - SIGSEGV: 11, - SIGTERM: 15, -} - -type TerminalModes map[uint8]uint32 - -// POSIX terminal mode flags as listed in RFC 4254 Section 8. -const ( - tty_OP_END = 0 - VINTR = 1 - VQUIT = 2 - VERASE = 3 - VKILL = 4 - VEOF = 5 - VEOL = 6 - VEOL2 = 7 - VSTART = 8 - VSTOP = 9 - VSUSP = 10 - VDSUSP = 11 - VREPRINT = 12 - VWERASE = 13 - VLNEXT = 14 - VFLUSH = 15 - VSWTCH = 16 - VSTATUS = 17 - VDISCARD = 18 - IGNPAR = 30 - PARMRK = 31 - INPCK = 32 - ISTRIP = 33 - INLCR = 34 - IGNCR = 35 - ICRNL = 36 - IUCLC = 37 - IXON = 38 - IXANY = 39 - IXOFF = 40 - IMAXBEL = 41 - ISIG = 50 - ICANON = 51 - XCASE = 52 - ECHO = 53 - ECHOE = 54 - ECHOK = 55 - ECHONL = 56 - NOFLSH = 57 - TOSTOP = 58 - IEXTEN = 59 - ECHOCTL = 60 - ECHOKE = 61 - PENDIN = 62 - OPOST = 70 - OLCUC = 71 - ONLCR = 72 - OCRNL = 73 - ONOCR = 74 - ONLRET = 75 - CS7 = 90 - CS8 = 91 - PARENB = 92 - PARODD = 93 - TTY_OP_ISPEED = 128 - TTY_OP_OSPEED = 129 -) - -// A Session represents a connection to a remote command or shell. -type Session struct { - // Stdin specifies the remote process's standard input. - // If Stdin is nil, the remote process reads from an empty - // bytes.Buffer. - Stdin io.Reader - - // Stdout and Stderr specify the remote process's standard - // output and error. - // - // If either is nil, Run connects the corresponding file - // descriptor to an instance of ioutil.Discard. There is a - // fixed amount of buffering that is shared for the two streams. - // If either blocks it may eventually cause the remote - // command to block. - Stdout io.Writer - Stderr io.Writer - - ch Channel // the channel backing this session - started bool // true once Start, Run or Shell is invoked. - copyFuncs []func() error - errors chan error // one send per copyFunc - - // true if pipe method is active - stdinpipe, stdoutpipe, stderrpipe bool - - // stdinPipeWriter is non-nil if StdinPipe has not been called - // and Stdin was specified by the user; it is the write end of - // a pipe connecting Session.Stdin to the stdin channel. - stdinPipeWriter io.WriteCloser - - exitStatus chan error -} - -// SendRequest sends an out-of-band channel request on the SSH channel -// underlying the session. -func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { - return s.ch.SendRequest(name, wantReply, payload) -} - -func (s *Session) Close() error { - return s.ch.Close() -} - -// RFC 4254 Section 6.4. -type setenvRequest struct { - Name string - Value string -} - -// Setenv sets an environment variable that will be applied to any -// command executed by Shell or Run. 
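-// Typical use before starting the command (sketch, assuming an
-// established *Session s):
-//
-//	if err := s.Setenv("LANG", "C"); err != nil {
-//		// many servers restrict env requests; treat failure as non-fatal
-//	}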
-func (s *Session) Setenv(name, value string) error { - msg := setenvRequest{ - Name: name, - Value: value, - } - ok, err := s.ch.SendRequest("env", true, Marshal(&msg)) - if err == nil && !ok { - err = errors.New("ssh: setenv failed") - } - return err -} - -// RFC 4254 Section 6.2. -type ptyRequestMsg struct { - Term string - Columns uint32 - Rows uint32 - Width uint32 - Height uint32 - Modelist string -} - -// RequestPty requests the association of a pty with the session on the remote host. -func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error { - var tm []byte - for k, v := range termmodes { - kv := struct { - Key byte - Val uint32 - }{k, v} - - tm = append(tm, Marshal(&kv)...) - } - tm = append(tm, tty_OP_END) - req := ptyRequestMsg{ - Term: term, - Columns: uint32(w), - Rows: uint32(h), - Width: uint32(w * 8), - Height: uint32(h * 8), - Modelist: string(tm), - } - ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req)) - if err == nil && !ok { - err = errors.New("ssh: pty-req failed") - } - return err -} - -// RFC 4254 Section 6.5. -type subsystemRequestMsg struct { - Subsystem string -} - -// RequestSubsystem requests the association of a subsystem with the session on the remote host. -// A subsystem is a predefined command that runs in the background when the ssh session is initiated -func (s *Session) RequestSubsystem(subsystem string) error { - msg := subsystemRequestMsg{ - Subsystem: subsystem, - } - ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg)) - if err == nil && !ok { - err = errors.New("ssh: subsystem request failed") - } - return err -} - -// RFC 4254 Section 6.7. -type ptyWindowChangeMsg struct { - Columns uint32 - Rows uint32 - Width uint32 - Height uint32 -} - -// WindowChange informs the remote host about a terminal window dimension change to h rows and w columns. -func (s *Session) WindowChange(h, w int) error { - req := ptyWindowChangeMsg{ - Columns: uint32(w), - Rows: uint32(h), - Width: uint32(w * 8), - Height: uint32(h * 8), - } - _, err := s.ch.SendRequest("window-change", false, Marshal(&req)) - return err -} - -// RFC 4254 Section 6.9. -type signalMsg struct { - Signal string -} - -// Signal sends the given signal to the remote process. -// sig is one of the SIG* constants. -func (s *Session) Signal(sig Signal) error { - msg := signalMsg{ - Signal: string(sig), - } - - _, err := s.ch.SendRequest("signal", false, Marshal(&msg)) - return err -} - -// RFC 4254 Section 6.5. -type execMsg struct { - Command string -} - -// Start runs cmd on the remote host. Typically, the remote -// server passes cmd to the shell for interpretation. -// A Session only accepts one call to Run, Start or Shell. -func (s *Session) Start(cmd string) error { - if s.started { - return errors.New("ssh: session already started") - } - req := execMsg{ - Command: cmd, - } - - ok, err := s.ch.SendRequest("exec", true, Marshal(&req)) - if err == nil && !ok { - err = fmt.Errorf("ssh: command %v failed", cmd) - } - if err != nil { - return err - } - return s.start() -} - -// Run runs cmd on the remote host. Typically, the remote -// server passes cmd to the shell for interpretation. -// A Session only accepts one call to Run, Start, Shell, Output, -// or CombinedOutput. -// -// The returned error is nil if the command runs, has no problems -// copying stdin, stdout, and stderr, and exits with a zero exit -// status. -// -// If the remote server does not send an exit status, an error of type -// *ExitMissingError is returned. 
If the command completes -// unsuccessfully or is interrupted by a signal, the error is of type -// *ExitError. Other error types may be returned for I/O problems. -func (s *Session) Run(cmd string) error { - err := s.Start(cmd) - if err != nil { - return err - } - return s.Wait() -} - -// Output runs cmd on the remote host and returns its standard output. -func (s *Session) Output(cmd string) ([]byte, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - var b bytes.Buffer - s.Stdout = &b - err := s.Run(cmd) - return b.Bytes(), err -} - -type singleWriter struct { - b bytes.Buffer - mu sync.Mutex -} - -func (w *singleWriter) Write(p []byte) (int, error) { - w.mu.Lock() - defer w.mu.Unlock() - return w.b.Write(p) -} - -// CombinedOutput runs cmd on the remote host and returns its combined -// standard output and standard error. -func (s *Session) CombinedOutput(cmd string) ([]byte, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - if s.Stderr != nil { - return nil, errors.New("ssh: Stderr already set") - } - var b singleWriter - s.Stdout = &b - s.Stderr = &b - err := s.Run(cmd) - return b.b.Bytes(), err -} - -// Shell starts a login shell on the remote host. A Session only -// accepts one call to Run, Start, Shell, Output, or CombinedOutput. -func (s *Session) Shell() error { - if s.started { - return errors.New("ssh: session already started") - } - - ok, err := s.ch.SendRequest("shell", true, nil) - if err == nil && !ok { - return errors.New("ssh: could not start shell") - } - if err != nil { - return err - } - return s.start() -} - -func (s *Session) start() error { - s.started = true - - type F func(*Session) - for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} { - setupFd(s) - } - - s.errors = make(chan error, len(s.copyFuncs)) - for _, fn := range s.copyFuncs { - go func(fn func() error) { - s.errors <- fn() - }(fn) - } - return nil -} - -// Wait waits for the remote command to exit. -// -// The returned error is nil if the command runs, has no problems -// copying stdin, stdout, and stderr, and exits with a zero exit -// status. -// -// If the remote server does not send an exit status, an error of type -// *ExitMissingError is returned. If the command completes -// unsuccessfully or is interrupted by a signal, the error is of type -// *ExitError. Other error types may be returned for I/O problems. -func (s *Session) Wait() error { - if !s.started { - return errors.New("ssh: session not started") - } - waitErr := <-s.exitStatus - - if s.stdinPipeWriter != nil { - s.stdinPipeWriter.Close() - } - var copyError error - for range s.copyFuncs { - if err := <-s.errors; err != nil && copyError == nil { - copyError = err - } - } - if waitErr != nil { - return waitErr - } - return copyError -} - -func (s *Session) wait(reqs <-chan *Request) error { - wm := Waitmsg{status: -1} - // Wait for msg channel to be closed before returning. - for msg := range reqs { - switch msg.Type { - case "exit-status": - wm.status = int(binary.BigEndian.Uint32(msg.Payload)) - case "exit-signal": - var sigval struct { - Signal string - CoreDumped bool - Error string - Lang string - } - if err := Unmarshal(msg.Payload, &sigval); err != nil { - return err - } - - // Must sanitize strings? - wm.signal = sigval.Signal - wm.msg = sigval.Error - wm.lang = sigval.Lang - default: - // This handles keepalives and matches - // OpenSSH's behaviour. 
- if msg.WantReply { - msg.Reply(false, nil) - } - } - } - if wm.status == 0 { - return nil - } - if wm.status == -1 { - // exit-status was never sent from server - if wm.signal == "" { - // signal was not sent either. RFC 4254 - // section 6.10 recommends against this - // behavior, but it is allowed, so we let - // clients handle it. - return &ExitMissingError{} - } - wm.status = 128 - if _, ok := signals[Signal(wm.signal)]; ok { - wm.status += signals[Signal(wm.signal)] - } - } - - return &ExitError{wm} -} - -// ExitMissingError is returned if a session is torn down cleanly, but -// the server sends no confirmation of the exit status. -type ExitMissingError struct{} - -func (e *ExitMissingError) Error() string { - return "wait: remote command exited without exit status or exit signal" -} - -func (s *Session) stdin() { - if s.stdinpipe { - return - } - var stdin io.Reader - if s.Stdin == nil { - stdin = new(bytes.Buffer) - } else { - r, w := io.Pipe() - go func() { - _, err := io.Copy(w, s.Stdin) - w.CloseWithError(err) - }() - stdin, s.stdinPipeWriter = r, w - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.ch, stdin) - if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF { - err = err1 - } - return err - }) -} - -func (s *Session) stdout() { - if s.stdoutpipe { - return - } - if s.Stdout == nil { - s.Stdout = ioutil.Discard - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.Stdout, s.ch) - return err - }) -} - -func (s *Session) stderr() { - if s.stderrpipe { - return - } - if s.Stderr == nil { - s.Stderr = ioutil.Discard - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.Stderr, s.ch.Stderr()) - return err - }) -} - -// sessionStdin reroutes Close to CloseWrite. -type sessionStdin struct { - io.Writer - ch Channel -} - -func (s *sessionStdin) Close() error { - return s.ch.CloseWrite() -} - -// StdinPipe returns a pipe that will be connected to the -// remote command's standard input when the command starts. -func (s *Session) StdinPipe() (io.WriteCloser, error) { - if s.Stdin != nil { - return nil, errors.New("ssh: Stdin already set") - } - if s.started { - return nil, errors.New("ssh: StdinPipe after process started") - } - s.stdinpipe = true - return &sessionStdin{s.ch, s.ch}, nil -} - -// StdoutPipe returns a pipe that will be connected to the -// remote command's standard output when the command starts. -// There is a fixed amount of buffering that is shared between -// stdout and stderr streams. If the StdoutPipe reader is -// not serviced fast enough it may eventually cause the -// remote command to block. -func (s *Session) StdoutPipe() (io.Reader, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - if s.started { - return nil, errors.New("ssh: StdoutPipe after process started") - } - s.stdoutpipe = true - return s.ch, nil -} - -// StderrPipe returns a pipe that will be connected to the -// remote command's standard error when the command starts. -// There is a fixed amount of buffering that is shared between -// stdout and stderr streams. If the StderrPipe reader is -// not serviced fast enough it may eventually cause the -// remote command to block. 
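-// A sketch of keeping the stream serviced while the command runs,
-// assuming an established *Session s (os is assumed imported):
-//
-//	stderr, err := s.StderrPipe() // must be called before Start or Run
-//	if err != nil {
-//		return err
-//	}
-//	go io.Copy(os.Stderr, stderr) // drain concurrently to avoid blocking
-//	err = s.Run("make")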
-func (s *Session) StderrPipe() (io.Reader, error) { - if s.Stderr != nil { - return nil, errors.New("ssh: Stderr already set") - } - if s.started { - return nil, errors.New("ssh: StderrPipe after process started") - } - s.stderrpipe = true - return s.ch.Stderr(), nil -} - -// newSession returns a new interactive session on the remote host. -func newSession(ch Channel, reqs <-chan *Request) (*Session, error) { - s := &Session{ - ch: ch, - } - s.exitStatus = make(chan error, 1) - go func() { - s.exitStatus <- s.wait(reqs) - }() - - return s, nil -} - -// An ExitError reports unsuccessful completion of a remote command. -type ExitError struct { - Waitmsg -} - -func (e *ExitError) Error() string { - return e.Waitmsg.String() -} - -// Waitmsg stores the information about an exited remote command -// as reported by Wait. -type Waitmsg struct { - status int - signal string - msg string - lang string -} - -// ExitStatus returns the exit status of the remote command. -func (w Waitmsg) ExitStatus() int { - return w.status -} - -// Signal returns the exit signal of the remote command if -// it was terminated violently. -func (w Waitmsg) Signal() string { - return w.signal -} - -// Msg returns the exit message given by the remote command -func (w Waitmsg) Msg() string { - return w.msg -} - -// Lang returns the language tag. See RFC 3066 -func (w Waitmsg) Lang() string { - return w.lang -} - -func (w Waitmsg) String() string { - str := fmt.Sprintf("Process exited with status %v", w.status) - if w.signal != "" { - str += fmt.Sprintf(" from signal %v", w.signal) - } - if w.msg != "" { - str += fmt.Sprintf(". Reason was: %v", w.msg) - } - return str -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/ssh_gss.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/ssh_gss.go deleted file mode 100644 index 24bd7c8e830..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/ssh_gss.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/asn1" - "errors" -) - -var krb5OID []byte - -func init() { - krb5OID, _ = asn1.Marshal(krb5Mesh) -} - -// GSSAPIClient provides the API to plug-in GSSAPI authentication for client logins. -type GSSAPIClient interface { - // InitSecContext initiates the establishment of a security context for GSS-API between the - // ssh client and ssh server. Initially the token parameter should be specified as nil. - // The routine may return a outputToken which should be transferred to - // the ssh server, where the ssh server will present it to - // AcceptSecContext. If no token need be sent, InitSecContext will indicate this by setting - // needContinue to false. To complete the context - // establishment, one or more reply tokens may be required from the ssh - // server;if so, InitSecContext will return a needContinue which is true. - // In this case, InitSecContext should be called again when the - // reply token is received from the ssh server, passing the reply - // token to InitSecContext via the token parameters. - // See RFC 2743 section 2.2.1 and RFC 4462 section 3.4. - InitSecContext(target string, token []byte, isGSSDelegCreds bool) (outputToken []byte, needContinue bool, err error) - // GetMIC generates a cryptographic MIC for the SSH2 message, and places - // the MIC in a token for transfer to the ssh server. 
- // The contents of the MIC field are obtained by calling GSS_GetMIC()
- // over the following, using the GSS-API context that was just
- // established:
- // string session identifier
- // byte SSH_MSG_USERAUTH_REQUEST
- // string user name
- // string service
- // string "gssapi-with-mic"
- // See RFC 2743 section 2.3.1 and RFC 4462 section 3.5.
- GetMIC(micField []byte) ([]byte, error)
- // Whenever possible, it should be possible for
- // DeleteSecContext() calls to be successfully processed even
- // if other calls cannot succeed, thereby enabling context-related
- // resources to be released.
- // In addition to deleting established security contexts,
- // gss_delete_sec_context must also be able to delete "half-built"
- // security contexts resulting from an incomplete sequence of
- // InitSecContext()/AcceptSecContext() calls.
- // See RFC 2743 section 2.2.3.
- DeleteSecContext() error
-}
-
-// GSSAPIServer provides the API to plug in GSSAPI authentication for server logins.
-type GSSAPIServer interface {
- // AcceptSecContext allows a remotely initiated security context between the application
- // and a remote peer to be established by the ssh client. The routine may return an
- // outputToken which should be transferred to the ssh client,
- // where the ssh client will present it to InitSecContext.
- // If no token need be sent, AcceptSecContext will indicate this
- // by setting needContinue to false. To
- // complete the context establishment, one or more reply tokens may be
- // required from the ssh client. If so, AcceptSecContext
- // will return needContinue set to true, in which case it
- // should be called again when the reply token is received from the ssh
- // client, passing the token to AcceptSecContext via the
- // token parameter.
- // The srcName return value is the authenticated username.
- // See RFC 2743 section 2.2.2 and RFC 4462 section 3.4.
- AcceptSecContext(token []byte) (outputToken []byte, srcName string, needContinue bool, err error)
- // VerifyMIC verifies that a cryptographic MIC, contained in the micToken parameter,
- // fits the supplied message as received from the ssh client.
- // See RFC 2743 section 2.3.2.
- VerifyMIC(micField []byte, micToken []byte) error
- // Whenever possible, it should be possible for
- // DeleteSecContext() calls to be successfully processed even
- // if other calls cannot succeed, thereby enabling context-related
- // resources to be released.
- // In addition to deleting established security contexts,
- // gss_delete_sec_context must also be able to delete "half-built"
- // security contexts resulting from an incomplete sequence of
- // InitSecContext()/AcceptSecContext() calls.
- // See RFC 2743 section 2.2.3.
- DeleteSecContext() error
-}
-
-var (
- // OpenSSH supports the Kerberos V5 mechanism only for GSS-API authentication,
- // so we also support the krb5 mechanism only.
- // See RFC 1964 section 1.
- krb5Mesh = asn1.ObjectIdentifier{1, 2, 840, 113554, 1, 2, 2}
-)
-
-// The GSS-API authentication method is initiated when the client sends an SSH_MSG_USERAUTH_REQUEST message.
-// See RFC 4462 section 3.2.
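-// The method-specific part of that request, parsed below, is laid out
-// on the wire as:
-//
-//	uint32	N, the number of mechanism OIDs offered
-//	string	DER-encoded OID 1
-//	...
-//	string	DER-encoded OID N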
-type userAuthRequestGSSAPI struct { - N uint32 - OIDS []asn1.ObjectIdentifier -} - -func parseGSSAPIPayload(payload []byte) (*userAuthRequestGSSAPI, error) { - n, rest, ok := parseUint32(payload) - if !ok { - return nil, errors.New("parse uint32 failed") - } - s := &userAuthRequestGSSAPI{ - N: n, - OIDS: make([]asn1.ObjectIdentifier, n), - } - for i := 0; i < int(n); i++ { - var ( - desiredMech []byte - err error - ) - desiredMech, rest, ok = parseString(rest) - if !ok { - return nil, errors.New("parse string failed") - } - if rest, err = asn1.Unmarshal(desiredMech, &s.OIDS[i]); err != nil { - return nil, err - } - - } - return s, nil -} - -// See RFC 4462 section 3.6. -func buildMIC(sessionID string, username string, service string, authMethod string) []byte { - out := make([]byte, 0, 0) - out = appendString(out, sessionID) - out = append(out, msgUserAuthRequest) - out = appendString(out, username) - out = appendString(out, service) - out = appendString(out, authMethod) - return out -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/streamlocal.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/streamlocal.go deleted file mode 100644 index b171b330bc3..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/streamlocal.go +++ /dev/null @@ -1,116 +0,0 @@ -package ssh - -import ( - "errors" - "io" - "net" -) - -// streamLocalChannelOpenDirectMsg is a struct used for SSH_MSG_CHANNEL_OPEN message -// with "direct-streamlocal@openssh.com" string. -// -// See openssh-portable/PROTOCOL, section 2.4. connection: Unix domain socket forwarding -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL#L235 -type streamLocalChannelOpenDirectMsg struct { - socketPath string - reserved0 string - reserved1 uint32 -} - -// forwardedStreamLocalPayload is a struct used for SSH_MSG_CHANNEL_OPEN message -// with "forwarded-streamlocal@openssh.com" string. -type forwardedStreamLocalPayload struct { - SocketPath string - Reserved0 string -} - -// streamLocalChannelForwardMsg is a struct used for SSH2_MSG_GLOBAL_REQUEST message -// with "streamlocal-forward@openssh.com"/"cancel-streamlocal-forward@openssh.com" string. -type streamLocalChannelForwardMsg struct { - socketPath string -} - -// ListenUnix is similar to ListenTCP but uses a Unix domain socket. -func (c *Client) ListenUnix(socketPath string) (net.Listener, error) { - c.handleForwardsOnce.Do(c.handleForwards) - m := streamLocalChannelForwardMsg{ - socketPath, - } - // send message - ok, _, err := c.SendRequest("streamlocal-forward@openssh.com", true, Marshal(&m)) - if err != nil { - return nil, err - } - if !ok { - return nil, errors.New("ssh: streamlocal-forward@openssh.com request denied by peer") - } - ch := c.forwards.add(&net.UnixAddr{Name: socketPath, Net: "unix"}) - - return &unixListener{socketPath, c, ch}, nil -} - -func (c *Client) dialStreamLocal(socketPath string) (Channel, error) { - msg := streamLocalChannelOpenDirectMsg{ - socketPath: socketPath, - } - ch, in, err := c.OpenChannel("direct-streamlocal@openssh.com", Marshal(&msg)) - if err != nil { - return nil, err - } - go DiscardRequests(in) - return ch, err -} - -type unixListener struct { - socketPath string - - conn *Client - in <-chan forward -} - -// Accept waits for and returns the next connection to the listener. 
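-// Usage mirrors net.Listener; a sketch, assuming an established *Client c
-// and a socket path of our choosing (serve is hypothetical):
-//
-//	l, err := c.ListenUnix("/var/run/example.sock")
-//	if err != nil {
-//		return err
-//	}
-//	for {
-//		conn, err := l.Accept()
-//		if err != nil {
-//			break
-//		}
-//		go serve(conn)
-//	}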
-func (l *unixListener) Accept() (net.Conn, error) { - s, ok := <-l.in - if !ok { - return nil, io.EOF - } - ch, incoming, err := s.newCh.Accept() - if err != nil { - return nil, err - } - go DiscardRequests(incoming) - - return &chanConn{ - Channel: ch, - laddr: &net.UnixAddr{ - Name: l.socketPath, - Net: "unix", - }, - raddr: &net.UnixAddr{ - Name: "@", - Net: "unix", - }, - }, nil -} - -// Close closes the listener. -func (l *unixListener) Close() error { - // this also closes the listener. - l.conn.forwards.remove(&net.UnixAddr{Name: l.socketPath, Net: "unix"}) - m := streamLocalChannelForwardMsg{ - l.socketPath, - } - ok, _, err := l.conn.SendRequest("cancel-streamlocal-forward@openssh.com", true, Marshal(&m)) - if err == nil && !ok { - err = errors.New("ssh: cancel-streamlocal-forward@openssh.com failed") - } - return err -} - -// Addr returns the listener's network address. -func (l *unixListener) Addr() net.Addr { - return &net.UnixAddr{ - Name: l.socketPath, - Net: "unix", - } -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/tcpip.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/tcpip.go deleted file mode 100644 index 80d35f5ec18..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/tcpip.go +++ /dev/null @@ -1,474 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "errors" - "fmt" - "io" - "math/rand" - "net" - "strconv" - "strings" - "sync" - "time" -) - -// Listen requests the remote peer open a listening socket on -// addr. Incoming connections will be available by calling Accept on -// the returned net.Listener. The listener must be serviced, or the -// SSH connection may hang. -// N must be "tcp", "tcp4", "tcp6", or "unix". -func (c *Client) Listen(n, addr string) (net.Listener, error) { - switch n { - case "tcp", "tcp4", "tcp6": - laddr, err := net.ResolveTCPAddr(n, addr) - if err != nil { - return nil, err - } - return c.ListenTCP(laddr) - case "unix": - return c.ListenUnix(addr) - default: - return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) - } -} - -// Automatic port allocation is broken with OpenSSH before 6.0. See -// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In -// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0, -// rather than the actual port number. This means you can never open -// two different listeners with auto allocated ports. We work around -// this by trying explicit ports until we succeed. - -const openSSHPrefix = "OpenSSH_" - -var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano())) - -// isBrokenOpenSSHVersion returns true if the given version string -// specifies a version of OpenSSH that is known to have a bug in port -// forwarding. -func isBrokenOpenSSHVersion(versionStr string) bool { - i := strings.Index(versionStr, openSSHPrefix) - if i < 0 { - return false - } - i += len(openSSHPrefix) - j := i - for ; j < len(versionStr); j++ { - if versionStr[j] < '0' || versionStr[j] > '9' { - break - } - } - version, _ := strconv.Atoi(versionStr[i:j]) - return version < 6 -} - -// autoPortListenWorkaround simulates automatic port allocation by -// trying random ports repeatedly. 
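The version check above is easy to exercise in isolation. A self-contained restatement with sample banners; the banners are illustrative, and a prefix with no trailing digits yields major version 0 (treated as broken), matching the original, which also ignores the Atoi error:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// isBrokenOpenSSHVersion mirrors the check above: any banner advertising
// an OpenSSH major version below 6 gets the explicit-port workaround.
func isBrokenOpenSSHVersion(banner string) bool {
	const prefix = "OpenSSH_"
	i := strings.Index(banner, prefix)
	if i < 0 {
		return false
	}
	i += len(prefix)
	j := i
	for ; j < len(banner) && banner[j] >= '0' && banner[j] <= '9'; j++ {
	}
	major, _ := strconv.Atoi(banner[i:j])
	return major < 6
}

func main() {
	for _, b := range []string{"SSH-2.0-OpenSSH_5.9p1", "SSH-2.0-OpenSSH_8.9", "SSH-2.0-Go"} {
		fmt.Printf("%-25s broken=%v\n", b, isBrokenOpenSSHVersion(b))
	}
}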
-func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) { - var sshListener net.Listener - var err error - const tries = 10 - for i := 0; i < tries; i++ { - addr := *laddr - addr.Port = 1024 + portRandomizer.Intn(60000) - sshListener, err = c.ListenTCP(&addr) - if err == nil { - laddr.Port = addr.Port - return sshListener, err - } - } - return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err) -} - -// RFC 4254 7.1 -type channelForwardMsg struct { - addr string - rport uint32 -} - -// handleForwards starts goroutines handling forwarded connections. -// It's called on first use by (*Client).ListenTCP to not launch -// goroutines until needed. -func (c *Client) handleForwards() { - go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-tcpip")) - go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-streamlocal@openssh.com")) -} - -// ListenTCP requests the remote peer open a listening socket -// on laddr. Incoming connections will be available by calling -// Accept on the returned net.Listener. -func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { - c.handleForwardsOnce.Do(c.handleForwards) - if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { - return c.autoPortListenWorkaround(laddr) - } - - m := channelForwardMsg{ - laddr.IP.String(), - uint32(laddr.Port), - } - // send message - ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m)) - if err != nil { - return nil, err - } - if !ok { - return nil, errors.New("ssh: tcpip-forward request denied by peer") - } - - // If the original port was 0, then the remote side will - // supply a real port number in the response. - if laddr.Port == 0 { - var p struct { - Port uint32 - } - if err := Unmarshal(resp, &p); err != nil { - return nil, err - } - laddr.Port = int(p.Port) - } - - // Register this forward, using the port number we obtained. - ch := c.forwards.add(laddr) - - return &tcpListener{laddr, c, ch}, nil -} - -// forwardList stores a mapping between remote -// forward requests and the tcpListeners. -type forwardList struct { - sync.Mutex - entries []forwardEntry -} - -// forwardEntry represents an established mapping of a laddr on a -// remote ssh server to a channel connected to a tcpListener. -type forwardEntry struct { - laddr net.Addr - c chan forward -} - -// forward represents an incoming forwarded tcpip connection. The -// arguments to add/remove/lookup should be address as specified in -// the original forward-request. -type forward struct { - newCh NewChannel // the ssh client channel underlying this forward - raddr net.Addr // the raddr of the incoming connection -} - -func (l *forwardList) add(addr net.Addr) chan forward { - l.Lock() - defer l.Unlock() - f := forwardEntry{ - laddr: addr, - c: make(chan forward, 1), - } - l.entries = append(l.entries, f) - return f.c -} - -// See RFC 4254, section 7.2 -type forwardedTCPPayload struct { - Addr string - Port uint32 - OriginAddr string - OriginPort uint32 -} - -// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr. 
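All of this is plumbing behind the exported Listen/ListenTCP API. A hedged end-to-end sketch; the bastion host, credentials, and the insecure host-key callback are placeholders for illustration, not a recommended configuration:

package main

import (
	"log"
	"net/http"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Placeholder credentials; real code must verify the host key.
	cfg := &ssh.ClientConfig{
		User:            "demo",
		Auth:            []ssh.AuthMethod{ssh.Password("secret")},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}
	client, err := ssh.Dial("tcp", "bastion.example.com:22", cfg)
	if err != nil {
		log.Fatal(err)
	}
	// The remote peer listens on 8080; connections come back over
	// forwarded-tcpip channels and are served locally.
	ln, err := client.Listen("tcp", "0.0.0.0:8080")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.Serve(ln, http.FileServer(http.Dir("."))))
}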
-func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) { - if port == 0 || port > 65535 { - return nil, fmt.Errorf("ssh: port number out of range: %d", port) - } - ip := net.ParseIP(string(addr)) - if ip == nil { - return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr) - } - return &net.TCPAddr{IP: ip, Port: int(port)}, nil -} - -func (l *forwardList) handleChannels(in <-chan NewChannel) { - for ch := range in { - var ( - laddr net.Addr - raddr net.Addr - err error - ) - switch channelType := ch.ChannelType(); channelType { - case "forwarded-tcpip": - var payload forwardedTCPPayload - if err = Unmarshal(ch.ExtraData(), &payload); err != nil { - ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error()) - continue - } - - // RFC 4254 section 7.2 specifies that incoming - // addresses should list the address, in string - // format. It is implied that this should be an IP - // address, as it would be impossible to connect to it - // otherwise. - laddr, err = parseTCPAddr(payload.Addr, payload.Port) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } - raddr, err = parseTCPAddr(payload.OriginAddr, payload.OriginPort) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } - - case "forwarded-streamlocal@openssh.com": - var payload forwardedStreamLocalPayload - if err = Unmarshal(ch.ExtraData(), &payload); err != nil { - ch.Reject(ConnectionFailed, "could not parse forwarded-streamlocal@openssh.com payload: "+err.Error()) - continue - } - laddr = &net.UnixAddr{ - Name: payload.SocketPath, - Net: "unix", - } - raddr = &net.UnixAddr{ - Name: "@", - Net: "unix", - } - default: - panic(fmt.Errorf("ssh: unknown channel type %s", channelType)) - } - if ok := l.forward(laddr, raddr, ch); !ok { - // Section 7.2, implementations MUST reject spurious incoming - // connections. - ch.Reject(Prohibited, "no forward for address") - continue - } - - } -} - -// remove removes the forward entry, and the channel feeding its -// listener. -func (l *forwardList) remove(addr net.Addr) { - l.Lock() - defer l.Unlock() - for i, f := range l.entries { - if addr.Network() == f.laddr.Network() && addr.String() == f.laddr.String() { - l.entries = append(l.entries[:i], l.entries[i+1:]...) - close(f.c) - return - } - } -} - -// closeAll closes and clears all forwards. -func (l *forwardList) closeAll() { - l.Lock() - defer l.Unlock() - for _, f := range l.entries { - close(f.c) - } - l.entries = nil -} - -func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool { - l.Lock() - defer l.Unlock() - for _, f := range l.entries { - if laddr.Network() == f.laddr.Network() && laddr.String() == f.laddr.String() { - f.c <- forward{newCh: ch, raddr: raddr} - return true - } - } - return false -} - -type tcpListener struct { - laddr *net.TCPAddr - - conn *Client - in <-chan forward -} - -// Accept waits for and returns the next connection to the listener. -func (l *tcpListener) Accept() (net.Conn, error) { - s, ok := <-l.in - if !ok { - return nil, io.EOF - } - ch, incoming, err := s.newCh.Accept() - if err != nil { - return nil, err - } - go DiscardRequests(incoming) - - return &chanConn{ - Channel: ch, - laddr: l.laddr, - raddr: s.raddr, - }, nil -} - -// Close closes the listener. -func (l *tcpListener) Close() error { - m := channelForwardMsg{ - l.laddr.IP.String(), - uint32(l.laddr.Port), - } - - // this also closes the listener. 
- l.conn.forwards.remove(l.laddr) - ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m)) - if err == nil && !ok { - err = errors.New("ssh: cancel-tcpip-forward failed") - } - return err -} - -// Addr returns the listener's network address. -func (l *tcpListener) Addr() net.Addr { - return l.laddr -} - -// Dial initiates a connection to the addr from the remote host. -// The resulting connection has a zero LocalAddr() and RemoteAddr(). -func (c *Client) Dial(n, addr string) (net.Conn, error) { - var ch Channel - switch n { - case "tcp", "tcp4", "tcp6": - // Parse the address into host and numeric port. - host, portString, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - port, err := strconv.ParseUint(portString, 10, 16) - if err != nil { - return nil, err - } - ch, err = c.dial(net.IPv4zero.String(), 0, host, int(port)) - if err != nil { - return nil, err - } - // Use a zero address for local and remote address. - zeroAddr := &net.TCPAddr{ - IP: net.IPv4zero, - Port: 0, - } - return &chanConn{ - Channel: ch, - laddr: zeroAddr, - raddr: zeroAddr, - }, nil - case "unix": - var err error - ch, err = c.dialStreamLocal(addr) - if err != nil { - return nil, err - } - return &chanConn{ - Channel: ch, - laddr: &net.UnixAddr{ - Name: "@", - Net: "unix", - }, - raddr: &net.UnixAddr{ - Name: addr, - Net: "unix", - }, - }, nil - default: - return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) - } -} - -// DialTCP connects to the remote address raddr on the network net, -// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used -// as the local address for the connection. -func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) { - if laddr == nil { - laddr = &net.TCPAddr{ - IP: net.IPv4zero, - Port: 0, - } - } - ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port) - if err != nil { - return nil, err - } - return &chanConn{ - Channel: ch, - laddr: laddr, - raddr: raddr, - }, nil -} - -// RFC 4254 7.2 -type channelOpenDirectMsg struct { - raddr string - rport uint32 - laddr string - lport uint32 -} - -func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) { - msg := channelOpenDirectMsg{ - raddr: raddr, - rport: uint32(rport), - laddr: laddr, - lport: uint32(lport), - } - ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg)) - if err != nil { - return nil, err - } - go DiscardRequests(in) - return ch, err -} - -type tcpChan struct { - Channel // the backing channel -} - -// chanConn fulfills the net.Conn interface without -// the tcpChan having to hold laddr or raddr directly. -type chanConn struct { - Channel - laddr, raddr net.Addr -} - -// LocalAddr returns the local network address. -func (t *chanConn) LocalAddr() net.Addr { - return t.laddr -} - -// RemoteAddr returns the remote network address. -func (t *chanConn) RemoteAddr() net.Addr { - return t.raddr -} - -// SetDeadline sets the read and write deadlines associated -// with the connection. -func (t *chanConn) SetDeadline(deadline time.Time) error { - if err := t.SetReadDeadline(deadline); err != nil { - return err - } - return t.SetWriteDeadline(deadline) -} - -// SetReadDeadline sets the read deadline. -// A zero value for t means Read will not time out. -// After the deadline, the error from Read will implement net.Error -// with Timeout() == true. 
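Dial above is the client-side counterpart to the listeners: it opens a direct-tcpip channel and wraps it in the chanConn type shown above. A sketch assuming an established *ssh.Client; the target host and request are illustrative:

package example

import (
	"bufio"
	"fmt"

	"golang.org/x/crypto/ssh"
)

// fetchStatus tunnels a plain HTTP request through the SSH connection and
// returns the first line of the response.
func fetchStatus(client *ssh.Client) (string, error) {
	conn, err := client.Dial("tcp", "example.org:80")
	if err != nil {
		return "", err
	}
	defer conn.Close()
	fmt.Fprint(conn, "GET / HTTP/1.0\r\nHost: example.org\r\n\r\n")
	return bufio.NewReader(conn).ReadString('\n')
}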
-func (t *chanConn) SetReadDeadline(deadline time.Time) error { - // for compatibility with previous version, - // the error message contains "tcpChan" - return errors.New("ssh: tcpChan: deadline not supported") -} - -// SetWriteDeadline exists to satisfy the net.Conn interface -// but is not implemented by this type. It always returns an error. -func (t *chanConn) SetWriteDeadline(deadline time.Time) error { - return errors.New("ssh: tcpChan: deadline not supported") -} diff --git a/awsproviderlint/vendor/golang.org/x/crypto/ssh/transport.go b/awsproviderlint/vendor/golang.org/x/crypto/ssh/transport.go deleted file mode 100644 index 49ddc2e7de4..00000000000 --- a/awsproviderlint/vendor/golang.org/x/crypto/ssh/transport.go +++ /dev/null @@ -1,353 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bufio" - "bytes" - "errors" - "io" - "log" -) - -// debugTransport if set, will print packet types as they go over the -// wire. No message decoding is done, to minimize the impact on timing. -const debugTransport = false - -const ( - gcmCipherID = "aes128-gcm@openssh.com" - aes128cbcID = "aes128-cbc" - tripledescbcID = "3des-cbc" -) - -// packetConn represents a transport that implements packet based -// operations. -type packetConn interface { - // Encrypt and send a packet of data to the remote peer. - writePacket(packet []byte) error - - // Read a packet from the connection. The read is blocking, - // i.e. if error is nil, then the returned byte slice is - // always non-empty. - readPacket() ([]byte, error) - - // Close closes the write-side of the connection. - Close() error -} - -// transport is the keyingTransport that implements the SSH packet -// protocol. -type transport struct { - reader connectionState - writer connectionState - - bufReader *bufio.Reader - bufWriter *bufio.Writer - rand io.Reader - isClient bool - io.Closer -} - -// packetCipher represents a combination of SSH encryption/MAC -// protocol. A single instance should be used for one direction only. -type packetCipher interface { - // writeCipherPacket encrypts the packet and writes it to w. The - // contents of the packet are generally scrambled. - writeCipherPacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error - - // readCipherPacket reads and decrypts a packet of data. The - // returned packet may be overwritten by future calls of - // readPacket. - readCipherPacket(seqnum uint32, r io.Reader) ([]byte, error) -} - -// connectionState represents one side (read or write) of the -// connection. This is necessary because each direction has its own -// keys, and can even have its own algorithms -type connectionState struct { - packetCipher - seqNum uint32 - dir direction - pendingKeyChange chan packetCipher -} - -// prepareKeyChange sets up key material for a keychange. The key changes in -// both directions are triggered by reading and writing a msgNewKey packet -// respectively. 
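prepareKeyChange, below, installs fresh packetCiphers built by newPacketCipher, which expands the key-exchange output through generateKeyMaterial later in this file. A standalone sketch of that RFC 4253 section 7.2 expansion; SHA-256 stands in for the negotiated hash, and the inputs are placeholder byte strings (a real caller passes K encoded as an SSH mpint):

package main

import (
	"crypto/sha256"
	"fmt"
)

// deriveKey expands key material as in RFC 4253 section 7.2:
// K1 = HASH(K || H || tag || session_id), Kn = HASH(K || H || K1 || ... || Kn-1),
// concatenated until n bytes are available.
func deriveKey(k, h, sessionID []byte, tag byte, n int) []byte {
	var out, digestsSoFar []byte
	for len(out) < n {
		d := sha256.New()
		d.Write(k)
		d.Write(h)
		if len(digestsSoFar) == 0 {
			d.Write([]byte{tag})
			d.Write(sessionID)
		} else {
			d.Write(digestsSoFar)
		}
		sum := d.Sum(nil)
		out = append(out, sum...)
		digestsSoFar = append(digestsSoFar, sum...)
	}
	return out[:n]
}

func main() {
	iv := deriveKey([]byte("K-mpint"), []byte("H-hash"), []byte("session-id"), 'A', 16)
	fmt.Printf("client->server IV: %x\n", iv)
}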
-func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error { - ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult) - if err != nil { - return err - } - t.reader.pendingKeyChange <- ciph - - ciph, err = newPacketCipher(t.writer.dir, algs.w, kexResult) - if err != nil { - return err - } - t.writer.pendingKeyChange <- ciph - - return nil -} - -func (t *transport) printPacket(p []byte, write bool) { - if len(p) == 0 { - return - } - who := "server" - if t.isClient { - who = "client" - } - what := "read" - if write { - what = "write" - } - - log.Println(what, who, p[0]) -} - -// Read and decrypt next packet. -func (t *transport) readPacket() (p []byte, err error) { - for { - p, err = t.reader.readPacket(t.bufReader) - if err != nil { - break - } - if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) { - break - } - } - if debugTransport { - t.printPacket(p, false) - } - - return p, err -} - -func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { - packet, err := s.packetCipher.readCipherPacket(s.seqNum, r) - s.seqNum++ - if err == nil && len(packet) == 0 { - err = errors.New("ssh: zero length packet") - } - - if len(packet) > 0 { - switch packet[0] { - case msgNewKeys: - select { - case cipher := <-s.pendingKeyChange: - s.packetCipher = cipher - default: - return nil, errors.New("ssh: got bogus newkeys message") - } - - case msgDisconnect: - // Transform a disconnect message into an - // error. Since this is lowest level at which - // we interpret message types, doing it here - // ensures that we don't have to handle it - // elsewhere. - var msg disconnectMsg - if err := Unmarshal(packet, &msg); err != nil { - return nil, err - } - return nil, &msg - } - } - - // The packet may point to an internal buffer, so copy the - // packet out here. 
- fresh := make([]byte, len(packet)) - copy(fresh, packet) - - return fresh, err -} - -func (t *transport) writePacket(packet []byte) error { - if debugTransport { - t.printPacket(packet, true) - } - return t.writer.writePacket(t.bufWriter, t.rand, packet) -} - -func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error { - changeKeys := len(packet) > 0 && packet[0] == msgNewKeys - - err := s.packetCipher.writeCipherPacket(s.seqNum, w, rand, packet) - if err != nil { - return err - } - if err = w.Flush(); err != nil { - return err - } - s.seqNum++ - if changeKeys { - select { - case cipher := <-s.pendingKeyChange: - s.packetCipher = cipher - default: - panic("ssh: no key material for msgNewKeys") - } - } - return err -} - -func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport { - t := &transport{ - bufReader: bufio.NewReader(rwc), - bufWriter: bufio.NewWriter(rwc), - rand: rand, - reader: connectionState{ - packetCipher: &streamPacketCipher{cipher: noneCipher{}}, - pendingKeyChange: make(chan packetCipher, 1), - }, - writer: connectionState{ - packetCipher: &streamPacketCipher{cipher: noneCipher{}}, - pendingKeyChange: make(chan packetCipher, 1), - }, - Closer: rwc, - } - t.isClient = isClient - - if isClient { - t.reader.dir = serverKeys - t.writer.dir = clientKeys - } else { - t.reader.dir = clientKeys - t.writer.dir = serverKeys - } - - return t -} - -type direction struct { - ivTag []byte - keyTag []byte - macKeyTag []byte -} - -var ( - serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}} - clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}} -) - -// setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as -// described in RFC 4253, section 6.4. direction should either be serverKeys -// (to setup server->client keys) or clientKeys (for client->server keys). -func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { - cipherMode := cipherModes[algs.Cipher] - macMode := macModes[algs.MAC] - - iv := make([]byte, cipherMode.ivSize) - key := make([]byte, cipherMode.keySize) - macKey := make([]byte, macMode.keySize) - - generateKeyMaterial(iv, d.ivTag, kex) - generateKeyMaterial(key, d.keyTag, kex) - generateKeyMaterial(macKey, d.macKeyTag, kex) - - return cipherModes[algs.Cipher].create(key, iv, macKey, algs) -} - -// generateKeyMaterial fills out with key material generated from tag, K, H -// and sessionId, as specified in RFC 4253, section 7.2. -func generateKeyMaterial(out, tag []byte, r *kexResult) { - var digestsSoFar []byte - - h := r.Hash.New() - for len(out) > 0 { - h.Reset() - h.Write(r.K) - h.Write(r.H) - - if len(digestsSoFar) == 0 { - h.Write(tag) - h.Write(r.SessionID) - } else { - h.Write(digestsSoFar) - } - - digest := h.Sum(nil) - n := copy(out, digest) - out = out[n:] - if len(out) > 0 { - digestsSoFar = append(digestsSoFar, digest...) - } - } -} - -const packageVersion = "SSH-2.0-Go" - -// Sends and receives a version line. The versionLine string should -// be US ASCII, start with "SSH-2.0-", and should not include a -// newline. exchangeVersions returns the other side's version line. -func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) { - // Contrary to the RFC, we do not ignore lines that don't - // start with "SSH-2.0-" to make the library usable with - // nonconforming servers. - for _, c := range versionLine { - // The spec disallows non US-ASCII chars, and - // specifically forbids null chars. 
- if c < 32 { - return nil, errors.New("ssh: junk character in version line") - } - } - if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil { - return - } - - them, err = readVersion(rw) - return them, err -} - -// maxVersionStringBytes is the maximum number of bytes that we'll -// accept as a version string. RFC 4253 section 4.2 limits this at 255 -// chars -const maxVersionStringBytes = 255 - -// Read version string as specified by RFC 4253, section 4.2. -func readVersion(r io.Reader) ([]byte, error) { - versionString := make([]byte, 0, 64) - var ok bool - var buf [1]byte - - for length := 0; length < maxVersionStringBytes; length++ { - _, err := io.ReadFull(r, buf[:]) - if err != nil { - return nil, err - } - // The RFC says that the version should be terminated with \r\n - // but several SSH servers actually only send a \n. - if buf[0] == '\n' { - if !bytes.HasPrefix(versionString, []byte("SSH-")) { - // RFC 4253 says we need to ignore all version string lines - // except the one containing the SSH version (provided that - // all the lines do not exceed 255 bytes in total). - versionString = versionString[:0] - continue - } - ok = true - break - } - - // non ASCII chars are disallowed, but we are lenient, - // since Go doesn't use null-terminated strings. - - // The RFC allows a comment after a space, however, - // all of it (version and comments) goes into the - // session hash. - versionString = append(versionString, buf[0]) - } - - if !ok { - return nil, errors.New("ssh: overflow reading version string") - } - - // There might be a '\r' on the end which we should remove. - if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' { - versionString = versionString[:len(versionString)-1] - } - return versionString, nil -} diff --git a/awsproviderlint/vendor/golang.org/x/net/internal/socks/client.go b/awsproviderlint/vendor/golang.org/x/net/internal/socks/client.go deleted file mode 100644 index 3d6f516a595..00000000000 --- a/awsproviderlint/vendor/golang.org/x/net/internal/socks/client.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package socks - -import ( - "context" - "errors" - "io" - "net" - "strconv" - "time" -) - -var ( - noDeadline = time.Time{} - aLongTimeAgo = time.Unix(1, 0) -) - -func (d *Dialer) connect(ctx context.Context, c net.Conn, address string) (_ net.Addr, ctxErr error) { - host, port, err := splitHostPort(address) - if err != nil { - return nil, err - } - if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() { - c.SetDeadline(deadline) - defer c.SetDeadline(noDeadline) - } - if ctx != context.Background() { - errCh := make(chan error, 1) - done := make(chan struct{}) - defer func() { - close(done) - if ctxErr == nil { - ctxErr = <-errCh - } - }() - go func() { - select { - case <-ctx.Done(): - c.SetDeadline(aLongTimeAgo) - errCh <- ctx.Err() - case <-done: - errCh <- nil - } - }() - } - - b := make([]byte, 0, 6+len(host)) // the size here is just an estimate - b = append(b, Version5) - if len(d.AuthMethods) == 0 || d.Authenticate == nil { - b = append(b, 1, byte(AuthMethodNotRequired)) - } else { - ams := d.AuthMethods - if len(ams) > 255 { - return nil, errors.New("too many authentication methods") - } - b = append(b, byte(len(ams))) - for _, am := range ams { - b = append(b, byte(am)) - } - } - if _, ctxErr = c.Write(b); ctxErr != nil { - return - } - - if _, ctxErr = io.ReadFull(c, b[:2]); ctxErr != nil { - return - } - if b[0] != Version5 { - return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) - } - am := AuthMethod(b[1]) - if am == AuthMethodNoAcceptableMethods { - return nil, errors.New("no acceptable authentication methods") - } - if d.Authenticate != nil { - if ctxErr = d.Authenticate(ctx, c, am); ctxErr != nil { - return - } - } - - b = b[:0] - b = append(b, Version5, byte(d.cmd), 0) - if ip := net.ParseIP(host); ip != nil { - if ip4 := ip.To4(); ip4 != nil { - b = append(b, AddrTypeIPv4) - b = append(b, ip4...) - } else if ip6 := ip.To16(); ip6 != nil { - b = append(b, AddrTypeIPv6) - b = append(b, ip6...) - } else { - return nil, errors.New("unknown address type") - } - } else { - if len(host) > 255 { - return nil, errors.New("FQDN too long") - } - b = append(b, AddrTypeFQDN) - b = append(b, byte(len(host))) - b = append(b, host...) 
- } - b = append(b, byte(port>>8), byte(port)) - if _, ctxErr = c.Write(b); ctxErr != nil { - return - } - - if _, ctxErr = io.ReadFull(c, b[:4]); ctxErr != nil { - return - } - if b[0] != Version5 { - return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) - } - if cmdErr := Reply(b[1]); cmdErr != StatusSucceeded { - return nil, errors.New("unknown error " + cmdErr.String()) - } - if b[2] != 0 { - return nil, errors.New("non-zero reserved field") - } - l := 2 - var a Addr - switch b[3] { - case AddrTypeIPv4: - l += net.IPv4len - a.IP = make(net.IP, net.IPv4len) - case AddrTypeIPv6: - l += net.IPv6len - a.IP = make(net.IP, net.IPv6len) - case AddrTypeFQDN: - if _, err := io.ReadFull(c, b[:1]); err != nil { - return nil, err - } - l += int(b[0]) - default: - return nil, errors.New("unknown address type " + strconv.Itoa(int(b[3]))) - } - if cap(b) < l { - b = make([]byte, l) - } else { - b = b[:l] - } - if _, ctxErr = io.ReadFull(c, b); ctxErr != nil { - return - } - if a.IP != nil { - copy(a.IP, b) - } else { - a.Name = string(b[:len(b)-2]) - } - a.Port = int(b[len(b)-2])<<8 | int(b[len(b)-1]) - return &a, nil -} - -func splitHostPort(address string) (string, int, error) { - host, port, err := net.SplitHostPort(address) - if err != nil { - return "", 0, err - } - portnum, err := strconv.Atoi(port) - if err != nil { - return "", 0, err - } - if 1 > portnum || portnum > 0xffff { - return "", 0, errors.New("port number out of range " + port) - } - return host, portnum, nil -} diff --git a/awsproviderlint/vendor/golang.org/x/net/internal/socks/socks.go b/awsproviderlint/vendor/golang.org/x/net/internal/socks/socks.go deleted file mode 100644 index 97db2340ec9..00000000000 --- a/awsproviderlint/vendor/golang.org/x/net/internal/socks/socks.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package socks provides a SOCKS version 5 client implementation. -// -// SOCKS protocol version 5 is defined in RFC 1928. -// Username/Password authentication for SOCKS version 5 is defined in -// RFC 1929. -package socks - -import ( - "context" - "errors" - "io" - "net" - "strconv" -) - -// A Command represents a SOCKS command. -type Command int - -func (cmd Command) String() string { - switch cmd { - case CmdConnect: - return "socks connect" - case cmdBind: - return "socks bind" - default: - return "socks " + strconv.Itoa(int(cmd)) - } -} - -// An AuthMethod represents a SOCKS authentication method. -type AuthMethod int - -// A Reply represents a SOCKS command reply code. -type Reply int - -func (code Reply) String() string { - switch code { - case StatusSucceeded: - return "succeeded" - case 0x01: - return "general SOCKS server failure" - case 0x02: - return "connection not allowed by ruleset" - case 0x03: - return "network unreachable" - case 0x04: - return "host unreachable" - case 0x05: - return "connection refused" - case 0x06: - return "TTL expired" - case 0x07: - return "command not supported" - case 0x08: - return "address type not supported" - default: - return "unknown code: " + strconv.Itoa(int(code)) - } -} - -// Wire protocol constants. 
-const ( - Version5 = 0x05 - - AddrTypeIPv4 = 0x01 - AddrTypeFQDN = 0x03 - AddrTypeIPv6 = 0x04 - - CmdConnect Command = 0x01 // establishes an active-open forward proxy connection - cmdBind Command = 0x02 // establishes a passive-open forward proxy connection - - AuthMethodNotRequired AuthMethod = 0x00 // no authentication required - AuthMethodUsernamePassword AuthMethod = 0x02 // use username/password - AuthMethodNoAcceptableMethods AuthMethod = 0xff // no acceptable authentication methods - - StatusSucceeded Reply = 0x00 -) - -// An Addr represents a SOCKS-specific address. -// Either Name or IP is used exclusively. -type Addr struct { - Name string // fully-qualified domain name - IP net.IP - Port int -} - -func (a *Addr) Network() string { return "socks" } - -func (a *Addr) String() string { - if a == nil { - return "" - } - port := strconv.Itoa(a.Port) - if a.IP == nil { - return net.JoinHostPort(a.Name, port) - } - return net.JoinHostPort(a.IP.String(), port) -} - -// A Conn represents a forward proxy connection. -type Conn struct { - net.Conn - - boundAddr net.Addr -} - -// BoundAddr returns the address assigned by the proxy server for -// connecting to the command target address from the proxy server. -func (c *Conn) BoundAddr() net.Addr { - if c == nil { - return nil - } - return c.boundAddr -} - -// A Dialer holds SOCKS-specific options. -type Dialer struct { - cmd Command // either CmdConnect or cmdBind - proxyNetwork string // network between a proxy server and a client - proxyAddress string // proxy server address - - // ProxyDial specifies the optional dial function for - // establishing the transport connection. - ProxyDial func(context.Context, string, string) (net.Conn, error) - - // AuthMethods specifies the list of request authentication - // methods. - // If empty, SOCKS client requests only AuthMethodNotRequired. - AuthMethods []AuthMethod - - // Authenticate specifies the optional authentication - // function. It must be non-nil when AuthMethods is not empty. - // It must return an error when the authentication is failed. - Authenticate func(context.Context, io.ReadWriter, AuthMethod) error -} - -// DialContext connects to the provided address on the provided -// network. -// -// The returned error value may be a net.OpError. When the Op field of -// net.OpError contains "socks", the Source field contains a proxy -// server address and the Addr field contains a command target -// address. -// -// See func Dial of the net package of standard library for a -// description of the network and address parameters. 
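Because this package lives under internal/, external callers configure these fields through golang.org/x/net/proxy, whose SOCKS5 helper fills in AuthMethods and Authenticate as described above. A usage sketch; the local proxy address and credentials are placeholders:

package main

import (
	"log"

	"golang.org/x/net/proxy"
)

func main() {
	// Hypothetical local SOCKS server and placeholder credentials.
	auth := &proxy.Auth{User: "demo", Password: "secret"}
	d, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", auth, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}
	conn, err := d.Dial("tcp", "example.org:80")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	log.Println("connected via", conn.LocalAddr())
}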
-func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { - if err := d.validateTarget(network, address); err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - if ctx == nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} - } - var err error - var c net.Conn - if d.ProxyDial != nil { - c, err = d.ProxyDial(ctx, d.proxyNetwork, d.proxyAddress) - } else { - var dd net.Dialer - c, err = dd.DialContext(ctx, d.proxyNetwork, d.proxyAddress) - } - if err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - a, err := d.connect(ctx, c, address) - if err != nil { - c.Close() - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - return &Conn{Conn: c, boundAddr: a}, nil -} - -// DialWithConn initiates a connection from SOCKS server to the target -// network and address using the connection c that is already -// connected to the SOCKS server. -// -// It returns the connection's local address assigned by the SOCKS -// server. -func (d *Dialer) DialWithConn(ctx context.Context, c net.Conn, network, address string) (net.Addr, error) { - if err := d.validateTarget(network, address); err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - if ctx == nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} - } - a, err := d.connect(ctx, c, address) - if err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - return a, nil -} - -// Dial connects to the provided address on the provided network. -// -// Unlike DialContext, it returns a raw transport connection instead -// of a forward proxy connection. -// -// Deprecated: Use DialContext or DialWithConn instead. 
-func (d *Dialer) Dial(network, address string) (net.Conn, error) { - if err := d.validateTarget(network, address); err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - var err error - var c net.Conn - if d.ProxyDial != nil { - c, err = d.ProxyDial(context.Background(), d.proxyNetwork, d.proxyAddress) - } else { - c, err = net.Dial(d.proxyNetwork, d.proxyAddress) - } - if err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - if _, err := d.DialWithConn(context.Background(), c, network, address); err != nil { - c.Close() - return nil, err - } - return c, nil -} - -func (d *Dialer) validateTarget(network, address string) error { - switch network { - case "tcp", "tcp6", "tcp4": - default: - return errors.New("network not implemented") - } - switch d.cmd { - case CmdConnect, cmdBind: - default: - return errors.New("command not implemented") - } - return nil -} - -func (d *Dialer) pathAddrs(address string) (proxy, dst net.Addr, err error) { - for i, s := range []string{d.proxyAddress, address} { - host, port, err := splitHostPort(s) - if err != nil { - return nil, nil, err - } - a := &Addr{Port: port} - a.IP = net.ParseIP(host) - if a.IP == nil { - a.Name = host - } - if i == 0 { - proxy = a - } else { - dst = a - } - } - return -} - -// NewDialer returns a new Dialer that dials through the provided -// proxy server's network and address. -func NewDialer(network, address string) *Dialer { - return &Dialer{proxyNetwork: network, proxyAddress: address, cmd: CmdConnect} -} - -const ( - authUsernamePasswordVersion = 0x01 - authStatusSucceeded = 0x00 -) - -// UsernamePassword are the credentials for the username/password -// authentication method. -type UsernamePassword struct { - Username string - Password string -} - -// Authenticate authenticates a pair of username and password with the -// proxy server. -func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, auth AuthMethod) error { - switch auth { - case AuthMethodNotRequired: - return nil - case AuthMethodUsernamePassword: - if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) == 0 || len(up.Password) > 255 { - return errors.New("invalid username/password") - } - b := []byte{authUsernamePasswordVersion} - b = append(b, byte(len(up.Username))) - b = append(b, up.Username...) - b = append(b, byte(len(up.Password))) - b = append(b, up.Password...) - // TODO(mikio): handle IO deadlines and cancelation if - // necessary - if _, err := rw.Write(b); err != nil { - return err - } - if _, err := io.ReadFull(rw, b[:2]); err != nil { - return err - } - if b[0] != authUsernamePasswordVersion { - return errors.New("invalid username/password version") - } - if b[1] != authStatusSucceeded { - return errors.New("username/password authentication failed") - } - return nil - } - return errors.New("unsupported authentication method " + strconv.Itoa(int(auth))) -} diff --git a/awsproviderlint/vendor/golang.org/x/net/proxy/dial.go b/awsproviderlint/vendor/golang.org/x/net/proxy/dial.go deleted file mode 100644 index 811c2e4e962..00000000000 --- a/awsproviderlint/vendor/golang.org/x/net/proxy/dial.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package proxy - -import ( - "context" - "net" -) - -// A ContextDialer dials using a context. -type ContextDialer interface { - DialContext(ctx context.Context, network, address string) (net.Conn, error) -} - -// Dial works like DialContext on net.Dialer but using a dialer returned by FromEnvironment. -// -// The passed ctx is only used for returning the Conn, not the lifetime of the Conn. -// -// Custom dialers (registered via RegisterDialerType) that do not implement ContextDialer -// can leak a goroutine for as long as it takes the underlying Dialer implementation to timeout. -// -// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. -func Dial(ctx context.Context, network, address string) (net.Conn, error) { - d := FromEnvironment() - if xd, ok := d.(ContextDialer); ok { - return xd.DialContext(ctx, network, address) - } - return dialContext(ctx, d, network, address) -} - -// WARNING: this can leak a goroutine for as long as the underlying Dialer implementation takes to timeout -// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. -func dialContext(ctx context.Context, d Dialer, network, address string) (net.Conn, error) { - var ( - conn net.Conn - done = make(chan struct{}, 1) - err error - ) - go func() { - conn, err = d.Dial(network, address) - close(done) - if conn != nil && ctx.Err() != nil { - conn.Close() - } - }() - select { - case <-ctx.Done(): - err = ctx.Err() - case <-done: - } - return conn, err -} diff --git a/awsproviderlint/vendor/golang.org/x/net/proxy/direct.go b/awsproviderlint/vendor/golang.org/x/net/proxy/direct.go deleted file mode 100644 index 3d66bdef9d7..00000000000 --- a/awsproviderlint/vendor/golang.org/x/net/proxy/direct.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proxy - -import ( - "context" - "net" -) - -type direct struct{} - -// Direct implements Dialer by making network connections directly using net.Dial or net.DialContext. -var Direct = direct{} - -var ( - _ Dialer = Direct - _ ContextDialer = Direct -) - -// Dial directly invokes net.Dial with the supplied parameters. -func (direct) Dial(network, addr string) (net.Conn, error) { - return net.Dial(network, addr) -} - -// DialContext instantiates a net.Dialer and invokes its DialContext receiver with the supplied parameters. -func (direct) DialContext(ctx context.Context, network, addr string) (net.Conn, error) { - var d net.Dialer - return d.DialContext(ctx, network, addr) -} diff --git a/awsproviderlint/vendor/golang.org/x/net/proxy/per_host.go b/awsproviderlint/vendor/golang.org/x/net/proxy/per_host.go deleted file mode 100644 index 573fe79e86e..00000000000 --- a/awsproviderlint/vendor/golang.org/x/net/proxy/per_host.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proxy - -import ( - "context" - "net" - "strings" -) - -// A PerHost directs connections to a default Dialer unless the host name -// requested matches one of a number of exceptions. 
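Given the goroutine-leak caveat above, a typical call site bounds the dial with a context deadline; the target address is illustrative:

package main

import (
	"context"
	"log"
	"time"

	"golang.org/x/net/proxy"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// Honors ALL_PROXY/NO_PROXY from the environment, falling back to a
	// direct dial when no proxy is configured.
	conn, err := proxy.Dial(ctx, "tcp", "example.org:443")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	log.Println("connected to", conn.RemoteAddr())
}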
-type PerHost struct { - def, bypass Dialer - - bypassNetworks []*net.IPNet - bypassIPs []net.IP - bypassZones []string - bypassHosts []string -} - -// NewPerHost returns a PerHost Dialer that directs connections to either -// defaultDialer or bypass, depending on whether the connection matches one of -// the configured rules. -func NewPerHost(defaultDialer, bypass Dialer) *PerHost { - return &PerHost{ - def: defaultDialer, - bypass: bypass, - } -} - -// Dial connects to the address addr on the given network through either -// defaultDialer or bypass. -func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - - return p.dialerForRequest(host).Dial(network, addr) -} - -// DialContext connects to the address addr on the given network through either -// defaultDialer or bypass. -func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.Conn, err error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - d := p.dialerForRequest(host) - if x, ok := d.(ContextDialer); ok { - return x.DialContext(ctx, network, addr) - } - return dialContext(ctx, d, network, addr) -} - -func (p *PerHost) dialerForRequest(host string) Dialer { - if ip := net.ParseIP(host); ip != nil { - for _, net := range p.bypassNetworks { - if net.Contains(ip) { - return p.bypass - } - } - for _, bypassIP := range p.bypassIPs { - if bypassIP.Equal(ip) { - return p.bypass - } - } - return p.def - } - - for _, zone := range p.bypassZones { - if strings.HasSuffix(host, zone) { - return p.bypass - } - if host == zone[1:] { - // For a zone ".example.com", we match "example.com" - // too. - return p.bypass - } - } - for _, bypassHost := range p.bypassHosts { - if bypassHost == host { - return p.bypass - } - } - return p.def -} - -// AddFromString parses a string that contains comma-separated values -// specifying hosts that should use the bypass proxy. Each value is either an -// IP address, a CIDR range, a zone (*.example.com) or a host name -// (localhost). A best effort is made to parse the string and errors are -// ignored. -func (p *PerHost) AddFromString(s string) { - hosts := strings.Split(s, ",") - for _, host := range hosts { - host = strings.TrimSpace(host) - if len(host) == 0 { - continue - } - if strings.Contains(host, "/") { - // We assume that it's a CIDR address like 127.0.0.0/8 - if _, net, err := net.ParseCIDR(host); err == nil { - p.AddNetwork(net) - } - continue - } - if ip := net.ParseIP(host); ip != nil { - p.AddIP(ip) - continue - } - if strings.HasPrefix(host, "*.") { - p.AddZone(host[1:]) - continue - } - p.AddHost(host) - } -} - -// AddIP specifies an IP address that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. A connection -// to a named host will never match an IP. -func (p *PerHost) AddIP(ip net.IP) { - p.bypassIPs = append(p.bypassIPs, ip) -} - -// AddNetwork specifies an IP range that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. A connection -// to a named host will never match. -func (p *PerHost) AddNetwork(net *net.IPNet) { - p.bypassNetworks = append(p.bypassNetworks, net) -} - -// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of -// "example.com" matches "example.com" and all of its subdomains. 
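A short sketch of wiring these bypass rules up, NO_PROXY-style; the proxied dialer and the patterns are illustrative:

package example

import "golang.org/x/net/proxy"

// newSplitDialer sends most traffic through proxied but dials the listed
// exceptions directly: a CIDR range, a zone, and a plain host name, parsed
// by AddFromString as described above.
func newSplitDialer(proxied proxy.Dialer) proxy.Dialer {
	ph := proxy.NewPerHost(proxied, proxy.Direct)
	ph.AddFromString("10.0.0.0/8,*.internal.example.com,localhost")
	return ph
}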
-func (p *PerHost) AddZone(zone string) { - if strings.HasSuffix(zone, ".") { - zone = zone[:len(zone)-1] - } - if !strings.HasPrefix(zone, ".") { - zone = "." + zone - } - p.bypassZones = append(p.bypassZones, zone) -} - -// AddHost specifies a host name that will use the bypass proxy. -func (p *PerHost) AddHost(host string) { - if strings.HasSuffix(host, ".") { - host = host[:len(host)-1] - } - p.bypassHosts = append(p.bypassHosts, host) -} diff --git a/awsproviderlint/vendor/golang.org/x/net/proxy/proxy.go b/awsproviderlint/vendor/golang.org/x/net/proxy/proxy.go deleted file mode 100644 index 9ff4b9a7767..00000000000 --- a/awsproviderlint/vendor/golang.org/x/net/proxy/proxy.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package proxy provides support for a variety of protocols to proxy network -// data. -package proxy // import "golang.org/x/net/proxy" - -import ( - "errors" - "net" - "net/url" - "os" - "sync" -) - -// A Dialer is a means to establish a connection. -// Custom dialers should also implement ContextDialer. -type Dialer interface { - // Dial connects to the given address via the proxy. - Dial(network, addr string) (c net.Conn, err error) -} - -// Auth contains authentication parameters that specific Dialers may require. -type Auth struct { - User, Password string -} - -// FromEnvironment returns the dialer specified by the proxy-related -// variables in the environment and makes underlying connections -// directly. -func FromEnvironment() Dialer { - return FromEnvironmentUsing(Direct) -} - -// FromEnvironmentUsing returns the dialer specify by the proxy-related -// variables in the environment and makes underlying connections -// using the provided forwarding Dialer (for instance, a *net.Dialer -// with desired configuration). -func FromEnvironmentUsing(forward Dialer) Dialer { - allProxy := allProxyEnv.Get() - if len(allProxy) == 0 { - return forward - } - - proxyURL, err := url.Parse(allProxy) - if err != nil { - return forward - } - proxy, err := FromURL(proxyURL, forward) - if err != nil { - return forward - } - - noProxy := noProxyEnv.Get() - if len(noProxy) == 0 { - return proxy - } - - perHost := NewPerHost(proxy, forward) - perHost.AddFromString(noProxy) - return perHost -} - -// proxySchemes is a map from URL schemes to a function that creates a Dialer -// from a URL with such a scheme. -var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error) - -// RegisterDialerType takes a URL scheme and a function to generate Dialers from -// a URL with that scheme and a forwarding Dialer. Registered schemes are used -// by FromURL. -func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) { - if proxySchemes == nil { - proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error)) - } - proxySchemes[scheme] = f -} - -// FromURL returns a Dialer given a URL specification and an underlying -// Dialer for it to make network requests. 
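FromURL, below, is also useful on its own when the proxy comes from a flag rather than the environment. A sketch; the scheme and address in the example value are illustrative:

package example

import (
	"net/url"

	"golang.org/x/net/proxy"
)

// dialerFromFlag builds a Dialer from a -proxy style flag value such as
// "socks5://user:pass@127.0.0.1:1080".
func dialerFromFlag(raw string) (proxy.Dialer, error) {
	u, err := url.Parse(raw)
	if err != nil {
		return nil, err
	}
	return proxy.FromURL(u, proxy.Direct)
}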
-func FromURL(u *url.URL, forward Dialer) (Dialer, error) { - var auth *Auth - if u.User != nil { - auth = new(Auth) - auth.User = u.User.Username() - if p, ok := u.User.Password(); ok { - auth.Password = p - } - } - - switch u.Scheme { - case "socks5", "socks5h": - addr := u.Hostname() - port := u.Port() - if port == "" { - port = "1080" - } - return SOCKS5("tcp", net.JoinHostPort(addr, port), auth, forward) - } - - // If the scheme doesn't match any of the built-in schemes, see if it - // was registered by another package. - if proxySchemes != nil { - if f, ok := proxySchemes[u.Scheme]; ok { - return f(u, forward) - } - } - - return nil, errors.New("proxy: unknown scheme: " + u.Scheme) -} - -var ( - allProxyEnv = &envOnce{ - names: []string{"ALL_PROXY", "all_proxy"}, - } - noProxyEnv = &envOnce{ - names: []string{"NO_PROXY", "no_proxy"}, - } -) - -// envOnce looks up an environment variable (optionally by multiple -// names) once. It mitigates expensive lookups on some platforms -// (e.g. Windows). -// (Borrowed from net/http/transport.go) -type envOnce struct { - names []string - once sync.Once - val string -} - -func (e *envOnce) Get() string { - e.once.Do(e.init) - return e.val -} - -func (e *envOnce) init() { - for _, n := range e.names { - e.val = os.Getenv(n) - if e.val != "" { - return - } - } -} - -// reset is used by tests -func (e *envOnce) reset() { - e.once = sync.Once{} - e.val = "" -} diff --git a/awsproviderlint/vendor/golang.org/x/net/proxy/socks5.go b/awsproviderlint/vendor/golang.org/x/net/proxy/socks5.go deleted file mode 100644 index c91651f96db..00000000000 --- a/awsproviderlint/vendor/golang.org/x/net/proxy/socks5.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proxy - -import ( - "context" - "net" - - "golang.org/x/net/internal/socks" -) - -// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given -// address with an optional username and password. -// See RFC 1928 and RFC 1929. -func SOCKS5(network, address string, auth *Auth, forward Dialer) (Dialer, error) { - d := socks.NewDialer(network, address) - if forward != nil { - if f, ok := forward.(ContextDialer); ok { - d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { - return f.DialContext(ctx, network, address) - } - } else { - d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { - return dialContext(ctx, forward, network, address) - } - } - } - if auth != nil { - up := socks.UsernamePassword{ - Username: auth.User, - Password: auth.Password, - } - d.AuthMethods = []socks.AuthMethod{ - socks.AuthMethodNotRequired, - socks.AuthMethodUsernamePassword, - } - d.Authenticate = up.Authenticate - } - return d, nil -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/awsproviderlint/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s deleted file mode 100644 index 06f84b85558..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !gccgo - -#include "textflag.h" - -// -// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go -// - -TEXT ·syscall6(SB),NOSPLIT,$0-88 - JMP syscall·syscall6(SB) - -TEXT ·rawSyscall6(SB),NOSPLIT,$0-88 - JMP syscall·rawSyscall6(SB) diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/byteorder.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/byteorder.go deleted file mode 100644 index dcbb14ef35a..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/byteorder.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import ( - "runtime" -) - -// byteOrder is a subset of encoding/binary.ByteOrder. -type byteOrder interface { - Uint32([]byte) uint32 - Uint64([]byte) uint64 -} - -type littleEndian struct{} -type bigEndian struct{} - -func (littleEndian) Uint32(b []byte) uint32 { - _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func (littleEndian) Uint64(b []byte) uint64 { - _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -func (bigEndian) Uint32(b []byte) uint32 { - _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 - return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 -} - -func (bigEndian) Uint64(b []byte) uint64 { - _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 - return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | - uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 -} - -// hostByteOrder returns littleEndian on little-endian machines and -// bigEndian on big-endian machines. -func hostByteOrder() byteOrder { - switch runtime.GOARCH { - case "386", "amd64", "amd64p32", - "alpha", - "arm", "arm64", - "mipsle", "mips64le", "mips64p32le", - "nios2", - "ppc64le", - "riscv", "riscv64", - "sh": - return littleEndian{} - case "armbe", "arm64be", - "m68k", - "mips", "mips64", "mips64p32", - "ppc", "ppc64", - "s390", "s390x", - "shbe", - "sparc", "sparc64": - return bigEndian{} - } - panic("unknown architecture") -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu.go deleted file mode 100644 index f77701fe868..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu.go +++ /dev/null @@ -1,287 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cpu implements processor feature detection for -// various CPU architectures. -package cpu - -import ( - "os" - "strings" -) - -// Initialized reports whether the CPU features were initialized. -// -// For some GOOS/GOARCH combinations initialization of the CPU features depends -// on reading an operating specific file, e.g. /proc/self/auxv on linux/arm -// Initialized will report false if reading the file fails. -var Initialized bool - -// CacheLinePad is used to pad structs to avoid false sharing. -type CacheLinePad struct{ _ [cacheLineSize]byte } - -// X86 contains the supported CPU features of the -// current X86/AMD64 platform. 
If the current platform -// is not X86/AMD64 then all feature flags are false. -// -// X86 is padded to avoid false sharing. Further the HasAVX -// and HasAVX2 are only set if the OS supports XMM and YMM -// registers in addition to the CPUID feature bit being set. -var X86 struct { - _ CacheLinePad - HasAES bool // AES hardware implementation (AES NI) - HasADX bool // Multi-precision add-carry instruction extensions - HasAVX bool // Advanced vector extension - HasAVX2 bool // Advanced vector extension 2 - HasAVX512 bool // Advanced vector extension 512 - HasAVX512F bool // Advanced vector extension 512 Foundation Instructions - HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions - HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions - HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions Instructions - HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions - HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions - HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions - HasAVX512IFMA bool // Advanced vector extension 512 Integer Fused Multiply Add - HasAVX512VBMI bool // Advanced vector extension 512 Vector Byte Manipulation Instructions - HasAVX5124VNNIW bool // Advanced vector extension 512 Vector Neural Network Instructions Word variable precision - HasAVX5124FMAPS bool // Advanced vector extension 512 Fused Multiply Accumulation Packed Single precision - HasAVX512VPOPCNTDQ bool // Advanced vector extension 512 Double and quad word population count instructions - HasAVX512VPCLMULQDQ bool // Advanced vector extension 512 Vector carry-less multiply operations - HasAVX512VNNI bool // Advanced vector extension 512 Vector Neural Network Instructions - HasAVX512GFNI bool // Advanced vector extension 512 Galois field New Instructions - HasAVX512VAES bool // Advanced vector extension 512 Vector AES instructions - HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2 - HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms - HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions - HasBMI1 bool // Bit manipulation instruction set 1 - HasBMI2 bool // Bit manipulation instruction set 2 - HasERMS bool // Enhanced REP for MOVSB and STOSB - HasFMA bool // Fused-multiply-add instructions - HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. - HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM - HasPOPCNT bool // Hamming weight instruction POPCNT. - HasRDRAND bool // RDRAND instruction (on-chip random number generator) - HasRDSEED bool // RDSEED instruction (on-chip random number generator) - HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) - HasSSE3 bool // Streaming SIMD extension 3 - HasSSSE3 bool // Supplemental streaming SIMD extension 3 - HasSSE41 bool // Streaming SIMD extension 4 and 4.1 - HasSSE42 bool // Streaming SIMD extension 4 and 4.2 - _ CacheLinePad -} - -// ARM64 contains the supported CPU features of the -// current ARMv8(aarch64) platform. If the current platform -// is not arm64 then all feature flags are false. 
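These flags are plain package variables, so a feature probe is a one-liner per flag; output naturally varies by machine:

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	fmt.Println("initialized:", cpu.Initialized)
	fmt.Println("AVX2:       ", cpu.X86.HasAVX2)
	fmt.Println("AES-NI:     ", cpu.X86.HasAES)
	fmt.Println("SSE4.2:     ", cpu.X86.HasSSE42)
}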
-var ARM64 struct { - _ CacheLinePad - HasFP bool // Floating-point instruction set (always available) - HasASIMD bool // Advanced SIMD (always available) - HasEVTSTRM bool // Event stream support - HasAES bool // AES hardware implementation - HasPMULL bool // Polynomial multiplication instruction set - HasSHA1 bool // SHA1 hardware implementation - HasSHA2 bool // SHA2 hardware implementation - HasCRC32 bool // CRC32 hardware implementation - HasATOMICS bool // Atomic memory operation instruction set - HasFPHP bool // Half precision floating-point instruction set - HasASIMDHP bool // Advanced SIMD half precision instruction set - HasCPUID bool // CPUID identification scheme registers - HasASIMDRDM bool // Rounding double multiply add/subtract instruction set - HasJSCVT bool // Javascript conversion from floating-point to integer - HasFCMA bool // Floating-point multiplication and addition of complex numbers - HasLRCPC bool // Release Consistent processor consistent support - HasDCPOP bool // Persistent memory support - HasSHA3 bool // SHA3 hardware implementation - HasSM3 bool // SM3 hardware implementation - HasSM4 bool // SM4 hardware implementation - HasASIMDDP bool // Advanced SIMD double precision instruction set - HasSHA512 bool // SHA512 hardware implementation - HasSVE bool // Scalable Vector Extensions - HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 - _ CacheLinePad -} - -// ARM contains the supported CPU features of the current ARM (32-bit) platform. -// All feature flags are false if: -// 1. the current platform is not arm, or -// 2. the current operating system is not Linux. -var ARM struct { - _ CacheLinePad - HasSWP bool // SWP instruction support - HasHALF bool // Half-word load and store support - HasTHUMB bool // ARM Thumb instruction set - Has26BIT bool // Address space limited to 26-bits - HasFASTMUL bool // 32-bit operand, 64-bit result multiplication support - HasFPA bool // Floating point arithmetic support - HasVFP bool // Vector floating point support - HasEDSP bool // DSP Extensions support - HasJAVA bool // Java instruction set - HasIWMMXT bool // Intel Wireless MMX technology support - HasCRUNCH bool // MaverickCrunch context switching and handling - HasTHUMBEE bool // Thumb EE instruction set - HasNEON bool // NEON instruction set - HasVFPv3 bool // Vector floating point version 3 support - HasVFPv3D16 bool // Vector floating point version 3 D8-D15 - HasTLS bool // Thread local storage support - HasVFPv4 bool // Vector floating point version 4 support - HasIDIVA bool // Integer divide instruction support in ARM mode - HasIDIVT bool // Integer divide instruction support in Thumb mode - HasVFPD32 bool // Vector floating point version 3 D15-D31 - HasLPAE bool // Large Physical Address Extensions - HasEVTSTRM bool // Event stream support - HasAES bool // AES hardware implementation - HasPMULL bool // Polynomial multiplication instruction set - HasSHA1 bool // SHA1 hardware implementation - HasSHA2 bool // SHA2 hardware implementation - HasCRC32 bool // CRC32 hardware implementation - _ CacheLinePad -} - -// MIPS64X contains the supported CPU features of the current mips64/mips64le -// platforms. If the current platform is not mips64/mips64le or the current -// operating system is not Linux then all feature flags are false. -var MIPS64X struct { - _ CacheLinePad - HasMSA bool // MIPS SIMD architecture - _ CacheLinePad -} - -// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms. 
-// If the current platform is not ppc64/ppc64le then all feature flags are false. -// -// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00, -// since there are no optional categories. There are some exceptions that also -// require kernel support to work (DARN, SCV), so there are feature bits for -// those as well. The minimum processor requirement is POWER8 (ISA 2.07). -// The struct is padded to avoid false sharing. -var PPC64 struct { - _ CacheLinePad - HasDARN bool // Hardware random number generator (requires kernel enablement) - HasSCV bool // Syscall vectored (requires kernel enablement) - IsPOWER8 bool // ISA v2.07 (POWER8) - IsPOWER9 bool // ISA v3.00 (POWER9) - _ CacheLinePad -} - -// S390X contains the supported CPU features of the current IBM Z -// (s390x) platform. If the current platform is not IBM Z then all -// feature flags are false. -// -// S390X is padded to avoid false sharing. Further HasVX is only set -// if the OS supports vector registers in addition to the STFLE -// feature bit being set. -var S390X struct { - _ CacheLinePad - HasZARCH bool // z/Architecture mode is active [mandatory] - HasSTFLE bool // store facility list extended - HasLDISP bool // long (20-bit) displacements - HasEIMM bool // 32-bit immediates - HasDFP bool // decimal floating point - HasETF3EH bool // ETF-3 enhanced - HasMSA bool // message security assist (CPACF) - HasAES bool // KM-AES{128,192,256} functions - HasAESCBC bool // KMC-AES{128,192,256} functions - HasAESCTR bool // KMCTR-AES{128,192,256} functions - HasAESGCM bool // KMA-GCM-AES{128,192,256} functions - HasGHASH bool // KIMD-GHASH function - HasSHA1 bool // K{I,L}MD-SHA-1 functions - HasSHA256 bool // K{I,L}MD-SHA-256 functions - HasSHA512 bool // K{I,L}MD-SHA-512 functions - HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions - HasVX bool // vector facility - HasVXE bool // vector-enhancements facility 1 - _ CacheLinePad -} - -func init() { - archInit() - initOptions() - processOptions() -} - -// options contains the cpu debug options that can be used in GODEBUG. -// Options are arch dependent and are added by the arch specific initOptions functions. -// Features that are mandatory for the specific GOARCH should have the Required field set -// (e.g. SSE2 on amd64). -var options []option - -// Option names should be lower case. e.g. avx instead of AVX. -type option struct { - Name string - Feature *bool - Specified bool // whether feature value was specified in GODEBUG - Enable bool // whether feature should be enabled - Required bool // whether feature is mandatory and can not be disabled -} - -func processOptions() { - env := os.Getenv("GODEBUG") -field: - for env != "" { - field := "" - i := strings.IndexByte(env, ',') - if i < 0 { - field, env = env, "" - } else { - field, env = env[:i], env[i+1:] - } - if len(field) < 4 || field[:4] != "cpu." { - continue - } - i = strings.IndexByte(field, '=') - if i < 0 { - print("GODEBUG sys/cpu: no value specified for \"", field, "\"\n") - continue - } - key, value := field[4:i], field[i+1:] // e.g. 
"SSE2", "on" - - var enable bool - switch value { - case "on": - enable = true - case "off": - enable = false - default: - print("GODEBUG sys/cpu: value \"", value, "\" not supported for cpu option \"", key, "\"\n") - continue field - } - - if key == "all" { - for i := range options { - options[i].Specified = true - options[i].Enable = enable || options[i].Required - } - continue field - } - - for i := range options { - if options[i].Name == key { - options[i].Specified = true - options[i].Enable = enable - continue field - } - } - - print("GODEBUG sys/cpu: unknown cpu feature \"", key, "\"\n") - } - - for _, o := range options { - if !o.Specified { - continue - } - - if o.Enable && !*o.Feature { - print("GODEBUG sys/cpu: can not enable \"", o.Name, "\", missing CPU support\n") - continue - } - - if !o.Enable && o.Required { - print("GODEBUG sys/cpu: can not disable \"", o.Name, "\", required CPU feature\n") - continue - } - - *o.Feature = o.Enable - } -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_aix.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_aix.go deleted file mode 100644 index 464a209cf59..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_aix.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build aix - -package cpu - -const ( - // getsystemcfg constants - _SC_IMPL = 2 - _IMPL_POWER8 = 0x10000 - _IMPL_POWER9 = 0x20000 -) - -func archInit() { - impl := getsystemcfg(_SC_IMPL) - if impl&_IMPL_POWER8 != 0 { - PPC64.IsPOWER8 = true - } - if impl&_IMPL_POWER9 != 0 { - PPC64.IsPOWER9 = true - } - - Initialized = true -} - -func getsystemcfg(label int) (n uint64) { - r0, _ := callgetsystemcfg(label) - n = uint64(r0) - return -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_arm.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_arm.go deleted file mode 100644 index 301b752e9c5..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_arm.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -const cacheLineSize = 32 - -// HWCAP/HWCAP2 bits. -// These are specific to Linux. 
-const ( - hwcap_SWP = 1 << 0 - hwcap_HALF = 1 << 1 - hwcap_THUMB = 1 << 2 - hwcap_26BIT = 1 << 3 - hwcap_FAST_MULT = 1 << 4 - hwcap_FPA = 1 << 5 - hwcap_VFP = 1 << 6 - hwcap_EDSP = 1 << 7 - hwcap_JAVA = 1 << 8 - hwcap_IWMMXT = 1 << 9 - hwcap_CRUNCH = 1 << 10 - hwcap_THUMBEE = 1 << 11 - hwcap_NEON = 1 << 12 - hwcap_VFPv3 = 1 << 13 - hwcap_VFPv3D16 = 1 << 14 - hwcap_TLS = 1 << 15 - hwcap_VFPv4 = 1 << 16 - hwcap_IDIVA = 1 << 17 - hwcap_IDIVT = 1 << 18 - hwcap_VFPD32 = 1 << 19 - hwcap_LPAE = 1 << 20 - hwcap_EVTSTRM = 1 << 21 - - hwcap2_AES = 1 << 0 - hwcap2_PMULL = 1 << 1 - hwcap2_SHA1 = 1 << 2 - hwcap2_SHA2 = 1 << 3 - hwcap2_CRC32 = 1 << 4 -) - -func initOptions() { - options = []option{ - {Name: "pmull", Feature: &ARM.HasPMULL}, - {Name: "sha1", Feature: &ARM.HasSHA1}, - {Name: "sha2", Feature: &ARM.HasSHA2}, - {Name: "swp", Feature: &ARM.HasSWP}, - {Name: "thumb", Feature: &ARM.HasTHUMB}, - {Name: "thumbee", Feature: &ARM.HasTHUMBEE}, - {Name: "tls", Feature: &ARM.HasTLS}, - {Name: "vfp", Feature: &ARM.HasVFP}, - {Name: "vfpd32", Feature: &ARM.HasVFPD32}, - {Name: "vfpv3", Feature: &ARM.HasVFPv3}, - {Name: "vfpv3d16", Feature: &ARM.HasVFPv3D16}, - {Name: "vfpv4", Feature: &ARM.HasVFPv4}, - {Name: "half", Feature: &ARM.HasHALF}, - {Name: "26bit", Feature: &ARM.Has26BIT}, - {Name: "fastmul", Feature: &ARM.HasFASTMUL}, - {Name: "fpa", Feature: &ARM.HasFPA}, - {Name: "edsp", Feature: &ARM.HasEDSP}, - {Name: "java", Feature: &ARM.HasJAVA}, - {Name: "iwmmxt", Feature: &ARM.HasIWMMXT}, - {Name: "crunch", Feature: &ARM.HasCRUNCH}, - {Name: "neon", Feature: &ARM.HasNEON}, - {Name: "idivt", Feature: &ARM.HasIDIVT}, - {Name: "idiva", Feature: &ARM.HasIDIVA}, - {Name: "lpae", Feature: &ARM.HasLPAE}, - {Name: "evtstrm", Feature: &ARM.HasEVTSTRM}, - {Name: "aes", Feature: &ARM.HasAES}, - {Name: "crc32", Feature: &ARM.HasCRC32}, - } - -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_arm64.go deleted file mode 100644 index 951078f2e82..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package cpu - -import "runtime" - -const cacheLineSize = 64 - -func initOptions() { - options = []option{ - {Name: "fp", Feature: &ARM64.HasFP}, - {Name: "asimd", Feature: &ARM64.HasASIMD}, - {Name: "evstrm", Feature: &ARM64.HasEVTSTRM}, - {Name: "aes", Feature: &ARM64.HasAES}, - {Name: "fphp", Feature: &ARM64.HasFPHP}, - {Name: "jscvt", Feature: &ARM64.HasJSCVT}, - {Name: "lrcpc", Feature: &ARM64.HasLRCPC}, - {Name: "pmull", Feature: &ARM64.HasPMULL}, - {Name: "sha1", Feature: &ARM64.HasSHA1}, - {Name: "sha2", Feature: &ARM64.HasSHA2}, - {Name: "sha3", Feature: &ARM64.HasSHA3}, - {Name: "sha512", Feature: &ARM64.HasSHA512}, - {Name: "sm3", Feature: &ARM64.HasSM3}, - {Name: "sm4", Feature: &ARM64.HasSM4}, - {Name: "sve", Feature: &ARM64.HasSVE}, - {Name: "crc32", Feature: &ARM64.HasCRC32}, - {Name: "atomics", Feature: &ARM64.HasATOMICS}, - {Name: "asimdhp", Feature: &ARM64.HasASIMDHP}, - {Name: "cpuid", Feature: &ARM64.HasCPUID}, - {Name: "asimrdm", Feature: &ARM64.HasASIMDRDM}, - {Name: "fcma", Feature: &ARM64.HasFCMA}, - {Name: "dcpop", Feature: &ARM64.HasDCPOP}, - {Name: "asimddp", Feature: &ARM64.HasASIMDDP}, - {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM}, - } -} - -func archInit() { - switch runtime.GOOS { - case "android", "darwin", "ios", "netbsd": - // Android and iOS don't seem to allow reading these registers. - // - // NetBSD: - // ID_AA64ISAR0_EL1 is a privileged register and cannot be read from EL0. - // It can be read via sysctl(3). Example for future implementers: - // https://nxr.netbsd.org/xref/src/usr.sbin/cpuctl/arch/aarch64.c - // - // Fake the minimal features expected by - // TestARM64minimalFeatures. - ARM64.HasASIMD = true - ARM64.HasFP = true - case "linux": - doinit() - default: - readARM64Registers() - } -} - -func readARM64Registers() { - Initialized = true - - // ID_AA64ISAR0_EL1 - isar0 := getisar0() - - switch extractBits(isar0, 4, 7) { - case 1: - ARM64.HasAES = true - case 2: - ARM64.HasAES = true - ARM64.HasPMULL = true - } - - switch extractBits(isar0, 8, 11) { - case 1: - ARM64.HasSHA1 = true - } - - switch extractBits(isar0, 12, 15) { - case 1: - ARM64.HasSHA2 = true - case 2: - ARM64.HasSHA2 = true - ARM64.HasSHA512 = true - } - - switch extractBits(isar0, 16, 19) { - case 1: - ARM64.HasCRC32 = true - } - - switch extractBits(isar0, 20, 23) { - case 2: - ARM64.HasATOMICS = true - } - - switch extractBits(isar0, 28, 31) { - case 1: - ARM64.HasASIMDRDM = true - } - - switch extractBits(isar0, 32, 35) { - case 1: - ARM64.HasSHA3 = true - } - - switch extractBits(isar0, 36, 39) { - case 1: - ARM64.HasSM3 = true - } - - switch extractBits(isar0, 40, 43) { - case 1: - ARM64.HasSM4 = true - } - - switch extractBits(isar0, 44, 47) { - case 1: - ARM64.HasASIMDDP = true - } - - // ID_AA64ISAR1_EL1 - isar1 := getisar1() - - switch extractBits(isar1, 0, 3) { - case 1: - ARM64.HasDCPOP = true - } - - switch extractBits(isar1, 12, 15) { - case 1: - ARM64.HasJSCVT = true - } - - switch extractBits(isar1, 16, 19) { - case 1: - ARM64.HasFCMA = true - } - - switch extractBits(isar1, 20, 23) { - case 1: - ARM64.HasLRCPC = true - } - - // ID_AA64PFR0_EL1 - pfr0 := getpfr0() - - switch extractBits(pfr0, 16, 19) { - case 0: - ARM64.HasFP = true - case 1: - ARM64.HasFP = true - ARM64.HasFPHP = true - } - - switch extractBits(pfr0, 20, 23) { - case 0: - ARM64.HasASIMD = true - case 1: - ARM64.HasASIMD = true - ARM64.HasASIMDHP = true - } - - switch extractBits(pfr0, 32, 35) { - case 1: - ARM64.HasSVE = true - } -} - -func extractBits(data uint64, start, end uint) uint { 
- return (uint)(data>>start) & ((1 << (end - start + 1)) - 1) -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_arm64.s deleted file mode 100644 index a54436e3909..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_arm64.s +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !gccgo - -#include "textflag.h" - -// func getisar0() uint64 -TEXT ·getisar0(SB),NOSPLIT,$0-8 - // get Instruction Set Attributes 0 into x0 - // mrs x0, ID_AA64ISAR0_EL1 = d5380600 - WORD $0xd5380600 - MOVD R0, ret+0(FP) - RET - -// func getisar1() uint64 -TEXT ·getisar1(SB),NOSPLIT,$0-8 - // get Instruction Set Attributes 1 into x0 - // mrs x0, ID_AA64ISAR1_EL1 = d5380620 - WORD $0xd5380620 - MOVD R0, ret+0(FP) - RET - -// func getpfr0() uint64 -TEXT ·getpfr0(SB),NOSPLIT,$0-8 - // get Processor Feature Register 0 into x0 - // mrs x0, ID_AA64PFR0_EL1 = d5380400 - WORD $0xd5380400 - MOVD R0, ret+0(FP) - RET diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go deleted file mode 100644 index 7b88e865a42..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !gccgo - -package cpu - -func getisar0() uint64 -func getisar1() uint64 -func getpfr0() uint64 diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go deleted file mode 100644 index 568bcd031aa..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !gccgo - -package cpu - -// haveAsmFunctions reports whether the other functions in this file can -// be safely called. -func haveAsmFunctions() bool { return true } - -// The following feature detection functions are defined in cpu_s390x.s. -// They are likely to be expensive to call so the results should be cached. -func stfle() facilityList -func kmQuery() queryResult -func kmcQuery() queryResult -func kmctrQuery() queryResult -func kmaQuery() queryResult -func kimdQuery() queryResult -func klmdQuery() queryResult diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go deleted file mode 100644 index f7cb46971cb..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build 386 amd64 amd64p32 -// +build !gccgo - -package cpu - -// cpuid is implemented in cpu_x86.s for gc compiler -// and in cpu_gccgo.c for gccgo. -func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) - -// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler -// and in cpu_gccgo.c for gccgo. 
-func xgetbv() (eax, edx uint32)
diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
deleted file mode 100644
index 53ca8d65c37..00000000000
--- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build gccgo
-
-package cpu
-
-func getisar0() uint64 { return 0 }
-func getisar1() uint64 { return 0 }
-func getpfr0() uint64 { return 0 }
diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
deleted file mode 100644
index aa986f77825..00000000000
--- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build gccgo
-
-package cpu
-
-// haveAsmFunctions reports whether the other functions in this file can
-// be safely called.
-func haveAsmFunctions() bool { return false }
-
-// TODO(mundaym): the following feature detection functions are currently
-// stubs. See https://golang.org/cl/162887 for how to fix this.
-// They are likely to be expensive to call so the results should be cached.
-func stfle() facilityList { panic("not implemented for gccgo") }
-func kmQuery() queryResult { panic("not implemented for gccgo") }
-func kmcQuery() queryResult { panic("not implemented for gccgo") }
-func kmctrQuery() queryResult { panic("not implemented for gccgo") }
-func kmaQuery() queryResult { panic("not implemented for gccgo") }
-func kimdQuery() queryResult { panic("not implemented for gccgo") }
-func klmdQuery() queryResult { panic("not implemented for gccgo") }
diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
deleted file mode 100644
index e363c7d1319..00000000000
--- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build 386 amd64 amd64p32
-// +build gccgo
-
-#include <cpuid.h>
-#include <stdint.h>
-
-// Need to wrap __get_cpuid_count because it's declared as static.
-int
-gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf,
-                   uint32_t *eax, uint32_t *ebx,
-                   uint32_t *ecx, uint32_t *edx)
-{
-	return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx);
-}
-
-// xgetbv reads the contents of an XCR (Extended Control Register)
-// specified in the ECX register into registers EDX:EAX.
-// Currently, the only supported value for XCR is 0.
-//
-// TODO: Replace with a better alternative:
-//
-//     #include <immintrin.h>
-//
-//     #pragma GCC target("xsave")
-//
-//     void gccgoXgetbv(uint32_t *eax, uint32_t *edx) {
-//       unsigned long long x = _xgetbv(0);
-//       *eax = x & 0xffffffff;
-//       *edx = (x >> 32) & 0xffffffff;
-//     }
-//
-// Note that _xgetbv is defined starting with GCC 8.
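Both the gc assembly and the gccgo shim below hand back EAX, the low 32 bits of XCR0. archInit in cpu_x86.go, later in this patch, inspects just three state components of that word; a worked decoding with an invented register value:

    package main

    import "fmt"

    func main() {
        eax := uint32(0xe7) // invented XCR0 low word: x87, SSE, AVX, plus the AVX-512 state bits

        osSupportsAVX := eax&(1<<1) != 0 && eax&(1<<2) != 0 // XMM and YMM state saved by the OS
        osSupportsAVX512 := osSupportsAVX &&
            eax&(1<<5) != 0 && eax&(1<<6) != 0 && eax&(1<<7) != 0 // opmask, ZMM_Hi256, Hi16_ZMM

        fmt.Println(osSupportsAVX, osSupportsAVX512) // true true
    }

Without this XGETBV check, CPUID alone would report AVX on machines whose OS never restores the wide registers.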
-void -gccgoXgetbv(uint32_t *eax, uint32_t *edx) -{ - __asm(" xorl %%ecx, %%ecx\n" - " xgetbv" - : "=a"(*eax), "=d"(*edx)); -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go deleted file mode 100644 index ba49b91bd39..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build 386 amd64 amd64p32 -// +build gccgo - -package cpu - -//extern gccgoGetCpuidCount -func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) - -func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { - var a, b, c, d uint32 - gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) - return a, b, c, d -} - -//extern gccgoXgetbv -func gccgoXgetbv(eax, edx *uint32) - -func xgetbv() (eax, edx uint32) { - var a, d uint32 - gccgoXgetbv(&a, &d) - return a, d -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux.go deleted file mode 100644 index 6fc874f7fef..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !386,!amd64,!amd64p32,!arm64 - -package cpu - -func archInit() { - if err := readHWCAP(); err != nil { - return - } - doinit() - Initialized = true -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go deleted file mode 100644 index 2057006dce4..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package cpu - -func doinit() { - ARM.HasSWP = isSet(hwCap, hwcap_SWP) - ARM.HasHALF = isSet(hwCap, hwcap_HALF) - ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB) - ARM.Has26BIT = isSet(hwCap, hwcap_26BIT) - ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT) - ARM.HasFPA = isSet(hwCap, hwcap_FPA) - ARM.HasVFP = isSet(hwCap, hwcap_VFP) - ARM.HasEDSP = isSet(hwCap, hwcap_EDSP) - ARM.HasJAVA = isSet(hwCap, hwcap_JAVA) - ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT) - ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH) - ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE) - ARM.HasNEON = isSet(hwCap, hwcap_NEON) - ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3) - ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16) - ARM.HasTLS = isSet(hwCap, hwcap_TLS) - ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4) - ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA) - ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT) - ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32) - ARM.HasLPAE = isSet(hwCap, hwcap_LPAE) - ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) - ARM.HasAES = isSet(hwCap2, hwcap2_AES) - ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL) - ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1) - ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2) - ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go deleted file mode 100644 index 79a38a0b9bc..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -// HWCAP/HWCAP2 bits. These are exposed by Linux. 
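When /proc/self/auxv cannot be read, doinit in the next hunk falls back to readARM64Registers (earlier in this patch), which slices 4-bit fields out of the ID_AA64* system registers with extractBits. A worked example; the register value is invented, extractBits is copied from the deleted cpu_arm64.go:

    package main

    import "fmt"

    func extractBits(data uint64, start, end uint) uint {
        return (uint)(data>>start) & ((1 << (end - start + 1)) - 1)
    }

    func main() {
        isar0 := uint64(0x20) // invented ID_AA64ISAR0_EL1: AES field (bits 4-7) = 2

        switch extractBits(isar0, 4, 7) {
        case 1:
            fmt.Println("AES")
        case 2:
            fmt.Println("AES and PMULL") // the case readARM64Registers maps to both flags
        }
    }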
-const ( - hwcap_FP = 1 << 0 - hwcap_ASIMD = 1 << 1 - hwcap_EVTSTRM = 1 << 2 - hwcap_AES = 1 << 3 - hwcap_PMULL = 1 << 4 - hwcap_SHA1 = 1 << 5 - hwcap_SHA2 = 1 << 6 - hwcap_CRC32 = 1 << 7 - hwcap_ATOMICS = 1 << 8 - hwcap_FPHP = 1 << 9 - hwcap_ASIMDHP = 1 << 10 - hwcap_CPUID = 1 << 11 - hwcap_ASIMDRDM = 1 << 12 - hwcap_JSCVT = 1 << 13 - hwcap_FCMA = 1 << 14 - hwcap_LRCPC = 1 << 15 - hwcap_DCPOP = 1 << 16 - hwcap_SHA3 = 1 << 17 - hwcap_SM3 = 1 << 18 - hwcap_SM4 = 1 << 19 - hwcap_ASIMDDP = 1 << 20 - hwcap_SHA512 = 1 << 21 - hwcap_SVE = 1 << 22 - hwcap_ASIMDFHM = 1 << 23 -) - -func doinit() { - if err := readHWCAP(); err != nil { - // failed to read /proc/self/auxv, try reading registers directly - readARM64Registers() - return - } - - // HWCAP feature bits - ARM64.HasFP = isSet(hwCap, hwcap_FP) - ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) - ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) - ARM64.HasAES = isSet(hwCap, hwcap_AES) - ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) - ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) - ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) - ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) - ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) - ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) - ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) - ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) - ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) - ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) - ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) - ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) - ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) - ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) - ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) - ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) - ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) - ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) - ARM64.HasSVE = isSet(hwCap, hwcap_SVE) - ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go deleted file mode 100644 index 5a418900538..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux -// +build mips64 mips64le - -package cpu - -// HWCAP bits. These are exposed by the Linux kernel 5.4. -const ( - // CPU features - hwcap_MIPS_MSA = 1 << 1 -) - -func doinit() { - // HWCAP feature bits - MIPS64X.HasMSA = isSet(hwCap, hwcap_MIPS_MSA) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go deleted file mode 100644 index 42b5d33cb69..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build linux,!arm,!arm64,!mips64,!mips64le,!ppc64,!ppc64le,!s390x - -package cpu - -func doinit() {} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go deleted file mode 100644 index 99f8a6399ef..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux -// +build ppc64 ppc64le - -package cpu - -// HWCAP/HWCAP2 bits. These are exposed by the kernel. -const ( - // ISA Level - _PPC_FEATURE2_ARCH_2_07 = 0x80000000 - _PPC_FEATURE2_ARCH_3_00 = 0x00800000 - - // CPU features - _PPC_FEATURE2_DARN = 0x00200000 - _PPC_FEATURE2_SCV = 0x00100000 -) - -func doinit() { - // HWCAP2 feature bits - PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) - PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) - PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) - PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go deleted file mode 100644 index b88d6b8f662..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -const ( - // bit mask values from /usr/include/bits/hwcap.h - hwcap_ZARCH = 2 - hwcap_STFLE = 4 - hwcap_MSA = 8 - hwcap_LDISP = 16 - hwcap_EIMM = 32 - hwcap_DFP = 64 - hwcap_ETF3EH = 256 - hwcap_VX = 2048 - hwcap_VXE = 8192 -) - -// bitIsSet reports whether the bit at index is set. The bit index -// is in big endian order, so bit index 0 is the leftmost bit. -func bitIsSet(bits []uint64, index uint) bool { - return bits[index/64]&((1<<63)>>(index%64)) != 0 -} - -// function is the code for the named cryptographic function. -type function uint8 - -const ( - // KM{,A,C,CTR} function codes - aes128 function = 18 // AES-128 - aes192 function = 19 // AES-192 - aes256 function = 20 // AES-256 - - // K{I,L}MD function codes - sha1 function = 1 // SHA-1 - sha256 function = 2 // SHA-256 - sha512 function = 3 // SHA-512 - sha3_224 function = 32 // SHA3-224 - sha3_256 function = 33 // SHA3-256 - sha3_384 function = 34 // SHA3-384 - sha3_512 function = 35 // SHA3-512 - shake128 function = 36 // SHAKE-128 - shake256 function = 37 // SHAKE-256 - - // KLMD function codes - ghash function = 65 // GHASH -) - -// queryResult contains the result of a Query function -// call. Bits are numbered in big endian order so the -// leftmost bit (the MSB) is at index 0. -type queryResult struct { - bits [2]uint64 -} - -// Has reports whether the given functions are present. -func (q *queryResult) Has(fns ...function) bool { - if len(fns) == 0 { - panic("no function codes provided") - } - for _, f := range fns { - if !bitIsSet(q.bits[:], uint(f)) { - return false - } - } - return true -} - -// facility is a bit index for the named facility. 
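The s390x query bitmaps are numbered from the most significant bit: index 0 is the leftmost bit of the first word, which is why bitIsSet above shifts (1<<63) right instead of shifting 1 left. A worked check for facility 77 (msa4, defined just below) against an invented facility list:

    package main

    import "fmt"

    func main() {
        var bits [4]uint64
        index := uint(77)                          // msa4, message-security-assist extension 4
        bits[index/64] = (1 << 63) >> (index % 64) // invented list with only facility 77 set

        // The same test bitIsSet performs: word 77/64 = 1, bit 77%64 = 13 from the top.
        fmt.Println(bits[index/64]&((1<<63)>>(index%64)) != 0) // true
    }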
-type facility uint8 - -const ( - // cryptography facilities - msa4 facility = 77 // message-security-assist extension 4 - msa8 facility = 146 // message-security-assist extension 8 -) - -// facilityList contains the result of an STFLE call. -// Bits are numbered in big endian order so the -// leftmost bit (the MSB) is at index 0. -type facilityList struct { - bits [4]uint64 -} - -// Has reports whether the given facilities are present. -func (s *facilityList) Has(fs ...facility) bool { - if len(fs) == 0 { - panic("no facility bits provided") - } - for _, f := range fs { - if !bitIsSet(s.bits[:], uint(f)) { - return false - } - } - return true -} - -func doinit() { - // test HWCAP bit vector - has := func(featureMask uint) bool { - return hwCap&featureMask == featureMask - } - - // mandatory - S390X.HasZARCH = has(hwcap_ZARCH) - - // optional - S390X.HasSTFLE = has(hwcap_STFLE) - S390X.HasLDISP = has(hwcap_LDISP) - S390X.HasEIMM = has(hwcap_EIMM) - S390X.HasETF3EH = has(hwcap_ETF3EH) - S390X.HasDFP = has(hwcap_DFP) - S390X.HasMSA = has(hwcap_MSA) - S390X.HasVX = has(hwcap_VX) - if S390X.HasVX { - S390X.HasVXE = has(hwcap_VXE) - } - - // We need implementations of stfle, km and so on - // to detect cryptographic features. - if !haveAsmFunctions() { - return - } - - // optional cryptographic functions - if S390X.HasMSA { - aes := []function{aes128, aes192, aes256} - - // cipher message - km, kmc := kmQuery(), kmcQuery() - S390X.HasAES = km.Has(aes...) - S390X.HasAESCBC = kmc.Has(aes...) - if S390X.HasSTFLE { - facilities := stfle() - if facilities.Has(msa4) { - kmctr := kmctrQuery() - S390X.HasAESCTR = kmctr.Has(aes...) - } - if facilities.Has(msa8) { - kma := kmaQuery() - S390X.HasAESGCM = kma.Has(aes...) - } - } - - // compute message digest - kimd := kimdQuery() // intermediate (no padding) - klmd := klmdQuery() // last (padding) - S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) - S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) - S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) - S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist - sha3 := []function{ - sha3_224, sha3_256, sha3_384, sha3_512, - shake128, shake256, - } - S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) - } -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_mips64x.go deleted file mode 100644 index 57b5b677de0..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_mips64x.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build mips64 mips64le - -package cpu - -const cacheLineSize = 32 - -func initOptions() { - options = []option{ - {Name: "msa", Feature: &MIPS64X.HasMSA}, - } -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_mipsx.go deleted file mode 100644 index cfc1946b7bb..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_mipsx.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build mips mipsle - -package cpu - -const cacheLineSize = 32 - -func initOptions() {} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_other_arm.go deleted file mode 100644 index b412efc1bd1..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_other_arm.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !linux,arm - -package cpu - -func archInit() {} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go deleted file mode 100644 index 3ffc4afa03c..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !linux,arm64 - -package cpu - -func doinit() {} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go deleted file mode 100644 index d28d675b5f1..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ppc64 ppc64le - -package cpu - -const cacheLineSize = 128 - -func initOptions() { - options = []option{ - {Name: "darn", Feature: &PPC64.HasDARN}, - {Name: "scv", Feature: &PPC64.HasSCV}, - } -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_riscv64.go deleted file mode 100644 index 8b08de341b8..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_riscv64.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build riscv64 - -package cpu - -const cacheLineSize = 32 - -func initOptions() {} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_s390x.go deleted file mode 100644 index 544cd621cee..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_s390x.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package cpu - -const cacheLineSize = 256 - -func initOptions() { - options = []option{ - {Name: "zarch", Feature: &S390X.HasZARCH}, - {Name: "stfle", Feature: &S390X.HasSTFLE}, - {Name: "ldisp", Feature: &S390X.HasLDISP}, - {Name: "eimm", Feature: &S390X.HasEIMM}, - {Name: "dfp", Feature: &S390X.HasDFP}, - {Name: "etf3eh", Feature: &S390X.HasETF3EH}, - {Name: "msa", Feature: &S390X.HasMSA}, - {Name: "aes", Feature: &S390X.HasAES}, - {Name: "aescbc", Feature: &S390X.HasAESCBC}, - {Name: "aesctr", Feature: &S390X.HasAESCTR}, - {Name: "aesgcm", Feature: &S390X.HasAESGCM}, - {Name: "ghash", Feature: &S390X.HasGHASH}, - {Name: "sha1", Feature: &S390X.HasSHA1}, - {Name: "sha256", Feature: &S390X.HasSHA256}, - {Name: "sha3", Feature: &S390X.HasSHA3}, - {Name: "sha512", Feature: &S390X.HasSHA512}, - {Name: "vx", Feature: &S390X.HasVX}, - {Name: "vxe", Feature: &S390X.HasVXE}, - } -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_s390x.s deleted file mode 100644 index e5037d92e06..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_s390x.s +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !gccgo - -#include "textflag.h" - -// func stfle() facilityList -TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32 - MOVD $ret+0(FP), R1 - MOVD $3, R0 // last doubleword index to store - XC $32, (R1), (R1) // clear 4 doublewords (32 bytes) - WORD $0xb2b01000 // store facility list extended (STFLE) - RET - -// func kmQuery() queryResult -TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KM-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB92E0024 // cipher message (KM) - RET - -// func kmcQuery() queryResult -TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KMC-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB92F0024 // cipher message with chaining (KMC) - RET - -// func kmctrQuery() queryResult -TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KMCTR-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB92D4024 // cipher message with counter (KMCTR) - RET - -// func kmaQuery() queryResult -TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KMA-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xb9296024 // cipher message with authentication (KMA) - RET - -// func kimdQuery() queryResult -TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KIMD-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB93E0024 // compute intermediate message digest (KIMD) - RET - -// func klmdQuery() queryResult -TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KLMD-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB93F0024 // compute last message digest (KLMD) - RET diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_wasm.go deleted file mode 100644 index 5382f2a227a..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_wasm.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build wasm - -package cpu - -// We're compiling the cpu package for an unknown (software-abstracted) CPU. -// Make CacheLinePad an empty struct and hope that the usual struct alignment -// rules are good enough. - -const cacheLineSize = 0 - -func initOptions() {} - -func archInit() {} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_x86.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_x86.go deleted file mode 100644 index 48d42933195..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build 386 amd64 amd64p32 - -package cpu - -import "runtime" - -const cacheLineSize = 64 - -func initOptions() { - options = []option{ - {Name: "adx", Feature: &X86.HasADX}, - {Name: "aes", Feature: &X86.HasAES}, - {Name: "avx", Feature: &X86.HasAVX}, - {Name: "avx2", Feature: &X86.HasAVX2}, - {Name: "avx512", Feature: &X86.HasAVX512}, - {Name: "avx512f", Feature: &X86.HasAVX512F}, - {Name: "avx512cd", Feature: &X86.HasAVX512CD}, - {Name: "avx512er", Feature: &X86.HasAVX512ER}, - {Name: "avx512pf", Feature: &X86.HasAVX512PF}, - {Name: "avx512vl", Feature: &X86.HasAVX512VL}, - {Name: "avx512bw", Feature: &X86.HasAVX512BW}, - {Name: "avx512dq", Feature: &X86.HasAVX512DQ}, - {Name: "avx512ifma", Feature: &X86.HasAVX512IFMA}, - {Name: "avx512vbmi", Feature: &X86.HasAVX512VBMI}, - {Name: "avx512vnniw", Feature: &X86.HasAVX5124VNNIW}, - {Name: "avx5124fmaps", Feature: &X86.HasAVX5124FMAPS}, - {Name: "avx512vpopcntdq", Feature: &X86.HasAVX512VPOPCNTDQ}, - {Name: "avx512vpclmulqdq", Feature: &X86.HasAVX512VPCLMULQDQ}, - {Name: "avx512vnni", Feature: &X86.HasAVX512VNNI}, - {Name: "avx512gfni", Feature: &X86.HasAVX512GFNI}, - {Name: "avx512vaes", Feature: &X86.HasAVX512VAES}, - {Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2}, - {Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG}, - {Name: "avx512bf16", Feature: &X86.HasAVX512BF16}, - {Name: "bmi1", Feature: &X86.HasBMI1}, - {Name: "bmi2", Feature: &X86.HasBMI2}, - {Name: "erms", Feature: &X86.HasERMS}, - {Name: "fma", Feature: &X86.HasFMA}, - {Name: "osxsave", Feature: &X86.HasOSXSAVE}, - {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ}, - {Name: "popcnt", Feature: &X86.HasPOPCNT}, - {Name: "rdrand", Feature: &X86.HasRDRAND}, - {Name: "rdseed", Feature: &X86.HasRDSEED}, - {Name: "sse3", Feature: &X86.HasSSE3}, - {Name: "sse41", Feature: &X86.HasSSE41}, - {Name: "sse42", Feature: &X86.HasSSE42}, - {Name: "ssse3", Feature: &X86.HasSSSE3}, - - // These capabilities should always be enabled on amd64: - {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, - } -} - -func archInit() { - - Initialized = true - - maxID, _, _, _ := cpuid(0, 0) - - if maxID < 1 { - return - } - - _, _, ecx1, edx1 := cpuid(1, 0) - X86.HasSSE2 = isSet(26, edx1) - - X86.HasSSE3 = isSet(0, ecx1) - X86.HasPCLMULQDQ = isSet(1, ecx1) - X86.HasSSSE3 = isSet(9, ecx1) - X86.HasFMA = isSet(12, ecx1) - X86.HasSSE41 = isSet(19, ecx1) - X86.HasSSE42 = isSet(20, ecx1) - X86.HasPOPCNT = isSet(23, ecx1) - X86.HasAES = isSet(25, ecx1) - X86.HasOSXSAVE = isSet(27, ecx1) - X86.HasRDRAND = isSet(30, ecx1) - - var osSupportsAVX, osSupportsAVX512 bool - // For XGETBV, OSXSAVE bit is required and sufficient. 
-	if X86.HasOSXSAVE {
-		eax, _ := xgetbv()
-		// Check if XMM and YMM registers have OS support.
-		osSupportsAVX = isSet(1, eax) && isSet(2, eax)
-
-		// Check if OPMASK and ZMM registers have OS support.
-		osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax)
-	}
-
-	X86.HasAVX = isSet(28, ecx1) && osSupportsAVX
-
-	if maxID < 7 {
-		return
-	}
-
-	_, ebx7, ecx7, edx7 := cpuid(7, 0)
-	X86.HasBMI1 = isSet(3, ebx7)
-	X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
-	X86.HasBMI2 = isSet(8, ebx7)
-	X86.HasERMS = isSet(9, ebx7)
-	X86.HasRDSEED = isSet(18, ebx7)
-	X86.HasADX = isSet(19, ebx7)
-
-	X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension
-	if X86.HasAVX512 {
-		X86.HasAVX512F = true
-		X86.HasAVX512CD = isSet(28, ebx7)
-		X86.HasAVX512ER = isSet(27, ebx7)
-		X86.HasAVX512PF = isSet(26, ebx7)
-		X86.HasAVX512VL = isSet(31, ebx7)
-		X86.HasAVX512BW = isSet(30, ebx7)
-		X86.HasAVX512DQ = isSet(17, ebx7)
-		X86.HasAVX512IFMA = isSet(21, ebx7)
-		X86.HasAVX512VBMI = isSet(1, ecx7)
-		X86.HasAVX5124VNNIW = isSet(2, edx7)
-		X86.HasAVX5124FMAPS = isSet(3, edx7)
-		X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7)
-		X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7)
-		X86.HasAVX512VNNI = isSet(11, ecx7)
-		X86.HasAVX512GFNI = isSet(8, ecx7)
-		X86.HasAVX512VAES = isSet(9, ecx7)
-		X86.HasAVX512VBMI2 = isSet(6, ecx7)
-		X86.HasAVX512BITALG = isSet(12, ecx7)
-
-		eax71, _, _, _ := cpuid(7, 1)
-		X86.HasAVX512BF16 = isSet(5, eax71)
-	}
-}
-
-func isSet(bitpos uint, value uint32) bool {
-	return value&(1<<bitpos) != 0
-}
diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/hwcap_linux.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/hwcap_linux.go
deleted file mode 100644
--- a/awsproviderlint/vendor/golang.org/x/sys/cpu/hwcap_linux.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cpu
-
-import (
-	"io/ioutil"
-)
-
-const (
-	_AT_HWCAP  = 16
-	_AT_HWCAP2 = 26
-
-	procAuxv = "/proc/self/auxv"
-
-	uintSize = int(32 << (^uint(0) >> 63))
-)
-
-// For those platforms don't have a 'cpuid' equivalent we use HWCAP/HWCAP2
-// These are initialized in cpu_$GOARCH.go
-// and should not be changed after they are initialized.
-var hwCap uint
-var hwCap2 uint
-
-func readHWCAP() error {
-	buf, err := ioutil.ReadFile(procAuxv)
-	if err != nil {
-		// e.g. on android /proc/self/auxv is not accessible, so silently
-		// ignore the error and leave Initialized = false. On some
-		// architectures (e.g. arm64) doinit() implements a fallback
-		// readout and will set Initialized = true again.
-		return err
-	}
-	bo := hostByteOrder()
-	for len(buf) >= 2*(uintSize/8) {
-		var tag, val uint
-		switch uintSize {
-		case 32:
-			tag = uint(bo.Uint32(buf[0:]))
-			val = uint(bo.Uint32(buf[4:]))
-			buf = buf[8:]
-		case 64:
-			tag = uint(bo.Uint64(buf[0:]))
-			val = uint(bo.Uint64(buf[8:]))
-			buf = buf[16:]
-		}
-		switch tag {
-		case _AT_HWCAP:
-			hwCap = val
-		case _AT_HWCAP2:
-			hwCap2 = val
-		}
-	}
-	return nil
-}
diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
deleted file mode 100644
index 76fbe40b762..00000000000
--- a/awsproviderlint/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Recreate a getsystemcfg syscall handler instead of
-// using the one provided by x/sys/unix to avoid having
-// the dependency between them. (See golang.org/issue/32102)
-// Morever, this file will be used during the building of
-// gccgo's libgo and thus must not used a CGo method.
- -// +build aix -// +build gccgo - -package cpu - -import ( - "syscall" -) - -//extern getsystemcfg -func gccgoGetsystemcfg(label uint32) (r uint64) - -func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) { - r1 = uintptr(gccgoGetsystemcfg(uint32(label))) - e1 = syscall.GetErrno() - return -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/awsproviderlint/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go deleted file mode 100644 index 78fe25e86fb..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Minimal copy of x/sys/unix so the cpu package can make a -// system call on AIX without depending on x/sys/unix. -// (See golang.org/issue/32102) - -// +build aix,ppc64 -// +build !gccgo - -package cpu - -import ( - "syscall" - "unsafe" -) - -//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" - -//go:linkname libc_getsystemcfg libc_getsystemcfg - -type syscallFunc uintptr - -var libc_getsystemcfg syscallFunc - -type errno = syscall.Errno - -// Implemented in runtime/syscall_aix.go. -func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) -func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) - -func callgetsystemcfg(label int) (r1 uintptr, e1 errno) { - r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0) - return -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/aliases.go b/awsproviderlint/vendor/golang.org/x/sys/windows/aliases.go deleted file mode 100644 index af3af60db97..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/windows/aliases.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows -// +build go1.9 - -package windows - -import "syscall" - -type Errno = syscall.Errno -type SysProcAttr = syscall.SysProcAttr diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/dll_windows.go b/awsproviderlint/vendor/golang.org/x/sys/windows/dll_windows.go deleted file mode 100644 index 82076fb74ff..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/windows/dll_windows.go +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package windows - -import ( - "sync" - "sync/atomic" - "syscall" - "unsafe" -) - -// We need to use LoadLibrary and GetProcAddress from the Go runtime, because -// the these symbols are loaded by the system linker and are required to -// dynamically load additional symbols. Note that in the Go runtime, these -// return syscall.Handle and syscall.Errno, but these are the same, in fact, -// as windows.Handle and windows.Errno, and we intend to keep these the same. - -//go:linkname syscall_loadlibrary syscall.loadlibrary -func syscall_loadlibrary(filename *uint16) (handle Handle, err Errno) - -//go:linkname syscall_getprocaddress syscall.getprocaddress -func syscall_getprocaddress(handle Handle, procname *uint8) (proc uintptr, err Errno) - -// DLLError describes reasons for DLL load failures. 
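LoadDLL below returns its failures as *DLLError, declared just after this comment, with the underlying Errno preserved in Err. A sketch of inspecting it, using a deliberately bogus DLL name; only the fields visible in this deleted copy are assumed:

    package main

    import (
        "errors"
        "log"

        "golang.org/x/sys/windows"
    )

    func main() {
        dll, err := windows.LoadDLL("definitely-not-present.dll")
        if err != nil {
            var dllErr *windows.DLLError
            if errors.As(err, &dllErr) {
                log.Printf("loading %s failed: %v", dllErr.ObjName, dllErr.Err)
            }
            return
        }
        defer dll.Release()
    }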
-type DLLError struct { - Err error - ObjName string - Msg string -} - -func (e *DLLError) Error() string { return e.Msg } - -// A DLL implements access to a single DLL. -type DLL struct { - Name string - Handle Handle -} - -// LoadDLL loads DLL file into memory. -// -// Warning: using LoadDLL without an absolute path name is subject to -// DLL preloading attacks. To safely load a system DLL, use LazyDLL -// with System set to true, or use LoadLibraryEx directly. -func LoadDLL(name string) (dll *DLL, err error) { - namep, err := UTF16PtrFromString(name) - if err != nil { - return nil, err - } - h, e := syscall_loadlibrary(namep) - if e != 0 { - return nil, &DLLError{ - Err: e, - ObjName: name, - Msg: "Failed to load " + name + ": " + e.Error(), - } - } - d := &DLL{ - Name: name, - Handle: h, - } - return d, nil -} - -// MustLoadDLL is like LoadDLL but panics if load operation failes. -func MustLoadDLL(name string) *DLL { - d, e := LoadDLL(name) - if e != nil { - panic(e) - } - return d -} - -// FindProc searches DLL d for procedure named name and returns *Proc -// if found. It returns an error if search fails. -func (d *DLL) FindProc(name string) (proc *Proc, err error) { - namep, err := BytePtrFromString(name) - if err != nil { - return nil, err - } - a, e := syscall_getprocaddress(d.Handle, namep) - if e != 0 { - return nil, &DLLError{ - Err: e, - ObjName: name, - Msg: "Failed to find " + name + " procedure in " + d.Name + ": " + e.Error(), - } - } - p := &Proc{ - Dll: d, - Name: name, - addr: a, - } - return p, nil -} - -// MustFindProc is like FindProc but panics if search fails. -func (d *DLL) MustFindProc(name string) *Proc { - p, e := d.FindProc(name) - if e != nil { - panic(e) - } - return p -} - -// FindProcByOrdinal searches DLL d for procedure by ordinal and returns *Proc -// if found. It returns an error if search fails. -func (d *DLL) FindProcByOrdinal(ordinal uintptr) (proc *Proc, err error) { - a, e := GetProcAddressByOrdinal(d.Handle, ordinal) - name := "#" + itoa(int(ordinal)) - if e != nil { - return nil, &DLLError{ - Err: e, - ObjName: name, - Msg: "Failed to find " + name + " procedure in " + d.Name + ": " + e.Error(), - } - } - p := &Proc{ - Dll: d, - Name: name, - addr: a, - } - return p, nil -} - -// MustFindProcByOrdinal is like FindProcByOrdinal but panics if search fails. -func (d *DLL) MustFindProcByOrdinal(ordinal uintptr) *Proc { - p, e := d.FindProcByOrdinal(ordinal) - if e != nil { - panic(e) - } - return p -} - -// Release unloads DLL d from memory. -func (d *DLL) Release() (err error) { - return FreeLibrary(d.Handle) -} - -// A Proc implements access to a procedure inside a DLL. -type Proc struct { - Dll *DLL - Name string - addr uintptr -} - -// Addr returns the address of the procedure represented by p. -// The return value can be passed to Syscall to run the procedure. -func (p *Proc) Addr() uintptr { - return p.addr -} - -//go:uintptrescapes - -// Call executes procedure p with arguments a. It will panic, if more than 15 arguments -// are supplied. -// -// The returned error is always non-nil, constructed from the result of GetLastError. -// Callers must inspect the primary return value to decide whether an error occurred -// (according to the semantics of the specific function being called) before consulting -// the error. The error will be guaranteed to contain windows.Errno. 
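The error contract spelled out above is easy to get wrong: Call's error is never nil, so checking err before the primary return value misreports success as failure. A sketch of the intended pattern; GetModuleHandleW is used only as an example of an API that signals failure with a zero return:

    package main

    import (
        "fmt"

        "golang.org/x/sys/windows"
    )

    var (
        kernel32         = windows.NewLazySystemDLL("kernel32.dll")
        getModuleHandleW = kernel32.NewProc("GetModuleHandleW")
    )

    func main() {
        // A NULL lpModuleName asks for the handle of the calling executable.
        r1, _, err := getModuleHandleW.Call(0)
        if r1 == 0 {
            // Only here is err (built from GetLastError) meaningful.
            fmt.Println("GetModuleHandleW failed:", err)
            return
        }
        fmt.Printf("module handle: %#x\n", r1)
    }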
-func (p *Proc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) { - switch len(a) { - case 0: - return syscall.Syscall(p.Addr(), uintptr(len(a)), 0, 0, 0) - case 1: - return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], 0, 0) - case 2: - return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], 0) - case 3: - return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], a[2]) - case 4: - return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], 0, 0) - case 5: - return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], 0) - case 6: - return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5]) - case 7: - return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], 0, 0) - case 8: - return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], 0) - case 9: - return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8]) - case 10: - return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], 0, 0) - case 11: - return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], 0) - case 12: - return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11]) - case 13: - return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], 0, 0) - case 14: - return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], 0) - case 15: - return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14]) - default: - panic("Call " + p.Name + " with too many arguments " + itoa(len(a)) + ".") - } -} - -// A LazyDLL implements access to a single DLL. -// It will delay the load of the DLL until the first -// call to its Handle method or to one of its -// LazyProc's Addr method. -type LazyDLL struct { - Name string - - // System determines whether the DLL must be loaded from the - // Windows System directory, bypassing the normal DLL search - // path. - System bool - - mu sync.Mutex - dll *DLL // non nil once DLL is loaded -} - -// Load loads DLL file d.Name into memory. It returns an error if fails. -// Load will not try to load DLL, if it is already loaded into memory. -func (d *LazyDLL) Load() error { - // Non-racy version of: - // if d.dll != nil { - if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll))) != nil { - return nil - } - d.mu.Lock() - defer d.mu.Unlock() - if d.dll != nil { - return nil - } - - // kernel32.dll is special, since it's where LoadLibraryEx comes from. - // The kernel already special-cases its name, so it's always - // loaded from system32. - var dll *DLL - var err error - if d.Name == "kernel32.dll" { - dll, err = LoadDLL(d.Name) - } else { - dll, err = loadLibraryEx(d.Name, d.System) - } - if err != nil { - return err - } - - // Non-racy version of: - // d.dll = dll - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll)), unsafe.Pointer(dll)) - return nil -} - -// mustLoad is like Load but panics if search fails. -func (d *LazyDLL) mustLoad() { - e := d.Load() - if e != nil { - panic(e) - } -} - -// Handle returns d's module handle. 
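LazyDLL.Load above pairs an atomic fast path with a mutex-guarded slow path so that racing goroutines never observe a half-initialized d.dll. The same double-checked pattern, extracted into a standalone sketch; the conn type and dial are invented stand-ins:

    package main

    import (
        "sync"
        "sync/atomic"
        "unsafe"
    )

    type conn struct{ name string }

    func dial() *conn { return &conn{name: "example"} }

    type lazyConn struct {
        mu sync.Mutex
        c  *conn
    }

    func (l *lazyConn) get() *conn {
        // Non-racy version of: if l.c != nil { return l.c }
        if p := atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&l.c))); p != nil {
            return (*conn)(p)
        }
        l.mu.Lock()
        defer l.mu.Unlock()
        if l.c == nil {
            // Non-racy version of: l.c = dial()
            atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&l.c)), unsafe.Pointer(dial()))
        }
        return l.c
    }

    func main() { _ = (&lazyConn{}).get() }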
-func (d *LazyDLL) Handle() uintptr { - d.mustLoad() - return uintptr(d.dll.Handle) -} - -// NewProc returns a LazyProc for accessing the named procedure in the DLL d. -func (d *LazyDLL) NewProc(name string) *LazyProc { - return &LazyProc{l: d, Name: name} -} - -// NewLazyDLL creates new LazyDLL associated with DLL file. -func NewLazyDLL(name string) *LazyDLL { - return &LazyDLL{Name: name} -} - -// NewLazySystemDLL is like NewLazyDLL, but will only -// search Windows System directory for the DLL if name is -// a base name (like "advapi32.dll"). -func NewLazySystemDLL(name string) *LazyDLL { - return &LazyDLL{Name: name, System: true} -} - -// A LazyProc implements access to a procedure inside a LazyDLL. -// It delays the lookup until the Addr method is called. -type LazyProc struct { - Name string - - mu sync.Mutex - l *LazyDLL - proc *Proc -} - -// Find searches DLL for procedure named p.Name. It returns -// an error if search fails. Find will not search procedure, -// if it is already found and loaded into memory. -func (p *LazyProc) Find() error { - // Non-racy version of: - // if p.proc == nil { - if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&p.proc))) == nil { - p.mu.Lock() - defer p.mu.Unlock() - if p.proc == nil { - e := p.l.Load() - if e != nil { - return e - } - proc, e := p.l.dll.FindProc(p.Name) - if e != nil { - return e - } - // Non-racy version of: - // p.proc = proc - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&p.proc)), unsafe.Pointer(proc)) - } - } - return nil -} - -// mustFind is like Find but panics if search fails. -func (p *LazyProc) mustFind() { - e := p.Find() - if e != nil { - panic(e) - } -} - -// Addr returns the address of the procedure represented by p. -// The return value can be passed to Syscall to run the procedure. -// It will panic if the procedure cannot be found. -func (p *LazyProc) Addr() uintptr { - p.mustFind() - return p.proc.Addr() -} - -//go:uintptrescapes - -// Call executes procedure p with arguments a. It will panic, if more than 15 arguments -// are supplied. It will also panic if the procedure cannot be found. -// -// The returned error is always non-nil, constructed from the result of GetLastError. -// Callers must inspect the primary return value to decide whether an error occurred -// (according to the semantics of the specific function being called) before consulting -// the error. The error will be guaranteed to contain windows.Errno. -func (p *LazyProc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) { - p.mustFind() - return p.proc.Call(a...) -} - -var canDoSearchSystem32Once struct { - sync.Once - v bool -} - -func initCanDoSearchSystem32() { - // https://msdn.microsoft.com/en-us/library/ms684179(v=vs.85).aspx says: - // "Windows 7, Windows Server 2008 R2, Windows Vista, and Windows - // Server 2008: The LOAD_LIBRARY_SEARCH_* flags are available on - // systems that have KB2533623 installed. To determine whether the - // flags are available, use GetProcAddress to get the address of the - // AddDllDirectory, RemoveDllDirectory, or SetDefaultDllDirectories - // function. If GetProcAddress succeeds, the LOAD_LIBRARY_SEARCH_* - // flags can be used with LoadLibraryEx." 
- canDoSearchSystem32Once.v = (modkernel32.NewProc("AddDllDirectory").Find() == nil) -} - -func canDoSearchSystem32() bool { - canDoSearchSystem32Once.Do(initCanDoSearchSystem32) - return canDoSearchSystem32Once.v -} - -func isBaseName(name string) bool { - for _, c := range name { - if c == ':' || c == '/' || c == '\\' { - return false - } - } - return true -} - -// loadLibraryEx wraps the Windows LoadLibraryEx function. -// -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684179(v=vs.85).aspx -// -// If name is not an absolute path, LoadLibraryEx searches for the DLL -// in a variety of automatic locations unless constrained by flags. -// See: https://msdn.microsoft.com/en-us/library/ff919712%28VS.85%29.aspx -func loadLibraryEx(name string, system bool) (*DLL, error) { - loadDLL := name - var flags uintptr - if system { - if canDoSearchSystem32() { - const LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800 - flags = LOAD_LIBRARY_SEARCH_SYSTEM32 - } else if isBaseName(name) { - // WindowsXP or unpatched Windows machine - // trying to load "foo.dll" out of the system - // folder, but LoadLibraryEx doesn't support - // that yet on their system, so emulate it. - systemdir, err := GetSystemDirectory() - if err != nil { - return nil, err - } - loadDLL = systemdir + "\\" + name - } - } - h, err := LoadLibraryEx(loadDLL, 0, flags) - if err != nil { - return nil, err - } - return &DLL{Name: name, Handle: h}, nil -} - -type errString string - -func (s errString) Error() string { return string(s) } diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/empty.s b/awsproviderlint/vendor/golang.org/x/sys/windows/empty.s deleted file mode 100644 index 69309e4da55..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/windows/empty.s +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.12 - -// This file is here to allow bodyless functions with go:linkname for Go 1.11 -// and earlier (see https://golang.org/issue/23311). diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/env_windows.go b/awsproviderlint/vendor/golang.org/x/sys/windows/env_windows.go deleted file mode 100644 index 92ac05ff4ea..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/windows/env_windows.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Windows environment variables. - -package windows - -import ( - "syscall" - "unsafe" -) - -func Getenv(key string) (value string, found bool) { - return syscall.Getenv(key) -} - -func Setenv(key, value string) error { - return syscall.Setenv(key, value) -} - -func Clearenv() { - syscall.Clearenv() -} - -func Environ() []string { - return syscall.Environ() -} - -// Returns a default environment associated with the token, rather than the current -// process. If inheritExisting is true, then this environment also inherits the -// environment of the current process. 
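A short sketch of Token.Environ below (illustrative, not part of this patch; assumes Windows and that the process token can be opened with TOKEN_QUERY|TOKEN_DUPLICATE, which CreateEnvironmentBlock requires per its documentation):

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/windows"
    )

    func main() {
    	var tok windows.Token
    	err := windows.OpenProcessToken(windows.CurrentProcess(),
    		windows.TOKEN_QUERY|windows.TOKEN_DUPLICATE, &tok)
    	if err != nil {
    		panic(err)
    	}
    	defer tok.Close()

    	// false: the token's default environment only, without inheriting
    	// the current process environment.
    	env, err := tok.Environ(false)
    	if err != nil {
    		panic(err)
    	}
    	for _, kv := range env {
    		fmt.Println(kv) // "NAME=value" strings, as walked out of the block
    	}
    }
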
-func (token Token) Environ(inheritExisting bool) (env []string, err error) { - var block *uint16 - err = CreateEnvironmentBlock(&block, token, inheritExisting) - if err != nil { - return nil, err - } - defer DestroyEnvironmentBlock(block) - blockp := uintptr(unsafe.Pointer(block)) - for { - entry := UTF16PtrToString((*uint16)(unsafe.Pointer(blockp))) - if len(entry) == 0 { - break - } - env = append(env, entry) - blockp += 2 * (uintptr(len(entry)) + 1) - } - return env, nil -} - -func Unsetenv(key string) error { - return syscall.Unsetenv(key) -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/eventlog.go b/awsproviderlint/vendor/golang.org/x/sys/windows/eventlog.go deleted file mode 100644 index 40af946e162..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/windows/eventlog.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package windows - -const ( - EVENTLOG_SUCCESS = 0 - EVENTLOG_ERROR_TYPE = 1 - EVENTLOG_WARNING_TYPE = 2 - EVENTLOG_INFORMATION_TYPE = 4 - EVENTLOG_AUDIT_SUCCESS = 8 - EVENTLOG_AUDIT_FAILURE = 16 -) - -//sys RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) [failretval==0] = advapi32.RegisterEventSourceW -//sys DeregisterEventSource(handle Handle) (err error) = advapi32.DeregisterEventSource -//sys ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) = advapi32.ReportEventW diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/exec_windows.go b/awsproviderlint/vendor/golang.org/x/sys/windows/exec_windows.go deleted file mode 100644 index 3606c3a8b36..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/windows/exec_windows.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Fork, exec, wait, etc. - -package windows - -// EscapeArg rewrites command line argument s as prescribed -// in http://msdn.microsoft.com/en-us/library/ms880421. -// This function returns "" (2 double quotes) if s is empty. -// Alternatively, these transformations are done: -// - every back slash (\) is doubled, but only if immediately -// followed by double quote ("); -// - every double quote (") is escaped by back slash (\); -// - finally, s is wrapped with double quotes (arg -> "arg"), -// but only if there is space or tab inside s. 
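The quoting rules documented above are easiest to see on concrete inputs (a sketch, not part of this patch; Windows-only since it imports golang.org/x/sys/windows):

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/windows"
    )

    func main() {
    	fmt.Println(windows.EscapeArg(``))         // "" (empty becomes two double quotes)
    	fmt.Println(windows.EscapeArg(`C:\tmp`))   // C:\tmp (no space, no quote: unchanged)
    	fmt.Println(windows.EscapeArg(`a b\`))     // "a b\\" (wrapped; trailing backslash doubled)
    	fmt.Println(windows.EscapeArg(`say "hi"`)) // "say \"hi\"" (quotes escaped, then wrapped)
    }
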
-func EscapeArg(s string) string { - if len(s) == 0 { - return "\"\"" - } - n := len(s) - hasSpace := false - for i := 0; i < len(s); i++ { - switch s[i] { - case '"', '\\': - n++ - case ' ', '\t': - hasSpace = true - } - } - if hasSpace { - n += 2 - } - if n == len(s) { - return s - } - - qs := make([]byte, n) - j := 0 - if hasSpace { - qs[j] = '"' - j++ - } - slashes := 0 - for i := 0; i < len(s); i++ { - switch s[i] { - default: - slashes = 0 - qs[j] = s[i] - case '\\': - slashes++ - qs[j] = s[i] - case '"': - for ; slashes > 0; slashes-- { - qs[j] = '\\' - j++ - } - qs[j] = '\\' - j++ - qs[j] = s[i] - } - j++ - } - if hasSpace { - for ; slashes > 0; slashes-- { - qs[j] = '\\' - j++ - } - qs[j] = '"' - j++ - } - return string(qs[:j]) -} - -func CloseOnExec(fd Handle) { - SetHandleInformation(Handle(fd), HANDLE_FLAG_INHERIT, 0) -} - -// FullPath retrieves the full path of the specified file. -func FullPath(name string) (path string, err error) { - p, err := UTF16PtrFromString(name) - if err != nil { - return "", err - } - n := uint32(100) - for { - buf := make([]uint16, n) - n, err = GetFullPathName(p, uint32(len(buf)), &buf[0], nil) - if err != nil { - return "", err - } - if n <= uint32(len(buf)) { - return UTF16ToString(buf[:n]), nil - } - } -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/memory_windows.go b/awsproviderlint/vendor/golang.org/x/sys/windows/memory_windows.go deleted file mode 100644 index e409d76f0fd..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/windows/memory_windows.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package windows - -const ( - MEM_COMMIT = 0x00001000 - MEM_RESERVE = 0x00002000 - MEM_DECOMMIT = 0x00004000 - MEM_RELEASE = 0x00008000 - MEM_RESET = 0x00080000 - MEM_TOP_DOWN = 0x00100000 - MEM_WRITE_WATCH = 0x00200000 - MEM_PHYSICAL = 0x00400000 - MEM_RESET_UNDO = 0x01000000 - MEM_LARGE_PAGES = 0x20000000 - - PAGE_NOACCESS = 0x01 - PAGE_READONLY = 0x02 - PAGE_READWRITE = 0x04 - PAGE_WRITECOPY = 0x08 - PAGE_EXECUTE_READ = 0x20 - PAGE_EXECUTE_READWRITE = 0x40 - PAGE_EXECUTE_WRITECOPY = 0x80 - - QUOTA_LIMITS_HARDWS_MIN_DISABLE = 0x00000002 - QUOTA_LIMITS_HARDWS_MIN_ENABLE = 0x00000001 - QUOTA_LIMITS_HARDWS_MAX_DISABLE = 0x00000008 - QUOTA_LIMITS_HARDWS_MAX_ENABLE = 0x00000004 -) diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/mkerrors.bash b/awsproviderlint/vendor/golang.org/x/sys/windows/mkerrors.bash deleted file mode 100644 index 2163843a11d..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/windows/mkerrors.bash +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -set -e -shopt -s nullglob - -winerror="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/shared/winerror.h | sort -Vr | head -n 1)" -[[ -n $winerror ]] || { echo "Unable to find winerror.h" >&2; exit 1; } - -declare -A errors - -{ - echo "// Code generated by 'mkerrors.bash'; DO NOT EDIT." - echo - echo "package windows" - echo "import \"syscall\"" - echo "const (" - - while read -r line; do - unset vtype - if [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +([A-Z0-9_]+\()?([A-Z][A-Z0-9_]+k?)\)? 
]]; then - key="${BASH_REMATCH[1]}" - value="${BASH_REMATCH[3]}" - elif [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +([A-Z0-9_]+\()?((0x)?[0-9A-Fa-f]+)L?\)? ]]; then - key="${BASH_REMATCH[1]}" - value="${BASH_REMATCH[3]}" - vtype="${BASH_REMATCH[2]}" - elif [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +\(\(([A-Z]+)\)((0x)?[0-9A-Fa-f]+)L?\) ]]; then - key="${BASH_REMATCH[1]}" - value="${BASH_REMATCH[3]}" - vtype="${BASH_REMATCH[2]}" - else - continue - fi - [[ -n $key && -n $value ]] || continue - [[ -z ${errors["$key"]} ]] || continue - errors["$key"]="$value" - if [[ -v vtype ]]; then - if [[ $key == FACILITY_* || $key == NO_ERROR ]]; then - vtype="" - elif [[ $vtype == *HANDLE* || $vtype == *HRESULT* ]]; then - vtype="Handle" - else - vtype="syscall.Errno" - fi - last_vtype="$vtype" - else - vtype="" - if [[ $last_vtype == Handle && $value == NO_ERROR ]]; then - value="S_OK" - elif [[ $last_vtype == syscall.Errno && $value == NO_ERROR ]]; then - value="ERROR_SUCCESS" - fi - fi - - echo "$key $vtype = $value" - done < "$winerror" - - echo ")" -} | gofmt > "zerrors_windows.go" diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/mkknownfolderids.bash b/awsproviderlint/vendor/golang.org/x/sys/windows/mkknownfolderids.bash deleted file mode 100644 index ab8924e936f..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/windows/mkknownfolderids.bash +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -set -e -shopt -s nullglob - -knownfolders="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/um/KnownFolders.h | sort -Vr | head -n 1)" -[[ -n $knownfolders ]] || { echo "Unable to find KnownFolders.h" >&2; exit 1; } - -{ - echo "// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT." - echo - echo "package windows" - echo "type KNOWNFOLDERID GUID" - echo "var (" - while read -r line; do - [[ $line =~ DEFINE_KNOWN_FOLDER\((FOLDERID_[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+)\) ]] || continue - printf "%s = &KNOWNFOLDERID{0x%08x, 0x%04x, 0x%04x, [8]byte{0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x}}\n" \ - "${BASH_REMATCH[1]}" $(( "${BASH_REMATCH[2]}" )) $(( "${BASH_REMATCH[3]}" )) $(( "${BASH_REMATCH[4]}" )) \ - $(( "${BASH_REMATCH[5]}" )) $(( "${BASH_REMATCH[6]}" )) $(( "${BASH_REMATCH[7]}" )) $(( "${BASH_REMATCH[8]}" )) \ - $(( "${BASH_REMATCH[9]}" )) $(( "${BASH_REMATCH[10]}" )) $(( "${BASH_REMATCH[11]}" )) $(( "${BASH_REMATCH[12]}" )) - done < "$knownfolders" - echo ")" -} | gofmt > "zknownfolderids_windows.go" diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/mksyscall.go b/awsproviderlint/vendor/golang.org/x/sys/windows/mksyscall.go deleted file mode 100644 index 328e3b2ace2..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/windows/mksyscall.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build generate - -package windows - -//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/race.go b/awsproviderlint/vendor/golang.org/x/sys/windows/race.go deleted file mode 100644 index a74e3e24b55..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/windows/race.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows,race - -package windows - -import ( - "runtime" - "unsafe" -) - -const raceenabled = true - -func raceAcquire(addr unsafe.Pointer) { - runtime.RaceAcquire(addr) -} - -func raceReleaseMerge(addr unsafe.Pointer) { - runtime.RaceReleaseMerge(addr) -} - -func raceReadRange(addr unsafe.Pointer, len int) { - runtime.RaceReadRange(addr, len) -} - -func raceWriteRange(addr unsafe.Pointer, len int) { - runtime.RaceWriteRange(addr, len) -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/race0.go b/awsproviderlint/vendor/golang.org/x/sys/windows/race0.go deleted file mode 100644 index e44a3cbf679..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/windows/race0.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows,!race - -package windows - -import ( - "unsafe" -) - -const raceenabled = false - -func raceAcquire(addr unsafe.Pointer) { -} - -func raceReleaseMerge(addr unsafe.Pointer) { -} - -func raceReadRange(addr unsafe.Pointer, len int) { -} - -func raceWriteRange(addr unsafe.Pointer, len int) { -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/security_windows.go b/awsproviderlint/vendor/golang.org/x/sys/windows/security_windows.go deleted file mode 100644 index 9e3c44a8557..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/windows/security_windows.go +++ /dev/null @@ -1,1406 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package windows - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/internal/unsafeheader" -) - -const ( - NameUnknown = 0 - NameFullyQualifiedDN = 1 - NameSamCompatible = 2 - NameDisplay = 3 - NameUniqueId = 6 - NameCanonical = 7 - NameUserPrincipal = 8 - NameCanonicalEx = 9 - NameServicePrincipal = 10 - NameDnsDomain = 12 -) - -// This function returns 1 byte BOOLEAN rather than the 4 byte BOOL. -// http://blogs.msdn.com/b/drnick/archive/2007/12/19/windows-and-upn-format-credentials.aspx -//sys TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.TranslateNameW -//sys GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.GetUserNameExW - -// TranslateAccountName converts a directory service -// object name from one format to another. 
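TranslateAccountName below, like LookupSID, Token.getInfo, and GetUserProfileDirectory later in this file, wraps its Win32 call in the same grow-and-retry loop: call with a guessed buffer, and if the API reports an insufficient buffer together with a larger required size, reallocate and try again. The shape of the loop as a standalone sketch, with a hypothetical fill function standing in for the syscall:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // errInsufficientBuffer stands in for ERROR_INSUFFICIENT_BUFFER.
    var errInsufficientBuffer = errors.New("insufficient buffer")

    // fill is a hypothetical Win32-style call: it needs 130 elements and,
    // like the real APIs, reports the required size through n on failure.
    func fill(buf []uint16, n *uint32) error {
    	const need = 130
    	*n = need
    	if len(buf) < need {
    		return errInsufficientBuffer
    	}
    	return nil
    }

    func growAndRetry() ([]uint16, error) {
    	n := uint32(50) // initial guess, as in the helpers in this file
    	for {
    		b := make([]uint16, n)
    		err := fill(b, &n)
    		if err == nil {
    			return b[:n], nil
    		}
    		if !errors.Is(err, errInsufficientBuffer) {
    			return nil, err // a real failure, not a size problem
    		}
    		if n <= uint32(len(b)) {
    			return nil, err // size did not grow: avoid looping forever
    		}
    	}
    }

    func main() {
    	b, err := growAndRetry()
    	fmt.Println(len(b), err) // 130 <nil>
    }
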
-func TranslateAccountName(username string, from, to uint32, initSize int) (string, error) { - u, e := UTF16PtrFromString(username) - if e != nil { - return "", e - } - n := uint32(50) - for { - b := make([]uint16, n) - e = TranslateName(u, from, to, &b[0], &n) - if e == nil { - return UTF16ToString(b[:n]), nil - } - if e != ERROR_INSUFFICIENT_BUFFER { - return "", e - } - if n <= uint32(len(b)) { - return "", e - } - } -} - -const ( - // do not reorder - NetSetupUnknownStatus = iota - NetSetupUnjoined - NetSetupWorkgroupName - NetSetupDomainName -) - -type UserInfo10 struct { - Name *uint16 - Comment *uint16 - UsrComment *uint16 - FullName *uint16 -} - -//sys NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo -//sys NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation -//sys NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree - -const ( - // do not reorder - SidTypeUser = 1 + iota - SidTypeGroup - SidTypeDomain - SidTypeAlias - SidTypeWellKnownGroup - SidTypeDeletedAccount - SidTypeInvalid - SidTypeUnknown - SidTypeComputer - SidTypeLabel -) - -type SidIdentifierAuthority struct { - Value [6]byte -} - -var ( - SECURITY_NULL_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 0}} - SECURITY_WORLD_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 1}} - SECURITY_LOCAL_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 2}} - SECURITY_CREATOR_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 3}} - SECURITY_NON_UNIQUE_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 4}} - SECURITY_NT_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 5}} - SECURITY_MANDATORY_LABEL_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 16}} -) - -const ( - SECURITY_NULL_RID = 0 - SECURITY_WORLD_RID = 0 - SECURITY_LOCAL_RID = 0 - SECURITY_CREATOR_OWNER_RID = 0 - SECURITY_CREATOR_GROUP_RID = 1 - SECURITY_DIALUP_RID = 1 - SECURITY_NETWORK_RID = 2 - SECURITY_BATCH_RID = 3 - SECURITY_INTERACTIVE_RID = 4 - SECURITY_LOGON_IDS_RID = 5 - SECURITY_SERVICE_RID = 6 - SECURITY_LOCAL_SYSTEM_RID = 18 - SECURITY_BUILTIN_DOMAIN_RID = 32 - SECURITY_PRINCIPAL_SELF_RID = 10 - SECURITY_CREATOR_OWNER_SERVER_RID = 0x2 - SECURITY_CREATOR_GROUP_SERVER_RID = 0x3 - SECURITY_LOGON_IDS_RID_COUNT = 0x3 - SECURITY_ANONYMOUS_LOGON_RID = 0x7 - SECURITY_PROXY_RID = 0x8 - SECURITY_ENTERPRISE_CONTROLLERS_RID = 0x9 - SECURITY_SERVER_LOGON_RID = SECURITY_ENTERPRISE_CONTROLLERS_RID - SECURITY_AUTHENTICATED_USER_RID = 0xb - SECURITY_RESTRICTED_CODE_RID = 0xc - SECURITY_NT_NON_UNIQUE_RID = 0x15 -) - -// Predefined domain-relative RIDs for local groups. 
-// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa379649(v=vs.85).aspx -const ( - DOMAIN_ALIAS_RID_ADMINS = 0x220 - DOMAIN_ALIAS_RID_USERS = 0x221 - DOMAIN_ALIAS_RID_GUESTS = 0x222 - DOMAIN_ALIAS_RID_POWER_USERS = 0x223 - DOMAIN_ALIAS_RID_ACCOUNT_OPS = 0x224 - DOMAIN_ALIAS_RID_SYSTEM_OPS = 0x225 - DOMAIN_ALIAS_RID_PRINT_OPS = 0x226 - DOMAIN_ALIAS_RID_BACKUP_OPS = 0x227 - DOMAIN_ALIAS_RID_REPLICATOR = 0x228 - DOMAIN_ALIAS_RID_RAS_SERVERS = 0x229 - DOMAIN_ALIAS_RID_PREW2KCOMPACCESS = 0x22a - DOMAIN_ALIAS_RID_REMOTE_DESKTOP_USERS = 0x22b - DOMAIN_ALIAS_RID_NETWORK_CONFIGURATION_OPS = 0x22c - DOMAIN_ALIAS_RID_INCOMING_FOREST_TRUST_BUILDERS = 0x22d - DOMAIN_ALIAS_RID_MONITORING_USERS = 0x22e - DOMAIN_ALIAS_RID_LOGGING_USERS = 0x22f - DOMAIN_ALIAS_RID_AUTHORIZATIONACCESS = 0x230 - DOMAIN_ALIAS_RID_TS_LICENSE_SERVERS = 0x231 - DOMAIN_ALIAS_RID_DCOM_USERS = 0x232 - DOMAIN_ALIAS_RID_IUSERS = 0x238 - DOMAIN_ALIAS_RID_CRYPTO_OPERATORS = 0x239 - DOMAIN_ALIAS_RID_CACHEABLE_PRINCIPALS_GROUP = 0x23b - DOMAIN_ALIAS_RID_NON_CACHEABLE_PRINCIPALS_GROUP = 0x23c - DOMAIN_ALIAS_RID_EVENT_LOG_READERS_GROUP = 0x23d - DOMAIN_ALIAS_RID_CERTSVC_DCOM_ACCESS_GROUP = 0x23e -) - -//sys LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountSidW -//sys LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountNameW -//sys ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) = advapi32.ConvertSidToStringSidW -//sys ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) = advapi32.ConvertStringSidToSidW -//sys GetLengthSid(sid *SID) (len uint32) = advapi32.GetLengthSid -//sys CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) = advapi32.CopySid -//sys AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) = advapi32.AllocateAndInitializeSid -//sys createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) = advapi32.CreateWellKnownSid -//sys isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) = advapi32.IsWellKnownSid -//sys FreeSid(sid *SID) (err error) [failretval!=0] = advapi32.FreeSid -//sys EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) = advapi32.EqualSid -//sys getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) = advapi32.GetSidIdentifierAuthority -//sys getSidSubAuthorityCount(sid *SID) (count *uint8) = advapi32.GetSidSubAuthorityCount -//sys getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) = advapi32.GetSidSubAuthority -//sys isValidSid(sid *SID) (isValid bool) = advapi32.IsValidSid - -// The security identifier (SID) structure is a variable-length -// structure used to uniquely identify users or groups. -type SID struct{} - -// StringToSid converts a string-format security identifier -// SID into a valid, functional SID. 
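A round-trip sketch for StringToSid below and the SID accessors that follow it (Windows-only; S-1-5-32-544 is BUILTIN\Administrators, whose last sub-authority is DOMAIN_ALIAS_RID_ADMINS = 0x220 from the table above):

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/windows"
    )

    func main() {
    	sid, err := windows.StringToSid("S-1-5-32-544")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(sid.String())            // S-1-5-32-544
    	fmt.Println(sid.IsValid())           // true
    	fmt.Println(sid.SubAuthorityCount()) // 2
    	fmt.Println(sid.SubAuthority(1))     // 544 (0x220)
    }
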
-func StringToSid(s string) (*SID, error) { - var sid *SID - p, e := UTF16PtrFromString(s) - if e != nil { - return nil, e - } - e = ConvertStringSidToSid(p, &sid) - if e != nil { - return nil, e - } - defer LocalFree((Handle)(unsafe.Pointer(sid))) - return sid.Copy() -} - -// LookupSID retrieves a security identifier SID for the account -// and the name of the domain on which the account was found. -// System specify target computer to search. -func LookupSID(system, account string) (sid *SID, domain string, accType uint32, err error) { - if len(account) == 0 { - return nil, "", 0, syscall.EINVAL - } - acc, e := UTF16PtrFromString(account) - if e != nil { - return nil, "", 0, e - } - var sys *uint16 - if len(system) > 0 { - sys, e = UTF16PtrFromString(system) - if e != nil { - return nil, "", 0, e - } - } - n := uint32(50) - dn := uint32(50) - for { - b := make([]byte, n) - db := make([]uint16, dn) - sid = (*SID)(unsafe.Pointer(&b[0])) - e = LookupAccountName(sys, acc, sid, &n, &db[0], &dn, &accType) - if e == nil { - return sid, UTF16ToString(db), accType, nil - } - if e != ERROR_INSUFFICIENT_BUFFER { - return nil, "", 0, e - } - if n <= uint32(len(b)) { - return nil, "", 0, e - } - } -} - -// String converts SID to a string format suitable for display, storage, or transmission. -func (sid *SID) String() string { - var s *uint16 - e := ConvertSidToStringSid(sid, &s) - if e != nil { - return "" - } - defer LocalFree((Handle)(unsafe.Pointer(s))) - return UTF16ToString((*[256]uint16)(unsafe.Pointer(s))[:]) -} - -// Len returns the length, in bytes, of a valid security identifier SID. -func (sid *SID) Len() int { - return int(GetLengthSid(sid)) -} - -// Copy creates a duplicate of security identifier SID. -func (sid *SID) Copy() (*SID, error) { - b := make([]byte, sid.Len()) - sid2 := (*SID)(unsafe.Pointer(&b[0])) - e := CopySid(uint32(len(b)), sid2, sid) - if e != nil { - return nil, e - } - return sid2, nil -} - -// IdentifierAuthority returns the identifier authority of the SID. -func (sid *SID) IdentifierAuthority() SidIdentifierAuthority { - return *getSidIdentifierAuthority(sid) -} - -// SubAuthorityCount returns the number of sub-authorities in the SID. -func (sid *SID) SubAuthorityCount() uint8 { - return *getSidSubAuthorityCount(sid) -} - -// SubAuthority returns the sub-authority of the SID as specified by -// the index, which must be less than sid.SubAuthorityCount(). -func (sid *SID) SubAuthority(idx uint32) uint32 { - if idx >= uint32(sid.SubAuthorityCount()) { - panic("sub-authority index out of range") - } - return *getSidSubAuthority(sid, idx) -} - -// IsValid returns whether the SID has a valid revision and length. -func (sid *SID) IsValid() bool { - return isValidSid(sid) -} - -// Equals compares two SIDs for equality. -func (sid *SID) Equals(sid2 *SID) bool { - return EqualSid(sid, sid2) -} - -// IsWellKnown determines whether the SID matches the well-known sidType. -func (sid *SID) IsWellKnown(sidType WELL_KNOWN_SID_TYPE) bool { - return isWellKnownSid(sid, sidType) -} - -// LookupAccount retrieves the name of the account for this SID -// and the name of the first domain on which this SID is found. -// System specify target computer to search for. 
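LookupAccount below resolves a SID back to a name; combined with CreateWellKnownSid (defined later in this file) it makes a compact sketch (Windows-only; account names are localized, so the BUILTIN\Administrators output is only typical):

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/windows"
    )

    func main() {
    	sid, err := windows.CreateWellKnownSid(windows.WinBuiltinAdministratorsSid)
    	if err != nil {
    		panic(err)
    	}
    	// An empty system name means the local machine.
    	account, domain, accType, err := sid.LookupAccount("")
    	if err != nil {
    		panic(err)
    	}
    	// e.g. BUILTIN\Administrators (type 4 = SidTypeAlias)
    	fmt.Printf("%s\\%s (type %d)\n", domain, account, accType)
    }
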
-func (sid *SID) LookupAccount(system string) (account, domain string, accType uint32, err error) { - var sys *uint16 - if len(system) > 0 { - sys, err = UTF16PtrFromString(system) - if err != nil { - return "", "", 0, err - } - } - n := uint32(50) - dn := uint32(50) - for { - b := make([]uint16, n) - db := make([]uint16, dn) - e := LookupAccountSid(sys, sid, &b[0], &n, &db[0], &dn, &accType) - if e == nil { - return UTF16ToString(b), UTF16ToString(db), accType, nil - } - if e != ERROR_INSUFFICIENT_BUFFER { - return "", "", 0, e - } - if n <= uint32(len(b)) { - return "", "", 0, e - } - } -} - -// Various types of pre-specified SIDs that can be synthesized and compared at runtime. -type WELL_KNOWN_SID_TYPE uint32 - -const ( - WinNullSid = 0 - WinWorldSid = 1 - WinLocalSid = 2 - WinCreatorOwnerSid = 3 - WinCreatorGroupSid = 4 - WinCreatorOwnerServerSid = 5 - WinCreatorGroupServerSid = 6 - WinNtAuthoritySid = 7 - WinDialupSid = 8 - WinNetworkSid = 9 - WinBatchSid = 10 - WinInteractiveSid = 11 - WinServiceSid = 12 - WinAnonymousSid = 13 - WinProxySid = 14 - WinEnterpriseControllersSid = 15 - WinSelfSid = 16 - WinAuthenticatedUserSid = 17 - WinRestrictedCodeSid = 18 - WinTerminalServerSid = 19 - WinRemoteLogonIdSid = 20 - WinLogonIdsSid = 21 - WinLocalSystemSid = 22 - WinLocalServiceSid = 23 - WinNetworkServiceSid = 24 - WinBuiltinDomainSid = 25 - WinBuiltinAdministratorsSid = 26 - WinBuiltinUsersSid = 27 - WinBuiltinGuestsSid = 28 - WinBuiltinPowerUsersSid = 29 - WinBuiltinAccountOperatorsSid = 30 - WinBuiltinSystemOperatorsSid = 31 - WinBuiltinPrintOperatorsSid = 32 - WinBuiltinBackupOperatorsSid = 33 - WinBuiltinReplicatorSid = 34 - WinBuiltinPreWindows2000CompatibleAccessSid = 35 - WinBuiltinRemoteDesktopUsersSid = 36 - WinBuiltinNetworkConfigurationOperatorsSid = 37 - WinAccountAdministratorSid = 38 - WinAccountGuestSid = 39 - WinAccountKrbtgtSid = 40 - WinAccountDomainAdminsSid = 41 - WinAccountDomainUsersSid = 42 - WinAccountDomainGuestsSid = 43 - WinAccountComputersSid = 44 - WinAccountControllersSid = 45 - WinAccountCertAdminsSid = 46 - WinAccountSchemaAdminsSid = 47 - WinAccountEnterpriseAdminsSid = 48 - WinAccountPolicyAdminsSid = 49 - WinAccountRasAndIasServersSid = 50 - WinNTLMAuthenticationSid = 51 - WinDigestAuthenticationSid = 52 - WinSChannelAuthenticationSid = 53 - WinThisOrganizationSid = 54 - WinOtherOrganizationSid = 55 - WinBuiltinIncomingForestTrustBuildersSid = 56 - WinBuiltinPerfMonitoringUsersSid = 57 - WinBuiltinPerfLoggingUsersSid = 58 - WinBuiltinAuthorizationAccessSid = 59 - WinBuiltinTerminalServerLicenseServersSid = 60 - WinBuiltinDCOMUsersSid = 61 - WinBuiltinIUsersSid = 62 - WinIUserSid = 63 - WinBuiltinCryptoOperatorsSid = 64 - WinUntrustedLabelSid = 65 - WinLowLabelSid = 66 - WinMediumLabelSid = 67 - WinHighLabelSid = 68 - WinSystemLabelSid = 69 - WinWriteRestrictedCodeSid = 70 - WinCreatorOwnerRightsSid = 71 - WinCacheablePrincipalsGroupSid = 72 - WinNonCacheablePrincipalsGroupSid = 73 - WinEnterpriseReadonlyControllersSid = 74 - WinAccountReadonlyControllersSid = 75 - WinBuiltinEventLogReadersGroup = 76 - WinNewEnterpriseReadonlyControllersSid = 77 - WinBuiltinCertSvcDComAccessGroup = 78 - WinMediumPlusLabelSid = 79 - WinLocalLogonSid = 80 - WinConsoleLogonSid = 81 - WinThisOrganizationCertificateSid = 82 - WinApplicationPackageAuthoritySid = 83 - WinBuiltinAnyPackageSid = 84 - WinCapabilityInternetClientSid = 85 - WinCapabilityInternetClientServerSid = 86 - WinCapabilityPrivateNetworkClientServerSid = 87 - WinCapabilityPicturesLibrarySid = 88 - 
WinCapabilityVideosLibrarySid = 89 - WinCapabilityMusicLibrarySid = 90 - WinCapabilityDocumentsLibrarySid = 91 - WinCapabilitySharedUserCertificatesSid = 92 - WinCapabilityEnterpriseAuthenticationSid = 93 - WinCapabilityRemovableStorageSid = 94 - WinBuiltinRDSRemoteAccessServersSid = 95 - WinBuiltinRDSEndpointServersSid = 96 - WinBuiltinRDSManagementServersSid = 97 - WinUserModeDriversSid = 98 - WinBuiltinHyperVAdminsSid = 99 - WinAccountCloneableControllersSid = 100 - WinBuiltinAccessControlAssistanceOperatorsSid = 101 - WinBuiltinRemoteManagementUsersSid = 102 - WinAuthenticationAuthorityAssertedSid = 103 - WinAuthenticationServiceAssertedSid = 104 - WinLocalAccountSid = 105 - WinLocalAccountAndAdministratorSid = 106 - WinAccountProtectedUsersSid = 107 - WinCapabilityAppointmentsSid = 108 - WinCapabilityContactsSid = 109 - WinAccountDefaultSystemManagedSid = 110 - WinBuiltinDefaultSystemManagedGroupSid = 111 - WinBuiltinStorageReplicaAdminsSid = 112 - WinAccountKeyAdminsSid = 113 - WinAccountEnterpriseKeyAdminsSid = 114 - WinAuthenticationKeyTrustSid = 115 - WinAuthenticationKeyPropertyMFASid = 116 - WinAuthenticationKeyPropertyAttestationSid = 117 - WinAuthenticationFreshKeyAuthSid = 118 - WinBuiltinDeviceOwnersSid = 119 -) - -// Creates a SID for a well-known predefined alias, generally using the constants of the form -// Win*Sid, for the local machine. -func CreateWellKnownSid(sidType WELL_KNOWN_SID_TYPE) (*SID, error) { - return CreateWellKnownDomainSid(sidType, nil) -} - -// Creates a SID for a well-known predefined alias, generally using the constants of the form -// Win*Sid, for the domain specified by the domainSid parameter. -func CreateWellKnownDomainSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID) (*SID, error) { - n := uint32(50) - for { - b := make([]byte, n) - sid := (*SID)(unsafe.Pointer(&b[0])) - err := createWellKnownSid(sidType, domainSid, sid, &n) - if err == nil { - return sid, nil - } - if err != ERROR_INSUFFICIENT_BUFFER { - return nil, err - } - if n <= uint32(len(b)) { - return nil, err - } - } -} - -const ( - // do not reorder - TOKEN_ASSIGN_PRIMARY = 1 << iota - TOKEN_DUPLICATE - TOKEN_IMPERSONATE - TOKEN_QUERY - TOKEN_QUERY_SOURCE - TOKEN_ADJUST_PRIVILEGES - TOKEN_ADJUST_GROUPS - TOKEN_ADJUST_DEFAULT - TOKEN_ADJUST_SESSIONID - - TOKEN_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | - TOKEN_ASSIGN_PRIMARY | - TOKEN_DUPLICATE | - TOKEN_IMPERSONATE | - TOKEN_QUERY | - TOKEN_QUERY_SOURCE | - TOKEN_ADJUST_PRIVILEGES | - TOKEN_ADJUST_GROUPS | - TOKEN_ADJUST_DEFAULT | - TOKEN_ADJUST_SESSIONID - TOKEN_READ = STANDARD_RIGHTS_READ | TOKEN_QUERY - TOKEN_WRITE = STANDARD_RIGHTS_WRITE | - TOKEN_ADJUST_PRIVILEGES | - TOKEN_ADJUST_GROUPS | - TOKEN_ADJUST_DEFAULT - TOKEN_EXECUTE = STANDARD_RIGHTS_EXECUTE -) - -const ( - // do not reorder - TokenUser = 1 + iota - TokenGroups - TokenPrivileges - TokenOwner - TokenPrimaryGroup - TokenDefaultDacl - TokenSource - TokenType - TokenImpersonationLevel - TokenStatistics - TokenRestrictedSids - TokenSessionId - TokenGroupsAndPrivileges - TokenSessionReference - TokenSandBoxInert - TokenAuditPolicy - TokenOrigin - TokenElevationType - TokenLinkedToken - TokenElevation - TokenHasRestrictions - TokenAccessInformation - TokenVirtualizationAllowed - TokenVirtualizationEnabled - TokenIntegrityLevel - TokenUIAccess - TokenMandatoryPolicy - TokenLogonSid - MaxTokenInfoClass -) - -// Group attributes inside of Tokengroups.Groups[i].Attributes -const ( - SE_GROUP_MANDATORY = 0x00000001 - SE_GROUP_ENABLED_BY_DEFAULT = 0x00000002 - SE_GROUP_ENABLED = 
0x00000004 - SE_GROUP_OWNER = 0x00000008 - SE_GROUP_USE_FOR_DENY_ONLY = 0x00000010 - SE_GROUP_INTEGRITY = 0x00000020 - SE_GROUP_INTEGRITY_ENABLED = 0x00000040 - SE_GROUP_LOGON_ID = 0xC0000000 - SE_GROUP_RESOURCE = 0x20000000 - SE_GROUP_VALID_ATTRIBUTES = SE_GROUP_MANDATORY | SE_GROUP_ENABLED_BY_DEFAULT | SE_GROUP_ENABLED | SE_GROUP_OWNER | SE_GROUP_USE_FOR_DENY_ONLY | SE_GROUP_LOGON_ID | SE_GROUP_RESOURCE | SE_GROUP_INTEGRITY | SE_GROUP_INTEGRITY_ENABLED -) - -// Privilege attributes -const ( - SE_PRIVILEGE_ENABLED_BY_DEFAULT = 0x00000001 - SE_PRIVILEGE_ENABLED = 0x00000002 - SE_PRIVILEGE_REMOVED = 0x00000004 - SE_PRIVILEGE_USED_FOR_ACCESS = 0x80000000 - SE_PRIVILEGE_VALID_ATTRIBUTES = SE_PRIVILEGE_ENABLED_BY_DEFAULT | SE_PRIVILEGE_ENABLED | SE_PRIVILEGE_REMOVED | SE_PRIVILEGE_USED_FOR_ACCESS -) - -// Token types -const ( - TokenPrimary = 1 - TokenImpersonation = 2 -) - -// Impersonation levels -const ( - SecurityAnonymous = 0 - SecurityIdentification = 1 - SecurityImpersonation = 2 - SecurityDelegation = 3 -) - -type LUID struct { - LowPart uint32 - HighPart int32 -} - -type LUIDAndAttributes struct { - Luid LUID - Attributes uint32 -} - -type SIDAndAttributes struct { - Sid *SID - Attributes uint32 -} - -type Tokenuser struct { - User SIDAndAttributes -} - -type Tokenprimarygroup struct { - PrimaryGroup *SID -} - -type Tokengroups struct { - GroupCount uint32 - Groups [1]SIDAndAttributes // Use AllGroups() for iterating. -} - -// AllGroups returns a slice that can be used to iterate over the groups in g. -func (g *Tokengroups) AllGroups() []SIDAndAttributes { - return (*[(1 << 28) - 1]SIDAndAttributes)(unsafe.Pointer(&g.Groups[0]))[:g.GroupCount:g.GroupCount] -} - -type Tokenprivileges struct { - PrivilegeCount uint32 - Privileges [1]LUIDAndAttributes // Use AllPrivileges() for iterating. -} - -// AllPrivileges returns a slice that can be used to iterate over the privileges in p. 
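Tokengroups and Tokenprivileges above mirror variable-length C structs, which is why their Groups/Privileges arrays are declared with one element and read through AllGroups/AllPrivileges. A sketch of iterating the current process token's groups this way (Windows-only, illustrative):

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/windows"
    )

    func main() {
    	var tok windows.Token
    	if err := windows.OpenProcessToken(windows.CurrentProcess(), windows.TOKEN_QUERY, &tok); err != nil {
    		panic(err)
    	}
    	defer tok.Close()

    	groups, err := tok.GetTokenGroups()
    	if err != nil {
    		panic(err)
    	}
    	// AllGroups re-slices the one-element array to the real GroupCount.
    	for _, g := range groups.AllGroups() {
    		fmt.Printf("%-48s attrs=%#x\n", g.Sid, g.Attributes)
    	}
    }
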
-func (p *Tokenprivileges) AllPrivileges() []LUIDAndAttributes { - return (*[(1 << 27) - 1]LUIDAndAttributes)(unsafe.Pointer(&p.Privileges[0]))[:p.PrivilegeCount:p.PrivilegeCount] -} - -type Tokenmandatorylabel struct { - Label SIDAndAttributes -} - -func (tml *Tokenmandatorylabel) Size() uint32 { - return uint32(unsafe.Sizeof(Tokenmandatorylabel{})) + GetLengthSid(tml.Label.Sid) -} - -// Authorization Functions -//sys checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) = advapi32.CheckTokenMembership -//sys OpenProcessToken(process Handle, access uint32, token *Token) (err error) = advapi32.OpenProcessToken -//sys OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) = advapi32.OpenThreadToken -//sys ImpersonateSelf(impersonationlevel uint32) (err error) = advapi32.ImpersonateSelf -//sys RevertToSelf() (err error) = advapi32.RevertToSelf -//sys SetThreadToken(thread *Handle, token Token) (err error) = advapi32.SetThreadToken -//sys LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) = advapi32.LookupPrivilegeValueW -//sys AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) = advapi32.AdjustTokenPrivileges -//sys AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) = advapi32.AdjustTokenGroups -//sys GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) = advapi32.GetTokenInformation -//sys SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) = advapi32.SetTokenInformation -//sys DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) = advapi32.DuplicateTokenEx -//sys GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) = userenv.GetUserProfileDirectoryW -//sys getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemDirectoryW -//sys getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetWindowsDirectoryW -//sys getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemWindowsDirectoryW - -// An access token contains the security information for a logon session. -// The system creates an access token when a user logs on, and every -// process executed on behalf of the user has a copy of the token. -// The token identifies the user, the user's groups, and the user's -// privileges. The system uses the token to control access to securable -// objects and to control the ability of the user to perform various -// system-related operations on the local computer. -type Token Handle - -// OpenCurrentProcessToken opens an access token associated with current -// process with TOKEN_QUERY access. It is a real token that needs to be closed. -// -// Deprecated: Explicitly call OpenProcessToken(CurrentProcess(), ...) -// with the desired access instead, or use GetCurrentProcessToken for a -// TOKEN_QUERY token. -func OpenCurrentProcessToken() (Token, error) { - var token Token - err := OpenProcessToken(CurrentProcess(), TOKEN_QUERY, &token) - return token, err -} - -// GetCurrentProcessToken returns the access token associated with -// the current process. 
It is a pseudo token that does not need -// to be closed. -func GetCurrentProcessToken() Token { - return Token(^uintptr(4 - 1)) -} - -// GetCurrentThreadToken return the access token associated with -// the current thread. It is a pseudo token that does not need -// to be closed. -func GetCurrentThreadToken() Token { - return Token(^uintptr(5 - 1)) -} - -// GetCurrentThreadEffectiveToken returns the effective access token -// associated with the current thread. It is a pseudo token that does -// not need to be closed. -func GetCurrentThreadEffectiveToken() Token { - return Token(^uintptr(6 - 1)) -} - -// Close releases access to access token. -func (t Token) Close() error { - return CloseHandle(Handle(t)) -} - -// getInfo retrieves a specified type of information about an access token. -func (t Token) getInfo(class uint32, initSize int) (unsafe.Pointer, error) { - n := uint32(initSize) - for { - b := make([]byte, n) - e := GetTokenInformation(t, class, &b[0], uint32(len(b)), &n) - if e == nil { - return unsafe.Pointer(&b[0]), nil - } - if e != ERROR_INSUFFICIENT_BUFFER { - return nil, e - } - if n <= uint32(len(b)) { - return nil, e - } - } -} - -// GetTokenUser retrieves access token t user account information. -func (t Token) GetTokenUser() (*Tokenuser, error) { - i, e := t.getInfo(TokenUser, 50) - if e != nil { - return nil, e - } - return (*Tokenuser)(i), nil -} - -// GetTokenGroups retrieves group accounts associated with access token t. -func (t Token) GetTokenGroups() (*Tokengroups, error) { - i, e := t.getInfo(TokenGroups, 50) - if e != nil { - return nil, e - } - return (*Tokengroups)(i), nil -} - -// GetTokenPrimaryGroup retrieves access token t primary group information. -// A pointer to a SID structure representing a group that will become -// the primary group of any objects created by a process using this access token. -func (t Token) GetTokenPrimaryGroup() (*Tokenprimarygroup, error) { - i, e := t.getInfo(TokenPrimaryGroup, 50) - if e != nil { - return nil, e - } - return (*Tokenprimarygroup)(i), nil -} - -// GetUserProfileDirectory retrieves path to the -// root directory of the access token t user's profile. -func (t Token) GetUserProfileDirectory() (string, error) { - n := uint32(100) - for { - b := make([]uint16, n) - e := GetUserProfileDirectory(t, &b[0], &n) - if e == nil { - return UTF16ToString(b), nil - } - if e != ERROR_INSUFFICIENT_BUFFER { - return "", e - } - if n <= uint32(len(b)) { - return "", e - } - } -} - -// IsElevated returns whether the current token is elevated from a UAC perspective. -func (token Token) IsElevated() bool { - var isElevated uint32 - var outLen uint32 - err := GetTokenInformation(token, TokenElevation, (*byte)(unsafe.Pointer(&isElevated)), uint32(unsafe.Sizeof(isElevated)), &outLen) - if err != nil { - return false - } - return outLen == uint32(unsafe.Sizeof(isElevated)) && isElevated != 0 -} - -// GetLinkedToken returns the linked token, which may be an elevated UAC token. -func (token Token) GetLinkedToken() (Token, error) { - var linkedToken Token - var outLen uint32 - err := GetTokenInformation(token, TokenLinkedToken, (*byte)(unsafe.Pointer(&linkedToken)), uint32(unsafe.Sizeof(linkedToken)), &outLen) - if err != nil { - return Token(0), err - } - return linkedToken, nil -} - -// GetSystemDirectory retrieves the path to current location of the system -// directory, which is typically, though not always, `C:\Windows\System32`. 
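The three directory helpers implemented below differ only in which Win32 API they wrap; a sketch of calling them side by side (Windows-only):

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/windows"
    )

    func main() {
    	system, err := windows.GetSystemDirectory() // e.g. C:\Windows\system32
    	if err != nil {
    		panic(err)
    	}
    	win, err := windows.GetWindowsDirectory() // may be per-user under a terminal server
    	if err != nil {
    		panic(err)
    	}
    	shared, err := windows.GetSystemWindowsDirectory() // always the shared Windows directory
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(system, win, shared)
    }
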
-func GetSystemDirectory() (string, error) { - n := uint32(MAX_PATH) - for { - b := make([]uint16, n) - l, e := getSystemDirectory(&b[0], n) - if e != nil { - return "", e - } - if l <= n { - return UTF16ToString(b[:l]), nil - } - n = l - } -} - -// GetWindowsDirectory retrieves the path to current location of the Windows -// directory, which is typically, though not always, `C:\Windows`. This may -// be a private user directory in the case that the application is running -// under a terminal server. -func GetWindowsDirectory() (string, error) { - n := uint32(MAX_PATH) - for { - b := make([]uint16, n) - l, e := getWindowsDirectory(&b[0], n) - if e != nil { - return "", e - } - if l <= n { - return UTF16ToString(b[:l]), nil - } - n = l - } -} - -// GetSystemWindowsDirectory retrieves the path to current location of the -// Windows directory, which is typically, though not always, `C:\Windows`. -func GetSystemWindowsDirectory() (string, error) { - n := uint32(MAX_PATH) - for { - b := make([]uint16, n) - l, e := getSystemWindowsDirectory(&b[0], n) - if e != nil { - return "", e - } - if l <= n { - return UTF16ToString(b[:l]), nil - } - n = l - } -} - -// IsMember reports whether the access token t is a member of the provided SID. -func (t Token) IsMember(sid *SID) (bool, error) { - var b int32 - if e := checkTokenMembership(t, sid, &b); e != nil { - return false, e - } - return b != 0, nil -} - -const ( - WTS_CONSOLE_CONNECT = 0x1 - WTS_CONSOLE_DISCONNECT = 0x2 - WTS_REMOTE_CONNECT = 0x3 - WTS_REMOTE_DISCONNECT = 0x4 - WTS_SESSION_LOGON = 0x5 - WTS_SESSION_LOGOFF = 0x6 - WTS_SESSION_LOCK = 0x7 - WTS_SESSION_UNLOCK = 0x8 - WTS_SESSION_REMOTE_CONTROL = 0x9 - WTS_SESSION_CREATE = 0xa - WTS_SESSION_TERMINATE = 0xb -) - -const ( - WTSActive = 0 - WTSConnected = 1 - WTSConnectQuery = 2 - WTSShadow = 3 - WTSDisconnected = 4 - WTSIdle = 5 - WTSListen = 6 - WTSReset = 7 - WTSDown = 8 - WTSInit = 9 -) - -type WTSSESSION_NOTIFICATION struct { - Size uint32 - SessionID uint32 -} - -type WTS_SESSION_INFO struct { - SessionID uint32 - WindowStationName *uint16 - State uint32 -} - -//sys WTSQueryUserToken(session uint32, token *Token) (err error) = wtsapi32.WTSQueryUserToken -//sys WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) = wtsapi32.WTSEnumerateSessionsW -//sys WTSFreeMemory(ptr uintptr) = wtsapi32.WTSFreeMemory - -type ACL struct { - aclRevision byte - sbz1 byte - aclSize uint16 - aceCount uint16 - sbz2 uint16 -} - -type SECURITY_DESCRIPTOR struct { - revision byte - sbz1 byte - control SECURITY_DESCRIPTOR_CONTROL - owner *SID - group *SID - sacl *ACL - dacl *ACL -} - -type SecurityAttributes struct { - Length uint32 - SecurityDescriptor *SECURITY_DESCRIPTOR - InheritHandle uint32 -} - -type SE_OBJECT_TYPE uint32 - -// Constants for type SE_OBJECT_TYPE -const ( - SE_UNKNOWN_OBJECT_TYPE = 0 - SE_FILE_OBJECT = 1 - SE_SERVICE = 2 - SE_PRINTER = 3 - SE_REGISTRY_KEY = 4 - SE_LMSHARE = 5 - SE_KERNEL_OBJECT = 6 - SE_WINDOW_OBJECT = 7 - SE_DS_OBJECT = 8 - SE_DS_OBJECT_ALL = 9 - SE_PROVIDER_DEFINED_OBJECT = 10 - SE_WMIGUID_OBJECT = 11 - SE_REGISTRY_WOW64_32KEY = 12 - SE_REGISTRY_WOW64_64KEY = 13 -) - -type SECURITY_INFORMATION uint32 - -// Constants for type SECURITY_INFORMATION -const ( - OWNER_SECURITY_INFORMATION = 0x00000001 - GROUP_SECURITY_INFORMATION = 0x00000002 - DACL_SECURITY_INFORMATION = 0x00000004 - SACL_SECURITY_INFORMATION = 0x00000008 - LABEL_SECURITY_INFORMATION = 0x00000010 - ATTRIBUTE_SECURITY_INFORMATION = 
0x00000020 - SCOPE_SECURITY_INFORMATION = 0x00000040 - BACKUP_SECURITY_INFORMATION = 0x00010000 - PROTECTED_DACL_SECURITY_INFORMATION = 0x80000000 - PROTECTED_SACL_SECURITY_INFORMATION = 0x40000000 - UNPROTECTED_DACL_SECURITY_INFORMATION = 0x20000000 - UNPROTECTED_SACL_SECURITY_INFORMATION = 0x10000000 -) - -type SECURITY_DESCRIPTOR_CONTROL uint16 - -// Constants for type SECURITY_DESCRIPTOR_CONTROL -const ( - SE_OWNER_DEFAULTED = 0x0001 - SE_GROUP_DEFAULTED = 0x0002 - SE_DACL_PRESENT = 0x0004 - SE_DACL_DEFAULTED = 0x0008 - SE_SACL_PRESENT = 0x0010 - SE_SACL_DEFAULTED = 0x0020 - SE_DACL_AUTO_INHERIT_REQ = 0x0100 - SE_SACL_AUTO_INHERIT_REQ = 0x0200 - SE_DACL_AUTO_INHERITED = 0x0400 - SE_SACL_AUTO_INHERITED = 0x0800 - SE_DACL_PROTECTED = 0x1000 - SE_SACL_PROTECTED = 0x2000 - SE_RM_CONTROL_VALID = 0x4000 - SE_SELF_RELATIVE = 0x8000 -) - -type ACCESS_MASK uint32 - -// Constants for type ACCESS_MASK -const ( - DELETE = 0x00010000 - READ_CONTROL = 0x00020000 - WRITE_DAC = 0x00040000 - WRITE_OWNER = 0x00080000 - SYNCHRONIZE = 0x00100000 - STANDARD_RIGHTS_REQUIRED = 0x000F0000 - STANDARD_RIGHTS_READ = READ_CONTROL - STANDARD_RIGHTS_WRITE = READ_CONTROL - STANDARD_RIGHTS_EXECUTE = READ_CONTROL - STANDARD_RIGHTS_ALL = 0x001F0000 - SPECIFIC_RIGHTS_ALL = 0x0000FFFF - ACCESS_SYSTEM_SECURITY = 0x01000000 - MAXIMUM_ALLOWED = 0x02000000 - GENERIC_READ = 0x80000000 - GENERIC_WRITE = 0x40000000 - GENERIC_EXECUTE = 0x20000000 - GENERIC_ALL = 0x10000000 -) - -type ACCESS_MODE uint32 - -// Constants for type ACCESS_MODE -const ( - NOT_USED_ACCESS = 0 - GRANT_ACCESS = 1 - SET_ACCESS = 2 - DENY_ACCESS = 3 - REVOKE_ACCESS = 4 - SET_AUDIT_SUCCESS = 5 - SET_AUDIT_FAILURE = 6 -) - -// Constants for AceFlags and Inheritance fields -const ( - NO_INHERITANCE = 0x0 - SUB_OBJECTS_ONLY_INHERIT = 0x1 - SUB_CONTAINERS_ONLY_INHERIT = 0x2 - SUB_CONTAINERS_AND_OBJECTS_INHERIT = 0x3 - INHERIT_NO_PROPAGATE = 0x4 - INHERIT_ONLY = 0x8 - INHERITED_ACCESS_ENTRY = 0x10 - INHERITED_PARENT = 0x10000000 - INHERITED_GRANDPARENT = 0x20000000 - OBJECT_INHERIT_ACE = 0x1 - CONTAINER_INHERIT_ACE = 0x2 - NO_PROPAGATE_INHERIT_ACE = 0x4 - INHERIT_ONLY_ACE = 0x8 - INHERITED_ACE = 0x10 - VALID_INHERIT_FLAGS = 0x1F -) - -type MULTIPLE_TRUSTEE_OPERATION uint32 - -// Constants for MULTIPLE_TRUSTEE_OPERATION -const ( - NO_MULTIPLE_TRUSTEE = 0 - TRUSTEE_IS_IMPERSONATE = 1 -) - -type TRUSTEE_FORM uint32 - -// Constants for TRUSTEE_FORM -const ( - TRUSTEE_IS_SID = 0 - TRUSTEE_IS_NAME = 1 - TRUSTEE_BAD_FORM = 2 - TRUSTEE_IS_OBJECTS_AND_SID = 3 - TRUSTEE_IS_OBJECTS_AND_NAME = 4 -) - -type TRUSTEE_TYPE uint32 - -// Constants for TRUSTEE_TYPE -const ( - TRUSTEE_IS_UNKNOWN = 0 - TRUSTEE_IS_USER = 1 - TRUSTEE_IS_GROUP = 2 - TRUSTEE_IS_DOMAIN = 3 - TRUSTEE_IS_ALIAS = 4 - TRUSTEE_IS_WELL_KNOWN_GROUP = 5 - TRUSTEE_IS_DELETED = 6 - TRUSTEE_IS_INVALID = 7 - TRUSTEE_IS_COMPUTER = 8 -) - -// Constants for ObjectsPresent field -const ( - ACE_OBJECT_TYPE_PRESENT = 0x1 - ACE_INHERITED_OBJECT_TYPE_PRESENT = 0x2 -) - -type EXPLICIT_ACCESS struct { - AccessPermissions ACCESS_MASK - AccessMode ACCESS_MODE - Inheritance uint32 - Trustee TRUSTEE -} - -// This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions. 
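A sketch of populating the TRUSTEE union above through TrusteeValueFromSID (defined just below), granting Everyone generic read (illustrative only; Windows-only):

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/windows"
    )

    func main() {
    	everyone, err := windows.CreateWellKnownSid(windows.WinWorldSid) // Everyone (S-1-1-0)
    	if err != nil {
    		panic(err)
    	}
    	ea := windows.EXPLICIT_ACCESS{
    		AccessPermissions: windows.GENERIC_READ,
    		AccessMode:        windows.GRANT_ACCESS,
    		Inheritance:       windows.NO_INHERITANCE,
    		Trustee: windows.TRUSTEE{
    			TrusteeForm:  windows.TRUSTEE_IS_SID,
    			TrusteeType:  windows.TRUSTEE_IS_WELL_KNOWN_GROUP,
    			TrusteeValue: windows.TrusteeValueFromSID(everyone),
    		},
    	}
    	fmt.Printf("grant %#x to %s\n", ea.AccessPermissions, everyone)
    }

Entries of this shape are what the SetEntriesInAclW binding declared further below consumes when synthesizing a new ACL from an old one.
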
-type TrusteeValue uintptr - -func TrusteeValueFromString(str string) TrusteeValue { - return TrusteeValue(unsafe.Pointer(StringToUTF16Ptr(str))) -} -func TrusteeValueFromSID(sid *SID) TrusteeValue { - return TrusteeValue(unsafe.Pointer(sid)) -} -func TrusteeValueFromObjectsAndSid(objectsAndSid *OBJECTS_AND_SID) TrusteeValue { - return TrusteeValue(unsafe.Pointer(objectsAndSid)) -} -func TrusteeValueFromObjectsAndName(objectsAndName *OBJECTS_AND_NAME) TrusteeValue { - return TrusteeValue(unsafe.Pointer(objectsAndName)) -} - -type TRUSTEE struct { - MultipleTrustee *TRUSTEE - MultipleTrusteeOperation MULTIPLE_TRUSTEE_OPERATION - TrusteeForm TRUSTEE_FORM - TrusteeType TRUSTEE_TYPE - TrusteeValue TrusteeValue -} - -type OBJECTS_AND_SID struct { - ObjectsPresent uint32 - ObjectTypeGuid GUID - InheritedObjectTypeGuid GUID - Sid *SID -} - -type OBJECTS_AND_NAME struct { - ObjectsPresent uint32 - ObjectType SE_OBJECT_TYPE - ObjectTypeName *uint16 - InheritedObjectTypeName *uint16 - Name *uint16 -} - -//sys getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) = advapi32.GetSecurityInfo -//sys SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) = advapi32.SetSecurityInfo -//sys getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) = advapi32.GetNamedSecurityInfoW -//sys SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) = advapi32.SetNamedSecurityInfoW - -//sys buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) = advapi32.BuildSecurityDescriptorW -//sys initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) = advapi32.InitializeSecurityDescriptor - -//sys getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) = advapi32.GetSecurityDescriptorControl -//sys getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorDacl -//sys getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorSacl -//sys getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorOwner -//sys getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorGroup -//sys getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) = advapi32.GetSecurityDescriptorLength -//sys getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) [failretval!=0] = advapi32.GetSecurityDescriptorRMControl -//sys isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) = advapi32.IsValidSecurityDescriptor - -//sys setSecurityDescriptorControl(sd 
*SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) = advapi32.SetSecurityDescriptorControl -//sys setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) = advapi32.SetSecurityDescriptorDacl -//sys setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *ACL, saclDefaulted bool) (err error) = advapi32.SetSecurityDescriptorSacl -//sys setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) = advapi32.SetSecurityDescriptorOwner -//sys setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) = advapi32.SetSecurityDescriptorGroup -//sys setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) = advapi32.SetSecurityDescriptorRMControl - -//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW -//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW - -//sys makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) = advapi32.MakeAbsoluteSD -//sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD - -//sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW - -// Control returns the security descriptor control bits. -func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { - err = getSecurityDescriptorControl(sd, &control, &revision) - return -} - -// SetControl sets the security descriptor control bits. -func (sd *SECURITY_DESCRIPTOR) SetControl(controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) error { - return setSecurityDescriptorControl(sd, controlBitsOfInterest, controlBitsToSet) -} - -// RMControl returns the security descriptor resource manager control bits. -func (sd *SECURITY_DESCRIPTOR) RMControl() (control uint8, err error) { - err = getSecurityDescriptorRMControl(sd, &control) - return -} - -// SetRMControl sets the security descriptor resource manager control bits. -func (sd *SECURITY_DESCRIPTOR) SetRMControl(rmControl uint8) { - setSecurityDescriptorRMControl(sd, &rmControl) -} - -// DACL returns the security descriptor DACL and whether it was defaulted. The dacl return value may be nil -// if a DACL exists but is an "empty DACL", meaning fully permissive. If the DACL does not exist, err returns -// ERROR_OBJECT_NOT_FOUND. -func (sd *SECURITY_DESCRIPTOR) DACL() (dacl *ACL, defaulted bool, err error) { - var present bool - err = getSecurityDescriptorDacl(sd, &present, &dacl, &defaulted) - if !present { - err = ERROR_OBJECT_NOT_FOUND - } - return -} - -// SetDACL sets the absolute security descriptor DACL. 
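A sketch tying together SecurityDescriptorFromString and the accessors defined below via an SDDL round trip (Windows-only; in the SDDL string, O:BA sets the owner to BUILTIN\Administrators and the single ACE grants Everyone generic read):

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/windows"
    )

    func main() {
    	// O:BA -> owner BUILTIN\Administrators; (A;;GR;;;WD) -> allow
    	// Everyone (WD) generic read (GR).
    	sd, err := windows.SecurityDescriptorFromString("O:BAD:(A;;GR;;;WD)")
    	if err != nil {
    		panic(err)
    	}
    	owner, defaulted, err := sd.Owner()
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("owner:", owner, "defaulted:", defaulted) // owner: S-1-5-32-544
    	fmt.Println("sddl :", sd)                             // normalized SDDL via String()
    }
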
-func (absoluteSD *SECURITY_DESCRIPTOR) SetDACL(dacl *ACL, present, defaulted bool) error { - return setSecurityDescriptorDacl(absoluteSD, present, dacl, defaulted) -} - -// SACL returns the security descriptor SACL and whether it was defaulted. The sacl return value may be nil -// if a SACL exists but is an "empty SACL", meaning fully permissive. If the SACL does not exist, err returns -// ERROR_OBJECT_NOT_FOUND. -func (sd *SECURITY_DESCRIPTOR) SACL() (sacl *ACL, defaulted bool, err error) { - var present bool - err = getSecurityDescriptorSacl(sd, &present, &sacl, &defaulted) - if !present { - err = ERROR_OBJECT_NOT_FOUND - } - return -} - -// SetSACL sets the absolute security descriptor SACL. -func (absoluteSD *SECURITY_DESCRIPTOR) SetSACL(sacl *ACL, present, defaulted bool) error { - return setSecurityDescriptorSacl(absoluteSD, present, sacl, defaulted) -} - -// Owner returns the security descriptor owner and whether it was defaulted. -func (sd *SECURITY_DESCRIPTOR) Owner() (owner *SID, defaulted bool, err error) { - err = getSecurityDescriptorOwner(sd, &owner, &defaulted) - return -} - -// SetOwner sets the absolute security descriptor owner. -func (absoluteSD *SECURITY_DESCRIPTOR) SetOwner(owner *SID, defaulted bool) error { - return setSecurityDescriptorOwner(absoluteSD, owner, defaulted) -} - -// Group returns the security descriptor group and whether it was defaulted. -func (sd *SECURITY_DESCRIPTOR) Group() (group *SID, defaulted bool, err error) { - err = getSecurityDescriptorGroup(sd, &group, &defaulted) - return -} - -// SetGroup sets the absolute security descriptor group. -func (absoluteSD *SECURITY_DESCRIPTOR) SetGroup(group *SID, defaulted bool) error { - return setSecurityDescriptorGroup(absoluteSD, group, defaulted) -} - -// Length returns the length of the security descriptor. -func (sd *SECURITY_DESCRIPTOR) Length() uint32 { - return getSecurityDescriptorLength(sd) -} - -// IsValid returns whether the security descriptor is valid. -func (sd *SECURITY_DESCRIPTOR) IsValid() bool { - return isValidSecurityDescriptor(sd) -} - -// String returns the SDDL form of the security descriptor, with a function signature that can be -// used with %v formatting directives. -func (sd *SECURITY_DESCRIPTOR) String() string { - var sddl *uint16 - err := convertSecurityDescriptorToStringSecurityDescriptor(sd, 1, 0xff, &sddl, nil) - if err != nil { - return "" - } - defer LocalFree(Handle(unsafe.Pointer(sddl))) - return UTF16PtrToString(sddl) -} - -// ToAbsolute converts a self-relative security descriptor into an absolute one. -func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DESCRIPTOR, err error) { - control, _, err := selfRelativeSD.Control() - if err != nil { - return - } - if control&SE_SELF_RELATIVE == 0 { - err = ERROR_INVALID_PARAMETER - return - } - var absoluteSDSize, daclSize, saclSize, ownerSize, groupSize uint32 - err = makeAbsoluteSD(selfRelativeSD, nil, &absoluteSDSize, - nil, &daclSize, nil, &saclSize, nil, &ownerSize, nil, &groupSize) - switch err { - case ERROR_INSUFFICIENT_BUFFER: - case nil: - // makeAbsoluteSD is expected to fail, but it succeeds.
- return nil, ERROR_INTERNAL_ERROR - default: - return nil, err - } - if absoluteSDSize > 0 { - absoluteSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, absoluteSDSize)[0])) - } - var ( - dacl *ACL - sacl *ACL - owner *SID - group *SID - ) - if daclSize > 0 { - dacl = (*ACL)(unsafe.Pointer(&make([]byte, daclSize)[0])) - } - if saclSize > 0 { - sacl = (*ACL)(unsafe.Pointer(&make([]byte, saclSize)[0])) - } - if ownerSize > 0 { - owner = (*SID)(unsafe.Pointer(&make([]byte, ownerSize)[0])) - } - if groupSize > 0 { - group = (*SID)(unsafe.Pointer(&make([]byte, groupSize)[0])) - } - err = makeAbsoluteSD(selfRelativeSD, absoluteSD, &absoluteSDSize, - dacl, &daclSize, sacl, &saclSize, owner, &ownerSize, group, &groupSize) - return -} - -// ToSelfRelative converts an absolute security descriptor into a self-relative one. -func (absoluteSD *SECURITY_DESCRIPTOR) ToSelfRelative() (selfRelativeSD *SECURITY_DESCRIPTOR, err error) { - control, _, err := absoluteSD.Control() - if err != nil { - return - } - if control&SE_SELF_RELATIVE != 0 { - err = ERROR_INVALID_PARAMETER - return - } - var selfRelativeSDSize uint32 - err = makeSelfRelativeSD(absoluteSD, nil, &selfRelativeSDSize) - switch err { - case ERROR_INSUFFICIENT_BUFFER: - case nil: - // makeSelfRelativeSD is expected to fail, but it succeeds. - return nil, ERROR_INTERNAL_ERROR - default: - return nil, err - } - if selfRelativeSDSize > 0 { - selfRelativeSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, selfRelativeSDSize)[0])) - } - err = makeSelfRelativeSD(absoluteSD, selfRelativeSD, &selfRelativeSDSize) - return -} - -func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() *SECURITY_DESCRIPTOR { - sdLen := (int)(selfRelativeSD.Length()) - - var src []byte - h := (*unsafeheader.Slice)(unsafe.Pointer(&src)) - h.Data = unsafe.Pointer(selfRelativeSD) - h.Len = sdLen - h.Cap = sdLen - - dst := make([]byte, sdLen) - copy(dst, src) - return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&dst[0])) -} - -// SecurityDescriptorFromString converts an SDDL string describing a security descriptor into a -// self-relative security descriptor object allocated on the Go heap. -func SecurityDescriptorFromString(sddl string) (sd *SECURITY_DESCRIPTOR, err error) { - var winHeapSD *SECURITY_DESCRIPTOR - err = convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &winHeapSD, nil) - if err != nil { - return - } - defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) - return winHeapSD.copySelfRelativeSecurityDescriptor(), nil -} - -// GetSecurityInfo queries the security information for a given handle and returns the self-relative security -// descriptor result on the Go heap. -func GetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION) (sd *SECURITY_DESCRIPTOR, err error) { - var winHeapSD *SECURITY_DESCRIPTOR - err = getSecurityInfo(handle, objectType, securityInformation, nil, nil, nil, nil, &winHeapSD) - if err != nil { - return - } - defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) - return winHeapSD.copySelfRelativeSecurityDescriptor(), nil -} - -// GetNamedSecurityInfo queries the security information for a given named object and returns the self-relative security -// descriptor result on the Go heap. 
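
The wrappers above share one memory-management pattern worth noting: the Win32 call allocates the descriptor on the Windows heap, copySelfRelativeSecurityDescriptor clones it onto the Go heap, and the deferred LocalFree releases the original, so callers never manage native memory. A sketch of the intended call site, assuming a direct import of golang.org/x/sys/windows (the file path is hypothetical):

// +build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	sd, err := windows.GetNamedSecurityInfo(
		`C:\Windows\notepad.exe`, // hypothetical object name
		windows.SE_FILE_OBJECT,
		windows.OWNER_SECURITY_INFORMATION|windows.DACL_SECURITY_INFORMATION,
	)
	if err != nil {
		panic(err)
	}
	// The result lives on the Go heap; no explicit free is needed,
	// and printing uses the %v-friendly String() method above.
	fmt.Println(sd)
}
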
-func GetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION) (sd *SECURITY_DESCRIPTOR, err error) { - var winHeapSD *SECURITY_DESCRIPTOR - err = getNamedSecurityInfo(objectName, objectType, securityInformation, nil, nil, nil, nil, &winHeapSD) - if err != nil { - return - } - defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) - return winHeapSD.copySelfRelativeSecurityDescriptor(), nil -} - -// BuildSecurityDescriptor makes a new security descriptor using the input trustees, explicit access lists, and -// prior security descriptor to be merged, any of which can be nil, returning the self-relative security descriptor -// result on the Go heap. -func BuildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, accessEntries []EXPLICIT_ACCESS, auditEntries []EXPLICIT_ACCESS, mergedSecurityDescriptor *SECURITY_DESCRIPTOR) (sd *SECURITY_DESCRIPTOR, err error) { - var winHeapSD *SECURITY_DESCRIPTOR - var winHeapSDSize uint32 - var firstAccessEntry *EXPLICIT_ACCESS - if len(accessEntries) > 0 { - firstAccessEntry = &accessEntries[0] - } - var firstAuditEntry *EXPLICIT_ACCESS - if len(auditEntries) > 0 { - firstAuditEntry = &auditEntries[0] - } - err = buildSecurityDescriptor(owner, group, uint32(len(accessEntries)), firstAccessEntry, uint32(len(auditEntries)), firstAuditEntry, mergedSecurityDescriptor, &winHeapSDSize, &winHeapSD) - if err != nil { - return - } - defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) - return winHeapSD.copySelfRelativeSecurityDescriptor(), nil -} - -// NewSecurityDescriptor creates and initializes a new absolute security descriptor. -func NewSecurityDescriptor() (absoluteSD *SECURITY_DESCRIPTOR, err error) { - absoluteSD = &SECURITY_DESCRIPTOR{} - err = initializeSecurityDescriptor(absoluteSD, 1) - return -} - -// ACLFromEntries returns a new ACL on the Go heap containing a list of explicit entries as well as those of another ACL. -// Both explicitEntries and mergedACL are optional and can be nil. -func ACLFromEntries(explicitEntries []EXPLICIT_ACCESS, mergedACL *ACL) (acl *ACL, err error) { - var firstExplicitEntry *EXPLICIT_ACCESS - if len(explicitEntries) > 0 { - firstExplicitEntry = &explicitEntries[0] - } - var winHeapACL *ACL - err = setEntriesInAcl(uint32(len(explicitEntries)), firstExplicitEntry, mergedACL, &winHeapACL) - if err != nil { - return - } - defer LocalFree(Handle(unsafe.Pointer(winHeapACL))) - aclBytes := make([]byte, winHeapACL.aclSize) - copy(aclBytes, (*[(1 << 31) - 1]byte)(unsafe.Pointer(winHeapACL))[:len(aclBytes):len(aclBytes)]) - return (*ACL)(unsafe.Pointer(&aclBytes[0])), nil -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/service.go b/awsproviderlint/vendor/golang.org/x/sys/windows/service.go deleted file mode 100644 index f54ff90aacd..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/windows/service.go +++ /dev/null @@ -1,231 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
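
Before the service-manager bindings below, one more note on the ACL helpers deleted above: ACLFromEntries wraps SetEntriesInAclW and hands back a Go-heap copy, so it composes directly with NewSecurityDescriptor and SetDACL. A sketch under the assumption that the package's CreateWellKnownSid helper and EXPLICIT_ACCESS layout are as in x/sys/windows (the Everyone grant is illustrative):

// +build windows

package main

import (
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	everyone, err := windows.CreateWellKnownSid(windows.WinWorldSid)
	if err != nil {
		log.Fatal(err)
	}
	// One explicit entry: grant Everyone generic read.
	entries := []windows.EXPLICIT_ACCESS{{
		AccessPermissions: windows.GENERIC_READ,
		AccessMode:        windows.GRANT_ACCESS,
		Inheritance:       windows.NO_INHERITANCE,
		Trustee: windows.TRUSTEE{
			TrusteeForm:  windows.TRUSTEE_IS_SID,
			TrusteeType:  windows.TRUSTEE_IS_WELL_KNOWN_GROUP,
			TrusteeValue: windows.TrusteeValueFromSID(everyone),
		},
	}}
	acl, err := windows.ACLFromEntries(entries, nil) // merged ACL, copied to the Go heap
	if err != nil {
		log.Fatal(err)
	}
	sd, err := windows.NewSecurityDescriptor()
	if err != nil {
		log.Fatal(err)
	}
	if err := sd.SetDACL(acl, true, false); err != nil { // present, not defaulted
		log.Fatal(err)
	}
}
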
- -// +build windows - -package windows - -const ( - SC_MANAGER_CONNECT = 1 - SC_MANAGER_CREATE_SERVICE = 2 - SC_MANAGER_ENUMERATE_SERVICE = 4 - SC_MANAGER_LOCK = 8 - SC_MANAGER_QUERY_LOCK_STATUS = 16 - SC_MANAGER_MODIFY_BOOT_CONFIG = 32 - SC_MANAGER_ALL_ACCESS = 0xf003f -) - -//sys OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenSCManagerW - -const ( - SERVICE_KERNEL_DRIVER = 1 - SERVICE_FILE_SYSTEM_DRIVER = 2 - SERVICE_ADAPTER = 4 - SERVICE_RECOGNIZER_DRIVER = 8 - SERVICE_WIN32_OWN_PROCESS = 16 - SERVICE_WIN32_SHARE_PROCESS = 32 - SERVICE_WIN32 = SERVICE_WIN32_OWN_PROCESS | SERVICE_WIN32_SHARE_PROCESS - SERVICE_INTERACTIVE_PROCESS = 256 - SERVICE_DRIVER = SERVICE_KERNEL_DRIVER | SERVICE_FILE_SYSTEM_DRIVER | SERVICE_RECOGNIZER_DRIVER - SERVICE_TYPE_ALL = SERVICE_WIN32 | SERVICE_ADAPTER | SERVICE_DRIVER | SERVICE_INTERACTIVE_PROCESS - - SERVICE_BOOT_START = 0 - SERVICE_SYSTEM_START = 1 - SERVICE_AUTO_START = 2 - SERVICE_DEMAND_START = 3 - SERVICE_DISABLED = 4 - - SERVICE_ERROR_IGNORE = 0 - SERVICE_ERROR_NORMAL = 1 - SERVICE_ERROR_SEVERE = 2 - SERVICE_ERROR_CRITICAL = 3 - - SC_STATUS_PROCESS_INFO = 0 - - SC_ACTION_NONE = 0 - SC_ACTION_RESTART = 1 - SC_ACTION_REBOOT = 2 - SC_ACTION_RUN_COMMAND = 3 - - SERVICE_STOPPED = 1 - SERVICE_START_PENDING = 2 - SERVICE_STOP_PENDING = 3 - SERVICE_RUNNING = 4 - SERVICE_CONTINUE_PENDING = 5 - SERVICE_PAUSE_PENDING = 6 - SERVICE_PAUSED = 7 - SERVICE_NO_CHANGE = 0xffffffff - - SERVICE_ACCEPT_STOP = 1 - SERVICE_ACCEPT_PAUSE_CONTINUE = 2 - SERVICE_ACCEPT_SHUTDOWN = 4 - SERVICE_ACCEPT_PARAMCHANGE = 8 - SERVICE_ACCEPT_NETBINDCHANGE = 16 - SERVICE_ACCEPT_HARDWAREPROFILECHANGE = 32 - SERVICE_ACCEPT_POWEREVENT = 64 - SERVICE_ACCEPT_SESSIONCHANGE = 128 - SERVICE_ACCEPT_PRESHUTDOWN = 256 - - SERVICE_CONTROL_STOP = 1 - SERVICE_CONTROL_PAUSE = 2 - SERVICE_CONTROL_CONTINUE = 3 - SERVICE_CONTROL_INTERROGATE = 4 - SERVICE_CONTROL_SHUTDOWN = 5 - SERVICE_CONTROL_PARAMCHANGE = 6 - SERVICE_CONTROL_NETBINDADD = 7 - SERVICE_CONTROL_NETBINDREMOVE = 8 - SERVICE_CONTROL_NETBINDENABLE = 9 - SERVICE_CONTROL_NETBINDDISABLE = 10 - SERVICE_CONTROL_DEVICEEVENT = 11 - SERVICE_CONTROL_HARDWAREPROFILECHANGE = 12 - SERVICE_CONTROL_POWEREVENT = 13 - SERVICE_CONTROL_SESSIONCHANGE = 14 - SERVICE_CONTROL_PRESHUTDOWN = 15 - - SERVICE_ACTIVE = 1 - SERVICE_INACTIVE = 2 - SERVICE_STATE_ALL = 3 - - SERVICE_QUERY_CONFIG = 1 - SERVICE_CHANGE_CONFIG = 2 - SERVICE_QUERY_STATUS = 4 - SERVICE_ENUMERATE_DEPENDENTS = 8 - SERVICE_START = 16 - SERVICE_STOP = 32 - SERVICE_PAUSE_CONTINUE = 64 - SERVICE_INTERROGATE = 128 - SERVICE_USER_DEFINED_CONTROL = 256 - SERVICE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SERVICE_QUERY_CONFIG | SERVICE_CHANGE_CONFIG | SERVICE_QUERY_STATUS | SERVICE_ENUMERATE_DEPENDENTS | SERVICE_START | SERVICE_STOP | SERVICE_PAUSE_CONTINUE | SERVICE_INTERROGATE | SERVICE_USER_DEFINED_CONTROL - - SERVICE_RUNS_IN_SYSTEM_PROCESS = 1 - - SERVICE_CONFIG_DESCRIPTION = 1 - SERVICE_CONFIG_FAILURE_ACTIONS = 2 - SERVICE_CONFIG_DELAYED_AUTO_START_INFO = 3 - SERVICE_CONFIG_FAILURE_ACTIONS_FLAG = 4 - SERVICE_CONFIG_SERVICE_SID_INFO = 5 - SERVICE_CONFIG_REQUIRED_PRIVILEGES_INFO = 6 - SERVICE_CONFIG_PRESHUTDOWN_INFO = 7 - SERVICE_CONFIG_TRIGGER_INFO = 8 - SERVICE_CONFIG_PREFERRED_NODE = 9 - SERVICE_CONFIG_LAUNCH_PROTECTED = 12 - - SERVICE_SID_TYPE_NONE = 0 - SERVICE_SID_TYPE_UNRESTRICTED = 1 - SERVICE_SID_TYPE_RESTRICTED = 2 | SERVICE_SID_TYPE_UNRESTRICTED - - SC_ENUM_PROCESS_INFO = 0 - - SERVICE_NOTIFY_STATUS_CHANGE = 2 - 
SERVICE_NOTIFY_STOPPED = 0x00000001 - SERVICE_NOTIFY_START_PENDING = 0x00000002 - SERVICE_NOTIFY_STOP_PENDING = 0x00000004 - SERVICE_NOTIFY_RUNNING = 0x00000008 - SERVICE_NOTIFY_CONTINUE_PENDING = 0x00000010 - SERVICE_NOTIFY_PAUSE_PENDING = 0x00000020 - SERVICE_NOTIFY_PAUSED = 0x00000040 - SERVICE_NOTIFY_CREATED = 0x00000080 - SERVICE_NOTIFY_DELETED = 0x00000100 - SERVICE_NOTIFY_DELETE_PENDING = 0x00000200 -) - -type SERVICE_STATUS struct { - ServiceType uint32 - CurrentState uint32 - ControlsAccepted uint32 - Win32ExitCode uint32 - ServiceSpecificExitCode uint32 - CheckPoint uint32 - WaitHint uint32 -} - -type SERVICE_TABLE_ENTRY struct { - ServiceName *uint16 - ServiceProc uintptr -} - -type QUERY_SERVICE_CONFIG struct { - ServiceType uint32 - StartType uint32 - ErrorControl uint32 - BinaryPathName *uint16 - LoadOrderGroup *uint16 - TagId uint32 - Dependencies *uint16 - ServiceStartName *uint16 - DisplayName *uint16 -} - -type SERVICE_DESCRIPTION struct { - Description *uint16 -} - -type SERVICE_DELAYED_AUTO_START_INFO struct { - IsDelayedAutoStartUp uint32 -} - -type SERVICE_STATUS_PROCESS struct { - ServiceType uint32 - CurrentState uint32 - ControlsAccepted uint32 - Win32ExitCode uint32 - ServiceSpecificExitCode uint32 - CheckPoint uint32 - WaitHint uint32 - ProcessId uint32 - ServiceFlags uint32 -} - -type ENUM_SERVICE_STATUS_PROCESS struct { - ServiceName *uint16 - DisplayName *uint16 - ServiceStatusProcess SERVICE_STATUS_PROCESS -} - -type SERVICE_NOTIFY struct { - Version uint32 - NotifyCallback uintptr - Context uintptr - NotificationStatus uint32 - ServiceStatus SERVICE_STATUS_PROCESS - NotificationTriggered uint32 - ServiceNames *uint16 -} - -type SERVICE_FAILURE_ACTIONS struct { - ResetPeriod uint32 - RebootMsg *uint16 - Command *uint16 - ActionsCount uint32 - Actions *SC_ACTION -} - -type SC_ACTION struct { - Type uint32 - Delay uint32 -} - -type QUERY_SERVICE_LOCK_STATUS struct { - IsLocked uint32 - LockOwner *uint16 - LockDuration uint32 -} - -//sys CloseServiceHandle(handle Handle) (err error) = advapi32.CloseServiceHandle -//sys CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) [failretval==0] = advapi32.CreateServiceW -//sys OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenServiceW -//sys DeleteService(service Handle) (err error) = advapi32.DeleteService -//sys StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) = advapi32.StartServiceW -//sys QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) = advapi32.QueryServiceStatus -//sys QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceLockStatusW -//sys ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) = advapi32.ControlService -//sys StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) = advapi32.StartServiceCtrlDispatcherW -//sys SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) = advapi32.SetServiceStatus -//sys ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName 
*uint16, password *uint16, displayName *uint16) (err error) = advapi32.ChangeServiceConfigW -//sys QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfigW -//sys ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) = advapi32.ChangeServiceConfig2W -//sys QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfig2W -//sys EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) = advapi32.EnumServicesStatusExW -//sys QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceStatusEx -//sys NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) = advapi32.NotifyServiceStatusChangeW diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/str.go b/awsproviderlint/vendor/golang.org/x/sys/windows/str.go deleted file mode 100644 index 917cc2aae4e..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/windows/str.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package windows - -func itoa(val int) string { // do it here rather than with fmt to avoid dependency - if val < 0 { - return "-" + itoa(-val) - } - var buf [32]byte // big enough for int64 - i := len(buf) - 1 - for val >= 10 { - buf[i] = byte(val%10 + '0') - i-- - val /= 10 - } - buf[i] = byte(val + '0') - return string(buf[i:]) -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/syscall.go b/awsproviderlint/vendor/golang.org/x/sys/windows/syscall.go deleted file mode 100644 index af828a91bcf..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/windows/syscall.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -// Package windows contains an interface to the low-level operating system -// primitives. OS details vary depending on the underlying system, and -// by default, godoc will display the OS-specific documentation for the current -// system. If you want godoc to display syscall documentation for another -// system, set $GOOS and $GOARCH to the desired system. For example, if -// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS -// to freebsd and $GOARCH to arm. -// -// The primary use of this package is inside other packages that provide a more -// portable interface to the system, such as "os", "time" and "net". Use -// those packages rather than this one if you can. -// -// For details of the functions and data types in this package consult -// the manuals for the appropriate operating system. -// -// These calls return err == nil to indicate success; otherwise -// err represents an operating system error describing the failure and -// holds a value of type syscall.Errno. -package windows // import "golang.org/x/sys/windows" - -import ( - "syscall" -) - -// ByteSliceFromString returns a NUL-terminated slice of bytes -// containing the text of s. 
If s contains a NUL byte at any -// location, it returns (nil, syscall.EINVAL). -func ByteSliceFromString(s string) ([]byte, error) { - for i := 0; i < len(s); i++ { - if s[i] == 0 { - return nil, syscall.EINVAL - } - } - a := make([]byte, len(s)+1) - copy(a, s) - return a, nil -} - -// BytePtrFromString returns a pointer to a NUL-terminated array of -// bytes containing the text of s. If s contains a NUL byte at any -// location, it returns (nil, syscall.EINVAL). -func BytePtrFromString(s string) (*byte, error) { - a, err := ByteSliceFromString(s) - if err != nil { - return nil, err - } - return &a[0], nil -} - -// Single-word zero for use when we need a valid pointer to 0 bytes. -// See mksyscall.pl. -var _zero uintptr - -func (ts *Timespec) Unix() (sec int64, nsec int64) { - return int64(ts.Sec), int64(ts.Nsec) -} - -func (tv *Timeval) Unix() (sec int64, nsec int64) { - return int64(tv.Sec), int64(tv.Usec) * 1000 -} - -func (ts *Timespec) Nano() int64 { - return int64(ts.Sec)*1e9 + int64(ts.Nsec) -} - -func (tv *Timeval) Nano() int64 { - return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/syscall_windows.go b/awsproviderlint/vendor/golang.org/x/sys/windows/syscall_windows.go deleted file mode 100644 index 2aa29e8396d..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/windows/syscall_windows.go +++ /dev/null @@ -1,1490 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Windows system calls. - -package windows - -import ( - errorspkg "errors" - "sync" - "syscall" - "time" - "unicode/utf16" - "unsafe" - - "golang.org/x/sys/internal/unsafeheader" -) - -type Handle uintptr - -const ( - InvalidHandle = ^Handle(0) - - // Flags for DefineDosDevice. - DDD_EXACT_MATCH_ON_REMOVE = 0x00000004 - DDD_NO_BROADCAST_SYSTEM = 0x00000008 - DDD_RAW_TARGET_PATH = 0x00000001 - DDD_REMOVE_DEFINITION = 0x00000002 - - // Return values for GetDriveType. - DRIVE_UNKNOWN = 0 - DRIVE_NO_ROOT_DIR = 1 - DRIVE_REMOVABLE = 2 - DRIVE_FIXED = 3 - DRIVE_REMOTE = 4 - DRIVE_CDROM = 5 - DRIVE_RAMDISK = 6 - - // File system flags from GetVolumeInformation and GetVolumeInformationByHandle. - FILE_CASE_SENSITIVE_SEARCH = 0x00000001 - FILE_CASE_PRESERVED_NAMES = 0x00000002 - FILE_FILE_COMPRESSION = 0x00000010 - FILE_DAX_VOLUME = 0x20000000 - FILE_NAMED_STREAMS = 0x00040000 - FILE_PERSISTENT_ACLS = 0x00000008 - FILE_READ_ONLY_VOLUME = 0x00080000 - FILE_SEQUENTIAL_WRITE_ONCE = 0x00100000 - FILE_SUPPORTS_ENCRYPTION = 0x00020000 - FILE_SUPPORTS_EXTENDED_ATTRIBUTES = 0x00800000 - FILE_SUPPORTS_HARD_LINKS = 0x00400000 - FILE_SUPPORTS_OBJECT_IDS = 0x00010000 - FILE_SUPPORTS_OPEN_BY_FILE_ID = 0x01000000 - FILE_SUPPORTS_REPARSE_POINTS = 0x00000080 - FILE_SUPPORTS_SPARSE_FILES = 0x00000040 - FILE_SUPPORTS_TRANSACTIONS = 0x00200000 - FILE_SUPPORTS_USN_JOURNAL = 0x02000000 - FILE_UNICODE_ON_DISK = 0x00000004 - FILE_VOLUME_IS_COMPRESSED = 0x00008000 - FILE_VOLUME_QUOTAS = 0x00000020 - - // Flags for LockFileEx. - LOCKFILE_FAIL_IMMEDIATELY = 0x00000001 - LOCKFILE_EXCLUSIVE_LOCK = 0x00000002 - - // Return values of SleepEx and other APC functions - STATUS_USER_APC = 0x000000C0 - WAIT_IO_COMPLETION = STATUS_USER_APC -) - -// StringToUTF16 is deprecated. Use UTF16FromString instead. -// If s contains a NUL byte this function panics instead of -// returning an error. 
-func StringToUTF16(s string) []uint16 { - a, err := UTF16FromString(s) - if err != nil { - panic("windows: string with NUL passed to StringToUTF16") - } - return a -} - -// UTF16FromString returns the UTF-16 encoding of the UTF-8 string -// s, with a terminating NUL added. If s contains a NUL byte at any -// location, it returns (nil, syscall.EINVAL). -func UTF16FromString(s string) ([]uint16, error) { - for i := 0; i < len(s); i++ { - if s[i] == 0 { - return nil, syscall.EINVAL - } - } - return utf16.Encode([]rune(s + "\x00")), nil -} - -// UTF16ToString returns the UTF-8 encoding of the UTF-16 sequence s, -// with a terminating NUL removed. -func UTF16ToString(s []uint16) string { - for i, v := range s { - if v == 0 { - s = s[0:i] - break - } - } - return string(utf16.Decode(s)) -} - -// StringToUTF16Ptr is deprecated. Use UTF16PtrFromString instead. -// If s contains a NUL byte this function panics instead of -// returning an error. -func StringToUTF16Ptr(s string) *uint16 { return &StringToUTF16(s)[0] } - -// UTF16PtrFromString returns a pointer to the UTF-16 encoding of -// the UTF-8 string s, with a terminating NUL added. If s -// contains a NUL byte at any location, it returns (nil, syscall.EINVAL). -func UTF16PtrFromString(s string) (*uint16, error) { - a, err := UTF16FromString(s) - if err != nil { - return nil, err - } - return &a[0], nil -} - -// UTF16PtrToString takes a pointer to a UTF-16 sequence and returns the corresponding UTF-8 encoded string. -// If the pointer is nil, this returns the empty string. This assumes that the UTF-16 sequence is terminated -// at a zero word; if the zero word is not present, the program may crash. -func UTF16PtrToString(p *uint16) string { - if p == nil { - return "" - } - if *p == 0 { - return "" - } - - // Find NUL terminator. - n := 0 - for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ { - ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) - } - - var s []uint16 - h := (*unsafeheader.Slice)(unsafe.Pointer(&s)) - h.Data = unsafe.Pointer(p) - h.Len = n - h.Cap = n - - return string(utf16.Decode(s)) -} - -func Getpagesize() int { return 4096 } - -// NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention. -// This is useful when interoperating with Windows code requiring callbacks. -// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. -func NewCallback(fn interface{}) uintptr { - return syscall.NewCallback(fn) -} - -// NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention. -// This is useful when interoperating with Windows code requiring callbacks. -// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
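
Worth spelling out for the string helpers above: the FromString conversions reject interior NUL bytes with syscall.EINVAL rather than silently truncating, while the deprecated StringToUTF16/StringToUTF16Ptr variants panic instead. A round-trip sketch, assuming a direct import of golang.org/x/sys/windows:

// +build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// An interior NUL is rejected, not truncated at the NUL.
	if _, err := windows.UTF16PtrFromString("evil\x00name"); err != nil {
		fmt.Println("rejected:", err) // syscall.EINVAL
	}

	p, err := windows.UTF16PtrFromString(`C:\Windows`)
	if err != nil {
		panic(err)
	}
	// UTF16PtrToString scans to the terminating NUL word and decodes.
	fmt.Println(windows.UTF16PtrToString(p)) // C:\Windows
}
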
-func NewCallbackCDecl(fn interface{}) uintptr { - return syscall.NewCallbackCDecl(fn) -} - -// windows api calls - -//sys GetLastError() (lasterr error) -//sys LoadLibrary(libname string) (handle Handle, err error) = LoadLibraryW -//sys LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) = LoadLibraryExW -//sys FreeLibrary(handle Handle) (err error) -//sys GetProcAddress(module Handle, procname string) (proc uintptr, err error) -//sys GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) = kernel32.GetModuleFileNameW -//sys GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) = kernel32.GetModuleHandleExW -//sys GetVersion() (ver uint32, err error) -//sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW -//sys ExitProcess(exitcode uint32) -//sys IsWow64Process(handle Handle, isWow64 *bool) (err error) = IsWow64Process -//sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW -//sys ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) -//sys WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) -//sys GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) -//sys SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) [failretval==0xffffffff] -//sys CloseHandle(handle Handle) (err error) -//sys GetStdHandle(stdhandle uint32) (handle Handle, err error) [failretval==InvalidHandle] -//sys SetStdHandle(stdhandle uint32, handle Handle) (err error) -//sys findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstFileW -//sys findNextFile1(handle Handle, data *win32finddata1) (err error) = FindNextFileW -//sys FindClose(handle Handle) (err error) -//sys GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) -//sys GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) -//sys GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) = GetCurrentDirectoryW -//sys SetCurrentDirectory(path *uint16) (err error) = SetCurrentDirectoryW -//sys CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) = CreateDirectoryW -//sys RemoveDirectory(path *uint16) (err error) = RemoveDirectoryW -//sys DeleteFile(path *uint16) (err error) = DeleteFileW -//sys MoveFile(from *uint16, to *uint16) (err error) = MoveFileW -//sys MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) = MoveFileExW -//sys LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) -//sys UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) -//sys GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW -//sys GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW -//sys SetEndOfFile(handle Handle) (err error) -//sys GetSystemTimeAsFileTime(time *Filetime) -//sys GetSystemTimePreciseAsFileTime(time *Filetime) -//sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) 
[failretval==0xffffffff] -//sys CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uint32, threadcnt uint32) (handle Handle, err error) -//sys GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (err error) -//sys PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uint32, overlapped *Overlapped) (err error) -//sys CancelIo(s Handle) (err error) -//sys CancelIoEx(s Handle, o *Overlapped) (err error) -//sys CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW -//sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) -//sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW -//sys shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath -//sys TerminateProcess(handle Handle, exitcode uint32) (err error) -//sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) -//sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW -//sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) -//sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) -//sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] -//sys waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] = WaitForMultipleObjects -//sys GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) = GetTempPathW -//sys CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) -//sys GetFileType(filehandle Handle) (n uint32, err error) -//sys CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) = advapi32.CryptAcquireContextW -//sys CryptReleaseContext(provhandle Handle, flags uint32) (err error) = advapi32.CryptReleaseContext -//sys CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) = advapi32.CryptGenRandom -//sys GetEnvironmentStrings() (envs *uint16, err error) [failretval==nil] = kernel32.GetEnvironmentStringsW -//sys FreeEnvironmentStrings(envs *uint16) (err error) = kernel32.FreeEnvironmentStringsW -//sys GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) = kernel32.GetEnvironmentVariableW -//sys SetEnvironmentVariable(name *uint16, value *uint16) (err error) = kernel32.SetEnvironmentVariableW -//sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock -//sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock -//sys getTickCount64() (ms uint64) = kernel32.GetTickCount64 -//sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) -//sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = 
kernel32.GetFileAttributesW -//sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW -//sys GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) = kernel32.GetFileAttributesExW -//sys GetCommandLine() (cmd *uint16) = kernel32.GetCommandLineW -//sys CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW -//sys LocalFree(hmem Handle) (handle Handle, err error) [failretval!=0] -//sys SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) -//sys FlushFileBuffers(handle Handle) (err error) -//sys GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) = kernel32.GetFullPathNameW -//sys GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) = kernel32.GetLongPathNameW -//sys GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) = kernel32.GetShortPathNameW -//sys CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) = kernel32.CreateFileMappingW -//sys MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) -//sys UnmapViewOfFile(addr uintptr) (err error) -//sys FlushViewOfFile(addr uintptr, length uintptr) (err error) -//sys VirtualLock(addr uintptr, length uintptr) (err error) -//sys VirtualUnlock(addr uintptr, length uintptr) (err error) -//sys VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) = kernel32.VirtualAlloc -//sys VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) = kernel32.VirtualFree -//sys VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) = kernel32.VirtualProtect -//sys TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) = mswsock.TransmitFile -//sys ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) = kernel32.ReadDirectoryChangesW -//sys CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) = crypt32.CertOpenSystemStoreW -//sys CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) [failretval==InvalidHandle] = crypt32.CertOpenStore -//sys CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) [failretval==nil] = crypt32.CertEnumCertificatesInStore -//sys CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) = crypt32.CertAddCertificateContextToStore -//sys CertCloseStore(store Handle, flags uint32) (err error) = crypt32.CertCloseStore -//sys CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) = crypt32.CertGetCertificateChain -//sys CertFreeCertificateChain(ctx *CertChainContext) = crypt32.CertFreeCertificateChain -//sys CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err 
error) [failretval==nil] = crypt32.CertCreateCertificateContext -//sys CertFreeCertificateContext(ctx *CertContext) (err error) = crypt32.CertFreeCertificateContext -//sys CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) = crypt32.CertVerifyCertificateChainPolicy -//sys RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) = advapi32.RegOpenKeyExW -//sys RegCloseKey(key Handle) (regerrno error) = advapi32.RegCloseKey -//sys RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegQueryInfoKeyW -//sys RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegEnumKeyExW -//sys RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegQueryValueExW -//sys GetCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId -//sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode -//sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode -//sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo -//sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW -//sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW -//sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot -//sys Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32FirstW -//sys Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32NextW -//sys Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) -//sys Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) -//sys DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) -// This function returns 1 byte BOOLEAN rather than the 4 byte BOOL. 
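
Several of the kernel32 bindings above only work as a fixed call protocol; the Toolhelp trio is the clearest case, since ProcessEntry32.Size must be filled in before the first call or Process32First fails. A sketch of walking the process list, assuming a direct import of golang.org/x/sys/windows:

// +build windows

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

func main() {
	snap, err := windows.CreateToolhelp32Snapshot(windows.TH32CS_SNAPPROCESS, 0)
	if err != nil {
		panic(err)
	}
	defer windows.CloseHandle(snap)

	var pe windows.ProcessEntry32
	pe.Size = uint32(unsafe.Sizeof(pe)) // required before the first call
	for err = windows.Process32First(snap, &pe); err == nil; err = windows.Process32Next(snap, &pe) {
		fmt.Println(pe.ProcessID, windows.UTF16ToString(pe.ExeFile[:]))
	}
}
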
-//sys CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) [failretval&0xff==0] = CreateSymbolicLinkW -//sys CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) [failretval&0xff==0] = CreateHardLinkW -//sys GetCurrentThreadId() (id uint32) -//sys CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) = kernel32.CreateEventW -//sys CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) = kernel32.CreateEventExW -//sys OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenEventW -//sys SetEvent(event Handle) (err error) = kernel32.SetEvent -//sys ResetEvent(event Handle) (err error) = kernel32.ResetEvent -//sys PulseEvent(event Handle) (err error) = kernel32.PulseEvent -//sys CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) = kernel32.CreateMutexW -//sys CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) = kernel32.CreateMutexExW -//sys OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenMutexW -//sys ReleaseMutex(mutex Handle) (err error) = kernel32.ReleaseMutex -//sys SleepEx(milliseconds uint32, alertable bool) (ret uint32) = kernel32.SleepEx -//sys CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) = kernel32.CreateJobObjectW -//sys AssignProcessToJobObject(job Handle, process Handle) (err error) = kernel32.AssignProcessToJobObject -//sys TerminateJobObject(job Handle, exitCode uint32) (err error) = kernel32.TerminateJobObject -//sys SetErrorMode(mode uint32) (ret uint32) = kernel32.SetErrorMode -//sys ResumeThread(thread Handle) (ret uint32, err error) [failretval==0xffffffff] = kernel32.ResumeThread -//sys SetPriorityClass(process Handle, priorityClass uint32) (err error) = kernel32.SetPriorityClass -//sys GetPriorityClass(process Handle) (ret uint32, err error) = kernel32.GetPriorityClass -//sys QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) = kernel32.QueryInformationJobObject -//sys SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) -//sys GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) -//sys GetProcessId(process Handle) (id uint32, err error) -//sys OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) -//sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost -//sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) -//sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) - -// Volume Management Functions -//sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW -//sys DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) = DeleteVolumeMountPointW -//sys FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) 
[failretval==InvalidHandle] = FindFirstVolumeW -//sys FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstVolumeMountPointW -//sys FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) = FindNextVolumeW -//sys FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) = FindNextVolumeMountPointW -//sys FindVolumeClose(findVolume Handle) (err error) -//sys FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) -//sys GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) = GetDiskFreeSpaceExW -//sys GetDriveType(rootPathName *uint16) (driveType uint32) = GetDriveTypeW -//sys GetLogicalDrives() (drivesBitMask uint32, err error) [failretval==0] -//sys GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) [failretval==0] = GetLogicalDriveStringsW -//sys GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationW -//sys GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationByHandleW -//sys GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) = GetVolumeNameForVolumeMountPointW -//sys GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) = GetVolumePathNameW -//sys GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) = GetVolumePathNamesForVolumeNameW -//sys QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) [failretval==0] = QueryDosDeviceW -//sys SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) = SetVolumeLabelW -//sys SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) = SetVolumeMountPointW -//sys MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW -//sys ExitWindowsEx(flags uint32, reason uint32) (err error) = user32.ExitWindowsEx -//sys InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, rebootAfterShutdown bool, reason uint32) (err error) = advapi32.InitiateSystemShutdownExW -//sys SetProcessShutdownParameters(level uint32, flags uint32) (err error) = kernel32.SetProcessShutdownParameters -//sys GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) = kernel32.GetProcessShutdownParameters -//sys clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) = ole32.CLSIDFromString -//sys stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) = ole32.StringFromGUID2 -//sys coCreateGuid(pguid *GUID) (ret error) = ole32.CoCreateGuid -//sys CoTaskMemFree(address unsafe.Pointer) = ole32.CoTaskMemFree -//sys rtlGetVersion(info *OsVersionInfoEx) (ret error) = ntdll.RtlGetVersion -//sys rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion 
*uint32, buildNumber *uint32) = ntdll.RtlGetNtVersionNumbers -//sys getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetProcessPreferredUILanguages -//sys getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetThreadPreferredUILanguages -//sys getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetUserPreferredUILanguages -//sys getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetSystemPreferredUILanguages - -// Process Status API (PSAPI) -//sys EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses - -// syscall interface implementation for other packages - -// GetCurrentProcess returns the handle for the current process. -// It is a pseudo handle that does not need to be closed. -// The returned error is always nil. -// -// Deprecated: use CurrentProcess for the same Handle without the nil -// error. -func GetCurrentProcess() (Handle, error) { - return CurrentProcess(), nil -} - -// CurrentProcess returns the handle for the current process. -// It is a pseudo handle that does not need to be closed. -func CurrentProcess() Handle { return Handle(^uintptr(1 - 1)) } - -// GetCurrentThread returns the handle for the current thread. -// It is a pseudo handle that does not need to be closed. -// The returned error is always nil. -// -// Deprecated: use CurrentThread for the same Handle without the nil -// error. -func GetCurrentThread() (Handle, error) { - return CurrentThread(), nil -} - -// CurrentThread returns the handle for the current thread. -// It is a pseudo handle that does not need to be closed. -func CurrentThread() Handle { return Handle(^uintptr(2 - 1)) } - -// GetProcAddressByOrdinal retrieves the address of the exported -// function from module by ordinal. 
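
The ^uintptr(1 - 1) and ^uintptr(2 - 1) expressions above are just constant-expression spellings of -1 and -2 as uintptr values, the documented pseudo-handle values for the current process and thread, which is why neither needs CloseHandle. For instance, assuming a direct import:

// +build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	var creation, exit, kernel, user windows.Filetime
	// CurrentProcess yields the -1 pseudo handle; it is never closed.
	if err := windows.GetProcessTimes(windows.CurrentProcess(), &creation, &exit, &kernel, &user); err != nil {
		panic(err)
	}
	fmt.Println("process start (ns since Unix epoch):", creation.Nanoseconds())
}
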
-func GetProcAddressByOrdinal(module Handle, ordinal uintptr) (proc uintptr, err error) { - r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), ordinal, 0) - proc = uintptr(r0) - if proc == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func Exit(code int) { ExitProcess(uint32(code)) } - -func makeInheritSa() *SecurityAttributes { - var sa SecurityAttributes - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - return &sa -} - -func Open(path string, mode int, perm uint32) (fd Handle, err error) { - if len(path) == 0 { - return InvalidHandle, ERROR_FILE_NOT_FOUND - } - pathp, err := UTF16PtrFromString(path) - if err != nil { - return InvalidHandle, err - } - var access uint32 - switch mode & (O_RDONLY | O_WRONLY | O_RDWR) { - case O_RDONLY: - access = GENERIC_READ - case O_WRONLY: - access = GENERIC_WRITE - case O_RDWR: - access = GENERIC_READ | GENERIC_WRITE - } - if mode&O_CREAT != 0 { - access |= GENERIC_WRITE - } - if mode&O_APPEND != 0 { - access &^= GENERIC_WRITE - access |= FILE_APPEND_DATA - } - sharemode := uint32(FILE_SHARE_READ | FILE_SHARE_WRITE) - var sa *SecurityAttributes - if mode&O_CLOEXEC == 0 { - sa = makeInheritSa() - } - var createmode uint32 - switch { - case mode&(O_CREAT|O_EXCL) == (O_CREAT | O_EXCL): - createmode = CREATE_NEW - case mode&(O_CREAT|O_TRUNC) == (O_CREAT | O_TRUNC): - createmode = CREATE_ALWAYS - case mode&O_CREAT == O_CREAT: - createmode = OPEN_ALWAYS - case mode&O_TRUNC == O_TRUNC: - createmode = TRUNCATE_EXISTING - default: - createmode = OPEN_EXISTING - } - var attrs uint32 = FILE_ATTRIBUTE_NORMAL - if perm&S_IWRITE == 0 { - attrs = FILE_ATTRIBUTE_READONLY - } - h, e := CreateFile(pathp, access, sharemode, sa, createmode, attrs, 0) - return h, e -} - -func Read(fd Handle, p []byte) (n int, err error) { - var done uint32 - e := ReadFile(fd, p, &done, nil) - if e != nil { - if e == ERROR_BROKEN_PIPE { - // NOTE(brainman): work around ERROR_BROKEN_PIPE is returned on reading EOF from stdin - return 0, nil - } - return 0, e - } - if raceenabled { - if done > 0 { - raceWriteRange(unsafe.Pointer(&p[0]), int(done)) - } - raceAcquire(unsafe.Pointer(&ioSync)) - } - return int(done), nil -} - -func Write(fd Handle, p []byte) (n int, err error) { - if raceenabled { - raceReleaseMerge(unsafe.Pointer(&ioSync)) - } - var done uint32 - e := WriteFile(fd, p, &done, nil) - if e != nil { - return 0, e - } - if raceenabled && done > 0 { - raceReadRange(unsafe.Pointer(&p[0]), int(done)) - } - return int(done), nil -} - -var ioSync int64 - -func Seek(fd Handle, offset int64, whence int) (newoffset int64, err error) { - var w uint32 - switch whence { - case 0: - w = FILE_BEGIN - case 1: - w = FILE_CURRENT - case 2: - w = FILE_END - } - hi := int32(offset >> 32) - lo := int32(offset) - // use GetFileType to check pipe, pipe can't do seek - ft, _ := GetFileType(fd) - if ft == FILE_TYPE_PIPE { - return 0, syscall.EPIPE - } - rlo, e := SetFilePointer(fd, lo, &hi, w) - if e != nil { - return 0, e - } - return int64(hi)<<32 + int64(rlo), nil -} - -func Close(fd Handle) (err error) { - return CloseHandle(fd) -} - -var ( - Stdin = getStdHandle(STD_INPUT_HANDLE) - Stdout = getStdHandle(STD_OUTPUT_HANDLE) - Stderr = getStdHandle(STD_ERROR_HANDLE) -) - -func getStdHandle(stdhandle uint32) (fd Handle) { - r, _ := GetStdHandle(stdhandle) - CloseOnExec(r) - return r -} - -const ImplementsGetwd = true - -func Getwd() (wd string, err error) { - b := make([]uint16, 300) - n, e := 
GetCurrentDirectory(uint32(len(b)), &b[0]) - if e != nil { - return "", e - } - return string(utf16.Decode(b[0:n])), nil -} - -func Chdir(path string) (err error) { - pathp, err := UTF16PtrFromString(path) - if err != nil { - return err - } - return SetCurrentDirectory(pathp) -} - -func Mkdir(path string, mode uint32) (err error) { - pathp, err := UTF16PtrFromString(path) - if err != nil { - return err - } - return CreateDirectory(pathp, nil) -} - -func Rmdir(path string) (err error) { - pathp, err := UTF16PtrFromString(path) - if err != nil { - return err - } - return RemoveDirectory(pathp) -} - -func Unlink(path string) (err error) { - pathp, err := UTF16PtrFromString(path) - if err != nil { - return err - } - return DeleteFile(pathp) -} - -func Rename(oldpath, newpath string) (err error) { - from, err := UTF16PtrFromString(oldpath) - if err != nil { - return err - } - to, err := UTF16PtrFromString(newpath) - if err != nil { - return err - } - return MoveFileEx(from, to, MOVEFILE_REPLACE_EXISTING) -} - -func ComputerName() (name string, err error) { - var n uint32 = MAX_COMPUTERNAME_LENGTH + 1 - b := make([]uint16, n) - e := GetComputerName(&b[0], &n) - if e != nil { - return "", e - } - return string(utf16.Decode(b[0:n])), nil -} - -func DurationSinceBoot() time.Duration { - return time.Duration(getTickCount64()) * time.Millisecond -} - -func Ftruncate(fd Handle, length int64) (err error) { - curoffset, e := Seek(fd, 0, 1) - if e != nil { - return e - } - defer Seek(fd, curoffset, 0) - _, e = Seek(fd, length, 0) - if e != nil { - return e - } - e = SetEndOfFile(fd) - if e != nil { - return e - } - return nil -} - -func Gettimeofday(tv *Timeval) (err error) { - var ft Filetime - GetSystemTimeAsFileTime(&ft) - *tv = NsecToTimeval(ft.Nanoseconds()) - return nil -} - -func Pipe(p []Handle) (err error) { - if len(p) != 2 { - return syscall.EINVAL - } - var r, w Handle - e := CreatePipe(&r, &w, makeInheritSa(), 0) - if e != nil { - return e - } - p[0] = r - p[1] = w - return nil -} - -func Utimes(path string, tv []Timeval) (err error) { - if len(tv) != 2 { - return syscall.EINVAL - } - pathp, e := UTF16PtrFromString(path) - if e != nil { - return e - } - h, e := CreateFile(pathp, - FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, nil, - OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0) - if e != nil { - return e - } - defer Close(h) - a := NsecToFiletime(tv[0].Nanoseconds()) - w := NsecToFiletime(tv[1].Nanoseconds()) - return SetFileTime(h, nil, &a, &w) -} - -func UtimesNano(path string, ts []Timespec) (err error) { - if len(ts) != 2 { - return syscall.EINVAL - } - pathp, e := UTF16PtrFromString(path) - if e != nil { - return e - } - h, e := CreateFile(pathp, - FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, nil, - OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0) - if e != nil { - return e - } - defer Close(h) - a := NsecToFiletime(TimespecToNsec(ts[0])) - w := NsecToFiletime(TimespecToNsec(ts[1])) - return SetFileTime(h, nil, &a, &w) -} - -func Fsync(fd Handle) (err error) { - return FlushFileBuffers(fd) -} - -func Chmod(path string, mode uint32) (err error) { - p, e := UTF16PtrFromString(path) - if e != nil { - return e - } - attrs, e := GetFileAttributes(p) - if e != nil { - return e - } - if mode&S_IWRITE != 0 { - attrs &^= FILE_ATTRIBUTE_READONLY - } else { - attrs |= FILE_ATTRIBUTE_READONLY - } - return SetFileAttributes(p, attrs) -} - -func LoadGetSystemTimePreciseAsFileTime() error { - return procGetSystemTimePreciseAsFileTime.Find() -} - -func LoadCancelIoEx() error { - return procCancelIoEx.Find() -} - 
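
The Load* probes just above exist so callers can feature-detect APIs that only newer Windows releases export: each one resolves the procedure once via Find() and returns an error instead of panicking at call time. Typical use, assuming a direct import:

// +build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Probe first: older Windows versions lack the precise variant.
	if err := windows.LoadGetSystemTimePreciseAsFileTime(); err != nil {
		fmt.Println("precise system time unavailable:", err)
		return
	}
	var ft windows.Filetime
	windows.GetSystemTimePreciseAsFileTime(&ft)
	fmt.Println("now (ns since Unix epoch):", ft.Nanoseconds())
}
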
-func LoadSetFileCompletionNotificationModes() error { - return procSetFileCompletionNotificationModes.Find() -} - -func WaitForMultipleObjects(handles []Handle, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { - // Every other win32 array API takes arguments as "pointer, count", except for this function. So we - // can't declare it as a usual [] type, because mksyscall will use the opposite order. We therefore - // trivially stub this ourselves. - - var handlePtr *Handle - if len(handles) > 0 { - handlePtr = &handles[0] - } - return waitForMultipleObjects(uint32(len(handles)), uintptr(unsafe.Pointer(handlePtr)), waitAll, waitMilliseconds) -} - -// net api calls - -const socket_error = uintptr(^uint32(0)) - -//sys WSAStartup(verreq uint32, data *WSAData) (sockerr error) = ws2_32.WSAStartup -//sys WSACleanup() (err error) [failretval==socket_error] = ws2_32.WSACleanup -//sys WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) [failretval==socket_error] = ws2_32.WSAIoctl -//sys socket(af int32, typ int32, protocol int32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.socket -//sys sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) [failretval==socket_error] = ws2_32.sendto -//sys recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) [failretval==-1] = ws2_32.recvfrom -//sys Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) [failretval==socket_error] = ws2_32.setsockopt -//sys Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) [failretval==socket_error] = ws2_32.getsockopt -//sys bind(s Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socket_error] = ws2_32.bind -//sys connect(s Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socket_error] = ws2_32.connect -//sys getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) [failretval==socket_error] = ws2_32.getsockname -//sys getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) [failretval==socket_error] = ws2_32.getpeername -//sys listen(s Handle, backlog int32) (err error) [failretval==socket_error] = ws2_32.listen -//sys shutdown(s Handle, how int32) (err error) [failretval==socket_error] = ws2_32.shutdown -//sys Closesocket(s Handle) (err error) [failretval==socket_error] = ws2_32.closesocket -//sys AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) = mswsock.AcceptEx -//sys GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) = mswsock.GetAcceptExSockaddrs -//sys WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecv -//sys WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASend -//sys WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom -//sys WSASendTo(s Handle, bufs *WSABuf, 
bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo -//sys GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname -//sys GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname -//sys Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs -//sys GetProtoByName(name string) (p *Protoent, err error) [failretval==nil] = ws2_32.getprotobyname -//sys DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) = dnsapi.DnsQuery_W -//sys DnsRecordListFree(rl *DNSRecord, freetype uint32) = dnsapi.DnsRecordListFree -//sys DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) = dnsapi.DnsNameCompare_W -//sys GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) = ws2_32.GetAddrInfoW -//sys FreeAddrInfoW(addrinfo *AddrinfoW) = ws2_32.FreeAddrInfoW -//sys GetIfEntry(pIfRow *MibIfRow) (errcode error) = iphlpapi.GetIfEntry -//sys GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) = iphlpapi.GetAdaptersInfo -//sys SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) = kernel32.SetFileCompletionNotificationModes -//sys WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) [failretval==-1] = ws2_32.WSAEnumProtocolsW -//sys GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses -//sys GetACP() (acp uint32) = kernel32.GetACP -//sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar - -// For testing: clients can set this flag to force -// creation of IPv6 sockets to return EAFNOSUPPORT. 
-var SocketDisableIPv6 bool - -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddr struct { - Family uint16 - Data [14]int8 -} - -type RawSockaddrAny struct { - Addr RawSockaddr - Pad [100]int8 -} - -type Sockaddr interface { - sockaddr() (ptr unsafe.Pointer, len int32, err error) // lowercase; only we can define Sockaddrs -} - -type SockaddrInet4 struct { - Port int - Addr [4]byte - raw RawSockaddrInet4 -} - -func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, int32, error) { - if sa.Port < 0 || sa.Port > 0xFFFF { - return nil, 0, syscall.EINVAL - } - sa.raw.Family = AF_INET - p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) - p[0] = byte(sa.Port >> 8) - p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } - return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil -} - -type SockaddrInet6 struct { - Port int - ZoneId uint32 - Addr [16]byte - raw RawSockaddrInet6 -} - -func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, int32, error) { - if sa.Port < 0 || sa.Port > 0xFFFF { - return nil, 0, syscall.EINVAL - } - sa.raw.Family = AF_INET6 - p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) - p[0] = byte(sa.Port >> 8) - p[1] = byte(sa.Port) - sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } - return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil -} - -type RawSockaddrUnix struct { - Family uint16 - Path [UNIX_PATH_MAX]int8 -} - -type SockaddrUnix struct { - Name string - raw RawSockaddrUnix -} - -func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { - name := sa.Name - n := len(name) - if n > len(sa.raw.Path) { - return nil, 0, syscall.EINVAL - } - if n == len(sa.raw.Path) && name[0] != '@' { - return nil, 0, syscall.EINVAL - } - sa.raw.Family = AF_UNIX - for i := 0; i < n; i++ { - sa.raw.Path[i] = int8(name[i]) - } - // length is family (uint16), name, NUL. - sl := int32(2) - if n > 0 { - sl += int32(n) + 1 - } - if sa.raw.Path[0] == '@' { - sa.raw.Path[0] = 0 - // Don't count trailing NUL for abstract address. - sl-- - } - - return unsafe.Pointer(&sa.raw), sl, nil -} - -func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { - switch rsa.Addr.Family { - case AF_UNIX: - pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa)) - sa := new(SockaddrUnix) - if pp.Path[0] == 0 { - // "Abstract" Unix domain socket. - // Rewrite leading NUL as @ for textual display. - // (This is the standard convention.) - // Not friendly to overwrite in place, - // but the callers below don't care. - pp.Path[0] = '@' - } - - // Assume path ends at NUL. - // This is not technically the Linux semantics for - // abstract Unix domain sockets--they are supposed - // to be uninterpreted fixed-size binary blobs--but - // everyone uses this convention. 
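// A minimal, self-contained sketch of the '@' <-> NUL convention described
// above for abstract Unix domain socket names: a leading '@' is stored as a
// NUL byte on the wire, and rewritten back to '@' for textual display. The
// helper names and the demo value are illustrative assumptions, not part of
// this file.
package main

import "fmt"

// encodeAbstract mirrors the encoding step in SockaddrUnix.sockaddr:
// a leading '@' becomes the abstract-namespace NUL marker.
func encodeAbstract(name string) []byte {
	path := []byte(name)
	if len(path) > 0 && path[0] == '@' {
		path[0] = 0 // abstract namespace marker
	}
	return path
}

// decodeAbstract mirrors RawSockaddrAny.Sockaddr: a leading NUL is shown
// as '@', and the name is assumed to end at the first NUL after that.
func decodeAbstract(path []byte) string {
	if len(path) > 0 && path[0] == 0 {
		path[0] = '@'
	}
	n := 0
	for n < len(path) && path[n] != 0 {
		n++
	}
	return string(path[:n])
}

func main() {
	raw := encodeAbstract("@demo-socket")
	fmt.Println(decodeAbstract(raw)) // @demo-socket
}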
- n := 0 - for n < len(pp.Path) && pp.Path[n] != 0 { - n++ - } - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) - return sa, nil - - case AF_INET: - pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) - sa := new(SockaddrInet4) - p := (*[2]byte)(unsafe.Pointer(&pp.Port)) - sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } - return sa, nil - - case AF_INET6: - pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) - sa := new(SockaddrInet6) - p := (*[2]byte)(unsafe.Pointer(&pp.Port)) - sa.Port = int(p[0])<<8 + int(p[1]) - sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } - return sa, nil - } - return nil, syscall.EAFNOSUPPORT -} - -func Socket(domain, typ, proto int) (fd Handle, err error) { - if domain == AF_INET6 && SocketDisableIPv6 { - return InvalidHandle, syscall.EAFNOSUPPORT - } - return socket(int32(domain), int32(typ), int32(proto)) -} - -func SetsockoptInt(fd Handle, level, opt int, value int) (err error) { - v := int32(value) - return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&v)), int32(unsafe.Sizeof(v))) -} - -func Bind(fd Handle, sa Sockaddr) (err error) { - ptr, n, err := sa.sockaddr() - if err != nil { - return err - } - return bind(fd, ptr, n) -} - -func Connect(fd Handle, sa Sockaddr) (err error) { - ptr, n, err := sa.sockaddr() - if err != nil { - return err - } - return connect(fd, ptr, n) -} - -func Getsockname(fd Handle) (sa Sockaddr, err error) { - var rsa RawSockaddrAny - l := int32(unsafe.Sizeof(rsa)) - if err = getsockname(fd, &rsa, &l); err != nil { - return - } - return rsa.Sockaddr() -} - -func Getpeername(fd Handle) (sa Sockaddr, err error) { - var rsa RawSockaddrAny - l := int32(unsafe.Sizeof(rsa)) - if err = getpeername(fd, &rsa, &l); err != nil { - return - } - return rsa.Sockaddr() -} - -func Listen(s Handle, n int) (err error) { - return listen(s, int32(n)) -} - -func Shutdown(fd Handle, how int) (err error) { - return shutdown(fd, int32(how)) -} - -func WSASendto(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to Sockaddr, overlapped *Overlapped, croutine *byte) (err error) { - rsa, l, err := to.sockaddr() - if err != nil { - return err - } - return WSASendTo(s, bufs, bufcnt, sent, flags, (*RawSockaddrAny)(unsafe.Pointer(rsa)), l, overlapped, croutine) -} - -func LoadGetAddrInfo() error { - return procGetAddrInfoW.Find() -} - -var connectExFunc struct { - once sync.Once - addr uintptr - err error -} - -func LoadConnectEx() error { - connectExFunc.once.Do(func() { - var s Handle - s, connectExFunc.err = Socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) - if connectExFunc.err != nil { - return - } - defer CloseHandle(s) - var n uint32 - connectExFunc.err = WSAIoctl(s, - SIO_GET_EXTENSION_FUNCTION_POINTER, - (*byte)(unsafe.Pointer(&WSAID_CONNECTEX)), - uint32(unsafe.Sizeof(WSAID_CONNECTEX)), - (*byte)(unsafe.Pointer(&connectExFunc.addr)), - uint32(unsafe.Sizeof(connectExFunc.addr)), - &n, nil, 0) - }) - return connectExFunc.err -} - -func connectEx(s Handle, name unsafe.Pointer, namelen int32, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(connectExFunc.addr, 7, uintptr(s), uintptr(name), uintptr(namelen), uintptr(unsafe.Pointer(sendBuf)), uintptr(sendDataLen), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } 
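// A hedged sketch of the network-byte-order port handling used by the
// sockaddr helpers above: the port is stored high byte first, which is what
// the manual (*[2]byte) shifts accomplish. binary.BigEndian is shown only to
// confirm the equivalence; the port value is an illustrative assumption.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var raw [2]byte
	port := 8080
	// Encode, as in SockaddrInet4.sockaddr: high byte first.
	raw[0] = byte(port >> 8)
	raw[1] = byte(port)
	// Decode, as in RawSockaddrAny.Sockaddr.
	decoded := int(raw[0])<<8 + int(raw[1])
	fmt.Println(decoded == port, binary.BigEndian.Uint16(raw[:]) == uint16(port)) // true true
}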
- return -} - -func ConnectEx(fd Handle, sa Sockaddr, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *Overlapped) error { - err := LoadConnectEx() - if err != nil { - return errorspkg.New("failed to find ConnectEx: " + err.Error()) - } - ptr, n, err := sa.sockaddr() - if err != nil { - return err - } - return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped) -} - -var sendRecvMsgFunc struct { - once sync.Once - sendAddr uintptr - recvAddr uintptr - err error -} - -func loadWSASendRecvMsg() error { - sendRecvMsgFunc.once.Do(func() { - var s Handle - s, sendRecvMsgFunc.err = Socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP) - if sendRecvMsgFunc.err != nil { - return - } - defer CloseHandle(s) - var n uint32 - sendRecvMsgFunc.err = WSAIoctl(s, - SIO_GET_EXTENSION_FUNCTION_POINTER, - (*byte)(unsafe.Pointer(&WSAID_WSARECVMSG)), - uint32(unsafe.Sizeof(WSAID_WSARECVMSG)), - (*byte)(unsafe.Pointer(&sendRecvMsgFunc.recvAddr)), - uint32(unsafe.Sizeof(sendRecvMsgFunc.recvAddr)), - &n, nil, 0) - if sendRecvMsgFunc.err != nil { - return - } - sendRecvMsgFunc.err = WSAIoctl(s, - SIO_GET_EXTENSION_FUNCTION_POINTER, - (*byte)(unsafe.Pointer(&WSAID_WSASENDMSG)), - uint32(unsafe.Sizeof(WSAID_WSASENDMSG)), - (*byte)(unsafe.Pointer(&sendRecvMsgFunc.sendAddr)), - uint32(unsafe.Sizeof(sendRecvMsgFunc.sendAddr)), - &n, nil, 0) - }) - return sendRecvMsgFunc.err -} - -func WSASendMsg(fd Handle, msg *WSAMsg, flags uint32, bytesSent *uint32, overlapped *Overlapped, croutine *byte) error { - err := loadWSASendRecvMsg() - if err != nil { - return err - } - r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.sendAddr, 6, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(flags), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return err -} - -func WSARecvMsg(fd Handle, msg *WSAMsg, bytesReceived *uint32, overlapped *Overlapped, croutine *byte) error { - err := loadWSASendRecvMsg() - if err != nil { - return err - } - r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.recvAddr, 5, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(bytesReceived)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return err -} - -// Invented structures to support what package os expects. -type Rusage struct { - CreationTime Filetime - ExitTime Filetime - KernelTime Filetime - UserTime Filetime -} - -type WaitStatus struct { - ExitCode uint32 -} - -func (w WaitStatus) Exited() bool { return true } - -func (w WaitStatus) ExitStatus() int { return int(w.ExitCode) } - -func (w WaitStatus) Signal() Signal { return -1 } - -func (w WaitStatus) CoreDump() bool { return false } - -func (w WaitStatus) Stopped() bool { return false } - -func (w WaitStatus) Continued() bool { return false } - -func (w WaitStatus) StopSignal() Signal { return -1 } - -func (w WaitStatus) Signaled() bool { return false } - -func (w WaitStatus) TrapCause() int { return -1 } - -// Timespec is an invented structure on Windows, but here for -// consistency with the corresponding package for other operating systems. 
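// A minimal sketch of the sync.Once lazy-lookup pattern that LoadConnectEx
// and loadWSASendRecvMsg above rely on. resolve is a hypothetical stand-in
// for the WSAIoctl(SIO_GET_EXTENSION_FUNCTION_POINTER) call, so the snippet
// runs anywhere; only the caching structure is the point.
package main

import (
	"fmt"
	"sync"
)

var connectExFunc struct {
	once sync.Once
	addr uintptr
	err  error
}

// resolve is a placeholder for the real extension-function lookup.
func resolve() (uintptr, error) { return 0xdeadbeef, nil }

func loadConnectEx() error {
	// The lookup runs at most once; every later caller sees the cached result.
	connectExFunc.once.Do(func() {
		connectExFunc.addr, connectExFunc.err = resolve()
	})
	return connectExFunc.err
}

func main() {
	fmt.Println(loadConnectEx(), loadConnectEx()) // <nil> <nil>; resolve ran once
}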
-type Timespec struct { - Sec int64 - Nsec int64 -} - -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return -} - -// TODO(brainman): fix all needed for net - -func Accept(fd Handle) (nfd Handle, sa Sockaddr, err error) { return 0, nil, syscall.EWINDOWS } - -func Recvfrom(fd Handle, p []byte, flags int) (n int, from Sockaddr, err error) { - var rsa RawSockaddrAny - l := int32(unsafe.Sizeof(rsa)) - n32, err := recvfrom(fd, p, int32(flags), &rsa, &l) - n = int(n32) - if err != nil { - return - } - from, err = rsa.Sockaddr() - return -} - -func Sendto(fd Handle, p []byte, flags int, to Sockaddr) (err error) { - ptr, l, err := to.sockaddr() - if err != nil { - return err - } - return sendto(fd, p, int32(flags), ptr, l) -} - -func SetsockoptTimeval(fd Handle, level, opt int, tv *Timeval) (err error) { return syscall.EWINDOWS } - -// The Linger struct is wrong but we only noticed after Go 1. -// sysLinger is the real system call structure. - -// BUG(brainman): The definition of Linger is not appropriate for direct use -// with Setsockopt and Getsockopt. -// Use SetsockoptLinger instead. - -type Linger struct { - Onoff int32 - Linger int32 -} - -type sysLinger struct { - Onoff uint16 - Linger uint16 -} - -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -func GetsockoptInt(fd Handle, level, opt int) (int, error) { - v := int32(0) - l := int32(unsafe.Sizeof(v)) - err := Getsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&v)), &l) - return int(v), err -} - -func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) { - sys := sysLinger{Onoff: uint16(l.Onoff), Linger: uint16(l.Linger)} - return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&sys)), int32(unsafe.Sizeof(sys))) -} - -func SetsockoptInet4Addr(fd Handle, level, opt int, value [4]byte) (err error) { - return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&value[0])), 4) -} -func SetsockoptIPMreq(fd Handle, level, opt int, mreq *IPMreq) (err error) { - return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(mreq)), int32(unsafe.Sizeof(*mreq))) -} -func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { - return syscall.EWINDOWS -} - -func Getpid() (pid int) { return int(GetCurrentProcessId()) } - -func FindFirstFile(name *uint16, data *Win32finddata) (handle Handle, err error) { - // NOTE(rsc): The Win32finddata struct is wrong for the system call: - // the two paths are each one uint16 short. Use the correct struct, - // a win32finddata1, and then copy the results out. - // There is no loss of expressivity here, because the final - // uint16, if it is used, is supposed to be a NUL, and Go doesn't need that. - // For Go 1.1, we might avoid the allocation of win32finddata1 here - // by adding a final Bug [2]uint16 field to the struct and then - // adjusting the fields in the result directly. 
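// A small runnable sketch of the Timespec conversions defined above:
// nanoseconds split into whole seconds plus the sub-second remainder, and
// recombined losslessly. The sample value is an illustrative assumption.
package main

import "fmt"

type Timespec struct {
	Sec  int64
	Nsec int64
}

func NsecToTimespec(nsec int64) (ts Timespec) {
	ts.Sec = nsec / 1e9
	ts.Nsec = nsec % 1e9
	return
}

func TimespecToNsec(ts Timespec) int64 { return ts.Sec*1e9 + ts.Nsec }

func main() {
	const nsec = 1_500_000_000 // 1.5 seconds
	ts := NsecToTimespec(nsec)
	fmt.Println(ts.Sec, ts.Nsec, TimespecToNsec(ts) == nsec) // 1 500000000 true
}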
- var data1 win32finddata1 - handle, err = findFirstFile1(name, &data1) - if err == nil { - copyFindData(data, &data1) - } - return -} - -func FindNextFile(handle Handle, data *Win32finddata) (err error) { - var data1 win32finddata1 - err = findNextFile1(handle, &data1) - if err == nil { - copyFindData(data, &data1) - } - return -} - -func getProcessEntry(pid int) (*ProcessEntry32, error) { - snapshot, err := CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0) - if err != nil { - return nil, err - } - defer CloseHandle(snapshot) - var procEntry ProcessEntry32 - procEntry.Size = uint32(unsafe.Sizeof(procEntry)) - if err = Process32First(snapshot, &procEntry); err != nil { - return nil, err - } - for { - if procEntry.ProcessID == uint32(pid) { - return &procEntry, nil - } - err = Process32Next(snapshot, &procEntry) - if err != nil { - return nil, err - } - } -} - -func Getppid() (ppid int) { - pe, err := getProcessEntry(Getpid()) - if err != nil { - return -1 - } - return int(pe.ParentProcessID) -} - -// TODO(brainman): fix all needed for os -func Fchdir(fd Handle) (err error) { return syscall.EWINDOWS } -func Link(oldpath, newpath string) (err error) { return syscall.EWINDOWS } -func Symlink(path, link string) (err error) { return syscall.EWINDOWS } - -func Fchmod(fd Handle, mode uint32) (err error) { return syscall.EWINDOWS } -func Chown(path string, uid int, gid int) (err error) { return syscall.EWINDOWS } -func Lchown(path string, uid int, gid int) (err error) { return syscall.EWINDOWS } -func Fchown(fd Handle, uid int, gid int) (err error) { return syscall.EWINDOWS } - -func Getuid() (uid int) { return -1 } -func Geteuid() (euid int) { return -1 } -func Getgid() (gid int) { return -1 } -func Getegid() (egid int) { return -1 } -func Getgroups() (gids []int, err error) { return nil, syscall.EWINDOWS } - -type Signal int - -func (s Signal) Signal() {} - -func (s Signal) String() string { - if 0 <= s && int(s) < len(signals) { - str := signals[s] - if str != "" { - return str - } - } - return "signal " + itoa(int(s)) -} - -func LoadCreateSymbolicLink() error { - return procCreateSymbolicLinkW.Find() -} - -// Readlink returns the destination of the named symbolic link. 
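// A short sketch of the snapshot-walk pattern getProcessEntry uses above:
// take a Toolhelp32 snapshot, then iterate Process32First/Process32Next
// until the pid matches. The entries slice below is a hypothetical stand-in
// for the snapshot handle so the loop structure is runnable anywhere.
package main

import (
	"errors"
	"fmt"
)

type ProcessEntry32 struct {
	ProcessID       uint32
	ParentProcessID uint32
}

func getProcessEntry(entries []ProcessEntry32, pid int) (*ProcessEntry32, error) {
	for i := range entries { // Process32First / Process32Next analogue
		if entries[i].ProcessID == uint32(pid) {
			return &entries[i], nil
		}
	}
	return nil, errors.New("pid not found in snapshot")
}

func main() {
	snapshot := []ProcessEntry32{{ProcessID: 1}, {ProcessID: 42, ParentProcessID: 1}}
	pe, err := getProcessEntry(snapshot, 42)
	fmt.Println(pe.ParentProcessID, err) // 1 <nil>
}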
-func Readlink(path string, buf []byte) (n int, err error) { - fd, err := CreateFile(StringToUTF16Ptr(path), GENERIC_READ, 0, nil, OPEN_EXISTING, - FILE_FLAG_OPEN_REPARSE_POINT|FILE_FLAG_BACKUP_SEMANTICS, 0) - if err != nil { - return -1, err - } - defer CloseHandle(fd) - - rdbbuf := make([]byte, MAXIMUM_REPARSE_DATA_BUFFER_SIZE) - var bytesReturned uint32 - err = DeviceIoControl(fd, FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil) - if err != nil { - return -1, err - } - - rdb := (*reparseDataBuffer)(unsafe.Pointer(&rdbbuf[0])) - var s string - switch rdb.ReparseTag { - case IO_REPARSE_TAG_SYMLINK: - data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) - p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) - s = UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength-data.PrintNameOffset)/2]) - case IO_REPARSE_TAG_MOUNT_POINT: - data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) - p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) - s = UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength-data.PrintNameOffset)/2]) - default: - // the path is not a symlink or junction but another type of reparse - // point - return -1, syscall.ENOENT - } - n = copy(buf, []byte(s)) - - return n, nil -} - -// GUIDFromString parses a string in the form of -// "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}" into a GUID. -func GUIDFromString(str string) (GUID, error) { - guid := GUID{} - str16, err := syscall.UTF16PtrFromString(str) - if err != nil { - return guid, err - } - err = clsidFromString(str16, &guid) - if err != nil { - return guid, err - } - return guid, nil -} - -// GenerateGUID creates a new random GUID. -func GenerateGUID() (GUID, error) { - guid := GUID{} - err := coCreateGuid(&guid) - if err != nil { - return guid, err - } - return guid, nil -} - -// String returns the canonical string form of the GUID, -// in the form of "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}". -func (guid GUID) String() string { - var str [100]uint16 - chars := stringFromGUID2(&guid, &str[0], int32(len(str))) - if chars <= 1 { - return "" - } - return string(utf16.Decode(str[:chars-1])) -} - -// KnownFolderPath returns a well-known folder path for the current user, specified by one of -// the FOLDERID_ constants, and chosen and optionally created based on a KF_ flag. -func KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) { - return Token(0).KnownFolderPath(folderID, flags) -} - -// KnownFolderPath returns a well-known folder path for the user token, specified by one of -// the FOLDERID_ constants, and chosen and optionally created based on a KF_ flag. -func (t Token) KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) { - var p *uint16 - err := shGetKnownFolderPath(folderID, flags, t, &p) - if err != nil { - return "", err - } - defer CoTaskMemFree(unsafe.Pointer(p)) - return UTF16PtrToString(p), nil -} - -// RtlGetVersion returns the version of the underlying operating system, ignoring -// manifest semantics but is affected by the application compatibility layer. -func RtlGetVersion() *OsVersionInfoEx { - info := &OsVersionInfoEx{} - info.osVersionInfoSize = uint32(unsafe.Sizeof(*info)) - // According to documentation, this function always succeeds. - // The function doesn't even check the validity of the - // osVersionInfoSize member. Disassembling ntdll.dll indicates - // that the documentation is indeed correct about that. 
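// A hedged sketch of the canonical "{XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}"
// GUID rendering that GUID.String above obtains from StringFromGUID2; this
// version formats the same fields manually for illustration, using the
// WSAID_CONNECTEX value from this file as sample data.
package main

import "fmt"

type GUID struct {
	Data1 uint32
	Data2 uint16
	Data3 uint16
	Data4 [8]byte
}

// String renders Data1..Data3 as fixed-width hex groups, then splits Data4
// into the 2-byte and 6-byte trailing groups of the canonical form.
func (g GUID) String() string {
	return fmt.Sprintf("{%08X-%04X-%04X-%04X-%012X}",
		g.Data1, g.Data2, g.Data3,
		uint16(g.Data4[0])<<8|uint16(g.Data4[1]), g.Data4[2:])
}

func main() {
	wsaidConnectEx := GUID{0x25a207b9, 0xddf3, 0x4660,
		[8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}}
	fmt.Println(wsaidConnectEx) // {25A207B9-DDF3-4660-8EE9-76E58C74063E}
}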
- _ = rtlGetVersion(info) - return info -} - -// RtlGetNtVersionNumbers returns the version of the underlying operating system, -// ignoring manifest semantics and the application compatibility layer. -func RtlGetNtVersionNumbers() (majorVersion, minorVersion, buildNumber uint32) { - rtlGetNtVersionNumbers(&majorVersion, &minorVersion, &buildNumber) - buildNumber &= 0xffff - return -} - -// GetProcessPreferredUILanguages retrieves the process preferred UI languages. -func GetProcessPreferredUILanguages(flags uint32) ([]string, error) { - return getUILanguages(flags, getProcessPreferredUILanguages) -} - -// GetThreadPreferredUILanguages retrieves the thread preferred UI languages for the current thread. -func GetThreadPreferredUILanguages(flags uint32) ([]string, error) { - return getUILanguages(flags, getThreadPreferredUILanguages) -} - -// GetUserPreferredUILanguages retrieves information about the user preferred UI languages. -func GetUserPreferredUILanguages(flags uint32) ([]string, error) { - return getUILanguages(flags, getUserPreferredUILanguages) -} - -// GetSystemPreferredUILanguages retrieves the system preferred UI languages. -func GetSystemPreferredUILanguages(flags uint32) ([]string, error) { - return getUILanguages(flags, getSystemPreferredUILanguages) -} - -func getUILanguages(flags uint32, f func(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) error) ([]string, error) { - size := uint32(128) - for { - var numLanguages uint32 - buf := make([]uint16, size) - err := f(flags, &numLanguages, &buf[0], &size) - if err == ERROR_INSUFFICIENT_BUFFER { - continue - } - if err != nil { - return nil, err - } - buf = buf[:size] - if numLanguages == 0 || len(buf) == 0 { // GetProcessPreferredUILanguages may return numLanguages==0 with "\0\0" - return []string{}, nil - } - if buf[len(buf)-1] == 0 { - buf = buf[:len(buf)-1] // remove terminating null - } - languages := make([]string, 0, numLanguages) - from := 0 - for i, c := range buf { - if c == 0 { - languages = append(languages, string(utf16.Decode(buf[from:i]))) - from = i + 1 - } - } - return languages, nil - } -} diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/types_windows.go b/awsproviderlint/vendor/golang.org/x/sys/windows/types_windows.go deleted file mode 100644 index da1652e74b0..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/windows/types_windows.go +++ /dev/null @@ -1,1774 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package windows - -import ( - "net" - "syscall" - "unsafe" -) - -const ( - // Invented values to support what package os expects. 
- O_RDONLY = 0x00000 - O_WRONLY = 0x00001 - O_RDWR = 0x00002 - O_CREAT = 0x00040 - O_EXCL = 0x00080 - O_NOCTTY = 0x00100 - O_TRUNC = 0x00200 - O_NONBLOCK = 0x00800 - O_APPEND = 0x00400 - O_SYNC = 0x01000 - O_ASYNC = 0x02000 - O_CLOEXEC = 0x80000 -) - -const ( - // More invented values for signals - SIGHUP = Signal(0x1) - SIGINT = Signal(0x2) - SIGQUIT = Signal(0x3) - SIGILL = Signal(0x4) - SIGTRAP = Signal(0x5) - SIGABRT = Signal(0x6) - SIGBUS = Signal(0x7) - SIGFPE = Signal(0x8) - SIGKILL = Signal(0x9) - SIGSEGV = Signal(0xb) - SIGPIPE = Signal(0xd) - SIGALRM = Signal(0xe) - SIGTERM = Signal(0xf) -) - -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "bus error", - 8: "floating point exception", - 9: "killed", - 10: "user defined signal 1", - 11: "segmentation fault", - 12: "user defined signal 2", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", -} - -const ( - FILE_LIST_DIRECTORY = 0x00000001 - FILE_APPEND_DATA = 0x00000004 - FILE_WRITE_ATTRIBUTES = 0x00000100 - - FILE_SHARE_READ = 0x00000001 - FILE_SHARE_WRITE = 0x00000002 - FILE_SHARE_DELETE = 0x00000004 - - FILE_ATTRIBUTE_READONLY = 0x00000001 - FILE_ATTRIBUTE_HIDDEN = 0x00000002 - FILE_ATTRIBUTE_SYSTEM = 0x00000004 - FILE_ATTRIBUTE_DIRECTORY = 0x00000010 - FILE_ATTRIBUTE_ARCHIVE = 0x00000020 - FILE_ATTRIBUTE_DEVICE = 0x00000040 - FILE_ATTRIBUTE_NORMAL = 0x00000080 - FILE_ATTRIBUTE_TEMPORARY = 0x00000100 - FILE_ATTRIBUTE_SPARSE_FILE = 0x00000200 - FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400 - FILE_ATTRIBUTE_COMPRESSED = 0x00000800 - FILE_ATTRIBUTE_OFFLINE = 0x00001000 - FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x00002000 - FILE_ATTRIBUTE_ENCRYPTED = 0x00004000 - FILE_ATTRIBUTE_INTEGRITY_STREAM = 0x00008000 - FILE_ATTRIBUTE_VIRTUAL = 0x00010000 - FILE_ATTRIBUTE_NO_SCRUB_DATA = 0x00020000 - FILE_ATTRIBUTE_RECALL_ON_OPEN = 0x00040000 - FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS = 0x00400000 - - INVALID_FILE_ATTRIBUTES = 0xffffffff - - CREATE_NEW = 1 - CREATE_ALWAYS = 2 - OPEN_EXISTING = 3 - OPEN_ALWAYS = 4 - TRUNCATE_EXISTING = 5 - - FILE_FLAG_OPEN_REQUIRING_OPLOCK = 0x00040000 - FILE_FLAG_FIRST_PIPE_INSTANCE = 0x00080000 - FILE_FLAG_OPEN_NO_RECALL = 0x00100000 - FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000 - FILE_FLAG_SESSION_AWARE = 0x00800000 - FILE_FLAG_POSIX_SEMANTICS = 0x01000000 - FILE_FLAG_BACKUP_SEMANTICS = 0x02000000 - FILE_FLAG_DELETE_ON_CLOSE = 0x04000000 - FILE_FLAG_SEQUENTIAL_SCAN = 0x08000000 - FILE_FLAG_RANDOM_ACCESS = 0x10000000 - FILE_FLAG_NO_BUFFERING = 0x20000000 - FILE_FLAG_OVERLAPPED = 0x40000000 - FILE_FLAG_WRITE_THROUGH = 0x80000000 - - HANDLE_FLAG_INHERIT = 0x00000001 - STARTF_USESTDHANDLES = 0x00000100 - STARTF_USESHOWWINDOW = 0x00000001 - DUPLICATE_CLOSE_SOURCE = 0x00000001 - DUPLICATE_SAME_ACCESS = 0x00000002 - - STD_INPUT_HANDLE = -10 & (1<<32 - 1) - STD_OUTPUT_HANDLE = -11 & (1<<32 - 1) - STD_ERROR_HANDLE = -12 & (1<<32 - 1) - - FILE_BEGIN = 0 - FILE_CURRENT = 1 - FILE_END = 2 - - LANG_ENGLISH = 0x09 - SUBLANG_ENGLISH_US = 0x01 - - FORMAT_MESSAGE_ALLOCATE_BUFFER = 256 - FORMAT_MESSAGE_IGNORE_INSERTS = 512 - FORMAT_MESSAGE_FROM_STRING = 1024 - FORMAT_MESSAGE_FROM_HMODULE = 2048 - FORMAT_MESSAGE_FROM_SYSTEM = 4096 - FORMAT_MESSAGE_ARGUMENT_ARRAY = 8192 - FORMAT_MESSAGE_MAX_WIDTH_MASK = 255 - - MAX_PATH = 260 - MAX_LONG_PATH = 32768 - - MAX_COMPUTERNAME_LENGTH = 15 - - TIME_ZONE_ID_UNKNOWN = 0 - TIME_ZONE_ID_STANDARD = 1 - - TIME_ZONE_ID_DAYLIGHT = 2 - IGNORE = 0 - INFINITE = 0xffffffff - - WAIT_ABANDONED = 
0x00000080 - WAIT_OBJECT_0 = 0x00000000 - WAIT_FAILED = 0xFFFFFFFF - - // Access rights for process. - PROCESS_CREATE_PROCESS = 0x0080 - PROCESS_CREATE_THREAD = 0x0002 - PROCESS_DUP_HANDLE = 0x0040 - PROCESS_QUERY_INFORMATION = 0x0400 - PROCESS_QUERY_LIMITED_INFORMATION = 0x1000 - PROCESS_SET_INFORMATION = 0x0200 - PROCESS_SET_QUOTA = 0x0100 - PROCESS_SUSPEND_RESUME = 0x0800 - PROCESS_TERMINATE = 0x0001 - PROCESS_VM_OPERATION = 0x0008 - PROCESS_VM_READ = 0x0010 - PROCESS_VM_WRITE = 0x0020 - - // Access rights for thread. - THREAD_DIRECT_IMPERSONATION = 0x0200 - THREAD_GET_CONTEXT = 0x0008 - THREAD_IMPERSONATE = 0x0100 - THREAD_QUERY_INFORMATION = 0x0040 - THREAD_QUERY_LIMITED_INFORMATION = 0x0800 - THREAD_SET_CONTEXT = 0x0010 - THREAD_SET_INFORMATION = 0x0020 - THREAD_SET_LIMITED_INFORMATION = 0x0400 - THREAD_SET_THREAD_TOKEN = 0x0080 - THREAD_SUSPEND_RESUME = 0x0002 - THREAD_TERMINATE = 0x0001 - - FILE_MAP_COPY = 0x01 - FILE_MAP_WRITE = 0x02 - FILE_MAP_READ = 0x04 - FILE_MAP_EXECUTE = 0x20 - - CTRL_C_EVENT = 0 - CTRL_BREAK_EVENT = 1 - CTRL_CLOSE_EVENT = 2 - CTRL_LOGOFF_EVENT = 5 - CTRL_SHUTDOWN_EVENT = 6 - - // Windows reserves errors >= 1<<29 for application use. - APPLICATION_ERROR = 1 << 29 -) - -const ( - // Process creation flags. - CREATE_BREAKAWAY_FROM_JOB = 0x01000000 - CREATE_DEFAULT_ERROR_MODE = 0x04000000 - CREATE_NEW_CONSOLE = 0x00000010 - CREATE_NEW_PROCESS_GROUP = 0x00000200 - CREATE_NO_WINDOW = 0x08000000 - CREATE_PROTECTED_PROCESS = 0x00040000 - CREATE_PRESERVE_CODE_AUTHZ_LEVEL = 0x02000000 - CREATE_SEPARATE_WOW_VDM = 0x00000800 - CREATE_SHARED_WOW_VDM = 0x00001000 - CREATE_SUSPENDED = 0x00000004 - CREATE_UNICODE_ENVIRONMENT = 0x00000400 - DEBUG_ONLY_THIS_PROCESS = 0x00000002 - DEBUG_PROCESS = 0x00000001 - DETACHED_PROCESS = 0x00000008 - EXTENDED_STARTUPINFO_PRESENT = 0x00080000 - INHERIT_PARENT_AFFINITY = 0x00010000 -) - -const ( - // flags for CreateToolhelp32Snapshot - TH32CS_SNAPHEAPLIST = 0x01 - TH32CS_SNAPPROCESS = 0x02 - TH32CS_SNAPTHREAD = 0x04 - TH32CS_SNAPMODULE = 0x08 - TH32CS_SNAPMODULE32 = 0x10 - TH32CS_SNAPALL = TH32CS_SNAPHEAPLIST | TH32CS_SNAPMODULE | TH32CS_SNAPPROCESS | TH32CS_SNAPTHREAD - TH32CS_INHERIT = 0x80000000 -) - -const ( - // filters for ReadDirectoryChangesW - FILE_NOTIFY_CHANGE_FILE_NAME = 0x001 - FILE_NOTIFY_CHANGE_DIR_NAME = 0x002 - FILE_NOTIFY_CHANGE_ATTRIBUTES = 0x004 - FILE_NOTIFY_CHANGE_SIZE = 0x008 - FILE_NOTIFY_CHANGE_LAST_WRITE = 0x010 - FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x020 - FILE_NOTIFY_CHANGE_CREATION = 0x040 - FILE_NOTIFY_CHANGE_SECURITY = 0x100 -) - -const ( - // do not reorder - FILE_ACTION_ADDED = iota + 1 - FILE_ACTION_REMOVED - FILE_ACTION_MODIFIED - FILE_ACTION_RENAMED_OLD_NAME - FILE_ACTION_RENAMED_NEW_NAME -) - -const ( - // wincrypt.h - PROV_RSA_FULL = 1 - PROV_RSA_SIG = 2 - PROV_DSS = 3 - PROV_FORTEZZA = 4 - PROV_MS_EXCHANGE = 5 - PROV_SSL = 6 - PROV_RSA_SCHANNEL = 12 - PROV_DSS_DH = 13 - PROV_EC_ECDSA_SIG = 14 - PROV_EC_ECNRA_SIG = 15 - PROV_EC_ECDSA_FULL = 16 - PROV_EC_ECNRA_FULL = 17 - PROV_DH_SCHANNEL = 18 - PROV_SPYRUS_LYNKS = 20 - PROV_RNG = 21 - PROV_INTEL_SEC = 22 - PROV_REPLACE_OWF = 23 - PROV_RSA_AES = 24 - CRYPT_VERIFYCONTEXT = 0xF0000000 - CRYPT_NEWKEYSET = 0x00000008 - CRYPT_DELETEKEYSET = 0x00000010 - CRYPT_MACHINE_KEYSET = 0x00000020 - CRYPT_SILENT = 0x00000040 - CRYPT_DEFAULT_CONTAINER_OPTIONAL = 0x00000080 - - USAGE_MATCH_TYPE_AND = 0 - USAGE_MATCH_TYPE_OR = 1 - - /* msgAndCertEncodingType values for CertOpenStore function */ - X509_ASN_ENCODING = 0x00000001 - PKCS_7_ASN_ENCODING = 0x00010000 - - /* 
storeProvider values for CertOpenStore function */ - CERT_STORE_PROV_MSG = 1 - CERT_STORE_PROV_MEMORY = 2 - CERT_STORE_PROV_FILE = 3 - CERT_STORE_PROV_REG = 4 - CERT_STORE_PROV_PKCS7 = 5 - CERT_STORE_PROV_SERIALIZED = 6 - CERT_STORE_PROV_FILENAME_A = 7 - CERT_STORE_PROV_FILENAME_W = 8 - CERT_STORE_PROV_FILENAME = CERT_STORE_PROV_FILENAME_W - CERT_STORE_PROV_SYSTEM_A = 9 - CERT_STORE_PROV_SYSTEM_W = 10 - CERT_STORE_PROV_SYSTEM = CERT_STORE_PROV_SYSTEM_W - CERT_STORE_PROV_COLLECTION = 11 - CERT_STORE_PROV_SYSTEM_REGISTRY_A = 12 - CERT_STORE_PROV_SYSTEM_REGISTRY_W = 13 - CERT_STORE_PROV_SYSTEM_REGISTRY = CERT_STORE_PROV_SYSTEM_REGISTRY_W - CERT_STORE_PROV_PHYSICAL_W = 14 - CERT_STORE_PROV_PHYSICAL = CERT_STORE_PROV_PHYSICAL_W - CERT_STORE_PROV_SMART_CARD_W = 15 - CERT_STORE_PROV_SMART_CARD = CERT_STORE_PROV_SMART_CARD_W - CERT_STORE_PROV_LDAP_W = 16 - CERT_STORE_PROV_LDAP = CERT_STORE_PROV_LDAP_W - CERT_STORE_PROV_PKCS12 = 17 - - /* store characteristics (low WORD of flag) for CertOpenStore function */ - CERT_STORE_NO_CRYPT_RELEASE_FLAG = 0x00000001 - CERT_STORE_SET_LOCALIZED_NAME_FLAG = 0x00000002 - CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG = 0x00000004 - CERT_STORE_DELETE_FLAG = 0x00000010 - CERT_STORE_UNSAFE_PHYSICAL_FLAG = 0x00000020 - CERT_STORE_SHARE_STORE_FLAG = 0x00000040 - CERT_STORE_SHARE_CONTEXT_FLAG = 0x00000080 - CERT_STORE_MANIFOLD_FLAG = 0x00000100 - CERT_STORE_ENUM_ARCHIVED_FLAG = 0x00000200 - CERT_STORE_UPDATE_KEYID_FLAG = 0x00000400 - CERT_STORE_BACKUP_RESTORE_FLAG = 0x00000800 - CERT_STORE_MAXIMUM_ALLOWED_FLAG = 0x00001000 - CERT_STORE_CREATE_NEW_FLAG = 0x00002000 - CERT_STORE_OPEN_EXISTING_FLAG = 0x00004000 - CERT_STORE_READONLY_FLAG = 0x00008000 - - /* store locations (high WORD of flag) for CertOpenStore function */ - CERT_SYSTEM_STORE_CURRENT_USER = 0x00010000 - CERT_SYSTEM_STORE_LOCAL_MACHINE = 0x00020000 - CERT_SYSTEM_STORE_CURRENT_SERVICE = 0x00040000 - CERT_SYSTEM_STORE_SERVICES = 0x00050000 - CERT_SYSTEM_STORE_USERS = 0x00060000 - CERT_SYSTEM_STORE_CURRENT_USER_GROUP_POLICY = 0x00070000 - CERT_SYSTEM_STORE_LOCAL_MACHINE_GROUP_POLICY = 0x00080000 - CERT_SYSTEM_STORE_LOCAL_MACHINE_ENTERPRISE = 0x00090000 - CERT_SYSTEM_STORE_UNPROTECTED_FLAG = 0x40000000 - CERT_SYSTEM_STORE_RELOCATE_FLAG = 0x80000000 - - /* Miscellaneous high-WORD flags for CertOpenStore function */ - CERT_REGISTRY_STORE_REMOTE_FLAG = 0x00010000 - CERT_REGISTRY_STORE_SERIALIZED_FLAG = 0x00020000 - CERT_REGISTRY_STORE_ROAMING_FLAG = 0x00040000 - CERT_REGISTRY_STORE_MY_IE_DIRTY_FLAG = 0x00080000 - CERT_REGISTRY_STORE_LM_GPT_FLAG = 0x01000000 - CERT_REGISTRY_STORE_CLIENT_GPT_FLAG = 0x80000000 - CERT_FILE_STORE_COMMIT_ENABLE_FLAG = 0x00010000 - CERT_LDAP_STORE_SIGN_FLAG = 0x00010000 - CERT_LDAP_STORE_AREC_EXCLUSIVE_FLAG = 0x00020000 - CERT_LDAP_STORE_OPENED_FLAG = 0x00040000 - CERT_LDAP_STORE_UNBIND_FLAG = 0x00080000 - - /* addDisposition values for CertAddCertificateContextToStore function */ - CERT_STORE_ADD_NEW = 1 - CERT_STORE_ADD_USE_EXISTING = 2 - CERT_STORE_ADD_REPLACE_EXISTING = 3 - CERT_STORE_ADD_ALWAYS = 4 - CERT_STORE_ADD_REPLACE_EXISTING_INHERIT_PROPERTIES = 5 - CERT_STORE_ADD_NEWER = 6 - CERT_STORE_ADD_NEWER_INHERIT_PROPERTIES = 7 - - /* ErrorStatus values for CertTrustStatus struct */ - CERT_TRUST_NO_ERROR = 0x00000000 - CERT_TRUST_IS_NOT_TIME_VALID = 0x00000001 - CERT_TRUST_IS_REVOKED = 0x00000004 - CERT_TRUST_IS_NOT_SIGNATURE_VALID = 0x00000008 - CERT_TRUST_IS_NOT_VALID_FOR_USAGE = 0x00000010 - CERT_TRUST_IS_UNTRUSTED_ROOT = 0x00000020 - CERT_TRUST_REVOCATION_STATUS_UNKNOWN = 0x00000040 
- CERT_TRUST_IS_CYCLIC = 0x00000080 - CERT_TRUST_INVALID_EXTENSION = 0x00000100 - CERT_TRUST_INVALID_POLICY_CONSTRAINTS = 0x00000200 - CERT_TRUST_INVALID_BASIC_CONSTRAINTS = 0x00000400 - CERT_TRUST_INVALID_NAME_CONSTRAINTS = 0x00000800 - CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT = 0x00001000 - CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT = 0x00002000 - CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT = 0x00004000 - CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT = 0x00008000 - CERT_TRUST_IS_PARTIAL_CHAIN = 0x00010000 - CERT_TRUST_CTL_IS_NOT_TIME_VALID = 0x00020000 - CERT_TRUST_CTL_IS_NOT_SIGNATURE_VALID = 0x00040000 - CERT_TRUST_CTL_IS_NOT_VALID_FOR_USAGE = 0x00080000 - CERT_TRUST_HAS_WEAK_SIGNATURE = 0x00100000 - CERT_TRUST_IS_OFFLINE_REVOCATION = 0x01000000 - CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY = 0x02000000 - CERT_TRUST_IS_EXPLICIT_DISTRUST = 0x04000000 - CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT = 0x08000000 - - /* InfoStatus values for CertTrustStatus struct */ - CERT_TRUST_HAS_EXACT_MATCH_ISSUER = 0x00000001 - CERT_TRUST_HAS_KEY_MATCH_ISSUER = 0x00000002 - CERT_TRUST_HAS_NAME_MATCH_ISSUER = 0x00000004 - CERT_TRUST_IS_SELF_SIGNED = 0x00000008 - CERT_TRUST_HAS_PREFERRED_ISSUER = 0x00000100 - CERT_TRUST_HAS_ISSUANCE_CHAIN_POLICY = 0x00000400 - CERT_TRUST_HAS_VALID_NAME_CONSTRAINTS = 0x00000400 - CERT_TRUST_IS_PEER_TRUSTED = 0x00000800 - CERT_TRUST_HAS_CRL_VALIDITY_EXTENDED = 0x00001000 - CERT_TRUST_IS_FROM_EXCLUSIVE_TRUST_STORE = 0x00002000 - CERT_TRUST_IS_CA_TRUSTED = 0x00004000 - CERT_TRUST_IS_COMPLEX_CHAIN = 0x00010000 - - /* policyOID values for CertVerifyCertificateChainPolicy function */ - CERT_CHAIN_POLICY_BASE = 1 - CERT_CHAIN_POLICY_AUTHENTICODE = 2 - CERT_CHAIN_POLICY_AUTHENTICODE_TS = 3 - CERT_CHAIN_POLICY_SSL = 4 - CERT_CHAIN_POLICY_BASIC_CONSTRAINTS = 5 - CERT_CHAIN_POLICY_NT_AUTH = 6 - CERT_CHAIN_POLICY_MICROSOFT_ROOT = 7 - CERT_CHAIN_POLICY_EV = 8 - CERT_CHAIN_POLICY_SSL_F12 = 9 - - /* AuthType values for SSLExtraCertChainPolicyPara struct */ - AUTHTYPE_CLIENT = 1 - AUTHTYPE_SERVER = 2 - - /* Checks values for SSLExtraCertChainPolicyPara struct */ - SECURITY_FLAG_IGNORE_REVOCATION = 0x00000080 - SECURITY_FLAG_IGNORE_UNKNOWN_CA = 0x00000100 - SECURITY_FLAG_IGNORE_WRONG_USAGE = 0x00000200 - SECURITY_FLAG_IGNORE_CERT_CN_INVALID = 0x00001000 - SECURITY_FLAG_IGNORE_CERT_DATE_INVALID = 0x00002000 -) - -const ( - // flags for SetErrorMode - SEM_FAILCRITICALERRORS = 0x0001 - SEM_NOALIGNMENTFAULTEXCEPT = 0x0004 - SEM_NOGPFAULTERRORBOX = 0x0002 - SEM_NOOPENFILEERRORBOX = 0x8000 -) - -const ( - // Priority class. - ABOVE_NORMAL_PRIORITY_CLASS = 0x00008000 - BELOW_NORMAL_PRIORITY_CLASS = 0x00004000 - HIGH_PRIORITY_CLASS = 0x00000080 - IDLE_PRIORITY_CLASS = 0x00000040 - NORMAL_PRIORITY_CLASS = 0x00000020 - PROCESS_MODE_BACKGROUND_BEGIN = 0x00100000 - PROCESS_MODE_BACKGROUND_END = 0x00200000 - REALTIME_PRIORITY_CLASS = 0x00000100 -) - -var ( - OID_PKIX_KP_SERVER_AUTH = []byte("1.3.6.1.5.5.7.3.1\x00") - OID_SERVER_GATED_CRYPTO = []byte("1.3.6.1.4.1.311.10.3.3\x00") - OID_SGC_NETSCAPE = []byte("2.16.840.1.113730.4.1\x00") -) - -// Pointer represents a pointer to an arbitrary Windows type. -// -// Pointer-typed fields may point to one of many different types. It's -// up to the caller to provide a pointer to the appropriate type, cast -// to Pointer. The caller must obey the unsafe.Pointer rules while -// doing so. -type Pointer *struct{} - -// Invented values to support what package os expects. 
-type Timeval struct { - Sec int32 - Usec int32 -} - -func (tv *Timeval) Nanoseconds() int64 { - return (int64(tv.Sec)*1e6 + int64(tv.Usec)) * 1e3 -} - -func NsecToTimeval(nsec int64) (tv Timeval) { - tv.Sec = int32(nsec / 1e9) - tv.Usec = int32(nsec % 1e9 / 1e3) - return -} - -type Overlapped struct { - Internal uintptr - InternalHigh uintptr - Offset uint32 - OffsetHigh uint32 - HEvent Handle -} - -type FileNotifyInformation struct { - NextEntryOffset uint32 - Action uint32 - FileNameLength uint32 - FileName uint16 -} - -type Filetime struct { - LowDateTime uint32 - HighDateTime uint32 -} - -// Nanoseconds returns Filetime ft in nanoseconds -// since Epoch (00:00:00 UTC, January 1, 1970). -func (ft *Filetime) Nanoseconds() int64 { - // 100-nanosecond intervals since January 1, 1601 - nsec := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime) - // change starting time to the Epoch (00:00:00 UTC, January 1, 1970) - nsec -= 116444736000000000 - // convert into nanoseconds - nsec *= 100 - return nsec -} - -func NsecToFiletime(nsec int64) (ft Filetime) { - // convert into 100-nanosecond - nsec /= 100 - // change starting time to January 1, 1601 - nsec += 116444736000000000 - // split into high / low - ft.LowDateTime = uint32(nsec & 0xffffffff) - ft.HighDateTime = uint32(nsec >> 32 & 0xffffffff) - return ft -} - -type Win32finddata struct { - FileAttributes uint32 - CreationTime Filetime - LastAccessTime Filetime - LastWriteTime Filetime - FileSizeHigh uint32 - FileSizeLow uint32 - Reserved0 uint32 - Reserved1 uint32 - FileName [MAX_PATH - 1]uint16 - AlternateFileName [13]uint16 -} - -// This is the actual system call structure. -// Win32finddata is what we committed to in Go 1. -type win32finddata1 struct { - FileAttributes uint32 - CreationTime Filetime - LastAccessTime Filetime - LastWriteTime Filetime - FileSizeHigh uint32 - FileSizeLow uint32 - Reserved0 uint32 - Reserved1 uint32 - FileName [MAX_PATH]uint16 - AlternateFileName [14]uint16 -} - -func copyFindData(dst *Win32finddata, src *win32finddata1) { - dst.FileAttributes = src.FileAttributes - dst.CreationTime = src.CreationTime - dst.LastAccessTime = src.LastAccessTime - dst.LastWriteTime = src.LastWriteTime - dst.FileSizeHigh = src.FileSizeHigh - dst.FileSizeLow = src.FileSizeLow - dst.Reserved0 = src.Reserved0 - dst.Reserved1 = src.Reserved1 - - // The src is 1 element bigger than dst, but it must be NUL. 
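// A compact sketch of the Filetime <-> Unix-nanoseconds conversion used
// above: Filetime counts 100ns ticks since 1601-01-01, so the fixed offset
// 116444736000000000 shifts the epoch to 1970-01-01. The timestamp below is
// an arbitrary illustrative value.
package main

import "fmt"

type Filetime struct {
	LowDateTime  uint32
	HighDateTime uint32
}

const epochShift = 116444736000000000 // 100ns ticks between 1601 and 1970

func (ft Filetime) Nanoseconds() int64 {
	ticks := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime)
	return (ticks - epochShift) * 100 // rebase to 1970, scale to ns
}

func NsecToFiletime(nsec int64) Filetime {
	ticks := nsec/100 + epochShift // scale to 100ns ticks, rebase to 1601
	return Filetime{LowDateTime: uint32(ticks), HighDateTime: uint32(ticks >> 32)}
}

func main() {
	const nsec = 1_700_000_000_000_000_000 // an arbitrary 2023 timestamp
	ft := NsecToFiletime(nsec)
	fmt.Println(ft.Nanoseconds() == nsec) // true: the round trip is exact
}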
- copy(dst.FileName[:], src.FileName[:]) - copy(dst.AlternateFileName[:], src.AlternateFileName[:]) -} - -type ByHandleFileInformation struct { - FileAttributes uint32 - CreationTime Filetime - LastAccessTime Filetime - LastWriteTime Filetime - VolumeSerialNumber uint32 - FileSizeHigh uint32 - FileSizeLow uint32 - NumberOfLinks uint32 - FileIndexHigh uint32 - FileIndexLow uint32 -} - -const ( - GetFileExInfoStandard = 0 - GetFileExMaxInfoLevel = 1 -) - -type Win32FileAttributeData struct { - FileAttributes uint32 - CreationTime Filetime - LastAccessTime Filetime - LastWriteTime Filetime - FileSizeHigh uint32 - FileSizeLow uint32 -} - -// ShowWindow constants -const ( - // winuser.h - SW_HIDE = 0 - SW_NORMAL = 1 - SW_SHOWNORMAL = 1 - SW_SHOWMINIMIZED = 2 - SW_SHOWMAXIMIZED = 3 - SW_MAXIMIZE = 3 - SW_SHOWNOACTIVATE = 4 - SW_SHOW = 5 - SW_MINIMIZE = 6 - SW_SHOWMINNOACTIVE = 7 - SW_SHOWNA = 8 - SW_RESTORE = 9 - SW_SHOWDEFAULT = 10 - SW_FORCEMINIMIZE = 11 -) - -type StartupInfo struct { - Cb uint32 - _ *uint16 - Desktop *uint16 - Title *uint16 - X uint32 - Y uint32 - XSize uint32 - YSize uint32 - XCountChars uint32 - YCountChars uint32 - FillAttribute uint32 - Flags uint32 - ShowWindow uint16 - _ uint16 - _ *byte - StdInput Handle - StdOutput Handle - StdErr Handle -} - -type ProcessInformation struct { - Process Handle - Thread Handle - ProcessId uint32 - ThreadId uint32 -} - -type ProcessEntry32 struct { - Size uint32 - Usage uint32 - ProcessID uint32 - DefaultHeapID uintptr - ModuleID uint32 - Threads uint32 - ParentProcessID uint32 - PriClassBase int32 - Flags uint32 - ExeFile [MAX_PATH]uint16 -} - -type ThreadEntry32 struct { - Size uint32 - Usage uint32 - ThreadID uint32 - OwnerProcessID uint32 - BasePri int32 - DeltaPri int32 - Flags uint32 -} - -type Systemtime struct { - Year uint16 - Month uint16 - DayOfWeek uint16 - Day uint16 - Hour uint16 - Minute uint16 - Second uint16 - Milliseconds uint16 -} - -type Timezoneinformation struct { - Bias int32 - StandardName [32]uint16 - StandardDate Systemtime - StandardBias int32 - DaylightName [32]uint16 - DaylightDate Systemtime - DaylightBias int32 -} - -// Socket related. - -const ( - AF_UNSPEC = 0 - AF_UNIX = 1 - AF_INET = 2 - AF_NETBIOS = 17 - AF_INET6 = 23 - AF_IRDA = 26 - AF_BTH = 32 - - SOCK_STREAM = 1 - SOCK_DGRAM = 2 - SOCK_RAW = 3 - SOCK_RDM = 4 - SOCK_SEQPACKET = 5 - - IPPROTO_IP = 0 - IPPROTO_ICMP = 1 - IPPROTO_IGMP = 2 - BTHPROTO_RFCOMM = 3 - IPPROTO_TCP = 6 - IPPROTO_UDP = 17 - IPPROTO_IPV6 = 41 - IPPROTO_ICMPV6 = 58 - IPPROTO_RM = 113 - - SOL_SOCKET = 0xffff - SO_REUSEADDR = 4 - SO_KEEPALIVE = 8 - SO_DONTROUTE = 16 - SO_BROADCAST = 32 - SO_LINGER = 128 - SO_RCVBUF = 0x1002 - SO_RCVTIMEO = 0x1006 - SO_SNDBUF = 0x1001 - SO_UPDATE_ACCEPT_CONTEXT = 0x700b - SO_UPDATE_CONNECT_CONTEXT = 0x7010 - - IOC_OUT = 0x40000000 - IOC_IN = 0x80000000 - IOC_VENDOR = 0x18000000 - IOC_INOUT = IOC_IN | IOC_OUT - IOC_WS2 = 0x08000000 - SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 - SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 - SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 - - // cf. 
http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 - - IP_TOS = 0x3 - IP_TTL = 0x4 - IP_MULTICAST_IF = 0x9 - IP_MULTICAST_TTL = 0xa - IP_MULTICAST_LOOP = 0xb - IP_ADD_MEMBERSHIP = 0xc - IP_DROP_MEMBERSHIP = 0xd - - IPV6_V6ONLY = 0x1b - IPV6_UNICAST_HOPS = 0x4 - IPV6_MULTICAST_IF = 0x9 - IPV6_MULTICAST_HOPS = 0xa - IPV6_MULTICAST_LOOP = 0xb - IPV6_JOIN_GROUP = 0xc - IPV6_LEAVE_GROUP = 0xd - - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_DONTROUTE = 0x4 - MSG_WAITALL = 0x8 - - MSG_TRUNC = 0x0100 - MSG_CTRUNC = 0x0200 - MSG_BCAST = 0x0400 - MSG_MCAST = 0x0800 - - SOMAXCONN = 0x7fffffff - - TCP_NODELAY = 1 - - SHUT_RD = 0 - SHUT_WR = 1 - SHUT_RDWR = 2 - - WSADESCRIPTION_LEN = 256 - WSASYS_STATUS_LEN = 128 -) - -type WSABuf struct { - Len uint32 - Buf *byte -} - -type WSAMsg struct { - Name *syscall.RawSockaddrAny - Namelen int32 - Buffers *WSABuf - BufferCount uint32 - Control WSABuf - Flags uint32 -} - -// Invented values to support what package os expects. -const ( - S_IFMT = 0x1f000 - S_IFIFO = 0x1000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFBLK = 0x6000 - S_IFREG = 0x8000 - S_IFLNK = 0xa000 - S_IFSOCK = 0xc000 - S_ISUID = 0x800 - S_ISGID = 0x400 - S_ISVTX = 0x200 - S_IRUSR = 0x100 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXUSR = 0x40 -) - -const ( - FILE_TYPE_CHAR = 0x0002 - FILE_TYPE_DISK = 0x0001 - FILE_TYPE_PIPE = 0x0003 - FILE_TYPE_REMOTE = 0x8000 - FILE_TYPE_UNKNOWN = 0x0000 -) - -type Hostent struct { - Name *byte - Aliases **byte - AddrType uint16 - Length uint16 - AddrList **byte -} - -type Protoent struct { - Name *byte - Aliases **byte - Proto uint16 -} - -const ( - DNS_TYPE_A = 0x0001 - DNS_TYPE_NS = 0x0002 - DNS_TYPE_MD = 0x0003 - DNS_TYPE_MF = 0x0004 - DNS_TYPE_CNAME = 0x0005 - DNS_TYPE_SOA = 0x0006 - DNS_TYPE_MB = 0x0007 - DNS_TYPE_MG = 0x0008 - DNS_TYPE_MR = 0x0009 - DNS_TYPE_NULL = 0x000a - DNS_TYPE_WKS = 0x000b - DNS_TYPE_PTR = 0x000c - DNS_TYPE_HINFO = 0x000d - DNS_TYPE_MINFO = 0x000e - DNS_TYPE_MX = 0x000f - DNS_TYPE_TEXT = 0x0010 - DNS_TYPE_RP = 0x0011 - DNS_TYPE_AFSDB = 0x0012 - DNS_TYPE_X25 = 0x0013 - DNS_TYPE_ISDN = 0x0014 - DNS_TYPE_RT = 0x0015 - DNS_TYPE_NSAP = 0x0016 - DNS_TYPE_NSAPPTR = 0x0017 - DNS_TYPE_SIG = 0x0018 - DNS_TYPE_KEY = 0x0019 - DNS_TYPE_PX = 0x001a - DNS_TYPE_GPOS = 0x001b - DNS_TYPE_AAAA = 0x001c - DNS_TYPE_LOC = 0x001d - DNS_TYPE_NXT = 0x001e - DNS_TYPE_EID = 0x001f - DNS_TYPE_NIMLOC = 0x0020 - DNS_TYPE_SRV = 0x0021 - DNS_TYPE_ATMA = 0x0022 - DNS_TYPE_NAPTR = 0x0023 - DNS_TYPE_KX = 0x0024 - DNS_TYPE_CERT = 0x0025 - DNS_TYPE_A6 = 0x0026 - DNS_TYPE_DNAME = 0x0027 - DNS_TYPE_SINK = 0x0028 - DNS_TYPE_OPT = 0x0029 - DNS_TYPE_DS = 0x002B - DNS_TYPE_RRSIG = 0x002E - DNS_TYPE_NSEC = 0x002F - DNS_TYPE_DNSKEY = 0x0030 - DNS_TYPE_DHCID = 0x0031 - DNS_TYPE_UINFO = 0x0064 - DNS_TYPE_UID = 0x0065 - DNS_TYPE_GID = 0x0066 - DNS_TYPE_UNSPEC = 0x0067 - DNS_TYPE_ADDRS = 0x00f8 - DNS_TYPE_TKEY = 0x00f9 - DNS_TYPE_TSIG = 0x00fa - DNS_TYPE_IXFR = 0x00fb - DNS_TYPE_AXFR = 0x00fc - DNS_TYPE_MAILB = 0x00fd - DNS_TYPE_MAILA = 0x00fe - DNS_TYPE_ALL = 0x00ff - DNS_TYPE_ANY = 0x00ff - DNS_TYPE_WINS = 0xff01 - DNS_TYPE_WINSR = 0xff02 - DNS_TYPE_NBSTAT = 0xff01 -) - -const ( - // flags inside DNSRecord.Dw - DnsSectionQuestion = 0x0000 - DnsSectionAnswer = 0x0001 - DnsSectionAuthority = 0x0002 - DnsSectionAdditional = 0x0003 -) - -type DNSSRVData struct { - Target *uint16 - Priority uint16 - Weight uint16 - Port uint16 - Pad uint16 -} - -type DNSPTRData struct { - Host *uint16 -} - -type DNSMXData struct { - NameExchange *uint16 - Preference uint16 - Pad uint16 -} - -type 
DNSTXTData struct { - StringCount uint16 - StringArray [1]*uint16 -} - -type DNSRecord struct { - Next *DNSRecord - Name *uint16 - Type uint16 - Length uint16 - Dw uint32 - Ttl uint32 - Reserved uint32 - Data [40]byte -} - -const ( - TF_DISCONNECT = 1 - TF_REUSE_SOCKET = 2 - TF_WRITE_BEHIND = 4 - TF_USE_DEFAULT_WORKER = 0 - TF_USE_SYSTEM_THREAD = 16 - TF_USE_KERNEL_APC = 32 -) - -type TransmitFileBuffers struct { - Head uintptr - HeadLength uint32 - Tail uintptr - TailLength uint32 -} - -const ( - IFF_UP = 1 - IFF_BROADCAST = 2 - IFF_LOOPBACK = 4 - IFF_POINTTOPOINT = 8 - IFF_MULTICAST = 16 -) - -const SIO_GET_INTERFACE_LIST = 0x4004747F - -// TODO(mattn): SockaddrGen is union of sockaddr/sockaddr_in/sockaddr_in6_old. -// will be fixed to change variable type as suitable. - -type SockaddrGen [24]byte - -type InterfaceInfo struct { - Flags uint32 - Address SockaddrGen - BroadcastAddress SockaddrGen - Netmask SockaddrGen -} - -type IpAddressString struct { - String [16]byte -} - -type IpMaskString IpAddressString - -type IpAddrString struct { - Next *IpAddrString - IpAddress IpAddressString - IpMask IpMaskString - Context uint32 -} - -const MAX_ADAPTER_NAME_LENGTH = 256 -const MAX_ADAPTER_DESCRIPTION_LENGTH = 128 -const MAX_ADAPTER_ADDRESS_LENGTH = 8 - -type IpAdapterInfo struct { - Next *IpAdapterInfo - ComboIndex uint32 - AdapterName [MAX_ADAPTER_NAME_LENGTH + 4]byte - Description [MAX_ADAPTER_DESCRIPTION_LENGTH + 4]byte - AddressLength uint32 - Address [MAX_ADAPTER_ADDRESS_LENGTH]byte - Index uint32 - Type uint32 - DhcpEnabled uint32 - CurrentIpAddress *IpAddrString - IpAddressList IpAddrString - GatewayList IpAddrString - DhcpServer IpAddrString - HaveWins bool - PrimaryWinsServer IpAddrString - SecondaryWinsServer IpAddrString - LeaseObtained int64 - LeaseExpires int64 -} - -const MAXLEN_PHYSADDR = 8 -const MAX_INTERFACE_NAME_LEN = 256 -const MAXLEN_IFDESCR = 256 - -type MibIfRow struct { - Name [MAX_INTERFACE_NAME_LEN]uint16 - Index uint32 - Type uint32 - Mtu uint32 - Speed uint32 - PhysAddrLen uint32 - PhysAddr [MAXLEN_PHYSADDR]byte - AdminStatus uint32 - OperStatus uint32 - LastChange uint32 - InOctets uint32 - InUcastPkts uint32 - InNUcastPkts uint32 - InDiscards uint32 - InErrors uint32 - InUnknownProtos uint32 - OutOctets uint32 - OutUcastPkts uint32 - OutNUcastPkts uint32 - OutDiscards uint32 - OutErrors uint32 - OutQLen uint32 - DescrLen uint32 - Descr [MAXLEN_IFDESCR]byte -} - -type CertInfo struct { - // Not implemented -} - -type CertContext struct { - EncodingType uint32 - EncodedCert *byte - Length uint32 - CertInfo *CertInfo - Store Handle -} - -type CertChainContext struct { - Size uint32 - TrustStatus CertTrustStatus - ChainCount uint32 - Chains **CertSimpleChain - LowerQualityChainCount uint32 - LowerQualityChains **CertChainContext - HasRevocationFreshnessTime uint32 - RevocationFreshnessTime uint32 -} - -type CertTrustListInfo struct { - // Not implemented -} - -type CertSimpleChain struct { - Size uint32 - TrustStatus CertTrustStatus - NumElements uint32 - Elements **CertChainElement - TrustListInfo *CertTrustListInfo - HasRevocationFreshnessTime uint32 - RevocationFreshnessTime uint32 -} - -type CertChainElement struct { - Size uint32 - CertContext *CertContext - TrustStatus CertTrustStatus - RevocationInfo *CertRevocationInfo - IssuanceUsage *CertEnhKeyUsage - ApplicationUsage *CertEnhKeyUsage - ExtendedErrorInfo *uint16 -} - -type CertRevocationCrlInfo struct { - // Not implemented -} - -type CertRevocationInfo struct { - Size uint32 - RevocationResult uint32 - 
RevocationOid *byte - OidSpecificInfo Pointer - HasFreshnessTime uint32 - FreshnessTime uint32 - CrlInfo *CertRevocationCrlInfo -} - -type CertTrustStatus struct { - ErrorStatus uint32 - InfoStatus uint32 -} - -type CertUsageMatch struct { - Type uint32 - Usage CertEnhKeyUsage -} - -type CertEnhKeyUsage struct { - Length uint32 - UsageIdentifiers **byte -} - -type CertChainPara struct { - Size uint32 - RequestedUsage CertUsageMatch - RequstedIssuancePolicy CertUsageMatch - URLRetrievalTimeout uint32 - CheckRevocationFreshnessTime uint32 - RevocationFreshnessTime uint32 - CacheResync *Filetime -} - -type CertChainPolicyPara struct { - Size uint32 - Flags uint32 - ExtraPolicyPara Pointer -} - -type SSLExtraCertChainPolicyPara struct { - Size uint32 - AuthType uint32 - Checks uint32 - ServerName *uint16 -} - -type CertChainPolicyStatus struct { - Size uint32 - Error uint32 - ChainIndex uint32 - ElementIndex uint32 - ExtraPolicyStatus Pointer -} - -const ( - // do not reorder - HKEY_CLASSES_ROOT = 0x80000000 + iota - HKEY_CURRENT_USER - HKEY_LOCAL_MACHINE - HKEY_USERS - HKEY_PERFORMANCE_DATA - HKEY_CURRENT_CONFIG - HKEY_DYN_DATA - - KEY_QUERY_VALUE = 1 - KEY_SET_VALUE = 2 - KEY_CREATE_SUB_KEY = 4 - KEY_ENUMERATE_SUB_KEYS = 8 - KEY_NOTIFY = 16 - KEY_CREATE_LINK = 32 - KEY_WRITE = 0x20006 - KEY_EXECUTE = 0x20019 - KEY_READ = 0x20019 - KEY_WOW64_64KEY = 0x0100 - KEY_WOW64_32KEY = 0x0200 - KEY_ALL_ACCESS = 0xf003f -) - -const ( - // do not reorder - REG_NONE = iota - REG_SZ - REG_EXPAND_SZ - REG_BINARY - REG_DWORD_LITTLE_ENDIAN - REG_DWORD_BIG_ENDIAN - REG_LINK - REG_MULTI_SZ - REG_RESOURCE_LIST - REG_FULL_RESOURCE_DESCRIPTOR - REG_RESOURCE_REQUIREMENTS_LIST - REG_QWORD_LITTLE_ENDIAN - REG_DWORD = REG_DWORD_LITTLE_ENDIAN - REG_QWORD = REG_QWORD_LITTLE_ENDIAN -) - -const ( - EVENT_MODIFY_STATE = 0x0002 - EVENT_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3 - - MUTANT_QUERY_STATE = 0x0001 - MUTANT_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | MUTANT_QUERY_STATE - - SEMAPHORE_MODIFY_STATE = 0x0002 - SEMAPHORE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3 - - TIMER_QUERY_STATE = 0x0001 - TIMER_MODIFY_STATE = 0x0002 - TIMER_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | TIMER_QUERY_STATE | TIMER_MODIFY_STATE - - MUTEX_MODIFY_STATE = MUTANT_QUERY_STATE - MUTEX_ALL_ACCESS = MUTANT_ALL_ACCESS - - CREATE_EVENT_MANUAL_RESET = 0x1 - CREATE_EVENT_INITIAL_SET = 0x2 - CREATE_MUTEX_INITIAL_OWNER = 0x1 -) - -type AddrinfoW struct { - Flags int32 - Family int32 - Socktype int32 - Protocol int32 - Addrlen uintptr - Canonname *uint16 - Addr uintptr - Next *AddrinfoW -} - -const ( - AI_PASSIVE = 1 - AI_CANONNAME = 2 - AI_NUMERICHOST = 4 -) - -type GUID struct { - Data1 uint32 - Data2 uint16 - Data3 uint16 - Data4 [8]byte -} - -var WSAID_CONNECTEX = GUID{ - 0x25a207b9, - 0xddf3, - 0x4660, - [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}, -} - -var WSAID_WSASENDMSG = GUID{ - 0xa441e712, - 0x754f, - 0x43ca, - [8]byte{0x84, 0xa7, 0x0d, 0xee, 0x44, 0xcf, 0x60, 0x6d}, -} - -var WSAID_WSARECVMSG = GUID{ - 0xf689d7c8, - 0x6f1f, - 0x436b, - [8]byte{0x8a, 0x53, 0xe5, 0x4f, 0xe3, 0x51, 0xc3, 0x22}, -} - -const ( - FILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 - FILE_SKIP_SET_EVENT_ON_HANDLE = 2 -) - -const ( - WSAPROTOCOL_LEN = 255 - MAX_PROTOCOL_CHAIN = 7 - BASE_PROTOCOL = 1 - LAYERED_PROTOCOL = 0 - - XP1_CONNECTIONLESS = 0x00000001 - XP1_GUARANTEED_DELIVERY = 0x00000002 - XP1_GUARANTEED_ORDER = 0x00000004 - XP1_MESSAGE_ORIENTED = 0x00000008 - XP1_PSEUDO_STREAM = 0x00000010 - 
XP1_GRACEFUL_CLOSE = 0x00000020 - XP1_EXPEDITED_DATA = 0x00000040 - XP1_CONNECT_DATA = 0x00000080 - XP1_DISCONNECT_DATA = 0x00000100 - XP1_SUPPORT_BROADCAST = 0x00000200 - XP1_SUPPORT_MULTIPOINT = 0x00000400 - XP1_MULTIPOINT_CONTROL_PLANE = 0x00000800 - XP1_MULTIPOINT_DATA_PLANE = 0x00001000 - XP1_QOS_SUPPORTED = 0x00002000 - XP1_UNI_SEND = 0x00008000 - XP1_UNI_RECV = 0x00010000 - XP1_IFS_HANDLES = 0x00020000 - XP1_PARTIAL_MESSAGE = 0x00040000 - XP1_SAN_SUPPORT_SDP = 0x00080000 - - PFL_MULTIPLE_PROTO_ENTRIES = 0x00000001 - PFL_RECOMMENDED_PROTO_ENTRY = 0x00000002 - PFL_HIDDEN = 0x00000004 - PFL_MATCHES_PROTOCOL_ZERO = 0x00000008 - PFL_NETWORKDIRECT_PROVIDER = 0x00000010 -) - -type WSAProtocolInfo struct { - ServiceFlags1 uint32 - ServiceFlags2 uint32 - ServiceFlags3 uint32 - ServiceFlags4 uint32 - ProviderFlags uint32 - ProviderId GUID - CatalogEntryId uint32 - ProtocolChain WSAProtocolChain - Version int32 - AddressFamily int32 - MaxSockAddr int32 - MinSockAddr int32 - SocketType int32 - Protocol int32 - ProtocolMaxOffset int32 - NetworkByteOrder int32 - SecurityScheme int32 - MessageSize uint32 - ProviderReserved uint32 - ProtocolName [WSAPROTOCOL_LEN + 1]uint16 -} - -type WSAProtocolChain struct { - ChainLen int32 - ChainEntries [MAX_PROTOCOL_CHAIN]uint32 -} - -type TCPKeepalive struct { - OnOff uint32 - Time uint32 - Interval uint32 -} - -type symbolicLinkReparseBuffer struct { - SubstituteNameOffset uint16 - SubstituteNameLength uint16 - PrintNameOffset uint16 - PrintNameLength uint16 - Flags uint32 - PathBuffer [1]uint16 -} - -type mountPointReparseBuffer struct { - SubstituteNameOffset uint16 - SubstituteNameLength uint16 - PrintNameOffset uint16 - PrintNameLength uint16 - PathBuffer [1]uint16 -} - -type reparseDataBuffer struct { - ReparseTag uint32 - ReparseDataLength uint16 - Reserved uint16 - - // GenericReparseBuffer - reparseBuffer byte -} - -const ( - FSCTL_GET_REPARSE_POINT = 0x900A8 - MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16 * 1024 - IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003 - IO_REPARSE_TAG_SYMLINK = 0xA000000C - SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1 -) - -const ( - ComputerNameNetBIOS = 0 - ComputerNameDnsHostname = 1 - ComputerNameDnsDomain = 2 - ComputerNameDnsFullyQualified = 3 - ComputerNamePhysicalNetBIOS = 4 - ComputerNamePhysicalDnsHostname = 5 - ComputerNamePhysicalDnsDomain = 6 - ComputerNamePhysicalDnsFullyQualified = 7 - ComputerNameMax = 8 -) - -// For MessageBox() -const ( - MB_OK = 0x00000000 - MB_OKCANCEL = 0x00000001 - MB_ABORTRETRYIGNORE = 0x00000002 - MB_YESNOCANCEL = 0x00000003 - MB_YESNO = 0x00000004 - MB_RETRYCANCEL = 0x00000005 - MB_CANCELTRYCONTINUE = 0x00000006 - MB_ICONHAND = 0x00000010 - MB_ICONQUESTION = 0x00000020 - MB_ICONEXCLAMATION = 0x00000030 - MB_ICONASTERISK = 0x00000040 - MB_USERICON = 0x00000080 - MB_ICONWARNING = MB_ICONEXCLAMATION - MB_ICONERROR = MB_ICONHAND - MB_ICONINFORMATION = MB_ICONASTERISK - MB_ICONSTOP = MB_ICONHAND - MB_DEFBUTTON1 = 0x00000000 - MB_DEFBUTTON2 = 0x00000100 - MB_DEFBUTTON3 = 0x00000200 - MB_DEFBUTTON4 = 0x00000300 - MB_APPLMODAL = 0x00000000 - MB_SYSTEMMODAL = 0x00001000 - MB_TASKMODAL = 0x00002000 - MB_HELP = 0x00004000 - MB_NOFOCUS = 0x00008000 - MB_SETFOREGROUND = 0x00010000 - MB_DEFAULT_DESKTOP_ONLY = 0x00020000 - MB_TOPMOST = 0x00040000 - MB_RIGHT = 0x00080000 - MB_RTLREADING = 0x00100000 - MB_SERVICE_NOTIFICATION = 0x00200000 -) - -const ( - MOVEFILE_REPLACE_EXISTING = 0x1 - MOVEFILE_COPY_ALLOWED = 0x2 - MOVEFILE_DELAY_UNTIL_REBOOT = 0x4 - MOVEFILE_WRITE_THROUGH = 0x8 - MOVEFILE_CREATE_HARDLINK = 0x10 - 
-	MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20
-)
-
-const GAA_FLAG_INCLUDE_PREFIX = 0x00000010
-
-const (
-	IF_TYPE_OTHER = 1
-	IF_TYPE_ETHERNET_CSMACD = 6
-	IF_TYPE_ISO88025_TOKENRING = 9
-	IF_TYPE_PPP = 23
-	IF_TYPE_SOFTWARE_LOOPBACK = 24
-	IF_TYPE_ATM = 37
-	IF_TYPE_IEEE80211 = 71
-	IF_TYPE_TUNNEL = 131
-	IF_TYPE_IEEE1394 = 144
-)
-
-type SocketAddress struct {
-	Sockaddr *syscall.RawSockaddrAny
-	SockaddrLength int32
-}
-
-// IP returns an IPv4 or IPv6 address, or nil if the underlying SocketAddress is neither.
-func (addr *SocketAddress) IP() net.IP {
-	if uintptr(addr.SockaddrLength) >= unsafe.Sizeof(RawSockaddrInet4{}) && addr.Sockaddr.Addr.Family == AF_INET {
-		return (*RawSockaddrInet4)(unsafe.Pointer(addr.Sockaddr)).Addr[:]
-	} else if uintptr(addr.SockaddrLength) >= unsafe.Sizeof(RawSockaddrInet6{}) && addr.Sockaddr.Addr.Family == AF_INET6 {
-		return (*RawSockaddrInet6)(unsafe.Pointer(addr.Sockaddr)).Addr[:]
-	}
-	return nil
-}
-
-type IpAdapterUnicastAddress struct {
-	Length uint32
-	Flags uint32
-	Next *IpAdapterUnicastAddress
-	Address SocketAddress
-	PrefixOrigin int32
-	SuffixOrigin int32
-	DadState int32
-	ValidLifetime uint32
-	PreferredLifetime uint32
-	LeaseLifetime uint32
-	OnLinkPrefixLength uint8
-}
-
-type IpAdapterAnycastAddress struct {
-	Length uint32
-	Flags uint32
-	Next *IpAdapterAnycastAddress
-	Address SocketAddress
-}
-
-type IpAdapterMulticastAddress struct {
-	Length uint32
-	Flags uint32
-	Next *IpAdapterMulticastAddress
-	Address SocketAddress
-}
-
-type IpAdapterDnsServerAdapter struct {
-	Length uint32
-	Reserved uint32
-	Next *IpAdapterDnsServerAdapter
-	Address SocketAddress
-}
-
-type IpAdapterPrefix struct {
-	Length uint32
-	Flags uint32
-	Next *IpAdapterPrefix
-	Address SocketAddress
-	PrefixLength uint32
-}
-
-type IpAdapterAddresses struct {
-	Length uint32
-	IfIndex uint32
-	Next *IpAdapterAddresses
-	AdapterName *byte
-	FirstUnicastAddress *IpAdapterUnicastAddress
-	FirstAnycastAddress *IpAdapterAnycastAddress
-	FirstMulticastAddress *IpAdapterMulticastAddress
-	FirstDnsServerAddress *IpAdapterDnsServerAdapter
-	DnsSuffix *uint16
-	Description *uint16
-	FriendlyName *uint16
-	PhysicalAddress [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte
-	PhysicalAddressLength uint32
-	Flags uint32
-	Mtu uint32
-	IfType uint32
-	OperStatus uint32
-	Ipv6IfIndex uint32
-	ZoneIndices [16]uint32
-	FirstPrefix *IpAdapterPrefix
-	/* more fields might be present here. */
-}
-
-const (
-	IfOperStatusUp = 1
-	IfOperStatusDown = 2
-	IfOperStatusTesting = 3
-	IfOperStatusUnknown = 4
-	IfOperStatusDormant = 5
-	IfOperStatusNotPresent = 6
-	IfOperStatusLowerLayerDown = 7
-)
-
-// Console related constants used for the mode parameter to SetConsoleMode. See
-// https://docs.microsoft.com/en-us/windows/console/setconsolemode for details.
-
-const (
-	ENABLE_PROCESSED_INPUT = 0x1
-	ENABLE_LINE_INPUT = 0x2
-	ENABLE_ECHO_INPUT = 0x4
-	ENABLE_WINDOW_INPUT = 0x8
-	ENABLE_MOUSE_INPUT = 0x10
-	ENABLE_INSERT_MODE = 0x20
-	ENABLE_QUICK_EDIT_MODE = 0x40
-	ENABLE_EXTENDED_FLAGS = 0x80
-	ENABLE_AUTO_POSITION = 0x100
-	ENABLE_VIRTUAL_TERMINAL_INPUT = 0x200
-
-	ENABLE_PROCESSED_OUTPUT = 0x1
-	ENABLE_WRAP_AT_EOL_OUTPUT = 0x2
-	ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4
-	DISABLE_NEWLINE_AUTO_RETURN = 0x8
-	ENABLE_LVB_GRID_WORLDWIDE = 0x10
-)
-
-type Coord struct {
-	X int16
-	Y int16
-}
-
-type SmallRect struct {
-	Left int16
-	Top int16
-	Right int16
-	Bottom int16
-}
-
-// Used with GetConsoleScreenBuffer to retrieve information about a console
-// screen buffer. See
-// https://docs.microsoft.com/en-us/windows/console/console-screen-buffer-info-str
-// for details.
-
-type ConsoleScreenBufferInfo struct {
-	Size Coord
-	CursorPosition Coord
-	Attributes uint16
-	Window SmallRect
-	MaximumWindowSize Coord
-}
-
-const UNIX_PATH_MAX = 108 // defined in afunix.h
-
-const (
-	// flags for JOBOBJECT_BASIC_LIMIT_INFORMATION.LimitFlags
-	JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 0x00000008
-	JOB_OBJECT_LIMIT_AFFINITY = 0x00000010
-	JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800
-	JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION = 0x00000400
-	JOB_OBJECT_LIMIT_JOB_MEMORY = 0x00000200
-	JOB_OBJECT_LIMIT_JOB_TIME = 0x00000004
-	JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x00002000
-	JOB_OBJECT_LIMIT_PRESERVE_JOB_TIME = 0x00000040
-	JOB_OBJECT_LIMIT_PRIORITY_CLASS = 0x00000020
-	JOB_OBJECT_LIMIT_PROCESS_MEMORY = 0x00000100
-	JOB_OBJECT_LIMIT_PROCESS_TIME = 0x00000002
-	JOB_OBJECT_LIMIT_SCHEDULING_CLASS = 0x00000080
-	JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK = 0x00001000
-	JOB_OBJECT_LIMIT_SUBSET_AFFINITY = 0x00004000
-	JOB_OBJECT_LIMIT_WORKINGSET = 0x00000001
-)
-
-type IO_COUNTERS struct {
-	ReadOperationCount uint64
-	WriteOperationCount uint64
-	OtherOperationCount uint64
-	ReadTransferCount uint64
-	WriteTransferCount uint64
-	OtherTransferCount uint64
-}
-
-type JOBOBJECT_EXTENDED_LIMIT_INFORMATION struct {
-	BasicLimitInformation JOBOBJECT_BASIC_LIMIT_INFORMATION
-	IoInfo IO_COUNTERS
-	ProcessMemoryLimit uintptr
-	JobMemoryLimit uintptr
-	PeakProcessMemoryUsed uintptr
-	PeakJobMemoryUsed uintptr
-}
-
-const (
-	// UIRestrictionsClass
-	JOB_OBJECT_UILIMIT_DESKTOP = 0x00000040
-	JOB_OBJECT_UILIMIT_DISPLAYSETTINGS = 0x00000010
-	JOB_OBJECT_UILIMIT_EXITWINDOWS = 0x00000080
-	JOB_OBJECT_UILIMIT_GLOBALATOMS = 0x00000020
-	JOB_OBJECT_UILIMIT_HANDLES = 0x00000001
-	JOB_OBJECT_UILIMIT_READCLIPBOARD = 0x00000002
-	JOB_OBJECT_UILIMIT_SYSTEMPARAMETERS = 0x00000008
-	JOB_OBJECT_UILIMIT_WRITECLIPBOARD = 0x00000004
-)
-
-type JOBOBJECT_BASIC_UI_RESTRICTIONS struct {
-	UIRestrictionsClass uint32
-}
-
-const (
-	// JobObjectInformationClass
-	JobObjectAssociateCompletionPortInformation = 7
-	JobObjectBasicLimitInformation = 2
-	JobObjectBasicUIRestrictions = 4
-	JobObjectCpuRateControlInformation = 15
-	JobObjectEndOfJobTimeInformation = 6
-	JobObjectExtendedLimitInformation = 9
-	JobObjectGroupInformation = 11
-	JobObjectGroupInformationEx = 14
-	JobObjectLimitViolationInformation2 = 35
-	JobObjectNetRateControlInformation = 32
-	JobObjectNotificationLimitInformation = 12
-	JobObjectNotificationLimitInformation2 = 34
-	JobObjectSecurityLimitInformation = 5
-)
-
-const (
-	KF_FLAG_DEFAULT = 0x00000000
-	KF_FLAG_FORCE_APP_DATA_REDIRECTION = 0x00080000
-	KF_FLAG_RETURN_FILTER_REDIRECTION_TARGET = 0x00040000
-	KF_FLAG_FORCE_PACKAGE_REDIRECTION = 0x00020000
-	KF_FLAG_NO_PACKAGE_REDIRECTION = 0x00010000
-	KF_FLAG_FORCE_APPCONTAINER_REDIRECTION = 0x00020000
-	KF_FLAG_NO_APPCONTAINER_REDIRECTION = 0x00010000
-	KF_FLAG_CREATE = 0x00008000
-	KF_FLAG_DONT_VERIFY = 0x00004000
-	KF_FLAG_DONT_UNEXPAND = 0x00002000
-	KF_FLAG_NO_ALIAS = 0x00001000
-	KF_FLAG_INIT = 0x00000800
-	KF_FLAG_DEFAULT_PATH = 0x00000400
-	KF_FLAG_NOT_PARENT_RELATIVE = 0x00000200
-	KF_FLAG_SIMPLE_IDLIST = 0x00000100
-	KF_FLAG_ALIAS_ONLY = 0x80000000
-)
-
-type OsVersionInfoEx struct {
-	osVersionInfoSize uint32
-	MajorVersion uint32
-	MinorVersion uint32
-	BuildNumber uint32
-	PlatformId uint32
-	CsdVersion [128]uint16
-	ServicePackMajor uint16
-	ServicePackMinor uint16
-	SuiteMask uint16
-	ProductType byte
-	_ byte
-}
-
-const (
-	EWX_LOGOFF = 0x00000000
-	EWX_SHUTDOWN = 0x00000001
-	EWX_REBOOT = 0x00000002
-	EWX_FORCE = 0x00000004
-	EWX_POWEROFF = 0x00000008
-	EWX_FORCEIFHUNG = 0x00000010
-	EWX_QUICKRESOLVE = 0x00000020
-	EWX_RESTARTAPPS = 0x00000040
-	EWX_HYBRID_SHUTDOWN = 0x00400000
-	EWX_BOOTOPTIONS = 0x01000000
-
-	SHTDN_REASON_FLAG_COMMENT_REQUIRED = 0x01000000
-	SHTDN_REASON_FLAG_DIRTY_PROBLEM_ID_REQUIRED = 0x02000000
-	SHTDN_REASON_FLAG_CLEAN_UI = 0x04000000
-	SHTDN_REASON_FLAG_DIRTY_UI = 0x08000000
-	SHTDN_REASON_FLAG_USER_DEFINED = 0x40000000
-	SHTDN_REASON_FLAG_PLANNED = 0x80000000
-	SHTDN_REASON_MAJOR_OTHER = 0x00000000
-	SHTDN_REASON_MAJOR_NONE = 0x00000000
-	SHTDN_REASON_MAJOR_HARDWARE = 0x00010000
-	SHTDN_REASON_MAJOR_OPERATINGSYSTEM = 0x00020000
-	SHTDN_REASON_MAJOR_SOFTWARE = 0x00030000
-	SHTDN_REASON_MAJOR_APPLICATION = 0x00040000
-	SHTDN_REASON_MAJOR_SYSTEM = 0x00050000
-	SHTDN_REASON_MAJOR_POWER = 0x00060000
-	SHTDN_REASON_MAJOR_LEGACY_API = 0x00070000
-	SHTDN_REASON_MINOR_OTHER = 0x00000000
-	SHTDN_REASON_MINOR_NONE = 0x000000ff
-	SHTDN_REASON_MINOR_MAINTENANCE = 0x00000001
-	SHTDN_REASON_MINOR_INSTALLATION = 0x00000002
-	SHTDN_REASON_MINOR_UPGRADE = 0x00000003
-	SHTDN_REASON_MINOR_RECONFIG = 0x00000004
-	SHTDN_REASON_MINOR_HUNG = 0x00000005
-	SHTDN_REASON_MINOR_UNSTABLE = 0x00000006
-	SHTDN_REASON_MINOR_DISK = 0x00000007
-	SHTDN_REASON_MINOR_PROCESSOR = 0x00000008
-	SHTDN_REASON_MINOR_NETWORKCARD = 0x00000009
-	SHTDN_REASON_MINOR_POWER_SUPPLY = 0x0000000a
-	SHTDN_REASON_MINOR_CORDUNPLUGGED = 0x0000000b
-	SHTDN_REASON_MINOR_ENVIRONMENT = 0x0000000c
-	SHTDN_REASON_MINOR_HARDWARE_DRIVER = 0x0000000d
-	SHTDN_REASON_MINOR_OTHERDRIVER = 0x0000000e
-	SHTDN_REASON_MINOR_BLUESCREEN = 0x0000000F
-	SHTDN_REASON_MINOR_SERVICEPACK = 0x00000010
-	SHTDN_REASON_MINOR_HOTFIX = 0x00000011
-	SHTDN_REASON_MINOR_SECURITYFIX = 0x00000012
-	SHTDN_REASON_MINOR_SECURITY = 0x00000013
-	SHTDN_REASON_MINOR_NETWORK_CONNECTIVITY = 0x00000014
-	SHTDN_REASON_MINOR_WMI = 0x00000015
-	SHTDN_REASON_MINOR_SERVICEPACK_UNINSTALL = 0x00000016
-	SHTDN_REASON_MINOR_HOTFIX_UNINSTALL = 0x00000017
-	SHTDN_REASON_MINOR_SECURITYFIX_UNINSTALL = 0x00000018
-	SHTDN_REASON_MINOR_MMC = 0x00000019
-	SHTDN_REASON_MINOR_SYSTEMRESTORE = 0x0000001a
-	SHTDN_REASON_MINOR_TERMSRV = 0x00000020
-	SHTDN_REASON_MINOR_DC_PROMOTION = 0x00000021
-	SHTDN_REASON_MINOR_DC_DEMOTION = 0x00000022
-	SHTDN_REASON_UNKNOWN = SHTDN_REASON_MINOR_NONE
-	SHTDN_REASON_LEGACY_API = SHTDN_REASON_MAJOR_LEGACY_API | SHTDN_REASON_FLAG_PLANNED
-	SHTDN_REASON_VALID_BIT_MASK = 0xc0ffffff
-
-	SHUTDOWN_NORETRY = 0x1
-)
-
-// Flags used for GetModuleHandleEx
-const (
-	GET_MODULE_HANDLE_EX_FLAG_PIN = 1
-	GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT = 2
-	GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS = 4
-)
-
-// MUI function flag values
-const (
-	MUI_LANGUAGE_ID = 0x4
-	MUI_LANGUAGE_NAME = 0x8
-	MUI_MERGE_SYSTEM_FALLBACK = 0x10
-	MUI_MERGE_USER_FALLBACK = 0x20
-	MUI_UI_FALLBACK = MUI_MERGE_SYSTEM_FALLBACK | MUI_MERGE_USER_FALLBACK
-	MUI_THREAD_LANGUAGES = 0x40
-	MUI_CONSOLE_FILTER = 0x100
-	MUI_COMPLEX_SCRIPT_FILTER = 0x200
-	MUI_RESET_FILTERS = 0x001
-	MUI_USER_PREFERRED_UI_LANGUAGES = 0x10
-	MUI_USE_INSTALLED_LANGUAGES = 0x20
-	MUI_USE_SEARCH_ALL_LANGUAGES = 0x40
-	MUI_LANG_NEUTRAL_PE_FILE = 0x100
-	MUI_NON_LANG_NEUTRAL_FILE = 0x200
-	MUI_MACHINE_LANGUAGE_SETTINGS = 0x400
-	MUI_FILETYPE_NOT_LANGUAGE_NEUTRAL = 0x001
-	MUI_FILETYPE_LANGUAGE_NEUTRAL_MAIN = 0x002
-	MUI_FILETYPE_LANGUAGE_NEUTRAL_MUI = 0x004
-	MUI_QUERY_TYPE = 0x001
-	MUI_QUERY_CHECKSUM = 0x002
-	MUI_QUERY_LANGUAGE_NAME = 0x004
-	MUI_QUERY_RESOURCE_TYPES = 0x008
-	MUI_FILEINFO_VERSION = 0x001
-
-	MUI_FULL_LANGUAGE = 0x01
-	MUI_PARTIAL_LANGUAGE = 0x02
-	MUI_LIP_LANGUAGE = 0x04
-	MUI_LANGUAGE_INSTALLED = 0x20
-	MUI_LANGUAGE_LICENSED = 0x40
-)
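A quick illustration of how the console-mode flags deleted above are meant to be combined: SetConsoleMode takes a bitmask, so new flags such as ENABLE_VIRTUAL_TERMINAL_PROCESSING are OR-ed into the current mode rather than assigned outright. This is a minimal, hedged sketch using the golang.org/x/sys/windows wrappers (GetStdHandle, GetConsoleMode, SetConsoleMode), not code from this change; it builds on Windows only.

    package main

    import "golang.org/x/sys/windows"

    func main() {
    	h, err := windows.GetStdHandle(windows.STD_OUTPUT_HANDLE)
    	if err != nil {
    		panic(err)
    	}
    	var mode uint32
    	if err := windows.GetConsoleMode(h, &mode); err != nil {
    		panic(err)
    	}
    	// OR the flag in so the existing input/output modes survive.
    	if err := windows.SetConsoleMode(h, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err != nil {
    		panic(err)
    	}
    }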
diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/types_windows_386.go b/awsproviderlint/vendor/golang.org/x/sys/windows/types_windows_386.go
deleted file mode 100644
index 8bce3e2fc1b..00000000000
--- a/awsproviderlint/vendor/golang.org/x/sys/windows/types_windows_386.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package windows
-
-type WSAData struct {
-	Version uint16
-	HighVersion uint16
-	Description [WSADESCRIPTION_LEN + 1]byte
-	SystemStatus [WSASYS_STATUS_LEN + 1]byte
-	MaxSockets uint16
-	MaxUdpDg uint16
-	VendorInfo *byte
-}
-
-type Servent struct {
-	Name *byte
-	Aliases **byte
-	Port uint16
-	Proto *byte
-}
-
-type JOBOBJECT_BASIC_LIMIT_INFORMATION struct {
-	PerProcessUserTimeLimit int64
-	PerJobUserTimeLimit int64
-	LimitFlags uint32
-	MinimumWorkingSetSize uintptr
-	MaximumWorkingSetSize uintptr
-	ActiveProcessLimit uint32
-	Affinity uintptr
-	PriorityClass uint32
-	SchedulingClass uint32
-	_ uint32 // pad to 8 byte boundary
-}
diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/types_windows_amd64.go b/awsproviderlint/vendor/golang.org/x/sys/windows/types_windows_amd64.go
deleted file mode 100644
index fdddc0c70ab..00000000000
--- a/awsproviderlint/vendor/golang.org/x/sys/windows/types_windows_amd64.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package windows
-
-type WSAData struct {
-	Version uint16
-	HighVersion uint16
-	MaxSockets uint16
-	MaxUdpDg uint16
-	VendorInfo *byte
-	Description [WSADESCRIPTION_LEN + 1]byte
-	SystemStatus [WSASYS_STATUS_LEN + 1]byte
-}
-
-type Servent struct {
-	Name *byte
-	Aliases **byte
-	Proto *byte
-	Port uint16
-}
-
-type JOBOBJECT_BASIC_LIMIT_INFORMATION struct {
-	PerProcessUserTimeLimit int64
-	PerJobUserTimeLimit int64
-	LimitFlags uint32
-	MinimumWorkingSetSize uintptr
-	MaximumWorkingSetSize uintptr
-	ActiveProcessLimit uint32
-	Affinity uintptr
-	PriorityClass uint32
-	SchedulingClass uint32
-}
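The IpAdapterAddresses and SocketAddress types deleted above form C-style linked lists, and the deleted SocketAddress.IP helper picks IPv4 or IPv6 by checking both the sockaddr length and the address family. As a hedged sketch of the usual calling pattern (not part of this change), the GetAdaptersAddresses wrapper is called with a caller-supplied buffer that is grown on ERROR_BUFFER_OVERFLOW:

    package main

    import (
    	"fmt"
    	"unsafe"

    	"golang.org/x/sys/windows"
    )

    func main() {
    	size := uint32(15000) // generous initial guess; the call corrects it
    	var buf []byte
    	for {
    		buf = make([]byte, size)
    		err := windows.GetAdaptersAddresses(windows.AF_UNSPEC,
    			windows.GAA_FLAG_INCLUDE_PREFIX, 0,
    			(*windows.IpAdapterAddresses)(unsafe.Pointer(&buf[0])), &size)
    		if err == nil {
    			break
    		}
    		if err != windows.ERROR_BUFFER_OVERFLOW {
    			panic(err)
    		}
    		// size now holds the required length; retry with a larger buffer.
    	}
    	// Walk the adapter list, then each adapter's unicast address list.
    	for aa := (*windows.IpAdapterAddresses)(unsafe.Pointer(&buf[0])); aa != nil; aa = aa.Next {
    		for ua := aa.FirstUnicastAddress; ua != nil; ua = ua.Next {
    			// SocketAddress.IP returns nil for non-IP sockaddrs.
    			fmt.Printf("if %d: %v\n", aa.IfIndex, ua.Address.IP())
    		}
    	}
    }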
diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/types_windows_arm.go b/awsproviderlint/vendor/golang.org/x/sys/windows/types_windows_arm.go
deleted file mode 100644
index 321872c3e04..00000000000
--- a/awsproviderlint/vendor/golang.org/x/sys/windows/types_windows_arm.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package windows
-
-type WSAData struct {
-	Version uint16
-	HighVersion uint16
-	Description [WSADESCRIPTION_LEN + 1]byte
-	SystemStatus [WSASYS_STATUS_LEN + 1]byte
-	MaxSockets uint16
-	MaxUdpDg uint16
-	VendorInfo *byte
-}
-
-type Servent struct {
-	Name *byte
-	Aliases **byte
-	Port uint16
-	Proto *byte
-}
-
-type JOBOBJECT_BASIC_LIMIT_INFORMATION struct {
-	PerProcessUserTimeLimit int64
-	PerJobUserTimeLimit int64
-	LimitFlags uint32
-	MinimumWorkingSetSize uintptr
-	MaximumWorkingSetSize uintptr
-	ActiveProcessLimit uint32
-	Affinity uintptr
-	PriorityClass uint32
-	SchedulingClass uint32
-	_ uint32 // pad to 8 byte boundary
-}
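The JOBOBJECT_BASIC_LIMIT_INFORMATION layout that each of these per-arch files defines is what SetInformationJobObject consumes. A hedged sketch of the common kill-on-job-close pattern, using the x/sys/windows wrappers CreateJobObject and SetInformationJobObject together with the JOBOBJECT_* types and constants shown earlier (the child-process plumbing is elided):

    package main

    import (
    	"unsafe"

    	"golang.org/x/sys/windows"
    )

    func main() {
    	job, err := windows.CreateJobObject(nil, nil)
    	if err != nil {
    		panic(err)
    	}
    	defer windows.CloseHandle(job)

    	info := windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION{
    		BasicLimitInformation: windows.JOBOBJECT_BASIC_LIMIT_INFORMATION{
    			LimitFlags: windows.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE,
    		},
    	}
    	if _, err := windows.SetInformationJobObject(job,
    		windows.JobObjectExtendedLimitInformation,
    		uintptr(unsafe.Pointer(&info)),
    		uint32(unsafe.Sizeof(info))); err != nil {
    		panic(err)
    	}
    	// windows.AssignProcessToJobObject(job, childHandle) would then tie a
    	// child process's lifetime to the last handle on this job.
    }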
diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/zerrors_windows.go b/awsproviderlint/vendor/golang.org/x/sys/windows/zerrors_windows.go
deleted file mode 100644
index f0212003520..00000000000
--- a/awsproviderlint/vendor/golang.org/x/sys/windows/zerrors_windows.go
+++ /dev/null
@@ -1,6853 +0,0 @@
-// Code generated by 'mkerrors.bash'; DO NOT EDIT.
-
-package windows
-
-import "syscall"
-
-const (
-	FACILITY_NULL = 0
-	FACILITY_RPC = 1
-	FACILITY_DISPATCH = 2
-	FACILITY_STORAGE = 3
-	FACILITY_ITF = 4
-	FACILITY_WIN32 = 7

[the rest of this generated table, the remaining FACILITY_* facility codes followed by the ERROR_* Win32 error constants of type syscall.Errno from ERROR_SUCCESS = 0 onward, is deleted unchanged along with the file; the hunk resumes below]

-	ERROR_INVALID_MONITOR_HANDLE
syscall.Errno = 1461 - ERROR_INCORRECT_SIZE syscall.Errno = 1462 - ERROR_SYMLINK_CLASS_DISABLED syscall.Errno = 1463 - ERROR_SYMLINK_NOT_SUPPORTED syscall.Errno = 1464 - ERROR_XML_PARSE_ERROR syscall.Errno = 1465 - ERROR_XMLDSIG_ERROR syscall.Errno = 1466 - ERROR_RESTART_APPLICATION syscall.Errno = 1467 - ERROR_WRONG_COMPARTMENT syscall.Errno = 1468 - ERROR_AUTHIP_FAILURE syscall.Errno = 1469 - ERROR_NO_NVRAM_RESOURCES syscall.Errno = 1470 - ERROR_NOT_GUI_PROCESS syscall.Errno = 1471 - ERROR_EVENTLOG_FILE_CORRUPT syscall.Errno = 1500 - ERROR_EVENTLOG_CANT_START syscall.Errno = 1501 - ERROR_LOG_FILE_FULL syscall.Errno = 1502 - ERROR_EVENTLOG_FILE_CHANGED syscall.Errno = 1503 - ERROR_CONTAINER_ASSIGNED syscall.Errno = 1504 - ERROR_JOB_NO_CONTAINER syscall.Errno = 1505 - ERROR_INVALID_TASK_NAME syscall.Errno = 1550 - ERROR_INVALID_TASK_INDEX syscall.Errno = 1551 - ERROR_THREAD_ALREADY_IN_TASK syscall.Errno = 1552 - ERROR_INSTALL_SERVICE_FAILURE syscall.Errno = 1601 - ERROR_INSTALL_USEREXIT syscall.Errno = 1602 - ERROR_INSTALL_FAILURE syscall.Errno = 1603 - ERROR_INSTALL_SUSPEND syscall.Errno = 1604 - ERROR_UNKNOWN_PRODUCT syscall.Errno = 1605 - ERROR_UNKNOWN_FEATURE syscall.Errno = 1606 - ERROR_UNKNOWN_COMPONENT syscall.Errno = 1607 - ERROR_UNKNOWN_PROPERTY syscall.Errno = 1608 - ERROR_INVALID_HANDLE_STATE syscall.Errno = 1609 - ERROR_BAD_CONFIGURATION syscall.Errno = 1610 - ERROR_INDEX_ABSENT syscall.Errno = 1611 - ERROR_INSTALL_SOURCE_ABSENT syscall.Errno = 1612 - ERROR_INSTALL_PACKAGE_VERSION syscall.Errno = 1613 - ERROR_PRODUCT_UNINSTALLED syscall.Errno = 1614 - ERROR_BAD_QUERY_SYNTAX syscall.Errno = 1615 - ERROR_INVALID_FIELD syscall.Errno = 1616 - ERROR_DEVICE_REMOVED syscall.Errno = 1617 - ERROR_INSTALL_ALREADY_RUNNING syscall.Errno = 1618 - ERROR_INSTALL_PACKAGE_OPEN_FAILED syscall.Errno = 1619 - ERROR_INSTALL_PACKAGE_INVALID syscall.Errno = 1620 - ERROR_INSTALL_UI_FAILURE syscall.Errno = 1621 - ERROR_INSTALL_LOG_FAILURE syscall.Errno = 1622 - ERROR_INSTALL_LANGUAGE_UNSUPPORTED syscall.Errno = 1623 - ERROR_INSTALL_TRANSFORM_FAILURE syscall.Errno = 1624 - ERROR_INSTALL_PACKAGE_REJECTED syscall.Errno = 1625 - ERROR_FUNCTION_NOT_CALLED syscall.Errno = 1626 - ERROR_FUNCTION_FAILED syscall.Errno = 1627 - ERROR_INVALID_TABLE syscall.Errno = 1628 - ERROR_DATATYPE_MISMATCH syscall.Errno = 1629 - ERROR_UNSUPPORTED_TYPE syscall.Errno = 1630 - ERROR_CREATE_FAILED syscall.Errno = 1631 - ERROR_INSTALL_TEMP_UNWRITABLE syscall.Errno = 1632 - ERROR_INSTALL_PLATFORM_UNSUPPORTED syscall.Errno = 1633 - ERROR_INSTALL_NOTUSED syscall.Errno = 1634 - ERROR_PATCH_PACKAGE_OPEN_FAILED syscall.Errno = 1635 - ERROR_PATCH_PACKAGE_INVALID syscall.Errno = 1636 - ERROR_PATCH_PACKAGE_UNSUPPORTED syscall.Errno = 1637 - ERROR_PRODUCT_VERSION syscall.Errno = 1638 - ERROR_INVALID_COMMAND_LINE syscall.Errno = 1639 - ERROR_INSTALL_REMOTE_DISALLOWED syscall.Errno = 1640 - ERROR_SUCCESS_REBOOT_INITIATED syscall.Errno = 1641 - ERROR_PATCH_TARGET_NOT_FOUND syscall.Errno = 1642 - ERROR_PATCH_PACKAGE_REJECTED syscall.Errno = 1643 - ERROR_INSTALL_TRANSFORM_REJECTED syscall.Errno = 1644 - ERROR_INSTALL_REMOTE_PROHIBITED syscall.Errno = 1645 - ERROR_PATCH_REMOVAL_UNSUPPORTED syscall.Errno = 1646 - ERROR_UNKNOWN_PATCH syscall.Errno = 1647 - ERROR_PATCH_NO_SEQUENCE syscall.Errno = 1648 - ERROR_PATCH_REMOVAL_DISALLOWED syscall.Errno = 1649 - ERROR_INVALID_PATCH_XML syscall.Errno = 1650 - ERROR_PATCH_MANAGED_ADVERTISED_PRODUCT syscall.Errno = 1651 - ERROR_INSTALL_SERVICE_SAFEBOOT syscall.Errno = 1652 - ERROR_FAIL_FAST_EXCEPTION 
syscall.Errno = 1653 - ERROR_INSTALL_REJECTED syscall.Errno = 1654 - ERROR_DYNAMIC_CODE_BLOCKED syscall.Errno = 1655 - ERROR_NOT_SAME_OBJECT syscall.Errno = 1656 - ERROR_STRICT_CFG_VIOLATION syscall.Errno = 1657 - ERROR_SET_CONTEXT_DENIED syscall.Errno = 1660 - ERROR_CROSS_PARTITION_VIOLATION syscall.Errno = 1661 - RPC_S_INVALID_STRING_BINDING syscall.Errno = 1700 - RPC_S_WRONG_KIND_OF_BINDING syscall.Errno = 1701 - RPC_S_INVALID_BINDING syscall.Errno = 1702 - RPC_S_PROTSEQ_NOT_SUPPORTED syscall.Errno = 1703 - RPC_S_INVALID_RPC_PROTSEQ syscall.Errno = 1704 - RPC_S_INVALID_STRING_UUID syscall.Errno = 1705 - RPC_S_INVALID_ENDPOINT_FORMAT syscall.Errno = 1706 - RPC_S_INVALID_NET_ADDR syscall.Errno = 1707 - RPC_S_NO_ENDPOINT_FOUND syscall.Errno = 1708 - RPC_S_INVALID_TIMEOUT syscall.Errno = 1709 - RPC_S_OBJECT_NOT_FOUND syscall.Errno = 1710 - RPC_S_ALREADY_REGISTERED syscall.Errno = 1711 - RPC_S_TYPE_ALREADY_REGISTERED syscall.Errno = 1712 - RPC_S_ALREADY_LISTENING syscall.Errno = 1713 - RPC_S_NO_PROTSEQS_REGISTERED syscall.Errno = 1714 - RPC_S_NOT_LISTENING syscall.Errno = 1715 - RPC_S_UNKNOWN_MGR_TYPE syscall.Errno = 1716 - RPC_S_UNKNOWN_IF syscall.Errno = 1717 - RPC_S_NO_BINDINGS syscall.Errno = 1718 - RPC_S_NO_PROTSEQS syscall.Errno = 1719 - RPC_S_CANT_CREATE_ENDPOINT syscall.Errno = 1720 - RPC_S_OUT_OF_RESOURCES syscall.Errno = 1721 - RPC_S_SERVER_UNAVAILABLE syscall.Errno = 1722 - RPC_S_SERVER_TOO_BUSY syscall.Errno = 1723 - RPC_S_INVALID_NETWORK_OPTIONS syscall.Errno = 1724 - RPC_S_NO_CALL_ACTIVE syscall.Errno = 1725 - RPC_S_CALL_FAILED syscall.Errno = 1726 - RPC_S_CALL_FAILED_DNE syscall.Errno = 1727 - RPC_S_PROTOCOL_ERROR syscall.Errno = 1728 - RPC_S_PROXY_ACCESS_DENIED syscall.Errno = 1729 - RPC_S_UNSUPPORTED_TRANS_SYN syscall.Errno = 1730 - RPC_S_UNSUPPORTED_TYPE syscall.Errno = 1732 - RPC_S_INVALID_TAG syscall.Errno = 1733 - RPC_S_INVALID_BOUND syscall.Errno = 1734 - RPC_S_NO_ENTRY_NAME syscall.Errno = 1735 - RPC_S_INVALID_NAME_SYNTAX syscall.Errno = 1736 - RPC_S_UNSUPPORTED_NAME_SYNTAX syscall.Errno = 1737 - RPC_S_UUID_NO_ADDRESS syscall.Errno = 1739 - RPC_S_DUPLICATE_ENDPOINT syscall.Errno = 1740 - RPC_S_UNKNOWN_AUTHN_TYPE syscall.Errno = 1741 - RPC_S_MAX_CALLS_TOO_SMALL syscall.Errno = 1742 - RPC_S_STRING_TOO_LONG syscall.Errno = 1743 - RPC_S_PROTSEQ_NOT_FOUND syscall.Errno = 1744 - RPC_S_PROCNUM_OUT_OF_RANGE syscall.Errno = 1745 - RPC_S_BINDING_HAS_NO_AUTH syscall.Errno = 1746 - RPC_S_UNKNOWN_AUTHN_SERVICE syscall.Errno = 1747 - RPC_S_UNKNOWN_AUTHN_LEVEL syscall.Errno = 1748 - RPC_S_INVALID_AUTH_IDENTITY syscall.Errno = 1749 - RPC_S_UNKNOWN_AUTHZ_SERVICE syscall.Errno = 1750 - EPT_S_INVALID_ENTRY syscall.Errno = 1751 - EPT_S_CANT_PERFORM_OP syscall.Errno = 1752 - EPT_S_NOT_REGISTERED syscall.Errno = 1753 - RPC_S_NOTHING_TO_EXPORT syscall.Errno = 1754 - RPC_S_INCOMPLETE_NAME syscall.Errno = 1755 - RPC_S_INVALID_VERS_OPTION syscall.Errno = 1756 - RPC_S_NO_MORE_MEMBERS syscall.Errno = 1757 - RPC_S_NOT_ALL_OBJS_UNEXPORTED syscall.Errno = 1758 - RPC_S_INTERFACE_NOT_FOUND syscall.Errno = 1759 - RPC_S_ENTRY_ALREADY_EXISTS syscall.Errno = 1760 - RPC_S_ENTRY_NOT_FOUND syscall.Errno = 1761 - RPC_S_NAME_SERVICE_UNAVAILABLE syscall.Errno = 1762 - RPC_S_INVALID_NAF_ID syscall.Errno = 1763 - RPC_S_CANNOT_SUPPORT syscall.Errno = 1764 - RPC_S_NO_CONTEXT_AVAILABLE syscall.Errno = 1765 - RPC_S_INTERNAL_ERROR syscall.Errno = 1766 - RPC_S_ZERO_DIVIDE syscall.Errno = 1767 - RPC_S_ADDRESS_ERROR syscall.Errno = 1768 - RPC_S_FP_DIV_ZERO syscall.Errno = 1769 - RPC_S_FP_UNDERFLOW syscall.Errno = 1770 - 
RPC_S_FP_OVERFLOW syscall.Errno = 1771 - RPC_X_NO_MORE_ENTRIES syscall.Errno = 1772 - RPC_X_SS_CHAR_TRANS_OPEN_FAIL syscall.Errno = 1773 - RPC_X_SS_CHAR_TRANS_SHORT_FILE syscall.Errno = 1774 - RPC_X_SS_IN_NULL_CONTEXT syscall.Errno = 1775 - RPC_X_SS_CONTEXT_DAMAGED syscall.Errno = 1777 - RPC_X_SS_HANDLES_MISMATCH syscall.Errno = 1778 - RPC_X_SS_CANNOT_GET_CALL_HANDLE syscall.Errno = 1779 - RPC_X_NULL_REF_POINTER syscall.Errno = 1780 - RPC_X_ENUM_VALUE_OUT_OF_RANGE syscall.Errno = 1781 - RPC_X_BYTE_COUNT_TOO_SMALL syscall.Errno = 1782 - RPC_X_BAD_STUB_DATA syscall.Errno = 1783 - ERROR_INVALID_USER_BUFFER syscall.Errno = 1784 - ERROR_UNRECOGNIZED_MEDIA syscall.Errno = 1785 - ERROR_NO_TRUST_LSA_SECRET syscall.Errno = 1786 - ERROR_NO_TRUST_SAM_ACCOUNT syscall.Errno = 1787 - ERROR_TRUSTED_DOMAIN_FAILURE syscall.Errno = 1788 - ERROR_TRUSTED_RELATIONSHIP_FAILURE syscall.Errno = 1789 - ERROR_TRUST_FAILURE syscall.Errno = 1790 - RPC_S_CALL_IN_PROGRESS syscall.Errno = 1791 - ERROR_NETLOGON_NOT_STARTED syscall.Errno = 1792 - ERROR_ACCOUNT_EXPIRED syscall.Errno = 1793 - ERROR_REDIRECTOR_HAS_OPEN_HANDLES syscall.Errno = 1794 - ERROR_PRINTER_DRIVER_ALREADY_INSTALLED syscall.Errno = 1795 - ERROR_UNKNOWN_PORT syscall.Errno = 1796 - ERROR_UNKNOWN_PRINTER_DRIVER syscall.Errno = 1797 - ERROR_UNKNOWN_PRINTPROCESSOR syscall.Errno = 1798 - ERROR_INVALID_SEPARATOR_FILE syscall.Errno = 1799 - ERROR_INVALID_PRIORITY syscall.Errno = 1800 - ERROR_INVALID_PRINTER_NAME syscall.Errno = 1801 - ERROR_PRINTER_ALREADY_EXISTS syscall.Errno = 1802 - ERROR_INVALID_PRINTER_COMMAND syscall.Errno = 1803 - ERROR_INVALID_DATATYPE syscall.Errno = 1804 - ERROR_INVALID_ENVIRONMENT syscall.Errno = 1805 - RPC_S_NO_MORE_BINDINGS syscall.Errno = 1806 - ERROR_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT syscall.Errno = 1807 - ERROR_NOLOGON_WORKSTATION_TRUST_ACCOUNT syscall.Errno = 1808 - ERROR_NOLOGON_SERVER_TRUST_ACCOUNT syscall.Errno = 1809 - ERROR_DOMAIN_TRUST_INCONSISTENT syscall.Errno = 1810 - ERROR_SERVER_HAS_OPEN_HANDLES syscall.Errno = 1811 - ERROR_RESOURCE_DATA_NOT_FOUND syscall.Errno = 1812 - ERROR_RESOURCE_TYPE_NOT_FOUND syscall.Errno = 1813 - ERROR_RESOURCE_NAME_NOT_FOUND syscall.Errno = 1814 - ERROR_RESOURCE_LANG_NOT_FOUND syscall.Errno = 1815 - ERROR_NOT_ENOUGH_QUOTA syscall.Errno = 1816 - RPC_S_NO_INTERFACES syscall.Errno = 1817 - RPC_S_CALL_CANCELLED syscall.Errno = 1818 - RPC_S_BINDING_INCOMPLETE syscall.Errno = 1819 - RPC_S_COMM_FAILURE syscall.Errno = 1820 - RPC_S_UNSUPPORTED_AUTHN_LEVEL syscall.Errno = 1821 - RPC_S_NO_PRINC_NAME syscall.Errno = 1822 - RPC_S_NOT_RPC_ERROR syscall.Errno = 1823 - RPC_S_UUID_LOCAL_ONLY syscall.Errno = 1824 - RPC_S_SEC_PKG_ERROR syscall.Errno = 1825 - RPC_S_NOT_CANCELLED syscall.Errno = 1826 - RPC_X_INVALID_ES_ACTION syscall.Errno = 1827 - RPC_X_WRONG_ES_VERSION syscall.Errno = 1828 - RPC_X_WRONG_STUB_VERSION syscall.Errno = 1829 - RPC_X_INVALID_PIPE_OBJECT syscall.Errno = 1830 - RPC_X_WRONG_PIPE_ORDER syscall.Errno = 1831 - RPC_X_WRONG_PIPE_VERSION syscall.Errno = 1832 - RPC_S_COOKIE_AUTH_FAILED syscall.Errno = 1833 - RPC_S_DO_NOT_DISTURB syscall.Errno = 1834 - RPC_S_SYSTEM_HANDLE_COUNT_EXCEEDED syscall.Errno = 1835 - RPC_S_SYSTEM_HANDLE_TYPE_MISMATCH syscall.Errno = 1836 - RPC_S_GROUP_MEMBER_NOT_FOUND syscall.Errno = 1898 - EPT_S_CANT_CREATE syscall.Errno = 1899 - RPC_S_INVALID_OBJECT syscall.Errno = 1900 - ERROR_INVALID_TIME syscall.Errno = 1901 - ERROR_INVALID_FORM_NAME syscall.Errno = 1902 - ERROR_INVALID_FORM_SIZE syscall.Errno = 1903 - ERROR_ALREADY_WAITING syscall.Errno = 1904 - 
ERROR_PRINTER_DELETED syscall.Errno = 1905 - ERROR_INVALID_PRINTER_STATE syscall.Errno = 1906 - ERROR_PASSWORD_MUST_CHANGE syscall.Errno = 1907 - ERROR_DOMAIN_CONTROLLER_NOT_FOUND syscall.Errno = 1908 - ERROR_ACCOUNT_LOCKED_OUT syscall.Errno = 1909 - OR_INVALID_OXID syscall.Errno = 1910 - OR_INVALID_OID syscall.Errno = 1911 - OR_INVALID_SET syscall.Errno = 1912 - RPC_S_SEND_INCOMPLETE syscall.Errno = 1913 - RPC_S_INVALID_ASYNC_HANDLE syscall.Errno = 1914 - RPC_S_INVALID_ASYNC_CALL syscall.Errno = 1915 - RPC_X_PIPE_CLOSED syscall.Errno = 1916 - RPC_X_PIPE_DISCIPLINE_ERROR syscall.Errno = 1917 - RPC_X_PIPE_EMPTY syscall.Errno = 1918 - ERROR_NO_SITENAME syscall.Errno = 1919 - ERROR_CANT_ACCESS_FILE syscall.Errno = 1920 - ERROR_CANT_RESOLVE_FILENAME syscall.Errno = 1921 - RPC_S_ENTRY_TYPE_MISMATCH syscall.Errno = 1922 - RPC_S_NOT_ALL_OBJS_EXPORTED syscall.Errno = 1923 - RPC_S_INTERFACE_NOT_EXPORTED syscall.Errno = 1924 - RPC_S_PROFILE_NOT_ADDED syscall.Errno = 1925 - RPC_S_PRF_ELT_NOT_ADDED syscall.Errno = 1926 - RPC_S_PRF_ELT_NOT_REMOVED syscall.Errno = 1927 - RPC_S_GRP_ELT_NOT_ADDED syscall.Errno = 1928 - RPC_S_GRP_ELT_NOT_REMOVED syscall.Errno = 1929 - ERROR_KM_DRIVER_BLOCKED syscall.Errno = 1930 - ERROR_CONTEXT_EXPIRED syscall.Errno = 1931 - ERROR_PER_USER_TRUST_QUOTA_EXCEEDED syscall.Errno = 1932 - ERROR_ALL_USER_TRUST_QUOTA_EXCEEDED syscall.Errno = 1933 - ERROR_USER_DELETE_TRUST_QUOTA_EXCEEDED syscall.Errno = 1934 - ERROR_AUTHENTICATION_FIREWALL_FAILED syscall.Errno = 1935 - ERROR_REMOTE_PRINT_CONNECTIONS_BLOCKED syscall.Errno = 1936 - ERROR_NTLM_BLOCKED syscall.Errno = 1937 - ERROR_PASSWORD_CHANGE_REQUIRED syscall.Errno = 1938 - ERROR_LOST_MODE_LOGON_RESTRICTION syscall.Errno = 1939 - ERROR_INVALID_PIXEL_FORMAT syscall.Errno = 2000 - ERROR_BAD_DRIVER syscall.Errno = 2001 - ERROR_INVALID_WINDOW_STYLE syscall.Errno = 2002 - ERROR_METAFILE_NOT_SUPPORTED syscall.Errno = 2003 - ERROR_TRANSFORM_NOT_SUPPORTED syscall.Errno = 2004 - ERROR_CLIPPING_NOT_SUPPORTED syscall.Errno = 2005 - ERROR_INVALID_CMM syscall.Errno = 2010 - ERROR_INVALID_PROFILE syscall.Errno = 2011 - ERROR_TAG_NOT_FOUND syscall.Errno = 2012 - ERROR_TAG_NOT_PRESENT syscall.Errno = 2013 - ERROR_DUPLICATE_TAG syscall.Errno = 2014 - ERROR_PROFILE_NOT_ASSOCIATED_WITH_DEVICE syscall.Errno = 2015 - ERROR_PROFILE_NOT_FOUND syscall.Errno = 2016 - ERROR_INVALID_COLORSPACE syscall.Errno = 2017 - ERROR_ICM_NOT_ENABLED syscall.Errno = 2018 - ERROR_DELETING_ICM_XFORM syscall.Errno = 2019 - ERROR_INVALID_TRANSFORM syscall.Errno = 2020 - ERROR_COLORSPACE_MISMATCH syscall.Errno = 2021 - ERROR_INVALID_COLORINDEX syscall.Errno = 2022 - ERROR_PROFILE_DOES_NOT_MATCH_DEVICE syscall.Errno = 2023 - ERROR_CONNECTED_OTHER_PASSWORD syscall.Errno = 2108 - ERROR_CONNECTED_OTHER_PASSWORD_DEFAULT syscall.Errno = 2109 - ERROR_BAD_USERNAME syscall.Errno = 2202 - ERROR_NOT_CONNECTED syscall.Errno = 2250 - ERROR_OPEN_FILES syscall.Errno = 2401 - ERROR_ACTIVE_CONNECTIONS syscall.Errno = 2402 - ERROR_DEVICE_IN_USE syscall.Errno = 2404 - ERROR_UNKNOWN_PRINT_MONITOR syscall.Errno = 3000 - ERROR_PRINTER_DRIVER_IN_USE syscall.Errno = 3001 - ERROR_SPOOL_FILE_NOT_FOUND syscall.Errno = 3002 - ERROR_SPL_NO_STARTDOC syscall.Errno = 3003 - ERROR_SPL_NO_ADDJOB syscall.Errno = 3004 - ERROR_PRINT_PROCESSOR_ALREADY_INSTALLED syscall.Errno = 3005 - ERROR_PRINT_MONITOR_ALREADY_INSTALLED syscall.Errno = 3006 - ERROR_INVALID_PRINT_MONITOR syscall.Errno = 3007 - ERROR_PRINT_MONITOR_IN_USE syscall.Errno = 3008 - ERROR_PRINTER_HAS_JOBS_QUEUED syscall.Errno = 3009 - 
ERROR_SUCCESS_REBOOT_REQUIRED syscall.Errno = 3010 - ERROR_SUCCESS_RESTART_REQUIRED syscall.Errno = 3011 - ERROR_PRINTER_NOT_FOUND syscall.Errno = 3012 - ERROR_PRINTER_DRIVER_WARNED syscall.Errno = 3013 - ERROR_PRINTER_DRIVER_BLOCKED syscall.Errno = 3014 - ERROR_PRINTER_DRIVER_PACKAGE_IN_USE syscall.Errno = 3015 - ERROR_CORE_DRIVER_PACKAGE_NOT_FOUND syscall.Errno = 3016 - ERROR_FAIL_REBOOT_REQUIRED syscall.Errno = 3017 - ERROR_FAIL_REBOOT_INITIATED syscall.Errno = 3018 - ERROR_PRINTER_DRIVER_DOWNLOAD_NEEDED syscall.Errno = 3019 - ERROR_PRINT_JOB_RESTART_REQUIRED syscall.Errno = 3020 - ERROR_INVALID_PRINTER_DRIVER_MANIFEST syscall.Errno = 3021 - ERROR_PRINTER_NOT_SHAREABLE syscall.Errno = 3022 - ERROR_REQUEST_PAUSED syscall.Errno = 3050 - ERROR_APPEXEC_CONDITION_NOT_SATISFIED syscall.Errno = 3060 - ERROR_APPEXEC_HANDLE_INVALIDATED syscall.Errno = 3061 - ERROR_APPEXEC_INVALID_HOST_GENERATION syscall.Errno = 3062 - ERROR_APPEXEC_UNEXPECTED_PROCESS_REGISTRATION syscall.Errno = 3063 - ERROR_APPEXEC_INVALID_HOST_STATE syscall.Errno = 3064 - ERROR_APPEXEC_NO_DONOR syscall.Errno = 3065 - ERROR_APPEXEC_HOST_ID_MISMATCH syscall.Errno = 3066 - ERROR_APPEXEC_UNKNOWN_USER syscall.Errno = 3067 - ERROR_IO_REISSUE_AS_CACHED syscall.Errno = 3950 - ERROR_WINS_INTERNAL syscall.Errno = 4000 - ERROR_CAN_NOT_DEL_LOCAL_WINS syscall.Errno = 4001 - ERROR_STATIC_INIT syscall.Errno = 4002 - ERROR_INC_BACKUP syscall.Errno = 4003 - ERROR_FULL_BACKUP syscall.Errno = 4004 - ERROR_REC_NON_EXISTENT syscall.Errno = 4005 - ERROR_RPL_NOT_ALLOWED syscall.Errno = 4006 - PEERDIST_ERROR_CONTENTINFO_VERSION_UNSUPPORTED syscall.Errno = 4050 - PEERDIST_ERROR_CANNOT_PARSE_CONTENTINFO syscall.Errno = 4051 - PEERDIST_ERROR_MISSING_DATA syscall.Errno = 4052 - PEERDIST_ERROR_NO_MORE syscall.Errno = 4053 - PEERDIST_ERROR_NOT_INITIALIZED syscall.Errno = 4054 - PEERDIST_ERROR_ALREADY_INITIALIZED syscall.Errno = 4055 - PEERDIST_ERROR_SHUTDOWN_IN_PROGRESS syscall.Errno = 4056 - PEERDIST_ERROR_INVALIDATED syscall.Errno = 4057 - PEERDIST_ERROR_ALREADY_EXISTS syscall.Errno = 4058 - PEERDIST_ERROR_OPERATION_NOTFOUND syscall.Errno = 4059 - PEERDIST_ERROR_ALREADY_COMPLETED syscall.Errno = 4060 - PEERDIST_ERROR_OUT_OF_BOUNDS syscall.Errno = 4061 - PEERDIST_ERROR_VERSION_UNSUPPORTED syscall.Errno = 4062 - PEERDIST_ERROR_INVALID_CONFIGURATION syscall.Errno = 4063 - PEERDIST_ERROR_NOT_LICENSED syscall.Errno = 4064 - PEERDIST_ERROR_SERVICE_UNAVAILABLE syscall.Errno = 4065 - PEERDIST_ERROR_TRUST_FAILURE syscall.Errno = 4066 - ERROR_DHCP_ADDRESS_CONFLICT syscall.Errno = 4100 - ERROR_WMI_GUID_NOT_FOUND syscall.Errno = 4200 - ERROR_WMI_INSTANCE_NOT_FOUND syscall.Errno = 4201 - ERROR_WMI_ITEMID_NOT_FOUND syscall.Errno = 4202 - ERROR_WMI_TRY_AGAIN syscall.Errno = 4203 - ERROR_WMI_DP_NOT_FOUND syscall.Errno = 4204 - ERROR_WMI_UNRESOLVED_INSTANCE_REF syscall.Errno = 4205 - ERROR_WMI_ALREADY_ENABLED syscall.Errno = 4206 - ERROR_WMI_GUID_DISCONNECTED syscall.Errno = 4207 - ERROR_WMI_SERVER_UNAVAILABLE syscall.Errno = 4208 - ERROR_WMI_DP_FAILED syscall.Errno = 4209 - ERROR_WMI_INVALID_MOF syscall.Errno = 4210 - ERROR_WMI_INVALID_REGINFO syscall.Errno = 4211 - ERROR_WMI_ALREADY_DISABLED syscall.Errno = 4212 - ERROR_WMI_READ_ONLY syscall.Errno = 4213 - ERROR_WMI_SET_FAILURE syscall.Errno = 4214 - ERROR_NOT_APPCONTAINER syscall.Errno = 4250 - ERROR_APPCONTAINER_REQUIRED syscall.Errno = 4251 - ERROR_NOT_SUPPORTED_IN_APPCONTAINER syscall.Errno = 4252 - ERROR_INVALID_PACKAGE_SID_LENGTH syscall.Errno = 4253 - ERROR_INVALID_MEDIA syscall.Errno = 4300 - 
ERROR_INVALID_LIBRARY syscall.Errno = 4301 - ERROR_INVALID_MEDIA_POOL syscall.Errno = 4302 - ERROR_DRIVE_MEDIA_MISMATCH syscall.Errno = 4303 - ERROR_MEDIA_OFFLINE syscall.Errno = 4304 - ERROR_LIBRARY_OFFLINE syscall.Errno = 4305 - ERROR_EMPTY syscall.Errno = 4306 - ERROR_NOT_EMPTY syscall.Errno = 4307 - ERROR_MEDIA_UNAVAILABLE syscall.Errno = 4308 - ERROR_RESOURCE_DISABLED syscall.Errno = 4309 - ERROR_INVALID_CLEANER syscall.Errno = 4310 - ERROR_UNABLE_TO_CLEAN syscall.Errno = 4311 - ERROR_OBJECT_NOT_FOUND syscall.Errno = 4312 - ERROR_DATABASE_FAILURE syscall.Errno = 4313 - ERROR_DATABASE_FULL syscall.Errno = 4314 - ERROR_MEDIA_INCOMPATIBLE syscall.Errno = 4315 - ERROR_RESOURCE_NOT_PRESENT syscall.Errno = 4316 - ERROR_INVALID_OPERATION syscall.Errno = 4317 - ERROR_MEDIA_NOT_AVAILABLE syscall.Errno = 4318 - ERROR_DEVICE_NOT_AVAILABLE syscall.Errno = 4319 - ERROR_REQUEST_REFUSED syscall.Errno = 4320 - ERROR_INVALID_DRIVE_OBJECT syscall.Errno = 4321 - ERROR_LIBRARY_FULL syscall.Errno = 4322 - ERROR_MEDIUM_NOT_ACCESSIBLE syscall.Errno = 4323 - ERROR_UNABLE_TO_LOAD_MEDIUM syscall.Errno = 4324 - ERROR_UNABLE_TO_INVENTORY_DRIVE syscall.Errno = 4325 - ERROR_UNABLE_TO_INVENTORY_SLOT syscall.Errno = 4326 - ERROR_UNABLE_TO_INVENTORY_TRANSPORT syscall.Errno = 4327 - ERROR_TRANSPORT_FULL syscall.Errno = 4328 - ERROR_CONTROLLING_IEPORT syscall.Errno = 4329 - ERROR_UNABLE_TO_EJECT_MOUNTED_MEDIA syscall.Errno = 4330 - ERROR_CLEANER_SLOT_SET syscall.Errno = 4331 - ERROR_CLEANER_SLOT_NOT_SET syscall.Errno = 4332 - ERROR_CLEANER_CARTRIDGE_SPENT syscall.Errno = 4333 - ERROR_UNEXPECTED_OMID syscall.Errno = 4334 - ERROR_CANT_DELETE_LAST_ITEM syscall.Errno = 4335 - ERROR_MESSAGE_EXCEEDS_MAX_SIZE syscall.Errno = 4336 - ERROR_VOLUME_CONTAINS_SYS_FILES syscall.Errno = 4337 - ERROR_INDIGENOUS_TYPE syscall.Errno = 4338 - ERROR_NO_SUPPORTING_DRIVES syscall.Errno = 4339 - ERROR_CLEANER_CARTRIDGE_INSTALLED syscall.Errno = 4340 - ERROR_IEPORT_FULL syscall.Errno = 4341 - ERROR_FILE_OFFLINE syscall.Errno = 4350 - ERROR_REMOTE_STORAGE_NOT_ACTIVE syscall.Errno = 4351 - ERROR_REMOTE_STORAGE_MEDIA_ERROR syscall.Errno = 4352 - ERROR_NOT_A_REPARSE_POINT syscall.Errno = 4390 - ERROR_REPARSE_ATTRIBUTE_CONFLICT syscall.Errno = 4391 - ERROR_INVALID_REPARSE_DATA syscall.Errno = 4392 - ERROR_REPARSE_TAG_INVALID syscall.Errno = 4393 - ERROR_REPARSE_TAG_MISMATCH syscall.Errno = 4394 - ERROR_REPARSE_POINT_ENCOUNTERED syscall.Errno = 4395 - ERROR_APP_DATA_NOT_FOUND syscall.Errno = 4400 - ERROR_APP_DATA_EXPIRED syscall.Errno = 4401 - ERROR_APP_DATA_CORRUPT syscall.Errno = 4402 - ERROR_APP_DATA_LIMIT_EXCEEDED syscall.Errno = 4403 - ERROR_APP_DATA_REBOOT_REQUIRED syscall.Errno = 4404 - ERROR_SECUREBOOT_ROLLBACK_DETECTED syscall.Errno = 4420 - ERROR_SECUREBOOT_POLICY_VIOLATION syscall.Errno = 4421 - ERROR_SECUREBOOT_INVALID_POLICY syscall.Errno = 4422 - ERROR_SECUREBOOT_POLICY_PUBLISHER_NOT_FOUND syscall.Errno = 4423 - ERROR_SECUREBOOT_POLICY_NOT_SIGNED syscall.Errno = 4424 - ERROR_SECUREBOOT_NOT_ENABLED syscall.Errno = 4425 - ERROR_SECUREBOOT_FILE_REPLACED syscall.Errno = 4426 - ERROR_SECUREBOOT_POLICY_NOT_AUTHORIZED syscall.Errno = 4427 - ERROR_SECUREBOOT_POLICY_UNKNOWN syscall.Errno = 4428 - ERROR_SECUREBOOT_POLICY_MISSING_ANTIROLLBACKVERSION syscall.Errno = 4429 - ERROR_SECUREBOOT_PLATFORM_ID_MISMATCH syscall.Errno = 4430 - ERROR_SECUREBOOT_POLICY_ROLLBACK_DETECTED syscall.Errno = 4431 - ERROR_SECUREBOOT_POLICY_UPGRADE_MISMATCH syscall.Errno = 4432 - ERROR_SECUREBOOT_REQUIRED_POLICY_FILE_MISSING syscall.Errno = 4433 - 
ERROR_SECUREBOOT_NOT_BASE_POLICY syscall.Errno = 4434 - ERROR_SECUREBOOT_NOT_SUPPLEMENTAL_POLICY syscall.Errno = 4435 - ERROR_OFFLOAD_READ_FLT_NOT_SUPPORTED syscall.Errno = 4440 - ERROR_OFFLOAD_WRITE_FLT_NOT_SUPPORTED syscall.Errno = 4441 - ERROR_OFFLOAD_READ_FILE_NOT_SUPPORTED syscall.Errno = 4442 - ERROR_OFFLOAD_WRITE_FILE_NOT_SUPPORTED syscall.Errno = 4443 - ERROR_ALREADY_HAS_STREAM_ID syscall.Errno = 4444 - ERROR_SMR_GARBAGE_COLLECTION_REQUIRED syscall.Errno = 4445 - ERROR_WOF_WIM_HEADER_CORRUPT syscall.Errno = 4446 - ERROR_WOF_WIM_RESOURCE_TABLE_CORRUPT syscall.Errno = 4447 - ERROR_WOF_FILE_RESOURCE_TABLE_CORRUPT syscall.Errno = 4448 - ERROR_VOLUME_NOT_SIS_ENABLED syscall.Errno = 4500 - ERROR_SYSTEM_INTEGRITY_ROLLBACK_DETECTED syscall.Errno = 4550 - ERROR_SYSTEM_INTEGRITY_POLICY_VIOLATION syscall.Errno = 4551 - ERROR_SYSTEM_INTEGRITY_INVALID_POLICY syscall.Errno = 4552 - ERROR_SYSTEM_INTEGRITY_POLICY_NOT_SIGNED syscall.Errno = 4553 - ERROR_VSM_NOT_INITIALIZED syscall.Errno = 4560 - ERROR_VSM_DMA_PROTECTION_NOT_IN_USE syscall.Errno = 4561 - ERROR_PLATFORM_MANIFEST_NOT_AUTHORIZED syscall.Errno = 4570 - ERROR_PLATFORM_MANIFEST_INVALID syscall.Errno = 4571 - ERROR_PLATFORM_MANIFEST_FILE_NOT_AUTHORIZED syscall.Errno = 4572 - ERROR_PLATFORM_MANIFEST_CATALOG_NOT_AUTHORIZED syscall.Errno = 4573 - ERROR_PLATFORM_MANIFEST_BINARY_ID_NOT_FOUND syscall.Errno = 4574 - ERROR_PLATFORM_MANIFEST_NOT_ACTIVE syscall.Errno = 4575 - ERROR_PLATFORM_MANIFEST_NOT_SIGNED syscall.Errno = 4576 - ERROR_DEPENDENT_RESOURCE_EXISTS syscall.Errno = 5001 - ERROR_DEPENDENCY_NOT_FOUND syscall.Errno = 5002 - ERROR_DEPENDENCY_ALREADY_EXISTS syscall.Errno = 5003 - ERROR_RESOURCE_NOT_ONLINE syscall.Errno = 5004 - ERROR_HOST_NODE_NOT_AVAILABLE syscall.Errno = 5005 - ERROR_RESOURCE_NOT_AVAILABLE syscall.Errno = 5006 - ERROR_RESOURCE_NOT_FOUND syscall.Errno = 5007 - ERROR_SHUTDOWN_CLUSTER syscall.Errno = 5008 - ERROR_CANT_EVICT_ACTIVE_NODE syscall.Errno = 5009 - ERROR_OBJECT_ALREADY_EXISTS syscall.Errno = 5010 - ERROR_OBJECT_IN_LIST syscall.Errno = 5011 - ERROR_GROUP_NOT_AVAILABLE syscall.Errno = 5012 - ERROR_GROUP_NOT_FOUND syscall.Errno = 5013 - ERROR_GROUP_NOT_ONLINE syscall.Errno = 5014 - ERROR_HOST_NODE_NOT_RESOURCE_OWNER syscall.Errno = 5015 - ERROR_HOST_NODE_NOT_GROUP_OWNER syscall.Errno = 5016 - ERROR_RESMON_CREATE_FAILED syscall.Errno = 5017 - ERROR_RESMON_ONLINE_FAILED syscall.Errno = 5018 - ERROR_RESOURCE_ONLINE syscall.Errno = 5019 - ERROR_QUORUM_RESOURCE syscall.Errno = 5020 - ERROR_NOT_QUORUM_CAPABLE syscall.Errno = 5021 - ERROR_CLUSTER_SHUTTING_DOWN syscall.Errno = 5022 - ERROR_INVALID_STATE syscall.Errno = 5023 - ERROR_RESOURCE_PROPERTIES_STORED syscall.Errno = 5024 - ERROR_NOT_QUORUM_CLASS syscall.Errno = 5025 - ERROR_CORE_RESOURCE syscall.Errno = 5026 - ERROR_QUORUM_RESOURCE_ONLINE_FAILED syscall.Errno = 5027 - ERROR_QUORUMLOG_OPEN_FAILED syscall.Errno = 5028 - ERROR_CLUSTERLOG_CORRUPT syscall.Errno = 5029 - ERROR_CLUSTERLOG_RECORD_EXCEEDS_MAXSIZE syscall.Errno = 5030 - ERROR_CLUSTERLOG_EXCEEDS_MAXSIZE syscall.Errno = 5031 - ERROR_CLUSTERLOG_CHKPOINT_NOT_FOUND syscall.Errno = 5032 - ERROR_CLUSTERLOG_NOT_ENOUGH_SPACE syscall.Errno = 5033 - ERROR_QUORUM_OWNER_ALIVE syscall.Errno = 5034 - ERROR_NETWORK_NOT_AVAILABLE syscall.Errno = 5035 - ERROR_NODE_NOT_AVAILABLE syscall.Errno = 5036 - ERROR_ALL_NODES_NOT_AVAILABLE syscall.Errno = 5037 - ERROR_RESOURCE_FAILED syscall.Errno = 5038 - ERROR_CLUSTER_INVALID_NODE syscall.Errno = 5039 - ERROR_CLUSTER_NODE_EXISTS syscall.Errno = 5040 - ERROR_CLUSTER_JOIN_IN_PROGRESS 
syscall.Errno = 5041 - ERROR_CLUSTER_NODE_NOT_FOUND syscall.Errno = 5042 - ERROR_CLUSTER_LOCAL_NODE_NOT_FOUND syscall.Errno = 5043 - ERROR_CLUSTER_NETWORK_EXISTS syscall.Errno = 5044 - ERROR_CLUSTER_NETWORK_NOT_FOUND syscall.Errno = 5045 - ERROR_CLUSTER_NETINTERFACE_EXISTS syscall.Errno = 5046 - ERROR_CLUSTER_NETINTERFACE_NOT_FOUND syscall.Errno = 5047 - ERROR_CLUSTER_INVALID_REQUEST syscall.Errno = 5048 - ERROR_CLUSTER_INVALID_NETWORK_PROVIDER syscall.Errno = 5049 - ERROR_CLUSTER_NODE_DOWN syscall.Errno = 5050 - ERROR_CLUSTER_NODE_UNREACHABLE syscall.Errno = 5051 - ERROR_CLUSTER_NODE_NOT_MEMBER syscall.Errno = 5052 - ERROR_CLUSTER_JOIN_NOT_IN_PROGRESS syscall.Errno = 5053 - ERROR_CLUSTER_INVALID_NETWORK syscall.Errno = 5054 - ERROR_CLUSTER_NODE_UP syscall.Errno = 5056 - ERROR_CLUSTER_IPADDR_IN_USE syscall.Errno = 5057 - ERROR_CLUSTER_NODE_NOT_PAUSED syscall.Errno = 5058 - ERROR_CLUSTER_NO_SECURITY_CONTEXT syscall.Errno = 5059 - ERROR_CLUSTER_NETWORK_NOT_INTERNAL syscall.Errno = 5060 - ERROR_CLUSTER_NODE_ALREADY_UP syscall.Errno = 5061 - ERROR_CLUSTER_NODE_ALREADY_DOWN syscall.Errno = 5062 - ERROR_CLUSTER_NETWORK_ALREADY_ONLINE syscall.Errno = 5063 - ERROR_CLUSTER_NETWORK_ALREADY_OFFLINE syscall.Errno = 5064 - ERROR_CLUSTER_NODE_ALREADY_MEMBER syscall.Errno = 5065 - ERROR_CLUSTER_LAST_INTERNAL_NETWORK syscall.Errno = 5066 - ERROR_CLUSTER_NETWORK_HAS_DEPENDENTS syscall.Errno = 5067 - ERROR_INVALID_OPERATION_ON_QUORUM syscall.Errno = 5068 - ERROR_DEPENDENCY_NOT_ALLOWED syscall.Errno = 5069 - ERROR_CLUSTER_NODE_PAUSED syscall.Errno = 5070 - ERROR_NODE_CANT_HOST_RESOURCE syscall.Errno = 5071 - ERROR_CLUSTER_NODE_NOT_READY syscall.Errno = 5072 - ERROR_CLUSTER_NODE_SHUTTING_DOWN syscall.Errno = 5073 - ERROR_CLUSTER_JOIN_ABORTED syscall.Errno = 5074 - ERROR_CLUSTER_INCOMPATIBLE_VERSIONS syscall.Errno = 5075 - ERROR_CLUSTER_MAXNUM_OF_RESOURCES_EXCEEDED syscall.Errno = 5076 - ERROR_CLUSTER_SYSTEM_CONFIG_CHANGED syscall.Errno = 5077 - ERROR_CLUSTER_RESOURCE_TYPE_NOT_FOUND syscall.Errno = 5078 - ERROR_CLUSTER_RESTYPE_NOT_SUPPORTED syscall.Errno = 5079 - ERROR_CLUSTER_RESNAME_NOT_FOUND syscall.Errno = 5080 - ERROR_CLUSTER_NO_RPC_PACKAGES_REGISTERED syscall.Errno = 5081 - ERROR_CLUSTER_OWNER_NOT_IN_PREFLIST syscall.Errno = 5082 - ERROR_CLUSTER_DATABASE_SEQMISMATCH syscall.Errno = 5083 - ERROR_RESMON_INVALID_STATE syscall.Errno = 5084 - ERROR_CLUSTER_GUM_NOT_LOCKER syscall.Errno = 5085 - ERROR_QUORUM_DISK_NOT_FOUND syscall.Errno = 5086 - ERROR_DATABASE_BACKUP_CORRUPT syscall.Errno = 5087 - ERROR_CLUSTER_NODE_ALREADY_HAS_DFS_ROOT syscall.Errno = 5088 - ERROR_RESOURCE_PROPERTY_UNCHANGEABLE syscall.Errno = 5089 - ERROR_NO_ADMIN_ACCESS_POINT syscall.Errno = 5090 - ERROR_CLUSTER_MEMBERSHIP_INVALID_STATE syscall.Errno = 5890 - ERROR_CLUSTER_QUORUMLOG_NOT_FOUND syscall.Errno = 5891 - ERROR_CLUSTER_MEMBERSHIP_HALT syscall.Errno = 5892 - ERROR_CLUSTER_INSTANCE_ID_MISMATCH syscall.Errno = 5893 - ERROR_CLUSTER_NETWORK_NOT_FOUND_FOR_IP syscall.Errno = 5894 - ERROR_CLUSTER_PROPERTY_DATA_TYPE_MISMATCH syscall.Errno = 5895 - ERROR_CLUSTER_EVICT_WITHOUT_CLEANUP syscall.Errno = 5896 - ERROR_CLUSTER_PARAMETER_MISMATCH syscall.Errno = 5897 - ERROR_NODE_CANNOT_BE_CLUSTERED syscall.Errno = 5898 - ERROR_CLUSTER_WRONG_OS_VERSION syscall.Errno = 5899 - ERROR_CLUSTER_CANT_CREATE_DUP_CLUSTER_NAME syscall.Errno = 5900 - ERROR_CLUSCFG_ALREADY_COMMITTED syscall.Errno = 5901 - ERROR_CLUSCFG_ROLLBACK_FAILED syscall.Errno = 5902 - ERROR_CLUSCFG_SYSTEM_DISK_DRIVE_LETTER_CONFLICT syscall.Errno = 5903 - ERROR_CLUSTER_OLD_VERSION 
syscall.Errno = 5904 - ERROR_CLUSTER_MISMATCHED_COMPUTER_ACCT_NAME syscall.Errno = 5905 - ERROR_CLUSTER_NO_NET_ADAPTERS syscall.Errno = 5906 - ERROR_CLUSTER_POISONED syscall.Errno = 5907 - ERROR_CLUSTER_GROUP_MOVING syscall.Errno = 5908 - ERROR_CLUSTER_RESOURCE_TYPE_BUSY syscall.Errno = 5909 - ERROR_RESOURCE_CALL_TIMED_OUT syscall.Errno = 5910 - ERROR_INVALID_CLUSTER_IPV6_ADDRESS syscall.Errno = 5911 - ERROR_CLUSTER_INTERNAL_INVALID_FUNCTION syscall.Errno = 5912 - ERROR_CLUSTER_PARAMETER_OUT_OF_BOUNDS syscall.Errno = 5913 - ERROR_CLUSTER_PARTIAL_SEND syscall.Errno = 5914 - ERROR_CLUSTER_REGISTRY_INVALID_FUNCTION syscall.Errno = 5915 - ERROR_CLUSTER_INVALID_STRING_TERMINATION syscall.Errno = 5916 - ERROR_CLUSTER_INVALID_STRING_FORMAT syscall.Errno = 5917 - ERROR_CLUSTER_DATABASE_TRANSACTION_IN_PROGRESS syscall.Errno = 5918 - ERROR_CLUSTER_DATABASE_TRANSACTION_NOT_IN_PROGRESS syscall.Errno = 5919 - ERROR_CLUSTER_NULL_DATA syscall.Errno = 5920 - ERROR_CLUSTER_PARTIAL_READ syscall.Errno = 5921 - ERROR_CLUSTER_PARTIAL_WRITE syscall.Errno = 5922 - ERROR_CLUSTER_CANT_DESERIALIZE_DATA syscall.Errno = 5923 - ERROR_DEPENDENT_RESOURCE_PROPERTY_CONFLICT syscall.Errno = 5924 - ERROR_CLUSTER_NO_QUORUM syscall.Errno = 5925 - ERROR_CLUSTER_INVALID_IPV6_NETWORK syscall.Errno = 5926 - ERROR_CLUSTER_INVALID_IPV6_TUNNEL_NETWORK syscall.Errno = 5927 - ERROR_QUORUM_NOT_ALLOWED_IN_THIS_GROUP syscall.Errno = 5928 - ERROR_DEPENDENCY_TREE_TOO_COMPLEX syscall.Errno = 5929 - ERROR_EXCEPTION_IN_RESOURCE_CALL syscall.Errno = 5930 - ERROR_CLUSTER_RHS_FAILED_INITIALIZATION syscall.Errno = 5931 - ERROR_CLUSTER_NOT_INSTALLED syscall.Errno = 5932 - ERROR_CLUSTER_RESOURCES_MUST_BE_ONLINE_ON_THE_SAME_NODE syscall.Errno = 5933 - ERROR_CLUSTER_MAX_NODES_IN_CLUSTER syscall.Errno = 5934 - ERROR_CLUSTER_TOO_MANY_NODES syscall.Errno = 5935 - ERROR_CLUSTER_OBJECT_ALREADY_USED syscall.Errno = 5936 - ERROR_NONCORE_GROUPS_FOUND syscall.Errno = 5937 - ERROR_FILE_SHARE_RESOURCE_CONFLICT syscall.Errno = 5938 - ERROR_CLUSTER_EVICT_INVALID_REQUEST syscall.Errno = 5939 - ERROR_CLUSTER_SINGLETON_RESOURCE syscall.Errno = 5940 - ERROR_CLUSTER_GROUP_SINGLETON_RESOURCE syscall.Errno = 5941 - ERROR_CLUSTER_RESOURCE_PROVIDER_FAILED syscall.Errno = 5942 - ERROR_CLUSTER_RESOURCE_CONFIGURATION_ERROR syscall.Errno = 5943 - ERROR_CLUSTER_GROUP_BUSY syscall.Errno = 5944 - ERROR_CLUSTER_NOT_SHARED_VOLUME syscall.Errno = 5945 - ERROR_CLUSTER_INVALID_SECURITY_DESCRIPTOR syscall.Errno = 5946 - ERROR_CLUSTER_SHARED_VOLUMES_IN_USE syscall.Errno = 5947 - ERROR_CLUSTER_USE_SHARED_VOLUMES_API syscall.Errno = 5948 - ERROR_CLUSTER_BACKUP_IN_PROGRESS syscall.Errno = 5949 - ERROR_NON_CSV_PATH syscall.Errno = 5950 - ERROR_CSV_VOLUME_NOT_LOCAL syscall.Errno = 5951 - ERROR_CLUSTER_WATCHDOG_TERMINATING syscall.Errno = 5952 - ERROR_CLUSTER_RESOURCE_VETOED_MOVE_INCOMPATIBLE_NODES syscall.Errno = 5953 - ERROR_CLUSTER_INVALID_NODE_WEIGHT syscall.Errno = 5954 - ERROR_CLUSTER_RESOURCE_VETOED_CALL syscall.Errno = 5955 - ERROR_RESMON_SYSTEM_RESOURCES_LACKING syscall.Errno = 5956 - ERROR_CLUSTER_RESOURCE_VETOED_MOVE_NOT_ENOUGH_RESOURCES_ON_DESTINATION syscall.Errno = 5957 - ERROR_CLUSTER_RESOURCE_VETOED_MOVE_NOT_ENOUGH_RESOURCES_ON_SOURCE syscall.Errno = 5958 - ERROR_CLUSTER_GROUP_QUEUED syscall.Errno = 5959 - ERROR_CLUSTER_RESOURCE_LOCKED_STATUS syscall.Errno = 5960 - ERROR_CLUSTER_SHARED_VOLUME_FAILOVER_NOT_ALLOWED syscall.Errno = 5961 - ERROR_CLUSTER_NODE_DRAIN_IN_PROGRESS syscall.Errno = 5962 - ERROR_CLUSTER_DISK_NOT_CONNECTED syscall.Errno = 5963 - ERROR_DISK_NOT_CSV_CAPABLE 
syscall.Errno = 5964 - ERROR_RESOURCE_NOT_IN_AVAILABLE_STORAGE syscall.Errno = 5965 - ERROR_CLUSTER_SHARED_VOLUME_REDIRECTED syscall.Errno = 5966 - ERROR_CLUSTER_SHARED_VOLUME_NOT_REDIRECTED syscall.Errno = 5967 - ERROR_CLUSTER_CANNOT_RETURN_PROPERTIES syscall.Errno = 5968 - ERROR_CLUSTER_RESOURCE_CONTAINS_UNSUPPORTED_DIFF_AREA_FOR_SHARED_VOLUMES syscall.Errno = 5969 - ERROR_CLUSTER_RESOURCE_IS_IN_MAINTENANCE_MODE syscall.Errno = 5970 - ERROR_CLUSTER_AFFINITY_CONFLICT syscall.Errno = 5971 - ERROR_CLUSTER_RESOURCE_IS_REPLICA_VIRTUAL_MACHINE syscall.Errno = 5972 - ERROR_CLUSTER_UPGRADE_INCOMPATIBLE_VERSIONS syscall.Errno = 5973 - ERROR_CLUSTER_UPGRADE_FIX_QUORUM_NOT_SUPPORTED syscall.Errno = 5974 - ERROR_CLUSTER_UPGRADE_RESTART_REQUIRED syscall.Errno = 5975 - ERROR_CLUSTER_UPGRADE_IN_PROGRESS syscall.Errno = 5976 - ERROR_CLUSTER_UPGRADE_INCOMPLETE syscall.Errno = 5977 - ERROR_CLUSTER_NODE_IN_GRACE_PERIOD syscall.Errno = 5978 - ERROR_CLUSTER_CSV_IO_PAUSE_TIMEOUT syscall.Errno = 5979 - ERROR_NODE_NOT_ACTIVE_CLUSTER_MEMBER syscall.Errno = 5980 - ERROR_CLUSTER_RESOURCE_NOT_MONITORED syscall.Errno = 5981 - ERROR_CLUSTER_RESOURCE_DOES_NOT_SUPPORT_UNMONITORED syscall.Errno = 5982 - ERROR_CLUSTER_RESOURCE_IS_REPLICATED syscall.Errno = 5983 - ERROR_CLUSTER_NODE_ISOLATED syscall.Errno = 5984 - ERROR_CLUSTER_NODE_QUARANTINED syscall.Errno = 5985 - ERROR_CLUSTER_DATABASE_UPDATE_CONDITION_FAILED syscall.Errno = 5986 - ERROR_CLUSTER_SPACE_DEGRADED syscall.Errno = 5987 - ERROR_CLUSTER_TOKEN_DELEGATION_NOT_SUPPORTED syscall.Errno = 5988 - ERROR_CLUSTER_CSV_INVALID_HANDLE syscall.Errno = 5989 - ERROR_CLUSTER_CSV_SUPPORTED_ONLY_ON_COORDINATOR syscall.Errno = 5990 - ERROR_GROUPSET_NOT_AVAILABLE syscall.Errno = 5991 - ERROR_GROUPSET_NOT_FOUND syscall.Errno = 5992 - ERROR_GROUPSET_CANT_PROVIDE syscall.Errno = 5993 - ERROR_CLUSTER_FAULT_DOMAIN_PARENT_NOT_FOUND syscall.Errno = 5994 - ERROR_CLUSTER_FAULT_DOMAIN_INVALID_HIERARCHY syscall.Errno = 5995 - ERROR_CLUSTER_FAULT_DOMAIN_FAILED_S2D_VALIDATION syscall.Errno = 5996 - ERROR_CLUSTER_FAULT_DOMAIN_S2D_CONNECTIVITY_LOSS syscall.Errno = 5997 - ERROR_CLUSTER_INVALID_INFRASTRUCTURE_FILESERVER_NAME syscall.Errno = 5998 - ERROR_CLUSTERSET_MANAGEMENT_CLUSTER_UNREACHABLE syscall.Errno = 5999 - ERROR_ENCRYPTION_FAILED syscall.Errno = 6000 - ERROR_DECRYPTION_FAILED syscall.Errno = 6001 - ERROR_FILE_ENCRYPTED syscall.Errno = 6002 - ERROR_NO_RECOVERY_POLICY syscall.Errno = 6003 - ERROR_NO_EFS syscall.Errno = 6004 - ERROR_WRONG_EFS syscall.Errno = 6005 - ERROR_NO_USER_KEYS syscall.Errno = 6006 - ERROR_FILE_NOT_ENCRYPTED syscall.Errno = 6007 - ERROR_NOT_EXPORT_FORMAT syscall.Errno = 6008 - ERROR_FILE_READ_ONLY syscall.Errno = 6009 - ERROR_DIR_EFS_DISALLOWED syscall.Errno = 6010 - ERROR_EFS_SERVER_NOT_TRUSTED syscall.Errno = 6011 - ERROR_BAD_RECOVERY_POLICY syscall.Errno = 6012 - ERROR_EFS_ALG_BLOB_TOO_BIG syscall.Errno = 6013 - ERROR_VOLUME_NOT_SUPPORT_EFS syscall.Errno = 6014 - ERROR_EFS_DISABLED syscall.Errno = 6015 - ERROR_EFS_VERSION_NOT_SUPPORT syscall.Errno = 6016 - ERROR_CS_ENCRYPTION_INVALID_SERVER_RESPONSE syscall.Errno = 6017 - ERROR_CS_ENCRYPTION_UNSUPPORTED_SERVER syscall.Errno = 6018 - ERROR_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE syscall.Errno = 6019 - ERROR_CS_ENCRYPTION_NEW_ENCRYPTED_FILE syscall.Errno = 6020 - ERROR_CS_ENCRYPTION_FILE_NOT_CSE syscall.Errno = 6021 - ERROR_ENCRYPTION_POLICY_DENIES_OPERATION syscall.Errno = 6022 - ERROR_NO_BROWSER_SERVERS_FOUND syscall.Errno = 6118 - SCHED_E_SERVICE_NOT_LOCALSYSTEM syscall.Errno = 6200 - ERROR_LOG_SECTOR_INVALID 
syscall.Errno = 6600 - ERROR_LOG_SECTOR_PARITY_INVALID syscall.Errno = 6601 - ERROR_LOG_SECTOR_REMAPPED syscall.Errno = 6602 - ERROR_LOG_BLOCK_INCOMPLETE syscall.Errno = 6603 - ERROR_LOG_INVALID_RANGE syscall.Errno = 6604 - ERROR_LOG_BLOCKS_EXHAUSTED syscall.Errno = 6605 - ERROR_LOG_READ_CONTEXT_INVALID syscall.Errno = 6606 - ERROR_LOG_RESTART_INVALID syscall.Errno = 6607 - ERROR_LOG_BLOCK_VERSION syscall.Errno = 6608 - ERROR_LOG_BLOCK_INVALID syscall.Errno = 6609 - ERROR_LOG_READ_MODE_INVALID syscall.Errno = 6610 - ERROR_LOG_NO_RESTART syscall.Errno = 6611 - ERROR_LOG_METADATA_CORRUPT syscall.Errno = 6612 - ERROR_LOG_METADATA_INVALID syscall.Errno = 6613 - ERROR_LOG_METADATA_INCONSISTENT syscall.Errno = 6614 - ERROR_LOG_RESERVATION_INVALID syscall.Errno = 6615 - ERROR_LOG_CANT_DELETE syscall.Errno = 6616 - ERROR_LOG_CONTAINER_LIMIT_EXCEEDED syscall.Errno = 6617 - ERROR_LOG_START_OF_LOG syscall.Errno = 6618 - ERROR_LOG_POLICY_ALREADY_INSTALLED syscall.Errno = 6619 - ERROR_LOG_POLICY_NOT_INSTALLED syscall.Errno = 6620 - ERROR_LOG_POLICY_INVALID syscall.Errno = 6621 - ERROR_LOG_POLICY_CONFLICT syscall.Errno = 6622 - ERROR_LOG_PINNED_ARCHIVE_TAIL syscall.Errno = 6623 - ERROR_LOG_RECORD_NONEXISTENT syscall.Errno = 6624 - ERROR_LOG_RECORDS_RESERVED_INVALID syscall.Errno = 6625 - ERROR_LOG_SPACE_RESERVED_INVALID syscall.Errno = 6626 - ERROR_LOG_TAIL_INVALID syscall.Errno = 6627 - ERROR_LOG_FULL syscall.Errno = 6628 - ERROR_COULD_NOT_RESIZE_LOG syscall.Errno = 6629 - ERROR_LOG_MULTIPLEXED syscall.Errno = 6630 - ERROR_LOG_DEDICATED syscall.Errno = 6631 - ERROR_LOG_ARCHIVE_NOT_IN_PROGRESS syscall.Errno = 6632 - ERROR_LOG_ARCHIVE_IN_PROGRESS syscall.Errno = 6633 - ERROR_LOG_EPHEMERAL syscall.Errno = 6634 - ERROR_LOG_NOT_ENOUGH_CONTAINERS syscall.Errno = 6635 - ERROR_LOG_CLIENT_ALREADY_REGISTERED syscall.Errno = 6636 - ERROR_LOG_CLIENT_NOT_REGISTERED syscall.Errno = 6637 - ERROR_LOG_FULL_HANDLER_IN_PROGRESS syscall.Errno = 6638 - ERROR_LOG_CONTAINER_READ_FAILED syscall.Errno = 6639 - ERROR_LOG_CONTAINER_WRITE_FAILED syscall.Errno = 6640 - ERROR_LOG_CONTAINER_OPEN_FAILED syscall.Errno = 6641 - ERROR_LOG_CONTAINER_STATE_INVALID syscall.Errno = 6642 - ERROR_LOG_STATE_INVALID syscall.Errno = 6643 - ERROR_LOG_PINNED syscall.Errno = 6644 - ERROR_LOG_METADATA_FLUSH_FAILED syscall.Errno = 6645 - ERROR_LOG_INCONSISTENT_SECURITY syscall.Errno = 6646 - ERROR_LOG_APPENDED_FLUSH_FAILED syscall.Errno = 6647 - ERROR_LOG_PINNED_RESERVATION syscall.Errno = 6648 - ERROR_INVALID_TRANSACTION syscall.Errno = 6700 - ERROR_TRANSACTION_NOT_ACTIVE syscall.Errno = 6701 - ERROR_TRANSACTION_REQUEST_NOT_VALID syscall.Errno = 6702 - ERROR_TRANSACTION_NOT_REQUESTED syscall.Errno = 6703 - ERROR_TRANSACTION_ALREADY_ABORTED syscall.Errno = 6704 - ERROR_TRANSACTION_ALREADY_COMMITTED syscall.Errno = 6705 - ERROR_TM_INITIALIZATION_FAILED syscall.Errno = 6706 - ERROR_RESOURCEMANAGER_READ_ONLY syscall.Errno = 6707 - ERROR_TRANSACTION_NOT_JOINED syscall.Errno = 6708 - ERROR_TRANSACTION_SUPERIOR_EXISTS syscall.Errno = 6709 - ERROR_CRM_PROTOCOL_ALREADY_EXISTS syscall.Errno = 6710 - ERROR_TRANSACTION_PROPAGATION_FAILED syscall.Errno = 6711 - ERROR_CRM_PROTOCOL_NOT_FOUND syscall.Errno = 6712 - ERROR_TRANSACTION_INVALID_MARSHALL_BUFFER syscall.Errno = 6713 - ERROR_CURRENT_TRANSACTION_NOT_VALID syscall.Errno = 6714 - ERROR_TRANSACTION_NOT_FOUND syscall.Errno = 6715 - ERROR_RESOURCEMANAGER_NOT_FOUND syscall.Errno = 6716 - ERROR_ENLISTMENT_NOT_FOUND syscall.Errno = 6717 - ERROR_TRANSACTIONMANAGER_NOT_FOUND syscall.Errno = 6718 - 
ERROR_TRANSACTIONMANAGER_NOT_ONLINE syscall.Errno = 6719 - ERROR_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION syscall.Errno = 6720 - ERROR_TRANSACTION_NOT_ROOT syscall.Errno = 6721 - ERROR_TRANSACTION_OBJECT_EXPIRED syscall.Errno = 6722 - ERROR_TRANSACTION_RESPONSE_NOT_ENLISTED syscall.Errno = 6723 - ERROR_TRANSACTION_RECORD_TOO_LONG syscall.Errno = 6724 - ERROR_IMPLICIT_TRANSACTION_NOT_SUPPORTED syscall.Errno = 6725 - ERROR_TRANSACTION_INTEGRITY_VIOLATED syscall.Errno = 6726 - ERROR_TRANSACTIONMANAGER_IDENTITY_MISMATCH syscall.Errno = 6727 - ERROR_RM_CANNOT_BE_FROZEN_FOR_SNAPSHOT syscall.Errno = 6728 - ERROR_TRANSACTION_MUST_WRITETHROUGH syscall.Errno = 6729 - ERROR_TRANSACTION_NO_SUPERIOR syscall.Errno = 6730 - ERROR_HEURISTIC_DAMAGE_POSSIBLE syscall.Errno = 6731 - ERROR_TRANSACTIONAL_CONFLICT syscall.Errno = 6800 - ERROR_RM_NOT_ACTIVE syscall.Errno = 6801 - ERROR_RM_METADATA_CORRUPT syscall.Errno = 6802 - ERROR_DIRECTORY_NOT_RM syscall.Errno = 6803 - ERROR_TRANSACTIONS_UNSUPPORTED_REMOTE syscall.Errno = 6805 - ERROR_LOG_RESIZE_INVALID_SIZE syscall.Errno = 6806 - ERROR_OBJECT_NO_LONGER_EXISTS syscall.Errno = 6807 - ERROR_STREAM_MINIVERSION_NOT_FOUND syscall.Errno = 6808 - ERROR_STREAM_MINIVERSION_NOT_VALID syscall.Errno = 6809 - ERROR_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION syscall.Errno = 6810 - ERROR_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT syscall.Errno = 6811 - ERROR_CANT_CREATE_MORE_STREAM_MINIVERSIONS syscall.Errno = 6812 - ERROR_REMOTE_FILE_VERSION_MISMATCH syscall.Errno = 6814 - ERROR_HANDLE_NO_LONGER_VALID syscall.Errno = 6815 - ERROR_NO_TXF_METADATA syscall.Errno = 6816 - ERROR_LOG_CORRUPTION_DETECTED syscall.Errno = 6817 - ERROR_CANT_RECOVER_WITH_HANDLE_OPEN syscall.Errno = 6818 - ERROR_RM_DISCONNECTED syscall.Errno = 6819 - ERROR_ENLISTMENT_NOT_SUPERIOR syscall.Errno = 6820 - ERROR_RECOVERY_NOT_NEEDED syscall.Errno = 6821 - ERROR_RM_ALREADY_STARTED syscall.Errno = 6822 - ERROR_FILE_IDENTITY_NOT_PERSISTENT syscall.Errno = 6823 - ERROR_CANT_BREAK_TRANSACTIONAL_DEPENDENCY syscall.Errno = 6824 - ERROR_CANT_CROSS_RM_BOUNDARY syscall.Errno = 6825 - ERROR_TXF_DIR_NOT_EMPTY syscall.Errno = 6826 - ERROR_INDOUBT_TRANSACTIONS_EXIST syscall.Errno = 6827 - ERROR_TM_VOLATILE syscall.Errno = 6828 - ERROR_ROLLBACK_TIMER_EXPIRED syscall.Errno = 6829 - ERROR_TXF_ATTRIBUTE_CORRUPT syscall.Errno = 6830 - ERROR_EFS_NOT_ALLOWED_IN_TRANSACTION syscall.Errno = 6831 - ERROR_TRANSACTIONAL_OPEN_NOT_ALLOWED syscall.Errno = 6832 - ERROR_LOG_GROWTH_FAILED syscall.Errno = 6833 - ERROR_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE syscall.Errno = 6834 - ERROR_TXF_METADATA_ALREADY_PRESENT syscall.Errno = 6835 - ERROR_TRANSACTION_SCOPE_CALLBACKS_NOT_SET syscall.Errno = 6836 - ERROR_TRANSACTION_REQUIRED_PROMOTION syscall.Errno = 6837 - ERROR_CANNOT_EXECUTE_FILE_IN_TRANSACTION syscall.Errno = 6838 - ERROR_TRANSACTIONS_NOT_FROZEN syscall.Errno = 6839 - ERROR_TRANSACTION_FREEZE_IN_PROGRESS syscall.Errno = 6840 - ERROR_NOT_SNAPSHOT_VOLUME syscall.Errno = 6841 - ERROR_NO_SAVEPOINT_WITH_OPEN_FILES syscall.Errno = 6842 - ERROR_DATA_LOST_REPAIR syscall.Errno = 6843 - ERROR_SPARSE_NOT_ALLOWED_IN_TRANSACTION syscall.Errno = 6844 - ERROR_TM_IDENTITY_MISMATCH syscall.Errno = 6845 - ERROR_FLOATED_SECTION syscall.Errno = 6846 - ERROR_CANNOT_ACCEPT_TRANSACTED_WORK syscall.Errno = 6847 - ERROR_CANNOT_ABORT_TRANSACTIONS syscall.Errno = 6848 - ERROR_BAD_CLUSTERS syscall.Errno = 6849 - ERROR_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION syscall.Errno = 6850 - ERROR_VOLUME_DIRTY syscall.Errno = 6851 - 
ERROR_NO_LINK_TRACKING_IN_TRANSACTION syscall.Errno = 6852 - ERROR_OPERATION_NOT_SUPPORTED_IN_TRANSACTION syscall.Errno = 6853 - ERROR_EXPIRED_HANDLE syscall.Errno = 6854 - ERROR_TRANSACTION_NOT_ENLISTED syscall.Errno = 6855 - ERROR_CTX_WINSTATION_NAME_INVALID syscall.Errno = 7001 - ERROR_CTX_INVALID_PD syscall.Errno = 7002 - ERROR_CTX_PD_NOT_FOUND syscall.Errno = 7003 - ERROR_CTX_WD_NOT_FOUND syscall.Errno = 7004 - ERROR_CTX_CANNOT_MAKE_EVENTLOG_ENTRY syscall.Errno = 7005 - ERROR_CTX_SERVICE_NAME_COLLISION syscall.Errno = 7006 - ERROR_CTX_CLOSE_PENDING syscall.Errno = 7007 - ERROR_CTX_NO_OUTBUF syscall.Errno = 7008 - ERROR_CTX_MODEM_INF_NOT_FOUND syscall.Errno = 7009 - ERROR_CTX_INVALID_MODEMNAME syscall.Errno = 7010 - ERROR_CTX_MODEM_RESPONSE_ERROR syscall.Errno = 7011 - ERROR_CTX_MODEM_RESPONSE_TIMEOUT syscall.Errno = 7012 - ERROR_CTX_MODEM_RESPONSE_NO_CARRIER syscall.Errno = 7013 - ERROR_CTX_MODEM_RESPONSE_NO_DIALTONE syscall.Errno = 7014 - ERROR_CTX_MODEM_RESPONSE_BUSY syscall.Errno = 7015 - ERROR_CTX_MODEM_RESPONSE_VOICE syscall.Errno = 7016 - ERROR_CTX_TD_ERROR syscall.Errno = 7017 - ERROR_CTX_WINSTATION_NOT_FOUND syscall.Errno = 7022 - ERROR_CTX_WINSTATION_ALREADY_EXISTS syscall.Errno = 7023 - ERROR_CTX_WINSTATION_BUSY syscall.Errno = 7024 - ERROR_CTX_BAD_VIDEO_MODE syscall.Errno = 7025 - ERROR_CTX_GRAPHICS_INVALID syscall.Errno = 7035 - ERROR_CTX_LOGON_DISABLED syscall.Errno = 7037 - ERROR_CTX_NOT_CONSOLE syscall.Errno = 7038 - ERROR_CTX_CLIENT_QUERY_TIMEOUT syscall.Errno = 7040 - ERROR_CTX_CONSOLE_DISCONNECT syscall.Errno = 7041 - ERROR_CTX_CONSOLE_CONNECT syscall.Errno = 7042 - ERROR_CTX_SHADOW_DENIED syscall.Errno = 7044 - ERROR_CTX_WINSTATION_ACCESS_DENIED syscall.Errno = 7045 - ERROR_CTX_INVALID_WD syscall.Errno = 7049 - ERROR_CTX_SHADOW_INVALID syscall.Errno = 7050 - ERROR_CTX_SHADOW_DISABLED syscall.Errno = 7051 - ERROR_CTX_CLIENT_LICENSE_IN_USE syscall.Errno = 7052 - ERROR_CTX_CLIENT_LICENSE_NOT_SET syscall.Errno = 7053 - ERROR_CTX_LICENSE_NOT_AVAILABLE syscall.Errno = 7054 - ERROR_CTX_LICENSE_CLIENT_INVALID syscall.Errno = 7055 - ERROR_CTX_LICENSE_EXPIRED syscall.Errno = 7056 - ERROR_CTX_SHADOW_NOT_RUNNING syscall.Errno = 7057 - ERROR_CTX_SHADOW_ENDED_BY_MODE_CHANGE syscall.Errno = 7058 - ERROR_ACTIVATION_COUNT_EXCEEDED syscall.Errno = 7059 - ERROR_CTX_WINSTATIONS_DISABLED syscall.Errno = 7060 - ERROR_CTX_ENCRYPTION_LEVEL_REQUIRED syscall.Errno = 7061 - ERROR_CTX_SESSION_IN_USE syscall.Errno = 7062 - ERROR_CTX_NO_FORCE_LOGOFF syscall.Errno = 7063 - ERROR_CTX_ACCOUNT_RESTRICTION syscall.Errno = 7064 - ERROR_RDP_PROTOCOL_ERROR syscall.Errno = 7065 - ERROR_CTX_CDM_CONNECT syscall.Errno = 7066 - ERROR_CTX_CDM_DISCONNECT syscall.Errno = 7067 - ERROR_CTX_SECURITY_LAYER_ERROR syscall.Errno = 7068 - ERROR_TS_INCOMPATIBLE_SESSIONS syscall.Errno = 7069 - ERROR_TS_VIDEO_SUBSYSTEM_ERROR syscall.Errno = 7070 - FRS_ERR_INVALID_API_SEQUENCE syscall.Errno = 8001 - FRS_ERR_STARTING_SERVICE syscall.Errno = 8002 - FRS_ERR_STOPPING_SERVICE syscall.Errno = 8003 - FRS_ERR_INTERNAL_API syscall.Errno = 8004 - FRS_ERR_INTERNAL syscall.Errno = 8005 - FRS_ERR_SERVICE_COMM syscall.Errno = 8006 - FRS_ERR_INSUFFICIENT_PRIV syscall.Errno = 8007 - FRS_ERR_AUTHENTICATION syscall.Errno = 8008 - FRS_ERR_PARENT_INSUFFICIENT_PRIV syscall.Errno = 8009 - FRS_ERR_PARENT_AUTHENTICATION syscall.Errno = 8010 - FRS_ERR_CHILD_TO_PARENT_COMM syscall.Errno = 8011 - FRS_ERR_PARENT_TO_CHILD_COMM syscall.Errno = 8012 - FRS_ERR_SYSVOL_POPULATE syscall.Errno = 8013 - FRS_ERR_SYSVOL_POPULATE_TIMEOUT syscall.Errno = 8014 - 
const (
	// Windows error codes (winerror.h values); Errno entries are numeric
	// system error codes, Handle entries are COM/OLE HRESULTs.
	FRS_ERR_SYSVOL_IS_BUSY            syscall.Errno = 8015
	FRS_ERR_SYSVOL_DEMOTE             syscall.Errno = 8016
	FRS_ERR_INVALID_SERVICE_PARAMETER syscall.Errno = 8017

	DS_S_SUCCESS = ERROR_SUCCESS

	// Directory Service errors: 8200-8263 and 8301-8650.
	ERROR_DS_NOT_INSTALLED                syscall.Errno = 8200
	ERROR_DS_MEMBERSHIP_EVALUATED_LOCALLY syscall.Errno = 8201
	// ... ERROR_DS_* 8202-8262 ...
	ERROR_DS_RIDMGR_DISABLED syscall.Errno = 8263
	ERROR_DS_ROOT_MUST_BE_NC syscall.Errno = 8301
	// ... ERROR_DS_* 8302-8649 ...
	ERROR_DS_VALUE_KEY_NOT_UNIQUE syscall.Errno = 8650

	// DNS errors: 9000-9996.
	DNS_ERROR_RESPONSE_CODES_BASE syscall.Errno = 9000
	DNS_ERROR_RCODE_NO_ERROR                    = ERROR_SUCCESS
	DNS_ERROR_MASK                syscall.Errno = 0x00002328
	DNS_ERROR_RCODE_FORMAT_ERROR  syscall.Errno = 9001
	// ... DNS_ERROR_RCODE_* 9002-9018 ...
	DNS_ERROR_RCODE_LAST  = DNS_ERROR_RCODE_BADTIME
	DNS_ERROR_DNSSEC_BASE syscall.Errno = 9100
	// ... DNSSEC 9101-9130, packet format 9500-9506 ...
	DNS_ERROR_NO_MEMORY    = ERROR_OUTOFMEMORY
	DNS_ERROR_INVALID_NAME = ERROR_INVALID_NAME
	DNS_ERROR_INVALID_DATA = ERROR_INVALID_DATA
	// ... general API 9550-9573, zones 9600-9622, datafiles 9650-9655,
	// database 9700-9722, operations 9750-9753, secure 9800-9801,
	// setup 9850-9852, directory partitions 9900-9906, RRL 9911-9917,
	// virtualization instances 9921-9925, zone scopes 9951-9963,
	// DNS policy 9971-9996 ...
	DNS_ERROR_POLICY_INVALID_CRITERIA_TIME_OF_DAY syscall.Errno = 9996

	// Winsock errors: 10000-11033.
	WSABASEERR syscall.Errno = 10000
	WSAEINTR   syscall.Errno = 10004
	// ... WSAE* 10009-10112, WSAHOST_NOT_FOUND 11001 through WSANO_DATA
	// 11004, WSA_QOS_* 11005-11031 ...
	WSA_SECURE_HOST_NOT_FOUND   syscall.Errno = 11032
	WSA_IPSEC_NAME_POLICY_ERROR syscall.Errno = 11033

	// IPsec policy, IKE negotiation, and DoS-protection errors: 13000-13932.
	ERROR_IPSEC_QM_POLICY_EXISTS syscall.Errno = 13000
	// ... ERROR_IPSEC_* 13001-13025 ...
	ERROR_IPSEC_IKE_NEG_STATUS_BEGIN syscall.Errno = 13800
	// ... ERROR_IPSEC_IKE_* 13801-13909, ERROR_IPSEC_* 13910-13918 ...
	ERROR_IPSEC_DOSP_BLOCK syscall.Errno = 13925
	// ... ERROR_IPSEC_DOSP_* 13926-13931 ...
	ERROR_IPSEC_DOSP_MAX_PER_IP_RATELIMIT_QUEUES syscall.Errno = 13932

	// Side-by-side (SxS) assembly and XML parse errors: 14000-14110.
	ERROR_SXS_SECTION_NOT_FOUND syscall.Errno = 14000
	// ... ERROR_SXS_* 14001-14109 ...
	ERROR_SXS_FILE_HASH_MISSING syscall.Errno = 14110

	// Event log, event collector, MUI, MRM/PRI resource, monitor (MCA),
	// GPIO, run-level, package install/deployment, app model, state, and
	// store errors: 15000-15864.
	ERROR_EVT_INVALID_CHANNEL_PATH syscall.Errno = 15000
	// ... 15001-15863 ...
	STORE_ERROR_LICENSE_REVOKED syscall.Errno = 15864

	SEVERITY_SUCCESS syscall.Errno = 0
	SEVERITY_ERROR   syscall.Errno = 1
	FACILITY_NT_BIT                = 0x10000000

	E_NOT_SET               = ERROR_NOT_FOUND
	E_NOT_VALID_STATE       = ERROR_INVALID_STATE
	E_NOT_SUFFICIENT_BUFFER = ERROR_INSUFFICIENT_BUFFER
	E_TIME_CRITICAL_THREAD  = ERROR_TIME_CRITICAL_THREAD

	NOERROR syscall.Errno = 0

	// Core COM HRESULTs.
	E_UNEXPECTED Handle = 0x8000FFFF
	E_NOTIMPL    Handle = 0x80004001
	// ... remaining E_*, RO_E_*, and CO_E_* values (including
	// E_OUTOFMEMORY 0x8007000E, E_INVALIDARG 0x80070057,
	// E_ACCESSDENIED 0x80070005, CO_E_INIT_TLS 0x80004006 through
	// CO_E_PREMATURE_STUB_RUNDOWN 0x80004035) ...
	S_OK    Handle = 0
	S_FALSE Handle = 1
	// ... OLE_E_* 0x80040000-0x80040012 with OLE_E_FIRST/LAST and
	// OLE_S_FIRST/LAST range markers, DV_E_* 0x80040064-0x8004006D,
	// DRAGDROP_E_* 0x80040100-0x80040103, CLASS_E_* 0x80040110-0x80040112,
	// VIEW_E_DRAW 0x80040140, REGDB_E_* 0x80040150-0x80040157,
	// CAT_E_* 0x80040160-0x80040161, plus the MARSHAL/DATA/VIEW/REGDB
	// first/last markers ...
	CS_E_FIRST syscall.Errno = 0x80040164
	CS_E_LAST  syscall.Errno = 0x8004016F

	CS_E_PACKAGE_NOTFOUND      Handle = 0x80040164
	CS_E_NOT_DELETABLE         Handle = 0x80040165
	CS_E_CLASS_NOTFOUND        Handle = 0x80040166
	CS_E_INVALID_VERSION       Handle = 0x80040167
	CS_E_NO_CLASSSTORE         Handle = 0x80040168
	CS_E_OBJECT_NOTFOUND       Handle = 0x80040169
	CS_E_OBJECT_ALREADY_EXISTS Handle = 0x8004016A
	CS_E_INVALID_PATH          Handle = 0x8004016B
	CS_E_NETWORK_ERROR         Handle
= 0x8004016C - CS_E_ADMIN_LIMIT_EXCEEDED Handle = 0x8004016D - CS_E_SCHEMA_MISMATCH Handle = 0x8004016E - CS_E_INTERNAL_ERROR Handle = 0x8004016F - CACHE_E_FIRST syscall.Errno = 0x80040170 - CACHE_E_LAST syscall.Errno = 0x8004017F - CACHE_S_FIRST syscall.Errno = 0x00040170 - CACHE_S_LAST syscall.Errno = 0x0004017F - CACHE_E_NOCACHE_UPDATED Handle = 0x80040170 - OLEOBJ_E_FIRST syscall.Errno = 0x80040180 - OLEOBJ_E_LAST syscall.Errno = 0x8004018F - OLEOBJ_S_FIRST syscall.Errno = 0x00040180 - OLEOBJ_S_LAST syscall.Errno = 0x0004018F - OLEOBJ_E_NOVERBS Handle = 0x80040180 - OLEOBJ_E_INVALIDVERB Handle = 0x80040181 - CLIENTSITE_E_FIRST syscall.Errno = 0x80040190 - CLIENTSITE_E_LAST syscall.Errno = 0x8004019F - CLIENTSITE_S_FIRST syscall.Errno = 0x00040190 - CLIENTSITE_S_LAST syscall.Errno = 0x0004019F - INPLACE_E_NOTUNDOABLE Handle = 0x800401A0 - INPLACE_E_NOTOOLSPACE Handle = 0x800401A1 - INPLACE_E_FIRST syscall.Errno = 0x800401A0 - INPLACE_E_LAST syscall.Errno = 0x800401AF - INPLACE_S_FIRST syscall.Errno = 0x000401A0 - INPLACE_S_LAST syscall.Errno = 0x000401AF - ENUM_E_FIRST syscall.Errno = 0x800401B0 - ENUM_E_LAST syscall.Errno = 0x800401BF - ENUM_S_FIRST syscall.Errno = 0x000401B0 - ENUM_S_LAST syscall.Errno = 0x000401BF - CONVERT10_E_FIRST syscall.Errno = 0x800401C0 - CONVERT10_E_LAST syscall.Errno = 0x800401CF - CONVERT10_S_FIRST syscall.Errno = 0x000401C0 - CONVERT10_S_LAST syscall.Errno = 0x000401CF - CONVERT10_E_OLESTREAM_GET Handle = 0x800401C0 - CONVERT10_E_OLESTREAM_PUT Handle = 0x800401C1 - CONVERT10_E_OLESTREAM_FMT Handle = 0x800401C2 - CONVERT10_E_OLESTREAM_BITMAP_TO_DIB Handle = 0x800401C3 - CONVERT10_E_STG_FMT Handle = 0x800401C4 - CONVERT10_E_STG_NO_STD_STREAM Handle = 0x800401C5 - CONVERT10_E_STG_DIB_TO_BITMAP Handle = 0x800401C6 - CLIPBRD_E_FIRST syscall.Errno = 0x800401D0 - CLIPBRD_E_LAST syscall.Errno = 0x800401DF - CLIPBRD_S_FIRST syscall.Errno = 0x000401D0 - CLIPBRD_S_LAST syscall.Errno = 0x000401DF - CLIPBRD_E_CANT_OPEN Handle = 0x800401D0 - CLIPBRD_E_CANT_EMPTY Handle = 0x800401D1 - CLIPBRD_E_CANT_SET Handle = 0x800401D2 - CLIPBRD_E_BAD_DATA Handle = 0x800401D3 - CLIPBRD_E_CANT_CLOSE Handle = 0x800401D4 - MK_E_FIRST syscall.Errno = 0x800401E0 - MK_E_LAST syscall.Errno = 0x800401EF - MK_S_FIRST syscall.Errno = 0x000401E0 - MK_S_LAST syscall.Errno = 0x000401EF - MK_E_CONNECTMANUALLY Handle = 0x800401E0 - MK_E_EXCEEDEDDEADLINE Handle = 0x800401E1 - MK_E_NEEDGENERIC Handle = 0x800401E2 - MK_E_UNAVAILABLE Handle = 0x800401E3 - MK_E_SYNTAX Handle = 0x800401E4 - MK_E_NOOBJECT Handle = 0x800401E5 - MK_E_INVALIDEXTENSION Handle = 0x800401E6 - MK_E_INTERMEDIATEINTERFACENOTSUPPORTED Handle = 0x800401E7 - MK_E_NOTBINDABLE Handle = 0x800401E8 - MK_E_NOTBOUND Handle = 0x800401E9 - MK_E_CANTOPENFILE Handle = 0x800401EA - MK_E_MUSTBOTHERUSER Handle = 0x800401EB - MK_E_NOINVERSE Handle = 0x800401EC - MK_E_NOSTORAGE Handle = 0x800401ED - MK_E_NOPREFIX Handle = 0x800401EE - MK_E_ENUMERATION_FAILED Handle = 0x800401EF - CO_E_FIRST syscall.Errno = 0x800401F0 - CO_E_LAST syscall.Errno = 0x800401FF - CO_S_FIRST syscall.Errno = 0x000401F0 - CO_S_LAST syscall.Errno = 0x000401FF - CO_E_NOTINITIALIZED Handle = 0x800401F0 - CO_E_ALREADYINITIALIZED Handle = 0x800401F1 - CO_E_CANTDETERMINECLASS Handle = 0x800401F2 - CO_E_CLASSSTRING Handle = 0x800401F3 - CO_E_IIDSTRING Handle = 0x800401F4 - CO_E_APPNOTFOUND Handle = 0x800401F5 - CO_E_APPSINGLEUSE Handle = 0x800401F6 - CO_E_ERRORINAPP Handle = 0x800401F7 - CO_E_DLLNOTFOUND Handle = 0x800401F8 - CO_E_ERRORINDLL Handle = 0x800401F9 - 
CO_E_WRONGOSFORAPP Handle = 0x800401FA - CO_E_OBJNOTREG Handle = 0x800401FB - CO_E_OBJISREG Handle = 0x800401FC - CO_E_OBJNOTCONNECTED Handle = 0x800401FD - CO_E_APPDIDNTREG Handle = 0x800401FE - CO_E_RELEASED Handle = 0x800401FF - EVENT_E_FIRST syscall.Errno = 0x80040200 - EVENT_E_LAST syscall.Errno = 0x8004021F - EVENT_S_FIRST syscall.Errno = 0x00040200 - EVENT_S_LAST syscall.Errno = 0x0004021F - EVENT_S_SOME_SUBSCRIBERS_FAILED Handle = 0x00040200 - EVENT_E_ALL_SUBSCRIBERS_FAILED Handle = 0x80040201 - EVENT_S_NOSUBSCRIBERS Handle = 0x00040202 - EVENT_E_QUERYSYNTAX Handle = 0x80040203 - EVENT_E_QUERYFIELD Handle = 0x80040204 - EVENT_E_INTERNALEXCEPTION Handle = 0x80040205 - EVENT_E_INTERNALERROR Handle = 0x80040206 - EVENT_E_INVALID_PER_USER_SID Handle = 0x80040207 - EVENT_E_USER_EXCEPTION Handle = 0x80040208 - EVENT_E_TOO_MANY_METHODS Handle = 0x80040209 - EVENT_E_MISSING_EVENTCLASS Handle = 0x8004020A - EVENT_E_NOT_ALL_REMOVED Handle = 0x8004020B - EVENT_E_COMPLUS_NOT_INSTALLED Handle = 0x8004020C - EVENT_E_CANT_MODIFY_OR_DELETE_UNCONFIGURED_OBJECT Handle = 0x8004020D - EVENT_E_CANT_MODIFY_OR_DELETE_CONFIGURED_OBJECT Handle = 0x8004020E - EVENT_E_INVALID_EVENT_CLASS_PARTITION Handle = 0x8004020F - EVENT_E_PER_USER_SID_NOT_LOGGED_ON Handle = 0x80040210 - TPC_E_INVALID_PROPERTY Handle = 0x80040241 - TPC_E_NO_DEFAULT_TABLET Handle = 0x80040212 - TPC_E_UNKNOWN_PROPERTY Handle = 0x8004021B - TPC_E_INVALID_INPUT_RECT Handle = 0x80040219 - TPC_E_INVALID_STROKE Handle = 0x80040222 - TPC_E_INITIALIZE_FAIL Handle = 0x80040223 - TPC_E_NOT_RELEVANT Handle = 0x80040232 - TPC_E_INVALID_PACKET_DESCRIPTION Handle = 0x80040233 - TPC_E_RECOGNIZER_NOT_REGISTERED Handle = 0x80040235 - TPC_E_INVALID_RIGHTS Handle = 0x80040236 - TPC_E_OUT_OF_ORDER_CALL Handle = 0x80040237 - TPC_E_QUEUE_FULL Handle = 0x80040238 - TPC_E_INVALID_CONFIGURATION Handle = 0x80040239 - TPC_E_INVALID_DATA_FROM_RECOGNIZER Handle = 0x8004023A - TPC_S_TRUNCATED Handle = 0x00040252 - TPC_S_INTERRUPTED Handle = 0x00040253 - TPC_S_NO_DATA_TO_PROCESS Handle = 0x00040254 - XACT_E_FIRST syscall.Errno = 0x8004D000 - XACT_E_LAST syscall.Errno = 0x8004D02B - XACT_S_FIRST syscall.Errno = 0x0004D000 - XACT_S_LAST syscall.Errno = 0x0004D010 - XACT_E_ALREADYOTHERSINGLEPHASE Handle = 0x8004D000 - XACT_E_CANTRETAIN Handle = 0x8004D001 - XACT_E_COMMITFAILED Handle = 0x8004D002 - XACT_E_COMMITPREVENTED Handle = 0x8004D003 - XACT_E_HEURISTICABORT Handle = 0x8004D004 - XACT_E_HEURISTICCOMMIT Handle = 0x8004D005 - XACT_E_HEURISTICDAMAGE Handle = 0x8004D006 - XACT_E_HEURISTICDANGER Handle = 0x8004D007 - XACT_E_ISOLATIONLEVEL Handle = 0x8004D008 - XACT_E_NOASYNC Handle = 0x8004D009 - XACT_E_NOENLIST Handle = 0x8004D00A - XACT_E_NOISORETAIN Handle = 0x8004D00B - XACT_E_NORESOURCE Handle = 0x8004D00C - XACT_E_NOTCURRENT Handle = 0x8004D00D - XACT_E_NOTRANSACTION Handle = 0x8004D00E - XACT_E_NOTSUPPORTED Handle = 0x8004D00F - XACT_E_UNKNOWNRMGRID Handle = 0x8004D010 - XACT_E_WRONGSTATE Handle = 0x8004D011 - XACT_E_WRONGUOW Handle = 0x8004D012 - XACT_E_XTIONEXISTS Handle = 0x8004D013 - XACT_E_NOIMPORTOBJECT Handle = 0x8004D014 - XACT_E_INVALIDCOOKIE Handle = 0x8004D015 - XACT_E_INDOUBT Handle = 0x8004D016 - XACT_E_NOTIMEOUT Handle = 0x8004D017 - XACT_E_ALREADYINPROGRESS Handle = 0x8004D018 - XACT_E_ABORTED Handle = 0x8004D019 - XACT_E_LOGFULL Handle = 0x8004D01A - XACT_E_TMNOTAVAILABLE Handle = 0x8004D01B - XACT_E_CONNECTION_DOWN Handle = 0x8004D01C - XACT_E_CONNECTION_DENIED Handle = 0x8004D01D - XACT_E_REENLISTTIMEOUT Handle = 0x8004D01E - 
XACT_E_TIP_CONNECT_FAILED Handle = 0x8004D01F - XACT_E_TIP_PROTOCOL_ERROR Handle = 0x8004D020 - XACT_E_TIP_PULL_FAILED Handle = 0x8004D021 - XACT_E_DEST_TMNOTAVAILABLE Handle = 0x8004D022 - XACT_E_TIP_DISABLED Handle = 0x8004D023 - XACT_E_NETWORK_TX_DISABLED Handle = 0x8004D024 - XACT_E_PARTNER_NETWORK_TX_DISABLED Handle = 0x8004D025 - XACT_E_XA_TX_DISABLED Handle = 0x8004D026 - XACT_E_UNABLE_TO_READ_DTC_CONFIG Handle = 0x8004D027 - XACT_E_UNABLE_TO_LOAD_DTC_PROXY Handle = 0x8004D028 - XACT_E_ABORTING Handle = 0x8004D029 - XACT_E_PUSH_COMM_FAILURE Handle = 0x8004D02A - XACT_E_PULL_COMM_FAILURE Handle = 0x8004D02B - XACT_E_LU_TX_DISABLED Handle = 0x8004D02C - XACT_E_CLERKNOTFOUND Handle = 0x8004D080 - XACT_E_CLERKEXISTS Handle = 0x8004D081 - XACT_E_RECOVERYINPROGRESS Handle = 0x8004D082 - XACT_E_TRANSACTIONCLOSED Handle = 0x8004D083 - XACT_E_INVALIDLSN Handle = 0x8004D084 - XACT_E_REPLAYREQUEST Handle = 0x8004D085 - XACT_S_ASYNC Handle = 0x0004D000 - XACT_S_DEFECT Handle = 0x0004D001 - XACT_S_READONLY Handle = 0x0004D002 - XACT_S_SOMENORETAIN Handle = 0x0004D003 - XACT_S_OKINFORM Handle = 0x0004D004 - XACT_S_MADECHANGESCONTENT Handle = 0x0004D005 - XACT_S_MADECHANGESINFORM Handle = 0x0004D006 - XACT_S_ALLNORETAIN Handle = 0x0004D007 - XACT_S_ABORTING Handle = 0x0004D008 - XACT_S_SINGLEPHASE Handle = 0x0004D009 - XACT_S_LOCALLY_OK Handle = 0x0004D00A - XACT_S_LASTRESOURCEMANAGER Handle = 0x0004D010 - CONTEXT_E_FIRST syscall.Errno = 0x8004E000 - CONTEXT_E_LAST syscall.Errno = 0x8004E02F - CONTEXT_S_FIRST syscall.Errno = 0x0004E000 - CONTEXT_S_LAST syscall.Errno = 0x0004E02F - CONTEXT_E_ABORTED Handle = 0x8004E002 - CONTEXT_E_ABORTING Handle = 0x8004E003 - CONTEXT_E_NOCONTEXT Handle = 0x8004E004 - CONTEXT_E_WOULD_DEADLOCK Handle = 0x8004E005 - CONTEXT_E_SYNCH_TIMEOUT Handle = 0x8004E006 - CONTEXT_E_OLDREF Handle = 0x8004E007 - CONTEXT_E_ROLENOTFOUND Handle = 0x8004E00C - CONTEXT_E_TMNOTAVAILABLE Handle = 0x8004E00F - CO_E_ACTIVATIONFAILED Handle = 0x8004E021 - CO_E_ACTIVATIONFAILED_EVENTLOGGED Handle = 0x8004E022 - CO_E_ACTIVATIONFAILED_CATALOGERROR Handle = 0x8004E023 - CO_E_ACTIVATIONFAILED_TIMEOUT Handle = 0x8004E024 - CO_E_INITIALIZATIONFAILED Handle = 0x8004E025 - CONTEXT_E_NOJIT Handle = 0x8004E026 - CONTEXT_E_NOTRANSACTION Handle = 0x8004E027 - CO_E_THREADINGMODEL_CHANGED Handle = 0x8004E028 - CO_E_NOIISINTRINSICS Handle = 0x8004E029 - CO_E_NOCOOKIES Handle = 0x8004E02A - CO_E_DBERROR Handle = 0x8004E02B - CO_E_NOTPOOLED Handle = 0x8004E02C - CO_E_NOTCONSTRUCTED Handle = 0x8004E02D - CO_E_NOSYNCHRONIZATION Handle = 0x8004E02E - CO_E_ISOLEVELMISMATCH Handle = 0x8004E02F - CO_E_CALL_OUT_OF_TX_SCOPE_NOT_ALLOWED Handle = 0x8004E030 - CO_E_EXIT_TRANSACTION_SCOPE_NOT_CALLED Handle = 0x8004E031 - OLE_S_USEREG Handle = 0x00040000 - OLE_S_STATIC Handle = 0x00040001 - OLE_S_MAC_CLIPFORMAT Handle = 0x00040002 - DRAGDROP_S_DROP Handle = 0x00040100 - DRAGDROP_S_CANCEL Handle = 0x00040101 - DRAGDROP_S_USEDEFAULTCURSORS Handle = 0x00040102 - DATA_S_SAMEFORMATETC Handle = 0x00040130 - VIEW_S_ALREADY_FROZEN Handle = 0x00040140 - CACHE_S_FORMATETC_NOTSUPPORTED Handle = 0x00040170 - CACHE_S_SAMECACHE Handle = 0x00040171 - CACHE_S_SOMECACHES_NOTUPDATED Handle = 0x00040172 - OLEOBJ_S_INVALIDVERB Handle = 0x00040180 - OLEOBJ_S_CANNOT_DOVERB_NOW Handle = 0x00040181 - OLEOBJ_S_INVALIDHWND Handle = 0x00040182 - INPLACE_S_TRUNCATED Handle = 0x000401A0 - CONVERT10_S_NO_PRESENTATION Handle = 0x000401C0 - MK_S_REDUCED_TO_SELF Handle = 0x000401E2 - MK_S_ME Handle = 0x000401E4 - MK_S_HIM Handle = 0x000401E5 - MK_S_US 
Handle = 0x000401E6 - MK_S_MONIKERALREADYREGISTERED Handle = 0x000401E7 - SCHED_S_TASK_READY Handle = 0x00041300 - SCHED_S_TASK_RUNNING Handle = 0x00041301 - SCHED_S_TASK_DISABLED Handle = 0x00041302 - SCHED_S_TASK_HAS_NOT_RUN Handle = 0x00041303 - SCHED_S_TASK_NO_MORE_RUNS Handle = 0x00041304 - SCHED_S_TASK_NOT_SCHEDULED Handle = 0x00041305 - SCHED_S_TASK_TERMINATED Handle = 0x00041306 - SCHED_S_TASK_NO_VALID_TRIGGERS Handle = 0x00041307 - SCHED_S_EVENT_TRIGGER Handle = 0x00041308 - SCHED_E_TRIGGER_NOT_FOUND Handle = 0x80041309 - SCHED_E_TASK_NOT_READY Handle = 0x8004130A - SCHED_E_TASK_NOT_RUNNING Handle = 0x8004130B - SCHED_E_SERVICE_NOT_INSTALLED Handle = 0x8004130C - SCHED_E_CANNOT_OPEN_TASK Handle = 0x8004130D - SCHED_E_INVALID_TASK Handle = 0x8004130E - SCHED_E_ACCOUNT_INFORMATION_NOT_SET Handle = 0x8004130F - SCHED_E_ACCOUNT_NAME_NOT_FOUND Handle = 0x80041310 - SCHED_E_ACCOUNT_DBASE_CORRUPT Handle = 0x80041311 - SCHED_E_NO_SECURITY_SERVICES Handle = 0x80041312 - SCHED_E_UNKNOWN_OBJECT_VERSION Handle = 0x80041313 - SCHED_E_UNSUPPORTED_ACCOUNT_OPTION Handle = 0x80041314 - SCHED_E_SERVICE_NOT_RUNNING Handle = 0x80041315 - SCHED_E_UNEXPECTEDNODE Handle = 0x80041316 - SCHED_E_NAMESPACE Handle = 0x80041317 - SCHED_E_INVALIDVALUE Handle = 0x80041318 - SCHED_E_MISSINGNODE Handle = 0x80041319 - SCHED_E_MALFORMEDXML Handle = 0x8004131A - SCHED_S_SOME_TRIGGERS_FAILED Handle = 0x0004131B - SCHED_S_BATCH_LOGON_PROBLEM Handle = 0x0004131C - SCHED_E_TOO_MANY_NODES Handle = 0x8004131D - SCHED_E_PAST_END_BOUNDARY Handle = 0x8004131E - SCHED_E_ALREADY_RUNNING Handle = 0x8004131F - SCHED_E_USER_NOT_LOGGED_ON Handle = 0x80041320 - SCHED_E_INVALID_TASK_HASH Handle = 0x80041321 - SCHED_E_SERVICE_NOT_AVAILABLE Handle = 0x80041322 - SCHED_E_SERVICE_TOO_BUSY Handle = 0x80041323 - SCHED_E_TASK_ATTEMPTED Handle = 0x80041324 - SCHED_S_TASK_QUEUED Handle = 0x00041325 - SCHED_E_TASK_DISABLED Handle = 0x80041326 - SCHED_E_TASK_NOT_V1_COMPAT Handle = 0x80041327 - SCHED_E_START_ON_DEMAND Handle = 0x80041328 - SCHED_E_TASK_NOT_UBPM_COMPAT Handle = 0x80041329 - SCHED_E_DEPRECATED_FEATURE_USED Handle = 0x80041330 - CO_E_CLASS_CREATE_FAILED Handle = 0x80080001 - CO_E_SCM_ERROR Handle = 0x80080002 - CO_E_SCM_RPC_FAILURE Handle = 0x80080003 - CO_E_BAD_PATH Handle = 0x80080004 - CO_E_SERVER_EXEC_FAILURE Handle = 0x80080005 - CO_E_OBJSRV_RPC_FAILURE Handle = 0x80080006 - MK_E_NO_NORMALIZED Handle = 0x80080007 - CO_E_SERVER_STOPPING Handle = 0x80080008 - MEM_E_INVALID_ROOT Handle = 0x80080009 - MEM_E_INVALID_LINK Handle = 0x80080010 - MEM_E_INVALID_SIZE Handle = 0x80080011 - CO_S_NOTALLINTERFACES Handle = 0x00080012 - CO_S_MACHINENAMENOTFOUND Handle = 0x00080013 - CO_E_MISSING_DISPLAYNAME Handle = 0x80080015 - CO_E_RUNAS_VALUE_MUST_BE_AAA Handle = 0x80080016 - CO_E_ELEVATION_DISABLED Handle = 0x80080017 - APPX_E_PACKAGING_INTERNAL Handle = 0x80080200 - APPX_E_INTERLEAVING_NOT_ALLOWED Handle = 0x80080201 - APPX_E_RELATIONSHIPS_NOT_ALLOWED Handle = 0x80080202 - APPX_E_MISSING_REQUIRED_FILE Handle = 0x80080203 - APPX_E_INVALID_MANIFEST Handle = 0x80080204 - APPX_E_INVALID_BLOCKMAP Handle = 0x80080205 - APPX_E_CORRUPT_CONTENT Handle = 0x80080206 - APPX_E_BLOCK_HASH_INVALID Handle = 0x80080207 - APPX_E_REQUESTED_RANGE_TOO_LARGE Handle = 0x80080208 - APPX_E_INVALID_SIP_CLIENT_DATA Handle = 0x80080209 - APPX_E_INVALID_KEY_INFO Handle = 0x8008020A - APPX_E_INVALID_CONTENTGROUPMAP Handle = 0x8008020B - APPX_E_INVALID_APPINSTALLER Handle = 0x8008020C - APPX_E_DELTA_BASELINE_VERSION_MISMATCH Handle = 0x8008020D - 
APPX_E_DELTA_PACKAGE_MISSING_FILE Handle = 0x8008020E - APPX_E_INVALID_DELTA_PACKAGE Handle = 0x8008020F - APPX_E_DELTA_APPENDED_PACKAGE_NOT_ALLOWED Handle = 0x80080210 - APPX_E_INVALID_PACKAGING_LAYOUT Handle = 0x80080211 - APPX_E_INVALID_PACKAGESIGNCONFIG Handle = 0x80080212 - APPX_E_RESOURCESPRI_NOT_ALLOWED Handle = 0x80080213 - APPX_E_FILE_COMPRESSION_MISMATCH Handle = 0x80080214 - APPX_E_INVALID_PAYLOAD_PACKAGE_EXTENSION Handle = 0x80080215 - APPX_E_INVALID_ENCRYPTION_EXCLUSION_FILE_LIST Handle = 0x80080216 - BT_E_SPURIOUS_ACTIVATION Handle = 0x80080300 - DISP_E_UNKNOWNINTERFACE Handle = 0x80020001 - DISP_E_MEMBERNOTFOUND Handle = 0x80020003 - DISP_E_PARAMNOTFOUND Handle = 0x80020004 - DISP_E_TYPEMISMATCH Handle = 0x80020005 - DISP_E_UNKNOWNNAME Handle = 0x80020006 - DISP_E_NONAMEDARGS Handle = 0x80020007 - DISP_E_BADVARTYPE Handle = 0x80020008 - DISP_E_EXCEPTION Handle = 0x80020009 - DISP_E_OVERFLOW Handle = 0x8002000A - DISP_E_BADINDEX Handle = 0x8002000B - DISP_E_UNKNOWNLCID Handle = 0x8002000C - DISP_E_ARRAYISLOCKED Handle = 0x8002000D - DISP_E_BADPARAMCOUNT Handle = 0x8002000E - DISP_E_PARAMNOTOPTIONAL Handle = 0x8002000F - DISP_E_BADCALLEE Handle = 0x80020010 - DISP_E_NOTACOLLECTION Handle = 0x80020011 - DISP_E_DIVBYZERO Handle = 0x80020012 - DISP_E_BUFFERTOOSMALL Handle = 0x80020013 - TYPE_E_BUFFERTOOSMALL Handle = 0x80028016 - TYPE_E_FIELDNOTFOUND Handle = 0x80028017 - TYPE_E_INVDATAREAD Handle = 0x80028018 - TYPE_E_UNSUPFORMAT Handle = 0x80028019 - TYPE_E_REGISTRYACCESS Handle = 0x8002801C - TYPE_E_LIBNOTREGISTERED Handle = 0x8002801D - TYPE_E_UNDEFINEDTYPE Handle = 0x80028027 - TYPE_E_QUALIFIEDNAMEDISALLOWED Handle = 0x80028028 - TYPE_E_INVALIDSTATE Handle = 0x80028029 - TYPE_E_WRONGTYPEKIND Handle = 0x8002802A - TYPE_E_ELEMENTNOTFOUND Handle = 0x8002802B - TYPE_E_AMBIGUOUSNAME Handle = 0x8002802C - TYPE_E_NAMECONFLICT Handle = 0x8002802D - TYPE_E_UNKNOWNLCID Handle = 0x8002802E - TYPE_E_DLLFUNCTIONNOTFOUND Handle = 0x8002802F - TYPE_E_BADMODULEKIND Handle = 0x800288BD - TYPE_E_SIZETOOBIG Handle = 0x800288C5 - TYPE_E_DUPLICATEID Handle = 0x800288C6 - TYPE_E_INVALIDID Handle = 0x800288CF - TYPE_E_TYPEMISMATCH Handle = 0x80028CA0 - TYPE_E_OUTOFBOUNDS Handle = 0x80028CA1 - TYPE_E_IOERROR Handle = 0x80028CA2 - TYPE_E_CANTCREATETMPFILE Handle = 0x80028CA3 - TYPE_E_CANTLOADLIBRARY Handle = 0x80029C4A - TYPE_E_INCONSISTENTPROPFUNCS Handle = 0x80029C83 - TYPE_E_CIRCULARTYPE Handle = 0x80029C84 - STG_E_INVALIDFUNCTION Handle = 0x80030001 - STG_E_FILENOTFOUND Handle = 0x80030002 - STG_E_PATHNOTFOUND Handle = 0x80030003 - STG_E_TOOMANYOPENFILES Handle = 0x80030004 - STG_E_ACCESSDENIED Handle = 0x80030005 - STG_E_INVALIDHANDLE Handle = 0x80030006 - STG_E_INSUFFICIENTMEMORY Handle = 0x80030008 - STG_E_INVALIDPOINTER Handle = 0x80030009 - STG_E_NOMOREFILES Handle = 0x80030012 - STG_E_DISKISWRITEPROTECTED Handle = 0x80030013 - STG_E_SEEKERROR Handle = 0x80030019 - STG_E_WRITEFAULT Handle = 0x8003001D - STG_E_READFAULT Handle = 0x8003001E - STG_E_SHAREVIOLATION Handle = 0x80030020 - STG_E_LOCKVIOLATION Handle = 0x80030021 - STG_E_FILEALREADYEXISTS Handle = 0x80030050 - STG_E_INVALIDPARAMETER Handle = 0x80030057 - STG_E_MEDIUMFULL Handle = 0x80030070 - STG_E_PROPSETMISMATCHED Handle = 0x800300F0 - STG_E_ABNORMALAPIEXIT Handle = 0x800300FA - STG_E_INVALIDHEADER Handle = 0x800300FB - STG_E_INVALIDNAME Handle = 0x800300FC - STG_E_UNKNOWN Handle = 0x800300FD - STG_E_UNIMPLEMENTEDFUNCTION Handle = 0x800300FE - STG_E_INVALIDFLAG Handle = 0x800300FF - STG_E_INUSE Handle = 0x80030100 - 
STG_E_NOTCURRENT Handle = 0x80030101 - STG_E_REVERTED Handle = 0x80030102 - STG_E_CANTSAVE Handle = 0x80030103 - STG_E_OLDFORMAT Handle = 0x80030104 - STG_E_OLDDLL Handle = 0x80030105 - STG_E_SHAREREQUIRED Handle = 0x80030106 - STG_E_NOTFILEBASEDSTORAGE Handle = 0x80030107 - STG_E_EXTANTMARSHALLINGS Handle = 0x80030108 - STG_E_DOCFILECORRUPT Handle = 0x80030109 - STG_E_BADBASEADDRESS Handle = 0x80030110 - STG_E_DOCFILETOOLARGE Handle = 0x80030111 - STG_E_NOTSIMPLEFORMAT Handle = 0x80030112 - STG_E_INCOMPLETE Handle = 0x80030201 - STG_E_TERMINATED Handle = 0x80030202 - STG_S_CONVERTED Handle = 0x00030200 - STG_S_BLOCK Handle = 0x00030201 - STG_S_RETRYNOW Handle = 0x00030202 - STG_S_MONITORING Handle = 0x00030203 - STG_S_MULTIPLEOPENS Handle = 0x00030204 - STG_S_CONSOLIDATIONFAILED Handle = 0x00030205 - STG_S_CANNOTCONSOLIDATE Handle = 0x00030206 - STG_S_POWER_CYCLE_REQUIRED Handle = 0x00030207 - STG_E_FIRMWARE_SLOT_INVALID Handle = 0x80030208 - STG_E_FIRMWARE_IMAGE_INVALID Handle = 0x80030209 - STG_E_DEVICE_UNRESPONSIVE Handle = 0x8003020A - STG_E_STATUS_COPY_PROTECTION_FAILURE Handle = 0x80030305 - STG_E_CSS_AUTHENTICATION_FAILURE Handle = 0x80030306 - STG_E_CSS_KEY_NOT_PRESENT Handle = 0x80030307 - STG_E_CSS_KEY_NOT_ESTABLISHED Handle = 0x80030308 - STG_E_CSS_SCRAMBLED_SECTOR Handle = 0x80030309 - STG_E_CSS_REGION_MISMATCH Handle = 0x8003030A - STG_E_RESETS_EXHAUSTED Handle = 0x8003030B - RPC_E_CALL_REJECTED Handle = 0x80010001 - RPC_E_CALL_CANCELED Handle = 0x80010002 - RPC_E_CANTPOST_INSENDCALL Handle = 0x80010003 - RPC_E_CANTCALLOUT_INASYNCCALL Handle = 0x80010004 - RPC_E_CANTCALLOUT_INEXTERNALCALL Handle = 0x80010005 - RPC_E_CONNECTION_TERMINATED Handle = 0x80010006 - RPC_E_SERVER_DIED Handle = 0x80010007 - RPC_E_CLIENT_DIED Handle = 0x80010008 - RPC_E_INVALID_DATAPACKET Handle = 0x80010009 - RPC_E_CANTTRANSMIT_CALL Handle = 0x8001000A - RPC_E_CLIENT_CANTMARSHAL_DATA Handle = 0x8001000B - RPC_E_CLIENT_CANTUNMARSHAL_DATA Handle = 0x8001000C - RPC_E_SERVER_CANTMARSHAL_DATA Handle = 0x8001000D - RPC_E_SERVER_CANTUNMARSHAL_DATA Handle = 0x8001000E - RPC_E_INVALID_DATA Handle = 0x8001000F - RPC_E_INVALID_PARAMETER Handle = 0x80010010 - RPC_E_CANTCALLOUT_AGAIN Handle = 0x80010011 - RPC_E_SERVER_DIED_DNE Handle = 0x80010012 - RPC_E_SYS_CALL_FAILED Handle = 0x80010100 - RPC_E_OUT_OF_RESOURCES Handle = 0x80010101 - RPC_E_ATTEMPTED_MULTITHREAD Handle = 0x80010102 - RPC_E_NOT_REGISTERED Handle = 0x80010103 - RPC_E_FAULT Handle = 0x80010104 - RPC_E_SERVERFAULT Handle = 0x80010105 - RPC_E_CHANGED_MODE Handle = 0x80010106 - RPC_E_INVALIDMETHOD Handle = 0x80010107 - RPC_E_DISCONNECTED Handle = 0x80010108 - RPC_E_RETRY Handle = 0x80010109 - RPC_E_SERVERCALL_RETRYLATER Handle = 0x8001010A - RPC_E_SERVERCALL_REJECTED Handle = 0x8001010B - RPC_E_INVALID_CALLDATA Handle = 0x8001010C - RPC_E_CANTCALLOUT_ININPUTSYNCCALL Handle = 0x8001010D - RPC_E_WRONG_THREAD Handle = 0x8001010E - RPC_E_THREAD_NOT_INIT Handle = 0x8001010F - RPC_E_VERSION_MISMATCH Handle = 0x80010110 - RPC_E_INVALID_HEADER Handle = 0x80010111 - RPC_E_INVALID_EXTENSION Handle = 0x80010112 - RPC_E_INVALID_IPID Handle = 0x80010113 - RPC_E_INVALID_OBJECT Handle = 0x80010114 - RPC_S_CALLPENDING Handle = 0x80010115 - RPC_S_WAITONTIMER Handle = 0x80010116 - RPC_E_CALL_COMPLETE Handle = 0x80010117 - RPC_E_UNSECURE_CALL Handle = 0x80010118 - RPC_E_TOO_LATE Handle = 0x80010119 - RPC_E_NO_GOOD_SECURITY_PACKAGES Handle = 0x8001011A - RPC_E_ACCESS_DENIED Handle = 0x8001011B - RPC_E_REMOTE_DISABLED Handle = 0x8001011C - RPC_E_INVALID_OBJREF Handle = 
0x8001011D - RPC_E_NO_CONTEXT Handle = 0x8001011E - RPC_E_TIMEOUT Handle = 0x8001011F - RPC_E_NO_SYNC Handle = 0x80010120 - RPC_E_FULLSIC_REQUIRED Handle = 0x80010121 - RPC_E_INVALID_STD_NAME Handle = 0x80010122 - CO_E_FAILEDTOIMPERSONATE Handle = 0x80010123 - CO_E_FAILEDTOGETSECCTX Handle = 0x80010124 - CO_E_FAILEDTOOPENTHREADTOKEN Handle = 0x80010125 - CO_E_FAILEDTOGETTOKENINFO Handle = 0x80010126 - CO_E_TRUSTEEDOESNTMATCHCLIENT Handle = 0x80010127 - CO_E_FAILEDTOQUERYCLIENTBLANKET Handle = 0x80010128 - CO_E_FAILEDTOSETDACL Handle = 0x80010129 - CO_E_ACCESSCHECKFAILED Handle = 0x8001012A - CO_E_NETACCESSAPIFAILED Handle = 0x8001012B - CO_E_WRONGTRUSTEENAMESYNTAX Handle = 0x8001012C - CO_E_INVALIDSID Handle = 0x8001012D - CO_E_CONVERSIONFAILED Handle = 0x8001012E - CO_E_NOMATCHINGSIDFOUND Handle = 0x8001012F - CO_E_LOOKUPACCSIDFAILED Handle = 0x80010130 - CO_E_NOMATCHINGNAMEFOUND Handle = 0x80010131 - CO_E_LOOKUPACCNAMEFAILED Handle = 0x80010132 - CO_E_SETSERLHNDLFAILED Handle = 0x80010133 - CO_E_FAILEDTOGETWINDIR Handle = 0x80010134 - CO_E_PATHTOOLONG Handle = 0x80010135 - CO_E_FAILEDTOGENUUID Handle = 0x80010136 - CO_E_FAILEDTOCREATEFILE Handle = 0x80010137 - CO_E_FAILEDTOCLOSEHANDLE Handle = 0x80010138 - CO_E_EXCEEDSYSACLLIMIT Handle = 0x80010139 - CO_E_ACESINWRONGORDER Handle = 0x8001013A - CO_E_INCOMPATIBLESTREAMVERSION Handle = 0x8001013B - CO_E_FAILEDTOOPENPROCESSTOKEN Handle = 0x8001013C - CO_E_DECODEFAILED Handle = 0x8001013D - CO_E_ACNOTINITIALIZED Handle = 0x8001013F - CO_E_CANCEL_DISABLED Handle = 0x80010140 - RPC_E_UNEXPECTED Handle = 0x8001FFFF - ERROR_AUDITING_DISABLED Handle = 0xC0090001 - ERROR_ALL_SIDS_FILTERED Handle = 0xC0090002 - ERROR_BIZRULES_NOT_ENABLED Handle = 0xC0090003 - NTE_BAD_UID Handle = 0x80090001 - NTE_BAD_HASH Handle = 0x80090002 - NTE_BAD_KEY Handle = 0x80090003 - NTE_BAD_LEN Handle = 0x80090004 - NTE_BAD_DATA Handle = 0x80090005 - NTE_BAD_SIGNATURE Handle = 0x80090006 - NTE_BAD_VER Handle = 0x80090007 - NTE_BAD_ALGID Handle = 0x80090008 - NTE_BAD_FLAGS Handle = 0x80090009 - NTE_BAD_TYPE Handle = 0x8009000A - NTE_BAD_KEY_STATE Handle = 0x8009000B - NTE_BAD_HASH_STATE Handle = 0x8009000C - NTE_NO_KEY Handle = 0x8009000D - NTE_NO_MEMORY Handle = 0x8009000E - NTE_EXISTS Handle = 0x8009000F - NTE_PERM Handle = 0x80090010 - NTE_NOT_FOUND Handle = 0x80090011 - NTE_DOUBLE_ENCRYPT Handle = 0x80090012 - NTE_BAD_PROVIDER Handle = 0x80090013 - NTE_BAD_PROV_TYPE Handle = 0x80090014 - NTE_BAD_PUBLIC_KEY Handle = 0x80090015 - NTE_BAD_KEYSET Handle = 0x80090016 - NTE_PROV_TYPE_NOT_DEF Handle = 0x80090017 - NTE_PROV_TYPE_ENTRY_BAD Handle = 0x80090018 - NTE_KEYSET_NOT_DEF Handle = 0x80090019 - NTE_KEYSET_ENTRY_BAD Handle = 0x8009001A - NTE_PROV_TYPE_NO_MATCH Handle = 0x8009001B - NTE_SIGNATURE_FILE_BAD Handle = 0x8009001C - NTE_PROVIDER_DLL_FAIL Handle = 0x8009001D - NTE_PROV_DLL_NOT_FOUND Handle = 0x8009001E - NTE_BAD_KEYSET_PARAM Handle = 0x8009001F - NTE_FAIL Handle = 0x80090020 - NTE_SYS_ERR Handle = 0x80090021 - NTE_SILENT_CONTEXT Handle = 0x80090022 - NTE_TOKEN_KEYSET_STORAGE_FULL Handle = 0x80090023 - NTE_TEMPORARY_PROFILE Handle = 0x80090024 - NTE_FIXEDPARAMETER Handle = 0x80090025 - NTE_INVALID_HANDLE Handle = 0x80090026 - NTE_INVALID_PARAMETER Handle = 0x80090027 - NTE_BUFFER_TOO_SMALL Handle = 0x80090028 - NTE_NOT_SUPPORTED Handle = 0x80090029 - NTE_NO_MORE_ITEMS Handle = 0x8009002A - NTE_BUFFERS_OVERLAP Handle = 0x8009002B - NTE_DECRYPTION_FAILURE Handle = 0x8009002C - NTE_INTERNAL_ERROR Handle = 0x8009002D - NTE_UI_REQUIRED Handle = 0x8009002E - 
NTE_HMAC_NOT_SUPPORTED Handle = 0x8009002F - NTE_DEVICE_NOT_READY Handle = 0x80090030 - NTE_AUTHENTICATION_IGNORED Handle = 0x80090031 - NTE_VALIDATION_FAILED Handle = 0x80090032 - NTE_INCORRECT_PASSWORD Handle = 0x80090033 - NTE_ENCRYPTION_FAILURE Handle = 0x80090034 - NTE_DEVICE_NOT_FOUND Handle = 0x80090035 - NTE_USER_CANCELLED Handle = 0x80090036 - NTE_PASSWORD_CHANGE_REQUIRED Handle = 0x80090037 - NTE_NOT_ACTIVE_CONSOLE Handle = 0x80090038 - SEC_E_INSUFFICIENT_MEMORY Handle = 0x80090300 - SEC_E_INVALID_HANDLE Handle = 0x80090301 - SEC_E_UNSUPPORTED_FUNCTION Handle = 0x80090302 - SEC_E_TARGET_UNKNOWN Handle = 0x80090303 - SEC_E_INTERNAL_ERROR Handle = 0x80090304 - SEC_E_SECPKG_NOT_FOUND Handle = 0x80090305 - SEC_E_NOT_OWNER Handle = 0x80090306 - SEC_E_CANNOT_INSTALL Handle = 0x80090307 - SEC_E_INVALID_TOKEN Handle = 0x80090308 - SEC_E_CANNOT_PACK Handle = 0x80090309 - SEC_E_QOP_NOT_SUPPORTED Handle = 0x8009030A - SEC_E_NO_IMPERSONATION Handle = 0x8009030B - SEC_E_LOGON_DENIED Handle = 0x8009030C - SEC_E_UNKNOWN_CREDENTIALS Handle = 0x8009030D - SEC_E_NO_CREDENTIALS Handle = 0x8009030E - SEC_E_MESSAGE_ALTERED Handle = 0x8009030F - SEC_E_OUT_OF_SEQUENCE Handle = 0x80090310 - SEC_E_NO_AUTHENTICATING_AUTHORITY Handle = 0x80090311 - SEC_I_CONTINUE_NEEDED Handle = 0x00090312 - SEC_I_COMPLETE_NEEDED Handle = 0x00090313 - SEC_I_COMPLETE_AND_CONTINUE Handle = 0x00090314 - SEC_I_LOCAL_LOGON Handle = 0x00090315 - SEC_E_BAD_PKGID Handle = 0x80090316 - SEC_E_CONTEXT_EXPIRED Handle = 0x80090317 - SEC_I_CONTEXT_EXPIRED Handle = 0x00090317 - SEC_E_INCOMPLETE_MESSAGE Handle = 0x80090318 - SEC_E_INCOMPLETE_CREDENTIALS Handle = 0x80090320 - SEC_E_BUFFER_TOO_SMALL Handle = 0x80090321 - SEC_I_INCOMPLETE_CREDENTIALS Handle = 0x00090320 - SEC_I_RENEGOTIATE Handle = 0x00090321 - SEC_E_WRONG_PRINCIPAL Handle = 0x80090322 - SEC_I_NO_LSA_CONTEXT Handle = 0x00090323 - SEC_E_TIME_SKEW Handle = 0x80090324 - SEC_E_UNTRUSTED_ROOT Handle = 0x80090325 - SEC_E_ILLEGAL_MESSAGE Handle = 0x80090326 - SEC_E_CERT_UNKNOWN Handle = 0x80090327 - SEC_E_CERT_EXPIRED Handle = 0x80090328 - SEC_E_ENCRYPT_FAILURE Handle = 0x80090329 - SEC_E_DECRYPT_FAILURE Handle = 0x80090330 - SEC_E_ALGORITHM_MISMATCH Handle = 0x80090331 - SEC_E_SECURITY_QOS_FAILED Handle = 0x80090332 - SEC_E_UNFINISHED_CONTEXT_DELETED Handle = 0x80090333 - SEC_E_NO_TGT_REPLY Handle = 0x80090334 - SEC_E_NO_IP_ADDRESSES Handle = 0x80090335 - SEC_E_WRONG_CREDENTIAL_HANDLE Handle = 0x80090336 - SEC_E_CRYPTO_SYSTEM_INVALID Handle = 0x80090337 - SEC_E_MAX_REFERRALS_EXCEEDED Handle = 0x80090338 - SEC_E_MUST_BE_KDC Handle = 0x80090339 - SEC_E_STRONG_CRYPTO_NOT_SUPPORTED Handle = 0x8009033A - SEC_E_TOO_MANY_PRINCIPALS Handle = 0x8009033B - SEC_E_NO_PA_DATA Handle = 0x8009033C - SEC_E_PKINIT_NAME_MISMATCH Handle = 0x8009033D - SEC_E_SMARTCARD_LOGON_REQUIRED Handle = 0x8009033E - SEC_E_SHUTDOWN_IN_PROGRESS Handle = 0x8009033F - SEC_E_KDC_INVALID_REQUEST Handle = 0x80090340 - SEC_E_KDC_UNABLE_TO_REFER Handle = 0x80090341 - SEC_E_KDC_UNKNOWN_ETYPE Handle = 0x80090342 - SEC_E_UNSUPPORTED_PREAUTH Handle = 0x80090343 - SEC_E_DELEGATION_REQUIRED Handle = 0x80090345 - SEC_E_BAD_BINDINGS Handle = 0x80090346 - SEC_E_MULTIPLE_ACCOUNTS Handle = 0x80090347 - SEC_E_NO_KERB_KEY Handle = 0x80090348 - SEC_E_CERT_WRONG_USAGE Handle = 0x80090349 - SEC_E_DOWNGRADE_DETECTED Handle = 0x80090350 - SEC_E_SMARTCARD_CERT_REVOKED Handle = 0x80090351 - SEC_E_ISSUING_CA_UNTRUSTED Handle = 0x80090352 - SEC_E_REVOCATION_OFFLINE_C Handle = 0x80090353 - SEC_E_PKINIT_CLIENT_FAILURE Handle = 0x80090354 - 
SEC_E_SMARTCARD_CERT_EXPIRED Handle = 0x80090355 - SEC_E_NO_S4U_PROT_SUPPORT Handle = 0x80090356 - SEC_E_CROSSREALM_DELEGATION_FAILURE Handle = 0x80090357 - SEC_E_REVOCATION_OFFLINE_KDC Handle = 0x80090358 - SEC_E_ISSUING_CA_UNTRUSTED_KDC Handle = 0x80090359 - SEC_E_KDC_CERT_EXPIRED Handle = 0x8009035A - SEC_E_KDC_CERT_REVOKED Handle = 0x8009035B - SEC_I_SIGNATURE_NEEDED Handle = 0x0009035C - SEC_E_INVALID_PARAMETER Handle = 0x8009035D - SEC_E_DELEGATION_POLICY Handle = 0x8009035E - SEC_E_POLICY_NLTM_ONLY Handle = 0x8009035F - SEC_I_NO_RENEGOTIATION Handle = 0x00090360 - SEC_E_NO_CONTEXT Handle = 0x80090361 - SEC_E_PKU2U_CERT_FAILURE Handle = 0x80090362 - SEC_E_MUTUAL_AUTH_FAILED Handle = 0x80090363 - SEC_I_MESSAGE_FRAGMENT Handle = 0x00090364 - SEC_E_ONLY_HTTPS_ALLOWED Handle = 0x80090365 - SEC_I_CONTINUE_NEEDED_MESSAGE_OK Handle = 0x00090366 - SEC_E_APPLICATION_PROTOCOL_MISMATCH Handle = 0x80090367 - SEC_I_ASYNC_CALL_PENDING Handle = 0x00090368 - SEC_E_INVALID_UPN_NAME Handle = 0x80090369 - SEC_E_NO_SPM = SEC_E_INTERNAL_ERROR - SEC_E_NOT_SUPPORTED = SEC_E_UNSUPPORTED_FUNCTION - CRYPT_E_MSG_ERROR Handle = 0x80091001 - CRYPT_E_UNKNOWN_ALGO Handle = 0x80091002 - CRYPT_E_OID_FORMAT Handle = 0x80091003 - CRYPT_E_INVALID_MSG_TYPE Handle = 0x80091004 - CRYPT_E_UNEXPECTED_ENCODING Handle = 0x80091005 - CRYPT_E_AUTH_ATTR_MISSING Handle = 0x80091006 - CRYPT_E_HASH_VALUE Handle = 0x80091007 - CRYPT_E_INVALID_INDEX Handle = 0x80091008 - CRYPT_E_ALREADY_DECRYPTED Handle = 0x80091009 - CRYPT_E_NOT_DECRYPTED Handle = 0x8009100A - CRYPT_E_RECIPIENT_NOT_FOUND Handle = 0x8009100B - CRYPT_E_CONTROL_TYPE Handle = 0x8009100C - CRYPT_E_ISSUER_SERIALNUMBER Handle = 0x8009100D - CRYPT_E_SIGNER_NOT_FOUND Handle = 0x8009100E - CRYPT_E_ATTRIBUTES_MISSING Handle = 0x8009100F - CRYPT_E_STREAM_MSG_NOT_READY Handle = 0x80091010 - CRYPT_E_STREAM_INSUFFICIENT_DATA Handle = 0x80091011 - CRYPT_I_NEW_PROTECTION_REQUIRED Handle = 0x00091012 - CRYPT_E_BAD_LEN Handle = 0x80092001 - CRYPT_E_BAD_ENCODE Handle = 0x80092002 - CRYPT_E_FILE_ERROR Handle = 0x80092003 - CRYPT_E_NOT_FOUND Handle = 0x80092004 - CRYPT_E_EXISTS Handle = 0x80092005 - CRYPT_E_NO_PROVIDER Handle = 0x80092006 - CRYPT_E_SELF_SIGNED Handle = 0x80092007 - CRYPT_E_DELETED_PREV Handle = 0x80092008 - CRYPT_E_NO_MATCH Handle = 0x80092009 - CRYPT_E_UNEXPECTED_MSG_TYPE Handle = 0x8009200A - CRYPT_E_NO_KEY_PROPERTY Handle = 0x8009200B - CRYPT_E_NO_DECRYPT_CERT Handle = 0x8009200C - CRYPT_E_BAD_MSG Handle = 0x8009200D - CRYPT_E_NO_SIGNER Handle = 0x8009200E - CRYPT_E_PENDING_CLOSE Handle = 0x8009200F - CRYPT_E_REVOKED Handle = 0x80092010 - CRYPT_E_NO_REVOCATION_DLL Handle = 0x80092011 - CRYPT_E_NO_REVOCATION_CHECK Handle = 0x80092012 - CRYPT_E_REVOCATION_OFFLINE Handle = 0x80092013 - CRYPT_E_NOT_IN_REVOCATION_DATABASE Handle = 0x80092014 - CRYPT_E_INVALID_NUMERIC_STRING Handle = 0x80092020 - CRYPT_E_INVALID_PRINTABLE_STRING Handle = 0x80092021 - CRYPT_E_INVALID_IA5_STRING Handle = 0x80092022 - CRYPT_E_INVALID_X500_STRING Handle = 0x80092023 - CRYPT_E_NOT_CHAR_STRING Handle = 0x80092024 - CRYPT_E_FILERESIZED Handle = 0x80092025 - CRYPT_E_SECURITY_SETTINGS Handle = 0x80092026 - CRYPT_E_NO_VERIFY_USAGE_DLL Handle = 0x80092027 - CRYPT_E_NO_VERIFY_USAGE_CHECK Handle = 0x80092028 - CRYPT_E_VERIFY_USAGE_OFFLINE Handle = 0x80092029 - CRYPT_E_NOT_IN_CTL Handle = 0x8009202A - CRYPT_E_NO_TRUSTED_SIGNER Handle = 0x8009202B - CRYPT_E_MISSING_PUBKEY_PARA Handle = 0x8009202C - CRYPT_E_OBJECT_LOCATOR_OBJECT_NOT_FOUND Handle = 0x8009202D - CRYPT_E_OSS_ERROR Handle = 0x80093000 - 
OSS_MORE_BUF Handle = 0x80093001 - OSS_NEGATIVE_UINTEGER Handle = 0x80093002 - OSS_PDU_RANGE Handle = 0x80093003 - OSS_MORE_INPUT Handle = 0x80093004 - OSS_DATA_ERROR Handle = 0x80093005 - OSS_BAD_ARG Handle = 0x80093006 - OSS_BAD_VERSION Handle = 0x80093007 - OSS_OUT_MEMORY Handle = 0x80093008 - OSS_PDU_MISMATCH Handle = 0x80093009 - OSS_LIMITED Handle = 0x8009300A - OSS_BAD_PTR Handle = 0x8009300B - OSS_BAD_TIME Handle = 0x8009300C - OSS_INDEFINITE_NOT_SUPPORTED Handle = 0x8009300D - OSS_MEM_ERROR Handle = 0x8009300E - OSS_BAD_TABLE Handle = 0x8009300F - OSS_TOO_LONG Handle = 0x80093010 - OSS_CONSTRAINT_VIOLATED Handle = 0x80093011 - OSS_FATAL_ERROR Handle = 0x80093012 - OSS_ACCESS_SERIALIZATION_ERROR Handle = 0x80093013 - OSS_NULL_TBL Handle = 0x80093014 - OSS_NULL_FCN Handle = 0x80093015 - OSS_BAD_ENCRULES Handle = 0x80093016 - OSS_UNAVAIL_ENCRULES Handle = 0x80093017 - OSS_CANT_OPEN_TRACE_WINDOW Handle = 0x80093018 - OSS_UNIMPLEMENTED Handle = 0x80093019 - OSS_OID_DLL_NOT_LINKED Handle = 0x8009301A - OSS_CANT_OPEN_TRACE_FILE Handle = 0x8009301B - OSS_TRACE_FILE_ALREADY_OPEN Handle = 0x8009301C - OSS_TABLE_MISMATCH Handle = 0x8009301D - OSS_TYPE_NOT_SUPPORTED Handle = 0x8009301E - OSS_REAL_DLL_NOT_LINKED Handle = 0x8009301F - OSS_REAL_CODE_NOT_LINKED Handle = 0x80093020 - OSS_OUT_OF_RANGE Handle = 0x80093021 - OSS_COPIER_DLL_NOT_LINKED Handle = 0x80093022 - OSS_CONSTRAINT_DLL_NOT_LINKED Handle = 0x80093023 - OSS_COMPARATOR_DLL_NOT_LINKED Handle = 0x80093024 - OSS_COMPARATOR_CODE_NOT_LINKED Handle = 0x80093025 - OSS_MEM_MGR_DLL_NOT_LINKED Handle = 0x80093026 - OSS_PDV_DLL_NOT_LINKED Handle = 0x80093027 - OSS_PDV_CODE_NOT_LINKED Handle = 0x80093028 - OSS_API_DLL_NOT_LINKED Handle = 0x80093029 - OSS_BERDER_DLL_NOT_LINKED Handle = 0x8009302A - OSS_PER_DLL_NOT_LINKED Handle = 0x8009302B - OSS_OPEN_TYPE_ERROR Handle = 0x8009302C - OSS_MUTEX_NOT_CREATED Handle = 0x8009302D - OSS_CANT_CLOSE_TRACE_FILE Handle = 0x8009302E - CRYPT_E_ASN1_ERROR Handle = 0x80093100 - CRYPT_E_ASN1_INTERNAL Handle = 0x80093101 - CRYPT_E_ASN1_EOD Handle = 0x80093102 - CRYPT_E_ASN1_CORRUPT Handle = 0x80093103 - CRYPT_E_ASN1_LARGE Handle = 0x80093104 - CRYPT_E_ASN1_CONSTRAINT Handle = 0x80093105 - CRYPT_E_ASN1_MEMORY Handle = 0x80093106 - CRYPT_E_ASN1_OVERFLOW Handle = 0x80093107 - CRYPT_E_ASN1_BADPDU Handle = 0x80093108 - CRYPT_E_ASN1_BADARGS Handle = 0x80093109 - CRYPT_E_ASN1_BADREAL Handle = 0x8009310A - CRYPT_E_ASN1_BADTAG Handle = 0x8009310B - CRYPT_E_ASN1_CHOICE Handle = 0x8009310C - CRYPT_E_ASN1_RULE Handle = 0x8009310D - CRYPT_E_ASN1_UTF8 Handle = 0x8009310E - CRYPT_E_ASN1_PDU_TYPE Handle = 0x80093133 - CRYPT_E_ASN1_NYI Handle = 0x80093134 - CRYPT_E_ASN1_EXTENDED Handle = 0x80093201 - CRYPT_E_ASN1_NOEOD Handle = 0x80093202 - CERTSRV_E_BAD_REQUESTSUBJECT Handle = 0x80094001 - CERTSRV_E_NO_REQUEST Handle = 0x80094002 - CERTSRV_E_BAD_REQUESTSTATUS Handle = 0x80094003 - CERTSRV_E_PROPERTY_EMPTY Handle = 0x80094004 - CERTSRV_E_INVALID_CA_CERTIFICATE Handle = 0x80094005 - CERTSRV_E_SERVER_SUSPENDED Handle = 0x80094006 - CERTSRV_E_ENCODING_LENGTH Handle = 0x80094007 - CERTSRV_E_ROLECONFLICT Handle = 0x80094008 - CERTSRV_E_RESTRICTEDOFFICER Handle = 0x80094009 - CERTSRV_E_KEY_ARCHIVAL_NOT_CONFIGURED Handle = 0x8009400A - CERTSRV_E_NO_VALID_KRA Handle = 0x8009400B - CERTSRV_E_BAD_REQUEST_KEY_ARCHIVAL Handle = 0x8009400C - CERTSRV_E_NO_CAADMIN_DEFINED Handle = 0x8009400D - CERTSRV_E_BAD_RENEWAL_CERT_ATTRIBUTE Handle = 0x8009400E - CERTSRV_E_NO_DB_SESSIONS Handle = 0x8009400F - CERTSRV_E_ALIGNMENT_FAULT Handle = 
0x80094010 - CERTSRV_E_ENROLL_DENIED Handle = 0x80094011 - CERTSRV_E_TEMPLATE_DENIED Handle = 0x80094012 - CERTSRV_E_DOWNLEVEL_DC_SSL_OR_UPGRADE Handle = 0x80094013 - CERTSRV_E_ADMIN_DENIED_REQUEST Handle = 0x80094014 - CERTSRV_E_NO_POLICY_SERVER Handle = 0x80094015 - CERTSRV_E_WEAK_SIGNATURE_OR_KEY Handle = 0x80094016 - CERTSRV_E_KEY_ATTESTATION_NOT_SUPPORTED Handle = 0x80094017 - CERTSRV_E_ENCRYPTION_CERT_REQUIRED Handle = 0x80094018 - CERTSRV_E_UNSUPPORTED_CERT_TYPE Handle = 0x80094800 - CERTSRV_E_NO_CERT_TYPE Handle = 0x80094801 - CERTSRV_E_TEMPLATE_CONFLICT Handle = 0x80094802 - CERTSRV_E_SUBJECT_ALT_NAME_REQUIRED Handle = 0x80094803 - CERTSRV_E_ARCHIVED_KEY_REQUIRED Handle = 0x80094804 - CERTSRV_E_SMIME_REQUIRED Handle = 0x80094805 - CERTSRV_E_BAD_RENEWAL_SUBJECT Handle = 0x80094806 - CERTSRV_E_BAD_TEMPLATE_VERSION Handle = 0x80094807 - CERTSRV_E_TEMPLATE_POLICY_REQUIRED Handle = 0x80094808 - CERTSRV_E_SIGNATURE_POLICY_REQUIRED Handle = 0x80094809 - CERTSRV_E_SIGNATURE_COUNT Handle = 0x8009480A - CERTSRV_E_SIGNATURE_REJECTED Handle = 0x8009480B - CERTSRV_E_ISSUANCE_POLICY_REQUIRED Handle = 0x8009480C - CERTSRV_E_SUBJECT_UPN_REQUIRED Handle = 0x8009480D - CERTSRV_E_SUBJECT_DIRECTORY_GUID_REQUIRED Handle = 0x8009480E - CERTSRV_E_SUBJECT_DNS_REQUIRED Handle = 0x8009480F - CERTSRV_E_ARCHIVED_KEY_UNEXPECTED Handle = 0x80094810 - CERTSRV_E_KEY_LENGTH Handle = 0x80094811 - CERTSRV_E_SUBJECT_EMAIL_REQUIRED Handle = 0x80094812 - CERTSRV_E_UNKNOWN_CERT_TYPE Handle = 0x80094813 - CERTSRV_E_CERT_TYPE_OVERLAP Handle = 0x80094814 - CERTSRV_E_TOO_MANY_SIGNATURES Handle = 0x80094815 - CERTSRV_E_RENEWAL_BAD_PUBLIC_KEY Handle = 0x80094816 - CERTSRV_E_INVALID_EK Handle = 0x80094817 - CERTSRV_E_INVALID_IDBINDING Handle = 0x80094818 - CERTSRV_E_INVALID_ATTESTATION Handle = 0x80094819 - CERTSRV_E_KEY_ATTESTATION Handle = 0x8009481A - CERTSRV_E_CORRUPT_KEY_ATTESTATION Handle = 0x8009481B - CERTSRV_E_EXPIRED_CHALLENGE Handle = 0x8009481C - CERTSRV_E_INVALID_RESPONSE Handle = 0x8009481D - CERTSRV_E_INVALID_REQUESTID Handle = 0x8009481E - CERTSRV_E_REQUEST_PRECERTIFICATE_MISMATCH Handle = 0x8009481F - CERTSRV_E_PENDING_CLIENT_RESPONSE Handle = 0x80094820 - XENROLL_E_KEY_NOT_EXPORTABLE Handle = 0x80095000 - XENROLL_E_CANNOT_ADD_ROOT_CERT Handle = 0x80095001 - XENROLL_E_RESPONSE_KA_HASH_NOT_FOUND Handle = 0x80095002 - XENROLL_E_RESPONSE_UNEXPECTED_KA_HASH Handle = 0x80095003 - XENROLL_E_RESPONSE_KA_HASH_MISMATCH Handle = 0x80095004 - XENROLL_E_KEYSPEC_SMIME_MISMATCH Handle = 0x80095005 - TRUST_E_SYSTEM_ERROR Handle = 0x80096001 - TRUST_E_NO_SIGNER_CERT Handle = 0x80096002 - TRUST_E_COUNTER_SIGNER Handle = 0x80096003 - TRUST_E_CERT_SIGNATURE Handle = 0x80096004 - TRUST_E_TIME_STAMP Handle = 0x80096005 - TRUST_E_BAD_DIGEST Handle = 0x80096010 - TRUST_E_MALFORMED_SIGNATURE Handle = 0x80096011 - TRUST_E_BASIC_CONSTRAINTS Handle = 0x80096019 - TRUST_E_FINANCIAL_CRITERIA Handle = 0x8009601E - MSSIPOTF_E_OUTOFMEMRANGE Handle = 0x80097001 - MSSIPOTF_E_CANTGETOBJECT Handle = 0x80097002 - MSSIPOTF_E_NOHEADTABLE Handle = 0x80097003 - MSSIPOTF_E_BAD_MAGICNUMBER Handle = 0x80097004 - MSSIPOTF_E_BAD_OFFSET_TABLE Handle = 0x80097005 - MSSIPOTF_E_TABLE_TAGORDER Handle = 0x80097006 - MSSIPOTF_E_TABLE_LONGWORD Handle = 0x80097007 - MSSIPOTF_E_BAD_FIRST_TABLE_PLACEMENT Handle = 0x80097008 - MSSIPOTF_E_TABLES_OVERLAP Handle = 0x80097009 - MSSIPOTF_E_TABLE_PADBYTES Handle = 0x8009700A - MSSIPOTF_E_FILETOOSMALL Handle = 0x8009700B - MSSIPOTF_E_TABLE_CHECKSUM Handle = 0x8009700C - MSSIPOTF_E_FILE_CHECKSUM Handle = 0x8009700D - 
MSSIPOTF_E_FAILED_POLICY Handle = 0x80097010 - MSSIPOTF_E_FAILED_HINTS_CHECK Handle = 0x80097011 - MSSIPOTF_E_NOT_OPENTYPE Handle = 0x80097012 - MSSIPOTF_E_FILE Handle = 0x80097013 - MSSIPOTF_E_CRYPT Handle = 0x80097014 - MSSIPOTF_E_BADVERSION Handle = 0x80097015 - MSSIPOTF_E_DSIG_STRUCTURE Handle = 0x80097016 - MSSIPOTF_E_PCONST_CHECK Handle = 0x80097017 - MSSIPOTF_E_STRUCTURE Handle = 0x80097018 - ERROR_CRED_REQUIRES_CONFIRMATION Handle = 0x80097019 - NTE_OP_OK syscall.Errno = 0 - TRUST_E_PROVIDER_UNKNOWN Handle = 0x800B0001 - TRUST_E_ACTION_UNKNOWN Handle = 0x800B0002 - TRUST_E_SUBJECT_FORM_UNKNOWN Handle = 0x800B0003 - TRUST_E_SUBJECT_NOT_TRUSTED Handle = 0x800B0004 - DIGSIG_E_ENCODE Handle = 0x800B0005 - DIGSIG_E_DECODE Handle = 0x800B0006 - DIGSIG_E_EXTENSIBILITY Handle = 0x800B0007 - DIGSIG_E_CRYPTO Handle = 0x800B0008 - PERSIST_E_SIZEDEFINITE Handle = 0x800B0009 - PERSIST_E_SIZEINDEFINITE Handle = 0x800B000A - PERSIST_E_NOTSELFSIZING Handle = 0x800B000B - TRUST_E_NOSIGNATURE Handle = 0x800B0100 - CERT_E_EXPIRED Handle = 0x800B0101 - CERT_E_VALIDITYPERIODNESTING Handle = 0x800B0102 - CERT_E_ROLE Handle = 0x800B0103 - CERT_E_PATHLENCONST Handle = 0x800B0104 - CERT_E_CRITICAL Handle = 0x800B0105 - CERT_E_PURPOSE Handle = 0x800B0106 - CERT_E_ISSUERCHAINING Handle = 0x800B0107 - CERT_E_MALFORMED Handle = 0x800B0108 - CERT_E_UNTRUSTEDROOT Handle = 0x800B0109 - CERT_E_CHAINING Handle = 0x800B010A - TRUST_E_FAIL Handle = 0x800B010B - CERT_E_REVOKED Handle = 0x800B010C - CERT_E_UNTRUSTEDTESTROOT Handle = 0x800B010D - CERT_E_REVOCATION_FAILURE Handle = 0x800B010E - CERT_E_CN_NO_MATCH Handle = 0x800B010F - CERT_E_WRONG_USAGE Handle = 0x800B0110 - TRUST_E_EXPLICIT_DISTRUST Handle = 0x800B0111 - CERT_E_UNTRUSTEDCA Handle = 0x800B0112 - CERT_E_INVALID_POLICY Handle = 0x800B0113 - CERT_E_INVALID_NAME Handle = 0x800B0114 - SPAPI_E_EXPECTED_SECTION_NAME Handle = 0x800F0000 - SPAPI_E_BAD_SECTION_NAME_LINE Handle = 0x800F0001 - SPAPI_E_SECTION_NAME_TOO_LONG Handle = 0x800F0002 - SPAPI_E_GENERAL_SYNTAX Handle = 0x800F0003 - SPAPI_E_WRONG_INF_STYLE Handle = 0x800F0100 - SPAPI_E_SECTION_NOT_FOUND Handle = 0x800F0101 - SPAPI_E_LINE_NOT_FOUND Handle = 0x800F0102 - SPAPI_E_NO_BACKUP Handle = 0x800F0103 - SPAPI_E_NO_ASSOCIATED_CLASS Handle = 0x800F0200 - SPAPI_E_CLASS_MISMATCH Handle = 0x800F0201 - SPAPI_E_DUPLICATE_FOUND Handle = 0x800F0202 - SPAPI_E_NO_DRIVER_SELECTED Handle = 0x800F0203 - SPAPI_E_KEY_DOES_NOT_EXIST Handle = 0x800F0204 - SPAPI_E_INVALID_DEVINST_NAME Handle = 0x800F0205 - SPAPI_E_INVALID_CLASS Handle = 0x800F0206 - SPAPI_E_DEVINST_ALREADY_EXISTS Handle = 0x800F0207 - SPAPI_E_DEVINFO_NOT_REGISTERED Handle = 0x800F0208 - SPAPI_E_INVALID_REG_PROPERTY Handle = 0x800F0209 - SPAPI_E_NO_INF Handle = 0x800F020A - SPAPI_E_NO_SUCH_DEVINST Handle = 0x800F020B - SPAPI_E_CANT_LOAD_CLASS_ICON Handle = 0x800F020C - SPAPI_E_INVALID_CLASS_INSTALLER Handle = 0x800F020D - SPAPI_E_DI_DO_DEFAULT Handle = 0x800F020E - SPAPI_E_DI_NOFILECOPY Handle = 0x800F020F - SPAPI_E_INVALID_HWPROFILE Handle = 0x800F0210 - SPAPI_E_NO_DEVICE_SELECTED Handle = 0x800F0211 - SPAPI_E_DEVINFO_LIST_LOCKED Handle = 0x800F0212 - SPAPI_E_DEVINFO_DATA_LOCKED Handle = 0x800F0213 - SPAPI_E_DI_BAD_PATH Handle = 0x800F0214 - SPAPI_E_NO_CLASSINSTALL_PARAMS Handle = 0x800F0215 - SPAPI_E_FILEQUEUE_LOCKED Handle = 0x800F0216 - SPAPI_E_BAD_SERVICE_INSTALLSECT Handle = 0x800F0217 - SPAPI_E_NO_CLASS_DRIVER_LIST Handle = 0x800F0218 - SPAPI_E_NO_ASSOCIATED_SERVICE Handle = 0x800F0219 - SPAPI_E_NO_DEFAULT_DEVICE_INTERFACE Handle = 0x800F021A - 
SPAPI_E_DEVICE_INTERFACE_ACTIVE Handle = 0x800F021B - SPAPI_E_DEVICE_INTERFACE_REMOVED Handle = 0x800F021C - SPAPI_E_BAD_INTERFACE_INSTALLSECT Handle = 0x800F021D - SPAPI_E_NO_SUCH_INTERFACE_CLASS Handle = 0x800F021E - SPAPI_E_INVALID_REFERENCE_STRING Handle = 0x800F021F - SPAPI_E_INVALID_MACHINENAME Handle = 0x800F0220 - SPAPI_E_REMOTE_COMM_FAILURE Handle = 0x800F0221 - SPAPI_E_MACHINE_UNAVAILABLE Handle = 0x800F0222 - SPAPI_E_NO_CONFIGMGR_SERVICES Handle = 0x800F0223 - SPAPI_E_INVALID_PROPPAGE_PROVIDER Handle = 0x800F0224 - SPAPI_E_NO_SUCH_DEVICE_INTERFACE Handle = 0x800F0225 - SPAPI_E_DI_POSTPROCESSING_REQUIRED Handle = 0x800F0226 - SPAPI_E_INVALID_COINSTALLER Handle = 0x800F0227 - SPAPI_E_NO_COMPAT_DRIVERS Handle = 0x800F0228 - SPAPI_E_NO_DEVICE_ICON Handle = 0x800F0229 - SPAPI_E_INVALID_INF_LOGCONFIG Handle = 0x800F022A - SPAPI_E_DI_DONT_INSTALL Handle = 0x800F022B - SPAPI_E_INVALID_FILTER_DRIVER Handle = 0x800F022C - SPAPI_E_NON_WINDOWS_NT_DRIVER Handle = 0x800F022D - SPAPI_E_NON_WINDOWS_DRIVER Handle = 0x800F022E - SPAPI_E_NO_CATALOG_FOR_OEM_INF Handle = 0x800F022F - SPAPI_E_DEVINSTALL_QUEUE_NONNATIVE Handle = 0x800F0230 - SPAPI_E_NOT_DISABLEABLE Handle = 0x800F0231 - SPAPI_E_CANT_REMOVE_DEVINST Handle = 0x800F0232 - SPAPI_E_INVALID_TARGET Handle = 0x800F0233 - SPAPI_E_DRIVER_NONNATIVE Handle = 0x800F0234 - SPAPI_E_IN_WOW64 Handle = 0x800F0235 - SPAPI_E_SET_SYSTEM_RESTORE_POINT Handle = 0x800F0236 - SPAPI_E_INCORRECTLY_COPIED_INF Handle = 0x800F0237 - SPAPI_E_SCE_DISABLED Handle = 0x800F0238 - SPAPI_E_UNKNOWN_EXCEPTION Handle = 0x800F0239 - SPAPI_E_PNP_REGISTRY_ERROR Handle = 0x800F023A - SPAPI_E_REMOTE_REQUEST_UNSUPPORTED Handle = 0x800F023B - SPAPI_E_NOT_AN_INSTALLED_OEM_INF Handle = 0x800F023C - SPAPI_E_INF_IN_USE_BY_DEVICES Handle = 0x800F023D - SPAPI_E_DI_FUNCTION_OBSOLETE Handle = 0x800F023E - SPAPI_E_NO_AUTHENTICODE_CATALOG Handle = 0x800F023F - SPAPI_E_AUTHENTICODE_DISALLOWED Handle = 0x800F0240 - SPAPI_E_AUTHENTICODE_TRUSTED_PUBLISHER Handle = 0x800F0241 - SPAPI_E_AUTHENTICODE_TRUST_NOT_ESTABLISHED Handle = 0x800F0242 - SPAPI_E_AUTHENTICODE_PUBLISHER_NOT_TRUSTED Handle = 0x800F0243 - SPAPI_E_SIGNATURE_OSATTRIBUTE_MISMATCH Handle = 0x800F0244 - SPAPI_E_ONLY_VALIDATE_VIA_AUTHENTICODE Handle = 0x800F0245 - SPAPI_E_DEVICE_INSTALLER_NOT_READY Handle = 0x800F0246 - SPAPI_E_DRIVER_STORE_ADD_FAILED Handle = 0x800F0247 - SPAPI_E_DEVICE_INSTALL_BLOCKED Handle = 0x800F0248 - SPAPI_E_DRIVER_INSTALL_BLOCKED Handle = 0x800F0249 - SPAPI_E_WRONG_INF_TYPE Handle = 0x800F024A - SPAPI_E_FILE_HASH_NOT_IN_CATALOG Handle = 0x800F024B - SPAPI_E_DRIVER_STORE_DELETE_FAILED Handle = 0x800F024C - SPAPI_E_UNRECOVERABLE_STACK_OVERFLOW Handle = 0x800F0300 - SPAPI_E_ERROR_NOT_INSTALLED Handle = 0x800F1000 - SCARD_S_SUCCESS = S_OK - SCARD_F_INTERNAL_ERROR Handle = 0x80100001 - SCARD_E_CANCELLED Handle = 0x80100002 - SCARD_E_INVALID_HANDLE Handle = 0x80100003 - SCARD_E_INVALID_PARAMETER Handle = 0x80100004 - SCARD_E_INVALID_TARGET Handle = 0x80100005 - SCARD_E_NO_MEMORY Handle = 0x80100006 - SCARD_F_WAITED_TOO_LONG Handle = 0x80100007 - SCARD_E_INSUFFICIENT_BUFFER Handle = 0x80100008 - SCARD_E_UNKNOWN_READER Handle = 0x80100009 - SCARD_E_TIMEOUT Handle = 0x8010000A - SCARD_E_SHARING_VIOLATION Handle = 0x8010000B - SCARD_E_NO_SMARTCARD Handle = 0x8010000C - SCARD_E_UNKNOWN_CARD Handle = 0x8010000D - SCARD_E_CANT_DISPOSE Handle = 0x8010000E - SCARD_E_PROTO_MISMATCH Handle = 0x8010000F - SCARD_E_NOT_READY Handle = 0x80100010 - SCARD_E_INVALID_VALUE Handle = 0x80100011 - SCARD_E_SYSTEM_CANCELLED Handle = 
0x80100012 - SCARD_F_COMM_ERROR Handle = 0x80100013 - SCARD_F_UNKNOWN_ERROR Handle = 0x80100014 - SCARD_E_INVALID_ATR Handle = 0x80100015 - SCARD_E_NOT_TRANSACTED Handle = 0x80100016 - SCARD_E_READER_UNAVAILABLE Handle = 0x80100017 - SCARD_P_SHUTDOWN Handle = 0x80100018 - SCARD_E_PCI_TOO_SMALL Handle = 0x80100019 - SCARD_E_READER_UNSUPPORTED Handle = 0x8010001A - SCARD_E_DUPLICATE_READER Handle = 0x8010001B - SCARD_E_CARD_UNSUPPORTED Handle = 0x8010001C - SCARD_E_NO_SERVICE Handle = 0x8010001D - SCARD_E_SERVICE_STOPPED Handle = 0x8010001E - SCARD_E_UNEXPECTED Handle = 0x8010001F - SCARD_E_ICC_INSTALLATION Handle = 0x80100020 - SCARD_E_ICC_CREATEORDER Handle = 0x80100021 - SCARD_E_UNSUPPORTED_FEATURE Handle = 0x80100022 - SCARD_E_DIR_NOT_FOUND Handle = 0x80100023 - SCARD_E_FILE_NOT_FOUND Handle = 0x80100024 - SCARD_E_NO_DIR Handle = 0x80100025 - SCARD_E_NO_FILE Handle = 0x80100026 - SCARD_E_NO_ACCESS Handle = 0x80100027 - SCARD_E_WRITE_TOO_MANY Handle = 0x80100028 - SCARD_E_BAD_SEEK Handle = 0x80100029 - SCARD_E_INVALID_CHV Handle = 0x8010002A - SCARD_E_UNKNOWN_RES_MNG Handle = 0x8010002B - SCARD_E_NO_SUCH_CERTIFICATE Handle = 0x8010002C - SCARD_E_CERTIFICATE_UNAVAILABLE Handle = 0x8010002D - SCARD_E_NO_READERS_AVAILABLE Handle = 0x8010002E - SCARD_E_COMM_DATA_LOST Handle = 0x8010002F - SCARD_E_NO_KEY_CONTAINER Handle = 0x80100030 - SCARD_E_SERVER_TOO_BUSY Handle = 0x80100031 - SCARD_E_PIN_CACHE_EXPIRED Handle = 0x80100032 - SCARD_E_NO_PIN_CACHE Handle = 0x80100033 - SCARD_E_READ_ONLY_CARD Handle = 0x80100034 - SCARD_W_UNSUPPORTED_CARD Handle = 0x80100065 - SCARD_W_UNRESPONSIVE_CARD Handle = 0x80100066 - SCARD_W_UNPOWERED_CARD Handle = 0x80100067 - SCARD_W_RESET_CARD Handle = 0x80100068 - SCARD_W_REMOVED_CARD Handle = 0x80100069 - SCARD_W_SECURITY_VIOLATION Handle = 0x8010006A - SCARD_W_WRONG_CHV Handle = 0x8010006B - SCARD_W_CHV_BLOCKED Handle = 0x8010006C - SCARD_W_EOF Handle = 0x8010006D - SCARD_W_CANCELLED_BY_USER Handle = 0x8010006E - SCARD_W_CARD_NOT_AUTHENTICATED Handle = 0x8010006F - SCARD_W_CACHE_ITEM_NOT_FOUND Handle = 0x80100070 - SCARD_W_CACHE_ITEM_STALE Handle = 0x80100071 - SCARD_W_CACHE_ITEM_TOO_BIG Handle = 0x80100072 - COMADMIN_E_OBJECTERRORS Handle = 0x80110401 - COMADMIN_E_OBJECTINVALID Handle = 0x80110402 - COMADMIN_E_KEYMISSING Handle = 0x80110403 - COMADMIN_E_ALREADYINSTALLED Handle = 0x80110404 - COMADMIN_E_APP_FILE_WRITEFAIL Handle = 0x80110407 - COMADMIN_E_APP_FILE_READFAIL Handle = 0x80110408 - COMADMIN_E_APP_FILE_VERSION Handle = 0x80110409 - COMADMIN_E_BADPATH Handle = 0x8011040A - COMADMIN_E_APPLICATIONEXISTS Handle = 0x8011040B - COMADMIN_E_ROLEEXISTS Handle = 0x8011040C - COMADMIN_E_CANTCOPYFILE Handle = 0x8011040D - COMADMIN_E_NOUSER Handle = 0x8011040F - COMADMIN_E_INVALIDUSERIDS Handle = 0x80110410 - COMADMIN_E_NOREGISTRYCLSID Handle = 0x80110411 - COMADMIN_E_BADREGISTRYPROGID Handle = 0x80110412 - COMADMIN_E_AUTHENTICATIONLEVEL Handle = 0x80110413 - COMADMIN_E_USERPASSWDNOTVALID Handle = 0x80110414 - COMADMIN_E_CLSIDORIIDMISMATCH Handle = 0x80110418 - COMADMIN_E_REMOTEINTERFACE Handle = 0x80110419 - COMADMIN_E_DLLREGISTERSERVER Handle = 0x8011041A - COMADMIN_E_NOSERVERSHARE Handle = 0x8011041B - COMADMIN_E_DLLLOADFAILED Handle = 0x8011041D - COMADMIN_E_BADREGISTRYLIBID Handle = 0x8011041E - COMADMIN_E_APPDIRNOTFOUND Handle = 0x8011041F - COMADMIN_E_REGISTRARFAILED Handle = 0x80110423 - COMADMIN_E_COMPFILE_DOESNOTEXIST Handle = 0x80110424 - COMADMIN_E_COMPFILE_LOADDLLFAIL Handle = 0x80110425 - COMADMIN_E_COMPFILE_GETCLASSOBJ Handle = 0x80110426 - 
[flattened diff hunk: roughly 1,300 of what appear to be deletion ("-") lines of auto-generated Windows error-code constants, typed Handle or syscall.Errno, apparently from the vendored golang.org/x/sys/windows/zerrors_windows.go. Constant families covered: COMADMIN_E_*, COMQC_E_*, MSDTC_E_*, WER_*, ERROR_FLT_*, DWM_*, ERROR_MONITOR_*, ERROR_GRAPHICS_*, NAP_E_*, TPM_E_*, TPM_20_E_*, TBS_E_*, TPMAPI_E_*, TBSIMP_E_*, PLA_E_*, FVE_E_*, FWP_E_*, WS_*, ERROR_NDIS_*, ERROR_HV_*, ERROR_VID_*, ERROR_VMCOMPUTE_*, HCS_E_*, WHV_E_*, ERROR_VSMB_*, and ERROR_VOLMGR_*. The hunk cuts off mid-declaration at ERROR_VOLMGR_MIRROR_NOT_SUPPORTED.]
= 0xC038005B - ERROR_VOLMGR_RAID5_NOT_SUPPORTED syscall.Errno = 0xC038005C - ERROR_BCD_NOT_ALL_ENTRIES_IMPORTED syscall.Errno = 0x80390001 - ERROR_BCD_TOO_MANY_ELEMENTS syscall.Errno = 0xC0390002 - ERROR_BCD_NOT_ALL_ENTRIES_SYNCHRONIZED syscall.Errno = 0x80390003 - ERROR_VHD_DRIVE_FOOTER_MISSING syscall.Errno = 0xC03A0001 - ERROR_VHD_DRIVE_FOOTER_CHECKSUM_MISMATCH syscall.Errno = 0xC03A0002 - ERROR_VHD_DRIVE_FOOTER_CORRUPT syscall.Errno = 0xC03A0003 - ERROR_VHD_FORMAT_UNKNOWN syscall.Errno = 0xC03A0004 - ERROR_VHD_FORMAT_UNSUPPORTED_VERSION syscall.Errno = 0xC03A0005 - ERROR_VHD_SPARSE_HEADER_CHECKSUM_MISMATCH syscall.Errno = 0xC03A0006 - ERROR_VHD_SPARSE_HEADER_UNSUPPORTED_VERSION syscall.Errno = 0xC03A0007 - ERROR_VHD_SPARSE_HEADER_CORRUPT syscall.Errno = 0xC03A0008 - ERROR_VHD_BLOCK_ALLOCATION_FAILURE syscall.Errno = 0xC03A0009 - ERROR_VHD_BLOCK_ALLOCATION_TABLE_CORRUPT syscall.Errno = 0xC03A000A - ERROR_VHD_INVALID_BLOCK_SIZE syscall.Errno = 0xC03A000B - ERROR_VHD_BITMAP_MISMATCH syscall.Errno = 0xC03A000C - ERROR_VHD_PARENT_VHD_NOT_FOUND syscall.Errno = 0xC03A000D - ERROR_VHD_CHILD_PARENT_ID_MISMATCH syscall.Errno = 0xC03A000E - ERROR_VHD_CHILD_PARENT_TIMESTAMP_MISMATCH syscall.Errno = 0xC03A000F - ERROR_VHD_METADATA_READ_FAILURE syscall.Errno = 0xC03A0010 - ERROR_VHD_METADATA_WRITE_FAILURE syscall.Errno = 0xC03A0011 - ERROR_VHD_INVALID_SIZE syscall.Errno = 0xC03A0012 - ERROR_VHD_INVALID_FILE_SIZE syscall.Errno = 0xC03A0013 - ERROR_VIRTDISK_PROVIDER_NOT_FOUND syscall.Errno = 0xC03A0014 - ERROR_VIRTDISK_NOT_VIRTUAL_DISK syscall.Errno = 0xC03A0015 - ERROR_VHD_PARENT_VHD_ACCESS_DENIED syscall.Errno = 0xC03A0016 - ERROR_VHD_CHILD_PARENT_SIZE_MISMATCH syscall.Errno = 0xC03A0017 - ERROR_VHD_DIFFERENCING_CHAIN_CYCLE_DETECTED syscall.Errno = 0xC03A0018 - ERROR_VHD_DIFFERENCING_CHAIN_ERROR_IN_PARENT syscall.Errno = 0xC03A0019 - ERROR_VIRTUAL_DISK_LIMITATION syscall.Errno = 0xC03A001A - ERROR_VHD_INVALID_TYPE syscall.Errno = 0xC03A001B - ERROR_VHD_INVALID_STATE syscall.Errno = 0xC03A001C - ERROR_VIRTDISK_UNSUPPORTED_DISK_SECTOR_SIZE syscall.Errno = 0xC03A001D - ERROR_VIRTDISK_DISK_ALREADY_OWNED syscall.Errno = 0xC03A001E - ERROR_VIRTDISK_DISK_ONLINE_AND_WRITABLE syscall.Errno = 0xC03A001F - ERROR_CTLOG_TRACKING_NOT_INITIALIZED syscall.Errno = 0xC03A0020 - ERROR_CTLOG_LOGFILE_SIZE_EXCEEDED_MAXSIZE syscall.Errno = 0xC03A0021 - ERROR_CTLOG_VHD_CHANGED_OFFLINE syscall.Errno = 0xC03A0022 - ERROR_CTLOG_INVALID_TRACKING_STATE syscall.Errno = 0xC03A0023 - ERROR_CTLOG_INCONSISTENT_TRACKING_FILE syscall.Errno = 0xC03A0024 - ERROR_VHD_RESIZE_WOULD_TRUNCATE_DATA syscall.Errno = 0xC03A0025 - ERROR_VHD_COULD_NOT_COMPUTE_MINIMUM_VIRTUAL_SIZE syscall.Errno = 0xC03A0026 - ERROR_VHD_ALREADY_AT_OR_BELOW_MINIMUM_VIRTUAL_SIZE syscall.Errno = 0xC03A0027 - ERROR_VHD_METADATA_FULL syscall.Errno = 0xC03A0028 - ERROR_VHD_INVALID_CHANGE_TRACKING_ID syscall.Errno = 0xC03A0029 - ERROR_VHD_CHANGE_TRACKING_DISABLED syscall.Errno = 0xC03A002A - ERROR_VHD_MISSING_CHANGE_TRACKING_INFORMATION syscall.Errno = 0xC03A0030 - ERROR_QUERY_STORAGE_ERROR syscall.Errno = 0x803A0001 - HCN_E_NETWORK_NOT_FOUND Handle = 0x803B0001 - HCN_E_ENDPOINT_NOT_FOUND Handle = 0x803B0002 - HCN_E_LAYER_NOT_FOUND Handle = 0x803B0003 - HCN_E_SWITCH_NOT_FOUND Handle = 0x803B0004 - HCN_E_SUBNET_NOT_FOUND Handle = 0x803B0005 - HCN_E_ADAPTER_NOT_FOUND Handle = 0x803B0006 - HCN_E_PORT_NOT_FOUND Handle = 0x803B0007 - HCN_E_POLICY_NOT_FOUND Handle = 0x803B0008 - HCN_E_VFP_PORTSETTING_NOT_FOUND Handle = 0x803B0009 - HCN_E_INVALID_NETWORK Handle = 0x803B000A - 
HCN_E_INVALID_NETWORK_TYPE Handle = 0x803B000B - HCN_E_INVALID_ENDPOINT Handle = 0x803B000C - HCN_E_INVALID_POLICY Handle = 0x803B000D - HCN_E_INVALID_POLICY_TYPE Handle = 0x803B000E - HCN_E_INVALID_REMOTE_ENDPOINT_OPERATION Handle = 0x803B000F - HCN_E_NETWORK_ALREADY_EXISTS Handle = 0x803B0010 - HCN_E_LAYER_ALREADY_EXISTS Handle = 0x803B0011 - HCN_E_POLICY_ALREADY_EXISTS Handle = 0x803B0012 - HCN_E_PORT_ALREADY_EXISTS Handle = 0x803B0013 - HCN_E_ENDPOINT_ALREADY_ATTACHED Handle = 0x803B0014 - HCN_E_REQUEST_UNSUPPORTED Handle = 0x803B0015 - HCN_E_MAPPING_NOT_SUPPORTED Handle = 0x803B0016 - HCN_E_DEGRADED_OPERATION Handle = 0x803B0017 - HCN_E_SHARED_SWITCH_MODIFICATION Handle = 0x803B0018 - HCN_E_GUID_CONVERSION_FAILURE Handle = 0x803B0019 - HCN_E_REGKEY_FAILURE Handle = 0x803B001A - HCN_E_INVALID_JSON Handle = 0x803B001B - HCN_E_INVALID_JSON_REFERENCE Handle = 0x803B001C - HCN_E_ENDPOINT_SHARING_DISABLED Handle = 0x803B001D - HCN_E_INVALID_IP Handle = 0x803B001E - HCN_E_SWITCH_EXTENSION_NOT_FOUND Handle = 0x803B001F - HCN_E_MANAGER_STOPPED Handle = 0x803B0020 - GCN_E_MODULE_NOT_FOUND Handle = 0x803B0021 - GCN_E_NO_REQUEST_HANDLERS Handle = 0x803B0022 - GCN_E_REQUEST_UNSUPPORTED Handle = 0x803B0023 - GCN_E_RUNTIMEKEYS_FAILED Handle = 0x803B0024 - GCN_E_NETADAPTER_TIMEOUT Handle = 0x803B0025 - GCN_E_NETADAPTER_NOT_FOUND Handle = 0x803B0026 - GCN_E_NETCOMPARTMENT_NOT_FOUND Handle = 0x803B0027 - GCN_E_NETINTERFACE_NOT_FOUND Handle = 0x803B0028 - GCN_E_DEFAULTNAMESPACE_EXISTS Handle = 0x803B0029 - SDIAG_E_CANCELLED syscall.Errno = 0x803C0100 - SDIAG_E_SCRIPT syscall.Errno = 0x803C0101 - SDIAG_E_POWERSHELL syscall.Errno = 0x803C0102 - SDIAG_E_MANAGEDHOST syscall.Errno = 0x803C0103 - SDIAG_E_NOVERIFIER syscall.Errno = 0x803C0104 - SDIAG_S_CANNOTRUN syscall.Errno = 0x003C0105 - SDIAG_E_DISABLED syscall.Errno = 0x803C0106 - SDIAG_E_TRUST syscall.Errno = 0x803C0107 - SDIAG_E_CANNOTRUN syscall.Errno = 0x803C0108 - SDIAG_E_VERSION syscall.Errno = 0x803C0109 - SDIAG_E_RESOURCE syscall.Errno = 0x803C010A - SDIAG_E_ROOTCAUSE syscall.Errno = 0x803C010B - WPN_E_CHANNEL_CLOSED Handle = 0x803E0100 - WPN_E_CHANNEL_REQUEST_NOT_COMPLETE Handle = 0x803E0101 - WPN_E_INVALID_APP Handle = 0x803E0102 - WPN_E_OUTSTANDING_CHANNEL_REQUEST Handle = 0x803E0103 - WPN_E_DUPLICATE_CHANNEL Handle = 0x803E0104 - WPN_E_PLATFORM_UNAVAILABLE Handle = 0x803E0105 - WPN_E_NOTIFICATION_POSTED Handle = 0x803E0106 - WPN_E_NOTIFICATION_HIDDEN Handle = 0x803E0107 - WPN_E_NOTIFICATION_NOT_POSTED Handle = 0x803E0108 - WPN_E_CLOUD_DISABLED Handle = 0x803E0109 - WPN_E_CLOUD_INCAPABLE Handle = 0x803E0110 - WPN_E_CLOUD_AUTH_UNAVAILABLE Handle = 0x803E011A - WPN_E_CLOUD_SERVICE_UNAVAILABLE Handle = 0x803E011B - WPN_E_FAILED_LOCK_SCREEN_UPDATE_INTIALIZATION Handle = 0x803E011C - WPN_E_NOTIFICATION_DISABLED Handle = 0x803E0111 - WPN_E_NOTIFICATION_INCAPABLE Handle = 0x803E0112 - WPN_E_INTERNET_INCAPABLE Handle = 0x803E0113 - WPN_E_NOTIFICATION_TYPE_DISABLED Handle = 0x803E0114 - WPN_E_NOTIFICATION_SIZE Handle = 0x803E0115 - WPN_E_TAG_SIZE Handle = 0x803E0116 - WPN_E_ACCESS_DENIED Handle = 0x803E0117 - WPN_E_DUPLICATE_REGISTRATION Handle = 0x803E0118 - WPN_E_PUSH_NOTIFICATION_INCAPABLE Handle = 0x803E0119 - WPN_E_DEV_ID_SIZE Handle = 0x803E0120 - WPN_E_TAG_ALPHANUMERIC Handle = 0x803E012A - WPN_E_INVALID_HTTP_STATUS_CODE Handle = 0x803E012B - WPN_E_OUT_OF_SESSION Handle = 0x803E0200 - WPN_E_POWER_SAVE Handle = 0x803E0201 - WPN_E_IMAGE_NOT_FOUND_IN_CACHE Handle = 0x803E0202 - WPN_E_ALL_URL_NOT_COMPLETED Handle = 0x803E0203 - 
WPN_E_INVALID_CLOUD_IMAGE Handle = 0x803E0204 - WPN_E_NOTIFICATION_ID_MATCHED Handle = 0x803E0205 - WPN_E_CALLBACK_ALREADY_REGISTERED Handle = 0x803E0206 - WPN_E_TOAST_NOTIFICATION_DROPPED Handle = 0x803E0207 - WPN_E_STORAGE_LOCKED Handle = 0x803E0208 - WPN_E_GROUP_SIZE Handle = 0x803E0209 - WPN_E_GROUP_ALPHANUMERIC Handle = 0x803E020A - WPN_E_CLOUD_DISABLED_FOR_APP Handle = 0x803E020B - E_MBN_CONTEXT_NOT_ACTIVATED Handle = 0x80548201 - E_MBN_BAD_SIM Handle = 0x80548202 - E_MBN_DATA_CLASS_NOT_AVAILABLE Handle = 0x80548203 - E_MBN_INVALID_ACCESS_STRING Handle = 0x80548204 - E_MBN_MAX_ACTIVATED_CONTEXTS Handle = 0x80548205 - E_MBN_PACKET_SVC_DETACHED Handle = 0x80548206 - E_MBN_PROVIDER_NOT_VISIBLE Handle = 0x80548207 - E_MBN_RADIO_POWER_OFF Handle = 0x80548208 - E_MBN_SERVICE_NOT_ACTIVATED Handle = 0x80548209 - E_MBN_SIM_NOT_INSERTED Handle = 0x8054820A - E_MBN_VOICE_CALL_IN_PROGRESS Handle = 0x8054820B - E_MBN_INVALID_CACHE Handle = 0x8054820C - E_MBN_NOT_REGISTERED Handle = 0x8054820D - E_MBN_PROVIDERS_NOT_FOUND Handle = 0x8054820E - E_MBN_PIN_NOT_SUPPORTED Handle = 0x8054820F - E_MBN_PIN_REQUIRED Handle = 0x80548210 - E_MBN_PIN_DISABLED Handle = 0x80548211 - E_MBN_FAILURE Handle = 0x80548212 - E_MBN_INVALID_PROFILE Handle = 0x80548218 - E_MBN_DEFAULT_PROFILE_EXIST Handle = 0x80548219 - E_MBN_SMS_ENCODING_NOT_SUPPORTED Handle = 0x80548220 - E_MBN_SMS_FILTER_NOT_SUPPORTED Handle = 0x80548221 - E_MBN_SMS_INVALID_MEMORY_INDEX Handle = 0x80548222 - E_MBN_SMS_LANG_NOT_SUPPORTED Handle = 0x80548223 - E_MBN_SMS_MEMORY_FAILURE Handle = 0x80548224 - E_MBN_SMS_NETWORK_TIMEOUT Handle = 0x80548225 - E_MBN_SMS_UNKNOWN_SMSC_ADDRESS Handle = 0x80548226 - E_MBN_SMS_FORMAT_NOT_SUPPORTED Handle = 0x80548227 - E_MBN_SMS_OPERATION_NOT_ALLOWED Handle = 0x80548228 - E_MBN_SMS_MEMORY_FULL Handle = 0x80548229 - PEER_E_IPV6_NOT_INSTALLED Handle = 0x80630001 - PEER_E_NOT_INITIALIZED Handle = 0x80630002 - PEER_E_CANNOT_START_SERVICE Handle = 0x80630003 - PEER_E_NOT_LICENSED Handle = 0x80630004 - PEER_E_INVALID_GRAPH Handle = 0x80630010 - PEER_E_DBNAME_CHANGED Handle = 0x80630011 - PEER_E_DUPLICATE_GRAPH Handle = 0x80630012 - PEER_E_GRAPH_NOT_READY Handle = 0x80630013 - PEER_E_GRAPH_SHUTTING_DOWN Handle = 0x80630014 - PEER_E_GRAPH_IN_USE Handle = 0x80630015 - PEER_E_INVALID_DATABASE Handle = 0x80630016 - PEER_E_TOO_MANY_ATTRIBUTES Handle = 0x80630017 - PEER_E_CONNECTION_NOT_FOUND Handle = 0x80630103 - PEER_E_CONNECT_SELF Handle = 0x80630106 - PEER_E_ALREADY_LISTENING Handle = 0x80630107 - PEER_E_NODE_NOT_FOUND Handle = 0x80630108 - PEER_E_CONNECTION_FAILED Handle = 0x80630109 - PEER_E_CONNECTION_NOT_AUTHENTICATED Handle = 0x8063010A - PEER_E_CONNECTION_REFUSED Handle = 0x8063010B - PEER_E_CLASSIFIER_TOO_LONG Handle = 0x80630201 - PEER_E_TOO_MANY_IDENTITIES Handle = 0x80630202 - PEER_E_NO_KEY_ACCESS Handle = 0x80630203 - PEER_E_GROUPS_EXIST Handle = 0x80630204 - PEER_E_RECORD_NOT_FOUND Handle = 0x80630301 - PEER_E_DATABASE_ACCESSDENIED Handle = 0x80630302 - PEER_E_DBINITIALIZATION_FAILED Handle = 0x80630303 - PEER_E_MAX_RECORD_SIZE_EXCEEDED Handle = 0x80630304 - PEER_E_DATABASE_ALREADY_PRESENT Handle = 0x80630305 - PEER_E_DATABASE_NOT_PRESENT Handle = 0x80630306 - PEER_E_IDENTITY_NOT_FOUND Handle = 0x80630401 - PEER_E_EVENT_HANDLE_NOT_FOUND Handle = 0x80630501 - PEER_E_INVALID_SEARCH Handle = 0x80630601 - PEER_E_INVALID_ATTRIBUTES Handle = 0x80630602 - PEER_E_INVITATION_NOT_TRUSTED Handle = 0x80630701 - PEER_E_CHAIN_TOO_LONG Handle = 0x80630703 - PEER_E_INVALID_TIME_PERIOD Handle = 0x80630705 - 
PEER_E_CIRCULAR_CHAIN_DETECTED Handle = 0x80630706 - PEER_E_CERT_STORE_CORRUPTED Handle = 0x80630801 - PEER_E_NO_CLOUD Handle = 0x80631001 - PEER_E_CLOUD_NAME_AMBIGUOUS Handle = 0x80631005 - PEER_E_INVALID_RECORD Handle = 0x80632010 - PEER_E_NOT_AUTHORIZED Handle = 0x80632020 - PEER_E_PASSWORD_DOES_NOT_MEET_POLICY Handle = 0x80632021 - PEER_E_DEFERRED_VALIDATION Handle = 0x80632030 - PEER_E_INVALID_GROUP_PROPERTIES Handle = 0x80632040 - PEER_E_INVALID_PEER_NAME Handle = 0x80632050 - PEER_E_INVALID_CLASSIFIER Handle = 0x80632060 - PEER_E_INVALID_FRIENDLY_NAME Handle = 0x80632070 - PEER_E_INVALID_ROLE_PROPERTY Handle = 0x80632071 - PEER_E_INVALID_CLASSIFIER_PROPERTY Handle = 0x80632072 - PEER_E_INVALID_RECORD_EXPIRATION Handle = 0x80632080 - PEER_E_INVALID_CREDENTIAL_INFO Handle = 0x80632081 - PEER_E_INVALID_CREDENTIAL Handle = 0x80632082 - PEER_E_INVALID_RECORD_SIZE Handle = 0x80632083 - PEER_E_UNSUPPORTED_VERSION Handle = 0x80632090 - PEER_E_GROUP_NOT_READY Handle = 0x80632091 - PEER_E_GROUP_IN_USE Handle = 0x80632092 - PEER_E_INVALID_GROUP Handle = 0x80632093 - PEER_E_NO_MEMBERS_FOUND Handle = 0x80632094 - PEER_E_NO_MEMBER_CONNECTIONS Handle = 0x80632095 - PEER_E_UNABLE_TO_LISTEN Handle = 0x80632096 - PEER_E_IDENTITY_DELETED Handle = 0x806320A0 - PEER_E_SERVICE_NOT_AVAILABLE Handle = 0x806320A1 - PEER_E_CONTACT_NOT_FOUND Handle = 0x80636001 - PEER_S_GRAPH_DATA_CREATED Handle = 0x00630001 - PEER_S_NO_EVENT_DATA Handle = 0x00630002 - PEER_S_ALREADY_CONNECTED Handle = 0x00632000 - PEER_S_SUBSCRIPTION_EXISTS Handle = 0x00636000 - PEER_S_NO_CONNECTIVITY Handle = 0x00630005 - PEER_S_ALREADY_A_MEMBER Handle = 0x00630006 - PEER_E_CANNOT_CONVERT_PEER_NAME Handle = 0x80634001 - PEER_E_INVALID_PEER_HOST_NAME Handle = 0x80634002 - PEER_E_NO_MORE Handle = 0x80634003 - PEER_E_PNRP_DUPLICATE_PEER_NAME Handle = 0x80634005 - PEER_E_INVITE_CANCELLED Handle = 0x80637000 - PEER_E_INVITE_RESPONSE_NOT_AVAILABLE Handle = 0x80637001 - PEER_E_NOT_SIGNED_IN Handle = 0x80637003 - PEER_E_PRIVACY_DECLINED Handle = 0x80637004 - PEER_E_TIMEOUT Handle = 0x80637005 - PEER_E_INVALID_ADDRESS Handle = 0x80637007 - PEER_E_FW_EXCEPTION_DISABLED Handle = 0x80637008 - PEER_E_FW_BLOCKED_BY_POLICY Handle = 0x80637009 - PEER_E_FW_BLOCKED_BY_SHIELDS_UP Handle = 0x8063700A - PEER_E_FW_DECLINED Handle = 0x8063700B - UI_E_CREATE_FAILED Handle = 0x802A0001 - UI_E_SHUTDOWN_CALLED Handle = 0x802A0002 - UI_E_ILLEGAL_REENTRANCY Handle = 0x802A0003 - UI_E_OBJECT_SEALED Handle = 0x802A0004 - UI_E_VALUE_NOT_SET Handle = 0x802A0005 - UI_E_VALUE_NOT_DETERMINED Handle = 0x802A0006 - UI_E_INVALID_OUTPUT Handle = 0x802A0007 - UI_E_BOOLEAN_EXPECTED Handle = 0x802A0008 - UI_E_DIFFERENT_OWNER Handle = 0x802A0009 - UI_E_AMBIGUOUS_MATCH Handle = 0x802A000A - UI_E_FP_OVERFLOW Handle = 0x802A000B - UI_E_WRONG_THREAD Handle = 0x802A000C - UI_E_STORYBOARD_ACTIVE Handle = 0x802A0101 - UI_E_STORYBOARD_NOT_PLAYING Handle = 0x802A0102 - UI_E_START_KEYFRAME_AFTER_END Handle = 0x802A0103 - UI_E_END_KEYFRAME_NOT_DETERMINED Handle = 0x802A0104 - UI_E_LOOPS_OVERLAP Handle = 0x802A0105 - UI_E_TRANSITION_ALREADY_USED Handle = 0x802A0106 - UI_E_TRANSITION_NOT_IN_STORYBOARD Handle = 0x802A0107 - UI_E_TRANSITION_ECLIPSED Handle = 0x802A0108 - UI_E_TIME_BEFORE_LAST_UPDATE Handle = 0x802A0109 - UI_E_TIMER_CLIENT_ALREADY_CONNECTED Handle = 0x802A010A - UI_E_INVALID_DIMENSION Handle = 0x802A010B - UI_E_PRIMITIVE_OUT_OF_BOUNDS Handle = 0x802A010C - UI_E_WINDOW_CLOSED Handle = 0x802A0201 - E_BLUETOOTH_ATT_INVALID_HANDLE Handle = 0x80650001 - E_BLUETOOTH_ATT_READ_NOT_PERMITTED 
Handle = 0x80650002 - E_BLUETOOTH_ATT_WRITE_NOT_PERMITTED Handle = 0x80650003 - E_BLUETOOTH_ATT_INVALID_PDU Handle = 0x80650004 - E_BLUETOOTH_ATT_INSUFFICIENT_AUTHENTICATION Handle = 0x80650005 - E_BLUETOOTH_ATT_REQUEST_NOT_SUPPORTED Handle = 0x80650006 - E_BLUETOOTH_ATT_INVALID_OFFSET Handle = 0x80650007 - E_BLUETOOTH_ATT_INSUFFICIENT_AUTHORIZATION Handle = 0x80650008 - E_BLUETOOTH_ATT_PREPARE_QUEUE_FULL Handle = 0x80650009 - E_BLUETOOTH_ATT_ATTRIBUTE_NOT_FOUND Handle = 0x8065000A - E_BLUETOOTH_ATT_ATTRIBUTE_NOT_LONG Handle = 0x8065000B - E_BLUETOOTH_ATT_INSUFFICIENT_ENCRYPTION_KEY_SIZE Handle = 0x8065000C - E_BLUETOOTH_ATT_INVALID_ATTRIBUTE_VALUE_LENGTH Handle = 0x8065000D - E_BLUETOOTH_ATT_UNLIKELY Handle = 0x8065000E - E_BLUETOOTH_ATT_INSUFFICIENT_ENCRYPTION Handle = 0x8065000F - E_BLUETOOTH_ATT_UNSUPPORTED_GROUP_TYPE Handle = 0x80650010 - E_BLUETOOTH_ATT_INSUFFICIENT_RESOURCES Handle = 0x80650011 - E_BLUETOOTH_ATT_UNKNOWN_ERROR Handle = 0x80651000 - E_AUDIO_ENGINE_NODE_NOT_FOUND Handle = 0x80660001 - E_HDAUDIO_EMPTY_CONNECTION_LIST Handle = 0x80660002 - E_HDAUDIO_CONNECTION_LIST_NOT_SUPPORTED Handle = 0x80660003 - E_HDAUDIO_NO_LOGICAL_DEVICES_CREATED Handle = 0x80660004 - E_HDAUDIO_NULL_LINKED_LIST_ENTRY Handle = 0x80660005 - STATEREPOSITORY_E_CONCURRENCY_LOCKING_FAILURE Handle = 0x80670001 - STATEREPOSITORY_E_STATEMENT_INPROGRESS Handle = 0x80670002 - STATEREPOSITORY_E_CONFIGURATION_INVALID Handle = 0x80670003 - STATEREPOSITORY_E_UNKNOWN_SCHEMA_VERSION Handle = 0x80670004 - STATEREPOSITORY_ERROR_DICTIONARY_CORRUPTED Handle = 0x80670005 - STATEREPOSITORY_E_BLOCKED Handle = 0x80670006 - STATEREPOSITORY_E_BUSY_RETRY Handle = 0x80670007 - STATEREPOSITORY_E_BUSY_RECOVERY_RETRY Handle = 0x80670008 - STATEREPOSITORY_E_LOCKED_RETRY Handle = 0x80670009 - STATEREPOSITORY_E_LOCKED_SHAREDCACHE_RETRY Handle = 0x8067000A - STATEREPOSITORY_E_TRANSACTION_REQUIRED Handle = 0x8067000B - STATEREPOSITORY_E_BUSY_TIMEOUT_EXCEEDED Handle = 0x8067000C - STATEREPOSITORY_E_BUSY_RECOVERY_TIMEOUT_EXCEEDED Handle = 0x8067000D - STATEREPOSITORY_E_LOCKED_TIMEOUT_EXCEEDED Handle = 0x8067000E - STATEREPOSITORY_E_LOCKED_SHAREDCACHE_TIMEOUT_EXCEEDED Handle = 0x8067000F - STATEREPOSITORY_E_SERVICE_STOP_IN_PROGRESS Handle = 0x80670010 - STATEREPOSTORY_E_NESTED_TRANSACTION_NOT_SUPPORTED Handle = 0x80670011 - STATEREPOSITORY_ERROR_CACHE_CORRUPTED Handle = 0x80670012 - STATEREPOSITORY_TRANSACTION_CALLER_ID_CHANGED Handle = 0x00670013 - STATEREPOSITORY_TRANSACTION_IN_PROGRESS Handle = 0x00670014 - ERROR_SPACES_POOL_WAS_DELETED Handle = 0x00E70001 - ERROR_SPACES_FAULT_DOMAIN_TYPE_INVALID Handle = 0x80E70001 - ERROR_SPACES_INTERNAL_ERROR Handle = 0x80E70002 - ERROR_SPACES_RESILIENCY_TYPE_INVALID Handle = 0x80E70003 - ERROR_SPACES_DRIVE_SECTOR_SIZE_INVALID Handle = 0x80E70004 - ERROR_SPACES_DRIVE_REDUNDANCY_INVALID Handle = 0x80E70006 - ERROR_SPACES_NUMBER_OF_DATA_COPIES_INVALID Handle = 0x80E70007 - ERROR_SPACES_PARITY_LAYOUT_INVALID Handle = 0x80E70008 - ERROR_SPACES_INTERLEAVE_LENGTH_INVALID Handle = 0x80E70009 - ERROR_SPACES_NUMBER_OF_COLUMNS_INVALID Handle = 0x80E7000A - ERROR_SPACES_NOT_ENOUGH_DRIVES Handle = 0x80E7000B - ERROR_SPACES_EXTENDED_ERROR Handle = 0x80E7000C - ERROR_SPACES_PROVISIONING_TYPE_INVALID Handle = 0x80E7000D - ERROR_SPACES_ALLOCATION_SIZE_INVALID Handle = 0x80E7000E - ERROR_SPACES_ENCLOSURE_AWARE_INVALID Handle = 0x80E7000F - ERROR_SPACES_WRITE_CACHE_SIZE_INVALID Handle = 0x80E70010 - ERROR_SPACES_NUMBER_OF_GROUPS_INVALID Handle = 0x80E70011 - ERROR_SPACES_DRIVE_OPERATIONAL_STATE_INVALID Handle = 
0x80E70012 - ERROR_SPACES_ENTRY_INCOMPLETE Handle = 0x80E70013 - ERROR_SPACES_ENTRY_INVALID Handle = 0x80E70014 - ERROR_VOLSNAP_BOOTFILE_NOT_VALID Handle = 0x80820001 - ERROR_VOLSNAP_ACTIVATION_TIMEOUT Handle = 0x80820002 - ERROR_TIERING_NOT_SUPPORTED_ON_VOLUME Handle = 0x80830001 - ERROR_TIERING_VOLUME_DISMOUNT_IN_PROGRESS Handle = 0x80830002 - ERROR_TIERING_STORAGE_TIER_NOT_FOUND Handle = 0x80830003 - ERROR_TIERING_INVALID_FILE_ID Handle = 0x80830004 - ERROR_TIERING_WRONG_CLUSTER_NODE Handle = 0x80830005 - ERROR_TIERING_ALREADY_PROCESSING Handle = 0x80830006 - ERROR_TIERING_CANNOT_PIN_OBJECT Handle = 0x80830007 - ERROR_TIERING_FILE_IS_NOT_PINNED Handle = 0x80830008 - ERROR_NOT_A_TIERED_VOLUME Handle = 0x80830009 - ERROR_ATTRIBUTE_NOT_PRESENT Handle = 0x8083000A - ERROR_SECCORE_INVALID_COMMAND Handle = 0xC0E80000 - ERROR_NO_APPLICABLE_APP_LICENSES_FOUND Handle = 0xC0EA0001 - ERROR_CLIP_LICENSE_NOT_FOUND Handle = 0xC0EA0002 - ERROR_CLIP_DEVICE_LICENSE_MISSING Handle = 0xC0EA0003 - ERROR_CLIP_LICENSE_INVALID_SIGNATURE Handle = 0xC0EA0004 - ERROR_CLIP_KEYHOLDER_LICENSE_MISSING_OR_INVALID Handle = 0xC0EA0005 - ERROR_CLIP_LICENSE_EXPIRED Handle = 0xC0EA0006 - ERROR_CLIP_LICENSE_SIGNED_BY_UNKNOWN_SOURCE Handle = 0xC0EA0007 - ERROR_CLIP_LICENSE_NOT_SIGNED Handle = 0xC0EA0008 - ERROR_CLIP_LICENSE_HARDWARE_ID_OUT_OF_TOLERANCE Handle = 0xC0EA0009 - ERROR_CLIP_LICENSE_DEVICE_ID_MISMATCH Handle = 0xC0EA000A - DXGI_STATUS_OCCLUDED Handle = 0x087A0001 - DXGI_STATUS_CLIPPED Handle = 0x087A0002 - DXGI_STATUS_NO_REDIRECTION Handle = 0x087A0004 - DXGI_STATUS_NO_DESKTOP_ACCESS Handle = 0x087A0005 - DXGI_STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE Handle = 0x087A0006 - DXGI_STATUS_MODE_CHANGED Handle = 0x087A0007 - DXGI_STATUS_MODE_CHANGE_IN_PROGRESS Handle = 0x087A0008 - DXGI_ERROR_INVALID_CALL Handle = 0x887A0001 - DXGI_ERROR_NOT_FOUND Handle = 0x887A0002 - DXGI_ERROR_MORE_DATA Handle = 0x887A0003 - DXGI_ERROR_UNSUPPORTED Handle = 0x887A0004 - DXGI_ERROR_DEVICE_REMOVED Handle = 0x887A0005 - DXGI_ERROR_DEVICE_HUNG Handle = 0x887A0006 - DXGI_ERROR_DEVICE_RESET Handle = 0x887A0007 - DXGI_ERROR_WAS_STILL_DRAWING Handle = 0x887A000A - DXGI_ERROR_FRAME_STATISTICS_DISJOINT Handle = 0x887A000B - DXGI_ERROR_GRAPHICS_VIDPN_SOURCE_IN_USE Handle = 0x887A000C - DXGI_ERROR_DRIVER_INTERNAL_ERROR Handle = 0x887A0020 - DXGI_ERROR_NONEXCLUSIVE Handle = 0x887A0021 - DXGI_ERROR_NOT_CURRENTLY_AVAILABLE Handle = 0x887A0022 - DXGI_ERROR_REMOTE_CLIENT_DISCONNECTED Handle = 0x887A0023 - DXGI_ERROR_REMOTE_OUTOFMEMORY Handle = 0x887A0024 - DXGI_ERROR_ACCESS_LOST Handle = 0x887A0026 - DXGI_ERROR_WAIT_TIMEOUT Handle = 0x887A0027 - DXGI_ERROR_SESSION_DISCONNECTED Handle = 0x887A0028 - DXGI_ERROR_RESTRICT_TO_OUTPUT_STALE Handle = 0x887A0029 - DXGI_ERROR_CANNOT_PROTECT_CONTENT Handle = 0x887A002A - DXGI_ERROR_ACCESS_DENIED Handle = 0x887A002B - DXGI_ERROR_NAME_ALREADY_EXISTS Handle = 0x887A002C - DXGI_ERROR_SDK_COMPONENT_MISSING Handle = 0x887A002D - DXGI_ERROR_NOT_CURRENT Handle = 0x887A002E - DXGI_ERROR_HW_PROTECTION_OUTOFMEMORY Handle = 0x887A0030 - DXGI_ERROR_DYNAMIC_CODE_POLICY_VIOLATION Handle = 0x887A0031 - DXGI_ERROR_NON_COMPOSITED_UI Handle = 0x887A0032 - DXGI_STATUS_UNOCCLUDED Handle = 0x087A0009 - DXGI_STATUS_DDA_WAS_STILL_DRAWING Handle = 0x087A000A - DXGI_ERROR_MODE_CHANGE_IN_PROGRESS Handle = 0x887A0025 - DXGI_STATUS_PRESENT_REQUIRED Handle = 0x087A002F - DXGI_ERROR_CACHE_CORRUPT Handle = 0x887A0033 - DXGI_ERROR_CACHE_FULL Handle = 0x887A0034 - DXGI_ERROR_CACHE_HASH_COLLISION Handle = 0x887A0035 - DXGI_ERROR_ALREADY_EXISTS Handle = 
0x887A0036 - DXGI_DDI_ERR_WASSTILLDRAWING Handle = 0x887B0001 - DXGI_DDI_ERR_UNSUPPORTED Handle = 0x887B0002 - DXGI_DDI_ERR_NONEXCLUSIVE Handle = 0x887B0003 - D3D10_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS Handle = 0x88790001 - D3D10_ERROR_FILE_NOT_FOUND Handle = 0x88790002 - D3D11_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS Handle = 0x887C0001 - D3D11_ERROR_FILE_NOT_FOUND Handle = 0x887C0002 - D3D11_ERROR_TOO_MANY_UNIQUE_VIEW_OBJECTS Handle = 0x887C0003 - D3D11_ERROR_DEFERRED_CONTEXT_MAP_WITHOUT_INITIAL_DISCARD Handle = 0x887C0004 - D3D12_ERROR_ADAPTER_NOT_FOUND Handle = 0x887E0001 - D3D12_ERROR_DRIVER_VERSION_MISMATCH Handle = 0x887E0002 - D2DERR_WRONG_STATE Handle = 0x88990001 - D2DERR_NOT_INITIALIZED Handle = 0x88990002 - D2DERR_UNSUPPORTED_OPERATION Handle = 0x88990003 - D2DERR_SCANNER_FAILED Handle = 0x88990004 - D2DERR_SCREEN_ACCESS_DENIED Handle = 0x88990005 - D2DERR_DISPLAY_STATE_INVALID Handle = 0x88990006 - D2DERR_ZERO_VECTOR Handle = 0x88990007 - D2DERR_INTERNAL_ERROR Handle = 0x88990008 - D2DERR_DISPLAY_FORMAT_NOT_SUPPORTED Handle = 0x88990009 - D2DERR_INVALID_CALL Handle = 0x8899000A - D2DERR_NO_HARDWARE_DEVICE Handle = 0x8899000B - D2DERR_RECREATE_TARGET Handle = 0x8899000C - D2DERR_TOO_MANY_SHADER_ELEMENTS Handle = 0x8899000D - D2DERR_SHADER_COMPILE_FAILED Handle = 0x8899000E - D2DERR_MAX_TEXTURE_SIZE_EXCEEDED Handle = 0x8899000F - D2DERR_UNSUPPORTED_VERSION Handle = 0x88990010 - D2DERR_BAD_NUMBER Handle = 0x88990011 - D2DERR_WRONG_FACTORY Handle = 0x88990012 - D2DERR_LAYER_ALREADY_IN_USE Handle = 0x88990013 - D2DERR_POP_CALL_DID_NOT_MATCH_PUSH Handle = 0x88990014 - D2DERR_WRONG_RESOURCE_DOMAIN Handle = 0x88990015 - D2DERR_PUSH_POP_UNBALANCED Handle = 0x88990016 - D2DERR_RENDER_TARGET_HAS_LAYER_OR_CLIPRECT Handle = 0x88990017 - D2DERR_INCOMPATIBLE_BRUSH_TYPES Handle = 0x88990018 - D2DERR_WIN32_ERROR Handle = 0x88990019 - D2DERR_TARGET_NOT_GDI_COMPATIBLE Handle = 0x8899001A - D2DERR_TEXT_EFFECT_IS_WRONG_TYPE Handle = 0x8899001B - D2DERR_TEXT_RENDERER_NOT_RELEASED Handle = 0x8899001C - D2DERR_EXCEEDS_MAX_BITMAP_SIZE Handle = 0x8899001D - D2DERR_INVALID_GRAPH_CONFIGURATION Handle = 0x8899001E - D2DERR_INVALID_INTERNAL_GRAPH_CONFIGURATION Handle = 0x8899001F - D2DERR_CYCLIC_GRAPH Handle = 0x88990020 - D2DERR_BITMAP_CANNOT_DRAW Handle = 0x88990021 - D2DERR_OUTSTANDING_BITMAP_REFERENCES Handle = 0x88990022 - D2DERR_ORIGINAL_TARGET_NOT_BOUND Handle = 0x88990023 - D2DERR_INVALID_TARGET Handle = 0x88990024 - D2DERR_BITMAP_BOUND_AS_TARGET Handle = 0x88990025 - D2DERR_INSUFFICIENT_DEVICE_CAPABILITIES Handle = 0x88990026 - D2DERR_INTERMEDIATE_TOO_LARGE Handle = 0x88990027 - D2DERR_EFFECT_IS_NOT_REGISTERED Handle = 0x88990028 - D2DERR_INVALID_PROPERTY Handle = 0x88990029 - D2DERR_NO_SUBPROPERTIES Handle = 0x8899002A - D2DERR_PRINT_JOB_CLOSED Handle = 0x8899002B - D2DERR_PRINT_FORMAT_NOT_SUPPORTED Handle = 0x8899002C - D2DERR_TOO_MANY_TRANSFORM_INPUTS Handle = 0x8899002D - D2DERR_INVALID_GLYPH_IMAGE Handle = 0x8899002E - DWRITE_E_FILEFORMAT Handle = 0x88985000 - DWRITE_E_UNEXPECTED Handle = 0x88985001 - DWRITE_E_NOFONT Handle = 0x88985002 - DWRITE_E_FILENOTFOUND Handle = 0x88985003 - DWRITE_E_FILEACCESS Handle = 0x88985004 - DWRITE_E_FONTCOLLECTIONOBSOLETE Handle = 0x88985005 - DWRITE_E_ALREADYREGISTERED Handle = 0x88985006 - DWRITE_E_CACHEFORMAT Handle = 0x88985007 - DWRITE_E_CACHEVERSION Handle = 0x88985008 - DWRITE_E_UNSUPPORTEDOPERATION Handle = 0x88985009 - DWRITE_E_TEXTRENDERERINCOMPATIBLE Handle = 0x8898500A - DWRITE_E_FLOWDIRECTIONCONFLICTS Handle = 0x8898500B - DWRITE_E_NOCOLOR Handle = 
0x8898500C - DWRITE_E_REMOTEFONT Handle = 0x8898500D - DWRITE_E_DOWNLOADCANCELLED Handle = 0x8898500E - DWRITE_E_DOWNLOADFAILED Handle = 0x8898500F - DWRITE_E_TOOMANYDOWNLOADS Handle = 0x88985010 - WINCODEC_ERR_WRONGSTATE Handle = 0x88982F04 - WINCODEC_ERR_VALUEOUTOFRANGE Handle = 0x88982F05 - WINCODEC_ERR_UNKNOWNIMAGEFORMAT Handle = 0x88982F07 - WINCODEC_ERR_UNSUPPORTEDVERSION Handle = 0x88982F0B - WINCODEC_ERR_NOTINITIALIZED Handle = 0x88982F0C - WINCODEC_ERR_ALREADYLOCKED Handle = 0x88982F0D - WINCODEC_ERR_PROPERTYNOTFOUND Handle = 0x88982F40 - WINCODEC_ERR_PROPERTYNOTSUPPORTED Handle = 0x88982F41 - WINCODEC_ERR_PROPERTYSIZE Handle = 0x88982F42 - WINCODEC_ERR_CODECPRESENT Handle = 0x88982F43 - WINCODEC_ERR_CODECNOTHUMBNAIL Handle = 0x88982F44 - WINCODEC_ERR_PALETTEUNAVAILABLE Handle = 0x88982F45 - WINCODEC_ERR_CODECTOOMANYSCANLINES Handle = 0x88982F46 - WINCODEC_ERR_INTERNALERROR Handle = 0x88982F48 - WINCODEC_ERR_SOURCERECTDOESNOTMATCHDIMENSIONS Handle = 0x88982F49 - WINCODEC_ERR_COMPONENTNOTFOUND Handle = 0x88982F50 - WINCODEC_ERR_IMAGESIZEOUTOFRANGE Handle = 0x88982F51 - WINCODEC_ERR_TOOMUCHMETADATA Handle = 0x88982F52 - WINCODEC_ERR_BADIMAGE Handle = 0x88982F60 - WINCODEC_ERR_BADHEADER Handle = 0x88982F61 - WINCODEC_ERR_FRAMEMISSING Handle = 0x88982F62 - WINCODEC_ERR_BADMETADATAHEADER Handle = 0x88982F63 - WINCODEC_ERR_BADSTREAMDATA Handle = 0x88982F70 - WINCODEC_ERR_STREAMWRITE Handle = 0x88982F71 - WINCODEC_ERR_STREAMREAD Handle = 0x88982F72 - WINCODEC_ERR_STREAMNOTAVAILABLE Handle = 0x88982F73 - WINCODEC_ERR_UNSUPPORTEDPIXELFORMAT Handle = 0x88982F80 - WINCODEC_ERR_UNSUPPORTEDOPERATION Handle = 0x88982F81 - WINCODEC_ERR_INVALIDREGISTRATION Handle = 0x88982F8A - WINCODEC_ERR_COMPONENTINITIALIZEFAILURE Handle = 0x88982F8B - WINCODEC_ERR_INSUFFICIENTBUFFER Handle = 0x88982F8C - WINCODEC_ERR_DUPLICATEMETADATAPRESENT Handle = 0x88982F8D - WINCODEC_ERR_PROPERTYUNEXPECTEDTYPE Handle = 0x88982F8E - WINCODEC_ERR_UNEXPECTEDSIZE Handle = 0x88982F8F - WINCODEC_ERR_INVALIDQUERYREQUEST Handle = 0x88982F90 - WINCODEC_ERR_UNEXPECTEDMETADATATYPE Handle = 0x88982F91 - WINCODEC_ERR_REQUESTONLYVALIDATMETADATAROOT Handle = 0x88982F92 - WINCODEC_ERR_INVALIDQUERYCHARACTER Handle = 0x88982F93 - WINCODEC_ERR_WIN32ERROR Handle = 0x88982F94 - WINCODEC_ERR_INVALIDPROGRESSIVELEVEL Handle = 0x88982F95 - WINCODEC_ERR_INVALIDJPEGSCANINDEX Handle = 0x88982F96 - MILERR_OBJECTBUSY Handle = 0x88980001 - MILERR_INSUFFICIENTBUFFER Handle = 0x88980002 - MILERR_WIN32ERROR Handle = 0x88980003 - MILERR_SCANNER_FAILED Handle = 0x88980004 - MILERR_SCREENACCESSDENIED Handle = 0x88980005 - MILERR_DISPLAYSTATEINVALID Handle = 0x88980006 - MILERR_NONINVERTIBLEMATRIX Handle = 0x88980007 - MILERR_ZEROVECTOR Handle = 0x88980008 - MILERR_TERMINATED Handle = 0x88980009 - MILERR_BADNUMBER Handle = 0x8898000A - MILERR_INTERNALERROR Handle = 0x88980080 - MILERR_DISPLAYFORMATNOTSUPPORTED Handle = 0x88980084 - MILERR_INVALIDCALL Handle = 0x88980085 - MILERR_ALREADYLOCKED Handle = 0x88980086 - MILERR_NOTLOCKED Handle = 0x88980087 - MILERR_DEVICECANNOTRENDERTEXT Handle = 0x88980088 - MILERR_GLYPHBITMAPMISSED Handle = 0x88980089 - MILERR_MALFORMEDGLYPHCACHE Handle = 0x8898008A - MILERR_GENERIC_IGNORE Handle = 0x8898008B - MILERR_MALFORMED_GUIDELINE_DATA Handle = 0x8898008C - MILERR_NO_HARDWARE_DEVICE Handle = 0x8898008D - MILERR_NEED_RECREATE_AND_PRESENT Handle = 0x8898008E - MILERR_ALREADY_INITIALIZED Handle = 0x8898008F - MILERR_MISMATCHED_SIZE Handle = 0x88980090 - MILERR_NO_REDIRECTION_SURFACE_AVAILABLE Handle = 0x88980091 - 
MILERR_REMOTING_NOT_SUPPORTED Handle = 0x88980092 - MILERR_QUEUED_PRESENT_NOT_SUPPORTED Handle = 0x88980093 - MILERR_NOT_QUEUING_PRESENTS Handle = 0x88980094 - MILERR_NO_REDIRECTION_SURFACE_RETRY_LATER Handle = 0x88980095 - MILERR_TOOMANYSHADERELEMNTS Handle = 0x88980096 - MILERR_MROW_READLOCK_FAILED Handle = 0x88980097 - MILERR_MROW_UPDATE_FAILED Handle = 0x88980098 - MILERR_SHADER_COMPILE_FAILED Handle = 0x88980099 - MILERR_MAX_TEXTURE_SIZE_EXCEEDED Handle = 0x8898009A - MILERR_QPC_TIME_WENT_BACKWARD Handle = 0x8898009B - MILERR_DXGI_ENUMERATION_OUT_OF_SYNC Handle = 0x8898009D - MILERR_ADAPTER_NOT_FOUND Handle = 0x8898009E - MILERR_COLORSPACE_NOT_SUPPORTED Handle = 0x8898009F - MILERR_PREFILTER_NOT_SUPPORTED Handle = 0x889800A0 - MILERR_DISPLAYID_ACCESS_DENIED Handle = 0x889800A1 - UCEERR_INVALIDPACKETHEADER Handle = 0x88980400 - UCEERR_UNKNOWNPACKET Handle = 0x88980401 - UCEERR_ILLEGALPACKET Handle = 0x88980402 - UCEERR_MALFORMEDPACKET Handle = 0x88980403 - UCEERR_ILLEGALHANDLE Handle = 0x88980404 - UCEERR_HANDLELOOKUPFAILED Handle = 0x88980405 - UCEERR_RENDERTHREADFAILURE Handle = 0x88980406 - UCEERR_CTXSTACKFRSTTARGETNULL Handle = 0x88980407 - UCEERR_CONNECTIONIDLOOKUPFAILED Handle = 0x88980408 - UCEERR_BLOCKSFULL Handle = 0x88980409 - UCEERR_MEMORYFAILURE Handle = 0x8898040A - UCEERR_PACKETRECORDOUTOFRANGE Handle = 0x8898040B - UCEERR_ILLEGALRECORDTYPE Handle = 0x8898040C - UCEERR_OUTOFHANDLES Handle = 0x8898040D - UCEERR_UNCHANGABLE_UPDATE_ATTEMPTED Handle = 0x8898040E - UCEERR_NO_MULTIPLE_WORKER_THREADS Handle = 0x8898040F - UCEERR_REMOTINGNOTSUPPORTED Handle = 0x88980410 - UCEERR_MISSINGENDCOMMAND Handle = 0x88980411 - UCEERR_MISSINGBEGINCOMMAND Handle = 0x88980412 - UCEERR_CHANNELSYNCTIMEDOUT Handle = 0x88980413 - UCEERR_CHANNELSYNCABANDONED Handle = 0x88980414 - UCEERR_UNSUPPORTEDTRANSPORTVERSION Handle = 0x88980415 - UCEERR_TRANSPORTUNAVAILABLE Handle = 0x88980416 - UCEERR_FEEDBACK_UNSUPPORTED Handle = 0x88980417 - UCEERR_COMMANDTRANSPORTDENIED Handle = 0x88980418 - UCEERR_GRAPHICSSTREAMUNAVAILABLE Handle = 0x88980419 - UCEERR_GRAPHICSSTREAMALREADYOPEN Handle = 0x88980420 - UCEERR_TRANSPORTDISCONNECTED Handle = 0x88980421 - UCEERR_TRANSPORTOVERLOADED Handle = 0x88980422 - UCEERR_PARTITION_ZOMBIED Handle = 0x88980423 - MILAVERR_NOCLOCK Handle = 0x88980500 - MILAVERR_NOMEDIATYPE Handle = 0x88980501 - MILAVERR_NOVIDEOMIXER Handle = 0x88980502 - MILAVERR_NOVIDEOPRESENTER Handle = 0x88980503 - MILAVERR_NOREADYFRAMES Handle = 0x88980504 - MILAVERR_MODULENOTLOADED Handle = 0x88980505 - MILAVERR_WMPFACTORYNOTREGISTERED Handle = 0x88980506 - MILAVERR_INVALIDWMPVERSION Handle = 0x88980507 - MILAVERR_INSUFFICIENTVIDEORESOURCES Handle = 0x88980508 - MILAVERR_VIDEOACCELERATIONNOTAVAILABLE Handle = 0x88980509 - MILAVERR_REQUESTEDTEXTURETOOBIG Handle = 0x8898050A - MILAVERR_SEEKFAILED Handle = 0x8898050B - MILAVERR_UNEXPECTEDWMPFAILURE Handle = 0x8898050C - MILAVERR_MEDIAPLAYERCLOSED Handle = 0x8898050D - MILAVERR_UNKNOWNHARDWAREERROR Handle = 0x8898050E - MILEFFECTSERR_UNKNOWNPROPERTY Handle = 0x8898060E - MILEFFECTSERR_EFFECTNOTPARTOFGROUP Handle = 0x8898060F - MILEFFECTSERR_NOINPUTSOURCEATTACHED Handle = 0x88980610 - MILEFFECTSERR_CONNECTORNOTCONNECTED Handle = 0x88980611 - MILEFFECTSERR_CONNECTORNOTASSOCIATEDWITHEFFECT Handle = 0x88980612 - MILEFFECTSERR_RESERVED Handle = 0x88980613 - MILEFFECTSERR_CYCLEDETECTED Handle = 0x88980614 - MILEFFECTSERR_EFFECTINMORETHANONEGRAPH Handle = 0x88980615 - MILEFFECTSERR_EFFECTALREADYINAGRAPH Handle = 0x88980616 - MILEFFECTSERR_EFFECTHASNOCHILDREN 
Handle = 0x88980617 - MILEFFECTSERR_ALREADYATTACHEDTOLISTENER Handle = 0x88980618 - MILEFFECTSERR_NOTAFFINETRANSFORM Handle = 0x88980619 - MILEFFECTSERR_EMPTYBOUNDS Handle = 0x8898061A - MILEFFECTSERR_OUTPUTSIZETOOLARGE Handle = 0x8898061B - DWMERR_STATE_TRANSITION_FAILED Handle = 0x88980700 - DWMERR_THEME_FAILED Handle = 0x88980701 - DWMERR_CATASTROPHIC_FAILURE Handle = 0x88980702 - DCOMPOSITION_ERROR_WINDOW_ALREADY_COMPOSED Handle = 0x88980800 - DCOMPOSITION_ERROR_SURFACE_BEING_RENDERED Handle = 0x88980801 - DCOMPOSITION_ERROR_SURFACE_NOT_BEING_RENDERED Handle = 0x88980802 - ONL_E_INVALID_AUTHENTICATION_TARGET Handle = 0x80860001 - ONL_E_ACCESS_DENIED_BY_TOU Handle = 0x80860002 - ONL_E_INVALID_APPLICATION Handle = 0x80860003 - ONL_E_PASSWORD_UPDATE_REQUIRED Handle = 0x80860004 - ONL_E_ACCOUNT_UPDATE_REQUIRED Handle = 0x80860005 - ONL_E_FORCESIGNIN Handle = 0x80860006 - ONL_E_ACCOUNT_LOCKED Handle = 0x80860007 - ONL_E_PARENTAL_CONSENT_REQUIRED Handle = 0x80860008 - ONL_E_EMAIL_VERIFICATION_REQUIRED Handle = 0x80860009 - ONL_E_ACCOUNT_SUSPENDED_COMPROIMISE Handle = 0x8086000A - ONL_E_ACCOUNT_SUSPENDED_ABUSE Handle = 0x8086000B - ONL_E_ACTION_REQUIRED Handle = 0x8086000C - ONL_CONNECTION_COUNT_LIMIT Handle = 0x8086000D - ONL_E_CONNECTED_ACCOUNT_CAN_NOT_SIGNOUT Handle = 0x8086000E - ONL_E_USER_AUTHENTICATION_REQUIRED Handle = 0x8086000F - ONL_E_REQUEST_THROTTLED Handle = 0x80860010 - FA_E_MAX_PERSISTED_ITEMS_REACHED Handle = 0x80270220 - FA_E_HOMEGROUP_NOT_AVAILABLE Handle = 0x80270222 - E_MONITOR_RESOLUTION_TOO_LOW Handle = 0x80270250 - E_ELEVATED_ACTIVATION_NOT_SUPPORTED Handle = 0x80270251 - E_UAC_DISABLED Handle = 0x80270252 - E_FULL_ADMIN_NOT_SUPPORTED Handle = 0x80270253 - E_APPLICATION_NOT_REGISTERED Handle = 0x80270254 - E_MULTIPLE_EXTENSIONS_FOR_APPLICATION Handle = 0x80270255 - E_MULTIPLE_PACKAGES_FOR_FAMILY Handle = 0x80270256 - E_APPLICATION_MANAGER_NOT_RUNNING Handle = 0x80270257 - S_STORE_LAUNCHED_FOR_REMEDIATION Handle = 0x00270258 - S_APPLICATION_ACTIVATION_ERROR_HANDLED_BY_DIALOG Handle = 0x00270259 - E_APPLICATION_ACTIVATION_TIMED_OUT Handle = 0x8027025A - E_APPLICATION_ACTIVATION_EXEC_FAILURE Handle = 0x8027025B - E_APPLICATION_TEMPORARY_LICENSE_ERROR Handle = 0x8027025C - E_APPLICATION_TRIAL_LICENSE_EXPIRED Handle = 0x8027025D - E_SKYDRIVE_ROOT_TARGET_FILE_SYSTEM_NOT_SUPPORTED Handle = 0x80270260 - E_SKYDRIVE_ROOT_TARGET_OVERLAP Handle = 0x80270261 - E_SKYDRIVE_ROOT_TARGET_CANNOT_INDEX Handle = 0x80270262 - E_SKYDRIVE_FILE_NOT_UPLOADED Handle = 0x80270263 - E_SKYDRIVE_UPDATE_AVAILABILITY_FAIL Handle = 0x80270264 - E_SKYDRIVE_ROOT_TARGET_VOLUME_ROOT_NOT_SUPPORTED Handle = 0x80270265 - E_SYNCENGINE_FILE_SIZE_OVER_LIMIT Handle = 0x8802B001 - E_SYNCENGINE_FILE_SIZE_EXCEEDS_REMAINING_QUOTA Handle = 0x8802B002 - E_SYNCENGINE_UNSUPPORTED_FILE_NAME Handle = 0x8802B003 - E_SYNCENGINE_FOLDER_ITEM_COUNT_LIMIT_EXCEEDED Handle = 0x8802B004 - E_SYNCENGINE_FILE_SYNC_PARTNER_ERROR Handle = 0x8802B005 - E_SYNCENGINE_SYNC_PAUSED_BY_SERVICE Handle = 0x8802B006 - E_SYNCENGINE_FILE_IDENTIFIER_UNKNOWN Handle = 0x8802C002 - E_SYNCENGINE_SERVICE_AUTHENTICATION_FAILED Handle = 0x8802C003 - E_SYNCENGINE_UNKNOWN_SERVICE_ERROR Handle = 0x8802C004 - E_SYNCENGINE_SERVICE_RETURNED_UNEXPECTED_SIZE Handle = 0x8802C005 - E_SYNCENGINE_REQUEST_BLOCKED_BY_SERVICE Handle = 0x8802C006 - E_SYNCENGINE_REQUEST_BLOCKED_DUE_TO_CLIENT_ERROR Handle = 0x8802C007 - E_SYNCENGINE_FOLDER_INACCESSIBLE Handle = 0x8802D001 - E_SYNCENGINE_UNSUPPORTED_FOLDER_NAME Handle = 0x8802D002 - E_SYNCENGINE_UNSUPPORTED_MARKET Handle = 
0x8802D003 - E_SYNCENGINE_PATH_LENGTH_LIMIT_EXCEEDED Handle = 0x8802D004 - E_SYNCENGINE_REMOTE_PATH_LENGTH_LIMIT_EXCEEDED Handle = 0x8802D005 - E_SYNCENGINE_CLIENT_UPDATE_NEEDED Handle = 0x8802D006 - E_SYNCENGINE_PROXY_AUTHENTICATION_REQUIRED Handle = 0x8802D007 - E_SYNCENGINE_STORAGE_SERVICE_PROVISIONING_FAILED Handle = 0x8802D008 - E_SYNCENGINE_UNSUPPORTED_REPARSE_POINT Handle = 0x8802D009 - E_SYNCENGINE_STORAGE_SERVICE_BLOCKED Handle = 0x8802D00A - E_SYNCENGINE_FOLDER_IN_REDIRECTION Handle = 0x8802D00B - EAS_E_POLICY_NOT_MANAGED_BY_OS Handle = 0x80550001 - EAS_E_POLICY_COMPLIANT_WITH_ACTIONS Handle = 0x80550002 - EAS_E_REQUESTED_POLICY_NOT_ENFORCEABLE Handle = 0x80550003 - EAS_E_CURRENT_USER_HAS_BLANK_PASSWORD Handle = 0x80550004 - EAS_E_REQUESTED_POLICY_PASSWORD_EXPIRATION_INCOMPATIBLE Handle = 0x80550005 - EAS_E_USER_CANNOT_CHANGE_PASSWORD Handle = 0x80550006 - EAS_E_ADMINS_HAVE_BLANK_PASSWORD Handle = 0x80550007 - EAS_E_ADMINS_CANNOT_CHANGE_PASSWORD Handle = 0x80550008 - EAS_E_LOCAL_CONTROLLED_USERS_CANNOT_CHANGE_PASSWORD Handle = 0x80550009 - EAS_E_PASSWORD_POLICY_NOT_ENFORCEABLE_FOR_CONNECTED_ADMINS Handle = 0x8055000A - EAS_E_CONNECTED_ADMINS_NEED_TO_CHANGE_PASSWORD Handle = 0x8055000B - EAS_E_PASSWORD_POLICY_NOT_ENFORCEABLE_FOR_CURRENT_CONNECTED_USER Handle = 0x8055000C - EAS_E_CURRENT_CONNECTED_USER_NEED_TO_CHANGE_PASSWORD Handle = 0x8055000D - WEB_E_UNSUPPORTED_FORMAT Handle = 0x83750001 - WEB_E_INVALID_XML Handle = 0x83750002 - WEB_E_MISSING_REQUIRED_ELEMENT Handle = 0x83750003 - WEB_E_MISSING_REQUIRED_ATTRIBUTE Handle = 0x83750004 - WEB_E_UNEXPECTED_CONTENT Handle = 0x83750005 - WEB_E_RESOURCE_TOO_LARGE Handle = 0x83750006 - WEB_E_INVALID_JSON_STRING Handle = 0x83750007 - WEB_E_INVALID_JSON_NUMBER Handle = 0x83750008 - WEB_E_JSON_VALUE_NOT_FOUND Handle = 0x83750009 - HTTP_E_STATUS_UNEXPECTED Handle = 0x80190001 - HTTP_E_STATUS_UNEXPECTED_REDIRECTION Handle = 0x80190003 - HTTP_E_STATUS_UNEXPECTED_CLIENT_ERROR Handle = 0x80190004 - HTTP_E_STATUS_UNEXPECTED_SERVER_ERROR Handle = 0x80190005 - HTTP_E_STATUS_AMBIGUOUS Handle = 0x8019012C - HTTP_E_STATUS_MOVED Handle = 0x8019012D - HTTP_E_STATUS_REDIRECT Handle = 0x8019012E - HTTP_E_STATUS_REDIRECT_METHOD Handle = 0x8019012F - HTTP_E_STATUS_NOT_MODIFIED Handle = 0x80190130 - HTTP_E_STATUS_USE_PROXY Handle = 0x80190131 - HTTP_E_STATUS_REDIRECT_KEEP_VERB Handle = 0x80190133 - HTTP_E_STATUS_BAD_REQUEST Handle = 0x80190190 - HTTP_E_STATUS_DENIED Handle = 0x80190191 - HTTP_E_STATUS_PAYMENT_REQ Handle = 0x80190192 - HTTP_E_STATUS_FORBIDDEN Handle = 0x80190193 - HTTP_E_STATUS_NOT_FOUND Handle = 0x80190194 - HTTP_E_STATUS_BAD_METHOD Handle = 0x80190195 - HTTP_E_STATUS_NONE_ACCEPTABLE Handle = 0x80190196 - HTTP_E_STATUS_PROXY_AUTH_REQ Handle = 0x80190197 - HTTP_E_STATUS_REQUEST_TIMEOUT Handle = 0x80190198 - HTTP_E_STATUS_CONFLICT Handle = 0x80190199 - HTTP_E_STATUS_GONE Handle = 0x8019019A - HTTP_E_STATUS_LENGTH_REQUIRED Handle = 0x8019019B - HTTP_E_STATUS_PRECOND_FAILED Handle = 0x8019019C - HTTP_E_STATUS_REQUEST_TOO_LARGE Handle = 0x8019019D - HTTP_E_STATUS_URI_TOO_LONG Handle = 0x8019019E - HTTP_E_STATUS_UNSUPPORTED_MEDIA Handle = 0x8019019F - HTTP_E_STATUS_RANGE_NOT_SATISFIABLE Handle = 0x801901A0 - HTTP_E_STATUS_EXPECTATION_FAILED Handle = 0x801901A1 - HTTP_E_STATUS_SERVER_ERROR Handle = 0x801901F4 - HTTP_E_STATUS_NOT_SUPPORTED Handle = 0x801901F5 - HTTP_E_STATUS_BAD_GATEWAY Handle = 0x801901F6 - HTTP_E_STATUS_SERVICE_UNAVAIL Handle = 0x801901F7 - HTTP_E_STATUS_GATEWAY_TIMEOUT Handle = 0x801901F8 - HTTP_E_STATUS_VERSION_NOT_SUP Handle = 
0x801901F9 - E_INVALID_PROTOCOL_OPERATION Handle = 0x83760001 - E_INVALID_PROTOCOL_FORMAT Handle = 0x83760002 - E_PROTOCOL_EXTENSIONS_NOT_SUPPORTED Handle = 0x83760003 - E_SUBPROTOCOL_NOT_SUPPORTED Handle = 0x83760004 - E_PROTOCOL_VERSION_NOT_SUPPORTED Handle = 0x83760005 - INPUT_E_OUT_OF_ORDER Handle = 0x80400000 - INPUT_E_REENTRANCY Handle = 0x80400001 - INPUT_E_MULTIMODAL Handle = 0x80400002 - INPUT_E_PACKET Handle = 0x80400003 - INPUT_E_FRAME Handle = 0x80400004 - INPUT_E_HISTORY Handle = 0x80400005 - INPUT_E_DEVICE_INFO Handle = 0x80400006 - INPUT_E_TRANSFORM Handle = 0x80400007 - INPUT_E_DEVICE_PROPERTY Handle = 0x80400008 - INET_E_INVALID_URL Handle = 0x800C0002 - INET_E_NO_SESSION Handle = 0x800C0003 - INET_E_CANNOT_CONNECT Handle = 0x800C0004 - INET_E_RESOURCE_NOT_FOUND Handle = 0x800C0005 - INET_E_OBJECT_NOT_FOUND Handle = 0x800C0006 - INET_E_DATA_NOT_AVAILABLE Handle = 0x800C0007 - INET_E_DOWNLOAD_FAILURE Handle = 0x800C0008 - INET_E_AUTHENTICATION_REQUIRED Handle = 0x800C0009 - INET_E_NO_VALID_MEDIA Handle = 0x800C000A - INET_E_CONNECTION_TIMEOUT Handle = 0x800C000B - INET_E_INVALID_REQUEST Handle = 0x800C000C - INET_E_UNKNOWN_PROTOCOL Handle = 0x800C000D - INET_E_SECURITY_PROBLEM Handle = 0x800C000E - INET_E_CANNOT_LOAD_DATA Handle = 0x800C000F - INET_E_CANNOT_INSTANTIATE_OBJECT Handle = 0x800C0010 - INET_E_INVALID_CERTIFICATE Handle = 0x800C0019 - INET_E_REDIRECT_FAILED Handle = 0x800C0014 - INET_E_REDIRECT_TO_DIR Handle = 0x800C0015 - ERROR_DBG_CREATE_PROCESS_FAILURE_LOCKDOWN Handle = 0x80B00001 - ERROR_DBG_ATTACH_PROCESS_FAILURE_LOCKDOWN Handle = 0x80B00002 - ERROR_DBG_CONNECT_SERVER_FAILURE_LOCKDOWN Handle = 0x80B00003 - ERROR_DBG_START_SERVER_FAILURE_LOCKDOWN Handle = 0x80B00004 - ERROR_IO_PREEMPTED Handle = 0x89010001 - JSCRIPT_E_CANTEXECUTE Handle = 0x89020001 - WEP_E_NOT_PROVISIONED_ON_ALL_VOLUMES Handle = 0x88010001 - WEP_E_FIXED_DATA_NOT_SUPPORTED Handle = 0x88010002 - WEP_E_HARDWARE_NOT_COMPLIANT Handle = 0x88010003 - WEP_E_LOCK_NOT_CONFIGURED Handle = 0x88010004 - WEP_E_PROTECTION_SUSPENDED Handle = 0x88010005 - WEP_E_NO_LICENSE Handle = 0x88010006 - WEP_E_OS_NOT_PROTECTED Handle = 0x88010007 - WEP_E_UNEXPECTED_FAIL Handle = 0x88010008 - WEP_E_BUFFER_TOO_LARGE Handle = 0x88010009 - ERROR_SVHDX_ERROR_STORED Handle = 0xC05C0000 - ERROR_SVHDX_ERROR_NOT_AVAILABLE Handle = 0xC05CFF00 - ERROR_SVHDX_UNIT_ATTENTION_AVAILABLE Handle = 0xC05CFF01 - ERROR_SVHDX_UNIT_ATTENTION_CAPACITY_DATA_CHANGED Handle = 0xC05CFF02 - ERROR_SVHDX_UNIT_ATTENTION_RESERVATIONS_PREEMPTED Handle = 0xC05CFF03 - ERROR_SVHDX_UNIT_ATTENTION_RESERVATIONS_RELEASED Handle = 0xC05CFF04 - ERROR_SVHDX_UNIT_ATTENTION_REGISTRATIONS_PREEMPTED Handle = 0xC05CFF05 - ERROR_SVHDX_UNIT_ATTENTION_OPERATING_DEFINITION_CHANGED Handle = 0xC05CFF06 - ERROR_SVHDX_RESERVATION_CONFLICT Handle = 0xC05CFF07 - ERROR_SVHDX_WRONG_FILE_TYPE Handle = 0xC05CFF08 - ERROR_SVHDX_VERSION_MISMATCH Handle = 0xC05CFF09 - ERROR_VHD_SHARED Handle = 0xC05CFF0A - ERROR_SVHDX_NO_INITIATOR Handle = 0xC05CFF0B - ERROR_VHDSET_BACKING_STORAGE_NOT_FOUND Handle = 0xC05CFF0C - ERROR_SMB_NO_PREAUTH_INTEGRITY_HASH_OVERLAP Handle = 0xC05D0000 - ERROR_SMB_BAD_CLUSTER_DIALECT Handle = 0xC05D0001 - WININET_E_OUT_OF_HANDLES Handle = 0x80072EE1 - WININET_E_TIMEOUT Handle = 0x80072EE2 - WININET_E_EXTENDED_ERROR Handle = 0x80072EE3 - WININET_E_INTERNAL_ERROR Handle = 0x80072EE4 - WININET_E_INVALID_URL Handle = 0x80072EE5 - WININET_E_UNRECOGNIZED_SCHEME Handle = 0x80072EE6 - WININET_E_NAME_NOT_RESOLVED Handle = 0x80072EE7 - WININET_E_PROTOCOL_NOT_FOUND Handle 
= 0x80072EE8 - WININET_E_INVALID_OPTION Handle = 0x80072EE9 - WININET_E_BAD_OPTION_LENGTH Handle = 0x80072EEA - WININET_E_OPTION_NOT_SETTABLE Handle = 0x80072EEB - WININET_E_SHUTDOWN Handle = 0x80072EEC - WININET_E_INCORRECT_USER_NAME Handle = 0x80072EED - WININET_E_INCORRECT_PASSWORD Handle = 0x80072EEE - WININET_E_LOGIN_FAILURE Handle = 0x80072EEF - WININET_E_INVALID_OPERATION Handle = 0x80072EF0 - WININET_E_OPERATION_CANCELLED Handle = 0x80072EF1 - WININET_E_INCORRECT_HANDLE_TYPE Handle = 0x80072EF2 - WININET_E_INCORRECT_HANDLE_STATE Handle = 0x80072EF3 - WININET_E_NOT_PROXY_REQUEST Handle = 0x80072EF4 - WININET_E_REGISTRY_VALUE_NOT_FOUND Handle = 0x80072EF5 - WININET_E_BAD_REGISTRY_PARAMETER Handle = 0x80072EF6 - WININET_E_NO_DIRECT_ACCESS Handle = 0x80072EF7 - WININET_E_NO_CONTEXT Handle = 0x80072EF8 - WININET_E_NO_CALLBACK Handle = 0x80072EF9 - WININET_E_REQUEST_PENDING Handle = 0x80072EFA - WININET_E_INCORRECT_FORMAT Handle = 0x80072EFB - WININET_E_ITEM_NOT_FOUND Handle = 0x80072EFC - WININET_E_CANNOT_CONNECT Handle = 0x80072EFD - WININET_E_CONNECTION_ABORTED Handle = 0x80072EFE - WININET_E_CONNECTION_RESET Handle = 0x80072EFF - WININET_E_FORCE_RETRY Handle = 0x80072F00 - WININET_E_INVALID_PROXY_REQUEST Handle = 0x80072F01 - WININET_E_NEED_UI Handle = 0x80072F02 - WININET_E_HANDLE_EXISTS Handle = 0x80072F04 - WININET_E_SEC_CERT_DATE_INVALID Handle = 0x80072F05 - WININET_E_SEC_CERT_CN_INVALID Handle = 0x80072F06 - WININET_E_HTTP_TO_HTTPS_ON_REDIR Handle = 0x80072F07 - WININET_E_HTTPS_TO_HTTP_ON_REDIR Handle = 0x80072F08 - WININET_E_MIXED_SECURITY Handle = 0x80072F09 - WININET_E_CHG_POST_IS_NON_SECURE Handle = 0x80072F0A - WININET_E_POST_IS_NON_SECURE Handle = 0x80072F0B - WININET_E_CLIENT_AUTH_CERT_NEEDED Handle = 0x80072F0C - WININET_E_INVALID_CA Handle = 0x80072F0D - WININET_E_CLIENT_AUTH_NOT_SETUP Handle = 0x80072F0E - WININET_E_ASYNC_THREAD_FAILED Handle = 0x80072F0F - WININET_E_REDIRECT_SCHEME_CHANGE Handle = 0x80072F10 - WININET_E_DIALOG_PENDING Handle = 0x80072F11 - WININET_E_RETRY_DIALOG Handle = 0x80072F12 - WININET_E_NO_NEW_CONTAINERS Handle = 0x80072F13 - WININET_E_HTTPS_HTTP_SUBMIT_REDIR Handle = 0x80072F14 - WININET_E_SEC_CERT_ERRORS Handle = 0x80072F17 - WININET_E_SEC_CERT_REV_FAILED Handle = 0x80072F19 - WININET_E_HEADER_NOT_FOUND Handle = 0x80072F76 - WININET_E_DOWNLEVEL_SERVER Handle = 0x80072F77 - WININET_E_INVALID_SERVER_RESPONSE Handle = 0x80072F78 - WININET_E_INVALID_HEADER Handle = 0x80072F79 - WININET_E_INVALID_QUERY_REQUEST Handle = 0x80072F7A - WININET_E_HEADER_ALREADY_EXISTS Handle = 0x80072F7B - WININET_E_REDIRECT_FAILED Handle = 0x80072F7C - WININET_E_SECURITY_CHANNEL_ERROR Handle = 0x80072F7D - WININET_E_UNABLE_TO_CACHE_FILE Handle = 0x80072F7E - WININET_E_TCPIP_NOT_INSTALLED Handle = 0x80072F7F - WININET_E_DISCONNECTED Handle = 0x80072F83 - WININET_E_SERVER_UNREACHABLE Handle = 0x80072F84 - WININET_E_PROXY_SERVER_UNREACHABLE Handle = 0x80072F85 - WININET_E_BAD_AUTO_PROXY_SCRIPT Handle = 0x80072F86 - WININET_E_UNABLE_TO_DOWNLOAD_SCRIPT Handle = 0x80072F87 - WININET_E_SEC_INVALID_CERT Handle = 0x80072F89 - WININET_E_SEC_CERT_REVOKED Handle = 0x80072F8A - WININET_E_FAILED_DUETOSECURITYCHECK Handle = 0x80072F8B - WININET_E_NOT_INITIALIZED Handle = 0x80072F8C - WININET_E_LOGIN_FAILURE_DISPLAY_ENTITY_BODY Handle = 0x80072F8E - WININET_E_DECODING_FAILED Handle = 0x80072F8F - WININET_E_NOT_REDIRECTED Handle = 0x80072F80 - WININET_E_COOKIE_NEEDS_CONFIRMATION Handle = 0x80072F81 - WININET_E_COOKIE_DECLINED Handle = 0x80072F82 - 
WININET_E_REDIRECT_NEEDS_CONFIRMATION Handle = 0x80072F88 - SQLITE_E_ERROR Handle = 0x87AF0001 - SQLITE_E_INTERNAL Handle = 0x87AF0002 - SQLITE_E_PERM Handle = 0x87AF0003 - SQLITE_E_ABORT Handle = 0x87AF0004 - SQLITE_E_BUSY Handle = 0x87AF0005 - SQLITE_E_LOCKED Handle = 0x87AF0006 - SQLITE_E_NOMEM Handle = 0x87AF0007 - SQLITE_E_READONLY Handle = 0x87AF0008 - SQLITE_E_INTERRUPT Handle = 0x87AF0009 - SQLITE_E_IOERR Handle = 0x87AF000A - SQLITE_E_CORRUPT Handle = 0x87AF000B - SQLITE_E_NOTFOUND Handle = 0x87AF000C - SQLITE_E_FULL Handle = 0x87AF000D - SQLITE_E_CANTOPEN Handle = 0x87AF000E - SQLITE_E_PROTOCOL Handle = 0x87AF000F - SQLITE_E_EMPTY Handle = 0x87AF0010 - SQLITE_E_SCHEMA Handle = 0x87AF0011 - SQLITE_E_TOOBIG Handle = 0x87AF0012 - SQLITE_E_CONSTRAINT Handle = 0x87AF0013 - SQLITE_E_MISMATCH Handle = 0x87AF0014 - SQLITE_E_MISUSE Handle = 0x87AF0015 - SQLITE_E_NOLFS Handle = 0x87AF0016 - SQLITE_E_AUTH Handle = 0x87AF0017 - SQLITE_E_FORMAT Handle = 0x87AF0018 - SQLITE_E_RANGE Handle = 0x87AF0019 - SQLITE_E_NOTADB Handle = 0x87AF001A - SQLITE_E_NOTICE Handle = 0x87AF001B - SQLITE_E_WARNING Handle = 0x87AF001C - SQLITE_E_ROW Handle = 0x87AF0064 - SQLITE_E_DONE Handle = 0x87AF0065 - SQLITE_E_IOERR_READ Handle = 0x87AF010A - SQLITE_E_IOERR_SHORT_READ Handle = 0x87AF020A - SQLITE_E_IOERR_WRITE Handle = 0x87AF030A - SQLITE_E_IOERR_FSYNC Handle = 0x87AF040A - SQLITE_E_IOERR_DIR_FSYNC Handle = 0x87AF050A - SQLITE_E_IOERR_TRUNCATE Handle = 0x87AF060A - SQLITE_E_IOERR_FSTAT Handle = 0x87AF070A - SQLITE_E_IOERR_UNLOCK Handle = 0x87AF080A - SQLITE_E_IOERR_RDLOCK Handle = 0x87AF090A - SQLITE_E_IOERR_DELETE Handle = 0x87AF0A0A - SQLITE_E_IOERR_BLOCKED Handle = 0x87AF0B0A - SQLITE_E_IOERR_NOMEM Handle = 0x87AF0C0A - SQLITE_E_IOERR_ACCESS Handle = 0x87AF0D0A - SQLITE_E_IOERR_CHECKRESERVEDLOCK Handle = 0x87AF0E0A - SQLITE_E_IOERR_LOCK Handle = 0x87AF0F0A - SQLITE_E_IOERR_CLOSE Handle = 0x87AF100A - SQLITE_E_IOERR_DIR_CLOSE Handle = 0x87AF110A - SQLITE_E_IOERR_SHMOPEN Handle = 0x87AF120A - SQLITE_E_IOERR_SHMSIZE Handle = 0x87AF130A - SQLITE_E_IOERR_SHMLOCK Handle = 0x87AF140A - SQLITE_E_IOERR_SHMMAP Handle = 0x87AF150A - SQLITE_E_IOERR_SEEK Handle = 0x87AF160A - SQLITE_E_IOERR_DELETE_NOENT Handle = 0x87AF170A - SQLITE_E_IOERR_MMAP Handle = 0x87AF180A - SQLITE_E_IOERR_GETTEMPPATH Handle = 0x87AF190A - SQLITE_E_IOERR_CONVPATH Handle = 0x87AF1A0A - SQLITE_E_IOERR_VNODE Handle = 0x87AF1A02 - SQLITE_E_IOERR_AUTH Handle = 0x87AF1A03 - SQLITE_E_LOCKED_SHAREDCACHE Handle = 0x87AF0106 - SQLITE_E_BUSY_RECOVERY Handle = 0x87AF0105 - SQLITE_E_BUSY_SNAPSHOT Handle = 0x87AF0205 - SQLITE_E_CANTOPEN_NOTEMPDIR Handle = 0x87AF010E - SQLITE_E_CANTOPEN_ISDIR Handle = 0x87AF020E - SQLITE_E_CANTOPEN_FULLPATH Handle = 0x87AF030E - SQLITE_E_CANTOPEN_CONVPATH Handle = 0x87AF040E - SQLITE_E_CORRUPT_VTAB Handle = 0x87AF010B - SQLITE_E_READONLY_RECOVERY Handle = 0x87AF0108 - SQLITE_E_READONLY_CANTLOCK Handle = 0x87AF0208 - SQLITE_E_READONLY_ROLLBACK Handle = 0x87AF0308 - SQLITE_E_READONLY_DBMOVED Handle = 0x87AF0408 - SQLITE_E_ABORT_ROLLBACK Handle = 0x87AF0204 - SQLITE_E_CONSTRAINT_CHECK Handle = 0x87AF0113 - SQLITE_E_CONSTRAINT_COMMITHOOK Handle = 0x87AF0213 - SQLITE_E_CONSTRAINT_FOREIGNKEY Handle = 0x87AF0313 - SQLITE_E_CONSTRAINT_FUNCTION Handle = 0x87AF0413 - SQLITE_E_CONSTRAINT_NOTNULL Handle = 0x87AF0513 - SQLITE_E_CONSTRAINT_PRIMARYKEY Handle = 0x87AF0613 - SQLITE_E_CONSTRAINT_TRIGGER Handle = 0x87AF0713 - SQLITE_E_CONSTRAINT_UNIQUE Handle = 0x87AF0813 - SQLITE_E_CONSTRAINT_VTAB Handle = 0x87AF0913 - 
SQLITE_E_CONSTRAINT_ROWID Handle = 0x87AF0A13 - SQLITE_E_NOTICE_RECOVER_WAL Handle = 0x87AF011B - SQLITE_E_NOTICE_RECOVER_ROLLBACK Handle = 0x87AF021B - SQLITE_E_WARNING_AUTOINDEX Handle = 0x87AF011C - UTC_E_TOGGLE_TRACE_STARTED Handle = 0x87C51001 - UTC_E_ALTERNATIVE_TRACE_CANNOT_PREEMPT Handle = 0x87C51002 - UTC_E_AOT_NOT_RUNNING Handle = 0x87C51003 - UTC_E_SCRIPT_TYPE_INVALID Handle = 0x87C51004 - UTC_E_SCENARIODEF_NOT_FOUND Handle = 0x87C51005 - UTC_E_TRACEPROFILE_NOT_FOUND Handle = 0x87C51006 - UTC_E_FORWARDER_ALREADY_ENABLED Handle = 0x87C51007 - UTC_E_FORWARDER_ALREADY_DISABLED Handle = 0x87C51008 - UTC_E_EVENTLOG_ENTRY_MALFORMED Handle = 0x87C51009 - UTC_E_DIAGRULES_SCHEMAVERSION_MISMATCH Handle = 0x87C5100A - UTC_E_SCRIPT_TERMINATED Handle = 0x87C5100B - UTC_E_INVALID_CUSTOM_FILTER Handle = 0x87C5100C - UTC_E_TRACE_NOT_RUNNING Handle = 0x87C5100D - UTC_E_REESCALATED_TOO_QUICKLY Handle = 0x87C5100E - UTC_E_ESCALATION_ALREADY_RUNNING Handle = 0x87C5100F - UTC_E_PERFTRACK_ALREADY_TRACING Handle = 0x87C51010 - UTC_E_REACHED_MAX_ESCALATIONS Handle = 0x87C51011 - UTC_E_FORWARDER_PRODUCER_MISMATCH Handle = 0x87C51012 - UTC_E_INTENTIONAL_SCRIPT_FAILURE Handle = 0x87C51013 - UTC_E_SQM_INIT_FAILED Handle = 0x87C51014 - UTC_E_NO_WER_LOGGER_SUPPORTED Handle = 0x87C51015 - UTC_E_TRACERS_DONT_EXIST Handle = 0x87C51016 - UTC_E_WINRT_INIT_FAILED Handle = 0x87C51017 - UTC_E_SCENARIODEF_SCHEMAVERSION_MISMATCH Handle = 0x87C51018 - UTC_E_INVALID_FILTER Handle = 0x87C51019 - UTC_E_EXE_TERMINATED Handle = 0x87C5101A - UTC_E_ESCALATION_NOT_AUTHORIZED Handle = 0x87C5101B - UTC_E_SETUP_NOT_AUTHORIZED Handle = 0x87C5101C - UTC_E_CHILD_PROCESS_FAILED Handle = 0x87C5101D - UTC_E_COMMAND_LINE_NOT_AUTHORIZED Handle = 0x87C5101E - UTC_E_CANNOT_LOAD_SCENARIO_EDITOR_XML Handle = 0x87C5101F - UTC_E_ESCALATION_TIMED_OUT Handle = 0x87C51020 - UTC_E_SETUP_TIMED_OUT Handle = 0x87C51021 - UTC_E_TRIGGER_MISMATCH Handle = 0x87C51022 - UTC_E_TRIGGER_NOT_FOUND Handle = 0x87C51023 - UTC_E_SIF_NOT_SUPPORTED Handle = 0x87C51024 - UTC_E_DELAY_TERMINATED Handle = 0x87C51025 - UTC_E_DEVICE_TICKET_ERROR Handle = 0x87C51026 - UTC_E_TRACE_BUFFER_LIMIT_EXCEEDED Handle = 0x87C51027 - UTC_E_API_RESULT_UNAVAILABLE Handle = 0x87C51028 - UTC_E_RPC_TIMEOUT Handle = 0x87C51029 - UTC_E_RPC_WAIT_FAILED Handle = 0x87C5102A - UTC_E_API_BUSY Handle = 0x87C5102B - UTC_E_TRACE_MIN_DURATION_REQUIREMENT_NOT_MET Handle = 0x87C5102C - UTC_E_EXCLUSIVITY_NOT_AVAILABLE Handle = 0x87C5102D - UTC_E_GETFILE_FILE_PATH_NOT_APPROVED Handle = 0x87C5102E - UTC_E_ESCALATION_DIRECTORY_ALREADY_EXISTS Handle = 0x87C5102F - UTC_E_TIME_TRIGGER_ON_START_INVALID Handle = 0x87C51030 - UTC_E_TIME_TRIGGER_ONLY_VALID_ON_SINGLE_TRANSITION Handle = 0x87C51031 - UTC_E_TIME_TRIGGER_INVALID_TIME_RANGE Handle = 0x87C51032 - UTC_E_MULTIPLE_TIME_TRIGGER_ON_SINGLE_STATE Handle = 0x87C51033 - UTC_E_BINARY_MISSING Handle = 0x87C51034 - UTC_E_NETWORK_CAPTURE_NOT_ALLOWED Handle = 0x87C51035 - UTC_E_FAILED_TO_RESOLVE_CONTAINER_ID Handle = 0x87C51036 - UTC_E_UNABLE_TO_RESOLVE_SESSION Handle = 0x87C51037 - UTC_E_THROTTLED Handle = 0x87C51038 - UTC_E_UNAPPROVED_SCRIPT Handle = 0x87C51039 - UTC_E_SCRIPT_MISSING Handle = 0x87C5103A - UTC_E_SCENARIO_THROTTLED Handle = 0x87C5103B - UTC_E_API_NOT_SUPPORTED Handle = 0x87C5103C - UTC_E_GETFILE_EXTERNAL_PATH_NOT_APPROVED Handle = 0x87C5103D - UTC_E_TRY_GET_SCENARIO_TIMEOUT_EXCEEDED Handle = 0x87C5103E - UTC_E_CERT_REV_FAILED Handle = 0x87C5103F - UTC_E_FAILED_TO_START_NDISCAP Handle = 0x87C51040 - UTC_E_KERNELDUMP_LIMIT_REACHED Handle = 
0x87C51041 - UTC_E_MISSING_AGGREGATE_EVENT_TAG Handle = 0x87C51042 - UTC_E_INVALID_AGGREGATION_STRUCT Handle = 0x87C51043 - UTC_E_ACTION_NOT_SUPPORTED_IN_DESTINATION Handle = 0x87C51044 - UTC_E_FILTER_MISSING_ATTRIBUTE Handle = 0x87C51045 - UTC_E_FILTER_INVALID_TYPE Handle = 0x87C51046 - UTC_E_FILTER_VARIABLE_NOT_FOUND Handle = 0x87C51047 - UTC_E_FILTER_FUNCTION_RESTRICTED Handle = 0x87C51048 - UTC_E_FILTER_VERSION_MISMATCH Handle = 0x87C51049 - UTC_E_FILTER_INVALID_FUNCTION Handle = 0x87C51050 - UTC_E_FILTER_INVALID_FUNCTION_PARAMS Handle = 0x87C51051 - UTC_E_FILTER_INVALID_COMMAND Handle = 0x87C51052 - UTC_E_FILTER_ILLEGAL_EVAL Handle = 0x87C51053 - UTC_E_TTTRACER_RETURNED_ERROR Handle = 0x87C51054 - UTC_E_AGENT_DIAGNOSTICS_TOO_LARGE Handle = 0x87C51055 - UTC_E_FAILED_TO_RECEIVE_AGENT_DIAGNOSTICS Handle = 0x87C51056 - UTC_E_SCENARIO_HAS_NO_ACTIONS Handle = 0x87C51057 - UTC_E_TTTRACER_STORAGE_FULL Handle = 0x87C51058 - UTC_E_INSUFFICIENT_SPACE_TO_START_TRACE Handle = 0x87C51059 - UTC_E_ESCALATION_CANCELLED_AT_SHUTDOWN Handle = 0x87C5105A - UTC_E_GETFILEINFOACTION_FILE_NOT_APPROVED Handle = 0x87C5105B - WINML_ERR_INVALID_DEVICE Handle = 0x88900001 - WINML_ERR_INVALID_BINDING Handle = 0x88900002 - WINML_ERR_VALUE_NOTFOUND Handle = 0x88900003 - WINML_ERR_SIZE_MISMATCH Handle = 0x88900004 -) diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go b/awsproviderlint/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go deleted file mode 100644 index 6048ac679fa..00000000000 --- a/awsproviderlint/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go +++ /dev/null @@ -1,149 +0,0 @@ -// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT. - -package windows - -type KNOWNFOLDERID GUID - -var ( - FOLDERID_NetworkFolder = &KNOWNFOLDERID{0xd20beec4, 0x5ca8, 0x4905, [8]byte{0xae, 0x3b, 0xbf, 0x25, 0x1e, 0xa0, 0x9b, 0x53}} - FOLDERID_ComputerFolder = &KNOWNFOLDERID{0x0ac0837c, 0xbbf8, 0x452a, [8]byte{0x85, 0x0d, 0x79, 0xd0, 0x8e, 0x66, 0x7c, 0xa7}} - FOLDERID_InternetFolder = &KNOWNFOLDERID{0x4d9f7874, 0x4e0c, 0x4904, [8]byte{0x96, 0x7b, 0x40, 0xb0, 0xd2, 0x0c, 0x3e, 0x4b}} - FOLDERID_ControlPanelFolder = &KNOWNFOLDERID{0x82a74aeb, 0xaeb4, 0x465c, [8]byte{0xa0, 0x14, 0xd0, 0x97, 0xee, 0x34, 0x6d, 0x63}} - FOLDERID_PrintersFolder = &KNOWNFOLDERID{0x76fc4e2d, 0xd6ad, 0x4519, [8]byte{0xa6, 0x63, 0x37, 0xbd, 0x56, 0x06, 0x81, 0x85}} - FOLDERID_SyncManagerFolder = &KNOWNFOLDERID{0x43668bf8, 0xc14e, 0x49b2, [8]byte{0x97, 0xc9, 0x74, 0x77, 0x84, 0xd7, 0x84, 0xb7}} - FOLDERID_SyncSetupFolder = &KNOWNFOLDERID{0x0f214138, 0xb1d3, 0x4a90, [8]byte{0xbb, 0xa9, 0x27, 0xcb, 0xc0, 0xc5, 0x38, 0x9a}} - FOLDERID_ConflictFolder = &KNOWNFOLDERID{0x4bfefb45, 0x347d, 0x4006, [8]byte{0xa5, 0xbe, 0xac, 0x0c, 0xb0, 0x56, 0x71, 0x92}} - FOLDERID_SyncResultsFolder = &KNOWNFOLDERID{0x289a9a43, 0xbe44, 0x4057, [8]byte{0xa4, 0x1b, 0x58, 0x7a, 0x76, 0xd7, 0xe7, 0xf9}} - FOLDERID_RecycleBinFolder = &KNOWNFOLDERID{0xb7534046, 0x3ecb, 0x4c18, [8]byte{0xbe, 0x4e, 0x64, 0xcd, 0x4c, 0xb7, 0xd6, 0xac}} - FOLDERID_ConnectionsFolder = &KNOWNFOLDERID{0x6f0cd92b, 0x2e97, 0x45d1, [8]byte{0x88, 0xff, 0xb0, 0xd1, 0x86, 0xb8, 0xde, 0xdd}} - FOLDERID_Fonts = &KNOWNFOLDERID{0xfd228cb7, 0xae11, 0x4ae3, [8]byte{0x86, 0x4c, 0x16, 0xf3, 0x91, 0x0a, 0xb8, 0xfe}} - FOLDERID_Desktop = &KNOWNFOLDERID{0xb4bfcc3a, 0xdb2c, 0x424c, [8]byte{0xb0, 0x29, 0x7f, 0xe9, 0x9a, 0x87, 0xc6, 0x41}} - FOLDERID_Startup = &KNOWNFOLDERID{0xb97d20bb, 0xf46a, 0x4c97, [8]byte{0xba, 0x10, 0x5e, 0x36, 0x08, 0x43, 0x08, 0x54}} - 
-	FOLDERID_Programs = &KNOWNFOLDERID{0xa77f5d77, 0x2e2b, 0x44c3, [8]byte{0xa6, 0xa2, 0xab, 0xa6, 0x01, 0x05, 0x4a, 0x51}}
-	[… roughly 130 further FOLDERID_* GUID entries (FOLDERID_StartMenu through FOLDERID_AppDataFavorites), machine-generated, one line per known folder; elided …]
-	FOLDERID_AppDataProgramData = &KNOWNFOLDERID{0x559d40a3, 0xa036, 0x40fa, [8]byte{0xaf, 0x61, 0x84, 0xcb, 0x43, 0x0a, 0x4d, 0x34}}
-)
diff --git a/awsproviderlint/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/awsproviderlint/vendor/golang.org/x/sys/windows/zsyscall_windows.go
deleted file mode 100644
index 347f13dbf6e..00000000000
--- a/awsproviderlint/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ /dev/null
@@ -1,4083 +0,0 @@
-// Code generated by 'go generate'; DO NOT EDIT.
-
-package windows
-
-import (
-	"syscall"
-	"unsafe"
-)
-
-var _ unsafe.Pointer
-
-// Do the interface allocations only once for common
-// Errno values.
-const (
-	errnoERROR_IO_PENDING = 997
-)
-
-var (
-	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
-)
-
-// errnoErr returns common boxed Errno values, to prevent
-// allocations at runtime.
-func errnoErr(e syscall.Errno) error {
-	switch e {
-	case 0:
-		return nil
-	case errnoERROR_IO_PENDING:
-		return errERROR_IO_PENDING
-	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
-	return e
-}
-
-var (
-	modadvapi32 = NewLazySystemDLL("advapi32.dll")
-	modkernel32 = NewLazySystemDLL("kernel32.dll")
-	modshell32  = NewLazySystemDLL("shell32.dll")
-	moduserenv  = NewLazySystemDLL("userenv.dll")
-	modmswsock  = NewLazySystemDLL("mswsock.dll")
-	modcrypt32  = NewLazySystemDLL("crypt32.dll")
-	moduser32   = NewLazySystemDLL("user32.dll")
-	modole32    = NewLazySystemDLL("ole32.dll")
-	modntdll    = NewLazySystemDLL("ntdll.dll")
-	modpsapi    = NewLazySystemDLL("psapi.dll")
-	modws2_32   = NewLazySystemDLL("ws2_32.dll")
-	moddnsapi   = NewLazySystemDLL("dnsapi.dll")
-	modiphlpapi = NewLazySystemDLL("iphlpapi.dll")
-	modsecur32  = NewLazySystemDLL("secur32.dll")
-	modnetapi32 = NewLazySystemDLL("netapi32.dll")
-	modwtsapi32 = NewLazySystemDLL("wtsapi32.dll")
-
-	procRegisterEventSourceW = modadvapi32.NewProc("RegisterEventSourceW")
-	[… roughly 300 further proc* = mod*.NewProc("…") lookups (procDeregisterEventSource through procMakeSelfRelativeSD), machine-generated; elided …]
-	procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW")
-)
-
-func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) {
-	r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0)
-	handle = Handle(r0)
-	if handle == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-[… roughly 80 further generated wrappers (DeregisterEventSource through waitForMultipleObjects), each calling syscall.Syscall* on the corresponding proc address and mapping a failed return to errnoErr(e1) or syscall.EINVAL; elided …]
-
-func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) {
-	r0, _, e1 :=
syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetFileType(filehandle Handle) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { - r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetEnvironmentStrings() (envs *uint16, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0) - envs = (*uint16)(unsafe.Pointer(r0)) - if envs == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func FreeEnvironmentStrings(envs *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) { - var _p0 uint32 - if inheritExisting { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = 
syscall.EINVAL - } - } - return -} - -func DestroyEnvironmentBlock(block *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getTickCount64() (ms uint64) { - r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) - ms = uint64(r0) - return -} - -func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetFileAttributes(name *uint16) (attrs uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) - attrs = uint32(r0) - if attrs == INVALID_FILE_ATTRIBUTES { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetFileAttributes(name *uint16, attrs uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetCommandLine() (cmd *uint16) { - r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0) - cmd = (*uint16)(unsafe.Pointer(r0)) - return -} - -func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { - r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) - argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0)) - if argv == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func LocalFree(hmem Handle) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0) - handle = Handle(r0) - if handle != 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func FlushFileBuffers(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func 
GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0) - addr = uintptr(r0) - if addr == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func UnmapViewOfFile(addr uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func FlushViewOfFile(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func VirtualLock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func VirtualUnlock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) - value = uintptr(r0) - if value == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), 
uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { - var _p0 uint32 - if watchSubTree { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { - r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) - store = Handle(r0) - if store == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) - context = (*CertContext)(unsafe.Pointer(r0)) - if context == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { - r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CertCloseStore(store Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { - r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), 
uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CertFreeCertificateChain(ctx *CertChainContext) { - syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) - return -} - -func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) - context = (*CertContext)(unsafe.Pointer(r0)) - if context == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CertFreeCertificateContext(ctx *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { - r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func RegCloseKey(key Handle) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func 
RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func GetCurrentProcessId() (pid uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) - pid = uint32(r0) - return -} - -func GetConsoleMode(console Handle, mode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetConsoleMode(console Handle, mode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - 
-func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) - if r1&0xff == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) - if r1&0xff == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetCurrentThreadId() (id uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentThreadId.Addr(), 0, 0, 0, 0) - id = uint32(r0) - return -} - -func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func ResetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = 
syscall.EINVAL - } - } - return -} - -func PulseEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) { - var _p0 uint32 - if initialOwner { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func ReleaseMutex(mutex Handle) (err error) { - r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { - var _p0 uint32 - if alertable { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) - ret = uint32(r0) - return -} - -func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func AssignProcessToJobObject(job Handle, process Handle) (err error) { - r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func TerminateJobObject(job Handle, exitCode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetErrorMode(mode uint32) (ret uint32) { - r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) - ret = uint32(r0) - return -} - -func ResumeThread(thread Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) - ret = uint32(r0) - if ret == 0xffffffff { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetPriorityClass(process Handle, priorityClass uint32) (err error) { - r1, _, e1 := 
syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetPriorityClass(process Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0) - ret = uint32(r0) - if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { - r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) - ret = int(r0) - if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetProcessId(process Handle) (id uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0) - id = uint32(r0) - if id == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetProcessPriorityBoost(process Handle, disable bool) (err error) { - var _p0 uint32 - if disable { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) { - syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0) - return -} - -func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0) - if r1 == 0 { - if e1 != 0 { - 
err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func FindVolumeClose(findVolume Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) { - r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetDriveType(rootPathName *uint16) (driveType uint32) { - r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) - driveType = uint32(r0) - return -} - 
-func GetLogicalDrives() (drivesBitMask uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0) - drivesBitMask = uint32(r0) - if drivesBitMask == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, 
uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { - r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) - ret = int32(r0) - if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func ExitWindowsEx(flags uint32, reason uint32) (err error) { - r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, rebootAfterShutdown bool, reason uint32) (err error) { - var _p0 uint32 - if forceAppsClosed { - _p0 = 1 - } else { - _p0 = 0 - } - var _p1 uint32 - if rebootAfterShutdown { - _p1 = 1 - } else { - _p1 = 0 - } - r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) { - r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) - chars = int32(r0) - return -} - -func coCreateGuid(pguid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func CoTaskMemFree(address unsafe.Pointer) { - 
syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0) - return -} - -func rtlGetVersion(info *OsVersionInfoEx) (ret error) { - r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) { - syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) - return -} - -func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) { - var _p0 *uint32 - if len(processIds) > 0 { - _p0 = &processIds[0] - } - r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(processIds)), uintptr(unsafe.Pointer(bytesReturned))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { - r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) - if r0 != 0 { - sockerr = syscall.Errno(r0) - } - return -} - -func WSACleanup() (err error) { - r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), 
uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol)) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) { - var _p0 *byte - if len(buf) > 0 { - _p0 = &buf[0] - } - r1, _, e1 := syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) { - var _p0 *byte - if len(buf) > 0 { - _p0 = &buf[0] - } - r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) - n = int32(r0) - if n == -1 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { - r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { - r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func listen(s Handle, backlog int32) (err error) { - r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), 
uintptr(backlog), 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func shutdown(s Handle, how int32) (err error) { - r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func Closesocket(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) { - syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0) - return -} - -func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), 
uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetHostByName(name string) (h *Hostent, err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(name) - if err != nil { - return - } - return _GetHostByName(_p0) -} - -func _GetHostByName(name *byte) (h *Hostent, err error) { - r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) - h = (*Hostent)(unsafe.Pointer(r0)) - if h == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetServByName(name string, proto string) (s *Servent, err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(name) - if err != nil { - return - } - var _p1 *byte - _p1, err = syscall.BytePtrFromString(proto) - if err != nil { - return - } - return _GetServByName(_p0, _p1) -} - -func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { - r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) - s = (*Servent)(unsafe.Pointer(r0)) - if s == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func Ntohs(netshort uint16) (u uint16) { - r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) - u = uint16(r0) - return -} - -func GetProtoByName(name string) (p *Protoent, err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(name) - if err != nil { - return - } - return _GetProtoByName(_p0) -} - -func _GetProtoByName(name *byte) (p *Protoent, err error) { - r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) - p = (*Protoent)(unsafe.Pointer(r0)) - if p == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { - var _p0 *uint16 - _p0, status = syscall.UTF16PtrFromString(name) - if status != nil { - return - } - return _DnsQuery(_p0, qtype, options, extra, qrs, pr) -} - -func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { - r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) - if r0 != 0 { - status = syscall.Errno(r0) - } - return -} - -func DnsRecordListFree(rl *DNSRecord, freetype uint32) { - syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0) - return -} - -func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { - r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) - same = r0 != 0 - return -} - -func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) { - r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0) - if r0 != 0 { - sockerr = syscall.Errno(r0) - } - return -} - -func FreeAddrInfoW(addrinfo *AddrinfoW) { - syscall.Syscall(procFreeAddrInfoW.Addr(), 1, 
uintptr(unsafe.Pointer(addrinfo)), 0, 0) - return -} - -func GetIfEntry(pIfRow *MibIfRow) (errcode error) { - r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) - if r0 != 0 { - errcode = syscall.Errno(r0) - } - return -} - -func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { - r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0) - if r0 != 0 { - errcode = syscall.Errno(r0) - } - return -} - -func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) { - r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) - n = int32(r0) - if n == -1 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { - r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) - if r0 != 0 { - errcode = syscall.Errno(r0) - } - return -} - -func GetACP() (acp uint32) { - r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) - acp = uint32(r0) - return -} - -func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { - r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) - nwrite = int32(r0) - if nwrite == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0) - if r1&0xff == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) - if r1&0xff == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { - r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) - if r0 != 0 { - neterr = syscall.Errno(r0) - } - return -} - -func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) { - r0, _, _ := 
syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) - if r0 != 0 { - neterr = syscall.Errno(r0) - } - return -} - -func NetApiBufferFree(buf *byte) (neterr error) { - r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0) - if r0 != 0 { - neterr = syscall.Errno(r0) - } - return -} - -func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetLengthSid(sid *SID) (len uint32) { - r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - len = uint32(r0) - return -} - -func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) { - r1, _, e1 := 
syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) { - r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0) - isWellKnown = r0 != 0 - return -} - -func FreeSid(sid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - if r1 != 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { - r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0) - isEqual = r0 != 0 - return -} - -func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) { - r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0)) - return -} - -func getSidSubAuthorityCount(sid *SID) (count *uint8) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - count = (*uint8)(unsafe.Pointer(r0)) - return -} - -func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0) - subAuthority = (*uint32)(unsafe.Pointer(r0)) - return -} - -func isValidSid(sid *SID) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - isValid = r0 != 0 - return -} - -func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { - r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) { - var _p0 uint32 - if openAsSelf { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func ImpersonateSelf(impersonationlevel uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func RevertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetThreadToken(thread *Handle, token Token) (err error) { - r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, 
uintptr(unsafe.Pointer(thread)), uintptr(token), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) { - var _p0 uint32 - if disableAllPrivileges { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) { - var _p0 uint32 - if resetToDefault { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) { - r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) - len = uint32(r0) - if len == 0 { - if e1 != 0 { - err = 
errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) - len = uint32(r0) - if len == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) - len = uint32(r0) - if len == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func WTSQueryUserToken(session uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func WTSFreeMemory(ptr uintptr) { - syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) - return -} - -func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) { - syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) - return -} - -func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - var _p0 *uint16 - _p0, ret = syscall.UTF16PtrFromString(objectName) - if ret != nil { - return - } - return _getNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl, sd) -} - -func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func 
SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - var _p0 *uint16 - _p0, ret = syscall.UTF16PtrFromString(objectName) - if ret != nil { - return - } - return _SetNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl) -} - -func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) { - r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) { - var _p0 uint32 - if *daclPresent { - _p0 = 1 - } else { - _p0 = 0 - } - var _p1 uint32 - if *daclDefaulted { - _p1 = 1 - } else { - _p1 = 0 - } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) - *daclPresent = _p0 != 0 - *daclDefaulted = _p1 != 0 - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) { - var _p0 uint32 - if *saclPresent { - _p0 = 1 - } else { - _p0 = 0 - } - var _p1 uint32 - if *saclDefaulted { - _p1 = 1 - } else { - _p1 = 0 - } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) - *saclPresent 
= _p0 != 0 - *saclDefaulted = _p1 != 0 - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) { - var _p0 uint32 - if *ownerDefaulted { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) - *ownerDefaulted = _p0 != 0 - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) { - var _p0 uint32 - if *groupDefaulted { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) - *groupDefaulted = _p0 != 0 - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) - len = uint32(r0) - return -} - -func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) - isValid = r0 != 0 - return -} - -func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) { - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) { - var _p0 uint32 - if daclPresent { - _p0 = 1 - } else { - _p0 = 0 - } - var _p1 uint32 - if daclDefaulted { - _p1 = 1 - } else { - _p1 = 0 - } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *ACL, saclDefaulted bool) (err error) { - var _p0 uint32 - if saclPresent { - _p0 = 1 - } else { - _p0 = 0 - } - var _p1 uint32 - if saclDefaulted { - _p1 = 1 - } else { - _p1 = 0 - } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) { - var _p0 uint32 - if ownerDefaulted { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := 
syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) { - var _p0 uint32 - if groupDefaulted { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) { - syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) - return -} - -func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(str) - if err != nil { - return - } - return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) -} - -func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func setEntriesInAcl(countExplicitEntries uint32, explicitEntries 
*EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) {
-	r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0)
-	if r0 != 0 {
-		ret = syscall.Errno(r0)
-	}
-	return
-}
diff --git a/awsproviderlint/vendor/gopkg.in/warnings.v0/LICENSE b/awsproviderlint/vendor/gopkg.in/warnings.v0/LICENSE
deleted file mode 100644
index d65f7e9d8cd..00000000000
--- a/awsproviderlint/vendor/gopkg.in/warnings.v0/LICENSE
+++ /dev/null
@@ -1,24 +0,0 @@
-Copyright (c) 2016 Péter Surányi.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/awsproviderlint/vendor/gopkg.in/warnings.v0/README b/awsproviderlint/vendor/gopkg.in/warnings.v0/README
deleted file mode 100644
index 974212ba1b9..00000000000
--- a/awsproviderlint/vendor/gopkg.in/warnings.v0/README
+++ /dev/null
@@ -1,77 +0,0 @@
-Package warnings implements error handling with non-fatal errors (warnings).
-
-import path:   "gopkg.in/warnings.v0"
-package docs:  https://godoc.org/gopkg.in/warnings.v0
-issues:        https://github.com/go-warnings/warnings/issues
-pull requests: https://github.com/go-warnings/warnings/pulls
-
-A recurring pattern in Go programming is the following:
-
- func myfunc(params) error {
-     if err := doSomething(...); err != nil {
-         return err
-     }
-     if err := doSomethingElse(...); err != nil {
-         return err
-     }
-     if ok := doAnotherThing(...); !ok {
-         return errors.New("my error")
-     }
-     ...
-     return nil
- }
-
-This pattern allows interrupting the flow on any received error. But what if
-there are errors that should be noted but still not fatal, for which the flow
-should not be interrupted? Implementing such logic at each if statement would
-make the code complex and the flow much harder to follow.
-
-Package warnings provides the Collector type and a clean and simple pattern
-for achieving such logic. The Collector takes care of deciding when to break
-the flow and when to continue, collecting any non-fatal errors (warnings)
-along the way. The only requirement is that fatal and non-fatal errors can be
-distinguished programmatically; that is a function such as
-
- IsFatal(error) bool
-
-must be implemented. The following is an example of what the above snippet
-could look like using the warnings package:
-
- import "gopkg.in/warnings.v0"
-
- func isFatal(err error) bool {
-     _, ok := err.(WarningType)
-     return !ok
- }
-
- func myfunc(params) error {
-     c := warnings.NewCollector(isFatal)
-     c.FatalWithWarnings = true
-     if err := c.Collect(doSomething()); err != nil {
-         return err
-     }
-     if err := c.Collect(doSomethingElse(...)); err != nil {
-         return err
-     }
-     if ok := doAnotherThing(...); !ok {
-         if err := c.Collect(errors.New("my error")); err != nil {
-             return err
-         }
-     }
-     ...
-     return c.Done()
- }
-
-For an example of a non-trivial code base using this library, see
-gopkg.in/gcfg.v1
-
-Rules for using warnings
-
- - ensure that warnings are programmatically distinguishable from fatal
-   errors (i.e. implement an isFatal function and any necessary error types)
- - ensure that there is a single Collector instance for a call of each
-   exported function
- - ensure that all errors (fatal or warning) are fed through Collect
- - ensure that every time an error is returned, it is one returned by a
-   Collector (from Collect or Done)
- - ensure that Collect is never called after Done
diff --git a/awsproviderlint/vendor/gopkg.in/warnings.v0/warnings.go b/awsproviderlint/vendor/gopkg.in/warnings.v0/warnings.go
deleted file mode 100644
index b849d1e3d9a..00000000000
--- a/awsproviderlint/vendor/gopkg.in/warnings.v0/warnings.go
+++ /dev/null
@@ -1,194 +0,0 @@
-// Package warnings implements error handling with non-fatal errors (warnings).
-//
-// A recurring pattern in Go programming is the following:
-//
-//  func myfunc(params) error {
-//      if err := doSomething(...); err != nil {
-//          return err
-//      }
-//      if err := doSomethingElse(...); err != nil {
-//          return err
-//      }
-//      if ok := doAnotherThing(...); !ok {
-//          return errors.New("my error")
-//      }
-//      ...
-//      return nil
-//  }
-//
-// This pattern allows interrupting the flow on any received error. But what if
-// there are errors that should be noted but still not fatal, for which the flow
-// should not be interrupted? Implementing such logic at each if statement would
-// make the code complex and the flow much harder to follow.
-//
-// Package warnings provides the Collector type and a clean and simple pattern
-// for achieving such logic. The Collector takes care of deciding when to break
-// the flow and when to continue, collecting any non-fatal errors (warnings)
-// along the way. The only requirement is that fatal and non-fatal errors can be
-// distinguished programmatically; that is a function such as
-//
-//  IsFatal(error) bool
-//
-// must be implemented. The following is an example of what the above snippet
-// could look like using the warnings package:
-//
-//  import "gopkg.in/warnings.v0"
-//
-//  func isFatal(err error) bool {
-//      _, ok := err.(WarningType)
-//      return !ok
-//  }
-//
-//  func myfunc(params) error {
-//      c := warnings.NewCollector(isFatal)
-//      c.FatalWithWarnings = true
-//      if err := c.Collect(doSomething()); err != nil {
-//          return err
-//      }
-//      if err := c.Collect(doSomethingElse(...)); err != nil {
-//          return err
-//      }
-//      if ok := doAnotherThing(...); !ok {
-//          if err := c.Collect(errors.New("my error")); err != nil {
-//              return err
-//          }
-//      }
-//      ...
-//      return c.Done()
-//  }
-//
-// For an example of a non-trivial code base using this library, see
-// gopkg.in/gcfg.v1
-//
-// Rules for using warnings
-//
-//  - ensure that warnings are programmatically distinguishable from fatal
-//    errors (i.e. implement an isFatal function and any necessary error types)
-//  - ensure that there is a single Collector instance for a call of each
-//    exported function
-//  - ensure that all errors (fatal or warning) are fed through Collect
-//  - ensure that every time an error is returned, it is one returned by a
-//    Collector (from Collect or Done)
-//  - ensure that Collect is never called after Done
-//
-// TODO
-//
-//  - optionally limit the number of warnings (e.g. stop after 20 warnings) (?)
-//  - consider interaction with contexts
-//  - go vet-style invocations verifier
-//  - semi-automatic code converter
-//
-package warnings // import "gopkg.in/warnings.v0"
-
-import (
-	"bytes"
-	"fmt"
-)
-
-// List holds a collection of warnings and optionally one fatal error.
-type List struct {
-	Warnings []error
-	Fatal    error
-}
-
-// Error implements the error interface.
-func (l List) Error() string {
-	b := bytes.NewBuffer(nil)
-	if l.Fatal != nil {
-		fmt.Fprintln(b, "fatal:")
-		fmt.Fprintln(b, l.Fatal)
-	}
-	switch len(l.Warnings) {
-	case 0:
-		// nop
-	case 1:
-		fmt.Fprintln(b, "warning:")
-	default:
-		fmt.Fprintln(b, "warnings:")
-	}
-	for _, err := range l.Warnings {
-		fmt.Fprintln(b, err)
-	}
-	return b.String()
-}
-
-// A Collector collects errors up to the first fatal error.
-type Collector struct {
-	// IsFatal distinguishes between warnings and fatal errors.
-	IsFatal func(error) bool
-	// FatalWithWarnings set to true means that a fatal error is returned as
-	// a List together with all warnings so far. The default behavior is to
-	// only return the fatal error and discard any warnings that have been
-	// collected.
-	FatalWithWarnings bool
-
-	l    List
-	done bool
-}
-
-// NewCollector returns a new Collector; it uses isFatal to distinguish between
-// warnings and fatal errors.
-func NewCollector(isFatal func(error) bool) *Collector {
-	return &Collector{IsFatal: isFatal}
-}
-
-// Collect collects a single error (warning or fatal). It returns nil if
-// collection can continue (only warnings so far), or otherwise the errors
-// collected. Collect mustn't be called after the first fatal error or after
-// Done has been called.
-func (c *Collector) Collect(err error) error {
-	if c.done {
-		panic("warnings.Collector already done")
-	}
-	if err == nil {
-		return nil
-	}
-	if c.IsFatal(err) {
-		c.done = true
-		c.l.Fatal = err
-	} else {
-		c.l.Warnings = append(c.l.Warnings, err)
-	}
-	if c.l.Fatal != nil {
-		return c.erorr()
-	}
-	return nil
-}
-
-// Done ends collection and returns the collected error(s).
-func (c *Collector) Done() error {
-	c.done = true
-	return c.erorr()
-}
-
-func (c *Collector) erorr() error {
-	if !c.FatalWithWarnings && c.l.Fatal != nil {
-		return c.l.Fatal
-	}
-	if c.l.Fatal == nil && len(c.l.Warnings) == 0 {
-		return nil
-	}
-	// Note that a single warning is also returned as a List. This is to make it
-	// easier to determine fatal-ness of the returned error.
-	return c.l
-}
-
-// FatalOnly returns the fatal error, if any, **in an error returned by a
-// Collector**. It returns nil if and only if err is nil or err is a List
-// with err.Fatal == nil.
-func FatalOnly(err error) error {
-	l, ok := err.(List)
-	if !ok {
-		return err
-	}
-	return l.Fatal
-}
-
-// WarningsOnly returns the warnings **in an error returned by a Collector**.
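// A minimal usage sketch (not taken from this file), assuming myfunc and
// isFatal are the helpers from the package example above; with
// FatalWithWarnings set, FatalOnly and WarningsOnly split an error returned
// by a Collector back into its fatal part and its collected warnings:
//
//  err := myfunc(params)
//  for _, w := range warnings.WarningsOnly(err) {
//      log.Printf("warning: %v", w)
//  }
//  if fatal := warnings.FatalOnly(err); fatal != nil {
//      return fatal
//  }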
-func WarningsOnly(err error) []error {
-	l, ok := err.(List)
-	if !ok {
-		return nil
-	}
-	return l.Warnings
-}
diff --git a/awsproviderlint/vendor/modules.txt b/awsproviderlint/vendor/modules.txt
index 365d7b6de34..fc43dfdcc79 100644
--- a/awsproviderlint/vendor/modules.txt
+++ b/awsproviderlint/vendor/modules.txt
@@ -200,67 +200,8 @@ github.com/bflad/tfproviderlint/xpasses/XS002
 github.com/bgentry/go-netrc/netrc
 # github.com/davecgh/go-spew v1.1.1
 github.com/davecgh/go-spew/spew
-# github.com/emirpasic/gods v1.12.0
-github.com/emirpasic/gods/containers
-github.com/emirpasic/gods/lists
-github.com/emirpasic/gods/lists/arraylist
-github.com/emirpasic/gods/trees
-github.com/emirpasic/gods/trees/binaryheap
-github.com/emirpasic/gods/utils
-# github.com/go-git/gcfg v1.5.0
-github.com/go-git/gcfg
-github.com/go-git/gcfg/scanner
-github.com/go-git/gcfg/token
-github.com/go-git/gcfg/types
-# github.com/go-git/go-billy/v5 v5.0.0
-github.com/go-git/go-billy/v5
-github.com/go-git/go-billy/v5/helper/chroot
-github.com/go-git/go-billy/v5/helper/polyfill
-github.com/go-git/go-billy/v5/osfs
-github.com/go-git/go-billy/v5/util
-# github.com/go-git/go-git/v5 v5.1.0
-github.com/go-git/go-git/v5
-github.com/go-git/go-git/v5/config
-github.com/go-git/go-git/v5/internal/revision
-github.com/go-git/go-git/v5/internal/url
-github.com/go-git/go-git/v5/plumbing
-github.com/go-git/go-git/v5/plumbing/cache
-github.com/go-git/go-git/v5/plumbing/color
-github.com/go-git/go-git/v5/plumbing/filemode
-github.com/go-git/go-git/v5/plumbing/format/config
-github.com/go-git/go-git/v5/plumbing/format/diff
-github.com/go-git/go-git/v5/plumbing/format/gitignore
-github.com/go-git/go-git/v5/plumbing/format/idxfile
-github.com/go-git/go-git/v5/plumbing/format/index
-github.com/go-git/go-git/v5/plumbing/format/objfile
-github.com/go-git/go-git/v5/plumbing/format/packfile
-github.com/go-git/go-git/v5/plumbing/format/pktline
-github.com/go-git/go-git/v5/plumbing/object
-github.com/go-git/go-git/v5/plumbing/protocol/packp
-github.com/go-git/go-git/v5/plumbing/protocol/packp/capability
-github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband
-github.com/go-git/go-git/v5/plumbing/revlist
-github.com/go-git/go-git/v5/plumbing/storer
-github.com/go-git/go-git/v5/plumbing/transport
-github.com/go-git/go-git/v5/plumbing/transport/client
-github.com/go-git/go-git/v5/plumbing/transport/file
-github.com/go-git/go-git/v5/plumbing/transport/git
-github.com/go-git/go-git/v5/plumbing/transport/http
-github.com/go-git/go-git/v5/plumbing/transport/internal/common
-github.com/go-git/go-git/v5/plumbing/transport/server
-github.com/go-git/go-git/v5/plumbing/transport/ssh
-github.com/go-git/go-git/v5/storage
-github.com/go-git/go-git/v5/storage/filesystem
-github.com/go-git/go-git/v5/storage/filesystem/dotgit
-github.com/go-git/go-git/v5/storage/memory
-github.com/go-git/go-git/v5/utils/binary
-github.com/go-git/go-git/v5/utils/diff
-github.com/go-git/go-git/v5/utils/ioutil
-github.com/go-git/go-git/v5/utils/merkletrie
-github.com/go-git/go-git/v5/utils/merkletrie/filesystem
-github.com/go-git/go-git/v5/utils/merkletrie/index
-github.com/go-git/go-git/v5/utils/merkletrie/internal/frame
-github.com/go-git/go-git/v5/utils/merkletrie/noder
+# github.com/fatih/color v1.7.0
+github.com/fatih/color
 # github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e
 github.com/golang/groupcache/lru
 # github.com/golang/protobuf v1.4.2
@@ -297,11 +238,11 @@ github.com/hashicorp/go-cty/cty/set
 # github.com/hashicorp/go-getter v1.5.0
 github.com/hashicorp/go-getter
 github.com/hashicorp/go-getter/helper/url
-# github.com/hashicorp/go-hclog v0.9.2
+# github.com/hashicorp/go-hclog v0.15.0
 github.com/hashicorp/go-hclog
 # github.com/hashicorp/go-multierror v1.0.0
 github.com/hashicorp/go-multierror
-# github.com/hashicorp/go-plugin v1.3.0
+# github.com/hashicorp/go-plugin v1.4.0
 github.com/hashicorp/go-plugin
 github.com/hashicorp/go-plugin/internal/plugin
 # github.com/hashicorp/go-safetemp v1.0.0
@@ -316,11 +257,11 @@ github.com/hashicorp/hcl/v2/ext/customdecode
 github.com/hashicorp/hcl/v2/hclsyntax
 # github.com/hashicorp/logutils v1.0.0
 github.com/hashicorp/logutils
-# github.com/hashicorp/terraform-exec v0.10.0
+# github.com/hashicorp/terraform-exec v0.12.0
 github.com/hashicorp/terraform-exec/internal/version
 github.com/hashicorp/terraform-exec/tfexec
 github.com/hashicorp/terraform-exec/tfinstall
-# github.com/hashicorp/terraform-json v0.5.0
+# github.com/hashicorp/terraform-json v0.8.0
 github.com/hashicorp/terraform-json
 # github.com/hashicorp/terraform-plugin-go v0.1.0
 github.com/hashicorp/terraform-plugin-go/tfprotov5
@@ -329,7 +270,7 @@ github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5
 github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto
 github.com/hashicorp/terraform-plugin-go/tfprotov5/server
 github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes
-# github.com/hashicorp/terraform-plugin-sdk/v2 v2.3.0
+# github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.0
 ## explicit
 github.com/hashicorp/terraform-plugin-sdk/v2/diag
 github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging
@@ -350,18 +291,16 @@ github.com/hashicorp/terraform-plugin-sdk/v2/plugin
 github.com/hashicorp/terraform-plugin-sdk/v2/terraform
 # github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d
 github.com/hashicorp/yamux
-# github.com/imdario/mergo v0.3.9
-github.com/imdario/mergo
-# github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99
-github.com/jbenet/go-context/io
 # github.com/jmespath/go-jmespath v0.4.0
 github.com/jmespath/go-jmespath
 # github.com/jstemmer/go-junit-report v0.9.1
 github.com/jstemmer/go-junit-report
 github.com/jstemmer/go-junit-report/formatter
 github.com/jstemmer/go-junit-report/parser
-# github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd
-github.com/kevinburke/ssh_config
+# github.com/mattn/go-colorable v0.1.4
+github.com/mattn/go-colorable
+# github.com/mattn/go-isatty v0.0.10
+github.com/mattn/go-isatty
 # github.com/mitchellh/copystructure v1.0.0
 github.com/mitchellh/copystructure
 # github.com/mitchellh/go-homedir v1.1.0
@@ -376,8 +315,6 @@ github.com/mitchellh/mapstructure
 github.com/mitchellh/reflectwalk
 # github.com/oklog/run v1.0.0
 github.com/oklog/run
-# github.com/sergi/go-diff v1.1.0
-github.com/sergi/go-diff/diffmatchpatch
 # github.com/ulikunitz/xz v0.5.8
 github.com/ulikunitz/xz
 github.com/ulikunitz/xz/internal/hash
@@ -386,8 +323,6 @@ github.com/ulikunitz/xz/lzma
 # github.com/vmihailenco/msgpack v4.0.4+incompatible
 github.com/vmihailenco/msgpack
 github.com/vmihailenco/msgpack/codes
-# github.com/xanzy/ssh-agent v0.2.1
-github.com/xanzy/ssh-agent
 # github.com/zclconf/go-cty v1.2.1
 github.com/zclconf/go-cty/cty
 github.com/zclconf/go-cty/cty/convert
@@ -414,24 +349,13 @@ go.opencensus.io/trace/internal
 go.opencensus.io/trace/propagation
 go.opencensus.io/trace/tracestate
 # golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
-golang.org/x/crypto/blowfish
 golang.org/x/crypto/cast5
-golang.org/x/crypto/chacha20
-golang.org/x/crypto/curve25519
-golang.org/x/crypto/ed25519
-golang.org/x/crypto/ed25519/internal/edwards25519
-golang.org/x/crypto/internal/subtle
 golang.org/x/crypto/openpgp
 golang.org/x/crypto/openpgp/armor
 golang.org/x/crypto/openpgp/elgamal
 golang.org/x/crypto/openpgp/errors
 golang.org/x/crypto/openpgp/packet
 golang.org/x/crypto/openpgp/s2k
-golang.org/x/crypto/poly1305
-golang.org/x/crypto/ssh
-golang.org/x/crypto/ssh/agent
-golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
-golang.org/x/crypto/ssh/knownhosts
 # golang.org/x/lint v0.0.0-20200302205851-738671d3881b
 golang.org/x/lint
 golang.org/x/lint/golint
@@ -445,9 +369,7 @@ golang.org/x/net/http/httpguts
 golang.org/x/net/http2
 golang.org/x/net/http2/hpack
 golang.org/x/net/idna
-golang.org/x/net/internal/socks
 golang.org/x/net/internal/timeseries
-golang.org/x/net/proxy
 golang.org/x/net/trace
 # golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
 golang.org/x/oauth2
@@ -456,10 +378,8 @@ golang.org/x/oauth2/internal
 golang.org/x/oauth2/jws
 golang.org/x/oauth2/jwt
 # golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f
-golang.org/x/sys/cpu
 golang.org/x/sys/internal/unsafeheader
 golang.org/x/sys/unix
-golang.org/x/sys/windows
 # golang.org/x/text v0.3.3
 golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
@@ -617,5 +537,3 @@ google.golang.org/protobuf/types/known/durationpb
 google.golang.org/protobuf/types/known/emptypb
 google.golang.org/protobuf/types/known/timestamppb
 google.golang.org/protobuf/types/pluginpb
-# gopkg.in/warnings.v0 v0.1.2
-gopkg.in/warnings.v0

From f8ba388f7044d0e66a3e3f85f11b6d106f675ec0 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Mon, 21 Dec 2020 17:49:45 -0500
Subject: [PATCH 0347/1212] r/aws_appmesh_route: Allow empty 'match' for
 'grpc_route'.

Acceptance test output:

$ make testacc TEST=./aws TESTARGS='-run=TestAccAWSAppmesh/Route/grpcRoute'
==> Checking that code complies with gofmt requirements...
TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccAWSAppmesh/Route/grpcRoute -timeout 120m
=== RUN   TestAccAWSAppmesh_serial
=== RUN   TestAccAWSAppmesh_serial/VirtualRouter
=== RUN   TestAccAWSAppmesh_serial/GatewayRoute
=== RUN   TestAccAWSAppmesh_serial/GatewayRoute/grpcRoute
=== RUN   TestAccAWSAppmesh_serial/Route
=== RUN   TestAccAWSAppmesh_serial/Route/grpcRoute
=== RUN   TestAccAWSAppmesh_serial/Route/grpcRouteEmptyMatch
=== RUN   TestAccAWSAppmesh_serial/Route/grpcRouteTimeout
--- PASS: TestAccAWSAppmesh_serial (131.91s)
    --- PASS: TestAccAWSAppmesh_serial/VirtualRouter (0.00s)
    --- PASS: TestAccAWSAppmesh_serial/GatewayRoute (32.18s)
        --- PASS: TestAccAWSAppmesh_serial/GatewayRoute/grpcRoute (32.18s)
    --- PASS: TestAccAWSAppmesh_serial/Route (99.73s)
        --- PASS: TestAccAWSAppmesh_serial/Route/grpcRoute (47.54s)
        --- PASS: TestAccAWSAppmesh_serial/Route/grpcRouteEmptyMatch (18.53s)
        --- PASS: TestAccAWSAppmesh_serial/Route/grpcRouteTimeout (33.66s)
PASS
ok      github.com/terraform-providers/terraform-provider-aws/aws      132.036s
---
 aws/resource_aws_appmesh_route.go      |  9 +--
 aws/resource_aws_appmesh_route_test.go | 75 +++++++++++++++++++++
 aws/resource_aws_appmesh_test.go       | 25 +++----
 aws/structure.go                       | 93 ++++++++++++++------
 4 files changed, 142 insertions(+), 60 deletions(-)

diff --git a/aws/resource_aws_appmesh_route.go b/aws/resource_aws_appmesh_route.go
index 5e02abfa7a4..77bf73c9ef5 100644
--- a/aws/resource_aws_appmesh_route.go
+++ b/aws/resource_aws_appmesh_route.go
@@ -103,8 +103,8 @@ func resourceAwsAppmeshRoute() *schema.Resource {
 
 						"match": {
 							Type:     schema.TypeList,
-							Required: true,
-							MinItems: 1,
+							Optional: true,
+							MinItems: 0,
 							MaxItems: 1,
 							Elem: &schema.Resource{
 								Schema: map[string]*schema.Schema{
@@ -196,8 +196,9 @@ func resourceAwsAppmeshRoute() *schema.Resource {
 									},
 
 									"service_name": {
-										Type:     schema.TypeString,
-										Optional: true,
+										Type:         schema.TypeString,
+										Optional:     true,
+										RequiredWith: []string{"spec.0.grpc_route.0.match.0.method_name"},
 									},
 								},
 							},
diff --git a/aws/resource_aws_appmesh_route_test.go b/aws/resource_aws_appmesh_route_test.go
index 2aa4180ec38..19750749a3a 100644
--- a/aws/resource_aws_appmesh_route_test.go
+++ b/aws/resource_aws_appmesh_route_test.go
@@ -368,6 +368,58 @@ func testAccAwsAppmeshRoute_grpcRouteTimeout(t *testing.T) {
 	})
 }
 
+func testAccAwsAppmeshRoute_grpcRouteEmptyMatch(t *testing.T) {
+	var r appmesh.RouteData
+	resourceName := "aws_appmesh_route.test"
+	meshName := acctest.RandomWithPrefix("tf-acc-test")
+	vrName := acctest.RandomWithPrefix("tf-acc-test")
+	vn1Name := acctest.RandomWithPrefix("tf-acc-test")
+	vn2Name := acctest.RandomWithPrefix("tf-acc-test")
+	rName := acctest.RandomWithPrefix("tf-acc-test")
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(appmesh.EndpointsID, t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAppmeshRouteDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAwsAppmeshRouteConfig_grpcRouteWithEmptyMatch(meshName, vrName, vn1Name, vn2Name, rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAppmeshRouteExists(resourceName, &r),
+					resource.TestCheckResourceAttr(resourceName, "name", rName),
+					resource.TestCheckResourceAttr(resourceName, "mesh_name", meshName),
+					testAccCheckResourceAttrAccountID(resourceName, "mesh_owner"),
+					resource.TestCheckResourceAttr(resourceName, "virtual_router_name", vrName),
+					resource.TestCheckResourceAttr(resourceName, "spec.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "spec.0.grpc_route.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "spec.0.grpc_route.0.action.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "spec.0.grpc_route.0.action.0.weighted_target.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "spec.0.grpc_route.0.match.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "spec.0.grpc_route.0.match.0.metadata.#", "0"),
+					resource.TestCheckResourceAttr(resourceName, "spec.0.grpc_route.0.match.0.method_name", ""),
+					resource.TestCheckResourceAttr(resourceName, "spec.0.grpc_route.0.match.0.service_name", ""),
+					resource.TestCheckResourceAttr(resourceName, "spec.0.grpc_route.0.retry_policy.#", "0"),
+					resource.TestCheckResourceAttr(resourceName, "spec.0.grpc_route.0.timeout.#", "0"),
+					resource.TestCheckResourceAttr(resourceName, "spec.0.http2_route.#", "0"),
+					resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.#", "0"),
+					resource.TestCheckResourceAttr(resourceName, "spec.0.tcp_route.#", "0"),
+					resource.TestCheckResourceAttr(resourceName, "tags.%", "0"),
+					resource.TestCheckResourceAttrSet(resourceName, "created_date"),
+					resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"),
+					testAccCheckResourceAttrAccountID(resourceName, "resource_owner"),
+					testAccCheckResourceAttrRegionalARN(resourceName, "arn", "appmesh", fmt.Sprintf("mesh/%s/virtualRouter/%s/route/%s", meshName, vrName, rName)),
+				),
+			},
+			{
+				ResourceName:      resourceName,
+				ImportStateIdFunc: testAccAwsAppmeshRouteImportStateIdFunc(resourceName),
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+}
+
 func testAccAwsAppmeshRoute_http2Route(t *testing.T) {
 	var r appmesh.RouteData
 	resourceName := "aws_appmesh_route.test"
@@ -1645,6 +1697,29 @@ resource "aws_appmesh_route" "test" {
 `, rName))
 }
 
+func testAccAwsAppmeshRouteConfig_grpcRouteWithEmptyMatch(meshName, vrName, vn1Name, vn2Name, rName string) string {
+	return composeConfig(testAccAppmeshRouteConfigBase(meshName, vrName, "grpc", vn1Name, vn2Name), fmt.Sprintf(`
+resource "aws_appmesh_route" "test" {
+  name                = %[1]q
+  mesh_name           = aws_appmesh_mesh.test.id
+  virtual_router_name = aws_appmesh_virtual_router.test.name
+
+  spec {
+    grpc_route {
+      match {}
+
+      action {
+        weighted_target {
+          virtual_node = aws_appmesh_virtual_node.foo.name
+          weight       = 100
+        }
+      }
+    }
+  }
+}
+`, rName))
+}
+
 func testAccAwsAppmeshRouteConfig_http2Route(meshName, vrName, vn1Name, vn2Name, rName string) string {
 	return composeConfig(testAccAppmeshRouteConfigBase(meshName, vrName, "http2", vn1Name, vn2Name), fmt.Sprintf(`
 resource "aws_appmesh_route" "test" {
diff --git a/aws/resource_aws_appmesh_test.go b/aws/resource_aws_appmesh_test.go
index db9afbaa07f..5fd7be34fee 100644
--- a/aws/resource_aws_appmesh_test.go
+++ b/aws/resource_aws_appmesh_test.go
@@ -20,18 +20,19 @@ func TestAccAWSAppmesh_serial(t *testing.T) {
 			"tags": testAccAwsAppmeshMesh_tags,
 		},
 		"Route": {
-			"grpcRoute":         testAccAwsAppmeshRoute_grpcRoute,
-			"grpcRouteTimeout":  testAccAwsAppmeshRoute_grpcRouteTimeout,
-			"http2Route":        testAccAwsAppmeshRoute_http2Route,
-			"http2RouteTimeout": testAccAwsAppmeshRoute_http2RouteTimeout,
-			"httpHeader":        testAccAwsAppmeshRoute_httpHeader,
-			"httpRetryPolicy":   testAccAwsAppmeshRoute_httpRetryPolicy,
-			"httpRoute":         testAccAwsAppmeshRoute_httpRoute,
-			"httpRouteTimeout":  testAccAwsAppmeshRoute_httpRouteTimeout,
-			"routePriority":     testAccAwsAppmeshRoute_routePriority,
-			"tcpRoute":          testAccAwsAppmeshRoute_tcpRoute,
-			"tcpRouteTimeout":   testAccAwsAppmeshRoute_tcpRouteTimeout,
-			"tags":              testAccAwsAppmeshRoute_tags,
+			"grpcRoute":           testAccAwsAppmeshRoute_grpcRoute,
+			"grpcRouteEmptyMatch": testAccAwsAppmeshRoute_grpcRouteEmptyMatch,
+			"grpcRouteTimeout":    testAccAwsAppmeshRoute_grpcRouteTimeout,
+			"http2Route":          testAccAwsAppmeshRoute_http2Route,
+			"http2RouteTimeout":   testAccAwsAppmeshRoute_http2RouteTimeout,
+			"httpHeader":          testAccAwsAppmeshRoute_httpHeader,
+			"httpRetryPolicy":     testAccAwsAppmeshRoute_httpRetryPolicy,
+			"httpRoute":           testAccAwsAppmeshRoute_httpRoute,
+			"httpRouteTimeout":    testAccAwsAppmeshRoute_httpRouteTimeout,
+			"routePriority":       testAccAwsAppmeshRoute_routePriority,
+			"tcpRoute":            testAccAwsAppmeshRoute_tcpRoute,
+			"tcpRouteTimeout":     testAccAwsAppmeshRoute_tcpRouteTimeout,
+			"tags":                testAccAwsAppmeshRoute_tags,
 		},
 		"VirtualGateway": {
 			"basic": testAccAwsAppmeshVirtualGateway_basic,
diff --git a/aws/structure.go b/aws/structure.go
index 10a7f881773..8eed4b36e95 100644
--- a/aws/structure.go
+++ b/aws/structure.go
@@ -5547,69 +5547,74 @@ func expandAppmeshGrpcRoute(vGrpcRoute []interface{}) *appmesh.GrpcRoute {
 		}
 	}
 
-	if vGrpcRouteMatch, ok := mGrpcRoute["match"].([]interface{}); ok && len(vGrpcRouteMatch) > 0 && vGrpcRouteMatch[0] != nil {
+	if vGrpcRouteMatch, ok := mGrpcRoute["match"].([]interface{}); ok {
 		grpcRouteMatch := &appmesh.GrpcRouteMatch{}
 
-		mGrpcRouteMatch := vGrpcRouteMatch[0].(map[string]interface{})
+		// Empty match is allowed.
+		// https://github.com/hashicorp/terraform-provider-aws/issues/16816.
 
-		if vMethodName, ok := mGrpcRouteMatch["method_name"].(string); ok && vMethodName != "" {
-			grpcRouteMatch.MethodName = aws.String(vMethodName)
-		}
-		if vServiceName, ok := mGrpcRouteMatch["service_name"].(string); ok && vServiceName != "" {
-			grpcRouteMatch.ServiceName = aws.String(vServiceName)
-		}
-
-		if vGrpcRouteMetadatas, ok := mGrpcRouteMatch["metadata"].(*schema.Set); ok && vGrpcRouteMetadatas.Len() > 0 {
-			grpcRouteMetadatas := []*appmesh.GrpcRouteMetadata{}
-
-			for _, vGrpcRouteMetadata := range vGrpcRouteMetadatas.List() {
-				grpcRouteMetadata := &appmesh.GrpcRouteMetadata{}
+		if len(vGrpcRouteMatch) > 0 && vGrpcRouteMatch[0] != nil {
+			mGrpcRouteMatch := vGrpcRouteMatch[0].(map[string]interface{})
 
-				mGrpcRouteMetadata := vGrpcRouteMetadata.(map[string]interface{})
+			if vMethodName, ok := mGrpcRouteMatch["method_name"].(string); ok && vMethodName != "" {
+				grpcRouteMatch.MethodName = aws.String(vMethodName)
+			}
+			if vServiceName, ok := mGrpcRouteMatch["service_name"].(string); ok && vServiceName != "" {
+				grpcRouteMatch.ServiceName = aws.String(vServiceName)
+			}
 
-				if vInvert, ok := mGrpcRouteMetadata["invert"].(bool); ok {
-					grpcRouteMetadata.Invert = aws.Bool(vInvert)
-				}
-				if vName, ok := mGrpcRouteMetadata["name"].(string); ok && vName != "" {
-					grpcRouteMetadata.Name = aws.String(vName)
-				}
+			if vGrpcRouteMetadatas, ok := mGrpcRouteMatch["metadata"].(*schema.Set); ok && vGrpcRouteMetadatas.Len() > 0 {
+				grpcRouteMetadatas := []*appmesh.GrpcRouteMetadata{}
 
-				if vMatch, ok := mGrpcRouteMetadata["match"].([]interface{}); ok && len(vMatch) > 0 && vMatch[0] != nil {
-					grpcRouteMetadata.Match = &appmesh.GrpcRouteMetadataMatchMethod{}
+				for _, vGrpcRouteMetadata := range vGrpcRouteMetadatas.List() {
+					grpcRouteMetadata := &appmesh.GrpcRouteMetadata{}
 
-					mMatch := vMatch[0].(map[string]interface{})
+					mGrpcRouteMetadata := vGrpcRouteMetadata.(map[string]interface{})
 
-					if vExact, ok := mMatch["exact"].(string); ok && vExact != "" {
-						grpcRouteMetadata.Match.Exact = aws.String(vExact)
+					if vInvert, ok := mGrpcRouteMetadata["invert"].(bool); ok {
+
grpcRouteMetadata.Invert = aws.Bool(vInvert) } - if vPrefix, ok := mMatch["prefix"].(string); ok && vPrefix != "" { - grpcRouteMetadata.Match.Prefix = aws.String(vPrefix) - } - if vRegex, ok := mMatch["regex"].(string); ok && vRegex != "" { - grpcRouteMetadata.Match.Regex = aws.String(vRegex) - } - if vSuffix, ok := mMatch["suffix"].(string); ok && vSuffix != "" { - grpcRouteMetadata.Match.Suffix = aws.String(vSuffix) + if vName, ok := mGrpcRouteMetadata["name"].(string); ok && vName != "" { + grpcRouteMetadata.Name = aws.String(vName) } - if vRange, ok := mMatch["range"].([]interface{}); ok && len(vRange) > 0 && vRange[0] != nil { - grpcRouteMetadata.Match.Range = &appmesh.MatchRange{} + if vMatch, ok := mGrpcRouteMetadata["match"].([]interface{}); ok && len(vMatch) > 0 && vMatch[0] != nil { + grpcRouteMetadata.Match = &appmesh.GrpcRouteMetadataMatchMethod{} - mRange := vRange[0].(map[string]interface{}) + mMatch := vMatch[0].(map[string]interface{}) - if vEnd, ok := mRange["end"].(int); ok && vEnd > 0 { - grpcRouteMetadata.Match.Range.End = aws.Int64(int64(vEnd)) + if vExact, ok := mMatch["exact"].(string); ok && vExact != "" { + grpcRouteMetadata.Match.Exact = aws.String(vExact) } - if vStart, ok := mRange["start"].(int); ok && vStart > 0 { - grpcRouteMetadata.Match.Range.Start = aws.Int64(int64(vStart)) + if vPrefix, ok := mMatch["prefix"].(string); ok && vPrefix != "" { + grpcRouteMetadata.Match.Prefix = aws.String(vPrefix) + } + if vRegex, ok := mMatch["regex"].(string); ok && vRegex != "" { + grpcRouteMetadata.Match.Regex = aws.String(vRegex) + } + if vSuffix, ok := mMatch["suffix"].(string); ok && vSuffix != "" { + grpcRouteMetadata.Match.Suffix = aws.String(vSuffix) + } + + if vRange, ok := mMatch["range"].([]interface{}); ok && len(vRange) > 0 && vRange[0] != nil { + grpcRouteMetadata.Match.Range = &appmesh.MatchRange{} + + mRange := vRange[0].(map[string]interface{}) + + if vEnd, ok := mRange["end"].(int); ok && vEnd > 0 { + grpcRouteMetadata.Match.Range.End = aws.Int64(int64(vEnd)) + } + if vStart, ok := mRange["start"].(int); ok && vStart > 0 { + grpcRouteMetadata.Match.Range.Start = aws.Int64(int64(vStart)) + } } } + + grpcRouteMetadatas = append(grpcRouteMetadatas, grpcRouteMetadata) } - grpcRouteMetadatas = append(grpcRouteMetadatas, grpcRouteMetadata) + grpcRouteMatch.Metadata = grpcRouteMetadatas } - - grpcRouteMatch.Metadata = grpcRouteMetadatas } grpcRoute.Match = grpcRouteMatch From 3576864a207b4eb88e36ecb0247e0ce110540e97 Mon Sep 17 00:00:00 2001 From: Wedge Jarrad Date: Mon, 21 Dec 2020 17:24:34 -0800 Subject: [PATCH 0348/1212] Include string "IMDSv2" in docs Include the acronym for Instance Metadata Service Version 2 along with the spelled out version to enhance findability for people searching for "IMDSv2". --- website/docs/r/instance.html.markdown | 2 +- website/docs/r/launch_template.html.markdown | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/r/instance.html.markdown b/website/docs/r/instance.html.markdown index 0d4203d2f2a..3ea56975041 100644 --- a/website/docs/r/instance.html.markdown +++ b/website/docs/r/instance.html.markdown @@ -206,7 +206,7 @@ Metadata options can be applied/modified to the EC2 Instance at any time. The `metadata_options` block supports the following: * `http_endpoint` - (Optional) Whether the metadata service is available. Can be `"enabled"` or `"disabled"`. (Default: `"enabled"`). 
-* `http_tokens` - (Optional) Whether or not the metadata service requires session tokens, also referred to as _Instance Metadata Service Version 2_. Can be `"optional"` or `"required"`. (Default: `"optional"`). +* `http_tokens` - (Optional) Whether or not the metadata service requires session tokens, also referred to as _Instance Metadata Service Version 2 (IMDSv2)_. Can be `"optional"` or `"required"`. (Default: `"optional"`). * `http_put_response_hop_limit` - (Optional) The desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel. Can be an integer from `1` to `64`. (Default: `1`). For more information, see the documentation on the [Instance Metadata Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). diff --git a/website/docs/r/launch_template.html.markdown b/website/docs/r/launch_template.html.markdown index aa509c5f637..3c84ef93478 100644 --- a/website/docs/r/launch_template.html.markdown +++ b/website/docs/r/launch_template.html.markdown @@ -272,7 +272,7 @@ The metadata options for the instances. The `metadata_options` block supports the following: * `http_endpoint` - (Optional) Whether the metadata service is available. Can be `"enabled"` or `"disabled"`. (Default: `"enabled"`). -* `http_tokens` - (Optional) Whether or not the metadata service requires session tokens, also referred to as _Instance Metadata Service Version 2_. Can be `"optional"` or `"required"`. (Default: `"optional"`). +* `http_tokens` - (Optional) Whether or not the metadata service requires session tokens, also referred to as _Instance Metadata Service Version 2 (IMDSv2)_. Can be `"optional"` or `"required"`. (Default: `"optional"`). * `http_put_response_hop_limit` - (Optional) The desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel. Can be an integer from `1` to `64`. (Default: `1`). For more information, see the documentation on the [Instance Metadata Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). 
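As a usage sketch to accompany the IMDSv2 wording above: the following minimal configuration requires session tokens, i.e. enforces IMDSv2, using only the `metadata_options` arguments documented in this patch. The resource name and AMI ID are placeholders, not taken from the patch.

```hcl
resource "aws_instance" "example" {
  ami           = "ami-0abcdef1234567890" # placeholder AMI ID
  instance_type = "t3.micro"

  metadata_options {
    http_endpoint               = "enabled"  # metadata service reachable (the default)
    http_tokens                 = "required" # session tokens mandatory, i.e. IMDSv2
    http_put_response_hop_limit = 1          # default hop limit
  }
}
```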
From 4f9e523672ec2508bfdf8e5e6ac55a843142041c Mon Sep 17 00:00:00 2001 From: Ian Mckay Date: Tue, 22 Dec 2020 14:58:29 +1100 Subject: [PATCH 0349/1212] Changelog typo --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4e1756591c5..c803f37cf96 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -106,7 +106,7 @@ BUG FIXES FEATURES -* **New Data Source:** `aws_glue_registry` ([#16418](https://github.com/hashicorp/terraform-provider-aws/issues/16418)) +* **New Resource:** `aws_glue_registry` ([#16418](https://github.com/hashicorp/terraform-provider-aws/issues/16418)) ENHANCEMENTS From 0041d0bf0925e63de73b688c0c297d17065a15ef Mon Sep 17 00:00:00 2001 From: angie pinilla Date: Tue, 22 Dec 2020 16:10:18 -0500 Subject: [PATCH 0350/1212] Update website/docs/r/ssm_maintenance_window_task.html.markdown --- website/docs/r/ssm_maintenance_window_task.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/ssm_maintenance_window_task.html.markdown b/website/docs/r/ssm_maintenance_window_task.html.markdown index e41674dc108..4978332f6e9 100644 --- a/website/docs/r/ssm_maintenance_window_task.html.markdown +++ b/website/docs/r/ssm_maintenance_window_task.html.markdown @@ -140,7 +140,7 @@ The following arguments are supported: * `window_id` - (Required) The Id of the maintenance window to register the task with. * `max_concurrency` - (Required) The maximum number of targets this task can be run for in parallel. * `max_errors` - (Required) The maximum number of errors allowed before this task stops being scheduled. -* `task_type` - (Required) The type of task being registered. Valid options are `AUTOMATION`, `LAMBDA`, `RUN_COMMAND` or `STEP_FUNCTIONS`. +* `task_type` - (Required) The type of task being registered. Valid values: `AUTOMATION`, `LAMBDA`, `RUN_COMMAND` or `STEP_FUNCTIONS`. * `task_arn` - (Required) The ARN of the task to execute. * `service_role_arn` - (Required) The role that should be assumed when executing the task. * `name` - (Optional) The name of the maintenance window task. 
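To illustrate the `task_type` values documented above, a minimal `RUN_COMMAND` registration might look like the sketch below. The referenced maintenance window and IAM role are assumed to be defined elsewhere, and `AWS-RunShellScript` is only an illustrative task ARN.

```hcl
resource "aws_ssm_maintenance_window_task" "example" {
  window_id        = aws_ssm_maintenance_window.example.id # assumed to exist elsewhere
  task_type        = "RUN_COMMAND"                         # or AUTOMATION, LAMBDA, STEP_FUNCTIONS
  task_arn         = "AWS-RunShellScript"                  # illustrative SSM document name
  service_role_arn = aws_iam_role.example.arn              # assumed to exist elsewhere
  max_concurrency  = "2"
  max_errors       = "1"
}
```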
From bebe732d7185320b303d0d14d3c19551213c1ba2 Mon Sep 17 00:00:00 2001 From: angie pinilla Date: Tue, 22 Dec 2020 16:39:15 -0500 Subject: [PATCH 0351/1212] Update CHANGELOG for #16608 --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c803f37cf96..627343b8de3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +## 3.23.0 (Unreleased) + +ENHANCEMENTS + +* resource/aws_kinesis_stream: Update `retention_period` argument plan-time validation to include up to 8760 hours [GH-16608] + ## 3.22.0 (December 18, 2020) FEATURES From 8123e8e81d399c416d2485013ab13e58c4ee2168 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Tue, 22 Dec 2020 22:10:02 -0500 Subject: [PATCH 0352/1212] remove ForceNew attribute from stateful_rule field and its children --- ...resource_aws_networkfirewall_rule_group.go | 22 +- ...rce_aws_networkfirewall_rule_group_test.go | 217 ++++++++++++++++++ 2 files changed, 224 insertions(+), 15 deletions(-) diff --git a/aws/resource_aws_networkfirewall_rule_group.go b/aws/resource_aws_networkfirewall_rule_group.go index beb601fbf30..5e78d010a61 100644 --- a/aws/resource_aws_networkfirewall_rule_group.go +++ b/aws/resource_aws_networkfirewall_rule_group.go @@ -170,26 +170,22 @@ func resourceAwsNetworkFirewallRuleGroup() *schema.Resource { "stateful_rule": { Type: schema.TypeSet, Optional: true, - ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "action": { Type: schema.TypeString, Required: true, - ForceNew: true, ValidateFunc: validation.StringInSlice(networkfirewall.StatefulAction_Values(), false), }, "header": { Type: schema.TypeList, Required: true, MaxItems: 1, - ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "destination": { Type: schema.TypeString, Required: true, - ForceNew: true, ValidateFunc: validation.Any( validateIpv4CIDRNetworkAddress, validation.StringInSlice([]string{networkfirewall.StatefulRuleDirectionAny}, false), @@ -198,24 +194,20 @@ func resourceAwsNetworkFirewallRuleGroup() *schema.Resource { "destination_port": { Type: schema.TypeString, Required: true, - ForceNew: true, }, "direction": { Type: schema.TypeString, Required: true, - ForceNew: true, ValidateFunc: validation.StringInSlice(networkfirewall.StatefulRuleDirection_Values(), false), }, "protocol": { Type: schema.TypeString, Required: true, - ForceNew: true, ValidateFunc: validation.StringInSlice(networkfirewall.StatefulRuleProtocol_Values(), false), }, "source": { Type: schema.TypeString, Required: true, - ForceNew: true, ValidateFunc: validation.Any( validateIpv4CIDRNetworkAddress, validation.StringInSlice([]string{networkfirewall.StatefulRuleDirectionAny}, false), @@ -224,7 +216,6 @@ func resourceAwsNetworkFirewallRuleGroup() *schema.Resource { "source_port": { Type: schema.TypeString, Required: true, - ForceNew: true, }, }, }, @@ -498,19 +489,20 @@ func resourceAwsNetworkFirewallRuleGroupUpdate(ctx context.Context, d *schema.Re log.Printf("[DEBUG] Updating NetworkFirewall Rule Group %s", arn) if d.HasChanges("description", "rule_group", "rules", "type") { + // Provide updated object with the currently configured fields input := &networkfirewall.UpdateRuleGroupInput{ RuleGroupArn: aws.String(arn), Type: aws.String(d.Get("type").(string)), UpdateToken: aws.String(d.Get("update_token").(string)), } - if d.HasChange("description") { - input.Description = aws.String(d.Get("description").(string)) + if v, ok := d.GetOk("description"); ok { + input.Description = aws.String(v.(string)) } - if 
d.HasChange("rule_group") { - input.RuleGroup = expandNetworkFirewallRuleGroup(d.Get("rule_group").([]interface{})) + if v, ok := d.GetOk("rule_group"); ok { + input.RuleGroup = expandNetworkFirewallRuleGroup(v.([]interface{})) } - if d.HasChange("rules") { - input.Rules = aws.String(d.Get("rules").(string)) + if v, ok := d.GetOk("rules"); ok { + input.Rules = aws.String(v.(string)) } _, err := conn.UpdateRuleGroupWithContext(ctx, input) diff --git a/aws/resource_aws_networkfirewall_rule_group_test.go b/aws/resource_aws_networkfirewall_rule_group_test.go index cee003bf18d..5a03f00387e 100644 --- a/aws/resource_aws_networkfirewall_rule_group_test.go +++ b/aws/resource_aws_networkfirewall_rule_group_test.go @@ -145,6 +145,9 @@ func TestAccAwsNetworkFirewallRuleGroup_basic_statefulRule(t *testing.T) { "header.0.source_port": "53", "rule_option.#": "1", }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule_group.0.rules_source.0.stateful_rule.*.rule_option.*", map[string]string{ + "keyword": "sid:1", + }), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, @@ -401,6 +404,8 @@ func TestAccAwsNetworkFirewallRuleGroup_rulesSourceAndRuleVariables(t *testing.T }) } +// TestAccAwsNetworkFirewallRuleGroup_updateStatefulRule validates +// in-place updates to a single stateful_rule configuration block func TestAccAwsNetworkFirewallRuleGroup_updateStatefulRule(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_networkfirewall_rule_group.test" @@ -443,6 +448,147 @@ func TestAccAwsNetworkFirewallRuleGroup_updateStatefulRule(t *testing.T) { }) } +// TestAccAwsNetworkFirewallRuleGroup_updateMultipleStatefulRules validates +// in-place updates to stateful_rule configuration blocks +// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16868 +func TestAccAwsNetworkFirewallRuleGroup_updateMultipleStatefulRules(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_networkfirewall_rule_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAwsNetworkFirewall(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsNetworkFirewallRuleGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccNetworkFirewallRuleGroup_basic_statefulRule(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsNetworkFirewallRuleGroupExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.#", "1"), + ), + }, + { + Config: testAccNetworkFirewallRuleGroup_multipleStatefulRules(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsNetworkFirewallRuleGroupExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule_group.0.rules_source.0.stateful_rule.*", map[string]string{ + "action": networkfirewall.StatefulActionPass, + "header.#": "1", + "header.0.destination": "124.1.1.24/32", + "header.0.destination_port": "53", + "header.0.direction": networkfirewall.StatefulRuleDirectionAny, + "header.0.protocol": networkfirewall.StatefulRuleProtocolTcp, + "header.0.source": "1.2.3.4/32", + "header.0.source_port": "53", + "rule_option.#": "1", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule_group.0.rules_source.0.stateful_rule.*", map[string]string{ + "action": networkfirewall.StatefulActionAlert, + "header.#": "1", + 
"header.0.destination": networkfirewall.StatefulRuleDirectionAny, + "header.0.destination_port": networkfirewall.StatefulRuleDirectionAny, + "header.0.direction": networkfirewall.StatefulRuleDirectionAny, + "header.0.protocol": networkfirewall.StatefulRuleProtocolIp, + "header.0.source": networkfirewall.StatefulRuleDirectionAny, + "header.0.source_port": networkfirewall.StatefulRuleDirectionAny, + "rule_option.#": "1", + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccNetworkFirewallRuleGroup_updateStatefulRule(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsNetworkFirewallRuleGroupExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule_group.0.rules_source.0.stateful_rule.*", map[string]string{ + "action": networkfirewall.StatefulActionDrop, + "header.#": "1", + "header.0.destination": "1.2.3.4/32", + "header.0.destination_port": "1001", + "header.0.direction": networkfirewall.StatefulRuleDirectionForward, + "header.0.protocol": networkfirewall.StatefulRuleProtocolIp, + "header.0.source": "124.1.1.24/32", + "header.0.source_port": "1001", + "rule_option.#": "1", + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +// TestAccAwsNetworkFirewallRuleGroup_statefulRule_action validates in-place +// updates to the "action" argument within 1 stateful_rule configuration block +// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16868 +func TestAccAwsNetworkFirewallRuleGroup_statefulRule_action(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_networkfirewall_rule_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAwsNetworkFirewall(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsNetworkFirewallRuleGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccNetworkFirewallRuleGroup_statefulRule_action(rName, networkfirewall.StatefulActionAlert), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsNetworkFirewallRuleGroupExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule_group.0.rules_source.0.stateful_rule.*", map[string]string{ + "action": networkfirewall.StatefulActionAlert, + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccNetworkFirewallRuleGroup_statefulRule_action(rName, networkfirewall.StatefulActionPass), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsNetworkFirewallRuleGroupExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule_group.0.rules_source.0.stateful_rule.*", map[string]string{ + "action": networkfirewall.StatefulActionPass, + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccNetworkFirewallRuleGroup_statefulRule_action(rName, networkfirewall.StatefulActionDrop), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsNetworkFirewallRuleGroupExists(resourceName), + resource.TestCheckResourceAttr(resourceName, 
"rule_group.0.rules_source.0.stateful_rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule_group.0.rules_source.0.stateful_rule.*", map[string]string{ + "action": networkfirewall.StatefulActionDrop, + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16470 func TestAccAwsNetworkFirewallRuleGroup_statefulRule_header(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") @@ -794,6 +940,35 @@ resource "aws_networkfirewall_rule_group" "test" { `, rName) } +func testAccNetworkFirewallRuleGroup_statefulRule_action(rName, action string) string { + return fmt.Sprintf(` +resource "aws_networkfirewall_rule_group" "test" { + capacity = 100 + name = %[1]q + description = %[1]q + type = "STATEFUL" + rule_group { + rules_source { + stateful_rule { + action = %q + header { + destination = "124.1.1.24/32" + destination_port = 53 + direction = "ANY" + protocol = "TCP" + source = "1.2.3.4/32" + source_port = 53 + } + rule_option { + keyword = "sid:1" + } + } + } + } +} +`, rName, action) +} + func testAccNetworkFirewallRuleGroup_statefulRule_header(rName, dstPort, srcPort string) string { return fmt.Sprintf(` resource "aws_networkfirewall_rule_group" "test" { @@ -841,10 +1016,52 @@ resource "aws_networkfirewall_rule_group" "test" { source = "124.1.1.24/32" source_port = 1001 } + rule_option { + keyword = "sid:1;rev:2" + } + } + } + } +} +`, rName) +} + +func testAccNetworkFirewallRuleGroup_multipleStatefulRules(rName string) string { + return fmt.Sprintf(` +resource "aws_networkfirewall_rule_group" "test" { + capacity = 100 + name = %[1]q + type = "STATEFUL" + rule_group { + rules_source { + stateful_rule { + action = "PASS" + header { + destination = "124.1.1.24/32" + destination_port = 53 + direction = "ANY" + protocol = "TCP" + source = "1.2.3.4/32" + source_port = 53 + } rule_option { keyword = "sid:1" } } + stateful_rule { + action = "ALERT" + header { + destination = "ANY" + destination_port = "ANY" + direction = "ANY" + protocol = "IP" + source = "ANY" + source_port = "ANY" + } + rule_option { + keyword = "sid:2" + } + } } } } From 6e8519654ac51be92bb20f95c442bf8e0cd77183 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Tue, 22 Dec 2020 23:30:35 -0500 Subject: [PATCH 0353/1212] add validation check to final snapshot identifier --- aws/resource_aws_db_instance.go | 20 ++++++-------------- aws/resource_aws_db_instance_test.go | 2 +- website/docs/r/db_instance.html.markdown | 2 +- 3 files changed, 8 insertions(+), 16 deletions(-) diff --git a/aws/resource_aws_db_instance.go b/aws/resource_aws_db_instance.go index e48cc00f692..1eb5453ae13 100644 --- a/aws/resource_aws_db_instance.go +++ b/aws/resource_aws_db_instance.go @@ -263,20 +263,12 @@ func resourceAwsDbInstance() *schema.Resource { "final_snapshot_identifier": { Type: schema.TypeString, Optional: true, - ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { - value := v.(string) - if !regexp.MustCompile(`^[0-9A-Za-z-]+$`).MatchString(value) { - es = append(es, fmt.Errorf( - "only alphanumeric characters and hyphens allowed in %q", k)) - } - if regexp.MustCompile(`--`).MatchString(value) { - es = append(es, fmt.Errorf("%q cannot contain two consecutive hyphens", k)) - } - if regexp.MustCompile(`-$`).MatchString(value) { - es = append(es, fmt.Errorf("%q cannot end in a hyphen", k)) - } - return - }, + ValidateFunc: validation.All( + 
validation.StringMatch(regexp.MustCompile(`^[A-Za-z]`), "must begin with alphabetic character"), + validation.StringMatch(regexp.MustCompile(`^[0-9A-Za-z-]+$`), "must only contain alphanumeric characters and hyphens"), + validation.StringDoesNotMatch(regexp.MustCompile(`--`), "cannot contain two consecutive hyphens"), + validation.StringDoesNotMatch(regexp.MustCompile(`-$`), "cannot end in a hyphen"), + ), }, "restore_to_point_in_time": { diff --git a/aws/resource_aws_db_instance_test.go b/aws/resource_aws_db_instance_test.go index fc086b55d1b..599c0f32009 100644 --- a/aws/resource_aws_db_instance_test.go +++ b/aws/resource_aws_db_instance_test.go @@ -442,7 +442,7 @@ func TestAccAWSDBInstance_FinalSnapshotIdentifier(t *testing.T) { PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, // testAccCheckAWSDBInstanceSnapshot verifies a database snapshot is - // created, and subequently deletes it + // created, and subsequently deletes it CheckDestroy: testAccCheckAWSDBInstanceSnapshot, Steps: []resource.TestStep{ { diff --git a/website/docs/r/db_instance.html.markdown b/website/docs/r/db_instance.html.markdown index 9030a8d2e6b..b1494a295e8 100644 --- a/website/docs/r/db_instance.html.markdown +++ b/website/docs/r/db_instance.html.markdown @@ -120,7 +120,7 @@ For supported values, see the EngineVersion parameter in [API action CreateDBIns Note that for Amazon Aurora instances the engine version must match the [DB cluster](/docs/providers/aws/r/rds_cluster.html)'s engine version'. * `final_snapshot_identifier` - (Optional) The name of your final DB snapshot when this DB instance is deleted. Must be provided if `skip_final_snapshot` is -set to `false`. +set to `false`. The value must begin with a letter, must contain alphanumeric characters and hyphens, and must not end with a hyphen or contain two consecutive hyphens. Must not be provided when deleting a read replica. * `iam_database_authentication_enabled` - (Optional) Specifies whether or mappings of AWS Identity and Access Management (IAM) accounts to database accounts is enabled. 
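To make the effect of the tightened plan-time validation concrete, here is a hedged configuration sketch; the identifier values are invented for illustration, and the other required `aws_db_instance` arguments are elided.

```hcl
resource "aws_db_instance" "example" {
  # ... engine, instance_class, allocated_storage, etc. elided ...

  skip_final_snapshot       = false
  final_snapshot_identifier = "example-final-snapshot" # valid: starts with a letter, no "--", no trailing "-"

  # Each of these would now fail plan-time validation:
  # final_snapshot_identifier = "1-starts-with-digit" # must begin with an alphabetic character
  # final_snapshot_identifier = "double--hyphen"      # cannot contain two consecutive hyphens
  # final_snapshot_identifier = "trailing-hyphen-"    # cannot end in a hyphen
}
```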
From 264395fbe90aec020754ff66eb884f10db158594 Mon Sep 17 00:00:00 2001 From: gunadhya <6939749+gunadhya@users.noreply.github.com> Date: Mon, 28 Dec 2020 16:51:02 +0530 Subject: [PATCH 0354/1212] fix in resource aws_msk_cluster enhanced_monitoring and client_broker using the AWS Go SDK _Values() --- aws/resource_aws_msk_cluster.go | 26 +++++++++----------------- tools/go.mod | 1 + tools/go.sum | 2 ++ 3 files changed, 12 insertions(+), 17 deletions(-) diff --git a/aws/resource_aws_msk_cluster.go b/aws/resource_aws_msk_cluster.go index e7805a4e8fe..69bbecddb79 100644 --- a/aws/resource_aws_msk_cluster.go +++ b/aws/resource_aws_msk_cluster.go @@ -190,15 +190,11 @@ func resourceAwsMskCluster() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "client_broker": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: kafka.ClientBrokerTls, - ValidateFunc: validation.StringInSlice([]string{ - kafka.ClientBrokerPlaintext, - kafka.ClientBrokerTlsPlaintext, - kafka.ClientBrokerTls, - }, false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: kafka.ClientBrokerTls, + ValidateFunc: validation.StringInSlice(kafka.ClientBroker_Values(), false), }, "in_cluster": { Type: schema.TypeBool, @@ -213,14 +209,10 @@ func resourceAwsMskCluster() *schema.Resource { }, }, "enhanced_monitoring": { - Type: schema.TypeString, - Optional: true, - Default: kafka.EnhancedMonitoringDefault, - ValidateFunc: validation.StringInSlice([]string{ - kafka.EnhancedMonitoringDefault, - kafka.EnhancedMonitoringPerBroker, - kafka.EnhancedMonitoringPerTopicPerBroker, - }, true), + Type: schema.TypeString, + Optional: true, + Default: kafka.EnhancedMonitoringDefault, + ValidateFunc: validation.StringInSlice(kafka.EnhancedMonitoring_Values(), true), }, "kafka_version": { Type: schema.TypeString, diff --git a/tools/go.mod b/tools/go.mod index 748e315d1bb..0440c631e81 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -7,6 +7,7 @@ require ( github.com/client9/misspell v0.3.4 github.com/golangci/golangci-lint v1.33.0 github.com/katbyte/terrafmt v0.2.1-0.20200913185704-5ff4421407b4 + github.com/pavius/impi v0.0.3 // indirect github.com/terraform-linters/tflint v0.20.3 ) diff --git a/tools/go.sum b/tools/go.sum index 1384a0fce44..76d6b9d5d22 100644 --- a/tools/go.sum +++ b/tools/go.sum @@ -640,6 +640,8 @@ github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pavius/impi v0.0.3 h1:DND6MzU+BLABhOZXbELR3FU8b+zDgcq4dOCNLhiTYuI= +github.com/pavius/impi v0.0.3/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= From 41cc8bbc697cf479ff5b1d458a585ed4dded7ba7 Mon Sep 17 00:00:00 2001 From: Andrew Konrath Date: Tue, 29 Dec 2020 21:12:57 -0600 Subject: [PATCH 0355/1212] Fix autoscaling_group instance_warmup documentation. 
--- website/docs/r/autoscaling_group.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/autoscaling_group.html.markdown b/website/docs/r/autoscaling_group.html.markdown index bfceea975dd..9ce32a45605 100644 --- a/website/docs/r/autoscaling_group.html.markdown +++ b/website/docs/r/autoscaling_group.html.markdown @@ -402,7 +402,7 @@ This configuration block supports the following: * `strategy` - (Required) The strategy to use for instance refresh. The only allowed value is `Rolling`. See [StartInstanceRefresh Action](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_StartInstanceRefresh.html#API_StartInstanceRefresh_RequestParameters) for more information. * `preferences` - (Optional) Override default parameters for Instance Refresh. - * `instance_warmup_seconds` - (Optional) The number of seconds until a newly launched instance is configured and ready to use. Default behavior is to use the Auto Scaling Group's health check grace period. + * `instance_warmup` - (Optional) The number of seconds until a newly launched instance is configured and ready to use. Default behavior is to use the Auto Scaling Group's health check grace period. * `min_healthy_percentage` - (Optional) The amount of capacity in the Auto Scaling group that must remain healthy during an instance refresh to allow the operation to continue, as a percentage of the desired capacity of the Auto Scaling group. Defaults to `90`. * `triggers` - (Optional) Set of additional property names that will trigger an Instance Refresh. A refresh will always be triggered by a change in any of `launch_configuration`, `launch_template`, or `mixed_instances_policy`. From 28aae4fcf8ed101e06dc25465a1142b1964a169d Mon Sep 17 00:00:00 2001 From: Rob H Date: Thu, 1 Aug 2019 22:48:54 +0100 Subject: [PATCH 0356/1212] Initial commit for FMS policy --- aws/provider.go | 1 + aws/resource_aws_fms_policy.go | 401 ++++++++++++++++++++++++++++ aws/resource_aws_fms_policy_test.go | 247 +++++++++++++++++ 3 files changed, 649 insertions(+) create mode 100644 aws/resource_aws_fms_policy.go create mode 100644 aws/resource_aws_fms_policy_test.go diff --git a/aws/provider.go b/aws/provider.go index 8e3f901dee5..0e14ecad532 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -661,6 +661,7 @@ func Provider() *schema.Provider { "aws_fsx_lustre_file_system": resourceAwsFsxLustreFileSystem(), "aws_fsx_windows_file_system": resourceAwsFsxWindowsFileSystem(), "aws_fms_admin_account": resourceAwsFmsAdminAccount(), + "aws_fms_policy": resourceAwsFmsPolicy(), "aws_gamelift_alias": resourceAwsGameliftAlias(), "aws_gamelift_build": resourceAwsGameliftBuild(), "aws_gamelift_fleet": resourceAwsGameliftFleet(), diff --git a/aws/resource_aws_fms_policy.go b/aws/resource_aws_fms_policy.go new file mode 100644 index 00000000000..36719078238 --- /dev/null +++ b/aws/resource_aws_fms_policy.go @@ -0,0 +1,401 @@ +package aws + +import ( + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fms" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "log" +) + +func resourceAwsFmsPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsFmsPolicyCreate, + Read: resourceAwsFmsPolicyRead, + Update: resourceAwsFmsPolicyUpdate, + Delete: resourceAwsFmsPolicyDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + 
Required: true,
+				ForceNew: true,
+			},
+
+			"exclude_resource_tags": {
+				Type:     schema.TypeBool,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"exclude_map": {
+				Type:     schema.TypeSet,
+				Optional: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"account": {
+							Type:     schema.TypeList,
+							Optional: true,
+							Elem: &schema.Schema{
+								Type:         schema.TypeString,
+								ValidateFunc: validation.StringLenBetween(12, 12),
+							},
+						},
+					},
+				},
+			},
+
+			"include_map": {
+				Type:     schema.TypeSet,
+				Optional: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"account": {
+							Type:     schema.TypeList,
+							Optional: true,
+							Elem: &schema.Schema{
+								Type:         schema.TypeString,
+								ValidateFunc: validation.StringLenBetween(12, 12),
+							},
+						},
+					},
+				},
+			},
+
+			"remediation_enabled": {
+				Type:     schema.TypeBool,
+				Optional: true,
+			},
+
+			"resource_type_list": {
+				Type:     schema.TypeSet,
+				Required: true,
+				ForceNew: true,
+				Elem: &schema.Schema{
+					Type:         schema.TypeString,
+					ValidateFunc: validation.StringInSlice([]string{"AWS::ApiGateway::Stage", "AWS::ElasticLoadBalancingV2::LoadBalancer", "AWS::CloudFront::Distribution"}, false),
+				},
+				Set: schema.HashString,
+			},
+
+			"policy_update_token": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"resource_tags": tagsSchema(),
+
+			"security_service_policy_data": {
+				Type:     schema.TypeSet,
+				Required: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"managed_service_data": {
+							Type:     schema.TypeSet,
+							Optional: true,
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"type": {
+										Optional: true,
+										Type:     schema.TypeString,
+									},
+									"rule_groups": {
+										Type:     schema.TypeSet,
+										Optional: true,
+										Elem: &schema.Resource{
+											Schema: map[string]*schema.Schema{
+												"id": {
+													Type:     schema.TypeString,
+													Optional: true,
+												},
+												"override_action": {
+													Type:     schema.TypeMap,
+													Optional: true,
+													Default:  map[string]interface{}{},
+													Elem: &schema.Resource{
+														Schema: map[string]*schema.Schema{
+															"type": {
+																Type:         schema.TypeString,
+																Optional:     true,
+																Default:      "NONE",
+																ValidateFunc: validation.StringInSlice([]string{"COUNT", "NONE"}, false),
+															},
+														},
+													},
+												},
+											},
+										},
+									},
+									"default_action": {
+										Type:     schema.TypeMap,
+										Optional: true,
+										Elem: &schema.Resource{
+											Schema: map[string]*schema.Schema{
+												"type": {
+													Type:    schema.TypeString,
+													Default: "BLOCK",
+												},
+											},
+										},
+									},
+								},
+							},
+						},
+						"type": {
+							Type:         schema.TypeString,
+							Required:     true,
+							ForceNew:     true,
+							ValidateFunc: validation.StringInSlice([]string{"WAF", "SHIELD_ADVANCED"}, false),
+						},
+					},
+				},
+			},
+
+			"arn": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceAwsFmsPolicyCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).fmsconn
+
+	fmsPolicy := &fms.Policy{
+		PolicyName:          aws.String(d.Get("name").(string)),
+		RemediationEnabled:  aws.Bool(d.Get("remediation_enabled").(bool)),
+		ResourceType:        aws.String("ResourceTypeList"),
+		ResourceTypeList:    expandStringList(d.Get("resource_type_list").(*schema.Set).List()),
+		ExcludeResourceTags: aws.Bool(d.Get("exclude_resource_tags").(bool)),
+	}
+
+	if v, ok := d.GetOk("security_service_policy_data"); ok {
+		fmsPolicy.SecurityServicePolicyData = flattenAwsFmsManagedSecurityData(v.(*schema.Set))
+	}
+
+	if rTags, tagsOk := d.GetOk("resource_tags"); tagsOk {
+		fmsPolicy.ResourceTags = buildResourceTags(rTags)
+	}
+
+	if v, ok := d.GetOk("include_map"); ok {
+		fmsPolicy.IncludeMap = buildAccountList(v.(*schema.Set))
+	}
+
+	if v, ok := d.GetOk("exclude_map"); ok {
+
fmsPolicy.ExcludeMap = buildAccountList(v.(*schema.Set)) + } + + params := &fms.PutPolicyInput{ + Policy: fmsPolicy, + } + + var resp *fms.PutPolicyOutput + var err error + + resp, err = conn.PutPolicy(params) + + if err != nil { + return fmt.Errorf("Creating Policy Failed: %s", err.Error()) + } + + log.Printf("[DEBUG] Printing ID: %s", aws.StringValue(resp.Policy.PolicyId)) + + d.SetId(aws.StringValue(resp.Policy.PolicyId)) + d.Set("arn", aws.StringValue(resp.PolicyArn)) + d.Set("policy_update_token", aws.StringValue(resp.Policy.PolicyUpdateToken)) + d.Set("exclude_resource_tags", aws.BoolValue(resp.Policy.ExcludeResourceTags)) + + return resourceAwsFmsPolicyRead(d, meta) +} + +func resourceAwsFmsPolicyRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fmsconn + + var resp *fms.GetPolicyOutput + var req = &fms.GetPolicyInput{ + PolicyId: aws.String(d.Id()), + } + + resp, err := conn.GetPolicy(req) + + if err != nil { + if isAWSErr(err, fms.ErrCodeResourceNotFoundException, "") { + log.Printf("[WARN] FMS Policy (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + return err + } + + d.Set("arn", aws.StringValue(resp.PolicyArn)) + d.Set("policy_update_token", aws.StringValue(resp.Policy.PolicyUpdateToken)) + d.Set("exclude_resource_tags", aws.BoolValue(resp.Policy.ExcludeResourceTags)) + + return nil +} + +func resourceAwsFmsPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fmsconn + + fmsPolicy := &fms.Policy{ + PolicyName: aws.String(d.Get("name").(string)), + PolicyId: aws.String(d.Id()), + PolicyUpdateToken: aws.String(d.Get("policy_update_token").(string)), + RemediationEnabled: aws.Bool(d.Get("remediation_enabled").(bool)), + ResourceType: aws.String("ResourceTypeList"), + ResourceTypeList: expandStringList(d.Get("resource_type_list").(*schema.Set).List()), + ExcludeResourceTags: aws.Bool(d.Get("exclude_resource_tags").(bool)), + } + + requestUpdate := false + + if d.HasChange("exclude_map") { + fmsPolicy.ExcludeMap = buildAccountList(d.Get("exclude_map").(*schema.Set)) + requestUpdate = true + } + + if d.HasChange("include_map") { + fmsPolicy.ExcludeMap = buildAccountList(d.Get("include_map").(*schema.Set)) + requestUpdate = true + } + + if d.HasChange("resource_tags") { + fmsPolicy.ResourceTags = buildResourceTags(d.Get("resource_tags")) + requestUpdate = true + } + + if requestUpdate { + fmsPolicy.SecurityServicePolicyData = flattenAwsFmsManagedSecurityData(d.Get("security_service_policy_data").(*schema.Set)) + + params := &fms.PutPolicyInput{Policy: fmsPolicy} + _, err := conn.PutPolicy(params) + + if err != nil { + return fmt.Errorf("Error modifying FMS Policy Rule: %s", err) + } + } + + return resourceAwsFmsPolicyRead(d, meta) +} + +func resourceAwsFmsPolicyDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fmsconn + log.Printf("[DEBUG] Delete FMS Policy: %s", d.Id()) + + _, err := conn.DeletePolicy(&fms.DeletePolicyInput{ + PolicyId: aws.String(d.Id()), + DeleteAllPolicyResources: aws.Bool(true), + }) + + if isAWSErr(err, fms.ErrCodeResourceNotFoundException, "") { + return nil + } + + if err != nil { + return fmt.Errorf("error deleting FMS Policy (%s): %s", d.Id(), err) + } + + return nil +} + +func buildAccountList(set *schema.Set) map[string][]*string { + var accountList = make(map[string][]*string) + + for _, account := range set.List() { + l := account.(map[string]interface{}) + y := l["account"].([]interface{}) + + for _, a := range y { + 
accountList["ACCOUNT"] = append(accountList["ACCOUNT"], aws.String(a.(string))) + } + } + + return accountList +} + +func constructManagedServiceData(m []interface{}) map[string]interface{} { + var msd map[string]interface{} + + for _, data := range m { + m := data.(map[string]interface{}) + + rgl := m["rule_groups"].(*schema.Set).List() + rgs := constructRuleGroupsList(rgl) + + msd = map[string]interface{}{ + "type": m["type"].(string), + "defaultAction": m["default_action"].(map[string]interface{}), + "ruleGroups": rgs, + } + } + return msd +} + +func constructRuleGroupsList(rgs []interface{}) []map[string]interface{} { + ruleGroup := []map[string]interface{}{} + + for _, rg := range rgs { + log.Printf("[DEBUG] Rule_Group Keys: %s", rg) + + m := rg.(map[string]interface{}) + + ruleId := m["id"].(string) + overrideAction := m["override_action"].(map[string]interface{}) + + rule := map[string]interface{}{ + "id": ruleId, + "overrideAction": overrideAction, + } + + ruleGroup = append(ruleGroup, rule) + } + return ruleGroup +} + +func flattenAwsFmsManagedSecurityData(set *schema.Set) *fms.SecurityServicePolicyData { + spd := set.List() + + securityServicePolicyData := &fms.SecurityServicePolicyData{} + + for _, t := range spd { + spdMap := t.(map[string]interface{}) + spdType := spdMap["type"].(string) + + securityServicePolicyData.Type = aws.String(spdType) + + switch spdType { + case "WAF": + if v, ok := spdMap["managed_service_data"]; !ok { + log.Printf("[DEBUG] Error Looking up Managed Service Data: %s", v) + } else { + spdPolicy := constructManagedServiceData(v.(*schema.Set).List()) + + js, err := json.Marshal(spdPolicy) + if err != nil { + log.Printf("[DEBUG] JSON Error: %s", err) + } + + securityServicePolicyData.ManagedServiceData = aws.String(string(js)) + } + } + } + + return securityServicePolicyData +} + +func buildResourceTags(rTags interface{}) []*fms.ResourceTag { + var rTagList []*fms.ResourceTag + + tags := rTags.(map[string]interface{}) + for k, v := range tags { + rTagList = append(rTagList, &fms.ResourceTag{Key: aws.String(k), Value: aws.String(v.(string))}) + } + + return rTagList +} diff --git a/aws/resource_aws_fms_policy_test.go b/aws/resource_aws_fms_policy_test.go new file mode 100644 index 00000000000..a1566e8c28b --- /dev/null +++ b/aws/resource_aws_fms_policy_test.go @@ -0,0 +1,247 @@ +package aws + +import ( + "fmt" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fms" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSFmsPolicy_importBasic(t *testing.T) { + resourceName := "aws_fms_policy.test" + fmsPolicyName := fmt.Sprintf("tf-fms-%s", acctest.RandString(5)) + wafRuleGroupName := fmt.Sprintf("tf-waf-rg-%s", acctest.RandString(5)) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsFmsPolicyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccFmsPolicyConfig(fmsPolicyName, wafRuleGroupName), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSFmsPolicy_basic(t *testing.T) { + fmsPolicyName := fmt.Sprintf("tf-fms-%s", acctest.RandString(5)) + wafRuleGroupName := fmt.Sprintf("tf-waf-rg-%s", acctest.RandString(5)) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: 
testAccProviders, + CheckDestroy: testAccCheckAwsFmsPolicyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccFmsPolicyConfig(fmsPolicyName, wafRuleGroupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsFmsPolicyExists("aws_fms_policy.test"), + testAccMatchResourceAttrRegionalARN("aws_fms_policy.test", "arn", "fms", regexp.MustCompile(`policy/`)), + resource.TestCheckResourceAttr("aws_fms_policy.test", "name", fmsPolicyName), + resource.TestCheckResourceAttr("aws_fms_policy.test", "security_service_policy_data.#", "1"), + ), + }, + }, + }) +} + +func TestAccAWSFmsPolicy_tags(t *testing.T) { + fmsPolicyName := fmt.Sprintf("tf-fms-%s", acctest.RandString(5)) + wafRuleGroupName := fmt.Sprintf("tf-waf-rg-%s", acctest.RandString(5)) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsFmsPolicyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccFmsPolicyConfig_tags(fmsPolicyName, wafRuleGroupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsFmsPolicyExists("aws_fms_policy.test"), + resource.TestCheckResourceAttr("aws_fms_policy.test", "name", fmsPolicyName), + resource.TestCheckResourceAttr("aws_fms_policy.test", "resource_tags.%", "2"), + resource.TestCheckResourceAttr("aws_fms_policy.test", "resource_tags.Usage", "original"), + ), + }, + { + Config: testAccFmsPolicyConfig_tagsChanged(fmsPolicyName, wafRuleGroupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsFmsPolicyExists("aws_fms_policy.test"), + resource.TestCheckResourceAttr("aws_fms_policy.test", "name", fmsPolicyName), + resource.TestCheckResourceAttr("aws_fms_policy.test", "resource_tags.%", "1"), + resource.TestCheckResourceAttr("aws_fms_policy.test", "resource_tags.Usage", "changed"), + ), + }, + }, + }) +} + +func testAccCheckAwsFmsPolicyDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).fmsconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_fms_policy" { + continue + } + + policyId := rs.Primary.Attributes["id"] + + input := &fms.GetPolicyInput{ + PolicyId: aws.String(policyId), + } + + resp, err := conn.GetPolicy(input) + + if isAWSErr(err, fms.ErrCodeResourceNotFoundException, "") { + continue + } + + if err != nil { + return err + } + + if resp.Policy.PolicyId != nil { + return fmt.Errorf("[DESTROY Error] Fms Policy (%s) not deleted", rs.Primary.ID) + } + } + return nil +} + +func testAccCheckAwsFmsPolicyExists(name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + _, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + return nil + } +} + +func testAccFmsPolicyConfig(name string, group string) string { + return fmt.Sprintf(` +resource "aws_fms_policy" "test" { + exclude_resource_tags = false + name = %[1]q + remediation_enabled = false + resource_type_list = ["AWS::ElasticLoadBalancingV2::LoadBalancer"] + + security_service_policy_data { + type = "WAF" + + managed_service_data { + type = "WAF" + + rule_groups { + id = "${aws_wafregional_rule_group.test.id}" + + override_action={ + type = "COUNT" + } + } + + default_action={ + type = "BLOCK" + } + } + } +} + +resource "aws_wafregional_rule_group" "test" { + metric_name = "MyTest" + name = %[2]q +} +`, name, group) +} + +func testAccFmsPolicyConfig_tags(name string, group string) string { + return fmt.Sprintf(` +resource "aws_fms_policy" "test" { + exclude_resource_tags = false + name = %[1]q + 
remediation_enabled = false + resource_type_list = ["AWS::ElasticLoadBalancingV2::LoadBalancer"] + + security_service_policy_data { + type = "WAF" + + managed_service_data { + type = "WAF" + + rule_groups { + id = "${aws_wafregional_rule_group.test.id}" + + override_action={ + type = "COUNT" + } + } + + default_action={ + type = "BLOCK" + } + } + } + resource_tags = { + "Environment" = "Testing", + "Usage"= "original", + } + +} + +resource "aws_wafregional_rule_group" "test" { + metric_name = "MyTest" + name = %[2]q +} +`, name, group) +} + +func testAccFmsPolicyConfig_tagsChanged(name string, group string) string { + return fmt.Sprintf(` +resource "aws_fms_policy" "test" { + exclude_resource_tags = false + name = %[1]q + remediation_enabled = false + resource_type_list = ["AWS::ElasticLoadBalancingV2::LoadBalancer"] + + security_service_policy_data { + type = "WAF" + + managed_service_data { + type = "WAF" + + rule_groups { + id = "${aws_wafregional_rule_group.test.id}" + + override_action={ + type = "COUNT" + } + } + + default_action={ + type = "BLOCK" + } + } + } + resource_tags = { + "Usage"= "changed", + } + +} + +resource "aws_wafregional_rule_group" "test" { + metric_name = "MyTest" + name = %[2]q +} +`, name, group) +} From 06c03286f27597c0be1797f5af40ee4bb5122ba6 Mon Sep 17 00:00:00 2001 From: Rob H Date: Thu, 1 Aug 2019 23:03:03 +0100 Subject: [PATCH 0357/1212] Removed log statement --- aws/resource_aws_fms_policy.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/aws/resource_aws_fms_policy.go b/aws/resource_aws_fms_policy.go index 36719078238..95fd3d3e3af 100644 --- a/aws/resource_aws_fms_policy.go +++ b/aws/resource_aws_fms_policy.go @@ -204,8 +204,6 @@ func resourceAwsFmsPolicyCreate(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Creating Policy Failed: %s", err.Error()) } - log.Printf("[DEBUG] Printing ID: %s", aws.StringValue(resp.Policy.PolicyId)) - d.SetId(aws.StringValue(resp.Policy.PolicyId)) d.Set("arn", aws.StringValue(resp.PolicyArn)) d.Set("policy_update_token", aws.StringValue(resp.Policy.PolicyUpdateToken)) From 5dde20901ba86b8fe505640153019dbcddc984d3 Mon Sep 17 00:00:00 2001 From: Rob H Date: Tue, 6 Aug 2019 10:43:20 +0100 Subject: [PATCH 0358/1212] Fixed resource import --- aws/resource_aws_fms_policy.go | 137 ++++++++++++++++++++++++---- aws/resource_aws_fms_policy_test.go | 7 +- 2 files changed, 125 insertions(+), 19 deletions(-) diff --git a/aws/resource_aws_fms_policy.go b/aws/resource_aws_fms_policy.go index 95fd3d3e3af..b0ce24253b9 100644 --- a/aws/resource_aws_fms_policy.go +++ b/aws/resource_aws_fms_policy.go @@ -5,9 +5,11 @@ import ( "fmt" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/fms" + "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" "log" + "strings" ) func resourceAwsFmsPolicy() *schema.Resource { @@ -155,7 +157,6 @@ func resourceAwsFmsPolicy() *schema.Resource { }, }, }, - "arn": { Type: schema.TypeString, Computed: true, @@ -176,19 +177,19 @@ func resourceAwsFmsPolicyCreate(d *schema.ResourceData, meta interface{}) error } if v, ok := d.GetOk("security_service_policy_data"); ok { - fmsPolicy.SecurityServicePolicyData = flattenAwsFmsManagedSecurityData(v.(*schema.Set)) + fmsPolicy.SecurityServicePolicyData = expandAwsFmsManagedSecurityData(v.(*schema.Set)) } if rTags, tagsOk := d.GetOk("resource_tags"); tagsOk { - fmsPolicy.ResourceTags = buildResourceTags(rTags) + fmsPolicy.ResourceTags = 
constructResourceTags(rTags) } if v, ok := d.GetOk("include_map"); ok { - fmsPolicy.IncludeMap = buildAccountList(v.(*schema.Set)) + fmsPolicy.IncludeMap = expandAccountList(v.(*schema.Set)) } if v, ok := d.GetOk("exclude_map"); ok { - fmsPolicy.ExcludeMap = buildAccountList(v.(*schema.Set)) + fmsPolicy.ExcludeMap = expandAccountList(v.(*schema.Set)) } params := &fms.PutPolicyInput{ @@ -205,9 +206,6 @@ func resourceAwsFmsPolicyCreate(d *schema.ResourceData, meta interface{}) error } d.SetId(aws.StringValue(resp.Policy.PolicyId)) - d.Set("arn", aws.StringValue(resp.PolicyArn)) - d.Set("policy_update_token", aws.StringValue(resp.Policy.PolicyUpdateToken)) - d.Set("exclude_resource_tags", aws.BoolValue(resp.Policy.ExcludeResourceTags)) return resourceAwsFmsPolicyRead(d, meta) } @@ -232,8 +230,16 @@ func resourceAwsFmsPolicyRead(d *schema.ResourceData, meta interface{}) error { } d.Set("arn", aws.StringValue(resp.PolicyArn)) - d.Set("policy_update_token", aws.StringValue(resp.Policy.PolicyUpdateToken)) + + d.Set("name", aws.StringValue(resp.Policy.PolicyName)) d.Set("exclude_resource_tags", aws.BoolValue(resp.Policy.ExcludeResourceTags)) + d.Set("exclude_map", flattenFMSAccountMap(resp.Policy.ExcludeMap)) + d.Set("include_map", flattenFMSAccountMap(resp.Policy.IncludeMap)) + d.Set("remediation_enabled", aws.BoolValue(resp.Policy.RemediationEnabled)) + d.Set("resource_type_list", resp.Policy.ResourceTypeList) + d.Set("policy_update_token", aws.StringValue(resp.Policy.PolicyUpdateToken)) + d.Set("resource_tags", flattenFMSResourceTags(resp.Policy.ResourceTags)) + d.Set("security_service_policy_data", flattenFmsSecurityServicePolicyData(resp.Policy.SecurityServicePolicyData)) return nil } @@ -254,22 +260,22 @@ func resourceAwsFmsPolicyUpdate(d *schema.ResourceData, meta interface{}) error requestUpdate := false if d.HasChange("exclude_map") { - fmsPolicy.ExcludeMap = buildAccountList(d.Get("exclude_map").(*schema.Set)) + fmsPolicy.ExcludeMap = expandAccountList(d.Get("exclude_map").(*schema.Set)) requestUpdate = true } if d.HasChange("include_map") { - fmsPolicy.ExcludeMap = buildAccountList(d.Get("include_map").(*schema.Set)) + fmsPolicy.ExcludeMap = expandAccountList(d.Get("include_map").(*schema.Set)) requestUpdate = true } if d.HasChange("resource_tags") { - fmsPolicy.ResourceTags = buildResourceTags(d.Get("resource_tags")) + fmsPolicy.ResourceTags = constructResourceTags(d.Get("resource_tags")) requestUpdate = true } if requestUpdate { - fmsPolicy.SecurityServicePolicyData = flattenAwsFmsManagedSecurityData(d.Get("security_service_policy_data").(*schema.Set)) + fmsPolicy.SecurityServicePolicyData = expandAwsFmsManagedSecurityData(d.Get("security_service_policy_data").(*schema.Set)) params := &fms.PutPolicyInput{Policy: fmsPolicy} _, err := conn.PutPolicy(params) @@ -302,7 +308,7 @@ func resourceAwsFmsPolicyDelete(d *schema.ResourceData, meta interface{}) error return nil } -func buildAccountList(set *schema.Set) map[string][]*string { +func expandAccountList(set *schema.Set) map[string][]*string { var accountList = make(map[string][]*string) for _, account := range set.List() { @@ -356,7 +362,7 @@ func constructRuleGroupsList(rgs []interface{}) []map[string]interface{} { return ruleGroup } -func flattenAwsFmsManagedSecurityData(set *schema.Set) *fms.SecurityServicePolicyData { +func expandAwsFmsManagedSecurityData(set *schema.Set) *fms.SecurityServicePolicyData { spd := set.List() securityServicePolicyData := &fms.SecurityServicePolicyData{} @@ -387,7 +393,92 @@ func 
flattenAwsFmsManagedSecurityData(set *schema.Set) *fms.SecurityServicePolic return securityServicePolicyData } -func buildResourceTags(rTags interface{}) []*fms.ResourceTag { +func flattenFMSAccountMap(accountMap map[string][]*string) *schema.Set { + eMap := map[string]interface{}{} + + if _, ok := eMap["account"]; ok { + for _, v := range accountMap["ACCOUNT"] { + eMap["account"] = append(eMap["account"].([]*string), v) + } + } + + s := schema.NewSet(fmsPolicyDataHash, []interface{}{}) + s.Add(eMap) + + return s +} + +func flattenFMSResourceTags(resourceTags []*fms.ResourceTag) map[string]interface{} { + resTags := map[string]interface{}{} + + for _, v := range resourceTags { + resTags[*v.Key] = v.Value + } + return resTags +} + +func flattenFmsManagedServiceData(sspdMsd map[string]interface{}) *schema.Set { + msdSS := schema.NewSet(fmsPolicyDataHash, []interface{}{}) + + msdData := map[string]interface{}{ + "type": sspdMsd["type"].(string), + } + + if sspdMsd["defaultAction"] != nil { + msdData["default_action"] = sspdMsd["defaultAction"] + } + + msdData["rule_groups"] = flattenFmsMsdRuleGroupsList(sspdMsd) + + msdSS.Add(msdData) + + return msdSS +} + +func flattenFmsMsdRuleGroupsList(sspdMsd map[string]interface{}) *schema.Set { + ruleGroupsSet := schema.NewSet(fmsPolicyDataHash, []interface{}{}) + if sspdMsd["ruleGroups"] != nil { + for _, v := range sspdMsd["ruleGroups"].([]interface{}) { + + rg := v.(map[string]interface{}) + + rule := map[string]interface{}{ + "id": rg["id"].(string), + "override_action": rg["overrideAction"].(map[string]interface{}), + } + + ruleGroupsSet.Add(rule) + } + } + return ruleGroupsSet +} + +func flattenFmsSecurityServicePolicyData(spd *fms.SecurityServicePolicyData) *schema.Set { + s := schema.NewSet(fmsPolicyDataHash, []interface{}{}) + + sspd := map[string]interface{}{ + "type": aws.StringValue(spd.Type), + } + + var policy map[string]interface{} + + if spd.ManagedServiceData != nil { + + msd := []byte(aws.StringValue(spd.ManagedServiceData)) + + if err := json.Unmarshal(msd, &policy); err != nil { + panic(err) + } + + sspd["managed_service_data"] = flattenFmsManagedServiceData(policy) + } + + s.Add(sspd) + + return s +} + +func constructResourceTags(rTags interface{}) []*fms.ResourceTag { var rTagList []*fms.ResourceTag tags := rTags.(map[string]interface{}) @@ -397,3 +488,17 @@ func buildResourceTags(rTags interface{}) []*fms.ResourceTag { return rTagList } + +func fmsPolicyDataHash(v interface{}) int { + var buf strings.Builder + + m := v.(map[string]interface{}) + + if _, ok := m["Id"]; ok { + buf.WriteString(fmt.Sprintf("%s", m["Id"])) + } else { + buf.WriteString(fmt.Sprintf("%s-", m)) + } + + return hashcode.String(buf.String()) +} diff --git a/aws/resource_aws_fms_policy_test.go b/aws/resource_aws_fms_policy_test.go index a1566e8c28b..26ec196b463 100644 --- a/aws/resource_aws_fms_policy_test.go +++ b/aws/resource_aws_fms_policy_test.go @@ -26,9 +26,10 @@ func TestAccAWSFmsPolicy_importBasic(t *testing.T) { Config: testAccFmsPolicyConfig(fmsPolicyName, wafRuleGroupName), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"policy_update_token"}, }, }, }) From f2367d56412cd6f741ad9ed31e0dff08becb3ae8 Mon Sep 17 00:00:00 2001 From: Rob H Date: Tue, 6 Aug 2019 15:09:50 +0100 Subject: [PATCH 0359/1212] Added documentation & tidyup --- aws/resource_aws_fms_policy.go | 30 ++++---- 
website/docs/r/fms_policy.html.markdown | 98 +++++++++++++++++++++++++ 2 files changed, 115 insertions(+), 13 deletions(-) create mode 100644 website/docs/r/fms_policy.html.markdown diff --git a/aws/resource_aws_fms_policy.go b/aws/resource_aws_fms_policy.go index b0ce24253b9..fd99932b3eb 100644 --- a/aws/resource_aws_fms_policy.go +++ b/aws/resource_aws_fms_policy.go @@ -29,6 +29,12 @@ func resourceAwsFmsPolicy() *schema.Resource { ForceNew: true, }, + "delete_all_policy_resources": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "exclude_resource_tags": { Type: schema.TypeBool, Required: true, @@ -42,7 +48,7 @@ func resourceAwsFmsPolicy() *schema.Resource { Schema: map[string]*schema.Schema{ "account": { Type: schema.TypeList, - Optional: true, + Required: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.StringLenBetween(12, 12), @@ -59,7 +65,7 @@ func resourceAwsFmsPolicy() *schema.Resource { Schema: map[string]*schema.Schema{ "account": { Type: schema.TypeList, - Optional: true, + Required: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.StringLenBetween(12, 12), @@ -71,7 +77,7 @@ func resourceAwsFmsPolicy() *schema.Resource { "remediation_enabled": { Type: schema.TypeBool, - Optional: true, + Required: true, }, "resource_type_list": { @@ -103,28 +109,26 @@ func resourceAwsFmsPolicy() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "type": { - Optional: true, + Required: true, Type: schema.TypeString, }, "rule_groups": { Type: schema.TypeSet, - Optional: true, + Required: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "id": { Type: schema.TypeString, - Optional: true, + Required: true, }, "override_action": { Type: schema.TypeMap, - Optional: true, - Default: map[string]interface{}{}, + Required: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "type": { Type: schema.TypeString, - Optional: true, - Default: "NONE", + Required: true, ValidateFunc: validation.StringInSlice([]string{"COUNT", "NONE"}, false), }, }, @@ -139,8 +143,8 @@ func resourceAwsFmsPolicy() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "type": { - Type: schema.TypeString, - Default: "BLOCK", + Required: true, + Type: schema.TypeString, }, }, }, @@ -294,7 +298,7 @@ func resourceAwsFmsPolicyDelete(d *schema.ResourceData, meta interface{}) error _, err := conn.DeletePolicy(&fms.DeletePolicyInput{ PolicyId: aws.String(d.Id()), - DeleteAllPolicyResources: aws.Bool(true), + DeleteAllPolicyResources: aws.Bool(d.Get("delete_all_policy_resources").(bool)), }) if isAWSErr(err, fms.ErrCodeResourceNotFoundException, "") { diff --git a/website/docs/r/fms_policy.html.markdown b/website/docs/r/fms_policy.html.markdown new file mode 100644 index 00000000000..172d798fb97 --- /dev/null +++ b/website/docs/r/fms_policy.html.markdown @@ -0,0 +1,98 @@ +--- +layout: "aws" +page_title: "AWS: aws_fms_policy" +sidebar_current: "docs-aws-resource-fms-policy" +description: |- + Provides a resource to create an AWS Firewall Manager policy +--- + +# Resource: aws_fms_policy + +Provides a resource to create an AWS Firewall Manager policy. You need to be using AWS organizations and have enabled the Firewall Manager administrator account. 
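+
+~> **Note:** The delegated Firewall Manager administrator account must be associated before this resource can be used. A minimal sketch, assuming the provider's separate `aws_fms_admin_account` resource and a placeholder account ID:
+
+```hcl
+resource "aws_fms_admin_account" "example" {
+  # account_id can be omitted to delegate the current account.
+  account_id = "123456789012"
+}
+```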
+
+## Example Usage
+
+```hcl
+resource "aws_fms_policy" "example" {
+  name                  = "FMS-Policy-Example"
+  exclude_resource_tags = false
+  remediation_enabled   = false
+  resource_type_list    = ["AWS::ElasticLoadBalancingV2::LoadBalancer"]
+
+  security_service_policy_data {
+    type = "WAF"
+
+    managed_service_data {
+      type = "WAF"
+
+      rule_groups {
+        id = "${aws_wafregional_rule_group.test.id}"
+
+        override_action {
+          type = "COUNT"
+        }
+      }
+
+      default_action {
+        type = "BLOCK"
+      }
+    }
+  }
+}
+
+resource "aws_wafregional_rule_group" "test" {
+  metric_name = "WAFRuleGroupExample"
+  name        = "WAF-Rule-Group-Example"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required, Forces new resource) The friendly name of the AWS Firewall Manager Policy.
+* `delete_all_policy_resources` - (Optional) If true, deleting the policy will also perform a clean-up process of the resources the policy created. Defaults to `true`. More information can be found here [AWS Firewall Manager delete policy](https://docs.aws.amazon.com/fms/2018-01-01/APIReference/API_DeletePolicy.html)
+* `exclude_map` - (Optional) A map of lists, with a single key named 'account' with a list of AWS Account IDs to exclude from this policy.
+* `exclude_resource_tags` - (Required, Forces new resource) A boolean value, if true the tags that are specified in the `resource_tags` are not protected by this policy. If set to false and `resource_tags` are populated, resources that contain tags will be protected by this policy.
+* `include_map` - (Optional) A map of lists, with a single key named 'account' with a list of AWS Account IDs to include for this policy.
+* `remediation_enabled` - (Required) A boolean value, indicates if the policy should be automatically applied to resources that already exist in the account.
+* `resource_tags` - (Optional) A map of resource tags that, if present, will filter protections on resources based on `exclude_resource_tags`.
+* `resource_type_list` - (Required, Forces new resource) A list of resource types to protect, valid values are: `AWS::ElasticLoadBalancingV2::LoadBalancer`, `AWS::ApiGateway::Stage`, `AWS::CloudFront::Distribution`.
+* `security_service_policy_data` - (Required) The objects to include in Security Service Policy Data. Documented below.
+
+## exclude_map Configuration block
+* `account` - (Required) A list of AWS Organization member Accounts that you want to exclude from this AWS FMS Policy.
+
+## include_map Configuration block
+* `account` - (Required) A list of AWS Organization member Accounts that you want to include for this AWS FMS Policy.
+
+## security_service_policy_data Configuration block
+* `managed_service_data` - (Optional) Configuration block containing WAF data, required if type is set to WAF.
+* `type` - (Required, Forces new resource) The service that the policy is using to protect the resources. Valid values are `WAF` or `SHIELD_ADVANCED`.
+
+## managed_service_data Configuration block
+
+-> Additonal information about this configuration can be found in the [AWS Firewall Manager SecurityServicePolicyData API Reference](https://docs.aws.amazon.com/fms/2018-01-01/APIReference/API_SecurityServicePolicyData.html)
+
+* `type` - (Required) Type currently only supports WAF.
+* `rule_groups` - (Required) A rule group block, maximum of 2 rule group blocks are currently supported.
+  * `id` - (Required) Id of the WAF Rule Group that's to be attached.
+  * `override_action` - (Required) Override the action that the rule group requests CloudFront or AWS WAF to take when a web request matches the conditions in the rule.
+  * `type` - (Required) Valid values are `NONE` or `COUNT`.
+* `default_action` - (Required) Configuration block with the action that you want AWS WAF to take when a request doesn't match the criteria in any of the rules.
+  * `type` - (Required) Valid values are `BLOCK` or `COUNT`.
+
+## Attribute Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - The ID of the AWS Firewall Manager policy.
+* `policy_update_token` - A unique identifier for each update to the policy.
+
+## Import
+
+Firewall Manager policies can be imported using the policy ID, e.g.
+
+```
+$ terraform import aws_fms_policy.example 5be49585-a7e3-4c49-dde1-a179fe4a619a
+```
From 9bbe6c077d059f4507fdbbc8de5488196ec29860 Mon Sep 17 00:00:00 2001
From: Rob H
Date: Tue, 6 Aug 2019 15:19:24 +0100
Subject: [PATCH 0360/1212] Fixed typo in docs

---
 website/docs/r/fms_policy.html.markdown | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/docs/r/fms_policy.html.markdown b/website/docs/r/fms_policy.html.markdown
index 172d798fb97..54e279c5490 100644
--- a/website/docs/r/fms_policy.html.markdown
+++ b/website/docs/r/fms_policy.html.markdown
@@ -72,7 +72,7 @@ The following arguments are supported:
 
 ## managed_service_data Configuration block
 
--> Additonal information about this configuration can be found in the [AWS Firewall Manager SecurityServicePolicyData API Reference](https://docs.aws.amazon.com/fms/2018-01-01/APIReference/API_SecurityServicePolicyData.html)
+-> Additional information about this configuration can be found in the [AWS Firewall Manager SecurityServicePolicyData API Reference](https://docs.aws.amazon.com/fms/2018-01-01/APIReference/API_SecurityServicePolicyData.html)
 
 * `type` - (Required) Type currently only supports WAF.
 * `rule_groups` - (Required) A rule group block, maximum of 2 rule group blocks are currently supported.
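The include/exclude account maps documented in the patch above are configuration blocks rather than plain maps; a minimal sketch of how they would be set, assuming placeholder twelve-digit account IDs and the other required arguments from the example usage:

```hcl
resource "aws_fms_policy" "example" {
  # ... name, exclude_resource_tags, remediation_enabled,
  # resource_type_list and security_service_policy_data as above ...

  include_map {
    account = ["123456789012", "234567890123"]
  }

  exclude_map {
    account = ["345678901234"]
  }
}
```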
From da9a71d569a1651e6397a7d040b163c0ab1f2cf9 Mon Sep 17 00:00:00 2001 From: Hans Schabert Date: Mon, 4 Jan 2021 12:40:00 +0000 Subject: [PATCH 0361/1212] feature: Add working_directory to ImageBuilder --- aws/data_source_aws_imagebuilder_image_recipe.go | 5 +++++ aws/data_source_aws_imagebuilder_image_recipe_test.go | 1 + aws/resource_aws_imagebuilder_image_recipe.go | 10 ++++++++++ 3 files changed, 16 insertions(+) diff --git a/aws/data_source_aws_imagebuilder_image_recipe.go b/aws/data_source_aws_imagebuilder_image_recipe.go index 252490c2883..996f531a222 100644 --- a/aws/data_source_aws_imagebuilder_image_recipe.go +++ b/aws/data_source_aws_imagebuilder_image_recipe.go @@ -116,6 +116,10 @@ func dataSourceAwsImageBuilderImageRecipe() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "working_directory": { + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -154,6 +158,7 @@ func dataSourceAwsImageBuilderImageRecipeRead(d *schema.ResourceData, meta inter d.Set("platform", imageRecipe.Platform) d.Set("tags", keyvaluetags.ImagebuilderKeyValueTags(imageRecipe.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()) d.Set("version", imageRecipe.Version) + d.Set("working_directory", imageRecipe.WorkingDirectory) return nil } diff --git a/aws/data_source_aws_imagebuilder_image_recipe_test.go b/aws/data_source_aws_imagebuilder_image_recipe_test.go index 80ef8941dcc..73cde70f0b1 100644 --- a/aws/data_source_aws_imagebuilder_image_recipe_test.go +++ b/aws/data_source_aws_imagebuilder_image_recipe_test.go @@ -59,6 +59,7 @@ resource "aws_imagebuilder_component" "test" { }] schemaVersion = 1.0 }) + working_directory = "/tmp" name = %[1]q platform = "Linux" version = "1.0.0" diff --git a/aws/resource_aws_imagebuilder_image_recipe.go b/aws/resource_aws_imagebuilder_image_recipe.go index acfff669805..45f9e106a64 100644 --- a/aws/resource_aws_imagebuilder_image_recipe.go +++ b/aws/resource_aws_imagebuilder_image_recipe.go @@ -170,6 +170,12 @@ func resourceAwsImageBuilderImageRecipe() *schema.Resource { ForceNew: true, ValidateFunc: validation.StringLenBetween(1, 128), }, + "working_directory": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 128), + }, }, } } @@ -208,6 +214,9 @@ func resourceAwsImageBuilderImageRecipeCreate(d *schema.ResourceData, meta inter if v, ok := d.GetOk("version"); ok { input.SemanticVersion = aws.String(v.(string)) } + if v, ok := d.GetOk("working_directory"); ok { + input.WorkingDirectory = aws.String(v.(string)) + } output, err := conn.CreateImageRecipe(input) @@ -261,6 +270,7 @@ func resourceAwsImageBuilderImageRecipeRead(d *schema.ResourceData, meta interfa d.Set("platform", imageRecipe.Platform) d.Set("tags", keyvaluetags.ImagebuilderKeyValueTags(imageRecipe.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()) d.Set("version", imageRecipe.Version) + d.Set("working_directory", imageRecipe.WorkingDirectory) return nil } From af637ff235bed1444533a38d8e8123ca70fc3175 Mon Sep 17 00:00:00 2001 From: Hans Schabert Date: Mon, 4 Jan 2021 12:54:34 +0000 Subject: [PATCH 0362/1212] docs: Update aws_imagebuilder_image_recipe new property working_directory --- website/docs/r/imagebuilder_image_recipe.html.markdown | 1 + 1 file changed, 1 insertion(+) diff --git a/website/docs/r/imagebuilder_image_recipe.html.markdown b/website/docs/r/imagebuilder_image_recipe.html.markdown index c86e180bce7..82f402dec05 100644 --- a/website/docs/r/imagebuilder_image_recipe.html.markdown +++ 
b/website/docs/r/imagebuilder_image_recipe.html.markdown
@@ -48,6 +48,7 @@ The following attributes are optional:
 * `block_device_mapping` - (Optional) Configuration block(s) with block device mappings for the image recipe. Detailed below.
 * `description` - (Optional) Description of the image recipe.
 * `tags` - (Optional) Key-value map of resource tags for the image recipe.
+* `working_directory` - (Optional) The working directory to be used during build and test workflows.
 
 ### block_device_mapping
 
From fbc1e7b805481cf3a7f41d73cfa623686df1d414 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 4 Jan 2021 08:39:39 -0500
Subject: [PATCH 0363/1212] build(deps): bump github.com/aws/aws-sdk-go from 1.36.12 to 1.36.19 (#16934)

Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.36.12 to 1.36.19.
insertions(+), 11 deletions(-) diff --git a/website/docs/r/ami.html.markdown b/website/docs/r/ami.html.markdown index f013c5f4d56..90b0a833661 100644 --- a/website/docs/r/ami.html.markdown +++ b/website/docs/r/ami.html.markdown @@ -98,7 +98,7 @@ Nested `ephemeral_block_device` blocks have the following structure: ### Timeouts -The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) for certain actions: * `create` - (Defaults to 40 mins) Used when creating the AMI * `update` - (Defaults to 40 mins) Used when updating the AMI diff --git a/website/docs/r/ami_copy.html.markdown b/website/docs/r/ami_copy.html.markdown index a8b167c44bb..bb491315736 100644 --- a/website/docs/r/ami_copy.html.markdown +++ b/website/docs/r/ami_copy.html.markdown @@ -52,7 +52,7 @@ This resource also exposes the full set of arguments from the [`aws_ami`](ami.ht ### Timeouts -The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) for certain actions: * `create` - (Defaults to 40 mins) Used when creating the AMI * `update` - (Defaults to 40 mins) Used when updating the AMI diff --git a/website/docs/r/ami_from_instance.html.markdown b/website/docs/r/ami_from_instance.html.markdown index e85be3adc75..c1e669dffe8 100644 --- a/website/docs/r/ami_from_instance.html.markdown +++ b/website/docs/r/ami_from_instance.html.markdown @@ -49,7 +49,7 @@ The following arguments are supported: ### Timeouts -The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) for certain actions: * `create` - (Defaults to 40 mins) Used when creating the AMI * `update` - (Defaults to 40 mins) Used when updating the AMI diff --git a/website/docs/r/dynamodb_table.html.markdown b/website/docs/r/dynamodb_table.html.markdown index 7ad39137702..5779e9e5d5c 100644 --- a/website/docs/r/dynamodb_table.html.markdown +++ b/website/docs/r/dynamodb_table.html.markdown @@ -122,7 +122,7 @@ attributes, etc. ### Timeouts -The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) for certain actions: * `create` - (Defaults to 10 mins) Used when creating the table * `update` - (Defaults to 60 mins) Used when updating the table configuration and reset for each individual Global Secondary Index and Replica update diff --git a/website/docs/r/instance.html.markdown b/website/docs/r/instance.html.markdown index 3ea56975041..60905b374f6 100644 --- a/website/docs/r/instance.html.markdown +++ b/website/docs/r/instance.html.markdown @@ -112,7 +112,7 @@ instances. 
See [Shutdown Behavior](https://docs.aws.amazon.com/AWSEC2/latest/Use ### Timeouts -The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) for certain actions: * `create` - (Defaults to 10 mins) Used when launching the instance (until it reaches the initial `running` state) * `update` - (Defaults to 10 mins) Used when stopping and starting the instance when necessary during update - e.g. when changing instance type diff --git a/website/docs/r/lex_bot.html.markdown b/website/docs/r/lex_bot.html.markdown index aa19905b71a..a3e0cb99499 100644 --- a/website/docs/r/lex_bot.html.markdown +++ b/website/docs/r/lex_bot.html.markdown @@ -112,7 +112,7 @@ slot values into the response card. For more information, see ### Timeouts -The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) for certain actions: * `create` - (Defaults to 1 mins) Used when creating the bot * `update` - (Defaults to 1 mins) Used when updating the bot diff --git a/website/docs/r/lex_bot_alias.html.markdown b/website/docs/r/lex_bot_alias.html.markdown index 8a172faf396..9a4cc1052f7 100644 --- a/website/docs/r/lex_bot_alias.html.markdown +++ b/website/docs/r/lex_bot_alias.html.markdown @@ -51,7 +51,7 @@ The settings for conversation logs. ### Timeouts -The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) for certain actions: * `create` - (Defaults to 1 mins) Used when creating the bot alias * `update` - (Defaults to 1 mins) Used when updating the bot alias diff --git a/website/docs/r/lex_intent.html.markdown b/website/docs/r/lex_intent.html.markdown index b48b17f5d00..eb233e43c92 100644 --- a/website/docs/r/lex_intent.html.markdown +++ b/website/docs/r/lex_intent.html.markdown @@ -241,7 +241,7 @@ slot values into the response card. For more information, see ### Timeouts -The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) for certain actions: * `create` - (Defaults to 1 min) Used when creating the intent * `update` - (Defaults to 1 min) Used when updating the intent diff --git a/website/docs/r/lex_slot_type.html.markdown b/website/docs/r/lex_slot_type.html.markdown index 416a5f8df9c..877c2a904e9 100644 --- a/website/docs/r/lex_slot_type.html.markdown +++ b/website/docs/r/lex_slot_type.html.markdown @@ -72,7 +72,7 @@ pizza should have. The slot type could include the values: thick, thin, stuffed. 
### Timeouts -The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) for certain actions: * `create` - (Defaults to 1 min) Used when creating the slot type * `update` - (Defaults to 1 min) Used when updating the slot type diff --git a/website/docs/r/spot_fleet_request.html.markdown b/website/docs/r/spot_fleet_request.html.markdown index 5357cf63bda..d57ed06fe10 100644 --- a/website/docs/r/spot_fleet_request.html.markdown +++ b/website/docs/r/spot_fleet_request.html.markdown @@ -240,7 +240,7 @@ The `launch_template_config` block supports the following: ### Timeouts -The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) for certain actions: * `create` - (Defaults to 10 mins) Used when requesting the spot instance (only valid if `wait_for_fulfillment = true`) * `delete` - (Defaults to 15 mins) Used when destroying the spot instance diff --git a/website/docs/r/spot_instance_request.html.markdown b/website/docs/r/spot_instance_request.html.markdown index d201065c173..e351cd33e04 100644 --- a/website/docs/r/spot_instance_request.html.markdown +++ b/website/docs/r/spot_instance_request.html.markdown @@ -70,7 +70,7 @@ Spot Instance Requests support all the same arguments as ### Timeouts -The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) for certain actions: * `create` - (Defaults to 10 mins) Used when requesting the spot instance (only valid if `wait_for_fulfillment = true`) * `delete` - (Defaults to 20 mins) Used when terminating all instances launched via the given spot instance request From 50a39345eb0d0ca891327dd24496286d94fdb9d7 Mon Sep 17 00:00:00 2001 From: Dan Marshall Date: Mon, 4 Jan 2021 14:20:31 +0000 Subject: [PATCH 0365/1212] docs/resource/aws_s3_ownership_controls: Remove preview note as it's now GA (#16897) --- website/docs/r/s3_bucket_ownership_controls.html.markdown | 2 -- 1 file changed, 2 deletions(-) diff --git a/website/docs/r/s3_bucket_ownership_controls.html.markdown b/website/docs/r/s3_bucket_ownership_controls.html.markdown index 6c02dd7d131..40ea8eceb54 100644 --- a/website/docs/r/s3_bucket_ownership_controls.html.markdown +++ b/website/docs/r/s3_bucket_ownership_controls.html.markdown @@ -10,8 +10,6 @@ description: |- Provides a resource to manage S3 Bucket Ownership Controls. For more information, see the [S3 Developer Guide](https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). -~> **NOTE:** This AWS functionality is in Preview and may change before General Availability release. Backwards compatibility is not guaranteed between Terraform AWS Provider releases. 
- ## Example Usage ```hcl From b1e0ae154589b8b5697b229f53e63aa8b669a554 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=BAlio=20Pedrosa?= Date: Mon, 4 Jan 2021 16:00:57 +0100 Subject: [PATCH 0366/1212] resource/aws_imagebuilder_distribution_configuration: Remove `user_ids` maximum limit (#16905) --- aws/resource_aws_imagebuilder_distribution_configuration.go | 1 - 1 file changed, 1 deletion(-) diff --git a/aws/resource_aws_imagebuilder_distribution_configuration.go b/aws/resource_aws_imagebuilder_distribution_configuration.go index 799487efede..05365f2baf8 100644 --- a/aws/resource_aws_imagebuilder_distribution_configuration.go +++ b/aws/resource_aws_imagebuilder_distribution_configuration.go @@ -85,7 +85,6 @@ func resourceAwsImageBuilderDistributionConfiguration() *schema.Resource { Type: schema.TypeString, ValidateFunc: validateAwsAccountId, }, - MaxItems: 50, }, }, }, From d1269601a6dd56b2f204527fff056fef6dc6095b Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Mon, 4 Jan 2021 10:01:37 -0500 Subject: [PATCH 0367/1212] Update CHANGELOG for #16905 --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 627343b8de3..90e2e5191b8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,10 @@ ENHANCEMENTS * resource/aws_kinesis_stream: Update `retention_period` argument plan-time validation to include up to 8760 hours [GH-16608] +BUG FIXES + +* resource/aws_imagebuilder_distribution_configuration: Remove `user_ids` argument maximum limit [GH-16905] + ## 3.22.0 (December 18, 2020) FEATURES From 0e625ec563356affde17e87b0c14f3f1627b3969 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Jan 2021 10:46:21 -0500 Subject: [PATCH 0368/1212] build(deps): bump github.com/aws/aws-sdk-go in /awsproviderlint (#16935) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.36.12 to 1.36.19. 
- [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.36.12...v1.36.19) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- awsproviderlint/go.mod | 2 +- awsproviderlint/go.sum | 4 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 86 ++++++++++++++++--- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/service/s3/api.go | 2 +- awsproviderlint/vendor/modules.txt | 2 +- 6 files changed, 82 insertions(+), 16 deletions(-) diff --git a/awsproviderlint/go.mod b/awsproviderlint/go.mod index cffc47a69f9..db9c1276796 100644 --- a/awsproviderlint/go.mod +++ b/awsproviderlint/go.mod @@ -3,7 +3,7 @@ module github.com/terraform-providers/terraform-provider-aws/awsproviderlint go 1.15 require ( - github.com/aws/aws-sdk-go v1.36.12 + github.com/aws/aws-sdk-go v1.36.19 github.com/bflad/tfproviderlint v0.21.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.0 golang.org/x/tools v0.0.0-20200928201943-a0ef9b62deab diff --git a/awsproviderlint/go.sum b/awsproviderlint/go.sum index 42d8cda4de5..03b33ba6b9c 100644 --- a/awsproviderlint/go.sum +++ b/awsproviderlint/go.sum @@ -56,8 +56,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.36.12 h1:YJpKFEMbqEoo+incs5qMe61n1JH3o4O1IMkMexLzJG8= -github.com/aws/aws-sdk-go v1.36.12/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.36.19 h1:zbJZKkxeDiYxUYFjymjWxPye+qa1G2gRVyhIzZrB9zA= +github.com/aws/aws-sdk-go v1.36.19/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/bflad/gopaniccheck v0.1.0 h1:tJftp+bv42ouERmUMWLoUn/5bi/iQZjHPznM00cP/bU= github.com/bflad/gopaniccheck v0.1.0/go.mod h1:ZCj2vSr7EqVeDaqVsWN4n2MwdROx1YL+LFo47TSWtsA= github.com/bflad/tfproviderlint v0.21.0 h1:iSNU4khz+55oYA+5aXXMrz5Max4Mytb0JwPGhOwTIJo= diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 72dcdfad248..7ea175ecd11 100644 --- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -1255,6 +1255,7 @@ var awsPartition = partition{ "codebuild": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1397,6 +1398,7 @@ var awsPartition = partition{ "codepipeline": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3057,11 +3059,41 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-prod-ca-central-1": endpoint{ + Hostname: "fsx-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-prod-us-east-1": endpoint{ + 
Hostname: "fsx-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-prod-us-east-2": endpoint{ + Hostname: "fsx-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-prod-us-west-1": endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-prod-us-west-2": endpoint{ + Hostname: "fsx-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "gamelift": service{ @@ -3970,6 +4002,7 @@ var awsPartition = partition{ "macie2": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3979,6 +4012,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -4006,11 +4040,12 @@ var awsPartition = partition{ Region: "us-west-2", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "managedblockchain": service{ @@ -8553,6 +8588,25 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "fsx": service{ + + Endpoints: endpoints{ + "fips-prod-us-gov-east-1": endpoint{ + Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-prod-us-gov-west-1": endpoint{ + Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "glacier": service{ Endpoints: endpoints{ @@ -9464,12 +9518,24 @@ var awsusgovPartition = partition{ "waf-regional": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "fips-us-gov-west-1": endpoint{ Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, + "us-gov-east-1": endpoint{ + Hostname: "waf-regional.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{ Hostname: "waf-regional.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go index 70325bd761c..d2fbb55f6dc 100644 --- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.36.12" +const SDKVersion = "1.36.19" diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 66700cce13d..89a0a29afff 100644 --- 
a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -19563,7 +19563,7 @@ type GetObjectInput struct { ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"` // Sets the Expires header of the response. - ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp"` + ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"rfc822"` // Specifies the algorithm to use to when encrypting the object (for example, // AES256). diff --git a/awsproviderlint/vendor/modules.txt b/awsproviderlint/vendor/modules.txt index fc43dfdcc79..a020a1a2d01 100644 --- a/awsproviderlint/vendor/modules.txt +++ b/awsproviderlint/vendor/modules.txt @@ -12,7 +12,7 @@ cloud.google.com/go/storage github.com/agext/levenshtein # github.com/apparentlymart/go-textseg v1.0.0 github.com/apparentlymart/go-textseg/textseg -# github.com/aws/aws-sdk-go v1.36.12 +# github.com/aws/aws-sdk-go v1.36.19 ## explicit github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn From 1a65888daebb162f797b62cf69972b1937d02d86 Mon Sep 17 00:00:00 2001 From: Immo Stanke Date: Mon, 4 Jan 2021 16:50:09 +0100 Subject: [PATCH 0369/1212] docs/resource/aws_codeartifact_repository_permissions_policy: Fix typo (#16954) --- .../r/codeartifact_repository_permissions_policy.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/codeartifact_repository_permissions_policy.html.markdown b/website/docs/r/codeartifact_repository_permissions_policy.html.markdown index 42eee4bba5c..8c0a7e33f8a 100644 --- a/website/docs/r/codeartifact_repository_permissions_policy.html.markdown +++ b/website/docs/r/codeartifact_repository_permissions_policy.html.markdown @@ -28,7 +28,7 @@ resource "aws_codeartifact_repository" "example" { } resource "aws_codeartifact_repository_permissions_policy" "example" { - repository = aws_codeartifact_repository.example.repsitory + repository = aws_codeartifact_repository.example.repository domain = aws_codeartifact_domain.example.domain policy_document = < Date: Mon, 4 Jan 2021 11:13:15 -0500 Subject: [PATCH 0370/1212] Update aws/resource_aws_imagebuilder_image_recipe.go --- aws/resource_aws_imagebuilder_image_recipe.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_imagebuilder_image_recipe.go b/aws/resource_aws_imagebuilder_image_recipe.go index 45f9e106a64..d37b44cc2c0 100644 --- a/aws/resource_aws_imagebuilder_image_recipe.go +++ b/aws/resource_aws_imagebuilder_image_recipe.go @@ -174,7 +174,7 @@ func resourceAwsImageBuilderImageRecipe() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validation.StringLenBetween(1, 128), + ValidateFunc: validation.StringLenBetween(1, 1024), }, }, } From c7641556de36a345d8d2d2bd3072ba642d75c0df Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Mon, 4 Jan 2021 11:24:01 -0500 Subject: [PATCH 0371/1212] service/imagebuilder: Review items from #16947 Reference: https://github.com/hashicorp/terraform-provider-aws/pull/16947#pullrequestreview-561149655 Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAwsImageBuilderImageRecipe_disappears (31.42s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_Encrypted (34.85s) --- PASS: TestAccAwsImageBuilderImageRecipe_basic (35.02s) --- PASS: 
TestAccAwsImageBuilderImageRecipe_Description (37.85s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_Iops (38.14s) --- PASS: TestAccAwsImageBuilderImageRecipe_WorkingDirectory (38.17s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_VirtualName (38.18s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_DeviceName (38.24s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_VolumeSize (38.32s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_NoDevice (38.39s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_DeleteOnTermination (38.44s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_VolumeType (38.44s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_KmsKeyId (39.35s) --- PASS: TestAccAwsImageBuilderImageRecipe_Component (39.39s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_SnapshotId (48.07s) --- PASS: TestAccAwsImageBuilderImageRecipe_Tags (64.35s) --- PASS: TestAccAwsImageBuilderImageRecipeDataSource_Arn (27.89s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- PASS: TestAccAwsImageBuilderImageRecipe_disappears (37.78s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_VirtualName (40.06s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_Encrypted (40.46s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_VolumeSize (40.52s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_VolumeType (40.68s) --- PASS: TestAccAwsImageBuilderImageRecipe_basic (41.90s) --- PASS: TestAccAwsImageBuilderImageRecipe_Description (42.30s) --- PASS: TestAccAwsImageBuilderImageRecipe_WorkingDirectory (42.46s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_DeleteOnTermination (42.62s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_NoDevice (42.64s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_DeviceName (42.82s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_KmsKeyId (43.29s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_Iops (43.30s) --- PASS: TestAccAwsImageBuilderImageRecipe_Component (44.64s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_SnapshotId (49.57s) --- PASS: TestAccAwsImageBuilderImageRecipe_Tags (73.70s) --- PASS: TestAccAwsImageBuilderImageRecipeDataSource_Arn (30.79s) ``` --- ...urce_aws_imagebuilder_image_recipe_test.go | 2 +- ...urce_aws_imagebuilder_image_recipe_test.go | 42 +++++++++++++++++++ .../d/imagebuilder_image_recipe.html.markdown | 3 +- 3 files changed, 45 insertions(+), 2 deletions(-) diff --git a/aws/data_source_aws_imagebuilder_image_recipe_test.go b/aws/data_source_aws_imagebuilder_image_recipe_test.go index 73cde70f0b1..205e872dc0e 100644 --- a/aws/data_source_aws_imagebuilder_image_recipe_test.go +++ b/aws/data_source_aws_imagebuilder_image_recipe_test.go @@ -32,6 +32,7 @@ func TestAccAwsImageBuilderImageRecipeDataSource_Arn(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName, "platform", resourceName, "platform"), resource.TestCheckResourceAttrPair(dataSourceName, "tags.%", resourceName, "tags.%"), resource.TestCheckResourceAttrPair(dataSourceName, "version", resourceName, "version"), + resource.TestCheckResourceAttrPair(dataSourceName, "working_directory", resourceName, "working_directory"), ), }, }, @@ -59,7 +60,6 @@ resource "aws_imagebuilder_component" "test" { }] schemaVersion = 1.0 }) - working_directory = "/tmp" 
name = %[1]q platform = "Linux" version = "1.0.0" diff --git a/aws/resource_aws_imagebuilder_image_recipe_test.go b/aws/resource_aws_imagebuilder_image_recipe_test.go index 8d961ee1bea..39a19483cbf 100644 --- a/aws/resource_aws_imagebuilder_image_recipe_test.go +++ b/aws/resource_aws_imagebuilder_image_recipe_test.go @@ -506,6 +506,31 @@ func TestAccAwsImageBuilderImageRecipe_Tags(t *testing.T) { }) } +func TestAccAwsImageBuilderImageRecipe_WorkingDirectory(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_imagebuilder_image_recipe.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsImageBuilderImageRecipeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsImageBuilderImageRecipeConfigWorkingDirectory(rName, "/tmp"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsImageBuilderImageRecipeExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "working_directory", "/tmp"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckAwsImageBuilderImageRecipeDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).imagebuilderconn @@ -920,3 +945,20 @@ resource "aws_imagebuilder_image_recipe" "test" { } `, rName, tagKey1, tagValue1, tagKey2, tagValue2)) } + +func testAccAwsImageBuilderImageRecipeConfigWorkingDirectory(rName string, workingDirectory string) string { + return composeConfig( + testAccAwsImageBuilderImageRecipeConfigBase(rName), + fmt.Sprintf(` +resource "aws_imagebuilder_image_recipe" "test" { + component { + component_arn = aws_imagebuilder_component.test.arn + } + + name = %[1]q + parent_image = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:image/amazon-linux-2-x86/x.x.x" + version = "1.0.0" + working_directory = %[2]q +} +`, rName, workingDirectory)) +} diff --git a/website/docs/d/imagebuilder_image_recipe.html.markdown b/website/docs/d/imagebuilder_image_recipe.html.markdown index c4b2932ab92..a4d753fd380 100644 --- a/website/docs/d/imagebuilder_image_recipe.html.markdown +++ b/website/docs/d/imagebuilder_image_recipe.html.markdown @@ -48,5 +48,6 @@ In addition to all arguments above, the following attributes are exported: * `owner` - Owner of the image recipe. * `parent_image` - Platform of the image recipe. * `platform` - Platform of the image recipe. -* `tags` - (Optional) Key-value map of resource tags for the image recipe. +* `tags` - Key-value map of resource tags for the image recipe. * `version` - Version of the image recipe. +* `working_directory` - The working directory used during build and test workflows. 
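Taken together, the recipe changes above add a single optional, ForceNew string argument; a minimal configuration sketch showing it in context, mirroring the acceptance-test config (the component resource and the `aws_partition`/`aws_region` data sources are assumed to exist elsewhere in the configuration):

```hcl
resource "aws_imagebuilder_image_recipe" "example" {
  name              = "example"
  parent_image      = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:image/amazon-linux-2-x86/x.x.x"
  version           = "1.0.0"
  working_directory = "/tmp"

  component {
    component_arn = aws_imagebuilder_component.example.arn
  }
}
```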
From ed6bf2f922277922197d0a1d8371c0bb958638e7 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Mon, 4 Jan 2021 11:27:00 -0500 Subject: [PATCH 0372/1212] Update CHANGELOG for #16947 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 90e2e5191b8..813a6415837 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ ENHANCEMENTS +* data-source/aws_imagebuilder_image_recipe: Add `working_directory` attribute [GH-16947] +* resource/aws_imagebuilder_image_recipe: Add `working_directory` argument [GH-16947] * resource/aws_kinesis_stream: Update `retention_period` argument plan-time validation to include up to 8760 hours [GH-16608] BUG FIXES From 401f96f991150e6c360171dbb9ac0fc6c00783e3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Jan 2021 11:37:03 -0500 Subject: [PATCH 0373/1212] build(deps): bump alex-page/github-project-automation-plus (#16764) Bumps [alex-page/github-project-automation-plus](https://github.com/alex-page/github-project-automation-plus) from v0.3.0 to v0.5.1. - [Release notes](https://github.com/alex-page/github-project-automation-plus/releases) - [Commits](https://github.com/alex-page/github-project-automation-plus/compare/v0.3.0...e930d2bbdb89d76d29169f8f5544cd12554fc08e) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/project.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/project.yml b/.github/workflows/project.yml index 6cef673288d..32e5a977c9c 100644 --- a/.github/workflows/project.yml +++ b/.github/workflows/project.yml @@ -7,7 +7,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Move team PRs to Review column - uses: alex-page/github-project-automation-plus@v0.3.0 + uses: alex-page/github-project-automation-plus@v0.5.1 if: contains(fromJSON('["anGie44", "bflad", "bill-rich", "breathingdust", "gdavison", "maryelizbeth", "YakDriver"]'), github.actor) && github.event.pull_request.draft == false with: project: AWS Provider Working Board From ce0579c30daaf152d6c8836414d9f1acbce7c782 Mon Sep 17 00:00:00 2001 From: Sijmen Date: Mon, 4 Jan 2021 19:11:39 +0100 Subject: [PATCH 0374/1212] docs/provider: Fix broken links to terraform docs (#16956) Co-authored-by: Brian Flad Co-authored-by: Brian Flad --- website/docs/d/instance.html.markdown | 2 +- website/docs/d/kms_ciphertext.html.markdown | 2 +- website/docs/d/ssm_parameter.html.markdown | 2 +- website/docs/guides/resource-tagging.html.md | 6 +++--- website/docs/guides/version-3-upgrade.html.md | 20 +++++++++---------- website/docs/index.html.markdown | 6 +++--- website/docs/r/acm_certificate.html.markdown | 2 +- .../acm_certificate_validation.html.markdown | 2 +- ...acmpca_certificate_authority.html.markdown | 2 +- .../r/api_gateway_deployment.html.markdown | 4 ++-- .../r/api_gateway_domain_name.html.markdown | 2 +- .../docs/r/api_gateway_rest_api.html.markdown | 2 +- .../api_gateway_rest_api_policy.html.markdown | 2 +- .../docs/r/api_gateway_stage.html.markdown | 2 +- website/docs/r/apigatewayv2_api.html.markdown | 2 +- .../r/apigatewayv2_deployment.html.markdown | 4 ++-- .../r/apigatewayv2_domain_name.html.markdown | 2 +- .../r/autoscaling_attachment.html.markdown | 2 +- .../docs/r/autoscaling_group.html.markdown | 4 ++-- .../docs/r/cloudformation_stack.html.markdown | 2 +- .../r/cloudformation_stack_set.html.markdown | 2 +- ...formation_stack_set_instance.html.markdown | 2 +- 
.../r/cloudwatch_event_target.html.markdown | 4 ++-- website/docs/r/cognito_user_pool.markdown | 2 +- ...fig_organization_custom_rule.html.markdown | 2 +- ...ig_organization_managed_rule.html.markdown | 2 +- website/docs/r/datasync_agent.html.markdown | 2 +- website/docs/r/datasync_task.html.markdown | 2 +- website/docs/r/dax_cluster.html.markdown | 2 +- .../docs/r/db_cluster_snapshot.html.markdown | 2 +- .../r/db_event_subscription.html.markdown | 2 +- website/docs/r/db_instance.html.markdown | 6 +++--- website/docs/r/db_option_group.html.markdown | 2 +- website/docs/r/db_proxy.html.markdown | 2 +- ...b_proxy_default_target_group.html.markdown | 2 +- website/docs/r/db_snapshot.html.markdown | 2 +- .../docs/r/default_route_table.html.markdown | 2 +- .../directory_service_directory.html.markdown | 2 +- website/docs/r/dms_certificate.html.markdown | 2 +- website/docs/r/dms_endpoint.html.markdown | 2 +- .../r/dms_event_subscription.html.markdown | 2 +- .../r/dms_replication_instance.html.markdown | 2 +- website/docs/r/docdb_cluster.html.markdown | 4 ++-- .../r/docdb_cluster_instance.html.markdown | 4 ++-- .../r/docdb_cluster_snapshot.html.markdown | 2 +- website/docs/r/dx_bgp_peer.html.markdown | 2 +- website/docs/r/dx_gateway.html.markdown | 2 +- .../r/dx_gateway_association.html.markdown | 2 +- ...ed_private_virtual_interface.html.markdown | 2 +- ...e_virtual_interface_accepter.html.markdown | 2 +- ...ted_public_virtual_interface.html.markdown | 2 +- ...c_virtual_interface_accepter.html.markdown | 2 +- ...ed_transit_virtual_interface.html.markdown | 2 +- ...t_virtual_interface_accepter.html.markdown | 2 +- ...dx_private_virtual_interface.html.markdown | 2 +- .../dx_public_virtual_interface.html.markdown | 2 +- ...dx_transit_virtual_interface.html.markdown | 2 +- website/docs/r/dynamodb_table.html.markdown | 2 +- website/docs/r/ebs_snapshot.html.markdown | 2 +- website/docs/r/ec2_fleet.html.markdown | 2 +- website/docs/r/ecr_repository.html.markdown | 2 +- website/docs/r/ecs_service.html.markdown | 4 ++-- website/docs/r/efs_mount_target.html.markdown | 2 +- website/docs/r/eip.html.markdown | 2 +- website/docs/r/eks_cluster.html.markdown | 8 ++++---- .../docs/r/eks_fargate_profile.html.markdown | 2 +- website/docs/r/eks_node_group.html.markdown | 4 ++-- ...lasticache_replication_group.html.markdown | 4 ++-- .../docs/r/elasticsearch_domain.html.markdown | 2 +- website/docs/r/emr_cluster.html.markdown | 6 +++--- .../r/fsx_lustre_file_system.html.markdown | 4 ++-- .../r/fsx_windows_file_system.html.markdown | 4 ++-- website/docs/r/gamelift_fleet.html.markdown | 2 +- website/docs/r/glue_trigger.html.markdown | 2 +- .../r/guardduty_invite_accepter.html.markdown | 2 +- website/docs/r/guardduty_member.html.markdown | 2 +- website/docs/r/iam_role.html.markdown | 2 +- .../r/iam_server_certificate.html.markdown | 2 +- .../r/iam_user_login_profile.html.markdown | 2 +- website/docs/r/instance.html.markdown | 2 +- website/docs/r/kinesis_stream.html.markdown | 2 +- .../docs/r/kinesis_video_stream.html.markdown | 2 +- website/docs/r/kms_ciphertext.html.markdown | 2 +- website/docs/r/kms_external_key.html.markdown | 2 +- website/docs/r/lambda_function.html.markdown | 2 +- ...ovisioned_concurrency_config.html.markdown | 2 +- .../docs/r/launch_configuration.html.markdown | 4 ++-- website/docs/r/lb.html.markdown | 2 +- website/docs/r/mq_broker.html.markdown | 2 +- website/docs/r/neptune_cluster.html.markdown | 2 +- .../r/neptune_cluster_instance.html.markdown | 4 ++-- 
.../r/neptune_cluster_snapshot.html.markdown | 2 +- .../neptune_event_subscription.html.markdown | 2 +- website/docs/r/network_acl.html.markdown | 4 ++-- .../docs/r/opsworks_instance.html.markdown | 4 ++-- .../docs/r/opsworks_mysql_layer.html.markdown | 2 +- .../r/opsworks_rds_db_instance.html.markdown | 2 +- .../r/organizations_account.html.markdown | 4 ++-- website/docs/r/pinpoint_adm_channel.markdown | 2 +- website/docs/r/pinpoint_apns_channel.markdown | 2 +- .../r/pinpoint_apns_sandbox_channel.markdown | 2 +- .../r/pinpoint_apns_voip_channel.markdown | 2 +- ...inpoint_apns_voip_sandbox_channel.markdown | 2 +- .../docs/r/pinpoint_baidu_channel.markdown | 2 +- website/docs/r/pinpoint_gcm_channel.markdown | 2 +- website/docs/r/rds_cluster.html.markdown | 8 ++++---- .../docs/r/rds_cluster_instance.html.markdown | 6 +++--- .../docs/r/rds_global_cluster.html.markdown | 2 +- website/docs/r/redshift_cluster.html.markdown | 4 ++-- website/docs/r/route.html.markdown | 2 +- .../r/route53_resolver_endpoint.html.markdown | 2 +- website/docs/r/route53_zone.html.markdown | 2 +- .../r/route53_zone_association.html.markdown | 2 +- website/docs/r/route_table.html.markdown | 2 +- website/docs/r/security_group.html.markdown | 6 +++--- ...domain_identity_verification.html.markdown | 2 +- website/docs/r/ssm_document.html.markdown | 2 +- website/docs/r/ssm_parameter.html.markdown | 2 +- .../r/storagegateway_gateway.html.markdown | 4 ++-- ...toragegateway_nfs_file_share.html.markdown | 2 +- ...toragegateway_smb_file_share.html.markdown | 2 +- website/docs/r/subnet.html.markdown | 2 +- website/docs/r/vpc_endpoint.html.markdown | 2 +- ..._endpoint_subnet_association.html.markdown | 2 +- ..._ipv4_cidr_block_association.html.markdown | 2 +- .../r/vpc_peering_connection.html.markdown | 2 +- ..._peering_connection_accepter.html.markdown | 2 +- website/docs/r/vpn_connection.html.markdown | 2 +- .../docs/r/workspaces_workspace.html.markdown | 2 +- 129 files changed, 174 insertions(+), 174 deletions(-) diff --git a/website/docs/d/instance.html.markdown b/website/docs/d/instance.html.markdown index 9d9089df438..11e83a511fc 100644 --- a/website/docs/d/instance.html.markdown +++ b/website/docs/d/instance.html.markdown @@ -111,7 +111,7 @@ interpolation. * `subnet_id` - The VPC subnet ID. * `outpost_arn` - The Amazon Resource Name (ARN) of the Outpost. * `user_data` - SHA-1 hash of User Data supplied to the Instance. -* `user_data_base64` - Base64 encoded contents of User Data supplied to the Instance. Valid UTF-8 contents can be decoded with the [`base64decode` function](/docs/configuration/functions/base64decode.html). This attribute is only exported if `get_user_data` is true. +* `user_data_base64` - Base64 encoded contents of User Data supplied to the Instance. Valid UTF-8 contents can be decoded with the [`base64decode` function](https://www.terraform.io/docs/configuration/functions/base64decode.html). This attribute is only exported if `get_user_data` is true. * `tags` - A map of tags assigned to the Instance. * `tenancy` - The tenancy of the instance: `dedicated`, `default`, `host`. * `host_id` - The Id of the dedicated host the instance will be assigned to. diff --git a/website/docs/d/kms_ciphertext.html.markdown b/website/docs/d/kms_ciphertext.html.markdown index e7b9c50c426..5f02b4fb020 100644 --- a/website/docs/d/kms_ciphertext.html.markdown +++ b/website/docs/d/kms_ciphertext.html.markdown @@ -14,7 +14,7 @@ changes every apply. 
For a stable ciphertext value, see the [`aws_kms_ciphertext resource](/docs/providers/aws/r/kms_ciphertext.html). ~> **Note:** All arguments including the plaintext be stored in the raw state as plain-text. -[Read more about sensitive data in state](/docs/state/sensitive-data.html). +[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). ## Example Usage diff --git a/website/docs/d/ssm_parameter.html.markdown b/website/docs/d/ssm_parameter.html.markdown index cd42f7cb08c..44ea48185dd 100644 --- a/website/docs/d/ssm_parameter.html.markdown +++ b/website/docs/d/ssm_parameter.html.markdown @@ -19,7 +19,7 @@ data "aws_ssm_parameter" "foo" { ``` ~> **Note:** The unencrypted value of a SecureString will be stored in the raw state as plain-text. -[Read more about sensitive data in state](/docs/state/sensitive-data.html). +[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). ~> **Note:** The data source is currently following the behavior of the [SSM API](https://docs.aws.amazon.com/sdk-for-go/api/service/ssm/#Parameter) to return a string value, regardless of parameter type. For type `StringList`, we can use the built-in [split()](https://www.terraform.io/docs/configuration/functions/split.html) function to get values in a list. Example: `split(",", data.aws_ssm_parameter.subnets.value)` diff --git a/website/docs/guides/resource-tagging.html.md b/website/docs/guides/resource-tagging.html.md index 2148b3952e0..f28afb7186c 100644 --- a/website/docs/guides/resource-tagging.html.md +++ b/website/docs/guides/resource-tagging.html.md @@ -38,7 +38,7 @@ resource "aws_vpc" "example" { The tags for the resource are wholly managed by Terraform except tag keys beginning with `aws:` as these are managed by AWS services and cannot typically be edited or deleted. Any non-AWS tags added to the VPC outside of Terraform will be proposed for removal on the next Terraform execution. Missing tags or those with incorrect values from the Terraform configuration will be proposed for addition or update on the next Terraform execution. Advanced patterns that can adjust these behaviors for special use cases, such as Terraform AWS Provider configurations that affect all resources and the ability to manage resource tags for resources not managed by Terraform, can be found later in this guide. -For most environments and use cases, this is the typical implementation pattern, whether it be in a standalone Terraform configuration or within a [Terraform Module](/docs/modules/). The Terraform configuration language also enables less repetitive configurations via [variables](/docs/configuration/variables.html), [locals](/docs/configuration/locals.html), or potentially a combination of these, e.g. +For most environments and use cases, this is the typical implementation pattern, whether it be in a standalone Terraform configuration or within a [Terraform Module](https://www.terraform.io/docs/modules/). The Terraform configuration language also enables less repetitive configurations via [variables](https://www.terraform.io/docs/configuration/variables.html), [locals](https://www.terraform.io/docs/configuration/locals.html), or potentially a combination of these, e.g. 
```hcl # Terraform 0.12 and later syntax @@ -67,7 +67,7 @@ Systems outside of Terraform may automatically interact with the tagging associa ### Ignoring Changes in Individual Resources -All Terraform resources support the [`lifecycle` configuration block `ignore_changes` argument](/docs/configuration/resources.html#ignore_changes), which can be used to explicitly ignore all tags changes on a resource beyond an initial configuration or individual tag values. +All Terraform resources support the [`lifecycle` configuration block `ignore_changes` argument](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes), which can be used to explicitly ignore all tags changes on a resource beyond an initial configuration or individual tag values. In this example, the `Name` tag will be added to the VPC on resource creation, however any external changes to the `Name` tag value or the addition/removal of any tag (including the `Name` tag) will be ignored: @@ -155,7 +155,7 @@ resource "aws_ec2_tag" "example" { } ``` -To manage multiple tags for a resource in this scenario, [`for_each`](/docs/configuration/resources.html#for_each-multiple-resource-instances-defined-by-a-map-or-set-of-strings) can be used: +To manage multiple tags for a resource in this scenario, [`for_each`](https://www.terraform.io/docs/configuration/meta-arguments/for_each.html) can be used: ```hcl # Terraform 0.12 and later syntax diff --git a/website/docs/guides/version-3-upgrade.html.md b/website/docs/guides/version-3-upgrade.html.md index a889be66ad0..ba3c4896032 100644 --- a/website/docs/guides/version-3-upgrade.html.md +++ b/website/docs/guides/version-3-upgrade.html.md @@ -267,7 +267,7 @@ resources that the for_each depends on. The `domain_validation_options` attribute is now a set type and the resource will attempt to populate the information necessary during the planning phase to handle the above situation in most environments without workarounds. This change also prevents Terraform from showing unexpected differences if the API returns the results in varying order. -Configuration references to this attribute will likely require updates since sets cannot be indexed (e.g. `domain_validation_options[0]` or the older `domain_validation_options.0.` syntax will return errors). If the `domain_validation_options` list previously contained only a single element like the two examples just shown, it may be possible to wrap these references using the [`tolist()` function](/docs/configuration/functions/tolist.html) (e.g. `tolist(aws_acm_certificate.example.domain_validation_options)[0]`) as a quick configuration update, however given the complexity and workarounds required with the previous `domain_validation_options` attribute implementation, different environments will require different configuration updates and migration steps. Below is a more advanced example. Further questions on potential update steps can be submitted to the [community forums](https://discuss.hashicorp.com/c/terraform-providers/tf-aws/33). +Configuration references to this attribute will likely require updates since sets cannot be indexed (e.g. `domain_validation_options[0]` or the older `domain_validation_options.0.` syntax will return errors). If the `domain_validation_options` list previously contained only a single element like the two examples just shown, it may be possible to wrap these references using the [`tolist()` function](https://www.terraform.io/docs/configuration/functions/tolist.html) (e.g. 
`tolist(aws_acm_certificate.example.domain_validation_options)[0]`) as a quick configuration update, however given the complexity and workarounds required with the previous `domain_validation_options` attribute implementation, different environments will require different configuration updates and migration steps. Below is a more advanced example. Further questions on potential update steps can be submitted to the [community forums](https://discuss.hashicorp.com/c/terraform-providers/tf-aws/33). For example, given this previous configuration using a `count` based resource approach that may have been used in certain environments: @@ -318,7 +318,7 @@ Error: Invalid index This value does not have any indices. ``` -Since the `domain_validation_options` attribute changed from a list to a set and sets cannot be indexed in Terraform, the recommendation is to update the configuration to use the more stable [resource `for_each` support](/docs/configuration/resources.html#for_each-multiple-resource-instances-defined-by-a-map-or-set-of-strings) instead of [`count`](/docs/configuration/resources.html#count-multiple-resource-instances-by-count). Note the slight change in the `validation_record_fqdns` syntax as well. +Since the `domain_validation_options` attribute changed from a list to a set and sets cannot be indexed in Terraform, the recommendation is to update the configuration to use the more stable [resource `for_each` support](https://www.terraform.io/docs/configuration/meta-arguments/for_each.html) instead of [`count`](https://www.terraform.io/docs/configuration/meta-arguments/count.html). Note the slight change in the `validation_record_fqdns` syntax as well. ```hcl resource "aws_route53_record" "existing" { @@ -480,11 +480,11 @@ Terraform will perform the following actions: Plan: 5 to add, 0 to change, 5 to destroy. ``` -Due to the type of configuration change, Terraform does not know that the previous `aws_route53_record` resources (indexed by number in the existing state) and the new resources (indexed by domain names in the updated configuration) are equivalent. Typically in this situation, the [`terraform state mv` command](/docs/commands/state/mv.html) can be used to reduce the plan to show no changes. This is done by associating the count index (e.g. `[1]`) with the equivalent domain name index (e.g. `["existing2.example.com"]`), making one of the four commands to fix the above example: `terraform state mv 'aws_route53_record.existing[1]' 'aws_route53_record.existing["existing2.example.com"]'`. It is recommended to use this `terraform state mv` update process where possible to reduce chances of unexpected behaviors or changes in an environment. +Due to the type of configuration change, Terraform does not know that the previous `aws_route53_record` resources (indexed by number in the existing state) and the new resources (indexed by domain names in the updated configuration) are equivalent. Typically in this situation, the [`terraform state mv` command](https://www.terraform.io/docs/commands/state/mv.html) can be used to reduce the plan to show no changes. This is done by associating the count index (e.g. `[1]`) with the equivalent domain name index (e.g. `["existing2.example.com"]`), making one of the four commands to fix the above example: `terraform state mv 'aws_route53_record.existing[1]' 'aws_route53_record.existing["existing2.example.com"]'`. 
It is recommended to use this `terraform state mv` update process where possible to reduce chances of unexpected behaviors or changes in an environment. If using `terraform state mv` to reduce the plan to show no changes, no additional steps are required. -In larger or more complex environments though, this process can be tedius to match the old resource address to the new resource address and run all the necessary `terraform state mv` commands. Instead, since the `aws_route53_record` resource implements the `allow_overwrite = true` argument, it is possible to just remove the old `aws_route53_record` resources from the Terraform state using the [`terraform state rm` command](/docs/commands/state/rm.html). In this case, Terraform will leave the existing records in Route 53 and plan to just overwrite the existing validation records with the same exact (previous) values. +In larger or more complex environments though, this process can be tedious to match the old resource address to the new resource address and run all the necessary `terraform state mv` commands. Instead, since the `aws_route53_record` resource implements the `allow_overwrite = true` argument, it is possible to just remove the old `aws_route53_record` resources from the Terraform state using the [`terraform state rm` command](https://www.terraform.io/docs/commands/state/rm.html). In this case, Terraform will leave the existing records in Route 53 and plan to just overwrite the existing validation records with the same exact (previous) values. -> This guide is showing the simpler `terraform state rm` option below as a potential shortcut in this specific situation, however in most other cases `terraform state mv` is required to change from `count` based resources to `for_each` based resources and properly match the existing Terraform state to the updated Terraform configuration. @@ -623,7 +623,7 @@ resource "aws_autoscaling_group" "example" { } ``` -If `aws_autoscaling_attachment` resources reference your ASG configurations, you will need to add the [`lifecycle` configuration block](/docs/configuration/resources.html#lifecycle-lifecycle-customizations) with an `ignore_changes` argument to prevent Terraform non-empty plans (i.e. forcing resource update) during the next state refresh. +If `aws_autoscaling_attachment` resources reference your ASG configurations, you will need to add the [`lifecycle` configuration block](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html) with an `ignore_changes` argument to prevent Terraform non-empty plans (i.e. forcing resource update) during the next state refresh. For example, given this previous configuration: @@ -843,7 +843,7 @@ resource "aws_cognito_user_pool" "example" { ### Removal of Automatic aws_dx_gateway_association Import -Previously when importing the `aws_dx_gateway` resource with the [`terraform import` command](/docs/commands/import.html), the Terraform AWS Provider would automatically attempt to import an associated `aws_dx_gateway_association` resource(s) as well. This automatic resource import has been removed. Use the [`aws_dx_gateway_association` resource import](/docs/providers/aws/r/dx_gateway_association.html#import) to import those resources separately. +Previously when importing the `aws_dx_gateway` resource with the [`terraform import` command](https://www.terraform.io/docs/commands/import.html), the Terraform AWS Provider would automatically attempt to import an associated `aws_dx_gateway_association` resource(s) as well.
This automatic resource import has been removed. Use the [`aws_dx_gateway_association` resource import](/docs/providers/aws/r/dx_gateway_association.html#import) to import those resources separately. ## Resource: aws_dx_gateway_association @@ -1070,7 +1070,7 @@ resource "aws_glue_job" "example" { ### ses_smtp_password Attribute Removal -In many regions today and in all regions after October 1, 2020, the [SES API will only accept version 4 signatures](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/using-ses-api-authentication.html). If referencing the `ses_smtp_password` attribute, switch your Terraform configuration to the `ses_smtp_password_v4` attribute instead. Please note that this signature is based on the region of the Terraform AWS Provider. If you need the SES v4 password in multiple regions, it may require using [multiple provider instances](/docs/configuration/providers.html#alias-multiple-provider-instances). +In many regions today and in all regions after October 1, 2020, the [SES API will only accept version 4 signatures](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/using-ses-api-authentication.html). If referencing the `ses_smtp_password` attribute, switch your Terraform configuration to the `ses_smtp_password_v4` attribute instead. Please note that this signature is based on the region of the Terraform AWS Provider. If you need the SES v4 password in multiple regions, it may require using [multiple provider instances](https://www.terraform.io/docs/configuration/providers.html#alias-multiple-provider-configurations). ## Resource: aws_iam_instance_profile @@ -1232,7 +1232,7 @@ While the returned value will omit the trailing period, use of configurations wi ### Removal of Automatic aws_s3_bucket_policy Import -Previously when importing the `aws_s3_bucket` resource with the [`terraform import` command](/docs/commands/import.html), the Terraform AWS Provider would automatically attempt to import an associated `aws_s3_bucket_policy` resource as well. This automatic resource import has been removed. Use the [`aws_s3_bucket_policy` resource import](/docs/providers/aws/r/s3_bucket_policy.html#import) to import that resource separately. +Previously when importing the `aws_s3_bucket` resource with the [`terraform import` command](https://www.terraform.io/docs/commands/import.html), the Terraform AWS Provider would automatically attempt to import an associated `aws_s3_bucket_policy` resource as well. This automatic resource import has been removed. Use the [`aws_s3_bucket_policy` resource import](/docs/providers/aws/r/s3_bucket_policy.html#import) to import that resource separately. ### region Attribute Is Now Read-Only @@ -1284,7 +1284,7 @@ resource "aws_s3_bucket_metric" "example" { ### Removal of Automatic aws_security_group_rule Import -Previously when importing the `aws_security_group` resource with the [`terraform import` command](/docs/commands/import.html), the Terraform AWS Provider would automatically attempt to import an associated `aws_security_group_rule` resource(s) as well. This automatic resource import has been removed. Use the [`aws_security_group_rule` resource import](/docs/providers/aws/r/security_group_rule.html#import) to import those resources separately. +Previously when importing the `aws_security_group` resource with the [`terraform import` command](https://www.terraform.io/docs/commands/import.html), the Terraform AWS Provider would automatically attempt to import an associated `aws_security_group_rule` resource(s) as well. 
This automatic resource import has been removed. Use the [`aws_security_group_rule` resource import](/docs/providers/aws/r/security_group_rule.html#import) to import those resources separately. ## Resource: aws_sns_platform_application @@ -1296,7 +1296,7 @@ Previously when the `platform_credential` and `platform_principal` arguments wer ### valid_until Argument No Longer Uses 24 Hour Default -Previously when the `valid_until` argument was not configured, the resource would default to a 24 hour request. This behavior has been removed and allows for non-expiring requests. To recreate the old behavior, the [`time_offset` resource](/docs/providers/time/r/offset.html) can potentially be used. +Previously when the `valid_until` argument was not configured, the resource would default to a 24 hour request. This behavior has been removed and allows for non-expiring requests. To recreate the old behavior, the [`time_offset` resource](https://registry.terraform.io/providers/hashicorp/time/latest/docs/resources/offset) can potentially be used. ## Resource: aws_ssm_maintenance_window_task diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index ba0bfc3cdb0..a40bde3c5a7 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -216,7 +216,7 @@ for more information about connecting to alternate AWS endpoints or AWS compatib potentially end up destroying a live environment). Conflicts with `allowed_account_ids`. -* `ignore_tags` - (Optional) Configuration block with resource tag settings to ignore across all resources handled by this provider (except any individual service tag resources such as `aws_ec2_tag`) for situations where external systems are managing certain resource tags. Arguments to the configuration block are described below in the `ignore_tags` Configuration Block section. See the [Terraform multiple provider instances documentation](/docs/configuration/providers.html#alias-multiple-provider-instances) for more information about additional provider configurations. +* `ignore_tags` - (Optional) Configuration block with resource tag settings to ignore across all resources handled by this provider (except any individual service tag resources such as `aws_ec2_tag`) for situations where external systems are managing certain resource tags. Arguments to the configuration block are described below in the `ignore_tags` Configuration Block section. See the [Terraform multiple provider instances documentation](https://www.terraform.io/docs/configuration/providers.html#alias-multiple-provider-configurations) for more information about additional provider configurations. * `insecure` - (Optional) Explicitly allow the provider to perform "insecure" SSL requests. If omitted, the default value is `false`. @@ -388,8 +388,8 @@ provider "aws" { The `ignore_tags` configuration block supports the following arguments: -* `keys` - (Optional) List of exact resource tag keys to ignore across all resources handled by this provider. This configuration prevents Terraform from returning the tag in any `tags` attributes and displaying any configuration difference for the tag value. If any resource configuration still has this tag key configured in the `tags` argument, it will display a perpetual difference until the tag is removed from the argument or [`ignore_changes`](/docs/configuration/resources.html#ignore_changes) is also used. -* `key_prefixes` - (Optional) List of resource tag key prefixes to ignore across all resources handled by this provider. 
This configuration prevents Terraform from returning any tag key matching the prefixes in any `tags` attributes and displaying any configuration difference for those tag values. If any resource configuration still has a tag matching one of the prefixes configured in the `tags` argument, it will display a perpetual difference until the tag is removed from the argument or [`ignore_changes`](/docs/configuration/resources.html#ignore_changes) is also used. +* `keys` - (Optional) List of exact resource tag keys to ignore across all resources handled by this provider. This configuration prevents Terraform from returning the tag in any `tags` attributes and displaying any configuration difference for the tag value. If any resource configuration still has this tag key configured in the `tags` argument, it will display a perpetual difference until the tag is removed from the argument or [`ignore_changes`](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) is also used. +* `key_prefixes` - (Optional) List of resource tag key prefixes to ignore across all resources handled by this provider. This configuration prevents Terraform from returning any tag key matching the prefixes in any `tags` attributes and displaying any configuration difference for those tag values. If any resource configuration still has a tag matching one of the prefixes configured in the `tags` argument, it will display a perpetual difference until the tag is removed from the argument or [`ignore_changes`](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) is also used. ## Getting the Account ID diff --git a/website/docs/r/acm_certificate.html.markdown b/website/docs/r/acm_certificate.html.markdown index 8d1346eabc9..7e81a6b83a1 100644 --- a/website/docs/r/acm_certificate.html.markdown +++ b/website/docs/r/acm_certificate.html.markdown @@ -142,7 +142,7 @@ Domain validation objects export the following attributes: * `resource_record_type` - The type of DNS record to create * `resource_record_value` - The value the DNS record needs to have -[1]: /docs/configuration/resources.html#lifecycle +[1]: https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html ## Import diff --git a/website/docs/r/acm_certificate_validation.html.markdown b/website/docs/r/acm_certificate_validation.html.markdown index 1cd5fdb019b..ea7ddc68acf 100644 --- a/website/docs/r/acm_certificate_validation.html.markdown +++ b/website/docs/r/acm_certificate_validation.html.markdown @@ -141,7 +141,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`acm_certificate_validation` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) +`acm_certificate_validation` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `45m`) How long to wait for a certificate to be issued. 
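-> For readers of the `website/docs/index.html.markdown` hunks above: a minimal sketch of how the `ignore_tags` provider block is configured, assuming AWS provider 3.x syntax. The key names below are hypothetical placeholders, not values from this patch.

```hcl
provider "aws" {
  region = "us-east-1"

  # Tags with these exact keys or key prefixes are ignored provider-wide,
  # so Terraform reports no drift for externally managed tags.
  ignore_tags {
    keys         = ["LastScanned"]    # hypothetical exact key
    key_prefixes = ["kubernetes.io/"] # hypothetical key prefix
  }
}
```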
diff --git a/website/docs/r/acmpca_certificate_authority.html.markdown b/website/docs/r/acmpca_certificate_authority.html.markdown index 79b8c3edea9..205a6663156 100644 --- a/website/docs/r/acmpca_certificate_authority.html.markdown +++ b/website/docs/r/acmpca_certificate_authority.html.markdown @@ -149,7 +149,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_acmpca_certificate_authority` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) +`aws_acmpca_certificate_authority` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `create` - (Default `1m`) How long to wait for a certificate authority to be created. diff --git a/website/docs/r/api_gateway_deployment.html.markdown b/website/docs/r/api_gateway_deployment.html.markdown index 4b5fd322ef2..950c48ef755 100644 --- a/website/docs/r/api_gateway_deployment.html.markdown +++ b/website/docs/r/api_gateway_deployment.html.markdown @@ -10,7 +10,7 @@ description: |- Provides an API Gateway REST Deployment. -~> **Note:** This resource depends on having at least one `aws_api_gateway_integration` created in the REST API, which itself has other dependencies. To avoid race conditions when all resources are being created together, you need to add implicit resource references via the `triggers` argument or explicit resource references using the [resource `depends_on` meta-argument](/docs/configuration/resources.html#depends_on-explicit-resource-dependencies). +~> **Note:** This resource depends on having at least one `aws_api_gateway_integration` created in the REST API, which itself has other dependencies. To avoid race conditions when all resources are being created together, you need to add implicit resource references via the `triggers` argument or explicit resource references using the [resource `depends_on` meta-argument](https://www.terraform.io/docs/configuration/meta-arguments/depends_on.html). -> It is recommended to enable the [resource `lifecycle` configuration block `create_before_destroy` argument](https://www.terraform.io/docs/configuration/resources.html#create_before_destroy) in this resource configuration to properly order redeployments in Terraform. @@ -87,7 +87,7 @@ The following arguments are supported: * `stage_name` - (Optional) The name of the stage. If the specified stage already exists, it will be updated to point to the new deployment. If the stage does not exist, a new one will be created and point to this deployment. * `description` - (Optional) The description of the deployment * `stage_description` - (Optional) The description of the stage -* `triggers` - (Optional) A map of arbitrary keys and values that, when changed, will trigger a redeployment. To force a redeployment without changing these keys/values, use the [`terraform taint` command](/docs/commands/taint.html). +* `triggers` - (Optional) A map of arbitrary keys and values that, when changed, will trigger a redeployment. To force a redeployment without changing these keys/values, use the [`terraform taint` command](https://www.terraform.io/docs/commands/taint.html). 
* `variables` - (Optional) A map that defines variables for the stage ## Attributes Reference diff --git a/website/docs/r/api_gateway_domain_name.html.markdown b/website/docs/r/api_gateway_domain_name.html.markdown index 36b65c21ab2..eee2427b32b 100644 --- a/website/docs/r/api_gateway_domain_name.html.markdown +++ b/website/docs/r/api_gateway_domain_name.html.markdown @@ -36,7 +36,7 @@ from the validation resource where it will be available after the resource creat `regional_certificate_arn = aws_acm_certificate_validation.cert.certificate_arn`. ~> **Note:** All arguments including the private key will be stored in the raw state as plain-text. -[Read more about sensitive data in state](/docs/state/sensitive-data.html). +[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). ## Example Usage diff --git a/website/docs/r/api_gateway_rest_api.html.markdown b/website/docs/r/api_gateway_rest_api.html.markdown index e18cfba9284..46cb85b9664 100644 --- a/website/docs/r/api_gateway_rest_api.html.markdown +++ b/website/docs/r/api_gateway_rest_api.html.markdown @@ -10,7 +10,7 @@ description: |- Provides an API Gateway REST API. --> **Note:** Amazon API Gateway Version 1 resources are used for creating and deploying REST APIs. To create and deploy WebSocket and HTTP APIs, use Amazon API Gateway Version 2 [resources](https://www.terraform.io/docs/providers/aws/r/apigatewayv2_api.html). +-> **Note:** Amazon API Gateway Version 1 resources are used for creating and deploying REST APIs. To create and deploy WebSocket and HTTP APIs, use Amazon API Gateway Version 2 [resources](/docs/providers/aws/r/apigatewayv2_api.html). ## Example Usage diff --git a/website/docs/r/api_gateway_rest_api_policy.html.markdown b/website/docs/r/api_gateway_rest_api_policy.html.markdown index c01f3d0c857..3e18dbc0d7f 100644 --- a/website/docs/r/api_gateway_rest_api_policy.html.markdown +++ b/website/docs/r/api_gateway_rest_api_policy.html.markdown @@ -10,7 +10,7 @@ description: |- Provides an API Gateway REST API Policy. --> **Note:** Amazon API Gateway Version 1 resources are used for creating and deploying REST APIs. To create and deploy WebSocket and HTTP APIs, use Amazon API Gateway Version 2 [resources](https://www.terraform.io/docs/providers/aws/r/apigatewayv2_api.html). +-> **Note:** Amazon API Gateway Version 1 resources are used for creating and deploying REST APIs. To create and deploy WebSocket and HTTP APIs, use Amazon API Gateway Version 2 [resources](/docs/providers/aws/r/apigatewayv2_api.html). ## Example Usage diff --git a/website/docs/r/api_gateway_stage.html.markdown b/website/docs/r/api_gateway_stage.html.markdown index a90d54a6042..c06c2bfe59d 100644 --- a/website/docs/r/api_gateway_stage.html.markdown +++ b/website/docs/r/api_gateway_stage.html.markdown @@ -66,7 +66,7 @@ resource "aws_api_gateway_integration" "test" { API Gateway provides the ability to [enable CloudWatch API logging](https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-logging.html). To manage the CloudWatch Log Group when this feature is enabled, the [`aws_cloudwatch_log_group` resource](/docs/providers/aws/r/cloudwatch_log_group.html) can be used where the name matches the API Gateway naming convention. If the CloudWatch Log Group previously exists, the [`aws_cloudwatch_log_group` resource can be imported into Terraform](/docs/providers/aws/r/cloudwatch_log_group.html#import) as a one time operation and recreation of the environment can occur without import. 
--> The below configuration uses [`depends_on`](/docs/configuration/resources.html#depends_on-explicit-resource-dependencies) to prevent ordering issues with API Gateway automatically creating the log group first and a variable for naming consistency. Other ordering and naming methodologies may be more appropriate for your environment. +-> The below configuration uses [`depends_on`](https://www.terraform.io/docs/configuration/meta-arguments/depends_on.html) to prevent ordering issues with API Gateway automatically creating the log group first and a variable for naming consistency. Other ordering and naming methodologies may be more appropriate for your environment. ```hcl variable "stage_name" { diff --git a/website/docs/r/apigatewayv2_api.html.markdown b/website/docs/r/apigatewayv2_api.html.markdown index 4143f26d90f..be8c0777d04 100644 --- a/website/docs/r/apigatewayv2_api.html.markdown +++ b/website/docs/r/apigatewayv2_api.html.markdown @@ -10,7 +10,7 @@ description: |- Manages an Amazon API Gateway Version 2 API. --> **Note:** Amazon API Gateway Version 2 resources are used for creating and deploying WebSocket and HTTP APIs. To create and deploy REST APIs, use Amazon API Gateway Version 1 [resources](https://www.terraform.io/docs/providers/aws/r/api_gateway_rest_api.html). +-> **Note:** Amazon API Gateway Version 2 resources are used for creating and deploying WebSocket and HTTP APIs. To create and deploy REST APIs, use Amazon API Gateway Version 1 [resources](/docs/providers/aws/r/api_gateway_rest_api.html). ## Example Usage diff --git a/website/docs/r/apigatewayv2_deployment.html.markdown b/website/docs/r/apigatewayv2_deployment.html.markdown index 56770b10961..181de827201 100644 --- a/website/docs/r/apigatewayv2_deployment.html.markdown +++ b/website/docs/r/apigatewayv2_deployment.html.markdown @@ -11,7 +11,7 @@ description: |- Manages an Amazon API Gateway Version 2 deployment. More information can be found in the [Amazon API Gateway Developer Guide](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-websocket-api.html). --> **Note:** Creating a deployment for an API requires at least one `aws_apigatewayv2_route` resource associated with that API. To avoid race conditions when all resources are being created together, you need to add implicit resource references via the `triggers` argument or explicit resource references using the [resource `depends_on` meta-argument](/docs/configuration/resources.html#depends_on-explicit-resource-dependencies). +-> **Note:** Creating a deployment for an API requires at least one `aws_apigatewayv2_route` resource associated with that API. To avoid race conditions when all resources are being created together, you need to add implicit resource references via the `triggers` argument or explicit resource references using the [resource `depends_on` meta-argument](https://www.terraform.io/docs/configuration/meta-arguments/depends_on.html). -> It is recommended to enable the [resource `lifecycle` configuration block `create_before_destroy` argument](https://www.terraform.io/docs/configuration/resources.html#create_before_destroy) in this resource configuration to properly order redeployments in Terraform. @@ -58,7 +58,7 @@ The following arguments are supported: * `api_id` - (Required) The API identifier. * `description` - (Optional) The description for the deployment resource. Must be less than or equal to 1024 characters in length. 
-* `triggers` - (Optional) A map of arbitrary keys and values that, when changed, will trigger a redeployment. To force a redeployment without changing these keys/values, use the [`terraform taint` command](/docs/commands/taint.html). +* `triggers` - (Optional) A map of arbitrary keys and values that, when changed, will trigger a redeployment. To force a redeployment without changing these keys/values, use the [`terraform taint` command](https://www.terraform.io/docs/commands/taint.html). ## Attributes Reference diff --git a/website/docs/r/apigatewayv2_domain_name.html.markdown b/website/docs/r/apigatewayv2_domain_name.html.markdown index ed301014f20..c28383ab6dc 100644 --- a/website/docs/r/apigatewayv2_domain_name.html.markdown +++ b/website/docs/r/apigatewayv2_domain_name.html.markdown @@ -90,7 +90,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_apigatewayv2_domain_name` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +`aws_apigatewayv2_domain_name` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `update` - (Default `60 minutes`) Used for updating the domain name diff --git a/website/docs/r/autoscaling_attachment.html.markdown b/website/docs/r/autoscaling_attachment.html.markdown index d5ae0c324f0..6c6fc929f0a 100644 --- a/website/docs/r/autoscaling_attachment.html.markdown +++ b/website/docs/r/autoscaling_attachment.html.markdown @@ -17,7 +17,7 @@ with `load_balancers` and `target_group_arns` defined in-line. These two methods mutually-exclusive. If `aws_autoscaling_attachment` resources are used, either alone or with inline `load_balancers` or `target_group_arns`, the `aws_autoscaling_group` resource must be configured to ignore changes to the `load_balancers` and `target_group_arns` arguments within a -[`lifecycle` configuration block](/docs/configuration/resources.html#lifecycle-lifecycle-customizations). +[`lifecycle` configuration block](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html). ## Example Usage diff --git a/website/docs/r/autoscaling_group.html.markdown b/website/docs/r/autoscaling_group.html.markdown index bfceea975dd..e83ff3edc96 100644 --- a/website/docs/r/autoscaling_group.html.markdown +++ b/website/docs/r/autoscaling_group.html.markdown @@ -19,7 +19,7 @@ with `load_balancers` and `target_group_arns` defined in-line. These two methods mutually-exclusive. If `aws_autoscaling_attachment` resources are used, either alone or with inline `load_balancers` or `target_group_arns`, the `aws_autoscaling_group` resource must be configured to ignore changes to the `load_balancers` and `target_group_arns` arguments within a -[`lifecycle` configuration block](/docs/configuration/resources.html#lifecycle-lifecycle-customizations). +[`lifecycle` configuration block](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html). ## Example Usage @@ -445,7 +445,7 @@ care to not duplicate these hooks in `aws_autoscaling_lifecycle_hook`. ## Timeouts `autoscaling_group` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `delete` - (Default `10 minutes`) Used for destroying ASG. 
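-> A minimal sketch of the `lifecycle` block that the `autoscaling_attachment` and `autoscaling_group` notes above call for, assuming Terraform 0.12+ syntax; the resource name is illustrative.

```hcl
resource "aws_autoscaling_group" "example" {
  # ... other configuration ...

  lifecycle {
    # Ignore the attachment-managed arguments so standalone
    # aws_autoscaling_attachment resources do not cause non-empty plans.
    ignore_changes = [load_balancers, target_group_arns]
  }
}
```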
diff --git a/website/docs/r/cloudformation_stack.html.markdown b/website/docs/r/cloudformation_stack.html.markdown index 903334fa16c..b0848753120 100644 --- a/website/docs/r/cloudformation_stack.html.markdown +++ b/website/docs/r/cloudformation_stack.html.markdown @@ -87,7 +87,7 @@ $ terraform import aws_cloudformation_stack.stack networking-stack ## Timeouts `aws_cloudformation_stack` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `30 minutes`) Used for Creating Stacks - `update` - (Default `30 minutes`) Used for Stack modifications diff --git a/website/docs/r/cloudformation_stack_set.html.markdown b/website/docs/r/cloudformation_stack_set.html.markdown index 12456490f9b..fc5477fc8fe 100644 --- a/website/docs/r/cloudformation_stack_set.html.markdown +++ b/website/docs/r/cloudformation_stack_set.html.markdown @@ -105,7 +105,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_cloudformation_stack_set` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +`aws_cloudformation_stack_set` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `update` - (Default `30m`) How long to wait for a StackSet to be updated. diff --git a/website/docs/r/cloudformation_stack_set_instance.html.markdown b/website/docs/r/cloudformation_stack_set_instance.html.markdown index 4de9c15095f..99f84c9834a 100644 --- a/website/docs/r/cloudformation_stack_set_instance.html.markdown +++ b/website/docs/r/cloudformation_stack_set_instance.html.markdown @@ -85,7 +85,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_cloudformation_stack_set_instance` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +`aws_cloudformation_stack_set_instance` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `create` - (Default `30m`) How long to wait for a Stack to be created. * `update` - (Default `30m`) How long to wait for a Stack to be updated. diff --git a/website/docs/r/cloudwatch_event_target.html.markdown b/website/docs/r/cloudwatch_event_target.html.markdown index fe2d730b050..afef8c36bd9 100644 --- a/website/docs/r/cloudwatch_event_target.html.markdown +++ b/website/docs/r/cloudwatch_event_target.html.markdown @@ -288,8 +288,8 @@ resource "aws_cloudwatch_event_rule" "example" { -> **Note:** In order to be able to have your AWS Lambda function or SNS topic invoked by an EventBridge rule, you must set up the right permissions - using [`aws_lambda_permission`](https://www.terraform.io/docs/providers/aws/r/lambda_permission.html) - or [`aws_sns_topic.policy`](https://www.terraform.io/docs/providers/aws/r/sns_topic.html#policy). + using [`aws_lambda_permission`](/docs/providers/aws/r/lambda_permission.html) + or [`aws_sns_topic.policy`](/docs/providers/aws/r/sns_topic.html#policy). More info [here](https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/resource-based-policies-cwe.html).
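-> A minimal sketch of the Lambda permission referenced in the note above, granting EventBridge permission to invoke the function; the resource names are illustrative.

```hcl
resource "aws_lambda_permission" "allow_eventbridge" {
  statement_id  = "AllowExecutionFromEventBridge"
  action        = "lambda:InvokeFunction"
  function_name = aws_lambda_function.example.function_name # illustrative reference
  principal     = "events.amazonaws.com"
  source_arn    = aws_cloudwatch_event_rule.example.arn # illustrative reference
}
```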
The following arguments are supported: diff --git a/website/docs/r/cognito_user_pool.markdown b/website/docs/r/cognito_user_pool.markdown index e47e3acbcdf..db80695b8ec 100644 --- a/website/docs/r/cognito_user_pool.markdown +++ b/website/docs/r/cognito_user_pool.markdown @@ -80,7 +80,7 @@ The following arguments are supported: * `password_policy` (Optional) - A container for information about the [user pool password policy](#password-policy). * `schema` (Optional) - A container with the [schema attributes](#schema-attributes) of a user pool. Schema attributes from the [standard attribute set](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-attributes.html#cognito-user-pools-standard-attributes) only need to be specified if they are different from the default configuration. Maximum of 50 attributes. * `sms_authentication_message` - (Optional) A string representing the SMS authentication message. The message must contain the `{####}` placeholder, which will be replaced with the code. -* `sms_configuration` (Optional) - Configuration block for Short Message Service (SMS) settings. Detailed below. These settings apply to SMS user verification and SMS Multi-Factor Authentication (MFA). Due to Cognito API restrictions, the SMS configuration cannot be removed without recreating the Cognito User Pool. For user data safety, this resource will ignore the removal of this configuration by disabling drift detection. To force resource recreation after this configuration has been applied, see the [`taint` command](/docs/commands/taint.html). +* `sms_configuration` (Optional) - Configuration block for Short Message Service (SMS) settings. Detailed below. These settings apply to SMS user verification and SMS Multi-Factor Authentication (MFA). Due to Cognito API restrictions, the SMS configuration cannot be removed without recreating the Cognito User Pool. For user data safety, this resource will ignore the removal of this configuration by disabling drift detection. To force resource recreation after this configuration has been applied, see the [`taint` command](https://www.terraform.io/docs/commands/taint.html). * `sms_verification_message` - (Optional) A string representing the SMS verification message. Conflicts with `verification_message_template` configuration block `sms_message` argument. * `software_token_mfa_configuration` - (Optional) Configuration block for software token Multi-Factor Authentication (MFA) settings. Detailed below. * `tags` - (Optional) A map of tags to assign to the User Pool. diff --git a/website/docs/r/config_organization_custom_rule.html.markdown b/website/docs/r/config_organization_custom_rule.html.markdown index 761b0b20de3..b3b21b39db0 100644 --- a/website/docs/r/config_organization_custom_rule.html.markdown +++ b/website/docs/r/config_organization_custom_rule.html.markdown @@ -66,7 +66,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_config_organization_custom_rule` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) +`aws_config_organization_custom_rule` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `create` - (Default `5m`) How long to wait for the rule to be created.
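-> As referenced in the `cognito_user_pool` hunk above, forcing recreation after `sms_configuration` has been applied uses `terraform taint`; the resource address below is hypothetical.

```
$ terraform taint aws_cognito_user_pool.example
```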
diff --git a/website/docs/r/config_organization_managed_rule.html.markdown b/website/docs/r/config_organization_managed_rule.html.markdown index 63a9f0716f6..bf6cf84c072 100644 --- a/website/docs/r/config_organization_managed_rule.html.markdown +++ b/website/docs/r/config_organization_managed_rule.html.markdown @@ -53,7 +53,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_config_organization_managed_rule` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) +`aws_config_organization_managed_rule` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `create` - (Default `5m`) How long to wait for the rule to be created. diff --git a/website/docs/r/datasync_agent.html.markdown b/website/docs/r/datasync_agent.html.markdown index 402aa9d5905..e4d5415eb04 100644 --- a/website/docs/r/datasync_agent.html.markdown +++ b/website/docs/r/datasync_agent.html.markdown @@ -39,7 +39,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_datasync_agent` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +`aws_datasync_agent` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `create` - (Default `10m`) How long to wait for agent activation and connection to DataSync. diff --git a/website/docs/r/datasync_task.html.markdown b/website/docs/r/datasync_task.html.markdown index 08a316b9587..e16ef80e904 100644 --- a/website/docs/r/datasync_task.html.markdown +++ b/website/docs/r/datasync_task.html.markdown @@ -60,7 +60,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_datasync_task` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +`aws_datasync_task` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `create` - (Default `5m`) How long to wait for DataSync Task availability. 
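-> The Timeouts options listed throughout these hunks are set inside a `timeouts` block on the resource itself. A minimal sketch for `aws_datasync_task`; the location references and the override value are illustrative.

```hcl
resource "aws_datasync_task" "example" {
  source_location_arn      = aws_datasync_location_nfs.source.arn     # illustrative reference
  destination_location_arn = aws_datasync_location_s3.destination.arn # illustrative reference

  # Override the default 5m wait for DataSync Task availability documented above.
  timeouts {
    create = "10m"
  }
}
```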
diff --git a/website/docs/r/dax_cluster.html.markdown b/website/docs/r/dax_cluster.html.markdown index 801543db9d1..d77981acbb2 100644 --- a/website/docs/r/dax_cluster.html.markdown +++ b/website/docs/r/dax_cluster.html.markdown @@ -90,7 +90,7 @@ consisting of a DNS name and a port number ## Timeouts `aws_dax_cluster` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `45 minutes`) Used for creating a DAX cluster - `update` - (Default `45 minutes`) Used for cluster modifications diff --git a/website/docs/r/db_cluster_snapshot.html.markdown b/website/docs/r/db_cluster_snapshot.html.markdown index 877b63690f5..955c252dce4 100644 --- a/website/docs/r/db_cluster_snapshot.html.markdown +++ b/website/docs/r/db_cluster_snapshot.html.markdown @@ -46,7 +46,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_db_cluster_snapshot` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +`aws_db_cluster_snapshot` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `create` - (Default `20m`) How long to wait for the snapshot to be available. diff --git a/website/docs/r/db_event_subscription.html.markdown b/website/docs/r/db_event_subscription.html.markdown index e7cc3bdad88..21f7f226724 100644 --- a/website/docs/r/db_event_subscription.html.markdown +++ b/website/docs/r/db_event_subscription.html.markdown @@ -74,7 +74,7 @@ The following additional atttributes are provided: ## Timeouts -`aws_db_event_subscription` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) +`aws_db_event_subscription` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `40m`) How long to wait for an RDS event notification subscription to be ready. diff --git a/website/docs/r/db_instance.html.markdown b/website/docs/r/db_instance.html.markdown index 9030a8d2e6b..20a5e0337d7 100644 --- a/website/docs/r/db_instance.html.markdown +++ b/website/docs/r/db_instance.html.markdown @@ -27,7 +27,7 @@ server reboots. See the AWS Docs on [RDS Maintenance][2] for more information. ~> **Note:** All arguments including the username and password will be stored in the raw state as plain-text. [Read more about sensitive data in -state](/docs/state/sensitive-data.html). +state](https://www.terraform.io/docs/state/sensitive-data.html). ## RDS Instance Class Types Amazon RDS supports three types of instance classes: Standard, Memory Optimized, @@ -212,7 +212,7 @@ standalone database. -> **Note:** You can restore to any point in time before the source DB instance's `latest_restorable_time` or a point up to the number of days specified in the source DB instance's `backup_retention_period`. For more information, please refer to the [Developer Guide](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIT.html). -This setting does not apply to `aurora-mysql` or `aurora-postgresql` DB engines. For Aurora, refer to the [`aws_rds_cluster` resource documentation](https://www.terraform.io/docs/providers/aws/r/rds_cluster.html#restore_in_time). 
+This setting does not apply to `aurora-mysql` or `aurora-postgresql` DB engines. For Aurora, refer to the [`aws_rds_cluster` resource documentation](/docs/providers/aws/r/rds_cluster.html#restore_in_time). The `restore_to_point_in_time` block supports the following arguments: @@ -248,7 +248,7 @@ This will not recreate the resource if the S3 object changes in some way. It's ### Timeouts `aws_db_instance` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `40 minutes`) Used for Creating Instances, Replicas, and restoring from Snapshots. diff --git a/website/docs/r/db_option_group.html.markdown b/website/docs/r/db_option_group.html.markdown index b2ecb282fc5..428241cd265 100644 --- a/website/docs/r/db_option_group.html.markdown +++ b/website/docs/r/db_option_group.html.markdown @@ -86,7 +86,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts `aws_db_option_group` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `delete` - (Default `15 minutes`) diff --git a/website/docs/r/db_proxy.html.markdown b/website/docs/r/db_proxy.html.markdown index cdd1a9ccd3f..1aac841c7c9 100644 --- a/website/docs/r/db_proxy.html.markdown +++ b/website/docs/r/db_proxy.html.markdown @@ -70,7 +70,7 @@ In addition to all arguments above, the following attributes are exported: ### Timeouts -`aws_db_proxy` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +`aws_db_proxy` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `30 minutes`) Used for creating DB proxies. - `update` - (Default `30 minutes`) Used for modifying DB proxies. diff --git a/website/docs/r/db_proxy_default_target_group.html.markdown b/website/docs/r/db_proxy_default_target_group.html.markdown index 8ad63d82233..c5aec6ce920 100644 --- a/website/docs/r/db_proxy_default_target_group.html.markdown +++ b/website/docs/r/db_proxy_default_target_group.html.markdown @@ -76,7 +76,7 @@ In addition to all arguments above, the following attributes are exported: ### Timeouts -`aws_db_proxy_default_target_group` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +`aws_db_proxy_default_target_group` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `30 minutes`) Timeout for modifying DB proxy target group on creation. - `update` - (Default `30 minutes`) Timeout for modifying DB proxy target group on update. 
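-> A minimal sketch of the `restore_to_point_in_time` block discussed in the `db_instance` hunk above, assuming current `aws_db_instance` arguments; identifiers are illustrative and other required instance arguments are omitted.

```hcl
resource "aws_db_instance" "restored" {
  instance_class = "db.t3.micro"

  restore_to_point_in_time {
    source_db_instance_identifier = "source-db" # hypothetical source instance
    use_latest_restorable_time    = true        # or set restore_time explicitly
  }
}
```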
diff --git a/website/docs/r/db_snapshot.html.markdown b/website/docs/r/db_snapshot.html.markdown index a29312815b0..384d2b4347a 100644 --- a/website/docs/r/db_snapshot.html.markdown +++ b/website/docs/r/db_snapshot.html.markdown @@ -64,7 +64,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_db_snapshot` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +`aws_db_snapshot` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `read` - (Default `20 minutes`) Length of time to wait for the snapshot to become available diff --git a/website/docs/r/default_route_table.html.markdown b/website/docs/r/default_route_table.html.markdown index d868f83311e..57ab6f7ed8d 100644 --- a/website/docs/r/default_route_table.html.markdown +++ b/website/docs/r/default_route_table.html.markdown @@ -62,7 +62,7 @@ The following arguments are supported: * `default_route_table_id` - (Required) The ID of the Default Routing Table. * `route` - (Optional) A list of route objects. Their keys are documented below. - This argument is processed in [attribute-as-blocks mode](/docs/configuration/attr-as-blocks.html). + This argument is processed in [attribute-as-blocks mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html). * `tags` - (Optional) A map of tags to assign to the resource. * `propagating_vgws` - (Optional) A list of virtual gateways for propagation. diff --git a/website/docs/r/directory_service_directory.html.markdown b/website/docs/r/directory_service_directory.html.markdown index 3cdd29495bf..fddf2014434 100644 --- a/website/docs/r/directory_service_directory.html.markdown +++ b/website/docs/r/directory_service_directory.html.markdown @@ -11,7 +11,7 @@ description: |- Provides a Simple or Managed Microsoft directory in AWS Directory Service. ~> **Note:** All arguments including the password and customer username will be stored in the raw state as plain-text. -[Read more about sensitive data in state](/docs/state/sensitive-data.html). +[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). ## Example Usage diff --git a/website/docs/r/dms_certificate.html.markdown b/website/docs/r/dms_certificate.html.markdown index 9464510c252..51774c19e3e 100644 --- a/website/docs/r/dms_certificate.html.markdown +++ b/website/docs/r/dms_certificate.html.markdown @@ -11,7 +11,7 @@ description: |- Provides a DMS (Data Migration Service) certificate resource. DMS certificates can be created, deleted, and imported. ~> **Note:** All arguments including the PEM encoded certificate will be stored in the raw state as plain-text. -[Read more about sensitive data in state](/docs/state/sensitive-data.html). +[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). ## Example Usage diff --git a/website/docs/r/dms_endpoint.html.markdown b/website/docs/r/dms_endpoint.html.markdown index 308719e4e3a..140aaefb505 100644 --- a/website/docs/r/dms_endpoint.html.markdown +++ b/website/docs/r/dms_endpoint.html.markdown @@ -11,7 +11,7 @@ description: |- Provides a DMS (Data Migration Service) endpoint resource. DMS endpoints can be created, updated, deleted, and imported. ~> **Note:** All arguments including the password will be stored in the raw state as plain-text. -[Read more about sensitive data in state](/docs/state/sensitive-data.html). 
+[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). ## Example Usage diff --git a/website/docs/r/dms_event_subscription.html.markdown b/website/docs/r/dms_event_subscription.html.markdown index 795c65f3793..e0f69d24182 100644 --- a/website/docs/r/dms_event_subscription.html.markdown +++ b/website/docs/r/dms_event_subscription.html.markdown @@ -40,7 +40,7 @@ The following arguments are supported: ## Timeouts -`aws_dms_event_subscription` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +`aws_dms_event_subscription` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `10m`) Used for creating event subscriptions. - `update` - (Default `10m`) Used for event subscription modifications. diff --git a/website/docs/r/dms_replication_instance.html.markdown b/website/docs/r/dms_replication_instance.html.markdown index d26a22df1b8..b40204af91b 100644 --- a/website/docs/r/dms_replication_instance.html.markdown +++ b/website/docs/r/dms_replication_instance.html.markdown @@ -129,7 +129,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts `aws_dms_replication_instance` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `30 minutes`) Used for Creating Instances - `update` - (Default `30 minutes`) Used for Database modifications diff --git a/website/docs/r/docdb_cluster.html.markdown b/website/docs/r/docdb_cluster.html.markdown index 8f05fcad538..37a5daa0e01 100644 --- a/website/docs/r/docdb_cluster.html.markdown +++ b/website/docs/r/docdb_cluster.html.markdown @@ -19,7 +19,7 @@ phase because a modification has not yet taken place. You can use the ~> **Note:** using `apply_immediately` can result in a brief downtime as the server reboots. ~> **Note:** All arguments including the username and password will be stored in the raw state as plain-text. -[Read more about sensitive data in state](/docs/state/sensitive-data.html). +[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). 
## Example Usage @@ -90,7 +90,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts `aws_docdb_cluster` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `120 minutes`) Used for Cluster creation - `update` - (Default `120 minutes`) Used for Cluster modifications diff --git a/website/docs/r/docdb_cluster_instance.html.markdown b/website/docs/r/docdb_cluster_instance.html.markdown index 210253c9d75..7c4ceddb09f 100644 --- a/website/docs/r/docdb_cluster_instance.html.markdown +++ b/website/docs/r/docdb_cluster_instance.html.markdown @@ -81,13 +81,13 @@ In addition to all arguments above, the following attributes are exported: [1]: /docs/providers/aws/r/docdb_cluster.html [2]: https://docs.aws.amazon.com/documentdb/latest/developerguide/db-cluster-manage-performance.html#db-cluster-manage-scaling-instance -[3]: /docs/configuration/resources.html#count +[3]: https://www.terraform.io/docs/configuration/meta-arguments/count.html [4]: https://docs.aws.amazon.com/documentdb/latest/developerguide/db-instance-classes.html#db-instance-class-specs ## Timeouts `aws_docdb_cluster_instance` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `90 minutes`) Used for Creating Instances, Replicas, and restoring from Snapshots diff --git a/website/docs/r/docdb_cluster_snapshot.html.markdown b/website/docs/r/docdb_cluster_snapshot.html.markdown index e63b0ca7fac..84dfd2099a1 100644 --- a/website/docs/r/docdb_cluster_snapshot.html.markdown +++ b/website/docs/r/docdb_cluster_snapshot.html.markdown @@ -43,7 +43,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_docdb_cluster_snapshot` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +`aws_docdb_cluster_snapshot` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `create` - (Default `20m`) How long to wait for the snapshot to be available. 
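-> The `count` meta-argument linked as footnote `[3]` in the `docdb_cluster_instance` hunk above is typically used as below; the identifiers are illustrative.

```hcl
resource "aws_docdb_cluster_instance" "cluster_instances" {
  count              = 2
  identifier         = "docdb-cluster-demo-${count.index}"
  cluster_identifier = aws_docdb_cluster.default.id # illustrative reference
  instance_class     = "db.r5.large"
}
```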
diff --git a/website/docs/r/dx_bgp_peer.html.markdown b/website/docs/r/dx_bgp_peer.html.markdown index ac092175675..39aa635099d 100644 --- a/website/docs/r/dx_bgp_peer.html.markdown +++ b/website/docs/r/dx_bgp_peer.html.markdown @@ -45,7 +45,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts `aws_dx_bgp_peer` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `10 minutes`) Used for creating BGP peer - `delete` - (Default `10 minutes`) Used for destroying BGP peer diff --git a/website/docs/r/dx_gateway.html.markdown b/website/docs/r/dx_gateway.html.markdown index 4d0b6c26895..a0ef2722e35 100644 --- a/website/docs/r/dx_gateway.html.markdown +++ b/website/docs/r/dx_gateway.html.markdown @@ -36,7 +36,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts `aws_dx_gateway` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `10 minutes`) Used for creating the gateway - `delete` - (Default `10 minutes`) Used for destroying the gateway diff --git a/website/docs/r/dx_gateway_association.html.markdown b/website/docs/r/dx_gateway_association.html.markdown index e2cf50fa28a..bd8811c6884 100644 --- a/website/docs/r/dx_gateway_association.html.markdown +++ b/website/docs/r/dx_gateway_association.html.markdown @@ -116,7 +116,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts `aws_dx_gateway_association` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `30 minutes`) Used for creating the association - `update` - (Default `30 minutes`) Used for updating the association diff --git a/website/docs/r/dx_hosted_private_virtual_interface.html.markdown b/website/docs/r/dx_hosted_private_virtual_interface.html.markdown index 0bf85b94449..1c36e486cb4 100644 --- a/website/docs/r/dx_hosted_private_virtual_interface.html.markdown +++ b/website/docs/r/dx_hosted_private_virtual_interface.html.markdown @@ -51,7 +51,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts `aws_dx_hosted_private_virtual_interface` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `10 minutes`) Used for creating virtual interface - `update` - (Default `10 minutes`) Used for virtual interface modifications diff --git a/website/docs/r/dx_hosted_private_virtual_interface_accepter.html.markdown b/website/docs/r/dx_hosted_private_virtual_interface_accepter.html.markdown index cd10037105a..97c7597eb66 100644 --- a/website/docs/r/dx_hosted_private_virtual_interface_accepter.html.markdown +++ b/website/docs/r/dx_hosted_private_virtual_interface_accepter.html.markdown @@ -86,7 +86,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts 
`aws_dx_hosted_private_virtual_interface_accepter` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `10 minutes`) Used for creating virtual interface - `delete` - (Default `10 minutes`) Used for destroying virtual interface diff --git a/website/docs/r/dx_hosted_public_virtual_interface.html.markdown b/website/docs/r/dx_hosted_public_virtual_interface.html.markdown index f7c7a318165..233f63a17f5 100644 --- a/website/docs/r/dx_hosted_public_virtual_interface.html.markdown +++ b/website/docs/r/dx_hosted_public_virtual_interface.html.markdown @@ -58,7 +58,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts `aws_dx_hosted_public_virtual_interface` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `10 minutes`) Used for creating virtual interface - `delete` - (Default `10 minutes`) Used for destroying virtual interface diff --git a/website/docs/r/dx_hosted_public_virtual_interface_accepter.html.markdown b/website/docs/r/dx_hosted_public_virtual_interface_accepter.html.markdown index dc07803b6c5..26d74d6c734 100644 --- a/website/docs/r/dx_hosted_public_virtual_interface_accepter.html.markdown +++ b/website/docs/r/dx_hosted_public_virtual_interface_accepter.html.markdown @@ -84,7 +84,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts `aws_dx_hosted_public_virtual_interface_accepter` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `10 minutes`) Used for creating virtual interface - `delete` - (Default `10 minutes`) Used for destroying virtual interface diff --git a/website/docs/r/dx_hosted_transit_virtual_interface.html.markdown b/website/docs/r/dx_hosted_transit_virtual_interface.html.markdown index 75718b5caf5..5a38a6ecdcd 100644 --- a/website/docs/r/dx_hosted_transit_virtual_interface.html.markdown +++ b/website/docs/r/dx_hosted_transit_virtual_interface.html.markdown @@ -52,7 +52,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts `aws_dx_hosted_transit_virtual_interface` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `10 minutes`) Used for creating virtual interface - `update` - (Default `10 minutes`) Used for virtual interface modifications diff --git a/website/docs/r/dx_hosted_transit_virtual_interface_accepter.html.markdown b/website/docs/r/dx_hosted_transit_virtual_interface_accepter.html.markdown index d3cf96e8047..ec73c108d88 100644 --- a/website/docs/r/dx_hosted_transit_virtual_interface_accepter.html.markdown +++ b/website/docs/r/dx_hosted_transit_virtual_interface_accepter.html.markdown @@ -82,7 +82,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts `aws_dx_hosted_transit_virtual_interface_accepter` provides the following 
-[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `10 minutes`) Used for creating virtual interface - `delete` - (Default `10 minutes`) Used for destroying virtual interface diff --git a/website/docs/r/dx_private_virtual_interface.html.markdown b/website/docs/r/dx_private_virtual_interface.html.markdown index c1bd5846cb9..278e6d13839 100644 --- a/website/docs/r/dx_private_virtual_interface.html.markdown +++ b/website/docs/r/dx_private_virtual_interface.html.markdown @@ -53,7 +53,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts `aws_dx_private_virtual_interface` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `10 minutes`) Used for creating virtual interface - `update` - (Default `10 minutes`) Used for virtual interface modifications diff --git a/website/docs/r/dx_public_virtual_interface.html.markdown b/website/docs/r/dx_public_virtual_interface.html.markdown index 091213f5347..ca88dafefac 100644 --- a/website/docs/r/dx_public_virtual_interface.html.markdown +++ b/website/docs/r/dx_public_virtual_interface.html.markdown @@ -57,7 +57,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts `aws_dx_public_virtual_interface` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `10 minutes`) Used for creating virtual interface - `delete` - (Default `10 minutes`) Used for destroying virtual interface diff --git a/website/docs/r/dx_transit_virtual_interface.html.markdown b/website/docs/r/dx_transit_virtual_interface.html.markdown index c45b35462d9..230783776ce 100644 --- a/website/docs/r/dx_transit_virtual_interface.html.markdown +++ b/website/docs/r/dx_transit_virtual_interface.html.markdown @@ -59,7 +59,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts `aws_dx_transit_virtual_interface` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `10 minutes`) Used for creating virtual interface - `update` - (Default `10 minutes`) Used for virtual interface modifications diff --git a/website/docs/r/dynamodb_table.html.markdown b/website/docs/r/dynamodb_table.html.markdown index 5779e9e5d5c..d93972c0c09 100644 --- a/website/docs/r/dynamodb_table.html.markdown +++ b/website/docs/r/dynamodb_table.html.markdown @@ -10,7 +10,7 @@ description: |- Provides a DynamoDB table resource -~> **Note:** It is recommended to use `lifecycle` [`ignore_changes`](/docs/configuration/resources.html#ignore_changes) for `read_capacity` and/or `write_capacity` if there's [autoscaling policy](/docs/providers/aws/r/appautoscaling_policy.html) attached to the table. 
+~> **Note:** It is recommended to use `lifecycle` [`ignore_changes`](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) for `read_capacity` and/or `write_capacity` if there's [autoscaling policy](/docs/providers/aws/r/appautoscaling_policy.html) attached to the table. ## Example Usage diff --git a/website/docs/r/ebs_snapshot.html.markdown b/website/docs/r/ebs_snapshot.html.markdown index a9e344514fb..190630ff90c 100644 --- a/website/docs/r/ebs_snapshot.html.markdown +++ b/website/docs/r/ebs_snapshot.html.markdown @@ -42,7 +42,7 @@ The following arguments are supported: ### Timeouts `aws_ebs_snapshot` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `10 minutes`) Used for creating the ebs snapshot - `delete` - (Default `10 minutes`) Used for deleting the ebs snapshot diff --git a/website/docs/r/ec2_fleet.html.markdown b/website/docs/r/ec2_fleet.html.markdown index 3e40e8b700d..9a0f06c01a6 100644 --- a/website/docs/r/ec2_fleet.html.markdown +++ b/website/docs/r/ec2_fleet.html.markdown @@ -124,7 +124,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_ec2_fleet` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +`aws_ec2_fleet` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `create` - (Default `10m`) How long to wait for a fleet to be active. * `update` - (Default `10m`) How long to wait for a fleet to be modified. diff --git a/website/docs/r/ecr_repository.html.markdown b/website/docs/r/ecr_repository.html.markdown index 207b72b4fb7..20f9d7d57d1 100644 --- a/website/docs/r/ecr_repository.html.markdown +++ b/website/docs/r/ecr_repository.html.markdown @@ -49,7 +49,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_ecr_repository` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) +`aws_ecr_repository` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `delete` - (Default `20 minutes`) How long to wait for a repository to be deleted. diff --git a/website/docs/r/ecs_service.html.markdown b/website/docs/r/ecs_service.html.markdown index a6f249075a9..73e43562fc7 100644 --- a/website/docs/r/ecs_service.html.markdown +++ b/website/docs/r/ecs_service.html.markdown @@ -45,7 +45,7 @@ resource "aws_ecs_service" "mongo" { ### Ignoring Changes to Desired Count -You can utilize the generic Terraform resource [lifecycle configuration block](/docs/configuration/resources.html#lifecycle) with `ignore_changes` to create an ECS service with an initial count of running instances, then ignore any changes to that count caused externally (e.g. Application Autoscaling). +You can utilize the generic Terraform resource [lifecycle configuration block](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html) with `ignore_changes` to create an ECS service with an initial count of running instances, then ignore any changes to that count caused externally (e.g. Application Autoscaling). 
```hcl resource "aws_ecs_service" "example" { @@ -193,7 +193,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts `aws_ecs_service` provides the following -[Timeouts](/docs/configuration/resources.html#operation-timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `delete` - (Default `20 minutes`) diff --git a/website/docs/r/efs_mount_target.html.markdown b/website/docs/r/efs_mount_target.html.markdown index 530420988b0..e5eeec3c07e 100644 --- a/website/docs/r/efs_mount_target.html.markdown +++ b/website/docs/r/efs_mount_target.html.markdown @@ -44,7 +44,7 @@ be for the same VPC as subnet specified) in effect for the mount target. ~> **Note:** The `dns_name` and `mount_target_dns_name` attributes are only useful if the mount target is in a VPC that has support for DNS hostnames enabled. See [Using DNS with Your VPC](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-dns.html) -and [VPC resource](https://www.terraform.io/docs/providers/aws/r/vpc.html#enable_dns_hostnames) in Terraform for more information. +and [VPC resource](/docs/providers/aws/r/vpc.html#enable_dns_hostnames) in Terraform for more information. In addition to all arguments above, the following attributes are exported: diff --git a/website/docs/r/eip.html.markdown b/website/docs/r/eip.html.markdown index 82d9ce0ed7d..ad7a1b85109 100644 --- a/website/docs/r/eip.html.markdown +++ b/website/docs/r/eip.html.markdown @@ -135,7 +135,7 @@ In addition to all arguments above, the following attributes are exported: ~> **Note:** The resource computes the `public_dns` and `private_dns` attributes according to the [VPC DNS Guide](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-dns.html#vpc-dns-hostnames) as they are not available with the EC2 API. ## Timeouts -`aws_eip` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +`aws_eip` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `read` - (Default `15 minutes`) How long to wait querying for information about EIPs. - `update` - (Default `5 minutes`) How long to wait for an EIP to be updated. diff --git a/website/docs/r/eks_cluster.html.markdown b/website/docs/r/eks_cluster.html.markdown index 24cd8f96b88..0e4babbc901 100644 --- a/website/docs/r/eks_cluster.html.markdown +++ b/website/docs/r/eks_cluster.html.markdown @@ -79,7 +79,7 @@ resource "aws_iam_role_policy_attachment" "example-AmazonEKSVPCResourceControlle [EKS Control Plane Logging](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) can be enabled via the `enabled_cluster_log_types` argument. To manage the CloudWatch Log Group retention period, the [`aws_cloudwatch_log_group` resource](/docs/providers/aws/r/cloudwatch_log_group.html) can be used. --> The below configuration uses [`depends_on`](/docs/configuration/resources.html#depends_on-explicit-resource-dependencies) to prevent ordering issues with EKS automatically creating the log group first and a variable for naming consistency. Other ordering and naming methodologies may be more appropriate for your environment. 
+-> The below configuration uses [`depends_on`](https://www.terraform.io/docs/configuration/meta-arguments/depends_on.html) to prevent ordering issues with EKS automatically creating the log group first and a variable for naming consistency. Other ordering and naming methodologies may be more appropriate for your environment. ```hcl variable "cluster_name" { @@ -147,14 +147,14 @@ resource "aws_iam_role" "example" { } ``` -After adding inline IAM Policies (e.g. [`aws_iam_role_policy` resource](/docs/providers/aws/r/iam_role_policy.html)) or attaching IAM Policies (e.g. [`aws_iam_policy` resource](/docs/providers/aws/r/iam_policy.html) and [`aws_iam_role_policy_attachment` resource](/docs/providers/aws/r/iam_policy.html)) with the desired permissions to the IAM Role, annotate the Kubernetes service account (e.g. [`kubernetes_service_account` resource](/docs/providers/kubernetes/r/service_account.html)) and recreate any pods. +After adding inline IAM Policies (e.g. [`aws_iam_role_policy` resource](/docs/providers/aws/r/iam_role_policy.html)) or attaching IAM Policies (e.g. [`aws_iam_policy` resource](/docs/providers/aws/r/iam_policy.html) and [`aws_iam_role_policy_attachment` resource](/docs/providers/aws/r/iam_role_policy_attachment.html)) with the desired permissions to the IAM Role, annotate the Kubernetes service account (e.g. [`kubernetes_service_account` resource](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service_account)) and recreate any pods. ## Argument Reference The following arguments are supported: * `name` – (Required) Name of the cluster. -* `role_arn` - (Required) The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf. Ensure the resource configuration includes explicit dependencies on the IAM Role permissions by adding [`depends_on`](/docs/configuration/resources.html#depends_on-explicit-resource-dependencies) if using the [`aws_iam_role_policy` resource](/docs/providers/aws/r/iam_role_policy.html) or [`aws_iam_role_policy_attachment` resource](/docs/providers/aws/r/iam_role_policy_attachment.html), otherwise EKS cannot delete EKS managed EC2 infrastructure such as Security Groups on EKS Cluster deletion. +* `role_arn` - (Required) The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf. Ensure the resource configuration includes explicit dependencies on the IAM Role permissions by adding [`depends_on`](https://www.terraform.io/docs/configuration/meta-arguments/depends_on.html) if using the [`aws_iam_role_policy` resource](/docs/providers/aws/r/iam_role_policy.html) or [`aws_iam_role_policy_attachment` resource](/docs/providers/aws/r/iam_role_policy_attachment.html); otherwise, EKS cannot delete EKS-managed EC2 infrastructure such as Security Groups on EKS Cluster deletion. * `vpc_config` - (Required) Nested argument for the VPC associated with your cluster. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html) and [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) in the Amazon EKS User Guide. Configuration detailed below. * `enabled_cluster_log_types` - (Optional) A list of the desired control plane logging to enable.
For more information, see [Amazon EKS Control Plane Logging](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) * `encryption_config` - (Optional) Configuration block with encryption configuration for the cluster. Only available on Kubernetes 1.13 and above clusters created after March 6, 2020. Detailed below. @@ -217,7 +217,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts `aws_eks_cluster` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `create` - (Default `30 minutes`) How long to wait for the EKS Cluster to be created. * `update` - (Default `60 minutes`) How long to wait for the EKS Cluster to be updated. diff --git a/website/docs/r/eks_fargate_profile.html.markdown b/website/docs/r/eks_fargate_profile.html.markdown index 8a2d34a3a02..6294a069306 100644 --- a/website/docs/r/eks_fargate_profile.html.markdown +++ b/website/docs/r/eks_fargate_profile.html.markdown @@ -83,7 +83,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_eks_fargate_profile` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +`aws_eks_fargate_profile` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `create` - (Default `10 minutes`) How long to wait for the EKS Fargate Profile to be created. * `delete` - (Default `10 minutes`) How long to wait for the EKS Fargate Profile to be deleted. diff --git a/website/docs/r/eks_node_group.html.markdown b/website/docs/r/eks_node_group.html.markdown index 061b62b7838..e22a29131bd 100644 --- a/website/docs/r/eks_node_group.html.markdown +++ b/website/docs/r/eks_node_group.html.markdown @@ -37,7 +37,7 @@ resource "aws_eks_node_group" "example" { ### Ignoring Changes to Desired Size -You can utilize the generic Terraform resource [lifecycle configuration block](/docs/configuration/resources.html#lifecycle-lifecycle-customizations) with `ignore_changes` to create an EKS Node Group with an initial size of running instances, then ignore any changes to that count caused externally (e.g. Application Autoscaling). +You can utilize the generic Terraform resource [lifecycle configuration block](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html) with `ignore_changes` to create an EKS Node Group with an initial size of running instances, then ignore any changes to that count caused externally (e.g. Application Autoscaling). ```hcl resource "aws_eks_node_group" "example" { @@ -169,7 +169,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts `aws_eks_node_group` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `create` - (Default `60 minutes`) How long to wait for the EKS Node Group to be created. * `update` - (Default `60 minutes`) How long to wait for the EKS Node Group to be updated. Note that the `update` timeout is used separately for both configuration and version update operations. 
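The `depends_on` guidance in the `eks_cluster` hunks above is easier to see in configuration form. A minimal sketch, with illustrative resource names, of pinning the cluster's lifecycle behind an IAM policy attachment:

```hcl
resource "aws_eks_cluster" "example" {
  name     = "example"
  role_arn = aws_iam_role.example.arn

  vpc_config {
    subnet_ids = aws_subnet.example[*].id
  }

  # Create the permissions before the cluster and keep them until after it
  # is destroyed, so EKS can clean up its managed EC2 infrastructure.
  depends_on = [
    aws_iam_role_policy_attachment.example-AmazonEKSClusterPolicy,
  ]
}
```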
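Similarly, the `ignore_changes` pattern referenced by the `dynamodb_table`, `ecs_service`, and `eks_node_group` hunks looks roughly like the sketch below, which assumes an external autoscaler owns the desired size after creation:

```hcl
resource "aws_eks_node_group" "example" {
  cluster_name    = aws_eks_cluster.example.name
  node_group_name = "example"
  node_role_arn   = aws_iam_role.example.arn
  subnet_ids      = aws_subnet.example[*].id

  scaling_config {
    desired_size = 1
    max_size     = 2
    min_size     = 1
  }

  lifecycle {
    # Terraform sets the initial size; later external changes are ignored
    # instead of producing a perpetual plan diff.
    ignore_changes = [scaling_config[0].desired_size]
  }
}
```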
diff --git a/website/docs/r/elasticache_replication_group.html.markdown b/website/docs/r/elasticache_replication_group.html.markdown index 03c4bc96d53..cd886521b48 100644 --- a/website/docs/r/elasticache_replication_group.html.markdown +++ b/website/docs/r/elasticache_replication_group.html.markdown @@ -42,7 +42,7 @@ resource "aws_elasticache_replication_group" "example" { You have two options for adjusting the number of replicas: * Adjusting `number_cache_clusters` directly. This will attempt to automatically add or remove replicas, but provides no granular control (e.g. preferred availability zone, cache cluster ID) for the added or removed replicas. This also currently expects cache cluster IDs in the form of `replication_group_id-00#`. -* Otherwise for fine grained control of the underlying cache clusters, they can be added or removed with the [`aws_elasticache_cluster` resource](/docs/providers/aws/r/elasticache_cluster.html) and its `replication_group_id` attribute. In this situation, you will need to utilize the [lifecycle configuration block](/docs/configuration/resources.html) with `ignore_changes` to prevent perpetual differences during Terraform plan with the `number_cache_cluster` attribute. +* Otherwise, for fine-grained control of the underlying cache clusters, they can be added or removed with the [`aws_elasticache_cluster` resource](/docs/providers/aws/r/elasticache_cluster.html) and its `replication_group_id` attribute. In this situation, you will need to utilize the [lifecycle configuration block](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html) with `ignore_changes` to prevent perpetual differences during Terraform plan with the `number_cache_clusters` attribute. ```hcl resource "aws_elasticache_replication_group" "example" { @@ -153,7 +153,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_elasticache_replication_group` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) +`aws_elasticache_replication_group` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `create` - (Default `60m`) How long to wait for a replication group to be created. diff --git a/website/docs/r/elasticsearch_domain.html.markdown b/website/docs/r/elasticsearch_domain.html.markdown index 74d7f6a0a2d..ddaca8b3f36 100644 --- a/website/docs/r/elasticsearch_domain.html.markdown +++ b/website/docs/r/elasticsearch_domain.html.markdown @@ -321,7 +321,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_elasticsearch_domain` provides the following [Timeouts](/docs/configuration/resources.html#operation-timeouts) configuration options: +`aws_elasticsearch_domain` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `update` - (Optional, Default: `60m`) How long to wait for updates. diff --git a/website/docs/r/emr_cluster.html.markdown b/website/docs/r/emr_cluster.html.markdown index d3e0a235474..7e956794071 100644 --- a/website/docs/r/emr_cluster.html.markdown +++ b/website/docs/r/emr_cluster.html.markdown @@ -242,7 +242,7 @@ resource "aws_emr_instance_fleet" "task" { [Debug logging in EMR](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-debugging.html) is implemented as a step.
It is highly recommended to utilize the -[lifecycle configuration block](/docs/configuration/resources.html) with `ignore_changes` if other +[lifecycle configuration block](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html) with `ignore_changes` if other steps are being managed outside of Terraform. ```hcl @@ -365,7 +365,7 @@ EOF * `visible_to_all_users` - (Optional) Whether the job flow is visible to all IAM users of the AWS account associated with the job flow. Default `true` * `autoscaling_role` - (Optional) An IAM role for automatic scaling policies. The IAM role provides permissions that the automatic scaling feature requires to launch and terminate EC2 instances in an instance group. -* `step` - (Optional) List of steps to run when creating the cluster. Defined below. It is highly recommended to utilize the [lifecycle configuration block](/docs/configuration/resources.html) with `ignore_changes` if other steps are being managed outside of Terraform. This argument is processed in [attribute-as-blocks mode](/docs/configuration/attr-as-blocks.html). +* `step` - (Optional) List of steps to run when creating the cluster. Defined below. It is highly recommended to utilize the [lifecycle configuration block](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html) with `ignore_changes` if other steps are being managed outside of Terraform. This argument is processed in [attribute-as-blocks mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html). * `step_concurrency_level` - (Optional) The number of steps that can be executed concurrently. You can specify a maximum of 256 steps. Only valid for EMR clusters with `release_label` 5.28.0 or greater. (default is 1) * `tags` - (Optional) list of tags to apply to the EMR Cluster @@ -850,7 +850,7 @@ EMR clusters can be imported using the `id`, e.g. $ terraform import aws_emr_cluster.cluster j-123456ABCDEF ``` -Since the API does not return the actual values for Kerberos configurations, environments with those Terraform configurations will need to use the [`lifecycle` configuration block `ignore_changes` argument](/docs/configuration/resources.html#ignore_changes) available to all Terraform resources to prevent perpetual differences, e.g. +Since the API does not return the actual values for Kerberos configurations, environments with those Terraform configurations will need to use the [`lifecycle` configuration block `ignore_changes` argument](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) available to all Terraform resources to prevent perpetual differences, e.g. ```hcl resource "aws_emr_cluster" "example" { diff --git a/website/docs/r/fsx_lustre_file_system.html.markdown b/website/docs/r/fsx_lustre_file_system.html.markdown index 39c331dfe2a..2e1ed2bca23 100644 --- a/website/docs/r/fsx_lustre_file_system.html.markdown +++ b/website/docs/r/fsx_lustre_file_system.html.markdown @@ -56,7 +56,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_fsx_lustre_file_system` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) +`aws_fsx_lustre_file_system` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `create` - (Default `30m`) How long to wait for the file system to be created. @@ -70,7 +70,7 @@ FSx File Systems can be imported using the `id`, e.g. 
$ terraform import aws_fsx_lustre_file_system.example fs-543ab12b1ca672f33 ``` -Certain resource arguments, like `security_group_ids`, do not have a FSx API method for reading the information after creation. If the argument is set in the Terraform configuration on an imported resource, Terraform will always show a difference. To workaround this behavior, either omit the argument from the Terraform configuration or use [`ignore_changes`](/docs/configuration/resources.html#ignore_changes) to hide the difference, e.g. +Certain resource arguments, like `security_group_ids`, do not have an FSx API method for reading the information after creation. If the argument is set in the Terraform configuration on an imported resource, Terraform will always show a difference. To work around this behavior, either omit the argument from the Terraform configuration or use [`ignore_changes`](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) to hide the difference, e.g. ```hcl resource "aws_fsx_lustre_file_system" "example" { diff --git a/website/docs/r/fsx_windows_file_system.html.markdown b/website/docs/r/fsx_windows_file_system.html.markdown index cb40e7786b4..9a9ccce83cc 100644 --- a/website/docs/r/fsx_windows_file_system.html.markdown +++ b/website/docs/r/fsx_windows_file_system.html.markdown @@ -95,7 +95,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_fsx_windows_file_system` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) +`aws_fsx_windows_file_system` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `create` - (Default `45m`) How long to wait for the file system to be created. @@ -110,7 +110,7 @@ FSx File Systems can be imported using the `id`, e.g. $ terraform import aws_fsx_windows_file_system.example fs-543ab12b1ca672f33 ``` -Certain resource arguments, like `security_group_ids` and the `self_managed_active_directory` configuation block `password`, do not have a FSx API method for reading the information after creation. If these arguments are set in the Terraform configuration on an imported resource, Terraform will always show a difference. To workaround this behavior, either omit the argument from the Terraform configuration or use [`ignore_changes`](/docs/configuration/resources.html#ignore_changes) to hide the difference, e.g. +Certain resource arguments, like `security_group_ids` and the `self_managed_active_directory` configuration block `password`, do not have an FSx API method for reading the information after creation. If these arguments are set in the Terraform configuration on an imported resource, Terraform will always show a difference. To work around this behavior, either omit the argument from the Terraform configuration or use [`ignore_changes`](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) to hide the difference, e.g.
```hcl resource "aws_fsx_windows_file_system" "example" { diff --git a/website/docs/r/gamelift_fleet.html.markdown b/website/docs/r/gamelift_fleet.html.markdown index cfce329e742..d8b85441a16 100644 --- a/website/docs/r/gamelift_fleet.html.markdown +++ b/website/docs/r/gamelift_fleet.html.markdown @@ -81,7 +81,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_gamelift_fleet` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +`aws_gamelift_fleet` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `create` - (Default `70m`) How long to wait for a fleet to be created. * `delete` - (Default `20m`) How long to wait for a fleet to be deleted. diff --git a/website/docs/r/glue_trigger.html.markdown b/website/docs/r/glue_trigger.html.markdown index f6e4cad4092..04ecdca7e4a 100644 --- a/website/docs/r/glue_trigger.html.markdown +++ b/website/docs/r/glue_trigger.html.markdown @@ -152,7 +152,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_glue_trigger` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) +`aws_glue_trigger` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `5m`) How long to wait for a trigger to be created. diff --git a/website/docs/r/guardduty_invite_accepter.html.markdown b/website/docs/r/guardduty_invite_accepter.html.markdown index 8d511a2ecff..4314fdc5eff 100644 --- a/website/docs/r/guardduty_invite_accepter.html.markdown +++ b/website/docs/r/guardduty_invite_accepter.html.markdown @@ -61,7 +61,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_guardduty_invite_accepter` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) +`aws_guardduty_invite_accepter` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `1m`) How long to wait for an invite to accept. diff --git a/website/docs/r/guardduty_member.html.markdown b/website/docs/r/guardduty_member.html.markdown index aa0f8d87fd3..7b692071b36 100644 --- a/website/docs/r/guardduty_member.html.markdown +++ b/website/docs/r/guardduty_member.html.markdown @@ -45,7 +45,7 @@ The following arguments are supported: ## Timeouts -`aws_guardduty_member` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) +`aws_guardduty_member` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `60s`) How long to wait for a verification to be done against inviting GuardDuty member account. diff --git a/website/docs/r/iam_role.html.markdown b/website/docs/r/iam_role.html.markdown index 8fd1c17e6f7..641cb7aca1e 100644 --- a/website/docs/r/iam_role.html.markdown +++ b/website/docs/r/iam_role.html.markdown @@ -48,7 +48,7 @@ The following arguments are supported: * `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. * `assume_role_policy` - (Required) The policy that grants an entity permission to assume the role. 
-~> **NOTE:** This `assume_role_policy` is very similar but slightly different than just a standard IAM policy and cannot use an `aws_iam_policy` resource. It _can_ however, use an `aws_iam_policy_document` [data source](https://www.terraform.io/docs/providers/aws/d/iam_policy_document.html), see example below for how this could work. +~> **NOTE:** This `assume_role_policy` is very similar but slightly different than just a standard IAM policy and cannot use an `aws_iam_policy` resource. It _can_ however, use an `aws_iam_policy_document` [data source](/docs/providers/aws/d/iam_policy_document.html), see example below for how this could work. * `force_detach_policies` - (Optional) Specifies to force detaching any policies the role has before destroying it. Defaults to `false`. * `path` - (Optional) The path to the role. diff --git a/website/docs/r/iam_server_certificate.html.markdown b/website/docs/r/iam_server_certificate.html.markdown index f0236a626cd..e6ed655fcfa 100644 --- a/website/docs/r/iam_server_certificate.html.markdown +++ b/website/docs/r/iam_server_certificate.html.markdown @@ -20,7 +20,7 @@ For information about server certificates in IAM, see [Managing Server Certificates][2] in AWS Documentation. ~> **Note:** All arguments including the private key will be stored in the raw state as plain-text. -[Read more about sensitive data in state](/docs/state/sensitive-data.html). +[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). ## Example Usage diff --git a/website/docs/r/iam_user_login_profile.html.markdown b/website/docs/r/iam_user_login_profile.html.markdown index 818e94a242a..9749630b733 100644 --- a/website/docs/r/iam_user_login_profile.html.markdown +++ b/website/docs/r/iam_user_login_profile.html.markdown @@ -58,7 +58,7 @@ IAM User Login Profiles can be imported without password information support via $ terraform import aws_iam_user_login_profile.example myusername ``` -Since Terraform has no method to read the PGP or password information during import, use the [Terraform resource `lifecycle` configuration block `ignore_changes` argument](https://www.terraform.io/docs/configuration/resources.html#ignore_changes) to ignore them unless password recreation is desired. e.g. +Since Terraform has no method to read the PGP or password information during import, use the [Terraform resource `lifecycle` configuration block `ignore_changes` argument](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) to ignore them unless password recreation is desired. e.g. ```hcl resource "aws_iam_user_login_profile" "example" { diff --git a/website/docs/r/instance.html.markdown b/website/docs/r/instance.html.markdown index 60905b374f6..f5520686d1b 100644 --- a/website/docs/r/instance.html.markdown +++ b/website/docs/r/instance.html.markdown @@ -9,7 +9,7 @@ description: |- # Resource: aws_instance Provides an EC2 instance resource. This allows instances to be created, updated, -and deleted. Instances also support [provisioning](/docs/provisioners/index.html). +and deleted. Instances also support [provisioning](https://www.terraform.io/docs/provisioners/index.html). 
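For the provisioning link in the `instance` hunk above, a minimal sketch of a provisioner attached to an instance (the AMI ID and output file are placeholders):

```hcl
resource "aws_instance" "example" {
  ami           = "ami-0abcdef1234567890" # placeholder AMI ID
  instance_type = "t3.micro"

  # Runs on the machine executing Terraform once the instance is created.
  provisioner "local-exec" {
    command = "echo ${self.private_ip} >> private_ips.txt"
  }
}
```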
## Example Usage diff --git a/website/docs/r/kinesis_stream.html.markdown b/website/docs/r/kinesis_stream.html.markdown index 5391e4a0236..5492ca9e271 100644 --- a/website/docs/r/kinesis_stream.html.markdown +++ b/website/docs/r/kinesis_stream.html.markdown @@ -57,7 +57,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_kinesis_stream` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +`aws_kinesis_stream` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `5 minutes`) Used for Creating a Kinesis Stream - `update` - (Default `120 minutes`) Used for Updating a Kinesis Stream diff --git a/website/docs/r/kinesis_video_stream.html.markdown b/website/docs/r/kinesis_video_stream.html.markdown index afbb5e7e1d5..9b72933eed1 100644 --- a/website/docs/r/kinesis_video_stream.html.markdown +++ b/website/docs/r/kinesis_video_stream.html.markdown @@ -50,7 +50,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_kinesis_video_stream` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +`aws_kinesis_video_stream` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `5 minutes`) Used for Creating a Kinesis Stream - `update` - (Default `120 minutes`) Used for Updating a Kinesis Stream diff --git a/website/docs/r/kms_ciphertext.html.markdown b/website/docs/r/kms_ciphertext.html.markdown index 959cde4aae6..cf629172a57 100644 --- a/website/docs/r/kms_ciphertext.html.markdown +++ b/website/docs/r/kms_ciphertext.html.markdown @@ -14,7 +14,7 @@ is stable across every apply. For a changing ciphertext value each apply, see the [`aws_kms_ciphertext` data source](/docs/providers/aws/d/kms_ciphertext.html). ~> **Note:** All arguments including the plaintext be stored in the raw state as plain-text. -[Read more about sensitive data in state](/docs/state/sensitive-data.html). +[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). ## Example Usage diff --git a/website/docs/r/kms_external_key.html.markdown b/website/docs/r/kms_external_key.html.markdown index f264a634db3..60f09b2fa37 100644 --- a/website/docs/r/kms_external_key.html.markdown +++ b/website/docs/r/kms_external_key.html.markdown @@ -10,7 +10,7 @@ description: |- Manages a KMS Customer Master Key that uses external key material. To instead manage a KMS Customer Master Key where AWS automatically generates and potentially rotates key material, see the [`aws_kms_key` resource](/docs/providers/aws/r/kms_key.html). -~> **Note:** All arguments including the key material will be stored in the raw state as plain-text. [Read more about sensitive data in state](/docs/state/sensitive-data.html). +~> **Note:** All arguments including the key material will be stored in the raw state as plain-text. [Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). 
## Example Usage diff --git a/website/docs/r/lambda_function.html.markdown b/website/docs/r/lambda_function.html.markdown index 36d63545464..f900acd25b1 100644 --- a/website/docs/r/lambda_function.html.markdown +++ b/website/docs/r/lambda_function.html.markdown @@ -305,7 +305,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_lambda_function` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +`aws_lambda_function` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `create` - (Default `10m`) How long to wait for slow uploads or EC2 throttling errors. diff --git a/website/docs/r/lambda_provisioned_concurrency_config.html.markdown b/website/docs/r/lambda_provisioned_concurrency_config.html.markdown index 6664ad06a17..4399028be0e 100644 --- a/website/docs/r/lambda_provisioned_concurrency_config.html.markdown +++ b/website/docs/r/lambda_provisioned_concurrency_config.html.markdown @@ -48,7 +48,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_lambda_provisioned_concurrency_config` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +`aws_lambda_provisioned_concurrency_config` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `create` - (Default `15 minutes`) How long to wait for the Lambda Provisioned Concurrency Config to be ready on creation. * `update` - (Default `15 minutes`) How long to wait for the Lambda Provisioned Concurrency Config to be ready on update. diff --git a/website/docs/r/launch_configuration.html.markdown b/website/docs/r/launch_configuration.html.markdown index ddd47210a35..dd5395a8d43 100644 --- a/website/docs/r/launch_configuration.html.markdown +++ b/website/docs/r/launch_configuration.html.markdown @@ -222,7 +222,7 @@ identified by the `virtual_name` in the format `"ephemeral{0..N}"`. ~> **NOTE:** Changes to `*_block_device` configuration of _existing_ resources cannot currently be detected by Terraform. After updating to block device configuration, resource recreation can be manually triggered by using the -[`taint` command](/docs/commands/taint.html). +[`taint` command](https://www.terraform.io/docs/commands/taint.html). ## Attributes Reference @@ -233,7 +233,7 @@ In addition to all arguments above, the following attributes are exported: * `name` - The name of the launch configuration. 
[1]: /docs/providers/aws/r/autoscaling_group.html -[2]: /docs/configuration/resources.html#lifecycle +[2]: https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html [3]: /docs/providers/aws/r/spot_instance_request.html ## Import diff --git a/website/docs/r/lb.html.markdown b/website/docs/r/lb.html.markdown index 442e1996d4a..d6c35626706 100644 --- a/website/docs/r/lb.html.markdown +++ b/website/docs/r/lb.html.markdown @@ -150,7 +150,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts `aws_lb` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `10 minutes`) Used for Creating LB - `update` - (Default `10 minutes`) Used for LB modifications diff --git a/website/docs/r/mq_broker.html.markdown b/website/docs/r/mq_broker.html.markdown index 3646c4a4fbc..a9d16acc33c 100644 --- a/website/docs/r/mq_broker.html.markdown +++ b/website/docs/r/mq_broker.html.markdown @@ -23,7 +23,7 @@ phase because a modification has not yet taken place. You can use the brief downtime as the broker reboots. ~> **Note:** All arguments including the username and password will be stored in the raw state as plain-text. -[Read more about sensitive data in state](/docs/state/sensitive-data.html). +[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). ## Example Usage diff --git a/website/docs/r/neptune_cluster.html.markdown b/website/docs/r/neptune_cluster.html.markdown index 865c56a35a4..74a1c75bd55 100644 --- a/website/docs/r/neptune_cluster.html.markdown +++ b/website/docs/r/neptune_cluster.html.markdown @@ -80,7 +80,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts `aws_neptune_cluster` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `120 minutes`) Used for Cluster creation - `update` - (Default `120 minutes`) Used for Cluster modifications diff --git a/website/docs/r/neptune_cluster_instance.html.markdown b/website/docs/r/neptune_cluster_instance.html.markdown index b743332ebec..d59fdbe3767 100644 --- a/website/docs/r/neptune_cluster_instance.html.markdown +++ b/website/docs/r/neptune_cluster_instance.html.markdown @@ -75,12 +75,12 @@ In addition to all arguments above, the following attributes are exported: * `storage_encrypted` - Specifies whether the neptune cluster is encrypted. * `writer` – Boolean indicating if this instance is writable. `False` indicates this instance is a read replica. -[1]: /docs/configuration/resources.html#count +[1]: https://www.terraform.io/docs/configuration/meta-arguments/count.html ## Timeouts `aws_neptune_cluster_instance` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `90 minutes`) How long to wait for creating instances to become available. - `update` - (Default `90 minutes`) How long to wait for updating instances to complete updates. 
diff --git a/website/docs/r/neptune_cluster_snapshot.html.markdown b/website/docs/r/neptune_cluster_snapshot.html.markdown index 7faae112bfd..3160e963bad 100644 --- a/website/docs/r/neptune_cluster_snapshot.html.markdown +++ b/website/docs/r/neptune_cluster_snapshot.html.markdown @@ -45,7 +45,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_neptune_cluster_snapshot` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +`aws_neptune_cluster_snapshot` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `create` - (Default `20m`) How long to wait for the snapshot to be available. diff --git a/website/docs/r/neptune_event_subscription.html.markdown b/website/docs/r/neptune_event_subscription.html.markdown index 133747fc5be..770403341e3 100644 --- a/website/docs/r/neptune_event_subscription.html.markdown +++ b/website/docs/r/neptune_event_subscription.html.markdown @@ -83,7 +83,7 @@ The following additional atttributes are provided: ## Timeouts -`aws_neptune_event_subscription` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) +`aws_neptune_event_subscription` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `40m`) How long to wait for creating event subscription to become available. diff --git a/website/docs/r/network_acl.html.markdown b/website/docs/r/network_acl.html.markdown index dd9b82c5120..12180b46bb2 100644 --- a/website/docs/r/network_acl.html.markdown +++ b/website/docs/r/network_acl.html.markdown @@ -54,9 +54,9 @@ The following arguments are supported: * `vpc_id` - (Required) The ID of the associated VPC. * `subnet_ids` - (Optional) A list of Subnet IDs to apply the ACL to * `ingress` - (Optional) Specifies an ingress rule. Parameters defined below. - This argument is processed in [attribute-as-blocks mode](/docs/configuration/attr-as-blocks.html). + This argument is processed in [attribute-as-blocks mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html). * `egress` - (Optional) Specifies an egress rule. Parameters defined below. - This argument is processed in [attribute-as-blocks mode](/docs/configuration/attr-as-blocks.html). + This argument is processed in [attribute-as-blocks mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html). * `tags` - (Optional) A map of tags to assign to the resource. Both `egress` and `ingress` support the following keys: diff --git a/website/docs/r/opsworks_instance.html.markdown b/website/docs/r/opsworks_instance.html.markdown index 68680918d88..61d22039baa 100644 --- a/website/docs/r/opsworks_instance.html.markdown +++ b/website/docs/r/opsworks_instance.html.markdown @@ -110,7 +110,7 @@ identified by the `virtual_name` in the format `"ephemeral{0..N}"`. ~> **NOTE:** Currently, changes to `*_block_device` configuration of _existing_ resources cannot be automatically detected by Terraform. After making updates to block device configuration, resource recreation can be manually triggered by -using the [`taint` command](/docs/commands/taint.html). +using the [`taint` command](https://www.terraform.io/docs/commands/taint.html). 
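The `taint` command retargeted in the `launch_configuration` and `opsworks_instance` hunks is a one-liner; an illustrative invocation against a hypothetical resource address:

```
$ terraform taint aws_opsworks_instance.my-instance
```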
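The attribute-as-blocks link in the `network_acl` hunk above carries one practical consequence worth sketching: because `ingress` and `egress` are processed as attributes, assigning an explicit empty list removes all rules, which merely omitting the blocks would not do (names illustrative):

```hcl
resource "aws_network_acl" "example" {
  vpc_id = aws_vpc.example.id

  # Attribute-as-blocks mode: empty lists clear every ingress/egress rule.
  ingress = []
  egress  = []
}
```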
## Attributes Reference @@ -136,7 +136,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts `aws_opsworks_instance` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `10 minutes`) Used when the instance is created. It should cover the time needed for the instance to start successfully. - `delete` - (Default `10 minutes`) Used when the instance is deleted. It should cover the time needed for the instance to stop successfully. diff --git a/website/docs/r/opsworks_mysql_layer.html.markdown b/website/docs/r/opsworks_mysql_layer.html.markdown index b8fd4ff0501..f8f7ccd4df3 100644 --- a/website/docs/r/opsworks_mysql_layer.html.markdown +++ b/website/docs/r/opsworks_mysql_layer.html.markdown @@ -11,7 +11,7 @@ description: |- Provides an OpsWorks MySQL layer resource. ~> **Note:** All arguments including the root password will be stored in the raw state as plain-text. -[Read more about sensitive data in state](/docs/state/sensitive-data.html). +[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). ## Example Usage diff --git a/website/docs/r/opsworks_rds_db_instance.html.markdown b/website/docs/r/opsworks_rds_db_instance.html.markdown index 7b95a6fd457..b2a9d9e2ba7 100644 --- a/website/docs/r/opsworks_rds_db_instance.html.markdown +++ b/website/docs/r/opsworks_rds_db_instance.html.markdown @@ -11,7 +11,7 @@ description: |- Provides an OpsWorks RDS DB Instance resource. ~> **Note:** All arguments including the username and password will be stored in the raw state as plain-text. -[Read more about sensitive data in state](/docs/state/sensitive-data.html). +[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). ## Example Usage diff --git a/website/docs/r/organizations_account.html.markdown b/website/docs/r/organizations_account.html.markdown index 262ec41fe2d..cc7edd97712 100644 --- a/website/docs/r/organizations_account.html.markdown +++ b/website/docs/r/organizations_account.html.markdown @@ -31,7 +31,7 @@ The following arguments are supported: * `email` - (Required) The email address of the owner to assign to the new member account. This email address must not already be associated with another AWS account. * `iam_user_access_to_billing` - (Optional) If set to `ALLOW`, the new account enables IAM users to access account billing information if they have the required permissions. If set to `DENY`, then only the root user of the new account can access account billing information. * `parent_id` - (Optional) Parent Organizational Unit ID or Root ID for the account. Defaults to the Organization default Root ID. A configuration must be present for this argument to perform drift detection. -* `role_name` - (Optional) The name of an IAM role that Organizations automatically preconfigures in the new member account. This role trusts the master account, allowing users in the master account to assume the role, as permitted by the master account administrator. The role has administrator permissions in the new member account. 
The Organizations API provides no method for reading this information after account creation, so Terraform cannot perform drift detection on its value and will always show a difference for a configured value after import unless [`ignore_changes`](/docs/configuration/resources.html#ignore_changes) is used. +* `role_name` - (Optional) The name of an IAM role that Organizations automatically preconfigures in the new member account. This role trusts the master account, allowing users in the master account to assume the role, as permitted by the master account administrator. The role has administrator permissions in the new member account. The Organizations API provides no method for reading this information after account creation, so Terraform cannot perform drift detection on its value and will always show a difference for a configured value after import unless [`ignore_changes`](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) is used. * `tags` - (Optional) Key-value map of resource tags. ## Attributes Reference @@ -49,7 +49,7 @@ The AWS member account can be imported by using the `account_id`, e.g. $ terraform import aws_organizations_account.my_org 111111111111 ``` -Certain resource arguments, like `role_name`, do not have an Organizations API method for reading the information after account creation. If the argument is set in the Terraform configuration on an imported resource, Terraform will always show a difference. To workaround this behavior, either omit the argument from the Terraform configuration or use [`ignore_changes`](/docs/configuration/resources.html#ignore_changes) to hide the difference, e.g. +Certain resource arguments, like `role_name`, do not have an Organizations API method for reading the information after account creation. If the argument is set in the Terraform configuration on an imported resource, Terraform will always show a difference. To work around this behavior, either omit the argument from the Terraform configuration or use [`ignore_changes`](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) to hide the difference, e.g. ```hcl resource "aws_organizations_account" "account" { diff --git a/website/docs/r/pinpoint_adm_channel.markdown b/website/docs/r/pinpoint_adm_channel.markdown index 807b5a50283..c3de242a374 100644 --- a/website/docs/r/pinpoint_adm_channel.markdown +++ b/website/docs/r/pinpoint_adm_channel.markdown @@ -11,7 +11,7 @@ description: |- Provides a Pinpoint ADM (Amazon Device Messaging) Channel resource. ~> **Note:** All arguments including the Client ID and Client Secret will be stored in the raw state as plain-text. -[Read more about sensitive data in state](/docs/state/sensitive-data.html). +[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). ## Example Usage diff --git a/website/docs/r/pinpoint_apns_channel.markdown b/website/docs/r/pinpoint_apns_channel.markdown index 27e68da221c..63c37c1a3a1 100644 --- a/website/docs/r/pinpoint_apns_channel.markdown +++ b/website/docs/r/pinpoint_apns_channel.markdown @@ -11,7 +11,7 @@ description: |- Provides a Pinpoint APNs Channel resource. ~> **Note:** All arguments, including certificates and tokens, will be stored in the raw state as plain-text. -[Read more about sensitive data in state](/docs/state/sensitive-data.html). +[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). 
## Example Usage diff --git a/website/docs/r/pinpoint_apns_sandbox_channel.markdown b/website/docs/r/pinpoint_apns_sandbox_channel.markdown index 3664f4f9e49..e960514236d 100644 --- a/website/docs/r/pinpoint_apns_sandbox_channel.markdown +++ b/website/docs/r/pinpoint_apns_sandbox_channel.markdown @@ -11,7 +11,7 @@ description: |- Provides a Pinpoint APNs Sandbox Channel resource. ~> **Note:** All arguments, including certificates and tokens, will be stored in the raw state as plain-text. -[Read more about sensitive data in state](/docs/state/sensitive-data.html). +[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). ## Example Usage diff --git a/website/docs/r/pinpoint_apns_voip_channel.markdown b/website/docs/r/pinpoint_apns_voip_channel.markdown index b015ec0beee..f4ad7f4c2d6 100644 --- a/website/docs/r/pinpoint_apns_voip_channel.markdown +++ b/website/docs/r/pinpoint_apns_voip_channel.markdown @@ -11,7 +11,7 @@ description: |- Provides a Pinpoint APNs VoIP Channel resource. ~> **Note:** All arguments, including certificates and tokens, will be stored in the raw state as plain-text. -[Read more about sensitive data in state](/docs/state/sensitive-data.html). +[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). ## Example Usage diff --git a/website/docs/r/pinpoint_apns_voip_sandbox_channel.markdown b/website/docs/r/pinpoint_apns_voip_sandbox_channel.markdown index bbe496d9943..16a6da76193 100644 --- a/website/docs/r/pinpoint_apns_voip_sandbox_channel.markdown +++ b/website/docs/r/pinpoint_apns_voip_sandbox_channel.markdown @@ -11,7 +11,7 @@ description: |- Provides a Pinpoint APNs VoIP Sandbox Channel resource. ~> **Note:** All arguments, including certificates and tokens, will be stored in the raw state as plain-text. -[Read more about sensitive data in state](/docs/state/sensitive-data.html). +[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). ## Example Usage diff --git a/website/docs/r/pinpoint_baidu_channel.markdown b/website/docs/r/pinpoint_baidu_channel.markdown index f4e15a265ef..cbe3767c647 100644 --- a/website/docs/r/pinpoint_baidu_channel.markdown +++ b/website/docs/r/pinpoint_baidu_channel.markdown @@ -11,7 +11,7 @@ description: |- Provides a Pinpoint Baidu Channel resource. ~> **Note:** All arguments including the Api Key and Secret Key will be stored in the raw state as plain-text. -[Read more about sensitive data in state](/docs/state/sensitive-data.html). +[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). ## Example Usage diff --git a/website/docs/r/pinpoint_gcm_channel.markdown b/website/docs/r/pinpoint_gcm_channel.markdown index 89ed41878f1..72adcda5205 100644 --- a/website/docs/r/pinpoint_gcm_channel.markdown +++ b/website/docs/r/pinpoint_gcm_channel.markdown @@ -11,7 +11,7 @@ description: |- Provides a Pinpoint GCM Channel resource. ~> **Note:** Api Key argument will be stored in the raw state as plain-text. -[Read more about sensitive data in state](/docs/state/sensitive-data.html). +[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). ## Example Usage diff --git a/website/docs/r/rds_cluster.html.markdown b/website/docs/r/rds_cluster.html.markdown index 4a1c011f3de..0fb0c9c303b 100644 --- a/website/docs/r/rds_cluster.html.markdown +++ b/website/docs/r/rds_cluster.html.markdown @@ -26,7 +26,7 @@ brief downtime as the server reboots. 
See the AWS Docs on [RDS Maintenance][4] for more information. ~> **Note:** All arguments including the username and password will be stored in the raw state as plain-text. -[Read more about sensitive data in state](/docs/state/sensitive-data.html). +[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). ## Example Usage @@ -99,7 +99,7 @@ The following arguments are supported: * `allow_major_version_upgrade` - (Optional) Enable to allow major engine version upgrades when changing engine versions. Defaults to `false`. * `apply_immediately` - (Optional) Specifies whether any cluster modifications are applied immediately, or during the next maintenance window. Default is `false`. See [Amazon RDS Documentation for more information.](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html) -* `availability_zones` - (Optional) A list of EC2 Availability Zones for the DB cluster storage where DB cluster instances can be created. RDS automatically assigns 3 AZs if less than 3 AZs are configured, which will show as a difference requiring resource recreation next Terraform apply. It is recommended to specify 3 AZs or use [the `lifecycle` configuration block `ignore_changes` argument](/docs/configuration/resources.html#ignore_changes) if necessary. +* `availability_zones` - (Optional) A list of EC2 Availability Zones for the DB cluster storage where DB cluster instances can be created. RDS automatically assigns 3 AZs if less than 3 AZs are configured, which will show as a difference requiring resource recreation next Terraform apply. It is recommended to specify 3 AZs or use [the `lifecycle` configuration block `ignore_changes` argument](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) if necessary. * `backtrack_window` - (Optional) The target backtrack window, in seconds. Only available for `aurora` engine currently. To disable backtracking, set this value to `0`. Defaults to `0`. Must be between `0` and `259200` (72 hours) * `backup_retention_period` - (Optional) The days to retain backups for. Default `1` * `cluster_identifier_prefix` - (Optional, Forces new resource) Creates a unique cluster identifier beginning with the specified prefix. Conflicts with `cluster_identifier`. @@ -124,7 +124,7 @@ The following arguments are supported: * `port` - (Optional) The port on which the DB accepts connections * `preferred_backup_window` - (Optional) The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.Time in UTC. Default: A 30-minute window selected at random from an 8-hour block of time per region. e.g. 04:00-09:00 * `preferred_maintenance_window` - (Optional) The weekly time range during which system maintenance can occur, in (UTC) e.g. wed:04:00-wed:04:30 -* `replication_source_identifier` - (Optional) ARN of a source DB cluster or DB instance if this DB cluster is to be created as a Read Replica. If DB Cluster is part of a Global Cluster, use the [`lifecycle` configuration block `ignore_changes` argument](/docs/configuration/resources.html#ignore_changes) to prevent Terraform from showing differences for this argument instead of configuring this value. +* `replication_source_identifier` - (Optional) ARN of a source DB cluster or DB instance if this DB cluster is to be created as a Read Replica. 
If DB Cluster is part of a Global Cluster, use the [`lifecycle` configuration block `ignore_changes` argument](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) to prevent Terraform from showing differences for this argument instead of configuring this value. * `restore_to_point_in_time` - (Optional) Nested attribute for [point in time restore](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_PIT.html). More details below. * `scaling_configuration` - (Optional) Nested attribute with scaling properties. Only valid when `engine_mode` is set to `serverless`. More details below. * `skip_final_snapshot` - (Optional) Determines whether a final DB snapshot is created before the DB cluster is deleted. If true is specified, no DB snapshot is created. If false is specified, a DB snapshot is created before the DB cluster is deleted, using the value from `final_snapshot_identifier`. Default is `false`. @@ -248,7 +248,7 @@ load-balanced across replicas ## Timeouts `aws_rds_cluster` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `120 minutes`) Used for Cluster creation - `update` - (Default `120 minutes`) Used for Cluster modifications diff --git a/website/docs/r/rds_cluster_instance.html.markdown b/website/docs/r/rds_cluster_instance.html.markdown index 2de3a6c99aa..aec821f6ba7 100644 --- a/website/docs/r/rds_cluster_instance.html.markdown +++ b/website/docs/r/rds_cluster_instance.html.markdown @@ -58,7 +58,7 @@ The following arguments are supported: For information on the difference between the available Aurora MySQL engines see [Comparison between Aurora MySQL 1 and Aurora MySQL 2](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AuroraMySQL.Updates.20180206.html) in the Amazon RDS User Guide. -* `engine_version` - (Optional) The database engine version. When managing the engine version in the cluster, it is recommended to add the [lifecycle `ignore_changes` configuration](/docs/configuration/resources.html#ignore_changes) for this argument to prevent Terraform from proposing changes to the instance engine version directly. +* `engine_version` - (Optional) The database engine version. When managing the engine version in the cluster, it is recommended to add the [lifecycle `ignore_changes` configuration](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) for this argument to prevent Terraform from proposing changes to the instance engine version directly. * `instance_class` - (Required) The instance class to use. For details on CPU and memory, see [Scaling Aurora DB Instances][4]. Aurora uses `db.*` instance classes/types. Please see [AWS Documentation][7] for currently available instance classes and complete details. * `publicly_accessible` - (Optional) Bool to control if instance is publicly accessible. 
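A minimal sketch of the `engine_version` recommendation above, assuming a cluster resource named `aws_rds_cluster.example` (resource names and the instance class are illustrative):

```hcl
resource "aws_rds_cluster_instance" "example" {
  cluster_identifier = aws_rds_cluster.example.id
  instance_class     = "db.r5.large"

  # Let engine version changes be driven by the cluster,
  # not proposed against this instance directly.
  lifecycle {
    ignore_changes = [engine_version]
  }
}
```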
@@ -108,14 +108,14 @@ In addition to all arguments above, the following attributes are exported: [2]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html [3]: /docs/providers/aws/r/rds_cluster.html [4]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Aurora.Managing.html -[5]: /docs/configuration/resources.html#count +[5]: https://www.terraform.io/docs/configuration/meta-arguments/count.html [6]: https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html [7]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html ## Timeouts `aws_rds_cluster_instance` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `90 minutes`) Used for Creating Instances, Replicas, and restoring from Snapshots diff --git a/website/docs/r/rds_global_cluster.html.markdown b/website/docs/r/rds_global_cluster.html.markdown index 41da2e052a3..de76a8e4327 100644 --- a/website/docs/r/rds_global_cluster.html.markdown +++ b/website/docs/r/rds_global_cluster.html.markdown @@ -121,7 +121,7 @@ $ terraform import aws_rds_global_cluster.example example Certain resource arguments, like `force_destroy`, only exist within Terraform. If the argument is set in the Terraform configuration on an imported resource, Terraform will show a difference on the first plan after import to update the state value. This change is safe to apply immediately so the state matches the desired configuration. -Certain resource arguments, like `source_db_cluster_identifier`, do not have an API method for reading the information after creation. If the argument is set in the Terraform configuration on an imported resource, Terraform will always show a difference. To workaround this behavior, either omit the argument from the Terraform configuration or use [`ignore_changes`](/docs/configuration/resources.html#ignore_changes) to hide the difference, e.g. +Certain resource arguments, like `source_db_cluster_identifier`, do not have an API method for reading the information after creation. If the argument is set in the Terraform configuration on an imported resource, Terraform will always show a difference. To work around this behavior, either omit the argument from the Terraform configuration or use [`ignore_changes`](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) to hide the difference, e.g. ```hcl resource "aws_rds_global_cluster" "example" { diff --git a/website/docs/r/redshift_cluster.html.markdown b/website/docs/r/redshift_cluster.html.markdown index 266c870fb78..e46ad1e4644 100644 --- a/website/docs/r/redshift_cluster.html.markdown +++ b/website/docs/r/redshift_cluster.html.markdown @@ -11,7 +11,7 @@ description: |- Provides a Redshift Cluster Resource. ~> **Note:** All arguments including the username and password will be stored in the raw state as plain-text. -[Read more about sensitive data in state](/docs/state/sensitive-data.html). +[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). ## Example Usage @@ -76,7 +76,7 @@ string. 
### Timeouts `aws_redshift_cluster` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `75 minutes`) Used for creating Clusters. - `update` - (Default `75 minutes`) Used for updating Clusters. diff --git a/website/docs/r/route.html.markdown b/website/docs/r/route.html.markdown index f96657c680f..4dc66df7bb7 100644 --- a/website/docs/r/route.html.markdown +++ b/website/docs/r/route.html.markdown @@ -84,7 +84,7 @@ will be exported as an attribute once the resource is created. ## Timeouts `aws_route` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `2 minutes`) Used for route creation - `delete` - (Default `5 minutes`) Used for route deletion diff --git a/website/docs/r/route53_resolver_endpoint.html.markdown b/website/docs/r/route53_resolver_endpoint.html.markdown index 215d1d5104a..5c6dd617aed 100644 --- a/website/docs/r/route53_resolver_endpoint.html.markdown +++ b/website/docs/r/route53_resolver_endpoint.html.markdown @@ -66,7 +66,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts `aws_route53_resolver_endpoint` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `10 minutes`) Used for creating Route 53 Resolver endpoint - `update` - (Default `10 minutes`) Used for updating Route 53 Resolver endpoint diff --git a/website/docs/r/route53_zone.html.markdown b/website/docs/r/route53_zone.html.markdown index b50dbeec2a1..56a7e1a5cfd 100644 --- a/website/docs/r/route53_zone.html.markdown +++ b/website/docs/r/route53_zone.html.markdown @@ -50,7 +50,7 @@ resource "aws_route53_record" "dev-ns" { ### Private Zone -~> **NOTE:** Terraform provides both exclusive VPC associations defined in-line in this resource via `vpc` configuration blocks and a separate [Zone VPC Association](/docs/providers/aws/r/route53_zone_association.html) resource. At this time, you cannot use in-line VPC associations in conjunction with any `aws_route53_zone_association` resources with the same zone ID otherwise it will cause a perpetual difference in plan output. You can optionally use the generic Terraform resource [lifecycle configuration block](/docs/configuration/resources.html#lifecycle) with `ignore_changes` to manage additional associations via the `aws_route53_zone_association` resource. +~> **NOTE:** Terraform provides both exclusive VPC associations defined in-line in this resource via `vpc` configuration blocks and a separate [Zone VPC Association](/docs/providers/aws/r/route53_zone_association.html) resource. At this time, you cannot use in-line VPC associations in conjunction with any `aws_route53_zone_association` resources with the same zone ID otherwise it will cause a perpetual difference in plan output. You can optionally use the generic Terraform resource [lifecycle configuration block](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html) with `ignore_changes` to manage additional associations via the `aws_route53_zone_association` resource. 
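A minimal sketch of that `lifecycle` pattern, assuming an `aws_vpc.example` resource exists and further associations are managed with `aws_route53_zone_association` (all names here are illustrative):

```hcl
resource "aws_route53_zone" "example" {
  name = "example.com"

  # Initial association; additional associations are managed
  # via aws_route53_zone_association resources.
  vpc {
    vpc_id = aws_vpc.example.id
  }

  lifecycle {
    ignore_changes = [vpc]
  }
}
```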
~> **NOTE:** Private zones require at least one VPC association at all times. diff --git a/website/docs/r/route53_zone_association.html.markdown b/website/docs/r/route53_zone_association.html.markdown index dde709ee8e6..c39fef313eb 100644 --- a/website/docs/r/route53_zone_association.html.markdown +++ b/website/docs/r/route53_zone_association.html.markdown @@ -12,7 +12,7 @@ Manages a Route53 Hosted Zone VPC association. VPC associations can only be made ~> **NOTE:** Unless explicit association ordering is required (e.g. a separate cross-account association authorization), usage of this resource is not recommended. Use the `vpc` configuration blocks available within the [`aws_route53_zone` resource](/docs/providers/aws/r/route53_zone.html) instead. -~> **NOTE:** Terraform provides both this standalone Zone VPC Association resource and exclusive VPC associations defined in-line in the [`aws_route53_zone` resource](/docs/providers/aws/r/route53_zone.html) via `vpc` configuration blocks. At this time, you cannot use those in-line VPC associations in conjunction with this resource and the same zone ID otherwise it will cause a perpetual difference in plan output. You can optionally use the generic Terraform resource [lifecycle configuration block](/docs/configuration/resources.html#lifecycle) with `ignore_changes` in the `aws_route53_zone` resource to manage additional associations via this resource. +~> **NOTE:** Terraform provides both this standalone Zone VPC Association resource and exclusive VPC associations defined in-line in the [`aws_route53_zone` resource](/docs/providers/aws/r/route53_zone.html) via `vpc` configuration blocks. At this time, you cannot use those in-line VPC associations in conjunction with this resource and the same zone ID otherwise it will cause a perpetual difference in plan output. You can optionally use the generic Terraform resource [lifecycle configuration block](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html) with `ignore_changes` in the `aws_route53_zone` resource to manage additional associations via this resource. ## Example Usage diff --git a/website/docs/r/route_table.html.markdown b/website/docs/r/route_table.html.markdown index 51bbffe4b6f..a3c4e945aa4 100644 --- a/website/docs/r/route_table.html.markdown +++ b/website/docs/r/route_table.html.markdown @@ -56,7 +56,7 @@ resource "aws_route_table" "r" { The following arguments are supported: * `vpc_id` - (Required) The VPC ID. -* `route` - (Optional) A list of route objects. Their keys are documented below. This argument is processed in [attribute-as-blocks mode](/docs/configuration/attr-as-blocks.html). +* `route` - (Optional) A list of route objects. Their keys are documented below. This argument is processed in [attribute-as-blocks mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html). * `tags` - (Optional) A map of tags to assign to the resource. * `propagating_vgws` - (Optional) A list of virtual gateways for propagation. diff --git a/website/docs/r/security_group.html.markdown b/website/docs/r/security_group.html.markdown index da0f0c21efd..f96cf766e40 100644 --- a/website/docs/r/security_group.html.markdown +++ b/website/docs/r/security_group.html.markdown @@ -66,10 +66,10 @@ assign a random, unique name to classify your security groups in a way that can be updated, use `tags`. * `ingress` - (Optional) Can be specified multiple times for each ingress rule. Each ingress block supports fields documented below. 
This argument is processed in [attribute-as-blocks mode](/docs/configuration/attr-as-blocks.html). + This argument is processed in [attribute-as-blocks mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html). * `egress` - (Optional, VPC only) Can be specified multiple times for each egress rule. Each egress block supports fields documented below. - This argument is processed in [attribute-as-blocks mode](/docs/configuration/attr-as-blocks.html). + This argument is processed in [attribute-as-blocks mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html). * `revoke_rules_on_delete` - (Optional) Instruct Terraform to revoke all of the Security Groups attached ingress and egress rules before deleting the rule itself. This is normally not needed, however certain AWS services such as @@ -171,7 +171,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_security_group` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) +`aws_security_group` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `10m`) How long to wait for a security group to be created. diff --git a/website/docs/r/ses_domain_identity_verification.html.markdown b/website/docs/r/ses_domain_identity_verification.html.markdown index 56069476a99..2edfa29a94c 100644 --- a/website/docs/r/ses_domain_identity_verification.html.markdown +++ b/website/docs/r/ses_domain_identity_verification.html.markdown @@ -53,7 +53,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`acm_ses_domain_identity_verification` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) +`aws_ses_domain_identity_verification` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `45m`) How long to wait for a domain identity to be verified. diff --git a/website/docs/r/ssm_document.html.markdown b/website/docs/r/ssm_document.html.markdown index cedf8114c5f..ffeb17ac3ce 100644 --- a/website/docs/r/ssm_document.html.markdown +++ b/website/docs/r/ssm_document.html.markdown @@ -102,7 +102,7 @@ SSM Documents can be imported using the name, e.g. $ terraform import aws_ssm_document.example example ``` -The `attachments_source` argument does not have an SSM API method for reading the attachment information detail after creation. If the argument is set in the Terraform configuration on an imported resource, Terraform will always show a difference. To workaround this behavior, either omit the argument from the Terraform configuration or use [`ignore_changes`](/docs/configuration/resources.html#ignore_changes) to hide the difference, e.g. +The `attachments_source` argument does not have an SSM API method for reading the attachment information detail after creation. If the argument is set in the Terraform configuration on an imported resource, Terraform will always show a difference. To work around this behavior, either omit the argument from the Terraform configuration or use [`ignore_changes`](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) to hide the difference, e.g. 
```hcl resource "aws_ssm_document" "test" { diff --git a/website/docs/r/ssm_parameter.html.markdown b/website/docs/r/ssm_parameter.html.markdown index 30b252b603a..a97415e3fd8 100644 --- a/website/docs/r/ssm_parameter.html.markdown +++ b/website/docs/r/ssm_parameter.html.markdown @@ -51,7 +51,7 @@ resource "aws_ssm_parameter" "secret" { ``` ~> **Note:** The unencrypted value of a SecureString will be stored in the raw state as plain-text. -[Read more about sensitive data in state](/docs/state/sensitive-data.html). +[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). ## Argument Reference diff --git a/website/docs/r/storagegateway_gateway.html.markdown b/website/docs/r/storagegateway_gateway.html.markdown index 0a1a1d2581a..c07f62bf937 100644 --- a/website/docs/r/storagegateway_gateway.html.markdown +++ b/website/docs/r/storagegateway_gateway.html.markdown @@ -115,7 +115,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_storagegateway_gateway` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +`aws_storagegateway_gateway` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `create` - (Default `10m`) How long to wait for gateway activation and connection to Storage Gateway. @@ -127,7 +127,7 @@ In addition to all arguments above, the following attributes are exported: $ terraform import aws_storagegateway_gateway.example arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678 ``` -Certain resource arguments, like `gateway_ip_address` do not have a Storage Gateway API method for reading the information after creation, either omit the argument from the Terraform configuration or use [`ignore_changes`](/docs/configuration/resources.html#ignore_changes) to hide the difference, e.g. +Certain resource arguments, like `gateway_ip_address`, do not have a Storage Gateway API method for reading the information after creation. To work around this behavior, either omit the argument from the Terraform configuration or use [`ignore_changes`](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) to hide the difference, e.g. ```hcl diff --git a/website/docs/r/storagegateway_nfs_file_share.html.markdown b/website/docs/r/storagegateway_nfs_file_share.html.markdown index e439e04dd6d..deebc12898a 100644 --- a/website/docs/r/storagegateway_nfs_file_share.html.markdown +++ b/website/docs/r/storagegateway_nfs_file_share.html.markdown @@ -70,7 +70,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_storagegateway_nfs_file_share` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +`aws_storagegateway_nfs_file_share` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `create` - (Default `10m`) How long to wait for file share creation. * `update` - (Default `10m`) How long to wait for file share updates. 
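These options are set through a `timeouts` block inside the resource. A minimal sketch, using the defaults listed above (other arguments elided):

```hcl
resource "aws_storagegateway_nfs_file_share" "example" {
  # ... other arguments ...

  timeouts {
    create = "10m"
    update = "10m"
  }
}
```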
diff --git a/website/docs/r/storagegateway_smb_file_share.html.markdown b/website/docs/r/storagegateway_smb_file_share.html.markdown index 3d340016c67..cd4256f8a2f 100644 --- a/website/docs/r/storagegateway_smb_file_share.html.markdown +++ b/website/docs/r/storagegateway_smb_file_share.html.markdown @@ -83,7 +83,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_storagegateway_smb_file_share` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +`aws_storagegateway_smb_file_share` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: * `create` - (Default `10m`) How long to wait for file share creation. * `update` - (Default `10m`) How long to wait for file share updates. diff --git a/website/docs/r/subnet.html.markdown b/website/docs/r/subnet.html.markdown index 90f55a30606..e3184e6b520 100644 --- a/website/docs/r/subnet.html.markdown +++ b/website/docs/r/subnet.html.markdown @@ -74,7 +74,7 @@ In addition to all arguments above, the following attributes are exported: ## Timeouts -`aws_subnet` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) +`aws_subnet` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `10m`) How long to wait for a subnet to be created. diff --git a/website/docs/r/vpc_endpoint.html.markdown b/website/docs/r/vpc_endpoint.html.markdown index 36837b9d978..3a82dbd7e6b 100644 --- a/website/docs/r/vpc_endpoint.html.markdown +++ b/website/docs/r/vpc_endpoint.html.markdown @@ -128,7 +128,7 @@ Defaults to `false`. 
### Timeouts `aws_vpc_endpoint` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `10 minutes`) Used for creating a VPC endpoint - `update` - (Default `10 minutes`) Used for VPC endpoint modifications diff --git a/website/docs/r/vpc_endpoint_subnet_association.html.markdown b/website/docs/r/vpc_endpoint_subnet_association.html.markdown index 56fb455d5c9..783e3e038c6 100644 --- a/website/docs/r/vpc_endpoint_subnet_association.html.markdown +++ b/website/docs/r/vpc_endpoint_subnet_association.html.markdown @@ -37,7 +37,7 @@ The following arguments are supported: ### Timeouts `aws_vpc_endpoint_subnet_association` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `10 minutes`) Used for creating the association - `delete` - (Default `10 minutes`) Used for destroying the association diff --git a/website/docs/r/vpc_ipv4_cidr_block_association.html.markdown b/website/docs/r/vpc_ipv4_cidr_block_association.html.markdown index 6534308b93b..0435df8c707 100644 --- a/website/docs/r/vpc_ipv4_cidr_block_association.html.markdown +++ b/website/docs/r/vpc_ipv4_cidr_block_association.html.markdown @@ -36,7 +36,7 @@ The following arguments are supported: ## Timeouts `aws_vpc_ipv4_cidr_block_association` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `10 minutes`) Used for creating the association - `delete` - (Default `10 minutes`) Used for destroying the association diff --git a/website/docs/r/vpc_peering_connection.html.markdown b/website/docs/r/vpc_peering_connection.html.markdown index cc4799ea48b..56bbb88ba5a 100644 --- a/website/docs/r/vpc_peering_connection.html.markdown +++ b/website/docs/r/vpc_peering_connection.html.markdown @@ -140,7 +140,7 @@ connection. ### Timeouts `aws_vpc_peering_connection` provides the following -[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `1 minute`) Used for creating a peering connection - `update` - (Default `1 minute`) Used for peering connection modifications diff --git a/website/docs/r/vpc_peering_connection_accepter.html.markdown b/website/docs/r/vpc_peering_connection_accepter.html.markdown index cb3962282dd..e80165ce49b 100644 --- a/website/docs/r/vpc_peering_connection_accepter.html.markdown +++ b/website/docs/r/vpc_peering_connection_accepter.html.markdown @@ -125,7 +125,7 @@ VPC Peering Connection Accepters can be imported by using the Peering Connection $ terraform import aws_vpc_peering_connection_accepter.example pcx-12345678 ``` -Certain resource arguments, like `auto_accept`, do not have an EC2 API method for reading the information after peering connection creation. If the argument is set in the Terraform configuration on an imported resource, Terraform will always show a difference. 
To workaround this behavior, either omit the argument from the Terraform configuration or use [`ignore_changes`](/docs/configuration/resources.html#ignore_changes) to hide the difference, e.g. +Certain resource arguments, like `auto_accept`, do not have an EC2 API method for reading the information after peering connection creation. If the argument is set in the Terraform configuration on an imported resource, Terraform will always show a difference. To work around this behavior, either omit the argument from the Terraform configuration or use [`ignore_changes`](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) to hide the difference, e.g. ```hcl resource "aws_vpc_peering_connection_accepter" "example" { diff --git a/website/docs/r/vpn_connection.html.markdown b/website/docs/r/vpn_connection.html.markdown index c9b046e07cb..3b7edab9947 100644 --- a/website/docs/r/vpn_connection.html.markdown +++ b/website/docs/r/vpn_connection.html.markdown @@ -11,7 +11,7 @@ description: |- Manages an EC2 VPN connection. These objects can be connected to customer gateways, and allow you to establish tunnels between your network and Amazon. ~> **Note:** All arguments including `tunnel1_preshared_key` and `tunnel2_preshared_key` will be stored in the raw state as plain-text. -[Read more about sensitive data in state](/docs/state/sensitive-data.html). +[Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html). ~> **Note:** The CIDR blocks in the arguments `tunnel1_inside_cidr` and `tunnel2_inside_cidr` must have a prefix of /30 and be a part of a specific range. [Read more about this in the AWS documentation](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_VpnTunnelOptionsSpecification.html). diff --git a/website/docs/r/workspaces_workspace.html.markdown b/website/docs/r/workspaces_workspace.html.markdown index d4db3eaaa3f..a62b266f437 100644 --- a/website/docs/r/workspaces_workspace.html.markdown +++ b/website/docs/r/workspaces_workspace.html.markdown @@ -66,7 +66,7 @@ The following arguments are supported: ### Timeouts `aws_workspaces_workspace` provides the following -[Timeouts](/docs/configuration/resources.html#operation-timeouts) configuration options: +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - `create` - (Default `30 minutes`) Used for WorkSpace creation. - `update` - (Default `10 minutes`) Used for WorkSpace updating. 
From 5364084000d622e50479a62a9df488f311b5f41f Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Mon, 4 Jan 2021 15:03:45 -0500 Subject: [PATCH 0375/1212] resource/gamelift: Fix hardcoded regions --- aws/resource_aws_gamelift_alias_test.go | 24 ++++++++++++--- aws/resource_aws_gamelift_build_test.go | 18 ++++++++++-- aws/resource_aws_gamelift_fleet_test.go | 24 ++++++++++++--- ...ce_aws_gamelift_game_session_queue_test.go | 18 ++++++++++-- aws/resource_aws_gamelift_test.go | 29 ++++++++++--------- 5 files changed, 85 insertions(+), 28 deletions(-) diff --git a/aws/resource_aws_gamelift_alias_test.go b/aws/resource_aws_gamelift_alias_test.go index 8120beb2323..f55bbaa2ddf 100644 --- a/aws/resource_aws_gamelift_alias_test.go +++ b/aws/resource_aws_gamelift_alias_test.go @@ -92,7 +92,11 @@ func TestAccAWSGameliftAlias_basic(t *testing.T) { uMessage := fmt.Sprintf("tf test updated message %s", rString) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSGamelift(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccPartitionHasServicePreCheck(gamelift.EndpointsID, t) + testAccPreCheckAWSGamelift(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSGameliftAliasDestroy, Steps: []resource.TestStep{ @@ -138,7 +142,11 @@ func TestAccAWSGameliftAlias_tags(t *testing.T) { aliasName := acctest.RandomWithPrefix("tf-acc-alias") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSGamelift(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccPartitionHasServicePreCheck(gamelift.EndpointsID, t) + testAccPreCheckAWSGamelift(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSGameliftAliasDestroy, Steps: []resource.TestStep{ @@ -207,7 +215,11 @@ func TestAccAWSGameliftAlias_fleetRouting(t *testing.T) { resourceName := "aws_gamelift_alias.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSGamelift(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccPartitionHasServicePreCheck(gamelift.EndpointsID, t) + testAccPreCheckAWSGamelift(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSGameliftAliasDestroy, Steps: []resource.TestStep{ @@ -244,7 +256,11 @@ func TestAccAWSGameliftAlias_disappears(t *testing.T) { message := fmt.Sprintf("tf test message %s", rString) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSGamelift(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccPartitionHasServicePreCheck(gamelift.EndpointsID, t) + testAccPreCheckAWSGamelift(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSGameliftAliasDestroy, Steps: []resource.TestStep{ diff --git a/aws/resource_aws_gamelift_build_test.go b/aws/resource_aws_gamelift_build_test.go index 225b71471be..f0e727c38a3 100644 --- a/aws/resource_aws_gamelift_build_test.go +++ b/aws/resource_aws_gamelift_build_test.go @@ -85,7 +85,11 @@ func TestAccAWSGameliftBuild_basic(t *testing.T) { key := *loc.Key resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSGamelift(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccPartitionHasServicePreCheck(gamelift.EndpointsID, t) + testAccPreCheckAWSGamelift(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSGameliftBuildDestroy, Steps: []resource.TestStep{ @@ -145,7 +149,11 @@ func TestAccAWSGameliftBuild_tags(t *testing.T) { key := *loc.Key resource.ParallelTest(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSGamelift(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccPartitionHasServicePreCheck(gamelift.EndpointsID, t) + testAccPreCheckAWSGamelift(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSGameliftBuildDestroy, Steps: []resource.TestStep{ @@ -203,7 +211,11 @@ func TestAccAWSGameliftBuild_disappears(t *testing.T) { key := *loc.Key resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSGamelift(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccPartitionHasServicePreCheck(gamelift.EndpointsID, t) + testAccPreCheckAWSGamelift(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSGameliftBuildDestroy, Steps: []resource.TestStep{ diff --git a/aws/resource_aws_gamelift_fleet_test.go b/aws/resource_aws_gamelift_fleet_test.go index 70347ed1b14..00a224a4868 100644 --- a/aws/resource_aws_gamelift_fleet_test.go +++ b/aws/resource_aws_gamelift_fleet_test.go @@ -261,7 +261,11 @@ func TestAccAWSGameliftFleet_basic(t *testing.T) { resourceName := "aws_gamelift_fleet.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSGamelift(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccPartitionHasServicePreCheck(gamelift.EndpointsID, t) + testAccPreCheckAWSGamelift(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSGameliftFleetDestroy, Steps: []resource.TestStep{ @@ -338,7 +342,11 @@ func TestAccAWSGameliftFleet_tags(t *testing.T) { resourceName := "aws_gamelift_fleet.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSGamelift(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccPartitionHasServicePreCheck(gamelift.EndpointsID, t) + testAccPreCheckAWSGamelift(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSGameliftFleetDestroy, Steps: []resource.TestStep{ @@ -403,7 +411,11 @@ func TestAccAWSGameliftFleet_allFields(t *testing.T) { resourceName := "aws_gamelift_fleet.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSGamelift(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccPartitionHasServicePreCheck(gamelift.EndpointsID, t) + testAccPreCheckAWSGamelift(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSGameliftFleetDestroy, Steps: []resource.TestStep{ @@ -516,7 +528,11 @@ func TestAccAWSGameliftFleet_disappears(t *testing.T) { resourceName := "aws_gamelift_fleet.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSGamelift(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccPartitionHasServicePreCheck(gamelift.EndpointsID, t) + testAccPreCheckAWSGamelift(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSGameliftFleetDestroy, Steps: []resource.TestStep{ diff --git a/aws/resource_aws_gamelift_game_session_queue_test.go b/aws/resource_aws_gamelift_game_session_queue_test.go index cb4d6f1ac7b..6a4424997a5 100644 --- a/aws/resource_aws_gamelift_game_session_queue_test.go +++ b/aws/resource_aws_gamelift_game_session_queue_test.go @@ -93,7 +93,11 @@ func TestAccAWSGameliftGameSessionQueue_basic(t *testing.T) { uTimeoutInSeconds := int64(600) resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSGamelift(t) }, + PreCheck: func() { + testAccPreCheck(t) + 
testAccPartitionHasServicePreCheck(gamelift.EndpointsID, t) + testAccPreCheckAWSGamelift(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSGameliftGameSessionQueueDestroy, Steps: []resource.TestStep{ @@ -152,7 +156,11 @@ func TestAccAWSGameliftGameSessionQueue_tags(t *testing.T) { queueName := testAccGameliftGameSessionQueuePrefix + acctest.RandString(8) resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSGamelift(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccPartitionHasServicePreCheck(gamelift.EndpointsID, t) + testAccPreCheckAWSGamelift(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSGameliftGameSessionQueueDestroy, Steps: []resource.TestStep{ @@ -208,7 +216,11 @@ func TestAccAWSGameliftGameSessionQueue_disappears(t *testing.T) { timeoutInSeconds := int64(124) resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSGamelift(t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccPartitionHasServicePreCheck(gamelift.EndpointsID, t) + testAccPreCheckAWSGamelift(t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSGameliftGameSessionQueueDestroy, Steps: []resource.TestStep{ diff --git a/aws/resource_aws_gamelift_test.go b/aws/resource_aws_gamelift_test.go index bc44bda452e..d767aecceae 100644 --- a/aws/resource_aws_gamelift_test.go +++ b/aws/resource_aws_gamelift_test.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/gamelift" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) @@ -45,20 +46,20 @@ func testAccAWSGameliftSampleGame(region string) (*testAccGameliftGame, error) { // Account ID found from CloudTrail event (role ARN) after finishing tutorial in given region func testAccGameliftAccountIdByRegion(region string) (string, error) { m := map[string]string{ - "ap-northeast-1": "120069834884", - "ap-northeast-2": "805673136642", - "ap-south-1": "134975661615", - "ap-southeast-1": "077577004113", - "ap-southeast-2": "112188327105", - "ca-central-1": "800535022691", - "eu-central-1": "797584052317", - "eu-west-1": "319803218673", - "eu-west-2": "937342764187", - "sa-east-1": "028872612690", - "us-east-1": "783764748367", - "us-east-2": "415729564621", - "us-west-1": "715879310420", - "us-west-2": "741061592171", + endpoints.ApNortheast1RegionID: "120069834884", + endpoints.ApNortheast2RegionID: "805673136642", + endpoints.ApSouth1RegionID: "134975661615", + endpoints.ApSoutheast1RegionID: "077577004113", + endpoints.ApSoutheast2RegionID: "112188327105", + endpoints.CaCentral1RegionID: "800535022691", + endpoints.EuCentral1RegionID: "797584052317", + endpoints.EuWest1RegionID: "319803218673", + endpoints.EuWest2RegionID: "937342764187", + endpoints.SaEast1RegionID: "028872612690", + endpoints.UsEast1RegionID: "783764748367", + endpoints.UsEast2RegionID: "415729564621", + endpoints.UsWest1RegionID: "715879310420", + endpoints.UsWest2RegionID: "741061592171", } if accId, ok := m[region]; ok { From debd489494d9805fc5555d2b9666eefbc52469bf Mon Sep 17 00:00:00 2001 From: gunadhya <6939749+gunadhya@users.noreply.github.com> Date: Tue, 5 Jan 2021 02:07:01 +0530 Subject: [PATCH 0376/1212] Restore go mod and sum --- tools/go.mod | 1 - tools/go.sum | 2 -- 2 files changed, 3 deletions(-) diff --git a/tools/go.mod b/tools/go.mod index 0440c631e81..748e315d1bb 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -7,7 +7,6 @@ require ( github.com/client9/misspell 
v0.3.4 github.com/golangci/golangci-lint v1.33.0 github.com/katbyte/terrafmt v0.2.1-0.20200913185704-5ff4421407b4 - github.com/pavius/impi v0.0.3 // indirect github.com/terraform-linters/tflint v0.20.3 ) diff --git a/tools/go.sum b/tools/go.sum index 76d6b9d5d22..1384a0fce44 100644 --- a/tools/go.sum +++ b/tools/go.sum @@ -640,8 +640,6 @@ github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pavius/impi v0.0.3 h1:DND6MzU+BLABhOZXbELR3FU8b+zDgcq4dOCNLhiTYuI= -github.com/pavius/impi v0.0.3/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= From 2e4bec3900f8a58da3a673b17bee2352e5396aeb Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Mon, 4 Jan 2021 16:16:08 -0500 Subject: [PATCH 0377/1212] resource/eks_fargate_profile: Fix hardcoded regions --- aws/resource_aws_eks_fargate_profile_test.go | 27 ++++++++++++++------ 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/aws/resource_aws_eks_fargate_profile_test.go b/aws/resource_aws_eks_fargate_profile_test.go index dc2e355ba25..38e316b26f8 100644 --- a/aws/resource_aws_eks_fargate_profile_test.go +++ b/aws/resource_aws_eks_fargate_profile_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/eks" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" @@ -338,14 +339,24 @@ func testAccPreCheckAWSEksFargateProfile(t *testing.T) { // create and destroy an EKS Cluster just to find the real error, instead // we take the least desirable approach of hardcoding allowed regions. 
allowedRegions := []string{ - "ap-northeast-1", - "ap-southeast-1", - "ap-southeast-2", - "eu-central-1", - "eu-west-1", - "us-east-1", - "us-east-2", - "us-west-2", + endpoints.ApEast1RegionID, + endpoints.ApNortheast1RegionID, + endpoints.ApNortheast2RegionID, + endpoints.ApSouth1RegionID, + endpoints.ApSoutheast1RegionID, + endpoints.ApSoutheast2RegionID, + endpoints.CaCentral1RegionID, + endpoints.EuCentral1RegionID, + endpoints.EuNorth1RegionID, + endpoints.EuWest1RegionID, + endpoints.EuWest2RegionID, + endpoints.EuWest3RegionID, + endpoints.MeSouth1RegionID, + endpoints.SaEast1RegionID, + endpoints.UsEast1RegionID, + endpoints.UsEast2RegionID, + endpoints.UsWest1RegionID, + endpoints.UsWest2RegionID, } region := testAccProvider.Meta().(*AWSClient).region From 2054399a5c3219b3994b39b60d3682aacf852568 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Mon, 4 Jan 2021 17:56:38 -0500 Subject: [PATCH 0378/1212] tests/resource/dynamodb_global_table: Fix hardcoded regions --- aws/resource_aws_dynamodb_global_table_test.go | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_dynamodb_global_table_test.go b/aws/resource_aws_dynamodb_global_table_test.go index 9231314dc16..0ae6d12ba51 100644 --- a/aws/resource_aws_dynamodb_global_table_test.go +++ b/aws/resource_aws_dynamodb_global_table_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -159,8 +160,21 @@ func testAccPreCheckAWSDynamodbGlobalTable(t *testing.T) { } // testAccDynamoDBGlobalTablePreCheck checks if aws_dynamodb_global_table (version 2017.11.29) can be used and skips test if not. 
+// Region availability for Version 2017.11.29: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html func testAccDynamoDBGlobalTablePreCheck(t *testing.T) { - supportRegionsSort := []string{"ap-northeast-1", "ap-northeast-2", "ap-southeast-1", "ap-southeast-2", "eu-central-1", "eu-west-1", "eu-west-2", "us-east-1", "us-east-2", "us-west-1", "us-west-2"} + supportRegionsSort := []string{ + endpoints.ApNortheast1RegionID, + endpoints.ApNortheast2RegionID, + endpoints.ApSoutheast1RegionID, + endpoints.ApSoutheast2RegionID, + endpoints.EuCentral1RegionID, + endpoints.EuWest1RegionID, + endpoints.EuWest2RegionID, + endpoints.UsEast1RegionID, + endpoints.UsEast2RegionID, + endpoints.UsWest1RegionID, + endpoints.UsWest2RegionID, + } if testAccGetRegion() != supportRegionsSort[sort.SearchStrings(supportRegionsSort, testAccGetRegion())] { t.Skipf("skipping test; aws_dynamodb_global_table (DynamoDB v2017.11.29) not supported in region %s", testAccGetRegion()) From 5623366c3450cfd4f62fe9e8533b95e52f748874 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Mon, 4 Jan 2021 18:23:40 -0500 Subject: [PATCH 0379/1212] tests/resource/budgets_budget: Fix hardcoded regions --- aws/resource_aws_budgets_budget_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_budgets_budget_test.go b/aws/resource_aws_budgets_budget_test.go index 73c5b2b2fe3..e7819cc3ca7 100644 --- a/aws/resource_aws_budgets_budget_test.go +++ b/aws/resource_aws_budgets_budget_test.go @@ -409,7 +409,7 @@ func testAccAWSBudgetsBudgetConfigUpdate(name string) budgets.Budget { }, CostFilters: map[string][]*string{ "AZ": { - aws.String("us-east-2"), + aws.String(testAccGetAlternateRegion()), }, }, CostTypes: &budgets.CostTypes{ @@ -444,7 +444,7 @@ func testAccAWSBudgetsBudgetConfigDefaults(name string) budgets.Budget { }, CostFilters: map[string][]*string{ "AZ": { - aws.String("us-east-1"), + aws.String(testAccGetRegion()), }, }, CostTypes: &budgets.CostTypes{ From 0e9c4617f12cab937c5e9928a81070e6132524bd Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Mon, 4 Jan 2021 18:30:52 -0500 Subject: [PATCH 0380/1212] tests/resource/codebuild_project: Fix hardcoded regions --- aws/resource_aws_codebuild_project_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_codebuild_project_test.go b/aws/resource_aws_codebuild_project_test.go index 4ff33fa4d60..620971eaac5 100644 --- a/aws/resource_aws_codebuild_project_test.go +++ b/aws/resource_aws_codebuild_project_test.go @@ -3398,7 +3398,7 @@ resource "aws_codebuild_project" "test" { func testAccAWSCodeBuildProjectConfig_VpcConfig1(rName string) string { return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` data "aws_availability_zones" "available" { - # InvalidInputException: CodeBuild currently doesn't support VPC in us-west-2d, please select subnets in other availability zones. + # InvalidInputException: CodeBuild currently doesn't support VPC in usw2-az4, please select subnets in other availability zones. exclude_zone_ids = ["usw2-az4"] state = "available" @@ -3459,7 +3459,7 @@ resource "aws_codebuild_project" "test" { func testAccAWSCodeBuildProjectConfig_VpcConfig2(rName string) string { return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` data "aws_availability_zones" "available" { - # InvalidInputException: CodeBuild currently doesn't support VPC in us-west-2d, please select subnets in other availability zones. 
+ # InvalidInputException: CodeBuild currently doesn't support VPC in usw2-az4, please select subnets in other availability zones. exclude_zone_ids = ["usw2-az4"] state = "available" From 3ea1c3d0b1669c9314edd5f92d16140966d87d34 Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Mon, 4 Jan 2021 16:01:11 -0800 Subject: [PATCH 0381/1212] Update CHANGELOG.md for #16082 --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 813a6415837..91e4bf68e07 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## 3.23.0 (Unreleased) +FEATURES + +* **New Resource:** `aws_sagemaker_image` [GH-16082] + ENHANCEMENTS * data-source/aws_imagebuilder_image_recipe: Add `working_directory` attribute [GH-16947] From 06fe7504dbd2efff715e1075fa2a1049aef8aefe Mon Sep 17 00:00:00 2001 From: angie pinilla Date: Mon, 4 Jan 2021 19:56:45 -0500 Subject: [PATCH 0382/1212] Update CHANGELOG for #16914 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 91e4bf68e07..c598dbf891f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ ENHANCEMENTS * data-source/aws_imagebuilder_image_recipe: Add `working_directory` attribute [GH-16947] * resource/aws_imagebuilder_image_recipe: Add `working_directory` argument [GH-16947] * resource/aws_kinesis_stream: Update `retention_period` argument plan-time validation to include up to 8760 hours [GH-16608] +* resource/aws_msk_cluster: Support `PER_TOPIC_PER_PARTITION` value for `enhanced_monitoring` argument plan-time validation [GH-16914] BUG FIXES From 9db7205937a706c5058f5e56f564e42136927aac Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 5 Jan 2021 07:54:18 -0500 Subject: [PATCH 0383/1212] .github/workflows: Increase goreleaser timeout from 1h to 2h (#16955) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As part of submission checks, we run cross-compilation builds on all release ports to catch unexpected 32-bit architecture (e.g. integer overflows) and kernel specific code (e.g. upstream libraries missing kernel implementations). 
In [failed builds](https://github.com/hashicorp/terraform-provider-aws/pull/16935/checks?check_run_id=1644053191), it's hitting the timeout argument deadline without completing any builds: ``` 2021-01-04T13:45:49.6869555Z  • building binaries 2021-01-04T13:45:49.6871325Z  • building  binary=/home/runner/work/terraform-provider-aws/terraform-provider-aws/dist/terraform-provider-aws_windows_amd64/terraform-provider-aws_v0.0.0-SNAPSHOT-9232ddf.exe 2021-01-04T13:45:49.6873982Z  • building  binary=/home/runner/work/terraform-provider-aws/terraform-provider-aws/dist/terraform-provider-aws_linux_386/terraform-provider-aws_v0.0.0-SNAPSHOT-9232ddf 2021-01-04T13:45:49.6876550Z  • building  binary=/home/runner/work/terraform-provider-aws/terraform-provider-aws/dist/terraform-provider-aws_darwin_amd64/terraform-provider-aws_v0.0.0-SNAPSHOT-9232ddf 2021-01-04T13:45:49.6879137Z  • building  binary=/home/runner/work/terraform-provider-aws/terraform-provider-aws/dist/terraform-provider-aws_freebsd_386/terraform-provider-aws_v0.0.0-SNAPSHOT-9232ddf 2021-01-04T14:45:41.0926325Z  ⨯ build failed after 3605.00s error=context deadline exceeded 2021-01-04T14:45:42.0956772Z ##[error]The process '/opt/hostedtoolcache/goreleaser-action/0.151.1/x64/goreleaser' failed with exit code 1 ``` In [other successful builds](https://github.com/hashicorp/terraform-provider-aws/runs/1640560741?check_suite_focus=true), it finishes in a much more timely fashion: ``` 2021-01-03T19:42:34.6949476Z  • building binaries 2021-01-03T19:42:34.6951506Z  • building  binary=/home/runner/work/terraform-provider-aws/terraform-provider-aws/dist/terraform-provider-aws_windows_amd64/terraform-provider-aws_v0.0.0-SNAPSHOT-9685f48.exe 2021-01-03T19:42:34.6955124Z  • building  binary=/home/runner/work/terraform-provider-aws/terraform-provider-aws/dist/terraform-provider-aws_linux_386/terraform-provider-aws_v0.0.0-SNAPSHOT-9685f48 2021-01-03T19:42:34.6958777Z  • building  binary=/home/runner/work/terraform-provider-aws/terraform-provider-aws/dist/terraform-provider-aws_darwin_amd64/terraform-provider-aws_v0.0.0-SNAPSHOT-9685f48 2021-01-03T19:42:34.6962514Z  • building  binary=/home/runner/work/terraform-provider-aws/terraform-provider-aws/dist/terraform-provider-aws_freebsd_386/terraform-provider-aws_v0.0.0-SNAPSHOT-9685f48 2021-01-03T20:00:50.5974306Z  • building  binary=/home/runner/work/terraform-provider-aws/terraform-provider-aws/dist/terraform-provider-aws_freebsd_amd64/terraform-provider-aws_v0.0.0-SNAPSHOT-9685f48 2021-01-03T20:00:51.5547377Z  • building  binary=/home/runner/work/terraform-provider-aws/terraform-provider-aws/dist/terraform-provider-aws_freebsd_arm_6/terraform-provider-aws_v0.0.0-SNAPSHOT-9685f48 2021-01-03T20:01:01.6084587Z  • building  binary=/home/runner/work/terraform-provider-aws/terraform-provider-aws/dist/terraform-provider-aws_freebsd_arm64/terraform-provider-aws_v0.0.0-SNAPSHOT-9685f48 2021-01-03T20:02:16.4995804Z  • building  binary=/home/runner/work/terraform-provider-aws/terraform-provider-aws/dist/terraform-provider-aws_linux_arm64/terraform-provider-aws_v0.0.0-SNAPSHOT-9685f48 2021-01-03T20:12:32.2856869Z  • building  binary=/home/runner/work/terraform-provider-aws/terraform-provider-aws/dist/terraform-provider-aws_linux_amd64/terraform-provider-aws_v0.0.0-SNAPSHOT-9685f48 2021-01-03T20:14:57.1041524Z  • building  binary=/home/runner/work/terraform-provider-aws/terraform-provider-aws/dist/terraform-provider-aws_linux_arm_6/terraform-provider-aws_v0.0.0-SNAPSHOT-9685f48 2021-01-03T20:15:36.7230782Z  • building
binary=/home/runner/work/terraform-provider-aws/terraform-provider-aws/dist/terraform-provider-aws_windows_386/terraform-provider-aws_v0.0.0-SNAPSHOT-9685f48.exe 2021-01-03T20:21:14.9668628Z  • build succeeded after 2339.43s ``` Assuming there is not a deadlock in `goreleaser` or `go build`, there could just be contention on GitHub Actions build hosts. For now, just increase the timeout unless other solutions are necessary. --- .github/workflows/terraform_provider.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/terraform_provider.yml b/.github/workflows/terraform_provider.yml index 4f5b4f62752..857d829f980 100644 --- a/.github/workflows/terraform_provider.yml +++ b/.github/workflows/terraform_provider.yml @@ -300,7 +300,7 @@ jobs: - name: goreleaser build uses: goreleaser/goreleaser-action@v2 with: - args: build --snapshot --timeout 1h + args: build --snapshot --timeout 2h semgrep: runs-on: ubuntu-latest From a69c72fff613772b5d3b1e0b71c89c76d1c80341 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pedro=20Janu=C3=A1rio?= Date: Tue, 5 Jan 2021 14:03:17 +0000 Subject: [PATCH 0384/1212] resource/aws_transfer_user: Update username validation to support 100 characters (#16938) Output from acceptance testing: ``` --- PASS: TestAccAWSTransferUser_UserName_Validation (17.08s) --- PASS: TestAccAWSTransferUser_modifyWithOptions (184.32s) --- PASS: TestAccAWSTransferUser_disappears (190.26s) --- PASS: TestAccAWSTransferUser_basic (193.45s) --- PASS: TestAccAWSTransferUser_homeDirectoryMappings (203.67s) ``` --- aws/resource_aws_transfer_user_test.go | 15 ++++++++++----- aws/validators.go | 4 ++-- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/aws/resource_aws_transfer_user_test.go b/aws/resource_aws_transfer_user_test.go index 76b8aefc065..6fbc75d6bc0 100644 --- a/aws/resource_aws_transfer_user_test.go +++ b/aws/resource_aws_transfer_user_test.go @@ -134,19 +134,24 @@ func TestAccAWSTransferUser_UserName_Validation(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccAWSTransferUserName_validation("!@#$%^"), - ExpectError: regexp.MustCompile(`Invalid "user_name": must be between 3 and 32 alphanumeric or special characters hyphen and underscore. However, "user_name" cannot begin with a hyphen`), + ExpectError: regexp.MustCompile(`Invalid "user_name": must be between 3 and 100 alphanumeric or special characters hyphen and underscore. However, "user_name" cannot begin with a hyphen`), }, { Config: testAccAWSTransferUserName_validation(acctest.RandString(2)), - ExpectError: regexp.MustCompile(`Invalid "user_name": must be between 3 and 32 alphanumeric or special characters hyphen and underscore. However, "user_name" cannot begin with a hyphen`), + ExpectError: regexp.MustCompile(`Invalid "user_name": must be between 3 and 100 alphanumeric or special characters hyphen and underscore. However, "user_name" cannot begin with a hyphen`), }, { - Config: testAccAWSTransferUserName_validation(acctest.RandString(33)), - ExpectError: regexp.MustCompile(`Invalid "user_name": must be between 3 and 32 alphanumeric or special characters hyphen and underscore. However, "user_name" cannot begin with a hyphen`), + Config: testAccAWSTransferUserName_validation(acctest.RandString(33)), + ExpectNonEmptyPlan: true, + PlanOnly: true, + }, + { + Config: testAccAWSTransferUserName_validation(acctest.RandString(101)), + ExpectError: regexp.MustCompile(`Invalid "user_name": must be between 3 and 100 alphanumeric or special characters hyphen and underscore. 
However, "user_name" cannot begin with a hyphen`), }, { Config: testAccAWSTransferUserName_validation("-abcdef"), - ExpectError: regexp.MustCompile(`Invalid "user_name": must be between 3 and 32 alphanumeric or special characters hyphen and underscore. However, "user_name" cannot begin with a hyphen`), + ExpectError: regexp.MustCompile(`Invalid "user_name": must be between 3 and 100 alphanumeric or special characters hyphen and underscore. However, "user_name" cannot begin with a hyphen`), }, { Config: testAccAWSTransferUserName_validation("valid_username"), diff --git a/aws/validators.go b/aws/validators.go index e982a1e3f20..14f80bb226b 100644 --- a/aws/validators.go +++ b/aws/validators.go @@ -97,8 +97,8 @@ func validateTransferServerID(v interface{}, k string) (ws []string, errors []er func validateTransferUserName(v interface{}, k string) (ws []string, errors []error) { value := v.(string) // https://docs.aws.amazon.com/transfer/latest/userguide/API_CreateUser.html - if !regexp.MustCompile(`^[a-zA-Z0-9_][a-zA-Z0-9_-]{2,31}$`).MatchString(value) { - errors = append(errors, fmt.Errorf("Invalid %q: must be between 3 and 32 alphanumeric or special characters hyphen and underscore. However, %q cannot begin with a hyphen", k, k)) + if !regexp.MustCompile(`^[a-zA-Z0-9_][a-zA-Z0-9_-]{2,99}$`).MatchString(value) { + errors = append(errors, fmt.Errorf("Invalid %q: must be between 3 and 100 alphanumeric or special characters hyphen and underscore. However, %q cannot begin with a hyphen", k, k)) } return } From eb5174ab89e852fa16f662e6c07008f350a3a94f Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 5 Jan 2021 09:05:10 -0500 Subject: [PATCH 0385/1212] Update CHANGELOG for #16938 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c598dbf891f..a38d1fd1534 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ ENHANCEMENTS BUG FIXES * resource/aws_imagebuilder_distribution_configuration: Remove `user_ids` argument maximum limit [GH-16905] +* resource/aws_transfer_user: Update `user_name` argument validation to support 100 characters [GH-16938] ## 3.22.0 (December 18, 2020) From 0ed3e50f69285696fc54e238cacefaea67ed1696 Mon Sep 17 00:00:00 2001 From: Matthew Burgess <549318+mattburgess@users.noreply.github.com> Date: Tue, 5 Jan 2021 15:27:41 +0000 Subject: [PATCH 0386/1212] tests/provider: Enable S1039 check for unnecessary fmt.Sprint()/fmt.Sprintf() usage (#16741) --- .golangci.yml | 4 - aws/data_source_aws_autoscaling_group_test.go | 4 +- ...s_codeartifact_authorization_token_test.go | 15 ++-- ...s_codeartifact_repository_endpoint_test.go | 4 +- ...ce_aws_directory_service_directory_test.go | 5 +- ...ta_source_aws_docdb_engine_version_test.go | 8 +- aws/data_source_aws_ebs_snapshot_ids_test.go | 4 +- aws/data_source_aws_ec2_spot_price_test.go | 5 +- aws/data_source_aws_instance_test.go | 21 ++--- ..._source_aws_neptune_engine_version_test.go | 8 +- ...data_source_aws_rds_engine_version_test.go | 12 +-- ...urce_aws_rds_orderable_db_instance_test.go | 28 +++---- aws/data_source_aws_route_test.go | 4 +- ...ta_source_aws_workspaces_workspace_test.go | 13 ++- ...ource_aws_apigatewayv2_integration_test.go | 10 +-- aws/resource_aws_autoscaling_group_test.go | 79 +++++++++---------- ...ource_aws_autoscaling_notification_test.go | 2 +- ...c_location_fsx_windows_file_system_test.go | 2 +- aws/resource_aws_db_instance_test.go | 16 ++-- aws/resource_aws_docdb_subnet_group_test.go | 8 +- ...urce_aws_ec2_transit_gateway_route_test.go | 4 +- 
...ec2_transit_gateway_vpc_attachment_test.go | 16 ++-- aws/resource_aws_eip_association_test.go | 4 +- aws/resource_aws_elb_attachment_test.go | 10 +-- aws/resource_aws_emr_cluster_test.go | 4 +- ...source_aws_fsx_windows_file_system_test.go | 2 +- ...e_data_catalog_encryption_settings_test.go | 4 +- aws/resource_aws_instance_test.go | 72 ++++++++--------- ...rce_aws_lambda_code_signing_config_test.go | 12 +-- aws/resource_aws_neptune_cluster_test.go | 4 +- aws/resource_aws_neptune_subnet_group_test.go | 8 +- aws/resource_aws_network_acl_rule_test.go | 8 +- aws/resource_aws_network_interface_test.go | 18 ++--- aws/resource_aws_route_table_test.go | 4 +- aws/resource_aws_route_test.go | 64 +++++++-------- ..._signer_signing_profile_permission_test.go | 2 +- ...esource_aws_signer_signing_profile_test.go | 18 ++--- aws/resource_aws_vpn_gateway_test.go | 4 +- ...esource_aws_xray_encryption_config_test.go | 4 +- staticcheck.conf | 1 - 40 files changed, 256 insertions(+), 259 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 60f8104a518..ace701c8fbb 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,9 +1,5 @@ issues: exclude-rules: - - linters: - - gosimple - text: "S1039:" - # Exclude issues bypassing staticcheck.conf - linters: - staticcheck diff --git a/aws/data_source_aws_autoscaling_group_test.go b/aws/data_source_aws_autoscaling_group_test.go index 970215b10fe..8378e7754b2 100644 --- a/aws/data_source_aws_autoscaling_group_test.go +++ b/aws/data_source_aws_autoscaling_group_test.go @@ -124,7 +124,7 @@ func testAccAutoScalingGroupDataResourceConfig_launchTemplate() string { testAccLatestAmazonLinuxHvmEbsAmiConfig(), testAccAvailableAZsNoOptInConfig(), testAccAvailableEc2InstanceTypeForAvailabilityZone("data.aws_availability_zones.available.names[0]", "t3.micro", "t2.micro"), - fmt.Sprintf(` + ` data "aws_autoscaling_group" "test" { name = aws_autoscaling_group.test.name } @@ -145,5 +145,5 @@ resource "aws_launch_template" "test" { image_id = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = data.aws_ec2_instance_type_offering.available.instance_type } -`)) +`) } diff --git a/aws/data_source_aws_codeartifact_authorization_token_test.go b/aws/data_source_aws_codeartifact_authorization_token_test.go index b9c3a35e1e4..9c42f339a7e 100644 --- a/aws/data_source_aws_codeartifact_authorization_token_test.go +++ b/aws/data_source_aws_codeartifact_authorization_token_test.go @@ -85,8 +85,9 @@ resource "aws_codeartifact_domain" "test" { } func testAccCheckAWSCodeArtifactAuthorizationTokenBasicConfig(rName string) string { - return testAccCheckAWSCodeArtifactAuthorizationTokenBaseConfig(rName) + - fmt.Sprintf(` + return composeConfig( + testAccCheckAWSCodeArtifactAuthorizationTokenBaseConfig(rName), + ` data "aws_codeartifact_authorization_token" "test" { domain = aws_codeartifact_domain.test.domain } @@ -94,8 +95,9 @@ data "aws_codeartifact_authorization_token" "test" { } func testAccCheckAWSCodeArtifactAuthorizationTokenOwnerConfig(rName string) string { - return testAccCheckAWSCodeArtifactAuthorizationTokenBaseConfig(rName) + - fmt.Sprintf(` + return composeConfig( + testAccCheckAWSCodeArtifactAuthorizationTokenBaseConfig(rName), + ` data "aws_codeartifact_authorization_token" "test" { domain = aws_codeartifact_domain.test.domain domain_owner = aws_codeartifact_domain.test.owner @@ -104,8 +106,9 @@ data "aws_codeartifact_authorization_token" "test" { } func testAccCheckAWSCodeArtifactAuthorizationTokenDurationConfig(rName string) string { - return 
testAccCheckAWSCodeArtifactAuthorizationTokenBaseConfig(rName) + - fmt.Sprintf(` + return composeConfig( + testAccCheckAWSCodeArtifactAuthorizationTokenBaseConfig(rName), + ` data "aws_codeartifact_authorization_token" "test" { domain = aws_codeartifact_domain.test.domain duration_seconds = 900 diff --git a/aws/data_source_aws_codeartifact_repository_endpoint_test.go b/aws/data_source_aws_codeartifact_repository_endpoint_test.go index 17f3a1bd904..6e7e0ed6d24 100644 --- a/aws/data_source_aws_codeartifact_repository_endpoint_test.go +++ b/aws/data_source_aws_codeartifact_repository_endpoint_test.go @@ -102,12 +102,12 @@ data "aws_codeartifact_repository_endpoint" "test" { func testAccCheckAWSCodeArtifactRepositoryEndpointOwnerConfig(rName string) string { return composeConfig( testAccCheckAWSCodeArtifactRepositoryEndpointBaseConfig(rName), - fmt.Sprintf(` + ` data "aws_codeartifact_repository_endpoint" "test" { domain = aws_codeartifact_domain.test.domain repository = aws_codeartifact_repository.test.repository domain_owner = aws_codeartifact_domain.test.owner format = "npm" } -`)) +`) } diff --git a/aws/data_source_aws_directory_service_directory_test.go b/aws/data_source_aws_directory_service_directory_test.go index 248ab8e778c..a8c01c95314 100644 --- a/aws/data_source_aws_directory_service_directory_test.go +++ b/aws/data_source_aws_directory_service_directory_test.go @@ -197,7 +197,8 @@ data "aws_directory_service_directory" "test-microsoft-ad" { } func testAccDataSourceDirectoryServiceDirectoryConfig_connector() string { - return composeConfig(testAccAvailableAZsNoOptInConfig(), fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInConfig(), + ` resource "aws_directory_service_directory" "test" { name = "corp.notexample.com" password = "SuperSecretPassw0rd" @@ -254,5 +255,5 @@ resource "aws_subnet" "test" { data "aws_directory_service_directory" "test-ad-connector" { directory_id = aws_directory_service_directory.connector.id } -`)) +`) } diff --git a/aws/data_source_aws_docdb_engine_version_test.go b/aws/data_source_aws_docdb_engine_version_test.go index 83a3fc203b1..a0bdb8111c7 100644 --- a/aws/data_source_aws_docdb_engine_version_test.go +++ b/aws/data_source_aws_docdb_engine_version_test.go @@ -103,15 +103,15 @@ data "aws_docdb_engine_version" "test" { } func testAccAWSDocDBEngineVersionDataSourcePreferredConfig() string { - return fmt.Sprintf(` + return ` data "aws_docdb_engine_version" "test" { preferred_versions = ["34.6.1", "3.6.0", "2.6.0"] } -`) +` } func testAccAWSDocDBEngineVersionDataSourceDefaultOnlyConfig() string { - return fmt.Sprintf(` + return ` data "aws_docdb_engine_version" "test" {} -`) +` } diff --git a/aws/data_source_aws_ebs_snapshot_ids_test.go b/aws/data_source_aws_ebs_snapshot_ids_test.go index 27aacc24149..1bca667a475 100644 --- a/aws/data_source_aws_ebs_snapshot_ids_test.go +++ b/aws/data_source_aws_ebs_snapshot_ids_test.go @@ -71,7 +71,7 @@ func TestAccDataSourceAwsEbsSnapshotIds_empty(t *testing.T) { } func testAccDataSourceAwsEbsSnapshotIdsConfig_basic() string { - return composeConfig(testAccAvailableAZsNoOptInConfig(), fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInConfig(), ` resource "aws_ebs_volume" "test" { availability_zone = data.aws_availability_zones.available.names[0] size = 1 @@ -84,7 +84,7 @@ resource "aws_ebs_snapshot" "test" { data "aws_ebs_snapshot_ids" "test" { owners = ["self"] } -`)) +`) } func testAccDataSourceAwsEbsSnapshotIdsConfig_sorted1(rName string) string { diff --git 
a/aws/data_source_aws_ec2_spot_price_test.go b/aws/data_source_aws_ec2_spot_price_test.go index 516989087f9..9fa7ede1b6c 100644 --- a/aws/data_source_aws_ec2_spot_price_test.go +++ b/aws/data_source_aws_ec2_spot_price_test.go @@ -1,7 +1,6 @@ package aws import ( - "fmt" "regexp" "testing" @@ -67,7 +66,7 @@ func testAccPreCheckAwsEc2SpotPrice(t *testing.T) { } func testAccAwsEc2SpotPriceDataSourceConfig() string { - return testAccAvailableAZsNoOptInConfig() + fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInConfig(), ` data "aws_region" "current" {} data "aws_ec2_instance_type_offering" "test" { @@ -91,7 +90,7 @@ data "aws_ec2_spot_price" "test" { } func testAccAwsEc2SpotPriceDataSourceFilterConfig() string { - return testAccAvailableAZsNoOptInConfig() + fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInConfig(), ` data "aws_region" "current" {} data "aws_ec2_instance_type_offering" "test" { diff --git a/aws/data_source_aws_instance_test.go b/aws/data_source_aws_instance_test.go index d2d0c4b7910..ae488ca058f 100644 --- a/aws/data_source_aws_instance_test.go +++ b/aws/data_source_aws_instance_test.go @@ -575,8 +575,8 @@ data "aws_instance" "test" { } // filter on tag, populate more attributes -var testAccInstanceDataSourceConfig_AzUserData = testAccAvailableAZsNoOptInDefaultExcludeConfig() + - testAccLatestAmazonLinuxHvmEbsAmiConfig() + fmt.Sprintf(` +var testAccInstanceDataSourceConfig_AzUserData = composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), + testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id availability_zone = data.aws_availability_zones.available.names[0] @@ -738,7 +738,7 @@ data "aws_instance" "test" { ` func testAccInstanceDataSourceConfig_privateIP(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfigBasic(rName) + fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), testAccAwsInstanceVpcConfigBasic(rName), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" @@ -753,7 +753,7 @@ data "aws_instance" "test" { } func testAccInstanceDataSourceConfig_secondaryPrivateIPs(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfigBasic(rName) + fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), testAccAwsInstanceVpcConfigBasic(rName), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" @@ -799,7 +799,7 @@ data "aws_instance" "test" { } func testAccInstanceDataSourceConfig_VPC(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfigBasic(rName) + fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), testAccAwsInstanceVpcConfigBasic(rName), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.small" @@ -869,10 +869,10 @@ data "aws_instance" "test" { } func testAccInstanceDataSourceConfig_VPCSecurityGroups(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + - testAccAwsInstanceVpcConfigBasic(rName) + - testAccAwsInstanceVpcSecurityGroupConfig(rName) + - fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfigBasic(rName), + testAccAwsInstanceVpcSecurityGroupConfig(rName), + ` resource "aws_instance" "test" { ami = 
data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" @@ -945,7 +945,8 @@ data "aws_instance" "test" { } func testAccInstanceDataSourceConfig_creditSpecification(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfigBasic(rName) + fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfigBasic(rName), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" diff --git a/aws/data_source_aws_neptune_engine_version_test.go b/aws/data_source_aws_neptune_engine_version_test.go index bf3f6f61f28..fd3f3b46e6a 100644 --- a/aws/data_source_aws_neptune_engine_version_test.go +++ b/aws/data_source_aws_neptune_engine_version_test.go @@ -105,15 +105,15 @@ data "aws_neptune_engine_version" "test" { } func testAccAWSNeptuneEngineVersionDataSourcePreferredConfig() string { - return fmt.Sprintf(` + return ` data "aws_neptune_engine_version" "test" { preferred_versions = ["85.9.12", "1.0.3.0", "1.0.2.2"] } -`) +` } func testAccAWSNeptuneEngineVersionDataSourceDefaultOnlyConfig() string { - return fmt.Sprintf(` + return ` data "aws_neptune_engine_version" "test" {} -`) +` } diff --git a/aws/data_source_aws_rds_engine_version_test.go b/aws/data_source_aws_rds_engine_version_test.go index b7c0c1814df..ae24f8358fb 100644 --- a/aws/data_source_aws_rds_engine_version_test.go +++ b/aws/data_source_aws_rds_engine_version_test.go @@ -131,27 +131,27 @@ data "aws_rds_engine_version" "test" { } func testAccAWSRDSEngineVersionDataSourceUpgradeTargetsConfig() string { - return fmt.Sprintf(` + return ` data "aws_rds_engine_version" "test" { engine = "mysql" version = "5.7.17" } -`) +` } func testAccAWSRDSEngineVersionDataSourcePreferredConfig() string { - return fmt.Sprintf(` + return ` data "aws_rds_engine_version" "test" { engine = "mysql" preferred_versions = ["85.9.12", "5.7.19", "5.7.17"] } -`) +` } func testAccAWSRDSEngineVersionDataSourceDefaultOnlyConfig() string { - return fmt.Sprintf(` + return ` data "aws_rds_engine_version" "test" { engine = "mysql" } -`) +` } diff --git a/aws/data_source_aws_rds_orderable_db_instance_test.go b/aws/data_source_aws_rds_orderable_db_instance_test.go index 68e1863c473..0e7cacd6829 100644 --- a/aws/data_source_aws_rds_orderable_db_instance_test.go +++ b/aws/data_source_aws_rds_orderable_db_instance_test.go @@ -310,7 +310,7 @@ data "aws_rds_orderable_db_instance" "test" { } func testAccAWSRdsOrderableDbInstanceDataSourceConfig_supportsEnhancedMonitoring() string { - return fmt.Sprintf(` + return ` data "aws_rds_orderable_db_instance" "test" { engine = "mysql" license_model = "general-public-license" @@ -320,11 +320,11 @@ data "aws_rds_orderable_db_instance" "test" { preferred_engine_versions = ["5.6.35", "5.6.41", "5.6.44"] preferred_instance_classes = ["db.t2.small", "db.t3.medium", "db.t3.large"] } -`) +` } func testAccAWSRdsOrderableDbInstanceDataSourceConfig_supportsIAMDatabaseAuthentication() string { - return fmt.Sprintf(` + return ` data "aws_rds_orderable_db_instance" "test" { engine = "mysql" license_model = "general-public-license" @@ -334,11 +334,11 @@ data "aws_rds_orderable_db_instance" "test" { preferred_engine_versions = ["5.6.35", "5.6.41", "5.6.44"] preferred_instance_classes = ["db.t2.small", "db.t3.medium", "db.t3.large"] } -`) +` } func testAccAWSRdsOrderableDbInstanceDataSourceConfig_supportsIops() string { - return fmt.Sprintf(` + return ` data "aws_rds_orderable_db_instance" "test" { engine = 
"mysql" license_model = "general-public-license" @@ -347,11 +347,11 @@ data "aws_rds_orderable_db_instance" "test" { preferred_engine_versions = ["8.0.20", "8.0.19", "8.0.17"] preferred_instance_classes = ["db.t3.small", "db.t2.xlarge", "db.t2.small"] } -`) +` } func testAccAWSRdsOrderableDbInstanceDataSourceConfig_supportsKerberosAuthentication() string { - return fmt.Sprintf(` + return ` data "aws_rds_orderable_db_instance" "test" { engine = "postgres" license_model = "postgresql-license" @@ -361,11 +361,11 @@ data "aws_rds_orderable_db_instance" "test" { preferred_engine_versions = ["12.3", "11.1", "10.13"] preferred_instance_classes = ["db.m5.xlarge", "db.r5.large", "db.t3.large"] } -`) +` } func testAccAWSRdsOrderableDbInstanceDataSourceConfig_supportsPerformanceInsights() string { - return fmt.Sprintf(` + return ` data "aws_rds_orderable_db_instance" "test" { engine = "mysql" license_model = "general-public-license" @@ -374,11 +374,11 @@ data "aws_rds_orderable_db_instance" "test" { preferred_engine_versions = ["5.6.35", "5.6.41", "5.6.44"] preferred_instance_classes = ["db.t2.small", "db.t3.medium", "db.t3.large"] } -`) +` } func testAccAWSRdsOrderableDbInstanceDataSourceConfig_supportsStorageAutoscaling() string { - return fmt.Sprintf(` + return ` data "aws_rds_orderable_db_instance" "test" { engine = "mysql" license_model = "general-public-license" @@ -387,11 +387,11 @@ data "aws_rds_orderable_db_instance" "test" { preferred_engine_versions = ["8.0.20", "8.0.19", "5.7.30"] preferred_instance_classes = ["db.t3.medium", "db.t2.large", "db.t3.xlarge"] } -`) +` } func testAccAWSRdsOrderableDbInstanceDataSourceConfig_supportsStorageEncryption() string { - return fmt.Sprintf(` + return ` data "aws_rds_orderable_db_instance" "test" { engine = "mysql" license_model = "general-public-license" @@ -401,5 +401,5 @@ data "aws_rds_orderable_db_instance" "test" { preferred_engine_versions = ["5.6.35", "5.6.41", "5.6.44"] preferred_instance_classes = ["db.t2.small", "db.t3.medium", "db.t3.large"] } -`) +` } diff --git a/aws/data_source_aws_route_test.go b/aws/data_source_aws_route_test.go index 4ca660b0a1a..6772660cdca 100644 --- a/aws/data_source_aws_route_test.go +++ b/aws/data_source_aws_route_test.go @@ -250,7 +250,7 @@ data "aws_route" "test" { } func testAccAWSRouteDataSourceConfigLocalGatewayID() string { - return fmt.Sprintf(` + return ` data "aws_ec2_local_gateways" "all" {} data "aws_ec2_local_gateway" "first" { id = tolist(data.aws_ec2_local_gateways.all.ids)[0] @@ -286,5 +286,5 @@ data "aws_route" "by_local_gateway_id" { local_gateway_id = data.aws_ec2_local_gateway.first.id depends_on = [aws_route.test] } -`) +` } diff --git a/aws/data_source_aws_workspaces_workspace_test.go b/aws/data_source_aws_workspaces_workspace_test.go index 969bd4655b1..2e8a910fe6b 100644 --- a/aws/data_source_aws_workspaces_workspace_test.go +++ b/aws/data_source_aws_workspaces_workspace_test.go @@ -1,7 +1,6 @@ package aws import ( - "fmt" "regexp" "testing" @@ -90,7 +89,7 @@ func TestAccDataSourceAwsWorkspacesWorkspace_workspaceIDAndDirectoryIDConflict(t func testAccDataSourceWorkspacesWorkspaceConfig_byWorkspaceID(rName string) string { return composeConfig( testAccAwsWorkspacesWorkspaceConfig_Prerequisites(rName), - fmt.Sprintf(` + ` resource "aws_workspaces_workspace" "test" { bundle_id = data.aws_workspaces_bundle.test.id directory_id = aws_workspaces_directory.test.id @@ -112,13 +111,13 @@ resource "aws_workspaces_workspace" "test" { data "aws_workspaces_workspace" "test" { workspace_id = 
aws_workspaces_workspace.test.id } -`)) +`) } func testAccDataSourceWorkspacesWorkspaceConfig_byDirectoryID_userName(rName string) string { return composeConfig( testAccAwsWorkspacesWorkspaceConfig_Prerequisites(rName), - fmt.Sprintf(` + ` resource "aws_workspaces_workspace" "test" { bundle_id = data.aws_workspaces_bundle.test.id directory_id = aws_workspaces_directory.test.id @@ -143,15 +142,15 @@ data "aws_workspaces_workspace" "test" { depends_on = [aws_workspaces_workspace.test] } -`)) +`) } func testAccDataSourceAwsWorkspacesWorkspaceConfig_workspaceIDAndDirectoryIDConflict() string { - return fmt.Sprintf(` + return ` data "aws_workspaces_workspace" "test" { workspace_id = "ws-cj5xcxsz5" directory_id = "d-9967252f57" user_name = "Administrator" } -`) +` } diff --git a/aws/resource_aws_apigatewayv2_integration_test.go b/aws/resource_aws_apigatewayv2_integration_test.go index 6ff3061b65e..b70a53689c4 100644 --- a/aws/resource_aws_apigatewayv2_integration_test.go +++ b/aws/resource_aws_apigatewayv2_integration_test.go @@ -776,7 +776,7 @@ resource "aws_apigatewayv2_integration" "test" { } func testAccAWSAPIGatewayV2IntegrationConfig_httpProxy(rName string) string { - return testAccAWSAPIGatewayV2IntegrationConfig_apiHttp(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSAPIGatewayV2IntegrationConfig_apiHttp(rName), ` resource "aws_apigatewayv2_integration" "test" { api_id = aws_apigatewayv2_api.test.id integration_type = "HTTP_PROXY" @@ -790,7 +790,7 @@ resource "aws_apigatewayv2_integration" "test" { func testAccAWSAPIGatewayV2IntegrationConfig_vpcLinkHttp(rName string) string { return composeConfig( testAccAWSAPIGatewayV2IntegrationConfig_vpcLinkHttpBase(rName), - fmt.Sprintf(` + ` resource "aws_apigatewayv2_integration" "test" { api_id = aws_apigatewayv2_api.test.id integration_type = "HTTP_PROXY" @@ -806,13 +806,13 @@ resource "aws_apigatewayv2_integration" "test" { server_name_to_verify = "www.example.com" } } -`)) +`) } func testAccAWSAPIGatewayV2IntegrationConfig_vpcLinkHttpUpdated(rName string) string { return composeConfig( testAccAWSAPIGatewayV2IntegrationConfig_vpcLinkHttpBase(rName), - fmt.Sprintf(` + ` resource "aws_apigatewayv2_integration" "test" { api_id = aws_apigatewayv2_api.test.id integration_type = "HTTP_PROXY" @@ -827,7 +827,7 @@ resource "aws_apigatewayv2_integration" "test" { server_name_to_verify = "www.example.org" } } -`)) +`) } func testAccAWSAPIGatewayV2IntegrationConfig_vpcLinkWebSocket(rName string) string { diff --git a/aws/resource_aws_autoscaling_group_test.go b/aws/resource_aws_autoscaling_group_test.go index 90008081638..1be61745c12 100644 --- a/aws/resource_aws_autoscaling_group_test.go +++ b/aws/resource_aws_autoscaling_group_test.go @@ -2242,8 +2242,7 @@ func TestAccAWSAutoScalingGroup_launchTempPartitionNum(t *testing.T) { } func testAccAWSAutoScalingGroupConfig_autoGeneratedName() string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + - fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), ` data "aws_ami" "test_ami" { most_recent = true owners = ["amazon"] @@ -2270,8 +2269,8 @@ resource "aws_autoscaling_group" "bar" { } func testAccAWSAutoScalingGroupConfig_namePrefix() string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + - fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), + ` data "aws_ami" "test_ami" { most_recent = true owners = ["amazon"] @@ -2299,8 +2298,8 @@ resource "aws_autoscaling_group" "test" { } func 
testAccAWSAutoScalingGroupConfig_terminationPoliciesEmpty() string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + - fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), + ` data "aws_ami" "test_ami" { most_recent = true owners = ["amazon"] @@ -2328,8 +2327,8 @@ resource "aws_autoscaling_group" "bar" { } func testAccAWSAutoScalingGroupConfig_terminationPoliciesExplicitDefault() string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + - fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), + ` data "aws_ami" "test_ami" { most_recent = true owners = ["amazon"] @@ -2358,8 +2357,8 @@ resource "aws_autoscaling_group" "bar" { } func testAccAWSAutoScalingGroupConfig_terminationPoliciesUpdate() string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + - fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), + ` data "aws_ami" "test_ami" { most_recent = true owners = ["amazon"] @@ -2695,8 +2694,8 @@ resource "aws_autoscaling_group" "bar" { } func testAccAWSAutoScalingGroupConfigWithAZ() string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + - fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), + ` resource "aws_vpc" "default" { cidr_block = "10.0.0.0/16" tags = { @@ -2741,8 +2740,8 @@ resource "aws_autoscaling_group" "bar" { } func testAccAWSAutoScalingGroupConfigWithVPCIdent() string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + - fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), + ` resource "aws_vpc" "default" { cidr_block = "10.0.0.0/16" tags = { @@ -2833,8 +2832,8 @@ resource "aws_autoscaling_group" "bar" { } func testAccAWSAutoScalingGroupConfig_withServiceLinkedRoleARN() string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + - fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), + ` data "aws_ami" "test_ami" { most_recent = true owners = ["amazon"] @@ -2866,8 +2865,8 @@ resource "aws_autoscaling_group" "bar" { } func testAccAWSAutoScalingGroupConfig_withMaxInstanceLifetime() string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + - fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), + ` data "aws_ami" "test_ami" { most_recent = true owners = ["amazon"] @@ -2895,8 +2894,8 @@ resource "aws_autoscaling_group" "bar" { } func testAccAWSAutoScalingGroupConfig_withMaxInstanceLifetime_update() string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + - fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), + ` data "aws_ami" "test_ami" { most_recent = true owners = ["amazon"] @@ -2924,8 +2923,8 @@ resource "aws_autoscaling_group" "bar" { } func testAccAWSAutoscalingMetricsCollectionConfig_allMetricsCollected() string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + - fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), + ` data "aws_ami" "test_ami" { most_recent = true owners = ["amazon"] @@ -2971,8 +2970,8 @@ resource "aws_autoscaling_group" "bar" { } func testAccAWSAutoscalingMetricsCollectionConfig_updatingMetricsCollected() string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + - fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), + ` data "aws_ami" "test_ami" { most_recent = true owners = ["amazon"] @@ -3010,8 +3009,8 @@ resource 
"aws_autoscaling_group" "bar" { } func testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_pre() string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + - fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), + ` resource "aws_vpc" "default" { cidr_block = "10.0.0.0/16" @@ -3100,8 +3099,8 @@ resource "aws_security_group" "tf_test_self" { } func testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_post() string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + - fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), + ` resource "aws_vpc" "default" { cidr_block = "10.0.0.0/16" @@ -3192,8 +3191,8 @@ resource "aws_security_group" "tf_test_self" { } func testAccAWSAutoScalingGroupConfig_ALB_TargetGroup_post_duo() string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + - fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), + ` resource "aws_vpc" "default" { cidr_block = "10.0.0.0/16" @@ -3624,8 +3623,8 @@ resource "aws_autoscaling_group" "bar" { } func testAccAWSAutoScalingGroupConfig_classicVpcZoneIdentifier() string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + - fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), + ` resource "aws_autoscaling_group" "test" { min_size = 0 max_size = 0 @@ -3652,8 +3651,8 @@ resource "aws_launch_configuration" "test" { } func testAccAWSAutoScalingGroupConfig_withLaunchTemplate() string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + - fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), + ` data "aws_ami" "test_ami" { most_recent = true owners = ["amazon"] @@ -3684,8 +3683,8 @@ resource "aws_autoscaling_group" "bar" { } func testAccAWSAutoScalingGroupConfig_withLaunchTemplate_toLaunchConfig() string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + - fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), + ` data "aws_ami" "test_ami" { most_recent = true owners = ["amazon"] @@ -3718,8 +3717,8 @@ resource "aws_autoscaling_group" "bar" { } func testAccAWSAutoScalingGroupConfig_withLaunchTemplate_toLaunchTemplateName() string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + - fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), + ` data "aws_ami" "test_ami" { most_recent = true owners = ["amazon"] @@ -3760,8 +3759,8 @@ resource "aws_autoscaling_group" "bar" { } func testAccAWSAutoScalingGroupConfig_withLaunchTemplate_toLaunchTemplateVersion() string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + - fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), + ` data "aws_ami" "test_ami" { most_recent = true owners = ["amazon"] diff --git a/aws/resource_aws_autoscaling_notification_test.go b/aws/resource_aws_autoscaling_notification_test.go index ccd3639d698..d1a462a0d10 100644 --- a/aws/resource_aws_autoscaling_notification_test.go +++ b/aws/resource_aws_autoscaling_notification_test.go @@ -324,7 +324,7 @@ resource "aws_autoscaling_notification" "example" { } func testAccASGNotificationConfig_pagination() string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_sns_topic" "user_updates" { name = "user-updates-topic" } diff --git a/aws/resource_aws_datasync_location_fsx_windows_file_system_test.go 
b/aws/resource_aws_datasync_location_fsx_windows_file_system_test.go index 7754948fe82..2b517e4379b 100644 --- a/aws/resource_aws_datasync_location_fsx_windows_file_system_test.go +++ b/aws/resource_aws_datasync_location_fsx_windows_file_system_test.go @@ -286,7 +286,7 @@ func testAccWSDataSyncLocationFsxWindowsImportStateIdFunc(resourceName string) r } func testAccAWSDataSyncLocationFsxWindowsConfig() string { - return testAccAwsFsxWindowsFileSystemConfigSecurityGroupIds1() + fmt.Sprintf(` + return composeConfig(testAccAwsFsxWindowsFileSystemConfigSecurityGroupIds1(), ` resource "aws_datasync_location_fsx_windows_file_system" "test" { fsx_filesystem_arn = aws_fsx_windows_file_system.test.arn user = "SomeUser" diff --git a/aws/resource_aws_db_instance_test.go b/aws/resource_aws_db_instance_test.go index fc086b55d1b..44d828c26db 100644 --- a/aws/resource_aws_db_instance_test.go +++ b/aws/resource_aws_db_instance_test.go @@ -3079,7 +3079,7 @@ func testAccAWSDBInstanceConfig_orderableClassSQLServerEx() string { } func testAccAWSDBInstanceConfig_basic() string { - return composeConfig(testAccAWSDBInstanceConfig_orderableClassMysql(), fmt.Sprintf(` + return composeConfig(testAccAWSDBInstanceConfig_orderableClassMysql(), ` resource "aws_db_instance" "bar" { allocated_storage = 10 backup_retention_period = 0 @@ -3097,11 +3097,11 @@ resource "aws_db_instance" "bar" { # validation error). maintenance_window = "Fri:09:00-Fri:09:30" } -`)) +`) } func testAccAWSDBInstanceConfig_namePrefix() string { - return composeConfig(testAccAWSDBInstanceConfig_orderableClassMysql(), fmt.Sprintf(` + return composeConfig(testAccAWSDBInstanceConfig_orderableClassMysql(), ` resource "aws_db_instance" "test" { allocated_storage = 10 engine = data.aws_rds_orderable_db_instance.test.engine @@ -3112,11 +3112,11 @@ resource "aws_db_instance" "test" { skip_final_snapshot = true username = "root" } -`)) +`) } func testAccAWSDBInstanceConfig_generatedName() string { - return composeConfig(testAccAWSDBInstanceConfig_orderableClassMysql(), fmt.Sprintf(` + return composeConfig(testAccAWSDBInstanceConfig_orderableClassMysql(), ` resource "aws_db_instance" "test" { allocated_storage = 10 engine = data.aws_rds_orderable_db_instance.test.engine @@ -3126,7 +3126,7 @@ resource "aws_db_instance" "test" { skip_final_snapshot = true username = "root" } -`)) +`) } func testAccAWSDBInstanceConfig_KmsKeyId(rInt int) string { @@ -3190,7 +3190,7 @@ resource "aws_db_instance" "bar" { } func testAccAWSDBInstanceConfig_WithCACertificateIdentifier() string { - return composeConfig(testAccAWSDBInstanceConfig_orderableClassMysql(), fmt.Sprintf(` + return composeConfig(testAccAWSDBInstanceConfig_orderableClassMysql(), ` data "aws_rds_certificate" "latest" { latest_valid_till = true } @@ -3206,7 +3206,7 @@ resource "aws_db_instance" "bar" { skip_final_snapshot = true username = "foo" } -`)) +`) } func testAccAWSDBInstanceConfig_WithOptionGroup(rName string) string { diff --git a/aws/resource_aws_docdb_subnet_group_test.go b/aws/resource_aws_docdb_subnet_group_test.go index 8b174854571..e76712840c0 100644 --- a/aws/resource_aws_docdb_subnet_group_test.go +++ b/aws/resource_aws_docdb_subnet_group_test.go @@ -313,7 +313,7 @@ resource "aws_docdb_subnet_group" "foo" { } func testAccDocDBSubnetGroupConfig_namePrefix() string { - return composeConfig(testAccAvailableAZsNoOptInConfig(), fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInConfig(), ` resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" @@ -345,11 +345,11 @@ resource "aws_subnet" 
"b" { resource "aws_docdb_subnet_group" "test" { name_prefix = "tf_test-" subnet_ids = [aws_subnet.a.id, aws_subnet.b.id] -}`)) +}`) } func testAccDocDBSubnetGroupConfig_generatedName() string { - return composeConfig(testAccAvailableAZsNoOptInConfig(), fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInConfig(), ` resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" @@ -380,5 +380,5 @@ resource "aws_subnet" "b" { resource "aws_docdb_subnet_group" "test" { subnet_ids = [aws_subnet.a.id, aws_subnet.b.id] -}`)) +}`) } diff --git a/aws/resource_aws_ec2_transit_gateway_route_test.go b/aws/resource_aws_ec2_transit_gateway_route_test.go index 62206492456..cb5b9941bc6 100644 --- a/aws/resource_aws_ec2_transit_gateway_route_test.go +++ b/aws/resource_aws_ec2_transit_gateway_route_test.go @@ -234,7 +234,7 @@ func testAccCheckAWSEc2TransitGatewayRouteDisappears(transitGateway *ec2.Transit } func testAccAWSEc2TransitGatewayRouteConfigDestinationCidrBlock() string { - return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), ` resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" @@ -278,5 +278,5 @@ resource "aws_ec2_transit_gateway_route" "test_blackhole" { blackhole = true transit_gateway_route_table_id = aws_ec2_transit_gateway.test.association_default_route_table_id } -`)) +`) } diff --git a/aws/resource_aws_ec2_transit_gateway_vpc_attachment_test.go b/aws/resource_aws_ec2_transit_gateway_vpc_attachment_test.go index 3b63247c1ed..47a5799d9d4 100644 --- a/aws/resource_aws_ec2_transit_gateway_vpc_attachment_test.go +++ b/aws/resource_aws_ec2_transit_gateway_vpc_attachment_test.go @@ -583,7 +583,7 @@ func testAccCheckAWSEc2TransitGatewayVpcAttachmentNotRecreated(i, j *ec2.Transit } func testAccAWSEc2TransitGatewayVpcAttachmentConfig() string { - return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), ` resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" @@ -609,7 +609,7 @@ resource "aws_ec2_transit_gateway_vpc_attachment" "test" { transit_gateway_id = aws_ec2_transit_gateway.test.id vpc_id = aws_vpc.test.id } -`)) +`) } func testAccAWSEc2TransitGatewayVpcAttachmentConfigApplianceModeSupport(appModeSupport string) string { @@ -770,7 +770,7 @@ resource "aws_ec2_transit_gateway_vpc_attachment" "test" { } func testAccAWSEc2TransitGatewayVpcAttachmentConfigSubnetIds1() string { - return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), ` resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" @@ -798,11 +798,11 @@ resource "aws_ec2_transit_gateway_vpc_attachment" "test" { transit_gateway_id = aws_ec2_transit_gateway.test.id vpc_id = aws_vpc.test.id } -`)) +`) } func testAccAWSEc2TransitGatewayVpcAttachmentConfigSubnetIds2() string { - return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), ` resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" @@ -830,7 +830,7 @@ resource "aws_ec2_transit_gateway_vpc_attachment" "test" { transit_gateway_id = aws_ec2_transit_gateway.test.id vpc_id = aws_vpc.test.id } -`)) +`) } func testAccAWSEc2TransitGatewayVpcAttachmentConfigTags1(tagKey1, tagValue1 string) string { @@ -903,7 +903,7 @@ resource "aws_ec2_transit_gateway_vpc_attachment" "test" { } func 
testAccAWSEc2TransitGatewayVpcAttachmentConfigTransitGatewayDefaultRouteTableAssociationAndPropagationDisabled() string { - return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), ` resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" @@ -934,7 +934,7 @@ resource "aws_ec2_transit_gateway_vpc_attachment" "test" { transit_gateway_id = aws_ec2_transit_gateway.test.id vpc_id = aws_vpc.test.id } -`)) +`) } func testAccAWSEc2TransitGatewayVpcAttachmentConfigTransitGatewayDefaultRouteTableAssociation(transitGatewayDefaultRouteTableAssociation bool) string { diff --git a/aws/resource_aws_eip_association_test.go b/aws/resource_aws_eip_association_test.go index 7f6e01078b1..7cf7249ffd9 100644 --- a/aws/resource_aws_eip_association_test.go +++ b/aws/resource_aws_eip_association_test.go @@ -452,7 +452,7 @@ resource "aws_eip_association" "test" { func testAccAWSEIPAssociationConfig_instance() string { return composeConfig( testAccLatestAmazonLinuxHvmEbsAmiConfig(), - fmt.Sprintf(` + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.small" @@ -464,7 +464,7 @@ resource "aws_eip_association" "test" { allocation_id = aws_eip.test.id instance_id = aws_instance.test.id } -`)) +`) } const testAccAWSEIPAssociationConfig_networkInterface = ` diff --git a/aws/resource_aws_elb_attachment_test.go b/aws/resource_aws_elb_attachment_test.go index 01ab322824d..841ff13894a 100644 --- a/aws/resource_aws_elb_attachment_test.go +++ b/aws/resource_aws_elb_attachment_test.go @@ -111,7 +111,7 @@ func testAccAWSELBAttachmentCheckInstanceCount(conf *elb.LoadBalancerDescription // add one attachment func testAccAWSELBAttachmentConfig1() string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` data "aws_availability_zones" "available" { state = "available" @@ -146,7 +146,7 @@ resource "aws_elb_attachment" "foo1" { // add a second attachment func testAccAWSELBAttachmentConfig2() string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` data "aws_availability_zones" "available" { state = "available" @@ -191,7 +191,7 @@ resource "aws_elb_attachment" "foo2" { // swap attachments between resources func testAccAWSELBAttachmentConfig3() string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` data "aws_availability_zones" "available" { state = "available" @@ -236,7 +236,7 @@ resource "aws_elb_attachment" "foo2" { // destroy attachments func testAccAWSELBAttachmentConfig4() string { - return fmt.Sprintf(` + return ` data "aws_availability_zones" "available" { state = "available" @@ -256,5 +256,5 @@ resource "aws_elb" "test" { lb_protocol = "http" } } -`) +` } diff --git a/aws/resource_aws_emr_cluster_test.go b/aws/resource_aws_emr_cluster_test.go index bf6b9b161a6..44392bbf04a 100644 --- a/aws/resource_aws_emr_cluster_test.go +++ b/aws/resource_aws_emr_cluster_test.go @@ -1644,9 +1644,9 @@ func testAccAWSEmrComposeConfig(mapPublicIPOnLaunch bool, config ...string) stri } func testAccAWSEmrClusterConfigCurrentPartition() string { - return fmt.Sprintf(` + return ` data "aws_partition" "current" {} -`) +` } func testAccAWSEmrClusterConfig_bootstrap(r string) string { diff --git a/aws/resource_aws_fsx_windows_file_system_test.go 
b/aws/resource_aws_fsx_windows_file_system_test.go index 295656f8821..981fb52ba89 100644 --- a/aws/resource_aws_fsx_windows_file_system_test.go +++ b/aws/resource_aws_fsx_windows_file_system_test.go @@ -1045,7 +1045,7 @@ resource "aws_fsx_windows_file_system" "test" { } func testAccAwsFsxWindowsFileSystemConfigSubnetIds2() string { - return testAccAwsFsxWindowsFileSystemConfigBase() + fmt.Sprintf(` + return composeConfig(testAccAwsFsxWindowsFileSystemConfigBase(), ` resource "aws_fsx_windows_file_system" "test" { active_directory_id = aws_directory_service_directory.test.id skip_final_backup = true diff --git a/aws/resource_aws_glue_data_catalog_encryption_settings_test.go b/aws/resource_aws_glue_data_catalog_encryption_settings_test.go index 7d87f869835..e258ee7df5a 100644 --- a/aws/resource_aws_glue_data_catalog_encryption_settings_test.go +++ b/aws/resource_aws_glue_data_catalog_encryption_settings_test.go @@ -137,7 +137,7 @@ resource "aws_glue_data_catalog_encryption_settings" "test" { } func testAccAWSDataCatalogEncryptionSettingsNonEncryptedConfig() string { - return fmt.Sprintf(` + return ` resource "aws_glue_data_catalog_encryption_settings" "test" { data_catalog_encryption_settings { connection_password_encryption { @@ -149,5 +149,5 @@ resource "aws_glue_data_catalog_encryption_settings" "test" { } } } -`) +` } diff --git a/aws/resource_aws_instance_test.go b/aws/resource_aws_instance_test.go index baf2b92e375..a95a1505937 100644 --- a/aws/resource_aws_instance_test.go +++ b/aws/resource_aws_instance_test.go @@ -3496,13 +3496,13 @@ func testAccInstanceConfigBasic() string { testAccLatestAmazonLinuxHvmEbsAmiConfig(), // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-classic-platform.html#ec2-classic-instance-types testAccAvailableEc2InstanceTypeForRegion("t1.micro", "m1.small", "t3.micro", "t2.micro"), - fmt.Sprintf(` + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = data.aws_ec2_instance_type_offering.available.instance_type # Explicitly no tags so as to test creation without tags. 
} -`)) +`) } func testAccInstanceConfigAtLeastOneOtherEbsVolume(rName string) string { @@ -3536,7 +3536,7 @@ resource "aws_instance" "test" { } func testAccInstanceConfigWithUserDataBase64(rName string) string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), testAccAwsInstanceVpcConfig(rName, false), fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), testAccAwsInstanceVpcConfig(rName, false), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id subnet_id = aws_subnet.test.id @@ -3544,11 +3544,11 @@ resource "aws_instance" "test" { instance_type = "t2.small" user_data_base64 = base64encode("hello world") } -`)) +`) } func testAccInstanceConfigWithSmallInstanceType(rName string) string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), testAccAwsInstanceVpcConfig(rName, false), fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), testAccAwsInstanceVpcConfig(rName, false), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id subnet_id = aws_subnet.test.id @@ -3559,11 +3559,11 @@ resource "aws_instance" "test" { Name = "tf-acctest" } } -`)) +`) } func testAccInstanceConfigUpdateInstanceType(rName string) string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), testAccAwsInstanceVpcConfig(rName, false), fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), testAccAwsInstanceVpcConfig(rName, false), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id subnet_id = aws_subnet.test.id @@ -3574,11 +3574,11 @@ resource "aws_instance" "test" { Name = "tf-acctest" } } -`)) +`) } func testAccInstanceGP2IopsDevice() string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.medium" @@ -3588,11 +3588,11 @@ resource "aws_instance" "test" { volume_size = 11 } } -`)) +`) } func testAccInstanceGP2WithIopsValue() string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.medium" @@ -3604,11 +3604,11 @@ resource "aws_instance" "test" { iops = 10 } } -`)) +`) } func testAccInstanceConfigRootInstanceStore() string { - return composeConfig(testAccLatestAmazonLinuxHvmInstanceStoreAmiConfig(), fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmInstanceStoreAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-instance-store.id @@ -3617,13 +3617,13 @@ resource "aws_instance" "test" { # tflint-ignore: aws_instance_previous_type instance_type = "m3.medium" } -`)) +`) } func testAccInstanceConfigNoAMIEphemeralDevices() string { return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), testAccAvailableEc2InstanceTypeForRegion("t3.micro", "t2.micro"), - fmt.Sprintf(` + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = data.aws_ec2_instance_type_offering.available.instance_type @@ -3643,7 +3643,7 @@ resource "aws_instance" "test" { no_device = true } } -`)) +`) } func testAccAwsEc2InstanceEbsRootDeviceBasic() string { @@ -3936,7 +3936,7 @@ resource "aws_instance" "test" { } func testAccCheckInstanceConfigTags() 
string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.small" @@ -3945,11 +3945,11 @@ resource "aws_instance" "test" { test = "test2" } } -`)) +`) } func testAccInstanceConfigEbsBlockDeviceKmsKeyArn() string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_kms_key" "test" { deletion_window_in_days = 7 } @@ -3973,11 +3973,11 @@ resource "aws_instance" "test" { volume_size = 12 } } -`)) +`) } func testAccInstanceConfigRootBlockDeviceKmsKeyArn(rName string) string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), testAccAwsInstanceVpcConfig(rName, false), fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), testAccAwsInstanceVpcConfig(rName, false), ` resource "aws_kms_key" "test" { deletion_window_in_days = 7 } @@ -3993,11 +3993,11 @@ resource "aws_instance" "test" { kms_key_id = aws_kms_key.test.arn } } -`)) +`) } func testAccCheckInstanceConfigWithAttachedVolume() string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.medium" @@ -4028,11 +4028,11 @@ resource "aws_volume_attachment" "test" { volume_id = aws_ebs_volume.test.id instance_id = aws_instance.test.id } -`)) +`) } func testAccCheckInstanceConfigNoVolumeTags() string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id @@ -4066,7 +4066,7 @@ resource "aws_instance" "test" { virtual_name = "ephemeral0" } } -`)) +`) } var testAccCheckInstanceConfigEBSBlockDeviceInvalidIops = composeConfig(testAccAwsEc2InstanceAmiWithEbsRootVolume, ` @@ -4100,7 +4100,7 @@ resource "aws_instance" "test" { `) func testAccCheckInstanceConfigWithVolumeTags() string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id @@ -4138,11 +4138,11 @@ resource "aws_instance" "test" { Name = "acceptance-test-volume-tag" } } -`)) +`) } func testAccCheckInstanceConfigWithVolumeTagsUpdate() string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id @@ -4181,11 +4181,11 @@ resource "aws_instance" "test" { Environment = "dev" } } -`)) +`) } func testAccCheckInstanceConfigTagsUpdate() string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.small" @@ -4194,7 +4194,7 @@ resource "aws_instance" "test" { test2 = "test3" } } -`)) +`) } func testAccInstanceConfigWithoutInstanceProfile(rName string) string { @@ -5016,7 +5016,7 @@ data "aws_ami" "amzn-ami-minimal-hvm-instance-store" { // describes the latest 
Amazon Linux AMI using PV virtualization and an EBS root device. // The data source is named 'amzn-ami-minimal-pv-ebs'. func testAccLatestAmazonLinuxPvEbsAmiConfig() string { - return fmt.Sprintf(` + return ` data "aws_ami" "amzn-ami-minimal-pv-ebs" { most_recent = true owners = ["amazon"] @@ -5031,14 +5031,14 @@ data "aws_ami" "amzn-ami-minimal-pv-ebs" { values = ["ebs"] } } -`) +` } // testAccLatestAmazonLinuxPvInstanceStoreAmiConfig returns the configuration for a data source that // describes the latest Amazon Linux AMI using PV virtualization and an instance store root device. // The data source is named 'amzn-ami-minimal-pv-instance-store'. func testAccLatestAmazonLinuxPvInstanceStoreAmiConfig() string { - return fmt.Sprintf(` + return ` data "aws_ami" "amzn-ami-minimal-pv-instance-store" { most_recent = true owners = ["amazon"] @@ -5053,7 +5053,7 @@ data "aws_ami" "amzn-ami-minimal-pv-instance-store" { values = ["instance-store"] } } -`) +` } // testAccLatestWindowsServer2016CoreAmiConfig returns the configuration for a data source that diff --git a/aws/resource_aws_lambda_code_signing_config_test.go b/aws/resource_aws_lambda_code_signing_config_test.go index 49ca6ed7eeb..f2cd480ab52 100644 --- a/aws/resource_aws_lambda_code_signing_config_test.go +++ b/aws/resource_aws_lambda_code_signing_config_test.go @@ -114,7 +114,7 @@ func TestAccAWSLambdaCodeSigningConfig_UpdatePublishers(t *testing.T) { } func testAccAWSLambdaCodeSigningConfigUpdatePublishers() string { - return fmt.Sprintf(` + return ` resource "aws_signer_signing_profile" "test1" { platform_id = "AWSLambda-SHA384-ECDSA" } @@ -129,11 +129,11 @@ resource "aws_lambda_code_signing_config" "code_signing_config" { aws_signer_signing_profile.test1.version_arn ] } -}`) +}` } func testAccAWSLambdaCodeSigningConfigUpdatePolicy() string { - return fmt.Sprintf(` + return ` resource "aws_signer_signing_profile" "test1" { platform_id = "AWSLambda-SHA384-ECDSA" } @@ -153,11 +153,11 @@ resource "aws_lambda_code_signing_config" "code_signing_config" { policies { untrusted_artifact_on_deployment = "Enforce" } -}`) +}` } func testAccAWSLambdaCodeSigningConfigBasic() string { - return fmt.Sprintf(` + return ` resource "aws_signer_signing_profile" "test1" { platform_id = "AWSLambda-SHA384-ECDSA" } @@ -179,7 +179,7 @@ resource "aws_lambda_code_signing_config" "code_signing_config" { } description = "Code Signing Config for test account" -}`) +}` } func testAccCheckAwsCodeSigningConfigExists(n string, mapping *lambda.GetCodeSigningConfigOutput) resource.TestCheckFunc { diff --git a/aws/resource_aws_neptune_cluster_test.go b/aws/resource_aws_neptune_cluster_test.go index 1c93cbb8e89..b7de72447ef 100644 --- a/aws/resource_aws_neptune_cluster_test.go +++ b/aws/resource_aws_neptune_cluster_test.go @@ -570,11 +570,11 @@ func testAccCheckAWSNeptuneClusterSnapshot(rName string) resource.TestCheckFunc } func testAccAWSNeptuneClusterConfigBase() string { - return composeConfig(testAccAvailableAZsNoOptInConfig(), fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInConfig(), ` locals { availability_zone_names = slice(data.aws_availability_zones.available.names, 0, min(3, length(data.aws_availability_zones.available.names))) } -`)) +`) } func testAccAWSNeptuneClusterConfig(rName string) string { diff --git a/aws/resource_aws_neptune_subnet_group_test.go b/aws/resource_aws_neptune_subnet_group_test.go index 6c865f48e03..492398e35d0 100644 --- a/aws/resource_aws_neptune_subnet_group_test.go +++ b/aws/resource_aws_neptune_subnet_group_test.go @@ -274,7 +274,7
@@ resource "aws_neptune_subnet_group" "foo" { } func testAccNeptuneSubnetGroupConfig_namePrefix() string { - return composeConfig(testAccAvailableAZsNoOptInConfig(), fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInConfig(), ` resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" @@ -307,11 +307,11 @@ resource "aws_neptune_subnet_group" "test" { name_prefix = "tf_test-" subnet_ids = [aws_subnet.a.id, aws_subnet.b.id] } -`)) +`) } func testAccNeptuneSubnetGroupConfig_generatedName() string { - return composeConfig(testAccAvailableAZsNoOptInConfig(), fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInConfig(), ` resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" @@ -343,5 +343,5 @@ resource "aws_subnet" "b" { resource "aws_neptune_subnet_group" "test" { subnet_ids = [aws_subnet.a.id, aws_subnet.b.id] } -`)) +`) } diff --git a/aws/resource_aws_network_acl_rule_test.go b/aws/resource_aws_network_acl_rule_test.go index 9823023ed00..92c3cad51f7 100644 --- a/aws/resource_aws_network_acl_rule_test.go +++ b/aws/resource_aws_network_acl_rule_test.go @@ -680,7 +680,7 @@ resource "aws_network_acl_rule" "test" { } func testAccAWSNetworkAclRuleConfigIpv6VpcAssignGeneratedIpv6CidrBlockUpdate() string { - return fmt.Sprintf(` + return ` resource "aws_vpc" "test" { assign_generated_ipv6_cidr_block = true cidr_block = "10.3.0.0/16" @@ -707,11 +707,11 @@ resource "aws_network_acl_rule" "test" { rule_number = 150 to_port = 22 } -`) +` } func testAccAWSNetworkAclRuleConfigIpv6VpcNotAssignGeneratedIpv6CidrBlockUpdate() string { - return fmt.Sprint(` + return ` resource "aws_vpc" "test" { assign_generated_ipv6_cidr_block = false cidr_block = "10.3.0.0/16" @@ -727,7 +727,7 @@ resource "aws_network_acl" "test" { tags = { Name = "tf-acc-test-network-acl-rule-ipv6-not-enabled" } -}`) +}` } func testAccAWSNetworkAclRuleImportStateIdFunc(resourceName, resourceProtocol string) resource.ImportStateIdFunc { diff --git a/aws/resource_aws_network_interface_test.go b/aws/resource_aws_network_interface_test.go index 3d0b34ee889..b43d5a2665c 100644 --- a/aws/resource_aws_network_interface_test.go +++ b/aws/resource_aws_network_interface_test.go @@ -626,7 +626,7 @@ func testAccCheckAWSENIMakeExternalAttachment(n string, conf *ec2.NetworkInterfa } func testAccAWSENIConfig() string { - return testAccAvailableAZsNoOptInConfig() + fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInConfig(), ` resource "aws_vpc" "test" { cidr_block = "172.16.0.0/16" enable_dns_hostnames = true @@ -714,7 +714,7 @@ resource "aws_security_group" "test" { } func testAccAWSENIIPV6Config(rName string) string { - return testAccAWSENIIPV6ConfigBase(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSENIIPV6ConfigBase(rName), ` resource "aws_network_interface" "test" { subnet_id = aws_subnet.test.id private_ips = ["172.16.10.100"] @@ -726,7 +726,7 @@ resource "aws_network_interface" "test" { } func testAccAWSENIIPV6MultipleConfig(rName string) string { - return testAccAWSENIIPV6ConfigBase(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSENIIPV6ConfigBase(rName), ` resource "aws_network_interface" "test" { subnet_id = aws_subnet.test.id private_ips = ["172.16.10.100"] @@ -750,7 +750,7 @@ resource "aws_network_interface" "test" { } func testAccAWSENIConfigUpdatedDescription() string { - return testAccAvailableAZsNoOptInConfig() + fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInConfig(), ` resource "aws_vpc" "test" { cidr_block = "172.16.0.0/16" enable_dns_hostnames = true @@ -825,7 +825,7 
@@ resource "aws_network_interface" "test" { } func testAccAWSENIConfigWithNoPrivateIPs() string { - return testAccAvailableAZsNoOptInConfig() + fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInConfig(), ` resource "aws_vpc" "test" { cidr_block = "172.16.0.0/16" enable_dns_hostnames = true @@ -855,7 +855,7 @@ resource "aws_network_interface" "test" { func testAccAWSENIConfigWithAttachment() string { return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), testAccAvailableEc2InstanceTypeForRegion("t3.micro", "t2.micro"), - testAccAvailableAZsNoOptInConfig(), fmt.Sprintf(` + testAccAvailableAZsNoOptInConfig(), ` resource "aws_vpc" "test" { cidr_block = "172.16.0.0/16" enable_dns_hostnames = true @@ -917,13 +917,13 @@ resource "aws_network_interface" "test" { Name = "test_interface" } } -`)) +`) } func testAccAWSENIConfigExternalAttachment() string { return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), testAccAvailableEc2InstanceTypeForRegion("t3.micro", "t2.micro"), - testAccAvailableAZsNoOptInConfig(), fmt.Sprintf(` + testAccAvailableAZsNoOptInConfig(), ` resource "aws_vpc" "test" { cidr_block = "172.16.0.0/16" enable_dns_hostnames = true @@ -980,7 +980,7 @@ resource "aws_network_interface" "test" { Name = "test_interface" } } -`)) +`) } func testAccAWSENIConfigPrivateIpsCount(privateIpsCount int) string { diff --git a/aws/resource_aws_route_table_test.go b/aws/resource_aws_route_table_test.go index fca6681a0e2..9424da8b107 100644 --- a/aws/resource_aws_route_table_test.go +++ b/aws/resource_aws_route_table_test.go @@ -1991,7 +1991,7 @@ resource "aws_route_table" "test" { // See https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html#nat-instance-ami. // The data source is named 'amzn-ami-nat-instance'. 
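// The change just below repeats the mechanical cleanup applied throughout this
// series: a fmt.Sprintf call with no formatting verbs and no arguments is
// replaced by the plain raw string literal, which is exactly what staticcheck's
// S1039 check ("unnecessary use of fmt.Sprintf") reports. A minimal sketch of
// the rewrite, using a hypothetical helper name for illustration:
//
//	// Before: flagged by S1039, since Sprintf adds nothing here.
//	func exampleAmiConfig() string {
//		return fmt.Sprintf(`data "aws_ami" "example" {}`)
//	}
//
//	// After: byte-for-byte the same result, without the fmt call.
//	func exampleAmiConfig() string {
//		return `data "aws_ami" "example" {}`
//	}
//
// The rewrite is only safe when the literal contains no % verbs; if verbs were
// present, Sprintf would interpret them and the two forms would differ.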
func testAccLatestAmazonNatInstanceAmiConfig() string { - return fmt.Sprintf(` + return ` data "aws_ami" "amzn-ami-nat-instance" { most_recent = true owners = ["amazon"] @@ -2001,5 +2001,5 @@ data "aws_ami" "amzn-ami-nat-instance" { values = ["amzn-ami-vpc-nat-*"] } } -`) +` } diff --git a/aws/resource_aws_route_test.go b/aws/resource_aws_route_test.go index 6a949b2f002..ebcea685091 100644 --- a/aws/resource_aws_route_test.go +++ b/aws/resource_aws_route_test.go @@ -601,7 +601,7 @@ func testAccAWSRouteImportStateIdFunc(resourceName string) resource.ImportStateI } func testAccAWSRouteBasicConfig() string { - return fmt.Sprintf(` + return ` resource "aws_vpc" "foo" { cidr_block = "10.1.0.0/16" @@ -627,11 +627,11 @@ resource "aws_route" "bar" { destination_cidr_block = "10.3.0.0/16" gateway_id = aws_internet_gateway.foo.id } -`) +` } func testAccAWSRouteConfigIpv6InternetGateway() string { - return fmt.Sprintf(` + return ` resource "aws_vpc" "foo" { cidr_block = "10.1.0.0/16" assign_generated_ipv6_cidr_block = true @@ -662,13 +662,13 @@ resource "aws_route" "igw" { destination_ipv6_cidr_block = "::/0" gateway_id = aws_internet_gateway.foo.id } -`) +` } func testAccAWSRouteConfigIpv6NetworkInterface() string { - return testAccAvailableEc2InstanceTypeForAvailabilityZone("aws_subnet.router-network.availability_zone", "t2.small", "t3.small") + - testAccLatestAmazonLinuxHvmEbsAmiConfig() + - fmt.Sprintf(` + return composeConfig( + testAccAvailableEc2InstanceTypeForAvailabilityZone("aws_subnet.router-network.availability_zone", "t2.small", "t3.small"), + testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_vpc" "examplevpc" { cidr_block = "10.100.0.0/16" enable_dns_hostnames = true @@ -775,9 +775,9 @@ resource "aws_route" "internal-default-route-ipv6" { } func testAccAWSRouteConfigIpv6Instance() string { - return testAccAvailableEc2InstanceTypeForAvailabilityZone("aws_subnet.router-network.availability_zone", "t2.small", "t3.small") + - testAccLatestAmazonLinuxHvmEbsAmiConfig() + - fmt.Sprintf(` + return composeConfig( + testAccAvailableEc2InstanceTypeForAvailabilityZone("aws_subnet.router-network.availability_zone", "t2.small", "t3.small"), + testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_vpc" "examplevpc" { cidr_block = "10.100.0.0/16" enable_dns_hostnames = true @@ -873,9 +873,9 @@ resource "aws_route" "internal-default-route-ipv6" { } func testAccAWSRouteConfigIpv6InstanceExpanded() string { - return testAccAvailableEc2InstanceTypeForAvailabilityZone("aws_subnet.router-network.availability_zone", "t2.small", "t3.small") + - testAccLatestAmazonLinuxHvmEbsAmiConfig() + - fmt.Sprintf(` + return composeConfig( + testAccAvailableEc2InstanceTypeForAvailabilityZone("aws_subnet.router-network.availability_zone", "t2.small", "t3.small"), + testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_vpc" "examplevpc" { cidr_block = "10.100.0.0/16" enable_dns_hostnames = true @@ -971,7 +971,7 @@ resource "aws_route" "internal-default-route-ipv6" { } func testAccAWSRouteConfigIpv6PeeringConnection() string { - return fmt.Sprintf(` + return ` resource "aws_vpc" "foo" { cidr_block = "10.0.0.0/16" assign_generated_ipv6_cidr_block = true @@ -1001,11 +1001,11 @@ resource "aws_route" "pc" { destination_ipv6_cidr_block = aws_vpc.bar.ipv6_cidr_block vpc_peering_connection_id = aws_vpc_peering_connection.foo.id } -`) +` } func testAccAWSRouteConfigIpv6() string { - return fmt.Sprintf(` + return ` resource "aws_vpc" "foo" { cidr_block = "10.1.0.0/16" assign_generated_ipv6_cidr_block = true @@ 
-1028,11 +1028,11 @@ resource "aws_route" "bar" { destination_ipv6_cidr_block = "::/0" egress_only_gateway_id = aws_egress_only_internet_gateway.foo.id } -`) +` } func testAccAWSRouteConfigIpv6Expanded() string { - return fmt.Sprintf(` + return ` resource "aws_vpc" "foo" { cidr_block = "10.1.0.0/16" assign_generated_ipv6_cidr_block = true @@ -1055,11 +1055,11 @@ resource "aws_route" "bar" { destination_ipv6_cidr_block = "::0/0" egress_only_gateway_id = aws_egress_only_internet_gateway.foo.id } -`) +` } func testAccAWSRouteBasicConfigChangeCidr() string { - return fmt.Sprintf(` + return ` resource "aws_vpc" "foo" { cidr_block = "10.1.0.0/16" @@ -1085,13 +1085,13 @@ resource "aws_route" "bar" { destination_cidr_block = "10.2.0.0/16" gateway_id = aws_internet_gateway.foo.id } -`) +` } func testAccAWSRouteNoopChange() string { - return testAccAvailableEc2InstanceTypeForAvailabilityZone("aws_subnet.test.availability_zone", "t2.nano", "t3.nano") + - testAccLatestAmazonLinuxHvmEbsAmiConfig() + - fmt.Sprint(` + return composeConfig( + testAccAvailableEc2InstanceTypeForAvailabilityZone("aws_subnet.test.availability_zone", "t2.nano", "t3.nano"), + testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` data "aws_availability_zones" "available" { state = "available" @@ -1138,7 +1138,7 @@ resource "aws_instance" "nat" { } func testAccAWSRouteWithVPCEndpoint() string { - return fmt.Sprintf(` + return ` data "aws_region" "current" {} resource "aws_vpc" "foo" { @@ -1175,11 +1175,11 @@ resource "aws_vpc_endpoint" "baz" { service_name = "com.amazonaws.${data.aws_region.current.name}.s3" route_table_ids = [aws_route_table.foo.id] } -`) +` } func testAccAWSRouteNewRouteTable() string { - return fmt.Sprintf(` + return ` resource "aws_vpc" "foo" { cidr_block = "10.1.0.0/16" @@ -1233,12 +1233,12 @@ resource "aws_route" "bar" { destination_cidr_block = "10.4.0.0/16" gateway_id = aws_internet_gateway.bar.id } -`) +` } func testAccAWSRouteConfigTransitGatewayIDDestinatationCidrBlock() string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + - fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInDefaultExcludeConfig(), + ` # IncorrectState: Transit Gateway is not available in availability zone usw2-az4 resource "aws_vpc" "test" { @@ -1327,7 +1327,7 @@ resource "aws_route" "test" { } func testAccAWSRouteResourceConfigLocalGatewayID() string { - return fmt.Sprintf(` + return ` data "aws_ec2_local_gateways" "all" {} data "aws_ec2_local_gateway" "first" { id = tolist(data.aws_ec2_local_gateways.all.ids)[0] @@ -1357,7 +1357,7 @@ resource "aws_route" "test" { destination_cidr_block = "172.16.1.0/24" local_gateway_id = data.aws_ec2_local_gateway.first.id } -`) +` } func testAccAWSRouteResourceConfigVpcEndpointId(rName string) string { diff --git a/aws/resource_aws_signer_signing_profile_permission_test.go b/aws/resource_aws_signer_signing_profile_permission_test.go index badcabb0b53..bdf705cb991 100644 --- a/aws/resource_aws_signer_signing_profile_permission_test.go +++ b/aws/resource_aws_signer_signing_profile_permission_test.go @@ -123,7 +123,7 @@ func TestAccAWSSignerSigningProfilePermission_StatementPrefix(t *testing.T) { profileResourceName := "aws_signer_signing_profile.test_sp" rString := acctest.RandString(53) profileName := fmt.Sprintf("tf_acc_spp_%s", rString) - statementNamePrefix := fmt.Sprintf("tf_acc_spp_statement_") + statementNamePrefix := "tf_acc_spp_statement_" //var conf signer.GetSigningProfileOutput var sppconf signer.ListProfilePermissionsOutput diff --git 
a/aws/resource_aws_signer_signing_profile_test.go b/aws/resource_aws_signer_signing_profile_test.go index 14d144bcb1b..cac5db06eed 100644 --- a/aws/resource_aws_signer_signing_profile_test.go +++ b/aws/resource_aws_signer_signing_profile_test.go @@ -46,7 +46,7 @@ func TestAccAWSSignerSigningProfile_basic(t *testing.T) { func TestAccAWSSignerSigningProfile_GenerateNameWithNamePrefix(t *testing.T) { resourceName := "aws_signer_signing_profile.test_sp" - namePrefix := fmt.Sprintf("tf_acc_sp_basic_") + namePrefix := "tf_acc_sp_basic_" var conf signer.GetSigningProfileOutput @@ -89,7 +89,7 @@ func TestAccAWSSignerSigningProfile_GenerateName(t *testing.T) { func TestAccAWSSignerSigningProfile_tags(t *testing.T) { resourceName := "aws_signer_signing_profile.test_sp" - namePrefix := fmt.Sprintf("tf_acc_sp_basic_") + namePrefix := "tf_acc_sp_basic_" var conf signer.GetSigningProfileOutput @@ -119,7 +119,7 @@ func TestAccAWSSignerSigningProfile_tags(t *testing.T) { func TestAccAWSSignerSigningProfile_SignatureValidityPeriod(t *testing.T) { resourceName := "aws_signer_signing_profile.test_sp" - namePrefix := fmt.Sprintf("tf_acc_sp_basic_") + namePrefix := "tf_acc_sp_basic_" var conf signer.GetSigningProfileOutput @@ -185,10 +185,10 @@ func testAccAWSSignerSigningProfileConfig(namePrefix string) string { } func testAccAWSSignerSigningProfileConfigGenerateName() string { - return fmt.Sprintf(` + return ` resource "aws_signer_signing_profile" "test_sp" { platform_id = "AWSLambda-SHA384-ECDSA" -}`) +}` } func testAccAWSSignerSigningProfileConfigProvidedName(profileName string) string { @@ -226,7 +226,7 @@ resource "aws_signer_signing_profile" "test_sp" { } func testAccAWSSignerSigningProfileUpdateSVP() string { - return fmt.Sprintf(` + return ` resource "aws_signer_signing_profile" "test_sp" { platform_id = "AWSLambda-SHA384-ECDSA" @@ -235,18 +235,18 @@ resource "aws_signer_signing_profile" "test_sp" { type = "MONTHS" } } -`) +` } func testAccAWSSignerSigningProfileUpdateTags() string { - return fmt.Sprintf(` + return ` resource "aws_signer_signing_profile" "test_sp" { platform_id = "AWSLambda-SHA384-ECDSA" tags = { "tag1" = "prod" } } -`) +` } func baseAccAWSSignerSigningProfileConfig(namePrefix string) string { diff --git a/aws/resource_aws_vpn_gateway_test.go b/aws/resource_aws_vpn_gateway_test.go index 18c9b5ccaf6..b6397b0fdac 100644 --- a/aws/resource_aws_vpn_gateway_test.go +++ b/aws/resource_aws_vpn_gateway_test.go @@ -614,7 +614,7 @@ resource "aws_vpn_gateway" "test2" { ` func testAccVpnGatewayConfigWithAZ() string { - return composeConfig(testAccAvailableAZsNoOptInConfig(), fmt.Sprintf(` + return composeConfig(testAccAvailableAZsNoOptInConfig(), ` resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" @@ -631,7 +631,7 @@ resource "aws_vpn_gateway" "test" { Name = "terraform-testacc-vpn-gateway-with-az" } } -`)) +`) } const testAccVpnGatewayConfigWithASN = ` diff --git a/aws/resource_aws_xray_encryption_config_test.go b/aws/resource_aws_xray_encryption_config_test.go index b3f2a62f0d0..98f5290bd23 100644 --- a/aws/resource_aws_xray_encryption_config_test.go +++ b/aws/resource_aws_xray_encryption_config_test.go @@ -76,11 +76,11 @@ func testAccCheckXrayEncryptionConfigExists(n string, EncryptionConfig *xray.Enc } func testAccAWSXrayEncryptionConfigBasicConfig() string { - return fmt.Sprintf(` + return ` resource "aws_xray_encryption_config" "test" { type = "NONE" } -`) +` } func testAccAWSXrayEncryptionConfigWithKeyConfig() string { diff --git a/staticcheck.conf b/staticcheck.conf index 
6f10b6ed746..0f022090118 100644 --- a/staticcheck.conf +++ b/staticcheck.conf @@ -1,6 +1,5 @@ checks = [ "all", - "-S1039", "-SA1019", "-ST1000", "-ST1003", From f493f0fe1b5da84c813a6e2f6bef7aff34015360 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Tue, 5 Jan 2021 10:57:24 -0500 Subject: [PATCH 0387/1212] tests/resource/codebuild_project: Standardized config with composeConfig() --- aws/resource_aws_codebuild_project_test.go | 326 ++++++++++++--------- 1 file changed, 192 insertions(+), 134 deletions(-) diff --git a/aws/resource_aws_codebuild_project_test.go b/aws/resource_aws_codebuild_project_test.go index 620971eaac5..1c93aea9d5c 100644 --- a/aws/resource_aws_codebuild_project_test.go +++ b/aws/resource_aws_codebuild_project_test.go @@ -2295,7 +2295,7 @@ POLICY } func testAccAWSCodeBuildProjectConfig_basic(rName string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%s" service_role = aws_iam_role.test.arn @@ -2315,11 +2315,11 @@ resource "aws_codebuild_project" "test" { type = "GITHUB" } } -`, rName, testAccAWSCodeBuildGitHubSourceLocationFromEnv()) +`, rName, testAccAWSCodeBuildGitHubSourceLocationFromEnv())) } func testAccAWSCodebuildProjectConfig_BadgeEnabled(rName string, badgeEnabled bool) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { badge_enabled = %t name = "%s" @@ -2340,11 +2340,11 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, badgeEnabled, rName) +`, badgeEnabled, rName)) } func testAccAWSCodeBuildProjectConfig_BuildTimeout(rName string, buildTimeout int) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { build_timeout = %d name = "%s" @@ -2365,11 +2365,11 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, buildTimeout, rName) +`, buildTimeout, rName)) } func testAccAWSCodeBuildProjectConfig_QueuedTimeout(rName string, queuedTimeout int) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { queued_timeout = %d name = "%s" @@ -2390,11 +2390,11 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, queuedTimeout, rName) +`, queuedTimeout, rName)) } func testAccAWSCodeBuildProjectConfig_Cache(rName, cacheLocation, cacheType string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%s" service_role = aws_iam_role.test.arn @@ -2419,11 +2419,11 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, rName, cacheLocation, cacheType) +`, rName, cacheLocation, cacheType)) } func testAccAWSCodeBuildProjectConfig_LocalCache(rName, modeType string) string { - return 
testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%s" service_role = aws_iam_role.test.arn @@ -2448,11 +2448,11 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, rName, modeType) +`, rName, modeType)) } func testAccAWSCodeBuildProjectConfig_Description(rName, description string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { description = "%s" name = "%s" @@ -2473,11 +2473,11 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, description, rName) +`, description, rName)) } func testAccAWSCodeBuildProjectConfig_SourceVersion(rName, sourceVersion string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%s" service_role = aws_iam_role.test.arn @@ -2499,11 +2499,11 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, rName, sourceVersion) +`, rName, sourceVersion)) } func testAccAWSCodeBuildProjectConfig_EncryptionKey(rName string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_kms_key" "test" { description = "Terraform acc test" deletion_window_in_days = 7 @@ -2529,11 +2529,11 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, rName) +`, rName)) } func testAccAWSCodeBuildProjectConfig_Environment_EnvironmentVariable_One(rName, key1, value1 string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %[1]q service_role = aws_iam_role.test.arn @@ -2558,11 +2558,11 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, rName, key1, value1) +`, rName, key1, value1)) } func testAccAWSCodeBuildProjectConfig_Environment_EnvironmentVariable_Two(rName, key1, value1, key2, value2 string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %[1]q service_role = aws_iam_role.test.arn @@ -2592,11 +2592,11 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, rName, key1, value1, key2, value2) +`, rName, key1, value1, key2, value2)) } func testAccAWSCodeBuildProjectConfig_Environment_EnvironmentVariable_Zero(rName string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %[1]q service_role = aws_iam_role.test.arn @@ -2616,11 +2616,11 @@ resource "aws_codebuild_project" "test" { 
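// composeConfig, adopted throughout this commit in place of raw string
// concatenation, is the provider's acceptance-test helper for joining config
// fragments. A minimal sketch of what such a helper could look like, assuming
// a variadic signature and a "strings" import (an illustration, not
// necessarily the provider's exact implementation):
//
//	func composeConfig(fragments ...string) string {
//		var b strings.Builder
//		for _, fragment := range fragments {
//			// Fragments already end in newlines, so plain appending works.
//			b.WriteString(fragment)
//		}
//		return b.String()
//	}
//
// Call sites then read composeConfig(base, fmt.Sprintf(`...`, args)), keeping
// fmt.Sprintf only for fragments that actually interpolate format verbs.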
location = "https://github.com/hashicorp/packer.git" } } -`, rName) +`, rName)) } func testAccAWSCodeBuildProjectConfig_Environment_EnvironmentVariable_Type(rName, environmentVariableType string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%s" service_role = aws_iam_role.test.arn @@ -2651,11 +2651,14 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, rName, environmentVariableType) +`, rName, environmentVariableType)) } func testAccAWSCodeBuildProjectConfig_Environment_Certificate(rName string, bName string, oName string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName) + fmt.Sprintf(` + return composeConfig( + testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName), + fmt.Sprintf(` resource "aws_s3_bucket_object" "test" { bucket = aws_s3_bucket.test.bucket key = "%s" @@ -2682,11 +2685,11 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, oName, rName) +`, oName, rName)) } func testAccAWSCodeBuildProjectConfig_Environment_RegistryCredential1(rName string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %[1]q service_role = aws_iam_role.test.arn @@ -2722,11 +2725,11 @@ resource "aws_secretsmanager_secret_version" "test" { secret_id = aws_secretsmanager_secret.test.id secret_string = jsonencode(map("username", "user", "password", "pass")) } -`, rName) +`, rName)) } func testAccAWSCodeBuildProjectConfig_Environment_RegistryCredential2(rName string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %[1]q service_role = aws_iam_role.test.arn @@ -2762,11 +2765,11 @@ resource "aws_secretsmanager_secret_version" "test" { secret_id = aws_secretsmanager_secret.test.id secret_string = jsonencode(map("username", "user", "password", "pass")) } -`, rName) +`, rName)) } func testAccAWSCodeBuildProjectConfig_LogsConfig_CloudWatchLogs(rName, status, gName, sName string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%s" service_role = aws_iam_role.test.arn @@ -2794,11 +2797,14 @@ resource "aws_codebuild_project" "test" { } } } -`, rName, status, gName, sName) +`, rName, status, gName, sName)) } func testAccAWSCodeBuildProjectConfig_LogsConfig_S3Logs(rName, bName, status, location string, encryptionDisabled bool) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName) + fmt.Sprintf(` + return composeConfig( + testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName), + fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%s" service_role = aws_iam_role.test.arn @@ -2826,11 +2832,11 @@ 
resource "aws_codebuild_project" "test" { } } } -`, rName, status, location, encryptionDisabled) +`, rName, status, location, encryptionDisabled)) } func testAccAWSCodeBuildProjectConfig_Source_Auth(rName, authResource, authType string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%s" service_role = aws_iam_role.test.arn @@ -2855,11 +2861,11 @@ resource "aws_codebuild_project" "test" { } } } -`, rName, authResource, authType) +`, rName, authResource, authType)) } func testAccAWSCodeBuildProjectConfig_Source_GitCloneDepth(rName string, gitCloneDepth int) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%s" service_role = aws_iam_role.test.arn @@ -2880,11 +2886,11 @@ resource "aws_codebuild_project" "test" { type = "GITHUB" } } -`, rName, gitCloneDepth) +`, rName, gitCloneDepth)) } func testAccAWSCodeBuildProjectConfig_Source_GitSubmodulesConfig_CodeCommit(rName string, fetchSubmodules bool) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%s" service_role = aws_iam_role.test.arn @@ -2908,11 +2914,11 @@ resource "aws_codebuild_project" "test" { } } } -`, rName, fetchSubmodules) +`, rName, fetchSubmodules)) } func testAccAWSCodeBuildProjectConfig_Source_GitSubmodulesConfig_GitHub(rName string, fetchSubmodules bool) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%s" service_role = aws_iam_role.test.arn @@ -2936,11 +2942,11 @@ resource "aws_codebuild_project" "test" { } } } -`, rName, fetchSubmodules) +`, rName, fetchSubmodules)) } func testAccAWSCodeBuildProjectConfig_Source_GitSubmodulesConfig_GitHubEnterprise(rName string, fetchSubmodules bool) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%s" service_role = aws_iam_role.test.arn @@ -2964,11 +2970,11 @@ resource "aws_codebuild_project" "test" { } } } -`, rName, fetchSubmodules) +`, rName, fetchSubmodules)) } func testAccAWSCodeBuildProjectConfig_SecondarySources_GitSubmodulesConfig_CodeCommit(rName string, fetchSubmodules bool) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%[1]s" service_role = aws_iam_role.test.arn @@ -3012,11 +3018,11 @@ resource "aws_codebuild_project" "test" { } } } -`, rName, fetchSubmodules) +`, rName, fetchSubmodules)) } func testAccAWSCodeBuildProjectConfig_SecondarySources_GitSubmodulesConfig_GitHub(rName string, fetchSubmodules bool) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), 
fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%[1]s" service_role = aws_iam_role.test.arn @@ -3060,11 +3066,11 @@ resource "aws_codebuild_project" "test" { } } } -`, rName, fetchSubmodules) +`, rName, fetchSubmodules)) } func testAccAWSCodeBuildProjectConfig_SecondarySources_GitSubmodulesConfig_GitHubEnterprise(rName string, fetchSubmodules bool) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%[1]s" service_role = aws_iam_role.test.arn @@ -3108,11 +3114,11 @@ resource "aws_codebuild_project" "test" { } } } -`, rName, fetchSubmodules) +`, rName, fetchSubmodules)) } func testAccAWSCodeBuildProjectConfig_Source_InsecureSSL(rName string, insecureSSL bool) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%s" service_role = aws_iam_role.test.arn @@ -3133,11 +3139,11 @@ resource "aws_codebuild_project" "test" { type = "GITHUB" } } -`, rName, insecureSSL) +`, rName, insecureSSL)) } func testAccAWSCodeBuildProjectConfig_Source_ReportBuildStatus_Bitbucket(rName string, reportBuildStatus bool) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %q service_role = aws_iam_role.test.arn @@ -3158,11 +3164,11 @@ resource "aws_codebuild_project" "test" { type = "BITBUCKET" } } -`, rName, reportBuildStatus) +`, rName, reportBuildStatus)) } func testAccAWSCodeBuildProjectConfig_Source_ReportBuildStatus_GitHub(rName string, reportBuildStatus bool) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %q service_role = aws_iam_role.test.arn @@ -3183,11 +3189,11 @@ resource "aws_codebuild_project" "test" { type = "GITHUB" } } -`, rName, reportBuildStatus) +`, rName, reportBuildStatus)) } func testAccAWSCodeBuildProjectConfig_Source_ReportBuildStatus_GitHubEnterprise(rName string, reportBuildStatus bool) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %q service_role = aws_iam_role.test.arn @@ -3208,11 +3214,11 @@ resource "aws_codebuild_project" "test" { type = "GITHUB_ENTERPRISE" } } -`, rName, reportBuildStatus) +`, rName, reportBuildStatus)) } func testAccAWSCodeBuildProjectConfig_Source_Type_Bitbucket(rName string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %q service_role = aws_iam_role.test.arn @@ -3232,11 +3238,11 @@ resource "aws_codebuild_project" "test" { type = "BITBUCKET" } } -`, rName, testAccAWSCodeBuildBitbucketSourceLocationFromEnv()) +`, rName, testAccAWSCodeBuildBitbucketSourceLocationFromEnv())) } func testAccAWSCodeBuildProjectConfig_Source_Type_CodeCommit(rName string) string 
{ - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %q service_role = aws_iam_role.test.arn @@ -3256,11 +3262,11 @@ resource "aws_codebuild_project" "test" { type = "CODECOMMIT" } } -`, rName) +`, rName)) } func testAccAWSCodeBuildProjectConfig_Source_Type_CodePipeline(rName string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %q service_role = aws_iam_role.test.arn @@ -3279,11 +3285,11 @@ resource "aws_codebuild_project" "test" { type = "CODEPIPELINE" } } -`, rName) +`, rName)) } func testAccAWSCodeBuildProjectConfig_Source_Type_GitHubEnterprise(rName string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %q service_role = aws_iam_role.test.arn @@ -3303,11 +3309,11 @@ resource "aws_codebuild_project" "test" { type = "GITHUB_ENTERPRISE" } } -`, rName) +`, rName)) } func testAccAWSCodeBuildProjectConfig_Source_Type_S3(rName string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_s3_bucket" "test" { bucket = %[1]q } @@ -3337,11 +3343,11 @@ resource "aws_codebuild_project" "test" { type = "S3" } } -`, rName) +`, rName)) } func testAccAWSCodeBuildProjectConfig_Source_Type_NoSource(rName string, rLocation string, rBuildspec string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %q service_role = aws_iam_role.test.arn @@ -3362,11 +3368,11 @@ resource "aws_codebuild_project" "test" { buildspec = %q } } -`, rName, rLocation, rBuildspec) +`, rName, rLocation, rBuildspec)) } func testAccAWSCodeBuildProjectConfig_Tags(rName, tagKey, tagValue string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%s" service_role = aws_iam_role.test.arn @@ -3392,11 +3398,14 @@ resource "aws_codebuild_project" "test" { %s = "%s" } } -`, rName, tagKey, tagValue) +`, rName, tagKey, tagValue)) } func testAccAWSCodeBuildProjectConfig_VpcConfig1(rName string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig( + testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), + testAccAvailableAZsNoOptInConfig(), + fmt.Sprintf(` data "aws_availability_zones" "available" { # InvalidInputException: CodeBuild currently doesn't support VPC in usw2-az4, please select subnets in other availability zones. 
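# This inline data source now duplicates what the testAccAvailableAZsNoOptInConfig()
# fragment added to composeConfig above already renders, presumably something like:
#
#   data "aws_availability_zones" "available" {
#     state = "available"
#
#     filter {
#       name   = "opt-in-status"
#       values = ["opt-in-not-required"]
#     }
#   }
#
# The following commit in this series ("Fix duplicate AZ config") removes this
# whole inline block, the usw2-az4 exclusion included, in favor of the shared
# fragment.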
exclude_zone_ids = ["usw2-az4"] @@ -3453,22 +3462,14 @@ resource "aws_codebuild_project" "test" { vpc_id = aws_vpc.test.id } } -`, rName) +`, rName)) } func testAccAWSCodeBuildProjectConfig_VpcConfig2(rName string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` -data "aws_availability_zones" "available" { - # InvalidInputException: CodeBuild currently doesn't support VPC in usw2-az4, please select subnets in other availability zones. - exclude_zone_ids = ["usw2-az4"] - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} - + return composeConfig( + testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), + testAccAvailableAZsNoOptInConfig(), + fmt.Sprintf(` resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" } @@ -3477,7 +3478,7 @@ resource "aws_subnet" "test" { count = 2 availability_zone = data.aws_availability_zones.available.names[count.index] - cidr_block = "10.0.${count.index}.0/24" + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) vpc_id = aws_vpc.test.id tags = { @@ -3514,11 +3515,11 @@ resource "aws_codebuild_project" "test" { vpc_id = aws_vpc.test.id } } -`, rName) +`, rName)) } func testAccAWSCodeBuildProjectConfig_WindowsServer2019Container(rName string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%s" service_role = aws_iam_role.test.arn @@ -3538,11 +3539,11 @@ resource "aws_codebuild_project" "test" { type = "GITHUB" } } -`, rName, testAccAWSCodeBuildGitHubSourceLocationFromEnv()) +`, rName, testAccAWSCodeBuildGitHubSourceLocationFromEnv())) } func testAccAWSCodeBuildProjectConfig_ARMContainer(rName string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%s" service_role = aws_iam_role.test.arn @@ -3562,11 +3563,14 @@ resource "aws_codebuild_project" "test" { type = "GITHUB" } } -`, rName, testAccAWSCodeBuildGitHubSourceLocationFromEnv()) +`, rName, testAccAWSCodeBuildGitHubSourceLocationFromEnv())) } func testAccAWSCodebuildProjectConfig_Artifacts_ArtifactIdentifier(rName string, bName string, artifactIdentifier string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName) + fmt.Sprintf(` + return composeConfig( + testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName), + fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %[1]q service_role = aws_iam_role.test.arn @@ -3588,11 +3592,14 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, rName, artifactIdentifier) +`, rName, artifactIdentifier)) } func testAccAWSCodebuildProjectConfig_Artifacts_EncryptionDisabled(rName string, bName string, encryptionDisabled bool) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName) + fmt.Sprintf(` + return composeConfig( + testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName), + fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%s" service_role = aws_iam_role.test.arn @@ -3614,11 
+3621,14 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, rName, encryptionDisabled) +`, rName, encryptionDisabled)) } func testAccAWSCodebuildProjectConfig_Artifacts_Location(rName, bName string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName) + fmt.Sprintf(` + return composeConfig( + testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName), + fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %[1]q service_role = aws_iam_role.test.arn @@ -3639,11 +3649,14 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, rName) +`, rName)) } func testAccAWSCodebuildProjectConfig_Artifacts_Name(rName string, bName string, name string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName) + fmt.Sprintf(` + return composeConfig( + testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName), + fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %[1]q service_role = aws_iam_role.test.arn @@ -3665,11 +3678,14 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, rName, name) +`, rName, name)) } func testAccAWSCodebuildProjectConfig_Artifacts_NamespaceType(rName, namespaceType string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + testAccAWSCodeBuildProjectConfig_Base_Bucket(rName) + fmt.Sprintf(` + return composeConfig( + testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), + testAccAWSCodeBuildProjectConfig_Base_Bucket(rName), + fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %[1]q service_role = aws_iam_role.test.arn @@ -3691,11 +3707,14 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, rName, namespaceType) +`, rName, namespaceType)) } func testAccAWSCodebuildProjectConfig_Artifacts_OverrideArtifactName(rName string, bName string, overrideArtifactName bool) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName) + fmt.Sprintf(` + return composeConfig( + testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName), + fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%s" service_role = aws_iam_role.test.arn @@ -3717,11 +3736,14 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, rName, overrideArtifactName) +`, rName, overrideArtifactName)) } func testAccAWSCodebuildProjectConfig_Artifacts_Packaging(rName, packaging string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + testAccAWSCodeBuildProjectConfig_Base_Bucket(rName) + fmt.Sprintf(` + return composeConfig( + testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), + testAccAWSCodeBuildProjectConfig_Base_Bucket(rName), + fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %[1]q service_role = aws_iam_role.test.arn @@ -3743,11 +3765,14 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, rName, packaging) +`, rName, packaging)) } func testAccAWSCodebuildProjectConfig_Artifacts_Path(rName, path string) string { - return 
testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + testAccAWSCodeBuildProjectConfig_Base_Bucket(rName) + fmt.Sprintf(` + return composeConfig( + testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), + testAccAWSCodeBuildProjectConfig_Base_Bucket(rName), + fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %[1]q service_role = aws_iam_role.test.arn @@ -3769,11 +3794,14 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, rName, path) +`, rName, path)) } func testAccAWSCodebuildProjectConfig_Artifacts_Type(rName string, bName string, artifactType string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName) + fmt.Sprintf(` + return composeConfig( + testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName), + fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %[1]q service_role = aws_iam_role.test.arn @@ -3794,11 +3822,14 @@ resource "aws_codebuild_project" "test" { location = "${aws_s3_bucket.test.bucket}/" } } -`, rName, artifactType) +`, rName, artifactType)) } func testAccAWSCodebuildProjectConfig_SecondaryArtifacts(rName string, bName string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName) + fmt.Sprintf(` + return composeConfig( + testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName), + fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%s" service_role = aws_iam_role.test.arn @@ -3831,11 +3862,14 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, rName) +`, rName)) } func testAccAWSCodebuildProjectConfig_SecondaryArtifacts_ArtifactIdentifier(rName string, bName string, artifactIdentifier string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName) + fmt.Sprintf(` + return composeConfig( + testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName), + fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %[1]q service_role = aws_iam_role.test.arn @@ -3862,11 +3896,14 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, rName, artifactIdentifier) +`, rName, artifactIdentifier)) } func testAccAWSCodebuildProjectConfig_SecondaryArtifacts_EncryptionDisabled(rName string, bName string, encryptionDisabled bool) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName) + fmt.Sprintf(` + return composeConfig( + testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName), + fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%s" service_role = aws_iam_role.test.arn @@ -3894,11 +3931,14 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, rName, encryptionDisabled) +`, rName, encryptionDisabled)) } func testAccAWSCodebuildProjectConfig_SecondaryArtifacts_Location(rName, bName string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName) + fmt.Sprintf(` + return composeConfig( + testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), + 
testAccAWSCodeBuildProjectConfig_Base_Bucket(bName), + fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %[1]q service_role = aws_iam_role.test.arn @@ -3925,11 +3965,14 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, rName) +`, rName)) } func testAccAWSCodebuildProjectConfig_SecondaryArtifacts_Name(rName string, bName string, name string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName) + fmt.Sprintf(` + return composeConfig( + testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName), + fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %[1]q service_role = aws_iam_role.test.arn @@ -3957,11 +4000,14 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, rName, name) +`, rName, name)) } func testAccAWSCodebuildProjectConfig_SecondaryArtifacts_NamespaceType(rName, namespaceType string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + testAccAWSCodeBuildProjectConfig_Base_Bucket(rName) + fmt.Sprintf(` + return composeConfig( + testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), + testAccAWSCodeBuildProjectConfig_Base_Bucket(rName), + fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %[1]q service_role = aws_iam_role.test.arn @@ -3989,11 +4035,14 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, rName, namespaceType) +`, rName, namespaceType)) } func testAccAWSCodebuildProjectConfig_SecondaryArtifacts_OverrideArtifactName(rName string, bName string, overrideArtifactName bool) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName) + fmt.Sprintf(` + return composeConfig( + testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName), + fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = "%s" service_role = aws_iam_role.test.arn @@ -4021,11 +4070,14 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, rName, overrideArtifactName) +`, rName, overrideArtifactName)) } func testAccAWSCodebuildProjectConfig_SecondaryArtifacts_Packaging(rName, packaging string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + testAccAWSCodeBuildProjectConfig_Base_Bucket(rName) + fmt.Sprintf(` + return composeConfig( + testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), + testAccAWSCodeBuildProjectConfig_Base_Bucket(rName), + fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %[1]q service_role = aws_iam_role.test.arn @@ -4053,11 +4105,14 @@ resource "aws_codebuild_project" "test" { location = "https://github.com/hashicorp/packer.git" } } -`, rName, packaging) +`, rName, packaging)) } func testAccAWSCodebuildProjectConfig_SecondaryArtifacts_Path(rName, path string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + testAccAWSCodeBuildProjectConfig_Base_Bucket(rName) + fmt.Sprintf(` + return composeConfig( + testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), + testAccAWSCodeBuildProjectConfig_Base_Bucket(rName), + fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %[1]q service_role = aws_iam_role.test.arn @@ -4085,11 +4140,14 @@ resource "aws_codebuild_project" "test" { location = 
"https://github.com/hashicorp/packer.git" } } -`, rName, path) +`, rName, path)) } func testAccAWSCodebuildProjectConfig_SecondaryArtifacts_Type(rName string, bName string, artifactType string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName) + fmt.Sprintf(` + return composeConfig( + testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), + testAccAWSCodeBuildProjectConfig_Base_Bucket(bName), + fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %[1]q service_role = aws_iam_role.test.arn @@ -4116,11 +4174,11 @@ resource "aws_codebuild_project" "test" { type = "CODECOMMIT" } } -`, rName, artifactType) +`, rName, artifactType)) } func testAccAWSCodeBuildProjectConfig_SecondarySources_CodeCommit(rName string) string { - return testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName) + fmt.Sprintf(` + return composeConfig(testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), fmt.Sprintf(` resource "aws_codebuild_project" "test" { name = %q service_role = aws_iam_role.test.arn @@ -4152,5 +4210,5 @@ resource "aws_codebuild_project" "test" { source_identifier = "secondarySource2" } } -`, rName) +`, rName)) } From b3dc0ebb29473dce2c79f97ab9b5f4bb55af4f03 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Tue, 5 Jan 2021 11:00:16 -0500 Subject: [PATCH 0388/1212] tests/resource/codebuild_project: Fix duplicate AZ config --- aws/resource_aws_codebuild_project_test.go | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/aws/resource_aws_codebuild_project_test.go b/aws/resource_aws_codebuild_project_test.go index 1c93aea9d5c..2982f1ce2c0 100644 --- a/aws/resource_aws_codebuild_project_test.go +++ b/aws/resource_aws_codebuild_project_test.go @@ -3406,17 +3406,6 @@ func testAccAWSCodeBuildProjectConfig_VpcConfig1(rName string) string { testAccAWSCodeBuildProjectConfig_Base_ServiceRole(rName), testAccAvailableAZsNoOptInConfig(), fmt.Sprintf(` -data "aws_availability_zones" "available" { - # InvalidInputException: CodeBuild currently doesn't support VPC in usw2-az4, please select subnets in other availability zones. 
- exclude_zone_ids = ["usw2-az4"] - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} - resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" } From 2dd0e842512cf9d6477af247f3e7a11ddbc9b2ab Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 5 Jan 2021 11:03:16 -0500 Subject: [PATCH 0389/1212] tests/resource/aws_autoscaling_group: Fix remaining S1039 reports Previously: ``` $ make lint aws/resource_aws_autoscaling_group_test.go:4353:9: S1039: unnecessary use of fmt.Sprintf (gosimple) return fmt.Sprintf(` ^ aws/resource_aws_autoscaling_group_test.go:4393:9: S1039: unnecessary use of fmt.Sprintf (gosimple) return fmt.Sprintf(` ^ aws/resource_aws_autoscaling_group_test.go:4437:9: S1039: unnecessary use of fmt.Sprintf (gosimple) return fmt.Sprintf(` ^ aws/resource_aws_autoscaling_group_test.go:4518:9: S1039: unnecessary use of fmt.Sprintf (gosimple) return fmt.Sprintf(` ^ make: *** [golangci-lint] Error 1 ``` --- aws/resource_aws_autoscaling_group_test.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/aws/resource_aws_autoscaling_group_test.go b/aws/resource_aws_autoscaling_group_test.go index 1be61745c12..41aa9881140 100644 --- a/aws/resource_aws_autoscaling_group_test.go +++ b/aws/resource_aws_autoscaling_group_test.go @@ -4350,7 +4350,7 @@ resource "aws_autoscaling_group" "test" { } func testAccAwsAutoScalingGroupConfig_InstanceRefresh_Basic() string { - return fmt.Sprintf(` + return ` resource "aws_autoscaling_group" "test" { availability_zones = [data.aws_availability_zones.current.names[0]] max_size = 2 @@ -4386,11 +4386,11 @@ resource "aws_launch_configuration" "test" { image_id = data.aws_ami.test.id instance_type = "t3.nano" } -`) +` } func testAccAwsAutoScalingGroupConfig_InstanceRefresh_Full() string { - return fmt.Sprintf(` + return ` resource "aws_autoscaling_group" "test" { availability_zones = [data.aws_availability_zones.current.names[0]] max_size = 2 @@ -4430,11 +4430,11 @@ resource "aws_launch_configuration" "test" { image_id = data.aws_ami.test.id instance_type = "t3.nano" } -`) +` } func testAccAwsAutoScalingGroupConfig_InstanceRefresh_Disabled() string { - return fmt.Sprintf(` + return ` resource "aws_autoscaling_group" "test" { availability_zones = [data.aws_availability_zones.current.names[0]] max_size = 2 @@ -4466,7 +4466,7 @@ resource "aws_launch_configuration" "test" { image_id = data.aws_ami.test.id instance_type = "t3.nano" } -`) +` } func testAccAwsAutoScalingGroupConfig_InstanceRefresh_Start(launchConfigurationName string) string { @@ -4515,7 +4515,7 @@ resource "aws_launch_configuration" "test" { } func testAccAwsAutoScalingGroupConfig_InstanceRefresh_Triggers() string { - return fmt.Sprintf(` + return ` resource "aws_autoscaling_group" "test" { availability_zones = [data.aws_availability_zones.current.names[0]] max_size = 2 @@ -4558,7 +4558,7 @@ resource "aws_launch_configuration" "test" { image_id = data.aws_ami.test.id instance_type = "t3.nano" } -`) +` } func testAccCheckAutoScalingInstanceRefreshCount(group *autoscaling.Group, expected int) resource.TestCheckFunc { From 76c0f34f50cc74cde2d9a6aaf2cfbd08ba598cfa Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Tue, 5 Jan 2021 11:13:18 -0500 Subject: [PATCH 0390/1212] tests/resource/codebuild_project: Fix VPC issue --- aws/resource_aws_codebuild_project_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_codebuild_project_test.go b/aws/resource_aws_codebuild_project_test.go index 
2982f1ce2c0..fa27484da2f 100644 --- a/aws/resource_aws_codebuild_project_test.go +++ b/aws/resource_aws_codebuild_project_test.go @@ -3467,7 +3467,7 @@ resource "aws_subnet" "test" { count = 2 availability_zone = data.aws_availability_zones.available.names[count.index] - cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + cidr_block = "10.0.${count.index}.0/24" vpc_id = aws_vpc.test.id tags = { From f6880bb3b4d61dbb89eed3b3386437a413030ba2 Mon Sep 17 00:00:00 2001 From: Stijn De Haes Date: Tue, 5 Jan 2021 17:29:37 +0100 Subject: [PATCH 0391/1212] resource/aws_vpc_endpoint_service: Make `private_dns_name` configurable and add `private_dns_name_configuration` attribute (#16495) Output from acceptance testing: ``` --- PASS: TestAccAWSVpcEndpointService_GatewayLoadBalancerArns (211.72s) --- PASS: TestAccAWSVpcEndpointService_disappears (259.98s) --- PASS: TestAccAWSVpcEndpointService_private_dns_name (260.23s) --- PASS: TestAccAWSVpcEndpointService_tags (260.39s) --- PASS: TestAccAWSVpcEndpointService_basic (315.92s) --- PASS: TestAccAWSVpcEndpointService_AllowedPrincipals (326.95s) ``` --- aws/resource_aws_vpc_endpoint_service.go | 69 ++++++++++++++++++- aws/resource_aws_vpc_endpoint_service_test.go | 54 +++++++++++++++ .../docs/r/vpc_endpoint_service.html.markdown | 7 +- 3 files changed, 128 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_vpc_endpoint_service.go b/aws/resource_aws_vpc_endpoint_service.go index a1322b374e4..eead52efb51 100644 --- a/aws/resource_aws_vpc_endpoint_service.go +++ b/aws/resource_aws_vpc_endpoint_service.go @@ -79,6 +79,31 @@ func resourceAwsVpcEndpointService() *schema.Resource { "private_dns_name": { Type: schema.TypeString, Computed: true, + Optional: true, + }, + "private_dns_name_configuration": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "value": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, }, "service_name": { Type: schema.TypeString, @@ -104,6 +129,9 @@ func resourceAwsVpcEndpointServiceCreate(d *schema.ResourceData, meta interface{ AcceptanceRequired: aws.Bool(d.Get("acceptance_required").(bool)), TagSpecifications: ec2TagSpecificationsFromMap(d.Get("tags").(map[string]interface{}), "vpc-endpoint-service"), } + if v, ok := d.GetOk("private_dns_name"); ok { + req.PrivateDnsName = aws.String(v.(string)) + } if v, ok := d.GetOk("gateway_load_balancer_arns"); ok { if v, ok := v.(*schema.Set); ok && v.Len() > 0 { @@ -214,17 +242,56 @@ func resourceAwsVpcEndpointServiceRead(d *schema.ResourceData, meta interface{}) return fmt.Errorf("error setting allowed_principals: %s", err) } + err = d.Set("private_dns_name_configuration", flattenPrivateDnsNameConfiguration(svcCfg.PrivateDnsNameConfiguration)) + if err != nil { + return fmt.Errorf("error setting private_dns_name_configuration: %w", err) + } + return nil } +func flattenPrivateDnsNameConfiguration(privateDnsNameConfiguration *ec2.PrivateDnsNameConfiguration) []interface{} { + if privateDnsNameConfiguration == nil { + return nil + } + tfMap := map[string]interface{}{} + + if v := privateDnsNameConfiguration.Name; v != nil { + tfMap["name"] = aws.StringValue(v) + } + + if v := privateDnsNameConfiguration.State; v != nil { + tfMap["state"] = aws.StringValue(v) + } + + if v := privateDnsNameConfiguration.Type; v != nil { + 
tfMap["type"] = aws.StringValue(v) + } + + if v := privateDnsNameConfiguration.Value; v != nil { + tfMap["value"] = aws.StringValue(v) + } + + // The EC2 API can return a XML structure with no elements + if len(tfMap) == 0 { + return nil + } + + return []interface{}{tfMap} +} + func resourceAwsVpcEndpointServiceUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn - if d.HasChanges("acceptance_required", "gateway_load_balancer_arns", "network_load_balancer_arns") { + if d.HasChanges("acceptance_required", "gateway_load_balancer_arns", "network_load_balancer_arns", "private_dns_name") { modifyCfgReq := &ec2.ModifyVpcEndpointServiceConfigurationInput{ ServiceId: aws.String(d.Id()), } + if d.HasChange("private_dns_name") { + modifyCfgReq.PrivateDnsName = aws.String(d.Get("private_dns_name").(string)) + } + if d.HasChange("acceptance_required") { modifyCfgReq.AcceptanceRequired = aws.Bool(d.Get("acceptance_required").(bool)) } diff --git a/aws/resource_aws_vpc_endpoint_service_test.go b/aws/resource_aws_vpc_endpoint_service_test.go index e06ab30404b..9ef445892d9 100644 --- a/aws/resource_aws_vpc_endpoint_service_test.go +++ b/aws/resource_aws_vpc_endpoint_service_test.go @@ -99,6 +99,7 @@ func TestAccAWSVpcEndpointService_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "allowed_principals.#", "0"), resource.TestCheckResourceAttr(resourceName, "manages_vpc_endpoints", "false"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "private_dns_name_configuration.#", "0"), testAccMatchResourceAttrRegionalARN(resourceName, "arn", "ec2", regexp.MustCompile(`vpc-endpoint-service/vpce-svc-.+`)), ), }, @@ -256,6 +257,44 @@ func TestAccAWSVpcEndpointService_tags(t *testing.T) { }) } +func TestAccAWSVpcEndpointService_private_dns_name(t *testing.T) { + var svcCfg ec2.ServiceConfiguration + resourceName := "aws_vpc_endpoint_service.test" + rName1 := acctest.RandomWithPrefix("tf-acc-test") + rName2 := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckVpcEndpointServiceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccVpcEndpointServiceConfigPrivateDnsName(rName1, rName2, "example.com"), + Check: resource.ComposeTestCheckFunc( + testAccCheckVpcEndpointServiceExists(resourceName, &svcCfg), + resource.TestCheckResourceAttr(resourceName, "private_dns_name", "example.com"), + resource.TestCheckResourceAttr(resourceName, "private_dns_name_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "private_dns_name_configuration.0.type", "TXT"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccVpcEndpointServiceConfigPrivateDnsName(rName1, rName2, "changed.com"), + Check: resource.ComposeTestCheckFunc( + testAccCheckVpcEndpointServiceExists(resourceName, &svcCfg), + resource.TestCheckResourceAttr(resourceName, "private_dns_name", "changed.com"), + resource.TestCheckResourceAttr(resourceName, "private_dns_name_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "private_dns_name_configuration.0.type", "TXT"), + ), + }, + }, + }) +} + func testAccCheckVpcEndpointServiceDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).ec2conn @@ -525,3 +564,18 @@ resource "aws_vpc_endpoint_service" "test" { } `, tagKey1, tagValue1, tagKey2, 
tagValue2)) } + +func testAccVpcEndpointServiceConfigPrivateDnsName(rName1, rName2, dnsName string) string { + return composeConfig( + testAccVpcEndpointServiceConfig_base(rName1, rName2), + fmt.Sprintf(` +resource "aws_vpc_endpoint_service" "test" { + acceptance_required = false + private_dns_name = "%s" + + network_load_balancer_arns = [ + aws_lb.test1.arn, + ] +} +`, dnsName)) +} diff --git a/website/docs/r/vpc_endpoint_service.html.markdown b/website/docs/r/vpc_endpoint_service.html.markdown index aae0841f7ff..b9291fb8541 100644 --- a/website/docs/r/vpc_endpoint_service.html.markdown +++ b/website/docs/r/vpc_endpoint_service.html.markdown @@ -46,6 +46,7 @@ The following arguments are supported: * `gateway_load_balancer_arns` - (Optional) Amazon Resource Names (ARNs) of one or more Gateway Load Balancers for the endpoint service. * `network_load_balancer_arns` - (Optional) Amazon Resource Names (ARNs) of one or more Network Load Balancers for the endpoint service. * `tags` - (Optional) A map of tags to assign to the resource. +* `private_dns_name` - (Optional) The private DNS name for the service. ## Attributes Reference @@ -56,10 +57,14 @@ In addition to all arguments above, the following attributes are exported: * `arn` - The Amazon Resource Name (ARN) of the VPC endpoint service. * `base_endpoint_dns_names` - The DNS names for the service. * `manages_vpc_endpoints` - Whether or not the service manages its VPC endpoints - `true` or `false`. -* `private_dns_name` - The private DNS name for the service. * `service_name` - The service name. * `service_type` - The service type, `Gateway` or `Interface`. * `state` - The state of the VPC endpoint service. +* `private_dns_name_configuration` - List of objects containing information about the endpoint service private DNS name configuration. + * `name` - Name of the record subdomain the service provider needs to create. + * `state` - Verification state of the VPC endpoint service. Consumers of the endpoint service can use the private name only when the state is `verified`. + * `type` - Endpoint service verification type, for example `TXT`. + * `value` - Value the service provider adds to the private DNS name domain record before verification. 
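To make the verification flow implied by these attributes concrete, here is a minimal service-provider sketch (an illustrative snippet, not part of this patch; `aws_vpc_endpoint_service.example`, `aws_lb.example`, and the `example.com` domain are placeholders):

```
resource "aws_vpc_endpoint_service" "example" {
  acceptance_required        = false
  private_dns_name           = "example.com"
  network_load_balancer_arns = [aws_lb.example.arn]
}

# Surface the verification record (the tests above assert type "TXT")
# that must be published in the domain's public DNS zone.
output "private_dns_verification" {
  value = aws_vpc_endpoint_service.example.private_dns_name_configuration
}
```

Until that record is created and AWS moves the configuration to the `verified` state, consumers of the endpoint service cannot use the private DNS name.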
## Import From 3ce21df1fc4691e0016edc3b5043f4829260fe3b Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 5 Jan 2021 11:30:30 -0500 Subject: [PATCH 0392/1212] Update CHANGELOG for #16495 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a38d1fd1534..33460fcf148 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ ENHANCEMENTS * resource/aws_imagebuilder_image_recipe: Add `working_directory` argument [GH-16947] * resource/aws_kinesis_stream: Update `retention_period` argument plan-time validation to include up to 8760 hours [GH-16608] * resource/aws_msk_cluster: Support `PER_TOPIC_PER_PARTITION` value for `enhanced_monitoring` argument plan-time validation [GH-16914] +* resource/aws_vpc_endpoint_service: Make `private_dns_name` configurable and add `private_dns_name_configuration` attribute [GH-16495] BUG FIXES From 471208decf7f5b1361c438e5273da5187cc84d4c Mon Sep 17 00:00:00 2001 From: Alvaro Del Valle <32401961+alvarodelvalle@users.noreply.github.com> Date: Tue, 5 Jan 2021 12:44:45 -0500 Subject: [PATCH 0393/1212] docs/resource/aws_emr_cluster: Link to AWS documentation for configuration settings (#16817) --- website/docs/r/emr_cluster.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/emr_cluster.html.markdown b/website/docs/r/emr_cluster.html.markdown index 7e956794071..9f33d1193d9 100644 --- a/website/docs/r/emr_cluster.html.markdown +++ b/website/docs/r/emr_cluster.html.markdown @@ -334,7 +334,7 @@ The following arguments are supported: * `ebs_root_volume_size` - (Optional) Size in GiB of the EBS root device volume of the Linux AMI that is used for each EC2 instance. Available in Amazon EMR version 4.x and later. * `custom_ami_id` - (Optional) A custom Amazon Linux AMI for the cluster (instead of an EMR-owned AMI). Available in Amazon EMR version 5.7.0 and later. * `bootstrap_action` - (Optional) Ordered list of bootstrap actions that will be run before Hadoop is started on the cluster nodes. Defined below. -* `configurations` - (Optional) List of configurations supplied for the EMR cluster you are creating +* `configurations` - (Optional) List of configurations supplied for the EMR cluster you are creating. Supply a configuration object for applications to override their default configuration. See [AWS Documentation](https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-configure-apps.html) for more information. * `configurations_json` - (Optional) A JSON string for supplying list of configurations for the EMR cluster. 
~> **NOTE on configurations_json:** If the `Configurations` value is empty then you should skip From d2fba843995b621c3a87eddd2f4b855dbd333f43 Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Tue, 5 Jan 2021 10:50:19 -0800 Subject: [PATCH 0394/1212] Update CHANGELOG.md for #16727 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 33460fcf148..045fe5aed70 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ ENHANCEMENTS BUG FIXES +* resource/aws_glue_catalog_table: Glue table partition keys should be set to empty list instead of being unset [GH-16727] * resource/aws_imagebuilder_distribution_configuration: Remove `user_ids` argument maximum limit [GH-16905] * resource/aws_transfer_user: Update `user_name` argument validation to support 100 characters [GH-16938] From 3ed72260fc3da7d13c462ec03081207f69e8d42f Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Tue, 5 Jan 2021 13:58:26 -0500 Subject: [PATCH 0395/1212] tests/resource/cognito_user_group: Fix hardcoded region --- aws/resource_aws_cognito_user_group_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_cognito_user_group_test.go b/aws/resource_aws_cognito_user_group_test.go index ee3ded40641..f5c3ad3cbc5 100644 --- a/aws/resource_aws_cognito_user_group_test.go +++ b/aws/resource_aws_cognito_user_group_test.go @@ -215,7 +215,7 @@ resource "aws_iam_role" "group_role" { "Action": "sts:AssumeRoleWithWebIdentity", "Condition": { "StringEquals": { - "cognito-identity.amazonaws.com:aud": "us-east-1:12345678-dead-beef-cafe-123456790ab" + "cognito-identity.amazonaws.com:aud": "%[5]s:12345678-dead-beef-cafe-123456790ab" }, "ForAnyValue:StringLike": { "cognito-identity.amazonaws.com:amr": "authenticated" @@ -234,7 +234,7 @@ resource "aws_cognito_user_group" "main" { precedence = %[4]d role_arn = aws_iam_role.group_role.arn } -`, poolName, groupName, groupDescription, precedence) +`, poolName, groupName, groupDescription, precedence, testAccGetRegion()) } func testAccAWSCognitoUserGroupConfig_RoleArn(rName string) string { From 2b14e0eac2774b15385cbecf2354c0298cb9d151 Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Tue, 5 Jan 2021 11:48:06 -0800 Subject: [PATCH 0396/1212] Update CHANGELOG.md for #16714 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 045fe5aed70..de197f67531 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,8 @@ ENHANCEMENTS * data-source/aws_imagebuilder_image_recipe: Add `working_directory` attribute [GH-16947] * resource/aws_imagebuilder_image_recipe: Add `working_directory` argument [GH-16947] +* resource/aws_glue_crawler: add support for `lineage_configuration` and `recrawl_policy` [GH-16714] +* resource/aws_glue_crawler: add plan time validations to `name`, `description` and `table_prefix` [GH-16714] * resource/aws_kinesis_stream: Update `retention_period` argument plan-time validation to include up to 8760 hours [GH-16608] * resource/aws_msk_cluster: Support `PER_TOPIC_PER_PARTITION` value for `enhanced_monitoring` argument plan-time validation [GH-16914] * resource/aws_vpc_endpoint_service: Make `private_dns_name` configurable and add `private_dns_name_configuration` attribute [GH-16495] From 04e3b2741736d9a0c7c6a61b8ecd221e52e0cf78 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 5 Jan 2021 12:19:05 -0800 Subject: [PATCH 0397/1212] Update CHANGELOG for #9979 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md 
index de197f67531..30c12467b7b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ FEATURES ENHANCEMENTS * data-source/aws_imagebuilder_image_recipe: Add `working_directory` attribute [GH-16947] +* data-source/aws_elasticache_replication_group: Add reader_endpoint_address attribute [GH-9979] +* resource/aws_elasticache_replication_group: Add reader_endpoint_address attribute [GH-9979] * resource/aws_imagebuilder_image_recipe: Add `working_directory` argument [GH-16947] * resource/aws_glue_crawler: add support for `lineage_configuration` and `recrawl_policy` [GH-16714] * resource/aws_glue_crawler: add plan time validations to `name`, `description` and `table_prefix` [GH-16714] From 5cce1b2ce2c6c95fb49cb0376a74a97c0da31157 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Tue, 5 Jan 2021 15:28:38 -0500 Subject: [PATCH 0398/1212] resource/devicefarm_project: Fix hardcoded regions --- aws/resource_aws_devicefarm_project.go | 7 ---- aws/resource_aws_devicefarm_project_test.go | 37 ++++++++++++++++++- .../docs/r/devicefarm_project.html.markdown | 4 +- 3 files changed, 37 insertions(+), 11 deletions(-) diff --git a/aws/resource_aws_devicefarm_project.go b/aws/resource_aws_devicefarm_project.go index 503cae5febd..4fc1249f96e 100644 --- a/aws/resource_aws_devicefarm_project.go +++ b/aws/resource_aws_devicefarm_project.go @@ -35,13 +35,6 @@ func resourceAwsDevicefarmProject() *schema.Resource { func resourceAwsDevicefarmProjectCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).devicefarmconn - region := meta.(*AWSClient).region - - // We need to ensure that DeviceFarm is only being run against us-west-2 - // As this is the only place that AWS currently supports it - if region != "us-west-2" { - return fmt.Errorf("DeviceFarm can only be used with us-west-2. 
You are trying to use it on %s", region) - } input := &devicefarm.CreateProjectInput{ Name: aws.String(d.Get("name").(string)), diff --git a/aws/resource_aws_devicefarm_project_test.go b/aws/resource_aws_devicefarm_project_test.go index c3a4eef23d2..63acf16cc2c 100644 --- a/aws/resource_aws_devicefarm_project_test.go +++ b/aws/resource_aws_devicefarm_project_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/devicefarm" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -18,7 +19,13 @@ func TestAccAWSDeviceFarmProject_basic(t *testing.T) { resourceName := "aws_devicefarm_project.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(devicefarm.EndpointsID, t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccPartitionHasServicePreCheck(devicefarm.EndpointsID, t) + // Currently, DeviceFarm is only supported in us-west-2 + // https://docs.aws.amazon.com/general/latest/gr/devicefarm.html + testAccRegionPreCheck(t, endpoints.UsWest2RegionID) + }, Providers: testAccProviders, CheckDestroy: testAccCheckDeviceFarmProjectDestroy, Steps: []resource.TestStep{ @@ -39,13 +46,39 @@ func TestAccAWSDeviceFarmProject_basic(t *testing.T) { }) } +func TestAccAWSDeviceFarmProject_otherRegion(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + + if testAccGetRegion() == endpoints.UsWest2RegionID { + t.Skipf("skipping test; test does not run in current region (%s)", testAccGetRegion()) + } + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(devicefarm.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDeviceFarmProjectDestroy, + Steps: []resource.TestStep{ + { + Config: testAccDeviceFarmProjectConfig(rName), + ExpectError: regexp.MustCompile(`no such host`), + }, + }, + }) +} + func TestAccAWSDeviceFarmProject_disappears(t *testing.T) { var proj devicefarm.Project rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_devicefarm_project.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(devicefarm.EndpointsID, t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccPartitionHasServicePreCheck(devicefarm.EndpointsID, t) + // Currently, DeviceFarm is only supported in us-west-2 + // https://docs.aws.amazon.com/general/latest/gr/devicefarm.html + testAccRegionPreCheck(t, endpoints.UsWest2RegionID) + }, Providers: testAccProviders, CheckDestroy: testAccCheckDeviceFarmProjectDestroy, Steps: []resource.TestStep{ diff --git a/website/docs/r/devicefarm_project.html.markdown b/website/docs/r/devicefarm_project.html.markdown index 3fd15867d0b..7c77699b4d9 100644 --- a/website/docs/r/devicefarm_project.html.markdown +++ b/website/docs/r/devicefarm_project.html.markdown @@ -9,12 +9,12 @@ description: |- # Resource: aws_devicefarm_project Provides a resource to manage AWS Device Farm Projects. -Please keep in mind that this feature is only supported on the "us-west-2" region. -This resource will error if you try to create a project in another region. For more information about Device Farm Projects, see the AWS Documentation on [Device Farm Projects][aws-get-project]. +~> **NOTE:** AWS currently has limited regional support for Device Farm (e.g. `us-west-2`). 
See [AWS Device Farm endpoints and quotas](https://docs.aws.amazon.com/general/latest/gr/devicefarm.html) for information on supported regions. + ## Basic Example Usage From cd8a23443d7fcaf051b7d15958acffa467d518f8 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 5 Jan 2021 13:30:41 -0800 Subject: [PATCH 0399/1212] Clean up --- ...ce_aws_elasticache_parameter_group_test.go | 52 ++++++++++--------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/aws/resource_aws_elasticache_parameter_group_test.go b/aws/resource_aws_elasticache_parameter_group_test.go index b7dcb013751..332d6b886ac 100644 --- a/aws/resource_aws_elasticache_parameter_group_test.go +++ b/aws/resource_aws_elasticache_parameter_group_test.go @@ -14,7 +14,7 @@ import ( func TestAccAWSElasticacheParameterGroup_basic(t *testing.T) { var v elasticache.CacheParameterGroup - resourceName := "aws_elasticache_parameter_group.bar" + resourceName := "aws_elasticache_parameter_group.test" rName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) resource.ParallelTest(t, resource.TestCase{ @@ -43,6 +43,7 @@ func TestAccAWSElasticacheParameterGroup_basic(t *testing.T) { func TestAccAWSElasticacheParameterGroup_addParameter(t *testing.T) { var v elasticache.CacheParameterGroup + resourceName := "aws_elasticache_parameter_group.test" rName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) resource.ParallelTest(t, resource.TestCase{ @@ -53,29 +54,29 @@ func TestAccAWSElasticacheParameterGroup_addParameter(t *testing.T) { { Config: testAccAWSElasticacheParameterGroupConfigParameter1(rName, "redis2.8", "appendonly", "yes"), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheParameterGroupExists("aws_elasticache_parameter_group.bar", &v), - resource.TestCheckResourceAttr("aws_elasticache_parameter_group.bar", "parameter.#", "1"), - resource.TestCheckTypeSetElemNestedAttrs("aws_elasticache_parameter_group.bar", "parameter.*", map[string]string{ + testAccCheckAWSElasticacheParameterGroupExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "parameter.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "parameter.*", map[string]string{ "name": "appendonly", "value": "yes", }), ), }, { - ResourceName: "aws_elasticache_parameter_group.bar", + ResourceName: "aws_elasticache_parameter_group.test", ImportState: true, ImportStateVerify: true, }, { Config: testAccAWSElasticacheParameterGroupConfigParameter2(rName, "redis2.8", "appendonly", "yes", "appendfsync", "always"), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheParameterGroupExists("aws_elasticache_parameter_group.bar", &v), - resource.TestCheckResourceAttr("aws_elasticache_parameter_group.bar", "parameter.#", "2"), - resource.TestCheckTypeSetElemNestedAttrs("aws_elasticache_parameter_group.bar", "parameter.*", map[string]string{ + testAccCheckAWSElasticacheParameterGroupExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "parameter.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "parameter.*", map[string]string{ "name": "appendonly", "value": "yes", }), - resource.TestCheckTypeSetElemNestedAttrs("aws_elasticache_parameter_group.bar", "parameter.*", map[string]string{ + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "parameter.*", map[string]string{ "name": "appendfsync", "value": "always", }), @@ -88,6 +89,7 @@ func TestAccAWSElasticacheParameterGroup_addParameter(t *testing.T) { // Regression for 
https://github.com/hashicorp/terraform-provider-aws/issues/116 func TestAccAWSElasticacheParameterGroup_removeAllParameters(t *testing.T) { var v elasticache.CacheParameterGroup + resourceName := "aws_elasticache_parameter_group.test" rName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) resource.ParallelTest(t, resource.TestCase{ @@ -98,15 +100,15 @@ func TestAccAWSElasticacheParameterGroup_removeAllParameters(t *testing.T) { { Config: testAccAWSElasticacheParameterGroupConfigParameter2(rName, "redis2.8", "appendonly", "yes", "appendfsync", "always"), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheParameterGroupExists("aws_elasticache_parameter_group.bar", &v), - resource.TestCheckResourceAttr("aws_elasticache_parameter_group.bar", "parameter.#", "2"), + testAccCheckAWSElasticacheParameterGroupExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "parameter.#", "2"), ), }, { Config: testAccAWSElasticacheParameterGroupConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheParameterGroupExists("aws_elasticache_parameter_group.bar", &v), - resource.TestCheckResourceAttr("aws_elasticache_parameter_group.bar", "parameter.#", "0"), + testAccCheckAWSElasticacheParameterGroupExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "parameter.#", "0"), ), }, }, @@ -117,7 +119,7 @@ func TestAccAWSElasticacheParameterGroup_removeAllParameters(t *testing.T) { // This covers our custom logic handling for this situation. func TestAccAWSElasticacheParameterGroup_removeReservedMemoryParameter(t *testing.T) { var cacheParameterGroup1 elasticache.CacheParameterGroup - resourceName := "aws_elasticache_parameter_group.bar" + resourceName := "aws_elasticache_parameter_group.test" rName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) resource.ParallelTest(t, resource.TestCase{ @@ -152,7 +154,7 @@ func TestAccAWSElasticacheParameterGroup_removeReservedMemoryParameter(t *testin // This covers our custom logic handling for this situation. func TestAccAWSElasticacheParameterGroup_switchReservedMemoryParameter(t *testing.T) { var cacheParameterGroup1 elasticache.CacheParameterGroup - resourceName := "aws_elasticache_parameter_group.bar" + resourceName := "aws_elasticache_parameter_group.test" rName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) resource.ParallelTest(t, resource.TestCase{ @@ -187,7 +189,7 @@ func TestAccAWSElasticacheParameterGroup_switchReservedMemoryParameter(t *testin // This covers our custom logic handling for this situation. 
func TestAccAWSElasticacheParameterGroup_updateReservedMemoryParameter(t *testing.T) { var cacheParameterGroup1 elasticache.CacheParameterGroup - resourceName := "aws_elasticache_parameter_group.bar" + resourceName := "aws_elasticache_parameter_group.test" rName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) resource.ParallelTest(t, resource.TestCase{ @@ -220,6 +222,7 @@ func TestAccAWSElasticacheParameterGroup_updateReservedMemoryParameter(t *testin func TestAccAWSElasticacheParameterGroup_UppercaseName(t *testing.T) { var v elasticache.CacheParameterGroup + resourceName := "aws_elasticache_parameter_group.test" rInt := acctest.RandInt() rName := fmt.Sprintf("TF-ELASTIPG-%d", rInt) @@ -231,13 +234,12 @@ func TestAccAWSElasticacheParameterGroup_UppercaseName(t *testing.T) { { Config: testAccAWSElasticacheParameterGroupConfigParameter1(rName, "redis2.8", "appendonly", "yes"), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheParameterGroupExists("aws_elasticache_parameter_group.bar", &v), - resource.TestCheckResourceAttr( - "aws_elasticache_parameter_group.bar", "name", fmt.Sprintf("tf-elastipg-%d", rInt)), + testAccCheckAWSElasticacheParameterGroupExists(resourceName, &v), + resource.TestCheckResourceAttr("aws_elasticache_parameter_group.test", "name", fmt.Sprintf("tf-elastipg-%d", rInt)), ), }, { - ResourceName: "aws_elasticache_parameter_group.bar", + ResourceName: "aws_elasticache_parameter_group.test", ImportState: true, ImportStateVerify: true, }, @@ -247,7 +249,7 @@ func TestAccAWSElasticacheParameterGroup_UppercaseName(t *testing.T) { func TestAccAWSElasticacheParameterGroup_Description(t *testing.T) { var v elasticache.CacheParameterGroup - resourceName := "aws_elasticache_parameter_group.bar" + resourceName := "aws_elasticache_parameter_group.test" rName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) resource.ParallelTest(t, resource.TestCase{ @@ -356,7 +358,7 @@ func testAccCheckAWSElasticacheParameterGroupExists(n string, v *elasticache.Cac func testAccAWSElasticacheParameterGroupConfig(rName string) string { return fmt.Sprintf(` -resource "aws_elasticache_parameter_group" "bar" { +resource "aws_elasticache_parameter_group" "test" { family = "redis2.8" name = %q } @@ -365,7 +367,7 @@ resource "aws_elasticache_parameter_group" "bar" { func testAccAWSElasticacheParameterGroupConfigDescription(rName, description string) string { return fmt.Sprintf(` -resource "aws_elasticache_parameter_group" "bar" { +resource "aws_elasticache_parameter_group" "test" { description = %q family = "redis2.8" name = %q @@ -375,7 +377,7 @@ resource "aws_elasticache_parameter_group" "bar" { func testAccAWSElasticacheParameterGroupConfigParameter1(rName, family, parameterName1, parameterValue1 string) string { return fmt.Sprintf(` -resource "aws_elasticache_parameter_group" "bar" { +resource "aws_elasticache_parameter_group" "test" { family = %q name = %q @@ -389,7 +391,7 @@ resource "aws_elasticache_parameter_group" "bar" { func testAccAWSElasticacheParameterGroupConfigParameter2(rName, family, parameterName1, parameterValue1, parameterName2, parameterValue2 string) string { return fmt.Sprintf(` -resource "aws_elasticache_parameter_group" "bar" { +resource "aws_elasticache_parameter_group" "test" { family = %q name = %q From 7c0e5d8af968e667d1f7a34ac3b50de99f67d796 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 5 Jan 2021 14:23:07 -0800 Subject: [PATCH 0400/1212] Moves ElastiCache parameter expander and flattener to resource file 
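For context, the round trip these relocated helpers implement can be sketched as a standalone program (illustrative only, not part of the diff below; the helper bodies are simplified copies of the moved functions so the sketch compiles on its own):

```
package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

// Simplified copy of the relocated flattener: API Parameter -> schema-shaped map.
func flattenElastiCacheParameters(list []*elasticache.Parameter) []map[string]interface{} {
	result := make([]map[string]interface{}, 0, len(list))
	for _, p := range list {
		if p.ParameterValue != nil {
			result = append(result, map[string]interface{}{
				"name":  strings.ToLower(aws.StringValue(p.ParameterName)),
				"value": aws.StringValue(p.ParameterValue),
			})
		}
	}
	return result
}

// Simplified copy of the relocated expander: schema-shaped map -> API ParameterNameValue.
func expandElastiCacheParameters(configured []interface{}) []*elasticache.ParameterNameValue {
	parameters := make([]*elasticache.ParameterNameValue, len(configured))
	for i, raw := range configured {
		m := raw.(map[string]interface{})
		parameters[i] = &elasticache.ParameterNameValue{
			ParameterName:  aws.String(m["name"].(string)),
			ParameterValue: aws.String(m["value"].(string)),
		}
	}
	return parameters
}

func main() {
	// API response -> flat maps consumed by the "parameter" set in the schema.
	flat := flattenElastiCacheParameters([]*elasticache.Parameter{
		{ParameterName: aws.String("appendonly"), ParameterValue: aws.String("yes")},
	})

	// Schema data -> request objects for Modify/ResetCacheParameterGroup.
	req := expandElastiCacheParameters([]interface{}{flat[0]})
	fmt.Println(aws.StringValue(req[0].ParameterName), aws.StringValue(req[0].ParameterValue))
	// Output: appendonly yes
}
```

Co-locating the expander, flattener, and their unit tests with the resource keeps this schema/API mapping in one place.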
--- ...esource_aws_elasticache_parameter_group.go | 35 ++++++++++++ ...ce_aws_elasticache_parameter_group_test.go | 53 +++++++++++++++++++ aws/structure.go | 35 ------------ aws/structure_test.go | 53 ------------------- 4 files changed, 88 insertions(+), 88 deletions(-) diff --git a/aws/resource_aws_elasticache_parameter_group.go b/aws/resource_aws_elasticache_parameter_group.go index 0be28e769de..8779607ce13 100644 --- a/aws/resource_aws_elasticache_parameter_group.go +++ b/aws/resource_aws_elasticache_parameter_group.go @@ -338,3 +338,38 @@ func resourceAwsElasticacheParameterHash(v interface{}) int { return hashcode.String(buf.String()) } + +// Flattens an array of Parameters into a []map[string]interface{} +func flattenElastiCacheParameters(list []*elasticache.Parameter) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(list)) + for _, i := range list { + if i.ParameterValue != nil { + result = append(result, map[string]interface{}{ + "name": strings.ToLower(aws.StringValue(i.ParameterName)), + "value": aws.StringValue(i.ParameterValue), + }) + } + } + return result +} + +// Takes the result of flatmap.Expand for an array of parameters and +// returns Parameter API compatible objects +func expandElastiCacheParameters(configured []interface{}) []*elasticache.ParameterNameValue { + parameters := make([]*elasticache.ParameterNameValue, len(configured)) + + // Loop over our configured parameters and create + // an array of aws-sdk-go compatible objects + for i, pRaw := range configured { + parameters[i] = expandElastiCacheParameter(pRaw.(map[string]interface{})) + } + + return parameters +} + +func expandElastiCacheParameter(param map[string]interface{}) *elasticache.ParameterNameValue { + return &elasticache.ParameterNameValue{ + ParameterName: aws.String(param["name"].(string)), + ParameterValue: aws.String(param["value"].(string)), + } +} diff --git a/aws/resource_aws_elasticache_parameter_group_test.go b/aws/resource_aws_elasticache_parameter_group_test.go index 332d6b886ac..c55b9454178 100644 --- a/aws/resource_aws_elasticache_parameter_group_test.go +++ b/aws/resource_aws_elasticache_parameter_group_test.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "reflect" "testing" "github.com/aws/aws-sdk-go/aws" @@ -407,3 +408,55 @@ resource "aws_elasticache_parameter_group" "test" { } `, family, rName, parameterName1, parameterValue1, parameterName2, parameterValue2) } + +func TestFlattenElasticacheParameters(t *testing.T) { + cases := []struct { + Input []*elasticache.Parameter + Output []map[string]interface{} + }{ + { + Input: []*elasticache.Parameter{ + { + ParameterName: aws.String("activerehashing"), + ParameterValue: aws.String("yes"), + }, + }, + Output: []map[string]interface{}{ + { + "name": "activerehashing", + "value": "yes", + }, + }, + }, + } + + for _, tc := range cases { + output := flattenElastiCacheParameters(tc.Input) + if !reflect.DeepEqual(output, tc.Output) { + t.Fatalf("Got:\n\n%#v\n\nExpected:\n\n%#v", output, tc.Output) + } + } +} + +func TestExpandElasticacheParameters(t *testing.T) { + expanded := []interface{}{ + map[string]interface{}{ + "name": "activerehashing", + "value": "yes", + "apply_method": "immediate", + }, + } + parameters := expandElastiCacheParameters(expanded) + + expected := &elasticache.ParameterNameValue{ + ParameterName: aws.String("activerehashing"), + ParameterValue: aws.String("yes"), + } + + if !reflect.DeepEqual(parameters[0], expected) { + t.Fatalf( + "Got:\n\n%#v\n\nExpected:\n\n%#v\n", + parameters[0], + 
expected) + } +} diff --git a/aws/structure.go b/aws/structure.go index 10a7f881773..9abef368aa0 100644 --- a/aws/structure.go +++ b/aws/structure.go @@ -472,27 +472,6 @@ func expandOptionSetting(list []interface{}) []*rds.OptionSetting { return options } -// Takes the result of flatmap.Expand for an array of parameters and -// returns Parameter API compatible objects -func expandElastiCacheParameters(configured []interface{}) []*elasticache.ParameterNameValue { - parameters := make([]*elasticache.ParameterNameValue, 0, len(configured)) - - // Loop over our configured parameters and create - // an array of aws-sdk-go compatible objects - for _, pRaw := range configured { - data := pRaw.(map[string]interface{}) - - p := &elasticache.ParameterNameValue{ - ParameterName: aws.String(data["name"].(string)), - ParameterValue: aws.String(data["value"].(string)), - } - - parameters = append(parameters, p) - } - - return parameters -} - // Takes the result of flatmap.Expand for an array of parameters and // returns Parameter API compatible objects func expandNeptuneParameters(configured []interface{}) []*neptune.Parameter { @@ -961,20 +940,6 @@ func flattenRedshiftParameters(list []*redshift.Parameter) []map[string]interfac return result } -// Flattens an array of Parameters into a []map[string]interface{} -func flattenElastiCacheParameters(list []*elasticache.Parameter) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(list)) - for _, i := range list { - if i.ParameterValue != nil { - result = append(result, map[string]interface{}{ - "name": strings.ToLower(*i.ParameterName), - "value": *i.ParameterValue, - }) - } - } - return result -} - // Flattens an array of Parameters into a []map[string]interface{} func flattenNeptuneParameters(list []*neptune.Parameter) []map[string]interface{} { result := make([]map[string]interface{}, 0, len(list)) diff --git a/aws/structure_test.go b/aws/structure_test.go index 8c8a6cea243..c5585a5f971 100644 --- a/aws/structure_test.go +++ b/aws/structure_test.go @@ -10,7 +10,6 @@ import ( "github.com/aws/aws-sdk-go/service/autoscaling" "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/elasticache" "github.com/aws/aws-sdk-go/service/elb" "github.com/aws/aws-sdk-go/service/kinesis" "github.com/aws/aws-sdk-go/service/organizations" @@ -618,29 +617,6 @@ func TestExpandRedshiftParameters(t *testing.T) { } } -func TestExpandElasticacheParameters(t *testing.T) { - expanded := []interface{}{ - map[string]interface{}{ - "name": "activerehashing", - "value": "yes", - "apply_method": "immediate", - }, - } - parameters := expandElastiCacheParameters(expanded) - - expected := &elasticache.ParameterNameValue{ - ParameterName: aws.String("activerehashing"), - ParameterValue: aws.String("yes"), - } - - if !reflect.DeepEqual(parameters[0], expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - parameters[0], - expected) - } -} - func TestExpandStepAdjustments(t *testing.T) { expanded := []interface{}{ map[string]interface{}{ @@ -726,35 +702,6 @@ func TestFlattenRedshiftParameters(t *testing.T) { } } -func TestFlattenElasticacheParameters(t *testing.T) { - cases := []struct { - Input []*elasticache.Parameter - Output []map[string]interface{} - }{ - { - Input: []*elasticache.Parameter{ - { - ParameterName: aws.String("activerehashing"), - ParameterValue: aws.String("yes"), - }, - }, - Output: []map[string]interface{}{ - { - "name": "activerehashing", - "value": 
"yes", - }, - }, - }, - } - - for _, tc := range cases { - output := flattenElastiCacheParameters(tc.Input) - if !reflect.DeepEqual(output, tc.Output) { - t.Fatalf("Got:\n\n%#v\n\nExpected:\n\n%#v", output, tc.Output) - } - } -} - func TestExpandInstanceString(t *testing.T) { expected := []*elb.Instance{ From ecfa6fc6b07fa420919a4df5605ea7573100f666 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Tue, 5 Jan 2021 17:43:42 -0500 Subject: [PATCH 0401/1212] tests/resource/iam_access_key: Fix hardcoded region --- aws/resource_aws_iam_access_key_test.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_iam_access_key_test.go b/aws/resource_aws_iam_access_key_test.go index 3fe3d5ee951..8edcabbe003 100644 --- a/aws/resource_aws_iam_access_key_test.go +++ b/aws/resource_aws_iam_access_key_test.go @@ -8,6 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/iam" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -240,10 +241,10 @@ func TestSesSmtpPasswordFromSecretKeySigV4(t *testing.T) { Input string Expected string }{ - {"eu-central-1", "some+secret+key", "BMXhUYlu5Z3gSXVQORxlVa7XPaz91aGWdfHxvkOZdWZ2"}, - {"eu-central-1", "another+secret+key", "BBbphbrQmrKMx42d1N6+C7VINYEBGI5v9VsZeTxwskfh"}, - {"us-west-1", "some+secret+key", "BH+jbMzper5WwlwUar9E1ySBqHa9whi0GPo+sJ0mVYJj"}, - {"us-west-1", "another+secret+key", "BKVmjjMDFk/qqw8EROW99bjCS65PF8WKvK5bSr4Y6EqF"}, + {endpoints.EuCentral1RegionID, "some+secret+key", "BMXhUYlu5Z3gSXVQORxlVa7XPaz91aGWdfHxvkOZdWZ2"}, + {endpoints.EuCentral1RegionID, "another+secret+key", "BBbphbrQmrKMx42d1N6+C7VINYEBGI5v9VsZeTxwskfh"}, + {endpoints.UsWest1RegionID, "some+secret+key", "BH+jbMzper5WwlwUar9E1ySBqHa9whi0GPo+sJ0mVYJj"}, + {endpoints.UsWest1RegionID, "another+secret+key", "BKVmjjMDFk/qqw8EROW99bjCS65PF8WKvK5bSr4Y6EqF"}, } for _, tc := range cases { From eb402a786eb792543f38ce497659741e3d6c201b Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Tue, 5 Jan 2021 17:55:58 -0500 Subject: [PATCH 0402/1212] tests/resource/instance: Fix hardcoded region --- aws/resource_aws_instance_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_instance_test.go b/aws/resource_aws_instance_test.go index a95a1505937..2ac670fbad5 100644 --- a/aws/resource_aws_instance_test.go +++ b/aws/resource_aws_instance_test.go @@ -11,6 +11,7 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" @@ -1391,7 +1392,7 @@ func TestAccAWSInstance_rootBlockDeviceMismatch(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccRegionPreCheck(t, "us-west-2") }, //lintignore:AWSAT003 + PreCheck: func() { testAccPreCheck(t); testAccRegionPreCheck(t, endpoints.UsWest2RegionID) }, Providers: testAccProviders, CheckDestroy: testAccCheckInstanceDestroy, Steps: []resource.TestStep{ @@ -4397,7 +4398,7 @@ resource "aws_instance" "test" { func testAccInstanceConfigRootBlockDeviceMismatch(rName string) string { return testAccAwsInstanceVpcConfig(rName, false) + ` resource "aws_instance" "test" { - # This is an AMI in us-west-2 with RootDeviceName: "/dev/sda1"; actual root: "/dev/sda" + # 
This is an AMI in the endpoints.UsWest2RegionID region with RootDeviceName: "/dev/sda1"; actual root: "/dev/sda" ami = "ami-ef5b69df" # tflint-ignore: aws_instance_previous_type From c9bcbacf1592e3153359aa0bb49a39fc24bd2ca8 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 5 Jan 2021 15:47:00 -0800 Subject: [PATCH 0403/1212] Adds parameter value tests --- ...ce_aws_elasticache_parameter_group_test.go | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/aws/resource_aws_elasticache_parameter_group_test.go b/aws/resource_aws_elasticache_parameter_group_test.go index c55b9454178..6fe1b92e533 100644 --- a/aws/resource_aws_elasticache_parameter_group_test.go +++ b/aws/resource_aws_elasticache_parameter_group_test.go @@ -103,6 +103,14 @@ func TestAccAWSElasticacheParameterGroup_removeAllParameters(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheParameterGroupExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "parameter.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "parameter.*", map[string]string{ + "name": "appendonly", + "value": "yes", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "parameter.*", map[string]string{ + "name": "appendfsync", + "value": "always", + }), ), }, { @@ -133,6 +141,10 @@ func TestAccAWSElasticacheParameterGroup_removeReservedMemoryParameter(t *testin Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheParameterGroupExists(resourceName, &cacheParameterGroup1), resource.TestCheckResourceAttr(resourceName, "parameter.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "parameter.*", map[string]string{ + "name": "reserved-memory", + "value": "0", + }), ), }, { @@ -168,6 +180,10 @@ func TestAccAWSElasticacheParameterGroup_switchReservedMemoryParameter(t *testin Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheParameterGroupExists(resourceName, &cacheParameterGroup1), resource.TestCheckResourceAttr(resourceName, "parameter.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "parameter.*", map[string]string{ + "name": "reserved-memory", + "value": "0", + }), ), }, { @@ -175,6 +191,10 @@ func TestAccAWSElasticacheParameterGroup_switchReservedMemoryParameter(t *testin Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheParameterGroupExists(resourceName, &cacheParameterGroup1), resource.TestCheckResourceAttr(resourceName, "parameter.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "parameter.*", map[string]string{ + "name": "reserved-memory-percent", + "value": "25", + }), ), }, { @@ -203,6 +223,10 @@ func TestAccAWSElasticacheParameterGroup_updateReservedMemoryParameter(t *testin Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheParameterGroupExists(resourceName, &cacheParameterGroup1), resource.TestCheckResourceAttr(resourceName, "parameter.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "parameter.*", map[string]string{ + "name": "reserved-memory", + "value": "0", + }), ), }, { @@ -210,6 +234,10 @@ func TestAccAWSElasticacheParameterGroup_updateReservedMemoryParameter(t *testin Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheParameterGroupExists(resourceName, &cacheParameterGroup1), resource.TestCheckResourceAttr(resourceName, "parameter.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "parameter.*", map[string]string{ + "name": "reserved-memory", + "value": "1", + }), ), }, { From 857cc5ad7607af03d8fe1742b9bd2d3f7d5d0b70 
Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 5 Jan 2021 16:26:21 -0800 Subject: [PATCH 0404/1212] Naming cleanup --- ...esource_aws_elasticache_parameter_group.go | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/aws/resource_aws_elasticache_parameter_group.go b/aws/resource_aws_elasticache_parameter_group.go index 8779607ce13..aef8efca53c 100644 --- a/aws/resource_aws_elasticache_parameter_group.go +++ b/aws/resource_aws_elasticache_parameter_group.go @@ -74,14 +74,14 @@ func resourceAwsElasticacheParameterGroupCreate(d *schema.ResourceData, meta int Description: aws.String(d.Get("description").(string)), } - log.Printf("[DEBUG] Create Cache Parameter Group: %#v", createOpts) + log.Printf("[DEBUG] Create ElastiCache Parameter Group: %#v", createOpts) resp, err := conn.CreateCacheParameterGroup(&createOpts) if err != nil { - return fmt.Errorf("Error creating Cache Parameter Group: %s", err) + return fmt.Errorf("error creating ElastiCache Parameter Group: %w", err) } d.SetId(aws.StringValue(resp.CacheParameterGroup.CacheParameterGroupName)) - log.Printf("[INFO] Cache Parameter Group ID: %s", d.Id()) + log.Printf("[INFO] ElastiCache Parameter Group ID: %s", d.Id()) return resourceAwsElasticacheParameterGroupUpdate(d, meta) } @@ -100,7 +100,7 @@ func resourceAwsElasticacheParameterGroupRead(d *schema.ResourceData, meta inter if len(describeResp.CacheParameterGroups) != 1 || aws.StringValue(describeResp.CacheParameterGroups[0].CacheParameterGroupName) != d.Id() { - return fmt.Errorf("Unable to find Parameter Group: %#v", describeResp.CacheParameterGroups) + return fmt.Errorf("unable to find Parameter Group: %#v", describeResp.CacheParameterGroups) } d.Set("name", describeResp.CacheParameterGroups[0].CacheParameterGroupName) @@ -162,7 +162,7 @@ func resourceAwsElasticacheParameterGroupUpdate(d *schema.ResourceData, meta int ParameterNameValues: paramsToModify, } - log.Printf("[DEBUG] Reset Cache Parameter Group: %s", resetOpts) + log.Printf("[DEBUG] Reset ElastiCache Parameter Group: %s", resetOpts) err := resource.Retry(30*time.Second, func() *resource.RetryError { _, err := conn.ResetCacheParameterGroup(&resetOpts) if err != nil { @@ -201,7 +201,7 @@ func resourceAwsElasticacheParameterGroupUpdate(d *schema.ResourceData, meta int allConfiguredParameters := expandElastiCacheParameters(d.Get("parameter").(*schema.Set).List()) if err != nil { - return fmt.Errorf("error expanding parameter configuration: %s", err) + return fmt.Errorf("error expanding parameter configuration: %w", err) } for _, configuredParameter := range allConfiguredParameters { @@ -218,7 +218,7 @@ func resourceAwsElasticacheParameterGroupUpdate(d *schema.ResourceData, meta int // The reserved-memory-percentage parameter does not exist in redis2.6 and redis2.8 family := d.Get("family").(string) if family == "redis2.6" || family == "redis2.8" { - log.Printf("[WARN] Cannot reset Elasticache Parameter Group (%s) reserved-memory parameter with %s family", d.Id(), family) + log.Printf("[WARN] Cannot reset ElastiCache Parameter Group (%s) reserved-memory parameter with %s family", d.Id(), family) break } @@ -269,7 +269,7 @@ func resourceAwsElasticacheParameterGroupUpdate(d *schema.ResourceData, meta int } if err != nil { - return fmt.Errorf("Error resetting Cache Parameter Group: %s", err) + return fmt.Errorf("error resetting ElastiCache Parameter Group: %w", err) } } @@ -285,10 +285,10 @@ func resourceAwsElasticacheParameterGroupUpdate(d *schema.ResourceData, meta int 
ParameterNameValues: paramsToModify, } - log.Printf("[DEBUG] Modify Cache Parameter Group: %s", modifyOpts) + log.Printf("[DEBUG] Modify ElastiCache Parameter Group: %s", modifyOpts) _, err := conn.ModifyCacheParameterGroup(&modifyOpts) if err != nil { - return fmt.Errorf("Error modifying Cache Parameter Group: %s", err) + return fmt.Errorf("error modifying ElastiCache Parameter Group: %w", err) } } } @@ -324,7 +324,7 @@ func resourceAwsElasticacheParameterGroupDelete(d *schema.ResourceData, meta int } if err != nil { - return fmt.Errorf("error deleting Elasticache Parameter Group (%s): %s", d.Id(), err) + return fmt.Errorf("error deleting ElastiCache Parameter Group (%s): %w", d.Id(), err) } return nil From f438dfcfaf562f65c431c2837de5fcc2d614cdac Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Tue, 5 Jan 2021 21:01:58 -0500 Subject: [PATCH 0405/1212] resource/opsworks: Fix hardcoded regions --- aws/resource_aws_opsworks_instance_test.go | 10 ++++------ aws/resource_aws_opsworks_stack.go | 2 +- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/aws/resource_aws_opsworks_instance_test.go b/aws/resource_aws_opsworks_instance_test.go index 0950e9d0e7f..faeabf6d69d 100644 --- a/aws/resource_aws_opsworks_instance_test.go +++ b/aws/resource_aws_opsworks_instance_test.go @@ -16,6 +16,7 @@ func TestAccAWSOpsworksInstance_basic(t *testing.T) { stackName := fmt.Sprintf("tf-%d", acctest.RandInt()) var opsinst opsworks.Instance resourceName := "aws_opsworks_instance.tf-acc" + dataSourceName := "data.aws_availability_zones.available" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(opsworks.EndpointsID, t) }, @@ -34,9 +35,9 @@ func TestAccAWSOpsworksInstance_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "install_updates_on_boot", "true"), resource.TestCheckResourceAttr(resourceName, "architecture", "x86_64"), resource.TestCheckResourceAttr(resourceName, "tenancy", "default"), - resource.TestCheckResourceAttr(resourceName, "os", "Amazon Linux 2016.09"), // inherited from opsworks_stack_test - resource.TestCheckResourceAttr(resourceName, "root_device_type", "ebs"), // inherited from opsworks_stack_test - resource.TestCheckResourceAttr(resourceName, "availability_zone", "us-west-2a"), // inherited from opsworks_stack_test + resource.TestCheckResourceAttr(resourceName, "os", "Amazon Linux 2016.09"), // inherited from opsworks_stack_test + resource.TestCheckResourceAttr(resourceName, "root_device_type", "ebs"), // inherited from opsworks_stack_test + resource.TestCheckResourceAttrPair(resourceName, "availability_zone", dataSourceName, "names.0"), // inherited from opsworks_stack_test ), }, { @@ -146,9 +147,6 @@ func testAccCheckAWSOpsworksInstanceAttributes( if *opsinst.Status != "stopped" && *opsinst.Status != "requested" { return fmt.Errorf("Unexpected request status: %s", *opsinst.Status) } - if *opsinst.AvailabilityZone != "us-west-2a" { - return fmt.Errorf("Unexpected availability zone: %s", *opsinst.AvailabilityZone) - } if *opsinst.Architecture != "x86_64" { return fmt.Errorf("Unexpected architecture: %s", *opsinst.Architecture) } diff --git a/aws/resource_aws_opsworks_stack.go b/aws/resource_aws_opsworks_stack.go index 28af6248ac5..2bd7b28001b 100644 --- a/aws/resource_aws_opsworks_stack.go +++ b/aws/resource_aws_opsworks_stack.go @@ -308,7 +308,7 @@ func resourceAwsOpsworksStackRead(d *schema.ResourceData, meta interface{}) erro // If we haven't already, try us-east-1, legacy connection notFound++ 
var connErr error - client, connErr = opsworksConnForRegion("us-east-1", meta) + client, connErr = opsworksConnForRegion("us-east-1", meta) //lintignore:AWSAT003 if connErr != nil { return connErr } From a46304c66674da0b817820884ad208ddc403df8b Mon Sep 17 00:00:00 2001 From: Adrian Petrescu Date: Wed, 6 Jan 2021 09:03:43 -0500 Subject: [PATCH 0406/1212] resource/aws_emr_cluster: Remove from state instead of error on long terminated cluster (#16924) After a certain amount of time, destroyed EMR clusters completely disappear from AWS API output. They don't just display some deleted state, they're just gone. Currently, if you have one of these clusters in a statefile and you never update it in between the time when it was deleted and when it is purged from the API, terraform crashes the next time you try to run it. This patch fixes that. Output from acceptance testing: ``` --- PASS: TestAccAWSEMRCluster_configurationsJson (414.06s) --- PASS: TestAccAWSEMRCluster_disappears (458.00s) --- PASS: TestAccAWSEMRCluster_Kerberos_ClusterDedicatedKdc (458.66s) --- PASS: TestAccAWSEMRCluster_additionalInfo (468.10s) --- PASS: TestAccAWSEMRCluster_security_config (543.09s) --- PASS: TestAccAWSEMRCluster_CoreInstanceGroup_InstanceCount (551.34s) --- PASS: TestAccAWSEMRCluster_basic (611.02s) --- PASS: TestAccAWSEMRCluster_CoreInstanceGroup_AutoscalingPolicy (643.71s) --- PASS: TestAccAWSEMRCluster_Step_Multiple (682.41s) --- PASS: TestAccAWSEMRCluster_keepJob (413.06s) --- PASS: TestAccAWSEMRCluster_terminationProtected (466.42s) --- PASS: TestAccAWSEMRCluster_Step_Basic (922.25s) --- PASS: TestAccAWSEMRCluster_CoreInstanceGroup_InstanceType (922.69s) --- PASS: TestAccAWSEMRCluster_Ec2Attributes_DefaultManagedSecurityGroups (923.50s) --- PASS: TestAccAWSEMRCluster_MasterInstanceGroup_BidPrice (958.49s) --- PASS: TestAccAWSEMRCluster_Step_ConfigMode (966.07s) --- PASS: TestAccAWSEMRCluster_MasterInstanceGroup_InstanceType (987.75s) --- PASS: TestAccAWSEMRCluster_MasterInstanceGroup_Name (989.32s) --- PASS: TestAccAWSEMRCluster_CoreInstanceGroup_BidPrice (1020.07s) --- PASS: TestAccAWSEMRCluster_CoreInstanceGroup_Name (1030.37s) --- PASS: TestAccAWSEMRCluster_s3Logging (562.82s) --- PASS: TestAccAWSEMRCluster_step_concurrency_level (471.16s) --- PASS: TestAccAWSEMRCluster_ebs_config (449.44s) --- PASS: TestAccAWSEMRCluster_MasterInstanceGroup_InstanceCount (1117.85s) --- PASS: TestAccAWSEMRCluster_custom_ami_id (475.35s) --- PASS: TestAccAWSEMRCluster_visibleToAllUsers (700.80s) --- PASS: TestAccAWSEMRCluster_tags (734.40s) --- PASS: TestAccAWSEMRCluster_root_volume_size (754.76s) --- PASS: TestAccAWSEMRCluster_instance_fleet_master_only (456.76s) --- PASS: TestAccAWSEMRCluster_bootstrap_ordering (1343.54s) --- PASS: TestAccAWSEMRCluster_instance_fleet (563.45s) ``` --- aws/resource_aws_emr_cluster.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/aws/resource_aws_emr_cluster.go b/aws/resource_aws_emr_cluster.go index 0f6810c7660..08936cba639 100644 --- a/aws/resource_aws_emr_cluster.go +++ b/aws/resource_aws_emr_cluster.go @@ -978,6 +978,18 @@ func resourceAwsEMRClusterRead(d *schema.ResourceData, meta interface{}) error { resp, err := emrconn.DescribeCluster(req) if err != nil { + // After a Cluster has been terminated for an indeterminate period of time, + // the EMR API will return this type of error: + // InvalidRequestException: Cluster id 'j-XXX' is not valid. 
+ // If this causes issues with masking other legitimate request errors, the + // handling should be updated for deeper inspection of the special error type + // which includes an accurate error code: + // ErrorCode: "NoSuchCluster", + if isAWSErr(err, emr.ErrCodeInvalidRequestException, "is not valid") { + log.Printf("[DEBUG] EMR Cluster (%s) not found", d.Id()) + d.SetId("") + return nil + } return fmt.Errorf("Error reading EMR cluster: %s", err) } From f96188d63d627488c25b7b0bb9771ce8f4754b7e Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 6 Jan 2021 09:05:23 -0500 Subject: [PATCH 0407/1212] Update CHANGELOG for #16924 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 30c12467b7b..0195dca82c8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ ENHANCEMENTS BUG FIXES +* resource/aws_emr_cluster: Remove from state instead of returning an error on long terminated cluster [GH-16924] * resource/aws_glue_catalog_table: Glue table partition keys should be set to empty list instead of being unset [GH-16727] * resource/aws_imagebuilder_distribution_configuration: Remove `user_ids` argument maximum limit [GH-16905] * resource/aws_transfer_user: Update `user_name` argument validation to support 100 characters [GH-16938] From 948f5e1ad1b695be9a10c1834f7e9149adf1f934 Mon Sep 17 00:00:00 2001 From: Francis Laforge <13852265+FrancisLfg@users.noreply.github.com> Date: Wed, 6 Jan 2021 19:34:23 +0100 Subject: [PATCH 0408/1212] resource/aws_route53_zone: Add length validations for `delegation_set_id` and `name` arguments (#12340) Output from acceptance testing: ``` --- PASS: TestAccAWSRoute53Zone_basic (68.77s) --- PASS: TestAccAWSRoute53Zone_Comment (81.81s) --- PASS: TestAccAWSRoute53Zone_DelegationSetID (70.15s) --- PASS: TestAccAWSRoute53Zone_disappears (60.90s) --- PASS: TestAccAWSRoute53Zone_ForceDestroy (189.58s) --- PASS: TestAccAWSRoute53Zone_ForceDestroy_TrailingPeriod (189.64s) --- PASS: TestAccAWSRoute53Zone_multiple (68.79s) --- PASS: TestAccAWSRoute53Zone_Tags (114.64s) --- PASS: TestAccAWSRoute53Zone_VPC_Multiple (182.20s) --- PASS: TestAccAWSRoute53Zone_VPC_Single (111.32s) --- PASS: TestAccAWSRoute53Zone_VPC_Updates (233.79s) --- PASS: TestAccAWSRoute53ZoneDataSource_id (79.59s) --- PASS: TestAccAWSRoute53ZoneDataSource_name (70.23s) --- PASS: TestAccAWSRoute53ZoneDataSource_serviceDiscovery (124.80s) --- PASS: TestAccAWSRoute53ZoneDataSource_tags (119.05s) --- PASS: TestAccAWSRoute53ZoneDataSource_vpc (133.88s) ``` --- aws/resource_aws_route53_zone.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_route53_zone.go b/aws/resource_aws_route53_zone.go index 9b5b28bd1dd..6ba07167395 100644 --- a/aws/resource_aws_route53_zone.go +++ b/aws/resource_aws_route53_zone.go @@ -33,10 +33,11 @@ func resourceAwsRoute53Zone() *schema.Resource { // returned from API, no longer requiring custom DiffSuppressFunc; // instead a StateFunc allows input to be provided // with or without the trailing period - Type: schema.TypeString, - Required: true, - ForceNew: true, - StateFunc: trimTrailingPeriod, + Type: schema.TypeString, + Required: true, + ForceNew: true, + StateFunc: trimTrailingPeriod, + ValidateFunc: validation.StringLenBetween(1, 1024), }, "comment": { @@ -77,6 +78,7 @@ func resourceAwsRoute53Zone() *schema.Resource { Optional: true, ForceNew: true, ConflictsWith: []string{"vpc"}, + ValidateFunc: validation.StringLenBetween(0, 32), }, "name_servers": { From 
bb7830a6b42dbb45c55280b9b3ba920aefbb6d80 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 6 Jan 2021 13:36:30 -0500 Subject: [PATCH 0409/1212] Update CHANGELOG for #12340 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0195dca82c8..dd7b1b70017 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ ENHANCEMENTS * resource/aws_glue_crawler: add plan time validations to `name`, `description` and `table_prefix` [GH-16714] * resource/aws_kinesis_stream: Update `retention_period` argument plan-time validation to include up to 8760 hours [GH-16608] * resource/aws_msk_cluster: Support `PER_TOPIC_PER_PARTITION` value for `enhanced_monitoring` argument plan-time validation [GH-16914] +* resource/aws_route53_zone: Add length validations for `delegation_set_id` and `name` arguments [GH-12340] * resource/aws_vpc_endpoint_service: Make `private_dns_name` configurable and add `private_dns_name_configuration` attribute [GH-16495] BUG FIXES From 0c78f6c477976aacc9f43a306ae844f2c061ffa2 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 6 Jan 2021 14:41:36 -0800 Subject: [PATCH 0410/1212] Update CHANGELOG for #16829 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index dd7b1b70017..75895ab63fa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ ENHANCEMENTS * data-source/aws_imagebuilder_image_recipe: Add `working_directory` attribute [GH-16947] * data-source/aws_elasticache_replication_group: Add reader_endpoint_address attribute [GH-9979] * resource/aws_elasticache_replication_group: Add reader_endpoint_address attribute [GH-9979] +* resource/aws_elasticache_replication_group: Allows configuring `replicas_per_node_group` for "Redis (cluster mode disabled)" [GH-16829] * resource/aws_imagebuilder_image_recipe: Add `working_directory` argument [GH-16947] * resource/aws_glue_crawler: add support for `lineage_configuration` and `recrawl_policy` [GH-16714] * resource/aws_glue_crawler: add plan time validations to `name`, `description` and `table_prefix` [GH-16714] From f728fbc8c3d532bcac1f164b2394c7e4fc4570b6 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 6 Jan 2021 16:18:12 -0800 Subject: [PATCH 0411/1212] No longer removes and adds modified parameters --- ...esource_aws_elasticache_parameter_group.go | 60 ++++++--- ...ce_aws_elasticache_parameter_group_test.go | 115 ++++++++++++++++++ 2 files changed, 160 insertions(+), 15 deletions(-) diff --git a/aws/resource_aws_elasticache_parameter_group.go b/aws/resource_aws_elasticache_parameter_group.go index aef8efca53c..8cce3534f94 100644 --- a/aws/resource_aws_elasticache_parameter_group.go +++ b/aws/resource_aws_elasticache_parameter_group.go @@ -128,23 +128,11 @@ func resourceAwsElasticacheParameterGroupUpdate(d *schema.ResourceData, meta int if d.HasChange("parameter") { o, n := d.GetChange("parameter") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - - toRemove := expandElastiCacheParameters(os.Difference(ns).List()) - - log.Printf("[DEBUG] Parameters to remove: %#v", toRemove) + toRemove, toAdd := elastiCacheParameterChanges(o, n) - toAdd := expandElastiCacheParameters(ns.Difference(os).List()) + log.Printf("[WARN] Parameters to remove: %#v", toRemove) - log.Printf("[DEBUG] Parameters to add: %#v", toAdd) + log.Printf("[WARN] Parameters to add or update: %#v", toAdd) // We can only modify 20 parameters at a time, so walk them until // we've 
got them all. @@ -339,6 +327,48 @@ func resourceAwsElasticacheParameterHash(v interface{}) int { return hashcode.String(buf.String()) } +func elastiCacheParameterChanges(o, n interface{}) (remove, addOrUpdate []*elasticache.ParameterNameValue) { + if o == nil { + o = new(schema.Set) + } + if n == nil { + n = new(schema.Set) + } + + os := o.(*schema.Set) + ns := n.(*schema.Set) + + om := make(map[string]*elasticache.ParameterNameValue, os.Len()) + for _, raw := range os.List() { + param := raw.(map[string]interface{}) + om[param["name"].(string)] = expandElastiCacheParameter(param) + } + nm := make(map[string]*elasticache.ParameterNameValue, len(addOrUpdate)) + for _, raw := range ns.List() { + param := raw.(map[string]interface{}) + nm[param["name"].(string)] = expandElastiCacheParameter(param) + } + + // Remove: key is in old, but not in new + remove = make([]*elasticache.ParameterNameValue, 0, os.Len()) + for k := range om { + if _, ok := nm[k]; !ok { + remove = append(remove, om[k]) + } + } + + // Add or Update: key is in new, but not in old or has changed value + addOrUpdate = make([]*elasticache.ParameterNameValue, 0, ns.Len()) + for k, nv := range nm { + ov, ok := om[k] + if !ok || ok && (aws.StringValue(nv.ParameterValue) != aws.StringValue(ov.ParameterValue)) { + addOrUpdate = append(addOrUpdate, nm[k]) + } + } + + return remove, addOrUpdate +} + // Flattens an array of Parameters into a []map[string]interface{} func flattenElastiCacheParameters(list []*elasticache.Parameter) []map[string]interface{} { result := make([]map[string]interface{}, 0, len(list)) diff --git a/aws/resource_aws_elasticache_parameter_group_test.go b/aws/resource_aws_elasticache_parameter_group_test.go index 6fe1b92e533..63495bf6080 100644 --- a/aws/resource_aws_elasticache_parameter_group_test.go +++ b/aws/resource_aws_elasticache_parameter_group_test.go @@ -10,6 +10,7 @@ import ( "github.com/aws/aws-sdk-go/service/elasticache" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) @@ -488,3 +489,117 @@ func TestExpandElasticacheParameters(t *testing.T) { expected) } } + +func TestElastiCacheParameterChanges(t *testing.T) { + cases := []struct { + Name string + Old *schema.Set + New *schema.Set + ExpectedRemove []*elasticache.ParameterNameValue + ExpectedAddOrUpdate []*elasticache.ParameterNameValue + }{ + { + Name: "Empty", + Old: new(schema.Set), + New: new(schema.Set), + ExpectedRemove: []*elasticache.ParameterNameValue{}, + ExpectedAddOrUpdate: []*elasticache.ParameterNameValue{}, + }, + { + Name: "Remove all", + Old: schema.NewSet(resourceAwsElasticacheParameterHash, []interface{}{ + map[string]interface{}{ + "name": "reserved-memory", + "value": "0", + }, + }), + New: new(schema.Set), + ExpectedRemove: []*elasticache.ParameterNameValue{ + { + ParameterName: aws.String("reserved-memory"), + ParameterValue: aws.String("0"), + }, + }, + ExpectedAddOrUpdate: []*elasticache.ParameterNameValue{}, + }, + { + Name: "No change", + Old: schema.NewSet(resourceAwsElasticacheParameterHash, []interface{}{ + map[string]interface{}{ + "name": "reserved-memory", + "value": "0", + }, + }), + New: schema.NewSet(resourceAwsElasticacheParameterHash, []interface{}{ + map[string]interface{}{ + "name": "reserved-memory", + "value": "0", + }, + }), + ExpectedRemove: []*elasticache.ParameterNameValue{}, + ExpectedAddOrUpdate: 
[]*elasticache.ParameterNameValue{}, + }, + { + Name: "Remove partial", + Old: schema.NewSet(resourceAwsElasticacheParameterHash, []interface{}{ + map[string]interface{}{ + "name": "reserved-memory", + "value": "0", + }, + map[string]interface{}{ + "name": "appendonly", + "value": "yes", + }, + }), + New: schema.NewSet(resourceAwsElasticacheParameterHash, []interface{}{ + map[string]interface{}{ + "name": "appendonly", + "value": "yes", + }, + }), + ExpectedRemove: []*elasticache.ParameterNameValue{ + { + ParameterName: aws.String("reserved-memory"), + ParameterValue: aws.String("0"), + }, + }, + ExpectedAddOrUpdate: []*elasticache.ParameterNameValue{}, + }, + { + Name: "Add to existing", + Old: schema.NewSet(resourceAwsElasticacheParameterHash, []interface{}{ + map[string]interface{}{ + "name": "appendonly", + "value": "yes", + }, + }), + New: schema.NewSet(resourceAwsElasticacheParameterHash, []interface{}{ + map[string]interface{}{ + "name": "appendonly", + "value": "yes", + }, + map[string]interface{}{ + "name": "appendfsync", + "value": "always", + }, + }), + ExpectedRemove: []*elasticache.ParameterNameValue{}, + ExpectedAddOrUpdate: []*elasticache.ParameterNameValue{ + { + ParameterName: aws.String("appendfsync"), + ParameterValue: aws.String("always"), + }, + }, + }, + } + + for _, tc := range cases { + remove, addOrUpdate := elastiCacheParameterChanges(tc.Old, tc.New) + if !reflect.DeepEqual(remove, tc.ExpectedRemove) { + t.Errorf("Case %q: Remove did not match\n%#v\n\nGot:\n%#v", tc.Name, tc.ExpectedRemove, remove) + } + if !reflect.DeepEqual(addOrUpdate, tc.ExpectedAddOrUpdate) { + t.Errorf("Case %q: AddOrUpdate did not match\n%#v\n\nGot:\n%#v", tc.Name, tc.ExpectedAddOrUpdate, addOrUpdate) + } + } +} From 16052f162566400c954dc63835c30b9328fb82b5 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 6 Jan 2021 17:16:04 -0800 Subject: [PATCH 0412/1212] Correctly handles error returned in Commercial partition and uses correct parameter name for workaround --- ...esource_aws_elasticache_parameter_group.go | 123 ++++++++---------- ...ce_aws_elasticache_parameter_group_test.go | 55 +++++++- 2 files changed, 106 insertions(+), 72 deletions(-) diff --git a/aws/resource_aws_elasticache_parameter_group.go b/aws/resource_aws_elasticache_parameter_group.go index 8cce3534f94..4cb198f79cd 100644 --- a/aws/resource_aws_elasticache_parameter_group.go +++ b/aws/resource_aws_elasticache_parameter_group.go @@ -10,6 +10,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/terraform-providers/terraform-provider-aws/aws/internal/hashcode" @@ -130,13 +131,12 @@ func resourceAwsElasticacheParameterGroupUpdate(d *schema.ResourceData, meta int o, n := d.GetChange("parameter") toRemove, toAdd := elastiCacheParameterChanges(o, n) - log.Printf("[WARN] Parameters to remove: %#v", toRemove) - - log.Printf("[WARN] Parameters to add or update: %#v", toAdd) + log.Printf("[DEBUG] Parameters to remove: %#v", toRemove) + log.Printf("[DEBUG] Parameters to add or update: %#v", toAdd) // We can only modify 20 parameters at a time, so walk them until // we've got them all. 
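+ // (Worked example of the batching, as an aside: with 45 parameters queued for
+ // removal, each pass of the loop below slices off at most maxParams entries,
+ // so three ResetCacheParameterGroup calls are issued covering 20, 20, and
+ // finally 5 ParameterNameValues.)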
- maxParams := 20 + const maxParams = 20 for len(toRemove) > 0 { var paramsToModify []*elasticache.ParameterNameValue @@ -145,32 +145,24 @@ func resourceAwsElasticacheParameterGroupUpdate(d *schema.ResourceData, meta int } else { paramsToModify, toRemove = toRemove[:maxParams], toRemove[maxParams:] } - resetOpts := elasticache.ResetCacheParameterGroupInput{ - CacheParameterGroupName: aws.String(d.Get("name").(string)), - ParameterNameValues: paramsToModify, - } - log.Printf("[DEBUG] Reset ElastiCache Parameter Group: %s", resetOpts) - err := resource.Retry(30*time.Second, func() *resource.RetryError { - _, err := conn.ResetCacheParameterGroup(&resetOpts) - if err != nil { - if isAWSErr(err, "InvalidCacheParameterGroupState", " has pending changes") { - return resource.RetryableError(err) - } - return resource.NonRetryableError(err) - } - return nil - }) + err := resourceAwsElastiCacheResetParameterGroup(conn, d.Get("name").(string), paramsToModify) // When attempting to reset the reserved-memory parameter, the API - // can return the below 500 error, which causes the AWS Go SDK to - // automatically retry and hence timeout resource.Retry(): + // can return two types of error. + // + // In the commercial partition, it will return a 400 error with: + // InvalidParameterValue: Parameter reserved-memory doesn't exist + // + // In the GovCloud partition it will return the below 500 error, + // which causes the AWS Go SDK to automatically retry and timeout: // InternalFailure: An internal error has occurred. Please try your query again at a later time. + // // Instead of hardcoding the reserved-memory parameter removal // above, which may become out of date, here we add logic to // workaround this API behavior - if isResourceTimeoutError(err) { + if isResourceTimeoutError(err) || tfawserr.ErrMessageContains(err, elasticache.ErrCodeInvalidParameterValueException, "Parameter reserved-memory doesn't exist") { for i, paramToModify := range paramsToModify { if aws.StringValue(paramToModify.ParameterName) != "reserved-memory" { continue @@ -181,19 +173,13 @@ func resourceAwsElasticacheParameterGroupUpdate(d *schema.ResourceData, meta int paramsToModify = append(paramsToModify[:i], paramsToModify[i+1:]...) // If we are only trying to remove reserved-memory and not perform - // an update to reserved-memory or reserved-memory-percentage, we + // an update to reserved-memory or reserved-memory-percent, we // can attempt to workaround the API issue by switching it to - // reserved-memory-percentage first then reset that temporary parameter. + // reserved-memory-percent first then reset that temporary parameter. 
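+ // (Sketch of the intended call sequence, inferred from the helpers used below:
+ // ModifyCacheParameterGroup first sets reserved-memory-percent = "0", then
+ // ResetCacheParameterGroup resets reserved-memory-percent, leaving both memory
+ // parameters back at their engine defaults.)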
tryReservedMemoryPercentageWorkaround := true
-
- allConfiguredParameters := expandElastiCacheParameters(d.Get("parameter").(*schema.Set).List())
- if err != nil {
- return fmt.Errorf("error expanding parameter configuration: %w", err)
- }
-
- for _, configuredParameter := range allConfiguredParameters {
- if aws.StringValue(configuredParameter.ParameterName) == "reserved-memory" || aws.StringValue(configuredParameter.ParameterName) == "reserved-memory-percentage" {
+ for _, configuredParameter := range toAdd {
+ if aws.StringValue(configuredParameter.ParameterName) == "reserved-memory-percent" {
 tryReservedMemoryPercentageWorkaround = false
 break
 }
@@ -203,43 +189,28 @@ func resourceAwsElasticacheParameterGroupUpdate(d *schema.ResourceData, meta int
 break
 }
- // The reserved-memory-percentage parameter does not exist in redis2.6 and redis2.8
+ // The reserved-memory-percent parameter does not exist in redis2.6 and redis2.8
 family := d.Get("family").(string)
 if family == "redis2.6" || family == "redis2.8" {
 log.Printf("[WARN] Cannot reset ElastiCache Parameter Group (%s) reserved-memory parameter with %s family", d.Id(), family)
 break
 }
- modifyInput := &elasticache.ModifyCacheParameterGroupInput{
- CacheParameterGroupName: aws.String(d.Get("name").(string)),
- ParameterNameValues: []*elasticache.ParameterNameValue{
- {
- ParameterName: aws.String("reserved-memory-percentage"),
- ParameterValue: aws.String("0"),
- },
+ workaroundParams := []*elasticache.ParameterNameValue{
+ {
+ ParameterName: aws.String("reserved-memory-percent"),
+ ParameterValue: aws.String("0"),
 },
 }
- _, err = conn.ModifyCacheParameterGroup(modifyInput)
-
+ err = resourceAwsElastiCacheModifyParameterGroup(conn, d.Get("name").(string), workaroundParams)
 if err != nil {
- log.Printf("[WARN] Error attempting reserved-memory workaround to switch to reserved-memory-percentage: %s", err)
+ log.Printf("[WARN] Error attempting reserved-memory workaround to switch to reserved-memory-percent: %s", err)
 break
 }
- resetInput := &elasticache.ResetCacheParameterGroupInput{
- CacheParameterGroupName: aws.String(d.Get("name").(string)),
- ParameterNameValues: []*elasticache.ParameterNameValue{
- {
- ParameterName: aws.String("reserved-memory-percentage"),
- ParameterValue: aws.String("0"),
- },
- },
- }
-
- _, err = conn.ResetCacheParameterGroup(resetInput)
-
+ err = resourceAwsElastiCacheResetParameterGroup(conn, d.Get("name").(string), workaroundParams)
 if err != nil {
- log.Printf("[WARN] Error attempting reserved-memory workaround to reset reserved-memory-percentage: %s", err)
+ log.Printf("[WARN] Error attempting reserved-memory workaround to reset reserved-memory-percent: %s", err)
 }
 break
@@ -247,12 +218,7 @@ func resourceAwsElasticacheParameterGroupUpdate(d *schema.ResourceData, meta int
 // Retry any remaining parameter resets with reserved-memory potentially removed
 if len(paramsToModify) > 0 {
- resetOpts = elasticache.ResetCacheParameterGroupInput{
- CacheParameterGroupName: aws.String(d.Get("name").(string)),
- ParameterNameValues: paramsToModify,
- }
- // Reset top level error with potentially any new errors
- _, err = conn.ResetCacheParameterGroup(&resetOpts)
+ err = resourceAwsElastiCacheResetParameterGroup(conn, d.Get("name").(string), paramsToModify)
 }
 }
@@ -268,13 +234,8 @@ func resourceAwsElasticacheParameterGroupUpdate(d *schema.ResourceData, meta int
 } else {
 paramsToModify, toAdd = toAdd[:maxParams], toAdd[maxParams:]
 }
- modifyOpts := elasticache.ModifyCacheParameterGroupInput{
- CacheParameterGroupName:
aws.String(d.Get("name").(string)), - ParameterNameValues: paramsToModify, - } - log.Printf("[DEBUG] Modify ElastiCache Parameter Group: %s", modifyOpts) - _, err := conn.ModifyCacheParameterGroup(&modifyOpts) + err := resourceAwsElastiCacheModifyParameterGroup(conn, d.Get("name").(string), paramsToModify) if err != nil { return fmt.Errorf("error modifying ElastiCache Parameter Group: %w", err) } @@ -369,6 +330,32 @@ func elastiCacheParameterChanges(o, n interface{}) (remove, addOrUpdate []*elast return remove, addOrUpdate } +func resourceAwsElastiCacheResetParameterGroup(conn *elasticache.ElastiCache, name string, parameters []*elasticache.ParameterNameValue) error { + input := elasticache.ResetCacheParameterGroupInput{ + CacheParameterGroupName: aws.String(name), + ParameterNameValues: parameters, + } + return resource.Retry(30*time.Second, func() *resource.RetryError { + _, err := conn.ResetCacheParameterGroup(&input) + if err != nil { + if tfawserr.ErrMessageContains(err, elasticache.ErrCodeInvalidCacheParameterGroupStateFault, " has pending changes") { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) +} + +func resourceAwsElastiCacheModifyParameterGroup(conn *elasticache.ElastiCache, name string, parameters []*elasticache.ParameterNameValue) error { + input := elasticache.ModifyCacheParameterGroupInput{ + CacheParameterGroupName: aws.String(name), + ParameterNameValues: parameters, + } + _, err := conn.ModifyCacheParameterGroup(&input) + return err +} + // Flattens an array of Parameters into a []map[string]interface{} func flattenElastiCacheParameters(list []*elasticache.Parameter) []map[string]interface{} { result := make([]map[string]interface{}, 0, len(list)) diff --git a/aws/resource_aws_elasticache_parameter_group_test.go b/aws/resource_aws_elasticache_parameter_group_test.go index 63495bf6080..cd8c5599006 100644 --- a/aws/resource_aws_elasticache_parameter_group_test.go +++ b/aws/resource_aws_elasticache_parameter_group_test.go @@ -125,9 +125,9 @@ func TestAccAWSElasticacheParameterGroup_removeAllParameters(t *testing.T) { }) } -// The API throws 500 errors when attempting to reset the reserved-memory parameter. +// The API returns errors when attempting to reset the reserved-memory parameter. // This covers our custom logic handling for this situation. -func TestAccAWSElasticacheParameterGroup_removeReservedMemoryParameter(t *testing.T) { +func TestAccAWSElasticacheParameterGroup_removeReservedMemoryParameter_AllParameters(t *testing.T) { var cacheParameterGroup1 elasticache.CacheParameterGroup resourceName := "aws_elasticache_parameter_group.test" rName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) @@ -164,7 +164,54 @@ func TestAccAWSElasticacheParameterGroup_removeReservedMemoryParameter(t *testin }) } -// The API throws 500 errors when attempting to reset the reserved-memory parameter. +// The API returns errors when attempting to reset the reserved-memory parameter. +// This covers our custom logic handling for this situation. 
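+// The config below first provisions reserved-memory alongside tcp-keepalive and
+// then removes only reserved-memory, which is expected to exercise the fallback
+// path: the failed reset is retried via the reserved-memory-percent switch while
+// the remaining tcp-keepalive parameter is preserved.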
+func TestAccAWSElasticacheParameterGroup_removeReservedMemoryParameter_RemainingParameters(t *testing.T) { + var cacheParameterGroup1 elasticache.CacheParameterGroup + resourceName := "aws_elasticache_parameter_group.test" + rName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSElasticacheParameterGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSElasticacheParameterGroupConfigParameter2(rName, "redis3.2", "reserved-memory", "0", "tcp-keepalive", "360"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheParameterGroupExists(resourceName, &cacheParameterGroup1), + resource.TestCheckResourceAttr(resourceName, "parameter.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "parameter.*", map[string]string{ + "name": "reserved-memory", + "value": "0", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "parameter.*", map[string]string{ + "name": "tcp-keepalive", + "value": "360", + }), + ), + }, + { + Config: testAccAWSElasticacheParameterGroupConfigParameter1(rName, "redis3.2", "tcp-keepalive", "360"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheParameterGroupExists(resourceName, &cacheParameterGroup1), + resource.TestCheckResourceAttr(resourceName, "parameter.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "parameter.*", map[string]string{ + "name": "tcp-keepalive", + "value": "360", + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +// The API returns errors when attempting to reset the reserved-memory parameter. // This covers our custom logic handling for this situation. func TestAccAWSElasticacheParameterGroup_switchReservedMemoryParameter(t *testing.T) { var cacheParameterGroup1 elasticache.CacheParameterGroup @@ -207,7 +254,7 @@ func TestAccAWSElasticacheParameterGroup_switchReservedMemoryParameter(t *testin }) } -// The API throws 500 errors when attempting to reset the reserved-memory parameter. +// The API returns errors when attempting to reset the reserved-memory parameter. // This covers our custom logic handling for this situation. func TestAccAWSElasticacheParameterGroup_updateReservedMemoryParameter(t *testing.T) { var cacheParameterGroup1 elasticache.CacheParameterGroup From 22320f0486df0651fbf2b3732f262f4f9eace123 Mon Sep 17 00:00:00 2001 From: Top <50546939+hashitop@users.noreply.github.com> Date: Fri, 8 Jan 2021 07:04:35 +1100 Subject: [PATCH 0413/1212] Update wording website/docs/r/security_group.html.markdown Co-authored-by: Simon Davis --- website/docs/r/security_group.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/security_group.html.markdown b/website/docs/r/security_group.html.markdown index 113cce0e681..373d1418c33 100644 --- a/website/docs/r/security_group.html.markdown +++ b/website/docs/r/security_group.html.markdown @@ -122,7 +122,7 @@ The `egress` block supports: * `prefix_list_ids` - (Optional) List of prefix list IDs (for allowing access to VPC endpoints) * `from_port` - (Required) The start port (or ICMP type number if protocol is "icmp") * `protocol` - (Required) The protocol. If you select a protocol of -"-1" (semantically equivalent to `"all"`, which is not a valid value here), you must specify a "from_port" and "to_port" equal to 0. 
The supported values are defined in the "IpProtocol" argument on the [IpPermission](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IpPermission.html) API reference. This argument is normalized to a lowercase value to match the AWS API requirement when using with Terraform 0.12.x and above, please make sure that the value of the protocol is specified as lowercase when using with older version of Terraform to avoid an issue during upgrade.
+"-1" (semantically equivalent to `"all"`, which is not a valid value here), you must specify a "from_port" and "to_port" equal to 0. The supported values are defined in the "IpProtocol" argument in the [IpPermission](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IpPermission.html) API reference. This argument is normalized to a lowercase value to match the AWS API requirement when using Terraform 0.12.x and above. Please make sure that the value of the protocol is specified as lowercase when used with older versions of Terraform to avoid issues during upgrade.
 * `security_groups` - (Optional) List of security group Group Names if using
 EC2-Classic, or Group IDs if using a VPC.
 * `self` - (Optional) If true, the security group itself will be added as

From 1f504e27376f1bf00d6da9b4c7ab6aeae0faecfa Mon Sep 17 00:00:00 2001
From: Dirk Avery
Date: Thu, 7 Jan 2021 15:20:56 -0500
Subject: [PATCH 0414/1212] tests/resource/instance: Update config comment

---
 aws/resource_aws_instance_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/aws/resource_aws_instance_test.go b/aws/resource_aws_instance_test.go
index 2ac670fbad5..1b38cd0e5cc 100644
--- a/aws/resource_aws_instance_test.go
+++ b/aws/resource_aws_instance_test.go
@@ -4398,7 +4398,7 @@ resource "aws_instance" "test" {
 func testAccInstanceConfigRootBlockDeviceMismatch(rName string) string {
 return testAccAwsInstanceVpcConfig(rName, false) + `
 resource "aws_instance" "test" {
- # This is an AMI in endpoints.UsWest2RegionID with RootDeviceName: "/dev/sda1"; actual root: "/dev/sda"
+ # This is an AMI in UsWest2 with RootDeviceName: "/dev/sda1"; actual root: "/dev/sda"
 ami = "ami-ef5b69df"

 # tflint-ignore: aws_instance_previous_type

From e4c859fe25df421adf0b90fbcd2c773ec7368c9b Mon Sep 17 00:00:00 2001
From: Dirk Avery
Date: Thu, 7 Jan 2021 15:25:12 -0500
Subject: [PATCH 0415/1212] tests/resource/devicefarm: Remove unneeded test

---
 aws/resource_aws_devicefarm_project_test.go | 20 --------------------
 1 file changed, 20 deletions(-)

diff --git a/aws/resource_aws_devicefarm_project_test.go b/aws/resource_aws_devicefarm_project_test.go
index 63acf16cc2c..e6f5e1bd14e 100644
--- a/aws/resource_aws_devicefarm_project_test.go
+++ b/aws/resource_aws_devicefarm_project_test.go
@@ -46,26 +46,6 @@ func TestAccAWSDeviceFarmProject_basic(t *testing.T) {
 })
 }

-func TestAccAWSDeviceFarmProject_otherRegion(t *testing.T) {
- rName := acctest.RandomWithPrefix("tf-acc-test")
-
- if testAccGetRegion() == endpoints.UsWest2RegionID {
- t.Skipf("skipping test; test does not run in current region (%s)", testAccGetRegion())
- }
-
- resource.ParallelTest(t, resource.TestCase{
- PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(devicefarm.EndpointsID, t) },
- Providers: testAccProviders,
- CheckDestroy: testAccCheckDeviceFarmProjectDestroy,
- Steps: []resource.TestStep{
- {
- Config: testAccDeviceFarmProjectConfig(rName),
- ExpectError: regexp.MustCompile(`no such host`),
- },
- },
- })
-}
-
 func TestAccAWSDeviceFarmProject_disappears(t *testing.T) {
 var proj
devicefarm.Project rName := acctest.RandomWithPrefix("tf-acc-test") From c4bb41652f1b7269d241fb1bb20f43b74daee1b2 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 7 Jan 2021 15:31:18 -0500 Subject: [PATCH 0416/1212] tests/resource/cognito_user_group: Use data source for region --- aws/resource_aws_cognito_user_group_test.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_cognito_user_group_test.go b/aws/resource_aws_cognito_user_group_test.go index f5c3ad3cbc5..545d19a071b 100644 --- a/aws/resource_aws_cognito_user_group_test.go +++ b/aws/resource_aws_cognito_user_group_test.go @@ -199,6 +199,8 @@ resource "aws_cognito_user_pool" "main" { name = "%[1]s" } +data "aws_region" "current" {} + resource "aws_iam_role" "group_role" { name = "%[2]s" @@ -215,7 +217,7 @@ resource "aws_iam_role" "group_role" { "Action": "sts:AssumeRoleWithWebIdentity", "Condition": { "StringEquals": { - "cognito-identity.amazonaws.com:aud": "%[5]s:12345678-dead-beef-cafe-123456790ab" + "cognito-identity.amazonaws.com:aud": "${data.aws_region.current.name}:12345678-dead-beef-cafe-123456790ab" }, "ForAnyValue:StringLike": { "cognito-identity.amazonaws.com:amr": "authenticated" @@ -234,7 +236,7 @@ resource "aws_cognito_user_group" "main" { precedence = %[4]d role_arn = aws_iam_role.group_role.arn } -`, poolName, groupName, groupDescription, precedence, testAccGetRegion()) +`, poolName, groupName, groupDescription, precedence) } func testAccAWSCognitoUserGroupConfig_RoleArn(rName string) string { From a8a7f13ee7e478dc381345a68094bbec763bbefd Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 7 Jan 2021 16:58:45 -0500 Subject: [PATCH 0417/1212] tests/resource/organizations_policy: Fix hardcoded regions --- aws/resource_aws_organizations_policy_test.go | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/aws/resource_aws_organizations_policy_test.go b/aws/resource_aws_organizations_policy_test.go index e54826ef8c5..08c24544e35 100644 --- a/aws/resource_aws_organizations_policy_test.go +++ b/aws/resource_aws_organizations_policy_test.go @@ -225,9 +225,8 @@ func testAccAwsOrganizationsPolicy_type_Backup(t *testing.T) { "PII_Backup_Plan":{ "regions":{ "@@assign":[ - "ap-northeast-2", - "us-east-1", - "eu-north-1" + "%[1]s", + "%[2]s" ] }, "rules":{ @@ -253,9 +252,9 @@ func testAccAwsOrganizationsPolicy_type_Backup(t *testing.T) { "@@assign":"FortKnox" }, "copy_actions":{ - "arn:%[1]s:backup:us-east-1:$account:backup-vault:secondary_vault":{ + "arn:%[3]s:backup:%[1]s:$account:backup-vault:secondary_vault":{ "target_backup_vault_arn":{ - "@@assign":"arn:%[1]s:backup:us-east-1:$account:backup-vault:secondary_vault" + "@@assign":"arn:%[3]s:backup:%[1]s:$account:backup-vault:secondary_vault" }, "lifecycle":{ "delete_after_days":{ @@ -273,7 +272,7 @@ func testAccAwsOrganizationsPolicy_type_Backup(t *testing.T) { "tags":{ "datatype":{ "iam_role_arn":{ - "@@assign":"arn:%[1]s:iam::$account:role/MyIamRole" + "@@assign":"arn:%[3]s:iam::$account:role/MyIamRole" }, "tag_key":{ "@@assign":"dataType" @@ -289,7 +288,7 @@ func testAccAwsOrganizationsPolicy_type_Backup(t *testing.T) { } } } -}`, testAccGetPartition()) +}`, testAccGetAlternateRegion(), testAccGetRegion(), testAccGetPartition()) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccOrganizationsAccountPreCheck(t) }, From fbf5fb287e3b053f818709001572f1a88a43f720 Mon Sep 17 00:00:00 2001 From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com> Date: Thu, 22 Oct 2020 
18:44:22 -0500 Subject: [PATCH 0418/1212] add aws_sso_instance data source --- aws/data_source_aws_sso_instance.go | 63 ++++++++++++++++++ aws/data_source_aws_sso_instance_test.go | 79 +++++++++++++++++++++++ aws/provider.go | 3 + website/docs/d/sso_instance.html.markdown | 34 ++++++++++ 4 files changed, 179 insertions(+) create mode 100644 aws/data_source_aws_sso_instance.go create mode 100644 aws/data_source_aws_sso_instance_test.go create mode 100644 website/docs/d/sso_instance.html.markdown diff --git a/aws/data_source_aws_sso_instance.go b/aws/data_source_aws_sso_instance.go new file mode 100644 index 00000000000..2104b5f0cd0 --- /dev/null +++ b/aws/data_source_aws_sso_instance.go @@ -0,0 +1,63 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func dataSourceAwsSsoInstance() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsSsoInstanceRead, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "identity_store_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAwsSsoInstanceRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + + log.Printf("[DEBUG] Reading AWS SSO Instances") + instances := []*ssoadmin.InstanceMetadata{} + err := conn.ListInstancesPages(&ssoadmin.ListInstancesInput{}, func(page *ssoadmin.ListInstancesOutput, lastPage bool) bool { + if page != nil && len(page.Instances) != 0 { + instances = append(instances, page.Instances...) + } + return !lastPage + }) + if err != nil { + return fmt.Errorf("Error getting AWS SSO Instances: %s", err) + } + + if len(instances) == 0 { + log.Printf("[DEBUG] No AWS SSO Instance found") + d.SetId("") + return nil + } + + if len(instances) > 1 { + return fmt.Errorf("Found multiple AWS SSO Instances. Not sure which one to use. %s", instances) + } + + instance := instances[0] + log.Printf("[DEBUG] Received AWS SSO Instance: %s", instance) + + d.SetId(time.Now().UTC().String()) + d.Set("arn", instance.InstanceArn) + d.Set("identity_store_id", instance.IdentityStoreId) + + return nil +} diff --git a/aws/data_source_aws_sso_instance_test.go b/aws/data_source_aws_sso_instance_test.go new file mode 100644 index 00000000000..f9b51dae6dd --- /dev/null +++ b/aws/data_source_aws_sso_instance_test.go @@ -0,0 +1,79 @@ +package aws + +import ( + "fmt" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func testAccPreCheckAWSSSOInstance(t *testing.T) { + ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn + + instances := []*ssoadmin.InstanceMetadata{} + err := ssoadminconn.ListInstancesPages(&ssoadmin.ListInstancesInput{}, func(page *ssoadmin.ListInstancesOutput, lastPage bool) bool { + if page != nil && len(page.Instances) != 0 { + instances = append(instances, page.Instances...) + } + return !lastPage + }) + if testAccPreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if len(instances) == 0 { + t.Skip("skipping acceptance testing: No AWS SSO Instance found.") + } + + if len(instances) > 1 { + t.Skip("skipping acceptance testing: Found multiple AWS SSO Instances. 
Not sure which one to use.") + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +func TestAccDataSourceAwsSsoInstance_Basic(t *testing.T) { + datasourceName := "data.aws_sso_instance.selected" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsSsoInstanceConfigBasic(), + Check: resource.ComposeTestCheckFunc( + testAccMatchResourceAttrAwsSsoARN(datasourceName, "arn", regexp.MustCompile("instance/ssoins-[a-zA-Z0-9-.]{16}")), + resource.TestMatchResourceAttr(datasourceName, "identity_store_id", regexp.MustCompile("^[a-zA-Z0-9-]*")), + ), + }, + }, + }) +} + +func testAccDataSourceAwsSsoInstanceConfigBasic() string { + return `data "aws_sso_instance" "selected" {}` +} + +func testAccMatchResourceAttrAwsSsoARN(resourceName, attributeName string, arnResourceRegexp *regexp.Regexp) resource.TestCheckFunc { + return func(s *terraform.State) error { + arnRegexp := arn.ARN{ + Partition: testAccGetPartition(), + Resource: arnResourceRegexp.String(), + Service: "sso", + }.String() + + attributeMatch, err := regexp.Compile(arnRegexp) + + if err != nil { + return fmt.Errorf("Unable to compile ARN regexp (%s): %s", arnRegexp, err) + } + + return resource.TestMatchResourceAttr(resourceName, attributeName, attributeMatch)(s) + } +} diff --git a/aws/provider.go b/aws/provider.go index 2d2fe3b412b..9e4bf123c83 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -355,6 +355,8 @@ func Provider() *schema.Provider { "aws_ssm_document": dataSourceAwsSsmDocument(), "aws_ssm_parameter": dataSourceAwsSsmParameter(), "aws_ssm_patch_baseline": dataSourceAwsSsmPatchBaseline(), + "aws_sso_instance": dataSourceAwsSsoInstance(), + "aws_sso_permission_set": dataSourceAwsSsoPermissionSet(), "aws_storagegateway_local_disk": dataSourceAwsStorageGatewayLocalDisk(), "aws_subnet": dataSourceAwsSubnet(), "aws_subnet_ids": dataSourceAwsSubnetIDs(), @@ -937,6 +939,7 @@ func Provider() *schema.Provider { "aws_ssm_patch_group": resourceAwsSsmPatchGroup(), "aws_ssm_parameter": resourceAwsSsmParameter(), "aws_ssm_resource_data_sync": resourceAwsSsmResourceDataSync(), + "aws_sso_permission_set": resourceAwsSsoPermissionSet(), "aws_storagegateway_cache": resourceAwsStorageGatewayCache(), "aws_storagegateway_cached_iscsi_volume": resourceAwsStorageGatewayCachedIscsiVolume(), "aws_storagegateway_gateway": resourceAwsStorageGatewayGateway(), diff --git a/website/docs/d/sso_instance.html.markdown b/website/docs/d/sso_instance.html.markdown new file mode 100644 index 00000000000..3cc268991ff --- /dev/null +++ b/website/docs/d/sso_instance.html.markdown @@ -0,0 +1,34 @@ +--- +subcategory: "SSO Admin" +layout: "aws" +page_title: "AWS: aws_sso_instance" +description: |- + Get information on an AWS Single Sign-On Instance. +--- + +# Data Source: aws_sso_instance + +Use this data source to get the Single Sign-On Instance ARN and Identity Store ID. + +## Example Usage + +```hcl +data "aws_sso_instance" "selected" {} + +output "arn" { + value = data.aws_sso_instance.selected.arn +} + +output "identity_store_id" { + value = data.aws_sso_instance.selected.identity_store_id +} +``` + +## Argument Reference + +There are no arguments available for this data source. + +## Attributes Reference + +* `arn` - The AWS ARN associated with the AWS Single Sign-On Instance. 
+* `identity_store_id` - The Identity Store ID associated with the AWS Single Sign-On Instance. \ No newline at end of file From dfdcee6ab25a5b2a43a0f04aaee3d130875d9282 Mon Sep 17 00:00:00 2001 From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com> Date: Thu, 22 Oct 2020 18:44:47 -0500 Subject: [PATCH 0419/1212] add aws_sso_permission_set data source --- aws/data_source_aws_sso_permission_set.go | 182 +++++ ...data_source_aws_sso_permission_set_test.go | 110 +++ aws/internal/keyvaluetags/sso_tags.go | 95 +++ aws/resource_aws_sso_permission_set.go | 642 ++++++++++++++++++ aws/resource_aws_sso_permission_set_test.go | 303 +++++++++ .../docs/d/sso_permission_set.html.markdown | 47 ++ .../docs/r/sso_permission_set.html.markdown | 73 ++ 7 files changed, 1452 insertions(+) create mode 100644 aws/data_source_aws_sso_permission_set.go create mode 100644 aws/data_source_aws_sso_permission_set_test.go create mode 100644 aws/internal/keyvaluetags/sso_tags.go create mode 100644 aws/resource_aws_sso_permission_set.go create mode 100644 aws/resource_aws_sso_permission_set_test.go create mode 100644 website/docs/d/sso_permission_set.html.markdown create mode 100644 website/docs/r/sso_permission_set.html.markdown diff --git a/aws/data_source_aws_sso_permission_set.go b/aws/data_source_aws_sso_permission_set.go new file mode 100644 index 00000000000..58ac29a0ba8 --- /dev/null +++ b/aws/data_source_aws_sso_permission_set.go @@ -0,0 +1,182 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" +) + +func dataSourceAwsSsoPermissionSet() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsSsoPermissionSetRead, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "created_date": { + Type: schema.TypeString, + Computed: true, + }, + + "instance_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(10, 1224), + validation.StringMatch(regexp.MustCompile(`^arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}$`), "must match arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}"), + ), + }, + + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 32), + validation.StringMatch(regexp.MustCompile(`^[\w+=,.@-]+$`), "must match [\\w+=,.@-]"), + ), + }, + + "description": { + Type: schema.TypeString, + Computed: true, + }, + + "session_duration": { + Type: schema.TypeString, + Computed: true, + }, + + "relay_state": { + Type: schema.TypeString, + Computed: true, + }, + + "inline_policy": { + Type: schema.TypeString, + Computed: true, + }, + + "managed_policy_arns": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "tags": tagsSchemaComputed(), + }, + } +} + +func dataSourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig + + instanceArn := d.Get("instance_arn").(string) + name := d.Get("name").(string) + + log.Printf("[DEBUG] Reading AWS SSO Permission Sets") + + var permissionSetArn string + var permissionSet *ssoadmin.PermissionSet + var 
permissionSetErr error
+
+ req := &ssoadmin.ListPermissionSetsInput{
+ InstanceArn: aws.String(instanceArn),
+ }
+ err := conn.ListPermissionSetsPages(req, func(page *ssoadmin.ListPermissionSetsOutput, lastPage bool) bool {
+ if page != nil && len(page.PermissionSets) != 0 {
+ for _, ps := range page.PermissionSets {
+ permissionSetArn = aws.StringValue(ps)
+ log.Printf("[DEBUG] Reading AWS SSO Permission Set: %v", permissionSetArn)
+ var permissionSetResp *ssoadmin.DescribePermissionSetOutput
+ permissionSetResp, permissionSetErr = conn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{
+ InstanceArn: aws.String(instanceArn),
+ PermissionSetArn: aws.String(permissionSetArn),
+ })
+ if permissionSetErr != nil {
+ return false
+ }
+ if aws.StringValue(permissionSetResp.PermissionSet.Name) == name {
+ permissionSet = permissionSetResp.PermissionSet
+ return false
+ }
+ }
+ }
+ return !lastPage
+ })
+
+ if err != nil {
+ return fmt.Errorf("Error getting AWS SSO Permission Sets: %s", err)
+ }
+
+ if permissionSetErr != nil {
+ return fmt.Errorf("Error getting AWS SSO Permission Set: %s", permissionSetErr)
+ }
+
+ if permissionSet == nil {
+ log.Printf("[DEBUG] AWS SSO Permission Set %v not found", name)
+ d.SetId("")
+ return nil
+ }
+
+ log.Printf("[DEBUG] Found AWS SSO Permission Set: %s", permissionSet)
+
+ log.Printf("[DEBUG] Getting Inline Policy for AWS SSO Permission Set")
+ inlinePolicyResp, inlinePolicyErr := conn.GetInlinePolicyForPermissionSet(&ssoadmin.GetInlinePolicyForPermissionSetInput{
+ InstanceArn: aws.String(instanceArn),
+ PermissionSetArn: aws.String(permissionSetArn),
+ })
+ if inlinePolicyErr != nil {
+ return fmt.Errorf("Error getting Inline Policy for AWS SSO Permission Set: %s", inlinePolicyErr)
+ }
+
+ log.Printf("[DEBUG] Getting Managed Policies for AWS SSO Permission Set")
+ var managedPolicyArns []string
+ managedPoliciesReq := &ssoadmin.ListManagedPoliciesInPermissionSetInput{
+ InstanceArn: aws.String(instanceArn),
+ PermissionSetArn: aws.String(permissionSetArn),
+ }
+ managedPoliciesErr := conn.ListManagedPoliciesInPermissionSetPages(managedPoliciesReq, func(page *ssoadmin.ListManagedPoliciesInPermissionSetOutput, lastPage bool) bool {
+ for _, managedPolicy := range page.AttachedManagedPolicies {
+ managedPolicyArns = append(managedPolicyArns, aws.StringValue(managedPolicy.Arn))
+ }
+ return !lastPage
+ })
+ if managedPoliciesErr != nil {
+ return fmt.Errorf("Error getting Managed Policies for AWS SSO Permission Set: %s", managedPoliciesErr)
+ }
+
+ tags, tagsErr := keyvaluetags.SsoListTags(conn, permissionSetArn, instanceArn)
+ if tagsErr != nil {
+ return fmt.Errorf("Error listing tags for AWS SSO Permission Set (%s): %s", permissionSetArn, tagsErr)
+ }
+
+ d.SetId(permissionSetArn)
+ d.Set("arn", permissionSetArn)
+ d.Set("created_date", permissionSet.CreatedDate.Format(time.RFC3339))
+ d.Set("instance_arn", instanceArn)
+ d.Set("name", permissionSet.Name)
+ d.Set("description", permissionSet.Description)
+ d.Set("session_duration", permissionSet.SessionDuration)
+ d.Set("relay_state", permissionSet.RelayState)
+ d.Set("inline_policy", inlinePolicyResp.InlinePolicy)
+ d.Set("managed_policy_arns", managedPolicyArns)
+ tagsMapErr := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map())
+ if tagsMapErr != nil {
+ return fmt.Errorf("Error setting tags: %s", tagsMapErr)
+ }
+
+ return nil
+} diff --git a/aws/data_source_aws_sso_permission_set_test.go b/aws/data_source_aws_sso_permission_set_test.go new file mode 100644 index
00000000000..1afa28ef056 --- /dev/null +++ b/aws/data_source_aws_sso_permission_set_test.go @@ -0,0 +1,110 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfawsresource" +) + +func TestAccDataSourceAwsSsoPermissionSet_Basic(t *testing.T) { + datasourceName := "data.aws_sso_permission_set.test" + rName := acctest.RandomWithPrefix("tf-sso-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsSsoPermissionSetConfigBasic(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(datasourceName, "managed_policy_arns.#", "1"), + tfawsresource.TestCheckTypeSetElemAttr(datasourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), + resource.TestCheckResourceAttr(datasourceName, "name", rName), + resource.TestCheckResourceAttr(datasourceName, "description", "testing"), + resource.TestCheckResourceAttr(datasourceName, "session_duration", "PT1H"), + resource.TestCheckResourceAttr(datasourceName, "relay_state", "https://console.aws.amazon.com/console/home"), + resource.TestCheckResourceAttr(datasourceName, "tags.%", "0"), + ), + }, + }, + }) +} + +func TestAccDataSourceAwsSsoPermissionSet_Tags(t *testing.T) { + datasourceName := "data.aws_sso_permission_set.test" + rName := acctest.RandomWithPrefix("tf-sso-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsSsoPermissionSetConfigByTags(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(datasourceName, "managed_policy_arns.#", "1"), + tfawsresource.TestCheckTypeSetElemAttr(datasourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), + resource.TestCheckResourceAttr(datasourceName, "name", rName), + resource.TestCheckResourceAttr(datasourceName, "description", "testing"), + resource.TestCheckResourceAttr(datasourceName, "session_duration", "PT1H"), + resource.TestCheckResourceAttr(datasourceName, "relay_state", "https://console.aws.amazon.com/console/home"), + resource.TestCheckResourceAttr(datasourceName, "tags.%", "3"), + ), + }, + }, + }) +} + +func testAccDataSourceAwsSsoPermissionSetConfigBasic(rName string) string { + return fmt.Sprintf(` +data "aws_sso_instance" "selected" {} + +resource "aws_sso_permission_set" "test" { + name = "%s" + description = "testing" + instance_arn = data.aws_sso_instance.selected.arn + session_duration = "PT1H" + relay_state = "https://console.aws.amazon.com/console/home" + managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] +} + +data "aws_sso_permission_set" "test" { + instance_arn = data.aws_sso_instance.selected.arn + name = aws_sso_permission_set.test.name +} +`, rName) +} + +func testAccDataSourceAwsSsoPermissionSetConfigByTags(rName string) string { + return fmt.Sprintf(` +data "aws_sso_instance" "selected" {} + +resource "aws_sso_permission_set" "test" { + name = "%s" + description = "testing" + instance_arn = data.aws_sso_instance.selected.arn + session_duration = "PT1H" + relay_state = "https://console.aws.amazon.com/console/home" + managed_policy_arns = 
["arn:aws:iam::aws:policy/ReadOnlyAccess"] + + tags = { + Key1 = "Value1" + Key2 = "Value2" + Key3 = "Value3" + } +} + +data "aws_sso_permission_set" "test" { + instance_arn = data.aws_sso_instance.selected.arn + name = aws_sso_permission_set.test.name + + tags = { + Key1 = "Value1" + Key2 = "Value2" + Key3 = "Value3" + } +} +`, rName) +} diff --git a/aws/internal/keyvaluetags/sso_tags.go b/aws/internal/keyvaluetags/sso_tags.go new file mode 100644 index 00000000000..05391e0c734 --- /dev/null +++ b/aws/internal/keyvaluetags/sso_tags.go @@ -0,0 +1,95 @@ +// +build !generate + +package keyvaluetags + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssoadmin" +) + +// Custom SSO tag service functions using the same format as generated code. + +// SsoListTags lists sso service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func SsoListTags(conn *ssoadmin.SSOAdmin, identifier string, instanceArn string) (KeyValueTags, error) { + input := &ssoadmin.ListTagsForResourceInput{ + InstanceArn: aws.String(instanceArn), + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return SsoKeyValueTags(output.Tags), nil +} + +// SsoUpdateTags updates sso service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func SsoUpdateTags(conn *ssoadmin.SSOAdmin, identifier string, instanceArn string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &ssoadmin.UntagResourceInput{ + InstanceArn: aws.String(instanceArn), + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.IgnoreAws().Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &ssoadmin.TagResourceInput{ + InstanceArn: aws.String(instanceArn), + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().SsoTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// SsoTags returns sso service tags. +func (tags KeyValueTags) SsoTags() []*ssoadmin.Tag { + result := make([]*ssoadmin.Tag, 0, len(tags)) + + for k, v := range tags.Map() { + tag := &ssoadmin.Tag{ + Key: aws.String(k), + Value: aws.String(v), + } + + result = append(result, tag) + } + + return result +} + +// SsoKeyValueTags creates KeyValueTags from sso service tags. 
+func SsoKeyValueTags(tags []*ssoadmin.Tag) KeyValueTags { + m := make(map[string]*string, len(tags)) + + for _, tag := range tags { + m[aws.StringValue(tag.Key)] = tag.Value + } + + return New(m) +} diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go new file mode 100644 index 00000000000..4473240d72a --- /dev/null +++ b/aws/resource_aws_sso_permission_set.go @@ -0,0 +1,642 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" +) + +const ( + AWSSSOPermissionSetCreateTimeout = 5 * time.Minute + AWSSSOPermissionSetUpdateTimeout = 10 * time.Minute + AWSSSOPermissionSetDeleteTimeout = 5 * time.Minute + AWSSSOPermissionSetProvisioningRetryDelay = 5 * time.Second + AWSSSOPermissionSetProvisioningRetryMinTimeout = 3 * time.Second +) + +func resourceAwsSsoPermissionSet() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSsoPermissionSetCreate, + Read: resourceAwsSsoPermissionSetRead, + Update: resourceAwsSsoPermissionSetUpdate, + Delete: resourceAwsSsoPermissionSetDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsSsoPermissionSetImport, + }, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(AWSSSOPermissionSetCreateTimeout), + Update: schema.DefaultTimeout(AWSSSOPermissionSetUpdateTimeout), + Delete: schema.DefaultTimeout(AWSSSOPermissionSetDeleteTimeout), + }, + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "created_date": { + Type: schema.TypeString, + Computed: true, + }, + + "provisioning_created_date": { + Type: schema.TypeString, + Computed: true, + }, + + "provisioning_failure_reason": { + Type: schema.TypeString, + Computed: true, + }, + + "provisioning_request_id": { + Type: schema.TypeString, + Computed: true, + }, + + "provisioning_status": { + Type: schema.TypeString, + Computed: true, + }, + + "instance_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(10, 1224), + validation.StringMatch(regexp.MustCompile(`^arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}$`), "must match arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}"), + ), + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 32), + validation.StringMatch(regexp.MustCompile(`^[\w+=,.@-]+$`), "must match [\\w+=,.@-]"), + ), + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 700), + validation.StringMatch(regexp.MustCompile(`^[\p{L}\p{M}\p{Z}\p{S}\p{N}\p{P}]*$`), "must match [\\p{L}\\p{M}\\p{Z}\\p{S}\\p{N}\\p{P}]"), + ), + }, + + "session_duration": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 100), + Default: "PT1H", + }, + + "relay_state": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 240), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9&$@#\\\/%?=~\-_'"|!:,.;*+\[\]\(\)\{\} ]+$`), "must 
match [a-zA-Z0-9&$@#\\\\\\/%?=~\\-_'\"|!:,.;*+\\[\\]\\(\\)\\{\\} ]"), + ), + }, + + "inline_policy": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateIAMPolicyJson, + DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, + }, + + "managed_policy_arns": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateArn, + }, + Set: schema.HashString, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsSsoPermissionSetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + permissionSetArn := d.Id() + instanceArn, err := resourceAwsSsoPermissionSetParseID(permissionSetArn) + if err != nil { + return []*schema.ResourceData{}, fmt.Errorf("Error parsing AWS Permission Set (%s) for import: %s", permissionSetArn, err) + } + + ssoadminconn := meta.(*AWSClient).ssoadminconn + ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig + + permissionSetResp, permissionSetErr := ssoadminconn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + }) + + if permissionSetErr != nil { + return []*schema.ResourceData{}, permissionSetErr + } + + permissionSet := permissionSetResp.PermissionSet + + log.Printf("[DEBUG] Getting Inline Policy for AWS SSO Permission Set") + inlinePolicyResp, inlinePolicyErr := ssoadminconn.GetInlinePolicyForPermissionSet(&ssoadmin.GetInlinePolicyForPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + }) + if inlinePolicyErr != nil { + return []*schema.ResourceData{}, fmt.Errorf("Error importing Inline Policy for AWS SSO Permission Set (%s): %s", permissionSetArn, inlinePolicyErr) + } + + log.Printf("[DEBUG] Getting Managed Policies for AWS SSO Permission Set") + managedPoliciesResp, managedPoliciesErr := ssoadminconn.ListManagedPoliciesInPermissionSet(&ssoadmin.ListManagedPoliciesInPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + }) + if managedPoliciesErr != nil { + return []*schema.ResourceData{}, fmt.Errorf("Error importing Managed Policies for AWS SSO Permission Set (%s): %s", permissionSetArn, managedPoliciesErr) + } + var managedPolicyArns []string + for _, managedPolicy := range managedPoliciesResp.AttachedManagedPolicies { + managedPolicyArns = append(managedPolicyArns, aws.StringValue(managedPolicy.Arn)) + } + + tags, err := keyvaluetags.SsoListTags(ssoadminconn, permissionSetArn, instanceArn) + if err != nil { + return []*schema.ResourceData{}, fmt.Errorf("Error listing tags during AWS SSO Permission Set (%s) import: %s", permissionSetArn, err) + } + + err = d.Set("instance_arn", instanceArn) + if err != nil { + return []*schema.ResourceData{}, err + } + err = d.Set("arn", permissionSetArn) + if err != nil { + return []*schema.ResourceData{}, err + } + err = d.Set("created_date", permissionSet.CreatedDate.Format(time.RFC3339)) + if err != nil { + return []*schema.ResourceData{}, err + } + err = d.Set("name", permissionSet.Name) + if err != nil { + return []*schema.ResourceData{}, err + } + err = d.Set("description", permissionSet.Description) + if err != nil { + return []*schema.ResourceData{}, err + } + err = d.Set("session_duration", permissionSet.SessionDuration) + if err != nil { + return []*schema.ResourceData{}, err + } + err = d.Set("relay_state", permissionSet.RelayState) + if err != nil { + return []*schema.ResourceData{}, err + } + err 
= d.Set("inline_policy", inlinePolicyResp.InlinePolicy) + if err != nil { + return []*schema.ResourceData{}, err + } + err = d.Set("managed_policy_arns", managedPolicyArns) + if err != nil { + return []*schema.ResourceData{}, err + } + if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + return []*schema.ResourceData{}, fmt.Errorf("Error importing AWS SSO Permission Set (%s) tags: %s", permissionSetArn, err) + } + d.SetId(permissionSetArn) + + return []*schema.ResourceData{d}, nil +} + +func resourceAwsSsoPermissionSetCreate(d *schema.ResourceData, meta interface{}) error { + ssoadminconn := meta.(*AWSClient).ssoadminconn + + log.Printf("[INFO] Creating AWS SSO Permission Set") + + instanceArn := aws.String(d.Get("instance_arn").(string)) + + params := &ssoadmin.CreatePermissionSetInput{ + InstanceArn: instanceArn, + Name: aws.String(d.Get("name").(string)), + } + + if v, ok := d.GetOk("description"); ok { + params.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("relay_state"); ok { + params.RelayState = aws.String(v.(string)) + } + + if v, ok := d.GetOk("session_duration"); ok { + params.SessionDuration = aws.String(v.(string)) + } + + if v, ok := d.GetOk("tags"); ok { + params.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().SsoTags() + } + + createPermissionSetResp, createPermissionerr := ssoadminconn.CreatePermissionSet(params) + if createPermissionerr != nil { + return fmt.Errorf("Error creating AWS SSO Permission Set: %s", createPermissionerr) + } + + permissionSetArn := createPermissionSetResp.PermissionSet.PermissionSetArn + d.SetId(*permissionSetArn) + + if attachPoliciesErr := attachPoliciesToPermissionSet(ssoadminconn, d, permissionSetArn, instanceArn); attachPoliciesErr != nil { + return attachPoliciesErr + } + + return resourceAwsSsoPermissionSetRead(d, meta) +} + +func resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) error { + ssoadminconn := meta.(*AWSClient).ssoadminconn + ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig + + var permissionSet *ssoadmin.PermissionSet + permissionSetArn := d.Id() + instanceArn := d.Get("instance_arn").(string) + name := d.Get("name").(string) + + log.Printf("[DEBUG] Reading AWS SSO Permission Set: %s", permissionSetArn) + + permissionSetResp, permissionerr := ssoadminconn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + }) + + if isAWSErr(permissionerr, ssoadmin.ErrCodeResourceNotFoundException, "") { + log.Printf("[WARN] AWS SSO Permission Set (%s) not found, removing from state", permissionSetArn) + d.SetId("") + return nil + } + + if permissionerr != nil { + return fmt.Errorf("Error getting AWS SSO Permission Set: %s", permissionerr) + } + if aws.StringValue(permissionSetResp.PermissionSet.Name) == name { + permissionSet = permissionSetResp.PermissionSet + } + + if permissionSet == nil { + log.Printf("[WARN] AWS SSO Permission Set %s not found, removing from state", name) + d.SetId("") + return nil + } + + log.Printf("[DEBUG] Found AWS SSO Permission Set: %s", permissionSet) + + log.Printf("[DEBUG] Getting Inline Policy for AWS SSO Permission Set") + inlinePolicyResp, inlinePolicyErr := ssoadminconn.GetInlinePolicyForPermissionSet(&ssoadmin.GetInlinePolicyForPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + }) + if inlinePolicyErr != nil { + return fmt.Errorf("Error getting 
Inline Policy for AWS SSO Permission Set: %s", inlinePolicyErr) + } + + log.Printf("[DEBUG] Getting Managed Policies for AWS SSO Permission Set") + managedPoliciesResp, managedPoliciesErr := ssoadminconn.ListManagedPoliciesInPermissionSet(&ssoadmin.ListManagedPoliciesInPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + }) + if managedPoliciesErr != nil { + return fmt.Errorf("Error getting Managed Policies for AWS SSO Permission Set: %s", managedPoliciesErr) + } + var managedPolicyArns []string + for _, managedPolicy := range managedPoliciesResp.AttachedManagedPolicies { + managedPolicyArns = append(managedPolicyArns, aws.StringValue(managedPolicy.Arn)) + } + + tags, err := keyvaluetags.SsoListTags(ssoadminconn, permissionSetArn, instanceArn) + if err != nil { + return fmt.Errorf("Error listing tags for AWS SSO Permission Set (%s): %s", permissionSetArn, err) + } + + err = d.Set("arn", permissionSetArn) + if err != nil { + return err + } + err = d.Set("created_date", permissionSet.CreatedDate.Format(time.RFC3339)) + if err != nil { + return err + } + err = d.Set("instance_arn", instanceArn) + if err != nil { + return err + } + err = d.Set("name", permissionSet.Name) + if err != nil { + return err + } + err = d.Set("description", permissionSet.Description) + if err != nil { + return err + } + err = d.Set("session_duration", permissionSet.SessionDuration) + if err != nil { + return err + } + err = d.Set("relay_state", permissionSet.RelayState) + if err != nil { + return err + } + err = d.Set("inline_policy", inlinePolicyResp.InlinePolicy) + if err != nil { + return err + } + err = d.Set("managed_policy_arns", managedPolicyArns) + if err != nil { + return err + } + if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + return fmt.Errorf("Error setting tags: %s", err) + } + + return nil +} + +func resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) error { + ssoadminconn := meta.(*AWSClient).ssoadminconn + + permissionSetArn := d.Id() + instanceArn := d.Get("instance_arn").(string) + + log.Printf("[DEBUG] Updating AWS SSO Permission Set: %s", permissionSetArn) + + if d.HasChanges("description", "relay_state", "session_duration") { + input := &ssoadmin.UpdatePermissionSetInput{ + PermissionSetArn: aws.String(permissionSetArn), + InstanceArn: aws.String(instanceArn), + } + + if d.HasChange("description") { + input.Description = aws.String(d.Get("description").(string)) + } + + if d.HasChange("relay_state") { + input.RelayState = aws.String(d.Get("relay_state").(string)) + } + + if d.HasChange("session_duration") { + input.SessionDuration = aws.String(d.Get("session_duration").(string)) + } + + log.Printf("[DEBUG] Updating AWS SSO Permission Set: %s", input) + _, permissionerr := ssoadminconn.UpdatePermissionSet(input) + if permissionerr != nil { + return fmt.Errorf("Error updating AWS SSO Permission Set: %s", permissionerr) + } + } + + if d.HasChange("tags") { + oldTags, newTags := d.GetChange("tags") + if updateTagsErr := keyvaluetags.SsoUpdateTags(ssoadminconn, d.Get("arn").(string), d.Get("instance_arn").(string), oldTags, newTags); updateTagsErr != nil { + return fmt.Errorf("Error updating tags: %s", updateTagsErr) + } + } + + if v, ok := d.GetOk("inline_policy"); ok { + log.Printf("[DEBUG] AWS SSO Permission Set %s updating IAM inline policy", permissionSetArn) + + inlinePolicy := aws.String(v.(string)) + + updateInput := &ssoadmin.PutInlinePolicyToPermissionSetInput{ 
+ InlinePolicy: inlinePolicy, + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + } + + _, inlinePolicyErr := ssoadminconn.PutInlinePolicyToPermissionSet(updateInput) + if inlinePolicyErr != nil { + return fmt.Errorf("Error attaching IAM inline policy to AWS SSO Permission Set: %s", inlinePolicyErr) + } + } else if d.HasChange("inline_policy") { + deleteInput := &ssoadmin.DeleteInlinePolicyFromPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + } + + _, inlinePolicyErr := ssoadminconn.DeleteInlinePolicyFromPermissionSet(deleteInput) + if inlinePolicyErr != nil { + return fmt.Errorf("Error deleting IAM inline policy from AWS SSO Permission Set: %s", inlinePolicyErr) + } + } + + if d.HasChange("managed_policy_arns") { + o, n := d.GetChange("managed_policy_arns") + + os := o.(*schema.Set) + ns := n.(*schema.Set) + + removalList := os.Difference(ns) + for _, v := range removalList.List() { + input := &ssoadmin.DetachManagedPolicyFromPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + ManagedPolicyArn: aws.String(v.(string)), + PermissionSetArn: aws.String(permissionSetArn), + } + + _, managedPoliciesErr := ssoadminconn.DetachManagedPolicyFromPermissionSet(input) + if managedPoliciesErr != nil { + return fmt.Errorf("Error detaching Managed Policy from AWS SSO Permission Set: %s", managedPoliciesErr) + } + } + + additionList := ns.Difference(os) + for _, v := range additionList.List() { + input := &ssoadmin.AttachManagedPolicyToPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + ManagedPolicyArn: aws.String(v.(string)), + PermissionSetArn: aws.String(permissionSetArn), + } + + _, managedPoliciesErr := ssoadminconn.AttachManagedPolicyToPermissionSet(input) + if managedPoliciesErr != nil { + return fmt.Errorf("Error attaching Managed Policy to AWS SSO Permission Set: %s", managedPoliciesErr) + } + } + } + + // Reprovision if anything has changed + if d.HasChanges("description", "relay_state", "session_duration", "inline_policy", "managed_policy_arns", "tags") { + + // Auto provision all accounts + targetType := ssoadmin.ProvisionTargetTypeAllProvisionedAccounts + provisionInput := &ssoadmin.ProvisionPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + TargetType: aws.String(targetType), + } + + log.Printf("[INFO] Provisioning AWS SSO Permission Set") + provisionResponse, err := ssoadminconn.ProvisionPermissionSet(provisionInput) + if err != nil { + return fmt.Errorf("Error provisioning AWS SSO Permission Set (%s): %w", d.Id(), err) + } + + status := provisionResponse.PermissionSetProvisioningStatus + + _, waitErr := waitForPermissionSetProvisioning(ssoadminconn, instanceArn, aws.StringValue(status.RequestId), d.Timeout(schema.TimeoutUpdate)) + if waitErr != nil { + return waitErr + } + } + + return resourceAwsSsoPermissionSetRead(d, meta) +} + +func resourceAwsSsoPermissionSetDelete(d *schema.ResourceData, meta interface{}) error { + ssoadminconn := meta.(*AWSClient).ssoadminconn + + permissionSetArn := d.Id() + instanceArn := d.Get("instance_arn").(string) + + log.Printf("[INFO] Deleting AWS SSO Permission Set: %s", permissionSetArn) + + params := &ssoadmin.DeletePermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + } + + _, err := ssoadminconn.DeletePermissionSet(params) + + if err != nil { + if isAWSErr(err, ssoadmin.ErrCodeResourceNotFoundException, "") { + 
log.Printf("[DEBUG] AWS SSO Permission Set not found") + d.SetId("") + return nil + } + return fmt.Errorf("Error deleting AWS SSO Permission Set (%s): %s", d.Id(), err) + } + + d.SetId("") + return nil +} + +func attachPoliciesToPermissionSet(ssoadminconn *ssoadmin.SSOAdmin, d *schema.ResourceData, permissionSetArn *string, instanceArn *string) error { + + if v, ok := d.GetOk("inline_policy"); ok { + log.Printf("[INFO] Attaching IAM inline policy to AWS SSO Permission Set") + + inlinePolicy := aws.String(v.(string)) + + input := &ssoadmin.PutInlinePolicyToPermissionSetInput{ + InlinePolicy: inlinePolicy, + InstanceArn: instanceArn, + PermissionSetArn: permissionSetArn, + } + + _, inlinePolicyErr := ssoadminconn.PutInlinePolicyToPermissionSet(input) + if inlinePolicyErr != nil { + return fmt.Errorf("Error attaching IAM inline policy to AWS SSO Permission Set: %s", inlinePolicyErr) + } + } + + if v, ok := d.GetOk("managed_policy_arns"); ok { + log.Printf("[INFO] Attaching Managed Policies to AWS SSO Permission Set") + + managedPolicies := expandStringSet(v.(*schema.Set)) + + for _, managedPolicyArn := range managedPolicies { + + input := &ssoadmin.AttachManagedPolicyToPermissionSetInput{ + InstanceArn: instanceArn, + ManagedPolicyArn: managedPolicyArn, + PermissionSetArn: permissionSetArn, + } + + _, managedPoliciesErr := ssoadminconn.AttachManagedPolicyToPermissionSet(input) + if managedPoliciesErr != nil { + return fmt.Errorf("Error attaching Managed Policy to AWS SSO Permission Set: %s", managedPoliciesErr) + } + } + } + + return nil +} + +func resourceAwsSsoPermissionSetParseID(id string) (string, error) { + // id = arn:${Partition}:sso:::permissionSet/${InstanceID}/${PermissionSetID} + idFormatErr := fmt.Errorf("Unexpected format of ARN (%s), expected arn:${Partition}:sso:::permissionSet/${InstanceId}/${PermissionSetId}", id) + permissionSetARN, err := arn.Parse(id) + if err != nil { + return "", idFormatErr + } + + // We need: + // * The InstanceID portion of the permission set ARN resource (arn:aws:sso:::permissionSet/${InstanceId}/${PermissionSetId}) + // Split up the resource of the permission set ARN + resourceParts := strings.Split(permissionSetARN.Resource, "/") + if len(resourceParts) != 3 || resourceParts[0] != "permissionSet" || resourceParts[1] == "" || resourceParts[2] == "" { + return "", idFormatErr + } + + // resourceParts = ["permissionSet","ins-123456A", "ps-56789B"] + instanceARN := &arn.ARN{ + Partition: permissionSetARN.Partition, + Service: permissionSetARN.Service, + Resource: fmt.Sprintf("instance/%s", resourceParts[1]), + } + + return instanceARN.String(), nil +} + +func waitForPermissionSetProvisioning(ssoadminconn *ssoadmin.SSOAdmin, instanceArn string, requestID string, timeout time.Duration) (*ssoadmin.PermissionSetProvisioningStatus, error) { + + stateConf := resource.StateChangeConf{ + Delay: AWSSSOPermissionSetProvisioningRetryDelay, + Pending: []string{ssoadmin.StatusValuesInProgress}, + Target: []string{ssoadmin.StatusValuesSucceeded}, + Timeout: timeout, + MinTimeout: AWSSSOPermissionSetProvisioningRetryMinTimeout, + Refresh: resourceAwsSsoPermissionSetProvisioningRefreshFunc(ssoadminconn, requestID, instanceArn), + } + status, err := stateConf.WaitForState() + if err != nil { + return nil, fmt.Errorf("Error waiting for AWS SSO Permission Set provisioning status: %s", err) + } + return status.(*ssoadmin.PermissionSetProvisioningStatus), nil +} + +func resourceAwsSsoPermissionSetProvisioningRefreshFunc(ssoadminconn *ssoadmin.SSOAdmin, requestID, 
instanceArn string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + input := &ssoadmin.DescribePermissionSetProvisioningStatusInput{ + InstanceArn: aws.String(instanceArn), + ProvisionPermissionSetRequestId: aws.String(requestID), + } + + resp, err := ssoadminconn.DescribePermissionSetProvisioningStatus(input) + if err != nil { + return resp, "", fmt.Errorf("Error describing permission set provisioning status: %s", err) + } + status := resp.PermissionSetProvisioningStatus + if aws.StringValue(status.Status) == ssoadmin.StatusValuesFailed { + return resp, ssoadmin.StatusValuesFailed, fmt.Errorf("Failed to provision AWS SSO Permission Set (%s): %s", aws.StringValue(status.PermissionSetArn), aws.StringValue(status.FailureReason)) + } + return status, aws.StringValue(status.Status), nil + + } +} diff --git a/aws/resource_aws_sso_permission_set_test.go b/aws/resource_aws_sso_permission_set_test.go new file mode 100644 index 00000000000..3baf6cdc3e9 --- /dev/null +++ b/aws/resource_aws_sso_permission_set_test.go @@ -0,0 +1,303 @@ +package aws + +import ( + "fmt" + "strings" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfawsresource" +) + +func TestAccAWSSSOPermissionSet_Basic(t *testing.T) { + var permissionSet, updatedPermissionSet ssoadmin.PermissionSet + resourceName := "aws_sso_permission_set.example" + rName := acctest.RandomWithPrefix("tf-sso-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOPermissionSetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSSOPermissionSetBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), + resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "1"), + tfawsresource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "description", "Just a test"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccSSOPermissionSetBasicConfigUpdated(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOPermissionSetExists(resourceName, &updatedPermissionSet), + resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "2"), + tfawsresource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), + tfawsresource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "description", "Just a test update"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + }, + }) +} + +func TestAccAWSSSOPermissionSet_Disappears(t *testing.T) { + var permissionSet ssoadmin.PermissionSet + resourceName := 
"aws_sso_permission_set.example" + rName := acctest.RandomWithPrefix("tf-sso-test") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOPermissionSetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSSOPermissionSetBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), + testAccCheckAWSSSOPermissionSetDisappears(&permissionSet), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccAWSSSOPermissionSet_Tags(t *testing.T) { + var permissionSet ssoadmin.PermissionSet + resourceName := "aws_sso_permission_set.example" + rName := acctest.RandomWithPrefix("tf-sso-test") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOPermissionSetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSSOPermissionSetConfigTagsSingle(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccSSOPermissionSetConfigTagsMultiple(rName, "key1", "updatedvalue1", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "updatedvalue1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccSSOPermissionSetConfigTagsSingle(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + +func testAccCheckAWSSSOPermissionSetExists(resourceName string, permissionSet *ssoadmin.PermissionSet) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("Resource (%s) ID not set", resourceName) + } + + instanceArn, err := resourceAwsSsoPermissionSetParseID(rs.Primary.ID) + + if err != nil { + return err + } + + ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn + + permissionSetResp, permissionSetErr := ssoadminconn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(rs.Primary.ID), + }) + + if permissionSetErr != nil { + return permissionSetErr + } + + if *permissionSetResp.PermissionSet.PermissionSetArn == rs.Primary.ID { + *permissionSet = *permissionSetResp.PermissionSet + return nil + } + + return fmt.Errorf("AWS SSO Permission Set (%s) not found", rs.Primary.ID) + } +} + +func testAccCheckAWSSSOPermissionSetDestroy(s *terraform.State) error { + ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_sso_permission_set" { + continue 
+ } + + idFormatErr := fmt.Errorf("Unexpected format of ARN (%s), expected arn:${Partition}:sso:::permissionSet/${InstanceId}/${PermissionSetId}", rs.Primary.ID) + permissionSetArn, err := arn.Parse(rs.Primary.ID) + if err != nil { + return err + } + + resourceParts := strings.Split(permissionSetArn.Resource, "/") + if len(resourceParts) != 3 || resourceParts[0] != "permissionSet" || resourceParts[1] == "" || resourceParts[2] == "" { + return idFormatErr + } + + // resourceParts = ["permissionSet","ins-123456A", "ps-56789B"] + instanceArn := arn.ARN{ + Partition: permissionSetArn.Partition, + Service: permissionSetArn.Service, + Resource: fmt.Sprintf("instance/%s", resourceParts[1]), + }.String() + + input := &ssoadmin.DescribePermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(rs.Primary.ID), + } + + output, err := ssoadminconn.DescribePermissionSet(input) + + if isAWSErr(err, ssoadmin.ErrCodeResourceNotFoundException, "") { + continue + } + + if err != nil { + return err + } + + if output != nil { + return fmt.Errorf("AWS SSO Permission Set (%s) still exists", rs.Primary.ID) + } + } + + return nil +} + +func testAccCheckAWSSSOPermissionSetDisappears(permissionSet *ssoadmin.PermissionSet) resource.TestCheckFunc { + return func(s *terraform.State) error { + ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn + + permissionSetArn, permissionSetErr := arn.Parse(*permissionSet.PermissionSetArn) + if permissionSetErr != nil { + return permissionSetErr + } + + resourceParts := strings.Split(permissionSetArn.Resource, "/") + + // resourceParts = ["permissionSet","ins-123456A", "ps-56789B"] + instanceArn := arn.ARN{ + Partition: permissionSetArn.Partition, + Service: permissionSetArn.Service, + Resource: fmt.Sprintf("instance/%s", resourceParts[1]), + }.String() + + input := &ssoadmin.DeletePermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: permissionSet.PermissionSetArn, + } + + _, err := ssoadminconn.DeletePermissionSet(input) + + return err + + } +} + +func testAccSSOPermissionSetBasicConfig(rName string) string { + return fmt.Sprintf(` +data "aws_sso_instance" "selected" {} + +resource "aws_sso_permission_set" "example" { + name = "%s" + description = "Just a test" + instance_arn = data.aws_sso_instance.selected.arn + managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] +} +`, rName) +} + +func testAccSSOPermissionSetBasicConfigUpdated(rName string) string { + return fmt.Sprintf(` +data "aws_sso_instance" "selected" {} + +resource "aws_sso_permission_set" "example" { + name = "%s" + description = "Just a test update" + instance_arn = data.aws_sso_instance.selected.arn + managed_policy_arns = [ + "arn:aws:iam::aws:policy/ReadOnlyAccess", + "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess" + ] +} +`, rName) +} + +func testAccSSOPermissionSetConfigTagsSingle(rName, tagKey1, tagValue1 string) string { + return fmt.Sprintf(` +data "aws_sso_instance" "selected" {} + +resource "aws_sso_permission_set" "example" { + name = "%s" + description = "Just a test" + instance_arn = data.aws_sso_instance.selected.arn + managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1) +} + +func testAccSSOPermissionSetConfigTagsMultiple(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return fmt.Sprintf(` +data "aws_sso_instance" "selected" {} + +resource "aws_sso_permission_set" "example" { + name = "%s" + description = "Just a test" 
+ instance_arn = data.aws_sso_instance.selected.arn + managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2) +} diff --git a/website/docs/d/sso_permission_set.html.markdown b/website/docs/d/sso_permission_set.html.markdown new file mode 100644 index 00000000000..c6e90109ef7 --- /dev/null +++ b/website/docs/d/sso_permission_set.html.markdown @@ -0,0 +1,47 @@ +--- +subcategory: "SSO Admin" +layout: "aws" +page_title: "AWS: aws_sso_permission_set" +description: |- + Get information on an AWS Single Sign-On Permission Set. +--- + +# Data Source: aws_sso_permission_set + +Use this data source to get information on an AWS Single Sign-On Permission Set. + +## Example Usage + +```hcl +data "aws_sso_instance" "selected" {} + +data "aws_sso_permission_set" "example" { + instance_arn = data.aws_sso_instance.selected.arn + name = "Example" +} + +output "arn" { + value = data.aws_sso_permission_set.example.arn +} +``` + +## Argument Reference + +The following arguments are supported: + +* `instance_arn` - (Required) The AWS ARN associated with the AWS Single Sign-On Instance. +* `name` - (Required) The name of the AWS Single Sign-On Permission Set. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - The ARN of the permission set. +* `arn` - The ARN of the permission set. +* `created_date` - The creation date of the permission set. +* `description` - The description of the permission set. +* `session_duration` - The session duration of the permission set. +* `relay_state` - The relay state of the permission set. +* `inline_policy` - The inline policy of the permission set. +* `managed_policy_arns` - The managed policies attached to the permission set. +* `tags` - The tags of the permission set. \ No newline at end of file diff --git a/website/docs/r/sso_permission_set.html.markdown b/website/docs/r/sso_permission_set.html.markdown new file mode 100644 index 00000000000..be261c581cb --- /dev/null +++ b/website/docs/r/sso_permission_set.html.markdown @@ -0,0 +1,73 @@ +--- +subcategory: "SSO Admin" +layout: "aws" +page_title: "AWS: aws_sso_permission_set" +description: |- + Manages an AWS Single Sign-On permission set +--- + +# Resource: aws_sso_permission_set + +Provides an AWS Single Sign-On Permission Set resource. + +## Example Usage + +```hcl +data "aws_sso_instance" "selected" {} + +data "aws_iam_policy_document" "example" { + statement { + sid = "1" + + actions = [ + "s3:ListAllMyBuckets", + "s3:GetBucketLocation", + ] + + resources = [ + "arn:aws:s3:::*", + ] + } +} + +resource "aws_sso_permission_set" "example" { + name = "Example" + description = "An example" + instance_arn = data.aws_sso_instance.selected.arn + session_duration = "PT1H" + relay_state = "https://console.aws.amazon.com/console/home" + inline_policy = data.aws_iam_policy_document.example.json + managed_policy_arns = [ + "arn:aws:iam::aws:policy/ReadOnlyAccess", + ] +} +``` + +## Argument Reference + +The following arguments are supported: + +* `instance_arn` - (Required) The AWS ARN associated with the AWS Single Sign-On Instance. +* `name` - (Required) The name of the AWS Single Sign-On Permission Set. +* `description` - (Optional) The description of the AWS Single Sign-On Permission Set. +* `session_duration` - (Optional) The session duration of the AWS Single Sign-On Permission Set, in ISO-8601 duration format. The default value is `PT1H`.
+* `relay_state` - (Optional) The relay state of the AWS Single Sign-On Permission Set. +* `inline_policy` - (Optional) The inline policy of the AWS Single Sign-On Permission Set. +* `managed_policy_arns` - (Optional) The managed policies attached to the AWS Single Sign-On Permission Set. +* `tags` - (Optional) Key-value map of resource tags. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - The ARN of the AWS Single Sign-On Permission Set. +* `arn` - The ARN of the AWS Single Sign-On Permission Set. +* `created_date` - The creation date of the AWS Single Sign-On Permission Set. + +## Import + +`aws_sso_permission_set` can be imported by using the AWS Single Sign-On Permission Set Amazon Resource Name (ARN), e.g. + +``` +$ terraform import aws_sso_permission_set.example arn:aws:sso:::permissionSet/ssoins-2938j0x8920sbj72/ps-80383020jr9302rk +``` \ No newline at end of file From a0a625bd0c2ea999967aecc0446110f0d44ff7d4 Mon Sep 17 00:00:00 2001 From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com> Date: Fri, 23 Oct 2020 10:56:43 -0500 Subject: [PATCH 0420/1212] use testAccCheckResourceDisappears --- aws/resource_aws_sso_permission_set_test.go | 32 +-------------------- 1 file changed, 1 insertion(+), 31 deletions(-) diff --git a/aws/resource_aws_sso_permission_set_test.go b/aws/resource_aws_sso_permission_set_test.go index 3baf6cdc3e9..7a8b1cdaf2e 100644 --- a/aws/resource_aws_sso_permission_set_test.go +++ b/aws/resource_aws_sso_permission_set_test.go @@ -70,7 +70,7 @@ func TestAccAWSSSOPermissionSet_Disappears(t *testing.T) { Config: testAccSSOPermissionSetBasicConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), - testAccCheckAWSSSOPermissionSetDisappears(&permissionSet), + testAccCheckResourceDisappears(testAccProvider, resourceAwsSsoPermissionSet(), resourceName), ), ExpectNonEmptyPlan: true, }, @@ -208,36 +208,6 @@ func testAccCheckAWSSSOPermissionSetDestroy(s *terraform.State) error { return nil } -func testAccCheckAWSSSOPermissionSetDisappears(permissionSet *ssoadmin.PermissionSet) resource.TestCheckFunc { - return func(s *terraform.State) error { - ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn - - permissionSetArn, permissionSetErr := arn.Parse(*permissionSet.PermissionSetArn) - if permissionSetErr != nil { - return permissionSetErr - } - - resourceParts := strings.Split(permissionSetArn.Resource, "/") - - // resourceParts = ["permissionSet","ins-123456A", "ps-56789B"] - instanceArn := arn.ARN{ - Partition: permissionSetArn.Partition, - Service: permissionSetArn.Service, - Resource: fmt.Sprintf("instance/%s", resourceParts[1]), - }.String() - - input := &ssoadmin.DeletePermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: permissionSet.PermissionSetArn, - } - - _, err := ssoadminconn.DeletePermissionSet(input) - - return err - - } -} From b098aed22b03e3a646d2e8ed8b0a8e4032754596 Mon Sep 17 00:00:00 2001 From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com> Date: Tue, 3 Nov 2020 20:04:33 -0600 Subject: [PATCH 0421/1212] fix misspelling --- aws/data_source_aws_sso_permission_set.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/data_source_aws_sso_permission_set.go b/aws/data_source_aws_sso_permission_set.go index 58ac29a0ba8..b468afadfae 100644
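The import path documented above hinges on one transformation: deriving the SSO instance ARN from the permission set ARN, as done by `resourceAwsSsoPermissionSetParseID` earlier in this series. A minimal standalone sketch of that derivation, reusing the placeholder ARN from the import documentation and assuming the documented `arn:${Partition}:sso:::permissionSet/${InstanceId}/${PermissionSetId}` layout:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws/arn"
)

func main() {
	// Example permission set ARN in the documented format (placeholder IDs, not a real resource).
	id := "arn:aws:sso:::permissionSet/ssoins-2938j0x8920sbj72/ps-80383020jr9302rk"

	parsed, err := arn.Parse(id)
	if err != nil {
		panic(err)
	}

	// Resource is "permissionSet/${InstanceId}/${PermissionSetId}".
	parts := strings.Split(parsed.Resource, "/")

	// Rebuild the instance ARN from the same partition and service.
	instanceARN := arn.ARN{
		Partition: parsed.Partition,
		Service:   parsed.Service,
		Resource:  fmt.Sprintf("instance/%s", parts[1]),
	}

	fmt.Println(instanceARN.String())
	// Output: arn:aws:sso:::instance/ssoins-2938j0x8920sbj72
}
```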
--- a/aws/data_source_aws_sso_permission_set.go +++ b/aws/data_source_aws_sso_permission_set.go @@ -160,7 +160,7 @@ func dataSourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) tags, tagsErr := keyvaluetags.SsoListTags(conn, permissionSetArn, instanceArn) if tagsErr != nil { - return fmt.Errorf("Error listing tags for ASW SSO Permission Set (%s): %s", permissionSetArn, tagsErr) + return fmt.Errorf("Error listing tags for AWS SSO Permission Set (%s): %s", permissionSetArn, tagsErr) } d.SetId(permissionSetArn) From 48ab1f0ea8b82d26e8d69bcd62241e3a94a1ac49 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Wed, 4 Nov 2020 19:57:56 +0000 Subject: [PATCH 0422/1212] fix aws partition lints --- aws/data_source_aws_sso_permission_set.go | 2 +- aws/data_source_aws_sso_permission_set_test.go | 8 ++++---- aws/resource_aws_sso_permission_set.go | 2 +- aws/resource_aws_sso_permission_set_test.go | 14 +++++++------- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/aws/data_source_aws_sso_permission_set.go b/aws/data_source_aws_sso_permission_set.go index b468afadfae..fdff316aa02 100644 --- a/aws/data_source_aws_sso_permission_set.go +++ b/aws/data_source_aws_sso_permission_set.go @@ -33,7 +33,7 @@ func dataSourceAwsSsoPermissionSet() *schema.Resource { Required: true, ValidateFunc: validation.All( validation.StringLenBetween(10, 1224), - validation.StringMatch(regexp.MustCompile(`^arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}$`), "must match arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}"), + validation.StringMatch(regexp.MustCompile(`^arn:aws(-[a-z]+)*:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}$`), "must match arn:aws(-[a-z]+)*:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}"), ), }, diff --git a/aws/data_source_aws_sso_permission_set_test.go b/aws/data_source_aws_sso_permission_set_test.go index 1afa28ef056..bf9c82cc77c 100644 --- a/aws/data_source_aws_sso_permission_set_test.go +++ b/aws/data_source_aws_sso_permission_set_test.go @@ -21,7 +21,7 @@ func TestAccDataSourceAwsSsoPermissionSet_Basic(t *testing.T) { Config: testAccDataSourceAwsSsoPermissionSetConfigBasic(rName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(datasourceName, "managed_policy_arns.#", "1"), - tfawsresource.TestCheckTypeSetElemAttr(datasourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), + tfawsresource.TestCheckTypeSetElemAttr(datasourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), // lintignore:AWSAT005 resource.TestCheckResourceAttr(datasourceName, "name", rName), resource.TestCheckResourceAttr(datasourceName, "description", "testing"), resource.TestCheckResourceAttr(datasourceName, "session_duration", "PT1H"), @@ -45,7 +45,7 @@ func TestAccDataSourceAwsSsoPermissionSet_Tags(t *testing.T) { Config: testAccDataSourceAwsSsoPermissionSetConfigByTags(rName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(datasourceName, "managed_policy_arns.#", "1"), - tfawsresource.TestCheckTypeSetElemAttr(datasourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), + tfawsresource.TestCheckTypeSetElemAttr(datasourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), // lintignore:AWSAT005 resource.TestCheckResourceAttr(datasourceName, "name", rName), resource.TestCheckResourceAttr(datasourceName, "description", "testing"), resource.TestCheckResourceAttr(datasourceName, "session_duration", "PT1H"), @@ -74,7 +74,7 @@ data "aws_sso_permission_set" "test" { instance_arn 
= data.aws_sso_instance.selected.arn name = aws_sso_permission_set.test.name } -`, rName) +`, rName) // lintignore:AWSAT005 } func testAccDataSourceAwsSsoPermissionSetConfigByTags(rName string) string { @@ -106,5 +106,5 @@ data "aws_sso_permission_set" "test" { Key3 = "Value3" } } -`, rName) +`, rName) // lintignore:AWSAT005 } diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index 4473240d72a..159e557eca7 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -75,7 +75,7 @@ func resourceAwsSsoPermissionSet() *schema.Resource { ForceNew: true, ValidateFunc: validation.All( validation.StringLenBetween(10, 1224), - validation.StringMatch(regexp.MustCompile(`^arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}$`), "must match arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}"), + validation.StringMatch(regexp.MustCompile(`^arn:aws(-[a-z]+)*:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}$`), "must match arn:aws(-[a-z]+)*:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}"), ), }, diff --git a/aws/resource_aws_sso_permission_set_test.go b/aws/resource_aws_sso_permission_set_test.go index 7a8b1cdaf2e..6e33304fb05 100644 --- a/aws/resource_aws_sso_permission_set_test.go +++ b/aws/resource_aws_sso_permission_set_test.go @@ -29,7 +29,7 @@ func TestAccAWSSSOPermissionSet_Basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "1"), - tfawsresource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), + tfawsresource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), // lintignore:AWSAT005 resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "description", "Just a test"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), @@ -45,8 +45,8 @@ func TestAccAWSSSOPermissionSet_Basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAWSSSOPermissionSetExists(resourceName, &updatedPermissionSet), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "2"), - tfawsresource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), - tfawsresource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess"), + tfawsresource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), // lintignore:AWSAT005 + tfawsresource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess"), // lintignore:AWSAT005 resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "description", "Just a test update"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), @@ -218,7 +218,7 @@ resource "aws_sso_permission_set" "example" { instance_arn = data.aws_sso_instance.selected.arn managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] } -`, rName) +`, rName) // lintignore:AWSAT005 } func testAccSSOPermissionSetBasicConfigUpdated(rName string) string { @@ -234,7 +234,7 @@ resource "aws_sso_permission_set" "example" { "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess" ] } -`, rName) +`, rName) // lintignore:AWSAT005 } func 
testAccSSOPermissionSetConfigTagsSingle(rName, tagKey1, tagValue1 string) string { @@ -251,7 +251,7 @@ resource "aws_sso_permission_set" "example" { %[2]q = %[3]q } } -`, rName, tagKey1, tagValue1) +`, rName, tagKey1, tagValue1) // lintignore:AWSAT005 } func testAccSSOPermissionSetConfigTagsMultiple(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { @@ -269,5 +269,5 @@ resource "aws_sso_permission_set" "example" { %[4]q = %[5]q } } -`, rName, tagKey1, tagValue1, tagKey2, tagValue2) +`, rName, tagKey1, tagValue1, tagKey2, tagValue2) // lintignore:AWSAT005 } From b105c99d8e54ffd1db84eb583f1699244a5fd2cf Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Wed, 4 Nov 2020 20:34:26 +0000 Subject: [PATCH 0423/1212] fix aws_sso_instance id --- aws/data_source_aws_sso_instance.go | 30 +++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/aws/data_source_aws_sso_instance.go b/aws/data_source_aws_sso_instance.go index 2104b5f0cd0..b642f832e51 100644 --- a/aws/data_source_aws_sso_instance.go +++ b/aws/data_source_aws_sso_instance.go @@ -3,8 +3,10 @@ package aws import ( "fmt" "log" - "time" + "strings" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ssoadmin" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -55,9 +57,33 @@ func dataSourceAwsSsoInstanceRead(d *schema.ResourceData, meta interface{}) erro instance := instances[0] log.Printf("[DEBUG] Received AWS SSO Instance: %s", instance) - d.SetId(time.Now().UTC().String()) + id, idErr := dataSourceAwsSsoInstanceID(aws.StringValue(instance.InstanceArn), aws.StringValue(instance.IdentityStoreId)) + if idErr != nil { + return idErr + } + d.SetId(id) + d.Set("arn", instance.InstanceArn) d.Set("identity_store_id", instance.IdentityStoreId) return nil } + +func dataSourceAwsSsoInstanceID(instanceArn string, identityStoreId string) (string, error) { + // arn:${Partition}:sso:::instance/${InstanceId} + iArn, err := arn.Parse(instanceArn) + if err != nil { + return "", err + } + iArnResourceParts := strings.Split(iArn.Resource, "/") + if len(iArnResourceParts) != 2 || iArnResourceParts[0] != "instance" || iArnResourceParts[1] == "" { + return "", fmt.Errorf("Unexpected format of ARN (%s), expected arn:${Partition}:sso:::instance/${InstanceId}", instanceArn) + } + instanceID := iArnResourceParts[1] + + vars := []string{ + instanceID, + identityStoreId, + } + return strings.Join(vars, "/"), nil +} From e33652fb8d657ccd931440ffa777d9fbb9a457ff Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 1 Jan 2021 16:47:57 -0600 Subject: [PATCH 0424/1212] Update aws/data_source_aws_sso_instance.go Co-authored-by: angie pinilla --- aws/data_source_aws_sso_instance.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/data_source_aws_sso_instance.go b/aws/data_source_aws_sso_instance.go index b642f832e51..cdb49ea9e8e 100644 --- a/aws/data_source_aws_sso_instance.go +++ b/aws/data_source_aws_sso_instance.go @@ -41,7 +41,7 @@ func dataSourceAwsSsoInstanceRead(d *schema.ResourceData, meta interface{}) erro return !lastPage }) if err != nil { - return fmt.Errorf("Error getting AWS SSO Instances: %s", err) + return fmt.Errorf("error getting AWS SSO Instances: %w", err) } if len(instances) == 0 { From cca808112257847845f89c94f5ed7f0f82536b25 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 1 Jan 2021 16:48:38 -0600 Subject: [PATCH 0425/1212] Update aws/data_source_aws_sso_instance.go Co-authored-by: angie pinilla --- 
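The partition-lint fixes above widen the instance ARN validation from a hardcoded `arn:aws` prefix to any AWS partition. A quick, self-contained check of the committed pattern against a few fabricated ARNs (the instance IDs are placeholders, not real resources):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Pattern exactly as committed in the validation fix above.
	re := regexp.MustCompile(`^arn:aws(-[a-z]+)*:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}$`)

	for _, s := range []string{
		"arn:aws:sso:::instance/ssoins-2938j0x8920sbj72",        // commercial partition: matches
		"arn:aws-us-gov:sso:::instance/ssoins-2938j0x8920sbj72", // GovCloud partition: now matches too
		"arn:aws:sso:::instance/bogus",                          // malformed instance ID: rejected
	} {
		fmt.Printf("%-60s %v\n", s, re.MatchString(s))
	}
}
```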
aws/data_source_aws_sso_instance.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/aws/data_source_aws_sso_instance.go b/aws/data_source_aws_sso_instance.go index cdb49ea9e8e..49be0616201 100644 --- a/aws/data_source_aws_sso_instance.go +++ b/aws/data_source_aws_sso_instance.go @@ -45,9 +45,7 @@ func dataSourceAwsSsoInstanceRead(d *schema.ResourceData, meta interface{}) erro } if len(instances) == 0 { - log.Printf("[DEBUG] No AWS SSO Instance found") - d.SetId("") - return nil + return fmt.Errorf("error getting AWS SSO Instances: no instance found") } if len(instances) > 1 { From 85249f3c9225c72843bc090fd7aa8fe0015fca66 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 1 Jan 2021 17:02:01 -0600 Subject: [PATCH 0426/1212] Update aws/data_source_aws_sso_instance.go Co-authored-by: angie pinilla --- aws/data_source_aws_sso_instance.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/aws/data_source_aws_sso_instance.go b/aws/data_source_aws_sso_instance.go index 49be0616201..f5a21dd1402 100644 --- a/aws/data_source_aws_sso_instance.go +++ b/aws/data_source_aws_sso_instance.go @@ -55,12 +55,7 @@ func dataSourceAwsSsoInstanceRead(d *schema.ResourceData, meta interface{}) erro instance := instances[0] log.Printf("[DEBUG] Received AWS SSO Instance: %s", instance) - id, idErr := dataSourceAwsSsoInstanceID(aws.StringValue(instance.InstanceArn), aws.StringValue(instance.IdentityStoreId)) - if idErr != nil { - return idErr - } - d.SetId(id) - + d.SetId(aws.StringValue(instance.InstanceArn)) d.Set("arn", instance.InstanceArn) d.Set("identity_store_id", instance.IdentityStoreId) From 47120773f357537be4ed163ed39ec4327e33f59b Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 1 Jan 2021 17:04:14 -0600 Subject: [PATCH 0427/1212] Update aws/data_source_aws_sso_instance.go Co-authored-by: angie pinilla --- aws/data_source_aws_sso_instance.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/data_source_aws_sso_instance.go b/aws/data_source_aws_sso_instance.go index f5a21dd1402..3def0a6f3a1 100644 --- a/aws/data_source_aws_sso_instance.go +++ b/aws/data_source_aws_sso_instance.go @@ -53,7 +53,7 @@ func dataSourceAwsSsoInstanceRead(d *schema.ResourceData, meta interface{}) erro } instance := instances[0] - log.Printf("[DEBUG] Received AWS SSO Instance: %s", instance) + log.Printf("[DEBUG] Received AWS SSO Instance: %s", aws.StringValue(instance.InstanceArn)) d.SetId(aws.StringValue(instance.InstanceArn)) d.Set("arn", instance.InstanceArn) From 9bef8e53ec121134e09c759d156cd1a235770c3e Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 1 Jan 2021 17:07:12 -0600 Subject: [PATCH 0428/1212] remove unused function --- aws/data_source_aws_sso_instance.go | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/aws/data_source_aws_sso_instance.go b/aws/data_source_aws_sso_instance.go index 3def0a6f3a1..d4a125a39fb 100644 --- a/aws/data_source_aws_sso_instance.go +++ b/aws/data_source_aws_sso_instance.go @@ -61,22 +61,3 @@ func dataSourceAwsSsoInstanceRead(d *schema.ResourceData, meta interface{}) erro return nil } - -func dataSourceAwsSsoInstanceID(instanceArn string, identityStoreId string) (string, error) { - // arn:${Partition}:sso:::instance/${InstanceId} - iArn, err := arn.Parse(instanceArn) - if err != nil { - return "", err - } - iArnResourceParts := strings.Split(iArn.Resource, "/") - if len(iArnResourceParts) != 2 || iArnResourceParts[0] != "instance" || iArnResourceParts[1] == "" { - return "", fmt.Errorf("Unexpected format 
of ARN (%s), expected arn:${Partition}:sso:::instance/${InstanceId}", instanceArn) - } - instanceID := iArnResourceParts[1] - - vars := []string{ - instanceID, - identityStoreId, - } - return strings.Join(vars, "/"), nil -} From a35a77bb014623adf476ef5eb14ad54a9d3ca482 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 1 Jan 2021 17:16:56 -0600 Subject: [PATCH 0429/1212] Update aws/data_source_aws_sso_permission_set.go Co-authored-by: angie pinilla --- aws/data_source_aws_sso_permission_set.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/aws/data_source_aws_sso_permission_set.go b/aws/data_source_aws_sso_permission_set.go index fdff316aa02..25050b4f733 100644 --- a/aws/data_source_aws_sso_permission_set.go +++ b/aws/data_source_aws_sso_permission_set.go @@ -126,9 +126,7 @@ func dataSourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) } if permissionSet == nil { - log.Printf("[DEBUG] AWS SSO Permission Set %v not found", name) - d.SetId("") - return nil + return fmt.Errorf("AWS SSO Permission Set %s not found", name) } log.Printf("[DEBUG] Found AWS SSO Permission Set: %s", permissionSet) From c7b1c878c217e900a9dd4c35897bab5235bdd2c6 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 1 Jan 2021 17:17:16 -0600 Subject: [PATCH 0430/1212] Update aws/data_source_aws_sso_permission_set.go Co-authored-by: angie pinilla --- aws/data_source_aws_sso_permission_set.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/data_source_aws_sso_permission_set.go b/aws/data_source_aws_sso_permission_set.go index 25050b4f733..81369bd51b4 100644 --- a/aws/data_source_aws_sso_permission_set.go +++ b/aws/data_source_aws_sso_permission_set.go @@ -118,7 +118,7 @@ func dataSourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) }) if err != nil { - return fmt.Errorf("Error getting AWS SSO Permission Sets: %s", err) + return fmt.Errorf("Error getting AWS SSO Permission Sets: %w", err) } if permissionSetErr != nil { From e2587d49d082bb8c191157f5aa39422124e0f731 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 1 Jan 2021 17:17:36 -0600 Subject: [PATCH 0431/1212] Update aws/data_source_aws_sso_permission_set.go Co-authored-by: angie pinilla --- aws/data_source_aws_sso_permission_set.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/data_source_aws_sso_permission_set.go b/aws/data_source_aws_sso_permission_set.go index 81369bd51b4..8d8568544ad 100644 --- a/aws/data_source_aws_sso_permission_set.go +++ b/aws/data_source_aws_sso_permission_set.go @@ -122,7 +122,7 @@ func dataSourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) } if permissionSetErr != nil { - return fmt.Errorf("Error getting AWS SSO Permission Set: %s", permissionSetErr) + return fmt.Errorf("Error getting AWS SSO Permission Set: %w", permissionSetErr) } if permissionSet == nil { From b0e14a2bd9f076b4a317da16b8560d89728a2823 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 1 Jan 2021 17:18:00 -0600 Subject: [PATCH 0432/1212] Update aws/resource_aws_sso_permission_set.go Co-authored-by: angie pinilla --- aws/resource_aws_sso_permission_set.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index 159e557eca7..acdf852bf85 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -262,7 +262,7 @@ func resourceAwsSsoPermissionSetCreate(d *schema.ResourceData, meta interface{}) } permissionSetArn 
:= createPermissionSetResp.PermissionSet.PermissionSetArn - d.SetId(*permissionSetArn) + d.SetId(aws.StringValue(permissionSetArn)) if attachPoliciesErr := attachPoliciesToPermissionSet(ssoadminconn, d, permissionSetArn, instanceArn); attachPoliciesErr != nil { return attachPoliciesErr From 8b850702aedfc79842f546dc829f82aa372bdd39 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 1 Jan 2021 17:19:28 -0600 Subject: [PATCH 0433/1212] Update aws/resource_aws_sso_permission_set.go Co-authored-by: angie pinilla --- aws/resource_aws_sso_permission_set.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index acdf852bf85..24361f8a085 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -335,10 +335,7 @@ func resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error listing tags for AWS SSO Permission Set (%s): %s", permissionSetArn, err) } - err = d.Set("arn", permissionSetArn) - if err != nil { - return err - } + d.Set("arn", permissionSetArn) err = d.Set("created_date", permissionSet.CreatedDate.Format(time.RFC3339)) if err != nil { return err From 207b3faadf5a9f8601edb1d88dcca1920a84aabd Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 1 Jan 2021 17:20:30 -0600 Subject: [PATCH 0434/1212] Update aws/resource_aws_sso_permission_set_test.go Co-authored-by: angie pinilla --- aws/resource_aws_sso_permission_set_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_sso_permission_set_test.go b/aws/resource_aws_sso_permission_set_test.go index 6e33304fb05..9edaef0d12c 100644 --- a/aws/resource_aws_sso_permission_set_test.go +++ b/aws/resource_aws_sso_permission_set_test.go @@ -192,7 +192,7 @@ func testAccCheckAWSSSOPermissionSetDestroy(s *terraform.State) error { output, err := ssoadminconn.DescribePermissionSet(input) - if isAWSErr(err, ssoadmin.ErrCodeResourceNotFoundException, "") { + if tfawserr.ErrCodeEquals(err, ssoadmin.ErrCodeResourceNotFoundException) { continue } From 46615f236a3e951d96e63adc11464e7f1a76687d Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 1 Jan 2021 17:40:39 -0600 Subject: [PATCH 0435/1212] Update aws/resource_aws_sso_permission_set.go Co-authored-by: angie pinilla --- aws/resource_aws_sso_permission_set.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index 24361f8a085..06fa2cb1af0 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -364,8 +364,7 @@ func resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) e if err != nil { return err } - err = d.Set("managed_policy_arns", managedPolicyArns) - if err != nil { + if err = d.Set("managed_policy_arns", managedPolicyArns); err != nil { return err } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { From 014e53e0974c38dc33ed49d066751e8d757db35e Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 1 Jan 2021 17:41:51 -0600 Subject: [PATCH 0436/1212] Update aws/resource_aws_sso_permission_set_test.go Co-authored-by: angie pinilla --- aws/resource_aws_sso_permission_set_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_sso_permission_set_test.go b/aws/resource_aws_sso_permission_set_test.go index 9edaef0d12c..78f2faebe8d 100644 --- 
a/aws/resource_aws_sso_permission_set_test.go +++ b/aws/resource_aws_sso_permission_set_test.go @@ -150,10 +150,10 @@ func testAccCheckAWSSSOPermissionSetExists(resourceName string, permissionSet *s return permissionSetErr } - if *permissionSetResp.PermissionSet.PermissionSetArn == rs.Primary.ID { - *permissionSet = *permissionSetResp.PermissionSet + if permissionSetResp != nil && aws.StringValue(permissionSetResp.PermissionSet.PermissionSetArn) == rs.Primary.ID { + *permissionSet = *permissionSetResp.PermissionSet return nil - } + } return fmt.Errorf("AWS SSO Permission Set (%s) not found", rs.Primary.ID) } From 6695ca1557ecd1cf9b378725304fe2d604e2f2f7 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 1 Jan 2021 17:42:46 -0600 Subject: [PATCH 0437/1212] Update aws/resource_aws_sso_permission_set.go Co-authored-by: angie pinilla --- aws/resource_aws_sso_permission_set.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index 06fa2cb1af0..78ab6d9521d 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -261,6 +261,9 @@ func resourceAwsSsoPermissionSetCreate(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error creating AWS SSO Permission Set: %s", createPermissionerr) } + if createPermissionSetResp == nil || createPermissionSetResp.PermissionSet == nil { + return fmt.Errorf("error creating AWS SSO Permission Set (%s): empty output, d.Get("name").(string)) + } permissionSetArn := createPermissionSetResp.PermissionSet.PermissionSetArn d.SetId(aws.StringValue(permissionSetArn)) From 7a2bfcb895c50e5f926199530473cbffceb83571 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 1 Jan 2021 17:44:10 -0600 Subject: [PATCH 0438/1212] Update aws/resource_aws_sso_permission_set.go Co-authored-by: angie pinilla --- aws/resource_aws_sso_permission_set.go | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index 78ab6d9521d..49016e92614 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -299,14 +299,8 @@ func resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) e if permissionerr != nil { return fmt.Errorf("Error getting AWS SSO Permission Set: %s", permissionerr) } - if aws.StringValue(permissionSetResp.PermissionSet.Name) == name { - permissionSet = permissionSetResp.PermissionSet - } - - if permissionSet == nil { - log.Printf("[WARN] AWS SSO Permission Set %s not found, removing from state", name) - d.SetId("") - return nil + if permissionSetResp == nil || permissionSetResp.PermissionSet == nil { + return fmt.Errorf("error reading AWS SSO Permission Set (%s): empty output", name) } log.Printf("[DEBUG] Found AWS SSO Permission Set: %s", permissionSet) From a0fd88a7ec6337808eb0422f8c9904a5e9f358ca Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 1 Jan 2021 17:46:26 -0600 Subject: [PATCH 0439/1212] fix missing quote --- aws/resource_aws_sso_permission_set.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index 49016e92614..81a2fc739b6 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -261,9 +261,10 @@ func resourceAwsSsoPermissionSetCreate(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error creating AWS SSO Permission Set: %s", createPermissionerr) } - if
createPermissionSetResp == nil || createPermissionSetResp.PermissionSet == nil { - return fmt.Errorf("error creating AWS SSO Permission Set (%s): empty output, d.Get("name").(string)) - } + if createPermissionSetResp == nil || createPermissionSetResp.PermissionSet == nil { + return fmt.Errorf("error creating AWS SSO Permission Set (%s): empty output", d.Get("name").(string)) + } + permissionSetArn := createPermissionSetResp.PermissionSet.PermissionSetArn d.SetId(aws.StringValue(permissionSetArn)) From 0a367d3dde28ed0ff6feb81be60749adea7f9aea Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 1 Jan 2021 17:50:56 -0600 Subject: [PATCH 0440/1212] Update aws/resource_aws_sso_permission_set.go Co-authored-by: angie pinilla --- aws/resource_aws_sso_permission_set.go | 55 +++++++++++++------------- 1 file changed, 28 insertions(+), 27 deletions(-) diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index 81a2fc739b6..56b730c796d 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -412,33 +412,34 @@ func resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) } } - if v, ok := d.GetOk("inline_policy"); ok { - log.Printf("[DEBUG] AWS SSO Permission Set %s updating IAM inline policy", permissionSetArn) - - inlinePolicy := aws.String(v.(string)) - - updateInput := &ssoadmin.PutInlinePolicyToPermissionSetInput{ - InlinePolicy: inlinePolicy, - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - } - - _, inlinePolicyErr := ssoadminconn.PutInlinePolicyToPermissionSet(updateInput) - if inlinePolicyErr != nil { - return fmt.Errorf("Error attaching IAM inline policy to AWS SSO Permission Set: %s", inlinePolicyErr) - } - } else if d.HasChange("inline_policy") { - deleteInput := &ssoadmin.DeleteInlinePolicyFromPermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - } - - _, inlinePolicyErr := ssoadminconn.DeleteInlinePolicyFromPermissionSet(deleteInput) - if inlinePolicyErr != nil { - return fmt.Errorf("Error deleting IAM inline policy from AWS SSO Permission Set: %s", inlinePolicyErr) - } - } - + if d.HasChange("inline_policy") { + if v, ok := d.GetOk("inline_policy"); ok { + log.Printf("[DEBUG] AWS SSO Permission Set %s updating IAM inline policy", permissionSetArn) + + inlinePolicy := aws.String(v.(string)) + + updateInput := &ssoadmin.PutInlinePolicyToPermissionSetInput{ + InlinePolicy: inlinePolicy, + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + } + + _, inlinePolicyErr := ssoadminconn.PutInlinePolicyToPermissionSet(updateInput) + if inlinePolicyErr != nil { + return fmt.Errorf("Error attaching IAM inline policy to AWS SSO Permission Set: %s", inlinePolicyErr) + } + } else { + deleteInput := &ssoadmin.DeleteInlinePolicyFromPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + } + + _, inlinePolicyErr := ssoadminconn.DeleteInlinePolicyFromPermissionSet(deleteInput) + if inlinePolicyErr != nil { + return fmt.Errorf("Error deleting IAM inline policy from AWS SSO Permission Set: %s", inlinePolicyErr) + } + } + } if d.HasChange("managed_policy_arns") { o, n := d.GetChange("managed_policy_arns") From 0b7692b7500622e661634fac4fe3b147496eca20 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 1 Jan 2021 17:52:46 -0600 Subject: [PATCH 0441/1212] Update aws/resource_aws_sso_permission_set.go Co-authored-by: angie
pinilla --- aws/resource_aws_sso_permission_set.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index 56b730c796d..227a45f2cd4 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -519,15 +519,11 @@ func resourceAwsSsoPermissionSetDelete(d *schema.ResourceData, meta interface{}) _, err := ssoadminconn.DeletePermissionSet(params) if err != nil { - if isAWSErr(err, ssoadmin.ErrCodeResourceNotFoundException, "") { - log.Printf("[DEBUG] AWS SSO Permission Set not found") - d.SetId("") + if tfawserr.ErrCodeEquals(err, ssoadmin.ErrCodeResourceNotFoundException) { return nil } - return fmt.Errorf("Error deleting AWS SSO Permission Set (%s): %s", d.Id(), err) + return fmt.Errorf("error deleting AWS SSO Permission Set (%s): %w", d.Id(), err) } - - d.SetId("") return nil } From 69ebf8368c246a0f8bb55e032747c0f97c50c92c Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 1 Jan 2021 17:54:14 -0600 Subject: [PATCH 0442/1212] Update website/docs/d/sso_instance.html.markdown Co-authored-by: angie pinilla --- website/docs/d/sso_instance.html.markdown | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/docs/d/sso_instance.html.markdown b/website/docs/d/sso_instance.html.markdown index 3cc268991ff..72f80407dd5 100644 --- a/website/docs/d/sso_instance.html.markdown +++ b/website/docs/d/sso_instance.html.markdown @@ -31,4 +31,5 @@ There are no arguments available for this data source. ## Attributes Reference * `arn` - The AWS ARN associated with the AWS Single Sign-On Instance. -* `identity_store_id` - The Identity Store ID associated with the AWS Single Sign-On Instance. \ No newline at end of file +* `id` - The AWS ARN associated with the AWS Single Sign-On Instance. +* `identity_store_id` - The Identity Store ID associated with the AWS Single Sign-On Instance. 
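The inline policy rework a few patches above turns on a Plugin SDK subtlety: `d.GetOk` returns `ok == false` both when an attribute was never configured and when it was just removed from configuration, so on its own it cannot detect removal. Checking `d.HasChange` first and only then branching on `GetOk` distinguishes "set or updated" from "cleared", and also skips the API call entirely when nothing changed. A minimal sketch of the pattern, assuming stand-ins: `put`/`del` stand in for the real `PutInlinePolicyToPermissionSet`/`DeleteInlinePolicyFromPermissionSet` calls, and the small interface is satisfied by `*schema.ResourceData`:

```go
package example

import "fmt"

// resourceData captures the two schema.ResourceData methods the pattern
// relies on: HasChange reports whether an attribute differs between prior
// state and new config, and GetOk returns the configured value plus a
// boolean that is false when the attribute is unset or zero-valued.
type resourceData interface {
	HasChange(key string) bool
	GetOk(key string) (interface{}, bool)
}

// syncInlinePolicy sketches the update flow from the patch above: GetOk
// alone cannot tell "never set" apart from "just removed", so the change
// check comes first and the empty case maps to an explicit remote delete.
func syncInlinePolicy(d resourceData, put func(policy string) error, del func() error) error {
	if !d.HasChange("inline_policy") {
		return nil // attribute unchanged; avoid a needless API call
	}

	if v, ok := d.GetOk("inline_policy"); ok {
		// Attribute present in config: create or overwrite the policy.
		if err := put(v.(string)); err != nil {
			return fmt.Errorf("error attaching IAM inline policy: %w", err)
		}
		return nil
	}

	// Attribute was removed from config: clean up the remote policy.
	if err := del(); err != nil {
		return fmt.Errorf("error deleting IAM inline policy: %w", err)
	}
	return nil
}
```

The delete-path change in the same review applies a sibling idiom: `tfawserr.ErrCodeEquals(err, ssoadmin.ErrCodeResourceNotFoundException)` treats an already-gone permission set as a successful delete, so retries and out-of-band deletions do not fail the apply (and the `d.SetId("")` bookkeeping becomes unnecessary, since the SDK discards state when Delete returns nil).
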
From a539d122d4531612cca1441fec02d73c43da96e1 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 1 Jan 2021 17:57:24 -0600 Subject: [PATCH 0443/1212] remove redundant error checks --- aws/resource_aws_sso_permission_set.go | 89 ++++++++++---------------- 1 file changed, 34 insertions(+), 55 deletions(-) diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index 227a45f2cd4..0f3c3021b43 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -334,34 +334,13 @@ func resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) e } d.Set("arn", permissionSetArn) - err = d.Set("created_date", permissionSet.CreatedDate.Format(time.RFC3339)) - if err != nil { - return err - } - err = d.Set("instance_arn", instanceArn) - if err != nil { - return err - } - err = d.Set("name", permissionSet.Name) - if err != nil { - return err - } - err = d.Set("description", permissionSet.Description) - if err != nil { - return err - } - err = d.Set("session_duration", permissionSet.SessionDuration) - if err != nil { - return err - } - err = d.Set("relay_state", permissionSet.RelayState) - if err != nil { - return err - } - err = d.Set("inline_policy", inlinePolicyResp.InlinePolicy) - if err != nil { - return err - } + d.Set("created_date", permissionSet.CreatedDate.Format(time.RFC3339)) + d.Set("instance_arn", instanceArn) + d.Set("name", permissionSet.Name) + d.Set("description", permissionSet.Description) + d.Set("session_duration", permissionSet.SessionDuration) + d.Set("relay_state", permissionSet.RelayState) + d.Set("inline_policy", inlinePolicyResp.InlinePolicy) if err = d.Set("managed_policy_arns", managedPolicyArns); err != nil { return err } @@ -413,33 +392,33 @@ func resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) } if d.HasChange("inline_policy") { - if v, ok := d.GetOk("inline_policy") { - log.Printf("[DEBUG] AWS SSO Permission Set %s updating IAM inline policy", permissionSetArn) - - inlinePolicy := aws.String(v.(string)) - - updateInput := &ssoadmin.PutInlinePolicyToPermissionSetInput{ - InlinePolicy: inlinePolicy, - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - } - - _, inlinePolicyErr := ssoadminconn.PutInlinePolicyToPermissionSet(updateInput) - if inlinePolicyErr != nil { - return fmt.Errorf("Error attaching IAM inline policy to AWS SSO Permission Set: %s", inlinePolicyErr) - } - } else { - deleteInput := &ssoadmin.DeleteInlinePolicyFromPermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - } - - _, inlinePolicyErr := ssoadminconn.DeleteInlinePolicyFromPermissionSet(deleteInput) - if inlinePolicyErr != nil { - return fmt.Errorf("Error deleting IAM inline policy from AWS SSO Permission Set: %s", inlinePolicyErr) - } - } - } + if v, ok := d.GetOk("inline_policy") { + log.Printf("[DEBUG] AWS SSO Permission Set %s updating IAM inline policy", permissionSetArn) + + inlinePolicy := aws.String(v.(string)) + + updateInput := &ssoadmin.PutInlinePolicyToPermissionSetInput{ + InlinePolicy: inlinePolicy, + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + } + + _, inlinePolicyErr := ssoadminconn.PutInlinePolicyToPermissionSet(updateInput) + if inlinePolicyErr != nil { + return fmt.Errorf("Error attaching IAM inline policy to AWS SSO Permission Set: %s", inlinePolicyErr) + } + } else { + deleteInput := 
&ssoadmin.DeleteInlinePolicyFromPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + } + + _, inlinePolicyErr := ssoadminconn.DeleteInlinePolicyFromPermissionSet(deleteInput) + if inlinePolicyErr != nil { + return fmt.Errorf("Error deleting IAM inline policy from AWS SSO Permission Set: %s", inlinePolicyErr) + } + } + } if d.HasChange("managed_policy_arns") { o, n := d.GetChange("managed_policy_arns") From 7949b0f35d3fdd06775adf596da02185ea9800fd Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 1 Jan 2021 18:04:12 -0600 Subject: [PATCH 0444/1212] update tfawsresource to use resource package --- aws/data_source_aws_sso_permission_set_test.go | 5 ++--- aws/resource_aws_sso_permission_set_test.go | 7 +++---- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/aws/data_source_aws_sso_permission_set_test.go b/aws/data_source_aws_sso_permission_set_test.go index bf9c82cc77c..88fd4aa7de8 100644 --- a/aws/data_source_aws_sso_permission_set_test.go +++ b/aws/data_source_aws_sso_permission_set_test.go @@ -6,7 +6,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfawsresource" ) func TestAccDataSourceAwsSsoPermissionSet_Basic(t *testing.T) { @@ -21,7 +20,7 @@ func TestAccDataSourceAwsSsoPermissionSet_Basic(t *testing.T) { Config: testAccDataSourceAwsSsoPermissionSetConfigBasic(rName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(datasourceName, "managed_policy_arns.#", "1"), - tfawsresource.TestCheckTypeSetElemAttr(datasourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), // lintignore:AWSAT005 + resource.TestCheckTypeSetElemAttr(datasourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), // lintignore:AWSAT005 resource.TestCheckResourceAttr(datasourceName, "name", rName), resource.TestCheckResourceAttr(datasourceName, "description", "testing"), resource.TestCheckResourceAttr(datasourceName, "session_duration", "PT1H"), @@ -45,7 +44,7 @@ func TestAccDataSourceAwsSsoPermissionSet_Tags(t *testing.T) { Config: testAccDataSourceAwsSsoPermissionSetConfigByTags(rName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(datasourceName, "managed_policy_arns.#", "1"), - tfawsresource.TestCheckTypeSetElemAttr(datasourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), // lintignore:AWSAT005 + resource.TestCheckTypeSetElemAttr(datasourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), // lintignore:AWSAT005 resource.TestCheckResourceAttr(datasourceName, "name", rName), resource.TestCheckResourceAttr(datasourceName, "description", "testing"), resource.TestCheckResourceAttr(datasourceName, "session_duration", "PT1H"), diff --git a/aws/resource_aws_sso_permission_set_test.go b/aws/resource_aws_sso_permission_set_test.go index 78f2faebe8d..13e719ad913 100644 --- a/aws/resource_aws_sso_permission_set_test.go +++ b/aws/resource_aws_sso_permission_set_test.go @@ -11,7 +11,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfawsresource" ) func TestAccAWSSSOPermissionSet_Basic(t *testing.T) { @@ -29,7 +28,7 @@ func 
TestAccAWSSSOPermissionSet_Basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "1"), - tfawsresource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), // lintignore:AWSAT005 + resource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), // lintignore:AWSAT005 resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "description", "Just a test"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), @@ -45,8 +44,8 @@ func TestAccAWSSSOPermissionSet_Basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAWSSSOPermissionSetExists(resourceName, &updatedPermissionSet), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "2"), - tfawsresource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), // lintignore:AWSAT005 - tfawsresource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess"), // lintignore:AWSAT005 + resource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), // lintignore:AWSAT005 + resource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess"), // lintignore:AWSAT005 resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "description", "Just a test update"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), From f5495d08e47e479a05c1a3680010b7764cc5a0eb Mon Sep 17 00:00:00 2001 From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com> Date: Mon, 4 Jan 2021 17:26:50 -0600 Subject: [PATCH 0445/1212] Update aws/resource_aws_sso_permission_set_test.go --- aws/data_source_aws_sso_instance.go | 2 -- aws/resource_aws_sso_permission_set.go | 12 +++++--- aws/resource_aws_sso_permission_set_test.go | 34 +++++++++++++++++++-- 3 files changed, 38 insertions(+), 10 deletions(-) diff --git a/aws/data_source_aws_sso_instance.go b/aws/data_source_aws_sso_instance.go index d4a125a39fb..79d83cf27e9 100644 --- a/aws/data_source_aws_sso_instance.go +++ b/aws/data_source_aws_sso_instance.go @@ -3,10 +3,8 @@ package aws import ( "fmt" "log" - "strings" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ssoadmin" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index 0f3c3021b43..b8f2d725d85 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -10,6 +10,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -300,8 +301,9 @@ func resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) e if permissionerr != nil { return fmt.Errorf("Error getting AWS SSO Permission Set: %s", permissionerr) } - if permissionSetResp == nil || 
permissionSetRep.permissionSet == nil { - return fmt.Errorf("error reading AWS SSO Permission Set (%s): empty output", name) + + if permissionSetResp == nil || permissionSetResp.PermissionSet == nil { + return fmt.Errorf("error reading AWS SSO Permission Set (%s): empty output", name) } log.Printf("[DEBUG] Found AWS SSO Permission Set: %s", permissionSet) @@ -392,7 +394,7 @@ func resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) } if d.HasChange("inline_policy") { - if v, ok := d.GetOk("inline_policy") { + if v, ok := d.GetOk("inline_policy"); ok { log.Printf("[DEBUG] AWS SSO Permission Set %s updating IAM inline policy", permissionSetArn) inlinePolicy := aws.String(v.(string)) @@ -407,7 +409,7 @@ func resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) if inlinePolicyErr != nil { return fmt.Errorf("Error attaching IAM inline policy to AWS SSO Permission Set: %s", inlinePolicyErr) } - } else { + } else { deleteInput := &ssoadmin.DeleteInlinePolicyFromPermissionSetInput{ InstanceArn: aws.String(instanceArn), PermissionSetArn: aws.String(permissionSetArn), @@ -415,7 +417,7 @@ func resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) _, inlinePolicyErr := ssoadminconn.DeleteInlinePolicyFromPermissionSet(deleteInput) if inlinePolicyErr != nil { - return fmt.Errorf("Error deleting IAM inline policy from AWS SSO Permission Set: %s", inlinePolicyErr) + return fmt.Errorf("Error deleting IAM inline policy from AWS SSO Permission Set: %s", inlinePolicyErr) } } } diff --git a/aws/resource_aws_sso_permission_set_test.go b/aws/resource_aws_sso_permission_set_test.go index 13e719ad913..871d150bc02 100644 --- a/aws/resource_aws_sso_permission_set_test.go +++ b/aws/resource_aws_sso_permission_set_test.go @@ -8,12 +8,39 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccAWSSSOPermissionSet_Basic(t *testing.T) { + var permissionSet ssoadmin.PermissionSet + resourceName := "aws_sso_permission_set.example" + rName := acctest.RandomWithPrefix("tf-sso-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOPermissionSetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSSOPermissionSetBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), + resource.TestCheckResourceAttr(resourceName, "name", rName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSSOPermissionSet_ManagedPolicies(t *testing.T) { var permissionSet, updatedPermissionSet ssoadmin.PermissionSet resourceName := "aws_sso_permission_set.example" rName := acctest.RandomWithPrefix("tf-sso-test") @@ -150,9 +177,10 @@ func testAccCheckAWSSSOPermissionSetExists(resourceName string, permissionSet *s } if permissionSetResp != nil { - if arn := aws.StringValue(permissionStRespPermissionSet.PermissionSetArn); arn == rs.Primary.ID { - return nil - } + if arn := aws.StringValue(permissionSetResp.PermissionSet.PermissionSetArn); arn == rs.Primary.ID { + return nil + } + } 
return fmt.Errorf("AWS SSO Permission Set (%s) not found", rs.Primary.ID) } From ab182a181f0434e484767b2ad88ea9a687f02fe8 Mon Sep 17 00:00:00 2001 From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com> Date: Mon, 4 Jan 2021 17:41:38 -0600 Subject: [PATCH 0446/1212] remove unused parameter --- aws/resource_aws_sso_permission_set_test.go | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/aws/resource_aws_sso_permission_set_test.go b/aws/resource_aws_sso_permission_set_test.go index 871d150bc02..ba152e9070c 100644 --- a/aws/resource_aws_sso_permission_set_test.go +++ b/aws/resource_aws_sso_permission_set_test.go @@ -15,7 +15,6 @@ import ( ) func TestAccAWSSSOPermissionSet_Basic(t *testing.T) { - var permissionSet ssoadmin.PermissionSet resourceName := "aws_sso_permission_set.example" rName := acctest.RandomWithPrefix("tf-sso-test") @@ -27,7 +26,7 @@ func TestAccAWSSSOPermissionSet_Basic(t *testing.T) { { Config: testAccSSOPermissionSetBasicConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), + testAccCheckAWSSSOPermissionSetExists(resourceName), resource.TestCheckResourceAttr(resourceName, "name", rName), ), }, @@ -41,7 +40,6 @@ func TestAccAWSSSOPermissionSet_Basic(t *testing.T) { } func TestAccAWSSSOPermissionSet_ManagedPolicies(t *testing.T) { - var permissionSet, updatedPermissionSet ssoadmin.PermissionSet resourceName := "aws_sso_permission_set.example" rName := acctest.RandomWithPrefix("tf-sso-test") @@ -53,7 +51,7 @@ func TestAccAWSSSOPermissionSet_ManagedPolicies(t *testing.T) { { Config: testAccSSOPermissionSetBasicConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), + testAccCheckAWSSSOPermissionSetExists(resourceName), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "1"), resource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), // lintignore:AWSAT005 resource.TestCheckResourceAttr(resourceName, "name", rName), @@ -69,7 +67,7 @@ func TestAccAWSSSOPermissionSet_ManagedPolicies(t *testing.T) { { Config: testAccSSOPermissionSetBasicConfigUpdated(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSOPermissionSetExists(resourceName, &updatedPermissionSet), + testAccCheckAWSSSOPermissionSetExists(resourceName), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "2"), resource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), // lintignore:AWSAT005 resource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess"), // lintignore:AWSAT005 @@ -83,7 +81,6 @@ func TestAccAWSSSOPermissionSet_ManagedPolicies(t *testing.T) { } func TestAccAWSSSOPermissionSet_Disappears(t *testing.T) { - var permissionSet ssoadmin.PermissionSet resourceName := "aws_sso_permission_set.example" rName := acctest.RandomWithPrefix("tf-sso-test") @@ -95,7 +92,7 @@ func TestAccAWSSSOPermissionSet_Disappears(t *testing.T) { { Config: testAccSSOPermissionSetBasicConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), + testAccCheckAWSSSOPermissionSetExists(resourceName), testAccCheckResourceDisappears(testAccProvider, resourceAwsSsoPermissionSet(), resourceName), ), ExpectNonEmptyPlan: true, @@ -105,7 +102,6 @@ func 
TestAccAWSSSOPermissionSet_Disappears(t *testing.T) { } func TestAccAWSSSOPermissionSet_Tags(t *testing.T) { - var permissionSet ssoadmin.PermissionSet resourceName := "aws_sso_permission_set.example" rName := acctest.RandomWithPrefix("tf-sso-test") @@ -117,7 +113,7 @@ func TestAccAWSSSOPermissionSet_Tags(t *testing.T) { { Config: testAccSSOPermissionSetConfigTagsSingle(rName, "key1", "value1"), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), + testAccCheckAWSSSOPermissionSetExists(resourceName), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), ), @@ -130,7 +126,7 @@ func TestAccAWSSSOPermissionSet_Tags(t *testing.T) { { Config: testAccSSOPermissionSetConfigTagsMultiple(rName, "key1", "updatedvalue1", "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), + testAccCheckAWSSSOPermissionSetExists(resourceName), resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "updatedvalue1"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), @@ -139,7 +135,7 @@ func TestAccAWSSSOPermissionSet_Tags(t *testing.T) { { Config: testAccSSOPermissionSetConfigTagsSingle(rName, "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), + testAccCheckAWSSSOPermissionSetExists(resourceName), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), ), @@ -148,7 +144,7 @@ func TestAccAWSSSOPermissionSet_Tags(t *testing.T) { }) } -func testAccCheckAWSSSOPermissionSetExists(resourceName string, permissionSet *ssoadmin.PermissionSet) resource.TestCheckFunc { +func testAccCheckAWSSSOPermissionSetExists(resourceName string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] if !ok { From 211f5dc9808b63db9cba53779f05ef1300c8ebd3 Mon Sep 17 00:00:00 2001 From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com> Date: Mon, 4 Jan 2021 18:24:34 -0600 Subject: [PATCH 0447/1212] move global/no account arn test --- aws/data_source_aws_sso_instance_test.go | 23 +---------------------- aws/provider_test.go | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+), 22 deletions(-) diff --git a/aws/data_source_aws_sso_instance_test.go b/aws/data_source_aws_sso_instance_test.go index f9b51dae6dd..cad45fc847d 100644 --- a/aws/data_source_aws_sso_instance_test.go +++ b/aws/data_source_aws_sso_instance_test.go @@ -1,14 +1,11 @@ package aws import ( - "fmt" "regexp" "testing" - "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ssoadmin" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func testAccPreCheckAWSSSOInstance(t *testing.T) { @@ -48,7 +45,7 @@ func TestAccDataSourceAwsSsoInstance_Basic(t *testing.T) { { Config: testAccDataSourceAwsSsoInstanceConfigBasic(), Check: resource.ComposeTestCheckFunc( - testAccMatchResourceAttrAwsSsoARN(datasourceName, "arn", regexp.MustCompile("instance/ssoins-[a-zA-Z0-9-.]{16}")), + testAccMatchResourceAttrGlobalARNNoAccount(datasourceName, "arn", "sso", regexp.MustCompile("instance/ssoins-[a-zA-Z0-9-.]{16}")), resource.TestMatchResourceAttr(datasourceName, "identity_store_id", 
regexp.MustCompile("^[a-zA-Z0-9-]*")), ), }, @@ -59,21 +56,3 @@ func TestAccDataSourceAwsSsoInstance_Basic(t *testing.T) { func testAccDataSourceAwsSsoInstanceConfigBasic() string { return `data "aws_sso_instance" "selected" {}` } - -func testAccMatchResourceAttrAwsSsoARN(resourceName, attributeName string, arnResourceRegexp *regexp.Regexp) resource.TestCheckFunc { - return func(s *terraform.State) error { - arnRegexp := arn.ARN{ - Partition: testAccGetPartition(), - Resource: arnResourceRegexp.String(), - Service: "sso", - }.String() - - attributeMatch, err := regexp.Compile(arnRegexp) - - if err != nil { - return fmt.Errorf("Unable to compile ARN regexp (%s): %s", arnRegexp, err) - } - - return resource.TestMatchResourceAttr(resourceName, attributeName, attributeMatch)(s) - } -} diff --git a/aws/provider_test.go b/aws/provider_test.go index 8cbab7ba700..55a7852cc48 100644 --- a/aws/provider_test.go +++ b/aws/provider_test.go @@ -486,6 +486,25 @@ func testAccCheckResourceAttrRegionalARNIgnoreRegionAndAccount(resourceName, att } } +// testAccMatchResourceAttrGlobalARNNoAccount ensures the Terraform state regexp matches a formatted ARN without region or account ID +func testAccMatchResourceAttrGlobalARNNoAccount(resourceName, attributeName, arnService string, arnResourceRegexp *regexp.Regexp) resource.TestCheckFunc { + return func(s *terraform.State) error { + arnRegexp := arn.ARN{ + Partition: testAccGetPartition(), + Resource: arnResourceRegexp.String(), + Service: arnService, + }.String() + + attributeMatch, err := regexp.Compile(arnRegexp) + + if err != nil { + return fmt.Errorf("Unable to compile ARN regexp (%s): %s", arnRegexp, err) + } + + return resource.TestMatchResourceAttr(resourceName, attributeName, attributeMatch)(s) + } +} + // testAccCheckResourceAttrRfc3339 ensures the Terraform state matches a RFC3339 value // This TestCheckFunc will likely be moved to the Terraform Plugin SDK in the future. 
func testAccCheckResourceAttrRfc3339(resourceName, attributeName string) resource.TestCheckFunc { From 8fcc98f9092f7c12cf64c09e780008cbe5dd45cb Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Wed, 6 Jan 2021 03:45:53 -0500 Subject: [PATCH 0448/1212] CR updates; align with design --- aws/data_source_aws_sso_instance.go | 61 -- aws/data_source_aws_sso_instance_test.go | 58 -- aws/data_source_aws_sso_permission_set.go | 180 ----- ...data_source_aws_sso_permission_set_test.go | 109 ---- aws/data_source_aws_ssoadmin_instances.go | 69 ++ ...data_source_aws_ssoadmin_instances_test.go | 60 ++ ...data_source_aws_ssoadmin_permission_set.go | 165 +++++ ...source_aws_ssoadmin_permission_set_test.go | 127 ++++ .../keyvaluetags/generators/listtags/main.go | 1 + .../generators/servicetags/main.go | 1 + .../generators/updatetags/main.go | 1 + aws/internal/keyvaluetags/list_tags_gen.go | 19 + .../service_generation_customizations.go | 8 +- aws/internal/keyvaluetags/service_tags_gen.go | 28 + aws/internal/keyvaluetags/sso_tags.go | 95 --- aws/internal/keyvaluetags/update_tags_gen.go | 39 ++ .../service/ssoadmin/finder/finder.go | 48 ++ .../service/ssoadmin/waiter/status.go | 53 ++ .../service/ssoadmin/waiter/waiter.go | 43 ++ aws/provider.go | 6 +- aws/resource_aws_sso_permission_set.go | 614 ------------------ aws/resource_aws_sso_permission_set_test.go | 296 --------- aws/resource_aws_ssoadmin_permission_set.go | 302 +++++++++ ...source_aws_ssoadmin_permission_set_test.go | 415 ++++++++++++ website/docs/d/sso_instance.html.markdown | 35 - .../docs/d/sso_permission_set.html.markdown | 47 -- .../docs/d/ssoadmin_instances.html.markdown | 35 + .../d/ssoadmin_permission_set.html.markdown | 46 ++ .../docs/r/sso_permission_set.html.markdown | 73 --- .../r/ssoadmin_permission_set.html.markdown | 52 ++ 30 files changed, 1514 insertions(+), 1572 deletions(-) delete mode 100644 aws/data_source_aws_sso_instance.go delete mode 100644 aws/data_source_aws_sso_instance_test.go delete mode 100644 aws/data_source_aws_sso_permission_set.go delete mode 100644 aws/data_source_aws_sso_permission_set_test.go create mode 100644 aws/data_source_aws_ssoadmin_instances.go create mode 100644 aws/data_source_aws_ssoadmin_instances_test.go create mode 100644 aws/data_source_aws_ssoadmin_permission_set.go create mode 100644 aws/data_source_aws_ssoadmin_permission_set_test.go delete mode 100644 aws/internal/keyvaluetags/sso_tags.go create mode 100644 aws/internal/service/ssoadmin/finder/finder.go create mode 100644 aws/internal/service/ssoadmin/waiter/status.go create mode 100644 aws/internal/service/ssoadmin/waiter/waiter.go delete mode 100644 aws/resource_aws_sso_permission_set.go delete mode 100644 aws/resource_aws_sso_permission_set_test.go create mode 100644 aws/resource_aws_ssoadmin_permission_set.go create mode 100644 aws/resource_aws_ssoadmin_permission_set_test.go delete mode 100644 website/docs/d/sso_instance.html.markdown delete mode 100644 website/docs/d/sso_permission_set.html.markdown create mode 100644 website/docs/d/ssoadmin_instances.html.markdown create mode 100644 website/docs/d/ssoadmin_permission_set.html.markdown delete mode 100644 website/docs/r/sso_permission_set.html.markdown create mode 100644 website/docs/r/ssoadmin_permission_set.html.markdown diff --git a/aws/data_source_aws_sso_instance.go b/aws/data_source_aws_sso_instance.go deleted file mode 100644 index 79d83cf27e9..00000000000 --- a/aws/data_source_aws_sso_instance.go +++ /dev/null @@ -1,61 +0,0 @@ -package aws - -import ( - "fmt" - "log" - - 
"github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ssoadmin" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func dataSourceAwsSsoInstance() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsSsoInstanceRead, - - Schema: map[string]*schema.Schema{ - "arn": { - Type: schema.TypeString, - Computed: true, - }, - - "identity_store_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceAwsSsoInstanceRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ssoadminconn - - log.Printf("[DEBUG] Reading AWS SSO Instances") - instances := []*ssoadmin.InstanceMetadata{} - err := conn.ListInstancesPages(&ssoadmin.ListInstancesInput{}, func(page *ssoadmin.ListInstancesOutput, lastPage bool) bool { - if page != nil && len(page.Instances) != 0 { - instances = append(instances, page.Instances...) - } - return !lastPage - }) - if err != nil { - return fmt.Errorf("error getting AWS SSO Instances: %w", err) - } - - if len(instances) == 0 { - return fmt.Errorf("error getting AWS SSO Instances: no instance found") - } - - if len(instances) > 1 { - return fmt.Errorf("Found multiple AWS SSO Instances. Not sure which one to use. %s", instances) - } - - instance := instances[0] - log.Printf("[DEBUG] Received AWS SSO Instance: %s", aws.StringValue(instance.InstanceArn)) - - d.SetId(aws.StringValue(instance.InstanceArn)) - d.Set("arn", instance.InstanceArn) - d.Set("identity_store_id", instance.IdentityStoreId) - - return nil -} diff --git a/aws/data_source_aws_sso_instance_test.go b/aws/data_source_aws_sso_instance_test.go deleted file mode 100644 index cad45fc847d..00000000000 --- a/aws/data_source_aws_sso_instance_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package aws - -import ( - "regexp" - "testing" - - "github.com/aws/aws-sdk-go/service/ssoadmin" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" -) - -func testAccPreCheckAWSSSOInstance(t *testing.T) { - ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn - - instances := []*ssoadmin.InstanceMetadata{} - err := ssoadminconn.ListInstancesPages(&ssoadmin.ListInstancesInput{}, func(page *ssoadmin.ListInstancesOutput, lastPage bool) bool { - if page != nil && len(page.Instances) != 0 { - instances = append(instances, page.Instances...) - } - return !lastPage - }) - if testAccPreCheckSkipError(err) { - t.Skipf("skipping acceptance testing: %s", err) - } - - if len(instances) == 0 { - t.Skip("skipping acceptance testing: No AWS SSO Instance found.") - } - - if len(instances) > 1 { - t.Skip("skipping acceptance testing: Found multiple AWS SSO Instances. 
Not sure which one to use.") - } - - if err != nil { - t.Fatalf("unexpected PreCheck error: %s", err) - } -} - -func TestAccDataSourceAwsSsoInstance_Basic(t *testing.T) { - datasourceName := "data.aws_sso_instance.selected" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsSsoInstanceConfigBasic(), - Check: resource.ComposeTestCheckFunc( - testAccMatchResourceAttrGlobalARNNoAccount(datasourceName, "arn", "sso", regexp.MustCompile("instance/ssoins-[a-zA-Z0-9-.]{16}")), - resource.TestMatchResourceAttr(datasourceName, "identity_store_id", regexp.MustCompile("^[a-zA-Z0-9-]*")), - ), - }, - }, - }) -} - -func testAccDataSourceAwsSsoInstanceConfigBasic() string { - return `data "aws_sso_instance" "selected" {}` -} diff --git a/aws/data_source_aws_sso_permission_set.go b/aws/data_source_aws_sso_permission_set.go deleted file mode 100644 index 8d8568544ad..00000000000 --- a/aws/data_source_aws_sso_permission_set.go +++ /dev/null @@ -1,180 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ssoadmin" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" -) - -func dataSourceAwsSsoPermissionSet() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsSsoPermissionSetRead, - - Schema: map[string]*schema.Schema{ - "arn": { - Type: schema.TypeString, - Computed: true, - }, - - "created_date": { - Type: schema.TypeString, - Computed: true, - }, - - "instance_arn": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.All( - validation.StringLenBetween(10, 1224), - validation.StringMatch(regexp.MustCompile(`^arn:aws(-[a-z]+)*:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}$`), "must match arn:aws(-[a-z]+)*:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}"), - ), - }, - - "name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 32), - validation.StringMatch(regexp.MustCompile(`^[\w+=,.@-]+$`), "must match [\\w+=,.@-]"), - ), - }, - - "description": { - Type: schema.TypeString, - Computed: true, - }, - - "session_duration": { - Type: schema.TypeString, - Computed: true, - }, - - "relay_state": { - Type: schema.TypeString, - Computed: true, - }, - - "inline_policy": { - Type: schema.TypeString, - Computed: true, - }, - - "managed_policy_arns": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "tags": tagsSchemaComputed(), - }, - } -} - -func dataSourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ssoadminconn - ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig - - instanceArn := d.Get("instance_arn").(string) - name := d.Get("name").(string) - - log.Printf("[DEBUG] Reading AWS SSO Permission Sets") - - var permissionSetArn string - var permissionSet *ssoadmin.PermissionSet - var permissionSetErr error - - req := &ssoadmin.ListPermissionSetsInput{ - InstanceArn: aws.String(instanceArn), - } - err := conn.ListPermissionSetsPages(req, func(page *ssoadmin.ListPermissionSetsOutput, lastPage bool) bool { - if page != nil && len(page.PermissionSets) != 0 { - for _, ps := range page.PermissionSets { - 
permissionSetArn = aws.StringValue(ps) - log.Printf("[DEBUG] Reading AWS SSO Permission Set: %v", permissionSetArn) - var permissionSetResp *ssoadmin.DescribePermissionSetOutput - permissionSetResp, permissionSetErr = conn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - }) - if permissionSetErr != nil { - return false - } - if aws.StringValue(permissionSetResp.PermissionSet.Name) == name { - permissionSet = permissionSetResp.PermissionSet - return false - } - } - } - return !lastPage - }) - - if err != nil { - return fmt.Errorf("Error getting AWS SSO Permission Sets: %w", err) - } - - if permissionSetErr != nil { - return fmt.Errorf("Error getting AWS SSO Permission Set: %w", permissionSetErr) - } - - if permissionSet == nil { - return fmt.Errorf("AWS SSO Permission Set %s not found", name) - } - - log.Printf("[DEBUG] Found AWS SSO Permission Set: %s", permissionSet) - - log.Printf("[DEBUG] Getting Inline Policy for AWS SSO Permission Set") - inlinePolicyResp, inlinePolicyErr := conn.GetInlinePolicyForPermissionSet(&ssoadmin.GetInlinePolicyForPermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - }) - if inlinePolicyErr != nil { - return fmt.Errorf("Error getting Inline Policy for AWS SSO Permission Set: %s", inlinePolicyErr) - } - - log.Printf("[DEBUG] Getting Managed Policies for AWS SSO Permission Set") - var managedPolicyArns []string - managedPoliciesReq := &ssoadmin.ListManagedPoliciesInPermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - } - managedPoliciesErr := conn.ListManagedPoliciesInPermissionSetPages(managedPoliciesReq, func(page *ssoadmin.ListManagedPoliciesInPermissionSetOutput, lastPage bool) bool { - for _, managedPolicy := range page.AttachedManagedPolicies { - managedPolicyArns = append(managedPolicyArns, aws.StringValue(managedPolicy.Arn)) - } - return !lastPage - }) - if managedPoliciesErr != nil { - return fmt.Errorf("Error getting Managed Policies for AWS SSO Permission Set: %s", managedPoliciesErr) - } - - tags, tagsErr := keyvaluetags.SsoListTags(conn, permissionSetArn, instanceArn) - if tagsErr != nil { - return fmt.Errorf("Error listing tags for AWS SSO Permission Set (%s): %s", permissionSetArn, tagsErr) - } - - d.SetId(permissionSetArn) - d.Set("arn", permissionSetArn) - d.Set("created_date", permissionSet.CreatedDate.Format(time.RFC3339)) - d.Set("instance_arn", instanceArn) - d.Set("name", permissionSet.Name) - d.Set("description", permissionSet.Description) - d.Set("session_duration", permissionSet.SessionDuration) - d.Set("relay_state", permissionSet.RelayState) - d.Set("inline_policy", inlinePolicyResp.InlinePolicy) - d.Set("managed_policy_arns", managedPolicyArns) - tagsMapErr := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()) - if tagsMapErr != nil { - return fmt.Errorf("Error setting tags: %s", tagsMapErr) - } - - return nil -} diff --git a/aws/data_source_aws_sso_permission_set_test.go b/aws/data_source_aws_sso_permission_set_test.go deleted file mode 100644 index 88fd4aa7de8..00000000000 --- a/aws/data_source_aws_sso_permission_set_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" -) - -func TestAccDataSourceAwsSsoPermissionSet_Basic(t *testing.T) { - 
datasourceName := "data.aws_sso_permission_set.test" - rName := acctest.RandomWithPrefix("tf-sso-test") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsSsoPermissionSetConfigBasic(rName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(datasourceName, "managed_policy_arns.#", "1"), - resource.TestCheckTypeSetElemAttr(datasourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), // lintignore:AWSAT005 - resource.TestCheckResourceAttr(datasourceName, "name", rName), - resource.TestCheckResourceAttr(datasourceName, "description", "testing"), - resource.TestCheckResourceAttr(datasourceName, "session_duration", "PT1H"), - resource.TestCheckResourceAttr(datasourceName, "relay_state", "https://console.aws.amazon.com/console/home"), - resource.TestCheckResourceAttr(datasourceName, "tags.%", "0"), - ), - }, - }, - }) -} - -func TestAccDataSourceAwsSsoPermissionSet_Tags(t *testing.T) { - datasourceName := "data.aws_sso_permission_set.test" - rName := acctest.RandomWithPrefix("tf-sso-test") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsSsoPermissionSetConfigByTags(rName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(datasourceName, "managed_policy_arns.#", "1"), - resource.TestCheckTypeSetElemAttr(datasourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), // lintignore:AWSAT005 - resource.TestCheckResourceAttr(datasourceName, "name", rName), - resource.TestCheckResourceAttr(datasourceName, "description", "testing"), - resource.TestCheckResourceAttr(datasourceName, "session_duration", "PT1H"), - resource.TestCheckResourceAttr(datasourceName, "relay_state", "https://console.aws.amazon.com/console/home"), - resource.TestCheckResourceAttr(datasourceName, "tags.%", "3"), - ), - }, - }, - }) -} - -func testAccDataSourceAwsSsoPermissionSetConfigBasic(rName string) string { - return fmt.Sprintf(` -data "aws_sso_instance" "selected" {} - -resource "aws_sso_permission_set" "test" { - name = "%s" - description = "testing" - instance_arn = data.aws_sso_instance.selected.arn - session_duration = "PT1H" - relay_state = "https://console.aws.amazon.com/console/home" - managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] -} - -data "aws_sso_permission_set" "test" { - instance_arn = data.aws_sso_instance.selected.arn - name = aws_sso_permission_set.test.name -} -`, rName) // lintignore:AWSAT005 -} - -func testAccDataSourceAwsSsoPermissionSetConfigByTags(rName string) string { - return fmt.Sprintf(` -data "aws_sso_instance" "selected" {} - -resource "aws_sso_permission_set" "test" { - name = "%s" - description = "testing" - instance_arn = data.aws_sso_instance.selected.arn - session_duration = "PT1H" - relay_state = "https://console.aws.amazon.com/console/home" - managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] - - tags = { - Key1 = "Value1" - Key2 = "Value2" - Key3 = "Value3" - } -} - -data "aws_sso_permission_set" "test" { - instance_arn = data.aws_sso_instance.selected.arn - name = aws_sso_permission_set.test.name - - tags = { - Key1 = "Value1" - Key2 = "Value2" - Key3 = "Value3" - } -} -`, rName) // lintignore:AWSAT005 -} diff --git 
a/aws/data_source_aws_ssoadmin_instances.go b/aws/data_source_aws_ssoadmin_instances.go new file mode 100644 index 00000000000..06089b97915 --- /dev/null +++ b/aws/data_source_aws_ssoadmin_instances.go @@ -0,0 +1,69 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func dataSourceAwsSsoAdminInstances() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsSsoAdminInstancesRead, + + Schema: map[string]*schema.Schema{ + "arns": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "identity_store_ids": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceAwsSsoAdminInstancesRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + + var instances []*ssoadmin.InstanceMetadata + var arns, ids []string + + err := conn.ListInstancesPages(&ssoadmin.ListInstancesInput{}, func(page *ssoadmin.ListInstancesOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + instances = append(instances, page.Instances...) + + return !lastPage + }) + + if err != nil { + return fmt.Errorf("error reading SSO Instances: %w", err) + } + + if len(instances) == 0 { + return fmt.Errorf("error reading SSO Instance: no instances found") + } + + for _, instance := range instances { + arns = append(arns, aws.StringValue(instance.InstanceArn)) + ids = append(ids, aws.StringValue(instance.IdentityStoreId)) + } + + d.SetId(meta.(*AWSClient).region) + if err := d.Set("arns", arns); err != nil { + return fmt.Errorf("error setting arns: %w", err) + } + if err := d.Set("identity_store_ids", ids); err != nil { + return fmt.Errorf("error setting identity_store_ids: %w", err) + } + + return nil +} diff --git a/aws/data_source_aws_ssoadmin_instances_test.go b/aws/data_source_aws_ssoadmin_instances_test.go new file mode 100644 index 00000000000..39fd84a9b44 --- /dev/null +++ b/aws/data_source_aws_ssoadmin_instances_test.go @@ -0,0 +1,60 @@ +package aws + +import ( + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func testAccPreCheckAWSSSOAdminInstances(t *testing.T) { + ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn + + var instances []*ssoadmin.InstanceMetadata + err := ssoadminconn.ListInstancesPages(&ssoadmin.ListInstancesInput{}, func(page *ssoadmin.ListInstancesOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + instances = append(instances, page.Instances...) 
+ + return !lastPage + }) + + if testAccPreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if len(instances) == 0 { + t.Skip("skipping acceptance testing: No SSO Instance found.") + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +func TestAccDataSourceAWSSSOAdminInstances_Basic(t *testing.T) { + dataSourceName := "data.aws_ssoadmin_instances.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOAdminInstances(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAWSSSOAdminInstancesConfigBasic(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "arns.#", "1"), + resource.TestCheckResourceAttr(dataSourceName, "identity_store_ids.#", "1"), + testAccMatchResourceAttrGlobalARNNoAccount(dataSourceName, "arns.0", "sso", regexp.MustCompile("instance/(sso)?ins-[a-zA-Z0-9-.]{16}")), + resource.TestMatchResourceAttr(dataSourceName, "identity_store_ids.0", regexp.MustCompile("^[a-zA-Z0-9-]*")), + ), + }, + }, + }) +} + +func testAccDataSourceAWSSSOAdminInstancesConfigBasic() string { + return `data "aws_ssoadmin_instances" "test" {}` +} diff --git a/aws/data_source_aws_ssoadmin_permission_set.go b/aws/data_source_aws_ssoadmin_permission_set.go new file mode 100644 index 00000000000..75e22a6a59d --- /dev/null +++ b/aws/data_source_aws_ssoadmin_permission_set.go @@ -0,0 +1,165 @@ +package aws + +import ( + "errors" + "fmt" + "regexp" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" +) + +func dataSourceAwsSsoAdminPermissionSet() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsSsoAdminPermissionSetRead, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ExactlyOneOf: []string{"arn", "name"}, + }, + + "created_date": { + Type: schema.TypeString, + Computed: true, + }, + + "description": { + Type: schema.TypeString, + Computed: true, + }, + + "instance_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, + }, + + "name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 32), + validation.StringMatch(regexp.MustCompile(`[\w+=,.@-]+`), "must match [\\w+=,.@-]"), + ), + ExactlyOneOf: []string{"name", "arn"}, + }, + + "relay_state": { + Type: schema.TypeString, + Computed: true, + }, + + "session_duration": { + Type: schema.TypeString, + Computed: true, + }, + + "tags": tagsSchemaComputed(), + }, + } +} + +func dataSourceAwsSsoAdminPermissionSetRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig + + instanceArn := d.Get("instance_arn").(string) + + var permissionSet *ssoadmin.PermissionSet + + if v, ok := d.GetOk("arn"); ok { + arn := v.(string) + + input := &ssoadmin.DescribePermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(arn), + } + + output, err := conn.DescribePermissionSet(input) + if err != nil { + return fmt.Errorf("error reading SSO Admin Permission Set (%s): %w", arn, err) + } + + if output == nil { + return fmt.Errorf("error 
reading SSO Admin Permission Set (%s): empty output", arn) + } + + permissionSet = output.PermissionSet + } else { + name := d.Get("name").(string) + var describeErr error + + input := &ssoadmin.ListPermissionSetsInput{ + InstanceArn: aws.String(instanceArn), + } + + err := conn.ListPermissionSetsPages(input, func(page *ssoadmin.ListPermissionSetsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, permissionSetArn := range page.PermissionSets { + output, describeErr := conn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: permissionSetArn, + }) + + if describeErr != nil { + return false + } + + if output == nil || output.PermissionSet == nil { + continue + } + + if aws.StringValue(output.PermissionSet.Name) == name { + permissionSet = output.PermissionSet + return false + } + } + + return !lastPage + }) + + if err != nil { + return fmt.Errorf("error listing SSO Permission Sets: %w", err) + } + + if describeErr != nil { + return fmt.Errorf("error reading SSO Permission Set: %w", describeErr) + } + } + + if permissionSet == nil { + return errors.New("error reading SSO Permission Set: not found") + } + + arn := aws.StringValue(permissionSet.PermissionSetArn) + + d.SetId(arn) + d.Set("arn", arn) + d.Set("created_date", permissionSet.CreatedDate.Format(time.RFC3339)) + d.Set("description", permissionSet.Description) + d.Set("instance_arn", instanceArn) + d.Set("name", permissionSet.Name) + d.Set("session_duration", permissionSet.SessionDuration) + d.Set("relay_state", permissionSet.RelayState) + + tags, err := keyvaluetags.SsoadminListTags(conn, arn, instanceArn) + if err != nil { + return fmt.Errorf("error listing tags for SSO Permission Set (%s): %w", arn, err) + } + + if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + return fmt.Errorf("error setting tags: %w", err) + } + + return nil +} diff --git a/aws/data_source_aws_ssoadmin_permission_set_test.go b/aws/data_source_aws_ssoadmin_permission_set_test.go new file mode 100644 index 00000000000..0c87ddc1c16 --- /dev/null +++ b/aws/data_source_aws_ssoadmin_permission_set_test.go @@ -0,0 +1,127 @@ +package aws + +import ( + "fmt" + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccDataSourceAWSSSOAdminPermissionSet_arn(t *testing.T) { + dataSourceName := "data.aws_ssoadmin_permission_set.test" + resourceName := "aws_ssoadmin_permission_set.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOAdminInstances(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAWSSSOPermissionSetByArnConfig(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "name", dataSourceName, "name"), + resource.TestCheckResourceAttrPair(resourceName, "description", dataSourceName, "description"), + resource.TestCheckResourceAttrPair(resourceName, "relay_state", dataSourceName, "relay_state"), + resource.TestCheckResourceAttrPair(resourceName, "session_duration", dataSourceName, "session_duration"), + resource.TestCheckResourceAttrPair(resourceName, "tags", dataSourceName, "tags"), + ), + }, + }, + }) +} + +func TestAccDataSourceAWSSSOAdminPermissionSet_name(t *testing.T) { + dataSourceName := 
"data.aws_ssoadmin_permission_set.test" + resourceName := "aws_ssoadmin_permission_set.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOAdminInstances(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAWSSSOPermissionSetByNameConfig(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "name", dataSourceName, "name"), + resource.TestCheckResourceAttrPair(resourceName, "description", dataSourceName, "description"), + resource.TestCheckResourceAttrPair(resourceName, "relay_state", dataSourceName, "relay_state"), + resource.TestCheckResourceAttrPair(resourceName, "session_duration", dataSourceName, "session_duration"), + resource.TestCheckResourceAttrPair(resourceName, "tags", dataSourceName, "tags"), + ), + }, + }, + }) +} + +func TestAccDataSourceAWSSSOAdminPermissionSet_NonExistent(t *testing.T) { + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOAdminInstances(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAWSSSOPermissionSetByNameConfig_nonExistent, + ExpectError: regexp.MustCompile(`not found`), + }, + }, + }) +} + +func testAccDataSourceAWSSSOPermissionSetByArnConfig(rName string) string { + return fmt.Sprintf(` +data "aws_ssoadmin_instances" "test" {} + +resource "aws_ssoadmin_permission_set" "test" { + name = %[1]q + description = %[1]q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + relay_state = "https://example.com" + + tags = { + Key1 = "Value1" + Key2 = "Value2" + Key3 = "Value3" + } +} + +data "aws_ssoadmin_permission_set" "test" { + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + arn = aws_ssoadmin_permission_set.test.arn +} +`, rName) +} + +func testAccDataSourceAWSSSOPermissionSetByNameConfig(rName string) string { + return fmt.Sprintf(` +data "aws_ssoadmin_instances" "test" {} + +resource "aws_ssoadmin_permission_set" "test" { + name = %[1]q + description = %[1]q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + relay_state = "https://example.com" + + tags = { + Key1 = "Value1" + Key2 = "Value2" + Key3 = "Value3" + } +} + +data "aws_ssoadmin_permission_set" "test" { + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + name = aws_ssoadmin_permission_set.test.name +} +`, rName) +} + +const testAccDataSourceAWSSSOPermissionSetByNameConfig_nonExistent = ` +data "aws_ssoadmin_instances" "test" {} + +data "aws_ssoadmin_permission_set" "test" { + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + name = "does-not-exist" +} +` diff --git a/aws/internal/keyvaluetags/generators/listtags/main.go b/aws/internal/keyvaluetags/generators/listtags/main.go index 840bbe34802..ea4014ec53d 100644 --- a/aws/internal/keyvaluetags/generators/listtags/main.go +++ b/aws/internal/keyvaluetags/generators/listtags/main.go @@ -111,6 +111,7 @@ var serviceNames = []string{ "sns", "sqs", "ssm", + "ssoadmin", "storagegateway", "swf", "transfer", diff --git a/aws/internal/keyvaluetags/generators/servicetags/main.go b/aws/internal/keyvaluetags/generators/servicetags/main.go index 761772d520d..413c2037e2d 100644 --- a/aws/internal/keyvaluetags/generators/servicetags/main.go +++ b/aws/internal/keyvaluetags/generators/servicetags/main.go @@ -92,6 +92,7 @@ var sliceServiceNames = []string{ "sfn", "sns", "ssm", + 
"ssoadmin", "storagegateway", "swf", "transfer", diff --git a/aws/internal/keyvaluetags/generators/updatetags/main.go b/aws/internal/keyvaluetags/generators/updatetags/main.go index 584e64a08b7..2dc4b622169 100644 --- a/aws/internal/keyvaluetags/generators/updatetags/main.go +++ b/aws/internal/keyvaluetags/generators/updatetags/main.go @@ -117,6 +117,7 @@ var serviceNames = []string{ "sns", "sqs", "ssm", + "ssoadmin", "storagegateway", "swf", "synthetics", diff --git a/aws/internal/keyvaluetags/list_tags_gen.go b/aws/internal/keyvaluetags/list_tags_gen.go index a08d3c9e5e1..e73978a76ea 100644 --- a/aws/internal/keyvaluetags/list_tags_gen.go +++ b/aws/internal/keyvaluetags/list_tags_gen.go @@ -98,6 +98,7 @@ import ( "github.com/aws/aws-sdk-go/service/sns" "github.com/aws/aws-sdk-go/service/sqs" "github.com/aws/aws-sdk-go/service/ssm" + "github.com/aws/aws-sdk-go/service/ssoadmin" "github.com/aws/aws-sdk-go/service/storagegateway" "github.com/aws/aws-sdk-go/service/swf" "github.com/aws/aws-sdk-go/service/transfer" @@ -1719,6 +1720,24 @@ func SsmListTags(conn *ssm.SSM, identifier string, resourceType string) (KeyValu return SsmKeyValueTags(output.TagList), nil } +// SsoadminListTags lists ssoadmin service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func SsoadminListTags(conn *ssoadmin.SSOAdmin, identifier string, resourceType string) (KeyValueTags, error) { + input := &ssoadmin.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + InstanceArn: aws.String(resourceType), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return SsoadminKeyValueTags(output.Tags), nil +} + // StoragegatewayListTags lists storagegateway service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. diff --git a/aws/internal/keyvaluetags/service_generation_customizations.go b/aws/internal/keyvaluetags/service_generation_customizations.go index 48841befb37..e66c7688abb 100644 --- a/aws/internal/keyvaluetags/service_generation_customizations.go +++ b/aws/internal/keyvaluetags/service_generation_customizations.go @@ -108,6 +108,7 @@ import ( "github.com/aws/aws-sdk-go/service/sns" "github.com/aws/aws-sdk-go/service/sqs" "github.com/aws/aws-sdk-go/service/ssm" + "github.com/aws/aws-sdk-go/service/ssoadmin" "github.com/aws/aws-sdk-go/service/storagegateway" "github.com/aws/aws-sdk-go/service/swf" "github.com/aws/aws-sdk-go/service/synthetics" @@ -331,6 +332,8 @@ func ServiceClientType(serviceName string) string { funcType = reflect.TypeOf(sqs.New) case "ssm": funcType = reflect.TypeOf(ssm.New) + case "ssoadmin": + funcType = reflect.TypeOf(ssoadmin.New) case "storagegateway": funcType = reflect.TypeOf(storagegateway.New) case "swf": @@ -820,7 +823,8 @@ func ServiceTagKeyType(serviceName string) string { } } -// ServiceTagResourceTypeField determines the service tagging resource type field. 
+// ServiceTagResourceTypeField determines the service tagging resource type field, +// with the exception of the ssoadmin service, which uses the instance ARN field func ServiceTagResourceTypeField(serviceName string) string { switch serviceName { case "autoscaling": @@ -829,6 +833,8 @@ func ServiceTagResourceTypeField(serviceName string) string { return "ResourceType" case "ssm": return "ResourceType" + case "ssoadmin": + return "InstanceArn" default: return "" } diff --git a/aws/internal/keyvaluetags/service_tags_gen.go b/aws/internal/keyvaluetags/service_tags_gen.go index 0f8a6481e11..bf7458fba54 100644 --- a/aws/internal/keyvaluetags/service_tags_gen.go +++ b/aws/internal/keyvaluetags/service_tags_gen.go @@ -80,6 +80,7 @@ import ( "github.com/aws/aws-sdk-go/service/sfn" "github.com/aws/aws-sdk-go/service/sns" "github.com/aws/aws-sdk-go/service/ssm" + "github.com/aws/aws-sdk-go/service/ssoadmin" "github.com/aws/aws-sdk-go/service/storagegateway" "github.com/aws/aws-sdk-go/service/swf" "github.com/aws/aws-sdk-go/service/transfer" @@ -2632,6 +2633,33 @@ func SsmKeyValueTags(tags []*ssm.Tag) KeyValueTags { return New(m) } +// SsoadminTags returns ssoadmin service tags. +func (tags KeyValueTags) SsoadminTags() []*ssoadmin.Tag { + result := make([]*ssoadmin.Tag, 0, len(tags)) + + for k, v := range tags.Map() { + tag := &ssoadmin.Tag{ + Key: aws.String(k), + Value: aws.String(v), + } + + result = append(result, tag) + } + + return result +} + +// SsoadminKeyValueTags creates KeyValueTags from ssoadmin service tags. +func SsoadminKeyValueTags(tags []*ssoadmin.Tag) KeyValueTags { + m := make(map[string]*string, len(tags)) + + for _, tag := range tags { + m[aws.StringValue(tag.Key)] = tag.Value + } + + return New(m) +} + // StoragegatewayTags returns storagegateway service tags. func (tags KeyValueTags) StoragegatewayTags() []*storagegateway.Tag { result := make([]*storagegateway.Tag, 0, len(tags)) diff --git a/aws/internal/keyvaluetags/sso_tags.go b/aws/internal/keyvaluetags/sso_tags.go deleted file mode 100644 index 05391e0c734..00000000000 --- a/aws/internal/keyvaluetags/sso_tags.go +++ /dev/null @@ -1,95 +0,0 @@ -// +build !generate - -package keyvaluetags - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ssoadmin" -) - -// Custom SSO tag service functions using the same format as generated code. - -// SsoListTags lists sso service tags. -// The identifier is typically the Amazon Resource Name (ARN), although -// it may also be a different identifier depending on the service. -func SsoListTags(conn *ssoadmin.SSOAdmin, identifier string, instanceArn string) (KeyValueTags, error) { - input := &ssoadmin.ListTagsForResourceInput{ - InstanceArn: aws.String(instanceArn), - ResourceArn: aws.String(identifier), - } - - output, err := conn.ListTagsForResource(input) - - if err != nil { - return New(nil), err - } - - return SsoKeyValueTags(output.Tags), nil -} - -// SsoUpdateTags updates sso service tags. -// The identifier is typically the Amazon Resource Name (ARN), although -// it may also be a different identifier depending on the service. 
-func SsoUpdateTags(conn *ssoadmin.SSOAdmin, identifier string, instanceArn string, oldTagsMap interface{}, newTagsMap interface{}) error { - oldTags := New(oldTagsMap) - newTags := New(newTagsMap) - - if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { - input := &ssoadmin.UntagResourceInput{ - InstanceArn: aws.String(instanceArn), - ResourceArn: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.IgnoreAws().Keys()), - } - - _, err := conn.UntagResource(input) - - if err != nil { - return fmt.Errorf("error untagging resource (%s): %w", identifier, err) - } - } - - if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { - input := &ssoadmin.TagResourceInput{ - InstanceArn: aws.String(instanceArn), - ResourceArn: aws.String(identifier), - Tags: updatedTags.IgnoreAws().SsoTags(), - } - - _, err := conn.TagResource(input) - - if err != nil { - return fmt.Errorf("error tagging resource (%s): %w", identifier, err) - } - } - - return nil -} - -// SsoTags returns sso service tags. -func (tags KeyValueTags) SsoTags() []*ssoadmin.Tag { - result := make([]*ssoadmin.Tag, 0, len(tags)) - - for k, v := range tags.Map() { - tag := &ssoadmin.Tag{ - Key: aws.String(k), - Value: aws.String(v), - } - - result = append(result, tag) - } - - return result -} - -// SsoKeyValueTags creates KeyValueTags from sso service tags. -func SsoKeyValueTags(tags []*ssoadmin.Tag) KeyValueTags { - m := make(map[string]*string, len(tags)) - - for _, tag := range tags { - m[aws.StringValue(tag.Key)] = tag.Value - } - - return New(m) -} diff --git a/aws/internal/keyvaluetags/update_tags_gen.go b/aws/internal/keyvaluetags/update_tags_gen.go index 754c49b3133..d944f50f314 100644 --- a/aws/internal/keyvaluetags/update_tags_gen.go +++ b/aws/internal/keyvaluetags/update_tags_gen.go @@ -106,6 +106,7 @@ import ( "github.com/aws/aws-sdk-go/service/sns" "github.com/aws/aws-sdk-go/service/sqs" "github.com/aws/aws-sdk-go/service/ssm" + "github.com/aws/aws-sdk-go/service/ssoadmin" "github.com/aws/aws-sdk-go/service/storagegateway" "github.com/aws/aws-sdk-go/service/swf" "github.com/aws/aws-sdk-go/service/synthetics" @@ -3721,6 +3722,44 @@ func SsmUpdateTags(conn *ssm.SSM, identifier string, resourceType string, oldTag return nil } +// SsoadminUpdateTags updates ssoadmin service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func SsoadminUpdateTags(conn *ssoadmin.SSOAdmin, identifier string, resourceType string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &ssoadmin.UntagResourceInput{ + ResourceArn: aws.String(identifier), + InstanceArn: aws.String(resourceType), + TagKeys: aws.StringSlice(removedTags.IgnoreAws().Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &ssoadmin.TagResourceInput{ + ResourceArn: aws.String(identifier), + InstanceArn: aws.String(resourceType), + Tags: updatedTags.IgnoreAws().SsoadminTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + // StoragegatewayUpdateTags updates storagegateway service tags. 
// The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. diff --git a/aws/internal/service/ssoadmin/finder/finder.go b/aws/internal/service/ssoadmin/finder/finder.go new file mode 100644 index 00000000000..f820ab6b767 --- /dev/null +++ b/aws/internal/service/ssoadmin/finder/finder.go @@ -0,0 +1,48 @@ +package finder + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssoadmin" +) + +func AttachedManagedPolicy(conn *ssoadmin.SSOAdmin, permissionSetArn, instanceArn, managedPolicyArn string) (*ssoadmin.AttachedManagedPolicy, error) { + input := &ssoadmin.ListManagedPoliciesInPermissionSetInput{ + PermissionSetArn: aws.String(permissionSetArn), + InstanceArn: aws.String(instanceArn), + } + + var attachedPolicy *ssoadmin.AttachedManagedPolicy + err := conn.ListManagedPoliciesInPermissionSetPages(input, func(page *ssoadmin.ListManagedPoliciesInPermissionSetOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, policy := range page.AttachedManagedPolicies { + if aws.StringValue(policy.Arn) == managedPolicyArn { + attachedPolicy = policy + return false + } + } + return !lastPage + }) + + return attachedPolicy, err +} + +func InlinePolicy(conn *ssoadmin.SSOAdmin, instanceArn, permissionSetArn string) (*string, error) { + input := &ssoadmin.GetInlinePolicyForPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + } + + output, err := conn.GetInlinePolicyForPermissionSet(input) + if err != nil { + return nil, err + } + + if output == nil { + return nil, nil + } + + return output.InlinePolicy, nil +} diff --git a/aws/internal/service/ssoadmin/waiter/status.go b/aws/internal/service/ssoadmin/waiter/status.go new file mode 100644 index 00000000000..0d1293a201b --- /dev/null +++ b/aws/internal/service/ssoadmin/waiter/status.go @@ -0,0 +1,53 @@ +package waiter + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ssoadmin/finder" +) + +const ( + InlinePolicyDeleteStatusUnknown = "Unknown" + InlinePolicyDeleteStatusNotFound = "NotFound" + InlinePolicyDeleteStatusExists = "Exists" + PermissionSetProvisioningStatusUnknown = "Unknown" + PermissionSetProvisioningStatusNotFound = "NotFound" +) + +func InlinePolicyDeletedStatus(conn *ssoadmin.SSOAdmin, instanceArn, permissionSetArn string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + policy, err := finder.InlinePolicy(conn, instanceArn, permissionSetArn) + + if err != nil { + return nil, InlinePolicyDeleteStatusUnknown, err + } + + if aws.StringValue(policy) == "" { + return nil, InlinePolicyDeleteStatusNotFound, nil + } + + return policy, InlinePolicyDeleteStatusExists, nil + } +} + +func PermissionSetProvisioningStatus(conn *ssoadmin.SSOAdmin, instanceArn, requestID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + input := &ssoadmin.DescribePermissionSetProvisioningStatusInput{ + InstanceArn: aws.String(instanceArn), + ProvisionPermissionSetRequestId: aws.String(requestID), + } + + resp, err := conn.DescribePermissionSetProvisioningStatus(input) + + if err != nil { + return nil, PermissionSetProvisioningStatusUnknown, err + } + + if resp == nil || resp.PermissionSetProvisioningStatus == nil { + return nil, 
PermissionSetProvisioningStatusNotFound, nil + } + + return resp.PermissionSetProvisioningStatus, aws.StringValue(resp.PermissionSetProvisioningStatus.Status), nil + } +} diff --git a/aws/internal/service/ssoadmin/waiter/waiter.go b/aws/internal/service/ssoadmin/waiter/waiter.go new file mode 100644 index 00000000000..2097bf80131 --- /dev/null +++ b/aws/internal/service/ssoadmin/waiter/waiter.go @@ -0,0 +1,43 @@ +package waiter + +import ( + "time" + + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const ( + AWSSSOAdminPermissionSetDeleteTimeout = 5 * time.Minute + AWSSSOAdminPermissionSetProvisioningRetryDelay = 5 * time.Second + AWSSSOAdminPermissionSetProvisionTimeout = 10 * time.Minute +) + +func InlinePolicyDeleted(conn *ssoadmin.SSOAdmin, instanceArn, permissionSetArn string) (*string, error) { + stateConf := resource.StateChangeConf{ + Pending: []string{InlinePolicyDeleteStatusExists}, + Target: []string{InlinePolicyDeleteStatusNotFound}, + Refresh: InlinePolicyDeletedStatus(conn, instanceArn, permissionSetArn), + Timeout: AWSSSOAdminPermissionSetDeleteTimeout, + } + outputRaw, err := stateConf.WaitForState() + if v, ok := outputRaw.(*string); ok { + return v, err + } + return nil, err +} + +func PermissionSetProvisioned(conn *ssoadmin.SSOAdmin, instanceArn, requestID string) (*ssoadmin.PermissionSetProvisioningStatus, error) { + stateConf := resource.StateChangeConf{ + Delay: AWSSSOAdminPermissionSetProvisioningRetryDelay, + Pending: []string{ssoadmin.StatusValuesInProgress}, + Target: []string{ssoadmin.StatusValuesSucceeded}, + Refresh: PermissionSetProvisioningStatus(conn, instanceArn, requestID), + Timeout: AWSSSOAdminPermissionSetProvisionTimeout, + } + outputRaw, err := stateConf.WaitForState() + if v, ok := outputRaw.(*ssoadmin.PermissionSetProvisioningStatus); ok { + return v, err + } + return nil, err +} diff --git a/aws/provider.go b/aws/provider.go index 9e4bf123c83..92375c6d43c 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -355,8 +355,8 @@ func Provider() *schema.Provider { "aws_ssm_document": dataSourceAwsSsmDocument(), "aws_ssm_parameter": dataSourceAwsSsmParameter(), "aws_ssm_patch_baseline": dataSourceAwsSsmPatchBaseline(), - "aws_sso_instance": dataSourceAwsSsoInstance(), - "aws_sso_permission_set": dataSourceAwsSsoPermissionSet(), + "aws_ssoadmin_instances": dataSourceAwsSsoAdminInstances(), + "aws_ssoadmin_permission_set": dataSourceAwsSsoAdminPermissionSet(), "aws_storagegateway_local_disk": dataSourceAwsStorageGatewayLocalDisk(), "aws_subnet": dataSourceAwsSubnet(), "aws_subnet_ids": dataSourceAwsSubnetIDs(), @@ -939,7 +939,7 @@ func Provider() *schema.Provider { "aws_ssm_patch_group": resourceAwsSsmPatchGroup(), "aws_ssm_parameter": resourceAwsSsmParameter(), "aws_ssm_resource_data_sync": resourceAwsSsmResourceDataSync(), - "aws_sso_permission_set": resourceAwsSsoPermissionSet(), + "aws_ssoadmin_permission_set": resourceAwsSsoAdminPermissionSet(), "aws_storagegateway_cache": resourceAwsStorageGatewayCache(), "aws_storagegateway_cached_iscsi_volume": resourceAwsStorageGatewayCachedIscsiVolume(), "aws_storagegateway_gateway": resourceAwsStorageGatewayGateway(), diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go deleted file mode 100644 index b8f2d725d85..00000000000 --- a/aws/resource_aws_sso_permission_set.go +++ /dev/null @@ -1,614 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - "strings" - "time" - - 
"github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/ssoadmin" - "github.com/hashicorp/aws-sdk-go-base/tfawserr" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" -) - -const ( - AWSSSOPermissionSetCreateTimeout = 5 * time.Minute - AWSSSOPermissionSetUpdateTimeout = 10 * time.Minute - AWSSSOPermissionSetDeleteTimeout = 5 * time.Minute - AWSSSOPermissionSetProvisioningRetryDelay = 5 * time.Second - AWSSSOPermissionSetProvisioningRetryMinTimeout = 3 * time.Second -) - -func resourceAwsSsoPermissionSet() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsSsoPermissionSetCreate, - Read: resourceAwsSsoPermissionSetRead, - Update: resourceAwsSsoPermissionSetUpdate, - Delete: resourceAwsSsoPermissionSetDelete, - Importer: &schema.ResourceImporter{ - State: resourceAwsSsoPermissionSetImport, - }, - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(AWSSSOPermissionSetCreateTimeout), - Update: schema.DefaultTimeout(AWSSSOPermissionSetUpdateTimeout), - Delete: schema.DefaultTimeout(AWSSSOPermissionSetDeleteTimeout), - }, - Schema: map[string]*schema.Schema{ - "arn": { - Type: schema.TypeString, - Computed: true, - }, - - "created_date": { - Type: schema.TypeString, - Computed: true, - }, - - "provisioning_created_date": { - Type: schema.TypeString, - Computed: true, - }, - - "provisioning_failure_reason": { - Type: schema.TypeString, - Computed: true, - }, - - "provisioning_request_id": { - Type: schema.TypeString, - Computed: true, - }, - - "provisioning_status": { - Type: schema.TypeString, - Computed: true, - }, - - "instance_arn": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.All( - validation.StringLenBetween(10, 1224), - validation.StringMatch(regexp.MustCompile(`^arn:aws(-[a-z]+)*:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}$`), "must match arn:aws(-[a-z]+)*:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}"), - ), - }, - - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 32), - validation.StringMatch(regexp.MustCompile(`^[\w+=,.@-]+$`), "must match [\\w+=,.@-]"), - ), - }, - - "description": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 700), - validation.StringMatch(regexp.MustCompile(`^[\p{L}\p{M}\p{Z}\p{S}\p{N}\p{P}]*$`), "must match [\\p{L}\\p{M}\\p{Z}\\p{S}\\p{N}\\p{P}]"), - ), - }, - - "session_duration": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(1, 100), - Default: "PT1H", - }, - - "relay_state": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 240), - validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9&$@#\\\/%?=~\-_'"|!:,.;*+\[\]\(\)\{\} ]+$`), "must match [a-zA-Z0-9&$@#\\\\\\/%?=~\\-_'\"|!:,.;*+\\[\\]\\(\\)\\{\\} ]"), - ), - }, - - "inline_policy": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateIAMPolicyJson, - DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, - }, - - "managed_policy_arns": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateArn, - }, - Set: schema.HashString, - 
}, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsSsoPermissionSetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - permissionSetArn := d.Id() - instanceArn, err := resourceAwsSsoPermissionSetParseID(permissionSetArn) - if err != nil { - return []*schema.ResourceData{}, fmt.Errorf("Error parsing AWS Permission Set (%s) for import: %s", permissionSetArn, err) - } - - ssoadminconn := meta.(*AWSClient).ssoadminconn - ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig - - permissionSetResp, permissionSetErr := ssoadminconn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - }) - - if permissionSetErr != nil { - return []*schema.ResourceData{}, permissionSetErr - } - - permissionSet := permissionSetResp.PermissionSet - - log.Printf("[DEBUG] Getting Inline Policy for AWS SSO Permission Set") - inlinePolicyResp, inlinePolicyErr := ssoadminconn.GetInlinePolicyForPermissionSet(&ssoadmin.GetInlinePolicyForPermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - }) - if inlinePolicyErr != nil { - return []*schema.ResourceData{}, fmt.Errorf("Error importing Inline Policy for AWS SSO Permission Set (%s): %s", permissionSetArn, inlinePolicyErr) - } - - log.Printf("[DEBUG] Getting Managed Policies for AWS SSO Permission Set") - managedPoliciesResp, managedPoliciesErr := ssoadminconn.ListManagedPoliciesInPermissionSet(&ssoadmin.ListManagedPoliciesInPermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - }) - if managedPoliciesErr != nil { - return []*schema.ResourceData{}, fmt.Errorf("Error importing Managed Policies for AWS SSO Permission Set (%s): %s", permissionSetArn, managedPoliciesErr) - } - var managedPolicyArns []string - for _, managedPolicy := range managedPoliciesResp.AttachedManagedPolicies { - managedPolicyArns = append(managedPolicyArns, aws.StringValue(managedPolicy.Arn)) - } - - tags, err := keyvaluetags.SsoListTags(ssoadminconn, permissionSetArn, instanceArn) - if err != nil { - return []*schema.ResourceData{}, fmt.Errorf("Error listing tags during AWS SSO Permission Set (%s) import: %s", permissionSetArn, err) - } - - err = d.Set("instance_arn", instanceArn) - if err != nil { - return []*schema.ResourceData{}, err - } - err = d.Set("arn", permissionSetArn) - if err != nil { - return []*schema.ResourceData{}, err - } - err = d.Set("created_date", permissionSet.CreatedDate.Format(time.RFC3339)) - if err != nil { - return []*schema.ResourceData{}, err - } - err = d.Set("name", permissionSet.Name) - if err != nil { - return []*schema.ResourceData{}, err - } - err = d.Set("description", permissionSet.Description) - if err != nil { - return []*schema.ResourceData{}, err - } - err = d.Set("session_duration", permissionSet.SessionDuration) - if err != nil { - return []*schema.ResourceData{}, err - } - err = d.Set("relay_state", permissionSet.RelayState) - if err != nil { - return []*schema.ResourceData{}, err - } - err = d.Set("inline_policy", inlinePolicyResp.InlinePolicy) - if err != nil { - return []*schema.ResourceData{}, err - } - err = d.Set("managed_policy_arns", managedPolicyArns) - if err != nil { - return []*schema.ResourceData{}, err - } - if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return []*schema.ResourceData{}, fmt.Errorf("Error importing AWS SSO Permission Set (%s) tags: 
%s", permissionSetArn, err) - } - d.SetId(permissionSetArn) - - return []*schema.ResourceData{d}, nil -} - -func resourceAwsSsoPermissionSetCreate(d *schema.ResourceData, meta interface{}) error { - ssoadminconn := meta.(*AWSClient).ssoadminconn - - log.Printf("[INFO] Creating AWS SSO Permission Set") - - instanceArn := aws.String(d.Get("instance_arn").(string)) - - params := &ssoadmin.CreatePermissionSetInput{ - InstanceArn: instanceArn, - Name: aws.String(d.Get("name").(string)), - } - - if v, ok := d.GetOk("description"); ok { - params.Description = aws.String(v.(string)) - } - - if v, ok := d.GetOk("relay_state"); ok { - params.RelayState = aws.String(v.(string)) - } - - if v, ok := d.GetOk("session_duration"); ok { - params.SessionDuration = aws.String(v.(string)) - } - - if v, ok := d.GetOk("tags"); ok { - params.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().SsoTags() - } - - createPermissionSetResp, createPermissionerr := ssoadminconn.CreatePermissionSet(params) - if createPermissionerr != nil { - return fmt.Errorf("Error creating AWS SSO Permission Set: %s", createPermissionerr) - } - - if createPermissionSetResp == nil || createPermissionSetResp.PermissionSet == nil { - return fmt.Errorf("error creating AWS SSO Permission Set (%s): empty output", d.Get("name").(string)) - } - - permissionSetArn := createPermissionSetResp.PermissionSet.PermissionSetArn - d.SetId(aws.StringValue(permissionSetArn)) - - if attachPoliciesErr := attachPoliciesToPermissionSet(ssoadminconn, d, permissionSetArn, instanceArn); attachPoliciesErr != nil { - return attachPoliciesErr - } - - return resourceAwsSsoPermissionSetRead(d, meta) -} - -func resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) error { - ssoadminconn := meta.(*AWSClient).ssoadminconn - ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig - - var permissionSet *ssoadmin.PermissionSet - permissionSetArn := d.Id() - instanceArn := d.Get("instance_arn").(string) - name := d.Get("name").(string) - - log.Printf("[DEBUG] Reading AWS SSO Permission Set: %s", permissionSetArn) - - permissionSetResp, permissionerr := ssoadminconn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - }) - - if isAWSErr(permissionerr, ssoadmin.ErrCodeResourceNotFoundException, "") { - log.Printf("[WARN] AWS SSO Permission Set (%s) not found, removing from state", permissionSetArn) - d.SetId("") - return nil - } - - if permissionerr != nil { - return fmt.Errorf("Error getting AWS SSO Permission Set: %s", permissionerr) - } - - if permissionSetResp == nil || permissionSetResp.PermissionSet == nil { - return fmt.Errorf("error reading AWS SSO Permission Set (%s): empty output", name) - } - - log.Printf("[DEBUG] Found AWS SSO Permission Set: %s", permissionSet) - - log.Printf("[DEBUG] Getting Inline Policy for AWS SSO Permission Set") - inlinePolicyResp, inlinePolicyErr := ssoadminconn.GetInlinePolicyForPermissionSet(&ssoadmin.GetInlinePolicyForPermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - }) - if inlinePolicyErr != nil { - return fmt.Errorf("Error getting Inline Policy for AWS SSO Permission Set: %s", inlinePolicyErr) - } - - log.Printf("[DEBUG] Getting Managed Policies for AWS SSO Permission Set") - managedPoliciesResp, managedPoliciesErr := ssoadminconn.ListManagedPoliciesInPermissionSet(&ssoadmin.ListManagedPoliciesInPermissionSetInput{ - InstanceArn: 
aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - }) - if managedPoliciesErr != nil { - return fmt.Errorf("Error getting Managed Policies for AWS SSO Permission Set: %s", managedPoliciesErr) - } - var managedPolicyArns []string - for _, managedPolicy := range managedPoliciesResp.AttachedManagedPolicies { - managedPolicyArns = append(managedPolicyArns, aws.StringValue(managedPolicy.Arn)) - } - - tags, err := keyvaluetags.SsoListTags(ssoadminconn, permissionSetArn, instanceArn) - if err != nil { - return fmt.Errorf("Error listing tags for AWS SSO Permission Set (%s): %s", permissionSetArn, err) - } - - d.Set("arn", permissionSetArn) - d.Set("created_date", permissionSet.CreatedDate.Format(time.RFC3339)) - d.Set("instance_arn", instanceArn) - d.Set("name", permissionSet.Name) - d.Set("description", permissionSet.Description) - d.Set("session_duration", permissionSet.SessionDuration) - d.Set("relay_state", permissionSet.RelayState) - d.Set("inline_policy", inlinePolicyResp.InlinePolicy) - if err = d.Set("managed_policy_arns", managedPolicyArns); err != nil { - return err - } - if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("Error setting tags: %s", err) - } - - return nil -} - -func resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) error { - ssoadminconn := meta.(*AWSClient).ssoadminconn - - permissionSetArn := d.Id() - instanceArn := d.Get("instance_arn").(string) - - log.Printf("[DEBUG] Updating AWS SSO Permission Set: %s", permissionSetArn) - - if d.HasChanges("description", "relay_state", "session_duration") { - input := &ssoadmin.UpdatePermissionSetInput{ - PermissionSetArn: aws.String(permissionSetArn), - InstanceArn: aws.String(instanceArn), - } - - if d.HasChange("description") { - input.Description = aws.String(d.Get("description").(string)) - } - - if d.HasChange("relay_state") { - input.RelayState = aws.String(d.Get("relay_state").(string)) - } - - if d.HasChange("session_duration") { - input.SessionDuration = aws.String(d.Get("session_duration").(string)) - } - - log.Printf("[DEBUG] Updating AWS SSO Permission Set: %s", input) - _, permissionerr := ssoadminconn.UpdatePermissionSet(input) - if permissionerr != nil { - return fmt.Errorf("Error updating AWS SSO Permission Set: %s", permissionerr) - } - } - - if d.HasChange("tags") { - oldTags, newTags := d.GetChange("tags") - if updateTagsErr := keyvaluetags.SsoUpdateTags(ssoadminconn, d.Get("arn").(string), d.Get("instance_arn").(string), oldTags, newTags); updateTagsErr != nil { - return fmt.Errorf("Error updating tags: %s", updateTagsErr) - } - } - - if d.HasChange("inline_policy") { - if v, ok := d.GetOk("inline_policy"); ok { - log.Printf("[DEBUG] AWS SSO Permission Set %s updating IAM inline policy", permissionSetArn) - - inlinePolicy := aws.String(v.(string)) - - updateInput := &ssoadmin.PutInlinePolicyToPermissionSetInput{ - InlinePolicy: inlinePolicy, - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - } - - _, inlinePolicyErr := ssoadminconn.PutInlinePolicyToPermissionSet(updateInput) - if inlinePolicyErr != nil { - return fmt.Errorf("Error attaching IAM inline policy to AWS SSO Permission Set: %s", inlinePolicyErr) - } - } else { - deleteInput := &ssoadmin.DeleteInlinePolicyFromPermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - } - - _, inlinePolicyErr := 
ssoadminconn.DeleteInlinePolicyFromPermissionSet(deleteInput) - if inlinePolicyErr != nil { - return fmt.Errorf("Error deleting IAM inline policy from AWS SSO Permission Set: %s", inlinePolicyErr) - } - } - } - if d.HasChange("managed_policy_arns") { - o, n := d.GetChange("managed_policy_arns") - - os := o.(*schema.Set) - ns := n.(*schema.Set) - - removalList := os.Difference(ns) - for _, v := range removalList.List() { - input := &ssoadmin.DetachManagedPolicyFromPermissionSetInput{ - InstanceArn: aws.String(instanceArn), - ManagedPolicyArn: aws.String(v.(string)), - PermissionSetArn: aws.String(permissionSetArn), - } - - _, managedPoliciesErr := ssoadminconn.DetachManagedPolicyFromPermissionSet(input) - if managedPoliciesErr != nil { - return fmt.Errorf("Error detaching Managed Policy from AWS SSO Permission Set: %s", managedPoliciesErr) - } - } - - additionList := ns.Difference(os) - for _, v := range additionList.List() { - input := &ssoadmin.AttachManagedPolicyToPermissionSetInput{ - InstanceArn: aws.String(instanceArn), - ManagedPolicyArn: aws.String(v.(string)), - PermissionSetArn: aws.String(permissionSetArn), - } - - _, managedPoliciesErr := ssoadminconn.AttachManagedPolicyToPermissionSet(input) - if managedPoliciesErr != nil { - return fmt.Errorf("Error attaching Managed Policy to AWS SSO Permission Set: %s", managedPoliciesErr) - } - } - } - - // Reprovision if anything has changed - if d.HasChanges("description", "relay_state", "session_duration", "inline_policy", "managed_policy_arns", "tags") { - - // Auto provision all accounts - targetType := ssoadmin.ProvisionTargetTypeAllProvisionedAccounts - provisionInput := &ssoadmin.ProvisionPermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - TargetType: aws.String(targetType), - } - - log.Printf("[INFO] Provisioning AWS SSO Permission Set") - provisionResponse, err := ssoadminconn.ProvisionPermissionSet(provisionInput) - if err != nil { - return fmt.Errorf("Error provisioning AWS SSO Permission Set (%s): %w", d.Id(), err) - } - - status := provisionResponse.PermissionSetProvisioningStatus - - _, waitErr := waitForPermissionSetProvisioning(ssoadminconn, instanceArn, aws.StringValue(status.RequestId), d.Timeout(schema.TimeoutUpdate)) - if waitErr != nil { - return waitErr - } - } - - return resourceAwsSsoPermissionSetRead(d, meta) -} - -func resourceAwsSsoPermissionSetDelete(d *schema.ResourceData, meta interface{}) error { - ssoadminconn := meta.(*AWSClient).ssoadminconn - - permissionSetArn := d.Id() - instanceArn := d.Get("instance_arn").(string) - - log.Printf("[INFO] Deleting AWS SSO Permission Set: %s", permissionSetArn) - - params := &ssoadmin.DeletePermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - } - - _, err := ssoadminconn.DeletePermissionSet(params) - - if err != nil { - if tfawserr.ErrCodeEquals(err, ssoadmin.ErrCodeResourceNotFoundException) { - return nil - } - return fmt.Errorf("error deleting AWS SSO Permission Set (%s): %w", d.Id(), err) - } - return nil -} - -func attachPoliciesToPermissionSet(ssoadminconn *ssoadmin.SSOAdmin, d *schema.ResourceData, permissionSetArn *string, instanceArn *string) error { - - if v, ok := d.GetOk("inline_policy"); ok { - log.Printf("[INFO] Attaching IAM inline policy to AWS SSO Permission Set") - - inlinePolicy := aws.String(v.(string)) - - input := &ssoadmin.PutInlinePolicyToPermissionSetInput{ - InlinePolicy: inlinePolicy, - InstanceArn: instanceArn, - 
PermissionSetArn: permissionSetArn, - } - - _, inlinePolicyErr := ssoadminconn.PutInlinePolicyToPermissionSet(input) - if inlinePolicyErr != nil { - return fmt.Errorf("Error attaching IAM inline policy to AWS SSO Permission Set: %s", inlinePolicyErr) - } - } - - if v, ok := d.GetOk("managed_policy_arns"); ok { - log.Printf("[INFO] Attaching Managed Policies to AWS SSO Permission Set") - - managedPolicies := expandStringSet(v.(*schema.Set)) - - for _, managedPolicyArn := range managedPolicies { - - input := &ssoadmin.AttachManagedPolicyToPermissionSetInput{ - InstanceArn: instanceArn, - ManagedPolicyArn: managedPolicyArn, - PermissionSetArn: permissionSetArn, - } - - _, managedPoliciesErr := ssoadminconn.AttachManagedPolicyToPermissionSet(input) - if managedPoliciesErr != nil { - return fmt.Errorf("Error attaching Managed Policy to AWS SSO Permission Set: %s", managedPoliciesErr) - } - } - } - - return nil -} - -func resourceAwsSsoPermissionSetParseID(id string) (string, error) { - // id = arn:${Partition}:sso:::permissionSet/${InstanceID}/${PermissionSetID} - idFormatErr := fmt.Errorf("Unexpected format of ARN (%s), expected arn:${Partition}:sso:::permissionSet/${InstanceId}/${PermissionSetId}", id) - permissionSetARN, err := arn.Parse(id) - if err != nil { - return "", idFormatErr - } - - // We need: - // * The InstanceID portion of the permission set ARN resource (arn:aws:sso:::permissionSet/${InstanceId}/${PermissionSetId}) - // Split up the resource of the permission set ARN - resourceParts := strings.Split(permissionSetARN.Resource, "/") - if len(resourceParts) != 3 || resourceParts[0] != "permissionSet" || resourceParts[1] == "" || resourceParts[2] == "" { - return "", idFormatErr - } - - // resourceParts = ["permissionSet","ins-123456A", "ps-56789B"] - instanceARN := &arn.ARN{ - Partition: permissionSetARN.Partition, - Service: permissionSetARN.Service, - Resource: fmt.Sprintf("instance/%s", resourceParts[1]), - } - - return instanceARN.String(), nil -} - -func waitForPermissionSetProvisioning(ssoadminconn *ssoadmin.SSOAdmin, instanceArn string, requestID string, timeout time.Duration) (*ssoadmin.PermissionSetProvisioningStatus, error) { - - stateConf := resource.StateChangeConf{ - Delay: AWSSSOPermissionSetProvisioningRetryDelay, - Pending: []string{ssoadmin.StatusValuesInProgress}, - Target: []string{ssoadmin.StatusValuesSucceeded}, - Timeout: timeout, - MinTimeout: AWSSSOPermissionSetProvisioningRetryMinTimeout, - Refresh: resourceAwsSsoPermissionSetProvisioningRefreshFunc(ssoadminconn, requestID, instanceArn), - } - status, err := stateConf.WaitForState() - if err != nil { - return nil, fmt.Errorf("Error waiting for AWS SSO Permission Set provisioning status: %s", err) - } - return status.(*ssoadmin.PermissionSetProvisioningStatus), nil -} - -func resourceAwsSsoPermissionSetProvisioningRefreshFunc(ssoadminconn *ssoadmin.SSOAdmin, requestID, instanceArn string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - input := &ssoadmin.DescribePermissionSetProvisioningStatusInput{ - InstanceArn: aws.String(instanceArn), - ProvisionPermissionSetRequestId: aws.String(requestID), - } - - resp, err := ssoadminconn.DescribePermissionSetProvisioningStatus(input) - if err != nil { - return resp, "", fmt.Errorf("Error describing permission set provisioning status: %s", err) - } - status := resp.PermissionSetProvisioningStatus - if aws.StringValue(status.Status) == ssoadmin.StatusValuesFailed { - return resp, ssoadmin.StatusValuesFailed, fmt.Errorf("Failed to provision 
AWS SSO Permission Set (%s): %s", aws.StringValue(status.PermissionSetArn), aws.StringValue(status.FailureReason)) - } - return status, aws.StringValue(status.Status), nil - - } -} diff --git a/aws/resource_aws_sso_permission_set_test.go b/aws/resource_aws_sso_permission_set_test.go deleted file mode 100644 index ba152e9070c..00000000000 --- a/aws/resource_aws_sso_permission_set_test.go +++ /dev/null @@ -1,296 +0,0 @@ -package aws - -import ( - "fmt" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/ssoadmin" - "github.com/hashicorp/aws-sdk-go-base/tfawserr" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" -) - -func TestAccAWSSSOPermissionSet_Basic(t *testing.T) { - resourceName := "aws_sso_permission_set.example" - rName := acctest.RandomWithPrefix("tf-sso-test") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSSOPermissionSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccSSOPermissionSetBasicConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSOPermissionSetExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "name", rName), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAWSSSOPermissionSet_ManagedPolicies(t *testing.T) { - resourceName := "aws_sso_permission_set.example" - rName := acctest.RandomWithPrefix("tf-sso-test") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSSOPermissionSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccSSOPermissionSetBasicConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSOPermissionSetExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "1"), - resource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), // lintignore:AWSAT005 - resource.TestCheckResourceAttr(resourceName, "name", rName), - resource.TestCheckResourceAttr(resourceName, "description", "Just a test"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccSSOPermissionSetBasicConfigUpdated(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSOPermissionSetExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "2"), - resource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), // lintignore:AWSAT005 - resource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess"), // lintignore:AWSAT005 - resource.TestCheckResourceAttr(resourceName, "name", rName), - resource.TestCheckResourceAttr(resourceName, "description", "Just a test update"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), - ), - }, - }, - }) -} - -func TestAccAWSSSOPermissionSet_Disappears(t *testing.T) { - resourceName := "aws_sso_permission_set.example" - rName := 
acctest.RandomWithPrefix("tf-sso-test") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSSOPermissionSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccSSOPermissionSetBasicConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSOPermissionSetExists(resourceName), - testAccCheckResourceDisappears(testAccProvider, resourceAwsSsoPermissionSet(), resourceName), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAWSSSOPermissionSet_Tags(t *testing.T) { - resourceName := "aws_sso_permission_set.example" - rName := acctest.RandomWithPrefix("tf-sso-test") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSSOPermissionSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccSSOPermissionSetConfigTagsSingle(rName, "key1", "value1"), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSOPermissionSetExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), - resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccSSOPermissionSetConfigTagsMultiple(rName, "key1", "updatedvalue1", "key2", "value2"), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSOPermissionSetExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), - resource.TestCheckResourceAttr(resourceName, "tags.key1", "updatedvalue1"), - resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), - ), - }, - { - Config: testAccSSOPermissionSetConfigTagsSingle(rName, "key2", "value2"), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSOPermissionSetExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), - resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), - ), - }, - }, - }) -} - -func testAccCheckAWSSSOPermissionSetExists(resourceName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] - if !ok { - return fmt.Errorf("Not found: %s", resourceName) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Resource (%s) ID not set", resourceName) - } - - instanceArn, err := resourceAwsSsoPermissionSetParseID(rs.Primary.ID) - - if err != nil { - return err - } - - ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn - - permissionSetResp, permissionSetErr := ssoadminconn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(rs.Primary.ID), - }) - - if permissionSetErr != nil { - return permissionSetErr - } - - if permissionSetResp != nil { - if arn := aws.StringValue(permissionSetResp.PermissionSet.PermissionSetArn); arn == rs.Primary.ID { - return nil - } - } - - return fmt.Errorf("AWS SSO Permission Set (%s) not found", rs.Primary.ID) - } -} - -func testAccCheckAWSSSOPermissionSetDestroy(s *terraform.State) error { - ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_sso_permission_set" { - continue - } - - idFormatErr := fmt.Errorf("Unexpected format of ARN (%s), expected 
arn:${Partition}:sso:::permissionSet/${InstanceId}/${PermissionSetId}", rs.Primary.ID) - permissionSetArn, err := arn.Parse(rs.Primary.ID) - if err != nil { - return err - } - - resourceParts := strings.Split(permissionSetArn.Resource, "/") - if len(resourceParts) != 3 || resourceParts[0] != "permissionSet" || resourceParts[1] == "" || resourceParts[2] == "" { - return idFormatErr - } - - // resourceParts = ["permissionSet","ins-123456A", "ps-56789B"] - instanceArn := arn.ARN{ - Partition: permissionSetArn.Partition, - Service: permissionSetArn.Service, - Resource: fmt.Sprintf("instance/%s", resourceParts[1]), - }.String() - - input := &ssoadmin.DescribePermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(rs.Primary.ID), - } - - output, err := ssoadminconn.DescribePermissionSet(input) - - if tfawserr.ErrCodeEquals(err, ssoadmin.ErrCodeResourceNotFoundException) { - continue - } - - if err != nil { - return err - } - - if output != nil { - return fmt.Errorf("AWS SSO Permission Set (%s) still exists", rs.Primary.ID) - } - } - - return nil -} - -func testAccSSOPermissionSetBasicConfig(rName string) string { - return fmt.Sprintf(` -data "aws_sso_instance" "selected" {} - -resource "aws_sso_permission_set" "example" { - name = "%s" - description = "Just a test" - instance_arn = data.aws_sso_instance.selected.arn - managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] -} -`, rName) // lintignore:AWSAT005 -} - -func testAccSSOPermissionSetBasicConfigUpdated(rName string) string { - return fmt.Sprintf(` -data "aws_sso_instance" "selected" {} - -resource "aws_sso_permission_set" "example" { - name = "%s" - description = "Just a test update" - instance_arn = data.aws_sso_instance.selected.arn - managed_policy_arns = [ - "arn:aws:iam::aws:policy/ReadOnlyAccess", - "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess" - ] -} -`, rName) // lintignore:AWSAT005 -} - -func testAccSSOPermissionSetConfigTagsSingle(rName, tagKey1, tagValue1 string) string { - return fmt.Sprintf(` -data "aws_sso_instance" "selected" {} - -resource "aws_sso_permission_set" "example" { - name = "%s" - description = "Just a test" - instance_arn = data.aws_sso_instance.selected.arn - managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] - - tags = { - %[2]q = %[3]q - } -} -`, rName, tagKey1, tagValue1) // lintignore:AWSAT005 -} - -func testAccSSOPermissionSetConfigTagsMultiple(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return fmt.Sprintf(` -data "aws_sso_instance" "selected" {} - -resource "aws_sso_permission_set" "example" { - name = "%s" - description = "Just a test" - instance_arn = data.aws_sso_instance.selected.arn - managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] - - tags = { - %[2]q = %[3]q - %[4]q = %[5]q - } -} -`, rName, tagKey1, tagValue1, tagKey2, tagValue2) // lintignore:AWSAT005 -} diff --git a/aws/resource_aws_ssoadmin_permission_set.go b/aws/resource_aws_ssoadmin_permission_set.go new file mode 100644 index 00000000000..9a1ef87a5d5 --- /dev/null +++ b/aws/resource_aws_ssoadmin_permission_set.go @@ -0,0 +1,302 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + 
"github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ssoadmin/waiter" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" +) + +func resourceAwsSsoAdminPermissionSet() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSsoAdminPermissionSetCreate, + Read: resourceAwsSsoAdminPermissionSetRead, + Update: resourceAwsSsoAdminPermissionSetUpdate, + Delete: resourceAwsSsoAdminPermissionSetDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "created_date": { + Type: schema.TypeString, + Computed: true, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 700), + validation.StringMatch(regexp.MustCompile(`[\p{L}\p{M}\p{Z}\p{S}\p{N}\p{P}]*`), "must match [\\p{L}\\p{M}\\p{Z}\\p{S}\\p{N}\\p{P}]"), + ), + }, + + "instance_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 32), + validation.StringMatch(regexp.MustCompile(`[\w+=,.@-]+`), "must match [\\w+=,.@-]"), + ), + }, + + "relay_state": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 240), + validation.StringMatch(regexp.MustCompile(`[a-zA-Z0-9&$@#\\\/%?=~\-_'"|!:,.;*+\[\]\ \(\)\{\}]+`), "must match [a-zA-Z0-9&$@#\\\\\\/%?=~\\-_'\"|!:,.;*+\\[\\]\\(\\)\\{\\}]"), + ), + }, + + "session_duration": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 100), + Default: "PT1H", + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsSsoAdminPermissionSetCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + + instanceArn := d.Get("instance_arn").(string) + name := d.Get("name").(string) + + input := &ssoadmin.CreatePermissionSetInput{ + InstanceArn: aws.String(instanceArn), + Name: aws.String(name), + } + + if v, ok := d.GetOk("description"); ok { + input.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("relay_state"); ok { + input.RelayState = aws.String(v.(string)) + } + + if v, ok := d.GetOk("session_duration"); ok { + input.SessionDuration = aws.String(v.(string)) + } + + if v, ok := d.GetOk("tags"); ok { + input.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().SsoadminTags() + } + + output, err := conn.CreatePermissionSet(input) + if err != nil { + return fmt.Errorf("error creating SSO Permission Set (%s): %w", name, err) + } + + if output == nil || output.PermissionSet == nil { + return fmt.Errorf("error creating SSO Permission Set (%s): empty output", name) + } + + d.SetId(fmt.Sprintf("%s,%s", aws.StringValue(output.PermissionSet.PermissionSetArn), instanceArn)) + + return resourceAwsSsoAdminPermissionSetRead(d, meta) +} + +func resourceAwsSsoAdminPermissionSetRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig + + arn, instanceArn, err := parseSsoAdminPermissionSetID(d.Id()) + if err != nil { + return fmt.Errorf("error parsing SSO Permision Set ID: %w", err) + } + + output, err := 
conn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(arn), + }) + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, ssoadmin.ErrCodeResourceNotFoundException) { + log.Printf("[WARN] SSO Permission Set (%s) not found, removing from state", arn) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error reading SSO Permission Set: %w", err) + } + + if output == nil || output.PermissionSet == nil { + return fmt.Errorf("error reading SSO Permission Set (%s): empty output", arn) + } + + permissionSet := output.PermissionSet + + d.Set("arn", permissionSet.PermissionSetArn) + d.Set("created_date", permissionSet.CreatedDate.Format(time.RFC3339)) + d.Set("description", permissionSet.Description) + d.Set("instance_arn", instanceArn) + d.Set("name", permissionSet.Name) + d.Set("relay_state", permissionSet.RelayState) + d.Set("session_duration", permissionSet.SessionDuration) + + tags, err := keyvaluetags.SsoadminListTags(conn, arn, instanceArn) + if err != nil { + return fmt.Errorf("error listing tags for SSO Permission Set (%s): %w", arn, err) + } + + if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + return fmt.Errorf("error setting tags: %w", err) + } + + return nil +} + +func resourceAwsSsoAdminPermissionSetUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + + arn, instanceArn, err := parseSsoAdminPermissionSetID(d.Id()) + if err != nil { + return fmt.Errorf("error parsing SSO Permission Set ID: %w", err) + } + + if d.HasChanges("description", "relay_state", "session_duration") { + input := &ssoadmin.UpdatePermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(arn), + } + + if d.HasChange("description") { + input.Description = aws.String(d.Get("description").(string)) + } + + if d.HasChange("relay_state") { + input.RelayState = aws.String(d.Get("relay_state").(string)) + } + + if d.HasChange("session_duration") { + input.SessionDuration = aws.String(d.Get("session_duration").(string)) + } + + _, err := conn.UpdatePermissionSet(input) + if err != nil { + return fmt.Errorf("error updating SSO Permission Set (%s): %w", arn, err) + } + } + + if d.HasChange("tags") { + o, n := d.GetChange("tags") + if err := keyvaluetags.SsoadminUpdateTags(conn, arn, instanceArn, o, n); err != nil { + return fmt.Errorf("error updating tags: %w", err) + } + } + + // Re-provision ALL accounts after making the above changes + err = provisionSsoAdminPermissionSet(conn, arn, instanceArn) + if err != nil { + return err + } + + return resourceAwsSsoAdminPermissionSetRead(d, meta) +} + +func resourceAwsSsoAdminPermissionSetDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + + arn, instanceArn, err := parseSsoAdminPermissionSetID(d.Id()) + if err != nil { + return fmt.Errorf("error parsing SSO Permission Set ID: %w", err) + } + + input := &ssoadmin.DeletePermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(arn), + } + + _, err = conn.DeletePermissionSet(input) + if err != nil { + if tfawserr.ErrCodeEquals(err, ssoadmin.ErrCodeResourceNotFoundException) { + return nil + } + return fmt.Errorf("error deleting SSO Permission Set (%s): %w", arn, err) + } + + return nil +} + +func parseSsoAdminPermissionSetID(id string) (string, string, error) { + idParts := strings.Split(id, ",") + if len(idParts) != 2 || idParts[0] 
== "" || idParts[1] == "" { + return "", "", fmt.Errorf("unexpected format for ID (%q), expected PERMISSION_SET_ARN,INSTANCE_ARN", id) + } + return idParts[0], idParts[1], nil +} + +func provisionSsoAdminPermissionSet(conn *ssoadmin.SSOAdmin, arn, instanceArn string) error { + input := &ssoadmin.ProvisionPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(arn), + TargetType: aws.String(ssoadmin.ProvisionTargetTypeAllProvisionedAccounts), + } + + var output *ssoadmin.ProvisionPermissionSetOutput + err := resource.Retry(waiter.AWSSSOAdminPermissionSetProvisionTimeout, func() *resource.RetryError { + var err error + output, err = conn.ProvisionPermissionSet(input) + + if err != nil { + if tfawserr.ErrCodeEquals(err, ssoadmin.ErrCodeConflictException) { + return resource.RetryableError(err) + } + if tfawserr.ErrCodeEquals(err, ssoadmin.ErrCodeThrottlingException) { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + + return nil + }) + + if tfresource.TimedOut(err) { + output, err = conn.ProvisionPermissionSet(input) + } + + if err != nil { + return fmt.Errorf("error provisioning SSO Permission Set (%s): %w", arn, err) + } + + if output == nil && output.PermissionSetProvisioningStatus == nil { + return fmt.Errorf("error provisioning SSO Permission Set (%s): empty output", arn) + } + + _, err = waiter.PermissionSetProvisioned(conn, instanceArn, aws.StringValue(output.PermissionSetProvisioningStatus.RequestId)) + if err != nil { + return fmt.Errorf("error waiting for SSO Permission Set (%s) to provision: %w", arn, err) + } + + return nil +} diff --git a/aws/resource_aws_ssoadmin_permission_set_test.go b/aws/resource_aws_ssoadmin_permission_set_test.go new file mode 100644 index 00000000000..a5ef3dfac7e --- /dev/null +++ b/aws/resource_aws_ssoadmin_permission_set_test.go @@ -0,0 +1,415 @@ +package aws + +import ( + "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "log" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func init() { + resource.AddTestSweepers("aws_ssoadmin_permission_set", &resource.Sweeper{ + Name: "aws_ssoadmin_permission_set", + F: testSweepSsoAdminPermissionSets, + }) +} + +func testSweepSsoAdminPermissionSets(region string) error { + client, err := sharedClientForRegion(region) + if err != nil { + return fmt.Errorf("error getting client: %w", err) + } + + conn := client.(*AWSClient).ssoadminconn + var sweeperErrs *multierror.Error + + // Need to Read the SSO Instance first; assumes the first instance returned + // is where the permission sets exist as AWS SSO currently supports only 1 instance + ds := dataSourceAwsSsoAdminInstances() + dsData := ds.Data(nil) + + err = ds.Read(dsData, client) + + if testSweepSkipResourceError(err) { + log.Printf("[WARN] Skipping SSO Permission Set sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return err + } + + instanceArn := dsData.Get("arns").(*schema.Set).List()[0].(string) + + input := &ssoadmin.ListPermissionSetsInput{ + InstanceArn: aws.String(instanceArn), + } + + err = conn.ListPermissionSetsPages(input, func(page *ssoadmin.ListPermissionSetsOutput, isLast bool) bool { + if page == nil { + 
return !isLast + } + + for _, permissionSet := range page.PermissionSets { + if permissionSet == nil { + continue + } + + arn := aws.StringValue(permissionSet) + + log.Printf("[INFO] Deleting SSO Permission Set: %s", arn) + + r := resourceAwsSsoAdminPermissionSet() + d := r.Data(nil) + d.SetId(fmt.Sprintf("%s,%s", arn, instanceArn)) + + err = r.Delete(d, client) + + if err != nil { + log.Printf("[ERROR] %s", err) + sweeperErrs = multierror.Append(sweeperErrs, err) + continue + } + } + + return !isLast + }) + + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping SSO Permission Set sweep for %s: %s", region, err) + return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors + } + + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error retrieving SSO Permission Set: %w", err)) + } + + return sweeperErrs.ErrorOrNil() +} + +func TestAccAWSSSOAdminPermissionSet_basic(t *testing.T) { + resourceName := "aws_ssoadmin_permission_set.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOAdminInstances(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOAdminPermissionSetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSSOAdminPermissionSetBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSOAdminPermissionSetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "session_duration", "PT1H"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSSOAdminPermissionSet_tags(t *testing.T) { + resourceName := "aws_ssoadmin_permission_set.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOAdminInstances(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOAdminPermissionSetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSSOAdminPermissionSetConfigTagsSingle(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSOAdminPermissionSetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSSSOAdminPermissionSetConfigTagsMultiple(rName, "key1", "updatedvalue1", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSOAdminPermissionSetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "updatedvalue1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSSSOAdminPermissionSetConfigTagsSingle(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSOAdminPermissionSetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
TestAccAWSSSOAdminPermissionSet_updateDescription(t *testing.T) { + resourceName := "aws_ssoadmin_permission_set.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOAdminInstances(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOAdminPermissionSetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSSOAdminPermissionSetBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSOAdminPermissionSetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "description", ""), + ), + }, + { + Config: testAccAWSSSOAdminPermissionSetUpdateDescriptionConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSOAdminPermissionSetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "description", rName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSSOAdminPermissionSet_updateRelayState(t *testing.T) { + resourceName := "aws_ssoadmin_permission_set.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOAdminInstances(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOAdminPermissionSetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSSOAdminPermissionSetBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSOAdminPermissionSetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "relay_state", ""), + ), + }, + { + Config: testAccAWSSSOAdminPermissionSetUpdateRelayStateConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSOAdminPermissionSetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "relay_state", "https://example.com"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSSOAdminPermissionSet_updateSessionDuration(t *testing.T) { + resourceName := "aws_ssoadmin_permission_set.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOAdminInstances(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOAdminPermissionSetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSSOAdminPermissionSetBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSOAdminPermissionSetExists(resourceName), + ), + }, + { + Config: testAccAWSSSOAdminPermissionSetUpdateSessionDurationConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSOAdminPermissionSetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "session_duration", "PT2H"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckAWSSSOAdminPermissionSetDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).ssoadminconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_ssoadmin_permission_set" { + continue + } + + arn, instanceArn, err := parseSsoAdminPermissionSetID(rs.Primary.ID) + + if err != nil { + return fmt.Errorf("error parsing SSO Permission Set ID (%s): %w", rs.Primary.ID, err) + } + + input := &ssoadmin.DescribePermissionSetInput{ + InstanceArn: 
aws.String(instanceArn), + PermissionSetArn: aws.String(arn), + } + + _, err = conn.DescribePermissionSet(input) + + if tfawserr.ErrCodeEquals(err, ssoadmin.ErrCodeResourceNotFoundException) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("SSO Permission Set (%s) still exists", arn) + } + + return nil +} + +func testAccCheckAWSSOAdminPermissionSetExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("Resource (%s) ID not set", resourceName) + } + + conn := testAccProvider.Meta().(*AWSClient).ssoadminconn + + arn, instanceArn, err := parseSsoAdminPermissionSetID(rs.Primary.ID) + + if err != nil { + return fmt.Errorf("error parsing SSO Permission Set ID (%s): %w", rs.Primary.ID, err) + } + + _, err = conn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(arn), + }) + + if err != nil { + return err + } + + return nil + } +} + +func testAccAWSSSOAdminPermissionSetBasicConfig(rName string) string { + return fmt.Sprintf(` +data "aws_ssoadmin_instances" "test" {} + +resource "aws_ssoadmin_permission_set" "test" { + name = %q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] +} +`, rName) +} + +func testAccAWSSSOAdminPermissionSetUpdateDescriptionConfig(rName string) string { + return fmt.Sprintf(` +data "aws_ssoadmin_instances" "test" {} + +resource "aws_ssoadmin_permission_set" "test" { + name = %[1]q + description = %[1]q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] +} +`, rName) +} + +func testAccAWSSSOAdminPermissionSetUpdateRelayStateConfig(rName string) string { + return fmt.Sprintf(` +data "aws_ssoadmin_instances" "test" {} + +resource "aws_ssoadmin_permission_set" "test" { + name = %[1]q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + relay_state = "https://example.com" +} +`, rName) +} + +func testAccAWSSSOAdminPermissionSetUpdateSessionDurationConfig(rName string) string { + return fmt.Sprintf(` +data "aws_ssoadmin_instances" "test" {} + +resource "aws_ssoadmin_permission_set" "test" { + name = %[1]q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + session_duration = "PT2H" +} +`, rName) +} + +func testAccAWSSSOAdminPermissionSetConfigTagsSingle(rName, tagKey1, tagValue1 string) string { + return fmt.Sprintf(` +data "aws_ssoadmin_instances" "test" {} + +resource "aws_ssoadmin_permission_set" "test" { + name = %q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1) +} + +func testAccAWSSSOAdminPermissionSetConfigTagsMultiple(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return fmt.Sprintf(` +data "aws_ssoadmin_instances" "test" {} + +resource "aws_ssoadmin_permission_set" "test" { + name = %q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2) +} diff --git a/website/docs/d/sso_instance.html.markdown b/website/docs/d/sso_instance.html.markdown deleted file mode 100644 index 72f80407dd5..00000000000 --- a/website/docs/d/sso_instance.html.markdown +++ /dev/null @@ -1,35 +0,0 @@ ---- -subcategory: "SSO Admin" -layout: "aws" -page_title: "AWS: aws_sso_instance" -description: |- - Get information 
on an AWS Single Sign-On Instance. ---- - -# Data Source: aws_sso_instance - -Use this data source to get the Single Sign-On Instance ARN and Identity Store ID. - -## Example Usage - -```hcl -data "aws_sso_instance" "selected" {} - -output "arn" { - value = data.aws_sso_instance.selected.arn -} - -output "identity_store_id" { - value = data.aws_sso_instance.selected.identity_store_id -} -``` - -## Argument Reference - -There are no arguments available for this data source. - -## Attributes Reference - -* `arn` - The AWS ARN associated with the AWS Single Sign-On Instance. -* `id` - The AWS ARN associated with the AWS Single Sign-On Instance. -* `identity_store_id` - The Identity Store ID associated with the AWS Single Sign-On Instance. diff --git a/website/docs/d/sso_permission_set.html.markdown b/website/docs/d/sso_permission_set.html.markdown deleted file mode 100644 index c6e90109ef7..00000000000 --- a/website/docs/d/sso_permission_set.html.markdown +++ /dev/null @@ -1,47 +0,0 @@ ---- -subcategory: "SSO Admin" -layout: "aws" -page_title: "AWS: aws_sso_permission_set" -description: |- - Get information on an AWS Single Sign-On Permission Set. ---- - -# Data Source: aws_sso_permission_set - -Use this data source to get the Single Sign-On Permission Set. - -## Example Usage - -```hcl -data "aws_sso_instance" "selected" {} - -data "aws_sso_permission_set" "example" { - instance_arn = data.aws_sso_instance.selected.arn - name = "Example" -} - -output "arn" { - value = data.aws_sso_permission_set.example.arn -} -``` - -## Argument Reference - -The following arguments are supported: - -* `instance_arn` - (Required) The AWS ARN associated with the AWS Single Sign-On Instance. -* `name` - (Required) The name of the AWS Single Sign-On Permission Set. - -## Attributes Reference - -In addition to all arguments above, the following attributes are exported: - -* `id` - The arn of the permission set. -* `arn` - The arn of the permission set. -* `created_date` - The created date of the permission set. -* `description` - The description of the permission set. -* `session_duration` - The session duration of the permission set. -* `relay_state` - The relay state of the permission set. -* `inline_policy` - The inline policy of the permission set. -* `managed_policy_arns` - The managed policies attached to the permission set. -* `tags` - The tags of the permission set. \ No newline at end of file diff --git a/website/docs/d/ssoadmin_instances.html.markdown b/website/docs/d/ssoadmin_instances.html.markdown new file mode 100644 index 00000000000..781bc666dce --- /dev/null +++ b/website/docs/d/ssoadmin_instances.html.markdown @@ -0,0 +1,35 @@ +--- +subcategory: "SSO Admin" +layout: "aws" +page_title: "AWS: aws_ssoadmin_instance" +description: |- + Get information on SSO Instances. +--- + +# Data Source: aws_ssoadmin_instances + +Use this data source to get ARNs and Identity Store IDs of Single Sign-On (SSO) Instances. + +## Example Usage + +```hcl +data "aws_ssoadmin_instances" "example" {} + +output "arn" { + value = tolist(data.aws_ssoadmin_instances.example.arns)[0] +} + +output "identity_store_id" { + value = tolist(data.aws_ssoadmin_instances.example.identity_store_ids)[0] +} +``` + +## Argument Reference + +There are no arguments available for this data source. + +## Attributes Reference + +* `arn` - Set of Amazon Resource Names (ARNs) of the SSO Instances. +* `id` - AWS Region. +* `identity_store_ids` - Set of identifier(s) of the identity store(s) connected to the SSO instance(s). 
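+
+As a minimal sketch (reusing the `example` data source from the usage example above), the first Instance ARN in the set can be passed to any argument that expects a single SSO Instance ARN, such as the `instance_arn` of an `aws_ssoadmin_permission_set` resource:
+
+```hcl
+resource "aws_ssoadmin_permission_set" "example" {
+  name         = "Example"
+  instance_arn = tolist(data.aws_ssoadmin_instances.example.arns)[0]
+}
+```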
diff --git a/website/docs/d/ssoadmin_permission_set.html.markdown b/website/docs/d/ssoadmin_permission_set.html.markdown new file mode 100644 index 00000000000..a87a3d12614 --- /dev/null +++ b/website/docs/d/ssoadmin_permission_set.html.markdown @@ -0,0 +1,46 @@ +--- +subcategory: "SSO Admin" +layout: "aws" +page_title: "AWS: aws_sso_permission_set" +description: |- + Get information on a Single Sign-On (SSO) Permission Set. +--- + +# Data Source: aws_ssoadmin_permission_set + +Use this data source to get a Single Sign-On (SSO) Permission Set. + +## Example Usage + +```hcl +data "aws_ssoadmin_instances" "example" {} + +data "aws_ssoadmin_permission_set" "example" { + instance_arn = tolist(data.aws_ssoadmin_instances.example.arns)[0] + name = "Example" +} + +output "arn" { + value = data.aws_ssoadmin_permission_set.example.arn +} +``` + +## Argument Reference + +The following arguments are supported: + +~> **NOTE:** Either `arn` or `name` must be configured. + +* `arn` - (Optional) The Amazon Resource Name (ARN) of the permission set. +* `instance_arn` - (Required) The Amazon Resource Name (ARN) of the SSO Instance associated with the permission set. +* `name` - (Optional) The name of the SSO Permission Set. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - The Amazon Resource Name (ARN) of the permission set. +* `description` - The description of the Permission Set. +* `relay_state` - The relay state URL used to redirect users within the application during the federation authentication process. +* `session_duration` - The length of time that the application user sessions are valid in the ISO-8601 standard. Default: `PT1H`. +* `tags` - Key-value map of resource tags. \ No newline at end of file diff --git a/website/docs/r/sso_permission_set.html.markdown b/website/docs/r/sso_permission_set.html.markdown deleted file mode 100644 index be261c581cb..00000000000 --- a/website/docs/r/sso_permission_set.html.markdown +++ /dev/null @@ -1,73 +0,0 @@ ---- -subcategory: "SSO Admin" -layout: "aws" -page_title: "AWS: aws_sso_permission_set" -description: |- - Manages an AWS Single Sign-On permission set ---- - -# Resource: aws_sso_permission_set - -Provides an AWS Single Sign-On Permission Set resource - -## Example Usage - -```hcl -data "aws_sso_instance" "selected" {} - -data "aws_iam_policy_document" "example" { - statement { - sid = "1" - - actions = [ - "s3:ListAllMyBuckets", - "s3:GetBucketLocation", - ] - - resources = [ - "arn:aws:s3:::*", - ] - } -} - -resource "aws_sso_permission_set" "example" { - name = "Example" - description = "An example" - instance_arn = data.aws_sso_instance.selected.arn - session_duration = "PT1H" - relay_state = "https://console.aws.amazon.com/console/home" - inline_policy = data.aws_iam_policy_document.example.json - managed_policy_arns = [ - "arn:aws:iam::aws:policy/ReadOnlyAccess", - ] -} -``` - -## Argument Reference - -The following arguments are supported: - -* `instance_arn` - (Required) The AWS ARN associated with the AWS Single Sign-On Instance. -* `name` - (Required) The name of the AWS Single Sign-On Permission Set. -* `description` - (Optional) The description of the AWS Single Sign-On Permission Set. -* `session_duration` - (Optional) The session duration of the AWS Single Sign-On Permission Set in the ISO-8601 standard. The default value is `PT1H`. -* `relay_state` - (Optional) The relay state of AWS Single Sign-On Permission Set. 
-* `inline_policy` - (Optional) The inline policy of the AWS Single Sign-On Permission Set. -* `managed_policy_arns` - (Optional) The managed policies attached to the AWS Single Sign-On Permission Set. -* `tags` - (Optional) Key-value map of resource tags. - -## Attribute Reference - -In addition to all arguments above, the following attributes are exported: - -* `id` - The arn of the AWS Single Sign-On Permission Set. -* `arn` - The arn of the AWS Single Sign-On Permission Set. -* `created_date` - The created date of the AWS Single Sign-On Permission Set. - -## Import - -`aws_sso_permission_set` can be imported by using the AWS Single Sign-On Permission Set Resource Name (ARN), e.g. - -``` -$ terraform import aws_sso_permission_set.example arn:aws:sso:::permissionSet/ssoins-2938j0x8920sbj72/ps-80383020jr9302rk -``` \ No newline at end of file diff --git a/website/docs/r/ssoadmin_permission_set.html.markdown b/website/docs/r/ssoadmin_permission_set.html.markdown new file mode 100644 index 00000000000..f28382356fb --- /dev/null +++ b/website/docs/r/ssoadmin_permission_set.html.markdown @@ -0,0 +1,52 @@ +--- +subcategory: "SSO Admin" +layout: "aws" +page_title: "AWS: aws_ssoadmin_permission_set" +description: |- + Manages a Single Sign-On (SSO) Permission Set +--- + +# Resource: aws_ssoadmin_permission_set + +Provides a Single Sign-On (SSO) Permission Set resource + +## Example Usage + +```hcl +data "aws_ssoadmin_instances" "example" {} + +resource "aws_ssoadmin_permission_set" "example" { + name = "Example" + description = "An example" + instance_arn = tolist(data.aws_ssoadmin_instances.example.arns)[0] + relay_state = "https://s3.console.aws.amazon.com/s3/home?region=us-east-1#" + session_duration = "PT2H" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `description` - (Optional) The description of the Permission Set. +* `instance_arn` - (Required) The Amazon Resource Name (ARN) of the SSO Instance under which the operation will be executed. +* `name` - (Required) The name of the Permission Set. +* `relay_state` - (Optional) The relay state URL used to redirect users within the application during the federation authentication process. +* `session_duration` - (Optional) The length of time that the application user sessions are valid in the ISO-8601 standard. Default: `PT1H`. +* `tags` - (Optional) Key-value map of resource tags. + +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - The Amazon Resource Name (ARN) of the Permission Set. +* `arn` - The Amazon Resource Name (ARN) of the Permission Set. +* `created_date` - The date the Permission Set was created in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8). + +## Import + +SSO Permission Sets can be imported by their ARN and SSO Instance ARN, e.g. 
+ +``` +$ terraform import aws_ssoadmin_permission_set.example arn:aws:sso:::permissionSet/ssoins-2938j0x8920sbj72/ps-80383020jr9302rk,arn:aws:sso:::instance/ssoins-2938j0x8920sbj72 +``` \ No newline at end of file From 833ff5d91d3662f919e1a66dc25883276fadfaf2 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 7 Jan 2021 15:34:17 -0500 Subject: [PATCH 0449/1212] importlint, typo, docs, remove unused --- ...data_source_aws_ssoadmin_instances_test.go | 12 ++--- ...data_source_aws_ssoadmin_permission_set.go | 12 +++-- ...source_aws_ssoadmin_permission_set_test.go | 43 ++++++++--------- .../service/ssoadmin/finder/finder.go | 48 ------------------- .../service/ssoadmin/waiter/status.go | 20 -------- .../service/ssoadmin/waiter/waiter.go | 15 ------ aws/resource_aws_ssoadmin_permission_set.go | 13 +++-- ...source_aws_ssoadmin_permission_set_test.go | 18 +++---- .../docs/d/ssoadmin_instances.html.markdown | 6 +-- .../d/ssoadmin_permission_set.html.markdown | 6 +-- .../r/ssoadmin_permission_set.html.markdown | 10 ++-- 11 files changed, 60 insertions(+), 143 deletions(-) delete mode 100644 aws/internal/service/ssoadmin/finder/finder.go diff --git a/aws/data_source_aws_ssoadmin_instances_test.go b/aws/data_source_aws_ssoadmin_instances_test.go index 39fd84a9b44..a4c4eb48fb9 100644 --- a/aws/data_source_aws_ssoadmin_instances_test.go +++ b/aws/data_source_aws_ssoadmin_instances_test.go @@ -9,10 +9,10 @@ import ( ) func testAccPreCheckAWSSSOAdminInstances(t *testing.T) { - ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn + conn := testAccProvider.Meta().(*AWSClient).ssoadminconn var instances []*ssoadmin.InstanceMetadata - err := ssoadminconn.ListInstancesPages(&ssoadmin.ListInstancesInput{}, func(page *ssoadmin.ListInstancesOutput, lastPage bool) bool { + err := conn.ListInstancesPages(&ssoadmin.ListInstancesInput{}, func(page *ssoadmin.ListInstancesOutput, lastPage bool) bool { if page == nil { return !lastPage } @@ -35,7 +35,7 @@ func testAccPreCheckAWSSSOAdminInstances(t *testing.T) { } } -func TestAccDataSourceAWSSSOAdminInstances_Basic(t *testing.T) { +func TestAccDataSourceAWSSSOAdminInstances_basic(t *testing.T) { dataSourceName := "data.aws_ssoadmin_instances.test" resource.ParallelTest(t, resource.TestCase{ @@ -43,7 +43,7 @@ func TestAccDataSourceAWSSSOAdminInstances_Basic(t *testing.T) { Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccDataSourceAWSSSOAdminInstancesConfigBasic(), + Config: testAccDataSourceAWSSSOAdminInstancesConfigBasic, Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(dataSourceName, "arns.#", "1"), resource.TestCheckResourceAttr(dataSourceName, "identity_store_ids.#", "1"), @@ -55,6 +55,4 @@ func TestAccDataSourceAWSSSOAdminInstances_Basic(t *testing.T) { }) } -func testAccDataSourceAWSSSOAdminInstancesConfigBasic() string { - return `data "aws_ssoadmin_instances" "test" {}` -} +const testAccDataSourceAWSSSOAdminInstancesConfigBasic = `data "aws_ssoadmin_instances" "test" {}` diff --git a/aws/data_source_aws_ssoadmin_permission_set.go b/aws/data_source_aws_ssoadmin_permission_set.go index 75e22a6a59d..44f85ac9aa2 100644 --- a/aws/data_source_aws_ssoadmin_permission_set.go +++ b/aws/data_source_aws_ssoadmin_permission_set.go @@ -22,6 +22,7 @@ func dataSourceAwsSsoAdminPermissionSet() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, + ValidateFunc: validateArn, ExactlyOneOf: []string{"arn", "name"}, }, @@ -44,6 +45,7 @@ func dataSourceAwsSsoAdminPermissionSet() *schema.Resource 
{ "name": { Type: schema.TypeString, Optional: true, + Computed: true, ValidateFunc: validation.All( validation.StringLenBetween(1, 32), validation.StringMatch(regexp.MustCompile(`[\w+=,.@-]+`), "must match [\\w+=,.@-]"), @@ -92,8 +94,8 @@ func dataSourceAwsSsoAdminPermissionSetRead(d *schema.ResourceData, meta interfa } permissionSet = output.PermissionSet - } else { - name := d.Get("name").(string) + } else if v, ok := d.GetOk("name"); ok { + name := v.(string) var describeErr error input := &ssoadmin.ListPermissionSetsInput{ @@ -106,6 +108,10 @@ func dataSourceAwsSsoAdminPermissionSetRead(d *schema.ResourceData, meta interfa } for _, permissionSetArn := range page.PermissionSets { + if permissionSetArn == nil { + continue + } + output, describeErr := conn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ InstanceArn: aws.String(instanceArn), PermissionSetArn: permissionSetArn, @@ -133,7 +139,7 @@ func dataSourceAwsSsoAdminPermissionSetRead(d *schema.ResourceData, meta interfa } if describeErr != nil { - return fmt.Errorf("error reading SSO Permission Set: %w", describeErr) + return fmt.Errorf("error reading SSO Permission Set (%s): %w", name, describeErr) } } diff --git a/aws/data_source_aws_ssoadmin_permission_set_test.go b/aws/data_source_aws_ssoadmin_permission_set_test.go index 0c87ddc1c16..d83cfc94930 100644 --- a/aws/data_source_aws_ssoadmin_permission_set_test.go +++ b/aws/data_source_aws_ssoadmin_permission_set_test.go @@ -21,6 +21,7 @@ func TestAccDataSourceAWSSSOAdminPermissionSet_arn(t *testing.T) { { Config: testAccDataSourceAWSSSOPermissionSetByArnConfig(rName), Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "arn", dataSourceName, "arn"), resource.TestCheckResourceAttrPair(resourceName, "name", dataSourceName, "name"), resource.TestCheckResourceAttrPair(resourceName, "description", dataSourceName, "description"), resource.TestCheckResourceAttrPair(resourceName, "relay_state", dataSourceName, "relay_state"), @@ -44,6 +45,7 @@ func TestAccDataSourceAWSSSOAdminPermissionSet_name(t *testing.T) { { Config: testAccDataSourceAWSSSOPermissionSetByNameConfig(rName), Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "arn", dataSourceName, "arn"), resource.TestCheckResourceAttrPair(resourceName, "name", dataSourceName, "name"), resource.TestCheckResourceAttrPair(resourceName, "description", dataSourceName, "description"), resource.TestCheckResourceAttrPair(resourceName, "relay_state", dataSourceName, "relay_state"), @@ -55,7 +57,7 @@ func TestAccDataSourceAWSSSOAdminPermissionSet_name(t *testing.T) { }) } -func TestAccDataSourceAWSSSOAdminPermissionSet_NonExistent(t *testing.T) { +func TestAccDataSourceAWSSSOAdminPermissionSet_nonExistent(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOAdminInstances(t) }, @@ -69,15 +71,15 @@ func TestAccDataSourceAWSSSOAdminPermissionSet_NonExistent(t *testing.T) { }) } -func testAccDataSourceAWSSSOPermissionSetByArnConfig(rName string) string { +func testAccDataSourceAWSSSOPermissionSetBaseConfig(rName string) string { return fmt.Sprintf(` data "aws_ssoadmin_instances" "test" {} resource "aws_ssoadmin_permission_set" "test" { - name = %[1]q - description = %[1]q - instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] - relay_state = "https://example.com" + name = %[1]q + description = %[1]q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + relay_state = 
"https://example.com" tags = { Key1 = "Value1" @@ -85,36 +87,29 @@ resource "aws_ssoadmin_permission_set" "test" { Key3 = "Value3" } } +`, rName) +} +func testAccDataSourceAWSSSOPermissionSetByArnConfig(rName string) string { + return composeConfig( + testAccDataSourceAWSSSOPermissionSetBaseConfig(rName), + ` data "aws_ssoadmin_permission_set" "test" { instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] arn = aws_ssoadmin_permission_set.test.arn } -`, rName) +`) } func testAccDataSourceAWSSSOPermissionSetByNameConfig(rName string) string { - return fmt.Sprintf(` -data "aws_ssoadmin_instances" "test" {} - -resource "aws_ssoadmin_permission_set" "test" { - name = %[1]q - description = %[1]q - instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] - relay_state = "https://example.com" - - tags = { - Key1 = "Value1" - Key2 = "Value2" - Key3 = "Value3" - } -} - + return composeConfig( + testAccDataSourceAWSSSOPermissionSetBaseConfig(rName), + ` data "aws_ssoadmin_permission_set" "test" { instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] name = aws_ssoadmin_permission_set.test.name } -`, rName) +`) } const testAccDataSourceAWSSSOPermissionSetByNameConfig_nonExistent = ` diff --git a/aws/internal/service/ssoadmin/finder/finder.go b/aws/internal/service/ssoadmin/finder/finder.go deleted file mode 100644 index f820ab6b767..00000000000 --- a/aws/internal/service/ssoadmin/finder/finder.go +++ /dev/null @@ -1,48 +0,0 @@ -package finder - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ssoadmin" -) - -func AttachedManagedPolicy(conn *ssoadmin.SSOAdmin, permissionSetArn, instanceArn, managedPolicyArn string) (*ssoadmin.AttachedManagedPolicy, error) { - input := &ssoadmin.ListManagedPoliciesInPermissionSetInput{ - PermissionSetArn: aws.String(permissionSetArn), - InstanceArn: aws.String(instanceArn), - } - - var attachedPolicy *ssoadmin.AttachedManagedPolicy - err := conn.ListManagedPoliciesInPermissionSetPages(input, func(page *ssoadmin.ListManagedPoliciesInPermissionSetOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, policy := range page.AttachedManagedPolicies { - if aws.StringValue(policy.Arn) == managedPolicyArn { - attachedPolicy = policy - return false - } - } - return !lastPage - }) - - return attachedPolicy, err -} - -func InlinePolicy(conn *ssoadmin.SSOAdmin, instanceArn, permissionSetArn string) (*string, error) { - input := &ssoadmin.GetInlinePolicyForPermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - } - - output, err := conn.GetInlinePolicyForPermissionSet(input) - if err != nil { - return nil, err - } - - if output == nil { - return nil, nil - } - - return output.InlinePolicy, nil -} diff --git a/aws/internal/service/ssoadmin/waiter/status.go b/aws/internal/service/ssoadmin/waiter/status.go index 0d1293a201b..cd0b8e10bbe 100644 --- a/aws/internal/service/ssoadmin/waiter/status.go +++ b/aws/internal/service/ssoadmin/waiter/status.go @@ -4,33 +4,13 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ssoadmin" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ssoadmin/finder" ) const ( - InlinePolicyDeleteStatusUnknown = "Unknown" - InlinePolicyDeleteStatusNotFound = "NotFound" - InlinePolicyDeleteStatusExists = "Exists" PermissionSetProvisioningStatusUnknown = "Unknown" PermissionSetProvisioningStatusNotFound = 
"NotFound" ) -func InlinePolicyDeletedStatus(conn *ssoadmin.SSOAdmin, instanceArn, permissionSetArn string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - policy, err := finder.InlinePolicy(conn, instanceArn, permissionSetArn) - - if err != nil { - return nil, InlinePolicyDeleteStatusUnknown, err - } - - if aws.StringValue(policy) == "" { - return nil, InlinePolicyDeleteStatusNotFound, nil - } - - return policy, InlinePolicyDeleteStatusExists, nil - } -} - func PermissionSetProvisioningStatus(conn *ssoadmin.SSOAdmin, instanceArn, requestID string) resource.StateRefreshFunc { return func() (interface{}, string, error) { input := &ssoadmin.DescribePermissionSetProvisioningStatusInput{ diff --git a/aws/internal/service/ssoadmin/waiter/waiter.go b/aws/internal/service/ssoadmin/waiter/waiter.go index 2097bf80131..48d7e111abf 100644 --- a/aws/internal/service/ssoadmin/waiter/waiter.go +++ b/aws/internal/service/ssoadmin/waiter/waiter.go @@ -8,25 +8,10 @@ import ( ) const ( - AWSSSOAdminPermissionSetDeleteTimeout = 5 * time.Minute AWSSSOAdminPermissionSetProvisioningRetryDelay = 5 * time.Second AWSSSOAdminPermissionSetProvisionTimeout = 10 * time.Minute ) -func InlinePolicyDeleted(conn *ssoadmin.SSOAdmin, instanceArn, permissionSetArn string) (*string, error) { - stateConf := resource.StateChangeConf{ - Pending: []string{InlinePolicyDeleteStatusExists}, - Target: []string{InlinePolicyDeleteStatusNotFound}, - Refresh: InlinePolicyDeletedStatus(conn, instanceArn, permissionSetArn), - Timeout: AWSSSOAdminPermissionSetDeleteTimeout, - } - outputRaw, err := stateConf.WaitForState() - if v, ok := outputRaw.(*string); ok { - return v, err - } - return nil, err -} - func PermissionSetProvisioned(conn *ssoadmin.SSOAdmin, instanceArn, requestID string) (*ssoadmin.PermissionSetProvisioningStatus, error) { stateConf := resource.StateChangeConf{ Delay: AWSSSOAdminPermissionSetProvisioningRetryDelay, diff --git a/aws/resource_aws_ssoadmin_permission_set.go b/aws/resource_aws_ssoadmin_permission_set.go index 9a1ef87a5d5..676129a7f0a 100644 --- a/aws/resource_aws_ssoadmin_permission_set.go +++ b/aws/resource_aws_ssoadmin_permission_set.go @@ -130,9 +130,9 @@ func resourceAwsSsoAdminPermissionSetRead(d *schema.ResourceData, meta interface conn := meta.(*AWSClient).ssoadminconn ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig - arn, instanceArn, err := parseSsoAdminPermissionSetID(d.Id()) + arn, instanceArn, err := parseSsoAdminResourceID(d.Id()) if err != nil { - return fmt.Errorf("error parsing SSO Permision Set ID: %w", err) + return fmt.Errorf("error parsing SSO Permission Set ID: %w", err) } output, err := conn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ @@ -179,7 +179,7 @@ func resourceAwsSsoAdminPermissionSetRead(d *schema.ResourceData, meta interface func resourceAwsSsoAdminPermissionSetUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ssoadminconn - arn, instanceArn, err := parseSsoAdminPermissionSetID(d.Id()) + arn, instanceArn, err := parseSsoAdminResourceID(d.Id()) if err != nil { return fmt.Errorf("error parsing SSO Permission Set ID: %w", err) } @@ -216,8 +216,7 @@ func resourceAwsSsoAdminPermissionSetUpdate(d *schema.ResourceData, meta interfa } // Re-provision ALL accounts after making the above changes - err = provisionSsoAdminPermissionSet(conn, arn, instanceArn) - if err != nil { + if err := provisionSsoAdminPermissionSet(conn, arn, instanceArn); err != nil { return err } @@ -227,7 +226,7 @@ func 
resourceAwsSsoAdminPermissionSetUpdate(d *schema.ResourceData, meta interfa func resourceAwsSsoAdminPermissionSetDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ssoadminconn - arn, instanceArn, err := parseSsoAdminPermissionSetID(d.Id()) + arn, instanceArn, err := parseSsoAdminResourceID(d.Id()) if err != nil { return fmt.Errorf("error parsing SSO Permission Set ID: %w", err) } @@ -248,7 +247,7 @@ func resourceAwsSsoAdminPermissionSetDelete(d *schema.ResourceData, meta interfa return nil } -func parseSsoAdminPermissionSetID(id string) (string, string, error) { +func parseSsoAdminResourceID(id string) (string, string, error) { idParts := strings.Split(id, ",") if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { return "", "", fmt.Errorf("unexpected format for ID (%q), expected PERMISSION_SET_ARN,INSTANCE_ARN", id) diff --git a/aws/resource_aws_ssoadmin_permission_set_test.go b/aws/resource_aws_ssoadmin_permission_set_test.go index a5ef3dfac7e..e1384c94628 100644 --- a/aws/resource_aws_ssoadmin_permission_set_test.go +++ b/aws/resource_aws_ssoadmin_permission_set_test.go @@ -2,7 +2,6 @@ package aws import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "log" "testing" @@ -12,6 +11,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) @@ -277,7 +277,7 @@ func testAccCheckAWSSSOAdminPermissionSetDestroy(s *terraform.State) error { continue } - arn, instanceArn, err := parseSsoAdminPermissionSetID(rs.Primary.ID) + arn, instanceArn, err := parseSsoAdminResourceID(rs.Primary.ID) if err != nil { return fmt.Errorf("error parsing SSO Permission Set ID (%s): %w", rs.Primary.ID, err) @@ -317,7 +317,7 @@ func testAccCheckAWSSOAdminPermissionSetExists(resourceName string) resource.Tes conn := testAccProvider.Meta().(*AWSClient).ssoadminconn - arn, instanceArn, err := parseSsoAdminPermissionSetID(rs.Primary.ID) + arn, instanceArn, err := parseSsoAdminResourceID(rs.Primary.ID) if err != nil { return fmt.Errorf("error parsing SSO Permission Set ID (%s): %w", rs.Primary.ID, err) @@ -341,8 +341,8 @@ func testAccAWSSSOAdminPermissionSetBasicConfig(rName string) string { data "aws_ssoadmin_instances" "test" {} resource "aws_ssoadmin_permission_set" "test" { - name = %q - instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + name = %q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] } `, rName) } @@ -388,8 +388,8 @@ func testAccAWSSSOAdminPermissionSetConfigTagsSingle(rName, tagKey1, tagValue1 s data "aws_ssoadmin_instances" "test" {} resource "aws_ssoadmin_permission_set" "test" { - name = %q - instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + name = %q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] tags = { %[2]q = %[3]q @@ -403,8 +403,8 @@ func testAccAWSSSOAdminPermissionSetConfigTagsMultiple(rName, tagKey1, tagValue1 data "aws_ssoadmin_instances" "test" {} resource "aws_ssoadmin_permission_set" "test" { - name = %q - instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + name = %q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] tags = { %[2]q = %[3]q diff --git a/website/docs/d/ssoadmin_instances.html.markdown b/website/docs/d/ssoadmin_instances.html.markdown index 781bc666dce..b8d55b5e1cb 100644 --- 
a/website/docs/d/ssoadmin_instances.html.markdown +++ b/website/docs/d/ssoadmin_instances.html.markdown @@ -1,7 +1,7 @@ --- subcategory: "SSO Admin" layout: "aws" -page_title: "AWS: aws_ssoadmin_instance" +page_title: "AWS: aws_ssoadmin_instances" description: |- Get information on SSO Instances. --- @@ -30,6 +30,6 @@ There are no arguments available for this data source. ## Attributes Reference -* `arn` - Set of Amazon Resource Names (ARNs) of the SSO Instances. +* `arns` - Set of Amazon Resource Names (ARNs) of the SSO Instances. * `id` - AWS Region. -* `identity_store_ids` - Set of identifier(s) of the identity store(s) connected to the SSO instance(s). +* `identity_store_ids` - Set of identifiers of the identity stores connected to the SSO Instances. diff --git a/website/docs/d/ssoadmin_permission_set.html.markdown b/website/docs/d/ssoadmin_permission_set.html.markdown index a87a3d12614..ee6aa37f94c 100644 --- a/website/docs/d/ssoadmin_permission_set.html.markdown +++ b/website/docs/d/ssoadmin_permission_set.html.markdown @@ -1,7 +1,7 @@ --- subcategory: "SSO Admin" layout: "aws" -page_title: "AWS: aws_sso_permission_set" +page_title: "AWS: aws_ssoadmin_permission_set" description: |- Get information on a Single Sign-On (SSO) Permission Set. --- @@ -39,8 +39,8 @@ The following arguments are supported: In addition to all arguments above, the following attributes are exported: -* `id` - The Amazon Resource Name (ARN) of the permission set. +* `id` - The Amazon Resource Name (ARN) of the Permission Set. * `description` - The description of the Permission Set. * `relay_state` - The relay state URL used to redirect users within the application during the federation authentication process. -* `session_duration` - The length of time that the application user sessions are valid in the ISO-8601 standard. Default: `PT1H`. +* `session_duration` - The length of time that the application user sessions are valid in the ISO-8601 standard. * `tags` - Key-value map of resource tags. \ No newline at end of file diff --git a/website/docs/r/ssoadmin_permission_set.html.markdown b/website/docs/r/ssoadmin_permission_set.html.markdown index f28382356fb..7c1bab71e35 100644 --- a/website/docs/r/ssoadmin_permission_set.html.markdown +++ b/website/docs/r/ssoadmin_permission_set.html.markdown @@ -10,6 +10,8 @@ description: |- Provides a Single Sign-On (SSO) Permission Set resource +~> **NOTE:** Updating this resource will automatically [Provision the Permission Set](https://docs.aws.amazon.com/singlesignon/latest/APIReference/API_ProvisionPermissionSet.html) to apply the corresponding updates to all assigned accounts. + ## Example Usage ```hcl @@ -29,8 +31,8 @@ resource "aws_ssoadmin_permission_set" "example" { The following arguments are supported: * `description` - (Optional) The description of the Permission Set. -* `instance_arn` - (Required) The Amazon Resource Name (ARN) of the SSO Instance under which the operation will be executed. -* `name` - (Required) The name of the Permission Set. +* `instance_arn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the SSO Instance under which the operation will be executed. +* `name` - (Required, Forces new resource) The name of the Permission Set. * `relay_state` - (Optional) The relay state URL used to redirect users within the application during the federation authentication process. * `session_duration` - (Optional) The length of time that the application user sessions are valid in the ISO-8601 standard. Default: `PT1H`. 
* `tags` - (Optional) Key-value map of resource tags. @@ -39,13 +41,13 @@ The following arguments are supported: In addition to all arguments above, the following attributes are exported: -* `id` - The Amazon Resource Name (ARN) of the Permission Set. * `arn` - The Amazon Resource Name (ARN) of the Permission Set. +* `id` - The Amazon Resource Names (ARNs) of the Permission Set and SSO Instance, separated by a comma (`,`). * `created_date` - The date the Permission Set was created in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8). ## Import -SSO Permission Sets can be imported by their ARN and SSO Instance ARN, e.g. +SSO Permission Sets can be imported using the `arn` and `instance_arn` separated by a comma (`,`) e.g. ``` $ terraform import aws_ssoadmin_permission_set.example arn:aws:sso:::permissionSet/ssoins-2938j0x8920sbj72/ps-80383020jr9302rk,arn:aws:sso:::instance/ssoins-2938j0x8920sbj72 From db0df8de2002d0d18fd5c6802aee7a1c1fcf07d0 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Wed, 6 Jan 2021 20:18:05 -0500 Subject: [PATCH 0450/1212] add inline policy resource for permission set --- .../service/ssoadmin/finder/finder.go | 24 ++ aws/provider.go | 1 + ...s_ssoadmin_permission_set_inline_policy.go | 131 +++++++++ ...admin_permission_set_inline_policy_test.go | 252 ++++++++++++++++++ ...permission_set_inline_policy.html.markdown | 67 +++++ 5 files changed, 475 insertions(+) create mode 100644 aws/internal/service/ssoadmin/finder/finder.go create mode 100644 aws/resource_aws_ssoadmin_permission_set_inline_policy.go create mode 100644 aws/resource_aws_ssoadmin_permission_set_inline_policy_test.go create mode 100644 website/docs/r/ssoadmin_permission_set_inline_policy.html.markdown diff --git a/aws/internal/service/ssoadmin/finder/finder.go b/aws/internal/service/ssoadmin/finder/finder.go new file mode 100644 index 00000000000..65278a72550 --- /dev/null +++ b/aws/internal/service/ssoadmin/finder/finder.go @@ -0,0 +1,24 @@ +package finder + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssoadmin" +) + +func InlinePolicy(ssoadminconn *ssoadmin.SSOAdmin, instanceArn, permissionSetArn string) (*string, error) { + input := &ssoadmin.GetInlinePolicyForPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + } + + output, err := ssoadminconn.GetInlinePolicyForPermissionSet(input) + if err != nil { + return nil, err + } + + if output == nil { + return nil, nil + } + + return output.InlinePolicy, nil +} diff --git a/aws/provider.go b/aws/provider.go index 92375c6d43c..de1b036b3ef 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -940,6 +940,7 @@ func Provider() *schema.Provider { "aws_ssm_parameter": resourceAwsSsmParameter(), "aws_ssm_resource_data_sync": resourceAwsSsmResourceDataSync(), "aws_ssoadmin_permission_set": resourceAwsSsoAdminPermissionSet(), + "aws_ssoadmin_permission_set_inline_policy": resourceAwsSsoAdminPermissionSetInlinePolicy(), "aws_storagegateway_cache": resourceAwsStorageGatewayCache(), "aws_storagegateway_cached_iscsi_volume": resourceAwsStorageGatewayCachedIscsiVolume(), "aws_storagegateway_gateway": resourceAwsStorageGatewayGateway(), diff --git a/aws/resource_aws_ssoadmin_permission_set_inline_policy.go b/aws/resource_aws_ssoadmin_permission_set_inline_policy.go new file mode 100644 index 00000000000..e076925f0c6 --- /dev/null +++ b/aws/resource_aws_ssoadmin_permission_set_inline_policy.go @@ -0,0 +1,131 @@ +package aws + +import ( + "fmt" + "log" + + 
"github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ssoadmin/finder" +) + +func resourceAwsSsoAdminPermissionSetInlinePolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSsoAdminPermissionSetInlinePolicyPut, + Read: resourceAwsSsoAdminPermissionSetInlinePolicyRead, + Update: resourceAwsSsoAdminPermissionSetInlinePolicyPut, + Delete: resourceAwsSsoAdminPermissionSetInlinePolicyDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "inline_policy": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateIAMPolicyJson, + DiffSuppressFunc: suppressEquivalentJsonDiffs, + }, + + "instance_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + + "permission_set_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + }, + } +} + +func resourceAwsSsoAdminPermissionSetInlinePolicyPut(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + + instanceArn := d.Get("instance_arn").(string) + permissionSetArn := d.Get("permission_set_arn").(string) + + input := &ssoadmin.PutInlinePolicyToPermissionSetInput{ + InlinePolicy: aws.String(d.Get("inline_policy").(string)), + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + } + + _, err := conn.PutInlinePolicyToPermissionSet(input) + if err != nil { + return fmt.Errorf("error putting Inline Policy for SSO Permission Set (%s): %w", permissionSetArn, err) + } + + d.SetId(fmt.Sprintf("%s,%s", permissionSetArn, instanceArn)) + + // (Re)provision ALL accounts after making the above changes + if err := provisionSsoAdminPermissionSet(conn, permissionSetArn, instanceArn); err != nil { + return err + } + + return resourceAwsSsoAdminPermissionSetInlinePolicyRead(d, meta) +} + +func resourceAwsSsoAdminPermissionSetInlinePolicyRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + + permissionSetArn, instanceArn, err := parseSsoAdminResourceID(d.Id()) + if err != nil { + return fmt.Errorf("error parsing SSO Permission Set Inline Policy ID: %w", err) + } + + policy, err := finder.InlinePolicy(conn, instanceArn, permissionSetArn) + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, ssoadmin.ErrCodeResourceNotFoundException) { + log.Printf("[WARN] Inline Policy for SSO Permission Set (%s) not found, removing from state", permissionSetArn) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error reading Inline Policy for SSO Permission Set (%s): %w", permissionSetArn, err) + } + + if policy == nil { + log.Printf("[WARN] Inline Policy for SSO Permission Set (%s) not found, removing from state", permissionSetArn) + d.SetId("") + return nil + } + + d.Set("inline_policy", policy) + d.Set("instance_arn", instanceArn) + d.Set("permission_set_arn", permissionSetArn) + + return nil +} + +func resourceAwsSsoAdminPermissionSetInlinePolicyDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + + permissionSetArn, instanceArn, err := parseSsoAdminResourceID(d.Id()) + if err != nil { + return fmt.Errorf("error parsing SSO Permission Set Inline Policy ID: %w", err) + } + + 
input := &ssoadmin.DeleteInlinePolicyFromPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + } + + _, err = conn.DeleteInlinePolicyFromPermissionSet(input) + + if err != nil { + if tfawserr.ErrCodeEquals(err, ssoadmin.ErrCodeResourceNotFoundException) { + return nil + } + return fmt.Errorf("error detaching Inline Policy from SSO Permission Set (%s): %w", permissionSetArn, err) + } + + return nil +} diff --git a/aws/resource_aws_ssoadmin_permission_set_inline_policy_test.go b/aws/resource_aws_ssoadmin_permission_set_inline_policy_test.go new file mode 100644 index 00000000000..ee12cca2c69 --- /dev/null +++ b/aws/resource_aws_ssoadmin_permission_set_inline_policy_test.go @@ -0,0 +1,252 @@ +package aws + +import ( + "fmt" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ssoadmin/finder" +) + +func TestAccAWSSSOAdminPermissionSetInlinePolicy_basic(t *testing.T) { + resourceName := "aws_ssoadmin_permission_set_inline_policy.test" + permissionSetResourceName := "aws_ssoadmin_permission_set.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOAdminInstances(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOAdminPermissionSetInlinePolicyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSSOAdminPermissionSetInlinePolicyBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOAdminPermissionSetInlinePolicyExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "instance_arn", permissionSetResourceName, "instance_arn"), + resource.TestCheckResourceAttrPair(resourceName, "permission_set_arn", permissionSetResourceName, "arn"), + resource.TestMatchResourceAttr(resourceName, "inline_policy", regexp.MustCompile("s3:ListAllMyBuckets")), + resource.TestMatchResourceAttr(resourceName, "inline_policy", regexp.MustCompile("s3:GetBucketLocation")), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSSOAdminPermissionSetInlinePolicy_update(t *testing.T) { + resourceName := "aws_ssoadmin_permission_set_inline_policy.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOAdminInstances(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOAdminPermissionSetInlinePolicyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSSOAdminPermissionSetInlinePolicyBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOAdminPermissionSetInlinePolicyExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccSSOAdminPermissionSetInlinePolicyUpdateConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOAdminPermissionSetInlinePolicyExists(resourceName), + resource.TestMatchResourceAttr(resourceName, "inline_policy", regexp.MustCompile("s3:ListAllMyBuckets")), + ), + }, + { + ResourceName: 
resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSSOAdminPermissionSetInlinePolicy_disappears(t *testing.T) { + resourceName := "aws_ssoadmin_permission_set_inline_policy.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOAdminInstances(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOAdminPermissionSetInlinePolicyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSSOAdminPermissionSetInlinePolicyBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOAdminPermissionSetInlinePolicyExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsSsoAdminPermissionSetInlinePolicy(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccAWSSSOInlinePolicy_disappears_permissionSet(t *testing.T) { + resourceName := "aws_ssoadmin_permission_set_inline_policy.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOAdminInstances(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOAdminPermissionSetInlinePolicyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSSOAdminPermissionSetInlinePolicyBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOAdminPermissionSetInlinePolicyExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsSsoAdminPermissionSet(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckAWSSSOAdminPermissionSetInlinePolicyDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).ssoadminconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_ssoadmin_permission_set_inline_policy" { + continue + } + + permissionSetArn, instanceArn, err := parseSsoAdminResourceID(rs.Primary.ID) + + if err != nil { + return fmt.Errorf("error parsing SSO Permission Set Inline Policy ID (%s): %w", rs.Primary.ID, err) + } + + policy, err := finder.InlinePolicy(conn, instanceArn, permissionSetArn) + + if tfawserr.ErrCodeEquals(err, ssoadmin.ErrCodeResourceNotFoundException) { + continue + } + + if err != nil { + return err + } + + if aws.StringValue(policy) == "" { + continue + } + + return fmt.Errorf("Inline Policy for SSO PermissionSet (%s) still exists", permissionSetArn) + } + + return nil +} + +func testAccCheckAWSSSOAdminPermissionSetInlinePolicyExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("Resource (%s) ID not set", resourceName) + } + + permissionSetArn, instanceArn, err := parseSsoAdminResourceID(rs.Primary.ID) + + if err != nil { + return fmt.Errorf("error parsing SSO Permission Set Inline Policy ID (%s): %w", rs.Primary.ID, err) + } + + conn := testAccProvider.Meta().(*AWSClient).ssoadminconn + + policy, err := finder.InlinePolicy(conn, instanceArn, permissionSetArn) + + if err != nil { + return err + } + + if policy == nil { + return fmt.Errorf("Inline Policy for SSO Permission Set (%s) not found", permissionSetArn) + } + + return nil + } +} + +func testAccSSOAdminPermissionSetInlinePolicyBasicConfig(rName string) string { + return fmt.Sprintf(` +data 
"aws_ssoadmin_instances" "test" {} + +data "aws_iam_policy_document" "test" { + statement { + sid = "1" + + actions = [ + "s3:ListAllMyBuckets", + "s3:GetBucketLocation", + ] + + resources = [ + "arn:aws:s3:::*", + ] + } +} + +resource "aws_ssoadmin_permission_set" "test" { + name = %q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] +} + +resource "aws_ssoadmin_permission_set_inline_policy" "test" { + inline_policy = data.aws_iam_policy_document.test.json + instance_arn = aws_ssoadmin_permission_set.test.instance_arn + permission_set_arn = aws_ssoadmin_permission_set.test.arn +} +`, rName) +} + +func testAccSSOAdminPermissionSetInlinePolicyUpdateConfig(rName string) string { + return fmt.Sprintf(` +data "aws_ssoadmin_instances" "test" {} + +data "aws_iam_policy_document" "test" { + statement { + sid = "1" + + actions = [ + "s3:ListAllMyBuckets", + ] + + resources = [ + "arn:aws:s3:::*", + ] + } +} + +resource "aws_ssoadmin_permission_set" "test" { + name = %q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] +} + +resource "aws_ssoadmin_permission_set_inline_policy" "test" { + inline_policy = data.aws_iam_policy_document.test.json + instance_arn = aws_ssoadmin_permission_set.test.instance_arn + permission_set_arn = aws_ssoadmin_permission_set.test.arn +} +`, rName) +} diff --git a/website/docs/r/ssoadmin_permission_set_inline_policy.html.markdown b/website/docs/r/ssoadmin_permission_set_inline_policy.html.markdown new file mode 100644 index 00000000000..ce1f2b406a0 --- /dev/null +++ b/website/docs/r/ssoadmin_permission_set_inline_policy.html.markdown @@ -0,0 +1,67 @@ +--- +subcategory: "SSO Admin" +layout: "aws" +page_title: "AWS: aws_ssoadmin_permission_set_inline_policy" +description: |- + Manages an IAM inline policy for a Single Sign-On (SSO) Permission Set +--- + +# Resource: aws_ssoadmin_permission_set_inline_policy + +Provides an IAM inline policy for a Single Sign-On (SSO) Permission Set resource + +~> **NOTE:** Creating or updating this resource will automatically [Provision the Permission Set](https://docs.aws.amazon.com/singlesignon/latest/APIReference/API_ProvisionPermissionSet.html) to apply the corresponding updates to all assigned accounts. + +## Example Usage + +```hcl +data "aws_ssoadmin_instances" "example" {} + +resource "aws_ssoadmin_permission_set" "example" { + name = "Example" + instance_arn = tolist(data.aws_ssoadmin_instances.example.arns)[0] +} + +data "aws_iam_policy_document" "test" { + statement { + sid = "1" + + actions = [ + "s3:ListAllMyBuckets", + "s3:GetBucketLocation", + ] + + resources = [ + "arn:aws:s3:::*", + ] + } +} + +resource "aws_ssoadmin_permission_set_inline_policy" "example" { + inline_policy = data.aws_iam_policy_document.test.json + instance_arn = aws_ssoadmin_permission_set.example.instance_arn + permission_set_arn = aws_ssoadmin_permission_set.example.arn +} +``` + +## Argument Reference + +The following arguments are supported: + +* `inline_policy` - (Required) The IAM inline policy to attach to a Permission Set. +* `instance_arn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the SSO Instance under which the operation will be executed. +* `permission_set_arn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the Permission Set. + +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - Permission Set Amazon Resource Name (ARN) and SSO Instance Amazon Resource Name (ARN), separated by a comma (`,`). 
+
+## Import
+
+SSO Permission Set Inline Policies can be imported using the `permission_set_arn` and `instance_arn` separated by a comma (`,`) e.g.
+
+```
+$ terraform import aws_ssoadmin_permission_set_inline_policy.example arn:aws:sso:::permissionSet/ssoins-2938j0x8920sbj72/ps-80383020jr9302rk,arn:aws:sso:::instance/ssoins-2938j0x8920sbj72
+```
\ No newline at end of file

From 05d56d5bfb61ed39e7eae7fb7a8ba26a645031a5 Mon Sep 17 00:00:00 2001
From: Angie Pinilla
Date: Thu, 7 Jan 2021 17:38:24 -0500
Subject: [PATCH 0451/1212] inline policy resource updates; managed policy attachment resource

---
 .../service/ssoadmin/finder/finder.go         |  34 ++-
 aws/provider.go                               |   2 +-
 ..._aws_ssoadmin_managed_policy_attachment.go | 147 ++++++++++
 ...ssoadmin_managed_policy_attachment_test.go | 277 ++++++++++++++++++
 ...s_ssoadmin_permission_set_inline_policy.go |  16 +-
 ...admin_permission_set_inline_policy_test.go |  60 ++--
 .../d/ssoadmin_permission_set.html.markdown   |   2 +-
 ...in_managed_policy_attachment.html.markdown |  52 ++++
 .../r/ssoadmin_permission_set.html.markdown   |   2 +-
 ...permission_set_inline_policy.html.markdown |  11 +-
 10 files changed, 553 insertions(+), 50 deletions(-)
 create mode 100644 aws/resource_aws_ssoadmin_managed_policy_attachment.go
 create mode 100644 aws/resource_aws_ssoadmin_managed_policy_attachment_test.go
 create mode 100644 website/docs/r/ssoadmin_managed_policy_attachment.html.markdown

diff --git a/aws/internal/service/ssoadmin/finder/finder.go b/aws/internal/service/ssoadmin/finder/finder.go
index 65278a72550..0592c5775d8 100644
--- a/aws/internal/service/ssoadmin/finder/finder.go
+++ b/aws/internal/service/ssoadmin/finder/finder.go
@@ -5,20 +5,32 @@ import (
 	"github.com/aws/aws-sdk-go/service/ssoadmin"
 )
 
-func InlinePolicy(ssoadminconn *ssoadmin.SSOAdmin, instanceArn, permissionSetArn string) (*string, error) {
-	input := &ssoadmin.GetInlinePolicyForPermissionSetInput{
-		InstanceArn: aws.String(instanceArn),
+// ManagedPolicy returns the managed policy attached to a permission set within a specified SSO instance.
+// Returns a nil AttachedManagedPolicy (and a nil error) if no matching policy is attached; a non-nil error comes from the underlying API call.
+func ManagedPolicy(conn *ssoadmin.SSOAdmin, managedPolicyArn, permissionSetArn, instanceArn string) (*ssoadmin.AttachedManagedPolicy, error) { + input := &ssoadmin.ListManagedPoliciesInPermissionSetInput{ PermissionSetArn: aws.String(permissionSetArn), + InstanceArn: aws.String(instanceArn), } - output, err := ssoadminconn.GetInlinePolicyForPermissionSet(input) - if err != nil { - return nil, err - } + var attachedPolicy *ssoadmin.AttachedManagedPolicy + err := conn.ListManagedPoliciesInPermissionSetPages(input, func(page *ssoadmin.ListManagedPoliciesInPermissionSetOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } - if output == nil { - return nil, nil - } + for _, policy := range page.AttachedManagedPolicies { + if policy == nil { + continue + } + + if aws.StringValue(policy.Arn) == managedPolicyArn { + attachedPolicy = policy + return false + } + } + return !lastPage + }) - return output.InlinePolicy, nil + return attachedPolicy, err } diff --git a/aws/provider.go b/aws/provider.go index de1b036b3ef..ca0d3471156 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -871,7 +871,6 @@ func Provider() *schema.Provider { "aws_route_table_association": resourceAwsRouteTableAssociation(), "aws_sagemaker_code_repository": resourceAwsSagemakerCodeRepository(), "aws_sagemaker_endpoint_configuration": resourceAwsSagemakerEndpointConfiguration(), - "aws_sagemaker_image": resourceAwsSagemakerImage(), "aws_sagemaker_endpoint": resourceAwsSagemakerEndpoint(), "aws_sagemaker_model": resourceAwsSagemakerModel(), "aws_sagemaker_notebook_instance_lifecycle_configuration": resourceAwsSagemakerNotebookInstanceLifeCycleConfiguration(), @@ -939,6 +938,7 @@ func Provider() *schema.Provider { "aws_ssm_patch_group": resourceAwsSsmPatchGroup(), "aws_ssm_parameter": resourceAwsSsmParameter(), "aws_ssm_resource_data_sync": resourceAwsSsmResourceDataSync(), + "aws_ssoadmin_managed_policy_attachment": resourceAwsSsoAdminManagedPolicyAttachment(), "aws_ssoadmin_permission_set": resourceAwsSsoAdminPermissionSet(), "aws_ssoadmin_permission_set_inline_policy": resourceAwsSsoAdminPermissionSetInlinePolicy(), "aws_storagegateway_cache": resourceAwsStorageGatewayCache(), diff --git a/aws/resource_aws_ssoadmin_managed_policy_attachment.go b/aws/resource_aws_ssoadmin_managed_policy_attachment.go new file mode 100644 index 00000000000..5e9b02e1a4a --- /dev/null +++ b/aws/resource_aws_ssoadmin_managed_policy_attachment.go @@ -0,0 +1,147 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ssoadmin/finder" +) + +func resourceAwsSsoAdminManagedPolicyAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSsoAdminManagedPolicyAttachmentCreate, + Read: resourceAwsSsoAdminManagedPolicyAttachmentRead, + Delete: resourceAwsSsoAdminManagedPolicyAttachmentDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "instance_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + + "managed_policy_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + + "managed_policy_name": { + Type: schema.TypeString, + Computed: true, + }, + + 
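// permission_set_arn, managed_policy_arn, and instance_arn together form
+			// the comma-separated composite resource ID set during Create
+			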
"permission_set_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + }, + } +} + +func resourceAwsSsoAdminManagedPolicyAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + + instanceArn := d.Get("instance_arn").(string) + managedPolicyArn := d.Get("managed_policy_arn").(string) + permissionSetArn := d.Get("permission_set_arn").(string) + + input := &ssoadmin.AttachManagedPolicyToPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + ManagedPolicyArn: aws.String(managedPolicyArn), + PermissionSetArn: aws.String(permissionSetArn), + } + + _, err := conn.AttachManagedPolicyToPermissionSet(input) + if err != nil { + return fmt.Errorf("error attaching Managed Policy to SSO Permission Set (%s): %w", permissionSetArn, err) + } + + d.SetId(fmt.Sprintf("%s,%s,%s", managedPolicyArn, permissionSetArn, instanceArn)) + + // Provision ALL accounts after attaching the managed policy + if err := provisionSsoAdminPermissionSet(conn, permissionSetArn, instanceArn); err != nil { + return err + } + + return resourceAwsSsoAdminManagedPolicyAttachmentRead(d, meta) +} + +func resourceAwsSsoAdminManagedPolicyAttachmentRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + + managedPolicyArn, permissionSetArn, instanceArn, err := parseSsoAdminManagedPolicyAttachmentID(d.Id()) + if err != nil { + return fmt.Errorf("error parsing SSO Managed Policy Attachment ID: %w", err) + } + + policy, err := finder.ManagedPolicy(conn, managedPolicyArn, permissionSetArn, instanceArn) + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, ssoadmin.ErrCodeResourceNotFoundException) { + log.Printf("[WARN] Managed Policy (%s) for SSO Permission Set (%s) not found, removing from state", managedPolicyArn, permissionSetArn) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error reading Managed Policy (%s) for SSO Permission Set (%s): %w", managedPolicyArn, permissionSetArn, err) + } + + if policy == nil { + log.Printf("[WARN] Managed Policy (%s) for SSO Permission Set (%s) not found, removing from state", managedPolicyArn, permissionSetArn) + d.SetId("") + return nil + } + + d.Set("instance_arn", instanceArn) + d.Set("managed_policy_arn", policy.Arn) + d.Set("managed_policy_name", policy.Name) + d.Set("permission_set_arn", permissionSetArn) + + return nil +} + +func resourceAwsSsoAdminManagedPolicyAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + + managedPolicyArn, permissionSetArn, instanceArn, err := parseSsoAdminManagedPolicyAttachmentID(d.Id()) + if err != nil { + return fmt.Errorf("error parsing SSO Managed Policy Attachment ID: %w", err) + } + + input := &ssoadmin.DetachManagedPolicyFromPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + ManagedPolicyArn: aws.String(managedPolicyArn), + } + + _, err = conn.DetachManagedPolicyFromPermissionSet(input) + + if err != nil { + if tfawserr.ErrCodeEquals(err, ssoadmin.ErrCodeResourceNotFoundException) { + return nil + } + return fmt.Errorf("error detaching Managed Policy (%s) from SSO Permission Set (%s): %w", managedPolicyArn, permissionSetArn, err) + } + + return nil +} + +func parseSsoAdminManagedPolicyAttachmentID(id string) (string, string, string, error) { + idParts := strings.Split(id, ",") + if len(idParts) != 3 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" { + 
return "", "", "", fmt.Errorf("error parsing ID: expected MANAGED_POLICY_ARN,PERMISSION_SET_ARN,INSTANCE_ARN") + } + return idParts[0], idParts[1], idParts[2], nil +} diff --git a/aws/resource_aws_ssoadmin_managed_policy_attachment_test.go b/aws/resource_aws_ssoadmin_managed_policy_attachment_test.go new file mode 100644 index 00000000000..6f933c780b9 --- /dev/null +++ b/aws/resource_aws_ssoadmin_managed_policy_attachment_test.go @@ -0,0 +1,277 @@ +package aws + +import ( + "fmt" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ssoadmin/finder" +) + +func TestAccAWSSSOAdminManagedPolicyAttachment_basic(t *testing.T) { + resourceName := "aws_ssoadmin_managed_policy_attachment.test" + permissionSetResourceName := "aws_ssoadmin_permission_set.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOAdminInstances(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOAdminManagedPolicyAttachmentDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSSOAdminManagedPolicyAttachmentBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOAdminManagedPolicyAttachmentExists(resourceName), + //lintignore:AWSAT001 + resource.TestMatchResourceAttr(resourceName, "managed_policy_arn", regexp.MustCompile(`policy/AlexaForBusinessDeviceSetup`)), + resource.TestCheckResourceAttr(resourceName, "managed_policy_name", "AlexaForBusinessDeviceSetup"), + resource.TestCheckResourceAttrPair(resourceName, "instance_arn", permissionSetResourceName, "instance_arn"), + resource.TestCheckResourceAttrPair(resourceName, "permission_set_arn", permissionSetResourceName, "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSSOAdminManagedPolicyAttachment_forceNew(t *testing.T) { + resourceName := "aws_ssoadmin_managed_policy_attachment.test" + permissionSetResourceName := "aws_ssoadmin_permission_set.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOAdminInstances(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOAdminManagedPolicyAttachmentDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSSOAdminManagedPolicyAttachmentBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOAdminManagedPolicyAttachmentExists(resourceName), + ), + }, + { + Config: testAccSSOAdminManagedPolicyAttachmentConfig_forceNew(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOAdminManagedPolicyAttachmentExists(resourceName), + //lintignore:AWSAT001 + resource.TestMatchResourceAttr(resourceName, "managed_policy_arn", regexp.MustCompile(`policy/AmazonCognitoReadOnly`)), + resource.TestCheckResourceAttr(resourceName, "managed_policy_name", "AmazonCognitoReadOnly"), + resource.TestCheckResourceAttrPair(resourceName, "instance_arn", permissionSetResourceName, "instance_arn"), + resource.TestCheckResourceAttrPair(resourceName, "permission_set_arn", permissionSetResourceName, "arn"), + ), + }, + { + 
ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSSOAdminManagedPolicyAttachment_disappears(t *testing.T) { + resourceName := "aws_ssoadmin_managed_policy_attachment.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOAdminInstances(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOAdminManagedPolicyAttachmentDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSSOAdminManagedPolicyAttachmentBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOAdminManagedPolicyAttachmentExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsSsoAdminManagedPolicyAttachment(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccAWSSSOAdminManagedPolicyAttachment_disappears_permissionSet(t *testing.T) { + resourceName := "aws_ssoadmin_managed_policy_attachment.test" + permissionSetResourceName := "aws_ssoadmin_permission_set.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOAdminInstances(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOAdminManagedPolicyAttachmentDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSSOAdminManagedPolicyAttachmentBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOAdminManagedPolicyAttachmentExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsSsoAdminPermissionSet(), permissionSetResourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccAWSSSOAdminManagedPolicyAttachment_multipleManagedPolicies(t *testing.T) { + resourceName := "aws_ssoadmin_managed_policy_attachment.test" + otherResourceName := "aws_ssoadmin_managed_policy_attachment.other" + permissionSetResourceName := "aws_ssoadmin_permission_set.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOAdminInstances(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOAdminManagedPolicyAttachmentDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSSOAdminManagedPolicyAttachmentBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOAdminManagedPolicyAttachmentExists(resourceName), + ), + }, + { + Config: testAccSSOAdminManagedPolicyAttachmentConfig_multiple(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOAdminManagedPolicyAttachmentExists(resourceName), + testAccCheckAWSSSOAdminManagedPolicyAttachmentExists(otherResourceName), + //lintignore:AWSAT001 + resource.TestMatchResourceAttr(otherResourceName, "managed_policy_arn", regexp.MustCompile(`policy/AmazonDynamoDBReadOnlyAccess`)), + resource.TestCheckResourceAttr(otherResourceName, "managed_policy_name", "AmazonDynamoDBReadOnlyAccess"), + resource.TestCheckResourceAttrPair(otherResourceName, "instance_arn", permissionSetResourceName, "instance_arn"), + resource.TestCheckResourceAttrPair(otherResourceName, "permission_set_arn", permissionSetResourceName, "arn"), + ), + }, + { + ResourceName: otherResourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckAWSSSOAdminManagedPolicyAttachmentDestroy(s *terraform.State) error { + conn := 
testAccProvider.Meta().(*AWSClient).ssoadminconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_ssoadmin_managed_policy_attachment" { + continue + } + + managedPolicyArn, permissionSetArn, instanceArn, err := parseSsoAdminManagedPolicyAttachmentID(rs.Primary.ID) + if err != nil { + return fmt.Errorf("error parsing SSO Managed Policy Attachment ID (%s): %w", rs.Primary.ID, err) + } + + policy, err := finder.ManagedPolicy(conn, managedPolicyArn, permissionSetArn, instanceArn) + + if tfawserr.ErrCodeEquals(err, ssoadmin.ErrCodeResourceNotFoundException) { + continue + } + + if err != nil { + return err + } + + if policy == nil { + continue + } + + return fmt.Errorf("Managed Policy (%s) for SSO Permission Set (%s) still exists", managedPolicyArn, permissionSetArn) + + } + + return nil +} + +func testAccCheckAWSSSOAdminManagedPolicyAttachmentExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("Resource (%s) ID not set", resourceName) + } + + managedPolicyArn, permissionSetArn, instanceArn, err := parseSsoAdminManagedPolicyAttachmentID(rs.Primary.ID) + + if err != nil { + return fmt.Errorf("error parsing SSO Managed Policy Attachment ID (%s): %w", rs.Primary.ID, err) + } + + conn := testAccProvider.Meta().(*AWSClient).ssoadminconn + + policy, err := finder.ManagedPolicy(conn, managedPolicyArn, permissionSetArn, instanceArn) + + if err != nil { + return err + } + + if policy == nil { + return fmt.Errorf("Managed Policy (%s) for SSO Permission Set (%s) not found", managedPolicyArn, permissionSetArn) + } + + return nil + } +} + +func testAccSSOAdminManagedPolicyAttachmentBaseConfig(rName string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + +data "aws_ssoadmin_instances" "test" {} + +resource "aws_ssoadmin_permission_set" "test" { + name = %q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] +} +`, rName) +} + +func testAccSSOAdminManagedPolicyAttachmentBasicConfig(rName string) string { + return composeConfig( + testAccSSOAdminManagedPolicyAttachmentBaseConfig(rName), + ` +resource "aws_ssoadmin_managed_policy_attachment" "test" { + instance_arn = aws_ssoadmin_permission_set.test.instance_arn + managed_policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AlexaForBusinessDeviceSetup" + permission_set_arn = aws_ssoadmin_permission_set.test.arn +} +`) +} + +func testAccSSOAdminManagedPolicyAttachmentConfig_forceNew(rName string) string { + return composeConfig( + testAccSSOAdminManagedPolicyAttachmentBaseConfig(rName), + ` +resource "aws_ssoadmin_managed_policy_attachment" "test" { + instance_arn = aws_ssoadmin_permission_set.test.instance_arn + managed_policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonCognitoReadOnly" + permission_set_arn = aws_ssoadmin_permission_set.test.arn +} +`) +} + +func testAccSSOAdminManagedPolicyAttachmentConfig_multiple(rName string) string { + return composeConfig( + testAccSSOAdminManagedPolicyAttachmentBasicConfig(rName), + ` +resource "aws_ssoadmin_managed_policy_attachment" "other" { + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + managed_policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonDynamoDBReadOnlyAccess" + permission_set_arn = aws_ssoadmin_permission_set.test.arn +} +`) +} diff --git 
a/aws/resource_aws_ssoadmin_permission_set_inline_policy.go b/aws/resource_aws_ssoadmin_permission_set_inline_policy.go index e076925f0c6..cc7ca2d2d94 100644 --- a/aws/resource_aws_ssoadmin_permission_set_inline_policy.go +++ b/aws/resource_aws_ssoadmin_permission_set_inline_policy.go @@ -8,7 +8,6 @@ import ( "github.com/aws/aws-sdk-go/service/ssoadmin" "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ssoadmin/finder" ) func resourceAwsSsoAdminPermissionSetInlinePolicy() *schema.Resource { @@ -80,7 +79,12 @@ func resourceAwsSsoAdminPermissionSetInlinePolicyRead(d *schema.ResourceData, me return fmt.Errorf("error parsing SSO Permission Set Inline Policy ID: %w", err) } - policy, err := finder.InlinePolicy(conn, instanceArn, permissionSetArn) + input := &ssoadmin.GetInlinePolicyForPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + } + + output, err := conn.GetInlinePolicyForPermissionSet(input) if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, ssoadmin.ErrCodeResourceNotFoundException) { log.Printf("[WARN] Inline Policy for SSO Permission Set (%s) not found, removing from state", permissionSetArn) @@ -92,13 +96,11 @@ func resourceAwsSsoAdminPermissionSetInlinePolicyRead(d *schema.ResourceData, me return fmt.Errorf("error reading Inline Policy for SSO Permission Set (%s): %w", permissionSetArn, err) } - if policy == nil { - log.Printf("[WARN] Inline Policy for SSO Permission Set (%s) not found, removing from state", permissionSetArn) - d.SetId("") - return nil + if output == nil { + return fmt.Errorf("error reading Inline Policy for SSO Permission Set (%s): empty output", permissionSetArn) } - d.Set("inline_policy", policy) + d.Set("inline_policy", output.InlinePolicy) d.Set("instance_arn", instanceArn) d.Set("permission_set_arn", permissionSetArn) diff --git a/aws/resource_aws_ssoadmin_permission_set_inline_policy_test.go b/aws/resource_aws_ssoadmin_permission_set_inline_policy_test.go index ee12cca2c69..6d2637af690 100644 --- a/aws/resource_aws_ssoadmin_permission_set_inline_policy_test.go +++ b/aws/resource_aws_ssoadmin_permission_set_inline_policy_test.go @@ -11,7 +11,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ssoadmin/finder" ) func TestAccAWSSSOAdminPermissionSetInlinePolicy_basic(t *testing.T) { @@ -58,11 +57,6 @@ func TestAccAWSSSOAdminPermissionSetInlinePolicy_update(t *testing.T) { testAccCheckAWSSSOAdminPermissionSetInlinePolicyExists(resourceName), ), }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, { Config: testAccSSOAdminPermissionSetInlinePolicyUpdateConfig(rName), Check: resource.ComposeTestCheckFunc( @@ -100,8 +94,9 @@ func TestAccAWSSSOAdminPermissionSetInlinePolicy_disappears(t *testing.T) { }) } -func TestAccAWSSSOInlinePolicy_disappears_permissionSet(t *testing.T) { +func TestAccAWSSSOAdminPermissionSetInlinePolicy_disappears_permissionSet(t *testing.T) { resourceName := "aws_ssoadmin_permission_set_inline_policy.test" + permissionSetResourceName := "aws_ssoadmin_permission_set.test" rName := acctest.RandomWithPrefix("tf-acc-test") resource.Test(t, resource.TestCase{ @@ -113,7 +108,7 @@ func 
TestAccAWSSSOInlinePolicy_disappears_permissionSet(t *testing.T) { Config: testAccSSOAdminPermissionSetInlinePolicyBasicConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSSOAdminPermissionSetInlinePolicyExists(resourceName), - testAccCheckResourceDisappears(testAccProvider, resourceAwsSsoAdminPermissionSet(), resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsSsoAdminPermissionSet(), permissionSetResourceName), ), ExpectNonEmptyPlan: true, }, @@ -135,8 +130,12 @@ func testAccCheckAWSSSOAdminPermissionSetInlinePolicyDestroy(s *terraform.State) return fmt.Errorf("error parsing SSO Permission Set Inline Policy ID (%s): %w", rs.Primary.ID, err) } - policy, err := finder.InlinePolicy(conn, instanceArn, permissionSetArn) + input := &ssoadmin.GetInlinePolicyForPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + } + output, err := conn.GetInlinePolicyForPermissionSet(input) if tfawserr.ErrCodeEquals(err, ssoadmin.ErrCodeResourceNotFoundException) { continue } @@ -145,7 +144,12 @@ func testAccCheckAWSSSOAdminPermissionSetInlinePolicyDestroy(s *terraform.State) return err } - if aws.StringValue(policy) == "" { + if output == nil { + continue + } + + // SSO API returns empty string when removed from Permission Set + if aws.StringValue(output.InlinePolicy) == "" { continue } @@ -174,13 +178,17 @@ func testAccCheckAWSSSOAdminPermissionSetInlinePolicyExists(resourceName string) conn := testAccProvider.Meta().(*AWSClient).ssoadminconn - policy, err := finder.InlinePolicy(conn, instanceArn, permissionSetArn) + input := &ssoadmin.GetInlinePolicyForPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + } + output, err := conn.GetInlinePolicyForPermissionSet(input) if err != nil { return err } - if policy == nil { + if output == nil || output.InlinePolicy == nil { return fmt.Errorf("Inline Policy for SSO Permission Set (%s) not found", permissionSetArn) } @@ -190,6 +198,8 @@ func testAccCheckAWSSSOAdminPermissionSetInlinePolicyExists(resourceName string) func testAccSSOAdminPermissionSetInlinePolicyBasicConfig(rName string) string { return fmt.Sprintf(` +data "aws_partition" "current" {} + data "aws_ssoadmin_instances" "test" {} data "aws_iam_policy_document" "test" { @@ -202,26 +212,28 @@ data "aws_iam_policy_document" "test" { ] resources = [ - "arn:aws:s3:::*", + "arn:${data.aws_partition.current.partition}:s3:::*", ] } } resource "aws_ssoadmin_permission_set" "test" { - name = %q - instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + name = %q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] } resource "aws_ssoadmin_permission_set_inline_policy" "test" { - inline_policy = data.aws_iam_policy_document.test.json - instance_arn = aws_ssoadmin_permission_set.test.instance_arn - permission_set_arn = aws_ssoadmin_permission_set.test.arn + inline_policy = data.aws_iam_policy_document.test.json + instance_arn = aws_ssoadmin_permission_set.test.instance_arn + permission_set_arn = aws_ssoadmin_permission_set.test.arn } `, rName) } func testAccSSOAdminPermissionSetInlinePolicyUpdateConfig(rName string) string { return fmt.Sprintf(` +data "aws_partition" "current" {} + data "aws_ssoadmin_instances" "test" {} data "aws_iam_policy_document" "test" { @@ -233,20 +245,20 @@ data "aws_iam_policy_document" "test" { ] resources = [ - "arn:aws:s3:::*", + "arn:${data.aws_partition.current.partition}:s3:::*", ] } } resource 
"aws_ssoadmin_permission_set" "test" { - name = %q - instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + name = %q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] } resource "aws_ssoadmin_permission_set_inline_policy" "test" { - inline_policy = data.aws_iam_policy_document.test.json - instance_arn = aws_ssoadmin_permission_set.test.instance_arn - permission_set_arn = aws_ssoadmin_permission_set.test.arn + inline_policy = data.aws_iam_policy_document.test.json + instance_arn = aws_ssoadmin_permission_set.test.instance_arn + permission_set_arn = aws_ssoadmin_permission_set.test.arn } `, rName) } diff --git a/website/docs/d/ssoadmin_permission_set.html.markdown b/website/docs/d/ssoadmin_permission_set.html.markdown index ee6aa37f94c..18d34fde390 100644 --- a/website/docs/d/ssoadmin_permission_set.html.markdown +++ b/website/docs/d/ssoadmin_permission_set.html.markdown @@ -43,4 +43,4 @@ In addition to all arguments above, the following attributes are exported: * `description` - The description of the Permission Set. * `relay_state` - The relay state URL used to redirect users within the application during the federation authentication process. * `session_duration` - The length of time that the application user sessions are valid in the ISO-8601 standard. -* `tags` - Key-value map of resource tags. \ No newline at end of file +* `tags` - Key-value map of resource tags. diff --git a/website/docs/r/ssoadmin_managed_policy_attachment.html.markdown b/website/docs/r/ssoadmin_managed_policy_attachment.html.markdown new file mode 100644 index 00000000000..b91c1291873 --- /dev/null +++ b/website/docs/r/ssoadmin_managed_policy_attachment.html.markdown @@ -0,0 +1,52 @@ +--- +subcategory: "SSO Admin" +layout: "aws" +page_title: "AWS: aws_ssoadmin_managed_policy_attachment" +description: |- + Manages an IAM managed policy for a Single Sign-On (SSO) Permission Set +--- + +# Resource: aws_ssoadmin_managed_policy_attachment + +Provides an IAM managed policy for a Single Sign-On (SSO) Permission Set resource + +~> **NOTE:** Creating this resource will automatically [Provision the Permission Set](https://docs.aws.amazon.com/singlesignon/latest/APIReference/API_ProvisionPermissionSet.html) to apply the corresponding updates to all assigned accounts. + +## Example Usage + +```hcl +data "aws_ssoadmin_instances" "example" {} + +resource "aws_ssoadmin_permission_set" "example" { + name = "Example" + instance_arn = tolist(data.aws_ssoadmin_instances.example.arns)[0] +} + +resource "aws_ssoadmin_managed_policy_attachment" "example" { + instance_arn = aws_ssoadmin_permission_set.example.instance_arn + managed_policy_arn = "arn:aws:iam::aws:policy/AlexaForBusinessDeviceSetup" + permission_set_arn = aws_ssoadmin_permission_set.example.arn +} +``` + +## Argument Reference + +The following arguments are supported: + +* `instance_arn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the SSO Instance under which the operation will be executed. +* `managed_policy_arn` - (Required, Forces new resource) The IAM managed policy Amazon Resource Name (ARN) to be attached to the Permission Set. +* `permission_set_arn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the Permission Set. + +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - The Amazon Resource Names (ARNs) of the Managed Policy, Permission Set, and SSO Instance, separated by a comma (`,`). 
+ +## Import + +SSO Managed Policy Attachments can be imported using the `managed_policy_arn`, `permission_set_arn`, and `instance_arn` separated by a comma (`,`) e.g. + +``` +$ terraform import aws_ssoadmin_managed_policy_attachment.example arn:aws:iam::aws:policy/AlexaForBusinessDeviceSetup,arn:aws:sso:::permissionSet/ssoins-2938j0x8920sbj72/ps-80383020jr9302rk,arn:aws:sso:::instance/ssoins-2938j0x8920sbj72 +``` diff --git a/website/docs/r/ssoadmin_permission_set.html.markdown b/website/docs/r/ssoadmin_permission_set.html.markdown index 7c1bab71e35..d54c562738a 100644 --- a/website/docs/r/ssoadmin_permission_set.html.markdown +++ b/website/docs/r/ssoadmin_permission_set.html.markdown @@ -51,4 +51,4 @@ SSO Permission Sets can be imported using the `arn` and `instance_arn` separated ``` $ terraform import aws_ssoadmin_permission_set.example arn:aws:sso:::permissionSet/ssoins-2938j0x8920sbj72/ps-80383020jr9302rk,arn:aws:sso:::instance/ssoins-2938j0x8920sbj72 -``` \ No newline at end of file +``` diff --git a/website/docs/r/ssoadmin_permission_set_inline_policy.html.markdown b/website/docs/r/ssoadmin_permission_set_inline_policy.html.markdown index ce1f2b406a0..422871b77ed 100644 --- a/website/docs/r/ssoadmin_permission_set_inline_policy.html.markdown +++ b/website/docs/r/ssoadmin_permission_set_inline_policy.html.markdown @@ -10,7 +10,8 @@ description: |- Provides an IAM inline policy for a Single Sign-On (SSO) Permission Set resource -~> **NOTE:** Creating or updating this resource will automatically [Provision the Permission Set](https://docs.aws.amazon.com/singlesignon/latest/APIReference/API_ProvisionPermissionSet.html) to apply the corresponding updates to all assigned accounts. +~> **NOTE:** AWS Single Sign-On (SSO) only supports one IAM inline policy per [`aws_ssoadmin_permission_set`](ssoadmin_permission_set.html) resource. +Creating or updating this resource will automatically [Provision the Permission Set](https://docs.aws.amazon.com/singlesignon/latest/APIReference/API_ProvisionPermissionSet.html) to apply the corresponding updates to all assigned accounts. ## Example Usage @@ -22,7 +23,7 @@ resource "aws_ssoadmin_permission_set" "example" { instance_arn = tolist(data.aws_ssoadmin_instances.example.arns)[0] } -data "aws_iam_policy_document" "test" { +data "aws_iam_policy_document" "example" { statement { sid = "1" @@ -38,7 +39,7 @@ data "aws_iam_policy_document" "test" { } resource "aws_ssoadmin_permission_set_inline_policy" "example" { - inline_policy = data.aws_iam_policy_document.test.json + inline_policy = data.aws_iam_policy_document.example.json instance_arn = aws_ssoadmin_permission_set.example.instance_arn permission_set_arn = aws_ssoadmin_permission_set.example.arn } @@ -56,7 +57,7 @@ The following arguments are supported: In addition to all arguments above, the following attributes are exported: -* `id` - Permission Set Amazon Resource Name (ARN) and SSO Instance Amazon Resource Name (ARN), separated by a comma (`,`). +* `id` - The Amazon Resource Names (ARNs) of the Permission Set and SSO Instance, separated by a comma (`,`). 
## Import @@ -64,4 +65,4 @@ SSO Permission Set Inline Policies can be imported using the `permission_set_arn ``` $ terraform import aws_ssoadmin_permission_set_inline_policy.example arn:aws:sso:::permissionSet/ssoins-2938j0x8920sbj72/ps-80383020jr9302rk,arn:aws:sso:::instance/ssoins-2938j0x8920sbj72 -``` \ No newline at end of file +``` From 4e86ee0360bbd2c85d39bb9c561dad5f36f7b5f3 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 7 Jan 2021 17:42:49 -0500 Subject: [PATCH 0452/1212] add missing attribute --- website/docs/r/ssoadmin_managed_policy_attachment.html.markdown | 1 + 1 file changed, 1 insertion(+) diff --git a/website/docs/r/ssoadmin_managed_policy_attachment.html.markdown b/website/docs/r/ssoadmin_managed_policy_attachment.html.markdown index b91c1291873..b3ec1aff7e9 100644 --- a/website/docs/r/ssoadmin_managed_policy_attachment.html.markdown +++ b/website/docs/r/ssoadmin_managed_policy_attachment.html.markdown @@ -42,6 +42,7 @@ The following arguments are supported: In addition to all arguments above, the following attributes are exported: * `id` - The Amazon Resource Names (ARNs) of the Managed Policy, Permission Set, and SSO Instance, separated by a comma (`,`). +* `managed_policy_name` - The name of the IAM Managed Policy. ## Import From ca0a0f28e8a17bc021deac8712a743e41f2bab89 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 7 Jan 2021 17:52:40 -0500 Subject: [PATCH 0453/1212] add mixed policy attachments test to permission set --- ...source_aws_ssoadmin_permission_set_test.go | 68 +++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/aws/resource_aws_ssoadmin_permission_set_test.go b/aws/resource_aws_ssoadmin_permission_set_test.go index e1384c94628..47f4406471f 100644 --- a/aws/resource_aws_ssoadmin_permission_set_test.go +++ b/aws/resource_aws_ssoadmin_permission_set_test.go @@ -269,6 +269,36 @@ func TestAccAWSSSOAdminPermissionSet_updateSessionDuration(t *testing.T) { }) } +func TestAccAWSSSOAdminPermissionSet_mixedPolicyAttachments(t *testing.T) { + resourceName := "aws_ssoadmin_permission_set.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOAdminInstances(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOAdminPermissionSetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSSOAdminPermissionSetBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSOAdminPermissionSetExists(resourceName), + ), + }, + { + Config: testAccAWSSSOAdminPermissionSetMixedPolicyAttachmentsConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSOAdminPermissionSetExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckAWSSSOAdminPermissionSetDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).ssoadminconn @@ -413,3 +443,41 @@ resource "aws_ssoadmin_permission_set" "test" { } `, rName, tagKey1, tagValue1, tagKey2, tagValue2) } + +func testAccAWSSSOAdminPermissionSetMixedPolicyAttachmentsConfig(rName string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + +data "aws_ssoadmin_instances" "test" {} + +resource "aws_ssoadmin_permission_set" "test" { + name = %q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] +} + +resource "aws_ssoadmin_managed_policy_attachment" "test" { + instance_arn = 
aws_ssoadmin_permission_set.test.instance_arn + managed_policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AlexaForBusinessDeviceSetup" + permission_set_arn = aws_ssoadmin_permission_set.test.arn +} + +data "aws_iam_policy_document" "test" { + statement { + sid = "1" + + actions = [ + "s3:ListAllMyBuckets", + ] + + resources = [ + "arn:${data.aws_partition.current.partition}:s3:::*", + ] + } +} +resource "aws_ssoadmin_permission_set_inline_policy" "test" { + inline_policy = data.aws_iam_policy_document.test.json + instance_arn = aws_ssoadmin_permission_set.test.instance_arn + permission_set_arn = aws_ssoadmin_permission_set.test.arn +} +`, rName) +} From 2b5a8baee9e18f06b45d350fb8e41d70e1f62417 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 7 Jan 2021 18:22:09 -0500 Subject: [PATCH 0454/1212] rebased to master version w/changes --- aws/provider.go | 1 + 1 file changed, 1 insertion(+) diff --git a/aws/provider.go b/aws/provider.go index ca0d3471156..0543109d0f1 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -871,6 +871,7 @@ func Provider() *schema.Provider { "aws_route_table_association": resourceAwsRouteTableAssociation(), "aws_sagemaker_code_repository": resourceAwsSagemakerCodeRepository(), "aws_sagemaker_endpoint_configuration": resourceAwsSagemakerEndpointConfiguration(), + "aws_sagemaker_image": resourceAwsSagemakerImage(), "aws_sagemaker_endpoint": resourceAwsSagemakerEndpoint(), "aws_sagemaker_model": resourceAwsSagemakerModel(), "aws_sagemaker_notebook_instance_lifecycle_configuration": resourceAwsSagemakerNotebookInstanceLifeCycleConfiguration(), From fefedc1cd1378953aeee02d73d70a8274c9c61b7 Mon Sep 17 00:00:00 2001 From: angie pinilla Date: Thu, 7 Jan 2021 18:33:56 -0500 Subject: [PATCH 0455/1212] Update CHANGELOG for #15808 --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 75895ab63fa..19c987a6c9e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,12 @@ FEATURES +* **New Data Source:** `aws_ssoadmin_instances` [GH-15808] +* **New Data Source:** `aws_ssoadmin_permission_set` [GH-15808] * **New Resource:** `aws_sagemaker_image` [GH-16082] +* **New Resource:** `aws_ssoadmin_managed_policy_attachment` [GH-15808] +* **New Resource:** `aws_ssoadmin_permission_set` [GH-15808] +* **New Resource:** `aws_ssoadmin_permission_set_inline_policy` [GH-15808] ENHANCEMENTS From b454050e9ee74870d1b35ca86fd921012dfa6990 Mon Sep 17 00:00:00 2001 From: tf-release-bot Date: Fri, 8 Jan 2021 00:31:05 +0000 Subject: [PATCH 0456/1212] v3.23.0 --- CHANGELOG.md | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 19c987a6c9e..3d3c0b492d7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,34 +1,34 @@ -## 3.23.0 (Unreleased) +## 3.23.0 (January 08, 2021) FEATURES -* **New Data Source:** `aws_ssoadmin_instances` [GH-15808] -* **New Data Source:** `aws_ssoadmin_permission_set` [GH-15808] -* **New Resource:** `aws_sagemaker_image` [GH-16082] -* **New Resource:** `aws_ssoadmin_managed_policy_attachment` [GH-15808] -* **New Resource:** `aws_ssoadmin_permission_set` [GH-15808] -* **New Resource:** `aws_ssoadmin_permission_set_inline_policy` [GH-15808] +* **New Data Source:** `aws_ssoadmin_instances` ([#15808](https://github.com/hashicorp/terraform-provider-aws/issues/15808)) +* **New Data Source:** `aws_ssoadmin_permission_set` ([#15808](https://github.com/hashicorp/terraform-provider-aws/issues/15808)) +* **New Resource:** 
`aws_sagemaker_image` ([#16082](https://github.com/hashicorp/terraform-provider-aws/issues/16082)) +* **New Resource:** `aws_ssoadmin_managed_policy_attachment` ([#15808](https://github.com/hashicorp/terraform-provider-aws/issues/15808)) +* **New Resource:** `aws_ssoadmin_permission_set` ([#15808](https://github.com/hashicorp/terraform-provider-aws/issues/15808)) +* **New Resource:** `aws_ssoadmin_permission_set_inline_policy` ([#15808](https://github.com/hashicorp/terraform-provider-aws/issues/15808)) ENHANCEMENTS -* data-source/aws_imagebuilder_image_recipe: Add `working_directory` attribute [GH-16947] -* data-source/aws_elasticache_replication_group: Add reader_endpoint_address attribute [GH-9979] -* resource/aws_elasticache_replication_group: Add reader_endpoint_address attribute [GH-9979] -* resource/aws_elasticache_replication_group: Allows configuring `replicas_per_node_group` for "Redis (cluster mode disabled)" [GH-16829] -* resource/aws_imagebuilder_image_recipe: Add `working_directory` argument [GH-16947] -* resource/aws_glue_crawler: add support for `lineage_configuration` and `recrawl_policy` [GH-16714] -* resource/aws_glue_crawler: add plan time validations to `name`, `description` and `table_prefix` [GH-16714] -* resource/aws_kinesis_stream: Update `retention_period` argument plan-time validation to include up to 8760 hours [GH-16608] -* resource/aws_msk_cluster: Support `PER_TOPIC_PER_PARTITION` value for `enhanced_monitoring` argument plan-time validation [GH-16914] -* resource/aws_route53_zone: Add length validations for `delegation_set_id` and `name` arguments [GH-12340] -* resource/aws_vpc_endpoint_service: Make `private_dns_name` configurable and add `private_dns_name_configuration` attribute [GH-16495] +* data-source/aws_imagebuilder_image_recipe: Add `working_directory` attribute ([#16947](https://github.com/hashicorp/terraform-provider-aws/issues/16947)) +* data-source/aws_elasticache_replication_group: Add reader_endpoint_address attribute ([#9979](https://github.com/hashicorp/terraform-provider-aws/issues/9979)) +* resource/aws_elasticache_replication_group: Add reader_endpoint_address attribute ([#9979](https://github.com/hashicorp/terraform-provider-aws/issues/9979)) +* resource/aws_elasticache_replication_group: Allows configuring `replicas_per_node_group` for "Redis (cluster mode disabled)" ([#16829](https://github.com/hashicorp/terraform-provider-aws/issues/16829)) +* resource/aws_imagebuilder_image_recipe: Add `working_directory` argument ([#16947](https://github.com/hashicorp/terraform-provider-aws/issues/16947)) +* resource/aws_glue_crawler: add support for `lineage_configuration` and `recrawl_policy` ([#16714](https://github.com/hashicorp/terraform-provider-aws/issues/16714)) +* resource/aws_glue_crawler: add plan time validations to `name`, `description` and `table_prefix` ([#16714](https://github.com/hashicorp/terraform-provider-aws/issues/16714)) +* resource/aws_kinesis_stream: Update `retention_period` argument plan-time validation to include up to 8760 hours ([#16608](https://github.com/hashicorp/terraform-provider-aws/issues/16608)) +* resource/aws_msk_cluster: Support `PER_TOPIC_PER_PARTITION` value for `enhanced_monitoring` argument plan-time validation ([#16914](https://github.com/hashicorp/terraform-provider-aws/issues/16914)) +* resource/aws_route53_zone: Add length validations for `delegation_set_id` and `name` arguments ([#12340](https://github.com/hashicorp/terraform-provider-aws/issues/12340)) +* resource/aws_vpc_endpoint_service: Make 
`private_dns_name` configurable and add `private_dns_name_configuration` attribute ([#16495](https://github.com/hashicorp/terraform-provider-aws/issues/16495)) BUG FIXES -* resource/aws_emr_cluster: Remove from state instead of returning an error on long terminated cluster [GH-16924] -* resource/aws_glue_catalog_table: Glue table partition keys should be set to empty list instead of being unset [GH-16727] -* resource/aws_imagebuilder_distribution_configuration: Remove `user_ids` argument maximum limit [GH-16905] -* resource/aws_transfer_user: Update `user_name` argument validation to support 100 characters [GH-16938] +* resource/aws_emr_cluster: Remove from state instead of returning an error on long terminated cluster ([#16924](https://github.com/hashicorp/terraform-provider-aws/issues/16924)) +* resource/aws_glue_catalog_table: Glue table partition keys should be set to empty list instead of being unset ([#16727](https://github.com/hashicorp/terraform-provider-aws/issues/16727)) +* resource/aws_imagebuilder_distribution_configuration: Remove `user_ids` argument maximum limit ([#16905](https://github.com/hashicorp/terraform-provider-aws/issues/16905)) +* resource/aws_transfer_user: Update `user_name` argument validation to support 100 characters ([#16938](https://github.com/hashicorp/terraform-provider-aws/issues/16938)) ## 3.22.0 (December 18, 2020) From 8e147cb5de8b14af5c1f1de09da9620e80709e05 Mon Sep 17 00:00:00 2001 From: Rob H Date: Tue, 6 Aug 2019 15:52:22 +0100 Subject: [PATCH 0457/1212] Fixed bug. include_map was using ExcludeMap on Update --- aws/resource_aws_fms_policy.go | 728 +++++++++++++++++++--------- aws/resource_aws_fms_policy_test.go | 149 +++--- 2 files changed, 558 insertions(+), 319 deletions(-) diff --git a/aws/resource_aws_fms_policy.go b/aws/resource_aws_fms_policy.go index fd99932b3eb..a1ac82c9b3d 100644 --- a/aws/resource_aws_fms_policy.go +++ b/aws/resource_aws_fms_policy.go @@ -3,13 +3,14 @@ package aws import ( "encoding/json" "fmt" + "log" + "reflect" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/fms" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" - "log" - "strings" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/mitchellh/mapstructure" ) func resourceAwsFmsPolicy() *schema.Resource { @@ -43,15 +44,22 @@ func resourceAwsFmsPolicy() *schema.Resource { "exclude_map": { Type: schema.TypeSet, + MaxItems: 1, Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "account": { - Type: schema.TypeList, - Required: true, + Type: schema.TypeSet, + Optional: true, Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringLenBetween(12, 12), + Type: schema.TypeString, + }, + }, + "orgunit": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, }, }, }, @@ -60,15 +68,22 @@ func resourceAwsFmsPolicy() *schema.Resource { "include_map": { Type: schema.TypeSet, + MaxItems: 1, Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "account": { - Type: schema.TypeList, - Required: true, + Type: schema.TypeSet, + Optional: true, Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringLenBetween(12, 12), + Type: schema.TypeString, + }, + }, + "orgunit": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: 
schema.TypeString, }, }, }, @@ -99,76 +114,348 @@ func resourceAwsFmsPolicy() *schema.Resource { "resource_tags": tagsSchema(), "security_service_policy_data": { - Type: schema.TypeSet, + Type: schema.TypeList, + MaxItems: 1, Required: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "managed_service_data": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Required: true, - Type: schema.TypeString, - }, - "rule_groups": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Required: true, - }, - "override_action": { - Type: schema.TypeMap, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{"COUNT", "NONE"}, false), - }, - }, - }, - }, - }, + "waf": wafSchema(), + "wafv2": wafV2Schema(), + "shield_advanced": { + Type: schema.TypeBool, + ExactlyOneOf: []string{"security_service_policy_data.0.waf", "security_service_policy_data.0.wafv2", "security_service_policy_data.0.shield_advanced", "security_service_policy_data.0.security_groups_common", "security_service_policy_data.0.security_groups_content_audit", "security_service_policy_data.0.security_groups_usage_audit"}, + Optional: true, + }, + "security_groups_common": securityGroupsCommon(), + "security_groups_content_audit": securityGroupsContentAudit(), + "security_groups_usage_audit": securityGroupsUsageAudit(), + }, + }, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func securityGroupsCommon() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + ExactlyOneOf: []string{"security_service_policy_data.0.waf", "security_service_policy_data.0.wafv2", "security_service_policy_data.0.shield_advanced", "security_service_policy_data.0.security_groups_common", "security_service_policy_data.0.security_groups_content_audit", "security_service_policy_data.0.security_groups_usage_audit"}, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "revert_manual_security_group_changes": { + Type: schema.TypeBool, + Optional: true, + }, + "exclusive_resource_security_group_management": { + Type: schema.TypeBool, + Optional: true, + }, + "security_groups": { + Type: schema.TypeSet, + Required: true, + Elem: schema.TypeString, + }, + "remediation_enabled": { + Type: schema.TypeBool, + Optional: true, + }, + "resource_type": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + } +} + +func securityGroupsContentAudit() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + ExactlyOneOf: []string{"security_service_policy_data.0.waf", "security_service_policy_data.0.wafv2", "security_service_policy_data.0.shield_advanced", "security_service_policy_data.0.security_groups_common", "security_service_policy_data.0.security_groups_content_audit", "security_service_policy_data.0.security_groups_usage_audit"}, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "revert_manual_security_group_changes": { + Type: schema.TypeBool, + Optional: true, + }, + "security_group_action": { + Type: schema.TypeString, + Required: true, + }, + "security_groups": { + Type: schema.TypeSet, + Required: true, + Elem: schema.TypeString, + }, + "remediation_enabled": { + Type: schema.TypeBool, + Optional: true, + }, + 
"resource_type": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + } +} + +func securityGroupsUsageAudit() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + ExactlyOneOf: []string{"security_service_policy_data.0.waf", "security_service_policy_data.0.wafv2", "security_service_policy_data.0.shield_advanced", "security_service_policy_data.0.security_groups_common", "security_service_policy_data.0.security_groups_content_audit", "security_service_policy_data.0.security_groups_usage_audit"}, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "revert_manual_security_group_changes": { + Type: schema.TypeBool, + Optional: true, + }, + "security_group_action": { + Type: schema.TypeString, + Required: true, + }, + "security_groups": { + Type: schema.TypeSet, + Required: true, + Elem: schema.TypeString, + }, + "remediation_enabled": { + Type: schema.TypeBool, + Optional: true, + }, + "resource_type": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + } +} + +func wafSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + ExactlyOneOf: []string{"security_service_policy_data.0.waf", "security_service_policy_data.0.wafv2", "security_service_policy_data.0.shield_advanced", "security_service_policy_data.0.security_groups_common", "security_service_policy_data.0.security_groups_content_audit", "security_service_policy_data.0.security_groups_usage_audit"}, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rule_groups": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + }, + "override_action": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, }, - }, - "default_action": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Required: true, - Type: schema.TypeString, - }, - }, + "default_action": { + Type: schema.TypeString, + Required: true, }, }, }, }, }, - "type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{"WAF", "ADVANCED_SHIELD"}, false), - }, }, }, }, - "arn": { - Type: schema.TypeString, - Computed: true, + }, + } +} + +func wafV2Schema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + ExactlyOneOf: []string{"security_service_policy_data.0.waf", "security_service_policy_data.0.wafv2", "security_service_policy_data.0.shield_advanced", "security_service_policy_data.0.security_groups_common", "security_service_policy_data.0.security_groups_content_audit", "security_service_policy_data.0.security_groups_usage_audit"}, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "preprocess_rule_groups": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rule_group_arn": { + Type: schema.TypeString, + Required: true, + }, + "override_action": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "managed_rule_group_identifier": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "version": { + Type: schema.TypeString, + Optional: true, + }, + "vendor_name": { + Type: schema.TypeString, + 
Optional: true,
+						},
+						"managed_rule_group_name": {
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+					},
+				},
+			},
+			"rule_group_type": {
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+			"exclude_rule_groups": {
+				Type:     schema.TypeSet,
+				Optional: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+			"post_process_rule_groups": {
+				Type:     schema.TypeSet,
+				Optional: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+			"default_action": {
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+			"override_customer_web_acl_association": {
+				Type:     schema.TypeBool,
+				Optional: true,
+			},
+			"logging_configuration": {
+				Type:     schema.TypeSet,
+				Optional: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"log_destination_configs": {
+							Type:     schema.TypeSet,
+							Required: true,
+							Elem:     &schema.Schema{Type: schema.TypeString},
+						},
+						"redacted_fields": {
+							Type:     schema.TypeSet,
+							Optional: true,
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"redacted_field_type": {
+										Type:     schema.TypeString,
+										Required: true,
+									},
+									"redacted_field_value": {
+										Type:     schema.TypeString,
+										Optional: true,
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	},
+	}
+}
+
+// Shared structs
+type fmsPolicyBasicType struct {
+	Type string `json:"type" mapstructure:"type"`
+}
+
+// WAF structs
+type fmsPolicyManagedServiceDataWAF struct {
+	Type       string               `json:"type" mapstructure:"type"`
+	RuleGroups []fmsPolicyRuleGroup `json:"ruleGroups" mapstructure:"rule_groups"`
+}
+
+type fmsPolicyRuleGroup struct {
+	ID             string             `json:"id" mapstructure:"id"`
+	OverrideAction fmsPolicyBasicType `json:"overrideAction" mapstructure:"override_action"`
+}
+
+// WAFv2 structs
+type fmsPolicyManagedServiceDataWAFV2 struct {
+	Type                              string                        `json:"type" mapstructure:"type"`
+	PreProcessRuleGroups              []fmsPolicyProcessRuleGroup   `json:"preProcessRuleGroups" mapstructure:"preprocess_rule_groups"`
+	PostProcessRuleGroups             []fmsPolicyProcessRuleGroup   `json:"postProcessRuleGroups" mapstructure:"postprocess_rule_groups"`
+	DefaultAction                     fmsPolicyBasicType            `json:"defaultAction" mapstructure:"default_action"`
+	OverrideCustomerWebACLAssociation bool                          `json:"overrideCustomerWebACLAssociation" mapstructure:"override_customer_web_acl_association"`
+	LoggingConfiguration              fmsPolicyLoggingConfiguration `json:"loggingConfiguration" mapstructure:"logging_configuration"`
+}
+
+type fmsPolicyLoggingConfiguration struct {
+	LogDestinationConfigs []string                 `json:"logDestinationConfigs" mapstructure:"log_destination_configs"`
+	RedactedFields        []fmsPolicyRedactedField `json:"redactedFields" mapstructure:"redacted_fields"`
+}
+
+type fmsPolicyRedactedField struct {
+	RedactedFieldType  string `json:"redactedFieldType" mapstructure:"redacted_field_type"`
+	RedactedFieldValue string `json:"redactedFieldValue" mapstructure:"redacted_field_value"`
+}
+
+type fmsPolicyRuleGroupIdentifier struct {
+	Version              string `json:"version" mapstructure:"version"`
+	VendorName           string `json:"vendorName" mapstructure:"vendor_name"`
+	ManagedRuleGroupName string `json:"managedRuleGroupName" mapstructure:"managed_rule_group_name"`
+}
+
+type fmsPolicyProcessRuleGroup struct {
+	RuleGroupARN               string                       `json:"ruleGroupArn" mapstructure:"rule_group_arn"`
+	OverrideAction             fmsPolicyBasicType           `json:"overrideAction" mapstructure:"override_action"`
+	ManagedRuleGroupIdentifier fmsPolicyRuleGroupIdentifier `json:"managedRuleGroupIdentifier" mapstructure:"managed_rule_group_identifier"`
+	RuleGroupType              string                       `json:"ruleGroupType" mapstructure:"rule_group_type"`
+	ExcludeRules               []string                     `json:"excludeRules" mapstructure:"excluded_rules"`
+}
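+
+// For orientation, a rough sketch of the JSON these structs marshal to: a WAF
+// policy's ManagedServiceData serializes as (illustrative rule group ID)
+//
+//	{"type":"WAF","ruleGroups":[{"id":"12345","overrideAction":{"type":"COUNT"}}]}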
+// SECURITY_GROUPS_COMMON structs +type fmsPolicyManagedServiceDataSecurityGroupsCommon struct { + Type string `json:"type" mapstructure:"type"` + RevertManualSecurityGroupChanges bool `json:"revertManualSecurityGroupChanges" mapstructure:"revert_manual_security_group_changes"` + ExclusiveResourceSecurityGroupManagement bool `json:"exclusiveResourceSecurityGroupManagement" mapstructure:"exclusive_resource_security_group_management"` + SecurityGroups []fmsPolicyRuleGroup `json:"ruleGroups" mapstructure:"security_groups"` +} + +// SECURITY_GROUPS_CONTENT_AUDIT structs +type fmsPolicyManagedServiceDataSecurityGroupsContentAudit struct { + Type string `json:"type" mapstructure:"type"` + SecurityGroups []fmsPolicyRuleGroup `json:"ruleGroups" mapstructure:"security_groups"` + SecurityGroupAction fmsPolicyBasicType `json:"securityGroupAction" mapstructure:"security_group_action"` +} + +// SECURITY_GROUPS_USAGE_AUDIT structs +type fmsPolicyManagedServiceDataSecurityGroupsUsageAudit struct { + Type string `json:"type" mapstructure:"type"` + DeleteUnusedSecurityGroups bool `json:"deleteUnusedSecurityGroups" mapstructure:"delete_unused_security_groups"` + CoalesceRedundantSecurityGroups bool `json:"coalesceRedundantSecurityGroups" mapstructure:"coalesce_redundant_security_groups"` +} + func resourceAwsFmsPolicyCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).fmsconn @@ -181,7 +468,10 @@ func resourceAwsFmsPolicyCreate(d *schema.ResourceData, meta interface{}) error } if v, ok := d.GetOk("security_service_policy_data"); ok { - fmsPolicy.SecurityServicePolicyData = expandAwsFmsManagedSecurityData(v.(*schema.Set)) + var err error + if fmsPolicy.SecurityServicePolicyData, err = expandSecurityServicePolicyData(v.([]interface{})[0].(map[string]interface{})); err != nil { + return err + } } if rTags, tagsOk := d.GetOk("resource_tags"); tagsOk { @@ -189,11 +479,11 @@ func resourceAwsFmsPolicyCreate(d *schema.ResourceData, meta interface{}) error } if v, ok := d.GetOk("include_map"); ok { - fmsPolicy.IncludeMap = expandAccountList(v.(*schema.Set)) + fmsPolicy.IncludeMap = expandFMSPolicyMap(v.(*schema.Set)) } if v, ok := d.GetOk("exclude_map"); ok { - fmsPolicy.ExcludeMap = expandAccountList(v.(*schema.Set)) + fmsPolicy.ExcludeMap = expandFMSPolicyMap(v.(*schema.Set)) } params := &fms.PutPolicyInput{ @@ -237,13 +527,22 @@ func resourceAwsFmsPolicyRead(d *schema.ResourceData, meta interface{}) error { d.Set("name", aws.StringValue(resp.Policy.PolicyName)) d.Set("exclude_resource_tags", aws.BoolValue(resp.Policy.ExcludeResourceTags)) - d.Set("exclude_map", flattenFMSAccountMap(resp.Policy.ExcludeMap)) - d.Set("include_map", flattenFMSAccountMap(resp.Policy.IncludeMap)) + if err = d.Set("exclude_map", flattenFMSPolicyMap(resp.Policy.ExcludeMap)); err != nil { + return err + } + d.Set("include_map", flattenFMSPolicyMap(resp.Policy.IncludeMap)) d.Set("remediation_enabled", aws.BoolValue(resp.Policy.RemediationEnabled)) d.Set("resource_type_list", resp.Policy.ResourceTypeList) d.Set("policy_update_token", aws.StringValue(resp.Policy.PolicyUpdateToken)) d.Set("resource_tags", flattenFMSResourceTags(resp.Policy.ResourceTags)) - d.Set("security_service_policy_data", flattenFmsSecurityServicePolicyData(resp.Policy.SecurityServicePolicyData)) + + securityServicePolicyData, err := fmsPolicyUnmarshalManagedServiceData(resp.Policy.SecurityServicePolicyData) + if err != nil { + return err + } + if err = d.Set("security_service_policy_data", securityServicePolicyData); err != nil { + return 
err + } return nil } @@ -264,12 +563,12 @@ func resourceAwsFmsPolicyUpdate(d *schema.ResourceData, meta interface{}) error requestUpdate := false if d.HasChange("exclude_map") { - fmsPolicy.ExcludeMap = expandAccountList(d.Get("exclude_map").(*schema.Set)) + fmsPolicy.ExcludeMap = expandFMSPolicyMap(d.Get("exclude_map").(*schema.Set)) requestUpdate = true } if d.HasChange("include_map") { - fmsPolicy.ExcludeMap = expandAccountList(d.Get("include_map").(*schema.Set)) + fmsPolicy.IncludeMap = expandFMSPolicyMap(d.Get("include_map").(*schema.Set)) requestUpdate = true } @@ -279,10 +578,13 @@ func resourceAwsFmsPolicyUpdate(d *schema.ResourceData, meta interface{}) error } if requestUpdate { - fmsPolicy.SecurityServicePolicyData = expandAwsFmsManagedSecurityData(d.Get("security_service_policy_data").(*schema.Set)) + var err error + if fmsPolicy.SecurityServicePolicyData, err = expandSecurityServicePolicyData(d.Get("security_service_policy_data").(*schema.Set).List()[0].(map[string]interface{})); err != nil { + return err + } params := &fms.PutPolicyInput{Policy: fmsPolicy} - _, err := conn.PutPolicy(params) + _, err = conn.PutPolicy(params) if err != nil { return fmt.Errorf("Error modifying FMS Policy Rule: %s", err) @@ -312,104 +614,149 @@ func resourceAwsFmsPolicyDelete(d *schema.ResourceData, meta interface{}) error return nil } -func expandAccountList(set *schema.Set) map[string][]*string { - var accountList = make(map[string][]*string) - - for _, account := range set.List() { - l := account.(map[string]interface{}) - y := l["account"].([]interface{}) - - for _, a := range y { - accountList["ACCOUNT"] = append(accountList["ACCOUNT"], aws.String(a.(string))) +func terraformMapDecodeHelper() mapstructure.DecodeHookFuncType { + return func(inType reflect.Type, outType reflect.Type, value interface{}) (interface{}, error) { + if inType == reflect.SliceOf(outType) && reflect.ValueOf(value).Len() == 1 { + return reflect.ValueOf(value).Index(0).Interface(), nil } + return value, nil } - - return accountList } -func constructManagedServiceData(m []interface{}) map[string]interface{} { - var msd map[string]interface{} - - for _, data := range m { - m := data.(map[string]interface{}) - - rgl := m["rule_groups"].(*schema.Set).List() - rgs := constructRuleGroupsList(rgl) - - msd = map[string]interface{}{ - "type": m["type"].(string), - "defaultAction": m["default_action"].(map[string]interface{}), - "ruleGroups": rgs, +func fmsPolicyUnmarshalManagedServiceData(policyData *fms.SecurityServicePolicyData) (map[string]interface{}, error) { + var policyStruct interface{} + var securityServicePolicy map[string]interface{} + var policyType string + switch *policyData.Type { + case "WAF": + policyType = "waf" + policyStruct = fmsPolicyManagedServiceDataWAF{} + if err := json.Unmarshal([]byte(*policyData.ManagedServiceData), &policyStruct); err != nil { + return nil, err } + case "WAFV2": + policyType = "wafv2" + policyStruct = fmsPolicyManagedServiceDataWAFV2{} + if err := json.Unmarshal([]byte(*policyData.ManagedServiceData), &policyStruct); err != nil { + return nil, err + } + case "SECURITY_GROUPS_COMMON": + policyType = "security_groups_common" + policyStruct = fmsPolicyManagedServiceDataSecurityGroupsCommon{} + if err := json.Unmarshal([]byte(*policyData.ManagedServiceData), &policyStruct); err != nil { + return nil, err + } + case "SECURITY_CONTENT_AUDIT": + policyType = "security_groups_content_audit" + policyStruct = fmsPolicyManagedServiceDataSecurityGroupsContentAudit{} + if err := 
json.Unmarshal([]byte(*policyData.ManagedServiceData), &policyStruct); err != nil { + return nil, err + } + case "SECURITY_GROUPS_USAGE_AUDIT": + policyType = "security_groups_usage_audit" + policyStruct = fmsPolicyManagedServiceDataSecurityGroupsUsageAudit{} + if err := json.Unmarshal([]byte(*policyData.ManagedServiceData), &policyStruct); err != nil { + return nil, err + } + case "SHIELD_ADVANCED": + policyType = "security_groups_usage_audit" + policyStruct = true } - return msd + var policyMap map[string]interface{} + err := mapstructure.Decode(policyStruct, policyMap) + securityServicePolicy[policyType] = policyMap + return securityServicePolicy, err } -func constructRuleGroupsList(rgs []interface{}) []map[string]interface{} { - ruleGroup := []map[string]interface{}{} - - for _, rg := range rgs { - log.Printf("[DEBUG] Rule_Group Keys: %s", rg) - - m := rg.(map[string]interface{}) - - ruleId := m["id"].(string) - overrideAction := m["override_action"].(map[string]interface{}) - - rule := map[string]interface{}{ - "id": ruleId, - "overrideAction": overrideAction, - } - - ruleGroup = append(ruleGroup, rule) +func fmsPolicyMarshalManagedServiceData(policyMap interface{}, policyStruct interface{}) (*string, error) { + var managedServiceData []byte + var err error + decoderConfig := mapstructure.DecoderConfig{ + WeaklyTypedInput: true, + Result: &policyStruct, + DecodeHook: terraformMapDecodeHelper(), + } + weakDecoder, err := mapstructure.NewDecoder(&decoderConfig) + if err != nil { + return nil, err + } + if err = weakDecoder.Decode(policyMap); err != nil { + return nil, err } - return ruleGroup + if managedServiceData, err = json.Marshal(policyStruct); err != nil { + return nil, err + } + return aws.String(string(managedServiceData)), nil } -func expandAwsFmsManagedSecurityData(set *schema.Set) *fms.SecurityServicePolicyData { - spd := set.List() - - securityServicePolicyData := &fms.SecurityServicePolicyData{} - - for _, t := range spd { - spdMap := t.(map[string]interface{}) - spdType := spdMap["type"].(string) - - securityServicePolicyData.Type = aws.String(spdType) - - switch spdType { - case "WAF": - if v, ok := spdMap["managed_service_data"]; !ok { - log.Printf("[DEBUG] Error Looking up Managed Service Data: %s", v) - } else { - spdPolicy := constructManagedServiceData(v.(*schema.Set).List()) +func expandSecurityServicePolicyData(policyData map[string]interface{}) (*fms.SecurityServicePolicyData, error) { + var managedServiceData *string + var err error + var SecurityPolicyType string + switch { + case len(policyData["waf"].([]interface{})) == 1: + policyStruct := fmsPolicyManagedServiceDataWAF{} + SecurityPolicyType = "WAF" + managedServiceData, err = fmsPolicyMarshalManagedServiceData(policyData["waf"].([]interface{})[0], policyStruct) + + case len(policyData["wafv2"].([]interface{})) == 1: + policyStruct := fmsPolicyManagedServiceDataWAFV2{} + SecurityPolicyType = "WAFV2" + managedServiceData, err = fmsPolicyMarshalManagedServiceData(policyData["wafv2"].([]interface{})[0], policyStruct) + + case policyData["shield_advanced"].(bool): + SecurityPolicyType = "SHIELD_ADVANCED" + managedServiceData = aws.String("{}") + + case len(policyData["security_groups_common"].([]interface{})) == 1: + policyStruct := fmsPolicyManagedServiceDataSecurityGroupsCommon{} + SecurityPolicyType = "SECURITY_GROUPS_COMMON" + managedServiceData, err = fmsPolicyMarshalManagedServiceData(policyData["security_groups_common"].([]interface{})[0], policyStruct) + + case 
len(policyData["security_groups_content_audit"].([]interface{})) == 1: + policyStruct := fmsPolicyManagedServiceDataSecurityGroupsContentAudit{} + SecurityPolicyType = "SECURITY_GROUPS_CONTENT_AUDIT" + managedServiceData, err = fmsPolicyMarshalManagedServiceData(policyData["security_groups_content_audit"].([]interface{})[0], policyStruct) + + case len(policyData["security_groups_usage_audit"].([]interface{})) == 1: + policyStruct := fmsPolicyManagedServiceDataSecurityGroupsUsageAudit{} + SecurityPolicyType = "SECURITY_GROUPS_USAGE_AUDIT" + managedServiceData, err = fmsPolicyMarshalManagedServiceData(policyData["security_groups_usage_audit"].([]interface{})[0], policyStruct) + } - js, err := json.Marshal(spdPolicy) - if err != nil { - log.Printf("[DEBUG] JSON Error: %s", err) - } + return &fms.SecurityServicePolicyData{ + Type: aws.String(SecurityPolicyType), + ManagedServiceData: managedServiceData, + }, err +} - securityServicePolicyData.ManagedServiceData = aws.String(string(js)) +func expandFMSPolicyMap(set *schema.Set) map[string][]*string { + fmsPolicyMap := map[string][]*string{} + if set.Len() > 0 { + for key, listValue := range set.List()[0].(map[string]interface{}) { + for _, value := range listValue.([]interface{}) { + fmsPolicyMap[key] = append(fmsPolicyMap[key], aws.String(value.(string))) } } } - - return securityServicePolicyData + return fmsPolicyMap } -func flattenFMSAccountMap(accountMap map[string][]*string) *schema.Set { - eMap := map[string]interface{}{} - - if _, ok := eMap["account"]; ok { - for _, v := range accountMap["ACCOUNT"] { - eMap["account"] = append(eMap["account"].([]*string), v) +func flattenFMSPolicyMap(fmsPolicyMap map[string][]*string) []interface{} { + flatPolicyMap := map[string]interface{}{} + + for key, value := range fmsPolicyMap { + switch key { + case "account": + flatPolicyMap["account"] = value + case "orgunit": + flatPolicyMap["orgunit"] = value + default: + log.Printf("[WARNING] Unexpected key (%q) found in FMS policy", key) } } - s := schema.NewSet(fmsPolicyDataHash, []interface{}{}) - s.Add(eMap) - - return s + return []interface{}{flatPolicyMap} } func flattenFMSResourceTags(resourceTags []*fms.ResourceTag) map[string]interface{} { @@ -421,67 +768,6 @@ func flattenFMSResourceTags(resourceTags []*fms.ResourceTag) map[string]interfac return resTags } -func flattenFmsManagedServiceData(sspdMsd map[string]interface{}) *schema.Set { - msdSS := schema.NewSet(fmsPolicyDataHash, []interface{}{}) - - msdData := map[string]interface{}{ - "type": sspdMsd["type"].(string), - } - - if sspdMsd["defaultAction"] != nil { - msdData["default_action"] = sspdMsd["defaultAction"] - } - - msdData["rule_groups"] = flattenFmsMsdRuleGroupsList(sspdMsd) - - msdSS.Add(msdData) - - return msdSS -} - -func flattenFmsMsdRuleGroupsList(sspdMsd map[string]interface{}) *schema.Set { - ruleGroupsSet := schema.NewSet(fmsPolicyDataHash, []interface{}{}) - if sspdMsd["ruleGroups"] != nil { - for _, v := range sspdMsd["ruleGroups"].([]interface{}) { - - rg := v.(map[string]interface{}) - - rule := map[string]interface{}{ - "id": rg["id"].(string), - "override_action": rg["overrideAction"].(map[string]interface{}), - } - - ruleGroupsSet.Add(rule) - } - } - return ruleGroupsSet -} - -func flattenFmsSecurityServicePolicyData(spd *fms.SecurityServicePolicyData) *schema.Set { - s := schema.NewSet(fmsPolicyDataHash, []interface{}{}) - - sspd := map[string]interface{}{ - "type": aws.StringValue(spd.Type), - } - - var policy map[string]interface{} - - if spd.ManagedServiceData != nil 
{ - - msd := []byte(aws.StringValue(spd.ManagedServiceData)) - - if err := json.Unmarshal(msd, &policy); err != nil { - panic(err) - } - - sspd["managed_service_data"] = flattenFmsManagedServiceData(policy) - } - - s.Add(sspd) - - return s -} - func constructResourceTags(rTags interface{}) []*fms.ResourceTag { var rTagList []*fms.ResourceTag @@ -492,17 +778,3 @@ func constructResourceTags(rTags interface{}) []*fms.ResourceTag { return rTagList } - -func fmsPolicyDataHash(v interface{}) int { - var buf strings.Builder - - m := v.(map[string]interface{}) - - if _, ok := m["Id"]; ok { - buf.WriteString(fmt.Sprintf("%s", m["Id"])) - } else { - buf.WriteString(fmt.Sprintf("%s-", m)) - } - - return hashcode.String(buf.String()) -} diff --git a/aws/resource_aws_fms_policy_test.go b/aws/resource_aws_fms_policy_test.go index 26ec196b463..9194a4398ad 100644 --- a/aws/resource_aws_fms_policy_test.go +++ b/aws/resource_aws_fms_policy_test.go @@ -7,34 +7,11 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/fms" - "github.com/hashicorp/terraform/helper/acctest" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func TestAccAWSFmsPolicy_importBasic(t *testing.T) { - resourceName := "aws_fms_policy.test" - fmsPolicyName := fmt.Sprintf("tf-fms-%s", acctest.RandString(5)) - wafRuleGroupName := fmt.Sprintf("tf-waf-rg-%s", acctest.RandString(5)) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsFmsPolicyDestroy, - Steps: []resource.TestStep{ - { - Config: testAccFmsPolicyConfig(fmsPolicyName, wafRuleGroupName), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"policy_update_token"}, - }, - }, - }) -} - func TestAccAWSFmsPolicy_basic(t *testing.T) { fmsPolicyName := fmt.Sprintf("tf-fms-%s", acctest.RandString(5)) wafRuleGroupName := fmt.Sprintf("tf-waf-rg-%s", acctest.RandString(5)) @@ -53,6 +30,11 @@ func TestAccAWSFmsPolicy_basic(t *testing.T) { resource.TestCheckResourceAttr("aws_fms_policy.test", "security_service_policy_data.#", "1"), ), }, + { + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"policy_update_token"}, + }, }, }) } @@ -96,10 +78,10 @@ func testAccCheckAwsFmsPolicyDestroy(s *terraform.State) error { continue } - policyId := rs.Primary.Attributes["id"] + policyID := rs.Primary.Attributes["id"] input := &fms.GetPolicyInput{ - PolicyId: aws.String(policyId), + PolicyId: aws.String(policyID), } resp, err := conn.GetPolicy(input) @@ -132,36 +114,38 @@ func testAccCheckAwsFmsPolicyExists(name string) resource.TestCheckFunc { func testAccFmsPolicyConfig(name string, group string) string { return fmt.Sprintf(` -resource "aws_fms_policy" "test" { - exclude_resource_tags = false - name = %[1]q - remediation_enabled = false - resource_type_list = ["AWS::ElasticLoadBalancingV2::LoadBalancer"] - - security_service_policy_data { - type = "WAF" - - managed_service_data { - type = "WAF" - - rule_groups { - id = "${aws_wafregional_rule_group.test.id}" - - override_action={ - type = "COUNT" - } - } - - default_action={ - type = "BLOCK" - } - } +#resource "aws_fms_policy" "test" { +# exclude_resource_tags = false +# name = %[1]q +# 
remediation_enabled = false +# resource_type_list = ["AWS::ElasticLoadBalancingV2::LoadBalancer"] +# +# security_service_policy_data { +# waf { +# rule_groups {id = aws_waf_rule.wafrule.id} +# } +# } +#} + +resource "aws_waf_ipset" "ipset" { + name = "tfIPSet" + + ip_set_descriptors { + type = "IPV4" + value = "192.0.7.0/24" } } -resource "aws_wafregional_rule_group" "test" { - metric_name = "MyTest" +resource "aws_waf_rule" "wafrule" { + depends_on = [aws_waf_ipset.ipset] name = %[2]q + metric_name = "tfWAFRule" + + predicates { + data_id = aws_waf_ipset.ipset.id + negated = false + type = "IPMatch" + } } `, name, group) } @@ -174,26 +158,24 @@ resource "aws_fms_policy" "test" { remediation_enabled = false resource_type_list = ["AWS::ElasticLoadBalancingV2::LoadBalancer"] - security_service_policy_data { - type = "WAF" - - managed_service_data { - type = "WAF" - - rule_groups { - id = "${aws_wafregional_rule_group.test.id}" - - override_action={ - type = "COUNT" - } - } - - default_action={ - type = "BLOCK" - } - } - } - resource_tags = { + security_service_policy_data = < Date: Thu, 7 Jan 2021 23:10:21 -0800 Subject: [PATCH 0458/1212] Code review changes --- aws/resource_aws_fms_policy.go | 563 +++--------------------- aws/resource_aws_fms_policy_test.go | 198 ++++++--- website/docs/r/fms_policy.html.markdown | 41 +- 3 files changed, 234 insertions(+), 568 deletions(-) diff --git a/aws/resource_aws_fms_policy.go b/aws/resource_aws_fms_policy.go index a1ac82c9b3d..fccddfa43b4 100644 --- a/aws/resource_aws_fms_policy.go +++ b/aws/resource_aws_fms_policy.go @@ -1,16 +1,13 @@ package aws import ( - "encoding/json" "fmt" "log" - "reflect" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/fms" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/mitchellh/mapstructure" ) func resourceAwsFmsPolicy() *schema.Resource { @@ -27,7 +24,6 @@ func resourceAwsFmsPolicy() *schema.Resource { "name": { Type: schema.TypeString, Required: true, - ForceNew: true, }, "delete_all_policy_resources": { @@ -39,13 +35,13 @@ func resourceAwsFmsPolicy() *schema.Resource { "exclude_resource_tags": { Type: schema.TypeBool, Required: true, - ForceNew: true, }, "exclude_map": { - Type: schema.TypeSet, - MaxItems: 1, - Optional: true, + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "account": { @@ -67,9 +63,10 @@ func resourceAwsFmsPolicy() *schema.Resource { }, "include_map": { - Type: schema.TypeSet, - MaxItems: 1, - Optional: true, + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "account": { @@ -92,13 +89,12 @@ func resourceAwsFmsPolicy() *schema.Resource { "remediation_enabled": { Type: schema.TypeBool, - Required: true, + Optional: true, }, "resource_type_list": { Type: schema.TypeSet, Required: true, - ForceNew: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"AWS::ApiGateway::Stage", "AWS::ElasticLoadBalancingV2::LoadBalancer", "AWS::CloudFront::Distribution"}, false), @@ -115,20 +111,19 @@ func resourceAwsFmsPolicy() *schema.Resource { "security_service_policy_data": { Type: schema.TypeList, - MaxItems: 1, Required: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - 
"waf": wafSchema(), - "wafv2": wafV2Schema(), - "shield_advanced": { - Type: schema.TypeBool, - ExactlyOneOf: []string{"security_service_policy_data.0.waf", "security_service_policy_data.0.wafv2", "security_service_policy_data.0.shield_advanced", "security_service_policy_data.0.security_groups_common", "security_service_policy_data.0.security_groups_content_audit", "security_service_policy_data.0.security_groups_usage_audit"}, - Optional: true, + "type": { + Type: schema.TypeString, + Required: true, + }, + "managed_service_data": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: suppressEquivalentJsonDiffs, }, - "security_groups_common": securityGroupsCommon(), - "security_groups_content_audit": securityGroupsContentAudit(), - "security_groups_usage_audit": securityGroupsUsageAudit(), }, }, }, @@ -140,322 +135,6 @@ func resourceAwsFmsPolicy() *schema.Resource { } } -func securityGroupsCommon() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - MaxItems: 1, - ExactlyOneOf: []string{"security_service_policy_data.0.waf", "security_service_policy_data.0.wafv2", "security_service_policy_data.0.shield_advanced", "security_service_policy_data.0.security_groups_common", "security_service_policy_data.0.security_groups_content_audit", "security_service_policy_data.0.security_groups_usage_audit"}, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "revert_manual_security_group_changes": { - Type: schema.TypeBool, - Optional: true, - }, - "exclusive_resource_security_group_management": { - Type: schema.TypeBool, - Optional: true, - }, - "security_groups": { - Type: schema.TypeSet, - Required: true, - Elem: schema.TypeString, - }, - "remediation_enabled": { - Type: schema.TypeBool, - Optional: true, - }, - "resource_type": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - } -} - -func securityGroupsContentAudit() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - MaxItems: 1, - ExactlyOneOf: []string{"security_service_policy_data.0.waf", "security_service_policy_data.0.wafv2", "security_service_policy_data.0.shield_advanced", "security_service_policy_data.0.security_groups_common", "security_service_policy_data.0.security_groups_content_audit", "security_service_policy_data.0.security_groups_usage_audit"}, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "revert_manual_security_group_changes": { - Type: schema.TypeBool, - Optional: true, - }, - "security_group_action": { - Type: schema.TypeString, - Required: true, - }, - "security_groups": { - Type: schema.TypeSet, - Required: true, - Elem: schema.TypeString, - }, - "remediation_enabled": { - Type: schema.TypeBool, - Optional: true, - }, - "resource_type": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - } -} - -func securityGroupsUsageAudit() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - MaxItems: 1, - ExactlyOneOf: []string{"security_service_policy_data.0.waf", "security_service_policy_data.0.wafv2", "security_service_policy_data.0.shield_advanced", "security_service_policy_data.0.security_groups_common", "security_service_policy_data.0.security_groups_content_audit", "security_service_policy_data.0.security_groups_usage_audit"}, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "revert_manual_security_group_changes": { - Type: schema.TypeBool, - Optional: true, - }, - "security_group_action": { - Type: schema.TypeString, - Required: true, - }, - 
"security_groups": { - Type: schema.TypeSet, - Required: true, - Elem: schema.TypeString, - }, - "remediation_enabled": { - Type: schema.TypeBool, - Optional: true, - }, - "resource_type": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - } -} - -func wafSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - MaxItems: 1, - ExactlyOneOf: []string{"security_service_policy_data.0.waf", "security_service_policy_data.0.wafv2", "security_service_policy_data.0.shield_advanced", "security_service_policy_data.0.security_groups_common", "security_service_policy_data.0.security_groups_content_audit", "security_service_policy_data.0.security_groups_usage_audit"}, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "rule_groups": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Required: true, - }, - "override_action": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - }, - "default_action": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func wafV2Schema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - MaxItems: 1, - ExactlyOneOf: []string{"security_service_policy_data.0.waf", "security_service_policy_data.0.wafv2", "security_service_policy_data.0.shield_advanced", "security_service_policy_data.0.security_groups_common", "security_service_policy_data.0.security_groups_content_audit", "security_service_policy_data.0.security_groups_usage_audit"}, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "preprocess_rule_groups": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "rule_group_arn": { - Type: schema.TypeString, - Required: true, - }, - "override_action": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "managed_rule_group_identifier": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "version": { - Type: schema.TypeString, - Optional: true, - }, - "vendor_name": { - Type: schema.TypeString, - Optional: true, - }, - "managed_rule_group_name": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "rule_group_type": { - Type: schema.TypeString, - Optional: true, - }, - "exclude_rule_groups": { - Type: schema.TypeSet, - Optional: true, - Elem: schema.TypeString, - }, - "post_process_rule_groups": { - Type: schema.TypeSet, - Optional: true, - Elem: schema.TypeString, - }, - "default_action": { - Type: schema.TypeString, - Optional: true, - }, - "override_customer_web_acl_association": { - Type: schema.TypeBool, - Optional: true, - }, - "logging_configuration": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "log_destination_configs": { - Type: schema.TypeSet, - Required: true, - Elem: schema.TypeString, - }, - "redacted_fields": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "redacted_field_type": { - Type: schema.TypeString, - Required: true, - }, - "redacted_field_value": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -// Shared structs -type fmsPolicyBasicType struct { - Type 
string `json:"type" mapstructure:"type"` -} - -// WAF structs -type fmsPolicyManagedServiceDataWAF struct { - Type string `json:"type" mapstructure:"type"` - RuleGroups []fmsPolicyRuleGroup `json:"ruleGroups" mapstructure:"rule_groups"` -} - -type fmsPolicyRuleGroup struct { - ID string `json:"id" mapstructure:"id"` - OverrrideAction fmsPolicyBasicType `json:"overrideAction" mapstructure:"override_action"` -} - -// WAFv2 structs -type fmsPolicyManagedServiceDataWAFV2 struct { - Type string `json:"type" mapstructure:"type"` - PreProcessRuleGroups []fmsPolicyProcessRuleGroup `json:"preProcessRuleGroups" mapstructure:"preprocess_rule_groups"` - PostProcessRuleGroups []fmsPolicyProcessRuleGroup `json:"postProcessRuleGroups" mapstructure:"postprocess_rule_groups"` - DefaultAction fmsPolicyBasicType `json:"defaultAction" mapstructure:"default_action"` - OverrideCustomerWebACLAssociation bool `json:"overrideCustomerWebACLAssociation" mapstructure:"override_customer_web_acl_association"` - LoggingConfiguration fmsPolicyLoggingConfiguration `json:"loggingConfiguration" mapstructure:"logging_configuration"` -} - -type fmsPolicyLoggingConfiguration struct { - LogDestinationConfigs []string `json:"logDestinationConfigs" mapstructure:"log_destination_configs"` - RedactedFields []fmsPolicyRedactedField `json:"redactedFields" mapstructure:"redacted_fields"` -} - -type fmsPolicyRedactedField struct { - RedactedFieldType string `json:"redactedFieldType" mapstructure:"redacted_field_type"` - RedactedFieldValue string `json:"redactedFieldValue" mapstructure:"redacted_field_value"` -} - -type fmsPolicyRuleGroupIdentifier struct { - Version string `json:"version" mapstructure:"version"` - VendorName string `json:"vendorName" mapstructure:"vendor_name"` - ManagedRuleGroupName string `json:"managedRuleGroupName" mapstructure:"managed_rule_group_name"` -} - -type fmsPolicyProcessRuleGroup struct { - RuleGroupARN string `json:"ruleGroupArn" mapstructure:"rule_group_arn"` - OverrideAction fmsPolicyBasicType `json:"overrideAction" mapstructure:"override_action"` - ManagedRuleGroupIdentifier fmsPolicyRuleGroupIdentifier `json:"managedRuleGroupIdentifier" mapstructure:"managed_rule_group_identifier"` - RuleGroupType string `json:"ruleGroupType" mapstructure:"rule_group_type"` - ExcludeRules []string `json:"excludeRules" mapstructure:"excluded_rules"` -} - -// SECURITY_GROUPS_COMMON structs -type fmsPolicyManagedServiceDataSecurityGroupsCommon struct { - Type string `json:"type" mapstructure:"type"` - RevertManualSecurityGroupChanges bool `json:"revertManualSecurityGroupChanges" mapstructure:"revert_manual_security_group_changes"` - ExclusiveResourceSecurityGroupManagement bool `json:"exclusiveResourceSecurityGroupManagement" mapstructure:"exclusive_resource_security_group_management"` - SecurityGroups []fmsPolicyRuleGroup `json:"ruleGroups" mapstructure:"security_groups"` -} - -// SECURITY_GROUPS_CONTENT_AUDIT structs -type fmsPolicyManagedServiceDataSecurityGroupsContentAudit struct { - Type string `json:"type" mapstructure:"type"` - SecurityGroups []fmsPolicyRuleGroup `json:"ruleGroups" mapstructure:"security_groups"` - SecurityGroupAction fmsPolicyBasicType `json:"securityGroupAction" mapstructure:"security_group_action"` -} - -// SECURITY_GROUPS_USAGE_AUDIT structs -type fmsPolicyManagedServiceDataSecurityGroupsUsageAudit struct { - Type string `json:"type" mapstructure:"type"` - DeleteUnusedSecurityGroups bool `json:"deleteUnusedSecurityGroups" mapstructure:"delete_unused_security_groups"` - 
CoalesceRedundantSecurityGroups bool `json:"coalesceRedundantSecurityGroups" mapstructure:"coalesce_redundant_security_groups"` -} - func resourceAwsFmsPolicyCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).fmsconn @@ -467,11 +146,10 @@ func resourceAwsFmsPolicyCreate(d *schema.ResourceData, meta interface{}) error ExcludeResourceTags: aws.Bool(d.Get("exclude_resource_tags").(bool)), } - if v, ok := d.GetOk("security_service_policy_data"); ok { - var err error - if fmsPolicy.SecurityServicePolicyData, err = expandSecurityServicePolicyData(v.([]interface{})[0].(map[string]interface{})); err != nil { - return err - } + securityServicePolicy := d.Get("security_service_policy_data").([]interface{})[0].(map[string]interface{}) + fmsPolicy.SecurityServicePolicyData = &fms.SecurityServicePolicyData{ + ManagedServiceData: aws.String(securityServicePolicy["managed_service_data"].(string)), + Type: aws.String(securityServicePolicy["type"].(string)), } if rTags, tagsOk := d.GetOk("resource_tags"); tagsOk { @@ -479,11 +157,11 @@ func resourceAwsFmsPolicyCreate(d *schema.ResourceData, meta interface{}) error } if v, ok := d.GetOk("include_map"); ok { - fmsPolicy.IncludeMap = expandFMSPolicyMap(v.(*schema.Set)) + fmsPolicy.IncludeMap = expandFMSPolicyMap(v.([]interface{})) } if v, ok := d.GetOk("exclude_map"); ok { - fmsPolicy.ExcludeMap = expandFMSPolicyMap(v.(*schema.Set)) + fmsPolicy.ExcludeMap = expandFMSPolicyMap(v.([]interface{})) } params := &fms.PutPolicyInput{ @@ -530,17 +208,23 @@ func resourceAwsFmsPolicyRead(d *schema.ResourceData, meta interface{}) error { if err = d.Set("exclude_map", flattenFMSPolicyMap(resp.Policy.ExcludeMap)); err != nil { return err } - d.Set("include_map", flattenFMSPolicyMap(resp.Policy.IncludeMap)) + if err = d.Set("include_map", flattenFMSPolicyMap(resp.Policy.IncludeMap)); err != nil { + return err + } d.Set("remediation_enabled", aws.BoolValue(resp.Policy.RemediationEnabled)) - d.Set("resource_type_list", resp.Policy.ResourceTypeList) + if err = d.Set("resource_type_list", resp.Policy.ResourceTypeList); err != nil { + return err + } d.Set("policy_update_token", aws.StringValue(resp.Policy.PolicyUpdateToken)) - d.Set("resource_tags", flattenFMSResourceTags(resp.Policy.ResourceTags)) - - securityServicePolicyData, err := fmsPolicyUnmarshalManagedServiceData(resp.Policy.SecurityServicePolicyData) - if err != nil { + if err = d.Set("resource_tags", flattenFMSResourceTags(resp.Policy.ResourceTags)); err != nil { return err } - if err = d.Set("security_service_policy_data", securityServicePolicyData); err != nil { + + securityServicePolicy := []map[string]string{{ + "type": *resp.Policy.SecurityServicePolicyData.Type, + "managed_service_data": *resp.Policy.SecurityServicePolicyData.ManagedServiceData, + }} + if err = d.Set("security_service_policy_data", securityServicePolicy); err != nil { return err } @@ -560,35 +244,23 @@ func resourceAwsFmsPolicyUpdate(d *schema.ResourceData, meta interface{}) error ExcludeResourceTags: aws.Bool(d.Get("exclude_resource_tags").(bool)), } - requestUpdate := false + fmsPolicy.ExcludeMap = expandFMSPolicyMap(d.Get("exclude_map").([]interface{})) - if d.HasChange("exclude_map") { - fmsPolicy.ExcludeMap = expandFMSPolicyMap(d.Get("exclude_map").(*schema.Set)) - requestUpdate = true - } + fmsPolicy.IncludeMap = expandFMSPolicyMap(d.Get("include_map").([]interface{})) - if d.HasChange("include_map") { - fmsPolicy.IncludeMap = expandFMSPolicyMap(d.Get("include_map").(*schema.Set)) - requestUpdate = true - } 
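
The rewrite below drops the per-field HasChange bookkeeping: fms.PutPolicy is a full replace, so every update must carry the complete policy together with the PolicyUpdateToken from the latest read. A minimal sketch of that contract, assuming a hypothetical policyFromState helper and the aws, fms, and schema packages already imported by this file:

func putFmsPolicy(conn *fms.FMS, d *schema.ResourceData) error {
	// Fetch the current update token first; PutPolicy rejects stale tokens.
	resp, err := conn.GetPolicy(&fms.GetPolicyInput{PolicyId: aws.String(d.Id())})
	if err != nil {
		return err
	}
	policy := policyFromState(d) // hypothetical: rebuilds the full fms.Policy from configuration
	policy.PolicyId = aws.String(d.Id())
	policy.PolicyUpdateToken = resp.Policy.PolicyUpdateToken
	_, err = conn.PutPolicy(&fms.PutPolicyInput{Policy: policy})
	return err
}
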
+ fmsPolicy.ResourceTags = constructResourceTags(d.Get("resource_tags")) - if d.HasChange("resource_tags") { - fmsPolicy.ResourceTags = constructResourceTags(d.Get("resource_tags")) - requestUpdate = true + securityServicePolicy := d.Get("security_service_policy_data").([]interface{})[0].(map[string]interface{}) + fmsPolicy.SecurityServicePolicyData = &fms.SecurityServicePolicyData{ + ManagedServiceData: aws.String(securityServicePolicy["managed_service_data"].(string)), + Type: aws.String(securityServicePolicy["type"].(string)), } - if requestUpdate { - var err error - if fmsPolicy.SecurityServicePolicyData, err = expandSecurityServicePolicyData(d.Get("security_service_policy_data").(*schema.Set).List()[0].(map[string]interface{})); err != nil { - return err - } + params := &fms.PutPolicyInput{Policy: fmsPolicy} + _, err := conn.PutPolicy(params) - params := &fms.PutPolicyInput{Policy: fmsPolicy} - _, err = conn.PutPolicy(params) - - if err != nil { - return fmt.Errorf("Error modifying FMS Policy Rule: %s", err) - } + if err != nil { + return fmt.Errorf("Error modifying FMS Policy Rule: %s", err) } return resourceAwsFmsPolicyRead(d, meta) @@ -614,128 +286,23 @@ func resourceAwsFmsPolicyDelete(d *schema.ResourceData, meta interface{}) error return nil } -func terraformMapDecodeHelper() mapstructure.DecodeHookFuncType { - return func(inType reflect.Type, outType reflect.Type, value interface{}) (interface{}, error) { - if inType == reflect.SliceOf(outType) && reflect.ValueOf(value).Len() == 1 { - return reflect.ValueOf(value).Index(0).Interface(), nil - } - return value, nil - } -} - -func fmsPolicyUnmarshalManagedServiceData(policyData *fms.SecurityServicePolicyData) (map[string]interface{}, error) { - var policyStruct interface{} - var securityServicePolicy map[string]interface{} - var policyType string - switch *policyData.Type { - case "WAF": - policyType = "waf" - policyStruct = fmsPolicyManagedServiceDataWAF{} - if err := json.Unmarshal([]byte(*policyData.ManagedServiceData), &policyStruct); err != nil { - return nil, err - } - case "WAFV2": - policyType = "wafv2" - policyStruct = fmsPolicyManagedServiceDataWAFV2{} - if err := json.Unmarshal([]byte(*policyData.ManagedServiceData), &policyStruct); err != nil { - return nil, err - } - case "SECURITY_GROUPS_COMMON": - policyType = "security_groups_common" - policyStruct = fmsPolicyManagedServiceDataSecurityGroupsCommon{} - if err := json.Unmarshal([]byte(*policyData.ManagedServiceData), &policyStruct); err != nil { - return nil, err - } - case "SECURITY_CONTENT_AUDIT": - policyType = "security_groups_content_audit" - policyStruct = fmsPolicyManagedServiceDataSecurityGroupsContentAudit{} - if err := json.Unmarshal([]byte(*policyData.ManagedServiceData), &policyStruct); err != nil { - return nil, err - } - case "SECURITY_GROUPS_USAGE_AUDIT": - policyType = "security_groups_usage_audit" - policyStruct = fmsPolicyManagedServiceDataSecurityGroupsUsageAudit{} - if err := json.Unmarshal([]byte(*policyData.ManagedServiceData), &policyStruct); err != nil { - return nil, err +func expandFMSPolicyMap(set []interface{}) map[string][]*string { + fmsPolicyMap := map[string][]*string{} + if len(set) > 0 { + if _, ok := set[0].(map[string]interface{}); !ok { + return fmsPolicyMap } - case "SHIELD_ADVANCED": - policyType = "security_groups_usage_audit" - policyStruct = true - } - var policyMap map[string]interface{} - err := mapstructure.Decode(policyStruct, policyMap) - securityServicePolicy[policyType] = policyMap - return securityServicePolicy, err -} 
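
The marshal and unmarshal helpers removed here are superseded by a plain managed_service_data JSON attribute whose spurious diffs are suppressed when the old and new values decode to the same document. A minimal sketch of that comparison, assuming the provider's suppressEquivalentJsonDiffs behaves along these lines:

// jsonSemanticallyEqual reports whether two JSON strings decode to the same
// value, ignoring whitespace and object key order. Assumes encoding/json and
// reflect are imported.
func jsonSemanticallyEqual(old, new string) bool {
	var oldVal, newVal interface{}
	if err := json.Unmarshal([]byte(old), &oldVal); err != nil {
		return false
	}
	if err := json.Unmarshal([]byte(new), &newVal); err != nil {
		return false
	}
	return reflect.DeepEqual(oldVal, newVal)
}
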
- -func fmsPolicyMarshalManagedServiceData(policyMap interface{}, policyStruct interface{}) (*string, error) { - var managedServiceData []byte - var err error - decoderConfig := mapstructure.DecoderConfig{ - WeaklyTypedInput: true, - Result: &policyStruct, - DecodeHook: terraformMapDecodeHelper(), - } - weakDecoder, err := mapstructure.NewDecoder(&decoderConfig) - if err != nil { - return nil, err - } - if err = weakDecoder.Decode(policyMap); err != nil { - return nil, err - } - if managedServiceData, err = json.Marshal(policyStruct); err != nil { - return nil, err - } - return aws.String(string(managedServiceData)), nil -} - -func expandSecurityServicePolicyData(policyData map[string]interface{}) (*fms.SecurityServicePolicyData, error) { - var managedServiceData *string - var err error - var SecurityPolicyType string - switch { - case len(policyData["waf"].([]interface{})) == 1: - policyStruct := fmsPolicyManagedServiceDataWAF{} - SecurityPolicyType = "WAF" - managedServiceData, err = fmsPolicyMarshalManagedServiceData(policyData["waf"].([]interface{})[0], policyStruct) - - case len(policyData["wafv2"].([]interface{})) == 1: - policyStruct := fmsPolicyManagedServiceDataWAFV2{} - SecurityPolicyType = "WAFV2" - managedServiceData, err = fmsPolicyMarshalManagedServiceData(policyData["wafv2"].([]interface{})[0], policyStruct) - - case policyData["shield_advanced"].(bool): - SecurityPolicyType = "SHIELD_ADVANCED" - managedServiceData = aws.String("{}") - - case len(policyData["security_groups_common"].([]interface{})) == 1: - policyStruct := fmsPolicyManagedServiceDataSecurityGroupsCommon{} - SecurityPolicyType = "SECURITY_GROUPS_COMMON" - managedServiceData, err = fmsPolicyMarshalManagedServiceData(policyData["security_groups_common"].([]interface{})[0], policyStruct) - - case len(policyData["security_groups_content_audit"].([]interface{})) == 1: - policyStruct := fmsPolicyManagedServiceDataSecurityGroupsContentAudit{} - SecurityPolicyType = "SECURITY_GROUPS_CONTENT_AUDIT" - managedServiceData, err = fmsPolicyMarshalManagedServiceData(policyData["security_groups_content_audit"].([]interface{})[0], policyStruct) - - case len(policyData["security_groups_usage_audit"].([]interface{})) == 1: - policyStruct := fmsPolicyManagedServiceDataSecurityGroupsUsageAudit{} - SecurityPolicyType = "SECURITY_GROUPS_USAGE_AUDIT" - managedServiceData, err = fmsPolicyMarshalManagedServiceData(policyData["security_groups_usage_audit"].([]interface{})[0], policyStruct) - } - - return &fms.SecurityServicePolicyData{ - Type: aws.String(SecurityPolicyType), - ManagedServiceData: managedServiceData, - }, err -} + for key, listValue := range set[0].(map[string]interface{}) { + var flatKey string + switch key { + case "account": + flatKey = "ACCOUNT" + case "orgunit": + flatKey = "ORG_UNIT" + } -func expandFMSPolicyMap(set *schema.Set) map[string][]*string { - fmsPolicyMap := map[string][]*string{} - if set.Len() > 0 { - for key, listValue := range set.List()[0].(map[string]interface{}) { - for _, value := range listValue.([]interface{}) { - fmsPolicyMap[key] = append(fmsPolicyMap[key], aws.String(value.(string))) + for _, value := range listValue.(*schema.Set).List() { + fmsPolicyMap[flatKey] = append(fmsPolicyMap[flatKey], aws.String(value.(string))) } } } @@ -747,9 +314,9 @@ func flattenFMSPolicyMap(fmsPolicyMap map[string][]*string) []interface{} { for key, value := range fmsPolicyMap { switch key { - case "account": + case "ACCOUNT": flatPolicyMap["account"] = value - case "orgunit": + case "ORG_UNIT": 
flatPolicyMap["orgunit"] = value default: log.Printf("[WARNING] Unexpected key (%q) found in FMS policy", key) diff --git a/aws/resource_aws_fms_policy_test.go b/aws/resource_aws_fms_policy_test.go index 9194a4398ad..db625adcf00 100644 --- a/aws/resource_aws_fms_policy_test.go +++ b/aws/resource_aws_fms_policy_test.go @@ -31,9 +31,64 @@ func TestAccAWSFmsPolicy_basic(t *testing.T) { ), }, { + ResourceName: "aws_fms_policy.test", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"policy_update_token"}, + ImportStateVerifyIgnore: []string{"policy_update_token", "delete_all_policy_resources"}, + }, + }, + }) +} + +func TestAccAWSFmsPolicy_includeMap(t *testing.T) { + fmsPolicyName := fmt.Sprintf("tf-fms-%s", acctest.RandString(5)) + wafRuleGroupName := fmt.Sprintf("tf-waf-rg-%s", acctest.RandString(5)) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsFmsPolicyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccFmsPolicyConfig_include(fmsPolicyName, wafRuleGroupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsFmsPolicyExists("aws_fms_policy.test"), + testAccMatchResourceAttrRegionalARN("aws_fms_policy.test", "arn", "fms", regexp.MustCompile(`policy/`)), + resource.TestCheckResourceAttr("aws_fms_policy.test", "name", fmsPolicyName), + resource.TestCheckResourceAttr("aws_fms_policy.test", "security_service_policy_data.#", "1"), + ), + }, + { + ResourceName: "aws_fms_policy.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"policy_update_token", "delete_all_policy_resources"}, + }, + }, + }) +} + +func TestAccAWSFmsPolicy_update(t *testing.T) { + fmsPolicyName := fmt.Sprintf("tf-fms-%s", acctest.RandString(5)) + fmsPolicyName2 := fmt.Sprintf("tf-fms-%s2", acctest.RandString(5)) + wafRuleGroupName := fmt.Sprintf("tf-waf-rg-%s", acctest.RandString(5)) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsFmsPolicyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccFmsPolicyConfig(fmsPolicyName, wafRuleGroupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsFmsPolicyExists("aws_fms_policy.test"), + testAccMatchResourceAttrRegionalARN("aws_fms_policy.test", "arn", "fms", regexp.MustCompile(`policy/`)), + resource.TestCheckResourceAttr("aws_fms_policy.test", "name", fmsPolicyName), + resource.TestCheckResourceAttr("aws_fms_policy.test", "security_service_policy_data.#", "1"), + ), + }, + { + Config: testAccFmsPolicyConfig_updated(fmsPolicyName2, wafRuleGroupName), }, }, }) @@ -114,38 +169,90 @@ func testAccCheckAwsFmsPolicyExists(name string) resource.TestCheckFunc { func testAccFmsPolicyConfig(name string, group string) string { return fmt.Sprintf(` -#resource "aws_fms_policy" "test" { -# exclude_resource_tags = false -# name = %[1]q -# remediation_enabled = false -# resource_type_list = ["AWS::ElasticLoadBalancingV2::LoadBalancer"] -# -# security_service_policy_data { -# waf { -# rule_groups {id = aws_waf_rule.wafrule.id} -# } -# } -#} - -resource "aws_waf_ipset" "ipset" { - name = "tfIPSet" - - ip_set_descriptors { - type = "IPV4" - value = "192.0.7.0/24" - } +resource "aws_fms_policy" "test" { + exclude_resource_tags = false + name = %[1]q + remediation_enabled = false + resource_type_list = ["AWS::ElasticLoadBalancingV2::LoadBalancer"] + + exclude_map { + account = [ 
data.aws_organizations_organization.example.accounts[0].id ] + } + + security_service_policy_data { + type = "WAF" + managed_service_data = "{\"type\": \"WAF\", \"ruleGroups\": [{\"id\":\"${aws_wafregional_rule_group.test.id}\", \"overrideAction\" : {\"type\": \"COUNT\"}}],\"defaultAction\": {\"type\": \"BLOCK\"}, \"overrideCustomerWebACLAssociation\": false}" + } } -resource "aws_waf_rule" "wafrule" { - depends_on = [aws_waf_ipset.ipset] +data "aws_organizations_organization" "example" {} + +resource "aws_wafregional_rule_group" "test" { + metric_name = "MyTest" name = %[2]q - metric_name = "tfWAFRule" +} +`, name, group) +} - predicates { - data_id = aws_waf_ipset.ipset.id - negated = false - type = "IPMatch" - } +func testAccFmsPolicyConfig_updated(name string, group string) string { + return fmt.Sprintf(` +resource "aws_fms_policy" "test" { + exclude_resource_tags = false + name = %[1]q + remediation_enabled = true + resource_type_list = ["AWS::ElasticLoadBalancingV2::LoadBalancer"] + + exclude_map { + account = [ data.aws_organizations_organization.example.accounts[0].id ] + } + + security_service_policy_data { + type = "WAF" + managed_service_data = "{\"type\": \"WAF\", \"ruleGroups\": [{\"id\":\"${aws_wafregional_rule_group.test.id}\", \"overrideAction\" : {\"type\": \"COUNT\"}}],\"defaultAction\": {\"type\": \"ALLOW\"}, \"overrideCustomerWebACLAssociation\": false}" + } + + lifecycle { + create_before_destroy = false + } +} + +data "aws_organizations_organization" "example" {} + +resource "aws_wafregional_rule_group" "test" { + metric_name = "MyTest" + name = %[2]q +} + +resource "aws_wafregional_rule_group" "test2" { + metric_name = "MyTest2" + name = %[2]q +} +`, name, group) +} + +func testAccFmsPolicyConfig_include(name string, group string) string { + return fmt.Sprintf(` +resource "aws_fms_policy" "test" { + exclude_resource_tags = false + name = %[1]q + remediation_enabled = false + resource_type_list = ["AWS::ElasticLoadBalancingV2::LoadBalancer"] + + include_map { + account = [ data.aws_organizations_organization.example.accounts[0].id ] + } + + security_service_policy_data { + type = "WAF" + managed_service_data = "{\"type\": \"WAF\", \"ruleGroups\": [{\"id\":\"${aws_wafregional_rule_group.test.id}\", \"overrideAction\" : {\"type\": \"COUNT\"}}],\"defaultAction\": {\"type\": \"BLOCK\"}, \"overrideCustomerWebACLAssociation\": false}" + } +} + +data "aws_organizations_organization" "example" {} + +resource "aws_wafregional_rule_group" "test" { + metric_name = "MyTest" + name = %[2]q } `, name, group) } @@ -158,26 +265,14 @@ resource "aws_fms_policy" "test" { remediation_enabled = false resource_type_list = ["AWS::ElasticLoadBalancingV2::LoadBalancer"] - security_service_policy_data = < Additional information about this configuration can be found in the [AWS Firewall Manager SecurityServicePolicyData API Reference](https://docs.aws.amazon.com/fms/2018-01-01/APIReference/API_SecurityServicePolicyData.html) From 7396d54a43e54e0d94b01fe1e60158da1b2b4a4a Mon Sep 17 00:00:00 2001 From: bill-rich Date: Fri, 8 Jan 2021 10:47:24 -0800 Subject: [PATCH 0459/1212] Fix HCL --- website/docs/r/fms_policy.html.markdown | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/website/docs/r/fms_policy.html.markdown b/website/docs/r/fms_policy.html.markdown index 230ee52eda7..c65e1730483 100644 --- a/website/docs/r/fms_policy.html.markdown +++ b/website/docs/r/fms_policy.html.markdown @@ -24,22 +24,21 @@ resource "aws_fms_policy" "example" { managed_service_data = < 
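
Hand-escaping the managed_service_data JSON, as the test configurations above do, is easy to get wrong; a short Go program can generate an equivalent payload for pasting into the HCL. This is a sketch only, and the rule group id below is a placeholder:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Mirrors the WAF payload used in the acceptance tests above.
	msd := map[string]interface{}{
		"type": "WAF",
		"ruleGroups": []map[string]interface{}{
			{"id": "RULE_GROUP_ID", "overrideAction": map[string]string{"type": "COUNT"}},
		},
		"defaultAction":                     map[string]string{"type": "BLOCK"},
		"overrideCustomerWebACLAssociation": false,
	}
	b, err := json.Marshal(msd)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
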
Date: Sun, 1 Nov 2020 09:47:11 +0200 Subject: [PATCH 0460/1212] add resource --- .../service/sagemaker/finder/finder.go | 19 + aws/resource_aws_sagemaker_domain.go | 568 ++++++++++++++++++ 2 files changed, 587 insertions(+) create mode 100644 aws/resource_aws_sagemaker_domain.go diff --git a/aws/internal/service/sagemaker/finder/finder.go b/aws/internal/service/sagemaker/finder/finder.go index 0abb4a7a7e9..06d23f1f24f 100644 --- a/aws/internal/service/sagemaker/finder/finder.go +++ b/aws/internal/service/sagemaker/finder/finder.go @@ -42,3 +42,22 @@ func ImageByName(conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeIma return output, nil } + +// DomainByName returns the domain corresponding to the specified domain id. +// Returns nil if no domain is found. +func DomainByName(conn *sagemaker.SageMaker, domainID string) (*sagemaker.DescribeDomainOutput, error) { + input := &sagemaker.DescribeDomainInput{ + DomainId: aws.String(domainID), + } + + output, err := conn.DescribeDomain(input) + if err != nil { + return nil, err + } + + if output == nil { + return nil, nil + } + + return output, nil +} diff --git a/aws/resource_aws_sagemaker_domain.go b/aws/resource_aws_sagemaker_domain.go new file mode 100644 index 00000000000..e04f9b29bb8 --- /dev/null +++ b/aws/resource_aws_sagemaker_domain.go @@ -0,0 +1,568 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/service/sagemaker" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/finder" +) + +func resourceAwsSagemakerDomain() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSagemakerDomainCreate, + Read: resourceAwsSagemakerDomainRead, + Update: resourceAwsSagemakerDomainUpdate, + Delete: resourceAwsSagemakerDomainDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "domain_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 63), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9](-*[a-zA-Z0-9])*$`), "Valid characters are a-z, A-Z, 0-9, and - (hyphen)."), + ), + }, + "auth_mode": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + ValidateFunc: validation.StringInSlice(sagemaker.AuthMode_Values(), false), + }, + "vpc_id": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "subnet_ids": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + MaxItems: 16, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "app_network_access_type": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Default: sagemaker.AppNetworkAccessTypePublicInternetOnly, + ValidateFunc: validation.StringInSlice(sagemaker.AppNetworkAccessType_Values(), false), + }, + "home_efs_file_system_kms_key_id": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + ValidateFunc: validateArn, + }, + + "default_user_settings": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "security_groups": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 5, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + 
"execution_role": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, + }, + "sharing_settings": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "notebook_output_option": { + Type: schema.TypeString, + Optional: true, + Default: sagemaker.NotebookOutputOptionDisabled, + ValidateFunc: validation.StringInSlice(sagemaker.NotebookOutputOption_Values(), false), + }, + "s3_kms_Key_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, + }, + "s3_output_path": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "tensor_board_app_settings": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default_resource_spec": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(sagemaker.AppInstanceType_Values(), false), + }, + "sagemaker_image_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, + }, + }, + }, + }, + }, + }, + }, + "jupyter_server_app_settings": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default_resource_spec": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(sagemaker.AppInstanceType_Values(), false), + }, + "sagemaker_image_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, + }, + }, + }, + }, + }, + }, + }, + "kernel_gateway_app_settings": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default_resource_spec": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(sagemaker.AppInstanceType_Values(), false), + }, + "sagemaker_image_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "tags": tagsSchema(), + "url": { + Type: schema.TypeString, + Computed: true, + }, + "single_sign_on_managed_application_instance_id": { + Type: schema.TypeString, + Computed: true, + }, + "home_efs_file_system_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsSagemakerDomainCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sagemakerconn + + input := &sagemaker.CreateDomainInput{ + DomainName: aws.String(d.Get("domain_name").(string)), + AuthMode: aws.String(d.Get("auth_mode").(string)), + VpcId: aws.String(d.Get("vpc_id").(string)), + AppNetworkAccessType: aws.String(d.Get("app_network_access_type").(string)), + SubnetIds: expandStringSet(d.Get("subnet_ids").(*schema.Set)), + DefaultUserSettings: expandSagemakerDomainDefaultUserSettings(d.Get("default_user_settings").([]interface{})), + } + + if v, ok := d.GetOk("home_efs_file_system_kms_key_id"); ok { + input.HomeEfsFileSystemKmsKeyId = aws.String(v.(string)) + } + + log.Printf("[DEBUG] sagemaker domain create config: %#v", *input) + output, err := 
conn.CreateDomain(input) + if err != nil { + return fmt.Errorf("error creating SageMaker domain: %w", err) + } + + domainArn := aws.StringValue(output.DomainArn) + domainID, err := decodeSagemakerDomainID(domainArn) + if err != nil { + return err + } + + d.SetId(domainID) + + return resourceAwsSagemakerDomainRead(d, meta) +} + +func resourceAwsSagemakerDomainRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sagemakerconn + + domain, err := finder.DomainByName(conn, d.Id()) + if err != nil { + if isAWSErr(err, "ValidationException", "Cannot find Domain") { + d.SetId("") + log.Printf("[WARN] Unable to find SageMaker domain (%s), removing from state", d.Id()) + return nil + } + return fmt.Errorf("error reading SageMaker domain (%s): %w", d.Id(), err) + } + + d.Set("domain_name", domain.DomainName) + d.Set("auth_mode", domain.AuthMode) + d.Set("app_network_access_type", domain.AppNetworkAccessType) + d.Set("arn", domain.DomainArn) + d.Set("home_efs_file_system_id", domain.HomeEfsFileSystemId) + d.Set("home_efs_file_system_kms_key_id", domain.HomeEfsFileSystemKmsKeyId) + d.Set("single_sign_on_managed_application_instance_id", domain.SingleSignOnManagedApplicationInstanceId) + d.Set("url", domain.Url) + d.Set("vpc_id", domain.VpcId) + + if err := d.Set("subnet_ids", flattenStringSet(domain.SubnetIds)); err != nil { + return fmt.Errorf("error setting subnet_ids for sagemaker domain (%s): %w", d.Id(), err) + } + + if err := d.Set("default_user_settings", flattenSagemakerDomainDefaultUserSettings(domain.DefaultUserSettings)); err != nil { + return fmt.Errorf("error setting default_user_settings for sagemaker domain (%s): %w", d.Id(), err) + } + + return nil +} + +func resourceAwsSagemakerDomainUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sagemakerconn + + input := &sagemaker.UpdateDomainInput{ + DomainId: aws.String(d.Id()), + DefaultUserSettings: expandSagemakerDomainDefaultUserSettings(d.Get("default_user_settings").([]interface{})), + } + + log.Printf("[DEBUG] sagemaker domain update config: %#v", *input) + _, err := conn.UpdateDomain(input) + if err != nil { + return fmt.Errorf("error updating SageMaker domain: %w", err) + } + + return resourceAwsSagemakerDomainRead(d, meta) +} + +func resourceAwsSagemakerDomainDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sagemakerconn + + input := &sagemaker.DeleteDomainInput{ + DomainId: aws.String(d.Id()), + } + + if _, err := conn.DeleteDomain(input); err != nil { + if isAWSErr(err, "ValidationException", "Cannot find Domain") { + return nil + } + return fmt.Errorf("error deleting SageMaker domain (%s): %w", d.Id(), err) + } + + return nil +} + +func expandSagemakerDomainDefaultUserSettings(l []interface{}) *sagemaker.UserSettings { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + config := &sagemaker.UserSettings{} + + if v, ok := m["execution_role"].(string); ok && v != "" { + config.ExecutionRole = aws.String(v) + } + + if v, ok := m["security_groups"].(*schema.Set); ok && v.Len() > 0 { + config.SecurityGroups = expandStringSet(v) + } + + if v, ok := m["tensor_board_app_settings"].([]interface{}); ok && len(v) > 0 { + config.TensorBoardAppSettings = expandSagemakerDomainTensorBoardAppSettings(v) + } + + if v, ok := m["kernel_gateway_app_settings"].([]interface{}); ok && len(v) > 0 { + config.KernelGatewayAppSettings = expandSagemakerDomainKernelGatewayAppSettings(v) + } + + if v, ok := 
m["jupyter_server_app_settings"].([]interface{}); ok && len(v) > 0 { + config.JupyterServerAppSettings = expandSagemakerDomainJupyterServerAppSettings(v) + } + + if v, ok := m["share_settings"].([]interface{}); ok && len(v) > 0 { + config.SharingSettings = expandSagemakerDomainShareSettings(v) + } + + return config +} + +func expandSagemakerDomainJupyterServerAppSettings(l []interface{}) *sagemaker.JupyterServerAppSettings { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + config := &sagemaker.JupyterServerAppSettings{} + + if v, ok := m["default_resurce_spec"].([]interface{}); ok && len(v) > 0 { + config.DefaultResourceSpec = expandSagemakerDomainDefaultResourceSpec(v) + } + + return config +} + +func expandSagemakerDomainKernelGatewayAppSettings(l []interface{}) *sagemaker.KernelGatewayAppSettings { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + config := &sagemaker.KernelGatewayAppSettings{} + + if v, ok := m["default_resurce_spec"].([]interface{}); ok && len(v) > 0 { + config.DefaultResourceSpec = expandSagemakerDomainDefaultResourceSpec(v) + } + + return config +} + +func expandSagemakerDomainTensorBoardAppSettings(l []interface{}) *sagemaker.TensorBoardAppSettings { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + config := &sagemaker.TensorBoardAppSettings{} + + if v, ok := m["default_resurce_spec"].([]interface{}); ok && len(v) > 0 { + config.DefaultResourceSpec = expandSagemakerDomainDefaultResourceSpec(v) + } + + return config +} + +func expandSagemakerDomainDefaultResourceSpec(l []interface{}) *sagemaker.ResourceSpec { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + config := &sagemaker.ResourceSpec{} + + if v, ok := m["instance_type"].(string); ok && v != "" { + config.InstanceType = aws.String(v) + } + + if v, ok := m["sagemaker_image_arn"].(string); ok && v != "" { + config.SageMakerImageArn = aws.String(v) + } + + return config +} + +func expandSagemakerDomainShareSettings(l []interface{}) *sagemaker.SharingSettings { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + config := &sagemaker.SharingSettings{ + NotebookOutputOption: aws.String(m["notebook_output_option"].(string)), + } + + if v, ok := m["s3_kms_key_id"].(string); ok && v != "" { + config.S3KmsKeyId = aws.String(v) + } + + if v, ok := m["s3_output_path"].(string); ok && v != "" { + config.S3OutputPath = aws.String(v) + } + + return config +} + +func flattenSagemakerDomainDefaultUserSettings(config *sagemaker.UserSettings) []map[string]interface{} { + if config == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{} + + if config.ExecutionRole != nil { + m["execution_role"] = aws.StringValue(config.ExecutionRole) + } + + if config.SecurityGroups != nil { + m["security_groups"] = flattenStringSet(config.SecurityGroups) + } + + if config.JupyterServerAppSettings != nil { + m["jupyter_server_app_settings"] = flattenSagemakerDomainJupyterServerAppSettings(config.JupyterServerAppSettings) + } + + if config.KernelGatewayAppSettings != nil { + m["kernel_gateway_app_settings"] = flattenSagemakerDomainKernelGatewayAppSettings(config.KernelGatewayAppSettings) + } + + if config.TensorBoardAppSettings != nil { + m["tensor_board_app_settings"] = flattenSagemakerDomainTensorBoardAppSettings(config.TensorBoardAppSettings) + } + + if config.SharingSettings != nil { + 
m["share_settings"] = flattenSagemakerDomainShareSettings(config.SharingSettings) + } + + return []map[string]interface{}{m} +} + +func flattenSagemakerDomainDefaultResourceSpec(config *sagemaker.ResourceSpec) []map[string]interface{} { + if config == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{} + + if config.InstanceType != nil { + m["instance_type"] = aws.StringValue(config.InstanceType) + } + + if config.SageMakerImageArn != nil { + m["sagemaker_image_arn"] = aws.StringValue(config.SageMakerImageArn) + } + + return []map[string]interface{}{m} +} + +func flattenSagemakerDomainTensorBoardAppSettings(config *sagemaker.TensorBoardAppSettings) []map[string]interface{} { + if config == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{} + + if config.DefaultResourceSpec != nil { + m["default_resurce_spec"] = flattenSagemakerDomainDefaultResourceSpec(config.DefaultResourceSpec) + } + + return []map[string]interface{}{m} +} + +func flattenSagemakerDomainJupyterServerAppSettings(config *sagemaker.JupyterServerAppSettings) []map[string]interface{} { + if config == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{} + + if config.DefaultResourceSpec != nil { + m["default_resurce_spec"] = flattenSagemakerDomainDefaultResourceSpec(config.DefaultResourceSpec) + } + + return []map[string]interface{}{m} +} + +func flattenSagemakerDomainKernelGatewayAppSettings(config *sagemaker.KernelGatewayAppSettings) []map[string]interface{} { + if config == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{} + + if config.DefaultResourceSpec != nil { + m["default_resurce_spec"] = flattenSagemakerDomainDefaultResourceSpec(config.DefaultResourceSpec) + } + + return []map[string]interface{}{m} +} + +func flattenSagemakerDomainShareSettings(config *sagemaker.SharingSettings) []map[string]interface{} { + if config == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "notebook_output_option": aws.StringValue(config.NotebookOutputOption), + } + + if config.S3KmsKeyId != nil { + m["s3_kms_key_id"] = aws.StringValue(config.S3KmsKeyId) + } + + if config.S3OutputPath != nil { + m["s3_output_path"] = aws.StringValue(config.S3OutputPath) + } + + return []map[string]interface{}{m} +} + +func decodeSagemakerDomainID(id string) (string, error) { + domainArn, err := arn.Parse(id) + if err != nil { + return "", err + } + + domainName := strings.TrimPrefix(domainArn.Resource, "domain/") + return domainName, nil +} From 4c9dc3ddcea649dd784c8d545746058a745e6fff Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 1 Nov 2020 09:48:02 +0200 Subject: [PATCH 0461/1212] add to provider --- aws/provider.go | 1 + 1 file changed, 1 insertion(+) diff --git a/aws/provider.go b/aws/provider.go index 0543109d0f1..98041dee66f 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -870,6 +870,7 @@ func Provider() *schema.Provider { "aws_default_route_table": resourceAwsDefaultRouteTable(), "aws_route_table_association": resourceAwsRouteTableAssociation(), "aws_sagemaker_code_repository": resourceAwsSagemakerCodeRepository(), + "aws_sagemaker_domain": resourceAwsSagemakerDomain(), "aws_sagemaker_endpoint_configuration": resourceAwsSagemakerEndpointConfiguration(), "aws_sagemaker_image": resourceAwsSagemakerImage(), "aws_sagemaker_endpoint": resourceAwsSagemakerEndpoint(), From bdc8be99d7aa11a69340b045ec281a3917c10d78 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 1 Nov 2020 09:55:12 +0200 Subject: 
[PATCH 0462/1212] add waiters --- .../service/sagemaker/waiter/status.go | 26 +++++++++++ .../service/sagemaker/waiter/waiter.go | 43 +++++++++++++++++++ aws/resource_aws_sagemaker_domain.go | 12 ++++++ 3 files changed, 81 insertions(+) diff --git a/aws/internal/service/sagemaker/waiter/status.go b/aws/internal/service/sagemaker/waiter/status.go index 1f8f5d00dbe..cb55b5ee3b3 100644 --- a/aws/internal/service/sagemaker/waiter/status.go +++ b/aws/internal/service/sagemaker/waiter/status.go @@ -13,6 +13,7 @@ const ( SagemakerNotebookInstanceStatusNotFound = "NotFound" SagemakerImageStatusNotFound = "NotFound" SagemakerImageStatusFailed = "Failed" + SagemakerDomainStatusNotFound = "NotFound" ) // NotebookInstanceStatus fetches the NotebookInstance and its Status @@ -68,3 +69,28 @@ func ImageStatus(conn *sagemaker.SageMaker, name string) resource.StateRefreshFu return output, aws.StringValue(output.ImageStatus), nil } } + +// DomainStatus fetches the Domain and its Status +func DomainStatus(conn *sagemaker.SageMaker, domainID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + input := &sagemaker.DescribeDomainInput{ + DomainId: aws.String(domainID), + } + + output, err := conn.DescribeDomain(input) + + if tfawserr.ErrMessageContains(err, "ValidationException", "RecordNotFound") { + return nil, SagemakerDomainStatusNotFound, nil + } + + if err != nil { + return nil, sagemaker.DomainStatusFailed, err + } + + if output == nil { + return nil, SagemakerDomainStatusNotFound, nil + } + + return output, aws.StringValue(output.Status), nil + } +} diff --git a/aws/internal/service/sagemaker/waiter/waiter.go b/aws/internal/service/sagemaker/waiter/waiter.go index e6ff40fb82a..decbbab95bb 100644 --- a/aws/internal/service/sagemaker/waiter/waiter.go +++ b/aws/internal/service/sagemaker/waiter/waiter.go @@ -13,6 +13,8 @@ const ( NotebookInstanceDeletedTimeout = 10 * time.Minute ImageCreatedTimeout = 10 * time.Minute ImageDeletedTimeout = 10 * time.Minute + DomainInServiceTimeout = 10 * time.Minute + DomainDeletedTimeout = 10 * time.Minute ) // NotebookInstanceInService waits for a NotebookInstance to return InService @@ -117,3 +119,44 @@ func ImageDeleted(conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeIm return nil, err } + +// DomainInService waits for a Domain to return InService +func DomainInService(conn *sagemaker.SageMaker, domainID string) (*sagemaker.DescribeDomainOutput, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + SagemakerDomainStatusNotFound, + sagemaker.DomainStatusPending, + }, + Target: []string{sagemaker.DomainStatusInService}, + Refresh: DomainStatus(conn, domainID), + Timeout: DomainInServiceTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*sagemaker.DescribeDomainOutput); ok { + return output, err + } + + return nil, err +} + +// DomainDeleted waits for a Domain to return Deleted +func DomainDeleted(conn *sagemaker.SageMaker, domainID string) (*sagemaker.DescribeDomainOutput, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + sagemaker.DomainStatusDeleting, + }, + Target: []string{}, + Refresh: DomainStatus(conn, domainID), + Timeout: DomainDeletedTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*sagemaker.DescribeDomainOutput); ok { + return output, err + } + + return nil, err +} diff --git a/aws/resource_aws_sagemaker_domain.go b/aws/resource_aws_sagemaker_domain.go index e04f9b29bb8..a4719911fcc 100644 
--- a/aws/resource_aws_sagemaker_domain.go +++ b/aws/resource_aws_sagemaker_domain.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/finder" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/waiter" ) func resourceAwsSagemakerDomain() *schema.Resource { @@ -246,6 +247,10 @@ func resourceAwsSagemakerDomainCreate(d *schema.ResourceData, meta interface{}) d.SetId(domainID) + if _, err := waiter.DomainInService(conn, d.Id()); err != nil { + return fmt.Errorf("error waiting for sagemaker domain (%s) to create: %w", d.Id(), err) + } + return resourceAwsSagemakerDomainRead(d, meta) } @@ -314,6 +319,13 @@ func resourceAwsSagemakerDomainDelete(d *schema.ResourceData, meta interface{}) return fmt.Errorf("error deleting SageMaker domain (%s): %w", d.Id(), err) } + if _, err := waiter.DomainDeleted(conn, d.Id()); err != nil { + if isAWSErr(err, "ValidationException", "RecordNotFound") { + return nil + } + return fmt.Errorf("error waiting for sagemaker domain (%s) to delete: %w", d.Id(), err) + } + return nil } From 97b11e228431232575f2de5c632a002a53730813 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 1 Nov 2020 11:11:13 +0200 Subject: [PATCH 0463/1212] fix argument name --- aws/resource_aws_sagemaker_domain.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_sagemaker_domain.go b/aws/resource_aws_sagemaker_domain.go index a4719911fcc..97f96063b7e 100644 --- a/aws/resource_aws_sagemaker_domain.go +++ b/aws/resource_aws_sagemaker_domain.go @@ -101,7 +101,7 @@ func resourceAwsSagemakerDomain() *schema.Resource { Default: sagemaker.NotebookOutputOptionDisabled, ValidateFunc: validation.StringInSlice(sagemaker.NotebookOutputOption_Values(), false), }, - "s3_kms_Key_id": { + "s3_kms_key_id": { Type: schema.TypeString, Optional: true, ValidateFunc: validateArn, From 419959435192e06b42d0673fe2ed3835ce206220 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 1 Nov 2020 11:33:13 +0200 Subject: [PATCH 0464/1212] add tests --- aws/resource_aws_sagemaker_domain.go | 4 +- aws/resource_aws_sagemaker_domain_test.go | 345 ++++++++++++++++++++++ 2 files changed, 346 insertions(+), 3 deletions(-) create mode 100644 aws/resource_aws_sagemaker_domain_test.go diff --git a/aws/resource_aws_sagemaker_domain.go b/aws/resource_aws_sagemaker_domain.go index 97f96063b7e..fcedafdc8a0 100644 --- a/aws/resource_aws_sagemaker_domain.go +++ b/aws/resource_aws_sagemaker_domain.go @@ -30,7 +30,6 @@ func resourceAwsSagemakerDomain() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "domain_name": { Type: schema.TypeString, Required: true, @@ -71,7 +70,6 @@ func resourceAwsSagemakerDomain() *schema.Resource { Optional: true, ValidateFunc: validateArn, }, - "default_user_settings": { Type: schema.TypeList, Required: true, @@ -86,7 +84,7 @@ func resourceAwsSagemakerDomain() *schema.Resource { }, "execution_role": { Type: schema.TypeString, - Optional: true, + Required: true, ValidateFunc: validateArn, }, "sharing_settings": { diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go new file mode 100644 index 00000000000..ca31ef452ef --- /dev/null +++ b/aws/resource_aws_sagemaker_domain_test.go @@ -0,0 +1,345 @@ +package aws + +import ( + "fmt" + "log" + "testing" + + "github.com/aws/aws-sdk-go/aws" + 
"github.com/aws/aws-sdk-go/service/sagemaker" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/finder" +) + +func init() { + resource.AddTestSweepers("aws_sagemaker_domain", &resource.Sweeper{ + Name: "aws_sagemaker_domain", + F: testSweepSagemakerDomains, + }) +} + +func testSweepSagemakerDomains(region string) error { + client, err := sharedClientForRegion(region) + if err != nil { + return fmt.Errorf("error getting client: %s", err) + } + conn := client.(*AWSClient).sagemakerconn + + err = conn.ListDomainsPages(&sagemaker.ListDomainsInput{}, func(page *sagemaker.ListDomainsOutput, lastPage bool) bool { + for _, instance := range page.Domains { + domainArn := aws.StringValue(instance.DomainArn) + domainID, err := decodeSagemakerDomainID(domainArn) + if err != nil { + log.Printf("[ERROR] Error parsing sagemaker domain arn (%s): %s", domainArn, err) + } + input := &sagemaker.DeleteDomainInput{ + DomainId: aws.String(domainID), + } + + log.Printf("[INFO] Deleting SageMaker domain: %s", domainArn) + if _, err := conn.DeleteDomain(input); err != nil { + log.Printf("[ERROR] Error deleting SageMaker domain (%s): %s", domainArn, err) + continue + } + } + + return !lastPage + }) + + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping SageMaker domain sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("Error retrieving SageMaker domains: %w", err) + } + + return nil +} + +func TestAccAWSSagemakerDomain_basic(t *testing.T) { + var notebook sagemaker.DescribeDomainOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_domain.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerDomainDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerDomainBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerDomainExists(resourceName, ¬ebook), + resource.TestCheckResourceAttr(resourceName, "domain_name", rName), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("domain/%s", rName)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +// func TestAccAWSSagemakerDomain_gitConfig_branch(t *testing.T) { +// var notebook sagemaker.DescribeDomainOutput +// rName := acctest.RandomWithPrefix("tf-acc-test") +// resourceName := "aws_sagemaker_domain.test" + +// resource.ParallelTest(t, resource.TestCase{ +// PreCheck: func() { testAccPreCheck(t) }, +// Providers: testAccProviders, +// CheckDestroy: testAccCheckAWSSagemakerDomainDestroy, +// Steps: []resource.TestStep{ +// { +// Config: testAccAWSSagemakerDomainGitConfigBranchConfig(rName), +// Check: resource.ComposeTestCheckFunc( +// testAccCheckAWSSagemakerDomainExists(resourceName, ¬ebook), +// resource.TestCheckResourceAttr(resourceName, "domain_name", rName), +// testAccCheckResourceAttrRegionalARN(resourceName0......, "arn", "sagemaker", fmt.Sprintf("code-repository/%s", rName)), +// resource.TestCheckResourceAttr(resourceName, "git_config.#", "1"), +// resource.TestCheckResourceAttr(resourceName, "git_config.0.repository_url", "https://github.com/terraform-providers/terraform-provider-aws.git"), +// 
resource.TestCheckResourceAttr(resourceName, "git_config.0.branch", "master"), +// ), +// }, +// { +// ResourceName: resourceName, +// ImportState: true, +// ImportStateVerify: true, +// }, +// }, +// }) +// } + +// func TestAccAWSSagemakerDomain_gitConfig_secret(t *testing.T) { +// var notebook sagemaker.DescribeDomainOutput +// rName := acctest.RandomWithPrefix("tf-acc-test") +// resourceName := "aws_sagemaker_domain.test" + +// resource.ParallelTest(t, resource.TestCase{ +// PreCheck: func() { testAccPreCheck(t) }, +// Providers: testAccProviders, +// CheckDestroy: testAccCheckAWSSagemakerDomainDestroy, +// Steps: []resource.TestStep{ +// { +// Config: testAccAWSSagemakerDomainGitConfigSecretConfig(rName), +// Check: resource.ComposeTestCheckFunc( +// testAccCheckAWSSagemakerDomainExists(resourceName, ¬ebook), +// resource.TestCheckResourceAttr(resourceName, "domain_name", rName), +// testAccCheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("code-repository/%s", rName)), +// resource.TestCheckResourceAttr(resourceName, "git_config.#", "1"), +// resource.TestCheckResourceAttr(resourceName, "git_config.0.repository_url", "https://github.com/terraform-providers/terraform-provider-aws.git"), +// resource.TestCheckResourceAttrPair(resourceName, "git_config.0.secret_arn", "aws_secretsmanager_secret.test", "arn"), +// ), +// }, +// { +// ResourceName: resourceName, +// ImportState: true, +// ImportStateVerify: true, +// }, +// { +// Config: testAccAWSSagemakerDomainGitConfigSecretUpdatedConfig(rName), +// Check: resource.ComposeTestCheckFunc( +// testAccCheckAWSSagemakerDomainExists(resourceName, ¬ebook), +// resource.TestCheckResourceAttr(resourceName, "domain_name", rName), +// testAccCheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("code-repository/%s", rName)), +// resource.TestCheckResourceAttr(resourceName, "git_config.#", "1"), +// resource.TestCheckResourceAttr(resourceName, "git_config.0.repository_url", "https://github.com/terraform-providers/terraform-provider-aws.git"), +// resource.TestCheckResourceAttrPair(resourceName, "git_config.0.secret_arn", "aws_secretsmanager_secret.test2", "arn"), +// ), +// }, +// }, +// }) +// } + +func TestAccAWSSagemakerDomain_disappears(t *testing.T) { + var notebook sagemaker.DescribeDomainOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_domain.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerDomainDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerDomainBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerDomainExists(resourceName, ¬ebook), + testAccCheckResourceDisappears(testAccProvider, resourceAwsSagemakerDomain(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckAWSSagemakerDomainDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).sagemakerconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_sagemaker_domain" { + continue + } + + domain, err := finder.DomainByName(conn, rs.Primary.ID) + if err != nil { + return nil + } + + domainArn := aws.StringValue(domain.DomainArn) + domainID, err := decodeSagemakerDomainID(domainArn) + if err != nil { + return err + } + + if domainID == rs.Primary.ID { + return fmt.Errorf("sagemaker domain %q still exists", rs.Primary.ID) + } + } + + return nil +} + +func 
testAccCheckAWSSagemakerDomainExists(n string, codeRepo *sagemaker.DescribeDomainOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No sagemaker domain ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).sagemakerconn + resp, err := finder.DomainByName(conn, rs.Primary.ID) + if err != nil { + return err + } + + *codeRepo = *resp + + return nil + } +} + +func testAccAWSSagemakerDomainConfigBase(rName string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = %[1]q + } +} + +resource "aws_subnet" "test" { + vpc_id = aws_vpc.test.id + cidr_block = "10.0.1.0/24" + + tags = { + Name = %[1]q + } +} + +resource "aws_iam_role" "test" { + name = %[1]q + path = "/" + assume_role_policy = data.aws_iam_policy_document.test.json +} + +data "aws_iam_policy_document" "test" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["sagemaker.amazonaws.com"] + } + } +} +`, rName) +} + +func testAccAWSSagemakerDomainBasicConfig(rName string) string { + return testAccAWSSagemakerDomainConfigBase(rName) + fmt.Sprintf(` +resource "aws_sagemaker_domain" "test" { + domain_name = %[1]q + auth_mode = "IAM" + vpc_id = aws_vpc.test.id + subnet_ids = [aws_subnet.test.id] + + default_user_settings { + execution_role = aws_iam_role.test.arn + } +} +`, rName) +} + +// func testAccAWSSagemakerDomainGitConfigBranchConfig(rName string) string { +// return fmt.Sprintf(` +// resource "aws_sagemaker_domain" "test" { +// domain_name = %[1]q + +// git_config { +// repository_url = "https://github.com/terraform-providers/terraform-provider-aws.git" +// branch = "master" +// } +// } +// `, rName) +// } + +// func testAccAWSSagemakerDomainGitConfigSecretConfig(rName string) string { +// return fmt.Sprintf(` +// resource "aws_secretsmanager_secret" "test" { +// name = %[1]q +// } + +// resource "aws_secretsmanager_secret_version" "test" { +// secret_id = aws_secretsmanager_secret.test.id +// secret_string = jsonencode({ username = "example", passowrd = "example" }) +// } + +// resource "aws_sagemaker_domain" "test" { +// domain_name = %[1]q + +// git_config { +// repository_url = "https://github.com/terraform-providers/terraform-provider-aws.git" +// secret_arn = aws_secretsmanager_secret.test.arn +// } + +// depends_on = [aws_secretsmanager_secret_version.test] +// } +// `, rName) +// } + +// func testAccAWSSagemakerDomainGitConfigSecretUpdatedConfig(rName string) string { +// return fmt.Sprintf(` +// resource "aws_secretsmanager_secret" "test2" { +// name = "%[1]s-2" +// } + +// resource "aws_secretsmanager_secret_version" "test2" { +// secret_id = aws_secretsmanager_secret.test2.id +// secret_string = jsonencode({ username = "example", passowrd = "example" }) +// } + +// resource "aws_sagemaker_domain" "test" { +// domain_name = %[1]q + +// git_config { +// repository_url = "https://github.com/terraform-providers/terraform-provider-aws.git" +// secret_arn = aws_secretsmanager_secret.test2.arn +// } + +// depends_on = [aws_secretsmanager_secret_version.test2] +// } +// `, rName) +// } From bedbee64310a158e2c62bcec7038d7763f0f4b2c Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Tue, 3 Nov 2020 17:05:00 +0200 Subject: [PATCH 0465/1212] domain tests --- aws/resource_aws_sagemaker_domain.go | 32 ++++++++++++++++------- 
aws/resource_aws_sagemaker_domain_test.go | 15 ++++++++++- 2 files changed, 36 insertions(+), 11 deletions(-) diff --git a/aws/resource_aws_sagemaker_domain.go b/aws/resource_aws_sagemaker_domain.go index fcedafdc8a0..8225a24b26e 100644 --- a/aws/resource_aws_sagemaker_domain.go +++ b/aws/resource_aws_sagemaker_domain.go @@ -8,6 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/service/efs" "github.com/aws/aws-sdk-go/service/sagemaker" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -89,7 +90,7 @@ func resourceAwsSagemakerDomain() *schema.Resource { }, "sharing_settings": { Type: schema.TypeList, - Required: true, + Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -113,7 +114,7 @@ func resourceAwsSagemakerDomain() *schema.Resource { }, "tensor_board_app_settings": { Type: schema.TypeList, - Required: true, + Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -141,7 +142,7 @@ func resourceAwsSagemakerDomain() *schema.Resource { }, "jupyter_server_app_settings": { Type: schema.TypeList, - Required: true, + Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -169,7 +170,7 @@ func resourceAwsSagemakerDomain() *schema.Resource { }, "kernel_gateway_app_settings": { Type: schema.TypeList, - Required: true, + Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -257,7 +258,7 @@ func resourceAwsSagemakerDomainRead(d *schema.ResourceData, meta interface{}) er domain, err := finder.DomainByName(conn, d.Id()) if err != nil { - if isAWSErr(err, "ValidationException", "Cannot find Domain") { + if isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "") { d.SetId("") log.Printf("[WARN] Unable to find SageMaker domain (%s), removing from state", d.Id()) return nil @@ -311,17 +312,28 @@ func resourceAwsSagemakerDomainDelete(d *schema.ResourceData, meta interface{}) } if _, err := conn.DeleteDomain(input); err != nil { - if isAWSErr(err, "ValidationException", "Cannot find Domain") { - return nil + if !isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "") { + return fmt.Errorf("error deleting SageMaker domain (%s): %w", d.Id(), err) } - return fmt.Errorf("error deleting SageMaker domain (%s): %w", d.Id(), err) } if _, err := waiter.DomainDeleted(conn, d.Id()); err != nil { - if isAWSErr(err, "ValidationException", "RecordNotFound") { + if !isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "") { + return fmt.Errorf("error waiting for sagemaker domain (%s) to delete: %w", d.Id(), err) + } + } + + efsConn := meta.(*AWSClient).efsconn + efsFsID := d.Get("home_efs_file_system_id").(string) + + _, err := efsConn.DeleteFileSystem(&efs.DeleteFileSystemInput{ + FileSystemId: aws.String(efsFsID), + }) + if err != nil { + if isAWSErr(err, efs.ErrCodeFileSystemNotFound, "") { return nil } - return fmt.Errorf("error waiting for sagemaker domain (%s) to delete: %w", d.Id(), err) + return fmt.Errorf("Error delete EFS file system (%s): %w", efsFsID, err) } return nil diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go index ca31ef452ef..903b390c29a 100644 --- a/aws/resource_aws_sagemaker_domain_test.go +++ b/aws/resource_aws_sagemaker_domain_test.go @@ -3,6 +3,7 @@ package aws import ( "fmt" "log" + "regexp" "testing" "github.com/aws/aws-sdk-go/aws" @@ -17,6 +18,9 @@ func init() { 
resource.AddTestSweepers("aws_sagemaker_domain", &resource.Sweeper{ Name: "aws_sagemaker_domain", F: testSweepSagemakerDomains, + Dependencies: []string{ + "aws_efs_file_system", + }, }) } @@ -75,7 +79,15 @@ func TestAccAWSSagemakerDomain_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAWSSagemakerDomainExists(resourceName, ¬ebook), resource.TestCheckResourceAttr(resourceName, "domain_name", rName), - testAccCheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("domain/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "auth_mode", "IAM"), + resource.TestCheckResourceAttr(resourceName, "app_network_access_type", "PublicInternetOnly"), + resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", "1"), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "default_user_settings.0.execution_role", "aws_iam_role.test", "arn"), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "sagemaker", regexp.MustCompile(`domain/.+`)), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrPair(resourceName, "vpc_id", "aws_vpc.test", "id"), + resource.TestCheckResourceAttrSet(resourceName, "url"), ), }, { @@ -233,6 +245,7 @@ func testAccCheckAWSSagemakerDomainExists(n string, codeRepo *sagemaker.Describe func testAccAWSSagemakerDomainConfigBase(rName string) string { return fmt.Sprintf(` resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" tags = { From 0a8f26618bedbc2bea0f3c75406823bbff5fbb12 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 6 Nov 2020 23:29:34 +0200 Subject: [PATCH 0466/1212] move efs delete to tests --- aws/resource_aws_sagemaker_domain.go | 14 ------- aws/resource_aws_sagemaker_domain_test.go | 51 +++++++++++++++++++++++ 2 files changed, 51 insertions(+), 14 deletions(-) diff --git a/aws/resource_aws_sagemaker_domain.go b/aws/resource_aws_sagemaker_domain.go index 8225a24b26e..c822aa84d79 100644 --- a/aws/resource_aws_sagemaker_domain.go +++ b/aws/resource_aws_sagemaker_domain.go @@ -8,7 +8,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/efs" "github.com/aws/aws-sdk-go/service/sagemaker" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -323,19 +322,6 @@ func resourceAwsSagemakerDomainDelete(d *schema.ResourceData, meta interface{}) } } - efsConn := meta.(*AWSClient).efsconn - efsFsID := d.Get("home_efs_file_system_id").(string) - - _, err := efsConn.DeleteFileSystem(&efs.DeleteFileSystemInput{ - FileSystemId: aws.String(efsFsID), - }) - if err != nil { - if isAWSErr(err, efs.ErrCodeFileSystemNotFound, "") { - return nil - } - return fmt.Errorf("Error delete EFS file system (%s): %w", efsFsID, err) - } - return nil } diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go index 903b390c29a..0c821a01c15 100644 --- a/aws/resource_aws_sagemaker_domain_test.go +++ b/aws/resource_aws_sagemaker_domain_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/efs" "github.com/aws/aws-sdk-go/service/sagemaker" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -19,6 +20,7 @@ func init() { Name: "aws_sagemaker_domain", F: testSweepSagemakerDomains, Dependencies: []string{ + 
"aws_efs_mount_target", "aws_efs_file_system", }, }) @@ -88,6 +90,7 @@ func TestAccAWSSagemakerDomain_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestCheckResourceAttrPair(resourceName, "vpc_id", "aws_vpc.test", "id"), resource.TestCheckResourceAttrSet(resourceName, "url"), + testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), ), }, { @@ -242,6 +245,54 @@ func testAccCheckAWSSagemakerDomainExists(n string, codeRepo *sagemaker.Describe } } +func testAccCheckAWSSagemakerDomainDeleteImplicitResources(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Sagemaker domain not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("Sagemaker domain name not set") + } + + conn := testAccProvider.Meta().(*AWSClient).efsconn + efsFsID := rs.Primary.Attributes["home_efs_file_system_id"] + vpcID := rs.Primary.Attributes["vpc_id"] + + resp, err := conn.DescribeMountTargets(&efs.DescribeMountTargetsInput{ + FileSystemId: aws.String(efsFsID), + }) + + if err != nil { + return fmt.Errorf("Sagemaker domain EFS mount targets not found: %w", err) + } + + //reusing EFS mount target delete for wait logic + mountTargets := resp.MountTargets + for _, mt := range mountTargets { + r := resourceAwsEfsMountTarget() + d := r.Data(nil) + mtId := aws.StringValue(mt.MountTargetId) + d.SetId(mtId) + err := r.Delete(d, testAccProvider.Meta()) + if err != nil { + return fmt.Errorf("Sagemaker domain EFS mount target (%s) failed to delete: %w", mtId, err) + } + } + + r := resourceAwsEfsFileSystem() + d := r.Data(nil) + d.SetId(efsFsID) + err = r.Delete(d, testAccProvider.Meta()) + if err != nil { + return fmt.Errorf("Sagemaker domain EFS file system (%s) failed to delete: %w", efsFsID, err) + } + + return nil + } +} + func testAccAWSSagemakerDomainConfigBase(rName string) string { return fmt.Sprintf(` resource "aws_vpc" "test" { From 491c6765f576d1e78a6149c8318f984900b7bc2d Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 7 Nov 2020 00:21:07 +0200 Subject: [PATCH 0467/1212] add tags test --- aws/resource_aws_sagemaker_domain_test.go | 220 +++++++++------------- 1 file changed, 86 insertions(+), 134 deletions(-) diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go index 0c821a01c15..0ee0f0da04a 100644 --- a/aws/resource_aws_sagemaker_domain_test.go +++ b/aws/resource_aws_sagemaker_domain_test.go @@ -90,6 +90,7 @@ func TestAccAWSSagemakerDomain_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestCheckResourceAttrPair(resourceName, "vpc_id", "aws_vpc.test", "id"), resource.TestCheckResourceAttrSet(resourceName, "url"), + resource.TestCheckResourceAttrSet(resourceName, "home_efs_file_system_id"), testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), ), }, @@ -102,76 +103,49 @@ func TestAccAWSSagemakerDomain_basic(t *testing.T) { }) } -// func TestAccAWSSagemakerDomain_gitConfig_branch(t *testing.T) { -// var notebook sagemaker.DescribeDomainOutput -// rName := acctest.RandomWithPrefix("tf-acc-test") -// resourceName := "aws_sagemaker_domain.test" - -// resource.ParallelTest(t, resource.TestCase{ -// PreCheck: func() { testAccPreCheck(t) }, -// Providers: testAccProviders, -// CheckDestroy: testAccCheckAWSSagemakerDomainDestroy, -// Steps: []resource.TestStep{ -// { -// Config: testAccAWSSagemakerDomainGitConfigBranchConfig(rName), -// Check: 
resource.ComposeTestCheckFunc( -// testAccCheckAWSSagemakerDomainExists(resourceName, &notebook), -// resource.TestCheckResourceAttr(resourceName, "domain_name", rName), -// testAccCheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("code-repository/%s", rName)), -// resource.TestCheckResourceAttr(resourceName, "git_config.#", "1"), -// resource.TestCheckResourceAttr(resourceName, "git_config.0.repository_url", "https://github.com/terraform-providers/terraform-provider-aws.git"), -// resource.TestCheckResourceAttr(resourceName, "git_config.0.branch", "master"), -// ), -// }, -// { -// ResourceName: resourceName, -// ImportState: true, -// ImportStateVerify: true, -// }, -// }, -// }) -// } - -// func TestAccAWSSagemakerDomain_gitConfig_secret(t *testing.T) { -// var notebook sagemaker.DescribeDomainOutput -// rName := acctest.RandomWithPrefix("tf-acc-test") -// resourceName := "aws_sagemaker_domain.test" - -// resource.ParallelTest(t, resource.TestCase{ -// PreCheck: func() { testAccPreCheck(t) }, -// Providers: testAccProviders, -// CheckDestroy: testAccCheckAWSSagemakerDomainDestroy, -// Steps: []resource.TestStep{ -// { -// Config: testAccAWSSagemakerDomainGitConfigSecretConfig(rName), -// Check: resource.ComposeTestCheckFunc( -// testAccCheckAWSSagemakerDomainExists(resourceName, &notebook), -// resource.TestCheckResourceAttr(resourceName, "domain_name", rName), -// testAccCheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("code-repository/%s", rName)), -// resource.TestCheckResourceAttr(resourceName, "git_config.#", "1"), -// resource.TestCheckResourceAttr(resourceName, "git_config.0.repository_url", "https://github.com/terraform-providers/terraform-provider-aws.git"), -// resource.TestCheckResourceAttrPair(resourceName, "git_config.0.secret_arn", "aws_secretsmanager_secret.test", "arn"), -// ), -// }, -// { -// ResourceName: resourceName, -// ImportState: true, -// ImportStateVerify: true, -// }, -// { -// Config: testAccAWSSagemakerDomainGitConfigSecretUpdatedConfig(rName), -// Check: resource.ComposeTestCheckFunc( -// testAccCheckAWSSagemakerDomainExists(resourceName, &notebook), -// resource.TestCheckResourceAttr(resourceName, "domain_name", rName), -// testAccCheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("code-repository/%s", rName)), -// resource.TestCheckResourceAttr(resourceName, "git_config.#", "1"), -// resource.TestCheckResourceAttr(resourceName, "git_config.0.repository_url", "https://github.com/terraform-providers/terraform-provider-aws.git"), -// resource.TestCheckResourceAttrPair(resourceName, "git_config.0.secret_arn", "aws_secretsmanager_secret.test2", "arn"), -// ), -// }, -// }, -// }) -// } +func TestAccAWSSagemakerDomain_tags(t *testing.T) { + var notebook sagemaker.DescribeDomainOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_domain.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerDomainDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerDomainBasicConfigTags1(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerDomainExists(resourceName, &notebook), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, 
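+			// The remaining steps update key1 in place, add key2, and finally drop
+			// key1, exercising tag create, update, and removal against the domain.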
+ { + Config: testAccAWSSagemakerDomainBasicConfigTags2(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerDomainExists(resourceName, &notebook), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccAWSSagemakerDomainBasicConfigTags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerDomainExists(resourceName, &notebook), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} func TestAccAWSSagemakerDomain_disappears(t *testing.T) { var notebook sagemaker.DescribeDomainOutput @@ -187,6 +161,7 @@ func TestAccAWSSagemakerDomain_disappears(t *testing.T) { Config: testAccAWSSagemakerDomainBasicConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSagemakerDomainExists(resourceName, &notebook), + testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), testAccCheckResourceDisappears(testAccProvider, resourceAwsSagemakerDomain(), resourceName), ), ExpectNonEmptyPlan: true, @@ -258,7 +233,6 @@ func testAccCheckAWSSagemakerDomainDeleteImplicitResources(n string) resource.Te conn := testAccProvider.Meta().(*AWSClient).efsconn efsFsID := rs.Primary.Attributes["home_efs_file_system_id"] - vpcID := rs.Primary.Attributes["vpc_id"] resp, err := conn.DescribeMountTargets(&efs.DescribeMountTargetsInput{ FileSystemId: aws.String(efsFsID), @@ -273,11 +247,11 @@ func testAccCheckAWSSagemakerDomainDeleteImplicitResources(n string) resource.Te for _, mt := range mountTargets { r := resourceAwsEfsMountTarget() d := r.Data(nil) - mtId := aws.StringValue(mt.MountTargetId) - d.SetId(mtId) + mtID := aws.StringValue(mt.MountTargetId) + d.SetId(mtID) err := r.Delete(d, testAccProvider.Meta()) if err != nil { - return fmt.Errorf("Sagemaker domain EFS mount target (%s) failed to delete: %w", mtId, err) + return fmt.Errorf("Sagemaker domain EFS mount target (%s) failed to delete: %w", mtID, err) } } @@ -347,63 +321,41 @@ resource "aws_sagemaker_domain" "test" { `, rName) } -// func testAccAWSSagemakerDomainGitConfigBranchConfig(rName string) string { -// return fmt.Sprintf(` -// resource "aws_sagemaker_domain" "test" { -// domain_name = %[1]q - -// git_config { -// repository_url = "https://github.com/terraform-providers/terraform-provider-aws.git" -// branch = "master" -// } -// } -// `, rName) -// } - -// func testAccAWSSagemakerDomainGitConfigSecretConfig(rName string) string { -// return fmt.Sprintf(` -// resource "aws_secretsmanager_secret" "test" { -// name = %[1]q -// } - -// resource "aws_secretsmanager_secret_version" "test" { -// secret_id = aws_secretsmanager_secret.test.id -// secret_string = jsonencode({ username = "example", passowrd = "example" }) -// } - -// resource "aws_sagemaker_domain" "test" { -// domain_name = %[1]q - -// git_config { -// repository_url = "https://github.com/terraform-providers/terraform-provider-aws.git" -// secret_arn = aws_secretsmanager_secret.test.arn -// } - -// depends_on = [aws_secretsmanager_secret_version.test] -// } -// `, rName) -// } - -// func testAccAWSSagemakerDomainGitConfigSecretUpdatedConfig(rName string) string { -// return fmt.Sprintf(` -// resource "aws_secretsmanager_secret" "test2" { -// name = "%[1]s-2" -// } - -// resource 
"aws_secretsmanager_secret_version" "test2" { -// secret_id = aws_secretsmanager_secret.test2.id -// secret_string = jsonencode({ username = "example", passowrd = "example" }) -// } - -// resource "aws_sagemaker_domain" "test" { -// domain_name = %[1]q - -// git_config { -// repository_url = "https://github.com/terraform-providers/terraform-provider-aws.git" -// secret_arn = aws_secretsmanager_secret.test2.arn -// } - -// depends_on = [aws_secretsmanager_secret_version.test2] -// } -// `, rName) -// } +func testAccAWSSagemakerDomainBasicConfigTags1(rName, tagKey1, tagValue1 string) string { + return testAccAWSSagemakerDomainConfigBase(rName) + fmt.Sprintf(` +resource "aws_sagemaker_domain" "test" { + domain_name = %[1]q + auth_mode = "IAM" + vpc_id = aws_vpc.test.id + subnet_ids = [aws_subnet.test.id] + + default_user_settings { + execution_role = aws_iam_role.test.arn + } + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1) +} + +func testAccAWSSagemakerDomainBasicConfigTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return testAccAWSSagemakerDomainConfigBase(rName) + fmt.Sprintf(` +resource "aws_sagemaker_domain" "test" { + domain_name = %[1]q + auth_mode = "IAM" + vpc_id = aws_vpc.test.id + subnet_ids = [aws_subnet.test.id] + + default_user_settings { + execution_role = aws_iam_role.test.arn + } + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2) +} From 2706ac02b6a8a591671024479fe64a3da1817a86 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 7 Nov 2020 00:28:34 +0200 Subject: [PATCH 0468/1212] add sg test --- aws/resource_aws_sagemaker_domain_test.go | 91 +++++++++++++++++++++-- 1 file changed, 86 insertions(+), 5 deletions(-) diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go index 0ee0f0da04a..fc2f1ab1664 100644 --- a/aws/resource_aws_sagemaker_domain_test.go +++ b/aws/resource_aws_sagemaker_domain_test.go @@ -114,7 +114,7 @@ func TestAccAWSSagemakerDomain_tags(t *testing.T) { CheckDestroy: testAccCheckAWSSagemakerDomainDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSSagemakerDomainBasicConfigTags1(rName, "key1", "value1"), + Config: testAccAWSSagemakerDomainConfigTags1(rName, "key1", "value1"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSagemakerDomainExists(resourceName, ¬ebook), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), @@ -127,7 +127,7 @@ func TestAccAWSSagemakerDomain_tags(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAWSSagemakerDomainBasicConfigTags2(rName, "key1", "value1updated", "key2", "value2"), + Config: testAccAWSSagemakerDomainConfigTags2(rName, "key1", "value1updated", "key2", "value2"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSagemakerDomainExists(resourceName, ¬ebook), resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), @@ -136,11 +136,48 @@ func TestAccAWSSagemakerDomain_tags(t *testing.T) { ), }, { - Config: testAccAWSSagemakerDomainBasicConfigTags1(rName, "key2", "value2"), + Config: testAccAWSSagemakerDomainConfigTags1(rName, "key2", "value2"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSagemakerDomainExists(resourceName, ¬ebook), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), + ), + }, + }, + }) +} + +func TestAccAWSSagemakerDomain_securityGroup(t *testing.T) { + var notebook 
sagemaker.DescribeDomainOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_domain.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerDomainDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerDomainConfigSecurityGroup1(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerDomainExists(resourceName, &notebook), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.security_groups.#", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSSagemakerDomainConfigSecurityGroup2(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerDomainExists(resourceName, &notebook), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.security_groups.#", "2"), + testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), ), }, }, }) } @@ -321,7 +358,51 @@ resource "aws_sagemaker_domain" "test" { `, rName) } -func testAccAWSSagemakerDomainBasicConfigTags1(rName, tagKey1, tagValue1 string) string { +func testAccAWSSagemakerDomainConfigSecurityGroup1(rName string) string { + return testAccAWSSagemakerDomainConfigBase(rName) + fmt.Sprintf(` +resource "aws_security_group" "test" { + name = "%[1]s" +} + +resource "aws_sagemaker_domain" "test" { + domain_name = %[1]q + auth_mode = "IAM" + vpc_id = aws_vpc.test.id + subnet_ids = [aws_subnet.test.id] + + default_user_settings { + execution_role = aws_iam_role.test.arn + security_groups = [aws_security_group.test.id] + } +} +`, rName) +} + +func testAccAWSSagemakerDomainConfigSecurityGroup2(rName string) string { + return testAccAWSSagemakerDomainConfigBase(rName) + fmt.Sprintf(` +resource "aws_security_group" "test" { + name = %[1]q +} + +resource "aws_security_group" "test2" { + name = "%[2]s-2" +} + +resource "aws_sagemaker_domain" "test" { + domain_name = %[1]q + auth_mode = "IAM" + vpc_id = aws_vpc.test.id + subnet_ids = [aws_subnet.test.id] + + default_user_settings { + execution_role = aws_iam_role.test.arn + security_groups = [aws_security_group.test.id, aws_security_group.test2.id] + } +} +`, rName) +} + +func testAccAWSSagemakerDomainConfigTags1(rName, tagKey1, tagValue1 string) string { return testAccAWSSagemakerDomainConfigBase(rName) + fmt.Sprintf(` resource "aws_sagemaker_domain" "test" { domain_name = %[1]q @@ -340,7 +421,7 @@ resource "aws_sagemaker_domain" "test" { `, rName, tagKey1, tagValue1) } -func testAccAWSSagemakerDomainBasicConfigTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { +func testAccAWSSagemakerDomainConfigTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { return testAccAWSSagemakerDomainConfigBase(rName) + fmt.Sprintf(` resource "aws_sagemaker_domain" "test" { domain_name = %[1]q From f75acfffea32fe57dbf401e98f6d6350eb8ee1c0 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 7 Nov 2020 00:32:03 +0200 Subject: [PATCH 0469/1212] add kms test --- aws/resource_aws_sagemaker_domain_test.go | 66 +++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go index fc2f1ab1664..d45833042b1 100644 --- 
a/aws/resource_aws_sagemaker_domain_test.go +++ b/aws/resource_aws_sagemaker_domain_test.go @@ -103,6 +103,33 @@ func TestAccAWSSagemakerDomain_basic(t *testing.T) { }) } +func TestAccAWSSagemakerDomain_efsKms(t *testing.T) { + var notebook sagemaker.DescribeDomainOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_domain.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerDomainDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerDomainConfigEFSKMS(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerDomainExists(resourceName, &notebook), + resource.TestCheckResourceAttrPair(resourceName, "home_efs_file_system_kms_key_id", "aws_kms_key.test", "arn"), + testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAWSSagemakerDomain_tags(t *testing.T) { var notebook sagemaker.DescribeDomainOutput rName := acctest.RandomWithPrefix("tf-acc-test") @@ -429,6 +456,45 @@ resource "aws_sagemaker_domain" "test" { `, rName) } +func testAccAWSSagemakerDomainConfigEFSKMS(rName string) string { + return testAccAWSSagemakerDomainConfigBase(rName) + fmt.Sprintf(` +resource "aws_kms_key" "test" { + description = "Terraform acc test %s" + deletion_window_in_days = 7 + + policy = < Date: Sat, 7 Nov 2020 00:32:49 +0200 Subject: [PATCH 0470/1212] fix sg name --- aws/resource_aws_sagemaker_domain_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go index d45833042b1..1ef239bce51 100644 --- 
+ +## Example Usage + +### Basic usage + +```hcl +resource "aws_sagemaker_domain" "example" { + domain_name = "example" + auth_mode = "IAM" + vpc_id = aws_vpc.test.id + subnet_ids = [aws_subnet.test.id] + + default_user_settings { + execution_role = aws_iam_role.test.arn + } +} + +resource "aws_iam_role" "example" { + name = "example" + path = "/" + assume_role_policy = data.aws_iam_policy_document.example.json +} + +data "aws_iam_policy_document" "example" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["sagemaker.amazonaws.com"] + } + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `domain_name` - (Required) The domain name. +* `auth_mode` - (Required) The mode of authentication that members use to access the domain. Valid values are `IAM` and `SSO`. +* `vpc_id` - (Required) The ID of the Amazon Virtual Private Cloud (VPC) that Studio uses for communication. +* `subnet_ids` - (Required) The VPC subnets that Studio uses for communication. +* `default_user_settings` - (Required) The default user settings. see [Default User Settings](#default-user-settings) below. +* `app_network_access_type` - (Optional) Specifies the VPC used for non-EFS traffic. The default value is `PublicInternetOnly`. Valid values are `PublicInternetOnly` and `VpcOnly`. +* `home_efs_file_system_kms_key_id` - (Optional) The AWS Key Management Service (KMS) encryption key ARN. +* `tags` - (Optional) A map of tags to assign to the resource. + +### Default User Settings + +* `execution_role` - (Required) The execution role ARN for the user. +* `security_groups` - (Optional) The security groups. +* `sharing_settings` - (Optional) The sharing settings. see [Sharing Settings](#sharing-settings) below. +* `tensor_board_app_settings` - (Optional) The TensorBoard app settings. see [TensorBoard App Settings](#tensorboard-app-settings) below. +* `jupyter_server_app_settings` - (Optional) The kernel gateway app settings. see [Jupyter Server App Settings](#jupyter-server-app-settings) below. +* `kernel_gateway_app_settings` - (Optional) The Jupyter server's app settings. see [Kernel Gateway App Settings](#kernal-gateway-app-settings) below. + +#### Sharing Settings + +* `notebook_output_option` - (Optional) Whether to include the notebook cell output when sharing the notebook. The default is `Disabled`. Valid values are `Allowed` and `Disabled`. +* `s3_kms_key_id` - (Optional) When `notebook_output_option` is Allowed, the AWS Key Management Service (KMS) encryption key ID used to encrypt the notebook cell output in the Amazon S3 bucket. +* `s3_output_path` - (Optional) When `notebook_output_option` is Allowed, the Amazon S3 bucket used to save the notebook cell output. + +#### TensorBoard App Settings + +* `default_resource_spec` - (Optional) The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see [Default Resource Spec](#default-resource-spec) below. + +#### Kernel Gateway App Settings + +* `default_resource_spec` - (Optional) The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see [Default Resource Spec](#default-resource-spec) below. + +#### Jupyter Server App Settings + +* `default_resource_spec` - (Optional) The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see [Default Resource Spec](#default-resource-spec) below. 
+ +##### Default Resource Spec + +* `instance_type` - (Optional) The instance type. +* `sagemaker_image_arn` - (Optional) The Amazon Resource Name (ARN) of the SageMaker image created on the instance. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The ID of the Domain. +* `arn` - The Amazon Resource Name (ARN) assigned by AWS to this Domain. +* `url` - The domain's URL. +* `single_sign_on_managed_application_instance_id` - The SSO managed application instance ID. +* `home_efs_file_system_id` - The ID of the Amazon Elastic File System (EFS) managed by this Domain. + + +## Import + +Sagemaker Code Domains can be imported using the `id`, e.g. + +``` +$ terraform import aws_sagemaker_domain.test_domain d-8jgsjtilstu8 +``` From 979339c4c453d941e63d2c497dfb35b370fa3451 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 7 Nov 2020 01:00:17 +0200 Subject: [PATCH 0472/1212] fmt --- aws/resource_aws_sagemaker_domain_test.go | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go index 1ef239bce51..06ca2655c55 100644 --- a/aws/resource_aws_sagemaker_domain_test.go +++ b/aws/resource_aws_sagemaker_domain_test.go @@ -334,7 +334,6 @@ func testAccCheckAWSSagemakerDomainDeleteImplicitResources(n string) resource.Te func testAccAWSSagemakerDomainConfigBase(rName string) string { return fmt.Sprintf(` resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" tags = { @@ -398,8 +397,8 @@ resource "aws_sagemaker_domain" "test" { subnet_ids = [aws_subnet.test.id] default_user_settings { - execution_role = aws_iam_role.test.arn - security_groups = [aws_security_group.test.id] + execution_role = aws_iam_role.test.arn + security_groups = [aws_security_group.test.id] } } `, rName) @@ -422,8 +421,8 @@ resource "aws_sagemaker_domain" "test" { subnet_ids = [aws_subnet.test.id] default_user_settings { - execution_role = aws_iam_role.test.arn - security_groups = [aws_security_group.test.id, aws_security_group.test2.id] + execution_role = aws_iam_role.test.arn + security_groups = [aws_security_group.test.id, aws_security_group.test2.id] } } `, rName) @@ -500,8 +499,8 @@ resource "aws_sagemaker_domain" "test" { } tags = { - %[2]q = %[3]q - %[4]q = %[5]q + %[2]q = %[3]q + %[4]q = %[5]q } } `, rName, tagKey1, tagValue1, tagKey2, tagValue2) From cbd61bd5481792a3fff01549a09f40fdeb216ad4 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 7 Nov 2020 01:07:31 +0200 Subject: [PATCH 0473/1212] sg test fmt --- aws/resource_aws_sagemaker_domain_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go index 06ca2655c55..1fe27f220e8 100644 --- a/aws/resource_aws_sagemaker_domain_test.go +++ b/aws/resource_aws_sagemaker_domain_test.go @@ -398,7 +398,7 @@ resource "aws_sagemaker_domain" "test" { default_user_settings { execution_role = aws_iam_role.test.arn - security_groups = [aws_security_group.test.id] + security_groups = [aws_security_group.test.id] } } `, rName) @@ -422,7 +422,7 @@ resource "aws_sagemaker_domain" "test" { default_user_settings { execution_role = aws_iam_role.test.arn - security_groups = [aws_security_group.test.id, aws_security_group.test2.id] + security_groups = [aws_security_group.test.id, aws_security_group.test2.id] } } `, rName) From 00582269e931108fb62ed07d9404c85cc60d2b00 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 7 Nov 2020 01:23:26 +0200 Subject: 
[PATCH 0474/1212] delete implict sgs --- aws/resource_aws_sagemaker_domain_test.go | 30 +++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go index 1fe27f220e8..3078d1783b5 100644 --- a/aws/resource_aws_sagemaker_domain_test.go +++ b/aws/resource_aws_sagemaker_domain_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/efs" "github.com/aws/aws-sdk-go/service/sagemaker" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" @@ -297,6 +298,7 @@ func testAccCheckAWSSagemakerDomainDeleteImplicitResources(n string) resource.Te conn := testAccProvider.Meta().(*AWSClient).efsconn efsFsID := rs.Primary.Attributes["home_efs_file_system_id"] + vpcID := rs.Primary.Attributes["vpc_id"] resp, err := conn.DescribeMountTargets(&efs.DescribeMountTargetsInput{ FileSystemId: aws.String(efsFsID), @@ -327,6 +329,34 @@ func testAccCheckAWSSagemakerDomainDeleteImplicitResources(n string) resource.Te return fmt.Errorf("Sagemaker domain EFS file system (%s) failed to delete: %w", efsFsID, err) } + var filters []*ec2.Filter + filters = append(filters, &ec2.Filter{ + Name: aws.String("vpc-id"), + Values: aws.StringSlice([]string{vpcID}), + }) + + req := &ec2.DescribeSecurityGroupsInput{ + Filters: filters, + } + + ec2conn := testAccProvider.Meta().(*AWSClient).ec2conn + + sgResp, err := ec2conn.DescribeSecurityGroups(req) + if err != nil { + return fmt.Errorf("error reading security groups: %w", err) + } + + for _, sg := range sgResp.SecurityGroups { + sgID := aws.StringValue(sg.GroupId) + r := resourceAwsSecurityGroup() + d := r.Data(nil) + d.SetId(sgID) + err = r.Delete(d, testAccProvider.Meta()) + if err != nil { + return fmt.Errorf("Sagemaker domain EFS file system sg (%s) failed to delete: %w", sgID, err) + } + } + return nil } } From 2f4fedf3a74bff0f0e0906b77178041179c5b10d Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 7 Nov 2020 13:10:01 +0200 Subject: [PATCH 0475/1212] tests are passing --- aws/resource_aws_sagemaker_domain_test.go | 47 +++++++++++++++++++---- 1 file changed, 39 insertions(+), 8 deletions(-) diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go index 3078d1783b5..ae65771b2ef 100644 --- a/aws/resource_aws_sagemaker_domain_test.go +++ b/aws/resource_aws_sagemaker_domain_test.go @@ -346,14 +346,45 @@ func testAccCheckAWSSagemakerDomainDeleteImplicitResources(n string) resource.Te return fmt.Errorf("error reading security groups: %w", err) } + //revoke permissions for _, sg := range sgResp.SecurityGroups { sgID := aws.StringValue(sg.GroupId) - r := resourceAwsSecurityGroup() - d := r.Data(nil) - d.SetId(sgID) - err = r.Delete(d, testAccProvider.Meta()) - if err != nil { - return fmt.Errorf("Sagemaker domain EFS file system sg (%s) failed to delete: %w", sgID, err) + + if len(sg.IpPermissions) > 0 { + req := &ec2.RevokeSecurityGroupIngressInput{ + GroupId: sg.GroupId, + IpPermissions: sg.IpPermissions, + } + _, err = ec2conn.RevokeSecurityGroupIngress(req) + + if err != nil { + return fmt.Errorf("Error revoking security group %s rules: %w", sgID, err) + } + } + + if len(sg.IpPermissionsEgress) > 0 { + req := &ec2.RevokeSecurityGroupEgressInput{ + GroupId: sg.GroupId, + IpPermissions: sg.IpPermissionsEgress, + } + _, err = ec2conn.RevokeSecurityGroupEgress(req) + + if err != nil { + return fmt.Errorf("Error revoking security group %s rules: 
%w", sgID, err) + } + } + } + + for _, sg := range sgResp.SecurityGroups { + sgID := aws.StringValue(sg.GroupId) + if aws.StringValue(sg.GroupName) != "default" { + r := resourceAwsSecurityGroup() + d := r.Data(nil) + d.SetId(sgID) + err = r.Delete(d, testAccProvider.Meta()) + if err != nil { + return fmt.Errorf("Sagemaker domain EFS file system sg (%s) failed to delete: %w", sgID, err) + } } } @@ -428,7 +459,7 @@ resource "aws_sagemaker_domain" "test" { default_user_settings { execution_role = aws_iam_role.test.arn - security_groups = [aws_security_group.test.id] + security_groups = [aws_security_sg.test.id] } } `, rName) @@ -452,7 +483,7 @@ resource "aws_sagemaker_domain" "test" { default_user_settings { execution_role = aws_iam_role.test.arn - security_groups = [aws_security_group.test.id, aws_security_group.test2.id] + security_groups = [aws_security_sg.test.id, aws_security_sg.test2.id] } } `, rName) From 8d67e972a7e3d8ae3c4e307318160b10f39e2312 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 7 Nov 2020 14:13:19 +0200 Subject: [PATCH 0476/1212] fix tags --- aws/resource_aws_sagemaker_domain.go | 43 ++++++++++++++++++----- aws/resource_aws_sagemaker_domain_test.go | 10 +++--- 2 files changed, 41 insertions(+), 12 deletions(-) diff --git a/aws/resource_aws_sagemaker_domain.go b/aws/resource_aws_sagemaker_domain.go index c822aa84d79..87291d0e4a5 100644 --- a/aws/resource_aws_sagemaker_domain.go +++ b/aws/resource_aws_sagemaker_domain.go @@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go/service/sagemaker" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/finder" "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/waiter" ) @@ -231,6 +232,10 @@ func resourceAwsSagemakerDomainCreate(d *schema.ResourceData, meta interface{}) input.HomeEfsFileSystemKmsKeyId = aws.String(v.(string)) } + if v, ok := d.GetOk("tags"); ok { + input.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().SagemakerTags() + } + log.Printf("[DEBUG] sagemaker domain create config: %#v", *input) output, err := conn.CreateDomain(input) if err != nil { @@ -254,6 +259,7 @@ func resourceAwsSagemakerDomainCreate(d *schema.ResourceData, meta interface{}) func resourceAwsSagemakerDomainRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).sagemakerconn + ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig domain, err := finder.DomainByName(conn, d.Id()) if err != nil { @@ -265,10 +271,11 @@ func resourceAwsSagemakerDomainRead(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("error reading SageMaker domain (%s): %w", d.Id(), err) } + arn := aws.StringValue(domain.DomainArn) d.Set("domain_name", domain.DomainName) d.Set("auth_mode", domain.AuthMode) d.Set("app_network_access_type", domain.AppNetworkAccessType) - d.Set("arn", domain.DomainArn) + d.Set("arn", arn) d.Set("home_efs_file_system_id", domain.HomeEfsFileSystemId) d.Set("home_efs_file_system_kms_key_id", domain.HomeEfsFileSystemKmsKeyId) d.Set("single_sign_on_managed_application_instance_id", domain.SingleSignOnManagedApplicationInstanceId) @@ -283,21 +290,41 @@ func resourceAwsSagemakerDomainRead(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("error setting default_user_settings for sagemaker domain (%s): %w", d.Id(), 
err) } + tags, err := keyvaluetags.SagemakerListTags(conn, arn) + + if err != nil { + return fmt.Errorf("error listing tags for Sagemaker Domain (%s): %w", d.Id(), err) + } + + if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + return fmt.Errorf("error setting tags: %w", err) + } + return nil } func resourceAwsSagemakerDomainUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).sagemakerconn - input := &sagemaker.UpdateDomainInput{ - DomainId: aws.String(d.Id()), - DefaultUserSettings: expandSagemakerDomainDefaultUserSettings(d.Get("default_user_settings").([]interface{})), + if d.HasChange("default_user_settings") { + input := &sagemaker.UpdateDomainInput{ + DomainId: aws.String(d.Id()), + DefaultUserSettings: expandSagemakerDomainDefaultUserSettings(d.Get("default_user_settings").([]interface{})), + } + + log.Printf("[DEBUG] sagemaker domain update config: %#v", *input) + _, err := conn.UpdateDomain(input) + if err != nil { + return fmt.Errorf("error updating SageMaker domain: %w", err) + } } - log.Printf("[DEBUG] sagemaker domain update config: %#v", *input) - _, err := conn.UpdateDomain(input) - if err != nil { - return fmt.Errorf("error updating SageMaker domain: %w", err) + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.SagemakerUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating Sagemaker Notebook Instance (%s) tags: %s", d.Id(), err) + } } return resourceAwsSagemakerDomainRead(d, meta) diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go index ae65771b2ef..62ee3a685c2 100644 --- a/aws/resource_aws_sagemaker_domain_test.go +++ b/aws/resource_aws_sagemaker_domain_test.go @@ -4,6 +4,7 @@ import ( "fmt" "log" "regexp" + "strings" "testing" "github.com/aws/aws-sdk-go/aws" @@ -305,7 +306,7 @@ func testAccCheckAWSSagemakerDomainDeleteImplicitResources(n string) resource.Te }) if err != nil { - return fmt.Errorf("Sagemaker domain EFS mount targets not found: %w", err) + return fmt.Errorf("Sagemaker domain EFS mount targets for EFS FS (%s) not found: %w", efsFsID, err) } //reusing EFS mount target delete for wait logic @@ -377,7 +378,8 @@ func testAccCheckAWSSagemakerDomainDeleteImplicitResources(n string) resource.Te for _, sg := range sgResp.SecurityGroups { sgID := aws.StringValue(sg.GroupId) - if aws.StringValue(sg.GroupName) != "default" { + sgName := aws.StringValue(sg.GroupName) + if sgName != "default" && strings.HasPrefix(sgName, "tf-acc-test") { r := resourceAwsSecurityGroup() d := r.Data(nil) d.SetId(sgID) @@ -459,7 +461,7 @@ resource "aws_sagemaker_domain" "test" { default_user_settings { execution_role = aws_iam_role.test.arn - security_groups = [aws_security_sg.test.id] + security_groups = [aws_security_group.test.id] } } `, rName) @@ -483,7 +485,7 @@ resource "aws_sagemaker_domain" "test" { default_user_settings { execution_role = aws_iam_role.test.arn - security_groups = [aws_security_sg.test.id, aws_security_sg.test2.id] + security_groups = [aws_security_group.test.id, aws_security_group.test2.id] } } `, rName) From 71b63853419014b0def95cc01b3cf71d4d684d74 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 7 Nov 2020 14:39:50 +0200 Subject: [PATCH 0477/1212] fix filter for implict sgs --- aws/resource_aws_sagemaker_domain_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go 
index 62ee3a685c2..54e666ba7fe 100644 --- a/aws/resource_aws_sagemaker_domain_test.go +++ b/aws/resource_aws_sagemaker_domain_test.go @@ -379,7 +379,7 @@ func testAccCheckAWSSagemakerDomainDeleteImplicitResources(n string) resource.Te for _, sg := range sgResp.SecurityGroups { sgID := aws.StringValue(sg.GroupId) sgName := aws.StringValue(sg.GroupName) - if sgName != "default" && strings.HasPrefix(sgName, "tf-acc-test") { + if sgName != "default" && !strings.HasPrefix(sgName, "tf-acc-test") { r := resourceAwsSecurityGroup() d := r.Data(nil) d.SetId(sgID) From 6d1b9ba87b05172f09cca7cb5fca31e871b74f6c Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 7 Nov 2020 17:02:21 +0200 Subject: [PATCH 0478/1212] remove kms key attributes and tests --- aws/resource_aws_sagemaker_domain.go | 11 ---- aws/resource_aws_sagemaker_domain_test.go | 66 ------------------- website/docs/r/sagemaker_domain.html.markdown | 1 - 3 files changed, 78 deletions(-) diff --git a/aws/resource_aws_sagemaker_domain.go b/aws/resource_aws_sagemaker_domain.go index 87291d0e4a5..2aa1c2caa76 100644 --- a/aws/resource_aws_sagemaker_domain.go +++ b/aws/resource_aws_sagemaker_domain.go @@ -65,12 +65,6 @@ func resourceAwsSagemakerDomain() *schema.Resource { Default: sagemaker.AppNetworkAccessTypePublicInternetOnly, ValidateFunc: validation.StringInSlice(sagemaker.AppNetworkAccessType_Values(), false), }, - "home_efs_file_system_kms_key_id": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - ValidateFunc: validateArn, - }, "default_user_settings": { Type: schema.TypeList, Required: true, @@ -228,10 +222,6 @@ func resourceAwsSagemakerDomainCreate(d *schema.ResourceData, meta interface{}) DefaultUserSettings: expandSagemakerDomainDefaultUserSettings(d.Get("default_user_settings").([]interface{})), } - if v, ok := d.GetOk("home_efs_file_system_kms_key_id"); ok { - input.HomeEfsFileSystemKmsKeyId = aws.String(v.(string)) - } - if v, ok := d.GetOk("tags"); ok { input.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().SagemakerTags() } @@ -277,7 +267,6 @@ func resourceAwsSagemakerDomainRead(d *schema.ResourceData, meta interface{}) er d.Set("app_network_access_type", domain.AppNetworkAccessType) d.Set("arn", arn) d.Set("home_efs_file_system_id", domain.HomeEfsFileSystemId) - d.Set("home_efs_file_system_kms_key_id", domain.HomeEfsFileSystemKmsKeyId) d.Set("single_sign_on_managed_application_instance_id", domain.SingleSignOnManagedApplicationInstanceId) d.Set("url", domain.Url) d.Set("vpc_id", domain.VpcId) diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go index 54e666ba7fe..b443380e706 100644 --- a/aws/resource_aws_sagemaker_domain_test.go +++ b/aws/resource_aws_sagemaker_domain_test.go @@ -105,33 +105,6 @@ func TestAccAWSSagemakerDomain_basic(t *testing.T) { }) } -func TestAccAWSSagemakerDomain_efsKms(t *testing.T) { - var notebook sagemaker.DescribeDomainOutput - rName := acctest.RandomWithPrefix("tf-acc-test") - resourceName := "aws_sagemaker_domain.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSagemakerDomainDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSagemakerDomainConfigEFSKMS(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerDomainExists(resourceName, ¬ebook), - resource.TestCheckResourceAttrPair(resourceName, "home_efs_file_system_kms_key_id", "aws_kms_key.test", "arn"), - 
testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccAWSSagemakerDomain_tags(t *testing.T) { var notebook sagemaker.DescribeDomainOutput rName := acctest.RandomWithPrefix("tf-acc-test") @@ -491,45 +464,6 @@ resource "aws_sagemaker_domain" "test" { `, rName) } -func testAccAWSSagemakerDomainConfigEFSKMS(rName string) string { - return testAccAWSSagemakerDomainConfigBase(rName) + fmt.Sprintf(` -resource "aws_kms_key" "test" { - description = "Terraform acc test %s" - deletion_window_in_days = 7 - - policy = < Date: Sat, 7 Nov 2020 18:45:03 +0200 Subject: [PATCH 0479/1212] share settings --- aws/resource_aws_sagemaker_domain.go | 5 +- aws/resource_aws_sagemaker_domain_test.go | 65 +++++++++++++++++++++++ 2 files changed, 68 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_sagemaker_domain.go b/aws/resource_aws_sagemaker_domain.go index 2aa1c2caa76..1611431b849 100644 --- a/aws/resource_aws_sagemaker_domain.go +++ b/aws/resource_aws_sagemaker_domain.go @@ -85,6 +85,7 @@ func resourceAwsSagemakerDomain() *schema.Resource { "sharing_settings": { Type: schema.TypeList, Optional: true, + ForceNew: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -370,7 +371,7 @@ func expandSagemakerDomainDefaultUserSettings(l []interface{}) *sagemaker.UserSe config.JupyterServerAppSettings = expandSagemakerDomainJupyterServerAppSettings(v) } - if v, ok := m["share_settings"].([]interface{}); ok && len(v) > 0 { + if v, ok := m["sharing_settings"].([]interface{}); ok && len(v) > 0 { config.SharingSettings = expandSagemakerDomainShareSettings(v) } @@ -495,7 +496,7 @@ func flattenSagemakerDomainDefaultUserSettings(config *sagemaker.UserSettings) [ } if config.SharingSettings != nil { - m["share_settings"] = flattenSagemakerDomainShareSettings(config.SharingSettings) + m["sharing_settings"] = flattenSagemakerDomainShareSettings(config.SharingSettings) } return []map[string]interface{}{m} diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go index b443380e706..576661911b9 100644 --- a/aws/resource_aws_sagemaker_domain_test.go +++ b/aws/resource_aws_sagemaker_domain_test.go @@ -88,6 +88,8 @@ func TestAccAWSSagemakerDomain_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", "1"), resource.TestCheckResourceAttr(resourceName, "default_user_settings.#", "1"), resource.TestCheckResourceAttrPair(resourceName, "default_user_settings.0.execution_role", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.sharing_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.sharing_settings.0.notebook_output_option", "Disabled"), testAccMatchResourceAttrRegionalARN(resourceName, "arn", "sagemaker", regexp.MustCompile(`domain/.+`)), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestCheckResourceAttrPair(resourceName, "vpc_id", "aws_vpc.test", "id"), @@ -186,6 +188,37 @@ func TestAccAWSSagemakerDomain_securityGroup(t *testing.T) { }) } +func TestAccAWSSagemakerDomain_sharingSettings(t *testing.T) { + var notebook sagemaker.DescribeDomainOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_domain.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: 
testAccCheckAWSSagemakerDomainDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerDomainConfigSharingSettings(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerDomainExists(resourceName, ¬ebook), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.sharing_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.sharing_settings.0.notebook_output_option", "Allowed"), + resource.TestCheckResourceAttrPair(resourceName, "default_user_settings.0.sharing_settings.0.s3_kms_key_id", "aws_kms_key.test", "arn"), + resource.TestCheckResourceAttrSet(resourceName, "default_user_settings.0.sharing_settings.0.s3_output_path"), + testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAWSSagemakerDomain_disappears(t *testing.T) { var notebook sagemaker.DescribeDomainOutput rName := acctest.RandomWithPrefix("tf-acc-test") @@ -502,3 +535,35 @@ resource "aws_sagemaker_domain" "test" { } `, rName, tagKey1, tagValue1, tagKey2, tagValue2) } + +func testAccAWSSagemakerDomainConfigSharingSettings(rName string) string { + return testAccAWSSagemakerDomainConfigBase(rName) + fmt.Sprintf(` +resource "aws_kms_key" "test" { + description = %[1]q + deletion_window_in_days = 7 +} + + +resource "aws_s3_bucket" "test" { + bucket = %[1]q + force_destroy = true +} + +resource "aws_sagemaker_domain" "test" { + domain_name = %[1]q + auth_mode = "IAM" + vpc_id = aws_vpc.test.id + subnet_ids = [aws_subnet.test.id] + + default_user_settings { + execution_role = aws_iam_role.test.arn + + sharing_settings { + notebook_output_option = "Allowed" + s3_kms_key_id = aws_kms_key.test.arn + s3_output_path = "s3://${aws_s3_bucket.test.bucket}/sharing" + } + } +} +`, rName) +} From 7f98573ede8338045926bb3fd2605e6c9b8cfdea Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 7 Nov 2020 23:10:56 +0200 Subject: [PATCH 0480/1212] fix basic test --- aws/resource_aws_sagemaker_domain_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go index 576661911b9..6d80a6b2704 100644 --- a/aws/resource_aws_sagemaker_domain_test.go +++ b/aws/resource_aws_sagemaker_domain_test.go @@ -88,8 +88,6 @@ func TestAccAWSSagemakerDomain_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", "1"), resource.TestCheckResourceAttr(resourceName, "default_user_settings.#", "1"), resource.TestCheckResourceAttrPair(resourceName, "default_user_settings.0.execution_role", "aws_iam_role.test", "arn"), - resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.sharing_settings.#", "1"), - resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.sharing_settings.0.notebook_output_option", "Disabled"), testAccMatchResourceAttrRegionalARN(resourceName, "arn", "sagemaker", regexp.MustCompile(`domain/.+`)), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestCheckResourceAttrPair(resourceName, "vpc_id", "aws_vpc.test", "id"), From d810be1e2a7c66df5e6fd7cc6ac1420e64afdc40 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 7 Nov 2020 23:32:36 +0200 Subject: [PATCH 0481/1212] fix default_resource_spec --- aws/resource_aws_sagemaker_domain.go | 12 +++--- aws/resource_aws_sagemaker_domain_test.go | 51 
+++++++++++++++++++++++ 2 files changed, 57 insertions(+), 6 deletions(-) diff --git a/aws/resource_aws_sagemaker_domain.go b/aws/resource_aws_sagemaker_domain.go index 1611431b849..1bf7068cefb 100644 --- a/aws/resource_aws_sagemaker_domain.go +++ b/aws/resource_aws_sagemaker_domain.go @@ -387,7 +387,7 @@ func expandSagemakerDomainJupyterServerAppSettings(l []interface{}) *sagemaker.J config := &sagemaker.JupyterServerAppSettings{} - if v, ok := m["default_resurce_spec"].([]interface{}); ok && len(v) > 0 { + if v, ok := m["default_resource_spec"].([]interface{}); ok && len(v) > 0 { config.DefaultResourceSpec = expandSagemakerDomainDefaultResourceSpec(v) } @@ -403,7 +403,7 @@ func expandSagemakerDomainKernelGatewayAppSettings(l []interface{}) *sagemaker.K config := &sagemaker.KernelGatewayAppSettings{} - if v, ok := m["default_resurce_spec"].([]interface{}); ok && len(v) > 0 { + if v, ok := m["default_resource_spec"].([]interface{}); ok && len(v) > 0 { config.DefaultResourceSpec = expandSagemakerDomainDefaultResourceSpec(v) } @@ -419,7 +419,7 @@ func expandSagemakerDomainTensorBoardAppSettings(l []interface{}) *sagemaker.Ten config := &sagemaker.TensorBoardAppSettings{} - if v, ok := m["default_resurce_spec"].([]interface{}); ok && len(v) > 0 { + if v, ok := m["default_resource_spec"].([]interface{}); ok && len(v) > 0 { config.DefaultResourceSpec = expandSagemakerDomainDefaultResourceSpec(v) } @@ -528,7 +528,7 @@ func flattenSagemakerDomainTensorBoardAppSettings(config *sagemaker.TensorBoardA m := map[string]interface{}{} if config.DefaultResourceSpec != nil { - m["default_resurce_spec"] = flattenSagemakerDomainDefaultResourceSpec(config.DefaultResourceSpec) + m["default_resource_spec"] = flattenSagemakerDomainDefaultResourceSpec(config.DefaultResourceSpec) } return []map[string]interface{}{m} @@ -542,7 +542,7 @@ func flattenSagemakerDomainJupyterServerAppSettings(config *sagemaker.JupyterSer m := map[string]interface{}{} if config.DefaultResourceSpec != nil { - m["default_resurce_spec"] = flattenSagemakerDomainDefaultResourceSpec(config.DefaultResourceSpec) + m["default_resource_spec"] = flattenSagemakerDomainDefaultResourceSpec(config.DefaultResourceSpec) } return []map[string]interface{}{m} @@ -556,7 +556,7 @@ func flattenSagemakerDomainKernelGatewayAppSettings(config *sagemaker.KernelGate m := map[string]interface{}{} if config.DefaultResourceSpec != nil { - m["default_resurce_spec"] = flattenSagemakerDomainDefaultResourceSpec(config.DefaultResourceSpec) + m["default_resource_spec"] = flattenSagemakerDomainDefaultResourceSpec(config.DefaultResourceSpec) } return []map[string]interface{}{m} diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go index 6d80a6b2704..bcd33a82b28 100644 --- a/aws/resource_aws_sagemaker_domain_test.go +++ b/aws/resource_aws_sagemaker_domain_test.go @@ -217,6 +217,36 @@ func TestAccAWSSagemakerDomain_sharingSettings(t *testing.T) { }) } +func TestAccAWSSagemakerDomain_tensorboardAppSettings(t *testing.T) { + var notebook sagemaker.DescribeDomainOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_domain.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerDomainDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerDomainConfigTensorBoardAppSettings(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerDomainExists(resourceName, 
¬ebook), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.tensor_board_app_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.tensor_board_app_settings.default_resource_spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.tensor_board_app_settings.default_resource_spec.instance_type", "ml.t3.micro"), + testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAWSSagemakerDomain_disappears(t *testing.T) { var notebook sagemaker.DescribeDomainOutput rName := acctest.RandomWithPrefix("tf-acc-test") @@ -565,3 +595,24 @@ resource "aws_sagemaker_domain" "test" { } `, rName) } + +func testAccAWSSagemakerDomainConfigTensorBoardAppSettings(rName string) string { + return testAccAWSSagemakerDomainConfigBase(rName) + fmt.Sprintf(` +resource "aws_sagemaker_domain" "test" { + domain_name = %[1]q + auth_mode = "IAM" + vpc_id = aws_vpc.test.id + subnet_ids = [aws_subnet.test.id] + + default_user_settings { + execution_role = aws_iam_role.test.arn + + tensor_board_app_settings { + default_resource_spec { + instance_type = "ml.t3.micro" + } + } + } +} +`, rName) +} From dd67c32b62e21ea9dd60374343d68b4b22a4643d Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 7 Nov 2020 23:55:49 +0200 Subject: [PATCH 0482/1212] force new --- aws/resource_aws_sagemaker_domain.go | 3 +++ aws/resource_aws_sagemaker_domain_test.go | 8 ++++---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_sagemaker_domain.go b/aws/resource_aws_sagemaker_domain.go index 1bf7068cefb..e2b4da82367 100644 --- a/aws/resource_aws_sagemaker_domain.go +++ b/aws/resource_aws_sagemaker_domain.go @@ -110,6 +110,7 @@ func resourceAwsSagemakerDomain() *schema.Resource { "tensor_board_app_settings": { Type: schema.TypeList, Optional: true, + ForceNew: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -138,6 +139,7 @@ func resourceAwsSagemakerDomain() *schema.Resource { "jupyter_server_app_settings": { Type: schema.TypeList, Optional: true, + ForceNew: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -166,6 +168,7 @@ func resourceAwsSagemakerDomain() *schema.Resource { "kernel_gateway_app_settings": { Type: schema.TypeList, Optional: true, + ForceNew: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go index bcd33a82b28..d2a38907c0f 100644 --- a/aws/resource_aws_sagemaker_domain_test.go +++ b/aws/resource_aws_sagemaker_domain_test.go @@ -233,9 +233,9 @@ func TestAccAWSSagemakerDomain_tensorboardAppSettings(t *testing.T) { testAccCheckAWSSagemakerDomainExists(resourceName, ¬ebook), resource.TestCheckResourceAttr(resourceName, "default_user_settings.#", "1"), resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.tensor_board_app_settings.#", "1"), - resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.tensor_board_app_settings.default_resource_spec.#", "1"), - resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.tensor_board_app_settings.default_resource_spec.instance_type", "ml.t3.micro"), - testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), + 
resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.tensor_board_app_settings.0.default_resource_spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.tensor_board_app_settings.0.default_resource_spec.0.instance_type", "ml.t3.micro"), + // testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), ), }, { @@ -609,7 +609,7 @@ resource "aws_sagemaker_domain" "test" { tensor_board_app_settings { default_resource_spec { - instance_type = "ml.t3.micro" + instance_type = "ml.t3.micro" } } } From 07a393d1b02c5bd87b50ab46d968ed122bf6015e Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 8 Nov 2020 00:01:02 +0200 Subject: [PATCH 0483/1212] add last tests --- aws/resource_aws_sagemaker_domain_test.go | 104 +++++++++++++++++++++- 1 file changed, 103 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go index d2a38907c0f..970a0e3d467 100644 --- a/aws/resource_aws_sagemaker_domain_test.go +++ b/aws/resource_aws_sagemaker_domain_test.go @@ -235,7 +235,67 @@ func TestAccAWSSagemakerDomain_tensorboardAppSettings(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.tensor_board_app_settings.#", "1"), resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.tensor_board_app_settings.0.default_resource_spec.#", "1"), resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.tensor_board_app_settings.0.default_resource_spec.0.instance_type", "ml.t3.micro"), - // testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), + testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSagemakerDomain_kernelGatewayAppSettings(t *testing.T) { + var notebook sagemaker.DescribeDomainOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_domain.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerDomainDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerDomainConfigKernelGatewayAppSettings(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerDomainExists(resourceName, ¬ebook), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.kernel_gateway_app_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.kernel_gateway_app_settings.0.default_resource_spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.kernel_gateway_app_settings.0.default_resource_spec.0.instance_type", "ml.t3.micro"), + testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSagemakerDomain_jupyterServerAppSettings(t *testing.T) { + var notebook sagemaker.DescribeDomainOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_domain.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerDomainDestroy, + Steps: []resource.TestStep{ + { + Config: 
testAccAWSSagemakerDomainConfigJupyterServerAppSettings(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerDomainExists(resourceName, ¬ebook), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.jupyter_server_app_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.jupyter_server_app_settings.0.default_resource_spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.jupyter_server_app_settings.0.default_resource_spec.0.instance_type", "ml.t3.micro"), + testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), ), }, { @@ -616,3 +676,45 @@ resource "aws_sagemaker_domain" "test" { } `, rName) } + +func testAccAWSSagemakerDomainConfigJupyterServerAppSettings(rName string) string { + return testAccAWSSagemakerDomainConfigBase(rName) + fmt.Sprintf(` +resource "aws_sagemaker_domain" "test" { + domain_name = %[1]q + auth_mode = "IAM" + vpc_id = aws_vpc.test.id + subnet_ids = [aws_subnet.test.id] + + default_user_settings { + execution_role = aws_iam_role.test.arn + + jupyter_server_app_settings { + default_resource_spec { + instance_type = "ml.t3.micro" + } + } + } +} +`, rName) +} + +func testAccAWSSagemakerDomainConfigKernelGatewayAppSettings(rName string) string { + return testAccAWSSagemakerDomainConfigBase(rName) + fmt.Sprintf(` +resource "aws_sagemaker_domain" "test" { + domain_name = %[1]q + auth_mode = "IAM" + vpc_id = aws_vpc.test.id + subnet_ids = [aws_subnet.test.id] + + default_user_settings { + execution_role = aws_iam_role.test.arn + + kernel_gateway_app_settings { + default_resource_spec { + instance_type = "ml.t3.micro" + } + } + } +} +`, rName) +} From 8806d20ddbb5a32aaf3a0c537f81c860988b4f83 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 8 Nov 2020 11:32:26 +0200 Subject: [PATCH 0484/1212] fmt --- aws/resource_aws_sagemaker_domain_test.go | 24 +++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go index 970a0e3d467..eb3e4e612ee 100644 --- a/aws/resource_aws_sagemaker_domain_test.go +++ b/aws/resource_aws_sagemaker_domain_test.go @@ -644,13 +644,13 @@ resource "aws_sagemaker_domain" "test" { subnet_ids = [aws_subnet.test.id] default_user_settings { - execution_role = aws_iam_role.test.arn - + execution_role = aws_iam_role.test.arn + sharing_settings { notebook_output_option = "Allowed" s3_kms_key_id = aws_kms_key.test.arn s3_output_path = "s3://${aws_s3_bucket.test.bucket}/sharing" - } + } } } `, rName) @@ -665,10 +665,10 @@ resource "aws_sagemaker_domain" "test" { subnet_ids = [aws_subnet.test.id] default_user_settings { - execution_role = aws_iam_role.test.arn - + execution_role = aws_iam_role.test.arn + tensor_board_app_settings { - default_resource_spec { + default_resource_spec { instance_type = "ml.t3.micro" } } @@ -686,10 +686,10 @@ resource "aws_sagemaker_domain" "test" { subnet_ids = [aws_subnet.test.id] default_user_settings { - execution_role = aws_iam_role.test.arn - + execution_role = aws_iam_role.test.arn + jupyter_server_app_settings { - default_resource_spec { + default_resource_spec { instance_type = "ml.t3.micro" } } @@ -707,10 +707,10 @@ resource "aws_sagemaker_domain" "test" { subnet_ids = [aws_subnet.test.id] default_user_settings { - execution_role = aws_iam_role.test.arn - + execution_role = aws_iam_role.test.arn + 
kernel_gateway_app_settings { - default_resource_spec { + default_resource_spec { instance_type = "ml.t3.micro" } } From 4cf57ad6584df6ee642e5b70ffe88be3a8e31b13 Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Mon, 9 Nov 2020 17:05:06 +0200 Subject: [PATCH 0485/1212] Apply suggestions from code review Co-authored-by: Kit Ewbank --- aws/resource_aws_sagemaker_domain.go | 12 ++++++------ website/docs/r/sagemaker_domain.html.markdown | 10 +++++----- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/aws/resource_aws_sagemaker_domain.go b/aws/resource_aws_sagemaker_domain.go index e2b4da82367..ce08e69e66c 100644 --- a/aws/resource_aws_sagemaker_domain.go +++ b/aws/resource_aws_sagemaker_domain.go @@ -245,7 +245,7 @@ func resourceAwsSagemakerDomainCreate(d *schema.ResourceData, meta interface{}) d.SetId(domainID) if _, err := waiter.DomainInService(conn, d.Id()); err != nil { - return fmt.Errorf("error waiting for sagemaker domain (%s) to create: %w", d.Id(), err) + return fmt.Errorf("error waiting for SageMaker domain (%s) to create: %w", d.Id(), err) } return resourceAwsSagemakerDomainRead(d, meta) @@ -276,17 +276,17 @@ func resourceAwsSagemakerDomainRead(d *schema.ResourceData, meta interface{}) er d.Set("vpc_id", domain.VpcId) if err := d.Set("subnet_ids", flattenStringSet(domain.SubnetIds)); err != nil { - return fmt.Errorf("error setting subnet_ids for sagemaker domain (%s): %w", d.Id(), err) + return fmt.Errorf("error setting subnet_ids for SageMaker domain (%s): %w", d.Id(), err) } if err := d.Set("default_user_settings", flattenSagemakerDomainDefaultUserSettings(domain.DefaultUserSettings)); err != nil { - return fmt.Errorf("error setting default_user_settings for sagemaker domain (%s): %w", d.Id(), err) + return fmt.Errorf("error setting default_user_settings for SageMaker domain (%s): %w", d.Id(), err) } tags, err := keyvaluetags.SagemakerListTags(conn, arn) if err != nil { - return fmt.Errorf("error listing tags for Sagemaker Domain (%s): %w", d.Id(), err) + return fmt.Errorf("error listing tags for SageMaker Domain (%s): %w", d.Id(), err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { @@ -316,7 +316,7 @@ func resourceAwsSagemakerDomainUpdate(d *schema.ResourceData, meta interface{}) o, n := d.GetChange("tags") if err := keyvaluetags.SagemakerUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { - return fmt.Errorf("error updating Sagemaker Notebook Instance (%s) tags: %s", d.Id(), err) + return fmt.Errorf("error updating SageMaker domain (%s) tags: %w", d.Id(), err) } } @@ -338,7 +338,7 @@ func resourceAwsSagemakerDomainDelete(d *schema.ResourceData, meta interface{}) if _, err := waiter.DomainDeleted(conn, d.Id()); err != nil { if !isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "") { - return fmt.Errorf("error waiting for sagemaker domain (%s) to delete: %w", d.Id(), err) + return fmt.Errorf("error waiting for SageMaker domain (%s) to delete: %w", d.Id(), err) } } diff --git a/website/docs/r/sagemaker_domain.html.markdown b/website/docs/r/sagemaker_domain.html.markdown index 3235315d880..08830fdf22e 100644 --- a/website/docs/r/sagemaker_domain.html.markdown +++ b/website/docs/r/sagemaker_domain.html.markdown @@ -52,7 +52,7 @@ The following arguments are supported: * `auth_mode` - (Required) The mode of authentication that members use to access the domain. Valid values are `IAM` and `SSO`. * `vpc_id` - (Required) The ID of the Amazon Virtual Private Cloud (VPC) that Studio uses for communication. 
* `subnet_ids` - (Required) The VPC subnets that Studio uses for communication.
-* `default_user_settings` - (Required) The default user settings. see [Default User Settings](#default-user-settings) below.
+* `default_user_settings` - (Required) The default user settings. See [Default User Settings](#default-user-settings) below.
 * `app_network_access_type` - (Optional) Specifies the VPC used for non-EFS traffic. The default value is `PublicInternetOnly`. Valid values are `PublicInternetOnly` and `VpcOnly`.
 * `tags` - (Optional) A map of tags to assign to the resource.
 
@@ -60,10 +60,10 @@ The following arguments are supported:
 
 * `execution_role` - (Required) The execution role ARN for the user.
 * `security_groups` - (Optional) The security groups.
-* `sharing_settings` - (Optional) The sharing settings. see [Sharing Settings](#sharing-settings) below.
-* `tensor_board_app_settings` - (Optional) The TensorBoard app settings. see [TensorBoard App Settings](#tensorboard-app-settings) below.
-* `jupyter_server_app_settings` - (Optional) The kernel gateway app settings. see [Jupyter Server App Settings](#jupyter-server-app-settings) below.
-* `kernel_gateway_app_settings` - (Optional) The Jupyter server's app settings. see [Kernel Gateway App Settings](#kernal-gateway-app-settings) below.
+* `sharing_settings` - (Optional) The sharing settings. See [Sharing Settings](#sharing-settings) below.
+* `tensor_board_app_settings` - (Optional) The TensorBoard app settings. See [TensorBoard App Settings](#tensorboard-app-settings) below.
+* `jupyter_server_app_settings` - (Optional) The Jupyter server's app settings. See [Jupyter Server App Settings](#jupyter-server-app-settings) below.
+* `kernel_gateway_app_settings` - (Optional) The kernel gateway app settings. See [Kernel Gateway App Settings](#kernel-gateway-app-settings) below.
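
For orientation, a minimal `default_user_settings` block combining these arguments might look like the sketch below. The resource names and values are illustrative only; they mirror the acceptance-test configurations earlier in this series rather than an official example:

```hcl
default_user_settings {
  execution_role  = aws_iam_role.example.arn
  security_groups = [aws_security_group.example.id]

  sharing_settings {
    notebook_output_option = "Allowed"
    s3_kms_key_id          = aws_kms_key.example.arn
    s3_output_path         = "s3://${aws_s3_bucket.example.bucket}/sharing"
  }

  kernel_gateway_app_settings {
    default_resource_spec {
      instance_type = "ml.t3.micro"
    }
  }
}
```

Only `execution_role` is required; each nested block can be omitted.
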
#### Sharing Settings From 201716234b35bcf8bd92dc7b3d6cf04ae7b98f54 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Mon, 9 Nov 2020 21:13:32 +0200 Subject: [PATCH 0486/1212] add custom image --- aws/resource_aws_sagemaker_domain.go | 69 +++++++++++++++++++ website/docs/r/sagemaker_domain.html.markdown | 7 ++ 2 files changed, 76 insertions(+) diff --git a/aws/resource_aws_sagemaker_domain.go b/aws/resource_aws_sagemaker_domain.go index ce08e69e66c..d48cdd622f2 100644 --- a/aws/resource_aws_sagemaker_domain.go +++ b/aws/resource_aws_sagemaker_domain.go @@ -191,6 +191,27 @@ func resourceAwsSagemakerDomain() *schema.Resource { }, }, }, + "custom_image": { + Type: schema.TypeList, + Optional: true, + MaxItems: 30, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "app_image_config_name": { + Type: schema.TypeString, + Required: true, + }, + "image_name": { + Type: schema.TypeString, + Required: true, + }, + "image_version_number": { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, }, }, }, @@ -410,6 +431,10 @@ func expandSagemakerDomainKernelGatewayAppSettings(l []interface{}) *sagemaker.K config.DefaultResourceSpec = expandSagemakerDomainDefaultResourceSpec(v) } + if v, ok := m["custom_image"].([]interface{}); ok && len(v) > 0 { + config.CustomImages = expandSagemakerDomainCustomImages(v) + } + return config } @@ -471,6 +496,27 @@ func expandSagemakerDomainShareSettings(l []interface{}) *sagemaker.SharingSetti return config } +func expandSagemakerDomainCustomImages(l []interface{}) []*sagemaker.CustomImage { + images := make([]*sagemaker.CustomImage, 0, len(l)) + + for _, eRaw := range l { + data := eRaw.(map[string]interface{}) + + image := &sagemaker.CustomImage{ + AppImageConfigName: aws.String(data["app_image_config_name"].(string)), + ImageName: aws.String(data["image_name"].(string)), + } + + if v, ok := data["image_version_number"].(int); ok { + image.ImageVersionNumber = aws.Int64(int64(v)) + } + + images = append(images, image) + } + + return images +} + func flattenSagemakerDomainDefaultUserSettings(config *sagemaker.UserSettings) []map[string]interface{} { if config == nil { return []map[string]interface{}{} @@ -562,6 +608,10 @@ func flattenSagemakerDomainKernelGatewayAppSettings(config *sagemaker.KernelGate m["default_resource_spec"] = flattenSagemakerDomainDefaultResourceSpec(config.DefaultResourceSpec) } + if config.CustomImages != nil { + m["custom_image"] = flattenSagemakerDomainCustomImages(config.CustomImages) + } + return []map[string]interface{}{m} } @@ -585,6 +635,25 @@ func flattenSagemakerDomainShareSettings(config *sagemaker.SharingSettings) []ma return []map[string]interface{}{m} } +func flattenSagemakerDomainCustomImages(config []*sagemaker.CustomImage) []map[string]interface{} { + images := make([]map[string]interface{}, 0, len(config)) + + for _, raw := range config { + image := make(map[string]interface{}) + + image["app_image_config_name"] = aws.StringValue(raw.AppImageConfigName) + image["image_name"] = aws.StringValue(raw.ImageName) + + if raw.ImageVersionNumber != nil { + image["image_version_number"] = aws.Int64Value(raw.ImageVersionNumber) + } + + images = append(images, image) + } + + return images +} + func decodeSagemakerDomainID(id string) (string, error) { domainArn, err := arn.Parse(id) if err != nil { diff --git a/website/docs/r/sagemaker_domain.html.markdown b/website/docs/r/sagemaker_domain.html.markdown index 08830fdf22e..03d07d2fe69 100644 --- a/website/docs/r/sagemaker_domain.html.markdown +++ 
b/website/docs/r/sagemaker_domain.html.markdown @@ -78,6 +78,7 @@ The following arguments are supported: #### Kernel Gateway App Settings * `default_resource_spec` - (Optional) The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see [Default Resource Spec](#default-resource-spec) below. +* `custom_image` - (Optional) A list of custom SageMaker images that are configured to run as a KernelGateway app. see [Custom Image](#custom-image) below. #### Jupyter Server App Settings @@ -88,6 +89,12 @@ The following arguments are supported: * `instance_type` - (Optional) The instance type. * `sagemaker_image_arn` - (Optional) The Amazon Resource Name (ARN) of the SageMaker image created on the instance. +##### Custom Image + +* `app_image_config_name` - (Required) The name of the App Image Config. +* `image_name` - (Required) The name of the Custom Image. +* `image_version_number` - (Optional) The version number of the Custom Image. + ## Attributes Reference The following attributes are exported: From 3bd91cfc021e3f089df09e95ccd25c65dbf3ed03 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Fri, 8 Jan 2021 11:07:37 -0800 Subject: [PATCH 0487/1212] Remove spurious file --- .nova/Tasks/acctest-terraform.json | 10 ---------- 1 file changed, 10 deletions(-) delete mode 100644 .nova/Tasks/acctest-terraform.json diff --git a/.nova/Tasks/acctest-terraform.json b/.nova/Tasks/acctest-terraform.json deleted file mode 100644 index 168593857ec..00000000000 --- a/.nova/Tasks/acctest-terraform.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "actions" : { - "run" : { - "enabled" : true, - "script" : "act -W .\/.github\/workflows\/acctest-terraform-lint.yml -j validate-terraform" - } - }, - "identifier" : "8F20BADE-1E64-441C-8311-51299FDBE717", - "openLogOnRun" : "start" -} From 59226f893403965f13c496b456e29c9f63f35ad3 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Fri, 8 Jan 2021 12:20:49 -0800 Subject: [PATCH 0488/1212] Fix formatting --- aws/resource_aws_fms_policy_test.go | 78 ++++++++++++------------- website/docs/r/fms_policy.html.markdown | 1 - 2 files changed, 39 insertions(+), 40 deletions(-) diff --git a/aws/resource_aws_fms_policy_test.go b/aws/resource_aws_fms_policy_test.go index db625adcf00..802aab68931 100644 --- a/aws/resource_aws_fms_policy_test.go +++ b/aws/resource_aws_fms_policy_test.go @@ -171,25 +171,25 @@ func testAccFmsPolicyConfig(name string, group string) string { return fmt.Sprintf(` resource "aws_fms_policy" "test" { exclude_resource_tags = false - name = %[1]q + name = "%[1]s" remediation_enabled = false resource_type_list = ["AWS::ElasticLoadBalancingV2::LoadBalancer"] - exclude_map { - account = [ data.aws_organizations_organization.example.accounts[0].id ] - } + exclude_map { + account = [data.aws_organizations_organization.example.accounts[0].id] + } security_service_policy_data { - type = "WAF" - managed_service_data = "{\"type\": \"WAF\", \"ruleGroups\": [{\"id\":\"${aws_wafregional_rule_group.test.id}\", \"overrideAction\" : {\"type\": \"COUNT\"}}],\"defaultAction\": {\"type\": \"BLOCK\"}, \"overrideCustomerWebACLAssociation\": false}" - } + type = "WAF" + managed_service_data = "{\"type\": \"WAF\", \"ruleGroups\": [{\"id\":\"${aws_wafregional_rule_group.test.id}\", \"overrideAction\" : {\"type\": \"COUNT\"}}],\"defaultAction\": {\"type\": \"BLOCK\"}, \"overrideCustomerWebACLAssociation\": false}" + } } data "aws_organizations_organization" "example" {} resource "aws_wafregional_rule_group" "test" { metric_name = "MyTest" - name 
= %[2]q + name = "%[2]s" } `, name, group) } @@ -198,34 +198,34 @@ func testAccFmsPolicyConfig_updated(name string, group string) string { return fmt.Sprintf(` resource "aws_fms_policy" "test" { exclude_resource_tags = false - name = %[1]q + name = "%[1]s" remediation_enabled = true resource_type_list = ["AWS::ElasticLoadBalancingV2::LoadBalancer"] - exclude_map { - account = [ data.aws_organizations_organization.example.accounts[0].id ] - } + exclude_map { + account = [data.aws_organizations_organization.example.accounts[0].id] + } security_service_policy_data { - type = "WAF" - managed_service_data = "{\"type\": \"WAF\", \"ruleGroups\": [{\"id\":\"${aws_wafregional_rule_group.test.id}\", \"overrideAction\" : {\"type\": \"COUNT\"}}],\"defaultAction\": {\"type\": \"ALLOW\"}, \"overrideCustomerWebACLAssociation\": false}" - } + type = "WAF" + managed_service_data = "{\"type\": \"WAF\", \"ruleGroups\": [{\"id\":\"${aws_wafregional_rule_group.test.id}\", \"overrideAction\" : {\"type\": \"COUNT\"}}],\"defaultAction\": {\"type\": \"ALLOW\"}, \"overrideCustomerWebACLAssociation\": false}" + } - lifecycle { - create_before_destroy = false - } + lifecycle { + create_before_destroy = false + } } data "aws_organizations_organization" "example" {} resource "aws_wafregional_rule_group" "test" { metric_name = "MyTest" - name = %[2]q + name = "%[2]s" } resource "aws_wafregional_rule_group" "test2" { metric_name = "MyTest2" - name = %[2]q + name = "%[2]s" } `, name, group) } @@ -234,25 +234,25 @@ func testAccFmsPolicyConfig_include(name string, group string) string { return fmt.Sprintf(` resource "aws_fms_policy" "test" { exclude_resource_tags = false - name = %[1]q + name = "%[1]s" remediation_enabled = false resource_type_list = ["AWS::ElasticLoadBalancingV2::LoadBalancer"] - include_map { - account = [ data.aws_organizations_organization.example.accounts[0].id ] - } + include_map { + account = [data.aws_organizations_organization.example.accounts[0].id] + } security_service_policy_data { - type = "WAF" - managed_service_data = "{\"type\": \"WAF\", \"ruleGroups\": [{\"id\":\"${aws_wafregional_rule_group.test.id}\", \"overrideAction\" : {\"type\": \"COUNT\"}}],\"defaultAction\": {\"type\": \"BLOCK\"}, \"overrideCustomerWebACLAssociation\": false}" - } + type = "WAF" + managed_service_data = "{\"type\": \"WAF\", \"ruleGroups\": [{\"id\":\"${aws_wafregional_rule_group.test.id}\", \"overrideAction\" : {\"type\": \"COUNT\"}}],\"defaultAction\": {\"type\": \"BLOCK\"}, \"overrideCustomerWebACLAssociation\": false}" + } } data "aws_organizations_organization" "example" {} resource "aws_wafregional_rule_group" "test" { metric_name = "MyTest" - name = %[2]q + name = "%[2]s" } `, name, group) } @@ -261,25 +261,25 @@ func testAccFmsPolicyConfig_tags(name string, group string) string { return fmt.Sprintf(` resource "aws_fms_policy" "test" { exclude_resource_tags = false - name = %[1]q + name = "%[1]s" remediation_enabled = false resource_type_list = ["AWS::ElasticLoadBalancingV2::LoadBalancer"] security_service_policy_data { - type = "WAF" - managed_service_data = "{\"type\": \"WAF\", \"ruleGroups\": [{\"id\":\"${aws_wafregional_rule_group.test.id}\", \"overrideAction\" : {\"type\": \"COUNT\"}}],\"defaultAction\": {\"type\": \"BLOCK\"}, \"overrideCustomerWebACLAssociation\": false}" - } + type = "WAF" + managed_service_data = "{\"type\": \"WAF\", \"ruleGroups\": [{\"id\":\"${aws_wafregional_rule_group.test.id}\", \"overrideAction\" : {\"type\": \"COUNT\"}}],\"defaultAction\": {\"type\": \"BLOCK\"}, 
\"overrideCustomerWebACLAssociation\": false}" + } resource_tags = { Environment = "Testing" - Usage= "original" + Usage = "original" } } resource "aws_wafregional_rule_group" "test" { metric_name = "MyTest" - name = %[2]q + name = "%[2]s" } `, name, group) } @@ -288,14 +288,14 @@ func testAccFmsPolicyConfig_tagsChanged(name string, group string) string { return fmt.Sprintf(` resource "aws_fms_policy" "test" { exclude_resource_tags = false - name = %[1]q + name = "%[1]s" remediation_enabled = false resource_type_list = ["AWS::ElasticLoadBalancingV2::LoadBalancer"] security_service_policy_data { - type = "WAF" - managed_service_data = "{\"type\": \"WAF\", \"ruleGroups\": [{\"id\":\"${aws_wafregional_rule_group.test.id}\", \"overrideAction\" : {\"type\": \"COUNT\"}}],\"defaultAction\": {\"type\": \"BLOCK\"}, \"overrideCustomerWebACLAssociation\": false}" - } + type = "WAF" + managed_service_data = "{\"type\": \"WAF\", \"ruleGroups\": [{\"id\":\"${aws_wafregional_rule_group.test.id}\", \"overrideAction\" : {\"type\": \"COUNT\"}}],\"defaultAction\": {\"type\": \"BLOCK\"}, \"overrideCustomerWebACLAssociation\": false}" + } resource_tags = { Usage = "changed" @@ -305,7 +305,7 @@ resource "aws_fms_policy" "test" { resource "aws_wafregional_rule_group" "test" { metric_name = "MyTest" - name = %[2]q + name = "%[2]s" } `, name, group) } diff --git a/website/docs/r/fms_policy.html.markdown b/website/docs/r/fms_policy.html.markdown index c65e1730483..8a9d55ed9a8 100644 --- a/website/docs/r/fms_policy.html.markdown +++ b/website/docs/r/fms_policy.html.markdown @@ -1,7 +1,6 @@ --- layout: "aws" page_title: "AWS: aws_fms_policy" -sidebar_current: "docs-aws-resource-fms-policy" description: |- Provides a resource to create an AWS Firewall Manager policy --- From 488b4098c8555e7187db47391ad139c007758082 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 3 Dec 2020 19:05:43 -0500 Subject: [PATCH 0489/1212] tests/eip: Fix hardcoded region --- aws/data_source_aws_eip.go | 9 ++- aws/resource_aws_eip.go | 18 ++--- aws/resource_aws_eip_association_test.go | 78 ++++++++++++++------- aws/resource_aws_eip_test.go | 89 ++++++++++++++++++------ 4 files changed, 133 insertions(+), 61 deletions(-) diff --git a/aws/data_source_aws_eip.go b/aws/data_source_aws_eip.go index 3fed18c62eb..339d0ba21ec 100644 --- a/aws/data_source_aws_eip.go +++ b/aws/data_source_aws_eip.go @@ -3,7 +3,6 @@ package aws import ( "fmt" "log" - "strings" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/endpoints" @@ -142,6 +141,7 @@ func dataSourceAwsEipRead(d *schema.ResourceData, meta interface{}) error { d.Set("private_ip", eip.PrivateIpAddress) if eip.PrivateIpAddress != nil { +<<<<<<< HEAD dashIP := strings.Replace(*eip.PrivateIpAddress, ".", "-", -1) if region == endpoints.UsEast1RegionID { @@ -149,10 +149,14 @@ func dataSourceAwsEipRead(d *schema.ResourceData, meta interface{}) error { } else { d.Set("private_dns", fmt.Sprintf("ip-%s.%s.compute.internal", dashIP, region)) } +======= + d.Set("private_dns", fmt.Sprintf("ip-%s.%s", resourceAwsEc2DashIP(*eip.PrivateIpAddress), resourceAwsEc2RegionalPrivateDnsSuffix(region))) +>>>>>>> beae41d73 (tests/eip: Fix hardcoded region) } d.Set("public_ip", eip.PublicIp) if eip.PublicIp != nil { +<<<<<<< HEAD dashIP := strings.Replace(*eip.PublicIp, ".", "-", -1) if region == endpoints.UsEast1RegionID { @@ -160,6 +164,9 @@ func dataSourceAwsEipRead(d *schema.ResourceData, meta interface{}) error { } else { d.Set("public_dns", 
+		d.Set("public_dns", meta.(*AWSClient).PartitionHostname(fmt.Sprintf("ec2-%s.%s", resourceAwsEc2DashIP(*eip.PublicIp), resourceAwsEc2RegionalPublicDnsSuffix(region))))
 	}
 	d.Set("public_ipv4_pool", eip.PublicIpv4Pool)
 	d.Set("carrier_ip", eip.CarrierIp)
diff --git a/aws/resource_aws_eip.go b/aws/resource_aws_eip.go
index 3fa6364b321..3b20e3cab1e 100644
--- a/aws/resource_aws_eip.go
+++ b/aws/resource_aws_eip.go
@@ -279,24 +279,14 @@ func resourceAwsEipRead(d *schema.ResourceData, meta interface{}) error {
 	region := *ec2conn.Config.Region
 	d.Set("private_ip", address.PrivateIpAddress)
 	if address.PrivateIpAddress != nil {
-		dashIP := strings.Replace(*address.PrivateIpAddress, ".", "-", -1)
-
-		if region == "us-east-1" {
-			d.Set("private_dns", fmt.Sprintf("ip-%s.ec2.internal", dashIP))
-		} else {
-			d.Set("private_dns", fmt.Sprintf("ip-%s.%s.compute.internal", dashIP, region))
-		}
+		d.Set("private_dns", fmt.Sprintf("ip-%s.%s", resourceAwsEc2DashIP(*address.PrivateIpAddress), resourceAwsEc2RegionalPrivateDnsSuffix(region)))
 	}
+
 	d.Set("public_ip", address.PublicIp)
 	if address.PublicIp != nil {
-		dashIP := strings.Replace(*address.PublicIp, ".", "-", -1)
-
-		if region == "us-east-1" {
-			d.Set("public_dns", meta.(*AWSClient).PartitionHostname(fmt.Sprintf("ec2-%s.compute-1", dashIP)))
-		} else {
-			d.Set("public_dns", meta.(*AWSClient).PartitionHostname(fmt.Sprintf("ec2-%s.%s.compute", dashIP, region)))
-		}
+		d.Set("public_dns", meta.(*AWSClient).PartitionHostname(fmt.Sprintf("ec2-%s.%s", resourceAwsEc2DashIP(*address.PublicIp), resourceAwsEc2RegionalPublicDnsSuffix(region))))
 	}
+
 	d.Set("public_ipv4_pool", address.PublicIpv4Pool)
 	d.Set("carrier_ip", address.CarrierIp)
 	d.Set("customer_owned_ipv4_pool", address.CustomerOwnedIpv4Pool)
diff --git a/aws/resource_aws_eip_association_test.go b/aws/resource_aws_eip_association_test.go
index 7cf7249ffd9..d12ae6b5477 100644
--- a/aws/resource_aws_eip_association_test.go
+++ b/aws/resource_aws_eip_association_test.go
@@ -291,16 +291,8 @@ func testAccCheckAWSEIPAssociationDestroy(s *terraform.State) error {
 
 func testAccAWSEIPAssociationConfig() string {
 	return composeConfig(
+		testAccAvailableAZsNoOptInConfig(),
 		testAccLatestAmazonLinuxHvmEbsAmiConfig(), `
-data "aws_availability_zones" "available" {
-  state = "available"
-
-  filter {
-    name   = "opt-in-status"
-    values = ["opt-in-not-required"]
-  }
-}
-
 resource "aws_vpc" "test" {
   cidr_block = "192.168.0.0/24"
   tags = {
@@ -367,16 +359,8 @@ resource "aws_network_interface" "test" {
 
 func testAccAWSEIPAssociationConfigDisappears() string {
 	return composeConfig(
+		testAccAvailableAZsNoOptInConfig(),
 		testAccLatestAmazonLinuxHvmEbsAmiConfig(), `
-data "aws_availability_zones" "available" {
-  state = "available"
-
-  filter {
-    name   = "opt-in-status"
-    values = ["opt-in-not-required"]
-  }
-}
-
 resource "aws_vpc" "main" {
   cidr_block = "192.168.0.0/24"
   tags = {
@@ -437,7 +421,35 @@ resource "aws_eip_association" "test" {
 
 func testAccAWSEIPAssociationConfig_spotInstance(rInt int) string {
 	return composeConfig(
-		testAccAWSSpotInstanceRequestConfig(rInt), `
+		testAccLatestAmazonLinuxHvmEbsAmiConfig(),
+		testAccAvailableEc2InstanceTypeForRegion("t3.micro", "t2.micro"),
+		testAccAvailableAZsNoOptInConfig(),
+		fmt.Sprintf(`
+resource "aws_default_subnet" "default" {
+  availability_zone = data.aws_availability_zones.available.names[0]
+
+  lifecycle {
+    ignore_changes = [
+      # testing 
environments often change the Name tag + tags["Name"], + ] + } +} + +resource "aws_key_pair" "test" { + key_name = "tmp-key-%d" + public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD3F6tyPEFEzV0LX3X8BsXdMsQz1x2cEikKDEY0aIj41qgxMCP/iteneqXSIFZBp5vizPvaoIR3Um9xK7PGoW8giupGn+EPuxIA4cDM4vzOqOkiMPhz5XK0whEjkVzTo4+S0puvDZuwIsdiW9mxhJc7tgBNL0cYlWSYVkz4G/fslNfRPW5mYAM49f4fhtxPb5ok4Q2Lg9dPKVHO/Bgeu5woMc7RY0p1ej6D4CKFE6lymSDJpW0YHX/wqE9+cfEauh7xZcG0q9t2ta6F6fmX0agvpFyZo8aFbXeUBr7osSCJNgvavWbM/06niWrOvYX2xwWdhXmXSrbX8ZbabVohBK41 phodgson@thoughtworks.com" +} + +resource "aws_spot_instance_request" "test" { + ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + key_name = aws_key_pair.test.key_name + spot_price = "0.10" + wait_for_fulfillment = true + subnet_id = aws_default_subnet.default.id +} + resource "aws_eip" "test" { vpc = true } @@ -446,19 +458,35 @@ resource "aws_eip_association" "test" { allocation_id = aws_eip.test.id instance_id = aws_spot_instance_request.test.spot_instance_id } -`) +`, rInt)) } func testAccAWSEIPAssociationConfig_instance() string { return composeConfig( + testAccAvailableEc2InstanceTypeForRegion("t3.micro", "t2.micro"), + testAccAvailableAZsNoOptInConfig(), testAccLatestAmazonLinuxHvmEbsAmiConfig(), - ` + fmt.Sprintf(` +resource "aws_default_subnet" "default" { + availability_zone = data.aws_availability_zones.available.names[0] + + lifecycle { + ignore_changes = [ + # testing environments often change the Name tag + tags["Name"], + ] + } +} + resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id - instance_type = "t2.small" + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + subnet_id = aws_default_subnet.default.id } -resource "aws_eip" "test" {} +resource "aws_eip" "test" { + vpc = true +} resource "aws_eip_association" "test" { allocation_id = aws_eip.test.id @@ -485,7 +513,9 @@ resource "aws_network_interface" "test" { subnet_id = aws_subnet.test.id } -resource "aws_eip" "test" {} +resource "aws_eip" "test" { + vpc = true +} resource "aws_eip_association" "test" { allocation_id = aws_eip.test.id diff --git a/aws/resource_aws_eip_test.go b/aws/resource_aws_eip_test.go index 811014e4c16..cb8b807b321 100644 --- a/aws/resource_aws_eip_test.go +++ b/aws/resource_aws_eip_test.go @@ -10,7 +10,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -712,13 +711,12 @@ func testAccCheckAWSEIPPrivateDNS(resourceName string) resource.TestCheckFunc { return fmt.Errorf("Not found: %s", resourceName) } - privateIPDashed := strings.Replace(rs.Primary.Attributes["private_ip"], ".", "-", -1) privateDNS := rs.Primary.Attributes["private_dns"] - expectedPrivateDNS := fmt.Sprintf("ip-%s.%s.compute.internal", privateIPDashed, testAccGetRegion()) - - if testAccGetRegion() == "us-east-1" { - expectedPrivateDNS = fmt.Sprintf("ip-%s.ec2.internal", privateIPDashed) - } + expectedPrivateDNS := fmt.Sprintf( + "ip-%s.%s", + resourceAwsEc2DashIP(rs.Primary.Attributes["private_ip"]), + resourceAwsEc2RegionalPrivateDnsSuffix(testAccGetRegion()), + ) if privateDNS != expectedPrivateDNS { return fmt.Errorf("expected private_dns value (%s), received: %s", expectedPrivateDNS, privateDNS) @@ -735,13 +733,13 @@ func 
testAccCheckAWSEIPPublicDNS(resourceName string) resource.TestCheckFunc { return fmt.Errorf("Not found: %s", resourceName) } - publicIPDashed := strings.Replace(rs.Primary.Attributes["public_ip"], ".", "-", -1) publicDNS := rs.Primary.Attributes["public_dns"] - expectedPublicDNS := fmt.Sprintf("ec2-%s.%s.compute.%s", publicIPDashed, testAccGetRegion(), testAccGetPartitionDNSSuffix()) - - if testAccGetRegion() == "us-east-1" { - expectedPublicDNS = fmt.Sprintf("ec2-%s.compute-1.%s", publicIPDashed, testAccGetPartitionDNSSuffix()) - } + expectedPublicDNS := fmt.Sprintf( + "ec2-%s.%s.%s", + resourceAwsEc2DashIP(rs.Primary.Attributes["public_ip"]), + resourceAwsEc2RegionalPublicDnsSuffix(testAccGetRegion()), + testAccGetPartitionDNSSuffix(), + ) if publicDNS != expectedPublicDNS { return fmt.Errorf("expected public_dns value (%s), received: %s", expectedPublicDNS, publicDNS) @@ -758,13 +756,13 @@ func testAccCheckAWSEIPPublicDNSEc2Classic(resourceName string) resource.TestChe return fmt.Errorf("Not found: %s", resourceName) } - publicIPDashed := strings.Replace(rs.Primary.Attributes["public_ip"], ".", "-", -1) publicDNS := rs.Primary.Attributes["public_dns"] - expectedPublicDNS := fmt.Sprintf("ec2-%s.%s.compute.%s", publicIPDashed, testAccGetEc2ClassicRegion(), testAccGetPartitionDNSSuffix()) - - if testAccGetEc2ClassicRegion() == endpoints.UsEast1RegionID { - expectedPublicDNS = fmt.Sprintf("ec2-%s.compute-1.%s", publicIPDashed, testAccGetPartitionDNSSuffix()) - } + expectedPublicDNS := fmt.Sprintf( + "ec2-%s.%s.%s", + resourceAwsEc2DashIP(rs.Primary.Attributes["public_ip"]), + resourceAwsEc2RegionalPublicDnsSuffix(testAccGetEc2ClassicRegion()), + testAccGetPartitionDNSSuffix(), + ) if publicDNS != expectedPublicDNS { return fmt.Errorf("expected public_dns value (%s), received: %s", expectedPublicDNS, publicDNS) @@ -776,12 +774,15 @@ func testAccCheckAWSEIPPublicDNSEc2Classic(resourceName string) resource.TestChe const testAccAWSEIPConfig = ` resource "aws_eip" "test" { + vpc = true } ` func testAccAWSEIPConfig_tags(rName, testName string) string { return fmt.Sprintf(` resource "aws_eip" "test" { + vpc = true + tags = { RandomName = "%[1]s" TestName = "%[2]s" @@ -837,14 +838,29 @@ resource "aws_eip" "test" { func testAccAWSEIPInstanceConfig() string { return composeConfig( - testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAvailableAZsNoOptInConfig(), + ` +resource "aws_default_subnet" "default" { + availability_zone = data.aws_availability_zones.available.names[0] + + lifecycle { + ignore_changes = [ + # testing environments often change the Name tag + tags["Name"], + ] + } +} + resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.small" + subnet_id = aws_default_subnet.default.id } resource "aws_eip" "test" { instance = aws_instance.test.id + vpc = true } `) } @@ -1161,10 +1177,24 @@ resource "aws_route_table_association" "test" { func testAccAWSEIPAssociate_not_associated() string { return composeConfig( + testAccAvailableEc2InstanceTypeForRegion("t3.micro", "t2.micro"), + testAccAvailableAZsNoOptInConfig(), testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` +resource "aws_default_subnet" "default" { + availability_zone = data.aws_availability_zones.available.names[0] + + lifecycle { + ignore_changes = [ + # testing environments often change the Name tag + tags["Name"], + ] + } +} + resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id - instance_type = "t2.small" + 
instance_type = data.aws_ec2_instance_type_offering.available.instance_type + subnet_id = aws_default_subnet.default.id } resource "aws_eip" "test" { @@ -1174,14 +1204,29 @@ resource "aws_eip" "test" { func testAccAWSEIPAssociate_associated() string { return composeConfig( + testAccAvailableEc2InstanceTypeForRegion("t3.micro", "t2.micro"), + testAccAvailableAZsNoOptInConfig(), testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` +resource "aws_default_subnet" "default" { + availability_zone = data.aws_availability_zones.available.names[0] + + lifecycle { + ignore_changes = [ + # testing environments often change the Name tag + tags["Name"], + ] + } +} + resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id - instance_type = "t2.small" + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + subnet_id = aws_default_subnet.default.id } resource "aws_eip" "test" { instance = aws_instance.test.id + vpc = true } `) } From 6a327e4ec8d1a1973531fcea39a2a94bc23d9e6f Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 3 Dec 2020 19:06:35 -0500 Subject: [PATCH 0490/1212] tests/vpc_dhcp: Fix hardcoded regions --- aws/resource_aws_default_vpc_dhcp_options.go | 31 ++++++++++++++----- ...ource_aws_default_vpc_dhcp_options_test.go | 2 +- aws/resource_aws_vpc_dhcp_options_test.go | 7 +---- 3 files changed, 25 insertions(+), 15 deletions(-) diff --git a/aws/resource_aws_default_vpc_dhcp_options.go b/aws/resource_aws_default_vpc_dhcp_options.go index 1e4c631dbfe..49f967b2922 100644 --- a/aws/resource_aws_default_vpc_dhcp_options.go +++ b/aws/resource_aws_default_vpc_dhcp_options.go @@ -3,8 +3,10 @@ package aws import ( "fmt" "log" + "strings" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -37,13 +39,6 @@ func resourceAwsDefaultVpcDhcpOptions() *schema.Resource { func resourceAwsDefaultVpcDhcpOptionsCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn - var domainName string - awsRegion := meta.(*AWSClient).region - if awsRegion == "us-east-1" { - domainName = "ec2.internal" - } else { - domainName = awsRegion + ".compute.internal" - } req := &ec2.DescribeDhcpOptionsInput{ Filters: []*ec2.Filter{ { @@ -52,7 +47,7 @@ func resourceAwsDefaultVpcDhcpOptionsCreate(d *schema.ResourceData, meta interfa }, { Name: aws.String("value"), - Values: aws.StringSlice([]string{domainName}), + Values: aws.StringSlice([]string{resourceAwsEc2RegionalPrivateDnsSuffix(meta.(*AWSClient).region)}), }, { Name: aws.String("key"), @@ -95,3 +90,23 @@ func resourceAwsDefaultVpcDhcpOptionsDelete(d *schema.ResourceData, meta interfa log.Printf("[WARN] Cannot destroy Default DHCP Options Set. 
Terraform will remove this resource from the state file, however resources may remain.") return nil } + +func resourceAwsEc2RegionalPrivateDnsSuffix(region string) string { + if region == endpoints.UsEast1RegionID { + return "ec2.internal" + } + + return fmt.Sprintf("%s.compute.internal", region) +} + +func resourceAwsEc2RegionalPublicDnsSuffix(region string) string { + if region == endpoints.UsEast1RegionID { + return "compute-1" + } + + return fmt.Sprintf("%s.compute", region) +} + +func resourceAwsEc2DashIP(ip string) string { + return strings.Replace(ip, ".", "-", -1) +} diff --git a/aws/resource_aws_default_vpc_dhcp_options_test.go b/aws/resource_aws_default_vpc_dhcp_options_test.go index 30f3f4b9916..74d62800762 100644 --- a/aws/resource_aws_default_vpc_dhcp_options_test.go +++ b/aws/resource_aws_default_vpc_dhcp_options_test.go @@ -23,7 +23,7 @@ func TestAccAWSDefaultVpcDhcpOptions_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckDHCPOptionsExists(resourceName, &d), testAccMatchResourceAttrRegionalARN(resourceName, "arn", "ec2", regexp.MustCompile(`dhcp-options/dopt-.+`)), - resource.TestCheckResourceAttr(resourceName, "domain_name", "us-west-2.compute.internal"), + resource.TestCheckResourceAttr(resourceName, "domain_name", resourceAwsEc2RegionalPrivateDnsSuffix(testAccGetRegion())), resource.TestCheckResourceAttr(resourceName, "domain_name_servers", "AmazonProvidedDNS"), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.Name", "Default DHCP Option Set"), diff --git a/aws/resource_aws_vpc_dhcp_options_test.go b/aws/resource_aws_vpc_dhcp_options_test.go index f08c518082e..223c2b01c05 100644 --- a/aws/resource_aws_vpc_dhcp_options_test.go +++ b/aws/resource_aws_vpc_dhcp_options_test.go @@ -33,11 +33,6 @@ func testSweepVpcDhcpOptions(region string) error { for _, dhcpOption := range page.DhcpOptions { var defaultDomainNameFound, defaultDomainNameServersFound bool - domainName := region + ".compute.internal" - if region == "us-east-1" { - domainName = "ec2.internal" - } - // This skips the default dhcp configurations so they don't get deleted for _, dhcpConfiguration := range dhcpOption.DhcpConfigurations { if aws.StringValue(dhcpConfiguration.Key) == "domain-name" { @@ -45,7 +40,7 @@ func testSweepVpcDhcpOptions(region string) error { continue } - if aws.StringValue(dhcpConfiguration.Values[0].Value) == domainName { + if aws.StringValue(dhcpConfiguration.Values[0].Value) == resourceAwsEc2RegionalPrivateDnsSuffix(region) { defaultDomainNameFound = true } } else if aws.StringValue(dhcpConfiguration.Key) == "domain-name-servers" { From b27d8b7a524653b19284923ad90035058d7d08cc Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 3 Dec 2020 19:07:10 -0500 Subject: [PATCH 0491/1212] tests/network_interface: Fix hardcoded regions --- aws/resource_aws_network_interface_test.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/aws/resource_aws_network_interface_test.go b/aws/resource_aws_network_interface_test.go index b43d5a2665c..9c9d240eccc 100644 --- a/aws/resource_aws_network_interface_test.go +++ b/aws/resource_aws_network_interface_test.go @@ -3,7 +3,6 @@ package aws import ( "fmt" "log" - "strings" "testing" "github.com/aws/aws-sdk-go/aws" @@ -524,8 +523,9 @@ func testAccCheckAWSENIAttributes(conf *ec2.NetworkInterface) resource.TestCheck return fmt.Errorf("expected private ip to be 172.16.10.100, but was %s", *conf.PrivateIpAddress) } - if 
!strings.HasPrefix(*conf.PrivateDnsName, "ip-172-16-10-100.") || !strings.HasSuffix(*conf.PrivateDnsName, ".compute.internal") { - return fmt.Errorf("expected private dns name to be ip-172-16-10-100..compute.internal, but was %s", *conf.PrivateDnsName) + expectedPrivateDnsName := fmt.Sprintf("ip-%s.%s", resourceAwsEc2DashIP(*conf.PrivateIpAddress), resourceAwsEc2RegionalPrivateDnsSuffix(testAccGetRegion())) + if *conf.PrivateDnsName != expectedPrivateDnsName { + return fmt.Errorf("expected private dns name to be %s, but was %s", expectedPrivateDnsName, *conf.PrivateDnsName) } if len(*conf.MacAddress) == 0 { @@ -573,8 +573,9 @@ func testAccCheckAWSENIAttributesWithAttachment(conf *ec2.NetworkInterface) reso return fmt.Errorf("expected private ip to be 172.16.10.100, but was %s", *conf.PrivateIpAddress) } - if !strings.HasPrefix(*conf.PrivateDnsName, "ip-172-16-10-100.") || !strings.HasSuffix(*conf.PrivateDnsName, ".compute.internal") { - return fmt.Errorf("expected private dns name to be ip-172-16-10-100..compute.internal, but was %s", *conf.PrivateDnsName) + expectedPrivateDnsName := fmt.Sprintf("ip-%s.%s", resourceAwsEc2DashIP(*conf.PrivateIpAddress), resourceAwsEc2RegionalPrivateDnsSuffix(testAccGetRegion())) + if *conf.PrivateDnsName != expectedPrivateDnsName { + return fmt.Errorf("expected private dns name to be %s, but was %s", expectedPrivateDnsName, *conf.PrivateDnsName) } return nil From 32671a194df1ec8fa18764d2b70b38a1bdfbfcbe Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 8 Jan 2021 15:30:28 -0500 Subject: [PATCH 0492/1212] data/eip: Simplify IP construction --- aws/data_source_aws_eip.go | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/aws/data_source_aws_eip.go b/aws/data_source_aws_eip.go index 339d0ba21ec..977f2bfdca9 100644 --- a/aws/data_source_aws_eip.go +++ b/aws/data_source_aws_eip.go @@ -5,7 +5,6 @@ import ( "log" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" @@ -141,32 +140,12 @@ func dataSourceAwsEipRead(d *schema.ResourceData, meta interface{}) error { d.Set("private_ip", eip.PrivateIpAddress) if eip.PrivateIpAddress != nil { -<<<<<<< HEAD - dashIP := strings.Replace(*eip.PrivateIpAddress, ".", "-", -1) - - if region == endpoints.UsEast1RegionID { - d.Set("private_dns", fmt.Sprintf("ip-%s.ec2.internal", dashIP)) - } else { - d.Set("private_dns", fmt.Sprintf("ip-%s.%s.compute.internal", dashIP, region)) - } -======= d.Set("private_dns", fmt.Sprintf("ip-%s.%s", resourceAwsEc2DashIP(*eip.PrivateIpAddress), resourceAwsEc2RegionalPrivateDnsSuffix(region))) ->>>>>>> beae41d73 (tests/eip: Fix hardcoded region) } d.Set("public_ip", eip.PublicIp) if eip.PublicIp != nil { -<<<<<<< HEAD - dashIP := strings.Replace(*eip.PublicIp, ".", "-", -1) - - if region == endpoints.UsEast1RegionID { - d.Set("public_dns", meta.(*AWSClient).PartitionHostname(fmt.Sprintf("ec2-%s.compute-1", dashIP))) - } else { - d.Set("public_dns", meta.(*AWSClient).PartitionHostname(fmt.Sprintf("ec2-%s.%s.compute", dashIP, region))) - } -======= d.Set("public_dns", meta.(*AWSClient).PartitionHostname(fmt.Sprintf("ec2-%s.%s", resourceAwsEc2DashIP(*eip.PublicIp), resourceAwsEc2RegionalPublicDnsSuffix(region)))) ->>>>>>> beae41d73 (tests/eip: Fix hardcoded region) } d.Set("public_ipv4_pool", eip.PublicIpv4Pool) d.Set("carrier_ip", eip.CarrierIp) From 
cef0e3d93d5ce54cb6d5948bed5b6c0fef84e246 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 8 Jan 2021 15:53:18 -0500 Subject: [PATCH 0493/1212] tests/eip_association: Fix networking --- aws/resource_aws_eip_association_test.go | 44 ++++++++++++++---------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/aws/resource_aws_eip_association_test.go b/aws/resource_aws_eip_association_test.go index d12ae6b5477..d91097cc48a 100644 --- a/aws/resource_aws_eip_association_test.go +++ b/aws/resource_aws_eip_association_test.go @@ -422,18 +422,21 @@ resource "aws_eip_association" "test" { func testAccAWSEIPAssociationConfig_spotInstance(rInt int) string { return composeConfig( testAccLatestAmazonLinuxHvmEbsAmiConfig(), - testAccAvailableEc2InstanceTypeForRegion("t3.micro", "t2.micro"), + testAccAvailableEc2InstanceTypeForAvailabilityZone("aws_subnet.test.availability_zone", "t3.micro", "t2.micro"), testAccAvailableAZsNoOptInConfig(), fmt.Sprintf(` -resource "aws_default_subnet" "default" { +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { availability_zone = data.aws_availability_zones.available.names[0] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + vpc_id = aws_vpc.test.id +} - lifecycle { - ignore_changes = [ - # testing environments often change the Name tag - tags["Name"], - ] - } +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id } resource "aws_key_pair" "test" { @@ -447,7 +450,7 @@ resource "aws_spot_instance_request" "test" { key_name = aws_key_pair.test.key_name spot_price = "0.10" wait_for_fulfillment = true - subnet_id = aws_default_subnet.default.id + subnet_id = aws_subnet.test.id } resource "aws_eip" "test" { @@ -463,25 +466,28 @@ resource "aws_eip_association" "test" { func testAccAWSEIPAssociationConfig_instance() string { return composeConfig( - testAccAvailableEc2InstanceTypeForRegion("t3.micro", "t2.micro"), + testAccAvailableEc2InstanceTypeForAvailabilityZone("aws_subnet.test.availability_zone", "t3.micro", "t2.micro"), testAccAvailableAZsNoOptInConfig(), testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` -resource "aws_default_subnet" "default" { +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { availability_zone = data.aws_availability_zones.available.names[0] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + vpc_id = aws_vpc.test.id +} - lifecycle { - ignore_changes = [ - # testing environments often change the Name tag - tags["Name"], - ] - } +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id } resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = data.aws_ec2_instance_type_offering.available.instance_type - subnet_id = aws_default_subnet.default.id + subnet_id = aws_subnet.test.id } resource "aws_eip" "test" { @@ -492,7 +498,7 @@ resource "aws_eip_association" "test" { allocation_id = aws_eip.test.id instance_id = aws_instance.test.id } -`) +`)) } const testAccAWSEIPAssociationConfig_networkInterface = ` From 61a029c55c2e69e7fccc1e2ef8bccce7ae99539a Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 8 Jan 2021 16:01:53 -0500 Subject: [PATCH 0494/1212] tests/eip: Fixing networking --- aws/resource_aws_eip_test.go | 64 +++++++++++++++++++++--------------- 1 file changed, 37 insertions(+), 27 deletions(-) diff --git a/aws/resource_aws_eip_test.go b/aws/resource_aws_eip_test.go index cb8b807b321..5a92c76593b 100644 --- a/aws/resource_aws_eip_test.go +++ 
b/aws/resource_aws_eip_test.go @@ -839,23 +839,27 @@ resource "aws_eip" "test" { func testAccAWSEIPInstanceConfig() string { return composeConfig( testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAvailableEc2InstanceTypeForAvailabilityZone("aws_subnet.test.availability_zone", "t3.micro", "t2.micro"), testAccAvailableAZsNoOptInConfig(), ` -resource "aws_default_subnet" "default" { +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { availability_zone = data.aws_availability_zones.available.names[0] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + vpc_id = aws_vpc.test.id +} - lifecycle { - ignore_changes = [ - # testing environments often change the Name tag - tags["Name"], - ] - } +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id } resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id - instance_type = "t2.small" - subnet_id = aws_default_subnet.default.id + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + subnet_id = aws_subnet.test.id } resource "aws_eip" "test" { @@ -1177,24 +1181,27 @@ resource "aws_route_table_association" "test" { func testAccAWSEIPAssociate_not_associated() string { return composeConfig( - testAccAvailableEc2InstanceTypeForRegion("t3.micro", "t2.micro"), + testAccAvailableEc2InstanceTypeForAvailabilityZone("aws_subnet.test.availability_zone", "t3.micro", "t2.micro"), testAccAvailableAZsNoOptInConfig(), testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` -resource "aws_default_subnet" "default" { +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { availability_zone = data.aws_availability_zones.available.names[0] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + vpc_id = aws_vpc.test.id +} - lifecycle { - ignore_changes = [ - # testing environments often change the Name tag - tags["Name"], - ] - } +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id } resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = data.aws_ec2_instance_type_offering.available.instance_type - subnet_id = aws_default_subnet.default.id + subnet_id = aws_subnet.test.id } resource "aws_eip" "test" { @@ -1204,24 +1211,27 @@ resource "aws_eip" "test" { func testAccAWSEIPAssociate_associated() string { return composeConfig( - testAccAvailableEc2InstanceTypeForRegion("t3.micro", "t2.micro"), + testAccAvailableEc2InstanceTypeForAvailabilityZone("aws_subnet.test.availability_zone", "t3.micro", "t2.micro"), testAccAvailableAZsNoOptInConfig(), testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` -resource "aws_default_subnet" "default" { +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test" { availability_zone = data.aws_availability_zones.available.names[0] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + vpc_id = aws_vpc.test.id +} - lifecycle { - ignore_changes = [ - # testing environments often change the Name tag - tags["Name"], - ] - } +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id } resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = data.aws_ec2_instance_type_offering.available.instance_type - subnet_id = aws_default_subnet.default.id + subnet_id = aws_subnet.test.id } resource "aws_eip" "test" { From c00f122a5858871c99fecaf1f77b24384c818aea Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 8 Jan 2021 16:07:39 -0500 Subject: [PATCH 0495/1212] tests/eip: Fix lint 
issues --- aws/resource_aws_eip_association_test.go | 4 ++-- aws/resource_aws_eip_test.go | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/aws/resource_aws_eip_association_test.go b/aws/resource_aws_eip_association_test.go index d91097cc48a..d4aeeba49ee 100644 --- a/aws/resource_aws_eip_association_test.go +++ b/aws/resource_aws_eip_association_test.go @@ -431,7 +431,7 @@ resource "aws_vpc" "test" { resource "aws_subnet" "test" { availability_zone = data.aws_availability_zones.available.names[0] - cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) vpc_id = aws_vpc.test.id } @@ -476,7 +476,7 @@ resource "aws_vpc" "test" { resource "aws_subnet" "test" { availability_zone = data.aws_availability_zones.available.names[0] - cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) vpc_id = aws_vpc.test.id } diff --git a/aws/resource_aws_eip_test.go b/aws/resource_aws_eip_test.go index 5a92c76593b..89602e45992 100644 --- a/aws/resource_aws_eip_test.go +++ b/aws/resource_aws_eip_test.go @@ -848,7 +848,7 @@ resource "aws_vpc" "test" { resource "aws_subnet" "test" { availability_zone = data.aws_availability_zones.available.names[0] - cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) vpc_id = aws_vpc.test.id } @@ -1190,7 +1190,7 @@ resource "aws_vpc" "test" { resource "aws_subnet" "test" { availability_zone = data.aws_availability_zones.available.names[0] - cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) vpc_id = aws_vpc.test.id } @@ -1220,7 +1220,7 @@ resource "aws_vpc" "test" { resource "aws_subnet" "test" { availability_zone = data.aws_availability_zones.available.names[0] - cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) vpc_id = aws_vpc.test.id } From f3943a4cd0097c98f057ef81f8168754de08f257 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 8 Jan 2021 16:22:31 -0500 Subject: [PATCH 0496/1212] tests/data/eip: Make tests names consistent --- aws/data_source_aws_eip_test.go | 45 +++++++++++++++++---------------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/aws/data_source_aws_eip_test.go b/aws/data_source_aws_eip_test.go index 30c7d68e7ab..97c35b23b1f 100644 --- a/aws/data_source_aws_eip_test.go +++ b/aws/data_source_aws_eip_test.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) -func TestAccDataSourceAwsEip_Filter(t *testing.T) { +func TestAccDataSourceAWSEIP_Filter(t *testing.T) { dataSourceName := "data.aws_eip.test" resourceName := "aws_eip.test" rName := acctest.RandomWithPrefix("tf-acc-test") @@ -18,7 +18,7 @@ func TestAccDataSourceAwsEip_Filter(t *testing.T) { Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccDataSourceAwsEipConfigFilter(rName), + Config: testAccDataSourceAWSEIPConfigFilter(rName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(dataSourceName, "id", resourceName, "id"), resource.TestCheckResourceAttrPair(dataSourceName, "public_dns", resourceName, "public_dns"), @@ -29,7 +29,7 @@ func TestAccDataSourceAwsEip_Filter(t *testing.T) { }) } -func TestAccDataSourceAwsEip_Id(t *testing.T) { +func TestAccDataSourceAWSEIP_Id(t *testing.T) { dataSourceName := "data.aws_eip.test" resourceName := "aws_eip.test" @@ -38,7 +38,7 @@ func 
TestAccDataSourceAwsEip_Id(t *testing.T) { Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccDataSourceAwsEipConfigId, + Config: testAccDataSourceAWSEIPConfigId, Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(dataSourceName, "id", resourceName, "id"), resource.TestCheckResourceAttrPair(dataSourceName, "public_dns", resourceName, "public_dns"), @@ -48,7 +48,7 @@ func TestAccDataSourceAwsEip_Id(t *testing.T) { }, }) } -func TestAccDataSourceAwsEip_PublicIP_EC2Classic(t *testing.T) { +func TestAccDataSourceAWSEIP_PublicIP_EC2Classic(t *testing.T) { dataSourceName := "data.aws_eip.test" resourceName := "aws_eip.test" @@ -57,7 +57,7 @@ func TestAccDataSourceAwsEip_PublicIP_EC2Classic(t *testing.T) { ProviderFactories: testAccProviderFactories, Steps: []resource.TestStep{ { - Config: testAccDataSourceAwsEipConfigPublicIpEc2Classic(), + Config: testAccDataSourceAWSEIPConfigPublicIpEc2Classic(), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(dataSourceName, "id", resourceName, "id"), resource.TestCheckResourceAttrPair(dataSourceName, "public_dns", resourceName, "public_dns"), @@ -68,7 +68,7 @@ func TestAccDataSourceAwsEip_PublicIP_EC2Classic(t *testing.T) { }) } -func TestAccDataSourceAwsEip_PublicIP_VPC(t *testing.T) { +func TestAccDataSourceAWSEIP_PublicIP_VPC(t *testing.T) { dataSourceName := "data.aws_eip.test" resourceName := "aws_eip.test" @@ -77,7 +77,7 @@ func TestAccDataSourceAwsEip_PublicIP_VPC(t *testing.T) { Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccDataSourceAwsEipConfigPublicIpVpc, + Config: testAccDataSourceAWSEIPConfigPublicIpVpc, Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(dataSourceName, "id", resourceName, "id"), resource.TestCheckResourceAttrPair(dataSourceName, "public_dns", resourceName, "public_dns"), @@ -89,7 +89,7 @@ func TestAccDataSourceAwsEip_PublicIP_VPC(t *testing.T) { }) } -func TestAccDataSourceAwsEip_Tags(t *testing.T) { +func TestAccDataSourceAWSEIP_Tags(t *testing.T) { dataSourceName := "data.aws_eip.test" resourceName := "aws_eip.test" rName := acctest.RandomWithPrefix("tf-acc-test") @@ -99,7 +99,7 @@ func TestAccDataSourceAwsEip_Tags(t *testing.T) { Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccDataSourceAwsEipConfigTags(rName), + Config: testAccDataSourceAWSEIPConfigTags(rName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(dataSourceName, "id", resourceName, "id"), resource.TestCheckResourceAttrPair(dataSourceName, "public_dns", resourceName, "public_dns"), @@ -110,7 +110,7 @@ func TestAccDataSourceAwsEip_Tags(t *testing.T) { }) } -func TestAccDataSourceAwsEip_NetworkInterface(t *testing.T) { +func TestAccDataSourceAWSEIP_NetworkInterface(t *testing.T) { dataSourceName := "data.aws_eip.test" resourceName := "aws_eip.test" @@ -119,7 +119,7 @@ func TestAccDataSourceAwsEip_NetworkInterface(t *testing.T) { Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccDataSourceAwsEipConfigNetworkInterface, + Config: testAccDataSourceAWSEIPConfigNetworkInterface, Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(dataSourceName, "id", resourceName, "id"), resource.TestCheckResourceAttrPair(dataSourceName, "network_interface_id", resourceName, "network_interface"), @@ -132,7 +132,7 @@ func TestAccDataSourceAwsEip_NetworkInterface(t *testing.T) { }) } -func TestAccDataSourceAwsEip_Instance(t *testing.T) { +func 
TestAccDataSourceAWSEIP_Instance(t *testing.T) { dataSourceName := "data.aws_eip.test" resourceName := "aws_eip.test" @@ -141,7 +141,7 @@ func TestAccDataSourceAwsEip_Instance(t *testing.T) { Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccDataSourceAwsEipConfigInstance, + Config: testAccDataSourceAWSEIPConfigInstance, Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(dataSourceName, "id", resourceName, "id"), resource.TestCheckResourceAttrPair(dataSourceName, "instance_id", resourceName, "instance"), @@ -206,7 +206,7 @@ data "aws_eip" "test" { ` } -func testAccDataSourceAwsEipConfigFilter(rName string) string { +func testAccDataSourceAWSEIPConfigFilter(rName string) string { return fmt.Sprintf(` resource "aws_eip" "test" { vpc = true @@ -225,7 +225,7 @@ data "aws_eip" "test" { `, rName) } -const testAccDataSourceAwsEipConfigId = ` +const testAccDataSourceAWSEIPConfigId = ` resource "aws_eip" "test" { vpc = true } @@ -235,7 +235,7 @@ data "aws_eip" "test" { } ` -func testAccDataSourceAwsEipConfigPublicIpEc2Classic() string { +func testAccDataSourceAWSEIPConfigPublicIpEc2Classic() string { return composeConfig( testAccEc2ClassicRegionProviderConfig(), ` @@ -247,7 +247,7 @@ data "aws_eip" "test" { `) } -const testAccDataSourceAwsEipConfigPublicIpVpc = ` +const testAccDataSourceAWSEIPConfigPublicIpVpc = ` resource "aws_eip" "test" { vpc = true } @@ -257,7 +257,7 @@ data "aws_eip" "test" { } ` -func testAccDataSourceAwsEipConfigTags(rName string) string { +func testAccDataSourceAWSEIPConfigTags(rName string) string { return fmt.Sprintf(` resource "aws_eip" "test" { vpc = true @@ -275,7 +275,7 @@ data "aws_eip" "test" { `, rName) } -const testAccDataSourceAwsEipConfigNetworkInterface = ` +const testAccDataSourceAWSEIPConfigNetworkInterface = ` resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" } @@ -306,7 +306,8 @@ data "aws_eip" "test" { } ` -var testAccDataSourceAwsEipConfigInstance = testAccAvailableAZsNoOptInDefaultExcludeConfig() + ` +var testAccDataSourceAWSEIPConfigInstance = composeConfig( + testAccAvailableAZsNoOptInDefaultExcludeConfig(), ` resource "aws_vpc" "test" { cidr_block = "10.2.0.0/16" } @@ -347,7 +348,7 @@ data "aws_eip" "test" { values = [aws_eip.test.instance] } } -` +`) func testAccDataSourceAWSEIPConfigCarrierIP(rName string) string { return composeConfig( From a00a55422a20189342b1e088fef18379d91eab90 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 8 Jan 2021 16:33:34 -0500 Subject: [PATCH 0497/1212] tests/eip_association: Fix lint issue --- aws/resource_aws_eip_association_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_eip_association_test.go b/aws/resource_aws_eip_association_test.go index d4aeeba49ee..f08600145fc 100644 --- a/aws/resource_aws_eip_association_test.go +++ b/aws/resource_aws_eip_association_test.go @@ -469,7 +469,7 @@ func testAccAWSEIPAssociationConfig_instance() string { testAccAvailableEc2InstanceTypeForAvailabilityZone("aws_subnet.test.availability_zone", "t3.micro", "t2.micro"), testAccAvailableAZsNoOptInConfig(), testAccLatestAmazonLinuxHvmEbsAmiConfig(), - fmt.Sprintf(` + ` resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" } @@ -498,7 +498,7 @@ resource "aws_eip_association" "test" { allocation_id = aws_eip.test.id instance_id = aws_instance.test.id } -`)) +`) } const testAccAWSEIPAssociationConfig_networkInterface = ` From e8b81002019617d145450b81ff2ec65eb39456e3 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 4 Dec 2020 15:19:12 
-0500 Subject: [PATCH 0498/1212] tests/resource/workspaces_directory: Fix hardcoded region --- aws/resource_aws_workspaces_directory_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_workspaces_directory_test.go b/aws/resource_aws_workspaces_directory_test.go index 48ed67ded79..be12b003e89 100644 --- a/aws/resource_aws_workspaces_directory_test.go +++ b/aws/resource_aws_workspaces_directory_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/workspaces" multierror "github.com/hashicorp/go-multierror" @@ -661,7 +662,7 @@ data "aws_availability_zones" "available" { locals { region_workspaces_az_ids = { - "us-east-1" = formatlist("use1-az%%d", [2, 4, 6]) + %q = formatlist("use1-az%%d", [2, 4, 6]) } workspaces_az_ids = lookup(local.region_workspaces_az_ids, data.aws_region.current.name, data.aws_availability_zones.available.zone_ids) @@ -709,7 +710,7 @@ resource "aws_directory_service_directory" "main" { Name = "tf-testacc-workspaces-directory-%[1]s" } } -`, rName) +`, rName, endpoints.UsEast1RegionID) } func testAccWorkspacesDirectoryConfig(rName string) string { From cff7df0c56a77ef0fa71ee76a5f7fb6d763f76e0 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 8 Jan 2021 16:59:24 -0500 Subject: [PATCH 0499/1212] tests/workspaces_directory: Lint ignore hardcoded region --- aws/resource_aws_workspaces_directory_test.go | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/aws/resource_aws_workspaces_directory_test.go b/aws/resource_aws_workspaces_directory_test.go index be12b003e89..50b0337f0a5 100644 --- a/aws/resource_aws_workspaces_directory_test.go +++ b/aws/resource_aws_workspaces_directory_test.go @@ -7,7 +7,6 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/workspaces" multierror "github.com/hashicorp/go-multierror" @@ -648,21 +647,15 @@ func testAccPreCheckWorkspacesDirectory(t *testing.T) { } func testAccAwsWorkspacesDirectoryConfig_Prerequisites(rName string) string { - return fmt.Sprintf(` + return composeConfig( + testAccAvailableAZsNoOptInConfig(), + //lintignore:AWSAT003 + fmt.Sprintf(` data "aws_region" "current" {} -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} - locals { region_workspaces_az_ids = { - %q = formatlist("use1-az%%d", [2, 4, 6]) + "us-east-1" = formatlist("use1-az%%d", [2, 4, 6]) } workspaces_az_ids = lookup(local.region_workspaces_az_ids, data.aws_region.current.name, data.aws_availability_zones.available.zone_ids) @@ -710,7 +703,7 @@ resource "aws_directory_service_directory" "main" { Name = "tf-testacc-workspaces-directory-%[1]s" } } -`, rName, endpoints.UsEast1RegionID) +`, rName)) } func testAccWorkspacesDirectoryConfig(rName string) string { From 2e71e336050ea60453d8a67158ac7e88d0486789 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 8 Jan 2021 17:21:17 -0500 Subject: [PATCH 0500/1212] tests/workspaces_directory: Fix Sprintf issue --- aws/resource_aws_workspaces_directory_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_workspaces_directory_test.go b/aws/resource_aws_workspaces_directory_test.go index 50b0337f0a5..52b3524cc85 100644 --- 
a/aws/resource_aws_workspaces_directory_test.go +++ b/aws/resource_aws_workspaces_directory_test.go @@ -708,7 +708,8 @@ resource "aws_directory_service_directory" "main" { func testAccWorkspacesDirectoryConfig(rName string) string { return composeConfig( - testAccAwsWorkspacesDirectoryConfig_Prerequisites(rName), ` + testAccAwsWorkspacesDirectoryConfig_Prerequisites(rName), + fmt.Sprintf(` resource "aws_workspaces_directory" "main" { directory_id = aws_directory_service_directory.main.id @@ -720,7 +721,7 @@ resource "aws_workspaces_directory" "main" { data "aws_iam_role" "workspaces-default" { name = "workspaces_DefaultRole" } -`) +`, rName)) } func testAccWorkspacesDirectory_selfServicePermissions(rName string) string { From 5ac9c7287351ea7669a0ac2220be047c54fb8efe Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Sat, 9 Jan 2021 07:23:02 +0900 Subject: [PATCH 0501/1212] Add aws_route53_resolver_dnssec_config --- aws/provider.go | 1 + ...urce_aws_route53_resolver_dnssec_config.go | 176 ++++++++++++++++++ 2 files changed, 177 insertions(+) create mode 100644 aws/resource_aws_route53_resolver_dnssec_config.go diff --git a/aws/provider.go b/aws/provider.go index 2d2fe3b412b..3a68bb22c4c 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -858,6 +858,7 @@ func Provider() *schema.Provider { "aws_route53_vpc_association_authorization": resourceAwsRoute53VPCAssociationAuthorization(), "aws_route53_zone": resourceAwsRoute53Zone(), "aws_route53_health_check": resourceAwsRoute53HealthCheck(), + "aws_route53_resolver_dnssec_config": resourceAwsRoute53ResolverDnssecConfig(), "aws_route53_resolver_endpoint": resourceAwsRoute53ResolverEndpoint(), "aws_route53_resolver_query_log_config": resourceAwsRoute53ResolverQueryLogConfig(), "aws_route53_resolver_query_log_config_association": resourceAwsRoute53ResolverQueryLogConfigAssociation(), diff --git a/aws/resource_aws_route53_resolver_dnssec_config.go b/aws/resource_aws_route53_resolver_dnssec_config.go new file mode 100644 index 00000000000..10f2881debe --- /dev/null +++ b/aws/resource_aws_route53_resolver_dnssec_config.go @@ -0,0 +1,176 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/route53resolver" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +const ( + route53ResolverDnssecConfigStatusNotFound = "NOT_FOUND" +) + +func resourceAwsRoute53ResolverDnssecConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsRoute53ResolverDnssecConfigCreate, + Read: resourceAwsRoute53ResolverDnssecConfigRead, + Delete: resourceAwsRoute53ResolverDnssecConfigDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + + "resource_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "validation_status": { + Type: schema.TypeString, + Computed: true, + }, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + } +} + +func resourceAwsRoute53ResolverDnssecConfigCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).route53resolverconn + + req := &route53resolver.UpdateResolverDnssecConfigInput{ + ResourceId: 
aws.String(d.Get("resource_id").(string)), + Validation: aws.String(route53resolver.ValidationEnable), + } + + log.Printf("[DEBUG] Creating Route53 Resolver DNSSEC config: %#v", req) + resp, err := conn.UpdateResolverDnssecConfig(req) + if err != nil { + return fmt.Errorf("error creating Route53 Resolver DNSSEC config: %s", err) + } + + d.SetId(aws.StringValue(resp.ResolverDNSSECConfig.ResourceId)) + + err = route53ResolverDnssecConfigWait(conn, d.Id(), d.Timeout(schema.TimeoutCreate), + []string{route53resolver.ResolverDNSSECValidationStatusEnabling}, + []string{route53resolver.ResolverDNSSECValidationStatusEnabled}) + if err != nil { + return err + } + + return resourceAwsRoute53ResolverDnssecConfigRead(d, meta) +} + +func resourceAwsRoute53ResolverDnssecConfigRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).route53resolverconn + ec2Conn := meta.(*AWSClient).ec2conn + + vpc, err := vpcDescribe(ec2Conn, d.Id()) + if err != nil { + return fmt.Errorf("error getting VPC associated with Route53 Resolver DNSSEC config (%s): %s", d.Id(), err) + } + + // GetResolverDnssecConfig returns AccessDeniedException if sending a request with non-existing VPC id + if vpc == nil { + log.Printf("[WARN] VPC associated with Resolver DNSSEC config (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + raw, state, err := route53ResolverDnssecConfigRefresh(conn, d.Id())() + if err != nil { + return fmt.Errorf("error getting Route53 Resolver DNSSEC config (%s): %s", d.Id(), err) + } + + if state == route53ResolverDnssecConfigStatusNotFound || state == route53resolver.ResolverDNSSECValidationStatusDisabled { + log.Printf("[WARN] Route53 Resolver DNSSEC config (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + out := raw.(*route53resolver.ResolverDnssecConfig) + d.Set("id", out.Id) + d.Set("owner_id", out.OwnerId) + d.Set("resource_id", out.ResourceId) + d.Set("validation_status", out.ValidationStatus) + + return nil +} + +func resourceAwsRoute53ResolverDnssecConfigDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).route53resolverconn + + log.Printf("[DEBUG] Deleting Route53 Resolver DNSSEC config: %s", d.Id()) + _, err := conn.UpdateResolverDnssecConfig(&route53resolver.UpdateResolverDnssecConfigInput{ + ResourceId: aws.String(d.Id()), + Validation: aws.String(route53resolver.ValidationDisable), + }) + if isAWSErr(err, route53resolver.ErrCodeResourceNotFoundException, "") { + return nil + } + if err != nil { + return fmt.Errorf("error deleting Route53 Resolver DNSSEC config (%s): %s", d.Id(), err) + } + + err = route53ResolverDnssecConfigWait(conn, d.Id(), d.Timeout(schema.TimeoutDelete), + []string{route53resolver.ResolverDNSSECValidationStatusDisabling}, + []string{route53resolver.ResolverDNSSECValidationStatusDisabled}) + if err != nil { + return err + } + + return nil +} + +func route53ResolverDnssecConfigWait(conn *route53resolver.Route53Resolver, id string, timeout time.Duration, pending, target []string) error { + stateConf := &resource.StateChangeConf{ + Pending: pending, + Target: target, + Refresh: route53ResolverDnssecConfigRefresh(conn, id), + Timeout: timeout, + Delay: 10 * time.Second, + MinTimeout: 5 * time.Second, + } + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf("error waiting for Route53 Resolver DNSSEC config (%s) to reach target state: %s", id, err) + } + + return nil +} + +func route53ResolverDnssecConfigRefresh(conn 
*route53resolver.Route53Resolver, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := conn.GetResolverDnssecConfig(&route53resolver.GetResolverDnssecConfigInput{ + ResourceId: aws.String(id), + }) + + if isAWSErr(err, route53resolver.ErrCodeResourceNotFoundException, "") { + return &route53resolver.ResolverDnssecConfig{}, route53ResolverDnssecConfigStatusNotFound, nil + } + + if err != nil { + return nil, "", err + } + + return resp.ResolverDNSSECConfig, aws.StringValue(resp.ResolverDNSSECConfig.ValidationStatus), nil + } +} From 64405329e0e66d789c80244ae77f2feb6f803b2b Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Sat, 9 Jan 2021 07:23:16 +0900 Subject: [PATCH 0502/1212] Add tests for aws_route53_resolver_dnssec_config --- ...aws_route53_resolver_dnssec_config_test.go | 237 ++++++++++++++++++ 1 file changed, 237 insertions(+) create mode 100644 aws/resource_aws_route53_resolver_dnssec_config_test.go diff --git a/aws/resource_aws_route53_resolver_dnssec_config_test.go b/aws/resource_aws_route53_resolver_dnssec_config_test.go new file mode 100644 index 00000000000..51bb8f3fc2a --- /dev/null +++ b/aws/resource_aws_route53_resolver_dnssec_config_test.go @@ -0,0 +1,237 @@ +package aws + +import ( + "fmt" + "log" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/route53resolver" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func init() { + resource.AddTestSweepers("aws_route53_resolver_dnssec_config", &resource.Sweeper{ + Name: "aws_route53_resolver_dnssec_config", + F: testSweepRoute53ResolverDnssecConfig, + }) +} + +func testSweepRoute53ResolverDnssecConfig(region string) error { + client, err := sharedClientForRegion(region) + if err != nil { + return fmt.Errorf("error getting client: %s", err) + } + conn := client.(*AWSClient).route53resolverconn + + var errors error + err = conn.ListResolverDnssecConfigsPages(&route53resolver.ListResolverDnssecConfigsInput{}, func(page *route53resolver.ListResolverDnssecConfigsOutput, isLast bool) bool { + if page == nil { + return !isLast + } + + for _, resolverDnssecConfig := range page.ResolverDnssecConfigs { + id := aws.StringValue(resolverDnssecConfig.ResourceId) + + log.Printf("[INFO] Deleting Route53 Resolver Dnssec config: %s", id) + _, err := conn.UpdateResolverDnssecConfig(&route53resolver.UpdateResolverDnssecConfigInput{ + ResourceId: aws.String(id), + Validation: aws.String(route53resolver.ResolverDNSSECValidationStatusDisabled), + }) + if isAWSErr(err, route53resolver.ErrCodeResourceNotFoundException, "") { + continue + } + if err != nil { + errors = multierror.Append(errors, fmt.Errorf("error deleting Route53 Resolver Resolver Dnssec config (%s): %w", id, err)) + continue + } + + err = route53ResolverEndpointWaitUntilTargetState(conn, id, 10*time.Minute, + []string{route53resolver.ResolverDNSSECValidationStatusDisabling}, + []string{route53resolver.ResolverDNSSECValidationStatusDisabled}) + if err != nil { + errors = multierror.Append(errors, err) + continue + } + } + + return !isLast + }) + if err != nil { + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping Route53 Resolver Resolver Dnssec config sweep for %s: %s", region, err) + return nil + } + errors = multierror.Append(errors, fmt.Errorf("error retrieving Route53 Resolver Resolver Dnssec config: 
%w", err)) + } + + return errors +} + +func TestAccAWSRoute53ResolverDnssecConfig_basic(t *testing.T) { + var config route53resolver.ResolverDnssecConfig + resourceName := "aws_route53_resolver_dnssec_config.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ErrorCheck: testAccErrorCheckSkipRoute53(t), + Providers: testAccProviders, + CheckDestroy: testAccCheckRoute53ResolverDnssecConfigDestroy, + Steps: []resource.TestStep{ + { + Config: testAccRoute53ResolverDnssecConfigConfigBasic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckRoute53ResolverDnssecConfigExists(resourceName, &config), + resource.TestCheckResourceAttrSet(resourceName, "id"), + resource.TestCheckResourceAttrSet(resourceName, "owner_id"), + resource.TestCheckResourceAttrSet(resourceName, "resource_id"), + resource.TestCheckResourceAttr(resourceName, "validation_status", "ENABLED"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSRoute53ResolverDnssecConfig_disappear(t *testing.T) { + var config route53resolver.ResolverDnssecConfig + resourceName := "aws_route53_resolver_dnssec_config.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ErrorCheck: testAccErrorCheckSkipRoute53(t), + Providers: testAccProviders, + CheckDestroy: testAccCheckRoute53ResolverDnssecConfigDestroy, + Steps: []resource.TestStep{ + { + Config: testAccRoute53ResolverDnssecConfigConfigBasic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckRoute53ResolverDnssecConfigExists(resourceName, &config), + testAccCheckResourceDisappears(testAccProvider, resourceAwsRoute53ResolverDnssecConfig(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccAWSRoute53ResolverDnssecConfig_disappear_VPC(t *testing.T) { + var config route53resolver.ResolverDnssecConfig + resourceName := "aws_route53_resolver_dnssec_config.test" + vpcResourceName := "aws_vpc.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ErrorCheck: testAccErrorCheckSkipRoute53(t), + Providers: testAccProviders, + CheckDestroy: testAccCheckRoute53ResolverDnssecConfigDestroy, + Steps: []resource.TestStep{ + { + Config: testAccRoute53ResolverDnssecConfigConfigBasic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckRoute53ResolverDnssecConfigExists(resourceName, &config), + testAccCheckResourceDisappears(testAccProvider, resourceAwsVpc(), vpcResourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckRoute53ResolverDnssecConfigDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).route53resolverconn + ec2Conn := testAccProvider.Meta().(*AWSClient).ec2conn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_route53_resolver_dnssec_config" { + continue + } + + vpc, err := vpcDescribe(ec2Conn, rs.Primary.ID) + if err != nil { + return err + } + + // The VPC has been deleted + if vpc == nil { + continue + } + + // Try to find the resource + out, err := conn.GetResolverDnssecConfig(&route53resolver.GetResolverDnssecConfigInput{ + ResourceId: aws.String(rs.Primary.ID), + }) + // Verify the error is what we want + if isAWSErr(err, route53resolver.ErrCodeResourceNotFoundException, "") { + continue + } + if err 
!= nil { + return err + } + if aws.StringValue(out.ResolverDNSSECConfig.ValidationStatus) == route53resolver.ResolverDNSSECValidationStatusDisabled { + continue + } + + return fmt.Errorf("Route 53 Resolver Dnssec config still exists: %s", rs.Primary.ID) + } + + return nil +} + +func testAccCheckRoute53ResolverDnssecConfigExists(n string, c *route53resolver.ResolverDnssecConfig) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Route 53 Resolver Dnssec config ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).route53resolverconn + resp, err := conn.GetResolverDnssecConfig(&route53resolver.GetResolverDnssecConfigInput{ + ResourceId: aws.String(rs.Primary.ID), + }) + if err != nil { + return err + } + + *c = *resp.ResolverDNSSECConfig + + return nil + } +} + +func testAccRoute53ResolverDnssecConfigBase(rName string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + enable_dns_support = true + enable_dns_hostnames = true + + tags = { + Name = %q + } +} +`, rName) +} + +func testAccRoute53ResolverDnssecConfigConfigBasic(rName string) string { + return testAccRoute53ResolverDnssecConfigBase(rName) + ` +resource "aws_route53_resolver_dnssec_config" "test" { + resource_id = aws_vpc.test.id +} +` +} From d0a354511213cabb14a368908def639eaa490199 Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Sat, 9 Jan 2021 07:23:46 +0900 Subject: [PATCH 0503/1212] Add a doc for aws_route53_resolver_dnssec_config --- ...ute53_resolver_dnssec_config.html.markdown | 55 +++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 website/docs/r/route53_resolver_dnssec_config.html.markdown diff --git a/website/docs/r/route53_resolver_dnssec_config.html.markdown b/website/docs/r/route53_resolver_dnssec_config.html.markdown new file mode 100644 index 00000000000..878c152e4c8 --- /dev/null +++ b/website/docs/r/route53_resolver_dnssec_config.html.markdown @@ -0,0 +1,55 @@ +--- +subcategory: "Route53 Resolver" +layout: "aws" +page_title: "AWS: aws_route53_resolver_dnssec_config" +description: |- + Provides a Route 53 Resolver DNSSEC config resource. +--- + +# Resource: aws_route53_resolver_dnssec_config + +Provides a Route 53 Resolver DNSSEC config resource. + +## Example Usage + +```hcl +resource "aws_vpc" "example" { + cidr_block = "10.0.0.0/16" + enable_dns_support = true + enable_dns_hostnames = true +} + +resource "aws_route53_resolver_dnssec_config" "example" { + resource_id = aws_vpc.example.id +} +``` + +## Argument Reference + +The following argument is supported: + +* `resource_id` - (Required) The ID of the virtual private cloud (VPC) that you're updating the DNSSEC validation status for. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - The ID for a configuration for DNSSEC validation. +* `owner_id` - The owner account ID of the virtual private cloud (VPC) for a configuration for DNSSEC validation. +* `validation_status` - The validation status for a DNSSEC configuration. The status can be one of the following: `ENABLING`, `ENABLED`, `DISABLING` and `DISABLED`. 
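+
+The exported `validation_status` can be watched while validation propagates, for example via an output (a minimal sketch reusing the `example` resource above; the output name is illustrative):
+
+```hcl
+# illustrative output name; validation_status is exported by this resource
+output "dnssec_validation_status" {
+  value = aws_route53_resolver_dnssec_config.example.validation_status
+}
+```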
+
+## Timeouts
+
+`aws_route53_resolver_dnssec_config` provides the following
+[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options:
+
+- `create` - (Default `10 minutes`) Used for creating Route 53 Resolver DNSSEC config
+- `delete` - (Default `10 minutes`) Used for destroying Route 53 Resolver DNSSEC config
+
+## Import
+
+ Route 53 Resolver DNSSEC configs can be imported using the VPC ID, e.g.
+
+```
+$ terraform import aws_route53_resolver_dnssec_config.example vpc-7a190fdssf3
+```

From ff805a66216c0af214e64374670b1aa8e4ce3c96 Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Sat, 9 Jan 2021 10:24:29 +0200
Subject: [PATCH 0504/1212] add with image test

---
 aws/resource_aws_sagemaker_domain_test.go | 100 +++++++++++++++++-----
 aws/resource_aws_sagemaker_image.go       |   3 +-
 2 files changed, 80 insertions(+), 23 deletions(-)

diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go
index eb3e4e612ee..e544c5f2217 100644
--- a/aws/resource_aws_sagemaker_domain_test.go
+++ b/aws/resource_aws_sagemaker_domain_test.go
@@ -69,7 +69,7 @@ func testSweepSagemakerDomains(region string) error {
 }

 func TestAccAWSSagemakerDomain_basic(t *testing.T) {
-	var notebook sagemaker.DescribeDomainOutput
+	var domain sagemaker.DescribeDomainOutput
 	rName := acctest.RandomWithPrefix("tf-acc-test")
 	resourceName := "aws_sagemaker_domain.test"

@@ -81,7 +81,7 @@ func TestAccAWSSagemakerDomain_basic(t *testing.T) {
 			{
 				Config: testAccAWSSagemakerDomainBasicConfig(rName),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckAWSSagemakerDomainExists(resourceName, &notebook),
+					testAccCheckAWSSagemakerDomainExists(resourceName, &domain),
 					resource.TestCheckResourceAttr(resourceName, "domain_name", rName),
 					resource.TestCheckResourceAttr(resourceName, "auth_mode", "IAM"),
 					resource.TestCheckResourceAttr(resourceName, "app_network_access_type", "PublicInternetOnly"),
@@ -106,7 +106,7 @@ }

 func TestAccAWSSagemakerDomain_tags(t *testing.T) {
-	var notebook sagemaker.DescribeDomainOutput
+	var domain sagemaker.DescribeDomainOutput
 	rName := acctest.RandomWithPrefix("tf-acc-test")
 	resourceName := "aws_sagemaker_domain.test"

@@ -118,7 +118,7 @@ func TestAccAWSSagemakerDomain_tags(t *testing.T) {
 			{
 				Config: testAccAWSSagemakerDomainConfigTags1(rName, "key1", "value1"),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckAWSSagemakerDomainExists(resourceName, &notebook),
+					testAccCheckAWSSagemakerDomainExists(resourceName, &domain),
 					resource.TestCheckResourceAttr(resourceName, "tags.%", "1"),
 					resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"),
 				),
 			},
@@ -131,7 +131,7 @@
 			{
 				Config: testAccAWSSagemakerDomainConfigTags2(rName, "key1", "value1updated", "key2", "value2"),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckAWSSagemakerDomainExists(resourceName, &notebook),
+					testAccCheckAWSSagemakerDomainExists(resourceName, &domain),
 					resource.TestCheckResourceAttr(resourceName, "tags.%", "2"),
 					resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"),
 					resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"),
 				),
 			},
 			{
 				Config: testAccAWSSagemakerDomainConfigTags1(rName, "key2", "value2"),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckAWSSagemakerDomainExists(resourceName, &notebook),
+					testAccCheckAWSSagemakerDomainExists(resourceName, &domain),
 					resource.TestCheckResourceAttr(resourceName, "tags.%", "1"),
 					resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"),
 					testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName),
 				),
 			},
 		},
 	})
 }

 func TestAccAWSSagemakerDomain_securityGroup(t *testing.T) {
-	var notebook sagemaker.DescribeDomainOutput
+	var domain sagemaker.DescribeDomainOutput
 	rName := acctest.RandomWithPrefix("tf-acc-test")
 	resourceName := "aws_sagemaker_domain.test"

@@ -163,7 +163,7 @@ func TestAccAWSSagemakerDomain_securityGroup(t *testing.T) {
 			{
 				Config: testAccAWSSagemakerDomainConfigSecurityGroup1(rName),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckAWSSagemakerDomainExists(resourceName, &notebook),
+					testAccCheckAWSSagemakerDomainExists(resourceName, &domain),
 					resource.TestCheckResourceAttr(resourceName, "default_user_settings.#", "1"),
 					resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.security_groups.#", "1"),
 				),
 			},
@@ -176,7 +176,7 @@
 			{
 				Config: testAccAWSSagemakerDomainConfigSecurityGroup2(rName),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckAWSSagemakerDomainExists(resourceName, &notebook),
+					testAccCheckAWSSagemakerDomainExists(resourceName, &domain),
 					resource.TestCheckResourceAttr(resourceName, "default_user_settings.#", "1"),
 					resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.security_groups.#", "2"),
 					testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName),
 				),
 			},
 		},
 	})
 }

 func TestAccAWSSagemakerDomain_sharingSettings(t *testing.T) {
-	var notebook sagemaker.DescribeDomainOutput
+	var domain sagemaker.DescribeDomainOutput
 	rName := acctest.RandomWithPrefix("tf-acc-test")
 	resourceName := "aws_sagemaker_domain.test"

@@ -199,10 +199,10 @@
 			{
 				Config: testAccAWSSagemakerDomainConfigSharingSettings(rName),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckAWSSagemakerDomainExists(resourceName, &notebook),
+					testAccCheckAWSSagemakerDomainExists(resourceName, &domain),
 					resource.TestCheckResourceAttr(resourceName, "default_user_settings.#", "1"),
 					resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.sharing_settings.#", "1"),
-					resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.sharing_settings.0.notebook_output_option", "Allowed"),
+					resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.sharing_settings.0.domain_output_option", "Allowed"),
 					resource.TestCheckResourceAttrPair(resourceName, "default_user_settings.0.sharing_settings.0.s3_kms_key_id", "aws_kms_key.test", "arn"),
 					resource.TestCheckResourceAttrSet(resourceName, "default_user_settings.0.sharing_settings.0.s3_output_path"),
 					testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName),
 				),
 			},
 			{
 				ResourceName:      resourceName,
 				ImportState:       true,
 				ImportStateVerify: true,
 			},
 		},
 	})
 }

 func TestAccAWSSagemakerDomain_tensorboardAppSettings(t *testing.T) {
-	var notebook sagemaker.DescribeDomainOutput
+	var domain sagemaker.DescribeDomainOutput
 	rName := acctest.RandomWithPrefix("tf-acc-test")
 	resourceName := "aws_sagemaker_domain.test"

@@ -230,7 +230,7 @@
 			{
 				Config: testAccAWSSagemakerDomainConfigTensorBoardAppSettings(rName),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckAWSSagemakerDomainExists(resourceName, &notebook),
+					testAccCheckAWSSagemakerDomainExists(resourceName, &domain),
 					resource.TestCheckResourceAttr(resourceName, "default_user_settings.#", "1"),
 					resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.tensor_board_app_settings.#", "1"),
 					resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.tensor_board_app_settings.0.default_resource_spec.#", "1"),
@@ -247,8 +247,39 @@
 	})
 }

+func TestAccAWSSagemakerDomain_tensorboardAppSettingsWithImage(t *testing.T) {
+	var domain sagemaker.DescribeDomainOutput
+	rName := acctest.RandomWithPrefix("tf-acc-test")
+	resourceName := "aws_sagemaker_domain.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSSagemakerDomainDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAWSSagemakerDomainConfigTensorBoardAppSettingsWithImage(rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSSagemakerDomainExists(resourceName, &domain),
+					resource.TestCheckResourceAttr(resourceName, "default_user_settings.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.tensor_board_app_settings.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.tensor_board_app_settings.0.default_resource_spec.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.tensor_board_app_settings.0.default_resource_spec.0.instance_type", "ml.t3.micro"),
+					resource.TestCheckResourceAttrPair(resourceName, "default_user_settings.0.tensor_board_app_settings.0.default_resource_spec.0.sagemaker_image_arn", "aws_sagemaker_image.test", "arn"),
+					testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName),
+				),
+			},
+			{
+				ResourceName:      resourceName,
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+}
+
 func TestAccAWSSagemakerDomain_kernelGatewayAppSettings(t *testing.T) {
-	var notebook sagemaker.DescribeDomainOutput
+	var domain sagemaker.DescribeDomainOutput
 	rName := acctest.RandomWithPrefix("tf-acc-test")
 	resourceName := "aws_sagemaker_domain.test"

@@ -260,7 +291,7 @@
 			{
 				Config: testAccAWSSagemakerDomainConfigKernelGatewayAppSettings(rName),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckAWSSagemakerDomainExists(resourceName, &notebook),
+					testAccCheckAWSSagemakerDomainExists(resourceName, &domain),
 					resource.TestCheckResourceAttr(resourceName, "default_user_settings.#", "1"),
 					resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.kernel_gateway_app_settings.#", "1"),
 					resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.kernel_gateway_app_settings.0.default_resource_spec.#", "1"),
@@ -278,7 +309,7 @@ }

 func TestAccAWSSagemakerDomain_jupyterServerAppSettings(t *testing.T) {
-	var notebook sagemaker.DescribeDomainOutput
+	var domain sagemaker.DescribeDomainOutput
 	rName := acctest.RandomWithPrefix("tf-acc-test")
 	resourceName := "aws_sagemaker_domain.test"

@@ -290,7 +321,7 @@
 			{
 				Config: testAccAWSSagemakerDomainConfigJupyterServerAppSettings(rName),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckAWSSagemakerDomainExists(resourceName, &notebook),
+					testAccCheckAWSSagemakerDomainExists(resourceName, &domain),
 					resource.TestCheckResourceAttr(resourceName, "default_user_settings.#", "1"),
 					resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.jupyter_server_app_settings.#", "1"),
 					resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.jupyter_server_app_settings.0.default_resource_spec.#", "1"),
@@ -308,7 +339,7 @@ }

 func TestAccAWSSagemakerDomain_disappears(t *testing.T) {
-	var notebook sagemaker.DescribeDomainOutput
+	var domain sagemaker.DescribeDomainOutput
 	rName := acctest.RandomWithPrefix("tf-acc-test")
 	resourceName := "aws_sagemaker_domain.test"

@@ -320,7 +351,7 @@
 			{
 				Config: testAccAWSSagemakerDomainBasicConfig(rName),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckAWSSagemakerDomainExists(resourceName, &notebook),
+					testAccCheckAWSSagemakerDomainExists(resourceName, &domain),
 					testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName),
 					testAccCheckResourceDisappears(testAccProvider, resourceAwsSagemakerDomain(), resourceName),
 				),
@@ -647,7 +678,7 @@ resource "aws_sagemaker_domain" "test" {
     execution_role = aws_iam_role.test.arn

     sharing_settings {
-      notebook_output_option = "Allowed"
+      domain_output_option = "Allowed"
       s3_kms_key_id  = aws_kms_key.test.arn
       s3_output_path = "s3://${aws_s3_bucket.test.bucket}/sharing"
     }
@@ -677,6 +708,33 @@ resource "aws_sagemaker_domain" "test" {
 `, rName)
 }

+func testAccAWSSagemakerDomainConfigTensorBoardAppSettingsWithImage(rName string) string {
+	return testAccAWSSagemakerDomainConfigBase(rName) + fmt.Sprintf(`
+resource "aws_sagemaker_image" "test" {
+  image_name = %[1]q
+  role_arn   = aws_iam_role.test.arn
+}
+
+resource "aws_sagemaker_domain" "test" {
+  domain_name = %[1]q
+  auth_mode   = "IAM"
+  vpc_id      = aws_vpc.test.id
+  subnet_ids  = [aws_subnet.test.id]
+
+  default_user_settings {
+    execution_role = aws_iam_role.test.arn
+
+    tensor_board_app_settings {
+      default_resource_spec {
+        instance_type       = "ml.t3.micro"
+        sagemaker_image_arn = aws_sagemaker_image.test.arn
+      }
+    }
+  }
+}
+`, rName)
+}
+
 func testAccAWSSagemakerDomainConfigJupyterServerAppSettings(rName string) string {
 	return testAccAWSSagemakerDomainConfigBase(rName) + fmt.Sprintf(`
 resource "aws_sagemaker_domain" "test" {
diff --git a/aws/resource_aws_sagemaker_image.go b/aws/resource_aws_sagemaker_image.go
index 3da9bdb809c..104da90c363 100644
--- a/aws/resource_aws_sagemaker_image.go
+++ b/aws/resource_aws_sagemaker_image.go
@@ -201,11 +201,10 @@ func resourceAwsSagemakerImageDelete(d *schema.ResourceData, meta interface{}) e
 	}

 	if _, err := waiter.ImageDeleted(conn, d.Id()); err != nil {
-		if isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "No Image with the name") {
+		if isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "does not exist") {
 			return nil
 		}
 		return fmt.Errorf("error waiting for SageMaker Image (%s) to delete: %w", d.Id(), err)
-
 	}

 	return nil

From d0f80f91f1abb43e0b1fb9db1af07e4af36465e9 Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Sat, 9 Jan 2021 10:27:11 +0200
Subject: [PATCH 0505/1212] fmt

---
 aws/resource_aws_sagemaker_domain_test.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go
index e544c5f2217..95f7b3d3ab7 100644
--- a/aws/resource_aws_sagemaker_domain_test.go
+++ b/aws/resource_aws_sagemaker_domain_test.go
@@ -679,8 +679,8 @@ resource "aws_sagemaker_domain" "test" {
   sharing_settings {
domain_output_option = "Allowed" - s3_kms_key_id = aws_kms_key.test.arn - s3_output_path = "s3://${aws_s3_bucket.test.bucket}/sharing" + s3_kms_key_id = aws_kms_key.test.arn + s3_output_path = "s3://${aws_s3_bucket.test.bucket}/sharing" } } } @@ -726,7 +726,7 @@ resource "aws_sagemaker_domain" "test" { tensor_board_app_settings { default_resource_spec { - instance_type = "ml.t3.micro" + instance_type = "ml.t3.micro" sagemaker_image_arn = aws_sagemaker_image.test.arn } } From a987d7778b1451420b6ed09d9d89c96ab80ee600 Mon Sep 17 00:00:00 2001 From: Roberth Kulbin Date: Wed, 2 Sep 2020 19:08:01 +0100 Subject: [PATCH 0506/1212] r/aws_cloudwatch_composite_alarm: add resource --- aws/provider.go | 1 + ...resource_aws_cloudwatch_composite_alarm.go | 269 ++++++++++++++++++ ...rce_aws_cloudwatch_composite_alarm_test.go | 256 +++++++++++++++++ .../cloudwatch_composite_alarm.html.markdown | 56 ++++ 4 files changed, 582 insertions(+) create mode 100644 aws/resource_aws_cloudwatch_composite_alarm.go create mode 100644 aws/resource_aws_cloudwatch_composite_alarm_test.go create mode 100644 website/docs/r/cloudwatch_composite_alarm.html.markdown diff --git a/aws/provider.go b/aws/provider.go index 0543109d0f1..d98ac926dc7 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -513,6 +513,7 @@ func Provider() *schema.Provider { "aws_cloudhsm_v2_cluster": resourceAwsCloudHsmV2Cluster(), "aws_cloudhsm_v2_hsm": resourceAwsCloudHsmV2Hsm(), "aws_cognito_resource_server": resourceAwsCognitoResourceServer(), + "aws_cloudwatch_composite_alarm": resourceAwsCloudWatchCompositeAlarm(), "aws_cloudwatch_metric_alarm": resourceAwsCloudWatchMetricAlarm(), "aws_cloudwatch_dashboard": resourceAwsCloudWatchDashboard(), "aws_codedeploy_app": resourceAwsCodeDeployApp(), diff --git a/aws/resource_aws_cloudwatch_composite_alarm.go b/aws/resource_aws_cloudwatch_composite_alarm.go new file mode 100644 index 00000000000..dada6aa8f42 --- /dev/null +++ b/aws/resource_aws_cloudwatch_composite_alarm.go @@ -0,0 +1,269 @@ +package aws + +import ( + "context" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" +) + +func resourceAwsCloudWatchCompositeAlarm() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceAwsCloudWatchCompositeAlarmCreate, + ReadContext: resourceAwsCloudWatchCompositeAlarmRead, + UpdateContext: resourceAwsCloudWatchCompositeAlarmUpdate, + DeleteContext: resourceAwsCloudWatchCompositeAlarmDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Schema: map[string]*schema.Schema{ + "actions_enabled": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "alarm_actions": { + Type: schema.TypeSet, + Optional: true, + Set: schema.HashString, + MaxItems: 5, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateArn, + }, + }, + "alarm_description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), + }, + "alarm_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(0, 255), + }, + "alarm_rule": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 10240), + }, + 
"arn": { + Type: schema.TypeString, + Computed: true, + }, + "insufficient_data_actions": { + Type: schema.TypeSet, + Optional: true, + Set: schema.HashString, + MaxItems: 5, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateArn, + }, + }, + "ok_actions": { + Type: schema.TypeSet, + Optional: true, + Set: schema.HashString, + MaxItems: 5, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateArn, + }, + }, + "tags": tagsSchema(), + }, + } +} + +func resourceAwsCloudWatchCompositeAlarmCreate( + ctx context.Context, + d *schema.ResourceData, + meta interface{}, +) diag.Diagnostics { + conn := meta.(*AWSClient).cloudwatchconn + name := d.Get("alarm_name").(string) + + input := expandAwsCloudWatchPutCompositeAlarmInput(d) + _, err := conn.PutCompositeAlarmWithContext(ctx, &input) + if err != nil { + return diag.Errorf("create composite alarm: %s", err) + } + + log.Printf("[INFO] Created Composite Alarm %s.", name) + d.SetId(name) + + return resourceAwsCloudWatchCompositeAlarmRead(ctx, d, meta) +} + +func resourceAwsCloudWatchCompositeAlarmRead( + ctx context.Context, + d *schema.ResourceData, + meta interface{}, +) diag.Diagnostics { + conn := meta.(*AWSClient).cloudwatchconn + ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig + name := d.Id() + + alarm, ok, err := getAwsCloudWatchCompositeAlarm(ctx, conn, name) + switch { + case err != nil: + return diag.FromErr(err) + case !ok: + log.Printf("[WARN] Composite alarm %s has disappeared!", name) + d.SetId("") + return nil + } + + d.Set("actions_enabled", alarm.ActionsEnabled) + + if err := d.Set("alarm_actions", flattenStringSet(alarm.AlarmActions)); err != nil { + return diag.Errorf("set alarm_actions: %s", err) + } + + d.Set("alarm_description", alarm.AlarmDescription) + d.Set("alarm_name", alarm.AlarmName) + d.Set("alarm_rule", alarm.AlarmRule) + d.Set("arn", alarm.AlarmArn) + + if err := d.Set("insufficient_data_actions", flattenStringSet(alarm.InsufficientDataActions)); err != nil { + return diag.Errorf("set insufficient_data_actions: %s", err) + } + + if err := d.Set("ok_actions", flattenStringSet(alarm.OKActions)); err != nil { + return diag.Errorf("set ok_actions: %s", err) + } + + tags, err := keyvaluetags.CloudwatchListTags(conn, aws.StringValue(alarm.AlarmArn)) + if err != nil { + return diag.Errorf("list tags of alarm: %s", err) + } + + if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + return diag.Errorf("set tags: %s", err) + } + + return nil +} + +func resourceAwsCloudWatchCompositeAlarmUpdate( + ctx context.Context, + d *schema.ResourceData, + meta interface{}, +) diag.Diagnostics { + conn := meta.(*AWSClient).cloudwatchconn + name := d.Id() + + log.Printf("[INFO] Updating Composite Alarm %s...", name) + + input := expandAwsCloudWatchPutCompositeAlarmInput(d) + _, err := conn.PutCompositeAlarmWithContext(ctx, &input) + if err != nil { + return diag.Errorf("create composite alarm: %s", err) + } + + arn := d.Get("arn").(string) + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.CloudwatchUpdateTags(conn, arn, o, n); err != nil { + return diag.Errorf("update tags: %s", err) + } + } + + return resourceAwsCloudWatchCompositeAlarmRead(ctx, d, meta) +} + +func resourceAwsCloudWatchCompositeAlarmDelete( + ctx context.Context, + d *schema.ResourceData, + meta interface{}, +) diag.Diagnostics { + conn := meta.(*AWSClient).cloudwatchconn + name := d.Id() + + log.Printf("[INFO] Deleting Composite Alarm %s...", name) 
+ + input := cloudwatch.DeleteAlarmsInput{ + AlarmNames: aws.StringSlice([]string{name}), + } + + _, err := conn.DeleteAlarmsWithContext(ctx, &input) + switch { + case isAWSErr(err, "ResourceNotFound", ""): + log.Printf("[WARN] Composite Alarm %s has disappeared!", name) + return nil + case err != nil: + return diag.FromErr(err) + } + + return nil +} + +func expandAwsCloudWatchPutCompositeAlarmInput(d *schema.ResourceData) cloudwatch.PutCompositeAlarmInput { + out := cloudwatch.PutCompositeAlarmInput{} + + if v, ok := d.GetOk("actions_enabled"); ok { + out.ActionsEnabled = aws.Bool(v.(bool)) + } + + if v, ok := d.GetOk("alarm_actions"); ok { + out.AlarmActions = expandStringSet(v.(*schema.Set)) + } + + if v, ok := d.GetOk("alarm_description"); ok { + out.AlarmDescription = aws.String(v.(string)) + } + + if v, ok := d.GetOk("alarm_name"); ok { + out.AlarmName = aws.String(v.(string)) + } + + if v, ok := d.GetOk("alarm_rule"); ok { + out.AlarmRule = aws.String(v.(string)) + } + + if v, ok := d.GetOk("insufficient_data_actions"); ok { + out.InsufficientDataActions = expandStringSet(v.(*schema.Set)) + } + + if v, ok := d.GetOk("ok_actions"); ok { + out.OKActions = expandStringSet(v.(*schema.Set)) + } + + if v, ok := d.GetOk("tags"); ok { + out.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().CloudwatchTags() + } + + return out +} + +func getAwsCloudWatchCompositeAlarm( + ctx context.Context, + conn *cloudwatch.CloudWatch, + name string, +) (*cloudwatch.CompositeAlarm, bool, error) { + input := cloudwatch.DescribeAlarmsInput{ + AlarmNames: aws.StringSlice([]string{name}), + AlarmTypes: aws.StringSlice([]string{cloudwatch.AlarmTypeCompositeAlarm}), + } + + output, err := conn.DescribeAlarmsWithContext(ctx, &input) + switch { + case err != nil: + return nil, false, err + case len(output.CompositeAlarms) != 1: + return nil, false, nil + } + + return output.CompositeAlarms[0], true, nil +} diff --git a/aws/resource_aws_cloudwatch_composite_alarm_test.go b/aws/resource_aws_cloudwatch_composite_alarm_test.go new file mode 100644 index 00000000000..fb9862b5c8b --- /dev/null +++ b/aws/resource_aws_cloudwatch_composite_alarm_test.go @@ -0,0 +1,256 @@ +package aws + +import ( + "fmt" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func testAccCheckAwsCloudWatchCompositeAlarmExists(n string, alarm *cloudwatch.CompositeAlarm) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := testAccProvider.Meta().(*AWSClient).cloudwatchconn + params := cloudwatch.DescribeAlarmsInput{ + AlarmNames: []*string{aws.String(rs.Primary.ID)}, + AlarmTypes: []*string{aws.String(cloudwatch.AlarmTypeCompositeAlarm)}, + } + resp, err := conn.DescribeAlarms(¶ms) + if err != nil { + return err + } + if len(resp.CompositeAlarms) == 0 { + return fmt.Errorf("Alarm not found") + } + *alarm = *resp.CompositeAlarms[0] + + return nil + } +} + +func testAccCheckAwsCloudWatchCompositeAlarmDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).cloudwatchconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_cloudwatch_composite_alarm" { + continue + } + + params := cloudwatch.DescribeAlarmsInput{ + AlarmNames: 
[]*string{aws.String(rs.Primary.ID)}, + } + + resp, err := conn.DescribeAlarms(¶ms) + + if err == nil { + if len(resp.MetricAlarms) != 0 && + *resp.MetricAlarms[0].AlarmName == rs.Primary.ID { + return fmt.Errorf("Alarm Still Exists: %s", rs.Primary.ID) + } + } + } + + return nil +} + +func TestAccAwsCloudWatchCompositeAlarm_basic(t *testing.T) { + alarm := cloudwatch.CompositeAlarm{} + suffix := acctest.RandString(8) + resourceName := "aws_cloudwatch_composite_alarm.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsCloudWatchCompositeAlarmDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsCloudWatchCompositeAlarmConfig_create(suffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName, &alarm), + resource.TestCheckResourceAttr(resourceName, "alarm_actions.#", "1"), + resource.TestCheckResourceAttr(resourceName, "alarm_description", "Test 1"), + resource.TestCheckResourceAttr(resourceName, "alarm_name", "tf-test-composite-"+suffix), + resource.TestCheckResourceAttr(resourceName, "alarm_rule", fmt.Sprintf("ALARM(tf-test-metric-0-%[1]s) OR ALARM(tf-test-metric-1-%[1]s)", suffix)), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "cloudwatch", regexp.MustCompile(`alarm:.+`)), + resource.TestCheckResourceAttr(resourceName, "insufficient_data_actions.#", "1"), + resource.TestCheckResourceAttr(resourceName, "ok_actions.#", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsCloudWatchCompositeAlarmConfig_update(suffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName, &alarm), + resource.TestCheckResourceAttr(resourceName, "alarm_actions.#", "2"), + resource.TestCheckResourceAttr(resourceName, "alarm_description", "Test 2"), + resource.TestCheckResourceAttr(resourceName, "alarm_name", "tf-test-composite-"+suffix), + resource.TestCheckResourceAttr(resourceName, "alarm_rule", fmt.Sprintf("ALARM(tf-test-metric-0-%[1]s)", suffix)), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "cloudwatch", regexp.MustCompile(`alarm:.+`)), + resource.TestCheckResourceAttr(resourceName, "insufficient_data_actions.#", "2"), + resource.TestCheckResourceAttr(resourceName, "ok_actions.#", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + ), + }, + }, + }) +} + +func testAccAwsCloudWatchCompositeAlarmConfig_create(suffix string) string { + return fmt.Sprintf(` +resource "aws_cloudwatch_metric_alarm" "test" { + count = 2 + + alarm_name = "tf-test-metric-${count.index}-%[1]s" + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 2 + metric_name = "CPUUtilization" + namespace = "AWS/EC2" + period = 120 + statistic = "Average" + threshold = 80 + + dimensions = { + InstanceId = "i-abc123" + } +} + +resource "aws_sns_topic" "test" { + count = 1 + name = "tf-test-alarms-${count.index}-%[1]s" +} + +resource "aws_cloudwatch_composite_alarm" "test" { + alarm_actions = aws_sns_topic.test.*.arn + alarm_description = "Test 1" + alarm_name = "tf-test-composite-%[1]s" + alarm_rule = join(" OR ", formatlist("ALARM(%%s)", aws_cloudwatch_metric_alarm.test.*.alarm_name)) + insufficient_data_actions = aws_sns_topic.test.*.arn + ok_actions = aws_sns_topic.test.*.arn + + tags = { + Foo = "Bar" + } +} +`, suffix) +} + 
+func testAccAwsCloudWatchCompositeAlarmConfig_update(suffix string) string { + return fmt.Sprintf(` +resource "aws_cloudwatch_metric_alarm" "test" { + count = 2 + + alarm_name = "tf-test-metric-${count.index}-%[1]s" + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 2 + metric_name = "CPUUtilization" + namespace = "AWS/EC2" + period = 120 + statistic = "Average" + threshold = 80 + + dimensions = { + InstanceId = "i-abc123" + } +} + +resource "aws_sns_topic" "test" { + count = 2 + name = "tf-test-alarms-${count.index}-%[1]s" +} + +resource "aws_cloudwatch_composite_alarm" "test" { + alarm_actions = aws_sns_topic.test.*.arn + alarm_description = "Test 2" + alarm_name = "tf-test-composite-%[1]s" + alarm_rule = "ALARM(${aws_cloudwatch_metric_alarm.test[0].alarm_name})" + insufficient_data_actions = aws_sns_topic.test.*.arn + ok_actions = aws_sns_topic.test.*.arn + + tags = { + Foo = "Bar" + Bax = "Baf" + } +} +`, suffix) +} + +func TestAccAwsCloudWatchCompositeAlarm_disappears(t *testing.T) { + alarm := cloudwatch.CompositeAlarm{} + suffix := acctest.RandString(8) + resourceName := "aws_cloudwatch_composite_alarm.test" + + checkDisappears := func(*terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).cloudwatchconn + alarmNames := []string{ + "tf-test-composite-" + suffix, + "tf-test-metric-" + suffix, + } + + for _, name := range alarmNames { + input := cloudwatch.DeleteAlarmsInput{ + AlarmNames: []*string{&name}, + } + + _, err := conn.DeleteAlarms(&input) + if err != nil { + return err + } + } + + return nil + } + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsCloudWatchCompositeAlarmDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsCloudWatchCompositeAlarmConfig_disappears(suffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName, &alarm), + checkDisappears, + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccAwsCloudWatchCompositeAlarmConfig_disappears(suffix string) string { + return fmt.Sprintf(` +resource "aws_cloudwatch_metric_alarm" "test" { + alarm_name = "tf-test-metric-%[1]s" + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 2 + metric_name = "CPUUtilization" + namespace = "AWS/EC2" + period = 120 + statistic = "Average" + threshold = 80 + + dimensions = { + InstanceId = "i-abc123" + } +} + +resource "aws_cloudwatch_composite_alarm" "test" { + alarm_name = "tf-test-composite-%[1]s" + alarm_rule = "ALARM(${aws_cloudwatch_metric_alarm.test.alarm_name})" +} +`, suffix) +} diff --git a/website/docs/r/cloudwatch_composite_alarm.html.markdown b/website/docs/r/cloudwatch_composite_alarm.html.markdown new file mode 100644 index 00000000000..4a140d0f465 --- /dev/null +++ b/website/docs/r/cloudwatch_composite_alarm.html.markdown @@ -0,0 +1,56 @@ +--- +subcategory: "CloudWatch" +layout: "aws" +page_title: "AWS: aws_cloudwatch_composite_alarm" +description: |- + Provides a CloudWatch Composite Alarm resource. +--- + +# Resource: aws_cloudwatch_composite_alarm + +Provides a CloudWatch Composite Alarm resource. + +~> **NOTE:** An alarm (composite or metric) cannot be destroyed when there are other composite alarms depending on it. This can lead to a cyclical dependency on update, as Terraform will unsuccessfully attempt to destroy alarms before updating the rule. Consider using `depends_on`, references to alarm names, and two-stage updates. 
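The note above suggests `depends_on` plus attribute references as the workaround; a minimal sketch of that pattern (resource names here are illustrative, not taken from this patch) might look like:

```hcl
resource "aws_cloudwatch_metric_alarm" "cpu" {
  alarm_name          = "example-cpu-high"
  comparison_operator = "GreaterThanOrEqualToThreshold"
  evaluation_periods  = 2
  metric_name         = "CPUUtilization"
  namespace           = "AWS/EC2"
  period              = 120
  statistic           = "Average"
  threshold           = 80
}

resource "aws_cloudwatch_composite_alarm" "example" {
  alarm_name = "example-composite"

  # Referencing alarm_name (not a hard-coded string) gives Terraform the
  # dependency edge it needs to order create/destroy between the two alarms.
  alarm_rule = "ALARM(${aws_cloudwatch_metric_alarm.cpu.alarm_name})"

  depends_on = [aws_cloudwatch_metric_alarm.cpu]
}
```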
+
+## Example Usage
+
+```hcl
+resource "aws_cloudwatch_composite_alarm" "example" {
+  alarm_description = "This is a composite alarm!"
+  alarm_name        = "example-composite-alarm"
+
+  alarm_actions = aws_sns_topic.example.arn
+  ok_actions    = aws_sns_topic.example.arn
+
+  alarm_rule = <<-EOF
+  ALARM(${aws_cloudwatch_metric_alarm.alpha.alarm_name}) OR
+  ALARM(${aws_cloudwatch_metric_alarm.bravo.alarm_name})
+  EOF
+}
+```
+
+## Argument Reference
+
+* `actions_enabled` - (Optional) Indicates whether actions should be executed during any changes to the alarm state of the composite alarm. Defaults to `true`.
+* `alarm_actions` - (Optional) The set of actions to execute when this alarm transitions to the ALARM state from any other state. Each action is specified as an ARN. Up to 5 actions are allowed.
+* `alarm_description` - (Optional) The description for the composite alarm.
+* `alarm_name` - (Required) The name for the composite alarm. This name must be unique within the region.
+* `alarm_rule` - (Required) An expression that specifies which other alarms are to be evaluated to determine this composite alarm's state. For syntax, see [Creating a Composite Alarm](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Create_Composite_Alarm.html). The maximum length is 10240 characters.
+* `insufficient_data_actions` - (Optional) The set of actions to execute when this alarm transitions to the INSUFFICIENT_DATA state from any other state. Each action is specified as an ARN. Up to 5 actions are allowed.
+* `ok_actions` - (Optional) The set of actions to execute when this alarm transitions to an OK state from any other state. Each action is specified as an ARN. Up to 5 actions are allowed.
+* `tags` - (Optional) A map of tags to associate with the alarm. Up to 50 tags are allowed.
+
+## Attributes Reference

Date: Sat, 9 Jan 2021 19:35:24 +0000
Subject: [PATCH 0507/1212] docs: adds preferred_maintenance_window to aws_docdb_cluster

---
 website/docs/r/docdb_cluster.html.markdown | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/docs/r/docdb_cluster.html.markdown b/website/docs/r/docdb_cluster.html.markdown
index 37a5daa0e01..928c12978ea 100644
--- a/website/docs/r/docdb_cluster.html.markdown
+++ b/website/docs/r/docdb_cluster.html.markdown
@@ -67,6 +67,7 @@ The following arguments are supported:
 * `port` - (Optional) The port on which the DB accepts connections
 * `preferred_backup_window` - (Optional) The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter. Time in UTC. Default: A 30-minute window selected at random from an 8-hour block of time per region, e.g. 04:00-09:00
+* `preferred_maintenance_window` - (Optional) The weekly time range during which system maintenance can occur, in UTC, e.g. wed:04:00-wed:04:30
 * `skip_final_snapshot` - (Optional) Determines whether a final DB snapshot is created before the DB cluster is deleted. If true is specified, no DB snapshot is created. If false is specified, a DB snapshot is created before the DB cluster is deleted, using the value from `final_snapshot_identifier`. Default is `false`.
 * `snapshot_identifier` - (Optional) Specifies whether or not to create this cluster from a snapshot. You can use either the name or ARN when specifying a DB cluster snapshot, or the ARN when specifying a DB snapshot.
 * `storage_encrypted` - (Optional) Specifies whether the DB cluster is encrypted. The default is `false`.
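The `preferred_maintenance_window` argument documented above slots in alongside the existing backup-window settings; as a rough sketch (identifiers and credential values are placeholders, not taken from the patch), a cluster pinning both windows might look like:

```hcl
resource "aws_docdb_cluster" "example" {
  cluster_identifier      = "example-docdb-cluster"
  engine                  = "docdb"
  master_username         = "exampleuser"
  master_password         = "mustbeeightchars"
  backup_retention_period = 5

  # Daily backups run in this UTC window...
  preferred_backup_window = "04:00-09:00"

  # ...while disruptive maintenance is confined to this weekly UTC window.
  preferred_maintenance_window = "wed:04:00-wed:04:30"

  skip_final_snapshot = true
}
```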
From f7a9453eec68f6682d7853606bdf3c515277b647 Mon Sep 17 00:00:00 2001 From: Roberth Kulbin Date: Sat, 9 Jan 2021 14:41:10 +0000 Subject: [PATCH 0508/1212] r/aws_cloudwatch_composite_alarm: code review --- .../service/cloudwatch/finder/finder.go | 24 ++ ...resource_aws_cloudwatch_composite_alarm.go | 72 +++--- ...rce_aws_cloudwatch_composite_alarm_test.go | 233 +++++++++++------- .../cloudwatch_composite_alarm.html.markdown | 6 +- 4 files changed, 194 insertions(+), 141 deletions(-) create mode 100644 aws/internal/service/cloudwatch/finder/finder.go diff --git a/aws/internal/service/cloudwatch/finder/finder.go b/aws/internal/service/cloudwatch/finder/finder.go new file mode 100644 index 00000000000..7fa7069ea89 --- /dev/null +++ b/aws/internal/service/cloudwatch/finder/finder.go @@ -0,0 +1,24 @@ +package finder + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudwatch" +) + +func CompositeAlarmByName(conn *cloudwatch.CloudWatch, name string) (*cloudwatch.CompositeAlarm, error) { + input := cloudwatch.DescribeAlarmsInput{ + AlarmNames: aws.StringSlice([]string{name}), + AlarmTypes: aws.StringSlice([]string{cloudwatch.AlarmTypeCompositeAlarm}), + } + + output, err := conn.DescribeAlarms(&input) + if err != nil { + return nil, err + } + + if output == nil || len(output.CompositeAlarms) != 1 { + return nil, nil + } + + return output.CompositeAlarms[0], nil +} diff --git a/aws/resource_aws_cloudwatch_composite_alarm.go b/aws/resource_aws_cloudwatch_composite_alarm.go index dada6aa8f42..f5cbb0d9c0b 100644 --- a/aws/resource_aws_cloudwatch_composite_alarm.go +++ b/aws/resource_aws_cloudwatch_composite_alarm.go @@ -6,11 +6,12 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/cloudwatch/finder" ) func resourceAwsCloudWatchCompositeAlarm() *schema.Resource { @@ -96,7 +97,7 @@ func resourceAwsCloudWatchCompositeAlarmCreate( input := expandAwsCloudWatchPutCompositeAlarmInput(d) _, err := conn.PutCompositeAlarmWithContext(ctx, &input) if err != nil { - return diag.Errorf("create composite alarm: %s", err) + return diag.Errorf("error creating composite alarm: %s", err) } log.Printf("[INFO] Created Composite Alarm %s.", name) @@ -114,20 +115,25 @@ func resourceAwsCloudWatchCompositeAlarmRead( ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig name := d.Id() - alarm, ok, err := getAwsCloudWatchCompositeAlarm(ctx, conn, name) - switch { - case err != nil: - return diag.FromErr(err) - case !ok: - log.Printf("[WARN] Composite alarm %s has disappeared!", name) - d.SetId("") - return nil + alarm, err := finder.CompositeAlarmByName(conn, name) + if err != nil { + return diag.Errorf("error reading composite alarm (%s): %s", name, err) + } + + if alarm == nil { + if !d.IsNewResource() { + log.Printf("[WARN] CloudWatch Composite alarm %s not found, removing from state", name) + d.SetId("") + return nil + } + + return diag.Errorf("error reading composite alarm (%s): alarm not filtered", name) } d.Set("actions_enabled", alarm.ActionsEnabled) if err := d.Set("alarm_actions", flattenStringSet(alarm.AlarmActions)); err != nil { - return diag.Errorf("set 
alarm_actions: %s", err) + return diag.Errorf("error setting alarm_actions: %s", err) } d.Set("alarm_description", alarm.AlarmDescription) @@ -136,20 +142,20 @@ func resourceAwsCloudWatchCompositeAlarmRead( d.Set("arn", alarm.AlarmArn) if err := d.Set("insufficient_data_actions", flattenStringSet(alarm.InsufficientDataActions)); err != nil { - return diag.Errorf("set insufficient_data_actions: %s", err) + return diag.Errorf("error setting insufficient_data_actions: %s", err) } if err := d.Set("ok_actions", flattenStringSet(alarm.OKActions)); err != nil { - return diag.Errorf("set ok_actions: %s", err) + return diag.Errorf("error setting ok_actions: %s", err) } tags, err := keyvaluetags.CloudwatchListTags(conn, aws.StringValue(alarm.AlarmArn)) if err != nil { - return diag.Errorf("list tags of alarm: %s", err) + return diag.Errorf("error listing tags of alarm: %s", err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return diag.Errorf("set tags: %s", err) + return diag.Errorf("error setting tags: %s", err) } return nil @@ -168,7 +174,7 @@ func resourceAwsCloudWatchCompositeAlarmUpdate( input := expandAwsCloudWatchPutCompositeAlarmInput(d) _, err := conn.PutCompositeAlarmWithContext(ctx, &input) if err != nil { - return diag.Errorf("create composite alarm: %s", err) + return diag.Errorf("error creating composite alarm: %s", err) } arn := d.Get("arn").(string) @@ -176,7 +182,7 @@ func resourceAwsCloudWatchCompositeAlarmUpdate( o, n := d.GetChange("tags") if err := keyvaluetags.CloudwatchUpdateTags(conn, arn, o, n); err != nil { - return diag.Errorf("update tags: %s", err) + return diag.Errorf("error updating tags: %s", err) } } @@ -198,12 +204,11 @@ func resourceAwsCloudWatchCompositeAlarmDelete( } _, err := conn.DeleteAlarmsWithContext(ctx, &input) - switch { - case isAWSErr(err, "ResourceNotFound", ""): - log.Printf("[WARN] Composite Alarm %s has disappeared!", name) - return nil - case err != nil: - return diag.FromErr(err) + if err != nil { + if tfawserr.ErrCodeEquals(err, cloudwatch.ErrCodeResourceNotFound) { + return nil + } + return diag.Errorf("error deleting composite alarm: %s", err) } return nil @@ -246,24 +251,3 @@ func expandAwsCloudWatchPutCompositeAlarmInput(d *schema.ResourceData) cloudwatc return out } - -func getAwsCloudWatchCompositeAlarm( - ctx context.Context, - conn *cloudwatch.CloudWatch, - name string, -) (*cloudwatch.CompositeAlarm, bool, error) { - input := cloudwatch.DescribeAlarmsInput{ - AlarmNames: aws.StringSlice([]string{name}), - AlarmTypes: aws.StringSlice([]string{cloudwatch.AlarmTypeCompositeAlarm}), - } - - output, err := conn.DescribeAlarmsWithContext(ctx, &input) - switch { - case err != nil: - return nil, false, err - case len(output.CompositeAlarms) != 1: - return nil, false, nil - } - - return output.CompositeAlarms[0], true, nil -} diff --git a/aws/resource_aws_cloudwatch_composite_alarm_test.go b/aws/resource_aws_cloudwatch_composite_alarm_test.go index fb9862b5c8b..8a033cffebb 100644 --- a/aws/resource_aws_cloudwatch_composite_alarm_test.go +++ b/aws/resource_aws_cloudwatch_composite_alarm_test.go @@ -12,31 +12,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func testAccCheckAwsCloudWatchCompositeAlarmExists(n string, alarm *cloudwatch.CompositeAlarm) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := testAccProvider.Meta().(*AWSClient).cloudwatchconn - 
params := cloudwatch.DescribeAlarmsInput{ - AlarmNames: []*string{aws.String(rs.Primary.ID)}, - AlarmTypes: []*string{aws.String(cloudwatch.AlarmTypeCompositeAlarm)}, - } - resp, err := conn.DescribeAlarms(¶ms) - if err != nil { - return err - } - if len(resp.CompositeAlarms) == 0 { - return fmt.Errorf("Alarm not found") - } - *alarm = *resp.CompositeAlarms[0] - - return nil - } -} - func testAccCheckAwsCloudWatchCompositeAlarmDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).cloudwatchconn @@ -53,7 +28,7 @@ func testAccCheckAwsCloudWatchCompositeAlarmDestroy(s *terraform.State) error { if err == nil { if len(resp.MetricAlarms) != 0 && - *resp.MetricAlarms[0].AlarmName == rs.Primary.ID { + aws.StringValue(resp.MetricAlarms[0].AlarmName) == rs.Primary.ID { return fmt.Errorf("Alarm Still Exists: %s", rs.Primary.ID) } } @@ -63,7 +38,6 @@ func testAccCheckAwsCloudWatchCompositeAlarmDestroy(s *terraform.State) error { } func TestAccAwsCloudWatchCompositeAlarm_basic(t *testing.T) { - alarm := cloudwatch.CompositeAlarm{} suffix := acctest.RandString(8) resourceName := "aws_cloudwatch_composite_alarm.test" @@ -73,9 +47,125 @@ func TestAccAwsCloudWatchCompositeAlarm_basic(t *testing.T) { CheckDestroy: testAccCheckAwsCloudWatchCompositeAlarmDestroy, Steps: []resource.TestStep{ { - Config: testAccAwsCloudWatchCompositeAlarmConfig_create(suffix), + Config: testAccAwsCloudWatchCompositeAlarmConfig_basic(suffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "alarm_actions.#", "1"), + resource.TestCheckResourceAttr(resourceName, "alarm_description", "Test 1"), + resource.TestCheckResourceAttr(resourceName, "alarm_name", "tf-test-composite-"+suffix), + resource.TestCheckResourceAttr(resourceName, "alarm_rule", fmt.Sprintf("ALARM(tf-test-metric-0-%[1]s) OR ALARM(tf-test-metric-1-%[1]s)", suffix)), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "cloudwatch", regexp.MustCompile(`alarm:.+`)), + resource.TestCheckResourceAttr(resourceName, "insufficient_data_actions.#", "1"), + resource.TestCheckResourceAttr(resourceName, "ok_actions.#", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccAwsCloudWatchCompositeAlarmConfig_basic(suffix string) string { + return fmt.Sprintf(` +resource "aws_cloudwatch_metric_alarm" "test" { + count = 2 + + alarm_name = "tf-test-metric-${count.index}-%[1]s" + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 2 + metric_name = "CPUUtilization" + namespace = "AWS/EC2" + period = 120 + statistic = "Average" + threshold = 80 + + dimensions = { + InstanceId = "i-abc123" + } +} + +resource "aws_sns_topic" "test" { + count = 1 + name = "tf-test-alarms-${count.index}-%[1]s" +} + +resource "aws_cloudwatch_composite_alarm" "test" { + alarm_actions = aws_sns_topic.test.*.arn + alarm_description = "Test 1" + alarm_name = "tf-test-composite-%[1]s" + alarm_rule = join(" OR ", formatlist("ALARM(%%s)", aws_cloudwatch_metric_alarm.test.*.alarm_name)) + insufficient_data_actions = aws_sns_topic.test.*.arn + ok_actions = aws_sns_topic.test.*.arn + + tags = { + Foo = "Bar" + } +} +`, suffix) +} + +func TestAccAwsCloudWatchCompositeAlarm_disappears(t *testing.T) { + suffix := acctest.RandString(8) + resourceName := "aws_cloudwatch_composite_alarm.test" + + resource.ParallelTest(t, 
resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsCloudWatchCompositeAlarmDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsCloudWatchCompositeAlarmConfig_disappears(suffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsCloudWatchCompositeAlarm(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccAwsCloudWatchCompositeAlarmConfig_disappears(suffix string) string { + return fmt.Sprintf(` +resource "aws_cloudwatch_metric_alarm" "test" { + alarm_name = "tf-test-metric-%[1]s" + comparison_operator = "GreaterThanOrEqualToThreshold" + evaluation_periods = 2 + metric_name = "CPUUtilization" + namespace = "AWS/EC2" + period = 120 + statistic = "Average" + threshold = 80 + + dimensions = { + InstanceId = "i-abc123" + } +} + +resource "aws_cloudwatch_composite_alarm" "test" { + alarm_name = "tf-test-composite-%[1]s" + alarm_rule = "ALARM(${aws_cloudwatch_metric_alarm.test.alarm_name})" +} +`, suffix) +} + +func TestAccAwsCloudWatchCompositeAlarm_update(t *testing.T) { + suffix := acctest.RandString(8) + resourceName := "aws_cloudwatch_composite_alarm.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsCloudWatchCompositeAlarmDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsCloudWatchCompositeAlarmConfig_update_before(suffix), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName, &alarm), + testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName), resource.TestCheckResourceAttr(resourceName, "alarm_actions.#", "1"), resource.TestCheckResourceAttr(resourceName, "alarm_description", "Test 1"), resource.TestCheckResourceAttr(resourceName, "alarm_name", "tf-test-composite-"+suffix), @@ -92,9 +182,9 @@ func TestAccAwsCloudWatchCompositeAlarm_basic(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAwsCloudWatchCompositeAlarmConfig_update(suffix), + Config: testAccAwsCloudWatchCompositeAlarmConfig_update_after(suffix), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName, &alarm), + testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName), resource.TestCheckResourceAttr(resourceName, "alarm_actions.#", "2"), resource.TestCheckResourceAttr(resourceName, "alarm_description", "Test 2"), resource.TestCheckResourceAttr(resourceName, "alarm_name", "tf-test-composite-"+suffix), @@ -109,7 +199,7 @@ func TestAccAwsCloudWatchCompositeAlarm_basic(t *testing.T) { }) } -func testAccAwsCloudWatchCompositeAlarmConfig_create(suffix string) string { +func testAccAwsCloudWatchCompositeAlarmConfig_update_before(suffix string) string { return fmt.Sprintf(` resource "aws_cloudwatch_metric_alarm" "test" { count = 2 @@ -148,7 +238,7 @@ resource "aws_cloudwatch_composite_alarm" "test" { `, suffix) } -func testAccAwsCloudWatchCompositeAlarmConfig_update(suffix string) string { +func testAccAwsCloudWatchCompositeAlarmConfig_update_after(suffix string) string { return fmt.Sprintf(` resource "aws_cloudwatch_metric_alarm" "test" { count = 2 @@ -182,75 +272,30 @@ resource "aws_cloudwatch_composite_alarm" "test" { tags = { Foo = "Bar" - Bax = "Baf" + Bax = "Baf" } } `, suffix) } -func TestAccAwsCloudWatchCompositeAlarm_disappears(t *testing.T) { - alarm := 
cloudwatch.CompositeAlarm{} - suffix := acctest.RandString(8) - resourceName := "aws_cloudwatch_composite_alarm.test" - - checkDisappears := func(*terraform.State) error { +func testAccCheckAwsCloudWatchCompositeAlarmExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } conn := testAccProvider.Meta().(*AWSClient).cloudwatchconn - alarmNames := []string{ - "tf-test-composite-" + suffix, - "tf-test-metric-" + suffix, + params := cloudwatch.DescribeAlarmsInput{ + AlarmNames: []*string{aws.String(rs.Primary.ID)}, + AlarmTypes: []*string{aws.String(cloudwatch.AlarmTypeCompositeAlarm)}, } - - for _, name := range alarmNames { - input := cloudwatch.DeleteAlarmsInput{ - AlarmNames: []*string{&name}, - } - - _, err := conn.DeleteAlarms(&input) - if err != nil { - return err - } + resp, err := conn.DescribeAlarms(¶ms) + if err != nil { + return err + } + if len(resp.CompositeAlarms) == 0 { + return fmt.Errorf("Alarm not found") } - return nil } - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsCloudWatchCompositeAlarmDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsCloudWatchCompositeAlarmConfig_disappears(suffix), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName, &alarm), - checkDisappears, - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func testAccAwsCloudWatchCompositeAlarmConfig_disappears(suffix string) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_metric_alarm" "test" { - alarm_name = "tf-test-metric-%[1]s" - comparison_operator = "GreaterThanOrEqualToThreshold" - evaluation_periods = 2 - metric_name = "CPUUtilization" - namespace = "AWS/EC2" - period = 120 - statistic = "Average" - threshold = 80 - - dimensions = { - InstanceId = "i-abc123" - } -} - -resource "aws_cloudwatch_composite_alarm" "test" { - alarm_name = "tf-test-composite-%[1]s" - alarm_rule = "ALARM(${aws_cloudwatch_metric_alarm.test.alarm_name})" -} -`, suffix) } diff --git a/website/docs/r/cloudwatch_composite_alarm.html.markdown b/website/docs/r/cloudwatch_composite_alarm.html.markdown index 4a140d0f465..5190bd0b27a 100644 --- a/website/docs/r/cloudwatch_composite_alarm.html.markdown +++ b/website/docs/r/cloudwatch_composite_alarm.html.markdown @@ -32,12 +32,12 @@ EOF ## Argument Reference * `actions_enabled` - (Optional) Indicates whether actions should be executed during any changes to the alarm state of the composite alarm. Defaults to `true`. -* `alarm_actions` - (Optional) The set of actions to execute when this alarm transitions to the ALARM state from any other state. Each action is specified as an ARN. Up to 5 actions are allowed. +* `alarm_actions` - (Optional) The set of actions to execute when this alarm transitions to the `ALARM` state from any other state. Each action is specified as an ARN. Up to 5 actions are allowed. * `alarm_description` - (Optional) The description for the composite alarm. * `alarm_name` - (Required) The name for the composite alarm. This name must be unique within the region. * `alarm_rule` - (Required) An expression that specifies which other alarms are to be evaluated to determine this composite alarm's state. For syntax, see [Creating a Composite Alarm](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Create_Composite_Alarm.html). 
The maximum length is 10240 characters. -* `insufficient_data_actions` - (Optional) The set of actions to execute when this alarm transitions to the INSUFFICIENT_DATA state from any other state. Each action is specified as an ARN. Up to 5 actions are allowed. -* `ok_actions` - (Optional) The set of actions to execute when this alarm transitions to an OK state from any other state. Each action is specified as an ARN. Up to 5 actions are allowed. +* `insufficient_data_actions` - (Optional) The set of actions to execute when this alarm transitions to the `INSUFFICIENT_DATA` state from any other state. Each action is specified as an ARN. Up to 5 actions are allowed. +* `ok_actions` - (Optional) The set of actions to execute when this alarm transitions to an `OK` state from any other state. Each action is specified as an ARN. Up to 5 actions are allowed. * `tags` - (Optional) A map of tags to associate with the alarm. Up to 50 tags are allowed. ## Attributes Reference From 6ae4fb45d448c0a0ebe7e58f046bc08fc1ce1dea Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 10 Jan 2021 00:14:57 +0200 Subject: [PATCH 0509/1212] fix non existent argument + add kms key support --- aws/resource_aws_sagemaker_domain.go | 13 ++++- aws/resource_aws_sagemaker_domain_test.go | 55 ++++++++++++++++++- website/docs/r/sagemaker_domain.html.markdown | 1 + 3 files changed, 65 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_sagemaker_domain.go b/aws/resource_aws_sagemaker_domain.go index d48cdd622f2..b3f55cb7d37 100644 --- a/aws/resource_aws_sagemaker_domain.go +++ b/aws/resource_aws_sagemaker_domain.go @@ -58,6 +58,13 @@ func resourceAwsSagemakerDomain() *schema.Resource { MaxItems: 16, Elem: &schema.Schema{Type: schema.TypeString}, }, + "kms_key_id": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + ValidateFunc: validateArn, + }, "app_network_access_type": { Type: schema.TypeString, ForceNew: true, @@ -251,6 +258,10 @@ func resourceAwsSagemakerDomainCreate(d *schema.ResourceData, meta interface{}) input.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().SagemakerTags() } + if v, ok := d.GetOk("kms_key_id"); ok { + input.KmsKeyId = aws.String(v.(string)) + } + log.Printf("[DEBUG] sagemaker domain create config: %#v", *input) output, err := conn.CreateDomain(input) if err != nil { @@ -294,7 +305,7 @@ func resourceAwsSagemakerDomainRead(d *schema.ResourceData, meta interface{}) er d.Set("home_efs_file_system_id", domain.HomeEfsFileSystemId) d.Set("single_sign_on_managed_application_instance_id", domain.SingleSignOnManagedApplicationInstanceId) d.Set("url", domain.Url) - d.Set("vpc_id", domain.VpcId) + d.Set("kms_key_id", domain.KmsKeyId) if err := d.Set("subnet_ids", flattenStringSet(domain.SubnetIds)); err != nil { return fmt.Errorf("error setting subnet_ids for SageMaker domain (%s): %w", d.Id(), err) diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go index 95f7b3d3ab7..a3395c8e717 100644 --- a/aws/resource_aws_sagemaker_domain_test.go +++ b/aws/resource_aws_sagemaker_domain_test.go @@ -93,6 +93,34 @@ func TestAccAWSSagemakerDomain_basic(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "vpc_id", "aws_vpc.test", "id"), resource.TestCheckResourceAttrSet(resourceName, "url"), resource.TestCheckResourceAttrSet(resourceName, "home_efs_file_system_id"), + resource.TestCheckResourceAttrSet(resourceName, "kms_key_id"), + testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), + ), + }, + { + 
ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSagemakerDomain_kms(t *testing.T) { + var domain sagemaker.DescribeDomainOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_domain.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerDomainDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerDomainKMSConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerDomainExists(resourceName, &domain), + resource.TestCheckResourceAttrPair(resourceName, "kms_key_id", "aws_kms_key.test", "arn"), testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), ), }, @@ -572,6 +600,27 @@ resource "aws_sagemaker_domain" "test" { `, rName) } +func testAccAWSSagemakerDomainKMSConfig(rName string) string { + return testAccAWSSagemakerDomainConfigBase(rName) + fmt.Sprintf(` +resource "aws_kms_key" "test" { + description = "Terraform acc test" + deletion_window_in_days = 7 +} + +resource "aws_sagemaker_domain" "test" { + domain_name = %[1]q + auth_mode = "IAM" + vpc_id = aws_vpc.test.id + subnet_ids = [aws_subnet.test.id] + kms_key_id = aws_kms_key.test.arn + + default_user_settings { + execution_role = aws_iam_role.test.arn + } +} +`, rName) +} + func testAccAWSSagemakerDomainConfigSecurityGroup1(rName string) string { return testAccAWSSagemakerDomainConfigBase(rName) + fmt.Sprintf(` resource "aws_security_group" "test" { @@ -678,9 +727,9 @@ resource "aws_sagemaker_domain" "test" { execution_role = aws_iam_role.test.arn sharing_settings { - domain_output_option = "Allowed" - s3_kms_key_id = aws_kms_key.test.arn - s3_output_path = "s3://${aws_s3_bucket.test.bucket}/sharing" + notebook_output_option = "Allowed" + s3_kms_key_id = aws_kms_key.test.arn + s3_output_path = "s3://${aws_s3_bucket.test.bucket}/sharing" } } } diff --git a/website/docs/r/sagemaker_domain.html.markdown b/website/docs/r/sagemaker_domain.html.markdown index 03d07d2fe69..4573e9bb62d 100644 --- a/website/docs/r/sagemaker_domain.html.markdown +++ b/website/docs/r/sagemaker_domain.html.markdown @@ -53,6 +53,7 @@ The following arguments are supported: * `vpc_id` - (Required) The ID of the Amazon Virtual Private Cloud (VPC) that Studio uses for communication. * `subnet_ids` - (Required) The VPC subnets that Studio uses for communication. * `default_user_settings` - (Required) The default user settings. See [Default User Settings](#default-user-settings) below. +* `kms_key_id` - (Optional) The AWS KMS customer managed CMK used to encrypt the EFS volume attached to the domain. * `app_network_access_type` - (Optional) Specifies the VPC used for non-EFS traffic. The default value is `PublicInternetOnly`. Valid values are `PublicInternetOnly` and `VpcOnly`. * `tags` - (Optional) A map of tags to assign to the resource. 
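The `kms_key_id` wiring above mirrors the patch's acceptance-test configuration; a minimal standalone sketch (assuming the VPC, subnet, and IAM role referenced as `aws_vpc.example`, `aws_subnet.example`, and `aws_iam_role.example` are defined elsewhere) would be:

```hcl
resource "aws_kms_key" "example" {
  description             = "CMK for the Studio domain's EFS volume"
  deletion_window_in_days = 7
}

resource "aws_sagemaker_domain" "example" {
  domain_name = "example"
  auth_mode   = "IAM"
  vpc_id      = aws_vpc.example.id
  subnet_ids  = [aws_subnet.example.id]

  # Customer-managed CMK used to encrypt the EFS volume attached to the domain.
  kms_key_id = aws_kms_key.example.arn

  default_user_settings {
    execution_role = aws_iam_role.example.arn
  }
}
```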
From abdcf868430492d3b4cbeb22296503cf2c83e696 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 10 Jan 2021 00:19:47 +0200 Subject: [PATCH 0510/1212] restore vpc id --- aws/resource_aws_sagemaker_domain.go | 1 + 1 file changed, 1 insertion(+) diff --git a/aws/resource_aws_sagemaker_domain.go b/aws/resource_aws_sagemaker_domain.go index b3f55cb7d37..d35b27f74d1 100644 --- a/aws/resource_aws_sagemaker_domain.go +++ b/aws/resource_aws_sagemaker_domain.go @@ -305,6 +305,7 @@ func resourceAwsSagemakerDomainRead(d *schema.ResourceData, meta interface{}) er d.Set("home_efs_file_system_id", domain.HomeEfsFileSystemId) d.Set("single_sign_on_managed_application_instance_id", domain.SingleSignOnManagedApplicationInstanceId) d.Set("url", domain.Url) + d.Set("vpc_id", domain.VpcId) d.Set("kms_key_id", domain.KmsKeyId) if err := d.Set("subnet_ids", flattenStringSet(domain.SubnetIds)); err != nil { From 54a577afacd90b58b83f0648c9e385975b27a014 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 10 Jan 2021 00:44:19 +0200 Subject: [PATCH 0511/1212] missed a spot --- aws/resource_aws_sagemaker_domain_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go index a3395c8e717..60d02056fae 100644 --- a/aws/resource_aws_sagemaker_domain_test.go +++ b/aws/resource_aws_sagemaker_domain_test.go @@ -230,7 +230,7 @@ func TestAccAWSSagemakerDomain_sharingSettings(t *testing.T) { testAccCheckAWSSagemakerDomainExists(resourceName, &domain), resource.TestCheckResourceAttr(resourceName, "default_user_settings.#", "1"), resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.sharing_settings.#", "1"), - resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.sharing_settings.0.domain_output_option", "Allowed"), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.sharing_settings.0.notebook_output_option", "Allowed"), resource.TestCheckResourceAttrPair(resourceName, "default_user_settings.0.sharing_settings.0.s3_kms_key_id", "aws_kms_key.test", "arn"), resource.TestCheckResourceAttrSet(resourceName, "default_user_settings.0.sharing_settings.0.s3_output_path"), testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), From f2aabe646f4995efd9e607063293bf699d2c9b20 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 10 Jan 2021 01:03:17 +0200 Subject: [PATCH 0512/1212] remove computed from kms --- aws/resource_aws_sagemaker_domain.go | 1 - aws/resource_aws_sagemaker_domain_test.go | 1 - 2 files changed, 2 deletions(-) diff --git a/aws/resource_aws_sagemaker_domain.go b/aws/resource_aws_sagemaker_domain.go index d35b27f74d1..baea64f0eed 100644 --- a/aws/resource_aws_sagemaker_domain.go +++ b/aws/resource_aws_sagemaker_domain.go @@ -62,7 +62,6 @@ func resourceAwsSagemakerDomain() *schema.Resource { Type: schema.TypeString, ForceNew: true, Optional: true, - Computed: true, ValidateFunc: validateArn, }, "app_network_access_type": { diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go index 60d02056fae..a1b7c62f819 100644 --- a/aws/resource_aws_sagemaker_domain_test.go +++ b/aws/resource_aws_sagemaker_domain_test.go @@ -93,7 +93,6 @@ func TestAccAWSSagemakerDomain_basic(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "vpc_id", "aws_vpc.test", "id"), resource.TestCheckResourceAttrSet(resourceName, "url"), resource.TestCheckResourceAttrSet(resourceName, "home_efs_file_system_id"), - 
resource.TestCheckResourceAttrSet(resourceName, "kms_key_id"), testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), ), }, From a40f52e747a5af45392088e47ef15b23afa3dc33 Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Mon, 11 Jan 2021 08:48:02 +0900 Subject: [PATCH 0513/1212] Add import support for aws_cloudfront_public_key resource --- aws/resource_aws_cloudfront_public_key.go | 3 ++ ...resource_aws_cloudfront_public_key_test.go | 29 ++++++++++++++++--- .../r/cloudfront_public_key.html.markdown | 8 +++++ 3 files changed, 36 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_cloudfront_public_key.go b/aws/resource_aws_cloudfront_public_key.go index 1d30170ff6d..671911905f5 100644 --- a/aws/resource_aws_cloudfront_public_key.go +++ b/aws/resource_aws_cloudfront_public_key.go @@ -16,6 +16,9 @@ func resourceAwsCloudFrontPublicKey() *schema.Resource { Read: resourceAwsCloudFrontPublicKeyRead, Update: resourceAwsCloudFrontPublicKeyUpdate, Delete: resourceAwsCloudFrontPublicKeyDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "caller_reference": { diff --git a/aws/resource_aws_cloudfront_public_key_test.go b/aws/resource_aws_cloudfront_public_key_test.go index 2305e0122f0..fd4d27f9862 100644 --- a/aws/resource_aws_cloudfront_public_key_test.go +++ b/aws/resource_aws_cloudfront_public_key_test.go @@ -14,6 +14,7 @@ import ( func TestAccAWSCloudFrontPublicKey_basic(t *testing.T) { rInt := acctest.RandInt() + resourceName := "aws_cloudfront_public_key.example" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, @@ -23,18 +24,24 @@ func TestAccAWSCloudFrontPublicKey_basic(t *testing.T) { { Config: testAccAWSCloudFrontPublicKeyConfig(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFrontPublicKeyExistence("aws_cloudfront_public_key.example"), + testAccCheckCloudFrontPublicKeyExistence(resourceName), resource.TestCheckResourceAttr("aws_cloudfront_public_key.example", "comment", "test key"), resource.TestMatchResourceAttr("aws_cloudfront_public_key.example", "caller_reference", regexp.MustCompile(fmt.Sprintf("^%s", resource.UniqueIdPrefix))), resource.TestCheckResourceAttr("aws_cloudfront_public_key.example", "name", fmt.Sprintf("tf-acc-test-%d", rInt)), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSCloudFrontPublicKey_namePrefix(t *testing.T) { startsWithPrefix := regexp.MustCompile("^tf-acc-test-") + resourceName := "aws_cloudfront_public_key.example" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, @@ -44,16 +51,25 @@ func TestAccAWSCloudFrontPublicKey_namePrefix(t *testing.T) { { Config: testAccAWSCloudFrontPublicKeyConfig_namePrefix(), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFrontPublicKeyExistence("aws_cloudfront_public_key.example"), + testAccCheckCloudFrontPublicKeyExistence(resourceName), resource.TestMatchResourceAttr("aws_cloudfront_public_key.example", "name", startsWithPrefix), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "name_prefix", + }, + }, }, }) } func TestAccAWSCloudFrontPublicKey_update(t *testing.T) { rInt := acctest.RandInt() + resourceName := "aws_cloudfront_public_key.example" resource.ParallelTest(t, 
resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, @@ -63,14 +79,19 @@ func TestAccAWSCloudFrontPublicKey_update(t *testing.T) { { Config: testAccAWSCloudFrontPublicKeyConfig(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFrontPublicKeyExistence("aws_cloudfront_public_key.example"), + testAccCheckCloudFrontPublicKeyExistence(resourceName), resource.TestCheckResourceAttr("aws_cloudfront_public_key.example", "comment", "test key"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCloudFrontPublicKeyConfigUpdate(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFrontPublicKeyExistence("aws_cloudfront_public_key.example"), + testAccCheckCloudFrontPublicKeyExistence(resourceName), resource.TestCheckResourceAttr("aws_cloudfront_public_key.example", "comment", "test key1"), ), }, diff --git a/website/docs/r/cloudfront_public_key.html.markdown b/website/docs/r/cloudfront_public_key.html.markdown index bedf8901211..bf46b179bbe 100644 --- a/website/docs/r/cloudfront_public_key.html.markdown +++ b/website/docs/r/cloudfront_public_key.html.markdown @@ -36,3 +36,11 @@ In addition to all arguments above, the following attributes are exported: * `caller_reference` - Internal value used by CloudFront to allow future updates to the public key configuration. * `etag` - The current version of the public key. For example: `E2QWRUHAPOMQZL`. * `id` - The identifier for the public key. For example: `K3D5EWEUDCCXON`. + +## Import + +CloudFront Public Key can be imported using the `id`, e.g. + +``` +$ terraform import aws_cloudfront_public_key.example K3D5EWEUDCCXON +``` From 6ebb2a6d4e4f432899b2a065dc1881290976d625 Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Mon, 11 Jan 2021 08:48:27 +0900 Subject: [PATCH 0514/1212] Add a disappear test for aws_cloudfront_public_key resource --- ...resource_aws_cloudfront_public_key_test.go | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/aws/resource_aws_cloudfront_public_key_test.go b/aws/resource_aws_cloudfront_public_key_test.go index fd4d27f9862..3b6a7c4304b 100644 --- a/aws/resource_aws_cloudfront_public_key_test.go +++ b/aws/resource_aws_cloudfront_public_key_test.go @@ -39,6 +39,27 @@ func TestAccAWSCloudFrontPublicKey_basic(t *testing.T) { }) } +func TestAccAWSCloudFrontPublicKey_disappears(t *testing.T) { + rInt := acctest.RandInt() + resourceName := "aws_cloudfront_public_key.example" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudFrontPublicKeyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCloudFrontPublicKeyConfig(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudFrontPublicKeyExistence(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsCloudFrontPublicKey(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func TestAccAWSCloudFrontPublicKey_namePrefix(t *testing.T) { startsWithPrefix := regexp.MustCompile("^tf-acc-test-") resourceName := "aws_cloudfront_public_key.example" From fe8440a1e1c17341bab04dbbd1d6b0d8570eb918 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Sun, 10 Jan 2021 23:34:22 -0800 Subject: [PATCH 0515/1212] Add subcategory to doc --- website/docs/r/fms_policy.html.markdown | 1 + 1 file changed, 1 insertion(+) diff --git 
a/website/docs/r/fms_policy.html.markdown b/website/docs/r/fms_policy.html.markdown index 8a9d55ed9a8..45725f6d25e 100644 --- a/website/docs/r/fms_policy.html.markdown +++ b/website/docs/r/fms_policy.html.markdown @@ -1,4 +1,5 @@ --- +subcategory: "Firewall Manager (FMS)" layout: "aws" page_title: "AWS: aws_fms_policy" description: |- From 23f242a81a05691db24ae623435ea201d7817560 Mon Sep 17 00:00:00 2001 From: Bill Rich Date: Mon, 11 Jan 2021 00:09:01 -0800 Subject: [PATCH 0516/1212] Update CHANGELOG.md --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3d3c0b492d7..401109e1242 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +## 3.24.0 (Unreleased) + +FEATURES + +* **New Resource:** `aws_fms_policy` ([#9594](https://github.com/hashicorp/terraform-provider-aws/issues/9594)) + ## 3.23.0 (January 08, 2021) FEATURES From 1ea2aff2dedf21b98242b41fe36425a156bb920d Mon Sep 17 00:00:00 2001 From: Andrew Babichev Date: Thu, 10 Dec 2020 15:33:40 +0200 Subject: [PATCH 0517/1212] aws_workspaces_directory: Add access properties --- aws/resource_aws_workspaces_directory.go | 204 +++++++++++++++++- aws/resource_aws_workspaces_directory_test.go | 58 +++++ .../docs/r/workspaces_directory.html.markdown | 21 ++ 3 files changed, 282 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_workspaces_directory.go b/aws/resource_aws_workspaces_directory.go index a2f90e7e696..80af440e9f7 100644 --- a/aws/resource_aws_workspaces_directory.go +++ b/aws/resource_aws_workspaces_directory.go @@ -104,6 +104,51 @@ func resourceAwsWorkspacesDirectory() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, }, "tags": tagsSchema(), + "workspace_access_properties": { + Type: schema.TypeList, + Computed: true, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "device_type_android": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "device_type_chromeos": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "device_type_ios": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "device_type_osx": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "device_type_web": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "device_type_windows": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "device_type_zeroclient": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, "workspace_creation_properties": { Type: schema.TypeList, Computed: true, @@ -184,11 +229,23 @@ func resourceAwsWorkspacesDirectoryCreate(d *schema.ResourceData, meta interface SelfservicePermissions: expandSelfServicePermissions(v.([]interface{})), }) if err != nil { - return fmt.Errorf("error setting WorkSpaces Directory (%s) self service permissions: %w", directoryID, err) + return fmt.Errorf("error setting WorkSpaces Directory (%s) self-service permissions: %w", directoryID, err) } log.Printf("[INFO] Modified WorkSpaces Directory (%s) self-service permissions", directoryID) } + if v, ok := d.GetOk("workspace_access_properties"); ok { + log.Printf("[DEBUG] Modifying WorkSpaces Directory (%s) access properties", directoryID) + _, err := conn.ModifyWorkspaceAccessProperties(&workspaces.ModifyWorkspaceAccessPropertiesInput{ + ResourceId: aws.String(directoryID), + WorkspaceAccessProperties: expandAccessProperties(v.([]interface{})), + }) + if err != nil { + return fmt.Errorf("error setting 
WorkSpaces Directory (%s) access properties: %w", directoryID, err) + } + log.Printf("[INFO] Modified WorkSpaces Directory (%s) access properties", directoryID) + } + if v, ok := d.GetOk("workspace_creation_properties"); ok { log.Printf("[DEBUG] Modifying WorkSpaces Directory (%s) creation properties", directoryID) _, err := conn.ModifyWorkspaceCreationProperties(&workspaces.ModifyWorkspaceCreationPropertiesInput{ @@ -247,6 +304,10 @@ func resourceAwsWorkspacesDirectoryRead(d *schema.ResourceData, meta interface{} return fmt.Errorf("error setting self_service_permissions: %w", err) } + if err := d.Set("workspace_access_properties", flattenAccessProperties(directory.WorkspaceAccessProperties)); err != nil { + return fmt.Errorf("error setting workspace_access_properties: %w", err) + } + if err := d.Set("workspace_creation_properties", flattenWorkspaceCreationProperties(directory.WorkspaceCreationProperties)); err != nil { return fmt.Errorf("error setting workspace_creation_properties: %w", err) } @@ -288,6 +349,20 @@ func resourceAwsWorkspacesDirectoryUpdate(d *schema.ResourceData, meta interface log.Printf("[INFO] Modified WorkSpaces Directory (%s) self-service permissions", d.Id()) } + if d.HasChange("workspace_access_properties") { + log.Printf("[DEBUG] Modifying WorkSpaces Directory (%s) access properties", d.Id()) + properties := d.Get("workspace_access_properties").([]interface{}) + + _, err := conn.ModifyWorkspaceAccessProperties(&workspaces.ModifyWorkspaceAccessPropertiesInput{ + ResourceId: aws.String(d.Id()), + WorkspaceAccessProperties: expandAccessProperties(properties), + }) + if err != nil { + return fmt.Errorf("error updating WorkSpaces Directory (%s) access properties: %w", d.Id(), err) + } + log.Printf("[INFO] Modified WorkSpaces Directory (%s) access properties", d.Id()) + } + if d.HasChange("workspace_creation_properties") { log.Printf("[DEBUG] Modifying WorkSpaces Directory (%s) creation properties", d.Id()) properties := d.Get("workspace_creation_properties").([]interface{}) @@ -370,6 +445,60 @@ func workspacesDirectoryDelete(id string, conn *workspaces.WorkSpaces) error { return nil } +func expandAccessProperties(properties []interface{}) *workspaces.WorkspaceAccessProperties { + if len(properties) == 0 || properties[0] == nil { + return nil + } + + result := &workspaces.WorkspaceAccessProperties{} + + p := properties[0].(map[string]interface{}) + + if p["device_type_android"].(bool) { + result.DeviceTypeAndroid = aws.String(workspaces.AccessPropertyValueAllow) + } else { + result.DeviceTypeAndroid = aws.String(workspaces.AccessPropertyValueDeny) + } + + if p["device_type_chromeos"].(bool) { + result.DeviceTypeChromeOs = aws.String(workspaces.AccessPropertyValueAllow) + } else { + result.DeviceTypeChromeOs = aws.String(workspaces.AccessPropertyValueDeny) + } + + if p["device_type_ios"].(bool) { + result.DeviceTypeIos = aws.String(workspaces.AccessPropertyValueAllow) + } else { + result.DeviceTypeIos = aws.String(workspaces.AccessPropertyValueDeny) + } + + if p["device_type_osx"].(bool) { + result.DeviceTypeOsx = aws.String(workspaces.AccessPropertyValueAllow) + } else { + result.DeviceTypeOsx = aws.String(workspaces.AccessPropertyValueDeny) + } + + if p["device_type_web"].(bool) { + result.DeviceTypeWeb = aws.String(workspaces.AccessPropertyValueAllow) + } else { + result.DeviceTypeWeb = aws.String(workspaces.AccessPropertyValueDeny) + } + + if p["device_type_windows"].(bool) { + result.DeviceTypeWindows = aws.String(workspaces.AccessPropertyValueAllow) + } else { + 
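// Note: with this boolean schema, every field in the request is always
// populated with either Allow or Deny, so an omitted flag is still sent as
// an explicit value rather than left unset.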
result.DeviceTypeWindows = aws.String(workspaces.AccessPropertyValueDeny) + } + + if p["device_type_zeroclient"].(bool) { + result.DeviceTypeZeroClient = aws.String(workspaces.AccessPropertyValueAllow) + } else { + result.DeviceTypeZeroClient = aws.String(workspaces.AccessPropertyValueDeny) + } + + return result +} + func expandSelfServicePermissions(permissions []interface{}) *workspaces.SelfservicePermissions { if len(permissions) == 0 || permissions[0] == nil { return nil @@ -436,6 +565,79 @@ func expandWorkspaceCreationProperties(properties []interface{}) *workspaces.Wor return result } +func flattenAccessProperties(properties *workspaces.WorkspaceAccessProperties) []interface{} { + if properties == nil { + return []interface{}{} + } + + result := map[string]interface{}{} + + switch *properties.DeviceTypeAndroid { + case workspaces.AccessPropertyValueAllow: + result["device_type_android"] = true + case workspaces.AccessPropertyValueDeny: + result["device_type_android"] = false + default: + result["device_type_android"] = nil + } + + switch *properties.DeviceTypeChromeOs { + case workspaces.AccessPropertyValueAllow: + result["device_type_chromeos"] = true + case workspaces.AccessPropertyValueDeny: + result["device_type_chromeos"] = false + default: + result["device_type_chromeos"] = nil + } + + switch *properties.DeviceTypeIos { + case workspaces.AccessPropertyValueAllow: + result["device_type_ios"] = true + case workspaces.AccessPropertyValueDeny: + result["device_type_ios"] = false + default: + result["device_type_ios"] = nil + } + + switch *properties.DeviceTypeOsx { + case workspaces.AccessPropertyValueAllow: + result["device_type_osx"] = true + case workspaces.AccessPropertyValueDeny: + result["device_type_osx"] = false + default: + result["device_type_osx"] = nil + } + + switch *properties.DeviceTypeWeb { + case workspaces.AccessPropertyValueAllow: + result["device_type_web"] = true + case workspaces.AccessPropertyValueDeny: + result["device_type_web"] = false + default: + result["device_type_web"] = nil + } + + switch *properties.DeviceTypeWindows { + case workspaces.AccessPropertyValueAllow: + result["device_type_windows"] = true + case workspaces.AccessPropertyValueDeny: + result["device_type_windows"] = false + default: + result["device_type_windows"] = nil + } + + switch *properties.DeviceTypeZeroClient { + case workspaces.AccessPropertyValueAllow: + result["device_type_zeroclient"] = true + case workspaces.AccessPropertyValueDeny: + result["device_type_zeroclient"] = false + default: + result["device_type_zeroclient"] = nil + } + + return []interface{}{result} +} + func flattenSelfServicePermissions(permissions *workspaces.SelfservicePermissions) []interface{} { if permissions == nil { return []interface{}{} diff --git a/aws/resource_aws_workspaces_directory_test.go b/aws/resource_aws_workspaces_directory_test.go index 48ed67ded79..458c0666dc1 100644 --- a/aws/resource_aws_workspaces_directory_test.go +++ b/aws/resource_aws_workspaces_directory_test.go @@ -250,6 +250,40 @@ func TestAccAwsWorkspacesDirectory_selfServicePermissions(t *testing.T) { }) } +func TestAccAwsWorkspacesDirectory_workspaceAccessProperties(t *testing.T) { + var v workspaces.WorkspaceDirectory + rName := acctest.RandString(8) + + resourceName := "aws_workspaces_directory.main" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckWorkspacesDirectory(t) + testAccPreCheckAWSDirectoryServiceSimpleDirectory(t) + testAccPreCheckHasIAMRole(t, 
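// Directory registration requires the account-wide workspaces_DefaultRole
// IAM role to already exist, hence the precheck for: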
"workspaces_DefaultRole") + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsWorkspacesDirectoryDestroy, + Steps: []resource.TestStep{ + { + Config: testAccWorkspacesDirectory_workspaceAccessProperties(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAwsWorkspacesDirectoryExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.#", "1"), + resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_android", "true"), + resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_chromeos", "true"), + resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_ios", "true"), + resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_osx", "true"), + resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_web", "true"), + resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_windows", "true"), + resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_zeroclient", "true"), + ), + }, + }, + }) +} + func TestAccAwsWorkspacesDirectory_workspaceCreationProperties(t *testing.T) { var v workspaces.WorkspaceDirectory rName := acctest.RandString(8) @@ -789,6 +823,30 @@ resource "aws_workspaces_directory" "main" { `, tagKey1, tagValue1, tagKey2, tagValue2)) } +func testAccWorkspacesDirectory_workspaceAccessProperties(rName string) string { + return composeConfig( + testAccAwsWorkspacesDirectoryConfig_Prerequisites(rName), + fmt.Sprintf(` +resource "aws_workspaces_directory" "main" { + directory_id = aws_directory_service_directory.main.id + + workspace_access_properties { + device_type_android = true + device_type_chromeos = true + device_type_ios = true + device_type_osx = true + device_type_web = true + device_type_windows = true + device_type_zeroclient = true + } + + tags = { + Name = "tf-testacc-workspaces-directory-%[1]s" + } +} +`, rName)) +} + func testAccWorkspacesDirectoryConfig_workspaceCreationProperties(rName string) string { return composeConfig( testAccAwsWorkspacesDirectoryConfig_Prerequisites(rName), diff --git a/website/docs/r/workspaces_directory.html.markdown b/website/docs/r/workspaces_directory.html.markdown index ea87bec8ff0..b481344283d 100644 --- a/website/docs/r/workspaces_directory.html.markdown +++ b/website/docs/r/workspaces_directory.html.markdown @@ -34,6 +34,16 @@ resource "aws_workspaces_directory" "example" { switch_running_mode = true } + workspace_access_properties { + device_type_android = true + device_type_chromeos = true + device_type_ios = true + device_type_osx = true + device_type_web = true + device_type_windows = true + device_type_zeroclient = true + } + workspace_creation_properties { custom_security_group_id = aws_security_group.example.id default_ou = "OU=AWS,DC=Workgroup,DC=Example,DC=com" @@ -141,6 +151,7 @@ The following arguments are supported: * `ip_group_ids` - The identifiers of the IP access control groups associated with the directory. * `tags` – (Optional) A map of tags assigned to the WorkSpaces directory. * `self_service_permissions` – (Optional) Permissions to enable or disable self-service capabilities. Defined below. +* `workspace_access_properties` – (Optional) Specifies which devices and operating systems users can use to access their WorkSpaces. Defined below. 
* `workspace_creation_properties` – (Optional) Default properties that are used for creating WorkSpaces. Defined below. ### self_service_permissions @@ -151,6 +162,16 @@ The following arguments are supported: * `restart_workspace` – (Optional) Whether WorkSpaces directory users can restart their workspace. Default `true`. * `switch_running_mode` – (Optional) Whether WorkSpaces directory users can switch the running mode of their workspace. Default `false`. +### workspace_access_properties + +* `device_type_android` – (Optional) Indicates whether users can use Android devices to access their WorkSpaces. +* `device_type_chromeos` – (Optional) Indicates whether users can use Chromebooks to access their WorkSpaces. +* `device_type_ios` – (Optional) Indicates whether users can use iOS devices to access their WorkSpaces. +* `device_type_osx` – (Optional) Indicates whether users can use macOS clients to access their WorkSpaces. +* `device_type_web` – (Optional) Indicates whether users can access their WorkSpaces through a web browser. +* `device_type_windows` – (Optional) Indicates whether users can use Windows clients to access their WorkSpaces. +* `device_type_zeroclient` – (Optional) Indicates whether users can use zero client devices to access their WorkSpaces. + ### workspace_creation_properties -> **Note:** Once you specified `custom_security_group_id` or `default_ou`, there is no way to delete these attributes. If you cleanup them from the configuration, they still be present in state. From dda6087dcb215180f4c1718ac0f62dd83723999b Mon Sep 17 00:00:00 2001 From: Andrew Babichev Date: Thu, 10 Dec 2020 15:34:10 +0200 Subject: [PATCH 0518/1212] data aws_workspaces_directory: Add access properties --- aws/data_source_aws_workspaces_directory.go | 49 +++++++++++++++++++ ...ta_source_aws_workspaces_directory_test.go | 18 +++++++ .../docs/d/workspaces_directory.html.markdown | 11 +++++ 3 files changed, 78 insertions(+) diff --git a/aws/data_source_aws_workspaces_directory.go b/aws/data_source_aws_workspaces_directory.go index bd8c99bb09b..01f7ef7c56b 100644 --- a/aws/data_source_aws_workspaces_directory.go +++ b/aws/data_source_aws_workspaces_directory.go @@ -86,6 +86,51 @@ func dataSourceAwsWorkspacesDirectory() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, }, "tags": tagsSchema(), + "workspace_access_properties": { + Type: schema.TypeList, + Computed: true, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "device_type_android": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "device_type_chromeos": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "device_type_ios": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "device_type_osx": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "device_type_web": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "device_type_windows": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "device_type_zeroclient": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, "workspace_creation_properties": { Type: schema.TypeList, Computed: true, @@ -161,6 +206,10 @@ func dataSourceAwsWorkspacesDirectoryRead(d *schema.ResourceData, meta interface return fmt.Errorf("error setting self_service_permissions: %s", err) } + if err := d.Set("workspace_access_properties", flattenAccessProperties(directory.WorkspaceAccessProperties)); err != nil { + return 
fmt.Errorf("error setting workspace_access_properties: %w", err) + } + if err := d.Set("workspace_creation_properties", flattenWorkspaceCreationProperties(directory.WorkspaceCreationProperties)); err != nil { return fmt.Errorf("error setting workspace_creation_properties: %s", err) } diff --git a/aws/data_source_aws_workspaces_directory_test.go b/aws/data_source_aws_workspaces_directory_test.go index c47a35feaaf..b340ed6a419 100644 --- a/aws/data_source_aws_workspaces_directory_test.go +++ b/aws/data_source_aws_workspaces_directory_test.go @@ -34,6 +34,14 @@ func TestAccDataSourceAwsWorkspacesDirectory_basic(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName, "self_service_permissions.0.rebuild_workspace", resourceName, "self_service_permissions.0.rebuild_workspace"), resource.TestCheckResourceAttrPair(dataSourceName, "self_service_permissions.0.restart_workspace", resourceName, "self_service_permissions.0.restart_workspace"), resource.TestCheckResourceAttrPair(dataSourceName, "self_service_permissions.0.switch_running_mode", resourceName, "self_service_permissions.0.switch_running_mode"), + resource.TestCheckResourceAttrPair(dataSourceName, "workspace_access_properties.#", resourceName, "workspace_access_properties.#"), + resource.TestCheckResourceAttrPair(dataSourceName, "workspace_access_properties.0.device_type_android", resourceName, "workspace_access_properties.0.device_type_android"), + resource.TestCheckResourceAttrPair(dataSourceName, "workspace_access_properties.0.device_type_chromeos", resourceName, "workspace_access_properties.0.device_type_chromeos"), + resource.TestCheckResourceAttrPair(dataSourceName, "workspace_access_properties.0.device_type_ios", resourceName, "workspace_access_properties.0.device_type_ios"), + resource.TestCheckResourceAttrPair(dataSourceName, "workspace_access_properties.0.device_type_osx", resourceName, "workspace_access_properties.0.device_type_osx"), + resource.TestCheckResourceAttrPair(dataSourceName, "workspace_access_properties.0.device_type_web", resourceName, "workspace_access_properties.0.device_type_web"), + resource.TestCheckResourceAttrPair(dataSourceName, "workspace_access_properties.0.device_type_windows", resourceName, "workspace_access_properties.0.device_type_windows"), + resource.TestCheckResourceAttrPair(dataSourceName, "workspace_access_properties.0.device_type_zeroclient", resourceName, "workspace_access_properties.0.device_type_zeroclient"), resource.TestCheckResourceAttrPair(dataSourceName, "subnet_ids.#", resourceName, "subnet_ids.#"), resource.TestCheckResourceAttrPair(dataSourceName, "tags.%", resourceName, "tags.%"), resource.TestCheckResourceAttrPair(dataSourceName, "workspace_creation_properties.#", resourceName, "workspace_creation_properties.#"), @@ -63,6 +71,16 @@ resource "aws_workspaces_directory" "test" { restart_workspace = false switch_running_mode = true } + + workspace_access_properties { + device_type_android = true + device_type_chromeos = true + device_type_ios = true + device_type_osx = true + device_type_web = true + device_type_windows = true + device_type_zeroclient = true + } } data "aws_workspaces_directory" "test" { diff --git a/website/docs/d/workspaces_directory.html.markdown b/website/docs/d/workspaces_directory.html.markdown index 743f9a07855..0ed407e3e17 100644 --- a/website/docs/d/workspaces_directory.html.markdown +++ b/website/docs/d/workspaces_directory.html.markdown @@ -37,6 +37,7 @@ data "aws_workspaces_directory" "example" { * `subnet_ids` - The identifiers of the subnets where 
the directory resides. * `tags` – A map of tags assigned to the WorkSpaces directory. * `workspace_creation_properties` – The default properties that are used for creating WorkSpaces. Defined below. +* `workspace_access_properties` – Specifies which devices and operating systems users can use to access their WorkSpaces. Defined below. * `workspace_security_group_id` - The identifier of the security group that is assigned to new WorkSpaces. Defined below. ### self_service_permissions @@ -47,6 +48,16 @@ data "aws_workspaces_directory" "example" { * `restart_workspace` – Whether WorkSpaces directory users can restart their workspace. * `switch_running_mode` – Whether WorkSpaces directory users can switch the running mode of their workspace. +### workspace_access_properties + +* `device_type_android` – Indicates whether users can use Android devices to access their WorkSpaces. +* `device_type_chromeos` – Indicates whether users can use Chromebooks to access their WorkSpaces. +* `device_type_ios` – Indicates whether users can use iOS devices to access their WorkSpaces. +* `device_type_osx` – Indicates whether users can use macOS clients to access their WorkSpaces. +* `device_type_web` – Indicates whether users can access their WorkSpaces through a web browser. +* `device_type_windows` – Indicates whether users can use Windows clients to access their WorkSpaces. +* `device_type_zeroclient` – Indicates whether users can use zero client devices to access their WorkSpaces. + ### workspace_creation_properties * `custom_security_group_id` – The identifier of your custom security group. Should relate to the same VPC, where workspaces reside in. From 2656870c551418eedf44d90c0c0f194644a36204 Mon Sep 17 00:00:00 2001 From: Andrew Babichev Date: Thu, 10 Dec 2020 15:34:48 +0200 Subject: [PATCH 0519/1212] data aws_workspaces_directory: Add missed workspace_creation_properties --- ...data_source_aws_workspaces_directory_test.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/aws/data_source_aws_workspaces_directory_test.go index b340ed6a419..2f998c37ff6 100644 --- a/aws/data_source_aws_workspaces_directory_test.go +++ b/aws/data_source_aws_workspaces_directory_test.go @@ -61,6 +61,15 @@ func testAccDataSourceAwsWorkspacesDirectoryConfig(rName string) string { return composeConfig( testAccAwsWorkspacesDirectoryConfig_Prerequisites(rName), ` +resource "aws_security_group" "test" { + name = "tf-testacc-workspaces-directory-%[1]s" + vpc_id = aws_vpc.main.id + + tags = { + Name = "tf-testacc-workspaces-directory-%[1]s" + } +} + resource "aws_workspaces_directory" "test" { directory_id = aws_directory_service_directory.main.id @@ -81,6 +90,14 @@ resource "aws_workspaces_directory" "test" { device_type_windows = true device_type_zeroclient = true } + + workspace_creation_properties { + custom_security_group_id = aws_security_group.test.id + default_ou = "OU=AWS,DC=Workgroup,DC=Example,DC=com" + enable_internet_access = true + enable_maintenance_mode = false + user_enabled_as_local_administrator = false + } } data "aws_workspaces_directory" "test" { From 0b2efc3250c4a5cfe89eaa89cbe3994a6db9861a Mon Sep 17 00:00:00 2001 From: Andrew Babichev Date: Thu, 10 Dec 2020 15:35:08 +0200 Subject: [PATCH 0520/1212] Fix resource name interpolation into tags in test configs --- aws/data_source_aws_workspaces_directory_test.go | 9 +++++++--
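The hunks below wrap these raw configuration strings in `fmt.Sprintf` so that the `%[1]s` verbs inside them actually interpolate instead of being emitted literally. As a minimal, standalone sketch of the indexed verb the fix relies on (the values here are illustrative):

```go
package main

import "fmt"

func main() {
	rName := "abc123"
	// %[1]s refers back to the first argument, so one generated name can be
	// reused at several points in a template without repeating the argument.
	fmt.Println(fmt.Sprintf(`name = %[1]q
tag  = "tf-testacc-workspaces-directory-%[1]s"`, rName))
}
```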
aws/resource_aws_workspaces_directory_test.go | 10 ++++++---- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/aws/data_source_aws_workspaces_directory_test.go b/aws/data_source_aws_workspaces_directory_test.go index 2f998c37ff6..ad47cfc2f9e 100644 --- a/aws/data_source_aws_workspaces_directory_test.go +++ b/aws/data_source_aws_workspaces_directory_test.go @@ -1,6 +1,7 @@ package aws import ( + "fmt" "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" @@ -60,7 +61,7 @@ func TestAccDataSourceAwsWorkspacesDirectory_basic(t *testing.T) { func testAccDataSourceAwsWorkspacesDirectoryConfig(rName string) string { return composeConfig( testAccAwsWorkspacesDirectoryConfig_Prerequisites(rName), - ` + fmt.Sprintf(` resource "aws_security_group" "test" { name = "tf-testacc-workspaces-directory-%[1]s" vpc_id = aws_vpc.main.id @@ -98,6 +99,10 @@ resource "aws_workspaces_directory" "test" { enable_maintenance_mode = false user_enabled_as_local_administrator = false } + + tags = { + Name = "tf-testacc-workspaces-directory-%[1]s" + } } data "aws_workspaces_directory" "test" { @@ -107,5 +112,5 @@ data "aws_workspaces_directory" "test" { data "aws_iam_role" "workspaces-default" { name = "workspaces_DefaultRole" } -`) +`, rName)) } diff --git a/aws/resource_aws_workspaces_directory_test.go b/aws/resource_aws_workspaces_directory_test.go index 458c0666dc1..1c2810f2512 100644 --- a/aws/resource_aws_workspaces_directory_test.go +++ b/aws/resource_aws_workspaces_directory_test.go @@ -748,7 +748,8 @@ resource "aws_directory_service_directory" "main" { func testAccWorkspacesDirectoryConfig(rName string) string { return composeConfig( - testAccAwsWorkspacesDirectoryConfig_Prerequisites(rName), ` + testAccAwsWorkspacesDirectoryConfig_Prerequisites(rName), + fmt.Sprintf(` resource "aws_workspaces_directory" "main" { directory_id = aws_directory_service_directory.main.id @@ -760,12 +761,13 @@ resource "aws_workspaces_directory" "main" { data "aws_iam_role" "workspaces-default" { name = "workspaces_DefaultRole" } -`) +`, rName)) } func testAccWorkspacesDirectory_selfServicePermissions(rName string) string { return composeConfig( - testAccAwsWorkspacesDirectoryConfig_Prerequisites(rName), ` + testAccAwsWorkspacesDirectoryConfig_Prerequisites(rName), + fmt.Sprintf(` resource "aws_workspaces_directory" "main" { directory_id = aws_directory_service_directory.main.id @@ -781,7 +783,7 @@ resource "aws_workspaces_directory" "main" { Name = "tf-testacc-workspaces-directory-%[1]s" } } -`) +`, rName)) } func testAccWorkspacesDirectoryConfig_subnetIds(rName string) string { From 6a78fdcc4da5de40bd45a4681ad0183c4d389c46 Mon Sep 17 00:00:00 2001 From: Andrew Babichev Date: Thu, 10 Dec 2020 18:05:43 +0200 Subject: [PATCH 0521/1212] Shift workspace_access_attributes from bool to string type --- aws/data_source_aws_workspaces_directory.go | 30 +-- ...ta_source_aws_workspaces_directory_test.go | 14 +- aws/resource_aws_workspaces_directory.go | 177 ++++++------------ aws/resource_aws_workspaces_directory_test.go | 114 +++++++++-- .../docs/r/workspaces_directory.html.markdown | 14 +- 5 files changed, 187 insertions(+), 162 deletions(-) diff --git a/aws/data_source_aws_workspaces_directory.go b/aws/data_source_aws_workspaces_directory.go index 01f7ef7c56b..eccd5f4d388 100644 --- a/aws/data_source_aws_workspaces_directory.go +++ b/aws/data_source_aws_workspaces_directory.go @@ -94,39 +94,39 @@ func dataSourceAwsWorkspacesDirectory() *schema.Resource { Elem: &schema.Resource{ Schema: 
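// The device_type_* attributes below switch from TypeBool to TypeString so
// the data source can report the service's ALLOW/DENY values verbatim
// instead of collapsing them into booleans: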
map[string]*schema.Schema{ "device_type_android": { - Type: schema.TypeBool, + Type: schema.TypeString, Optional: true, - Default: false, + Computed: true, }, "device_type_chromeos": { - Type: schema.TypeBool, + Type: schema.TypeString, Optional: true, - Default: false, + Computed: true, }, "device_type_ios": { - Type: schema.TypeBool, + Type: schema.TypeString, Optional: true, - Default: true, + Computed: true, }, "device_type_osx": { - Type: schema.TypeBool, + Type: schema.TypeString, Optional: true, - Default: false, + Computed: true, }, "device_type_web": { - Type: schema.TypeBool, + Type: schema.TypeString, Optional: true, - Default: false, + Computed: true, }, "device_type_windows": { - Type: schema.TypeBool, + Type: schema.TypeString, Optional: true, - Default: false, + Computed: true, }, "device_type_zeroclient": { - Type: schema.TypeBool, + Type: schema.TypeString, Optional: true, - Default: false, + Computed: true, }, }, }, @@ -206,7 +206,7 @@ func dataSourceAwsWorkspacesDirectoryRead(d *schema.ResourceData, meta interface return fmt.Errorf("error setting self_service_permissions: %s", err) } - if err := d.Set("workspace_access_properties", flattenAccessProperties(directory.WorkspaceAccessProperties)); err != nil { + if err := d.Set("workspace_access_properties", flattenWorkspaceAccessProperties(directory.WorkspaceAccessProperties)); err != nil { return fmt.Errorf("error setting workspace_access_properties: %w", err) } diff --git a/aws/data_source_aws_workspaces_directory_test.go b/aws/data_source_aws_workspaces_directory_test.go index ad47cfc2f9e..a742dd21587 100644 --- a/aws/data_source_aws_workspaces_directory_test.go +++ b/aws/data_source_aws_workspaces_directory_test.go @@ -83,13 +83,13 @@ resource "aws_workspaces_directory" "test" { } workspace_access_properties { - device_type_android = true - device_type_chromeos = true - device_type_ios = true - device_type_osx = true - device_type_web = true - device_type_windows = true - device_type_zeroclient = true + device_type_android = "ALLOW" + device_type_chromeos = "ALLOW" + device_type_ios = "ALLOW" + device_type_osx = "ALLOW" + device_type_web = "DENY" + device_type_windows = "DENY" + device_type_zeroclient = "DENY" } workspace_creation_properties { diff --git a/aws/resource_aws_workspaces_directory.go b/aws/resource_aws_workspaces_directory.go index 80af440e9f7..1a667ba8be9 100644 --- a/aws/resource_aws_workspaces_directory.go +++ b/aws/resource_aws_workspaces_directory.go @@ -7,6 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/workspaces" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/workspaces/waiter" ) @@ -112,39 +113,46 @@ func resourceAwsWorkspacesDirectory() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "device_type_android": { - Type: schema.TypeBool, - Optional: true, - Default: false, + Type: schema.TypeString, + Optional: true, + Default: workspaces.AccessPropertyValueAllow, + ValidateFunc: validation.StringInSlice(workspaces.AccessPropertyValue_Values(), false), }, "device_type_chromeos": { - Type: schema.TypeBool, - Optional: true, - Default: false, + Type: schema.TypeString, + Optional: true, + Default: workspaces.AccessPropertyValueAllow, + ValidateFunc: 
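// The second argument to StringInSlice below is ignoreCase; false keeps the
// match case-sensitive, so only the uppercase ALLOW/DENY enum values pass: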
validation.StringInSlice(workspaces.AccessPropertyValue_Values(), false), }, "device_type_ios": { - Type: schema.TypeBool, - Optional: true, - Default: true, + Type: schema.TypeString, + Optional: true, + Default: workspaces.AccessPropertyValueAllow, + ValidateFunc: validation.StringInSlice(workspaces.AccessPropertyValue_Values(), false), }, "device_type_osx": { - Type: schema.TypeBool, - Optional: true, - Default: false, + Type: schema.TypeString, + Optional: true, + Default: workspaces.AccessPropertyValueAllow, + ValidateFunc: validation.StringInSlice(workspaces.AccessPropertyValue_Values(), false), }, "device_type_web": { - Type: schema.TypeBool, - Optional: true, - Default: false, + Type: schema.TypeString, + Optional: true, + Default: workspaces.AccessPropertyValueAllow, + ValidateFunc: validation.StringInSlice(workspaces.AccessPropertyValue_Values(), false), }, "device_type_windows": { - Type: schema.TypeBool, - Optional: true, - Default: false, + Type: schema.TypeString, + Optional: true, + Default: workspaces.AccessPropertyValueAllow, + ValidateFunc: validation.StringInSlice(workspaces.AccessPropertyValue_Values(), false), }, "device_type_zeroclient": { - Type: schema.TypeBool, - Optional: true, - Default: false, + Type: schema.TypeString, + Optional: true, + Default: workspaces.AccessPropertyValueAllow, + ValidateFunc: validation.StringInSlice(workspaces.AccessPropertyValue_Values(), false), }, }, }, @@ -238,7 +246,7 @@ func resourceAwsWorkspacesDirectoryCreate(d *schema.ResourceData, meta interface log.Printf("[DEBUG] Modifying WorkSpaces Directory (%s) access properties", directoryID) _, err := conn.ModifyWorkspaceAccessProperties(&workspaces.ModifyWorkspaceAccessPropertiesInput{ ResourceId: aws.String(directoryID), - WorkspaceAccessProperties: expandAccessProperties(v.([]interface{})), + WorkspaceAccessProperties: expandWorkspaceAccessProperties(v.([]interface{})), }) if err != nil { return fmt.Errorf("error setting WorkSpaces Directory (%s) access properties: %w", directoryID, err) @@ -304,7 +312,7 @@ func resourceAwsWorkspacesDirectoryRead(d *schema.ResourceData, meta interface{} return fmt.Errorf("error setting self_service_permissions: %w", err) } - if err := d.Set("workspace_access_properties", flattenAccessProperties(directory.WorkspaceAccessProperties)); err != nil { + if err := d.Set("workspace_access_properties", flattenWorkspaceAccessProperties(directory.WorkspaceAccessProperties)); err != nil { return fmt.Errorf("error setting workspace_access_properties: %w", err) } @@ -355,7 +363,7 @@ func resourceAwsWorkspacesDirectoryUpdate(d *schema.ResourceData, meta interface _, err := conn.ModifyWorkspaceAccessProperties(&workspaces.ModifyWorkspaceAccessPropertiesInput{ ResourceId: aws.String(d.Id()), - WorkspaceAccessProperties: expandAccessProperties(properties), + WorkspaceAccessProperties: expandWorkspaceAccessProperties(properties), }) if err != nil { return fmt.Errorf("error updating WorkSpaces Directory (%s) access properties: %w", d.Id(), err) @@ -445,7 +453,7 @@ func workspacesDirectoryDelete(id string, conn *workspaces.WorkSpaces) error { return nil } -func expandAccessProperties(properties []interface{}) *workspaces.WorkspaceAccessProperties { +func expandWorkspaceAccessProperties(properties []interface{}) *workspaces.WorkspaceAccessProperties { if len(properties) == 0 || properties[0] == nil { return nil } @@ -454,46 +462,32 @@ func expandAccessProperties(properties []interface{}) *workspaces.WorkspaceAcces p := properties[0].(map[string]interface{}) - if 
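// In the string-based expander that replaces this block, only non-empty
// values are copied into the request; a field that is never set stays nil
// and is simply not sent.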
p["device_type_android"].(bool) { - result.DeviceTypeAndroid = aws.String(workspaces.AccessPropertyValueAllow) - } else { - result.DeviceTypeAndroid = aws.String(workspaces.AccessPropertyValueDeny) + if p["device_type_android"].(string) != "" { + result.DeviceTypeAndroid = aws.String(p["device_type_android"].(string)) } - if p["device_type_chromeos"].(bool) { - result.DeviceTypeChromeOs = aws.String(workspaces.AccessPropertyValueAllow) - } else { - result.DeviceTypeChromeOs = aws.String(workspaces.AccessPropertyValueDeny) + if p["device_type_chromeos"].(string) != "" { + result.DeviceTypeChromeOs = aws.String(p["device_type_chromeos"].(string)) } - if p["device_type_ios"].(bool) { - result.DeviceTypeIos = aws.String(workspaces.AccessPropertyValueAllow) - } else { - result.DeviceTypeIos = aws.String(workspaces.AccessPropertyValueDeny) + if p["device_type_ios"].(string) != "" { + result.DeviceTypeIos = aws.String(p["device_type_ios"].(string)) } - if p["device_type_osx"].(bool) { - result.DeviceTypeOsx = aws.String(workspaces.AccessPropertyValueAllow) - } else { - result.DeviceTypeOsx = aws.String(workspaces.AccessPropertyValueDeny) + if p["device_type_osx"].(string) != "" { + result.DeviceTypeOsx = aws.String(p["device_type_osx"].(string)) } - if p["device_type_web"].(bool) { - result.DeviceTypeWeb = aws.String(workspaces.AccessPropertyValueAllow) - } else { - result.DeviceTypeWeb = aws.String(workspaces.AccessPropertyValueDeny) + if p["device_type_web"].(string) != "" { + result.DeviceTypeWeb = aws.String(p["device_type_web"].(string)) } - if p["device_type_windows"].(bool) { - result.DeviceTypeWindows = aws.String(workspaces.AccessPropertyValueAllow) - } else { - result.DeviceTypeWindows = aws.String(workspaces.AccessPropertyValueDeny) + if p["device_type_windows"].(string) != "" { + result.DeviceTypeWindows = aws.String(p["device_type_windows"].(string)) } - if p["device_type_zeroclient"].(bool) { - result.DeviceTypeZeroClient = aws.String(workspaces.AccessPropertyValueAllow) - } else { - result.DeviceTypeZeroClient = aws.String(workspaces.AccessPropertyValueDeny) + if p["device_type_zeroclient"].(string) != "" { + result.DeviceTypeZeroClient = aws.String(p["device_type_zeroclient"].(string)) } return result @@ -565,77 +559,22 @@ func expandWorkspaceCreationProperties(properties []interface{}) *workspaces.Wor return result } -func flattenAccessProperties(properties *workspaces.WorkspaceAccessProperties) []interface{} { +func flattenWorkspaceAccessProperties(properties *workspaces.WorkspaceAccessProperties) []interface{} { if properties == nil { return []interface{}{} } - result := map[string]interface{}{} - - switch *properties.DeviceTypeAndroid { - case workspaces.AccessPropertyValueAllow: - result["device_type_android"] = true - case workspaces.AccessPropertyValueDeny: - result["device_type_android"] = false - default: - result["device_type_android"] = nil - } - - switch *properties.DeviceTypeChromeOs { - case workspaces.AccessPropertyValueAllow: - result["device_type_chromeos"] = true - case workspaces.AccessPropertyValueDeny: - result["device_type_chromeos"] = false - default: - result["device_type_chromeos"] = nil - } - - switch *properties.DeviceTypeIos { - case workspaces.AccessPropertyValueAllow: - result["device_type_ios"] = true - case workspaces.AccessPropertyValueDeny: - result["device_type_ios"] = false - default: - result["device_type_ios"] = nil - } - - switch *properties.DeviceTypeOsx { - case workspaces.AccessPropertyValueAllow: - result["device_type_osx"] = true - case 
workspaces.AccessPropertyValueDeny: - result["device_type_osx"] = false - default: - result["device_type_osx"] = nil - } - - switch *properties.DeviceTypeWeb { - case workspaces.AccessPropertyValueAllow: - result["device_type_web"] = true - case workspaces.AccessPropertyValueDeny: - result["device_type_web"] = false - default: - result["device_type_web"] = nil - } - - switch *properties.DeviceTypeWindows { - case workspaces.AccessPropertyValueAllow: - result["device_type_windows"] = true - case workspaces.AccessPropertyValueDeny: - result["device_type_windows"] = false - default: - result["device_type_windows"] = nil - } - - switch *properties.DeviceTypeZeroClient { - case workspaces.AccessPropertyValueAllow: - result["device_type_zeroclient"] = true - case workspaces.AccessPropertyValueDeny: - result["device_type_zeroclient"] = false - default: - result["device_type_zeroclient"] = nil + return []interface{}{ + map[string]interface{}{ + "device_type_android": aws.StringValue(properties.DeviceTypeAndroid), + "device_type_chromeos": aws.StringValue(properties.DeviceTypeChromeOs), + "device_type_ios": aws.StringValue(properties.DeviceTypeIos), + "device_type_osx": aws.StringValue(properties.DeviceTypeOsx), + "device_type_web": aws.StringValue(properties.DeviceTypeWeb), + "device_type_windows": aws.StringValue(properties.DeviceTypeWindows), + "device_type_zeroclient": aws.StringValue(properties.DeviceTypeZeroClient), + }, } - - return []interface{}{result} } func flattenSelfServicePermissions(permissions *workspaces.SelfservicePermissions) []interface{} { diff --git a/aws/resource_aws_workspaces_directory_test.go b/aws/resource_aws_workspaces_directory_test.go index 1c2810f2512..d6f53d0196c 100644 --- a/aws/resource_aws_workspaces_directory_test.go +++ b/aws/resource_aws_workspaces_directory_test.go @@ -271,13 +271,13 @@ func TestAccAwsWorkspacesDirectory_workspaceAccessProperties(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckAwsWorkspacesDirectoryExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.#", "1"), - resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_android", "true"), - resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_chromeos", "true"), - resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_ios", "true"), - resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_osx", "true"), - resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_web", "true"), - resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_windows", "true"), - resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_zeroclient", "true"), + resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_android", "ALLOW"), + resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_chromeos", "ALLOW"), + resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_ios", "ALLOW"), + resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_osx", "ALLOW"), + resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_web", "DENY"), + resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_windows", "DENY"), + 
resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_zeroclient", "DENY"), ), }, }, @@ -486,6 +486,92 @@ func TestFlattenSelfServicePermissions(t *testing.T) { } } +func TestExpandWorkspaceAccessProperties(t *testing.T) { + cases := []struct { + input []interface{} + expected *workspaces.WorkspaceAccessProperties + }{ + // Empty + { + input: []interface{}{}, + expected: nil, + }, + // Full + { + input: []interface{}{ + map[string]interface{}{ + "device_type_android": "ALLOW", + "device_type_chromeos": "ALLOW", + "device_type_ios": "ALLOW", + "device_type_osx": "ALLOW", + "device_type_web": "DENY", + "device_type_windows": "DENY", + "device_type_zeroclient": "DENY", + }, + }, + expected: &workspaces.WorkspaceAccessProperties{ + DeviceTypeAndroid: aws.String("ALLOW"), + DeviceTypeChromeOs: aws.String("ALLOW"), + DeviceTypeIos: aws.String("ALLOW"), + DeviceTypeOsx: aws.String("ALLOW"), + DeviceTypeWeb: aws.String("DENY"), + DeviceTypeWindows: aws.String("DENY"), + DeviceTypeZeroClient: aws.String("DENY"), + }, + }, + } + + for _, c := range cases { + actual := expandWorkspaceAccessProperties(c.input) + if !reflect.DeepEqual(actual, c.expected) { + t.Fatalf("expected\n\n%#+v\n\ngot\n\n%#+v", c.expected, actual) + } + } +} + +func TestFlattenWorkspaceAccessProperties(t *testing.T) { + cases := []struct { + input *workspaces.WorkspaceAccessProperties + expected []interface{} + }{ + // Empty + { + input: nil, + expected: []interface{}{}, + }, + // Full + { + input: &workspaces.WorkspaceAccessProperties{ + DeviceTypeAndroid: aws.String("ALLOW"), + DeviceTypeChromeOs: aws.String("ALLOW"), + DeviceTypeIos: aws.String("ALLOW"), + DeviceTypeOsx: aws.String("ALLOW"), + DeviceTypeWeb: aws.String("DENY"), + DeviceTypeWindows: aws.String("DENY"), + DeviceTypeZeroClient: aws.String("DENY"), + }, + expected: []interface{}{ + map[string]interface{}{ + "device_type_android": "ALLOW", + "device_type_chromeos": "ALLOW", + "device_type_ios": "ALLOW", + "device_type_osx": "ALLOW", + "device_type_web": "DENY", + "device_type_windows": "DENY", + "device_type_zeroclient": "DENY", + }, + }, + }, + } + + for _, c := range cases { + actual := flattenWorkspaceAccessProperties(c.input) + if !reflect.DeepEqual(actual, c.expected) { + t.Fatalf("expected\n\n%#+v\n\ngot\n\n%#+v", c.expected, actual) + } + } +} + func TestExpandWorkspaceCreationProperties(t *testing.T) { cases := []struct { input []interface{} @@ -833,13 +919,13 @@ resource "aws_workspaces_directory" "main" { directory_id = aws_directory_service_directory.main.id workspace_access_properties { - device_type_android = true - device_type_chromeos = true - device_type_ios = true - device_type_osx = true - device_type_web = true - device_type_windows = true - device_type_zeroclient = true + device_type_android = "ALLOW" + device_type_chromeos = "ALLOW" + device_type_ios = "ALLOW" + device_type_osx = "ALLOW" + device_type_web = "DENY" + device_type_windows = "DENY" + device_type_zeroclient = "DENY" } tags = { diff --git a/website/docs/r/workspaces_directory.html.markdown b/website/docs/r/workspaces_directory.html.markdown index b481344283d..f9b523949d0 100644 --- a/website/docs/r/workspaces_directory.html.markdown +++ b/website/docs/r/workspaces_directory.html.markdown @@ -35,13 +35,13 @@ resource "aws_workspaces_directory" "example" { } workspace_access_properties { - device_type_android = true - device_type_chromeos = true - device_type_ios = true - device_type_osx = true - device_type_web = true - device_type_windows = true - 
device_type_zeroclient = true + device_type_android = "ALLOW" + device_type_chromeos = "ALLOW" + device_type_ios = "ALLOW" + device_type_osx = "ALLOW" + device_type_web = "DENY" + device_type_windows = "DENY" + device_type_zeroclient = "DENY" } workspace_creation_properties { From ea785d25b0202bba1c7cc479ee61bcee1df9fdb8 Mon Sep 17 00:00:00 2001 From: Andrew Babichev Date: Thu, 10 Dec 2020 22:26:29 +0200 Subject: [PATCH 0522/1212] Test workspace_access_properties default values --- aws/resource_aws_workspaces_directory.go | 7 ------- aws/resource_aws_workspaces_directory_test.go | 8 ++++++++ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/aws/resource_aws_workspaces_directory.go b/aws/resource_aws_workspaces_directory.go index 1a667ba8be9..ee369ad22a5 100644 --- a/aws/resource_aws_workspaces_directory.go +++ b/aws/resource_aws_workspaces_directory.go @@ -115,43 +115,36 @@ func resourceAwsWorkspacesDirectory() *schema.Resource { "device_type_android": { Type: schema.TypeString, Optional: true, - Default: workspaces.AccessPropertyValueAllow, ValidateFunc: validation.StringInSlice(workspaces.AccessPropertyValue_Values(), false), }, "device_type_chromeos": { Type: schema.TypeString, Optional: true, - Default: workspaces.AccessPropertyValueAllow, ValidateFunc: validation.StringInSlice(workspaces.AccessPropertyValue_Values(), false), }, "device_type_ios": { Type: schema.TypeString, Optional: true, - Default: workspaces.AccessPropertyValueAllow, ValidateFunc: validation.StringInSlice(workspaces.AccessPropertyValue_Values(), false), }, "device_type_osx": { Type: schema.TypeString, Optional: true, - Default: workspaces.AccessPropertyValueAllow, ValidateFunc: validation.StringInSlice(workspaces.AccessPropertyValue_Values(), false), }, "device_type_web": { Type: schema.TypeString, Optional: true, - Default: workspaces.AccessPropertyValueAllow, ValidateFunc: validation.StringInSlice(workspaces.AccessPropertyValue_Values(), false), }, "device_type_windows": { Type: schema.TypeString, Optional: true, - Default: workspaces.AccessPropertyValueAllow, ValidateFunc: validation.StringInSlice(workspaces.AccessPropertyValue_Values(), false), }, "device_type_zeroclient": { Type: schema.TypeString, Optional: true, - Default: workspaces.AccessPropertyValueAllow, ValidateFunc: validation.StringInSlice(workspaces.AccessPropertyValue_Values(), false), }, }, diff --git a/aws/resource_aws_workspaces_directory_test.go b/aws/resource_aws_workspaces_directory_test.go index d6f53d0196c..5caeb1ff48e 100644 --- a/aws/resource_aws_workspaces_directory_test.go +++ b/aws/resource_aws_workspaces_directory_test.go @@ -90,6 +90,14 @@ func TestAccAwsWorkspacesDirectory_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", "2"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestCheckResourceAttr(resourceName, "ip_group_ids.#", "0"), + resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.#", "1"), + resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_android", "ALLOW"), + resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_chromeos", "ALLOW"), + resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_ios", "ALLOW"), + resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_osx", "ALLOW"), + resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_web", "DENY"), + 
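// These assertions pin what appear to be the service-side defaults once the
// schema-level Default values are removed: web access starts out denied,
// while the native client device types default to ALLOW.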
resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_windows", "ALLOW"), + resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_zeroclient", "ALLOW"), resource.TestCheckResourceAttr(resourceName, "workspace_creation_properties.#", "1"), resource.TestCheckResourceAttr(resourceName, "workspace_creation_properties.0.custom_security_group_id", ""), resource.TestCheckResourceAttr(resourceName, "workspace_creation_properties.0.default_ou", ""), From 412f7deec61e365df8c1316293954638ef2e4206 Mon Sep 17 00:00:00 2001 From: Andrew Babichev Date: Thu, 10 Dec 2020 22:59:05 +0200 Subject: [PATCH 0523/1212] Add acctest Name tag to all test workspaces --- aws/resource_aws_workspaces_directory_test.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_workspaces_directory_test.go b/aws/resource_aws_workspaces_directory_test.go index 5caeb1ff48e..4d69f00b393 100644 --- a/aws/resource_aws_workspaces_directory_test.go +++ b/aws/resource_aws_workspaces_directory_test.go @@ -80,6 +80,7 @@ func TestAccAwsWorkspacesDirectory_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "directory_type", workspaces.WorkspaceDirectoryTypeSimpleAd), resource.TestCheckResourceAttr(resourceName, "dns_ip_addresses.#", "2"), resource.TestCheckResourceAttrPair(resourceName, "iam_role_id", iamRoleDataSourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "ip_group_ids.#", "0"), resource.TestCheckResourceAttrSet(resourceName, "registration_code"), resource.TestCheckResourceAttr(resourceName, "self_service_permissions.#", "1"), resource.TestCheckResourceAttr(resourceName, "self_service_permissions.0.change_compute_type", "false"), @@ -88,8 +89,8 @@ func TestAccAwsWorkspacesDirectory_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "self_service_permissions.0.restart_workspace", "true"), resource.TestCheckResourceAttr(resourceName, "self_service_permissions.0.switch_running_mode", "false"), resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", "2"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), - resource.TestCheckResourceAttr(resourceName, "ip_group_ids.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", fmt.Sprintf("tf-testacc-workspaces-directory-%[1]s", rName)), resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.#", "1"), resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_android", "ALLOW"), resource.TestCheckResourceAttr(resourceName, "workspace_access_properties.0.device_type_chromeos", "ALLOW"), @@ -882,12 +883,17 @@ resource "aws_workspaces_directory" "main" { func testAccWorkspacesDirectoryConfig_subnetIds(rName string) string { return composeConfig( - testAccAwsWorkspacesDirectoryConfig_Prerequisites(rName), ` + testAccAwsWorkspacesDirectoryConfig_Prerequisites(rName), + fmt.Sprintf(` resource "aws_workspaces_directory" "main" { directory_id = aws_directory_service_directory.main.id subnet_ids = [aws_subnet.primary.id, aws_subnet.secondary.id] + + tags = { + Name = "tf-testacc-workspaces-directory-%[1]s" + } } -`) +`, rName)) } func testAccWorkspacesDirectoryConfigTags1(rName, tagKey1, tagValue1 string) string { From 408b892303d80e9e0d3b8f58599881eba4bab545 Mon Sep 17 00:00:00 2001 From: Andrew Babichev Date: Mon, 14 Dec 2020 15:43:02 +0200 Subject: [PATCH 0524/1212] Remove Otional and MaxItems from 
data source schema --- aws/data_source_aws_workspaces_directory.go | 23 ++++----------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/aws/data_source_aws_workspaces_directory.go b/aws/data_source_aws_workspaces_directory.go index eccd5f4d388..e3f45dabe51 100644 --- a/aws/data_source_aws_workspaces_directory.go +++ b/aws/data_source_aws_workspaces_directory.go @@ -89,43 +89,34 @@ func dataSourceAwsWorkspacesDirectory() *schema.Resource { "workspace_access_properties": { Type: schema.TypeList, Computed: true, - Optional: true, - MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "device_type_android": { Type: schema.TypeString, - Optional: true, Computed: true, }, "device_type_chromeos": { Type: schema.TypeString, - Optional: true, Computed: true, }, "device_type_ios": { Type: schema.TypeString, - Optional: true, Computed: true, }, "device_type_osx": { Type: schema.TypeString, - Optional: true, Computed: true, }, "device_type_web": { Type: schema.TypeString, - Optional: true, Computed: true, }, "device_type_windows": { Type: schema.TypeString, - Optional: true, Computed: true, }, "device_type_zeroclient": { Type: schema.TypeString, - Optional: true, Computed: true, }, }, @@ -134,33 +125,27 @@ func dataSourceAwsWorkspacesDirectory() *schema.Resource { "workspace_creation_properties": { Type: schema.TypeList, Computed: true, - Optional: true, - MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "custom_security_group_id": { Type: schema.TypeString, - Optional: true, Computed: true, }, "default_ou": { Type: schema.TypeString, - Optional: true, + Computed: true, }, "enable_internet_access": { Type: schema.TypeBool, - Optional: true, - Default: false, + Computed: true, }, "enable_maintenance_mode": { Type: schema.TypeBool, - Optional: true, - Default: false, + Computed: true, }, "user_enabled_as_local_administrator": { Type: schema.TypeBool, - Optional: true, - Default: false, + Computed: true, }, }, }, From 7f4f2e05684b37e8c3259a72c71a918fb4e74db4 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Mon, 7 Dec 2020 15:48:18 -0600 Subject: [PATCH 0525/1212] ds/partition: Add DNS prefix --- aws/data_source_aws_partition.go | 14 ++++++++++++++ aws/data_source_aws_partition_test.go | 1 + website/docs/d/partition.html.markdown | 1 + 3 files changed, 16 insertions(+) diff --git a/aws/data_source_aws_partition.go b/aws/data_source_aws_partition.go index 6f01b88674d..7ec2cfaf4d0 100644 --- a/aws/data_source_aws_partition.go +++ b/aws/data_source_aws_partition.go @@ -2,6 +2,8 @@ package aws import ( "log" + "sort" + "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -15,10 +17,16 @@ func dataSourceAwsPartition() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "dns_suffix": { Type: schema.TypeString, Computed: true, }, + + "service_prefix": { + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -35,5 +43,11 @@ func dataSourceAwsPartitionRead(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] Setting AWS URL Suffix to %s.", client.dnsSuffix) d.Set("dns_suffix", meta.(*AWSClient).dnsSuffix) + dnsParts := strings.Split(meta.(*AWSClient).dnsSuffix, ".") + sort.Sort(sort.Reverse(sort.StringSlice(dnsParts))) + servicePrefix := strings.Join(dnsParts, ".") + d.Set("service_prefix", servicePrefix) + log.Printf("[DEBUG] Setting service prefix to %s.", servicePrefix) + return nil } diff --git a/aws/data_source_aws_partition_test.go b/aws/data_source_aws_partition_test.go index 
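A subtlety in the hunk above: `sort.Sort(sort.Reverse(sort.StringSlice(dnsParts)))` sorts the DNS labels in descending lexicographic order rather than reversing their positions. The two happen to coincide for the two-label suffix `amazonaws.com` (both give `com.amazonaws`), but for `amazonaws.com.cn` a descending sort yields `com.cn.amazonaws`, whereas the documentation added in this patch promises `cn.com.amazonaws`. A minimal sketch of an index-swapping reversal that matches the documented value (the helper name is illustrative, not part of the patch):

```go
package main

import (
	"fmt"
	"strings"
)

// reverseDNSLabels reverses the order of the labels in a DNS suffix,
// e.g. "amazonaws.com.cn" becomes "cn.com.amazonaws".
func reverseDNSLabels(dnsSuffix string) string {
	parts := strings.Split(dnsSuffix, ".")
	for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 {
		parts[i], parts[j] = parts[j], parts[i]
	}
	return strings.Join(parts, ".")
}

func main() {
	fmt.Println(reverseDNSLabels("amazonaws.com"))    // com.amazonaws
	fmt.Println(reverseDNSLabels("amazonaws.com.cn")) // cn.com.amazonaws
}
```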
d254860377e..71884542011 100644 --- a/aws/data_source_aws_partition_test.go +++ b/aws/data_source_aws_partition_test.go @@ -18,6 +18,7 @@ func TestAccAWSPartition_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAwsPartition("data.aws_partition.current"), testAccCheckAwsDnsSuffix("data.aws_partition.current"), + resource.TestCheckResourceAttr("data.aws_partition.current", "service_prefix", testAccGetPartitionReverseDNSPrefix()), ), }, }, diff --git a/website/docs/d/partition.html.markdown b/website/docs/d/partition.html.markdown index f3fe61ccf4f..1647c633c6d 100644 --- a/website/docs/d/partition.html.markdown +++ b/website/docs/d/partition.html.markdown @@ -40,3 +40,4 @@ There are no arguments available for this data source. * `dns_suffix` - Base DNS domain name for the current partition (e.g. `amazonaws.com` in AWS Commercial, `amazonaws.com.cn` in AWS China). * `id` - Identifier of the current partition (e.g. `aws` in AWS Commercial, `aws-cn` in AWS China). * `partition` - Identifier of the current partition (e.g. `aws` in AWS Commercial, `aws-cn` in AWS China). +* `service_prefix` - Prefix of service names (e.g. `com.amazonaws` in AWS Commercial, `cn.com.amazonaws` in AWS China). From 99c0da9bacaa4ffaf000dafd0a0ee5b4b75b5b67 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 11 Jan 2021 09:21:00 -0500 Subject: [PATCH 0526/1212] Fix typo in 'data-handling-and-conversion.md' (#17042) Missing ')' prevents copy-and-paste. --- docs/contributing/data-handling-and-conversion.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/contributing/data-handling-and-conversion.md b/docs/contributing/data-handling-and-conversion.md index acc6f79ea8b..a8244309263 100644 --- a/docs/contributing/data-handling-and-conversion.md +++ b/docs/contributing/data-handling-and-conversion.md @@ -605,7 +605,7 @@ To read: ```go input := service.ExampleOperationInput{} -if v, ok := tfMap["nested_attribute_name"].(map[string]interface{}; ok && len(v) > 0 { +if v, ok := tfMap["nested_attribute_name"].(map[string]interface{}); ok && len(v) > 0 { apiObject.NestedAttributeName = stringMapToPointers(v) } ``` From 84bd672176090008626b2ae92a152c809f31bcb3 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Mon, 11 Jan 2021 09:25:22 -0500 Subject: [PATCH 0527/1212] data source/partition: Rename attribute --- aws/data_source_aws_partition.go | 4 ++-- aws/data_source_aws_partition_test.go | 2 +- website/docs/d/partition.html.markdown | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/aws/data_source_aws_partition.go b/aws/data_source_aws_partition.go index 7ec2cfaf4d0..9e2c40e502a 100644 --- a/aws/data_source_aws_partition.go +++ b/aws/data_source_aws_partition.go @@ -23,7 +23,7 @@ func dataSourceAwsPartition() *schema.Resource { Computed: true, }, - "service_prefix": { + "reverse_dns_prefix": { Type: schema.TypeString, Computed: true, }, @@ -46,7 +46,7 @@ func dataSourceAwsPartitionRead(d *schema.ResourceData, meta interface{}) error dnsParts := strings.Split(meta.(*AWSClient).dnsSuffix, ".") sort.Sort(sort.Reverse(sort.StringSlice(dnsParts))) servicePrefix := strings.Join(dnsParts, ".") - d.Set("service_prefix", servicePrefix) + d.Set("reverse_dns_prefix", servicePrefix) log.Printf("[DEBUG] Setting service prefix to %s.", servicePrefix) return nil diff --git a/aws/data_source_aws_partition_test.go b/aws/data_source_aws_partition_test.go index 71884542011..f23951978dd 100644 --- a/aws/data_source_aws_partition_test.go +++ b/aws/data_source_aws_partition_test.go @@ 
-18,7 +18,7 @@ func TestAccAWSPartition_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAwsPartition("data.aws_partition.current"), testAccCheckAwsDnsSuffix("data.aws_partition.current"), - resource.TestCheckResourceAttr("data.aws_partition.current", "service_prefix", testAccGetPartitionReverseDNSPrefix()), + resource.TestCheckResourceAttr("data.aws_partition.current", "reverse_dns_prefix", testAccGetPartitionReverseDNSPrefix()), ), }, }, diff --git a/website/docs/d/partition.html.markdown b/website/docs/d/partition.html.markdown index 1647c633c6d..741ffd1b9b8 100644 --- a/website/docs/d/partition.html.markdown +++ b/website/docs/d/partition.html.markdown @@ -40,4 +40,4 @@ There are no arguments available for this data source. * `dns_suffix` - Base DNS domain name for the current partition (e.g. `amazonaws.com` in AWS Commercial, `amazonaws.com.cn` in AWS China). * `id` - Identifier of the current partition (e.g. `aws` in AWS Commercial, `aws-cn` in AWS China). * `partition` - Identifier of the current partition (e.g. `aws` in AWS Commercial, `aws-cn` in AWS China). -* `service_prefix` - Prefix of service names (e.g. `com.amazonaws` in AWS Commercial, `cn.com.amazonaws` in AWS China). +* `reverse_dns_prefix` - Prefix of service names (e.g. `com.amazonaws` in AWS Commercial, `cn.com.amazonaws` in AWS China). From c92f15ee8df6b65a17c6a3ed9914f04897bce7e8 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Mon, 11 Jan 2021 12:27:34 -0500 Subject: [PATCH 0528/1212] add lakeformation to issue/pr labeler --- .hashibot.hcl | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.hashibot.hcl b/.hashibot.hcl index 27f9cfe1d97..fbf7a56a9ce 100644 --- a/.hashibot.hcl +++ b/.hashibot.hcl @@ -387,6 +387,9 @@ behavior "regexp_issue_labeler_v2" "service_labels" { "service/kms" = [ "aws_kms_", ], + "service/lakeformation" = [ + "aws_lakeformation_", + ], "service/lambda" = [ "aws_lambda_", ], @@ -1186,6 +1189,11 @@ behavior "pull_request_path_labeler" "service_labels" { "**/*_kms_*", "**/kms_*" ] + "service/lakeformation" = [ + "aws/internal/service/lakeformation/**/*", + "**/*_lakeformation_*", + "**/lakeformation_*" + ] "service/lambda" = [ "aws/internal/service/lambda/**/*", "**/*_lambda_*", From 62c1d45eceee22b6dbd6d5479444b6c2dff40b1f Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Mon, 11 Jan 2021 14:22:29 -0500 Subject: [PATCH 0529/1212] docs/contributing: Initial error handling documentation (#16794) * docs/contributing: Initial error handling documentation Some initial and lift/shift documentation on how to handle errors in the Terraform AWS Provider codebase. Provides a walkthrough of the AWS Go SDK error type, Terraform Plugin SDK error types, and sections on some common error scenarios and expected handling. 
* docs/contributing: Additional information for AWS Go SDK Error Code Constants * docs/contributing: Fix misspell reports * Update docs/contributing/error-handling.md Co-authored-by: Kit Ewbank * Update docs/contributing/error-handling.md Co-authored-by: Kit Ewbank Co-authored-by: Kit Ewbank --- docs/CONTRIBUTING.md | 1 + docs/contributing/error-handling.md | 366 ++++++++++++++++++ .../pullrequest-submission-and-lifecycle.md | 39 +- 3 files changed, 370 insertions(+), 36 deletions(-) create mode 100644 docs/contributing/error-handling.md diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index 9e32dc2a6b5..8c0cabf9d4a 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -20,3 +20,4 @@ This documentation also contains reference material specific to certain function - [Running and Writing Acceptance Tests](contributing/running-and-writing-acceptance-tests.md) - [Data Handling and Conversion](contributing/data-handling-and-conversion.md) +- [Error Handling](contributing/error-handling.md) diff --git a/docs/contributing/error-handling.md b/docs/contributing/error-handling.md new file mode 100644 index 00000000000..301fb1924e0 --- /dev/null +++ b/docs/contributing/error-handling.md @@ -0,0 +1,366 @@ +# Error Handling + +_Please Note: This documentation is intended for Terraform AWS Provider code developers. Typical operators writing and applying Terraform configurations do not need to read or understand this material._ + +The Terraform AWS Provider codebase bridges the implementation of a [Terraform Plugin](https://www.terraform.io/docs/extend/how-terraform-works.html) and an AWS API client to support AWS operations and data types as Terraform Resources. An important aspect of performing resource and remote actions is properly handling those operations, but those operations are not guaranteed to succeed every time. Some common examples include unreliable network connections, necessary permissions that are not properly set up, incorrect Terraform configurations, and remote systems that respond unexpectedly. All these situations lead to an unexpected workflow action that must be surfaced to the Terraform user interface for operators to troubleshoot. This guide is intended to explain and show various Terraform AWS Provider code implementations that are considered best practice for surfacing these issues properly to operators and code maintainers.
+ +- [General Guidelines and Helpers](#general-guidelines-and-helpers) + - [Naming and Check Style](#naming-and-check-style) + - [Wrap Errors](#wrap-errors) + - [AWS Go SDK Errors](#aws-go-sdk-errors) + - [AWS Go SDK Error Helpers](#aws-go-sdk-error-helpers) + - [Use AWS Go SDK Error Code Constants](#use-aws-go-sdk-error-code-constants) + - [Terraform Plugin SDK Types and Helpers](#terraform-plugin-sdk-types-and-helpers) +- [Resource Lifecycle Guidelines](#resource-lifecycle-guidelines) + - [Resource Creation](#resource-creation) + - [d.IsNewResource() Checks](#disnewresource-checks) + - [Creation Error Message Context](#creation-error-message-context) + - [Resource Deletion](#resource-deletion) + - [Resource Already Deleted](#resource-already-deleted) + - [Deletion Error Message Context](#deletion-error-message-context) + - [Resource Read](#resource-read) + - [Singular Data Source Errors](#singular-data-source-errors) + - [Plural Data Source Errors](#plural-data-source-errors) + - [Read Error Message Context](#read-error-message-context) + - [Resource Update](#resource-update) + - [Update Error Message Context](#update-error-message-context) + +## General Guidelines and Helpers + +### Naming and Check Style + +Following typical Go conventions, error variables in the Terraform AWS Provider codebase should be named `err`, e.g. + +```go +result, err := strconv.Atoi("oh no!") +``` + +The code that then checks these errors should prefer `if` conditionals that usually `return` (or in the case of looping constructs, `break`/`continue`) early, especially in the case of multiple error checks, e.g. + +```go +if /* ... something checking err first ... */ { + // ... return, break, continue, etc. ... +} + +if err != nil { + // ... return, break, continue, etc. ... +} + +// all good! +``` + +This is preferred over some other styles of error checking, such as `switch` conditionals without a condition. + +### Wrap Errors + +Go implements error wrapping, which means that a deeply nested function call can return a particular error type, while each function up the stack can provide additional error message context without losing the ability to determine the original error. Additional information about this concept can be found on the [Go blog entry titled Working with Errors in Go 1.13](https://blog.golang.org/go1.13-errors). + +For most use cases in this codebase, this means if code is receiving an error and needs to return it, it should implement [`fmt.Errorf()`](https://pkg.go.dev/fmt#Errorf) and the `%w` verb, e.g. + +```go +return fmt.Errorf("adding some additional message: %w", err) +``` + +### AWS Go SDK Errors + +The [AWS Go SDK documentation](https://docs.aws.amazon.com/sdk-for-go/) includes a [section on handling errors](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/handling-errors.html), which is recommended reading. + +For the purposes of this documentation, the most important concepts with handling these errors are: + +- Each response error (which eventually implements `awserr.Error`) has a `string` error code (`Code`) and `string` error message (`Message`). When printed as a string, they format as: `Code: Message`, e.g. `InvalidParameterValueException: IAM Role arn:aws:iam::123456789012:role/XXX cannot be assumed by AWS Backup`. +- Error handling is almost exclusively done via those `string` fields and not other response information, such as HTTP Status Codes. +- When the error code is non-specific, the error message should also be checked.
Unfortunately, AWS APIs generally do not provide documentation or API modeling with the contents of these messages and often the Terraform AWS Provider code must rely on substring matching. +- Not all errors are returned in the response error from an AWS Go SDK operation. This is service and sometimes API call specific. For example, the [EC2 `DeleteVpcEndpoints` API call](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DeleteVpcEndpoints.html) can return a "successful" response (in terms of no response error) but include information in an `Unsuccessful` field in the response body. + +When working with AWS Go SDK errors, it is preferred to use the helpers outlined below and use the `%w` format verb. Code should generally avoid type assertions with the underlying `awserr.Error` type or calling its `Code()`, `Error()`, `Message()`, or `String()` receiver methods. Using the `%v`, `%#v`, or `%+v` format verbs generally provides extraneous information that is not helpful to operators or code maintainers. + +#### AWS Go SDK Error Helpers + +To simplify operations with AWS Go SDK error types, the following helpers are available via the `github.com/hashicorp/aws-sdk-go-base/tfawserr` Go package: + +- `tfawserr.ErrCodeEquals(err, "Code")`: Preferred when the error code is specific enough for the check condition. For example, a `ResourceNotFoundError` code provides enough information that the requested API resource identifier/Amazon Resource Name does not exist. +- `tfawserr.ErrMessageContains(err, "Code", "MessageContains")`: Does simple substring matching for the error message. + +The recommendation for error message checking is to be just specific enough to capture the anticipated issue, but not include _too_ much matching as the AWS API can change over time without notice. The maintainers have observed changes in wording and capitalization cause unexpected issues in the past. + +For example, given this error code and message: + +``` +InvalidParameterValueException: IAM Role arn:aws:iam::123456789012:role/XXX cannot be assumed by AWS Backup +``` + +An error check for this might be: + +```go +if tfawserr.ErrMessageContains(err, backup.ErrCodeInvalidParameterValueException, "cannot be assumed") { /* ... */ } +``` + +The Amazon Resource Name in the error message will be different for every environment and does not add value to the check. The AWS Backup suffix is also extraneous and could change should the service ever rename. + +_The codebase also contains an older style `isAWSErr(err, "CodeEquals", "MessageContains")` helper, which has not yet been refactored out. The helpers above are preferred for clarity._ + +#### Use AWS Go SDK Error Code Constants + +Each AWS Go SDK service API typically implements common error codes, which get exported as public constants in the AWS Go SDK. In the [AWS Go SDK API Reference](https://docs.aws.amazon.com/sdk-for-go/api/), these can be found in each of the service packages under the `Constants` section (typically named `ErrCode{ExceptionName}`). + +If an AWS Go SDK service API is missing an error code constant, an AWS Support case should be submitted and a new constant can be added to `aws/internal/service/{SERVICE}/errors.go` file (created if not present), e.g. 
+ +```go +const( + ErrCodeInvalidParameterException = "InvalidParameterException" +) +``` + +Then referencing code can use it via: + +```go +// imports +tf{SERVICE} "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/{SERVICE}" + +// logic +tfawserr.ErrCodeEquals(err, tf{SERVICE}.ErrCodeInvalidParameterException) +``` + +e.g. + +```go +// imports +tfec2 "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2" + +// logic +tfawserr.ErrCodeEquals(err, tfec2.ErrCodeInvalidParameterException) +``` + +### Terraform Plugin SDK Types and Helpers + +The Terraform Plugin SDK includes some error types which are used in certain operations and typically preferred over implementing new types: + +* [`resource.NotFoundError`](https://pkg.go.dev/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource#NotFoundError) +* [`resource.TimeoutError`](https://pkg.go.dev/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource#TimeoutError): Returned from [`resource.Retry()`](https://pkg.go.dev/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource#Retry), [`resource.RetryContext()`](https://pkg.go.dev/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource#RetryContext), [`(resource.StateChangeConf).WaitForState()`](https://pkg.go.dev/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource#StateChangeConf.WaitForState), and [`(resource.StateChangeConf).WaitForStateContext()`](https://pkg.go.dev/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource#StateChangeConf.WaitForStateContext) + +The Terraform AWS Provider codebase implements some additional helpers for working with these in the `github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource` package: + +- `tfresource.NotFound(err)`: Returns true if the error is a `resource.NotFoundError`. +- `tfresource.TimedOut(err)`: Returns true if the error is a `resource.TimeoutError` and contains no `LastError`. This typically signifies that the retry logic was never signaled for a retry, which can happen when AWS Go SDK operations are automatically retrying before returning. + +## Resource Lifecycle Guidelines + +Terraform CLI and the Terraform Plugin SDK have certain expectations and automatic behaviors depending on the lifecycle operation of a resource. This section highlights some common issues that can occur and their expected resolution. + +### Resource Creation + +Invoked in the resource via the `schema.Resource` type `Create`/`CreateContext` function. + +#### d.IsNewResource() Checks + +During resource creation, Terraform CLI expects either a properly applied state for the new resource or an error. To signal proper resource existence, the Terraform Plugin SDK uses an underlying resource identifier (set via `d.SetId(/* some value */)`). If for some reason the resource creation is returned without an error, but also without the resource identifier being set, Terraform CLI will return an error such as: + +``` +Error: Provider produced inconsistent result after apply + +When applying changes to aws_sns_topic_subscription.sqs, +provider "registry.terraform.io/hashicorp/aws" produced an unexpected new +value: Root resource was present, but now absent. + +This is a bug in the provider, which should be reported in the provider's own +issue tracker. +``` + +A typical pattern in resource implementations in the `Create`/`CreateContext` function is to `return` the `Read`/`ReadContext` function at the end to fill in the Terraform State for all attributes. 
Another typical pattern in resource implementations in the `Read`/`ReadContext` function is to remove the resource from the Terraform State if the remote system returns an error or status that indicates the remote resource no longer exists by explicitly calling `d.SetId("")` and returning no error. If the remote system is not strongly read-after-write consistent (eventually consistent), this means the resource creation can return no error and also return no resource state. + +To prevent this type of Terraform CLI error, the resource implementation should also check against `d.IsNewResource()` before removing from the Terraform State and returning no error. If that check is `true`, then remote operation error (or one synthesized from the non-existent status) should be returned instead. While adding this check will not fix the resource implementation to handle the eventually consistent nature of the remote system, the error being returned will be less opaque for operators and code maintainers to troubleshoot. + +In the Terraform AWS Provider, an initial fix for the Terraform CLI error will typically look like: + +```go +func resourceServiceThingCreate(d *schema.ResourceData, meta interface{}) error { + /* ... */ + + return resourceServiceThingRead(d, meta) +} + +func resourceServiceThingRead(d *schema.ResourceData, meta interface{}) error { + /* ... */ + + output, err := conn.DescribeServiceThing(input) + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, "ResourceNotFoundException") { + log.Printf("[WARN] {Service} {Thing} (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error reading {Service} {Thing} (%s): %w", d.Id(), err) + } + + /* ... */ +} +``` + +Future documentation will show how to properly retry the remote operation for a short period of time until it is successful to remove the error completely. + +#### Creation Error Message Context + +Returning errors during creation should include additional messaging about the location or cause of the error for operators and code maintainers by wrapping with [`fmt.Errorf()`](https://pkg.go.dev/fmt#Errorf): + +```go +if err != nil { + return fmt.Errorf("error creating {SERVICE} {THING}: %w", err) +} +``` + +e.g. + +```go +if err != nil { + return fmt.Errorf("error creating EC2 VPC: %w", err) +} +``` + +Code that also uses waiters or other operations that return errors should follow a similar pattern, including the resource identifier since it has typically been set before this execution: + +```go +if _, err := waiter.VpcAvailable(conn, d.Id()); err != nil { + return fmt.Errorf("error waiting for EC2 VPC (%s) availability: %w", d.Id(), err) +} +``` + +### Resource Deletion + +Invoked in the resource via the `schema.Resource` type `Delete`/`DeleteContext` function. + +#### Resource Already Deleted + +A typical pattern for resource deletion is to immediately perform the remote system deletion operation without checking existence. This is generally acceptable as operators are encouraged to always refresh their Terraform State prior to performing changes. However in certain scenarios, such as external systems modifying the remote system prior to the Terraform execution, it is certainly still possible that the remote system will return an error signifying that remote resource does not exist. In these cases, resources should implement logic that catches the error and returns no error. 
+ +_NOTE: The Terraform Plugin SDK automatically handles the equivalent of d.SetId("") on deletion, so it is not necessary to include it._ + +For example in the Terraform AWS Provider: + +```go +func resourceServiceThingDelete(d *schema.ResourceData, meta interface{}) error { + /* ... */ + + output, err := conn.DeleteServiceThing(input) + + if tfawserr.ErrCodeEquals(err, "ResourceNotFoundException") { + return nil + } + + if err != nil { + return fmt.Errorf("error deleting {Service} {Thing} (%s): %w", d.Id(), err) + } + + /* ... */ +} +``` + +#### Deletion Error Message Context + +Returning errors during deletion should include the resource identifier and additional messaging about the location or cause of the error for operators and code maintainers by wrapping with [`fmt.Errorf()`](https://pkg.go.dev/fmt#Errorf): + +```go +if err != nil { + return fmt.Errorf("error deleting {SERVICE} {THING} (%s): %w", d.Id(), err) +} +``` + +e.g. + +```go +if err != nil { + return fmt.Errorf("error deleting EC2 VPC (%s): %w", d.Id(), err) +} +``` + +Code that also uses waiters or other operations that return errors should follow a similar pattern: + +```go +if _, err := waiter.VpcDeleted(conn, d.Id()); err != nil { + return fmt.Errorf("error waiting for EC2 VPC (%s) deletion: %w", d.Id(), err) +} +``` + +### Resource Read + +Invoked in the resource via the `schema.Resource` type `Read`/`ReadContext` function. + +#### Singular Data Source Errors + +A data source which is expected to return Terraform State about a single remote resource is commonly referred to as a "singular" data source. Implementation-wise, it may use any available describe or listing functionality from the remote system to retrieve the information. In addition to any remote operation and other data handling errors that should be returned, these two additional cases should be covered: + +- Returning an error when zero results are found. +- Returning an error when multiple results are found. + +For remote operations that are designed to return an error when the remote resource is not found, this error is typically just passed through similar to other remote operation errors. For remote operations that are designed to return a successful result whether there are zero, one, or multiple results, the error must be generated. + +For example in pseudo-code: + +```go +output, err := conn.ListServiceThings(input) + +if err != nil { + return fmt.Errorf("error listing {Service} {Thing}s: %w", err) +} + +if output == nil || len(output.Results) == 0 { + return fmt.Errorf("no {Service} {Thing} found matching criteria; try different search") +} + +if len(output.Results) > 1 { + return fmt.Errorf("multiple {Service} {Thing} found matching criteria; try different search") +} +``` + +#### Plural Data Source Errors + +An emergent concept is a data source that returns multiple results, acting similarly to any listing functionality available from the remote system. These types of data sources should return _no_ error if zero results are returned and _no_ error if multiple results are found. Remote operation and other data handling errors should still be returned.
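+
+For example, in pseudo-code (a sketch that mirrors the singular example above; `ListServiceThings` and the `Results` field are placeholders rather than a real service API):
+
+```go
+output, err := conn.ListServiceThings(input)
+
+if err != nil {
+    return fmt.Errorf("error listing {Service} {Thing}s: %w", err)
+}
+
+var results []*service.Thing
+
+// Zero results and multiple results are both acceptable here; no error is generated.
+if output != nil {
+    results = output.Results
+}
+
+/* ... d.Set() the flattened results ... */
+```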
+ +#### Read Error Message Context + +Returning errors during read should include the resource identifier (for managed resources) and additional messaging about the location or cause of the error for operators and code maintainers by wrapping with [`fmt.Errorf()`](https://pkg.go.dev/fmt#Errorf): + +```go +if err != nil { + return fmt.Errorf("error reading {SERVICE} {THING} (%s): %w", d.Id(), err) +} +``` + +e.g. + +```go +if err != nil { + return fmt.Errorf("error reading EC2 VPC (%s): %w", d.Id(), err) +} +``` + +### Resource Update + +Invoked in the resource via the `schema.Resource` type `Update`/`UpdateContext` function. + +#### Update Error Message Context + +Returning errors during update should include the resource identifier and additional messaging about the location or cause of the error for operators and code maintainers by wrapping with [`fmt.Errorf()`](https://pkg.go.dev/fmt#Errorf): + +```go +if err != nil { + return fmt.Errorf("error updating {SERVICE} {THING} (%s): %w", d.Id(), err) +} +``` + +e.g. + +```go +if err != nil { + return fmt.Errorf("error updating EC2 VPC (%s): %w", d.Id(), err) +} +``` + +Code that also uses waiters or other operations that return errors should follow a similar pattern: + +```go +if _, err := waiter.VpcAvailable(conn, d.Id()); err != nil { + return fmt.Errorf("error waiting for EC2 VPC (%s) update: %w", d.Id(), err) +} +``` diff --git a/docs/contributing/pullrequest-submission-and-lifecycle.md b/docs/contributing/pullrequest-submission-and-lifecycle.md index 34f55c2b9f4..789e346b4d4 100644 --- a/docs/contributing/pullrequest-submission-and-lifecycle.md +++ b/docs/contributing/pullrequest-submission-and-lifecycle.md @@ -111,7 +111,9 @@ For greater detail, the following Go language resources provide common coding pr ### Resource Contribution Guidelines -The following resource checks need to be addressed before your contribution can be merged. The exclusion of any applicable check may result in a delayed time to merge. +The following resource checks need to be addressed before your contribution can be merged. The exclusion of any applicable check may result in a delayed time to merge. Some of these are not handled by the automated code testing that occurs during submission, so reviewers (even those outside the maintainers) are encouraged to reach out to contributors about any issues to save time. + +This Contribution Guide also includes separate sections on topics such as [Error Handling](error-handling.md), which also applies to contributions. - [ ] __Passes Testing__: All code and documentation changes must pass unit testing, code linting, and website link testing. Resource code changes must pass all acceptance testing for the resource. - [ ] __Avoids API Calls Across Account, Region, and Service Boundaries__: Resources should not implement cross-account, cross-region, or cross-service API calls. @@ -154,16 +156,12 @@ The following resource checks need to be addressed before your contribution can } ``` -- [ ] __Uses resource.NotFoundError__: Custom errors for missing resources should use [`resource.NotFoundError`](https://godoc.org/github.com/hashicorp/terraform/helper/resource#NotFoundError). - [ ] __Uses resource.UniqueId()__: API fields for concurrency protection such as `CallerReference` and `IdempotencyToken` should use [`resource.UniqueId()`](https://godoc.org/github.com/hashicorp/terraform/helper/resource#UniqueId). 
The implementation includes a monotonic counter which is safer for concurrent operations than solutions such as `time.Now()`. - [ ] __Skips id Attribute__: The `id` attribute is implicit for all Terraform resources and does not need to be defined in the schema. The below are style-based items that _may_ be noted during review and are recommended for simplicity, consistency, and quality assurance: - [ ] __Avoids CustomizeDiff__: Usage of `CustomizeDiff` is generally discouraged. -- [ ] __Implements Error Message Context__: Returning errors from resource `Create`, `Read`, `Update`, and `Delete` functions should include additional messaging about the location or cause of the error for operators and code maintainers by wrapping with [`fmt.Errorf()`](https://godoc.org/golang.org/x/exp/errors/fmt#Errorf). - - An example `Delete` API error: `return fmt.Errorf("error deleting {SERVICE} {THING} (%s): %w", d.Id(), err)` - - An example `d.Set()` error: `return fmt.Errorf("error setting {ATTRIBUTE}: %w", err)` - [ ] __Implements arn Attribute__: APIs that return an Amazon Resource Name (ARN) should implement `arn` as an attribute. Alternatively, the ARN can be synthesized using the AWS Go SDK [`arn.ARN`](https://docs.aws.amazon.com/sdk-for-go/api/aws/arn/#ARN) structure. For example: ```go @@ -182,38 +180,7 @@ The below are style-based items that _may_ be noted during review and are recomm When the `arn` attribute is synthesized this way, add the resource to the [list](https://www.terraform.io/docs/providers/aws/index.html#argument-reference) of those affected by the provider's `skip_requesting_account_id` attribute. - [ ] __Implements Warning Logging With Resource State Removal__: If a resource is removed outside of Terraform (e.g. via different tool, API, or web UI), `d.SetId("")` and `return nil` can be used in the resource `Read` function to trigger resource recreation. When this occurs, a warning log message should be printed beforehand: `log.Printf("[WARN] {SERVICE} {THING} (%s) not found, removing from state", d.Id())` -- [ ] __Uses Functions from aws-sdk-go-base/tfawserr with AWS Go SDK Error Objects__: Use the [`ErrCodeEquals(err error, code string)`](https://godoc.org/github.com/hashicorp/aws-sdk-go-base/tfawserr#ErrCodeEquals) and [`ErrMessageContains(err error, code string, message string)`](https://godoc.org/github.com/hashicorp/aws-sdk-go-base/tfawserr#ErrMessageContains) helper functions instead of the `awserr` package to compare error code and message contents. -- [ ] __Uses %s fmt Verb with AWS Go SDK Objects__: AWS Go SDK objects implement `String()` so using the `%v`, `%#v`, or `%+v` fmt verbs with the object are extraneous or provide unhelpful detail. - [ ] __Uses American English for Attribute Naming__: For any ambiguity with attribute naming, prefer American English over British English. e.g. `color` instead of `colour`. - [ ] __Skips Timestamp Attributes__: Generally, creation and modification dates from the API should be omitted from the schema. -- [ ] __Skips Error() Call with AWS Go SDK Error Objects__: Error objects do not need to have `Error()` called. -- [ ] __Adds Error Codes Missing from the AWS Go SDK to Internal Service Package__: If an AWS API error code is checked and the AWS Go SDK has no constant string value defined for that error code, a new constant should be added to a file named `errors.go` in a per-service internal package. For example: - -```go -// In `aws/internal/service/s3/errors.go`. 
- -package s3 - -const ( - ErrCodeNoSuchTagSet = "NoSuchTagSet" -) -``` - -```go -// Example usage. - -import ( - "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/aws-sdk-go-base/tfawserr" - tfs3 "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/s3" -) - -output, err := conn.GetBucketTagging(input) - -if tfawserr.ErrCodeEquals(err, tfs3.ErrCodeNoSuchTagSet) { - return nil -} -``` - - [ ] __Uses Paginated AWS Go SDK Functions When Iterating Over a Collection of Objects__: When the API for listing a collection of objects provides a paginated function, use it instead of looping until the next page token is not set. For example, with the EC2 API, [`DescribeInstancesPages`](https://docs.aws.amazon.com/sdk-for-go/api/service/ec2/#EC2.DescribeInstancesPages) should be used instead of [`DescribeInstances`](https://docs.aws.amazon.com/sdk-for-go/api/service/ec2/#EC2.DescribeInstances) when more than one result is expected. - [ ] __Adds Paginated Functions Missing from the AWS Go SDK to Internal Service Package__: If the AWS Go SDK does not define a paginated equivalent for a function to list a collection of objects, it should be added to a per-service internal package using the [`listpages` generator](../../aws/internal/generators/listpages/README.md). A support case should also be opened with AWS to have the paginated functions added to the AWS Go SDK. From eea0a4683dd84b365ec8b3fb301dcb189d2c0567 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 11 Jan 2021 11:48:16 -0800 Subject: [PATCH 0530/1212] Replaces expandStringList(x.List()) with expandStringSet(x) --- aws/structure.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/aws/structure.go b/aws/structure.go index 10a7f881773..ba374cef354 100644 --- a/aws/structure.go +++ b/aws/structure.go @@ -428,14 +428,14 @@ func expandOptionConfiguration(configured []interface{}) []*rds.OptionConfigurat } if raw, ok := data["db_security_group_memberships"]; ok { - memberships := expandStringList(raw.(*schema.Set).List()) + memberships := expandStringSet(raw.(*schema.Set)) if len(memberships) > 0 { o.DBSecurityGroupMemberships = memberships } } if raw, ok := data["vpc_security_group_memberships"]; ok { - memberships := expandStringList(raw.(*schema.Set).List()) + memberships := expandStringSet(raw.(*schema.Set)) if len(memberships) > 0 { o.VpcSecurityGroupMemberships = memberships } @@ -1509,10 +1509,10 @@ func expandESVPCOptions(m map[string]interface{}) *elasticsearch.VPCOptions { options := elasticsearch.VPCOptions{} if v, ok := m["security_group_ids"]; ok { - options.SecurityGroupIds = expandStringList(v.(*schema.Set).List()) + options.SecurityGroupIds = expandStringSet(v.(*schema.Set)) } if v, ok := m["subnet_ids"]; ok { - options.SubnetIds = expandStringList(v.(*schema.Set).List()) + options.SubnetIds = expandStringSet(v.(*schema.Set)) } return &options @@ -1531,7 +1531,7 @@ func expandConfigRecordingGroup(configured []interface{}) *configservice.Recordi } if v, ok := group["resource_types"]; ok { - recordingGroup.ResourceTypes = expandStringList(v.(*schema.Set).List()) + recordingGroup.ResourceTypes = expandStringSet(v.(*schema.Set)) } return &recordingGroup } @@ -2277,7 +2277,7 @@ func expandConfigRuleScope(l []interface{}) *configservice.Scope { if v, ok := configured["compliance_resource_types"]; ok { l := v.(*schema.Set) if l.Len() > 0 { - scope.ComplianceResourceTypes = expandStringList(l.List()) + scope.ComplianceResourceTypes = expandStringSet(l) } } if v, ok 
:= configured["tag_key"].(string); ok && v != "" { @@ -3631,7 +3631,7 @@ func expandMqUsers(cfg []interface{}) []*mq.User { user.ConsoleAccess = aws.Bool(v.(bool)) } if v, ok := u["groups"]; ok { - user.Groups = expandStringList(v.(*schema.Set).List()) + user.Groups = expandStringSet(v.(*schema.Set)) } users[i] = &user } @@ -4228,7 +4228,7 @@ func expandDynamoDbProjection(data map[string]interface{}) *dynamodb.Projection } if v, ok := data["non_key_attributes"].(*schema.Set); ok && v.Len() > 0 { - projection.NonKeyAttributes = expandStringList(v.List()) + projection.NonKeyAttributes = expandStringSet(v) } return projection @@ -4323,7 +4323,7 @@ func flattenDynamoDbTableItemAttributes(attrs map[string]*dynamodb.AttributeValu func expandIotThingTypeProperties(config map[string]interface{}) *iot.ThingTypeProperties { properties := &iot.ThingTypeProperties{ - SearchableAttributes: expandStringList(config["searchable_attributes"].(*schema.Set).List()), + SearchableAttributes: expandStringSet(config["searchable_attributes"].(*schema.Set)), } if v, ok := config["description"]; ok && v.(string) != "" { From e378231f29713142152584891efd28f732ab18a8 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Mon, 11 Jan 2021 16:36:20 -0500 Subject: [PATCH 0531/1212] fix wording in arg description --- website/docs/r/db_instance.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/db_instance.html.markdown b/website/docs/r/db_instance.html.markdown index b1494a295e8..ca827c86c82 100644 --- a/website/docs/r/db_instance.html.markdown +++ b/website/docs/r/db_instance.html.markdown @@ -120,7 +120,7 @@ For supported values, see the EngineVersion parameter in [API action CreateDBIns Note that for Amazon Aurora instances the engine version must match the [DB cluster](/docs/providers/aws/r/rds_cluster.html)'s engine version'. * `final_snapshot_identifier` - (Optional) The name of your final DB snapshot when this DB instance is deleted. Must be provided if `skip_final_snapshot` is -set to `false`. The value must begin with a letter, must contain alphanumeric characters and hyphens, and must not end with a hyphen or contain two consecutive hyphens. Must not be provided when deleting a read replica. +set to `false`. The value must begin with a letter, only contain alphanumeric characters and hyphens, and not end with a hyphen or contain two consecutive hyphens. Must not be provided when deleting a read replica. * `iam_database_authentication_enabled` - (Optional) Specifies whether or mappings of AWS Identity and Access Management (IAM) accounts to database accounts is enabled. 
From 0a8302e684c886d716d7a99f8ac3d944a92c4ca6 Mon Sep 17 00:00:00 2001 From: angie pinilla Date: Mon, 11 Jan 2021 16:48:22 -0500 Subject: [PATCH 0532/1212] Update CHANGELOG for #16884 --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 401109e1242..fbe80a4f63f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,10 @@ FEATURES * **New Resource:** `aws_fms_policy` ([#9594](https://github.com/hashicorp/terraform-provider-aws/issues/9594)) +BUG FIXES + +* resource/aws_networkfirewall_rule_group: Prevent resource recreation due to `stateful_rule` changes after creation [GH-16884] + ## 3.23.0 (January 08, 2021) FEATURES From 2a47cbedf3ac75394af0a3da1b0dac5f11e21413 Mon Sep 17 00:00:00 2001 From: angie pinilla Date: Mon, 11 Jan 2021 16:59:01 -0500 Subject: [PATCH 0533/1212] Update CHANGELOG for #16885 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fbe80a4f63f..ada1aff3502 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ FEATURES BUG FIXES +* resource/aws_db_instance: Correctly validate `final_snapshot_identifier` argument at plan-time [GH-16885] * resource/aws_networkfirewall_rule_group: Prevent resource recreation due to `stateful_rule` changes after creation [GH-16884] ## 3.23.0 (January 08, 2021) FEATURES From 8aa6d4459f93cac4c3f48581cd6dc071407f0c0c Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sat, 9 Jan 2021 17:56:55 -0500 Subject: [PATCH 0534/1212] r/aws_apigatewayv2_integration: Data mapping in HTTP APIs. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSAPIGatewayV2Integration_' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccAWSAPIGatewayV2Integration_ -timeout 120m === RUN TestAccAWSAPIGatewayV2Integration_basicWebSocket === PAUSE TestAccAWSAPIGatewayV2Integration_basicWebSocket === RUN TestAccAWSAPIGatewayV2Integration_basicHttp === PAUSE TestAccAWSAPIGatewayV2Integration_basicHttp === RUN TestAccAWSAPIGatewayV2Integration_disappears === PAUSE TestAccAWSAPIGatewayV2Integration_disappears === RUN TestAccAWSAPIGatewayV2Integration_DataMappingHttp === PAUSE TestAccAWSAPIGatewayV2Integration_DataMappingHttp === RUN TestAccAWSAPIGatewayV2Integration_IntegrationTypeHttp === PAUSE TestAccAWSAPIGatewayV2Integration_IntegrationTypeHttp === RUN TestAccAWSAPIGatewayV2Integration_LambdaWebSocket === PAUSE TestAccAWSAPIGatewayV2Integration_LambdaWebSocket === RUN TestAccAWSAPIGatewayV2Integration_LambdaHttp === PAUSE TestAccAWSAPIGatewayV2Integration_LambdaHttp === RUN TestAccAWSAPIGatewayV2Integration_VpcLinkWebSocket === PAUSE TestAccAWSAPIGatewayV2Integration_VpcLinkWebSocket === RUN TestAccAWSAPIGatewayV2Integration_VpcLinkHttp === PAUSE TestAccAWSAPIGatewayV2Integration_VpcLinkHttp === RUN TestAccAWSAPIGatewayV2Integration_AwsServiceIntegration === PAUSE TestAccAWSAPIGatewayV2Integration_AwsServiceIntegration === CONT TestAccAWSAPIGatewayV2Integration_basicWebSocket === CONT TestAccAWSAPIGatewayV2Integration_LambdaHttp === CONT TestAccAWSAPIGatewayV2Integration_AwsServiceIntegration === CONT TestAccAWSAPIGatewayV2Integration_VpcLinkHttp === CONT TestAccAWSAPIGatewayV2Integration_VpcLinkWebSocket === CONT TestAccAWSAPIGatewayV2Integration_DataMappingHttp === CONT TestAccAWSAPIGatewayV2Integration_LambdaWebSocket === CONT TestAccAWSAPIGatewayV2Integration_IntegrationTypeHttp === CONT TestAccAWSAPIGatewayV2Integration_disappears === CONT TestAccAWSAPIGatewayV2Integration_basicHttp ---
PASS: TestAccAWSAPIGatewayV2Integration_disappears (29.08s) --- PASS: TestAccAWSAPIGatewayV2Integration_basicWebSocket (30.68s) --- PASS: TestAccAWSAPIGatewayV2Integration_basicHttp (32.61s) --- PASS: TestAccAWSAPIGatewayV2Integration_IntegrationTypeHttp (46.05s) --- PASS: TestAccAWSAPIGatewayV2Integration_DataMappingHttp (48.71s) --- PASS: TestAccAWSAPIGatewayV2Integration_AwsServiceIntegration (48.96s) --- PASS: TestAccAWSAPIGatewayV2Integration_LambdaWebSocket (49.47s) --- PASS: TestAccAWSAPIGatewayV2Integration_LambdaHttp (53.71s) --- PASS: TestAccAWSAPIGatewayV2Integration_VpcLinkHttp (412.36s) --- PASS: TestAccAWSAPIGatewayV2Integration_VpcLinkWebSocket (718.91s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 719.017s --- aws/resource_aws_apigatewayv2_integration.go | 99 +++++++++++ ...ource_aws_apigatewayv2_integration_test.go | 165 ++++++++++++++++++ .../r/apigatewayv2_integration.html.markdown | 15 +- 3 files changed, 276 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_apigatewayv2_integration.go b/aws/resource_aws_apigatewayv2_integration.go index be5ed63328e..5c7a517bd65 100644 --- a/aws/resource_aws_apigatewayv2_integration.go +++ b/aws/resource_aws_apigatewayv2_integration.go @@ -119,6 +119,25 @@ func resourceAwsApiGatewayV2Integration() *schema.Resource { // Length between [0-32768]. Elem: &schema.Schema{Type: schema.TypeString}, }, + "response_parameters": { + Type: schema.TypeSet, + Optional: true, + MinItems: 0, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mappings": { + Type: schema.TypeMap, + Required: true, + // Length between [1-512]. + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "status_code": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, "template_selection_expression": { Type: schema.TypeString, Optional: true, @@ -189,6 +208,9 @@ func resourceAwsApiGatewayV2IntegrationCreate(d *schema.ResourceData, meta inter if v, ok := d.GetOk("request_templates"); ok { req.RequestTemplates = stringMapToPointers(v.(map[string]interface{})) } + if v, ok := d.GetOk("response_parameters"); ok && v.(*schema.Set).Len() > 0 { + req.ResponseParameters = expandApiGateway2IntegrationResponseParameters(v.(*schema.Set).List()) + } if v, ok := d.GetOk("template_selection_expression"); ok { req.TemplateSelectionExpression = aws.String(v.(string)) } @@ -246,6 +268,10 @@ func resourceAwsApiGatewayV2IntegrationRead(d *schema.ResourceData, meta interfa if err != nil { return fmt.Errorf("error setting request_templates: %s", err) } + err = d.Set("response_parameters", flattenApiGateway2IntegrationResponseParameters(resp.ResponseParameters)) + if err != nil { + return fmt.Errorf("error setting response_parameters: %s", err) + } d.Set("template_selection_expression", resp.TemplateSelectionExpression) d.Set("timeout_milliseconds", resp.TimeoutInMillis) if err := d.Set("tls_config", flattenApiGateway2TlsConfig(resp.TlsConfig)); err != nil { @@ -298,6 +324,7 @@ func resourceAwsApiGatewayV2IntegrationUpdate(d *schema.ResourceData, meta inter if d.HasChange("request_parameters") { o, n := d.GetChange("request_parameters") add, del, nop := diffStringMaps(o.(map[string]interface{}), n.(map[string]interface{})) + // Parameters are removed by setting the associated value to "". 
for k := range del { del[k] = aws.String("") @@ -311,11 +338,36 @@ func resourceAwsApiGatewayV2IntegrationUpdate(d *schema.ResourceData, meta inter for k, v := range nop { variables[k] = v } + req.RequestParameters = variables } if d.HasChange("request_templates") { req.RequestTemplates = stringMapToPointers(d.Get("request_templates").(map[string]interface{})) } + if d.HasChange("response_parameters") { + o, n := d.GetChange("response_parameters") + os := o.(*schema.Set) + ns := n.(*schema.Set) + del := os.Difference(ns).List() + + req.ResponseParameters = expandApiGateway2IntegrationResponseParameters(ns.List()) + + // Parameters are removed by setting the associated value to {}. + for _, tfMapRaw := range del { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + if v, ok := tfMap["status_code"].(string); ok && v != "" { + if req.ResponseParameters == nil { + req.ResponseParameters = map[string]map[string]*string{} + } + req.ResponseParameters[v] = map[string]*string{} + } + } + } if d.HasChange("template_selection_expression") { req.TemplateSelectionExpression = aws.String(d.Get("template_selection_expression").(string)) } @@ -406,3 +458,50 @@ func flattenApiGateway2TlsConfig(config *apigatewayv2.TlsConfig) []interface{} { "server_name_to_verify": aws.StringValue(config.ServerNameToVerify), }} } + +func expandApiGateway2IntegrationResponseParameters(tfList []interface{}) map[string]map[string]*string { + if len(tfList) == 0 { + return nil + } + + responseParameters := map[string]map[string]*string{} + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + if vStatusCode, ok := tfMap["status_code"].(string); ok && vStatusCode != "" { + if v, ok := tfMap["mappings"].(map[string]interface{}); ok && len(v) > 0 { + responseParameters[vStatusCode] = stringMapToPointers(v) + } + } + } + + return responseParameters +} + +func flattenApiGateway2IntegrationResponseParameters(responseParameters map[string]map[string]*string) []interface{} { + if len(responseParameters) == 0 { + return nil + } + + var tfList []interface{} + + for statusCode, mappings := range responseParameters { + if len(mappings) == 0 { + continue + } + + tfMap := map[string]interface{}{} + + tfMap["status_code"] = statusCode + tfMap["mappings"] = aws.StringValueMap(mappings) + + tfList = append(tfList, tfMap) + } + + return tfList +} diff --git a/aws/resource_aws_apigatewayv2_integration_test.go b/aws/resource_aws_apigatewayv2_integration_test.go index b70a53689c4..2b0248cc96d 100644 --- a/aws/resource_aws_apigatewayv2_integration_test.go +++ b/aws/resource_aws_apigatewayv2_integration_test.go @@ -40,6 +40,7 @@ func TestAccAWSAPIGatewayV2Integration_basicWebSocket(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "payload_format_version", "1.0"), resource.TestCheckResourceAttr(resourceName, "request_parameters.%", "0"), resource.TestCheckResourceAttr(resourceName, "request_templates.%", "0"), + resource.TestCheckResourceAttr(resourceName, "response_parameters.#", "0"), resource.TestCheckResourceAttr(resourceName, "template_selection_expression", ""), resource.TestCheckResourceAttr(resourceName, "timeout_milliseconds", "29000"), resource.TestCheckResourceAttr(resourceName, "tls_config.#", "0"), @@ -82,9 +83,12 @@ func TestAccAWSAPIGatewayV2Integration_basicHttp(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "integration_uri", "https://example.com"), resource.TestCheckResourceAttr(resourceName, "passthrough_behavior", 
""), resource.TestCheckResourceAttr(resourceName, "payload_format_version", "1.0"), + resource.TestCheckResourceAttr(resourceName, "request_parameters.%", "0"), resource.TestCheckResourceAttr(resourceName, "request_templates.%", "0"), + resource.TestCheckResourceAttr(resourceName, "response_parameters.#", "0"), resource.TestCheckResourceAttr(resourceName, "template_selection_expression", ""), resource.TestCheckResourceAttr(resourceName, "timeout_milliseconds", "30000"), + resource.TestCheckResourceAttr(resourceName, "tls_config.#", "0"), ), }, { @@ -120,6 +124,96 @@ func TestAccAWSAPIGatewayV2Integration_disappears(t *testing.T) { }) } +func TestAccAWSAPIGatewayV2Integration_DataMappingHttp(t *testing.T) { + var apiId string + var v apigatewayv2.GetIntegrationOutput + resourceName := "aws_apigatewayv2_integration.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayV2IntegrationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayV2IntegrationConfig_dataMappingHttp(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayV2IntegrationExists(resourceName, &apiId, &v), + resource.TestCheckResourceAttr(resourceName, "connection_id", ""), + resource.TestCheckResourceAttr(resourceName, "connection_type", "INTERNET"), + resource.TestCheckResourceAttr(resourceName, "content_handling_strategy", ""), + resource.TestCheckResourceAttr(resourceName, "credentials_arn", ""), + resource.TestCheckResourceAttr(resourceName, "description", ""), + resource.TestCheckResourceAttr(resourceName, "integration_method", "ANY"), + resource.TestCheckResourceAttr(resourceName, "integration_response_selection_expression", ""), + resource.TestCheckResourceAttr(resourceName, "integration_subtype", ""), + resource.TestCheckResourceAttr(resourceName, "integration_type", "HTTP_PROXY"), + resource.TestCheckResourceAttr(resourceName, "integration_uri", "http://www.example.com"), + resource.TestCheckResourceAttr(resourceName, "passthrough_behavior", ""), + resource.TestCheckResourceAttr(resourceName, "payload_format_version", "1.0"), + resource.TestCheckResourceAttr(resourceName, "request_parameters.%", "2"), + resource.TestCheckResourceAttr(resourceName, "request_parameters.append:header.header1", "$context.requestId"), + resource.TestCheckResourceAttr(resourceName, "request_parameters.remove:querystring.qs1", "''"), + resource.TestCheckResourceAttr(resourceName, "request_templates.%", "0"), + resource.TestCheckResourceAttr(resourceName, "response_parameters.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "response_parameters.*", map[string]string{ + "status_code": "500", + "mappings.%": "2", + "mappings.append:header.header1": "$context.requestId", + "mappings.overwrite:statuscode": "403", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "response_parameters.*", map[string]string{ + "status_code": "404", + "mappings.%": "1", + "mappings.append:header.error": "$stageVariables.environmentId", + }), + resource.TestCheckResourceAttr(resourceName, "template_selection_expression", ""), + resource.TestCheckResourceAttr(resourceName, "timeout_milliseconds", "30000"), + resource.TestCheckResourceAttr(resourceName, "tls_config.#", "0"), + ), + }, + { + Config: testAccAWSAPIGatewayV2IntegrationConfig_dataMappingHttpUpdated(rName), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckAWSAPIGatewayV2IntegrationExists(resourceName, &apiId, &v), + resource.TestCheckResourceAttr(resourceName, "connection_id", ""), + resource.TestCheckResourceAttr(resourceName, "connection_type", "INTERNET"), + resource.TestCheckResourceAttr(resourceName, "content_handling_strategy", ""), + resource.TestCheckResourceAttr(resourceName, "credentials_arn", ""), + resource.TestCheckResourceAttr(resourceName, "description", ""), + resource.TestCheckResourceAttr(resourceName, "integration_method", "ANY"), + resource.TestCheckResourceAttr(resourceName, "integration_response_selection_expression", ""), + resource.TestCheckResourceAttr(resourceName, "integration_subtype", ""), + resource.TestCheckResourceAttr(resourceName, "integration_type", "HTTP_PROXY"), + resource.TestCheckResourceAttr(resourceName, "integration_uri", "http://www.example.com"), + resource.TestCheckResourceAttr(resourceName, "passthrough_behavior", ""), + resource.TestCheckResourceAttr(resourceName, "payload_format_version", "1.0"), + resource.TestCheckResourceAttr(resourceName, "request_parameters.%", "2"), + resource.TestCheckResourceAttr(resourceName, "request_parameters.append:header.header1", "$context.accountId"), + resource.TestCheckResourceAttr(resourceName, "request_parameters.overwrite:header.header2", "$stageVariables.environmentId"), + resource.TestCheckResourceAttr(resourceName, "request_templates.%", "0"), + resource.TestCheckResourceAttr(resourceName, "response_parameters.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "response_parameters.*", map[string]string{ + "status_code": "500", + "mappings.%": "2", + "mappings.append:header.header1": "$context.requestId", + "mappings.overwrite:statuscode": "403", + }), + resource.TestCheckResourceAttr(resourceName, "template_selection_expression", ""), + resource.TestCheckResourceAttr(resourceName, "timeout_milliseconds", "30000"), + resource.TestCheckResourceAttr(resourceName, "tls_config.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportStateIdFunc: testAccAWSAPIGatewayV2IntegrationImportStateIdFunc(resourceName), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAWSAPIGatewayV2Integration_IntegrationTypeHttp(t *testing.T) { var apiId string var v apigatewayv2.GetIntegrationOutput @@ -151,6 +245,7 @@ func TestAccAWSAPIGatewayV2Integration_IntegrationTypeHttp(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "request_parameters.integration.request.querystring.stage", "'value1'"), resource.TestCheckResourceAttr(resourceName, "request_templates.%", "1"), resource.TestCheckResourceAttr(resourceName, "request_templates.application/json", ""), + resource.TestCheckResourceAttr(resourceName, "response_parameters.#", "0"), resource.TestCheckResourceAttr(resourceName, "template_selection_expression", "$request.body.name"), resource.TestCheckResourceAttr(resourceName, "timeout_milliseconds", "28999"), resource.TestCheckResourceAttr(resourceName, "tls_config.#", "0"), @@ -178,6 +273,7 @@ func TestAccAWSAPIGatewayV2Integration_IntegrationTypeHttp(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "request_templates.%", "2"), resource.TestCheckResourceAttr(resourceName, "request_templates.application/json", "#set($number=42)"), resource.TestCheckResourceAttr(resourceName, "request_templates.application/xml", "#set($percent=$number/100)"), + resource.TestCheckResourceAttr(resourceName, "response_parameters.#", "0"), resource.TestCheckResourceAttr(resourceName, "template_selection_expression", 
"$request.body.id"), resource.TestCheckResourceAttr(resourceName, "timeout_milliseconds", "51"), resource.TestCheckResourceAttr(resourceName, "tls_config.#", "0"), @@ -222,6 +318,7 @@ func TestAccAWSAPIGatewayV2Integration_LambdaWebSocket(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "payload_format_version", "1.0"), resource.TestCheckResourceAttr(resourceName, "request_parameters.%", "0"), resource.TestCheckResourceAttr(resourceName, "request_templates.%", "0"), + resource.TestCheckResourceAttr(resourceName, "response_parameters.#", "0"), resource.TestCheckResourceAttr(resourceName, "template_selection_expression", ""), resource.TestCheckResourceAttr(resourceName, "timeout_milliseconds", "29000"), resource.TestCheckResourceAttr(resourceName, "tls_config.#", "0"), @@ -266,6 +363,7 @@ func TestAccAWSAPIGatewayV2Integration_LambdaHttp(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "payload_format_version", "2.0"), resource.TestCheckResourceAttr(resourceName, "request_parameters.%", "0"), resource.TestCheckResourceAttr(resourceName, "request_templates.%", "0"), + resource.TestCheckResourceAttr(resourceName, "response_parameters.#", "0"), resource.TestCheckResourceAttr(resourceName, "template_selection_expression", ""), resource.TestCheckResourceAttr(resourceName, "timeout_milliseconds", "30000"), resource.TestCheckResourceAttr(resourceName, "tls_config.#", "0"), @@ -311,6 +409,7 @@ func TestAccAWSAPIGatewayV2Integration_VpcLinkWebSocket(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "payload_format_version", "1.0"), resource.TestCheckResourceAttr(resourceName, "request_parameters.%", "0"), resource.TestCheckResourceAttr(resourceName, "request_templates.%", "0"), + resource.TestCheckResourceAttr(resourceName, "response_parameters.#", "0"), resource.TestCheckResourceAttr(resourceName, "template_selection_expression", ""), resource.TestCheckResourceAttr(resourceName, "timeout_milliseconds", "12345"), resource.TestCheckResourceAttr(resourceName, "tls_config.#", "0"), @@ -355,7 +454,9 @@ func TestAccAWSAPIGatewayV2Integration_VpcLinkHttp(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "integration_uri", lbListenerResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "passthrough_behavior", ""), resource.TestCheckResourceAttr(resourceName, "payload_format_version", "1.0"), + resource.TestCheckResourceAttr(resourceName, "request_parameters.%", "0"), resource.TestCheckResourceAttr(resourceName, "request_templates.%", "0"), + resource.TestCheckResourceAttr(resourceName, "response_parameters.#", "0"), resource.TestCheckResourceAttr(resourceName, "template_selection_expression", ""), resource.TestCheckResourceAttr(resourceName, "timeout_milliseconds", "29001"), resource.TestCheckResourceAttr(resourceName, "tls_config.#", "1"), @@ -384,7 +485,9 @@ func TestAccAWSAPIGatewayV2Integration_VpcLinkHttp(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "integration_uri", lbListenerResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "passthrough_behavior", ""), resource.TestCheckResourceAttr(resourceName, "payload_format_version", "1.0"), + resource.TestCheckResourceAttr(resourceName, "request_parameters.%", "0"), resource.TestCheckResourceAttr(resourceName, "request_templates.%", "0"), + resource.TestCheckResourceAttr(resourceName, "response_parameters.#", "0"), resource.TestCheckResourceAttr(resourceName, "template_selection_expression", ""), resource.TestCheckResourceAttr(resourceName, 
"timeout_milliseconds", "29001"), resource.TestCheckResourceAttr(resourceName, "tls_config.#", "1"), @@ -435,6 +538,7 @@ func TestAccAWSAPIGatewayV2Integration_AwsServiceIntegration(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "request_parameters.MessageGroupId", "$request.body.authentication_key"), resource.TestCheckResourceAttrPair(resourceName, "request_parameters.QueueUrl", sqsQueue1ResourceName, "id"), resource.TestCheckResourceAttr(resourceName, "request_templates.%", "0"), + resource.TestCheckResourceAttr(resourceName, "response_parameters.#", "0"), resource.TestCheckResourceAttr(resourceName, "template_selection_expression", ""), resource.TestCheckResourceAttr(resourceName, "timeout_milliseconds", "30000"), resource.TestCheckResourceAttr(resourceName, "tls_config.#", "0"), @@ -460,6 +564,7 @@ func TestAccAWSAPIGatewayV2Integration_AwsServiceIntegration(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "request_parameters.MessageGroupId", "$request.body.authentication_key"), resource.TestCheckResourceAttrPair(resourceName, "request_parameters.QueueUrl", sqsQueue2ResourceName, "id"), resource.TestCheckResourceAttr(resourceName, "request_templates.%", "0"), + resource.TestCheckResourceAttr(resourceName, "response_parameters.#", "0"), resource.TestCheckResourceAttr(resourceName, "template_selection_expression", ""), resource.TestCheckResourceAttr(resourceName, "timeout_milliseconds", "30000"), resource.TestCheckResourceAttr(resourceName, "tls_config.#", "0"), @@ -681,6 +786,66 @@ resource "aws_apigatewayv2_integration" "test" { ` } +func testAccAWSAPIGatewayV2IntegrationConfig_dataMappingHttp(rName string) string { + return testAccAWSAPIGatewayV2IntegrationConfig_apiHttp(rName) + ` +resource "aws_apigatewayv2_integration" "test" { + api_id = aws_apigatewayv2_api.test.id + + integration_type = "HTTP_PROXY" + integration_method = "ANY" + integration_uri = "http://www.example.com" + + request_parameters = { + "append:header.header1" = "$context.requestId" + "remove:querystring.qs1" = "''" + } + + response_parameters { + status_code = "500" + + mappings = { + "append:header.header1" = "$context.requestId" + "overwrite:statuscode" = "403" + } + } + + response_parameters { + status_code = "404" + + mappings = { + "append:header.error" = "$stageVariables.environmentId" + } + } +} +` +} + +func testAccAWSAPIGatewayV2IntegrationConfig_dataMappingHttpUpdated(rName string) string { + return testAccAWSAPIGatewayV2IntegrationConfig_apiHttp(rName) + ` +resource "aws_apigatewayv2_integration" "test" { + api_id = aws_apigatewayv2_api.test.id + + integration_type = "HTTP_PROXY" + integration_method = "ANY" + integration_uri = "http://www.example.com" + + request_parameters = { + "append:header.header1" = "$context.accountId" + "overwrite:header.header2" = "$stageVariables.environmentId" + } + + response_parameters { + status_code = "500" + + mappings = { + "append:header.header1" = "$context.requestId" + "overwrite:statuscode" = "403" + } + } +} +` +} + func testAccAWSAPIGatewayV2IntegrationConfig_integrationTypeHttp(rName string) string { return testAccAWSAPIGatewayV2IntegrationConfig_apiWebSocket(rName) + ` resource "aws_apigatewayv2_integration" "test" { diff --git a/website/docs/r/apigatewayv2_integration.html.markdown b/website/docs/r/apigatewayv2_integration.html.markdown index 2801f6e7d20..5cd30429334 100644 --- a/website/docs/r/apigatewayv2_integration.html.markdown +++ b/website/docs/r/apigatewayv2_integration.html.markdown @@ -82,15 +82,24 @@ For an `HTTP` 
integration, specify a fully-qualified URL. For an HTTP API privat * `passthrough_behavior` - (Optional) The pass-through behavior for incoming requests based on the Content-Type header in the request, and the available mapping templates specified as the `request_templates` attribute. Valid values: `WHEN_NO_MATCH`, `WHEN_NO_TEMPLATES`, `NEVER`. Default is `WHEN_NO_MATCH`. Supported only for WebSocket APIs. * `payload_format_version` - (Optional) The [format of the payload](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-develop-integrations-lambda.html#http-api-develop-integrations-lambda.proxy-format) sent to an integration. Valid values: `1.0`, `2.0`. Default is `1.0`. -* `request_parameters` - (Optional) A key-value map specifying request parameters that are passed from the method request to the backend. -Supported only for WebSocket APIs. -* `request_templates` - (Optional) A map of Velocity templates that are applied on the request payload based on the value of the Content-Type header sent by the client. Supported only for WebSocket APIs. +* `request_parameters` - (Optional) For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. +For HTTP APIs with a specified `integration_subtype`, a key-value map specifying parameters that are passed to `AWS_PROXY` integrations. +For HTTP APIs without a specified `integration_subtype`, a key-value map specifying how to transform HTTP requests before sending them to the backend. +See the [Amazon API Gateway Developer Guide](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-parameter-mapping.html) for details. +* `request_templates` - (Optional) A map of [Velocity](https://velocity.apache.org/) templates that are applied on the request payload based on the value of the Content-Type header sent by the client. Supported only for WebSocket APIs. +* `response_parameters` - (Optional) Mappings to transform the HTTP response from a backend integration before returning the response to clients. Supported only for HTTP APIs. * `template_selection_expression` - (Optional) The [template selection expression](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-websocket-api-selection-expressions.html#apigateway-websocket-api-template-selection-expressions) for the integration. * `timeout_milliseconds` - (Optional) Custom timeout between 50 and 29,000 milliseconds for WebSocket APIs and between 50 and 30,000 milliseconds for HTTP APIs. The default timeout is 29 seconds for WebSocket APIs and 30 seconds for HTTP APIs. Terraform will only perform drift detection of its value when present in a configuration. * `tls_config` - (Optional) The TLS configuration for a private integration. Supported only for HTTP APIs. +The `response_parameters` object supports the following: + +* `status_code` - (Required) The HTTP status code in the range 200-599. +* `mappings` - (Required) A key-value map. The key of this map identifies the location of the response parameter to change, and how to change it. The corresponding value specifies the new data for the parameter. +See the [Amazon API Gateway Developer Guide](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-parameter-mapping.html) for details. + The `tls_config` object supports the following: * `server_name_to_verify` - (Optional) If you specify a server name, API Gateway uses it to verify the hostname on the integration's certificate. The server name is also included in the TLS handshake to support Server Name Indication (SNI) or virtual hosting.
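Putting the new arguments together: the sketch below, written in the style of the acceptance-test configs earlier in this patch, shows `request_parameters` and `response_parameters` used on one HTTP API integration. Names are illustrative, and an `aws_apigatewayv2_api.test` resource is assumed to be defined alongside it, as in the tests above.

```go
// A usage sketch, not part of the patch: composes the documented arguments
// into a single HTTP_PROXY integration with request and response mappings.
func testAccAWSAPIGatewayV2IntegrationConfig_docSketch() string {
	return `
resource "aws_apigatewayv2_integration" "example" {
  api_id             = aws_apigatewayv2_api.test.id
  integration_type   = "HTTP_PROXY"
  integration_method = "GET"
  integration_uri    = "https://example.com"

  # Transform the request before it is sent to the backend.
  request_parameters = {
    "append:header.x-request-id" = "$context.requestId"
  }

  # Rewrite the status code of backend 500 responses before they
  # reach the client.
  response_parameters {
    status_code = "500"

    mappings = {
      "overwrite:statuscode" = "403"
    }
  }
}
`
}
```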
From 60cfe4d603014dd8c5e71f8c52f8d46f110b3b65 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Tue, 5 Jan 2021 17:16:37 -0500 Subject: [PATCH 0535/1212] tests/resource/efs_file_system: Fix hardcoded region --- aws/resource_aws_efs_file_system_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_efs_file_system_test.go b/aws/resource_aws_efs_file_system_test.go index 0a8d096fc42..9288e76392d 100644 --- a/aws/resource_aws_efs_file_system_test.go +++ b/aws/resource_aws_efs_file_system_test.go @@ -657,7 +657,7 @@ func testAccAWSEFSFileSystemConfigPagedTags(rInt int) string { return fmt.Sprintf(` resource "aws_efs_file_system" "test" { tags = { - Name = "test-efs-%d" + Name = "test-efs-%[1]d" Another = "tag" Test = "yes" User = "root" @@ -667,10 +667,10 @@ resource "aws_efs_file_system" "test" { AcceptanceTest = "PagedTags" CreationToken = "radek" PerfMode = "max" - Region = "us-west-2" + Region = %[2]q } } -`, rInt) +`, rInt, testAccGetRegion()) } func testAccAWSEFSFileSystemConfigWithMaxTags(rName string) string { From baeaf1f9a2f01f6b4b74faa1e89df5c2d0c12d46 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Mon, 11 Jan 2021 17:37:30 -0500 Subject: [PATCH 0536/1212] tests/efs_file_system: Remove arbitrary hardcoded region tag --- aws/resource_aws_efs_file_system_test.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_efs_file_system_test.go b/aws/resource_aws_efs_file_system_test.go index 9288e76392d..250fbe618ef 100644 --- a/aws/resource_aws_efs_file_system_test.go +++ b/aws/resource_aws_efs_file_system_test.go @@ -192,7 +192,7 @@ func TestAccAWSEFSFileSystem_pagedTags(t *testing.T) { Config: testAccAWSEFSFileSystemConfigPagedTags(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckEfsFileSystem(resourceName, &desc), - resource.TestCheckResourceAttr(resourceName, "tags.%", "11"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "10"), ), }, { @@ -667,10 +667,9 @@ resource "aws_efs_file_system" "test" { AcceptanceTest = "PagedTags" CreationToken = "radek" PerfMode = "max" - Region = %[2]q } } -`, rInt, testAccGetRegion()) +`, rInt) } func testAccAWSEFSFileSystemConfigWithMaxTags(rName string) string { From 8357ca731376008b324c20d053bb183c896167f8 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Tue, 5 Jan 2021 14:54:06 -0500 Subject: [PATCH 0537/1212] resource/config_aggregate_authorization: Fix hardcoded regions --- ...urce_aws_config_aggregate_authorization.go | 2 +- ...aws_config_aggregate_authorization_test.go | 26 +++++++++---------- 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/aws/resource_aws_config_aggregate_authorization.go b/aws/resource_aws_config_aggregate_authorization.go index 463f03e0d09..ac70ad3a7a3 100644 --- a/aws/resource_aws_config_aggregate_authorization.go +++ b/aws/resource_aws_config_aggregate_authorization.go @@ -168,7 +168,7 @@ func describeConfigAggregateAuthorizations(conn *configservice.ConfigService) ([ func resourceAwsConfigAggregateAuthorizationParseID(id string) (string, string, error) { idParts := strings.Split(id, ":") if len(idParts) != 2 { - return "", "", fmt.Errorf("Please make sure the ID is in the form account_id:region (i.e. 123456789012:us-east-1") + return "", "", fmt.Errorf("Please make sure the ID is in the form account_id:region (i.e.
123456789012:us-east-1") // lintignore:AWSAT003 } accountId := idParts[0] region := idParts[1] diff --git a/aws/resource_aws_config_aggregate_authorization_test.go b/aws/resource_aws_config_aggregate_authorization_test.go index 077ed66c9ea..becb0760eb5 100644 --- a/aws/resource_aws_config_aggregate_authorization_test.go +++ b/aws/resource_aws_config_aggregate_authorization_test.go @@ -61,19 +61,17 @@ func TestAccAWSConfigAggregateAuthorization_basic(t *testing.T) { rString := acctest.RandStringFromCharSet(12, "0123456789") resourceName := "aws_config_aggregate_authorization.example" - region := "eu-west-1" - resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSConfigAggregateAuthorizationDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSConfigAggregateAuthorizationConfig_basic(rString), + Config: testAccAWSConfigAggregateAuthorizationConfig_basic(rString, testAccGetAlternateRegion()), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "account_id", rString), - resource.TestCheckResourceAttr(resourceName, "region", region), - testAccMatchResourceAttrRegionalARN(resourceName, "arn", "config", regexp.MustCompile(fmt.Sprintf(`aggregation-authorization/%s/%s$`, rString, region))), + resource.TestCheckResourceAttr(resourceName, "region", testAccGetAlternateRegion()), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "config", regexp.MustCompile(fmt.Sprintf(`aggregation-authorization/%s/%s$`, rString, testAccGetAlternateRegion()))), ), }, { @@ -95,7 +93,7 @@ func TestAccAWSConfigAggregateAuthorization_tags(t *testing.T) { CheckDestroy: testAccCheckAWSConfigAggregateAuthorizationDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSConfigAggregateAuthorizationConfig_tags(rString, "foo", "bar", "fizz", "buzz"), + Config: testAccAWSConfigAggregateAuthorizationConfig_tags(rString, "foo", "bar", "fizz", "buzz", testAccGetAlternateRegion()), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "tags.%", "3"), resource.TestCheckResourceAttr(resourceName, "tags.Name", rString), @@ -104,7 +102,7 @@ func TestAccAWSConfigAggregateAuthorization_tags(t *testing.T) { ), }, { - Config: testAccAWSConfigAggregateAuthorizationConfig_tags(rString, "foo", "bar2", "fizz2", "buzz2"), + Config: testAccAWSConfigAggregateAuthorizationConfig_tags(rString, "foo", "bar2", "fizz2", "buzz2", testAccGetAlternateRegion()), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "tags.%", "3"), resource.TestCheckResourceAttr(resourceName, "tags.Name", rString), @@ -118,7 +116,7 @@ func TestAccAWSConfigAggregateAuthorization_tags(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAWSConfigAggregateAuthorizationConfig_basic(rString), + Config: testAccAWSConfigAggregateAuthorizationConfig_basic(rString, testAccGetAlternateRegion()), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), @@ -156,20 +154,20 @@ func testAccCheckAWSConfigAggregateAuthorizationDestroy(s *terraform.State) erro return nil } -func testAccAWSConfigAggregateAuthorizationConfig_basic(rString string) string { +func testAccAWSConfigAggregateAuthorizationConfig_basic(rString, region string) string { return fmt.Sprintf(` resource "aws_config_aggregate_authorization" "example" { account_id = %[1]q - region = "eu-west-1" + region = %[2]q } -`, rString) +`, rString, region) } -func 
testAccAWSConfigAggregateAuthorizationConfig_tags(rString, tagKey1, tagValue1, tagKey2, tagValue2 string) string { +func testAccAWSConfigAggregateAuthorizationConfig_tags(rString, tagKey1, tagValue1, tagKey2, tagValue2, region string) string { return fmt.Sprintf(` resource "aws_config_aggregate_authorization" "example" { account_id = %[1]q - region = "eu-west-1" + region = %[6]q tags = { Name = %[1]q @@ -178,5 +176,5 @@ resource "aws_config_aggregate_authorization" "example" { %[4]s = %[5]q } } -`, rString, tagKey1, tagValue1, tagKey2, tagValue2) +`, rString, tagKey1, tagValue1, tagKey2, tagValue2, region) } From 4bc887d2de421caaf6c0d60b2365e9ea148be1a0 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 11 Jan 2021 15:05:54 -0800 Subject: [PATCH 0538/1212] Updates to aws_elasticache_cluster --- aws/resource_aws_elasticache_cluster.go | 13 +++- aws/resource_aws_elasticache_cluster_test.go | 63 +++++++++++++++++++ .../docs/r/elasticache_cluster.html.markdown | 2 +- 3 files changed, 76 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_elasticache_cluster.go b/aws/resource_aws_elasticache_cluster.go index f9e3a38f8b3..e7fae6a9ef3 100644 --- a/aws/resource_aws_elasticache_cluster.go +++ b/aws/resource_aws_elasticache_cluster.go @@ -302,6 +302,15 @@ func resourceAwsElasticacheCluster() *schema.Resource { } return diff.ForceNew("node_type") }, + func(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { + if v, ok := diff.GetOk("engine"); !ok || v.(string) == "redis" { + return nil + } + if _, ok := diff.GetOk("final_snapshot_identifier"); !ok { + return nil + } + return errors.New(`engine "memcached" does not support final_snapshot_identifier`) + }, ), } } @@ -793,7 +802,9 @@ func deleteElasticacheCacheCluster(conn *elasticache.ElastiCache, cacheClusterID input := &elasticache.DeleteCacheClusterInput{ CacheClusterId: aws.String(cacheClusterID), } - input.FinalSnapshotIdentifier = aws.String(finalSnapshotID) + if finalSnapshotID != "" { + input.FinalSnapshotIdentifier = aws.String(finalSnapshotID) + } log.Printf("[DEBUG] Deleting Elasticache Cache Cluster: %s", input) err := resource.Retry(5*time.Minute, func() *resource.RetryError { diff --git a/aws/resource_aws_elasticache_cluster_test.go b/aws/resource_aws_elasticache_cluster_test.go index 556f3abfe77..0edea3ec325 100644 --- a/aws/resource_aws_elasticache_cluster_test.go +++ b/aws/resource_aws_elasticache_cluster_test.go @@ -690,6 +690,43 @@ func TestAccAWSElasticacheCluster_ReplicationGroupID_MultipleReplica(t *testing. 
}) } +func TestAccAWSElasticacheCluster_Memcached_FinalSnapshot(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSElasticacheClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSElasticacheClusterConfig_Memcached_FinalSnapshot(rName), + ExpectError: regexp.MustCompile(`engine "memcached" does not support final_snapshot_identifier`), + }, + }, + }) +} + +func TestAccAWSElasticacheCluster_Redis_FinalSnapshot(t *testing.T) { + var cluster elasticache.CacheCluster + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_elasticache_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSElasticacheClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSElasticacheClusterConfig_Redis_FinalSnapshot(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheClusterExists(resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "final_snapshot_identifier", rName), + ), + }, + }, + }) +} + func testAccCheckAWSElasticacheClusterAttributes(v *elasticache.CacheCluster) resource.TestCheckFunc { return func(s *terraform.State) error { if v.NotificationConfiguration == nil { @@ -1297,3 +1334,29 @@ resource "aws_elasticache_cluster" "test" { } `, rName, count) } + +func testAccAWSElasticacheClusterConfig_Memcached_FinalSnapshot(rName string) string { + return fmt.Sprintf(` +resource "aws_elasticache_cluster" "test" { + cluster_id = %[1]q + engine = "memcached" + node_type = "cache.t3.small" + num_cache_nodes = 1 + + final_snapshot_identifier = %[1]q +} +`, rName) +} + +func testAccAWSElasticacheClusterConfig_Redis_FinalSnapshot(rName string) string { + return fmt.Sprintf(` +resource "aws_elasticache_cluster" "test" { + cluster_id = %[1]q + engine = "redis" + node_type = "cache.t3.small" + num_cache_nodes = 1 + + final_snapshot_identifier = %[1]q +} +`, rName) +} diff --git a/website/docs/r/elasticache_cluster.html.markdown b/website/docs/r/elasticache_cluster.html.markdown index e57b5afd1b5..6d482909a1d 100644 --- a/website/docs/r/elasticache_cluster.html.markdown +++ b/website/docs/r/elasticache_cluster.html.markdown @@ -132,7 +132,7 @@ SNS topic to send ElastiCache notifications to. Example: * `preferred_availability_zones` - (Optional, Memcached only) A list of the Availability Zones in which cache nodes are created. If you are creating your cluster in an Amazon VPC you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group. The number of Availability Zones listed must equal the value of `num_cache_nodes`. If you want all the nodes in the same Availability Zone, use `availability_zone` instead, or repeat the Availability Zone multiple times in the list. Default: System chosen Availability Zones. Detecting drift of existing node availability zone is not currently supported. Updating this argument by itself to migrate existing node availability zones is not currently supported and will show a perpetual difference. -* `final_snapshot_identifier` - (Optional) The name of your final cluster snapshot. If omitted, no final snapshot will be made. +* `final_snapshot_identifier` - (Optional, Redis only) The name of your final cluster snapshot. If omitted, no final snapshot will be made. * `tags` - (Optional) A map of tags to assign to the resource
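The new plan-time rule in this patch boils down to a simple predicate. Distilled into a standalone sketch (the function and package names are illustrative, not provider code):

```go
package main

import (
	"errors"
	"fmt"
)

// checkFinalSnapshot mirrors the CustomizeDiff logic added above: ElastiCache
// only supports snapshots for the redis engine, so final_snapshot_identifier
// is rejected for memcached clusters before any API call is made.
func checkFinalSnapshot(engine, finalSnapshotID string) error {
	// An unset engine is allowed through, matching the CustomizeDiff above.
	if engine == "" || engine == "redis" || finalSnapshotID == "" {
		return nil
	}
	return errors.New(`engine "memcached" does not support final_snapshot_identifier`)
}

func main() {
	fmt.Println(checkFinalSnapshot("redis", "my-final-snap"))     // <nil>
	fmt.Println(checkFinalSnapshot("memcached", "my-final-snap")) // error
}
```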
From 79400dc28b300df2f024f74096edd706b7d743d6 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Mon, 11 Jan 2021 18:22:32 -0500 Subject: [PATCH 0539/1212] resource/aws_route53_zone_association: Prevent deletion errors for missing Hosted Zone or VPC association (#17023) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/15945 Reference: https://github.com/hashicorp/terraform-provider-aws/issues/17016 Reference: https://docs.aws.amazon.com/Route53/latest/APIReference/API_DisassociateVPCFromHostedZone.html#API_DisassociateVPCFromHostedZone_Errors This is a best-effort fix for the errors returned by the Route 53 `DisassociateVPCFromHostedZone` API, which are unrelated to the potential errors during the `Read` function that uses the `ListHostedZonesByVPC` API. The acceptance testing framework does not lend itself well to testing this situation, and this highlights a case where #15945 would need special handling. Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAWSRoute53ZoneAssociation_disappears_Zone (146.44s) --- PASS: TestAccAWSRoute53ZoneAssociation_disappears (147.22s) --- PASS: TestAccAWSRoute53ZoneAssociation_disappears_VPC (147.31s) --- PASS: TestAccAWSRoute53ZoneAssociation_CrossRegion (149.84s) --- PASS: TestAccAWSRoute53ZoneAssociation_basic (150.01s) --- PASS: TestAccAWSRoute53ZoneAssociation_CrossAccount (507.91s) ``` --- aws/resource_aws_route53_zone_association.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/aws/resource_aws_route53_zone_association.go b/aws/resource_aws_route53_zone_association.go index e5b275c68ce..a2158c7aa2f 100644 --- a/aws/resource_aws_route53_zone_association.go +++ b/aws/resource_aws_route53_zone_association.go @@ -8,6 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/route53" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -172,6 +173,14 @@ func resourceAwsRoute53ZoneAssociationDelete(d *schema.ResourceData, meta interf _, err = conn.DisassociateVPCFromHostedZone(input) + if tfawserr.ErrCodeEquals(err, route53.ErrCodeNoSuchHostedZone) { + return nil + } + + if tfawserr.ErrCodeEquals(err, route53.ErrCodeVPCAssociationNotFound) { + return nil + } + if err != nil { return fmt.Errorf("error disassociating Route 53 Hosted Zone (%s) from EC2 VPC (%s): %w", zoneID, vpcID, err) } From 26fd4f2b6a071c97756064a5428fedfa8bbaea3c Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Mon, 11 Jan 2021 18:23:08 -0500 Subject: [PATCH 0540/1212] Update CHANGELOG for #17023 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ada1aff3502..9c1aca3b585 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ BUG FIXES * resource/aws_db_instance: Correctly validate `final_snapshot_identifier` argument at plan-time [GH-16885] * resource/aws_networkfirewall_rule_group: Prevent resource recreation due to `stateful_rule` changes after creation [GH-16884] +* resource/aws_route53_zone_association: Prevent deletion errors for missing Hosted Zone or VPC association [GH-17023] ## 3.23.0 (January 08, 2021)
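The zone-association fix above follows a common provider pattern: during delete, error codes that mean "already gone" are swallowed so destroy stays idempotent. Condensed into a sketch using the same `tfawserr` helper and Route 53 error codes as the diff (the package and function names are illustrative, not provider source):

```go
package sketch

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/route53"
	"github.com/hashicorp/aws-sdk-go-base/tfawserr"
)

// disassociateIgnoringNotFound wraps DisassociateVPCFromHostedZone and treats
// "already deleted" conditions as success, so a destroy can complete even when
// the hosted zone or the association was removed out of band.
func disassociateIgnoringNotFound(conn *route53.Route53, input *route53.DisassociateVPCFromHostedZoneInput) error {
	_, err := conn.DisassociateVPCFromHostedZone(input)

	// Both codes mean there is nothing left to delete.
	if tfawserr.ErrCodeEquals(err, route53.ErrCodeNoSuchHostedZone) ||
		tfawserr.ErrCodeEquals(err, route53.ErrCodeVPCAssociationNotFound) {
		return nil
	}

	if err != nil {
		return fmt.Errorf("error disassociating Route 53 Hosted Zone: %w", err)
	}

	return nil
}
```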
From 7cd11c31d98e0e43b3d95c4fb6d6d91f7bc9a5c8 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Mon, 11 Jan 2021 18:27:00 -0500 Subject: [PATCH 0541/1212] resource/config_aggregate_auth: Simplify test structure --- ...aws_config_aggregate_authorization_test.go | 29 +++++++++++-------- 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/aws/resource_aws_config_aggregate_authorization_test.go b/aws/resource_aws_config_aggregate_authorization_test.go index becb0760eb5..7c6675c2e37 100644 --- a/aws/resource_aws_config_aggregate_authorization_test.go +++ b/aws/resource_aws_config_aggregate_authorization_test.go @@ -60,6 +60,7 @@ func testSweepConfigAggregateAuthorizations(region string) error { func TestAccAWSConfigAggregateAuthorization_basic(t *testing.T) { rString := acctest.RandStringFromCharSet(12, "0123456789") resourceName := "aws_config_aggregate_authorization.example" + dataSourceName := "data.aws_region.current" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -67,11 +68,11 @@ func TestAccAWSConfigAggregateAuthorization_basic(t *testing.T) { CheckDestroy: testAccCheckAWSConfigAggregateAuthorizationDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSConfigAggregateAuthorizationConfig_basic(rString, testAccGetAlternateRegion()), + Config: testAccAWSConfigAggregateAuthorizationConfig_basic(rString), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "account_id", rString), - resource.TestCheckResourceAttr(resourceName, "region", testAccGetAlternateRegion()), - testAccMatchResourceAttrRegionalARN(resourceName, "arn", "config", regexp.MustCompile(fmt.Sprintf(`aggregation-authorization/%s/%s$`, rString, testAccGetAlternateRegion()))), + resource.TestCheckResourceAttrPair(resourceName, "region", dataSourceName, "name"), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "config", regexp.MustCompile(fmt.Sprintf(`aggregation-authorization/%s/%s$`, rString, testAccGetRegion()))), ), }, { @@ -93,7 +94,7 @@ func TestAccAWSConfigAggregateAuthorization_tags(t *testing.T) { CheckDestroy: testAccCheckAWSConfigAggregateAuthorizationDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSConfigAggregateAuthorizationConfig_tags(rString, "foo", "bar", "fizz", "buzz", testAccGetAlternateRegion()), + Config: testAccAWSConfigAggregateAuthorizationConfig_tags(rString, "foo", "bar", "fizz", "buzz"), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "tags.%", "3"), resource.TestCheckResourceAttr(resourceName, "tags.Name", rString), @@ -102,7 +103,7 @@ func TestAccAWSConfigAggregateAuthorization_tags(t *testing.T) { ), }, { - Config: testAccAWSConfigAggregateAuthorizationConfig_tags(rString, "foo", "bar2", "fizz2", "buzz2", testAccGetAlternateRegion()), + Config: testAccAWSConfigAggregateAuthorizationConfig_tags(rString, "foo", "bar2", "fizz2", "buzz2"), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "tags.%", "3"), resource.TestCheckResourceAttr(resourceName, "tags.Name", rString), @@ -116,7 +117,7 @@ func TestAccAWSConfigAggregateAuthorization_tags(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAWSConfigAggregateAuthorizationConfig_basic(rString, testAccGetAlternateRegion()), + Config: testAccAWSConfigAggregateAuthorizationConfig_basic(rString), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), @@ -154,20 +155,24 @@ func testAccCheckAWSConfigAggregateAuthorizationDestroy(s *terraform.State) erro return nil } -func testAccAWSConfigAggregateAuthorizationConfig_basic(rString, region string) string { +func testAccAWSConfigAggregateAuthorizationConfig_basic(rString string) string { return fmt.Sprintf(` +data "aws_region" "current" {} +
resource "aws_config_aggregate_authorization" "example" { account_id = %[1]q - region = %[2]q + region = data.aws_region.current.name } -`, rString, region) +`, rString) } -func testAccAWSConfigAggregateAuthorizationConfig_tags(rString, tagKey1, tagValue1, tagKey2, tagValue2, region string) string { +func testAccAWSConfigAggregateAuthorizationConfig_tags(rString, tagKey1, tagValue1, tagKey2, tagValue2 string) string { return fmt.Sprintf(` +data "aws_region" "current" {} + resource "aws_config_aggregate_authorization" "example" { account_id = %[1]q - region = %[6]q + region = data.aws_region.current.name tags = { Name = %[1]q @@ -176,5 +181,5 @@ resource "aws_config_aggregate_authorization" "example" { %[4]s = %[5]q } } -`, rString, tagKey1, tagValue1, tagKey2, tagValue2, region) +`, rString, tagKey1, tagValue1, tagKey2, tagValue2) } From fc915f6e129bb0ab4c39dc2adf8fd6844379fa17 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Tue, 5 Jan 2021 17:32:45 -0500 Subject: [PATCH 0542/1212] tests/resource/guardduty_filter: Fix hardcoded region --- aws/resource_aws_guardduty_filter_test.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/aws/resource_aws_guardduty_filter_test.go b/aws/resource_aws_guardduty_filter_test.go index 486d70ad3ce..7ac64bea19d 100644 --- a/aws/resource_aws_guardduty_filter_test.go +++ b/aws/resource_aws_guardduty_filter_test.go @@ -40,7 +40,7 @@ func testAccAwsGuardDutyFilter_basic(t *testing.T) { resource.TestCheckTypeSetElemNestedAttrs(resourceName, "finding_criteria.0.criterion.*", map[string]string{ "field": "region", "equals.#": "1", - "equals.0": "eu-west-1", + "equals.0": testAccGetAlternateRegion(), }), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "finding_criteria.0.criterion.*", map[string]string{ "field": "service.additionalInfo.threatListName", @@ -257,7 +257,7 @@ resource "aws_guardduty_filter" "test" { finding_criteria { criterion { field = "region" - equals = ["eu-west-1"] + equals = [%[1]q] } criterion { @@ -267,8 +267,8 @@ resource "aws_guardduty_filter" "test" { criterion { field = "updatedAt" - greater_than_or_equal = %[1]q - less_than = %[2]q + greater_than_or_equal = %[2]q + less_than = %[3]q } } } @@ -276,7 +276,7 @@ resource "aws_guardduty_filter" "test" { resource "aws_guardduty_detector" "test" { enable = true } -`, startDate, endDate) +`, testAccGetAlternateRegion(), startDate, endDate) } func testAccGuardDutyFilterConfigNoop_full(startDate, endDate string) string { @@ -291,7 +291,7 @@ resource "aws_guardduty_filter" "test" { finding_criteria { criterion { field = "region" - equals = ["eu-west-1"] + equals = [%[1]q] } criterion { @@ -301,8 +301,8 @@ resource "aws_guardduty_filter" "test" { criterion { field = "updatedAt" - greater_than_or_equal = %[1]q - less_than = %[2]q + greater_than_or_equal = %[2]q + less_than = %[3]q } } } @@ -310,7 +310,7 @@ resource "aws_guardduty_filter" "test" { resource "aws_guardduty_detector" "test" { enable = true } -`, startDate, endDate) +`, testAccGetAlternateRegion(), startDate, endDate) } func testAccGuardDutyFilterConfig_multipleTags() string { From 3831e456f082758fe7d332655c4a5fa97252896a Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Mon, 11 Jan 2021 18:37:25 -0500 Subject: [PATCH 0543/1212] tests/guardduty_filter: Simplify with data source --- aws/resource_aws_guardduty_filter_test.go | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/aws/resource_aws_guardduty_filter_test.go b/aws/resource_aws_guardduty_filter_test.go index 
7ac64bea19d..44b5b4d62f2 100644 --- a/aws/resource_aws_guardduty_filter_test.go +++ b/aws/resource_aws_guardduty_filter_test.go @@ -40,7 +40,7 @@ func testAccAwsGuardDutyFilter_basic(t *testing.T) { resource.TestCheckTypeSetElemNestedAttrs(resourceName, "finding_criteria.0.criterion.*", map[string]string{ "field": "region", "equals.#": "1", - "equals.0": testAccGetAlternateRegion(), + "equals.0": testAccGetRegion(), }), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "finding_criteria.0.criterion.*", map[string]string{ "field": "service.additionalInfo.threatListName", @@ -248,6 +248,8 @@ func testAccCheckAwsGuardDutyFilterExists(name string, filter *guardduty.GetFilt func testAccGuardDutyFilterConfig_full(startDate, endDate string) string { return fmt.Sprintf(` +data "aws_region" "current" {} + resource "aws_guardduty_filter" "test" { detector_id = aws_guardduty_detector.test.id name = "test-filter" @@ -257,7 +259,7 @@ resource "aws_guardduty_filter" "test" { finding_criteria { criterion { field = "region" - equals = [%[1]q] + equals = [data.aws_region.current.name] } criterion { @@ -267,8 +269,8 @@ resource "aws_guardduty_filter" "test" { criterion { field = "updatedAt" - greater_than_or_equal = %[2]q - less_than = %[3]q + greater_than_or_equal = %[1]q + less_than = %[2]q } } } @@ -276,11 +278,13 @@ resource "aws_guardduty_filter" "test" { resource "aws_guardduty_detector" "test" { enable = true } -`, testAccGetAlternateRegion(), startDate, endDate) +`, startDate, endDate) } func testAccGuardDutyFilterConfigNoop_full(startDate, endDate string) string { return fmt.Sprintf(` +data "aws_region" "current" {} + resource "aws_guardduty_filter" "test" { detector_id = aws_guardduty_detector.test.id name = "test-filter" @@ -291,7 +295,7 @@ resource "aws_guardduty_filter" "test" { finding_criteria { criterion { field = "region" - equals = [%[1]q] + equals = [data.aws_region.current.name] } criterion { @@ -301,8 +305,8 @@ resource "aws_guardduty_filter" "test" { criterion { field = "updatedAt" - greater_than_or_equal = %[2]q - less_than = %[3]q + greater_than_or_equal = %[1]q + less_than = %[2]q } } } @@ -310,7 +314,7 @@ resource "aws_guardduty_filter" "test" { resource "aws_guardduty_detector" "test" { enable = true } -`, testAccGetAlternateRegion(), startDate, endDate) +`, startDate, endDate) } func testAccGuardDutyFilterConfig_multipleTags() string { From 33ad0e0c65c46de5c99605aff29313f1bc15539f Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 11 Jan 2021 17:38:24 -0800 Subject: [PATCH 0544/1212] Replaces expandStringList(x.List()) with expandStringSet(x) --- ...nt_distribution_configuration_structure.go | 18 +++++------- ...stribution_configuration_structure_test.go | 10 +++---- aws/data_source_aws_autoscaling_groups.go | 2 +- aws/resource_aws_api_gateway_integration.go | 2 +- aws/resource_aws_api_gateway_method.go | 2 +- aws/resource_aws_autoscaling_group.go | 29 +++++++++---------- aws/resource_aws_autoscaling_notification.go | 16 +++++----- aws/resource_aws_cloudformation_stack.go | 8 ++--- aws/resource_aws_codebuild_project.go | 4 +-- ...esource_aws_codedeploy_deployment_group.go | 4 +-- ...codestarnotifications_notification_rule.go | 4 +-- aws/resource_aws_dax_cluster.go | 11 ++++--- aws/resource_aws_db_event_subscription.go | 4 +-- aws/resource_aws_dms_event_subscription.go | 6 ++-- aws/resource_aws_dms_replication_instance.go | 4 +-- ...source_aws_dms_replication_subnet_group.go | 4 +-- aws/resource_aws_docdb_cluster.go | 10 +++---- 
aws/resource_aws_ec2_client_vpn_endpoint.go | 4 +-- aws/resource_aws_ec2_traffic_mirror_filter.go | 12 ++++---- aws/resource_aws_ecs_task_definition.go | 2 +- aws/resource_aws_elasticache_cluster.go | 12 ++++---- aws/resource_aws_elasticache_subnet_group.go | 2 +- aws/resource_aws_elb.go | 18 +++++------- aws/resource_aws_fms_policy.go | 4 +-- 24 files changed, 93 insertions(+), 99 deletions(-) diff --git a/aws/cloudfront_distribution_configuration_structure.go b/aws/cloudfront_distribution_configuration_structure.go index 924766fd143..71ca5c39316 100644 --- a/aws/cloudfront_distribution_configuration_structure.go +++ b/aws/cloudfront_distribution_configuration_structure.go @@ -508,7 +508,7 @@ func flattenCookieNames(cn *cloudfront.CookieNames) []interface{} { func expandAllowedMethods(s *schema.Set) *cloudfront.AllowedMethods { return &cloudfront.AllowedMethods{ Quantity: aws.Int64(int64(s.Len())), - Items: expandStringList(s.List()), + Items: expandStringSet(s), } } @@ -522,7 +522,7 @@ func flattenAllowedMethods(am *cloudfront.AllowedMethods) *schema.Set { func expandCachedMethods(s *schema.Set) *cloudfront.CachedMethods { return &cloudfront.CachedMethods{ Quantity: aws.Int64(int64(s.Len())), - Items: expandStringList(s.List()), + Items: expandStringSet(s), } } @@ -1000,14 +1000,12 @@ func flattenLoggingConfig(lc *cloudfront.LoggingConfig) []interface{} { return []interface{}{m} } -func expandAliases(as *schema.Set) *cloudfront.Aliases { - s := as.List() - var aliases cloudfront.Aliases - if len(s) > 0 { - aliases.Quantity = aws.Int64(int64(len(s))) - aliases.Items = expandStringList(s) - } else { - aliases.Quantity = aws.Int64(0) +func expandAliases(s *schema.Set) *cloudfront.Aliases { + aliases := cloudfront.Aliases{ + Quantity: aws.Int64(int64(s.Len())), + } + if s.Len() > 0 { + aliases.Items = expandStringSet(s) } return &aliases } diff --git a/aws/cloudfront_distribution_configuration_structure_test.go b/aws/cloudfront_distribution_configuration_structure_test.go index c7bad0c5a50..c3e38114616 100644 --- a/aws/cloudfront_distribution_configuration_structure_test.go +++ b/aws/cloudfront_distribution_configuration_structure_test.go @@ -301,10 +301,10 @@ func TestCloudFrontStructure_expandCloudFrontDefaultCacheBehavior(t *testing.T) if *dcb.LambdaFunctionAssociations.Quantity != 2 { t.Fatalf("Expected LambdaFunctionAssociations to be 2, got %v", *dcb.LambdaFunctionAssociations.Quantity) } - if !reflect.DeepEqual(dcb.AllowedMethods.Items, expandStringList(allowedMethodsConf().List())) { + if !reflect.DeepEqual(dcb.AllowedMethods.Items, expandStringSet(allowedMethodsConf())) { t.Fatalf("Expected AllowedMethods.Items to be %v, got %v", allowedMethodsConf().List(), dcb.AllowedMethods.Items) } - if !reflect.DeepEqual(dcb.AllowedMethods.CachedMethods.Items, expandStringList(cachedMethodsConf().List())) { + if !reflect.DeepEqual(dcb.AllowedMethods.CachedMethods.Items, expandStringSet(cachedMethodsConf())) { t.Fatalf("Expected AllowedMethods.CachedMethods.Items to be %v, got %v", cachedMethodsConf().List(), dcb.AllowedMethods.CachedMethods.Items) } } @@ -502,7 +502,7 @@ func TestCloudFrontStructure_expandAllowedMethods(t *testing.T) { if *am.Quantity != 7 { t.Fatalf("Expected Quantity to be 7, got %v", *am.Quantity) } - if !reflect.DeepEqual(am.Items, expandStringList(data.List())) { + if !reflect.DeepEqual(am.Items, expandStringSet(data)) { t.Fatalf("Expected Items to be %v, got %v", data, am.Items) } } @@ -523,7 +523,7 @@ func TestCloudFrontStructure_expandCachedMethods(t *testing.T) { if 
*cm.Quantity != 3 { t.Fatalf("Expected Quantity to be 3, got %v", *cm.Quantity) } - if !reflect.DeepEqual(cm.Items, expandStringList(data.List())) { + if !reflect.DeepEqual(cm.Items, expandStringSet(data)) { t.Fatalf("Expected Items to be %v, got %v", data, cm.Items) } } @@ -870,7 +870,7 @@ func TestCloudFrontStructure_expandAliases(t *testing.T) { if *a.Quantity != 2 { t.Fatalf("Expected Quantity to be 2, got %v", *a.Quantity) } - if !reflect.DeepEqual(a.Items, expandStringList(data.List())) { + if !reflect.DeepEqual(a.Items, expandStringSet(data)) { t.Fatalf("Expected Items to be [example.com www.example.com], got %v", a.Items) } } diff --git a/aws/data_source_aws_autoscaling_groups.go b/aws/data_source_aws_autoscaling_groups.go index fbe4b71d389..fe1d95885fd 100644 --- a/aws/data_source_aws_autoscaling_groups.go +++ b/aws/data_source_aws_autoscaling_groups.go @@ -122,7 +122,7 @@ func expandAsgTagFilters(in []interface{}) []*autoscaling.Filter { out := make([]*autoscaling.Filter, len(in)) for i, filter := range in { m := filter.(map[string]interface{}) - values := expandStringList(m["values"].(*schema.Set).List()) + values := expandStringSet(m["values"].(*schema.Set)) out[i] = &autoscaling.Filter{ Name: aws.String(m["name"].(string)), diff --git a/aws/resource_aws_api_gateway_integration.go b/aws/resource_aws_api_gateway_integration.go index 8ec56b5a794..140c9a517d6 100644 --- a/aws/resource_aws_api_gateway_integration.go +++ b/aws/resource_aws_api_gateway_integration.go @@ -207,7 +207,7 @@ func resourceAwsApiGatewayIntegrationCreate(d *schema.ResourceData, meta interfa var cacheKeyParameters []*string if v, ok := d.GetOk("cache_key_parameters"); ok { - cacheKeyParameters = expandStringList(v.(*schema.Set).List()) + cacheKeyParameters = expandStringSet(v.(*schema.Set)) } var cacheNamespace *string diff --git a/aws/resource_aws_api_gateway_method.go b/aws/resource_aws_api_gateway_method.go index d989da46595..f028ebb5bc9 100644 --- a/aws/resource_aws_api_gateway_method.go +++ b/aws/resource_aws_api_gateway_method.go @@ -133,7 +133,7 @@ func resourceAwsApiGatewayMethodCreate(d *schema.ResourceData, meta interface{}) } if v, ok := d.GetOk("authorization_scopes"); ok { - input.AuthorizationScopes = expandStringList(v.(*schema.Set).List()) + input.AuthorizationScopes = expandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("request_validator_id"); ok { diff --git a/aws/resource_aws_autoscaling_group.go b/aws/resource_aws_autoscaling_group.go index c92d698178c..bae3357df03 100644 --- a/aws/resource_aws_autoscaling_group.go +++ b/aws/resource_aws_autoscaling_group.go @@ -650,7 +650,7 @@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{}) // Availability Zones are optional if VPC Zone Identifier(s) are specified if v, ok := d.GetOk("availability_zones"); ok && v.(*schema.Set).Len() > 0 { - createOpts.AvailabilityZones = expandStringList(v.(*schema.Set).List()) + createOpts.AvailabilityZones = expandStringSet(v.(*schema.Set)) } resourceID := d.Get("name").(string) @@ -684,8 +684,7 @@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{}) } if v, ok := d.GetOk("load_balancers"); ok && v.(*schema.Set).Len() > 0 { - createOpts.LoadBalancerNames = expandStringList( - v.(*schema.Set).List()) + createOpts.LoadBalancerNames = expandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("vpc_zone_identifier"); ok && v.(*schema.Set).Len() > 0 { @@ -697,7 +696,7 @@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{}) } if v, ok := 
d.GetOk("target_group_arns"); ok && len(v.(*schema.Set).List()) > 0 { - createOpts.TargetGroupARNs = expandStringList(v.(*schema.Set).List()) + createOpts.TargetGroupARNs = expandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("service_linked_role_arn"); ok { @@ -1034,7 +1033,7 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) if d.HasChange("availability_zones") { if v, ok := d.GetOk("availability_zones"); ok && v.(*schema.Set).Len() > 0 { - opts.AvailabilityZones = expandStringList(v.(*schema.Set).List()) + opts.AvailabilityZones = expandStringSet(v.(*schema.Set)) } } @@ -1092,8 +1091,8 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) os := o.(*schema.Set) ns := n.(*schema.Set) - remove := expandStringList(os.Difference(ns).List()) - add := expandStringList(ns.Difference(os).List()) + remove := expandStringSet(os.Difference(ns)) + add := expandStringSet(ns.Difference(os)) if len(remove) > 0 { // API only supports removing 10 at a time @@ -1162,8 +1161,8 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) os := o.(*schema.Set) ns := n.(*schema.Set) - remove := expandStringList(os.Difference(ns).List()) - add := expandStringList(ns.Difference(os).List()) + remove := expandStringSet(os.Difference(ns)) + add := expandStringSet(ns.Difference(os)) if len(remove) > 0 { // AWS API only supports adding/removing 10 at a time @@ -1431,7 +1430,7 @@ func resourceAwsAutoscalingGroupDrain(d *schema.ResourceData, meta interface{}) func enableASGSuspendedProcesses(d *schema.ResourceData, conn *autoscaling.AutoScaling) error { props := &autoscaling.ScalingProcessQuery{ AutoScalingGroupName: aws.String(d.Id()), - ScalingProcesses: expandStringList(d.Get("suspended_processes").(*schema.Set).List()), + ScalingProcesses: expandStringSet(d.Get("suspended_processes").(*schema.Set)), } _, err := conn.SuspendProcesses(props) @@ -1442,7 +1441,7 @@ func enableASGMetricsCollection(d *schema.ResourceData, conn *autoscaling.AutoSc props := &autoscaling.EnableMetricsCollectionInput{ AutoScalingGroupName: aws.String(d.Id()), Granularity: aws.String(d.Get("metrics_granularity").(string)), - Metrics: expandStringList(d.Get("enabled_metrics").(*schema.Set).List()), + Metrics: expandStringSet(d.Get("enabled_metrics").(*schema.Set)), } log.Printf("[INFO] Enabling metrics collection for the Auto Scaling Group: %s", d.Id()) @@ -1467,7 +1466,7 @@ func updateASGSuspendedProcesses(d *schema.ResourceData, conn *autoscaling.AutoS if resumeProcesses.Len() != 0 { props := &autoscaling.ScalingProcessQuery{ AutoScalingGroupName: aws.String(d.Id()), - ScalingProcesses: expandStringList(resumeProcesses.List()), + ScalingProcesses: expandStringSet(resumeProcesses), } _, err := conn.ResumeProcesses(props) @@ -1480,7 +1479,7 @@ func updateASGSuspendedProcesses(d *schema.ResourceData, conn *autoscaling.AutoS if suspendedProcesses.Len() != 0 { props := &autoscaling.ScalingProcessQuery{ AutoScalingGroupName: aws.String(d.Id()), - ScalingProcesses: expandStringList(suspendedProcesses.List()), + ScalingProcesses: expandStringSet(suspendedProcesses), } _, err := conn.SuspendProcesses(props) @@ -1510,7 +1509,7 @@ func updateASGMetricsCollection(d *schema.ResourceData, conn *autoscaling.AutoSc if disableMetrics.Len() != 0 { props := &autoscaling.DisableMetricsCollectionInput{ AutoScalingGroupName: aws.String(d.Id()), - Metrics: expandStringList(disableMetrics.List()), + Metrics: expandStringSet(disableMetrics), } _, err := 
conn.DisableMetricsCollection(props) @@ -1523,7 +1522,7 @@ func updateASGMetricsCollection(d *schema.ResourceData, conn *autoscaling.AutoSc if enabledMetrics.Len() != 0 { props := &autoscaling.EnableMetricsCollectionInput{ AutoScalingGroupName: aws.String(d.Id()), - Metrics: expandStringList(enabledMetrics.List()), + Metrics: expandStringSet(enabledMetrics), Granularity: aws.String(d.Get("metrics_granularity").(string)), } diff --git a/aws/resource_aws_autoscaling_notification.go b/aws/resource_aws_autoscaling_notification.go index 72f59c9a625..2868fb2ce51 100644 --- a/aws/resource_aws_autoscaling_notification.go +++ b/aws/resource_aws_autoscaling_notification.go @@ -43,8 +43,8 @@ func resourceAwsAutoscalingNotification() *schema.Resource { func resourceAwsAutoscalingNotificationCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).autoscalingconn - gl := expandStringList(d.Get("group_names").(*schema.Set).List()) - nl := expandStringList(d.Get("notifications").(*schema.Set).List()) + gl := expandStringSet(d.Get("group_names").(*schema.Set)) + nl := expandStringSet(d.Get("notifications").(*schema.Set)) topic := d.Get("topic_arn").(string) if err := addNotificationConfigToGroupsWithTopic(conn, gl, nl, topic); err != nil { @@ -59,7 +59,7 @@ func resourceAwsAutoscalingNotificationCreate(d *schema.ResourceData, meta inter func resourceAwsAutoscalingNotificationRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).autoscalingconn - gl := expandStringList(d.Get("group_names").(*schema.Set).List()) + gl := expandStringSet(d.Get("group_names").(*schema.Set)) opts := &autoscaling.DescribeNotificationConfigurationsInput{ AutoScalingGroupNames: gl, @@ -121,7 +121,7 @@ func resourceAwsAutoscalingNotificationUpdate(d *schema.ResourceData, meta inter // Notifications API call is a PUT, so we don't need to diff the list, just // push whatever it is and AWS sorts it out - nl := expandStringList(d.Get("notifications").(*schema.Set).List()) + nl := expandStringSet(d.Get("notifications").(*schema.Set)) o, n := d.GetChange("group_names") if o == nil { @@ -131,8 +131,8 @@ func resourceAwsAutoscalingNotificationUpdate(d *schema.ResourceData, meta inter n = new(schema.Set) } - remove := expandStringList(o.(*schema.Set).List()) - add := expandStringList(n.(*schema.Set).List()) + remove := expandStringSet(o.(*schema.Set)) + add := expandStringSet(n.(*schema.Set)) topic := d.Get("topic_arn").(string) @@ -142,7 +142,7 @@ func resourceAwsAutoscalingNotificationUpdate(d *schema.ResourceData, meta inter var update []*string if d.HasChange("notifications") { - update = expandStringList(d.Get("group_names").(*schema.Set).List()) + update = expandStringSet(d.Get("group_names").(*schema.Set)) } else { update = add } @@ -191,7 +191,7 @@ func removeNotificationConfigToGroupsWithTopic(conn *autoscaling.AutoScaling, gr func resourceAwsAutoscalingNotificationDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).autoscalingconn - gl := expandStringList(d.Get("group_names").(*schema.Set).List()) + gl := expandStringSet(d.Get("group_names").(*schema.Set)) topic := d.Get("topic_arn").(string) err := removeNotificationConfigToGroupsWithTopic(conn, gl, topic) diff --git a/aws/resource_aws_cloudformation_stack.go b/aws/resource_aws_cloudformation_stack.go index c6e87629b1e..69d0cfca6c0 100644 --- a/aws/resource_aws_cloudformation_stack.go +++ b/aws/resource_aws_cloudformation_stack.go @@ -136,13 +136,13 @@ func resourceAwsCloudFormationStackCreate(d 
*schema.ResourceData, meta interface input.TemplateURL = aws.String(v.(string)) } if v, ok := d.GetOk("capabilities"); ok { - input.Capabilities = expandStringList(v.(*schema.Set).List()) + input.Capabilities = expandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("disable_rollback"); ok { input.DisableRollback = aws.Bool(v.(bool)) } if v, ok := d.GetOk("notification_arns"); ok { - input.NotificationARNs = expandStringList(v.(*schema.Set).List()) + input.NotificationARNs = expandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("on_failure"); ok { input.OnFailure = aws.String(v.(string)) @@ -311,11 +311,11 @@ func resourceAwsCloudFormationStackUpdate(d *schema.ResourceData, meta interface // Capabilities must be present whether they are changed or not if v, ok := d.GetOk("capabilities"); ok { - input.Capabilities = expandStringList(v.(*schema.Set).List()) + input.Capabilities = expandStringSet(v.(*schema.Set)) } if d.HasChange("notification_arns") { - input.NotificationARNs = expandStringList(d.Get("notification_arns").(*schema.Set).List()) + input.NotificationARNs = expandStringSet(d.Get("notification_arns").(*schema.Set)) } // Parameters must be present whether they are changed or not diff --git a/aws/resource_aws_codebuild_project.go b/aws/resource_aws_codebuild_project.go index 19af082113e..36e2630d4ed 100644 --- a/aws/resource_aws_codebuild_project.go +++ b/aws/resource_aws_codebuild_project.go @@ -981,8 +981,8 @@ func expandCodeBuildVpcConfig(rawVpcConfig []interface{}) *codebuild.VpcConfig { data := rawVpcConfig[0].(map[string]interface{}) vpcConfig.VpcId = aws.String(data["vpc_id"].(string)) - vpcConfig.Subnets = expandStringList(data["subnets"].(*schema.Set).List()) - vpcConfig.SecurityGroupIds = expandStringList(data["security_group_ids"].(*schema.Set).List()) + vpcConfig.Subnets = expandStringSet(data["subnets"].(*schema.Set)) + vpcConfig.SecurityGroupIds = expandStringSet(data["security_group_ids"].(*schema.Set)) return &vpcConfig } diff --git a/aws/resource_aws_codedeploy_deployment_group.go b/aws/resource_aws_codedeploy_deployment_group.go index 9ffa32f77ee..274741c735c 100644 --- a/aws/resource_aws_codedeploy_deployment_group.go +++ b/aws/resource_aws_codedeploy_deployment_group.go @@ -504,7 +504,7 @@ func resourceAwsCodeDeployDeploymentGroupCreate(d *schema.ResourceData, meta int } if attr, ok := d.GetOk("autoscaling_groups"); ok { - input.AutoScalingGroups = expandStringList(attr.(*schema.Set).List()) + input.AutoScalingGroups = expandStringSet(attr.(*schema.Set)) } if attr, ok := d.GetOk("on_premises_instance_tag_filter"); ok { @@ -687,7 +687,7 @@ func resourceAwsCodeDeployDeploymentGroupUpdate(d *schema.ResourceData, meta int // include (original or new) autoscaling groups when blue_green_deployment_config changes except for ECS if _, isEcs := d.GetOk("ecs_service"); d.HasChange("autoscaling_groups") || (d.HasChange("blue_green_deployment_config") && !isEcs) { _, n := d.GetChange("autoscaling_groups") - input.AutoScalingGroups = expandStringList(n.(*schema.Set).List()) + input.AutoScalingGroups = expandStringSet(n.(*schema.Set)) } // TagFilters aren't like tags. They don't append. They simply replace. 
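For reference while reading these mechanical call-site changes: the commit subject states the equivalence being exploited, namely that `expandStringSet(s)` stands in for `expandStringList(s.List())`. The two helpers are conventionally shaped along these lines (a sketch of the pattern, not a verbatim quote of the provider source):

```go
package sketch

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// expandStringList converts a []interface{} of strings into the []*string
// shape that AWS SDK inputs expect, skipping non-string and empty values.
func expandStringList(configured []interface{}) []*string {
	vs := make([]*string, 0, len(configured))
	for _, v := range configured {
		if val, ok := v.(string); ok && val != "" {
			vs = append(vs, aws.String(val))
		}
	}
	return vs
}

// expandStringSet is the *schema.Set convenience wrapper; this patch rewrites
// expandStringList(s.List()) call sites to use it directly.
func expandStringSet(configured *schema.Set) []*string {
	return expandStringList(configured.List())
}
```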
diff --git a/aws/resource_aws_codestarnotifications_notification_rule.go b/aws/resource_aws_codestarnotifications_notification_rule.go index cc3a5a64f9d..dd8134b639e 100644 --- a/aws/resource_aws_codestarnotifications_notification_rule.go +++ b/aws/resource_aws_codestarnotifications_notification_rule.go @@ -125,7 +125,7 @@ func resourceAwsCodeStarNotificationsNotificationRuleCreate(d *schema.ResourceDa params := &codestarnotifications.CreateNotificationRuleInput{ DetailType: aws.String(d.Get("detail_type").(string)), - EventTypeIds: expandStringList(d.Get("event_type_ids").(*schema.Set).List()), + EventTypeIds: expandStringSet(d.Get("event_type_ids").(*schema.Set)), Name: aws.String(d.Get("name").(string)), Resource: aws.String(d.Get("resource").(string)), Status: aws.String(d.Get("status").(string)), @@ -252,7 +252,7 @@ func resourceAwsCodeStarNotificationsNotificationRuleUpdate(d *schema.ResourceDa params := &codestarnotifications.UpdateNotificationRuleInput{ Arn: aws.String(d.Id()), DetailType: aws.String(d.Get("detail_type").(string)), - EventTypeIds: expandStringList(d.Get("event_type_ids").(*schema.Set).List()), + EventTypeIds: expandStringSet(d.Get("event_type_ids").(*schema.Set)), Name: aws.String(d.Get("name").(string)), Status: aws.String(d.Get("status").(string)), Targets: expandCodeStarNotificationsNotificationRuleTargets(d.Get("target").(*schema.Set).List()), diff --git a/aws/resource_aws_dax_cluster.go b/aws/resource_aws_dax_cluster.go index 069c417c910..dc9cded4b39 100644 --- a/aws/resource_aws_dax_cluster.go +++ b/aws/resource_aws_dax_cluster.go @@ -181,7 +181,7 @@ func resourceAwsDaxClusterCreate(d *schema.ResourceData, meta interface{}) error subnetGroupName := d.Get("subnet_group_name").(string) securityIdSet := d.Get("security_group_ids").(*schema.Set) - securityIds := expandStringList(securityIdSet.List()) + securityIds := expandStringSet(securityIdSet) tags := keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().DaxTags() req := &dax.CreateClusterInput{ @@ -211,10 +211,9 @@ func resourceAwsDaxClusterCreate(d *schema.ResourceData, meta interface{}) error req.NotificationTopicArn = aws.String(v.(string)) } - preferred_azs := d.Get("availability_zones").(*schema.Set).List() - if len(preferred_azs) > 0 { - azs := expandStringList(preferred_azs) - req.AvailabilityZones = azs + preferredAZs := d.Get("availability_zones").(*schema.Set) + if preferredAZs.Len() > 0 { + req.AvailabilityZones = expandStringSet(preferredAZs) } if v, ok := d.GetOk("server_side_encryption"); ok && len(v.([]interface{})) > 0 { @@ -368,7 +367,7 @@ func resourceAwsDaxClusterUpdate(d *schema.ResourceData, meta interface{}) error if d.HasChange("security_group_ids") { if attr := d.Get("security_group_ids").(*schema.Set); attr.Len() > 0 { - req.SecurityGroupIds = expandStringList(attr.List()) + req.SecurityGroupIds = expandStringSet(attr) requestUpdate = true } } diff --git a/aws/resource_aws_db_event_subscription.go b/aws/resource_aws_db_event_subscription.go index 162954877d6..0a7383489d6 100644 --- a/aws/resource_aws_db_event_subscription.go +++ b/aws/resource_aws_db_event_subscription.go @@ -317,8 +317,8 @@ func resourceAwsDbEventSubscriptionUpdate(d *schema.ResourceData, meta interface os := o.(*schema.Set) ns := n.(*schema.Set) - remove := expandStringList(os.Difference(ns).List()) - add := expandStringList(ns.Difference(os).List()) + remove := expandStringSet(os.Difference(ns)) + add := expandStringSet(ns.Difference(os)) if len(remove) > 0 { for _, removing := range remove { diff 
--git a/aws/resource_aws_dms_event_subscription.go b/aws/resource_aws_dms_event_subscription.go index d46865ef73d..8c90c7c900c 100644 --- a/aws/resource_aws_dms_event_subscription.go +++ b/aws/resource_aws_dms_event_subscription.go @@ -91,11 +91,11 @@ func resourceAwsDmsEventSubscriptionCreate(d *schema.ResourceData, meta interfac } if v, ok := d.GetOk("event_categories"); ok { - request.EventCategories = expandStringList(v.(*schema.Set).List()) + request.EventCategories = expandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("source_ids"); ok { - request.SourceIds = expandStringList(v.(*schema.Set).List()) + request.SourceIds = expandStringSet(v.(*schema.Set)) } _, err := conn.CreateEventSubscription(request) @@ -135,7 +135,7 @@ func resourceAwsDmsEventSubscriptionUpdate(d *schema.ResourceData, meta interfac } if v, ok := d.GetOk("event_categories"); ok { - request.EventCategories = expandStringList(v.(*schema.Set).List()) + request.EventCategories = expandStringSet(v.(*schema.Set)) } _, err := conn.ModifyEventSubscription(request) diff --git a/aws/resource_aws_dms_replication_instance.go b/aws/resource_aws_dms_replication_instance.go index 4154d8869d3..2fc4c92f9de 100644 --- a/aws/resource_aws_dms_replication_instance.go +++ b/aws/resource_aws_dms_replication_instance.go @@ -164,7 +164,7 @@ func resourceAwsDmsReplicationInstanceCreate(d *schema.ResourceData, meta interf request.ReplicationSubnetGroupIdentifier = aws.String(v.(string)) } if v, ok := d.GetOk("vpc_security_group_ids"); ok { - request.VpcSecurityGroupIds = expandStringList(v.(*schema.Set).List()) + request.VpcSecurityGroupIds = expandStringSet(v.(*schema.Set)) } log.Println("[DEBUG] DMS create replication instance:", request) @@ -322,7 +322,7 @@ func resourceAwsDmsReplicationInstanceUpdate(d *schema.ResourceData, meta interf if d.HasChange("vpc_security_group_ids") { if v, ok := d.GetOk("vpc_security_group_ids"); ok { - request.VpcSecurityGroupIds = expandStringList(v.(*schema.Set).List()) + request.VpcSecurityGroupIds = expandStringSet(v.(*schema.Set)) hasChanges = true } } diff --git a/aws/resource_aws_dms_replication_subnet_group.go b/aws/resource_aws_dms_replication_subnet_group.go index 03147680ca4..5a13040c780 100644 --- a/aws/resource_aws_dms_replication_subnet_group.go +++ b/aws/resource_aws_dms_replication_subnet_group.go @@ -58,7 +58,7 @@ func resourceAwsDmsReplicationSubnetGroupCreate(d *schema.ResourceData, meta int request := &dms.CreateReplicationSubnetGroupInput{ ReplicationSubnetGroupIdentifier: aws.String(d.Get("replication_subnet_group_id").(string)), ReplicationSubnetGroupDescription: aws.String(d.Get("replication_subnet_group_description").(string)), - SubnetIds: expandStringList(d.Get("subnet_ids").(*schema.Set).List()), + SubnetIds: expandStringSet(d.Get("subnet_ids").(*schema.Set)), Tags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().DatabasemigrationserviceTags(), } @@ -129,7 +129,7 @@ func resourceAwsDmsReplicationSubnetGroupUpdate(d *schema.ResourceData, meta int // changes to SubnetIds. 
request := &dms.ModifyReplicationSubnetGroupInput{ ReplicationSubnetGroupIdentifier: aws.String(d.Get("replication_subnet_group_id").(string)), - SubnetIds: expandStringList(d.Get("subnet_ids").(*schema.Set).List()), + SubnetIds: expandStringSet(d.Get("subnet_ids").(*schema.Set)), } if d.HasChange("replication_subnet_group_description") { diff --git a/aws/resource_aws_docdb_cluster.go b/aws/resource_aws_docdb_cluster.go index d6bc5b0bfd9..9170efd0acf 100644 --- a/aws/resource_aws_docdb_cluster.go +++ b/aws/resource_aws_docdb_cluster.go @@ -288,7 +288,7 @@ func resourceAwsDocDBClusterCreate(d *schema.ResourceData, meta interface{}) err } if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 { - opts.AvailabilityZones = expandStringList(attr.List()) + opts.AvailabilityZones = expandStringSet(attr) } if attr, ok := d.GetOk("backup_retention_period"); ok { @@ -332,7 +332,7 @@ func resourceAwsDocDBClusterCreate(d *schema.ResourceData, meta interface{}) err } if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - opts.VpcSecurityGroupIds = expandStringList(attr.List()) + opts.VpcSecurityGroupIds = expandStringSet(attr) } log.Printf("[DEBUG] DocDB Cluster restore from snapshot configuration: %s", opts) @@ -387,11 +387,11 @@ func resourceAwsDocDBClusterCreate(d *schema.ResourceData, meta interface{}) err } if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - createOpts.VpcSecurityGroupIds = expandStringList(attr.List()) + createOpts.VpcSecurityGroupIds = expandStringSet(attr) } if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 { - createOpts.AvailabilityZones = expandStringList(attr.List()) + createOpts.AvailabilityZones = expandStringSet(attr) } if v, ok := d.GetOk("backup_retention_period"); ok { @@ -602,7 +602,7 @@ func resourceAwsDocDBClusterUpdate(d *schema.ResourceData, meta interface{}) err if d.HasChange("vpc_security_group_ids") { if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - req.VpcSecurityGroupIds = expandStringList(attr.List()) + req.VpcSecurityGroupIds = expandStringSet(attr) } else { req.VpcSecurityGroupIds = []*string{} } diff --git a/aws/resource_aws_ec2_client_vpn_endpoint.go b/aws/resource_aws_ec2_client_vpn_endpoint.go index b9124f053b0..c4a731f7b6b 100644 --- a/aws/resource_aws_ec2_client_vpn_endpoint.go +++ b/aws/resource_aws_ec2_client_vpn_endpoint.go @@ -150,7 +150,7 @@ func resourceAwsEc2ClientVpnEndpointCreate(d *schema.ResourceData, meta interfac } if v, ok := d.GetOk("dns_servers"); ok { - req.DnsServers = expandStringList(v.(*schema.Set).List()) + req.DnsServers = expandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("authentication_options"); ok { @@ -288,7 +288,7 @@ func resourceAwsEc2ClientVpnEndpointUpdate(d *schema.ResourceData, meta interfac } if d.HasChange("dns_servers") { - dnsValue := expandStringList(d.Get("dns_servers").(*schema.Set).List()) + dnsValue := expandStringSet(d.Get("dns_servers").(*schema.Set)) var enabledValue *bool if len(dnsValue) > 0 { diff --git a/aws/resource_aws_ec2_traffic_mirror_filter.go b/aws/resource_aws_ec2_traffic_mirror_filter.go index e08a31b35ab..fcf9a485924 100644 --- a/aws/resource_aws_ec2_traffic_mirror_filter.go +++ b/aws/resource_aws_ec2_traffic_mirror_filter.go @@ -86,14 +86,14 @@ func resourceAwsEc2TrafficMirrorFilterUpdate(d *schema.ResourceData, meta interf } o, n := d.GetChange("network_services") - newServices := n.(*schema.Set).Difference(o.(*schema.Set)).List() - if len(newServices) > 0 { - 
input.AddNetworkServices = expandStringList(newServices) + newServices := n.(*schema.Set).Difference(o.(*schema.Set)) + if newServices.Len() > 0 { + input.AddNetworkServices = expandStringSet(newServices) } - removeServices := o.(*schema.Set).Difference(n.(*schema.Set)).List() - if len(removeServices) > 0 { - input.RemoveNetworkServices = expandStringList(removeServices) + removeServices := o.(*schema.Set).Difference(n.(*schema.Set)) + if removeServices.Len() > 0 { + input.RemoveNetworkServices = expandStringSet(removeServices) } _, err := conn.ModifyTrafficMirrorFilterNetworkServices(input) diff --git a/aws/resource_aws_ecs_task_definition.go b/aws/resource_aws_ecs_task_definition.go index 3154458569c..a68ed716130 100644 --- a/aws/resource_aws_ecs_task_definition.go +++ b/aws/resource_aws_ecs_task_definition.go @@ -442,7 +442,7 @@ func resourceAwsEcsTaskDefinitionCreate(d *schema.ResourceData, meta interface{} } if v, ok := d.GetOk("requires_compatibilities"); ok && v.(*schema.Set).Len() > 0 { - input.RequiresCompatibilities = expandStringList(v.(*schema.Set).List()) + input.RequiresCompatibilities = expandStringSet(v.(*schema.Set)) } proxyConfigs := d.Get("proxy_configuration").([]interface{}) diff --git a/aws/resource_aws_elasticache_cluster.go b/aws/resource_aws_elasticache_cluster.go index a1b94ba0177..09d8e2ba2fd 100644 --- a/aws/resource_aws_elasticache_cluster.go +++ b/aws/resource_aws_elasticache_cluster.go @@ -312,8 +312,8 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{ } else { securityNameSet := d.Get("security_group_names").(*schema.Set) securityIdSet := d.Get("security_group_ids").(*schema.Set) - securityNames := expandStringList(securityNameSet.List()) - securityIds := expandStringList(securityIdSet.List()) + securityNames := expandStringSet(securityNameSet) + securityIds := expandStringSet(securityIdSet) tags := keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().ElasticacheTags() req.CacheSecurityGroupNames = securityNames @@ -370,9 +370,9 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{ req.NotificationTopicArn = aws.String(v.(string)) } - snaps := d.Get("snapshot_arns").(*schema.Set).List() - if len(snaps) > 0 { - s := expandStringList(snaps) + snaps := d.Get("snapshot_arns").(*schema.Set) + if snaps.Len() > 0 { + s := expandStringSet(snaps) req.SnapshotArns = s log.Printf("[DEBUG] Restoring Redis cluster from S3 snapshot: %#v", s) } @@ -515,7 +515,7 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{ requestUpdate := false if d.HasChange("security_group_ids") { if attr := d.Get("security_group_ids").(*schema.Set); attr.Len() > 0 { - req.SecurityGroupIds = expandStringList(attr.List()) + req.SecurityGroupIds = expandStringSet(attr) requestUpdate = true } } diff --git a/aws/resource_aws_elasticache_subnet_group.go b/aws/resource_aws_elasticache_subnet_group.go index c4f20a16c2e..bbce175e125 100644 --- a/aws/resource_aws_elasticache_subnet_group.go +++ b/aws/resource_aws_elasticache_subnet_group.go @@ -60,7 +60,7 @@ func resourceAwsElasticacheSubnetGroupCreate(d *schema.ResourceData, meta interf log.Printf("[DEBUG] Cache subnet group create: name: %s, description: %s", name, desc) - subnetIds := expandStringList(subnetIdsSet.List()) + subnetIds := expandStringSet(subnetIdsSet) req := &elasticache.CreateCacheSubnetGroupInput{ CacheSubnetGroupDescription: aws.String(desc), diff --git a/aws/resource_aws_elb.go b/aws/resource_aws_elb.go index 
d01c84c3b32..7ac1e208847 100644 --- a/aws/resource_aws_elb.go +++ b/aws/resource_aws_elb.go @@ -290,15 +290,15 @@ func resourceAwsElbCreate(d *schema.ResourceData, meta interface{}) error { } if v, ok := d.GetOk("availability_zones"); ok { - elbOpts.AvailabilityZones = expandStringList(v.(*schema.Set).List()) + elbOpts.AvailabilityZones = expandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("security_groups"); ok { - elbOpts.SecurityGroups = expandStringList(v.(*schema.Set).List()) + elbOpts.SecurityGroups = expandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("subnets"); ok { - elbOpts.Subnets = expandStringList(v.(*schema.Set).List()) + elbOpts.Subnets = expandStringSet(v.(*schema.Set)) } log.Printf("[DEBUG] ELB create configuration: %#v", elbOpts) @@ -669,11 +669,9 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error { } if d.HasChange("security_groups") { - groups := d.Get("security_groups").(*schema.Set).List() - applySecurityGroupsOpts := elb.ApplySecurityGroupsToLoadBalancerInput{ LoadBalancerName: aws.String(d.Id()), - SecurityGroups: expandStringList(groups), + SecurityGroups: expandStringSet(d.Get("security_groups").(*schema.Set)), } _, err := elbconn.ApplySecurityGroupsToLoadBalancer(&applySecurityGroupsOpts) @@ -687,8 +685,8 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error { os := o.(*schema.Set) ns := n.(*schema.Set) - removed := expandStringList(os.Difference(ns).List()) - added := expandStringList(ns.Difference(os).List()) + removed := expandStringSet(os.Difference(ns)) + added := expandStringSet(ns.Difference(os)) if len(added) > 0 { enableOpts := &elb.EnableAvailabilityZonesForLoadBalancerInput{ @@ -722,8 +720,8 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error { os := o.(*schema.Set) ns := n.(*schema.Set) - removed := expandStringList(os.Difference(ns).List()) - added := expandStringList(ns.Difference(os).List()) + removed := expandStringSet(os.Difference(ns)) + added := expandStringSet(ns.Difference(os)) if len(removed) > 0 { detachOpts := &elb.DetachLoadBalancerFromSubnetsInput{ diff --git a/aws/resource_aws_fms_policy.go b/aws/resource_aws_fms_policy.go index fccddfa43b4..a8b88a6768c 100644 --- a/aws/resource_aws_fms_policy.go +++ b/aws/resource_aws_fms_policy.go @@ -142,7 +142,7 @@ func resourceAwsFmsPolicyCreate(d *schema.ResourceData, meta interface{}) error PolicyName: aws.String(d.Get("name").(string)), RemediationEnabled: aws.Bool(d.Get("remediation_enabled").(bool)), ResourceType: aws.String("ResourceTypeList"), - ResourceTypeList: expandStringList(d.Get("resource_type_list").(*schema.Set).List()), + ResourceTypeList: expandStringSet(d.Get("resource_type_list").(*schema.Set)), ExcludeResourceTags: aws.Bool(d.Get("exclude_resource_tags").(bool)), } @@ -240,7 +240,7 @@ func resourceAwsFmsPolicyUpdate(d *schema.ResourceData, meta interface{}) error PolicyUpdateToken: aws.String(d.Get("policy_update_token").(string)), RemediationEnabled: aws.Bool(d.Get("remediation_enabled").(bool)), ResourceType: aws.String("ResourceTypeList"), - ResourceTypeList: expandStringList(d.Get("resource_type_list").(*schema.Set).List()), + ResourceTypeList: expandStringSet(d.Get("resource_type_list").(*schema.Set)), ExcludeResourceTags: aws.Bool(d.Get("exclude_resource_tags").(bool)), } From bdd1437e837c5db6642d48f52efeb16833e785b9 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 11 Jan 2021 17:39:40 -0800 Subject: [PATCH 0545/1212] Replaces expandStringList(x.List()) with expandStringSet(x) --- 
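Note (kept below the --- fold so `git am` drops it from the commit message): every hunk in this patch is the same mechanical substitution, replacing expandStringList(set.List()) with the equivalent expandStringSet(set). A minimal sketch of the helper pair, with signatures assumed from the call sites in this series (the canonical definitions live in aws/structure.go):

    package aws

    import (
    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    )

    // expandStringList converts a []interface{} of strings (e.g. the result
    // of schema.Set.List or a schema.TypeList attribute) into the []*string
    // shape the AWS SDK expects, skipping empty values.
    func expandStringList(configured []interface{}) []*string {
    	vs := make([]*string, 0, len(configured))
    	for _, v := range configured {
    		if val, ok := v.(string); ok && val != "" {
    			vs = append(vs, aws.String(val))
    		}
    	}
    	return vs
    }

    // expandStringSet is the set-typed convenience wrapper: it folds the
    // repetitive .List() conversion into the helper itself, so call sites
    // can pass a *schema.Set directly.
    func expandStringSet(configured *schema.Set) []*string {
    	return expandStringList(configured.List())
    }

Behavior is unchanged at every call site; the wrapper only removes the intermediate .List() call and the untyped []interface{} value it produced.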
aws/resource_aws_iam_group_membership.go | 8 +++---- aws/resource_aws_iam_policy_attachment.go | 24 +++++++++---------- aws/resource_aws_iam_user_group_membership.go | 8 +++---- aws/resource_aws_kinesis_stream.go | 10 ++------ aws/resource_aws_lambda_layer_version.go | 2 +- aws/resource_aws_launch_configuration.go | 8 ++----- aws/resource_aws_launch_template.go | 4 ++-- aws/resource_aws_lb.go | 8 +++---- aws/resource_aws_lex_intent.go | 4 ++-- ...aws_load_balancer_backend_server_policy.go | 2 +- ...ource_aws_load_balancer_listener_policy.go | 2 +- aws/resource_aws_mq_broker.go | 2 +- aws/resource_aws_neptune_cluster.go | 18 +++++++------- ...resource_aws_neptune_event_subscription.go | 4 ++-- aws/resource_aws_rds_cluster.go | 16 ++++++------- aws/resource_aws_rds_cluster_endpoint.go | 4 ++-- aws/resource_aws_redshift_cluster.go | 24 +++++++++---------- ...resource_aws_redshift_snapshot_schedule.go | 2 +- aws/resource_aws_route53_health_check.go | 8 +++---- aws/resource_aws_s3_bucket_inventory.go | 2 +- ...ource_aws_secretsmanager_secret_version.go | 2 +- aws/resource_aws_ses_event_destination.go | 4 ++-- aws/resource_aws_ses_receipt_rule.go | 2 +- aws/resource_aws_ssm_patch_baseline.go | 8 +++---- aws/resource_aws_vpc_endpoint.go | 14 +++++------ aws/resource_aws_vpc_endpoint_service.go | 4 ++-- ...aws_wafv2_web_acl_logging_configuration.go | 2 +- 27 files changed, 93 insertions(+), 103 deletions(-) diff --git a/aws/resource_aws_iam_group_membership.go b/aws/resource_aws_iam_group_membership.go index 1d69c710bf4..47a46f25a7a 100644 --- a/aws/resource_aws_iam_group_membership.go +++ b/aws/resource_aws_iam_group_membership.go @@ -43,7 +43,7 @@ func resourceAwsIamGroupMembershipCreate(d *schema.ResourceData, meta interface{ conn := meta.(*AWSClient).iamconn group := d.Get("group").(string) - userList := expandStringList(d.Get("users").(*schema.Set).List()) + userList := expandStringSet(d.Get("users").(*schema.Set)) if err := addUsersToGroup(conn, userList, group); err != nil { return err @@ -110,8 +110,8 @@ func resourceAwsIamGroupMembershipUpdate(d *schema.ResourceData, meta interface{ os := o.(*schema.Set) ns := n.(*schema.Set) - remove := expandStringList(os.Difference(ns).List()) - add := expandStringList(ns.Difference(os).List()) + remove := expandStringSet(os.Difference(ns)) + add := expandStringSet(ns.Difference(os)) if err := removeUsersFromGroup(conn, remove, group); err != nil { return err @@ -127,7 +127,7 @@ func resourceAwsIamGroupMembershipUpdate(d *schema.ResourceData, meta interface{ func resourceAwsIamGroupMembershipDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).iamconn - userList := expandStringList(d.Get("users").(*schema.Set).List()) + userList := expandStringSet(d.Get("users").(*schema.Set)) group := d.Get("group").(string) err := removeUsersFromGroup(conn, userList, group) diff --git a/aws/resource_aws_iam_policy_attachment.go b/aws/resource_aws_iam_policy_attachment.go index c63b0db4b76..0f3521dd7c2 100644 --- a/aws/resource_aws_iam_policy_attachment.go +++ b/aws/resource_aws_iam_policy_attachment.go @@ -57,9 +57,9 @@ func resourceAwsIamPolicyAttachmentCreate(d *schema.ResourceData, meta interface name := d.Get("name").(string) arn := d.Get("policy_arn").(string) - users := expandStringList(d.Get("users").(*schema.Set).List()) - roles := expandStringList(d.Get("roles").(*schema.Set).List()) - groups := expandStringList(d.Get("groups").(*schema.Set).List()) + users := expandStringSet(d.Get("users").(*schema.Set)) + roles := 
expandStringSet(d.Get("roles").(*schema.Set)) + groups := expandStringSet(d.Get("groups").(*schema.Set)) if len(users) == 0 && len(roles) == 0 && len(groups) == 0 { return fmt.Errorf("No Users, Roles, or Groups specified for IAM Policy Attachment %s", name) @@ -161,9 +161,9 @@ func resourceAwsIamPolicyAttachmentDelete(d *schema.ResourceData, meta interface conn := meta.(*AWSClient).iamconn name := d.Get("name").(string) arn := d.Get("policy_arn").(string) - users := expandStringList(d.Get("users").(*schema.Set).List()) - roles := expandStringList(d.Get("roles").(*schema.Set).List()) - groups := expandStringList(d.Get("groups").(*schema.Set).List()) + users := expandStringSet(d.Get("users").(*schema.Set)) + roles := expandStringSet(d.Get("roles").(*schema.Set)) + groups := expandStringSet(d.Get("groups").(*schema.Set)) var userErr, roleErr, groupErr error if len(users) != 0 { @@ -239,8 +239,8 @@ func updateUsers(conn *iam.IAM, d *schema.ResourceData) error { } os := o.(*schema.Set) ns := n.(*schema.Set) - remove := expandStringList(os.Difference(ns).List()) - add := expandStringList(ns.Difference(os).List()) + remove := expandStringSet(os.Difference(ns)) + add := expandStringSet(ns.Difference(os)) if rErr := detachPolicyFromUsers(conn, remove, arn); rErr != nil { return rErr @@ -261,8 +261,8 @@ func updateRoles(conn *iam.IAM, d *schema.ResourceData) error { } os := o.(*schema.Set) ns := n.(*schema.Set) - remove := expandStringList(os.Difference(ns).List()) - add := expandStringList(ns.Difference(os).List()) + remove := expandStringSet(os.Difference(ns)) + add := expandStringSet(ns.Difference(os)) if rErr := detachPolicyFromRoles(conn, remove, arn); rErr != nil { return rErr @@ -283,8 +283,8 @@ func updateGroups(conn *iam.IAM, d *schema.ResourceData) error { } os := o.(*schema.Set) ns := n.(*schema.Set) - remove := expandStringList(os.Difference(ns).List()) - add := expandStringList(ns.Difference(os).List()) + remove := expandStringSet(os.Difference(ns)) + add := expandStringSet(ns.Difference(os)) if rErr := detachPolicyFromGroups(conn, remove, arn); rErr != nil { return rErr diff --git a/aws/resource_aws_iam_user_group_membership.go b/aws/resource_aws_iam_user_group_membership.go index 7f037231c07..e44631f826d 100644 --- a/aws/resource_aws_iam_user_group_membership.go +++ b/aws/resource_aws_iam_user_group_membership.go @@ -40,7 +40,7 @@ func resourceAwsIamUserGroupMembershipCreate(d *schema.ResourceData, meta interf conn := meta.(*AWSClient).iamconn user := d.Get("user").(string) - groupList := expandStringList(d.Get("groups").(*schema.Set).List()) + groupList := expandStringSet(d.Get("groups").(*schema.Set)) if err := addUserToGroups(conn, user, groupList); err != nil { return err @@ -112,8 +112,8 @@ func resourceAwsIamUserGroupMembershipUpdate(d *schema.ResourceData, meta interf os := o.(*schema.Set) ns := n.(*schema.Set) - remove := expandStringList(os.Difference(ns).List()) - add := expandStringList(ns.Difference(os).List()) + remove := expandStringSet(os.Difference(ns)) + add := expandStringSet(ns.Difference(os)) if err := removeUserFromGroups(conn, user, remove); err != nil { return err @@ -130,7 +130,7 @@ func resourceAwsIamUserGroupMembershipUpdate(d *schema.ResourceData, meta interf func resourceAwsIamUserGroupMembershipDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).iamconn user := d.Get("user").(string) - groups := expandStringList(d.Get("groups").(*schema.Set).List()) + groups := expandStringSet(d.Get("groups").(*schema.Set)) err := 
removeUserFromGroups(conn, user, groups) return err diff --git a/aws/resource_aws_kinesis_stream.go b/aws/resource_aws_kinesis_stream.go index 74ee6e4828c..298d837a121 100644 --- a/aws/resource_aws_kinesis_stream.go +++ b/aws/resource_aws_kinesis_stream.go @@ -399,12 +399,9 @@ func updateKinesisShardLevelMetrics(conn *kinesis.Kinesis, d *schema.ResourceDat disableMetrics := os.Difference(ns) if disableMetrics.Len() != 0 { - metrics := disableMetrics.List() - log.Printf("[DEBUG] Disabling shard level metrics %v for stream %s", metrics, sn) - props := &kinesis.DisableEnhancedMonitoringInput{ StreamName: aws.String(sn), - ShardLevelMetrics: expandStringList(metrics), + ShardLevelMetrics: expandStringSet(disableMetrics), } _, err := conn.DisableEnhancedMonitoring(props) @@ -418,12 +415,9 @@ func updateKinesisShardLevelMetrics(conn *kinesis.Kinesis, d *schema.ResourceDat enabledMetrics := ns.Difference(os) if enabledMetrics.Len() != 0 { - metrics := enabledMetrics.List() - log.Printf("[DEBUG] Enabling shard level metrics %v for stream %s", metrics, sn) - props := &kinesis.EnableEnhancedMonitoringInput{ StreamName: aws.String(sn), - ShardLevelMetrics: expandStringList(metrics), + ShardLevelMetrics: expandStringSet(enabledMetrics), } _, err := conn.EnableEnhancedMonitoring(props) diff --git a/aws/resource_aws_lambda_layer_version.go b/aws/resource_aws_lambda_layer_version.go index a1714e9128f..433a119a722 100644 --- a/aws/resource_aws_lambda_layer_version.go +++ b/aws/resource_aws_lambda_layer_version.go @@ -162,7 +162,7 @@ func resourceAwsLambdaLayerVersionPublish(d *schema.ResourceData, meta interface } if v, ok := d.GetOk("compatible_runtimes"); ok && v.(*schema.Set).Len() > 0 { - params.CompatibleRuntimes = expandStringList(v.(*schema.Set).List()) + params.CompatibleRuntimes = expandStringSet(v.(*schema.Set)) } log.Printf("[DEBUG] Publishing Lambda layer: %s", params) diff --git a/aws/resource_aws_launch_configuration.go b/aws/resource_aws_launch_configuration.go index f9e4f2789c0..0740e9d1dad 100644 --- a/aws/resource_aws_launch_configuration.go +++ b/aws/resource_aws_launch_configuration.go @@ -373,9 +373,7 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface } if v, ok := d.GetOk("security_groups"); ok { - createLaunchConfigurationOpts.SecurityGroups = expandStringList( - v.(*schema.Set).List(), - ) + createLaunchConfigurationOpts.SecurityGroups = expandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("vpc_classic_link_id"); ok { @@ -387,9 +385,7 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface } if v, ok := d.GetOk("vpc_classic_link_security_groups"); ok { - createLaunchConfigurationOpts.ClassicLinkVPCSecurityGroups = expandStringList( - v.(*schema.Set).List(), - ) + createLaunchConfigurationOpts.ClassicLinkVPCSecurityGroups = expandStringSet(v.(*schema.Set)) } var blockDevices []*autoscaling.BlockDeviceMapping diff --git a/aws/resource_aws_launch_template.go b/aws/resource_aws_launch_template.go index 684f22bdc95..485fa30caa5 100644 --- a/aws/resource_aws_launch_template.go +++ b/aws/resource_aws_launch_template.go @@ -1323,11 +1323,11 @@ func buildLaunchTemplateData(d *schema.ResourceData) (*ec2.RequestLaunchTemplate } if v, ok := d.GetOk("security_group_names"); ok { - opts.SecurityGroups = expandStringList(v.(*schema.Set).List()) + opts.SecurityGroups = expandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("vpc_security_group_ids"); ok { - opts.SecurityGroupIds = expandStringList(v.(*schema.Set).List()) + 
opts.SecurityGroupIds = expandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("block_device_mappings"); ok { diff --git a/aws/resource_aws_lb.go b/aws/resource_aws_lb.go index 9208c2c2fc3..9084b7f555d 100644 --- a/aws/resource_aws_lb.go +++ b/aws/resource_aws_lb.go @@ -274,11 +274,11 @@ func resourceAwsLbCreate(d *schema.ResourceData, meta interface{}) error { } if v, ok := d.GetOk("security_groups"); ok { - elbOpts.SecurityGroups = expandStringList(v.(*schema.Set).List()) + elbOpts.SecurityGroups = expandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("subnets"); ok { - elbOpts.Subnets = expandStringList(v.(*schema.Set).List()) + elbOpts.Subnets = expandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("subnet_mapping"); ok { @@ -455,7 +455,7 @@ func resourceAwsLbUpdate(d *schema.ResourceData, meta interface{}) error { } if d.HasChange("security_groups") { - sgs := expandStringList(d.Get("security_groups").(*schema.Set).List()) + sgs := expandStringSet(d.Get("security_groups").(*schema.Set)) params := &elbv2.SetSecurityGroupsInput{ LoadBalancerArn: aws.String(d.Id()), @@ -473,7 +473,7 @@ func resourceAwsLbUpdate(d *schema.ResourceData, meta interface{}) error { // resource is just created, so we don't attempt if it is a newly created // resource. if d.HasChange("subnets") && !d.IsNewResource() { - subnets := expandStringList(d.Get("subnets").(*schema.Set).List()) + subnets := expandStringSet(d.Get("subnets").(*schema.Set)) params := &elbv2.SetSubnetsInput{ LoadBalancerArn: aws.String(d.Id()), diff --git a/aws/resource_aws_lex_intent.go b/aws/resource_aws_lex_intent.go index 4b3b07f3500..ae7cc5117a7 100644 --- a/aws/resource_aws_lex_intent.go +++ b/aws/resource_aws_lex_intent.go @@ -289,7 +289,7 @@ func resourceAwsLexIntentCreate(d *schema.ResourceData, meta interface{}) error } if v, ok := d.GetOk("sample_utterances"); ok { - input.SampleUtterances = expandStringList(v.(*schema.Set).List()) + input.SampleUtterances = expandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("slot"); ok { @@ -440,7 +440,7 @@ func resourceAwsLexIntentUpdate(d *schema.ResourceData, meta interface{}) error } if v, ok := d.GetOk("sample_utterances"); ok { - input.SampleUtterances = expandStringList(v.(*schema.Set).List()) + input.SampleUtterances = expandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("slot"); ok { diff --git a/aws/resource_aws_load_balancer_backend_server_policy.go b/aws/resource_aws_load_balancer_backend_server_policy.go index 41d9c573def..5a6dd5ddd07 100644 --- a/aws/resource_aws_load_balancer_backend_server_policy.go +++ b/aws/resource_aws_load_balancer_backend_server_policy.go @@ -46,7 +46,7 @@ func resourceAwsLoadBalancerBackendServerPoliciesCreate(d *schema.ResourceData, policyNames := []*string{} if v, ok := d.GetOk("policy_names"); ok { - policyNames = expandStringList(v.(*schema.Set).List()) + policyNames = expandStringSet(v.(*schema.Set)) } setOpts := &elb.SetLoadBalancerPoliciesForBackendServerInput{ diff --git a/aws/resource_aws_load_balancer_listener_policy.go b/aws/resource_aws_load_balancer_listener_policy.go index b7979a8eede..466c93648f6 100644 --- a/aws/resource_aws_load_balancer_listener_policy.go +++ b/aws/resource_aws_load_balancer_listener_policy.go @@ -46,7 +46,7 @@ func resourceAwsLoadBalancerListenerPoliciesCreate(d *schema.ResourceData, meta policyNames := []*string{} if v, ok := d.GetOk("policy_names"); ok { - policyNames = expandStringList(v.(*schema.Set).List()) + policyNames = expandStringSet(v.(*schema.Set)) } setOpts := 
&elb.SetLoadBalancerPoliciesOfListenerInput{ diff --git a/aws/resource_aws_mq_broker.go b/aws/resource_aws_mq_broker.go index 2b7a8fc68a8..4a4d3938a37 100644 --- a/aws/resource_aws_mq_broker.go +++ b/aws/resource_aws_mq_broker.go @@ -286,7 +286,7 @@ func resourceAwsMqBrokerCreate(d *schema.ResourceData, meta interface{}) error { input.MaintenanceWindowStartTime = expandMqWeeklyStartTime(v.([]interface{})) } if v, ok := d.GetOk("subnet_ids"); ok { - input.SubnetIds = expandStringList(v.(*schema.Set).List()) + input.SubnetIds = expandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("tags"); ok { input.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().MqTags() diff --git a/aws/resource_aws_neptune_cluster.go b/aws/resource_aws_neptune_cluster.go index 9563277e3b8..3272ff27702 100644 --- a/aws/resource_aws_neptune_cluster.go +++ b/aws/resource_aws_neptune_cluster.go @@ -304,8 +304,8 @@ func resourceAwsNeptuneClusterCreate(d *schema.ResourceData, meta interface{}) e } if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 { - createDbClusterInput.AvailabilityZones = expandStringList(attr.List()) - restoreDBClusterFromSnapshotInput.AvailabilityZones = expandStringList(attr.List()) + createDbClusterInput.AvailabilityZones = expandStringSet(attr) + restoreDBClusterFromSnapshotInput.AvailabilityZones = expandStringSet(attr) } if attr, ok := d.GetOk("backup_retention_period"); ok { @@ -316,8 +316,8 @@ func resourceAwsNeptuneClusterCreate(d *schema.ResourceData, meta interface{}) e } if attr := d.Get("enable_cloudwatch_logs_exports").(*schema.Set); attr.Len() > 0 { - createDbClusterInput.EnableCloudwatchLogsExports = expandStringList(attr.List()) - restoreDBClusterFromSnapshotInput.EnableCloudwatchLogsExports = expandStringList(attr.List()) + createDbClusterInput.EnableCloudwatchLogsExports = expandStringSet(attr) + restoreDBClusterFromSnapshotInput.EnableCloudwatchLogsExports = expandStringSet(attr) } if attr, ok := d.GetOk("engine_version"); ok { @@ -359,11 +359,11 @@ func resourceAwsNeptuneClusterCreate(d *schema.ResourceData, meta interface{}) e } if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - createDbClusterInput.VpcSecurityGroupIds = expandStringList(attr.List()) + createDbClusterInput.VpcSecurityGroupIds = expandStringSet(attr) if restoreDBClusterFromSnapshot { clusterUpdate = true } - restoreDBClusterFromSnapshotInput.VpcSecurityGroupIds = expandStringList(attr.List()) + restoreDBClusterFromSnapshotInput.VpcSecurityGroupIds = expandStringSet(attr) } if restoreDBClusterFromSnapshot { @@ -552,7 +552,7 @@ func resourceAwsNeptuneClusterUpdate(d *schema.ResourceData, meta interface{}) e if d.HasChange("vpc_security_group_ids") { if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - req.VpcSecurityGroupIds = expandStringList(attr.List()) + req.VpcSecurityGroupIds = expandStringSet(attr) } else { req.VpcSecurityGroupIds = []*string{} } @@ -567,13 +567,13 @@ func resourceAwsNeptuneClusterUpdate(d *schema.ResourceData, meta interface{}) e disableLogTypes := old.(*schema.Set).Difference(new.(*schema.Set)) if disableLogTypes.Len() > 0 { - logs.SetDisableLogTypes(expandStringList(disableLogTypes.List())) + logs.SetDisableLogTypes(expandStringSet(disableLogTypes)) } enableLogTypes := new.(*schema.Set).Difference(old.(*schema.Set)) if enableLogTypes.Len() > 0 { - logs.SetEnableLogTypes(expandStringList(enableLogTypes.List())) + logs.SetEnableLogTypes(expandStringSet(enableLogTypes)) } req.CloudwatchLogsExportConfiguration = 
logs diff --git a/aws/resource_aws_neptune_event_subscription.go b/aws/resource_aws_neptune_event_subscription.go index ad370224add..4de331447b7 100644 --- a/aws/resource_aws_neptune_event_subscription.go +++ b/aws/resource_aws_neptune_event_subscription.go @@ -283,8 +283,8 @@ func resourceAwsNeptuneEventSubscriptionUpdate(d *schema.ResourceData, meta inte os := o.(*schema.Set) ns := n.(*schema.Set) - remove := expandStringList(os.Difference(ns).List()) - add := expandStringList(ns.Difference(os).List()) + remove := expandStringSet(os.Difference(ns)) + add := expandStringSet(ns.Difference(os)) if len(remove) > 0 { for _, removing := range remove { diff --git a/aws/resource_aws_rds_cluster.go b/aws/resource_aws_rds_cluster.go index 29ed4c883b0..0c31295da32 100644 --- a/aws/resource_aws_rds_cluster.go +++ b/aws/resource_aws_rds_cluster.go @@ -505,7 +505,7 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error } if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 { - opts.AvailabilityZones = expandStringList(attr.List()) + opts.AvailabilityZones = expandStringSet(attr) } if v, ok := d.GetOk("backtrack_window"); ok { @@ -565,7 +565,7 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error } if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - opts.VpcSecurityGroupIds = expandStringList(attr.List()) + opts.VpcSecurityGroupIds = expandStringSet(attr) } log.Printf("[DEBUG] RDS Cluster restore from snapshot configuration: %s", opts) @@ -633,11 +633,11 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error } if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - createOpts.VpcSecurityGroupIds = expandStringList(attr.List()) + createOpts.VpcSecurityGroupIds = expandStringSet(attr) } if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 { - createOpts.AvailabilityZones = expandStringList(attr.List()) + createOpts.AvailabilityZones = expandStringSet(attr) } if v, ok := d.GetOk("backup_retention_period"); ok { @@ -743,7 +743,7 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error } if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - createOpts.VpcSecurityGroupIds = expandStringList(attr.List()) + createOpts.VpcSecurityGroupIds = expandStringSet(attr) } if attr, ok := d.GetOk("kms_key_id"); ok { @@ -854,11 +854,11 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error } if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - createOpts.VpcSecurityGroupIds = expandStringList(attr.List()) + createOpts.VpcSecurityGroupIds = expandStringSet(attr) } if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 { - createOpts.AvailabilityZones = expandStringList(attr.List()) + createOpts.AvailabilityZones = expandStringSet(attr) } if v, ok := d.GetOk("backup_retention_period"); ok { @@ -1146,7 +1146,7 @@ func resourceAwsRDSClusterUpdate(d *schema.ResourceData, meta interface{}) error if d.HasChange("vpc_security_group_ids") { if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - req.VpcSecurityGroupIds = expandStringList(attr.List()) + req.VpcSecurityGroupIds = expandStringSet(attr) } else { req.VpcSecurityGroupIds = []*string{} } diff --git a/aws/resource_aws_rds_cluster_endpoint.go b/aws/resource_aws_rds_cluster_endpoint.go index 6e830248705..72de494a8a8 100644 --- a/aws/resource_aws_rds_cluster_endpoint.go +++ 
b/aws/resource_aws_rds_cluster_endpoint.go @@ -192,13 +192,13 @@ func resourceAwsRDSClusterEndpointUpdate(d *schema.ResourceData, meta interface{ } if attr := d.Get("excluded_members").(*schema.Set); attr.Len() > 0 { - input.ExcludedMembers = expandStringList(attr.List()) + input.ExcludedMembers = expandStringSet(attr) } else { input.ExcludedMembers = make([]*string, 0) } if attr := d.Get("static_members").(*schema.Set); attr.Len() > 0 { - input.StaticMembers = expandStringList(attr.List()) + input.StaticMembers = expandStringSet(attr) } else { input.StaticMembers = make([]*string, 0) } diff --git a/aws/resource_aws_redshift_cluster.go b/aws/resource_aws_redshift_cluster.go index a8f9ac0f874..e9fe42c035a 100644 --- a/aws/resource_aws_redshift_cluster.go +++ b/aws/resource_aws_redshift_cluster.go @@ -370,11 +370,11 @@ func resourceAwsRedshiftClusterCreate(d *schema.ResourceData, meta interface{}) } if v := d.Get("cluster_security_groups").(*schema.Set); v.Len() > 0 { - restoreOpts.ClusterSecurityGroups = expandStringList(v.List()) + restoreOpts.ClusterSecurityGroups = expandStringSet(v) } if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 { - restoreOpts.VpcSecurityGroupIds = expandStringList(v.List()) + restoreOpts.VpcSecurityGroupIds = expandStringSet(v) } if v, ok := d.GetOk("preferred_maintenance_window"); ok { @@ -394,7 +394,7 @@ func resourceAwsRedshiftClusterCreate(d *schema.ResourceData, meta interface{}) } if v, ok := d.GetOk("iam_roles"); ok { - restoreOpts.IamRoles = expandStringList(v.(*schema.Set).List()) + restoreOpts.IamRoles = expandStringSet(v.(*schema.Set)) } log.Printf("[DEBUG] Redshift Cluster restore cluster options: %s", restoreOpts) @@ -438,11 +438,11 @@ func resourceAwsRedshiftClusterCreate(d *schema.ResourceData, meta interface{}) } if v := d.Get("cluster_security_groups").(*schema.Set); v.Len() > 0 { - createOpts.ClusterSecurityGroups = expandStringList(v.List()) + createOpts.ClusterSecurityGroups = expandStringSet(v) } if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 { - createOpts.VpcSecurityGroupIds = expandStringList(v.List()) + createOpts.VpcSecurityGroupIds = expandStringSet(v) } if v, ok := d.GetOk("cluster_subnet_group_name"); ok { @@ -478,7 +478,7 @@ func resourceAwsRedshiftClusterCreate(d *schema.ResourceData, meta interface{}) } if v, ok := d.GetOk("iam_roles"); ok { - createOpts.IamRoles = expandStringList(v.(*schema.Set).List()) + createOpts.IamRoles = expandStringSet(v.(*schema.Set)) } log.Printf("[DEBUG] Redshift Cluster create options: %s", createOpts) @@ -675,12 +675,12 @@ func resourceAwsRedshiftClusterUpdate(d *schema.ResourceData, meta interface{}) } if d.HasChange("cluster_security_groups") { - req.ClusterSecurityGroups = expandStringList(d.Get("cluster_security_groups").(*schema.Set).List()) + req.ClusterSecurityGroups = expandStringSet(d.Get("cluster_security_groups").(*schema.Set)) requestUpdate = true } if d.HasChange("vpc_security_group_ids") { - req.VpcSecurityGroupIds = expandStringList(d.Get("vpc_security_group_ids").(*schema.Set).List()) + req.VpcSecurityGroupIds = expandStringSet(d.Get("vpc_security_group_ids").(*schema.Set)) requestUpdate = true } @@ -755,14 +755,14 @@ func resourceAwsRedshiftClusterUpdate(d *schema.ResourceData, meta interface{}) os := o.(*schema.Set) ns := n.(*schema.Set) - removeIams := os.Difference(ns).List() - addIams := ns.Difference(os).List() + removeIams := os.Difference(ns) + addIams := ns.Difference(os) log.Printf("[INFO] Building Redshift Modify Cluster IAM Role 
Options") req := &redshift.ModifyClusterIamRolesInput{ ClusterIdentifier: aws.String(d.Id()), - AddIamRoles: expandStringList(addIams), - RemoveIamRoles: expandStringList(removeIams), + AddIamRoles: expandStringSet(addIams), + RemoveIamRoles: expandStringSet(removeIams), } log.Printf("[INFO] Modifying Redshift Cluster IAM Roles: %s", d.Id()) diff --git a/aws/resource_aws_redshift_snapshot_schedule.go b/aws/resource_aws_redshift_snapshot_schedule.go index 8d9c9eb26c1..f4c7bb9c028 100644 --- a/aws/resource_aws_redshift_snapshot_schedule.go +++ b/aws/resource_aws_redshift_snapshot_schedule.go @@ -152,7 +152,7 @@ func resourceAwsRedshiftSnapshotScheduleUpdate(d *schema.ResourceData, meta inte if d.HasChange("definitions") { modifyOpts := &redshift.ModifySnapshotScheduleInput{ ScheduleIdentifier: aws.String(d.Id()), - ScheduleDefinitions: expandStringList(d.Get("definitions").(*schema.Set).List()), + ScheduleDefinitions: expandStringSet(d.Get("definitions").(*schema.Set)), } _, err := conn.ModifySnapshotSchedule(modifyOpts) if isAWSErr(err, redshift.ErrCodeSnapshotScheduleNotFoundFault, "") { diff --git a/aws/resource_aws_route53_health_check.go b/aws/resource_aws_route53_health_check.go index 4ecf1323ccf..44b0c32adef 100644 --- a/aws/resource_aws_route53_health_check.go +++ b/aws/resource_aws_route53_health_check.go @@ -186,7 +186,7 @@ func resourceAwsRoute53HealthCheckUpdate(d *schema.ResourceData, meta interface{ } if d.HasChange("child_healthchecks") { - updateHealthCheck.ChildHealthChecks = expandStringList(d.Get("child_healthchecks").(*schema.Set).List()) + updateHealthCheck.ChildHealthChecks = expandStringSet(d.Get("child_healthchecks").(*schema.Set)) } if d.HasChange("child_health_threshold") { @@ -215,7 +215,7 @@ func resourceAwsRoute53HealthCheckUpdate(d *schema.ResourceData, meta interface{ } if d.HasChange("regions") { - updateHealthCheck.Regions = expandStringList(d.Get("regions").(*schema.Set).List()) + updateHealthCheck.Regions = expandStringSet(d.Get("regions").(*schema.Set)) } if d.HasChange("disabled") { @@ -289,7 +289,7 @@ func resourceAwsRoute53HealthCheckCreate(d *schema.ResourceData, meta interface{ if *healthConfig.Type == route53.HealthCheckTypeCalculated { if v, ok := d.GetOk("child_healthchecks"); ok { - healthConfig.ChildHealthChecks = expandStringList(v.(*schema.Set).List()) + healthConfig.ChildHealthChecks = expandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("child_health_threshold"); ok { @@ -316,7 +316,7 @@ func resourceAwsRoute53HealthCheckCreate(d *schema.ResourceData, meta interface{ } if v, ok := d.GetOk("regions"); ok { - healthConfig.Regions = expandStringList(v.(*schema.Set).List()) + healthConfig.Regions = expandStringSet(v.(*schema.Set)) } callerRef := resource.UniqueId() diff --git a/aws/resource_aws_s3_bucket_inventory.go b/aws/resource_aws_s3_bucket_inventory.go index 0eec3467790..1ebf89b8bfc 100644 --- a/aws/resource_aws_s3_bucket_inventory.go +++ b/aws/resource_aws_s3_bucket_inventory.go @@ -197,7 +197,7 @@ func resourceAwsS3BucketInventoryPut(d *schema.ResourceData, meta interface{}) e } if v, ok := d.GetOk("optional_fields"); ok { - inventoryConfiguration.OptionalFields = expandStringList(v.(*schema.Set).List()) + inventoryConfiguration.OptionalFields = expandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("schedule"); ok { diff --git a/aws/resource_aws_secretsmanager_secret_version.go b/aws/resource_aws_secretsmanager_secret_version.go index b6f68757968..f1f100fb873 100644 --- a/aws/resource_aws_secretsmanager_secret_version.go +++ 
b/aws/resource_aws_secretsmanager_secret_version.go @@ -87,7 +87,7 @@ func resourceAwsSecretsManagerSecretVersionCreate(d *schema.ResourceData, meta i } if v, ok := d.GetOk("version_stages"); ok { - input.VersionStages = expandStringList(v.(*schema.Set).List()) + input.VersionStages = expandStringSet(v.(*schema.Set)) } log.Printf("[DEBUG] Putting Secrets Manager Secret %q value", secretID) diff --git a/aws/resource_aws_ses_event_destination.go b/aws/resource_aws_ses_event_destination.go index 83dd660a7b3..619d6089c8a 100644 --- a/aws/resource_aws_ses_event_destination.go +++ b/aws/resource_aws_ses_event_destination.go @@ -136,14 +136,14 @@ func resourceAwsSesEventDestinationCreate(d *schema.ResourceData, meta interface configurationSetName := d.Get("configuration_set_name").(string) eventDestinationName := d.Get("name").(string) enabled := d.Get("enabled").(bool) - matchingEventTypes := d.Get("matching_types").(*schema.Set).List() + matchingEventTypes := d.Get("matching_types").(*schema.Set) createOpts := &ses.CreateConfigurationSetEventDestinationInput{ ConfigurationSetName: aws.String(configurationSetName), EventDestination: &ses.EventDestination{ Name: aws.String(eventDestinationName), Enabled: aws.Bool(enabled), - MatchingEventTypes: expandStringList(matchingEventTypes), + MatchingEventTypes: expandStringSet(matchingEventTypes), }, } diff --git a/aws/resource_aws_ses_receipt_rule.go b/aws/resource_aws_ses_receipt_rule.go index 0614ce3e547..653c403699f 100644 --- a/aws/resource_aws_ses_receipt_rule.go +++ b/aws/resource_aws_ses_receipt_rule.go @@ -626,7 +626,7 @@ func buildReceiptRule(d *schema.ResourceData) *ses.ReceiptRule { } if v, ok := d.GetOk("recipients"); ok { - receiptRule.Recipients = expandStringList(v.(*schema.Set).List()) + receiptRule.Recipients = expandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("scan_enabled"); ok { diff --git a/aws/resource_aws_ssm_patch_baseline.go b/aws/resource_aws_ssm_patch_baseline.go index f11a9019f59..fe64e3286df 100644 --- a/aws/resource_aws_ssm_patch_baseline.go +++ b/aws/resource_aws_ssm_patch_baseline.go @@ -147,11 +147,11 @@ func resourceAwsSsmPatchBaselineCreate(d *schema.ResourceData, meta interface{}) } if v, ok := d.GetOk("approved_patches"); ok && v.(*schema.Set).Len() > 0 { - params.ApprovedPatches = expandStringList(v.(*schema.Set).List()) + params.ApprovedPatches = expandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("rejected_patches"); ok && v.(*schema.Set).Len() > 0 { - params.RejectedPatches = expandStringList(v.(*schema.Set).List()) + params.RejectedPatches = expandStringSet(v.(*schema.Set)) } if _, ok := d.GetOk("global_filter"); ok { @@ -187,11 +187,11 @@ func resourceAwsSsmPatchBaselineUpdate(d *schema.ResourceData, meta interface{}) } if d.HasChange("approved_patches") { - params.ApprovedPatches = expandStringList(d.Get("approved_patches").(*schema.Set).List()) + params.ApprovedPatches = expandStringSet(d.Get("approved_patches").(*schema.Set)) } if d.HasChange("rejected_patches") { - params.RejectedPatches = expandStringList(d.Get("rejected_patches").(*schema.Set).List()) + params.RejectedPatches = expandStringSet(d.Get("rejected_patches").(*schema.Set)) } if d.HasChange("approved_patches_compliance_level") { diff --git a/aws/resource_aws_vpc_endpoint.go b/aws/resource_aws_vpc_endpoint.go index b99e610721a..0632a5fbee0 100644 --- a/aws/resource_aws_vpc_endpoint.go +++ b/aws/resource_aws_vpc_endpoint.go @@ -470,11 +470,11 @@ func vpcEndpointWaitUntilAvailable(conn *ec2.EC2, vpceId string, timeout time.Du return err } 
-func vpcEndpointWaitUntilDeleted(conn *ec2.EC2, vpceId string, timeout time.Duration) error { +func vpcEndpointWaitUntilDeleted(conn *ec2.EC2, vpceID string, timeout time.Duration) error { stateConf := &resource.StateChangeConf{ Pending: []string{"available", "pending", "deleting"}, Target: []string{"deleted"}, - Refresh: vpcEndpointStateRefresh(conn, vpceId), + Refresh: vpcEndpointStateRefresh(conn, vpceID), Timeout: timeout, Delay: 5 * time.Second, MinTimeout: 5 * time.Second, @@ -487,9 +487,9 @@ func vpcEndpointWaitUntilDeleted(conn *ec2.EC2, vpceId string, timeout time.Dura func setVpcEndpointCreateList(d *schema.ResourceData, key string, c *[]*string) { if v, ok := d.GetOk(key); ok { - list := v.(*schema.Set).List() - if len(list) > 0 { - *c = expandStringList(list) + list := v.(*schema.Set) + if list.Len() > 0 { + *c = expandStringSet(list) } } } @@ -500,12 +500,12 @@ func setVpcEndpointUpdateLists(d *schema.ResourceData, key string, a, r *[]*stri os := o.(*schema.Set) ns := n.(*schema.Set) - add := expandStringList(ns.Difference(os).List()) + add := expandStringSet(ns.Difference(os)) if len(add) > 0 { *a = add } - remove := expandStringList(os.Difference(ns).List()) + remove := expandStringSet(os.Difference(ns)) if len(remove) > 0 { *r = remove } diff --git a/aws/resource_aws_vpc_endpoint_service.go b/aws/resource_aws_vpc_endpoint_service.go index eead52efb51..0dbea69e416 100644 --- a/aws/resource_aws_vpc_endpoint_service.go +++ b/aws/resource_aws_vpc_endpoint_service.go @@ -420,12 +420,12 @@ func setVpcEndpointServiceUpdateLists(d *schema.ResourceData, key string, a, r * os := o.(*schema.Set) ns := n.(*schema.Set) - add := expandStringList(ns.Difference(os).List()) + add := expandStringSet(ns.Difference(os)) if len(add) > 0 { *a = add } - remove := expandStringList(os.Difference(ns).List()) + remove := expandStringSet(os.Difference(ns)) if len(remove) > 0 { *r = remove } diff --git a/aws/resource_aws_wafv2_web_acl_logging_configuration.go b/aws/resource_aws_wafv2_web_acl_logging_configuration.go index cd4fde07987..a1285ec62c5 100644 --- a/aws/resource_aws_wafv2_web_acl_logging_configuration.go +++ b/aws/resource_aws_wafv2_web_acl_logging_configuration.go @@ -56,7 +56,7 @@ func resourceAwsWafv2WebACLLoggingConfigurationPut(d *schema.ResourceData, meta resourceArn := d.Get("resource_arn").(string) config := &wafv2.LoggingConfiguration{ - LogDestinationConfigs: expandStringList(d.Get("log_destination_configs").(*schema.Set).List()), + LogDestinationConfigs: expandStringSet(d.Get("log_destination_configs").(*schema.Set)), ResourceArn: aws.String(resourceArn), } From 8e07162b08fce903d5578734814db7069eea6cb1 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Wed, 23 Sep 2020 15:42:37 -0500 Subject: [PATCH 0546/1212] resource_aws_sso_permission_set scaffolding --- aws/resource_aws_sso_permission_set.go | 98 ++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) create mode 100644 aws/resource_aws_sso_permission_set.go diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go new file mode 100644 index 00000000000..eaedc147fe3 --- /dev/null +++ b/aws/resource_aws_sso_permission_set.go @@ -0,0 +1,98 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + 
"github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" +) + +func resourceAwsSsoPermissionSet() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSsoPermissionSetCreate, + Read: resourceAwsSsoPermissionSetRead, + Update: resourceAwsSsoPermissionSetUpdate, + Delete: resourceAwsSsoPermissionSetDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "instance_arn": { + Type: schema.TypeString, + }, + + "permission_set_arn": { + Type: schema.TypeString, + }, + + "created_date": { + Type: schema.TypeString, + }, + + "description": { + Type: schema.TypeString, + }, + + "name": { + Type: schema.TypeString, + }, + + "relay_state": { + Type: schema.TypeString, + }, + + "session_duration": { + Type: schema.TypeString, + }, + + "inline_policy": { + Type: schema.TypeString, + }, + + "managed_policies": { + Type: schema.TypeList, + Elem: &schema.Schema{ + Type: schema.TypeString + }, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceAwsSsoPermissionSetCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + // TODO + // d.SetId(*resp.PermissionSetArn) + return resourceAwsSsoPermissionSetRead(d, meta) +} + +func resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + // TODO + return nil +} + +func resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + // TODO + return resourceAwsSsoPermissionSetRead(d, meta) +} + +func resourceAwsSsoPermissionSetDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + // TODO + return nil +} + +func waitForPermissionSetProvisioning(conn *identitystore.IdentityStore, arn string) error { + +} From 29198cca5c685b7a8c27d943a1a4497e228f8010 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Thu, 24 Sep 2020 23:25:07 -0500 Subject: [PATCH 0547/1212] update schema --- aws/resource_aws_sso_permission_set.go | 75 ++++++++++++++++++-------- 1 file changed, 53 insertions(+), 22 deletions(-) diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index eaedc147fe3..bd6973368a2 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -22,44 +22,74 @@ func resourceAwsSsoPermissionSet() *schema.Resource { Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, }, - Schema: map[string]*schema.Schema{ - "instance_arn": { - Type: schema.TypeString, - }, - - "permission_set_arn": { - Type: schema.TypeString, + "arn": { + Type: schema.TypeString, + Computed: true, }, "created_date": { - Type: schema.TypeString, + Type: schema.TypeString, + Computed: true, }, - "description": { - Type: schema.TypeString, + "instance_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(10, 1224), + validation.StringMatch(regexp.MustCompile(`^arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}$`), "must match arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}"), + ), }, "name": { - Type: schema.TypeString, + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 32), + validation.StringMatch(regexp.MustCompile(`^[\w+=,.@-]+$`), "must match [\\w+=,.@-]"), + ), }, - "relay_state": { - Type: schema.TypeString, + 
"description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 700), + validation.StringMatch(regexp.MustCompile(`^[\p{L}\p{M}\p{Z}\p{S}\p{N}\p{P}]*$`), "must match [\\p{L}\\p{M}\\p{Z}\\p{S}\\p{N}\\p{P}]"), + ), }, "session_duration": { - Type: schema.TypeString, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 100), + }, + + "relay_state": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 240), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9&$@#\\\/%?=~\-_'"|!:,.;*+\[\]\(\)\{\} ]+$`), "must match [a-zA-Z0-9&$@#\\\\\\/%?=~\\-_'\"|!:,.;*+\\[\\]\\(\\)\\{\\} ]"), + ), }, "inline_policy": { - Type: schema.TypeString, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateIAMPolicyJson, + DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, }, "managed_policies": { - Type: schema.TypeList, - Elem: &schema.Schema{ - Type: schema.TypeString + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateArn, }, }, @@ -70,7 +100,9 @@ func resourceAwsSsoPermissionSet() *schema.Resource { func resourceAwsSsoPermissionSetCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ssoadminconn - // TODO + + + // d.SetId(*resp.PermissionSetArn) return resourceAwsSsoPermissionSetRead(d, meta) } @@ -93,6 +125,5 @@ func resourceAwsSsoPermissionSetDelete(d *schema.ResourceData, meta interface{}) return nil } -func waitForPermissionSetProvisioning(conn *identitystore.IdentityStore, arn string) error { - -} +// func waitForPermissionSetProvisioning(conn *identitystore.IdentityStore, arn string) error { +// } From 5d91a548fd1f1ef7892596a20a1a8d456845c52c Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 25 Sep 2020 20:03:48 -0500 Subject: [PATCH 0548/1212] add aws_sso_assignment --- aws/resource_aws_sso_assignment.go | 130 +++++++++++++++++++++++++ aws/resource_aws_sso_permission_set.go | 4 +- 2 files changed, 131 insertions(+), 3 deletions(-) create mode 100644 aws/resource_aws_sso_assignment.go diff --git a/aws/resource_aws_sso_assignment.go b/aws/resource_aws_sso_assignment.go new file mode 100644 index 00000000000..5a0d8fbd0b5 --- /dev/null +++ b/aws/resource_aws_sso_assignment.go @@ -0,0 +1,130 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" +) + +func resourceAwsSsoAssignment() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSsoAssignmentCreate, + Read: resourceAwsSsoAssignmentRead, + Update: resourceAwsSsoAssignmentUpdate, + Delete: resourceAwsSsoAssignmentDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "created_date": { + Type: schema.TypeString, + Computed: true, + }, + + "failure_reason": { + Type: schema.TypeString, + Computed: true, + }, + + "request_id": { + Type: schema.TypeString, + Computed: true, + }, + + "status": { + Type: schema.TypeString, + Computed: true, + }, + + "instance_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: 
validation.All( + validation.StringLenBetween(10, 1224), + validation.StringMatch(regexp.MustCompile(`^arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}$`), "must match arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}"), + ), + }, + + "permission_set_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(10, 1224), + validation.StringMatch(regexp.MustCompile(`^arn:aws:sso:::permissionSet/(sso)?ins-[a-zA-Z0-9-.]{16}/ps-[a-zA-Z0-9-./]{16}$`), "must match arn:aws:sso:::permissionSet/(sso)?ins-[a-zA-Z0-9-.]{16}/ps-[a-zA-Z0-9-./]{16}"), + ), + }, + + "principal_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 47), + validation.StringMatch(regexp.MustCompile(`^([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}$`), "must match ([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}"), + ), + }, + + "principal_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"USER", "GROUP"}, false), + }, + + "target_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateAwsAccountId, + }, + + "target_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "AWS_ACCOUNT", + ValidateFunc: validation.StringInSlice([]string{"AWS_ACCOUNT"}, false), + }, + }, + } +} + +func resourceAwsSsoAssignmentCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + // TODO + // d.SetId(*resp.PermissionSetArn) + return resourceAwsSsoAssignmentRead(d, meta) +} + +func resourceAwsSsoAssignmentRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + // TODO + return nil +} + +func resourceAwsSsoAssignmentUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + // TODO + return resourceAwsSsoAssignmentRead(d, meta) +} + +func resourceAwsSsoAssignmentDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + // TODO + return nil +} + +// func waitForAssignmentCreation(conn *identitystore.IdentityStore, instanceArn string, requestId string) error { +// } + +// func waitForAssignmentDeletion(conn *identitystore.IdentityStore, instanceArn string, requestId string) error { +// } diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index bd6973368a2..da84dd41ce3 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -100,9 +100,7 @@ func resourceAwsSsoPermissionSet() *schema.Resource { func resourceAwsSsoPermissionSetCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ssoadminconn - - - + // TODO // d.SetId(*resp.PermissionSetArn) return resourceAwsSsoPermissionSetRead(d, meta) } From 75188a7cbc457a4f8248834c7afa3a578a3eb870 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 25 Sep 2020 22:08:47 -0500 Subject: [PATCH 0549/1212] add data sources --- aws/data_source_aws_identity_store_group.go | 57 +++++++++++++++++++++ aws/data_source_aws_identity_store_user.go | 57 +++++++++++++++++++++ aws/data_source_aws_sso_instance.go | 36 +++++++++++++ aws/data_source_aws_sso_instances.go | 38 ++++++++++++++ aws/resource_aws_sso_permission_set.go | 20 ++++++++ 5 files changed, 208 insertions(+) 
create mode 100644 aws/data_source_aws_identity_store_group.go create mode 100644 aws/data_source_aws_identity_store_user.go create mode 100644 aws/data_source_aws_sso_instance.go create mode 100644 aws/data_source_aws_sso_instances.go diff --git a/aws/data_source_aws_identity_store_group.go b/aws/data_source_aws_identity_store_group.go new file mode 100644 index 00000000000..687bbc922e9 --- /dev/null +++ b/aws/data_source_aws_identity_store_group.go @@ -0,0 +1,57 @@ +package aws + +import ( + "fmt" + "log" + "sort" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/identitystore" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func dataSourceAwsidentityStoreGroup() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsidentityStoreGroupRead, + + Schema: map[string]*schema.Schema{ + "identity_store_id": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 64), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9-]*$`), "must match [a-zA-Z0-9-]"), + ), + }, + + "group_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"display_name"}, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 47), + validation.StringMatch(regexp.MustCompile(`^([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}$`), "must match ([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}"), + ), + }, + + "display_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"group_id"}, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 1024), + validation.StringMatch(regexp.MustCompile(`^[\p{L}\p{M}\p{S}\p{N}\p{P}\t\n\r ]+$`), "must match [\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}\\t\\n\\r ]"), + ), + }, + }, + } +} + +func dataSourceAwsidentityStoreGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).identitystoreconn + // TODO + return nil +} diff --git a/aws/data_source_aws_identity_store_user.go b/aws/data_source_aws_identity_store_user.go new file mode 100644 index 00000000000..b46fd5c7980 --- /dev/null +++ b/aws/data_source_aws_identity_store_user.go @@ -0,0 +1,57 @@ +package aws + +import ( + "fmt" + "log" + "sort" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/identitystore" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func dataSourceAwsidentityStoreUser() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsidentityStoreUserRead, + + Schema: map[string]*schema.Schema{ + "identity_store_id": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 64), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9-]*$`), "must match [a-zA-Z0-9-]"), + ), + }, + + "user_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"user_name"}, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 47), + validation.StringMatch(regexp.MustCompile(`^([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}$`), "must match ([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}"), + ), + }, + + "user_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"user_id"}, + ValidateFunc: validation.All( + 
validation.StringLenBetween(1, 128), + validation.StringMatch(regexp.MustCompile(`^[\p{L}\p{M}\p{S}\p{N}\p{P}]+$`), "must match [\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}]"), + ), + }, + }, + } +} + +func dataSourceAwsidentityStoreUserRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).identitystoreconn + // TODO + return nil +} diff --git a/aws/data_source_aws_sso_instance.go b/aws/data_source_aws_sso_instance.go new file mode 100644 index 00000000000..b050b403c7a --- /dev/null +++ b/aws/data_source_aws_sso_instance.go @@ -0,0 +1,36 @@ +package aws + +import ( + "fmt" + "log" + "sort" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func dataSourceAwsSsoInstance() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsSsoInstanceRead, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "identity_store_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAwsSsoInstanceRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + // TODO + return nil +} diff --git a/aws/data_source_aws_sso_instances.go b/aws/data_source_aws_sso_instances.go new file mode 100644 index 00000000000..cea72660997 --- /dev/null +++ b/aws/data_source_aws_sso_instances.go @@ -0,0 +1,38 @@ +package aws + +import ( + "fmt" + "log" + "sort" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func dataSourceAwsSsoInstances() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsSsoInstanceRead, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "identity_store_id": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceAwsSsoInstancesRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + // TODO + return nil +} diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index da84dd41ce3..d5a8a1fbbb7 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -33,6 +33,26 @@ func resourceAwsSsoPermissionSet() *schema.Resource { Computed: true, }, + "provisioning_created_date": { + Type: schema.TypeString, + Computed: true, + }, + + "provisioning_failure_reason": { + Type: schema.TypeString, + Computed: true, + }, + + "provisioning_request_id": { + Type: schema.TypeString, + Computed: true, + }, + + "provisioning_status": { + Type: schema.TypeString, + Computed: true, + }, + "instance_arn": { Type: schema.TypeString, Required: true, From 8c60069b08dbb49e447353f5b36f30450e054a6a Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 25 Sep 2020 22:11:46 -0500 Subject: [PATCH 0550/1212] fmt --- aws/data_source_aws_identity_store_group.go | 6 +++--- aws/data_source_aws_identity_store_user.go | 6 +++--- aws/resource_aws_sso_assignment.go | 18 +++++++++--------- aws/resource_aws_sso_permission_set.go | 20 ++++++++++---------- 4 files changed, 25 insertions(+), 25 deletions(-) diff --git a/aws/data_source_aws_identity_store_group.go b/aws/data_source_aws_identity_store_group.go index 687bbc922e9..6a9e3c046d9 100644 --- 
a/aws/data_source_aws_identity_store_group.go +++ b/aws/data_source_aws_identity_store_group.go @@ -17,8 +17,8 @@ func dataSourceAwsidentityStoreGroup() *schema.Resource { Schema: map[string]*schema.Schema{ "identity_store_id": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, ValidateFunc: validation.All( validation.StringLenBetween(1, 64), validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9-]*$`), "must match [a-zA-Z0-9-]"), @@ -37,7 +37,7 @@ func dataSourceAwsidentityStoreGroup() *schema.Resource { }, "display_name": { - Type: schema.TypeString, + Type: schema.TypeString, Optional: true, Computed: true, ConflictsWith: []string{"group_id"}, diff --git a/aws/data_source_aws_identity_store_user.go b/aws/data_source_aws_identity_store_user.go index b46fd5c7980..8bdd55cccb3 100644 --- a/aws/data_source_aws_identity_store_user.go +++ b/aws/data_source_aws_identity_store_user.go @@ -17,8 +17,8 @@ func dataSourceAwsidentityStoreUser() *schema.Resource { Schema: map[string]*schema.Schema{ "identity_store_id": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, ValidateFunc: validation.All( validation.StringLenBetween(1, 64), validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9-]*$`), "must match [a-zA-Z0-9-]"), @@ -37,7 +37,7 @@ func dataSourceAwsidentityStoreUser() *schema.Resource { }, "user_name": { - Type: schema.TypeString, + Type: schema.TypeString, Optional: true, Computed: true, ConflictsWith: []string{"user_id"}, diff --git a/aws/resource_aws_sso_assignment.go b/aws/resource_aws_sso_assignment.go index 5a0d8fbd0b5..ebffb34e118 100644 --- a/aws/resource_aws_sso_assignment.go +++ b/aws/resource_aws_sso_assignment.go @@ -44,9 +44,9 @@ func resourceAwsSsoAssignment() *schema.Resource { }, "instance_arn": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, ValidateFunc: validation.All( validation.StringLenBetween(10, 1224), validation.StringMatch(regexp.MustCompile(`^arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}$`), "must match arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}"), @@ -54,9 +54,9 @@ func resourceAwsSsoAssignment() *schema.Resource { }, "permission_set_arn": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, ValidateFunc: validation.All( validation.StringLenBetween(10, 1224), validation.StringMatch(regexp.MustCompile(`^arn:aws:sso:::permissionSet/(sso)?ins-[a-zA-Z0-9-.]{16}/ps-[a-zA-Z0-9-./]{16}$`), "must match arn:aws:sso:::permissionSet/(sso)?ins-[a-zA-Z0-9-.]{16}/ps-[a-zA-Z0-9-./]{16}"), @@ -64,9 +64,9 @@ func resourceAwsSsoAssignment() *schema.Resource { }, "principal_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, ValidateFunc: validation.All( validation.StringLenBetween(1, 47), validation.StringMatch(regexp.MustCompile(`^([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}$`), "must match ([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}"), diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index d5a8a1fbbb7..26f98003a79 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -54,9 +54,9 @@ func resourceAwsSsoPermissionSet() *schema.Resource { }, "instance_arn": { - Type: 
schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, ValidateFunc: validation.All( validation.StringLenBetween(10, 1224), validation.StringMatch(regexp.MustCompile(`^arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}$`), "must match arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}"), @@ -64,9 +64,9 @@ func resourceAwsSsoPermissionSet() *schema.Resource { }, "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, ValidateFunc: validation.All( validation.StringLenBetween(1, 32), validation.StringMatch(regexp.MustCompile(`^[\w+=,.@-]+$`), "must match [\\w+=,.@-]"), @@ -83,8 +83,8 @@ func resourceAwsSsoPermissionSet() *schema.Resource { }, "session_duration": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, ValidateFunc: validation.StringLenBetween(1, 100), }, @@ -107,8 +107,8 @@ func resourceAwsSsoPermissionSet() *schema.Resource { "managed_policies": { Type: schema.TypeSet, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Elem: &schema.Schema{ + Type: schema.TypeString, ValidateFunc: validateArn, }, }, From 987457abeac032676c9b23995e3b12b25d56b00b Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 25 Sep 2020 22:24:40 -0500 Subject: [PATCH 0551/1212] make build --- aws/data_source_aws_identity_store_group.go | 16 +++++++------ aws/data_source_aws_identity_store_user.go | 16 +++++++------ aws/data_source_aws_sso_instance.go | 14 ++++++------ aws/data_source_aws_sso_instances.go | 14 ++++++------ aws/resource_aws_sso_assignment.go | 25 +++++++++++---------- aws/resource_aws_sso_permission_set.go | 25 +++++++++++---------- 6 files changed, 58 insertions(+), 52 deletions(-) diff --git a/aws/data_source_aws_identity_store_group.go b/aws/data_source_aws_identity_store_group.go index 6a9e3c046d9..f225ba7d088 100644 --- a/aws/data_source_aws_identity_store_group.go +++ b/aws/data_source_aws_identity_store_group.go @@ -1,14 +1,16 @@ package aws import ( - "fmt" - "log" - "sort" - "time" + // "fmt" + // "log" + // "sort" + // "time" + "regexp" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/identitystore" + // "github.com/aws/aws-sdk-go/aws" + // "github.com/aws/aws-sdk-go/service/identitystore" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func dataSourceAwsidentityStoreGroup() *schema.Resource { @@ -51,7 +53,7 @@ func dataSourceAwsidentityStoreGroup() *schema.Resource { } func dataSourceAwsidentityStoreGroupRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).identitystoreconn + // conn := meta.(*AWSClient).identitystoreconn // TODO return nil } diff --git a/aws/data_source_aws_identity_store_user.go b/aws/data_source_aws_identity_store_user.go index 8bdd55cccb3..41f2ee71dae 100644 --- a/aws/data_source_aws_identity_store_user.go +++ b/aws/data_source_aws_identity_store_user.go @@ -1,14 +1,16 @@ package aws import ( - "fmt" - "log" - "sort" - "time" + // "fmt" + // "log" + // "sort" + // "time" + "regexp" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/identitystore" + // "github.com/aws/aws-sdk-go/aws" + // "github.com/aws/aws-sdk-go/service/identitystore" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func dataSourceAwsidentityStoreUser() 
*schema.Resource { @@ -51,7 +53,7 @@ func dataSourceAwsidentityStoreUser() *schema.Resource { } func dataSourceAwsidentityStoreUserRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).identitystoreconn + // conn := meta.(*AWSClient).identitystoreconn // TODO return nil } diff --git a/aws/data_source_aws_sso_instance.go b/aws/data_source_aws_sso_instance.go index b050b403c7a..dcaac3d578a 100644 --- a/aws/data_source_aws_sso_instance.go +++ b/aws/data_source_aws_sso_instance.go @@ -1,13 +1,13 @@ package aws import ( - "fmt" - "log" - "sort" - "time" + // "fmt" + // "log" + // "sort" + // "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ssoadmin" + // "github.com/aws/aws-sdk-go/aws" + // "github.com/aws/aws-sdk-go/service/ssoadmin" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -30,7 +30,7 @@ func dataSourceAwsSsoInstance() *schema.Resource { } func dataSourceAwsSsoInstanceRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ssoadminconn + // conn := meta.(*AWSClient).ssoadminconn // TODO return nil } diff --git a/aws/data_source_aws_sso_instances.go b/aws/data_source_aws_sso_instances.go index cea72660997..cbd8d9c54a2 100644 --- a/aws/data_source_aws_sso_instances.go +++ b/aws/data_source_aws_sso_instances.go @@ -1,13 +1,13 @@ package aws import ( - "fmt" - "log" - "sort" - "time" + // "fmt" + // "log" + // "sort" + // "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ssoadmin" + // "github.com/aws/aws-sdk-go/aws" + // "github.com/aws/aws-sdk-go/service/ssoadmin" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -32,7 +32,7 @@ func dataSourceAwsSsoInstances() *schema.Resource { } func dataSourceAwsSsoInstancesRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ssoadminconn + // conn := meta.(*AWSClient).ssoadminconn // TODO return nil } diff --git a/aws/resource_aws_sso_assignment.go b/aws/resource_aws_sso_assignment.go index ebffb34e118..1984cb99a92 100644 --- a/aws/resource_aws_sso_assignment.go +++ b/aws/resource_aws_sso_assignment.go @@ -1,16 +1,17 @@ package aws import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ssoadmin" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + // "fmt" + // "log" + // "time" + "regexp" + + // "github.com/aws/aws-sdk-go/aws" + // "github.com/aws/aws-sdk-go/service/ssoadmin" + // "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + // "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsSsoAssignment() *schema.Resource { @@ -99,26 +100,26 @@ func resourceAwsSsoAssignment() *schema.Resource { } func resourceAwsSsoAssignmentCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ssoadminconn + // conn := meta.(*AWSClient).ssoadminconn // TODO // d.SetId(*resp.PermissionSetArn) return resourceAwsSsoAssignmentRead(d, meta) } func resourceAwsSsoAssignmentRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ssoadminconn + // conn := meta.(*AWSClient).ssoadminconn // TODO return nil } func resourceAwsSsoAssignmentUpdate(d *schema.ResourceData, meta interface{}) error { - conn := 
meta.(*AWSClient).ssoadminconn + // conn := meta.(*AWSClient).ssoadminconn // TODO return resourceAwsSsoAssignmentRead(d, meta) } func resourceAwsSsoAssignmentDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ssoadminconn + // conn := meta.(*AWSClient).ssoadminconn // TODO return nil } diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index 26f98003a79..18154ce143b 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -1,16 +1,17 @@ package aws import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ssoadmin" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + // "fmt" + // "log" + // "time" + "regexp" + + // "github.com/aws/aws-sdk-go/aws" + // "github.com/aws/aws-sdk-go/service/ssoadmin" + // "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + // "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsSsoPermissionSet() *schema.Resource { @@ -119,26 +120,26 @@ func resourceAwsSsoPermissionSet() *schema.Resource { } func resourceAwsSsoPermissionSetCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ssoadminconn + // conn := meta.(*AWSClient).ssoadminconn // TODO // d.SetId(*resp.PermissionSetArn) return resourceAwsSsoPermissionSetRead(d, meta) } func resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ssoadminconn + // conn := meta.(*AWSClient).ssoadminconn // TODO return nil } func resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ssoadminconn + // conn := meta.(*AWSClient).ssoadminconn // TODO return resourceAwsSsoPermissionSetRead(d, meta) } func resourceAwsSsoPermissionSetDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ssoadminconn + // conn := meta.(*AWSClient).ssoadminconn // TODO return nil } From 15d883a7ae1545710144a0e4aba2234583bfafcd Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 25 Sep 2020 22:43:45 -0500 Subject: [PATCH 0552/1212] add to provider --- aws/data_source_aws_identity_store_group.go | 6 +++--- aws/data_source_aws_identity_store_user.go | 6 +++--- aws/data_source_aws_sso_instances.go | 2 +- aws/provider.go | 3 +++ 4 files changed, 10 insertions(+), 7 deletions(-) diff --git a/aws/data_source_aws_identity_store_group.go b/aws/data_source_aws_identity_store_group.go index f225ba7d088..bc30472e693 100644 --- a/aws/data_source_aws_identity_store_group.go +++ b/aws/data_source_aws_identity_store_group.go @@ -13,9 +13,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) -func dataSourceAwsidentityStoreGroup() *schema.Resource { +func dataSourceAwsIdentityStoreGroup() *schema.Resource { return &schema.Resource{ - Read: dataSourceAwsidentityStoreGroupRead, + Read: dataSourceAwsIdentityStoreGroupRead, Schema: map[string]*schema.Schema{ "identity_store_id": { @@ -52,7 +52,7 @@ func dataSourceAwsidentityStoreGroup() *schema.Resource { } } -func dataSourceAwsidentityStoreGroupRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceAwsIdentityStoreGroupRead(d *schema.ResourceData, meta 
interface{}) error { // conn := meta.(*AWSClient).identitystoreconn // TODO return nil diff --git a/aws/data_source_aws_identity_store_user.go b/aws/data_source_aws_identity_store_user.go index 41f2ee71dae..1539172e05c 100644 --- a/aws/data_source_aws_identity_store_user.go +++ b/aws/data_source_aws_identity_store_user.go @@ -13,9 +13,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) -func dataSourceAwsidentityStoreUser() *schema.Resource { +func dataSourceAwsIdentityStoreUser() *schema.Resource { return &schema.Resource{ - Read: dataSourceAwsidentityStoreUserRead, + Read: dataSourceAwsIdentityStoreUserRead, Schema: map[string]*schema.Schema{ "identity_store_id": { @@ -52,7 +52,7 @@ func dataSourceAwsidentityStoreUser() *schema.Resource { } } -func dataSourceAwsidentityStoreUserRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceAwsIdentityStoreUserRead(d *schema.ResourceData, meta interface{}) error { // conn := meta.(*AWSClient).identitystoreconn // TODO return nil diff --git a/aws/data_source_aws_sso_instances.go b/aws/data_source_aws_sso_instances.go index cbd8d9c54a2..553bc9b446e 100644 --- a/aws/data_source_aws_sso_instances.go +++ b/aws/data_source_aws_sso_instances.go @@ -13,7 +13,7 @@ import ( func dataSourceAwsSsoInstances() *schema.Resource { return &schema.Resource{ - Read: dataSourceAwsSsoInstanceRead, + Read: dataSourceAwsSsoInstancesRead, Schema: map[string]*schema.Schema{ "arn": { diff --git a/aws/provider.go b/aws/provider.go index 7cf44f61983..227b1f7b6ea 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -267,6 +267,8 @@ func Provider() *schema.Provider { "aws_iam_role": dataSourceAwsIAMRole(), "aws_iam_server_certificate": dataSourceAwsIAMServerCertificate(), "aws_iam_user": dataSourceAwsIAMUser(), + "aws_identity_store_group": dataSourceAwsIdentityStoreGroup(), + "aws_identity_store_user": dataSourceAwsIdentityStoreUser(), "aws_imagebuilder_component": dataSourceAwsImageBuilderComponent(), "aws_imagebuilder_distribution_configuration": datasourceAwsImageBuilderDistributionConfiguration(), "aws_imagebuilder_image_pipeline": dataSourceAwsImageBuilderImagePipeline(), @@ -940,6 +942,7 @@ func Provider() *schema.Provider { "aws_ssm_patch_group": resourceAwsSsmPatchGroup(), "aws_ssm_parameter": resourceAwsSsmParameter(), "aws_ssm_resource_data_sync": resourceAwsSsmResourceDataSync(), + "aws_sso_assignment": resourceAwsSsoAssignment(), "aws_ssoadmin_managed_policy_attachment": resourceAwsSsoAdminManagedPolicyAttachment(), "aws_ssoadmin_permission_set": resourceAwsSsoAdminPermissionSet(), "aws_ssoadmin_permission_set_inline_policy": resourceAwsSsoAdminPermissionSetInlinePolicy(), From 42ace8187a940dcd03fa7e3f8d1a4f14cef6ab40 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Tue, 29 Sep 2020 00:06:10 -0500 Subject: [PATCH 0553/1212] add docs --- aws/data_source_aws_sso_instances.go | 38 --------- aws/data_source_aws_sso_permission_set.go | 84 +++++++++++++++++++ .../docs/d/identity_store_group.html.markdown | 27 ++++++ .../docs/d/identity_store_user.html.markdown | 27 ++++++ website/docs/d/sso_instance.html.markdown | 27 ++++++ .../docs/d/sso_permission_set.html.markdown | 27 ++++++ website/docs/r/sso_assignment.html.markdown | 31 +++++++ .../docs/r/sso_permission_set.html.markdown | 31 +++++++ 8 files changed, 254 insertions(+), 38 deletions(-) delete mode 100644 aws/data_source_aws_sso_instances.go create mode 100644 aws/data_source_aws_sso_permission_set.go create mode 100644 website/docs/d/identity_store_group.html.markdown 
create mode 100644 website/docs/d/identity_store_user.html.markdown create mode 100644 website/docs/d/sso_instance.html.markdown create mode 100644 website/docs/d/sso_permission_set.html.markdown create mode 100644 website/docs/r/sso_assignment.html.markdown create mode 100644 website/docs/r/sso_permission_set.html.markdown diff --git a/aws/data_source_aws_sso_instances.go b/aws/data_source_aws_sso_instances.go deleted file mode 100644 index 553bc9b446e..00000000000 --- a/aws/data_source_aws_sso_instances.go +++ /dev/null @@ -1,38 +0,0 @@ -package aws - -import ( - // "fmt" - // "log" - // "sort" - // "time" - - // "github.com/aws/aws-sdk-go/aws" - // "github.com/aws/aws-sdk-go/service/ssoadmin" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func dataSourceAwsSsoInstances() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsSsoInstancesRead, - - Schema: map[string]*schema.Schema{ - "arn": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "identity_store_id": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func dataSourceAwsSsoInstancesRead(d *schema.ResourceData, meta interface{}) error { - // conn := meta.(*AWSClient).ssoadminconn - // TODO - return nil -} diff --git a/aws/data_source_aws_sso_permission_set.go b/aws/data_source_aws_sso_permission_set.go new file mode 100644 index 00000000000..b381ae973b3 --- /dev/null +++ b/aws/data_source_aws_sso_permission_set.go @@ -0,0 +1,84 @@ +package aws + +import ( + // "fmt" + // "log" + // "sort" + // "time" + + // "github.com/aws/aws-sdk-go/aws" + // "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func dataSourceAwsSsoPermissionSet() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsSsoPermissionSetRead, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "created_date": { + Type: schema.TypeString, + Computed: true, + }, + + "instance_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(10, 1224), + validation.StringMatch(regexp.MustCompile(`^arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}$`), "must match arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}"), + ), + }, + + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 32), + validation.StringMatch(regexp.MustCompile(`^[\w+=,.@-]+$`), "must match [\\w+=,.@-]"), + ), + }, + + "description": { + Type: schema.TypeString, + Computed: true, + }, + + "session_duration": { + Type: schema.TypeString, + Computed: true, + }, + + "relay_state": { + Type: schema.TypeString, + Computed: true, + }, + + "inline_policy": { + Type: schema.TypeString, + Computed: true, + }, + + "managed_policies": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "tags": tagsSchemaComputed(), + }, + } +} + +func dataSourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) error { + // conn := meta.(*AWSClient).ssoadminconn + // TODO + return nil +} diff --git a/website/docs/d/identity_store_group.html.markdown b/website/docs/d/identity_store_group.html.markdown new file mode 100644 index 00000000000..38126eb6d42 --- /dev/null +++ b/website/docs/d/identity_store_group.html.markdown @@ -0,0 +1,27 @@ +--- +subcategory: "Identity Store" 
+layout: "aws" +page_title: "AWS: aws_identity_store_group" +description: |- + Get information on an AWS SSO Identity Store group +--- + +# Data Source: aws_identity_store_group + +TODO + +## Example Usage + +```hcl +data "aws_identity_store_group" "example" { + # TODO +} +``` + +## Argument Reference + +TODO + +## Attributes Reference + +TODO diff --git a/website/docs/d/identity_store_user.html.markdown b/website/docs/d/identity_store_user.html.markdown new file mode 100644 index 00000000000..8bc3524590d --- /dev/null +++ b/website/docs/d/identity_store_user.html.markdown @@ -0,0 +1,27 @@ +--- +subcategory: "Identity Store" +layout: "aws" +page_title: "AWS: aws_identity_store_user" +description: |- + Get information on an AWS SSO Identity Store user +--- + +# Data Source: aws_identity_store_user + +TODO + +## Example Usage + +```hcl +data "aws_identity_store_user" "example" { + # TODO +} +``` + +## Argument Reference + +TODO + +## Attributes Reference + +TODO diff --git a/website/docs/d/sso_instance.html.markdown b/website/docs/d/sso_instance.html.markdown new file mode 100644 index 00000000000..65ea2c40b1b --- /dev/null +++ b/website/docs/d/sso_instance.html.markdown @@ -0,0 +1,27 @@ +--- +subcategory: "SSO Admin" +layout: "aws" +page_title: "AWS: aws_sso_instance" +description: |- + Get information on an AWS Single Sign-On instance +--- + +# Data Source: aws_sso_instance + +TODO + +## Example Usage + +```hcl +data "aws_sso_instance" "example" { + # TODO +} +``` + +## Argument Reference + +TODO + +## Attributes Reference + +TODO diff --git a/website/docs/d/sso_permission_set.html.markdown b/website/docs/d/sso_permission_set.html.markdown new file mode 100644 index 00000000000..19f16ab8209 --- /dev/null +++ b/website/docs/d/sso_permission_set.html.markdown @@ -0,0 +1,27 @@ +--- +subcategory: "SSO Admin" +layout: "aws" +page_title: "AWS: aws_sso_permission_set" +description: |- + Get information on an AWS Single Sign-On permission set +--- + +# Data Source: aws_sso_permission_set + +TODO + +## Example Usage + +```hcl +data "aws_sso_permission_set" "example" { + # TODO +} +``` + +## Argument Reference + +TODO + +## Attributes Reference + +TODO diff --git a/website/docs/r/sso_assignment.html.markdown b/website/docs/r/sso_assignment.html.markdown new file mode 100644 index 00000000000..0ab77a554fc --- /dev/null +++ b/website/docs/r/sso_assignment.html.markdown @@ -0,0 +1,31 @@ +--- +subcategory: "SSO Admin" +layout: "aws" +page_title: "AWS: sso_assignment" +description: |- + Manages an AWS Single Sign-On assignment +--- + +# Resource: sso_assignment + +TODO + +## Example Usage + +```hcl +resource "sso_assignment" "example" { + # TODO +} +``` + +## Argument Reference + +TODO + +## Attribute Reference + +TODO + +## Import + +TODO diff --git a/website/docs/r/sso_permission_set.html.markdown b/website/docs/r/sso_permission_set.html.markdown new file mode 100644 index 00000000000..0ea03b9e7b5 --- /dev/null +++ b/website/docs/r/sso_permission_set.html.markdown @@ -0,0 +1,31 @@ +--- +subcategory: "SSO Admin" +layout: "aws" +page_title: "AWS: aws_sso_permission_set" +description: |- + Manages an AWS Single Sign-On permission set +--- + +# Resource: aws_sso_permission_set + +TODO + +## Example Usage + +```hcl +resource "aws_sso_permission_set" "example" { + # TODO +} +``` + +## Argument Reference + +TODO + +## Attribute Reference + +TODO + +## Import + +TODO From 1073f4e7361f1811da3fee9766c1177bbb4f82d1 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Tue, 29 Sep 2020 00:21:57 -0500 Subject: [PATCH 
0554/1212] fix build --- aws/data_source_aws_sso_permission_set.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/aws/data_source_aws_sso_permission_set.go b/aws/data_source_aws_sso_permission_set.go index b381ae973b3..51c97ecd6cb 100644 --- a/aws/data_source_aws_sso_permission_set.go +++ b/aws/data_source_aws_sso_permission_set.go @@ -5,10 +5,12 @@ import ( // "log" // "sort" // "time" + "regexp" // "github.com/aws/aws-sdk-go/aws" // "github.com/aws/aws-sdk-go/service/ssoadmin" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func dataSourceAwsSsoPermissionSet() *schema.Resource { @@ -68,7 +70,7 @@ func dataSourceAwsSsoPermissionSet() *schema.Resource { Type: schema.TypeSet, Computed: true, Elem: &schema.Schema{ - Type: schema.TypeString, + Type: schema.TypeString, }, }, From cd1076152945f7a259853b7d0094b3a687dabd44 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 9 Oct 2020 11:27:09 -0500 Subject: [PATCH 0555/1212] remove update from sso assignment --- aws/resource_aws_sso_assignment.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/aws/resource_aws_sso_assignment.go b/aws/resource_aws_sso_assignment.go index 1984cb99a92..fff653ac1b7 100644 --- a/aws/resource_aws_sso_assignment.go +++ b/aws/resource_aws_sso_assignment.go @@ -18,7 +18,7 @@ func resourceAwsSsoAssignment() *schema.Resource { return &schema.Resource{ Create: resourceAwsSsoAssignmentCreate, Read: resourceAwsSsoAssignmentRead, - Update: resourceAwsSsoAssignmentUpdate, + // Update: resourceAwsSsoAssignmentUpdate, Delete: resourceAwsSsoAssignmentDelete, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, @@ -112,11 +112,11 @@ func resourceAwsSsoAssignmentRead(d *schema.ResourceData, meta interface{}) erro return nil } -func resourceAwsSsoAssignmentUpdate(d *schema.ResourceData, meta interface{}) error { - // conn := meta.(*AWSClient).ssoadminconn - // TODO - return resourceAwsSsoAssignmentRead(d, meta) -} +// func resourceAwsSsoAssignmentUpdate(d *schema.ResourceData, meta interface{}) error { +// // conn := meta.(*AWSClient).ssoadminconn +// // TODO +// return resourceAwsSsoAssignmentRead(d, meta) +// } func resourceAwsSsoAssignmentDelete(d *schema.ResourceData, meta interface{}) error { // conn := meta.(*AWSClient).ssoadminconn From 44d49a342ab0901d1271da408c36ddf896df435e Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Sun, 4 Oct 2020 16:22:44 -0500 Subject: [PATCH 0556/1212] implement aws_sso_instance data source --- aws/data_source_aws_sso_instance.go | 32 +++++++++++++++++------ website/docs/d/sso_instance.html.markdown | 19 +++++++++----- 2 files changed, 37 insertions(+), 14 deletions(-) diff --git a/aws/data_source_aws_sso_instance.go b/aws/data_source_aws_sso_instance.go index dcaac3d578a..c9d3eba148e 100644 --- a/aws/data_source_aws_sso_instance.go +++ b/aws/data_source_aws_sso_instance.go @@ -1,13 +1,11 @@ package aws import ( - // "fmt" - // "log" - // "sort" - // "time" + "fmt" + "log" + "time" - // "github.com/aws/aws-sdk-go/aws" - // "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/aws/aws-sdk-go/service/ssoadmin" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -30,7 +28,25 @@ func dataSourceAwsSsoInstance() *schema.Resource { } func dataSourceAwsSsoInstanceRead(d *schema.ResourceData, meta interface{}) error { - // conn := meta.(*AWSClient).ssoadminconn - // TODO + conn := meta.(*AWSClient).ssoadminconn + + log.Printf("[DEBUG] 
Reading AWS SSO Instances") + resp, err := conn.ListInstances(&ssoadmin.ListInstancesInput{}) + if err != nil { + return err + } + + // 'AccountAliases': [] if there is no alias. + if resp == nil || len(resp.Instances) == 0 { + return fmt.Errorf("No AWS SSO Instance found") + } + + instance := resp.Instances[0] + log.Printf("[DEBUG] Received AWS SSO Instance: %s", instance) + + d.SetId(time.Now().UTC().String()) + d.Set("arn", instance.InstanceArn) + d.Set("identity_store_id", instance.IdentityStoreId) + return nil } diff --git a/website/docs/d/sso_instance.html.markdown b/website/docs/d/sso_instance.html.markdown index 65ea2c40b1b..d55f94c167d 100644 --- a/website/docs/d/sso_instance.html.markdown +++ b/website/docs/d/sso_instance.html.markdown @@ -3,25 +3,32 @@ subcategory: "SSO Admin" layout: "aws" page_title: "AWS: aws_sso_instance" description: |- - Get information on an AWS Single Sign-On instance + Get information on an AWS Single Sign-On Instance. --- # Data Source: aws_sso_instance -TODO +Use this data source to get the Single Sign-On Instance ARN and Identity Store ID. ## Example Usage ```hcl -data "aws_sso_instance" "example" { - # TODO +data "aws_sso_instance" "selected" {} + +output "arn" { + value = data.aws_sso_instance.selected.arn +} + +output "identity_store_id" { + value = data.aws_sso_instance.selected.identity_store_id } ``` ## Argument Reference -TODO +There are no arguments available for this data source. ## Attributes Reference -TODO +* `arn` - The AWS ARN associated with the AWS Single Sign-On Instance. +* `identity_store_id` - The Identity Store ID associated with the AWS Single Sign-On Instance. From 62b94e0810cacea6fc24820f7aab3af2602f1232 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Sun, 4 Oct 2020 21:10:48 -0500 Subject: [PATCH 0557/1212] add in the sso permission set data source --- aws/data_source_aws_sso_instance.go | 3 +- aws/data_source_aws_sso_permission_set.go | 128 ++++++++++++++++-- aws/internal/keyvaluetags/sso_tags.go | 95 +++++++++++++ .../docs/d/sso_permission_set.html.markdown | 30 +++- 4 files changed, 239 insertions(+), 17 deletions(-) create mode 100644 aws/internal/keyvaluetags/sso_tags.go diff --git a/aws/data_source_aws_sso_instance.go b/aws/data_source_aws_sso_instance.go index c9d3eba148e..0422c5e37f1 100644 --- a/aws/data_source_aws_sso_instance.go +++ b/aws/data_source_aws_sso_instance.go @@ -33,10 +33,9 @@ func dataSourceAwsSsoInstanceRead(d *schema.ResourceData, meta interface{}) erro log.Printf("[DEBUG] Reading AWS SSO Instances") resp, err := conn.ListInstances(&ssoadmin.ListInstancesInput{}) if err != nil { - return err + return fmt.Errorf("Error getting AWS SSO Instances: %s", err) } - // 'AccountAliases': [] if there is no alias. 
if resp == nil || len(resp.Instances) == 0 { return fmt.Errorf("No AWS SSO Instance found") } diff --git a/aws/data_source_aws_sso_permission_set.go b/aws/data_source_aws_sso_permission_set.go index 51c97ecd6cb..076439c22ec 100644 --- a/aws/data_source_aws_sso_permission_set.go +++ b/aws/data_source_aws_sso_permission_set.go @@ -1,16 +1,18 @@ package aws import ( - // "fmt" - // "log" - // "sort" - // "time" + "bytes" + "fmt" + "log" "regexp" + "time" - // "github.com/aws/aws-sdk-go/aws" - // "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssoadmin" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/hashcode" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func dataSourceAwsSsoPermissionSet() *schema.Resource { @@ -69,8 +71,18 @@ func dataSourceAwsSsoPermissionSet() *schema.Resource { "managed_policies": { Type: schema.TypeSet, Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Set: permissionSetManagedPoliciesHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + }, }, }, @@ -80,7 +92,103 @@ func dataSourceAwsSsoPermissionSet() *schema.Resource { } func dataSourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) error { - // conn := meta.(*AWSClient).ssoadminconn - // TODO + conn := meta.(*AWSClient).ssoadminconn + ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig + + instanceArn := d.Get("instance_arn").(string) + name := d.Get("name").(string) + + log.Printf("[DEBUG] Reading AWS SSO Permission Sets") + resp, err := conn.ListPermissionSets(&ssoadmin.ListPermissionSetsInput{ + InstanceArn: aws.String(instanceArn), + }) + if err != nil { + return fmt.Errorf("Error getting AWS SSO Permission Sets: %s", err) + } + if resp == nil || len(resp.PermissionSets) == 0 { + return fmt.Errorf("No AWS SSO Permission Sets found") + } + + // TODO: paging (if resp.NextToken != nil) + var permissionSetArn string + var permissionSet *ssoadmin.PermissionSet + for _, permissionSetArns := range resp.PermissionSets { + permissionSetArn = aws.StringValue(permissionSetArns) + log.Printf("[DEBUG] Reading AWS SSO Permission Set: %v", permissionSetArn) + permissionSetResp, permissionSetErr := conn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + }) + if permissionSetErr != nil { + return fmt.Errorf("Error getting AWS SSO Permission Set: %s", permissionSetErr) + } + if aws.StringValue(permissionSetResp.PermissionSet.Name) == name { + permissionSet = permissionSetResp.PermissionSet + break + } + } + + if permissionSet == nil { + return fmt.Errorf("AWS SSO Permission Set %v not found", name) + } + + log.Printf("[DEBUG] Found AWS SSO Permission Set: %s", permissionSet) + + log.Printf("[DEBUG] Getting Inline Policy for AWS SSO Permission Set") + inlinePolicyResp, inlinePolicyErr := conn.GetInlinePolicyForPermissionSet(&ssoadmin.GetInlinePolicyForPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + }) + if inlinePolicyErr != nil { + return fmt.Errorf("Error getting Inline Policy for AWS SSO Permission Set: %s", inlinePolicyErr) + } + + 
log.Printf("[DEBUG] Getting Managed Policies for AWS SSO Permission Set") + managedPoliciesResp, managedPoliciesErr := conn.ListManagedPoliciesInPermissionSet(&ssoadmin.ListManagedPoliciesInPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + }) + if managedPoliciesErr != nil { + return fmt.Errorf("Error getting Managed Policies for AWS SSO Permission Set: %s", managedPoliciesErr) + } + managedPoliciesSet := &schema.Set{ + F: permissionSetManagedPoliciesHash, + } + for _, managedPolicy := range managedPoliciesResp.AttachedManagedPolicies { + managedPoliciesSet.Add(map[string]interface{}{ + "arn": aws.StringValue(managedPolicy.Arn), + "name": aws.StringValue(managedPolicy.Name), + }) + } + + tags, err := keyvaluetags.SsoListTags(conn, permissionSetArn, instanceArn) + if err != nil { + return fmt.Errorf("error listing tags for ASW SSO Permission Set (%s): %s", permissionSetArn, err) + } + + d.SetId(permissionSetArn) + d.Set("arn", permissionSetArn) + d.Set("created_date", permissionSet.CreatedDate.Format(time.RFC3339)) + d.Set("instance_arn", instanceArn) + d.Set("name", permissionSet.Name) + d.Set("description", permissionSet.Description) + d.Set("session_duration", permissionSet.SessionDuration) + d.Set("relay_state", permissionSet.RelayState) + d.Set("inline_policy", inlinePolicyResp.InlinePolicy) + d.Set("managed_policies", managedPoliciesSet) + if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + return nil } + +// Generates a hash for the set hash function used by the +// managed_policies attribute. +func permissionSetManagedPoliciesHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["arn"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) + return hashcode.String(buf.String()) +} diff --git a/aws/internal/keyvaluetags/sso_tags.go b/aws/internal/keyvaluetags/sso_tags.go new file mode 100644 index 00000000000..05391e0c734 --- /dev/null +++ b/aws/internal/keyvaluetags/sso_tags.go @@ -0,0 +1,95 @@ +// +build !generate + +package keyvaluetags + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssoadmin" +) + +// Custom SSO tag service functions using the same format as generated code. + +// SsoListTags lists sso service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func SsoListTags(conn *ssoadmin.SSOAdmin, identifier string, instanceArn string) (KeyValueTags, error) { + input := &ssoadmin.ListTagsForResourceInput{ + InstanceArn: aws.String(instanceArn), + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(input) + + if err != nil { + return New(nil), err + } + + return SsoKeyValueTags(output.Tags), nil +} + +// SsoUpdateTags updates sso service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func SsoUpdateTags(conn *ssoadmin.SSOAdmin, identifier string, instanceArn string, oldTagsMap interface{}, newTagsMap interface{}) error { + oldTags := New(oldTagsMap) + newTags := New(newTagsMap) + + if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { + input := &ssoadmin.UntagResourceInput{ + InstanceArn: aws.String(instanceArn), + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.IgnoreAws().Keys()), + } + + _, err := conn.UntagResource(input) + + if err != nil { + return fmt.Errorf("error untagging resource (%s): %w", identifier, err) + } + } + + if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { + input := &ssoadmin.TagResourceInput{ + InstanceArn: aws.String(instanceArn), + ResourceArn: aws.String(identifier), + Tags: updatedTags.IgnoreAws().SsoTags(), + } + + _, err := conn.TagResource(input) + + if err != nil { + return fmt.Errorf("error tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// SsoTags returns sso service tags. +func (tags KeyValueTags) SsoTags() []*ssoadmin.Tag { + result := make([]*ssoadmin.Tag, 0, len(tags)) + + for k, v := range tags.Map() { + tag := &ssoadmin.Tag{ + Key: aws.String(k), + Value: aws.String(v), + } + + result = append(result, tag) + } + + return result +} + +// SsoKeyValueTags creates KeyValueTags from sso service tags. +func SsoKeyValueTags(tags []*ssoadmin.Tag) KeyValueTags { + m := make(map[string]*string, len(tags)) + + for _, tag := range tags { + m[aws.StringValue(tag.Key)] = tag.Value + } + + return New(m) +} diff --git a/website/docs/d/sso_permission_set.html.markdown b/website/docs/d/sso_permission_set.html.markdown index 19f16ab8209..e905e70a928 100644 --- a/website/docs/d/sso_permission_set.html.markdown +++ b/website/docs/d/sso_permission_set.html.markdown @@ -3,25 +3,45 @@ subcategory: "SSO Admin" layout: "aws" page_title: "AWS: aws_sso_permission_set" description: |- - Get information on an AWS Single Sign-On permission set + Get information on an AWS Single Sign-On Permission Set. --- # Data Source: aws_sso_permission_set -TODO +Use this data source to get the Single Sign-On Permission Set. ## Example Usage ```hcl +data "aws_sso_instance" "selected" {} + data "aws_sso_permission_set" "example" { - # TODO + instance_arn = data.aws_sso_instance.selected.arn + name = "Example" +} + +output "arn" { + value = data.aws_sso_permission_set.example.arn } ``` ## Argument Reference -TODO +The following arguments are supported: + +* `instance_arn` - (Required) The AWS ARN associated with the AWS Single Sign-On Instance. +* `name` - (Required) The name of the AWS Single Sign-On Permission Set. ## Attributes Reference -TODO +In addition to all arguments above, the following attributes are exported: + +* `id` - The arn of the permission set. +* `arn` - The arn of the permission set. +* `created_date` - The created date of the permission set. +* `description` - The description of the permission set. +* `session_duration` - The session duration of the permission set. +* `relay_state` - The relay state of the permission set. +* `inline_policy` - The inline policy of the permission set. +* `managed_policies` - The managed policies attached to the permission set. +* `tags` - The tags of the permission set. 
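Editor's note: the permission set lookup in patch 0557 above stops after a single `ListPermissionSets` call and leaves paging as a `// TODO: paging (if resp.NextToken != nil)`. A hedged sketch of the same lookup drained through the SDK's generated paginator; `findPermissionSetArnByName` is an illustrative helper name, not part of the patch:

```go
// Sketch only. Resolves a permission set name to its ARN across all pages
// of ListPermissionSets, covering the patch's paging TODO. Assumes the
// same imports as the surrounding file (fmt, aws, ssoadmin).
func findPermissionSetArnByName(conn *ssoadmin.SSOAdmin, instanceArn string, name string) (string, error) {
	var found string
	var innerErr error

	input := &ssoadmin.ListPermissionSetsInput{
		InstanceArn: aws.String(instanceArn),
		MaxResults:  aws.Int64(100),
	}
	err := conn.ListPermissionSetsPages(input, func(page *ssoadmin.ListPermissionSetsOutput, lastPage bool) bool {
		for _, arn := range page.PermissionSets {
			resp, derr := conn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{
				InstanceArn:      aws.String(instanceArn),
				PermissionSetArn: arn,
			})
			if derr != nil {
				innerErr = derr
				return false // stop paging and surface the error below
			}
			if aws.StringValue(resp.PermissionSet.Name) == name {
				found = aws.StringValue(arn)
				return false // stop paging once matched
			}
		}
		return !lastPage // request the next page until exhausted
	})
	if err == nil {
		err = innerErr
	}
	if err != nil {
		return "", fmt.Errorf("Error listing AWS SSO Permission Sets: %s", err)
	}
	if found == "" {
		return "", fmt.Errorf("AWS SSO Permission Set %q not found", name)
	}
	return found, nil
}
```

The describe-every-candidate shape keeps the n+1 call pattern of the original; `ListPermissionSets` returns only ARNs and offers no server-side filter by name, so paging plus describe is the practical form of this lookup.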
From b3bf1bb8dc08890baea850713bc844c4ac64a564 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Sun, 4 Oct 2020 23:35:23 -0500 Subject: [PATCH 0558/1212] docs --- aws/data_source_aws_identity_store_group.go | 53 ++++++++++++++++--- aws/data_source_aws_identity_store_user.go | 53 ++++++++++++++++--- .../docs/d/identity_store_group.html.markdown | 25 +++++++-- .../docs/d/identity_store_user.html.markdown | 25 +++++++-- 4 files changed, 130 insertions(+), 26 deletions(-) diff --git a/aws/data_source_aws_identity_store_group.go b/aws/data_source_aws_identity_store_group.go index bc30472e693..7e72405dd9b 100644 --- a/aws/data_source_aws_identity_store_group.go +++ b/aws/data_source_aws_identity_store_group.go @@ -1,14 +1,12 @@ package aws import ( - // "fmt" - // "log" - // "sort" - // "time" + "fmt" + "log" "regexp" - // "github.com/aws/aws-sdk-go/aws" - // "github.com/aws/aws-sdk-go/service/identitystore" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/identitystore" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) @@ -53,7 +51,46 @@ func dataSourceAwsIdentityStoreGroup() *schema.Resource { } func dataSourceAwsIdentityStoreGroupRead(d *schema.ResourceData, meta interface{}) error { - // conn := meta.(*AWSClient).identitystoreconn - // TODO + conn := meta.(*AWSClient).identitystoreconn + + identityStoreID := d.Get("identity_store_id").(string) + groupID := d.Get("group_id").(string) + displayName := d.Get("display_name").(string) + + if groupID != "" { + log.Printf("[DEBUG] Reading AWS Identity Store Group") + resp, err := conn.DescribeGroup(&identitystore.DescribeGroupInput{ + IdentityStoreId: aws.String(identityStoreID), + GroupId: aws.String(groupID), + }) + if err != nil { + return fmt.Errorf("Error getting AWS Identity Store Group: %s", err) + } + d.SetId(groupID) + d.Set("display_name", resp.DisplayName) + } else if displayName != "" { + log.Printf("[DEBUG] Reading AWS Identity Store Group") + resp, err := conn.ListGroups(&identitystore.ListGroupsInput{ + IdentityStoreId: aws.String(identityStoreID), + Filters: []*identitystore.Filter{ + &identitystore.Filter{ + AttributePath: aws.String("DisplayName"), + AttributeValue: aws.String(displayName), + }, + }, + }) + if err != nil { + return fmt.Errorf("Error getting AWS Identity Store Group: %s", err) + } + if resp == nil || len(resp.Groups) == 0 { + return fmt.Errorf("No AWS Identity Store Group found") + } + group := resp.Groups[0] + d.SetId(aws.StringValue(group.GroupId)) + d.Set("group_id", group.GroupId) + } else { + return fmt.Errorf("One of group_id or display_name is required") + } + return nil } diff --git a/aws/data_source_aws_identity_store_user.go b/aws/data_source_aws_identity_store_user.go index 1539172e05c..7da865217bc 100644 --- a/aws/data_source_aws_identity_store_user.go +++ b/aws/data_source_aws_identity_store_user.go @@ -1,14 +1,12 @@ package aws import ( - // "fmt" - // "log" - // "sort" - // "time" + "fmt" + "log" "regexp" - // "github.com/aws/aws-sdk-go/aws" - // "github.com/aws/aws-sdk-go/service/identitystore" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/identitystore" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) @@ -53,7 +51,46 @@ func dataSourceAwsIdentityStoreUser() *schema.Resource { } func dataSourceAwsIdentityStoreUserRead(d *schema.ResourceData, meta interface{}) error { - // conn := 
meta.(*AWSClient).identitystoreconn - // TODO + conn := meta.(*AWSClient).identitystoreconn + + identityStoreID := d.Get("identity_store_id").(string) + userID := d.Get("user_id").(string) + userName := d.Get("user_name").(string) + + if userID != "" { + log.Printf("[DEBUG] Reading AWS Identity Store User") + resp, err := conn.DescribeUser(&identitystore.DescribeUserInput{ + IdentityStoreId: aws.String(identityStoreID), + UserId: aws.String(userID), + }) + if err != nil { + return fmt.Errorf("Error getting AWS Identity Store User: %s", err) + } + d.SetId(userID) + d.Set("user_name", resp.UserName) + } else if userName != "" { + log.Printf("[DEBUG] Reading AWS Identity Store User") + resp, err := conn.ListUsers(&identitystore.ListUsersInput{ + IdentityStoreId: aws.String(identityStoreID), + Filters: []*identitystore.Filter{ + &identitystore.Filter{ + AttributePath: aws.String("UserName"), + AttributeValue: aws.String(userName), + }, + }, + }) + if err != nil { + return fmt.Errorf("Error getting AWS Identity Store User: %s", err) + } + if resp == nil || len(resp.Users) == 0 { + return fmt.Errorf("No AWS Identity Store User found") + } + user := resp.Users[0] + d.SetId(aws.StringValue(user.UserId)) + d.Set("user_id", user.UserId) + } else { + return fmt.Errorf("One of user_id or user_name is required") + } + return nil } diff --git a/website/docs/d/identity_store_group.html.markdown b/website/docs/d/identity_store_group.html.markdown index 38126eb6d42..460166f4d4a 100644 --- a/website/docs/d/identity_store_group.html.markdown +++ b/website/docs/d/identity_store_group.html.markdown @@ -3,25 +3,40 @@ subcategory: "Identity Store" layout: "aws" page_title: "AWS: aws_identity_store_group" description: |- - Get information on an AWS SSO Identity Store group + Get information on an AWS Identity Store Group --- # Data Source: aws_identity_store_group -TODO +Use this data source to get an Identity Store Group. ## Example Usage ```hcl +data "aws_sso_instance" "selected" {} + data "aws_identity_store_group" "example" { - # TODO + identity_store_id = data.aws_sso_instance.selected.identity_store_id + display_name = "ExampleGroup@example.com" +} + +output "group_id" { + value = data.aws_identity_store_group.example.group_id } ``` ## Argument Reference -TODO +The following arguments are supported: + +* `identity_store_id` - (Required) The Identity Store ID associated with the AWS Single Sign-On Instance. +* `group_id` - (Optional) An Identity Store group ID. +* `display_name` - (Optional) An Identity Store group display name. ## Attributes Reference -TODO +In addition to all arguments above, the following attributes are exported: + +* `id` - The Identity Store group ID. +* `group_id` - The Identity Store group ID. +* `display_name` - The Identity Store group display name. diff --git a/website/docs/d/identity_store_user.html.markdown b/website/docs/d/identity_store_user.html.markdown index 8bc3524590d..e5ca68f3b23 100644 --- a/website/docs/d/identity_store_user.html.markdown +++ b/website/docs/d/identity_store_user.html.markdown @@ -3,25 +3,40 @@ subcategory: "Identity Store" layout: "aws" page_title: "AWS: aws_identity_store_user" description: |- - Get information on an AWS SSO Identity Store user + Get information on an AWS Identity Store User --- # Data Source: aws_identity_store_user -TODO +Use this data source to get an Identity Store User. 
## Example Usage ```hcl +data "aws_sso_instance" "selected" {} + data "aws_identity_store_user" "example" { - # TODO + identity_store_id = data.aws_sso_instance.selected.identity_store_id + user_name = "example@example.com" +} + +output "user_id" { + value = data.aws_identity_store_user.example.user_id } ``` ## Argument Reference -TODO +The following arguments are supported: + +* `identity_store_id` - (Required) The Identity Store ID associated with the AWS Single Sign-On Instance. +* `user_id` - (Optional) An Identity Store user ID. +* `user_name` - (Optional) An Identity Store user name. ## Attributes Reference -TODO +In addition to all arguments above, the following attributes are exported: + +* `id` - The Identity Store user ID. +* `user_id` - The Identity Store user ID. +* `user_name` - The Identity Store user name. From 08010fbc74371df257f8884fb36798a1de4410c7 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Sun, 4 Oct 2020 23:38:55 -0500 Subject: [PATCH 0559/1212] fmt --- aws/data_source_aws_identity_store_group.go | 2 +- aws/data_source_aws_identity_store_user.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/data_source_aws_identity_store_group.go b/aws/data_source_aws_identity_store_group.go index 7e72405dd9b..69c02a7b347 100644 --- a/aws/data_source_aws_identity_store_group.go +++ b/aws/data_source_aws_identity_store_group.go @@ -73,7 +73,7 @@ func dataSourceAwsIdentityStoreGroupRead(d *schema.ResourceData, meta interface{ resp, err := conn.ListGroups(&identitystore.ListGroupsInput{ IdentityStoreId: aws.String(identityStoreID), Filters: []*identitystore.Filter{ - &identitystore.Filter{ + { AttributePath: aws.String("DisplayName"), AttributeValue: aws.String(displayName), }, diff --git a/aws/data_source_aws_identity_store_user.go b/aws/data_source_aws_identity_store_user.go index 7da865217bc..17f47e9de0c 100644 --- a/aws/data_source_aws_identity_store_user.go +++ b/aws/data_source_aws_identity_store_user.go @@ -73,7 +73,7 @@ func dataSourceAwsIdentityStoreUserRead(d *schema.ResourceData, meta interface{} resp, err := conn.ListUsers(&identitystore.ListUsersInput{ IdentityStoreId: aws.String(identityStoreID), Filters: []*identitystore.Filter{ - &identitystore.Filter{ + { AttributePath: aws.String("UserName"), AttributeValue: aws.String(userName), }, From 42298e38e10c80eccf66110a5a3c906e5bddf5c6 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Mon, 5 Oct 2020 02:18:56 -0500 Subject: [PATCH 0560/1212] resourceAwsSsoAssignmentCreate --- aws/resource_aws_sso_assignment.go | 117 +++++++++++++++++++++++++---- 1 file changed, 104 insertions(+), 13 deletions(-) diff --git a/aws/resource_aws_sso_assignment.go b/aws/resource_aws_sso_assignment.go index fff653ac1b7..7f3b55bb825 100644 --- a/aws/resource_aws_sso_assignment.go +++ b/aws/resource_aws_sso_assignment.go @@ -1,17 +1,16 @@ package aws import ( - // "fmt" - // "log" - // "time" + "fmt" + "log" "regexp" + "strings" + "time" - // "github.com/aws/aws-sdk-go/aws" - // "github.com/aws/aws-sdk-go/service/ssoadmin" - // "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssoadmin" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - // "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsSsoAssignment() *schema.Resource { @@ -100,9 +99,65 @@ func resourceAwsSsoAssignment() *schema.Resource { } func 
resourceAwsSsoAssignmentCreate(d *schema.ResourceData, meta interface{}) error { - // conn := meta.(*AWSClient).ssoadminconn - // TODO - // d.SetId(*resp.PermissionSetArn) + conn := meta.(*AWSClient).ssoadminconn + + log.Printf("[INFO] Creating AWS SSO Assignment") + + instanceArn := d.Get("instance_arn").(string) + permissionSetArn := d.Get("permission_set_arn").(string) + principalID := d.Get("principal_id").(string) + principalType := d.Get("principal_type").(string) + targetID := d.Get("target_id").(string) + targetType := d.Get("target_type").(string) + + vars := []string{ + permissionSetArn, + targetType, + targetID, + principalType, + principalID, + } + d.SetId(strings.Join(vars, "_")) + + req := &ssoadmin.CreateAccountAssignmentInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + PrincipalId: aws.String(principalID), + PrincipalType: aws.String(principalType), + TargetId: aws.String(targetID), + TargetType: aws.String(targetType), + } + + resp, err := conn.CreateAccountAssignment(req) + if err != nil { + return fmt.Errorf("Error creating AWS SSO Assignment: %s", err) + } + + status := resp.AccountAssignmentCreationStatus + + if status.CreatedDate != nil { + d.Set("created_date", status.CreatedDate.Format(time.RFC3339)) + } + if status.FailureReason != nil { + d.Set("failure_reason", status.FailureReason) + } + if status.RequestId != nil { + d.Set("request_id", status.RequestId) + } + if status.Status != nil { + d.Set("status", status.Status) + } + + waitResp, waitErr := waitForAssignmentCreation(d, conn, instanceArn, aws.StringValue(status.RequestId)) + if waitErr != nil { + return fmt.Errorf("Error waiting for AWS SSO Assignment: %s", waitErr) + } + + // IN_PROGRESS | FAILED | SUCCEEDED + if aws.StringValue(waitResp.Status) == "FAILED" { + return fmt.Errorf("Failed to create AWS SSO Assignment: %s", aws.StringValue(waitResp.FailureReason)) + } + return resourceAwsSsoAssignmentRead(d, meta) } @@ -124,8 +179,44 @@ func resourceAwsSsoAssignmentDelete(d *schema.ResourceData, meta interface{}) er return nil } -// func waitForAssignmentCreation(conn *identitystore.IdentityStore, instanceArn string, requestId string) error { -// } +func waitForAssignmentCreation(d *schema.ResourceData, conn *ssoadmin.SSOAdmin, instanceArn string, requestID string) (*ssoadmin.AccountAssignmentOperationStatus, error) { + var status *ssoadmin.AccountAssignmentOperationStatus + + // TODO: timeout + for { + resp, err := conn.DescribeAccountAssignmentCreationStatus(&ssoadmin.DescribeAccountAssignmentCreationStatusInput{ + InstanceArn: aws.String(instanceArn), + AccountAssignmentCreationRequestId: aws.String(requestID), + }) + + if err != nil { + return nil, err + } + + status = resp.AccountAssignmentCreationStatus + + if status.CreatedDate != nil { + d.Set("created_date", status.CreatedDate.Format(time.RFC3339)) + } + if status.FailureReason != nil { + d.Set("failure_reason", status.FailureReason) + } + if status.RequestId != nil { + d.Set("request_id", status.RequestId) + } + if status.Status != nil { + d.Set("status", status.Status) + } + + if aws.StringValue(status.Status) != "IN_PROGRESS" { + break + } + + time.Sleep(time.Second) + } + + return status, nil +} -// func waitForAssignmentDeletion(conn *identitystore.IdentityStore, instanceArn string, requestId string) error { +// func waitForAssignmentDeletion(conn *ssoadmin.SSOAdmin, instanceArn string, requestId string) error { // } From af767eaa57e4ac236c86d6948d9fc5fbfee1e189 Mon Sep 17 00:00:00 2001 From: 
Alex Burck Date: Fri, 9 Oct 2020 15:37:40 -0500 Subject: [PATCH 0561/1212] add sso assignment Read --- aws/data_source_aws_sso_permission_set.go | 1 + aws/resource_aws_sso_assignment.go | 78 +++++++++++++++-------- 2 files changed, 54 insertions(+), 25 deletions(-) diff --git a/aws/data_source_aws_sso_permission_set.go b/aws/data_source_aws_sso_permission_set.go index 076439c22ec..55365048d93 100644 --- a/aws/data_source_aws_sso_permission_set.go +++ b/aws/data_source_aws_sso_permission_set.go @@ -101,6 +101,7 @@ func dataSourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) log.Printf("[DEBUG] Reading AWS SSO Permission Sets") resp, err := conn.ListPermissionSets(&ssoadmin.ListPermissionSetsInput{ InstanceArn: aws.String(instanceArn), + MaxResults: aws.Int64(100), }) if err != nil { return fmt.Errorf("Error getting AWS SSO Permission Sets: %s", err) diff --git a/aws/resource_aws_sso_assignment.go b/aws/resource_aws_sso_assignment.go index 7f3b55bb825..c44dc2d14b5 100644 --- a/aws/resource_aws_sso_assignment.go +++ b/aws/resource_aws_sso_assignment.go @@ -28,21 +28,6 @@ func resourceAwsSsoAssignment() *schema.Resource { Computed: true, }, - "failure_reason": { - Type: schema.TypeString, - Computed: true, - }, - - "request_id": { - Type: schema.TypeString, - Computed: true, - }, - - "status": { - Type: schema.TypeString, - Computed: true, - }, - "instance_arn": { Type: schema.TypeString, Required: true, @@ -101,8 +86,6 @@ func resourceAwsSsoAssignment() *schema.Resource { func resourceAwsSsoAssignmentCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ssoadminconn - log.Printf("[INFO] Creating AWS SSO Assignment") - instanceArn := d.Get("instance_arn").(string) permissionSetArn := d.Get("permission_set_arn").(string) principalID := d.Get("principal_id").(string) @@ -128,6 +111,7 @@ func resourceAwsSsoAssignmentCreate(d *schema.ResourceData, meta interface{}) er TargetType: aws.String(targetType), } + log.Printf("[INFO] Creating AWS SSO Assignment") resp, err := conn.CreateAccountAssignment(req) if err != nil { return fmt.Errorf("Error creating AWS SSO Assignment: %s", err) @@ -162,17 +146,61 @@ func resourceAwsSsoAssignmentCreate(d *schema.ResourceData, meta interface{}) er } func resourceAwsSsoAssignmentRead(d *schema.ResourceData, meta interface{}) error { - // conn := meta.(*AWSClient).ssoadminconn - // TODO + conn := meta.(*AWSClient).ssoadminconn + + instanceArn := d.Get("instance_arn").(string) + permissionSetArn := d.Get("permission_set_arn").(string) + principalID := d.Get("principal_id").(string) + principalType := d.Get("principal_type").(string) + targetID := d.Get("target_id").(string) + targetType := d.Get("target_type").(string) + + vars := []string{ + permissionSetArn, + targetType, + targetID, + principalType, + principalID, + } + + req := &ssoadmin.ListAccountAssignmentsInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + AccountId: aws.String(targetID), + } + + log.Printf("[DEBUG] Reading AWS SSO Assignments for %s", req) + resp, err := conn.ListAccountAssignments(req) + if err != nil { + return fmt.Errorf("Error getting AWS SSO Assignments: %s", err) + } + + if resp == nil || len(resp.AccountAssignments) == 0 { + // TODO: is this correct? 
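+ // Clearing the ID below tells Terraform the remote object is gone: the
+ // resource is dropped from state and planned for re-creation on the next
+ // apply, rather than surfacing an error from Read.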
+ log.Printf("[DEBUG] No account assignments found") + d.SetId("") + return nil + } + + for _, accountAssignment := range resp.AccountAssignments { + if aws.StringValue(accountAssignment.PrincipalType) == principalType { + if aws.StringValue(accountAssignment.PrincipalId) == principalID { + // TODO: is this correct? + d.SetId(strings.Join(vars, "_")) + return nil + } + } + } + + // TODO: is this correct? + log.Printf("[DEBUG] Account assignment not found for %s", map[string]string{ + "PrincipalType": principalType, + "PrincipalId": principalID, + }) + d.SetId("") return nil } -// func resourceAwsSsoAssignmentUpdate(d *schema.ResourceData, meta interface{}) error { -// // conn := meta.(*AWSClient).ssoadminconn -// // TODO -// return resourceAwsSsoAssignmentRead(d, meta) -// } - func resourceAwsSsoAssignmentDelete(d *schema.ResourceData, meta interface{}) error { // conn := meta.(*AWSClient).ssoadminconn // TODO From f5ffa28d1b2a952f7f86ecb9f2ed3a6d2f21ebbd Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 9 Oct 2020 16:16:57 -0500 Subject: [PATCH 0562/1212] add sso assignment delete --- aws/resource_aws_sso_assignment.go | 98 +++++++++++++++++++++--------- 1 file changed, 70 insertions(+), 28 deletions(-) diff --git a/aws/resource_aws_sso_assignment.go b/aws/resource_aws_sso_assignment.go index c44dc2d14b5..cb5e5f1fd12 100644 --- a/aws/resource_aws_sso_assignment.go +++ b/aws/resource_aws_sso_assignment.go @@ -17,7 +17,6 @@ func resourceAwsSsoAssignment() *schema.Resource { return &schema.Resource{ Create: resourceAwsSsoAssignmentCreate, Read: resourceAwsSsoAssignmentRead, - // Update: resourceAwsSsoAssignmentUpdate, Delete: resourceAwsSsoAssignmentDelete, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, @@ -122,19 +121,10 @@ func resourceAwsSsoAssignmentCreate(d *schema.ResourceData, meta interface{}) er if status.CreatedDate != nil { d.Set("created_date", status.CreatedDate.Format(time.RFC3339)) } - if status.FailureReason != nil { - d.Set("failure_reason", status.FailureReason) - } - if status.RequestId != nil { - d.Set("request_id", status.RequestId) - } - if status.Status != nil { - d.Set("status", status.Status) - } - waitResp, waitErr := waitForAssignmentCreation(d, conn, instanceArn, aws.StringValue(status.RequestId)) + waitResp, waitErr := waitForAssignmentCreation(conn, instanceArn, aws.StringValue(status.RequestId)) if waitErr != nil { - return fmt.Errorf("Error waiting for AWS SSO Assignment: %s", waitErr) + return fmt.Errorf("Error waiting for AWS SSO Assignment creation: %s", waitErr) } // IN_PROGRESS | FAILED | SUCCEEDED @@ -142,6 +132,10 @@ func resourceAwsSsoAssignmentCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Failed to create AWS SSO Assignment: %s", aws.StringValue(waitResp.FailureReason)) } + if waitResp.CreatedDate != nil { + d.Set("created_date", waitResp.CreatedDate.Format(time.RFC3339)) + } + return resourceAwsSsoAssignmentRead(d, meta) } @@ -202,12 +196,48 @@ func resourceAwsSsoAssignmentRead(d *schema.ResourceData, meta interface{}) erro } func resourceAwsSsoAssignmentDelete(d *schema.ResourceData, meta interface{}) error { - // conn := meta.(*AWSClient).ssoadminconn - // TODO + conn := meta.(*AWSClient).ssoadminconn + + instanceArn := d.Get("instance_arn").(string) + permissionSetArn := d.Get("permission_set_arn").(string) + principalID := d.Get("principal_id").(string) + principalType := d.Get("principal_type").(string) + targetID := d.Get("target_id").(string) + targetType := d.Get("target_type").(string) + 
+ req := &ssoadmin.DeleteAccountAssignmentInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + PrincipalId: aws.String(principalID), + PrincipalType: aws.String(principalType), + TargetId: aws.String(targetID), + TargetType: aws.String(targetType), + } + + log.Printf("[INFO] Deleting AWS SSO Assignment") + resp, err := conn.DeleteAccountAssignment(req) + if err != nil { + return fmt.Errorf("Error deleting AWS SSO Assignment: %s", err) + } + + status := resp.AccountAssignmentDeletionStatus + + waitResp, waitErr := waitForAssignmentDeletion(conn, instanceArn, aws.StringValue(status.RequestId)) + if waitErr != nil { + return fmt.Errorf("Error waiting for AWS SSO Assignment deletion: %s", waitErr) + } + + // IN_PROGRESS | FAILED | SUCCEEDED + if aws.StringValue(waitResp.Status) == "FAILED" { + return fmt.Errorf("Failed to delete AWS SSO Assignment: %s", aws.StringValue(waitResp.FailureReason)) + } + + // TODO: is this correct? + d.SetId("") return nil } -func waitForAssignmentCreation(d *schema.ResourceData, conn *ssoadmin.SSOAdmin, instanceArn string, requestID string) (*ssoadmin.AccountAssignmentOperationStatus, error) { +func waitForAssignmentCreation(conn *ssoadmin.SSOAdmin, instanceArn string, requestID string) (*ssoadmin.AccountAssignmentOperationStatus, error) { var status *ssoadmin.AccountAssignmentOperationStatus // TODO: timeout @@ -223,28 +253,40 @@ func waitForAssignmentCreation(d *schema.ResourceData, conn *ssoadmin.SSOAdmin, status = resp.AccountAssignmentCreationStatus - if status.CreatedDate != nil { - d.Set("created_date", status.CreatedDate.Format(time.RFC3339)) - } - if status.FailureReason != nil { - d.Set("failure_reason", status.FailureReason) - } - if status.RequestId != nil { - d.Set("request_id", status.RequestId) + if aws.StringValue(status.Status) != "IN_PROGRESS" { + break } - if status.Status != nil { - d.Set("status", status.Status) + + // TODO: configure wait time + time.Sleep(time.Second) + } + + return status, nil +} + +func waitForAssignmentDeletion(conn *ssoadmin.SSOAdmin, instanceArn string, requestID string) (*ssoadmin.AccountAssignmentOperationStatus, error) { + var status *ssoadmin.AccountAssignmentOperationStatus + + // TODO: timeout + for { + resp, err := conn.DescribeAccountAssignmentDeletionStatus(&ssoadmin.DescribeAccountAssignmentDeletionStatusInput{ + InstanceArn: aws.String(instanceArn), + AccountAssignmentDeletionRequestId: aws.String(requestID), + }) + + if err != nil { + return nil, err } + status = resp.AccountAssignmentDeletionStatus + if aws.StringValue(status.Status) != "IN_PROGRESS" { break } + // TODO: configure wait time time.Sleep(time.Second) } return status, nil } - -// func waitForAssignmentDeletion(conn *ssoadmin.SSOAdmin, instanceArn string, requestId string) error { -// } From 9ab6547c1c88b6dcbfda4930b8da86990969a5a1 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Fri, 9 Oct 2020 17:22:37 -0500 Subject: [PATCH 0563/1212] fix usage of SetId --- aws/data_source_aws_identity_store_group.go | 18 +++++++-- aws/data_source_aws_identity_store_user.go | 18 +++++++-- aws/data_source_aws_sso_instance.go | 8 +++- aws/data_source_aws_sso_permission_set.go | 10 +++-- aws/resource_aws_sso_assignment.go | 44 +++++++++++---------- 5 files changed, 67 insertions(+), 31 deletions(-) diff --git a/aws/data_source_aws_identity_store_group.go b/aws/data_source_aws_identity_store_group.go index 69c02a7b347..5bc9130d6a8 100644 --- a/aws/data_source_aws_identity_store_group.go +++ 
b/aws/data_source_aws_identity_store_group.go @@ -6,6 +6,7 @@ import ( "regexp" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/identitystore" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -64,12 +65,18 @@ func dataSourceAwsIdentityStoreGroupRead(d *schema.ResourceData, meta interface{ GroupId: aws.String(groupID), }) if err != nil { + aerr, ok := err.(awserr.Error) + if ok && aerr.Code() == identitystore.ErrCodeResourceNotFoundException { + log.Printf("[DEBUG] AWS Identity Store Group not found with the id %v", groupID) + d.SetId("") + return nil + } return fmt.Errorf("Error getting AWS Identity Store Group: %s", err) } d.SetId(groupID) d.Set("display_name", resp.DisplayName) } else if displayName != "" { - log.Printf("[DEBUG] Reading AWS Identity Store Group") + log.Printf("[DEBUG] Reading AWS Identity Store Groups") resp, err := conn.ListGroups(&identitystore.ListGroupsInput{ IdentityStoreId: aws.String(identityStoreID), Filters: []*identitystore.Filter{ @@ -80,10 +87,15 @@ func dataSourceAwsIdentityStoreGroupRead(d *schema.ResourceData, meta interface{ }, }) if err != nil { - return fmt.Errorf("Error getting AWS Identity Store Group: %s", err) + return fmt.Errorf("Error getting AWS Identity Store Groups: %s", err) } if resp == nil || len(resp.Groups) == 0 { - return fmt.Errorf("No AWS Identity Store Group found") + log.Printf("[DEBUG] No AWS Identity Store Groups found") + d.SetId("") + return nil + } + if len(resp.Groups) > 1 { + return fmt.Errorf("Found multiple AWS Identity Store Groups with the DisplayName %v. Not sure which one to use. %s", displayName, resp.Groups) } group := resp.Groups[0] d.SetId(aws.StringValue(group.GroupId)) diff --git a/aws/data_source_aws_identity_store_user.go b/aws/data_source_aws_identity_store_user.go index 17f47e9de0c..78d72ad1be5 100644 --- a/aws/data_source_aws_identity_store_user.go +++ b/aws/data_source_aws_identity_store_user.go @@ -6,6 +6,7 @@ import ( "regexp" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/identitystore" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -64,12 +65,18 @@ func dataSourceAwsIdentityStoreUserRead(d *schema.ResourceData, meta interface{} UserId: aws.String(userID), }) if err != nil { + aerr, ok := err.(awserr.Error) + if ok && aerr.Code() == identitystore.ErrCodeResourceNotFoundException { + log.Printf("[DEBUG] AWS Identity Store User not found with the id %v", userID) + d.SetId("") + return nil + } return fmt.Errorf("Error getting AWS Identity Store User: %s", err) } d.SetId(userID) d.Set("user_name", resp.UserName) } else if userName != "" { - log.Printf("[DEBUG] Reading AWS Identity Store User") + log.Printf("[DEBUG] Reading AWS Identity Store Users") resp, err := conn.ListUsers(&identitystore.ListUsersInput{ IdentityStoreId: aws.String(identityStoreID), Filters: []*identitystore.Filter{ @@ -80,10 +87,15 @@ func dataSourceAwsIdentityStoreUserRead(d *schema.ResourceData, meta interface{} }, }) if err != nil { - return fmt.Errorf("Error getting AWS Identity Store User: %s", err) + return fmt.Errorf("Error getting AWS Identity Store Users: %s", err) } if resp == nil || len(resp.Users) == 0 { - return fmt.Errorf("No AWS Identity Store User found") + log.Printf("[DEBUG] No AWS Identity Store Users found") + d.SetId("") + return nil + } + if 
len(resp.Users) > 1 { + return fmt.Errorf("Found multiple AWS Identity Store Users with the UserName %v. Not sure which one to use. %s", userName, resp.Users) } user := resp.Users[0] d.SetId(aws.StringValue(user.UserId)) diff --git a/aws/data_source_aws_sso_instance.go b/aws/data_source_aws_sso_instance.go index 0422c5e37f1..092fcb3b206 100644 --- a/aws/data_source_aws_sso_instance.go +++ b/aws/data_source_aws_sso_instance.go @@ -37,7 +37,13 @@ func dataSourceAwsSsoInstanceRead(d *schema.ResourceData, meta interface{}) erro } if resp == nil || len(resp.Instances) == 0 { - return fmt.Errorf("No AWS SSO Instance found") + log.Printf("[DEBUG] No AWS SSO Instance found") + d.SetId("") + return nil + } + + if len(resp.Instances) > 1 { + return fmt.Errorf("Found multiple AWS SSO Instances. Not sure which one to use. %s", resp.Instances) } instance := resp.Instances[0] diff --git a/aws/data_source_aws_sso_permission_set.go b/aws/data_source_aws_sso_permission_set.go index 55365048d93..a59e80977e5 100644 --- a/aws/data_source_aws_sso_permission_set.go +++ b/aws/data_source_aws_sso_permission_set.go @@ -107,7 +107,9 @@ func dataSourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error getting AWS SSO Permission Sets: %s", err) } if resp == nil || len(resp.PermissionSets) == 0 { - return fmt.Errorf("No AWS SSO Permission Sets found") + log.Printf("[DEBUG] No AWS SSO Permission Sets found") + d.SetId("") + return nil } // TODO: paging (if resp.NextToken != nil) @@ -130,7 +132,9 @@ func dataSourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) } if permissionSet == nil { - return fmt.Errorf("AWS SSO Permission Set %v not found", name) + log.Printf("[DEBUG] AWS SSO Permission Set %v not found", name) + d.SetId("") + return nil } log.Printf("[DEBUG] Found AWS SSO Permission Set: %s", permissionSet) @@ -164,7 +168,7 @@ func dataSourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) tags, err := keyvaluetags.SsoListTags(conn, permissionSetArn, instanceArn) if err != nil { - return fmt.Errorf("error listing tags for ASW SSO Permission Set (%s): %s", permissionSetArn, err) + return fmt.Errorf("Error listing tags for ASW SSO Permission Set (%s): %s", permissionSetArn, err) } d.SetId(permissionSetArn) diff --git a/aws/resource_aws_sso_assignment.go b/aws/resource_aws_sso_assignment.go index cb5e5f1fd12..d6d43ee6692 100644 --- a/aws/resource_aws_sso_assignment.go +++ b/aws/resource_aws_sso_assignment.go @@ -8,6 +8,7 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ssoadmin" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -92,15 +93,6 @@ func resourceAwsSsoAssignmentCreate(d *schema.ResourceData, meta interface{}) er targetID := d.Get("target_id").(string) targetType := d.Get("target_type").(string) - vars := []string{ - permissionSetArn, - targetType, - targetID, - principalType, - principalID, - } - d.SetId(strings.Join(vars, "_")) - req := &ssoadmin.CreateAccountAssignmentInput{ InstanceArn: aws.String(instanceArn), PermissionSetArn: aws.String(permissionSetArn), @@ -132,6 +124,15 @@ func resourceAwsSsoAssignmentCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Failed to create AWS SSO Assignment: %s", aws.StringValue(waitResp.FailureReason)) } + vars := []string{ + permissionSetArn, + targetType, + targetID, + principalType, + principalID, + } + 
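+ // A match on principal type and ID confirms the assignment still exists;
+ // Read then rebuilds the same composite ID that Create assigned.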
d.SetId(strings.Join(vars, "_")) + if waitResp.CreatedDate != nil { d.Set("created_date", waitResp.CreatedDate.Format(time.RFC3339)) } @@ -149,14 +150,6 @@ func resourceAwsSsoAssignmentRead(d *schema.ResourceData, meta interface{}) erro targetID := d.Get("target_id").(string) targetType := d.Get("target_type").(string) - vars := []string{ - permissionSetArn, - targetType, - targetID, - principalType, - principalID, - } - req := &ssoadmin.ListAccountAssignmentsInput{ InstanceArn: aws.String(instanceArn), PermissionSetArn: aws.String(permissionSetArn), @@ -170,7 +163,6 @@ func resourceAwsSsoAssignmentRead(d *schema.ResourceData, meta interface{}) erro } if resp == nil || len(resp.AccountAssignments) == 0 { - // TODO: is this correct? log.Printf("[DEBUG] No account assignments found") d.SetId("") return nil @@ -179,14 +171,19 @@ func resourceAwsSsoAssignmentRead(d *schema.ResourceData, meta interface{}) erro for _, accountAssignment := range resp.AccountAssignments { if aws.StringValue(accountAssignment.PrincipalType) == principalType { if aws.StringValue(accountAssignment.PrincipalId) == principalID { - // TODO: is this correct? + vars := []string{ + permissionSetArn, + targetType, + targetID, + principalType, + principalID, + } d.SetId(strings.Join(vars, "_")) return nil } } } - // TODO: is this correct? log.Printf("[DEBUG] Account assignment not found for %s", map[string]string{ "PrincipalType": principalType, "PrincipalId": principalID, @@ -217,6 +214,12 @@ func resourceAwsSsoAssignmentDelete(d *schema.ResourceData, meta interface{}) er log.Printf("[INFO] Deleting AWS SSO Assignment") resp, err := conn.DeleteAccountAssignment(req) if err != nil { + aerr, ok := err.(awserr.Error) + if ok && aerr.Code() == ssoadmin.ErrCodeResourceNotFoundException { + log.Printf("[DEBUG] AWS SSO Assignment not found") + d.SetId("") + return nil + } return fmt.Errorf("Error deleting AWS SSO Assignment: %s", err) } @@ -232,7 +235,6 @@ func resourceAwsSsoAssignmentDelete(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Failed to delete AWS SSO Assignment: %s", aws.StringValue(waitResp.FailureReason)) } - // TODO: is this correct? 
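// With SDK v2 a nil return from Delete already removes the resource from
// state, so the explicit SetId("") below is redundant but harmless.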
d.SetId("") return nil } From 748e8e00d9eeff9a8b70ff9b19f48485c6f5a040 Mon Sep 17 00:00:00 2001 From: lawdhavmercy Date: Fri, 9 Oct 2020 16:17:47 -0500 Subject: [PATCH 0564/1212] add sso permission set create --- aws/resource_aws_sso_permission_set.go | 98 +++++++++++++++++++++++--- 1 file changed, 88 insertions(+), 10 deletions(-) diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index 18154ce143b..d0670eef0d3 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -1,17 +1,15 @@ package aws import ( - // "fmt" - // "log" - // "time" + "fmt" + "log" "regexp" - // "github.com/aws/aws-sdk-go/aws" - // "github.com/aws/aws-sdk-go/service/ssoadmin" - // "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssoadmin" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - // "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsSsoPermissionSet() *schema.Resource { @@ -120,21 +118,59 @@ func resourceAwsSsoPermissionSet() *schema.Resource { } func resourceAwsSsoPermissionSetCreate(d *schema.ResourceData, meta interface{}) error { - // conn := meta.(*AWSClient).ssoadminconn - // TODO - // d.SetId(*resp.PermissionSetArn) + ssoadminconn := meta.(*AWSClient).ssoadminconn + + log.Printf("[INFO] Creating AWS SSO Permission Set") + + instanceArn := aws.String(d.Get("instance_arn").(string)) + + params := &ssoadmin.CreatePermissionSetInput{ + InstanceArn: instanceArn, + Name: aws.String(d.Get("name").(string)), + } + + if v, ok := d.GetOk("description"); ok { + params.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("relay_state"); ok { + params.RelayState = aws.String(v.(string)) + } + + if v, ok := d.GetOk("session_duration"); ok { + params.SessionDuration = aws.String(v.(string)) + } + + if v, ok := d.GetOk("tags"); ok { + params.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().SsoTags() + } + + createPermissionSetResp, createPermissionSetErr := ssoadminconn.CreatePermissionSet(params) + if createPermissionSetErr != nil { + return fmt.Errorf("Error creating AWS SSO Permission Set: %s", createPermissionSetErr) + } + + permissionSetArn := createPermissionSetResp.PermissionSet.PermissionSetArn + d.SetId(*permissionSetArn) + + if attachPoliciesErr := attachPoliciesToPermissionSet(ssoadminconn, d, permissionSetArn, instanceArn); attachPoliciesErr != nil { + return attachPoliciesErr + } + return resourceAwsSsoPermissionSetRead(d, meta) } func resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) error { // conn := meta.(*AWSClient).ssoadminconn // TODO + return nil } func resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) error { // conn := meta.(*AWSClient).ssoadminconn // TODO + return resourceAwsSsoPermissionSetRead(d, meta) } @@ -144,5 +180,47 @@ func resourceAwsSsoPermissionSetDelete(d *schema.ResourceData, meta interface{}) return nil } +func attachPoliciesToPermissionSet(ssoadminconn *ssoadmin.SSOAdmin, d *schema.ResourceData, instanceArn *string, permissionSetArn *string) error { + + if v, ok := d.GetOk("inline_policy"); ok { + log.Printf("[INFO] Attaching IAM inline policy to AWS SSO Permission Set") + + inlinePolicy := aws.String(v.(string)) + + input := 
&ssoadmin.PutInlinePolicyToPermissionSetInput{ + InlinePolicy: inlinePolicy, + InstanceArn: instanceArn, + PermissionSetArn: permissionSetArn, + } + + _, inlinePolicyErr := ssoadminconn.PutInlinePolicyToPermissionSet(input) + if inlinePolicyErr != nil { + return fmt.Errorf("Error attaching IAM inline policy to AWS SSO Permission Set: %s", inlinePolicyErr) + } + } + + if v, ok := d.GetOk("managed_policies"); ok { + log.Printf("[INFO] Attaching Managed Policies to AWS SSO Permission Set") + + managedPolicies := expandStringSet(v.(*schema.Set)) + + for _, managedPolicyArn := range managedPolicies { + + input := &ssoadmin.AttachManagedPolicyToPermissionSetInput{ + InstanceArn: instanceArn, + ManagedPolicyArn: managedPolicyArn, + PermissionSetArn: permissionSetArn, + } + + _, managedPoliciesErr := ssoadminconn.AttachManagedPolicyToPermissionSet(input) + if managedPoliciesErr != nil { + return fmt.Errorf("Error attaching Managed Policy to AWS SSO Permission Set: %s", managedPoliciesErr) + } + } + } + + return nil +} + // func waitForPermissionSetProvisioning(conn *identitystore.IdentityStore, arn string) error { // } From bdb08aa77ba5305cf8ad41a9cb65688d8a4922db Mon Sep 17 00:00:00 2001 From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com> Date: Fri, 9 Oct 2020 16:27:33 -0500 Subject: [PATCH 0565/1212] add sso permission set read --- aws/resource_aws_sso_permission_set.go | 74 +++++++++++++++++++++++++- 1 file changed, 72 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index d0670eef0d3..eb0c829778f 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -4,6 +4,7 @@ import ( "fmt" "log" "regexp" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ssoadmin" @@ -161,8 +162,77 @@ func resourceAwsSsoPermissionSetCreate(d *schema.ResourceData, meta interface{}) } func resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) error { - // conn := meta.(*AWSClient).ssoadminconn - // TODO + ssoadminconn := meta.(*AWSClient).ssoadminconn + ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig + + var permissionSet *ssoadmin.PermissionSet + permissionSetArn := d.Id() + instanceArn := d.Get("instance_arn").(string) + name := d.Get("name").(string) + + log.Printf("[DEBUG] Reading AWS SSO Permission Set: %s", permissionSetArn) + + permissionSetResp, permissionSetErr := ssoadminconn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + }) + if permissionSetErr != nil { + return fmt.Errorf("Error getting AWS SSO Permission Set: %s", permissionSetErr) + } + if aws.StringValue(permissionSetResp.PermissionSet.Name) == name { + permissionSet = permissionSetResp.PermissionSet + } + + if permissionSet == nil { + return fmt.Errorf("AWS SSO Permission Set %v not found", name) + } + + log.Printf("[DEBUG] Found AWS SSO Permission Set: %s", permissionSet) + + log.Printf("[DEBUG] Getting Inline Policy for AWS SSO Permission Set") + inlinePolicyResp, inlinePolicyErr := ssoadminconn.GetInlinePolicyForPermissionSet(&ssoadmin.GetInlinePolicyForPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + }) + if inlinePolicyErr != nil { + return fmt.Errorf("Error getting Inline Policy for AWS SSO Permission Set: %s", inlinePolicyErr) + } + + log.Printf("[DEBUG] Getting Managed Policies for AWS SSO Permission 
Set") + managedPoliciesResp, managedPoliciesErr := ssoadminconn.ListManagedPoliciesInPermissionSet(&ssoadmin.ListManagedPoliciesInPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + }) + if managedPoliciesErr != nil { + return fmt.Errorf("Error getting Managed Policies for AWS SSO Permission Set: %s", managedPoliciesErr) + } + managedPoliciesSet := &schema.Set{ + F: permissionSetManagedPoliciesHash, + } + for _, managedPolicy := range managedPoliciesResp.AttachedManagedPolicies { + managedPoliciesSet.Add(map[string]interface{}{ + "arn": aws.StringValue(managedPolicy.Arn), + "name": aws.StringValue(managedPolicy.Name), + }) + } + + tags, err := keyvaluetags.SsoListTags(ssoadminconn, permissionSetArn, instanceArn) + if err != nil { + return fmt.Errorf("error listing tags for ASW SSO Permission Set (%s): %s", permissionSetArn, err) + } + + d.Set("arn", permissionSetArn) + d.Set("created_date", permissionSet.CreatedDate.Format(time.RFC3339)) + d.Set("instance_arn", instanceArn) + d.Set("name", permissionSet.Name) + d.Set("description", permissionSet.Description) + d.Set("session_duration", permissionSet.SessionDuration) + d.Set("relay_state", permissionSet.RelayState) + d.Set("inline_policy", inlinePolicyResp.InlinePolicy) + d.Set("managed_policies", managedPoliciesSet) + if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } return nil } From 57de0118dc79742db9f47de24a70f756a387b035 Mon Sep 17 00:00:00 2001 From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com> Date: Mon, 12 Oct 2020 09:34:29 -0500 Subject: [PATCH 0566/1212] add sso permission set update --- aws/resource_aws_sso_permission_set.go | 98 +++++++++++++++++++++++++- 1 file changed, 95 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index eb0c829778f..34e0ef5245f 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -176,6 +176,7 @@ func resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) e InstanceArn: aws.String(instanceArn), PermissionSetArn: aws.String(permissionSetArn), }) + if permissionSetErr != nil { return fmt.Errorf("Error getting AWS SSO Permission Set: %s", permissionSetErr) } @@ -184,7 +185,9 @@ func resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) e } if permissionSet == nil { - return fmt.Errorf("AWS SSO Permission Set %v not found", name) + log.Printf("[WARN] AWS SSO Permission Set %s not found, removing from state", name) + d.SetId("") + return nil } log.Printf("[DEBUG] Found AWS SSO Permission Set: %s", permissionSet) @@ -238,8 +241,97 @@ func resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) e } func resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) error { - // conn := meta.(*AWSClient).ssoadminconn - // TODO + ssoadminconn := meta.(*AWSClient).ssoadminconn + + permissionSetArn := d.Id() + instanceArn := d.Get("instance_arn").(string) + + log.Printf("[DEBUG] Updating ASW SSO Permission Set: %s", permissionSetArn) + + if d.HasChanges("description", "relay_state", "session_duration") { + input := &ssoadmin.UpdatePermissionSetInput{ + PermissionSetArn: aws.String(permissionSetArn), + InstanceArn: aws.String(instanceArn), + Description: aws.String(d.Get("description").(string)), + RelayState: aws.String(d.Get("relay_state").(string)), + 
SessionDuration: aws.String(d.Get("session_duration").(string)), + } + + log.Printf("[DEBUG] Updating ASW SSO Permission Set: %s", input) + _, permissionSetErr := ssoadminconn.UpdatePermissionSet(input) + if permissionSetErr != nil { + return fmt.Errorf("error updating AWS SSO Permission Set: %s", permissionSetErr) + } + } + + if d.HasChange("tags") { + oldTags, newTags := d.GetChange("tags") + if updateTagsErr := keyvaluetags.SsoUpdateTags(ssoadminconn, d.Get("arn").(string), d.Get("instance_arn").(string), oldTags, newTags); updateTagsErr != nil { + return fmt.Errorf("error updating tags: %s", updateTagsErr) + } + } + + if v, ok := d.GetOk("inline_policy"); ok { + log.Printf("[DEBUG] AWS SSO Permission Set %s updating IAM inline policy", permissionSetArn) + + inlinePolicy := aws.String(v.(string)) + + updateInput := &ssoadmin.PutInlinePolicyToPermissionSetInput{ + InlinePolicy: inlinePolicy, + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + } + + _, inlinePolicyErr := ssoadminconn.PutInlinePolicyToPermissionSet(updateInput) + if inlinePolicyErr != nil { + return fmt.Errorf("Error attaching IAM inline policy to AWS SSO Permission Set: %s", inlinePolicyErr) + } + } else if d.HasChange("inline_policy") { + deleteInput := &ssoadmin.DeleteInlinePolicyFromPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + } + + _, inlinePolicyErr := ssoadminconn.DeleteInlinePolicyFromPermissionSet(deleteInput) + if inlinePolicyErr != nil { + return fmt.Errorf("Error deleting IAM inline policy from AWS SSO Permission Set: %s", inlinePolicyErr) + } + } + + if d.HasChange("managed_policies") { + o, n := d.GetChange("managed_policies") + + os := o.(*schema.Set) + ns := n.(*schema.Set) + + removalList := os.Difference(ns) + for _, v := range removalList.List() { + input := &ssoadmin.DetachManagedPolicyFromPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + ManagedPolicyArn: aws.String(v.(string)), + PermissionSetArn: aws.String(permissionSetArn), + } + + _, managedPoliciesErr := ssoadminconn.DetachManagedPolicyFromPermissionSet(input) + if managedPoliciesErr != nil { + return fmt.Errorf("Error detaching Managed Policy from AWS SSO Permission Set: %s", managedPoliciesErr) + } + } + + additionList := ns.Difference(os) + for _, v := range additionList.List() { + input := &ssoadmin.AttachManagedPolicyToPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + ManagedPolicyArn: aws.String(v.(string)), + PermissionSetArn: aws.String(permissionSetArn), + } + + _, managedPoliciesErr := ssoadminconn.AttachManagedPolicyToPermissionSet(input) + if managedPoliciesErr != nil { + return fmt.Errorf("Error attaching Managed Policy to AWS SSO Permission Set: %s", managedPoliciesErr) + } + } + } return resourceAwsSsoPermissionSetRead(d, meta) } From 771bffb369d4580c58c9b496ca824cfaf9c47b92 Mon Sep 17 00:00:00 2001 From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com> Date: Mon, 12 Oct 2020 11:06:43 -0500 Subject: [PATCH 0567/1212] add sso permission set delete --- aws/resource_aws_sso_permission_set.go | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index 34e0ef5245f..a1352920dcf 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -221,7 +221,7 @@ func resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) e tags, 
err := keyvaluetags.SsoListTags(ssoadminconn, permissionSetArn, instanceArn) if err != nil { - return fmt.Errorf("error listing tags for ASW SSO Permission Set (%s): %s", permissionSetArn, err) + return fmt.Errorf("error listing tags for AWS SSO Permission Set (%s): %s", permissionSetArn, err) } d.Set("arn", permissionSetArn) @@ -246,7 +246,7 @@ func resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) permissionSetArn := d.Id() instanceArn := d.Get("instance_arn").(string) - log.Printf("[DEBUG] Updating ASW SSO Permission Set: %s", permissionSetArn) + log.Printf("[DEBUG] Updating AWS SSO Permission Set: %s", permissionSetArn) if d.HasChanges("description", "relay_state", "session_duration") { input := &ssoadmin.UpdatePermissionSetInput{ @@ -257,7 +257,7 @@ func resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) SessionDuration: aws.String(d.Get("session_duration").(string)), } - log.Printf("[DEBUG] Updating ASW SSO Permission Set: %s", input) + log.Printf("[DEBUG] Updating AWS SSO Permission Set: %s", input) _, permissionSetErr := ssoadminconn.UpdatePermissionSet(input) if permissionSetErr != nil { return fmt.Errorf("error updating AWS SSO Permission Set: %s", permissionSetErr) @@ -337,8 +337,24 @@ func resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) } func resourceAwsSsoPermissionSetDelete(d *schema.ResourceData, meta interface{}) error { - // conn := meta.(*AWSClient).ssoadminconn - // TODO + ssoadminconn := meta.(*AWSClient).ssoadminconn + + permissionSetArn := d.Id() + instanceArn := d.Get("instance_arn").(string) + + log.Printf("[INFO] Deleting AWS SSO Permission Set: %s", permissionSetArn) + + params := &ssoadmin.DeletePermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + } + + _, err := ssoadminconn.DeletePermissionSet(params) + if err != nil { + return fmt.Errorf("error deleting AWS SSO Permission Set (%s): %s", d.Id(), err) + } + + d.SetId("") return nil } From 6e375231237825713c305cd54c468b64da1f5660 Mon Sep 17 00:00:00 2001 From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com> Date: Mon, 12 Oct 2020 16:42:49 -0500 Subject: [PATCH 0568/1212] parse instance id from permission set arn --- aws/resource_aws_sso_permission_set.go | 65 ++++++++++++++++++++++---- 1 file changed, 56 insertions(+), 9 deletions(-) diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index a1352920dcf..7de6e716751 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -4,9 +4,11 @@ import ( "fmt" "log" "regexp" + "strings" "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ssoadmin" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -20,7 +22,15 @@ func resourceAwsSsoPermissionSet() *schema.Resource { Update: resourceAwsSsoPermissionSetUpdate, Delete: resourceAwsSsoPermissionSetDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + instanceArn, err := resourceAwsSsoPermissionSetParseID(d.Id()) + if err != nil { + return nil, fmt.Errorf("Error parsing AWS Permission Set ID %s: %s", d.Id(), err) + } + + d.Set("instance_arn", instanceArn) + return []*schema.ResourceData{d}, nil + }, }, Schema: map[string]*schema.Schema{ 
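// The permission set ARN doubles as the resource ID
// (arn:${Partition}:sso:::permissionSet/${InstanceID}/${PermissionSetID}),
// which is why the importer above can recover instance_arn from d.Id().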
"arn": { @@ -167,7 +177,11 @@ func resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) e var permissionSet *ssoadmin.PermissionSet permissionSetArn := d.Id() - instanceArn := d.Get("instance_arn").(string) + instanceArn, err := resourceAwsSsoPermissionSetParseID(d.Id()) + if err != nil { + return fmt.Errorf("Error parsing AWS Permission Set ID %s: %s", d.Id(), err) + } + name := d.Get("name").(string) log.Printf("[DEBUG] Reading AWS SSO Permission Set: %s", permissionSetArn) @@ -221,7 +235,7 @@ func resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) e tags, err := keyvaluetags.SsoListTags(ssoadminconn, permissionSetArn, instanceArn) if err != nil { - return fmt.Errorf("error listing tags for AWS SSO Permission Set (%s): %s", permissionSetArn, err) + return fmt.Errorf("Error listing tags for AWS SSO Permission Set (%s): %s", permissionSetArn, err) } d.Set("arn", permissionSetArn) @@ -234,7 +248,7 @@ func resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) e d.Set("inline_policy", inlinePolicyResp.InlinePolicy) d.Set("managed_policies", managedPoliciesSet) if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("Error setting tags: %s", err) } return nil @@ -244,7 +258,10 @@ func resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) ssoadminconn := meta.(*AWSClient).ssoadminconn permissionSetArn := d.Id() - instanceArn := d.Get("instance_arn").(string) + instanceArn, err := resourceAwsSsoPermissionSetParseID(permissionSetArn) + if err != nil { + return fmt.Errorf("Error parsing AWS Permission Set ID %s: %s", permissionSetArn, err) + } log.Printf("[DEBUG] Updating AWS SSO Permission Set: %s", permissionSetArn) @@ -260,14 +277,14 @@ func resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) log.Printf("[DEBUG] Updating AWS SSO Permission Set: %s", input) _, permissionSetErr := ssoadminconn.UpdatePermissionSet(input) if permissionSetErr != nil { - return fmt.Errorf("error updating AWS SSO Permission Set: %s", permissionSetErr) + return fmt.Errorf("Error updating AWS SSO Permission Set: %s", permissionSetErr) } } if d.HasChange("tags") { oldTags, newTags := d.GetChange("tags") if updateTagsErr := keyvaluetags.SsoUpdateTags(ssoadminconn, d.Get("arn").(string), d.Get("instance_arn").(string), oldTags, newTags); updateTagsErr != nil { - return fmt.Errorf("error updating tags: %s", updateTagsErr) + return fmt.Errorf("Error updating tags: %s", updateTagsErr) } } @@ -340,7 +357,10 @@ func resourceAwsSsoPermissionSetDelete(d *schema.ResourceData, meta interface{}) ssoadminconn := meta.(*AWSClient).ssoadminconn permissionSetArn := d.Id() - instanceArn := d.Get("instance_arn").(string) + instanceArn, parseErr := resourceAwsSsoPermissionSetParseID(permissionSetArn) + if parseErr != nil { + return fmt.Errorf("Error parsing AWS Permission Set ID %s: %s", permissionSetArn, parseErr) + } log.Printf("[INFO] Deleting AWS SSO Permission Set: %s", permissionSetArn) @@ -351,7 +371,7 @@ func resourceAwsSsoPermissionSetDelete(d *schema.ResourceData, meta interface{}) _, err := ssoadminconn.DeletePermissionSet(params) if err != nil { - return fmt.Errorf("error deleting AWS SSO Permission Set (%s): %s", d.Id(), err) + return fmt.Errorf("Error deleting AWS SSO Permission Set (%s): %s", d.Id(), err) } d.SetId("") @@ -400,5 +420,32 @@ func attachPoliciesToPermissionSet(ssoadminconn *ssoadmin.SSOAdmin, d 
*schema.Re return nil } +func resourceAwsSsoPermissionSetParseID(id string) (string, error) { + // id = arn:aws:sso:::permissionSet/${InstanceID}/${PermissionSetID} + idFormatErr := fmt.Errorf("Unexpected format of AWS Permission Set ID (%s), expected format arn:aws:sso:::permissionSet/ins-123456A/ps-56789B", id) + permissionSetARN, err := arn.Parse(id) + if err != nil { + return "", idFormatErr + } + + // We need: + // * The InstanceID portion of the permission set ARN resource (arn:aws:sso:::permissionSet/ins-123456A/ps-56789B) + // Split up the resource of the permission set ARN + resourceParts := strings.SplitN(permissionSetARN.Resource, "/", 3) + if len(resourceParts) != 3 { + return "", idFormatErr + } + // resourceParts = ["permissionSet","ins-123456A", "ps-56789B"] + instanceARN := &arn.ARN{ + AccountID: permissionSetARN.AccountID, + Partition: permissionSetARN.Partition, + Region: permissionSetARN.Region, + Service: "instance", + Resource: resourceParts[1], + } + + return instanceARN.String(), nil +} + // func waitForPermissionSetProvisioning(conn *identitystore.IdentityStore, arn string) error { // } From ae650d483d2e9441689048484af0d0c242d4832a Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Tue, 13 Oct 2020 20:57:13 -0500 Subject: [PATCH 0569/1212] add timeouts and use resource.StateChangeConf --- aws/resource_aws_sso_assignment.go | 128 +++++++++++++++-------------- 1 file changed, 66 insertions(+), 62 deletions(-) diff --git a/aws/resource_aws_sso_assignment.go b/aws/resource_aws_sso_assignment.go index d6d43ee6692..f57744b8fa7 100644 --- a/aws/resource_aws_sso_assignment.go +++ b/aws/resource_aws_sso_assignment.go @@ -10,18 +10,34 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) +const ( + AWSSSOAssignmentCreateRetryTimeout = 5 * time.Minute + AWSSSOAssignmentDeleteRetryTimeout = 5 * time.Minute + AWSSSOAssignmentRetryDelay = 5 * time.Second + AWSSSOAssignmentRetryMinTimeout = 3 * time.Second +) + func resourceAwsSsoAssignment() *schema.Resource { return &schema.Resource{ Create: resourceAwsSsoAssignmentCreate, Read: resourceAwsSsoAssignmentRead, Delete: resourceAwsSsoAssignmentDelete, + + // TODO Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(AWSSSOAssignmentCreateRetryTimeout), + Delete: schema.DefaultTimeout(AWSSSOAssignmentDeleteRetryTimeout), + }, + Schema: map[string]*schema.Schema{ "created_date": { Type: schema.TypeString, @@ -114,14 +130,9 @@ func resourceAwsSsoAssignmentCreate(d *schema.ResourceData, meta interface{}) er d.Set("created_date", status.CreatedDate.Format(time.RFC3339)) } - waitResp, waitErr := waitForAssignmentCreation(conn, instanceArn, aws.StringValue(status.RequestId)) + waitResp, waitErr := waitForAssignmentCreation(conn, instanceArn, aws.StringValue(status.RequestId), d.Timeout(schema.TimeoutCreate)) if waitErr != nil { - return fmt.Errorf("Error waiting for AWS SSO Assignment creation: %s", waitErr) - } - - // IN_PROGRESS | FAILED | SUCCEEDED - if aws.StringValue(waitResp.Status) == "FAILED" { - return fmt.Errorf("Failed to create AWS SSO Assignment: %s", aws.StringValue(waitResp.FailureReason)) + return waitErr } vars := []string{ @@ -225,70 +236,63 @@ func 
resourceAwsSsoAssignmentDelete(d *schema.ResourceData, meta interface{}) er status := resp.AccountAssignmentDeletionStatus - waitResp, waitErr := waitForAssignmentDeletion(conn, instanceArn, aws.StringValue(status.RequestId)) + _, waitErr := waitForAssignmentDeletion(conn, instanceArn, aws.StringValue(status.RequestId), d.Timeout(schema.TimeoutDelete)) if waitErr != nil { - return fmt.Errorf("Error waiting for AWS SSO Assignment deletion: %s", waitErr) - } - - // IN_PROGRESS | FAILED | SUCCEEDED - if aws.StringValue(waitResp.Status) == "FAILED" { - return fmt.Errorf("Failed to delete AWS SSO Assignment: %s", aws.StringValue(waitResp.FailureReason)) + return waitErr } d.SetId("") return nil } -func waitForAssignmentCreation(conn *ssoadmin.SSOAdmin, instanceArn string, requestID string) (*ssoadmin.AccountAssignmentOperationStatus, error) { - var status *ssoadmin.AccountAssignmentOperationStatus - - // TODO: timeout - for { - resp, err := conn.DescribeAccountAssignmentCreationStatus(&ssoadmin.DescribeAccountAssignmentCreationStatusInput{ - InstanceArn: aws.String(instanceArn), - AccountAssignmentCreationRequestId: aws.String(requestID), - }) - - if err != nil { - return nil, err - } - - status = resp.AccountAssignmentCreationStatus - - if aws.StringValue(status.Status) != "IN_PROGRESS" { - break - } - - // TODO: configure wait time - time.Sleep(time.Second) +func waitForAssignmentCreation(conn *ssoadmin.SSOAdmin, instanceArn string, requestID string, timeout time.Duration) (*ssoadmin.AccountAssignmentOperationStatus, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ssoadmin.StatusValuesInProgress}, + Target: []string{ssoadmin.StatusValuesSucceeded}, + Refresh: func() (interface{}, string, error) { + resp, err := conn.DescribeAccountAssignmentCreationStatus(&ssoadmin.DescribeAccountAssignmentCreationStatusInput{ + InstanceArn: aws.String(instanceArn), + AccountAssignmentCreationRequestId: aws.String(requestID), + }) + if err != nil { + return resp, "", fmt.Errorf("Error describing account assignment creation status: %s", err) + } + status := resp.AccountAssignmentCreationStatus + return status, aws.StringValue(status.Status), nil + }, + Timeout: timeout, + Delay: AWSSSOAssignmentRetryDelay, + MinTimeout: AWSSSOAssignmentRetryMinTimeout, } - - return status, nil + status, err := stateConf.WaitForState() + if err != nil { + return nil, fmt.Errorf("Error waiting for account assignment to be created: %s", err) + } + return status.(*ssoadmin.AccountAssignmentOperationStatus), nil } -func waitForAssignmentDeletion(conn *ssoadmin.SSOAdmin, instanceArn string, requestID string) (*ssoadmin.AccountAssignmentOperationStatus, error) { - var status *ssoadmin.AccountAssignmentOperationStatus - - // TODO: timeout - for { - resp, err := conn.DescribeAccountAssignmentDeletionStatus(&ssoadmin.DescribeAccountAssignmentDeletionStatusInput{ - InstanceArn: aws.String(instanceArn), - AccountAssignmentDeletionRequestId: aws.String(requestID), - }) - - if err != nil { - return nil, err - } - - status = resp.AccountAssignmentDeletionStatus - - if aws.StringValue(status.Status) != "IN_PROGRESS" { - break - } - - // TODO: configure wait time - time.Sleep(time.Second) +func waitForAssignmentDeletion(conn *ssoadmin.SSOAdmin, instanceArn string, requestID string, timeout time.Duration) (*ssoadmin.AccountAssignmentOperationStatus, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ssoadmin.StatusValuesInProgress}, + Target: []string{ssoadmin.StatusValuesSucceeded}, + Refresh: func() 
(interface{}, string, error) { + resp, err := conn.DescribeAccountAssignmentDeletionStatus(&ssoadmin.DescribeAccountAssignmentDeletionStatusInput{ + InstanceArn: aws.String(instanceArn), + AccountAssignmentDeletionRequestId: aws.String(requestID), + }) + if err != nil { + return resp, "", fmt.Errorf("Error describing account assignment deletion status: %s", err) + } + status := resp.AccountAssignmentDeletionStatus + return status, aws.StringValue(status.Status), nil + }, + Timeout: timeout, + Delay: AWSSSOAssignmentRetryDelay, + MinTimeout: AWSSSOAssignmentRetryMinTimeout, } - - return status, nil + status, err := stateConf.WaitForState() + if err != nil { + return nil, fmt.Errorf("Error waiting for account assignment to be deleted: %s", err) + } + return status.(*ssoadmin.AccountAssignmentOperationStatus), nil } From d5a1f514e9e5ff48de7211753d9e13687cacf497 Mon Sep 17 00:00:00 2001 From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com> Date: Wed, 14 Oct 2020 10:15:11 -0500 Subject: [PATCH 0570/1212] add sso permission set provisioning --- aws/resource_aws_sso_permission_set.go | 79 +++++++++++++++++++++++++- 1 file changed, 77 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index 7de6e716751..e0152e65c8c 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -10,11 +10,20 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) +const ( + AWSSSOPermissionSetCreateTimeout = 5 * time.Minute + AWSSSOPermissionSetUpdateTimeout = 10 * time.Minute + AWSSSOPermissionSetDeleteTimeout = 5 * time.Minute + AWSSSOPermissionSetProvisioningRetryDelay = 5 * time.Second + AWSSSOPermissionSetProvisioningRetryMinTimeout = 3 * time.Second +) + func resourceAwsSsoPermissionSet() *schema.Resource { return &schema.Resource{ Create: resourceAwsSsoPermissionSetCreate, @@ -32,6 +41,11 @@ func resourceAwsSsoPermissionSet() *schema.Resource { return []*schema.ResourceData{d}, nil }, }, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(AWSSSOPermissionSetCreateTimeout), + Update: schema.DefaultTimeout(AWSSSOPermissionSetUpdateTimeout), + Delete: schema.DefaultTimeout(AWSSSOPermissionSetDeleteTimeout), + }, Schema: map[string]*schema.Schema{ "arn": { Type: schema.TypeString, @@ -350,6 +364,45 @@ func resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) } } + // Reprovision if anything has changed + if d.HasChanges("description", "relay_state", "session_duration", "inline_policy", "managed_policies", "tags") { + + // Auto provision all accounts + targetType := ssoadmin.ProvisionTargetTypeAllProvisionedAccounts + provisionInput := &ssoadmin.ProvisionPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + TargetType: aws.String(targetType), + } + + log.Printf("[INFO] Provisioning AWS SSO Permission Set") + provisionResponse, err := ssoadminconn.ProvisionPermissionSet(provisionInput) + if err != nil { + return fmt.Errorf("Error provisioning AWS SSO Permission Set (%s): %w", d.Id(), err) + } + + if provisionResponse != nil && 
provisionResponse.PermissionSetProvisioningStatus != nil { + status := provisionResponse.PermissionSetProvisioningStatus + + if status.CreatedDate != nil { + d.Set("created_date", status.CreatedDate.Format(time.RFC3339)) + } + + wait := resource.StateChangeConf{ + Delay: AWSSSOPermissionSetProvisioningRetryDelay, + Pending: []string{ssoadmin.StatusValuesInProgress}, + Target: []string{ssoadmin.StatusValuesSucceeded}, + Timeout: d.Timeout(schema.TimeoutUpdate), + MinTimeout: AWSSSOPermissionSetProvisioningRetryMinTimeout, + Refresh: resourceAwsSsoPermissionSetProvisioningRefreshFunc(ssoadminconn, aws.StringValue(status.RequestId), instanceArn), + } + + if _, err := wait.WaitForState(); err != nil { + return fmt.Errorf("Error waiting for AWS SSO Permission Set (%s) provisioning: %w", d.Id(), err) + } + } + } + return resourceAwsSsoPermissionSetRead(d, meta) } @@ -447,5 +500,27 @@ func resourceAwsSsoPermissionSetParseID(id string) (string, error) { return instanceARN.String(), nil } -// func waitForPermissionSetProvisioning(conn *identitystore.IdentityStore, arn string) error { -// } +func resourceAwsSsoPermissionSetProvisioningRefreshFunc(ssoadminconn *ssoadmin.SSOAdmin, instanceArn, requestID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + input := &ssoadmin.DescribePermissionSetProvisioningStatusInput{ + InstanceArn: aws.String(instanceArn), + ProvisionPermissionSetRequestId: aws.String(requestID), + } + + return resourceAwsSsoPermissionSetProvisioningWait(ssoadminconn, input) + } +} + +func resourceAwsSsoPermissionSetProvisioningWait(ssoadminconn *ssoadmin.SSOAdmin, input *ssoadmin.DescribePermissionSetProvisioningStatusInput) (result interface{}, state string, err error) { + + resp, err := ssoadminconn.DescribePermissionSetProvisioningStatus(input) + + if aws.StringValue(resp.PermissionSetProvisioningStatus.Status) == ssoadmin.StatusValuesFailed { + return nil, ssoadmin.StatusValuesFailed, fmt.Errorf("Failed to provision AWS SSO Permission Set (%s): %s", aws.StringValue(resp.PermissionSetProvisioningStatus.PermissionSetArn), aws.StringValue(resp.PermissionSetProvisioningStatus.FailureReason)) + } + + if err != nil { + return nil, *resp.PermissionSetProvisioningStatus.Status, err + } + return true, *resp.PermissionSetProvisioningStatus.Status, nil +} From 26a3e3989526a8fe5d580e159e8a05c3cb397b8f Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Wed, 14 Oct 2020 19:29:55 -0500 Subject: [PATCH 0571/1212] fix id and import --- aws/resource_aws_sso_assignment.go | 204 +++++++++++++++++++++-------- 1 file changed, 152 insertions(+), 52 deletions(-) diff --git a/aws/resource_aws_sso_assignment.go b/aws/resource_aws_sso_assignment.go index f57744b8fa7..03f2b11721d 100644 --- a/aws/resource_aws_sso_assignment.go +++ b/aws/resource_aws_sso_assignment.go @@ -8,6 +8,7 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ssoadmin" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -30,7 +31,7 @@ func resourceAwsSsoAssignment() *schema.Resource { // TODO Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + State: resourceAwsSsoAssignmentImport, }, Timeouts: &schema.ResourceTimeout{ @@ -39,11 +40,6 @@ func resourceAwsSsoAssignment() *schema.Resource { }, Schema: map[string]*schema.Schema{ - "created_date": { - Type: schema.TypeString, - Computed: true, - }, - "instance_arn": { Type: schema.TypeString, Required: true, @@ 
-64,21 +60,12 @@ func resourceAwsSsoAssignment() *schema.Resource { ), }, - "principal_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 47), - validation.StringMatch(regexp.MustCompile(`^([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}$`), "must match ([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}"), - ), - }, - - "principal_type": { + "target_type": { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{"USER", "GROUP"}, false), + Default: ssoadmin.TargetTypeAwsAccount, + ValidateFunc: validation.StringInSlice([]string{ssoadmin.TargetTypeAwsAccount}, false), }, "target_id": { @@ -88,12 +75,26 @@ func resourceAwsSsoAssignment() *schema.Resource { ValidateFunc: validateAwsAccountId, }, - "target_type": { + "principal_type": { Type: schema.TypeString, - Optional: true, + Required: true, ForceNew: true, - Default: "AWS_ACCOUNT", - ValidateFunc: validation.StringInSlice([]string{"AWS_ACCOUNT"}, false), + ValidateFunc: validation.StringInSlice([]string{ssoadmin.PrincipalTypeUser, ssoadmin.PrincipalTypeGroup}, false), + }, + + "principal_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 47), + validation.StringMatch(regexp.MustCompile(`^([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}$`), "must match ([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}"), + ), + }, + + "created_date": { + Type: schema.TypeString, + Computed: true, }, }, } @@ -104,18 +105,18 @@ func resourceAwsSsoAssignmentCreate(d *schema.ResourceData, meta interface{}) er instanceArn := d.Get("instance_arn").(string) permissionSetArn := d.Get("permission_set_arn").(string) - principalID := d.Get("principal_id").(string) - principalType := d.Get("principal_type").(string) - targetID := d.Get("target_id").(string) targetType := d.Get("target_type").(string) + targetID := d.Get("target_id").(string) + principalType := d.Get("principal_type").(string) + principalID := d.Get("principal_id").(string) req := &ssoadmin.CreateAccountAssignmentInput{ InstanceArn: aws.String(instanceArn), PermissionSetArn: aws.String(permissionSetArn), - PrincipalId: aws.String(principalID), - PrincipalType: aws.String(principalType), - TargetId: aws.String(targetID), TargetType: aws.String(targetType), + TargetId: aws.String(targetID), + PrincipalType: aws.String(principalType), + PrincipalId: aws.String(principalID), } log.Printf("[INFO] Creating AWS SSO Assignment") @@ -135,14 +136,11 @@ func resourceAwsSsoAssignmentCreate(d *schema.ResourceData, meta interface{}) er return waitErr } - vars := []string{ - permissionSetArn, - targetType, - targetID, - principalType, - principalID, + id, idErr := resourceAwsSsoAssignmentID(instanceArn, permissionSetArn, targetType, targetID, principalType, principalID) + if idErr != nil { + return idErr } - d.SetId(strings.Join(vars, "_")) + d.SetId(id) if waitResp.CreatedDate != nil { d.Set("created_date", waitResp.CreatedDate.Format(time.RFC3339)) @@ -156,10 +154,10 @@ func resourceAwsSsoAssignmentRead(d *schema.ResourceData, meta interface{}) erro instanceArn := d.Get("instance_arn").(string) permissionSetArn := d.Get("permission_set_arn").(string) - principalID := d.Get("principal_id").(string) - principalType 
:= d.Get("principal_type").(string) - targetID := d.Get("target_id").(string) targetType := d.Get("target_type").(string) + targetID := d.Get("target_id").(string) + principalType := d.Get("principal_type").(string) + principalID := d.Get("principal_id").(string) req := &ssoadmin.ListAccountAssignmentsInput{ InstanceArn: aws.String(instanceArn), @@ -182,14 +180,11 @@ func resourceAwsSsoAssignmentRead(d *schema.ResourceData, meta interface{}) erro for _, accountAssignment := range resp.AccountAssignments { if aws.StringValue(accountAssignment.PrincipalType) == principalType { if aws.StringValue(accountAssignment.PrincipalId) == principalID { - vars := []string{ - permissionSetArn, - targetType, - targetID, - principalType, - principalID, + id, idErr := resourceAwsSsoAssignmentID(instanceArn, permissionSetArn, targetType, targetID, principalType, principalID) + if idErr != nil { + return idErr } - d.SetId(strings.Join(vars, "_")) + d.SetId(id) return nil } } @@ -208,18 +203,18 @@ func resourceAwsSsoAssignmentDelete(d *schema.ResourceData, meta interface{}) er instanceArn := d.Get("instance_arn").(string) permissionSetArn := d.Get("permission_set_arn").(string) - principalID := d.Get("principal_id").(string) - principalType := d.Get("principal_type").(string) - targetID := d.Get("target_id").(string) targetType := d.Get("target_type").(string) + targetID := d.Get("target_id").(string) + principalType := d.Get("principal_type").(string) + principalID := d.Get("principal_id").(string) req := &ssoadmin.DeleteAccountAssignmentInput{ InstanceArn: aws.String(instanceArn), PermissionSetArn: aws.String(permissionSetArn), - PrincipalId: aws.String(principalID), - PrincipalType: aws.String(principalType), - TargetId: aws.String(targetID), TargetType: aws.String(targetType), + TargetId: aws.String(targetID), + PrincipalType: aws.String(principalType), + PrincipalId: aws.String(principalID), } log.Printf("[INFO] Deleting AWS SSO Assignment") @@ -245,6 +240,111 @@ func resourceAwsSsoAssignmentDelete(d *schema.ResourceData, meta interface{}) er return nil } +func resourceAwsSsoAssignmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + // id = ${InstanceID}/${PermissionSetID}/${TargetType}/${TargetID}/${PrincipalType}/${PrincipalID} + idParts := strings.Split(d.Id(), "/") + if len(idParts) != 6 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" || idParts[3] == "" || idParts[4] == "" || idParts[5] == "" { + return nil, fmt.Errorf("Unexpected format of id (%s), expected ${InstanceID}/${PermissionSetID}/${TargetType}/${TargetID}/${PrincipalType}/${PrincipalID}", d.Id()) + } + + instanceID := idParts[0] + permissionSetID := idParts[1] + targetType := idParts[2] + targetID := idParts[3] + principalType := idParts[4] + principalID := idParts[5] + + var err error + + // arn:${Partition}:sso:::instance/${InstanceId} + instanceArn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "sso", + Resource: fmt.Sprintf("instance/%s", instanceID), + }.String() + + // arn:${Partition}:sso:::permissionSet/${InstanceId}/${PermissionSetId} + permissionSetArn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "sso", + Resource: fmt.Sprintf("permissionSet/%s/%s", instanceID, permissionSetID), + }.String() + + err = d.Set("instance_arn", instanceArn) + if err != nil { + return nil, err + } + err = d.Set("permission_set_arn", permissionSetArn) + if err != nil { + return nil, err + } + err = d.Set("target_type", targetType) + if err != nil { + return nil, err + 
} + err = d.Set("target_id", targetID) + if err != nil { + return nil, err + } + err = d.Set("principal_type", principalType) + if err != nil { + return nil, err + } + err = d.Set("principal_id", principalID) + if err != nil { + return nil, err + } + + id, idErr := resourceAwsSsoAssignmentID(instanceArn, permissionSetArn, targetType, targetID, principalType, principalID) + if idErr != nil { + return nil, idErr + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func resourceAwsSsoAssignmentID( + instanceArn string, + permissionSetArn string, + targetType string, + targetID string, + principalType string, + principalID string, +) (string, error) { + // arn:${Partition}:sso:::instance/${InstanceId} + iArn, err := arn.Parse(instanceArn) + if err != nil { + return "", err + } + iArnResourceParts := strings.Split(iArn.Resource, "/") + if len(iArnResourceParts) != 2 || iArnResourceParts[0] != "instance" || iArnResourceParts[1] == "" { + return "", fmt.Errorf("Unexpected format of ARN (%s), expected arn:${Partition}:sso:::instance/${InstanceId}", instanceArn) + } + instanceID := iArnResourceParts[1] + + // arn:${Partition}:sso:::permissionSet/${InstanceId}/${PermissionSetId} + pArn, err := arn.Parse(permissionSetArn) + if err != nil { + return "", err + } + pArnResourceParts := strings.Split(pArn.Resource, "/") + if len(iArnResourceParts) != 3 || pArnResourceParts[0] != "permissionSet" || pArnResourceParts[1] == "" || pArnResourceParts[2] == "" { + return "", fmt.Errorf("Unexpected format of ARN (%s), expected arn:${Partition}:sso:::permissionSet/${InstanceId}/${PermissionSetId}", instanceArn) + } + permissionSetID := pArnResourceParts[2] + + vars := []string{ + instanceID, + permissionSetID, + targetType, + targetID, + principalType, + principalID, + } + return strings.Join(vars, "/"), nil +} + func waitForAssignmentCreation(conn *ssoadmin.SSOAdmin, instanceArn string, requestID string, timeout time.Duration) (*ssoadmin.AccountAssignmentOperationStatus, error) { stateConf := &resource.StateChangeConf{ Pending: []string{ssoadmin.StatusValuesInProgress}, From 958ad406532151a192bf8a604663a4617e88ef3b Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Wed, 14 Oct 2020 21:27:27 -0500 Subject: [PATCH 0572/1212] bugfix --- aws/resource_aws_sso_assignment.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_sso_assignment.go b/aws/resource_aws_sso_assignment.go index 03f2b11721d..ca7fb10bb20 100644 --- a/aws/resource_aws_sso_assignment.go +++ b/aws/resource_aws_sso_assignment.go @@ -330,7 +330,7 @@ func resourceAwsSsoAssignmentID( } pArnResourceParts := strings.Split(pArn.Resource, "/") if len(iArnResourceParts) != 3 || pArnResourceParts[0] != "permissionSet" || pArnResourceParts[1] == "" || pArnResourceParts[2] == "" { - return "", fmt.Errorf("Unexpected format of ARN (%s), expected arn:${Partition}:sso:::permissionSet/${InstanceId}/${PermissionSetId}", instanceArn) + return "", fmt.Errorf("Unexpected format of ARN (%s), expected arn:${Partition}:sso:::permissionSet/${InstanceId}/${PermissionSetId}", permissionSetArn) } permissionSetID := pArnResourceParts[2] From 0f98c9458296cf5c123d20482411958b9174d2eb Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Wed, 14 Oct 2020 21:32:25 -0500 Subject: [PATCH 0573/1212] bug fix --- aws/resource_aws_sso_assignment.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_sso_assignment.go b/aws/resource_aws_sso_assignment.go index ca7fb10bb20..d7275b136fc 100644 --- 
a/aws/resource_aws_sso_assignment.go +++ b/aws/resource_aws_sso_assignment.go @@ -329,7 +329,7 @@ func resourceAwsSsoAssignmentID( return "", err } pArnResourceParts := strings.Split(pArn.Resource, "/") - if len(iArnResourceParts) != 3 || pArnResourceParts[0] != "permissionSet" || pArnResourceParts[1] == "" || pArnResourceParts[2] == "" { + if len(pArnResourceParts) != 3 || pArnResourceParts[0] != "permissionSet" || pArnResourceParts[1] == "" || pArnResourceParts[2] == "" { return "", fmt.Errorf("Unexpected format of ARN (%s), expected arn:${Partition}:sso:::permissionSet/${InstanceId}/${PermissionSetId}", permissionSetArn) } permissionSetID := pArnResourceParts[2] From 4dbdcffa60d9094f7893169ec370f49669732df6 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Wed, 14 Oct 2020 22:52:07 -0500 Subject: [PATCH 0574/1212] update Create to check the assignment exists before creating it --- aws/resource_aws_sso_assignment.go | 118 ++++++++++++++++++++--------- 1 file changed, 84 insertions(+), 34 deletions(-) diff --git a/aws/resource_aws_sso_assignment.go b/aws/resource_aws_sso_assignment.go index d7275b136fc..e54ec59bd57 100644 --- a/aws/resource_aws_sso_assignment.go +++ b/aws/resource_aws_sso_assignment.go @@ -29,7 +29,6 @@ func resourceAwsSsoAssignment() *schema.Resource { Read: resourceAwsSsoAssignmentRead, Delete: resourceAwsSsoAssignmentDelete, - // TODO Importer: &schema.ResourceImporter{ State: resourceAwsSsoAssignmentImport, }, @@ -110,6 +109,29 @@ func resourceAwsSsoAssignmentCreate(d *schema.ResourceData, meta interface{}) er principalType := d.Get("principal_type").(string) principalID := d.Get("principal_id").(string) + id, idErr := resourceAwsSsoAssignmentID(instanceArn, permissionSetArn, targetType, targetID, principalType, principalID) + if idErr != nil { + return idErr + } + + // We need to check if the assignment exists before creating it + // since the AWS SSO API doesn't prevent us from creating duplicates + accountAssignment, getAccountAssignmentErr := resourceAwsSsoAssignmentGet( + conn, + instanceArn, + permissionSetArn, + targetType, + targetID, + principalType, + principalID, + ) + if getAccountAssignmentErr != nil { + return getAccountAssignmentErr + } + if accountAssignment != nil { + return fmt.Errorf("AWS SSO Assignment already exists. 
Import the resource by calling: terraform import %s", id) + } + req := &ssoadmin.CreateAccountAssignmentInput{ InstanceArn: aws.String(instanceArn), PermissionSetArn: aws.String(permissionSetArn), @@ -136,10 +158,6 @@ func resourceAwsSsoAssignmentCreate(d *schema.ResourceData, meta interface{}) er return waitErr } - id, idErr := resourceAwsSsoAssignmentID(instanceArn, permissionSetArn, targetType, targetID, principalType, principalID) - if idErr != nil { - return idErr - } d.SetId(id) if waitResp.CreatedDate != nil { @@ -159,42 +177,32 @@ func resourceAwsSsoAssignmentRead(d *schema.ResourceData, meta interface{}) erro principalType := d.Get("principal_type").(string) principalID := d.Get("principal_id").(string) - req := &ssoadmin.ListAccountAssignmentsInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - AccountId: aws.String(targetID), - } - - log.Printf("[DEBUG] Reading AWS SSO Assignments for %s", req) - resp, err := conn.ListAccountAssignments(req) + accountAssignment, err := resourceAwsSsoAssignmentGet( + conn, + instanceArn, + permissionSetArn, + targetType, + targetID, + principalType, + principalID, + ) if err != nil { - return fmt.Errorf("Error getting AWS SSO Assignments: %s", err) + return err } - - if resp == nil || len(resp.AccountAssignments) == 0 { - log.Printf("[DEBUG] No account assignments found") + if accountAssignment == nil { + log.Printf("[DEBUG] Account assignment not found for %s", map[string]string{ + "PrincipalType": principalType, + "PrincipalId": principalID, + }) d.SetId("") return nil } - for _, accountAssignment := range resp.AccountAssignments { - if aws.StringValue(accountAssignment.PrincipalType) == principalType { - if aws.StringValue(accountAssignment.PrincipalId) == principalID { - id, idErr := resourceAwsSsoAssignmentID(instanceArn, permissionSetArn, targetType, targetID, principalType, principalID) - if idErr != nil { - return idErr - } - d.SetId(id) - return nil - } - } + id, idErr := resourceAwsSsoAssignmentID(instanceArn, permissionSetArn, targetType, targetID, principalType, principalID) + if idErr != nil { + return idErr } - - log.Printf("[DEBUG] Account assignment not found for %s", map[string]string{ - "PrincipalType": principalType, - "PrincipalId": principalID, - }) - d.SetId("") + d.SetId(id) return nil } @@ -345,6 +353,48 @@ func resourceAwsSsoAssignmentID( return strings.Join(vars, "/"), nil } +func resourceAwsSsoAssignmentGet( + conn *ssoadmin.SSOAdmin, + instanceArn string, + permissionSetArn string, + targetType string, + targetID string, + principalType string, + principalID string, +) (*ssoadmin.AccountAssignment, error) { + if targetType != ssoadmin.TargetTypeAwsAccount { + return nil, fmt.Errorf("Invalid AWS SSO Assignments Target type %s. 
Only %s is supported", targetType, ssoadmin.TargetTypeAwsAccount) + } + + req := &ssoadmin.ListAccountAssignmentsInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + AccountId: aws.String(targetID), + } + + log.Printf("[DEBUG] Reading AWS SSO Assignments for %s", req) + resp, err := conn.ListAccountAssignments(req) + if err != nil { + return nil, fmt.Errorf("Error getting AWS SSO Assignments: %s", err) + } + + if resp == nil || len(resp.AccountAssignments) == 0 { + log.Printf("[DEBUG] No account assignments found") + return nil, nil + } + + for _, accountAssignment := range resp.AccountAssignments { + if aws.StringValue(accountAssignment.PrincipalType) == principalType { + if aws.StringValue(accountAssignment.PrincipalId) == principalID { + return accountAssignment, nil + } + } + } + + // not found + return nil, nil +} + func waitForAssignmentCreation(conn *ssoadmin.SSOAdmin, instanceArn string, requestID string, timeout time.Duration) (*ssoadmin.AccountAssignmentOperationStatus, error) { stateConf := &resource.StateChangeConf{ Pending: []string{ssoadmin.StatusValuesInProgress}, From 6f7cac8c41dde085a6378fa9d718d4e92b7af43b Mon Sep 17 00:00:00 2001 From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com> Date: Thu, 15 Oct 2020 15:48:01 -0500 Subject: [PATCH 0575/1212] update manage_policy_arns --- aws/data_source_aws_sso_permission_set.go | 39 ++----- aws/resource_aws_sso_permission_set.go | 124 ++++++++++++---------- 2 files changed, 76 insertions(+), 87 deletions(-) diff --git a/aws/data_source_aws_sso_permission_set.go b/aws/data_source_aws_sso_permission_set.go index a59e80977e5..55dd7be9d34 100644 --- a/aws/data_source_aws_sso_permission_set.go +++ b/aws/data_source_aws_sso_permission_set.go @@ -1,7 +1,6 @@ package aws import ( - "bytes" "fmt" "log" "regexp" @@ -11,7 +10,6 @@ import ( "github.com/aws/aws-sdk-go/service/ssoadmin" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/hashcode" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) @@ -68,21 +66,11 @@ func dataSourceAwsSsoPermissionSet() *schema.Resource { Computed: true, }, - "managed_policies": { + "managed_policy_arns": { Type: schema.TypeSet, Computed: true, - Set: permissionSetManagedPoliciesHash, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "arn": { - Type: schema.TypeString, - Computed: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - }, + Elem: &schema.Schema{ + Type: schema.TypeString, }, }, @@ -156,14 +144,9 @@ func dataSourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) if managedPoliciesErr != nil { return fmt.Errorf("Error getting Managed Policies for AWS SSO Permission Set: %s", managedPoliciesErr) } - managedPoliciesSet := &schema.Set{ - F: permissionSetManagedPoliciesHash, - } + var managedPolicyArns []string for _, managedPolicy := range managedPoliciesResp.AttachedManagedPolicies { - managedPoliciesSet.Add(map[string]interface{}{ - "arn": aws.StringValue(managedPolicy.Arn), - "name": aws.StringValue(managedPolicy.Name), - }) + managedPolicyArns = append(managedPolicyArns, aws.StringValue(managedPolicy.Arn)) } tags, err := keyvaluetags.SsoListTags(conn, permissionSetArn, instanceArn) @@ -180,20 +163,10 @@ func dataSourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) d.Set("session_duration", 
permissionSet.SessionDuration) d.Set("relay_state", permissionSet.RelayState) d.Set("inline_policy", inlinePolicyResp.InlinePolicy) - d.Set("managed_policies", managedPoliciesSet) + d.Set("managed_policy_arns", managedPolicyArns) if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { return fmt.Errorf("error setting tags: %s", err) } return nil } - -// Generates a hash for the set hash function used by the -// managed_policies attribute. -func permissionSetManagedPoliciesHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["arn"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - return hashcode.String(buf.String()) -} diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index e0152e65c8c..b7b62b89052 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -37,7 +37,10 @@ func resourceAwsSsoPermissionSet() *schema.Resource { return nil, fmt.Errorf("Error parsing AWS Permission Set ID %s: %s", d.Id(), err) } - d.Set("instance_arn", instanceArn) + err = d.Set("instance_arn", instanceArn) + if err != nil { + return nil, err + } return []*schema.ResourceData{d}, nil }, }, @@ -110,6 +113,7 @@ func resourceAwsSsoPermissionSet() *schema.Resource { Type: schema.TypeString, Optional: true, ValidateFunc: validation.StringLenBetween(1, 100), + Default: "PT1H", }, "relay_state": { @@ -128,13 +132,14 @@ func resourceAwsSsoPermissionSet() *schema.Resource { DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, }, - "managed_policies": { + "managed_policy_arns": { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validateArn, }, + Set: schema.HashString, }, "tags": tagsSchema(), @@ -170,9 +175,9 @@ func resourceAwsSsoPermissionSetCreate(d *schema.ResourceData, meta interface{}) params.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().SsoTags() } - createPermissionSetResp, createPermissionSetErr := ssoadminconn.CreatePermissionSet(params) - if createPermissionSetErr != nil { - return fmt.Errorf("Error creating AWS SSO Permission Set: %s", createPermissionSetErr) + createPermissionSetResp, createPermissionerr := ssoadminconn.CreatePermissionSet(params) + if createPermissionerr != nil { + return fmt.Errorf("Error creating AWS SSO Permission Set: %s", createPermissionerr) } permissionSetArn := createPermissionSetResp.PermissionSet.PermissionSetArn @@ -191,22 +196,18 @@ func resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) e var permissionSet *ssoadmin.PermissionSet permissionSetArn := d.Id() - instanceArn, err := resourceAwsSsoPermissionSetParseID(d.Id()) - if err != nil { - return fmt.Errorf("Error parsing AWS Permission Set ID %s: %s", d.Id(), err) - } - + instanceArn := d.Get("instance_arn").(string) name := d.Get("name").(string) log.Printf("[DEBUG] Reading AWS SSO Permission Set: %s", permissionSetArn) - permissionSetResp, permissionSetErr := ssoadminconn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ + permissionSetResp, permissionerr := ssoadminconn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ InstanceArn: aws.String(instanceArn), PermissionSetArn: aws.String(permissionSetArn), }) - if permissionSetErr != nil { - return fmt.Errorf("Error getting AWS SSO Permission Set: %s", permissionSetErr) + if permissionerr != nil { + return fmt.Errorf("Error getting AWS SSO Permission Set: %s", 
permissionerr) } if aws.StringValue(permissionSetResp.PermissionSet.Name) == name { permissionSet = permissionSetResp.PermissionSet @@ -237,14 +238,9 @@ func resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) e if managedPoliciesErr != nil { return fmt.Errorf("Error getting Managed Policies for AWS SSO Permission Set: %s", managedPoliciesErr) } - managedPoliciesSet := &schema.Set{ - F: permissionSetManagedPoliciesHash, - } + var managedPolicyArns []string for _, managedPolicy := range managedPoliciesResp.AttachedManagedPolicies { - managedPoliciesSet.Add(map[string]interface{}{ - "arn": aws.StringValue(managedPolicy.Arn), - "name": aws.StringValue(managedPolicy.Name), - }) + managedPolicyArns = append(managedPolicyArns, aws.StringValue(managedPolicy.Arn)) } tags, err := keyvaluetags.SsoListTags(ssoadminconn, permissionSetArn, instanceArn) @@ -252,15 +248,42 @@ func resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error listing tags for AWS SSO Permission Set (%s): %s", permissionSetArn, err) } - d.Set("arn", permissionSetArn) - d.Set("created_date", permissionSet.CreatedDate.Format(time.RFC3339)) - d.Set("instance_arn", instanceArn) - d.Set("name", permissionSet.Name) - d.Set("description", permissionSet.Description) - d.Set("session_duration", permissionSet.SessionDuration) - d.Set("relay_state", permissionSet.RelayState) - d.Set("inline_policy", inlinePolicyResp.InlinePolicy) - d.Set("managed_policies", managedPoliciesSet) + err = d.Set("arn", permissionSetArn) + if err != nil { + return err + } + err = d.Set("created_date", permissionSet.CreatedDate.Format(time.RFC3339)) + if err != nil { + return err + } + err = d.Set("instance_arn", instanceArn) + if err != nil { + return err + } + err = d.Set("name", permissionSet.Name) + if err != nil { + return err + } + err = d.Set("description", permissionSet.Description) + if err != nil { + return err + } + err = d.Set("session_duration", permissionSet.SessionDuration) + if err != nil { + return err + } + err = d.Set("relay_state", permissionSet.RelayState) + if err != nil { + return err + } + err = d.Set("inline_policy", inlinePolicyResp.InlinePolicy) + if err != nil { + return err + } + err = d.Set("managed_policy_arns", managedPolicyArns) + if err != nil { + return err + } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { return fmt.Errorf("Error setting tags: %s", err) } @@ -272,10 +295,7 @@ func resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) ssoadminconn := meta.(*AWSClient).ssoadminconn permissionSetArn := d.Id() - instanceArn, err := resourceAwsSsoPermissionSetParseID(permissionSetArn) - if err != nil { - return fmt.Errorf("Error parsing AWS Permission Set ID %s: %s", permissionSetArn, err) - } + instanceArn := d.Get("instance_arn").(string) log.Printf("[DEBUG] Updating AWS SSO Permission Set: %s", permissionSetArn) @@ -289,9 +309,9 @@ func resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) } log.Printf("[DEBUG] Updating AWS SSO Permission Set: %s", input) - _, permissionSetErr := ssoadminconn.UpdatePermissionSet(input) - if permissionSetErr != nil { - return fmt.Errorf("Error updating AWS SSO Permission Set: %s", permissionSetErr) + _, permissionerr := ssoadminconn.UpdatePermissionSet(input) + if permissionerr != nil { + return fmt.Errorf("Error updating AWS SSO Permission Set: %s", permissionerr) } } @@ -329,8 +349,8 @@ func resourceAwsSsoPermissionSetUpdate(d 
*schema.ResourceData, meta interface{}) } } - if d.HasChange("managed_policies") { - o, n := d.GetChange("managed_policies") + if d.HasChange("managed_policy_arns") { + o, n := d.GetChange("managed_policy_arns") os := o.(*schema.Set) ns := n.(*schema.Set) @@ -365,7 +385,7 @@ func resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) } // Reprovision if anything has changed - if d.HasChanges("description", "relay_state", "session_duration", "inline_policy", "managed_policies", "tags") { + if d.HasChanges("description", "relay_state", "session_duration", "inline_policy", "managed_policy_arns", "tags") { // Auto provision all accounts targetType := ssoadmin.ProvisionTargetTypeAllProvisionedAccounts @@ -410,10 +430,7 @@ func resourceAwsSsoPermissionSetDelete(d *schema.ResourceData, meta interface{}) ssoadminconn := meta.(*AWSClient).ssoadminconn permissionSetArn := d.Id() - instanceArn, parseErr := resourceAwsSsoPermissionSetParseID(permissionSetArn) - if parseErr != nil { - return fmt.Errorf("Error parsing AWS Permission Set ID %s: %s", permissionSetArn, parseErr) - } + instanceArn := d.Get("instance_arn").(string) log.Printf("[INFO] Deleting AWS SSO Permission Set: %s", permissionSetArn) @@ -431,7 +448,7 @@ func resourceAwsSsoPermissionSetDelete(d *schema.ResourceData, meta interface{}) return nil } -func attachPoliciesToPermissionSet(ssoadminconn *ssoadmin.SSOAdmin, d *schema.ResourceData, instanceArn *string, permissionSetArn *string) error { +func attachPoliciesToPermissionSet(ssoadminconn *ssoadmin.SSOAdmin, d *schema.ResourceData, permissionSetArn *string, instanceArn *string) error { if v, ok := d.GetOk("inline_policy"); ok { log.Printf("[INFO] Attaching IAM inline policy to AWS SSO Permission Set") @@ -450,7 +467,7 @@ func attachPoliciesToPermissionSet(ssoadminconn *ssoadmin.SSOAdmin, d *schema.Re } } - if v, ok := d.GetOk("managed_policies"); ok { + if v, ok := d.GetOk("managed_policy_arns"); ok { log.Printf("[INFO] Attaching Managed Policies to AWS SSO Permission Set") managedPolicies := expandStringSet(v.(*schema.Set)) @@ -474,27 +491,26 @@ func attachPoliciesToPermissionSet(ssoadminconn *ssoadmin.SSOAdmin, d *schema.Re } func resourceAwsSsoPermissionSetParseID(id string) (string, error) { - // id = arn:aws:sso:::permissionSet/${InstanceID}/${PermissionSetID} - idFormatErr := fmt.Errorf("Unexpected format of AWS Permission Set ID (%s), expected format arn:aws:sso:::permissionSet/ins-123456A/ps-56789B", id) + // id = arn:${Partition}:sso:::permissionSet/${InstanceID}/${PermissionSetID} + idFormatErr := fmt.Errorf("Unexpected format of ARN (%s), expected arn:${Partition}:sso:::permissionSet/${InstanceId}/${PermissionSetId}", id) permissionSetARN, err := arn.Parse(id) if err != nil { return "", idFormatErr } // We need: - // * The InstanceID portion of the permission set ARN resource (arn:aws:sso:::permissionSet/ins-123456A/ps-56789B) + // * The InstanceID portion of the permission set ARN resource (arn:aws:sso:::permissionSet/${InstanceId}/${PermissionSetId}) // Split up the resource of the permission set ARN - resourceParts := strings.SplitN(permissionSetARN.Resource, "/", 3) - if len(resourceParts) != 3 { + resourceParts := strings.Split(permissionSetARN.Resource, "/") + if len(resourceParts) != 3 || resourceParts[0] != "permissionSet" || resourceParts[1] == "" || resourceParts[2] == "" { return "", idFormatErr } + // resourceParts = ["permissionSet","ins-123456A", "ps-56789B"] instanceARN := &arn.ARN{ - AccountID: permissionSetARN.AccountID, Partition: 
permissionSetARN.Partition, - Region: permissionSetARN.Region, - Service: "instance", - Resource: resourceParts[1], + Service: permissionSetARN.Service, + Resource: fmt.Sprintf("instance/%s", resourceParts[1]), } return instanceARN.String(), nil From 70e789dce72ebad6281f60ee1570a9dc8f6f3e48 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Thu, 15 Oct 2020 12:37:00 -0500 Subject: [PATCH 0576/1212] update to use paging with data.aws_sso_instance --- aws/data_source_aws_sso_instance.go | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/aws/data_source_aws_sso_instance.go b/aws/data_source_aws_sso_instance.go index 092fcb3b206..18cbb4405e7 100644 --- a/aws/data_source_aws_sso_instance.go +++ b/aws/data_source_aws_sso_instance.go @@ -31,22 +31,28 @@ func dataSourceAwsSsoInstanceRead(d *schema.ResourceData, meta interface{}) erro conn := meta.(*AWSClient).ssoadminconn log.Printf("[DEBUG] Reading AWS SSO Instances") - resp, err := conn.ListInstances(&ssoadmin.ListInstancesInput{}) + instances := []*ssoadmin.InstanceMetadata{} + err := conn.ListInstancesPages(&ssoadmin.ListInstancesInput{}, func(page *ssoadmin.ListInstancesOutput, lastPage bool) bool { + if page != nil && page.Instances != nil && len(page.Instances) != 0 { + instances = append(instances, page.Instances...) + } + return !lastPage + }) if err != nil { return fmt.Errorf("Error getting AWS SSO Instances: %s", err) } - if resp == nil || len(resp.Instances) == 0 { + if instances == nil || len(instances) == 0 { log.Printf("[DEBUG] No AWS SSO Instance found") d.SetId("") return nil } - if len(resp.Instances) > 1 { - return fmt.Errorf("Found multiple AWS SSO Instances. Not sure which one to use. %s", resp.Instances) + if len(instances) > 1 { + return fmt.Errorf("Found multiple AWS SSO Instances. Not sure which one to use. %s", instances) } - instance := resp.Instances[0] + instance := instances[0] log.Printf("[DEBUG] Received AWS SSO Instance: %s", instance) d.SetId(time.Now().UTC().String()) From da390fdbbd865d48e90bb1ddb1d66570279184b8 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Thu, 15 Oct 2020 15:00:47 -0500 Subject: [PATCH 0577/1212] update to use paging with data.aws_identity_store_group and data.aws_identity_store_user --- aws/data_source_aws_identity_store_group.go | 17 ++++++++++++----- aws/data_source_aws_identity_store_user.go | 17 ++++++++++++----- aws/data_source_aws_sso_instance.go | 2 +- 3 files changed, 25 insertions(+), 11 deletions(-) diff --git a/aws/data_source_aws_identity_store_group.go b/aws/data_source_aws_identity_store_group.go index 5bc9130d6a8..e5099f9ca8b 100644 --- a/aws/data_source_aws_identity_store_group.go +++ b/aws/data_source_aws_identity_store_group.go @@ -77,7 +77,7 @@ func dataSourceAwsIdentityStoreGroupRead(d *schema.ResourceData, meta interface{ d.Set("display_name", resp.DisplayName) } else if displayName != "" { log.Printf("[DEBUG] Reading AWS Identity Store Groups") - resp, err := conn.ListGroups(&identitystore.ListGroupsInput{ + req := &identitystore.ListGroupsInput{ IdentityStoreId: aws.String(identityStoreID), Filters: []*identitystore.Filter{ { @@ -85,19 +85,26 @@ func dataSourceAwsIdentityStoreGroupRead(d *schema.ResourceData, meta interface{ AttributeValue: aws.String(displayName), }, }, + } + groups := []*identitystore.Group{} + err := conn.ListGroupsPages(req, func(page *identitystore.ListGroupsOutput, lastPage bool) bool { + if page != nil && page.Groups != nil && len(page.Groups) != 0 { + groups = append(groups, page.Groups...) 
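+			// NOTE: ListGroupsPages invokes this callback once per page and
+			// follows the service's pagination token internally; returning
+			// !lastPage keeps iterating until every group has been collected.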
+ } + return !lastPage }) if err != nil { return fmt.Errorf("Error getting AWS Identity Store Groups: %s", err) } - if resp == nil || len(resp.Groups) == 0 { + if len(groups) == 0 { log.Printf("[DEBUG] No AWS Identity Store Groups found") d.SetId("") return nil } - if len(resp.Groups) > 1 { - return fmt.Errorf("Found multiple AWS Identity Store Groups with the DisplayName %v. Not sure which one to use. %s", displayName, resp.Groups) + if len(groups) > 1 { + return fmt.Errorf("Found multiple AWS Identity Store Groups with the DisplayName %v. Not sure which one to use. %s", displayName, groups) } - group := resp.Groups[0] + group := groups[0] d.SetId(aws.StringValue(group.GroupId)) d.Set("group_id", group.GroupId) } else { diff --git a/aws/data_source_aws_identity_store_user.go b/aws/data_source_aws_identity_store_user.go index 78d72ad1be5..c176aefcc5a 100644 --- a/aws/data_source_aws_identity_store_user.go +++ b/aws/data_source_aws_identity_store_user.go @@ -77,7 +77,7 @@ func dataSourceAwsIdentityStoreUserRead(d *schema.ResourceData, meta interface{} d.Set("user_name", resp.UserName) } else if userName != "" { log.Printf("[DEBUG] Reading AWS Identity Store Users") - resp, err := conn.ListUsers(&identitystore.ListUsersInput{ + req := &identitystore.ListUsersInput{ IdentityStoreId: aws.String(identityStoreID), Filters: []*identitystore.Filter{ { @@ -85,19 +85,26 @@ func dataSourceAwsIdentityStoreUserRead(d *schema.ResourceData, meta interface{} AttributeValue: aws.String(userName), }, }, + } + users := []*identitystore.User{} + err := conn.ListUsersPages(req, func(page *identitystore.ListUsersOutput, lastPage bool) bool { + if page != nil && page.Users != nil && len(page.Users) != 0 { + users = append(users, page.Users...) + } + return !lastPage }) if err != nil { return fmt.Errorf("Error getting AWS Identity Store Users: %s", err) } - if resp == nil || len(resp.Users) == 0 { + if len(users) == 0 { log.Printf("[DEBUG] No AWS Identity Store Users found") d.SetId("") return nil } - if len(resp.Users) > 1 { - return fmt.Errorf("Found multiple AWS Identity Store Users with the UserName %v. Not sure which one to use. %s", userName, resp.Users) + if len(users) > 1 { + return fmt.Errorf("Found multiple AWS Identity Store Users with the UserName %v. Not sure which one to use. 
%s", userName, users) } - user := resp.Users[0] + user := users[0] d.SetId(aws.StringValue(user.UserId)) d.Set("user_id", user.UserId) } else { diff --git a/aws/data_source_aws_sso_instance.go b/aws/data_source_aws_sso_instance.go index 18cbb4405e7..08b6c4c1088 100644 --- a/aws/data_source_aws_sso_instance.go +++ b/aws/data_source_aws_sso_instance.go @@ -42,7 +42,7 @@ func dataSourceAwsSsoInstanceRead(d *schema.ResourceData, meta interface{}) erro return fmt.Errorf("Error getting AWS SSO Instances: %s", err) } - if instances == nil || len(instances) == 0 { + if len(instances) == 0 { log.Printf("[DEBUG] No AWS SSO Instance found") d.SetId("") return nil From 25acc97f7c72a42d119ccd941cbc5707e078b230 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Thu, 15 Oct 2020 15:45:19 -0500 Subject: [PATCH 0578/1212] update to use paging with aws_sso_assignment --- aws/data_source_aws_identity_store_group.go | 2 +- aws/data_source_aws_identity_store_user.go | 2 +- aws/data_source_aws_sso_instance.go | 2 +- aws/resource_aws_sso_assignment.go | 34 +++++++++------------ 4 files changed, 18 insertions(+), 22 deletions(-) diff --git a/aws/data_source_aws_identity_store_group.go b/aws/data_source_aws_identity_store_group.go index e5099f9ca8b..b69028d0cbb 100644 --- a/aws/data_source_aws_identity_store_group.go +++ b/aws/data_source_aws_identity_store_group.go @@ -88,7 +88,7 @@ func dataSourceAwsIdentityStoreGroupRead(d *schema.ResourceData, meta interface{ } groups := []*identitystore.Group{} err := conn.ListGroupsPages(req, func(page *identitystore.ListGroupsOutput, lastPage bool) bool { - if page != nil && page.Groups != nil && len(page.Groups) != 0 { + if page != nil && len(page.Groups) != 0 { groups = append(groups, page.Groups...) } return !lastPage diff --git a/aws/data_source_aws_identity_store_user.go b/aws/data_source_aws_identity_store_user.go index c176aefcc5a..766a0e518d2 100644 --- a/aws/data_source_aws_identity_store_user.go +++ b/aws/data_source_aws_identity_store_user.go @@ -88,7 +88,7 @@ func dataSourceAwsIdentityStoreUserRead(d *schema.ResourceData, meta interface{} } users := []*identitystore.User{} err := conn.ListUsersPages(req, func(page *identitystore.ListUsersOutput, lastPage bool) bool { - if page != nil && page.Users != nil && len(page.Users) != 0 { + if page != nil && len(page.Users) != 0 { users = append(users, page.Users...) } return !lastPage diff --git a/aws/data_source_aws_sso_instance.go b/aws/data_source_aws_sso_instance.go index 08b6c4c1088..2104b5f0cd0 100644 --- a/aws/data_source_aws_sso_instance.go +++ b/aws/data_source_aws_sso_instance.go @@ -33,7 +33,7 @@ func dataSourceAwsSsoInstanceRead(d *schema.ResourceData, meta interface{}) erro log.Printf("[DEBUG] Reading AWS SSO Instances") instances := []*ssoadmin.InstanceMetadata{} err := conn.ListInstancesPages(&ssoadmin.ListInstancesInput{}, func(page *ssoadmin.ListInstancesOutput, lastPage bool) bool { - if page != nil && page.Instances != nil && len(page.Instances) != 0 { + if page != nil && len(page.Instances) != 0 { instances = append(instances, page.Instances...) } return !lastPage diff --git a/aws/resource_aws_sso_assignment.go b/aws/resource_aws_sso_assignment.go index e54ec59bd57..4628d92da57 100644 --- a/aws/resource_aws_sso_assignment.go +++ b/aws/resource_aws_sso_assignment.go @@ -365,34 +365,30 @@ func resourceAwsSsoAssignmentGet( if targetType != ssoadmin.TargetTypeAwsAccount { return nil, fmt.Errorf("Invalid AWS SSO Assignments Target type %s. 
Only %s is supported", targetType, ssoadmin.TargetTypeAwsAccount) } - req := &ssoadmin.ListAccountAssignmentsInput{ InstanceArn: aws.String(instanceArn), PermissionSetArn: aws.String(permissionSetArn), AccountId: aws.String(targetID), } - log.Printf("[DEBUG] Reading AWS SSO Assignments for %s", req) - resp, err := conn.ListAccountAssignments(req) - if err != nil { - return nil, fmt.Errorf("Error getting AWS SSO Assignments: %s", err) - } - - if resp == nil || len(resp.AccountAssignments) == 0 { - log.Printf("[DEBUG] No account assignments found") - return nil, nil - } - - for _, accountAssignment := range resp.AccountAssignments { - if aws.StringValue(accountAssignment.PrincipalType) == principalType { - if aws.StringValue(accountAssignment.PrincipalId) == principalID { - return accountAssignment, nil + var accountAssignment *ssoadmin.AccountAssignment + err := conn.ListAccountAssignmentsPages(req, func(page *ssoadmin.ListAccountAssignmentsOutput, lastPage bool) bool { + if page != nil && len(page.AccountAssignments) != 0 { + for _, a := range page.AccountAssignments { + if aws.StringValue(a.PrincipalType) == principalType { + if aws.StringValue(a.PrincipalId) == principalID { + accountAssignment = a + return false + } + } } } + return !lastPage + }) + if err != nil { + return nil, fmt.Errorf("Error getting AWS SSO Assignments: %s", err) } - - // not found - return nil, nil + return accountAssignment, nil } func waitForAssignmentCreation(conn *ssoadmin.SSOAdmin, instanceArn string, requestID string, timeout time.Duration) (*ssoadmin.AccountAssignmentOperationStatus, error) { From ebb181bdccf04b453c9d64d70bc2e127892d12ff Mon Sep 17 00:00:00 2001 From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com> Date: Thu, 15 Oct 2020 17:30:11 -0500 Subject: [PATCH 0579/1212] fix permission set provisioning wait --- aws/resource_aws_sso_permission_set.go | 79 ++++++++++++++------------ 1 file changed, 43 insertions(+), 36 deletions(-) diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index b7b62b89052..ffecc967f15 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -303,9 +303,18 @@ func resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) input := &ssoadmin.UpdatePermissionSetInput{ PermissionSetArn: aws.String(permissionSetArn), InstanceArn: aws.String(instanceArn), - Description: aws.String(d.Get("description").(string)), - RelayState: aws.String(d.Get("relay_state").(string)), - SessionDuration: aws.String(d.Get("session_duration").(string)), + } + + if d.HasChange("description") { + input.Description = aws.String(d.Get("description").(string)) + } + + if d.HasChange("relay_state") { + input.RelayState = aws.String(d.Get("relay_state").(string)) + } + + if d.HasChange("session_duration") { + input.SessionDuration = aws.String(d.Get("session_duration").(string)) } log.Printf("[DEBUG] Updating AWS SSO Permission Set: %s", input) @@ -401,25 +410,11 @@ func resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error provisioning AWS SSO Permission Set (%s): %w", d.Id(), err) } - if provisionResponse != nil && provisionResponse.PermissionSetProvisioningStatus != nil { - status := provisionResponse.PermissionSetProvisioningStatus - - if status.CreatedDate != nil { - d.Set("created_date", status.CreatedDate.Format(time.RFC3339)) - } - - wait := resource.StateChangeConf{ - Delay: AWSSSOPermissionSetProvisioningRetryDelay, - Pending: 
[]string{ssoadmin.StatusValuesInProgress}, - Target: []string{ssoadmin.StatusValuesSucceeded}, - Timeout: d.Timeout(schema.TimeoutUpdate), - MinTimeout: AWSSSOPermissionSetProvisioningRetryMinTimeout, - Refresh: resourceAwsSsoPermissionSetProvisioningRefreshFunc(ssoadminconn, aws.StringValue(status.RequestId), instanceArn), - } + status := provisionResponse.PermissionSetProvisioningStatus - if _, err := wait.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for AWS SSO Permission Set (%s) provisioning: %w", d.Id(), err) - } + _, waitErr := waitForPermissionSetProvisioning(ssoadminconn, instanceArn, aws.StringValue(status.RequestId), d.Timeout(schema.TimeoutUpdate)) + if waitErr != nil { + return waitErr } } @@ -516,27 +511,39 @@ func resourceAwsSsoPermissionSetParseID(id string) (string, error) { return instanceARN.String(), nil } -func resourceAwsSsoPermissionSetProvisioningRefreshFunc(ssoadminconn *ssoadmin.SSOAdmin, instanceArn, requestID string) resource.StateRefreshFunc { +func waitForPermissionSetProvisioning(ssoadminconn *ssoadmin.SSOAdmin, instanceArn string, requestID string, timeout time.Duration) (*ssoadmin.PermissionSetProvisioningStatus, error) { + + stateConf := resource.StateChangeConf{ + Delay: AWSSSOPermissionSetProvisioningRetryDelay, + Pending: []string{ssoadmin.StatusValuesInProgress}, + Target: []string{ssoadmin.StatusValuesSucceeded}, + Timeout: timeout, + MinTimeout: AWSSSOPermissionSetProvisioningRetryMinTimeout, + Refresh: resourceAwsSsoPermissionSetProvisioningRefreshFunc(ssoadminconn, requestID, instanceArn), + } + status, err := stateConf.WaitForState() + if err != nil { + return nil, fmt.Errorf("Error waiting for AWS SSO Permission Set provisioning status: %s", err) + } + return status.(*ssoadmin.PermissionSetProvisioningStatus), nil +} + +func resourceAwsSsoPermissionSetProvisioningRefreshFunc(ssoadminconn *ssoadmin.SSOAdmin, requestID, instanceArn string) resource.StateRefreshFunc { return func() (interface{}, string, error) { input := &ssoadmin.DescribePermissionSetProvisioningStatusInput{ InstanceArn: aws.String(instanceArn), ProvisionPermissionSetRequestId: aws.String(requestID), } - return resourceAwsSsoPermissionSetProvisioningWait(ssoadminconn, input) - } -} - -func resourceAwsSsoPermissionSetProvisioningWait(ssoadminconn *ssoadmin.SSOAdmin, input *ssoadmin.DescribePermissionSetProvisioningStatusInput) (result interface{}, state string, err error) { - - resp, err := ssoadminconn.DescribePermissionSetProvisioningStatus(input) - - if aws.StringValue(resp.PermissionSetProvisioningStatus.Status) == ssoadmin.StatusValuesFailed { - return nil, ssoadmin.StatusValuesFailed, fmt.Errorf("Failed to provision AWS SSO Permission Set (%s): %s", aws.StringValue(resp.PermissionSetProvisioningStatus.PermissionSetArn), aws.StringValue(resp.PermissionSetProvisioningStatus.FailureReason)) - } + resp, err := ssoadminconn.DescribePermissionSetProvisioningStatus(input) + if err != nil { + return resp, "", fmt.Errorf("Error describing permission set provisioning status: %s", err) + } + status := resp.PermissionSetProvisioningStatus + if aws.StringValue(status.Status) == ssoadmin.StatusValuesFailed { + return resp, ssoadmin.StatusValuesFailed, fmt.Errorf("Failed to provision AWS SSO Permission Set (%s): %s", aws.StringValue(status.PermissionSetArn), aws.StringValue(status.FailureReason)) + } + return status, aws.StringValue(status.Status), nil - if err != nil { - return nil, *resp.PermissionSetProvisioningStatus.Status, err } - return true, 
*resp.PermissionSetProvisioningStatus.Status, nil } From d83b5736e637c10aa1f7bf29c99d92974709146f Mon Sep 17 00:00:00 2001 From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com> Date: Fri, 16 Oct 2020 09:28:03 -0500 Subject: [PATCH 0580/1212] permission set import bug fix --- aws/resource_aws_sso_permission_set.go | 106 ++++++++++++++++++++++--- 1 file changed, 94 insertions(+), 12 deletions(-) diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index ffecc967f15..313d2935e8a 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -31,18 +31,7 @@ func resourceAwsSsoPermissionSet() *schema.Resource { Update: resourceAwsSsoPermissionSetUpdate, Delete: resourceAwsSsoPermissionSetDelete, Importer: &schema.ResourceImporter{ - State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - instanceArn, err := resourceAwsSsoPermissionSetParseID(d.Id()) - if err != nil { - return nil, fmt.Errorf("Error parsing AWS Permission Set ID %s: %s", d.Id(), err) - } - - err = d.Set("instance_arn", instanceArn) - if err != nil { - return nil, err - } - return []*schema.ResourceData{d}, nil - }, + State: resourceAwsSsoPermissionSetImport, }, Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(AWSSSOPermissionSetCreateTimeout), @@ -147,6 +136,99 @@ func resourceAwsSsoPermissionSet() *schema.Resource { } } +func resourceAwsSsoPermissionSetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + permissionSetArn := d.Id() + instanceArn, err := resourceAwsSsoPermissionSetParseID(permissionSetArn) + if err != nil { + return []*schema.ResourceData{}, fmt.Errorf("Error parsing AWS Permission Set (%s) for import: %s", permissionSetArn, err) + } + + ssoadminconn := meta.(*AWSClient).ssoadminconn + ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig + + permissionSetResp, permissionSetErr := ssoadminconn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + }) + + if permissionSetErr != nil { + return []*schema.ResourceData{}, permissionSetErr + } + + var permissionSet *ssoadmin.PermissionSet + permissionSet = permissionSetResp.PermissionSet + + log.Printf("[DEBUG] Getting Inline Policy for AWS SSO Permission Set") + inlinePolicyResp, inlinePolicyErr := ssoadminconn.GetInlinePolicyForPermissionSet(&ssoadmin.GetInlinePolicyForPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + }) + if inlinePolicyErr != nil { + return []*schema.ResourceData{}, fmt.Errorf("Error importing Inline Policy for AWS SSO Permission Set (%s): %s", permissionSetArn, inlinePolicyErr) + } + + log.Printf("[DEBUG] Getting Managed Policies for AWS SSO Permission Set") + managedPoliciesResp, managedPoliciesErr := ssoadminconn.ListManagedPoliciesInPermissionSet(&ssoadmin.ListManagedPoliciesInPermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + }) + if managedPoliciesErr != nil { + return []*schema.ResourceData{}, fmt.Errorf("Error importing Managed Policies for AWS SSO Permission Set (%s): %s", permissionSetArn, managedPoliciesErr) + } + var managedPolicyArns []string + for _, managedPolicy := range managedPoliciesResp.AttachedManagedPolicies { + managedPolicyArns = append(managedPolicyArns, aws.StringValue(managedPolicy.Arn)) + } + + tags, err := 
keyvaluetags.SsoListTags(ssoadminconn, permissionSetArn, instanceArn) + if err != nil { + return []*schema.ResourceData{}, fmt.Errorf("Error listing tags during AWS SSO Permission Set (%s) import: %s", permissionSetArn, err) + } + + err = d.Set("instance_arn", instanceArn) + if err != nil { + return []*schema.ResourceData{}, err + } + err = d.Set("arn", permissionSetArn) + if err != nil { + return []*schema.ResourceData{}, err + } + err = d.Set("created_date", permissionSet.CreatedDate.Format(time.RFC3339)) + if err != nil { + return []*schema.ResourceData{}, err + } + err = d.Set("name", permissionSet.Name) + if err != nil { + return []*schema.ResourceData{}, err + } + err = d.Set("description", permissionSet.Description) + if err != nil { + return []*schema.ResourceData{}, err + } + err = d.Set("session_duration", permissionSet.SessionDuration) + if err != nil { + return []*schema.ResourceData{}, err + } + err = d.Set("relay_state", permissionSet.RelayState) + if err != nil { + return []*schema.ResourceData{}, err + } + err = d.Set("inline_policy", inlinePolicyResp.InlinePolicy) + if err != nil { + return []*schema.ResourceData{}, err + } + err = d.Set("managed_policy_arns", managedPolicyArns) + if err != nil { + return []*schema.ResourceData{}, err + } + if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + return []*schema.ResourceData{}, fmt.Errorf("Error importing AWS SSO Permission Set (%s) tags: %s", permissionSetArn, err) + } + d.SetId(permissionSetArn) + + return []*schema.ResourceData{d}, nil +} + func resourceAwsSsoPermissionSetCreate(d *schema.ResourceData, meta interface{}) error { ssoadminconn := meta.(*AWSClient).ssoadminconn From 722987a0c9a62eeaa83ef490c7396e75069c8c53 Mon Sep 17 00:00:00 2001 From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com> Date: Fri, 16 Oct 2020 09:41:40 -0500 Subject: [PATCH 0581/1212] add permission set create, update, and delete test --- aws/resource_aws_sso_permission_set_test.go | 194 ++++++++++++++++++++ 1 file changed, 194 insertions(+) create mode 100644 aws/resource_aws_sso_permission_set_test.go diff --git a/aws/resource_aws_sso_permission_set_test.go b/aws/resource_aws_sso_permission_set_test.go new file mode 100644 index 00000000000..c181f804b01 --- /dev/null +++ b/aws/resource_aws_sso_permission_set_test.go @@ -0,0 +1,194 @@ +package aws + +import ( + "fmt" + "strings" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfawsresource" +) + +func testAccPreCheckAWSSSOPermissionSet(t *testing.T) { + ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn + + input := &ssoadmin.ListInstancesInput{} + + _, err := ssoadminconn.ListInstances(input) + + if testAccPreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +func TestAccAWSSSOPermissionSet_basic(t *testing.T) { + var permissionSet, updatedPermissionSet ssoadmin.PermissionSet + resourceName := "aws_sso_permission_set.example" + name := acctest.RandString(5) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOPermissionSet(t) }, + 
Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOPermissionSetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSSOPermissionSetBasicConfig(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), + resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "1"), + tfawsresource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), + resource.TestCheckResourceAttr(resourceName, "name", fmt.Sprintf("Test_Permission_Set_%s", name)), + resource.TestCheckResourceAttr(resourceName, "description", "Just a test"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSSSOPermissionSetBasicConfigUpdated(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOPermissionSetExists(resourceName, &updatedPermissionSet), + resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "2"), + tfawsresource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), + tfawsresource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess"), + resource.TestCheckResourceAttr(resourceName, "name", fmt.Sprintf("Test_Permission_Set_Update_%s", name)), + resource.TestCheckResourceAttr(resourceName, "description", "Just a test update"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + }, + }) +} + +func testAccCheckAWSSSOPermissionSetExists(resourceName string, permissionSet *ssoadmin.PermissionSet) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("Resource (%s) ID not set", resourceName) + } + + instanceArn, err := resourceAwsSsoPermissionSetParseID(rs.Primary.ID) + + if err != nil { + return err + } + + ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn + + permissionSetResp, permissionSetErr := ssoadminconn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(rs.Primary.ID), + }) + + if permissionSetErr != nil { + return permissionSetErr + } + + if *permissionSetResp.PermissionSet.PermissionSetArn == rs.Primary.ID { + *permissionSet = *permissionSetResp.PermissionSet + return nil + } + + return fmt.Errorf("AWS SSO Permission Set (%s) not found", rs.Primary.ID) + } +} + +func testAccCheckAWSSSOPermissionSetDestroy(s *terraform.State) error { + ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_sso_permission_set" { + continue + } + + idFormatErr := fmt.Errorf("Unexpected format of ARN (%s), expected arn:${Partition}:sso:::permissionSet/${InstanceId}/${PermissionSetId}", rs.Primary.ID) + permissionSetArn, err := arn.Parse(rs.Primary.ID) + if err != nil { + return err + } + + resourceParts := strings.Split(permissionSetArn.Resource, "/") + if len(resourceParts) != 3 || resourceParts[0] != "permissionSet" || resourceParts[1] == "" || resourceParts[2] == "" { + return idFormatErr + } + + // resourceParts = ["permissionSet","ins-123456A", "ps-56789B"] + instanceArn := arn.ARN{ + Partition: permissionSetArn.Partition, + Service: 
permissionSetArn.Service,
+			Resource:  fmt.Sprintf("instance/%s", resourceParts[1]),
+		}.String()
+
+		input := &ssoadmin.DescribePermissionSetInput{
+			InstanceArn:      aws.String(instanceArn),
+			PermissionSetArn: aws.String(rs.Primary.ID),
+		}
+
+		output, err := ssoadminconn.DescribePermissionSet(input)
+
+		if isAWSErr(err, "ResourceNotFoundException", "") {
+			continue
+		}
+
+		if err != nil {
+			return err
+		}
+
+		if output != nil {
+			return fmt.Errorf("AWS SSO Permission Set (%s) still exists", rs.Primary.ID)
+		}
+	}
+
+	return nil
+}
+
+func testAccAWSSSOPermissionSetBasicConfig(rName string) string {
+	return fmt.Sprintf(`
+data "aws_caller_identity" "current" {}
+
+data "aws_sso_instance" "selected" { }
+
+resource "aws_sso_permission_set" "example" {
+  name         = "Test_Permission_Set_%s"
+  description  = "Just a test"
+  instance_arn = data.aws_sso_instance.selected.arn
+  managed_policy_arns = [
+    "arn:aws:iam::aws:policy/ReadOnlyAccess",
+  ]
+}
+`, rName)
+}
+
+func testAccAWSSSOPermissionSetBasicConfigUpdated(rName string) string {
+	return fmt.Sprintf(`
+data "aws_caller_identity" "current" {}
+
+data "aws_sso_instance" "selected" { }
+
+resource "aws_sso_permission_set" "example" {
+  name         = "Test_Permission_Set_Update_%s"
+  description  = "Just a test update"
+  instance_arn = data.aws_sso_instance.selected.arn
+  managed_policy_arns = [
+    "arn:aws:iam::aws:policy/ReadOnlyAccess",
+    "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess",
+  ]
+}
+`, rName)
+}

From 97058c763cca7f40778b0f6d169829f6ea6acf34 Mon Sep 17 00:00:00 2001
From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com>
Date: Tue, 20 Oct 2020 09:29:15 -0500
Subject: [PATCH 0582/1212] add sso permission set website docs

---
 .../docs/d/sso_permission_set.html.markdown |  2 +-
 .../docs/r/sso_permission_set.html.markdown | 58 ++++++++++++++++++---
 2 files changed, 54 insertions(+), 6 deletions(-)

diff --git a/website/docs/d/sso_permission_set.html.markdown b/website/docs/d/sso_permission_set.html.markdown
index e905e70a928..8a2858bd3f7 100644
--- a/website/docs/d/sso_permission_set.html.markdown
+++ b/website/docs/d/sso_permission_set.html.markdown
@@ -43,5 +43,5 @@ In addition to all arguments above, the following attributes are exported:
 * `session_duration` - The session duration of the permission set.
 * `relay_state` - The relay state of the permission set.
 * `inline_policy` - The inline policy of the permission set.
-* `managed_policies` - The managed policies attached to the permission set.
+* `managed_policy_arns` - The managed policies attached to the permission set.
 * `tags` - The tags of the permission set.
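The data source's `managed_policies` attribute is renamed to `managed_policy_arns` above, and its shape changes from a set of objects to a plain set of ARN strings. A minimal sketch of consuming the renamed attribute (the single-instance `aws_sso_instance` data source and the permission set name `Example` are assumptions, not part of this patch):

```hcl
data "aws_sso_instance" "selected" {}

data "aws_sso_permission_set" "example" {
  instance_arn = data.aws_sso_instance.selected.arn
  name         = "Example"
}

# Because the attribute is now a set of strings, it can be passed straight
# into other arguments or outputs without extracting an "arn" key first.
output "example_managed_policy_arns" {
  value = data.aws_sso_permission_set.example.managed_policy_arns
}
```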
diff --git a/website/docs/r/sso_permission_set.html.markdown b/website/docs/r/sso_permission_set.html.markdown
index 0ea03b9e7b5..323a34cdb13 100644
--- a/website/docs/r/sso_permission_set.html.markdown
+++ b/website/docs/r/sso_permission_set.html.markdown
@@ -8,24 +8,72 @@ description: |-
 
 # Resource: aws_sso_permission_set
 
-TODO
+Provides an AWS Single Sign-On Permission Set resource.
 
 ## Example Usage
 
 ```hcl
+data "aws_sso_instance" "selected" { }
+
+data "aws_iam_policy_document" "example" {
+  statement {
+    sid = "1"
+
+    actions = [
+      "s3:ListAllMyBuckets",
+      "s3:GetBucketLocation",
+    ]
+
+    resources = [
+      "arn:aws:s3:::*",
+    ]
+  }
+}
+
 resource "aws_sso_permission_set" "example" {
-  # TODO
+  name             = "Example"
+  description      = "An example"
+  instance_arn     = data.aws_sso_instance.selected.arn
+  session_duration = "PT1H"
+  relay_state      = "https://console.aws.amazon.com/console/home"
+  inline_policy    = data.aws_iam_policy_document.example.json
+  managed_policy_arns = [
+    "arn:aws:iam::aws:policy/ReadOnlyAccess",
+  ]
 }
 ```
 
 ## Argument Reference
 
-TODO
+The following arguments are supported:
+
+* `instance_arn` - (Required) The AWS ARN associated with the AWS Single Sign-On Instance.
+* `name` - (Required) The name of the AWS Single Sign-On Permission Set.
+* `description` - (Optional) The description of the permission set.
+* `session_duration` - (Optional) The session duration of the permission set in the ISO-8601 standard. Defaults to `PT1H`.
+* `relay_state` - (Optional) The relay state of the permission set.
+* `inline_policy` - (Optional) The IAM inline policy to attach to the permission set.
+* `managed_policy_arns` - (Optional) The IAM managed policies to attach to the permission set.
+* `tags` - (Optional) The tags of the permission set.
 
 ## Attribute Reference
 
-TODO
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - The ARN of the permission set.
+* `arn` - The ARN of the permission set.
+* `created_date` - The created date of the permission set.
+* `description` - The description of the permission set.
+* `session_duration` - The session duration of the permission set in the ISO-8601 standard.
+* `relay_state` - The relay state of the permission set.
+* `inline_policy` - The inline policy of the permission set.
+* `managed_policy_arns` - The managed policies attached to the permission set.
+* `tags` - The tags of the permission set.
 
 ## Import
 
-TODO
+`aws_sso_permission_set` can be imported by using the AWS Single Sign-On Permission Set Amazon Resource Name (ARN), e.g.
+ +``` +$ terraform import aws_sso_permission_set.example arn:aws:sso:::permissionSet/ssoins-2938j0x8920sbj72/ps-80383020jr9302rk +``` From be376819970ae4a16d20beb1595d6abd2dddbe66 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Tue, 20 Oct 2020 10:42:24 -0500 Subject: [PATCH 0583/1212] update to use paging with data.aws_sso_permission_set --- aws/data_source_aws_sso_permission_set.go | 78 +++++++++++++---------- 1 file changed, 44 insertions(+), 34 deletions(-) diff --git a/aws/data_source_aws_sso_permission_set.go b/aws/data_source_aws_sso_permission_set.go index 55dd7be9d34..58ac29a0ba8 100644 --- a/aws/data_source_aws_sso_permission_set.go +++ b/aws/data_source_aws_sso_permission_set.go @@ -87,36 +87,42 @@ func dataSourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) name := d.Get("name").(string) log.Printf("[DEBUG] Reading AWS SSO Permission Sets") - resp, err := conn.ListPermissionSets(&ssoadmin.ListPermissionSetsInput{ + + var permissionSetArn string + var permissionSet *ssoadmin.PermissionSet + var permissionSetErr error + + req := &ssoadmin.ListPermissionSetsInput{ InstanceArn: aws.String(instanceArn), - MaxResults: aws.Int64(100), + } + err := conn.ListPermissionSetsPages(req, func(page *ssoadmin.ListPermissionSetsOutput, lastPage bool) bool { + if page != nil && len(page.PermissionSets) != 0 { + for _, ps := range page.PermissionSets { + permissionSetArn = aws.StringValue(ps) + log.Printf("[DEBUG] Reading AWS SSO Permission Set: %v", permissionSetArn) + var permissionSetResp *ssoadmin.DescribePermissionSetOutput + permissionSetResp, permissionSetErr = conn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + }) + if permissionSetErr != nil { + return false + } + if aws.StringValue(permissionSetResp.PermissionSet.Name) == name { + permissionSet = permissionSetResp.PermissionSet + return false + } + } + } + return !lastPage }) + if err != nil { return fmt.Errorf("Error getting AWS SSO Permission Sets: %s", err) } - if resp == nil || len(resp.PermissionSets) == 0 { - log.Printf("[DEBUG] No AWS SSO Permission Sets found") - d.SetId("") - return nil - } - // TODO: paging (if resp.NextToken != nil) - var permissionSetArn string - var permissionSet *ssoadmin.PermissionSet - for _, permissionSetArns := range resp.PermissionSets { - permissionSetArn = aws.StringValue(permissionSetArns) - log.Printf("[DEBUG] Reading AWS SSO Permission Set: %v", permissionSetArn) - permissionSetResp, permissionSetErr := conn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - }) - if permissionSetErr != nil { - return fmt.Errorf("Error getting AWS SSO Permission Set: %s", permissionSetErr) - } - if aws.StringValue(permissionSetResp.PermissionSet.Name) == name { - permissionSet = permissionSetResp.PermissionSet - break - } + if permissionSetErr != nil { + return fmt.Errorf("Error getting AWS SSO Permission Set: %s", permissionSetErr) } if permissionSet == nil { @@ -137,21 +143,24 @@ func dataSourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) } log.Printf("[DEBUG] Getting Managed Policies for AWS SSO Permission Set") - managedPoliciesResp, managedPoliciesErr := conn.ListManagedPoliciesInPermissionSet(&ssoadmin.ListManagedPoliciesInPermissionSetInput{ + var managedPolicyArns []string + managedPoliciesReq := &ssoadmin.ListManagedPoliciesInPermissionSetInput{ InstanceArn: 
aws.String(instanceArn),
 		PermissionSetArn: aws.String(permissionSetArn),
+	}
+	managedPoliciesErr := conn.ListManagedPoliciesInPermissionSetPages(managedPoliciesReq, func(page *ssoadmin.ListManagedPoliciesInPermissionSetOutput, lastPage bool) bool {
+		for _, managedPolicy := range page.AttachedManagedPolicies {
+			managedPolicyArns = append(managedPolicyArns, aws.StringValue(managedPolicy.Arn))
+		}
+		return !lastPage
 	})
 	if managedPoliciesErr != nil {
 		return fmt.Errorf("Error getting Managed Policies for AWS SSO Permission Set: %s", managedPoliciesErr)
 	}
 
-	var managedPolicyArns []string
-	for _, managedPolicy := range managedPoliciesResp.AttachedManagedPolicies {
-		managedPolicyArns = append(managedPolicyArns, aws.StringValue(managedPolicy.Arn))
-	}
-
-	tags, err := keyvaluetags.SsoListTags(conn, permissionSetArn, instanceArn)
-	if err != nil {
-		return fmt.Errorf("Error listing tags for ASW SSO Permission Set (%s): %s", permissionSetArn, err)
+	tags, tagsErr := keyvaluetags.SsoListTags(conn, permissionSetArn, instanceArn)
+	if tagsErr != nil {
+		return fmt.Errorf("Error listing tags for AWS SSO Permission Set (%s): %s", permissionSetArn, tagsErr)
 	}
 
 	d.SetId(permissionSetArn)
@@ -164,8 +173,9 @@ func dataSourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{})
 	d.Set("relay_state", permissionSet.RelayState)
 	d.Set("inline_policy", inlinePolicyResp.InlinePolicy)
 	d.Set("managed_policy_arns", managedPolicyArns)
-	if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil {
-		return fmt.Errorf("error setting tags: %s", err)
+	tagsMapErr := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map())
+	if tagsMapErr != nil {
+		return fmt.Errorf("Error setting tags: %s", tagsMapErr)
 	}
 
 	return nil

From 985e93cc5c9bf5d8d43c513103d6b54c5a47a420 Mon Sep 17 00:00:00 2001
From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com>
Date: Tue, 20 Oct 2020 10:31:45 -0500
Subject: [PATCH 0584/1212] add sso assignment website doc

---
 website/docs/r/sso_assignment.html.markdown | 43 ++++++++++++++++++---
 1 file changed, 37 insertions(+), 6 deletions(-)

diff --git a/website/docs/r/sso_assignment.html.markdown b/website/docs/r/sso_assignment.html.markdown
index 0ab77a554fc..4b41b55899a 100644
--- a/website/docs/r/sso_assignment.html.markdown
+++ b/website/docs/r/sso_assignment.html.markdown
@@ -8,24 +8,55 @@ description: |-
 
 # Resource: sso_assignment
 
-TODO
+Provides an AWS Single Sign-On Assignment resource.
 
 ## Example Usage
 
 ```hcl
-resource "sso_assignment" "example" {
-  # TODO
+data "aws_sso_permission_set" "example" {
+  instance_arn = data.aws_sso_instance.selected.arn
+  name         = "AWSReadOnlyAccess"
+}
+
+data "aws_identity_store_group" "example_group" {
+  identity_store_id = data.aws_sso_instance.selected.identity_store_id
+  display_name      = "Example Group@example.com"
+}
+
+resource "aws_sso_assignment" "example" {
+  instance_arn       = data.aws_sso_instance.selected.arn
+  permission_set_arn = data.aws_sso_permission_set.example.arn
+
+  target_type = "AWS_ACCOUNT"
+  target_id   = "012347678910"
+
+  principal_type = "GROUP"
+  principal_id   = data.aws_identity_store_group.example_group.group_id
+}
 ```
 
 ## Argument Reference
 
-TODO
+The following arguments are supported:
+
+* `instance_arn` - (Required) The AWS ARN associated with the AWS Single Sign-On Instance.
+* `permission_set_arn` - (Required) The AWS ARN associated with the AWS Single Sign-On Permission Set.
+* `target_id` - (Required) The identifier of the AWS account to assign to the AWS Single Sign-On Permission Set. +* `principal_type` - (Required) The entity type for which the assignment will be created. Valid values: `USER`, `GROUP`. +* `principal_id` - (Required) An identifier for an object in AWS SSO, such as a user or group. PrincipalIds are GUIDs (For example, f81d4fae-7dec-11d0-a765-00a0c91e6bf6). +* `target_type` - (Optional) Type of AWS Single Sign-On Assignment. Valid values: `AWS_ACCOUNT`. ## Attribute Reference -TODO +In addition to all arguments above, the following attributes are exported: + +* `id` - Identifier of the AWS Single Sign-On Assignment. +* `created_date` - The created date of the AWS Single Sign-On Assignment. ## Import -TODO +`aws_sso_assignment` can be imported by using the identifier of the AWS Single Sign-On Assignment, e.g. +identifier = ${InstanceID}/${PermissionSetID}/${TargetType}/${TargetID}/${PrincipalType}/${PrincipalID} +``` +$ terraform import aws_sso_assignment.example ssoins-0123456789abcdef/ps-0123456789abcdef/AWS_ACCOUNT/012347678910/GROUP/51b3755f39-e945c18b-e449-4a93-3e95-12231cb7ef96 +``` From 9e5e798a44294c89dc886468720414eb294fa007 Mon Sep 17 00:00:00 2001 From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com> Date: Tue, 20 Oct 2020 10:32:27 -0500 Subject: [PATCH 0585/1212] update sso permission set website doc --- .../docs/r/sso_permission_set.html.markdown | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/website/docs/r/sso_permission_set.html.markdown b/website/docs/r/sso_permission_set.html.markdown index 323a34cdb13..6c743a703c3 100644 --- a/website/docs/r/sso_permission_set.html.markdown +++ b/website/docs/r/sso_permission_set.html.markdown @@ -49,20 +49,20 @@ The following arguments are supported: * `instance_arn` - (Required) The AWS ARN associated with the AWS Single Sign-On Instance. * `name` - (Required) The name of the AWS Single Sign-On Permission Set. +* `description` - (Optional) The description of the AWS Single Sign-On Permission Set. +* `session_duration` - (Optional) The session duration of the AWS Single Sign-On Permission Set in the ISO-8601 standard. The default value is `PT1H`. +* `relay_state` - (Optional) The relay state of AWS Single Sign-On Permission Set. +* `inline_policy` - (Optional) The inline policy of the AWS Single Sign-On Permission Set. +* `managed_policy_arns` - (Optional) The managed policies attached to the AWS Single Sign-On Permission Set. +* `tags` - (Optional) Key-value map of resource tags. ## Attribute Reference In addition to all arguments above, the following attributes are exported: -* `id` - The arn of the permission set. -* `arn` - The arn of the permission set. -* `created_date` - The created date of the permission set. -* `description` - The description of the permission set. -* `session_duration` - The session duration of the permission set in the ISO-8601 standard. -* `relay_state` - The relay state of the permission set. -* `inline_policy` - The inline policy of the permission set. -* `managed_policy_arns` - The managed policies attached to the permission set. -* `tags` - The tags of the permission set. +* `id` - The arn of the AWS Single Sign-On Permission Set. +* `arn` - The arn of the AWS Single Sign-On Permission Set. +* `created_date` - The created date of the AWS Single Sign-On Permission Set. 
## Import From 810bf2d7d5565399ebe522ca91f0c58c3330d98a Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Tue, 20 Oct 2020 10:54:59 -0500 Subject: [PATCH 0586/1212] resolve make lint errors --- aws/resource_aws_sso_permission_set.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index 313d2935e8a..2c43464756c 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -155,8 +155,7 @@ func resourceAwsSsoPermissionSetImport(d *schema.ResourceData, meta interface{}) return []*schema.ResourceData{}, permissionSetErr } - var permissionSet *ssoadmin.PermissionSet - permissionSet = permissionSetResp.PermissionSet + permissionSet := permissionSetResp.PermissionSet log.Printf("[DEBUG] Getting Inline Policy for AWS SSO Permission Set") inlinePolicyResp, inlinePolicyErr := ssoadminconn.GetInlinePolicyForPermissionSet(&ssoadmin.GetInlinePolicyForPermissionSetInput{ From c1fa9e25d02066c4ed354f31d4e4358d9ad723b0 Mon Sep 17 00:00:00 2001 From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com> Date: Tue, 20 Oct 2020 12:14:56 -0500 Subject: [PATCH 0587/1212] fix lint errors --- website/docs/r/sso_assignment.html.markdown | 1 + website/docs/r/sso_permission_set.html.markdown | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/website/docs/r/sso_assignment.html.markdown b/website/docs/r/sso_assignment.html.markdown index 4b41b55899a..ffd29b66233 100644 --- a/website/docs/r/sso_assignment.html.markdown +++ b/website/docs/r/sso_assignment.html.markdown @@ -57,6 +57,7 @@ In addition to all arguments above, the following attributes are exported: `aws_sso_assignment` can be imported by using the identifier of the AWS Single Sign-On Assignment, e.g. 
identifier = ${InstanceID}/${PermissionSetID}/${TargetType}/${TargetID}/${PrincipalType}/${PrincipalID} + ``` $ terraform import aws_sso_assignment.example ssoins-0123456789abcdef/ps-0123456789abcdef/AWS_ACCOUNT/012347678910/GROUP/51b3755f39-e945c18b-e449-4a93-3e95-12231cb7ef96 ``` diff --git a/website/docs/r/sso_permission_set.html.markdown b/website/docs/r/sso_permission_set.html.markdown index 6c743a703c3..827dfdbda5e 100644 --- a/website/docs/r/sso_permission_set.html.markdown +++ b/website/docs/r/sso_permission_set.html.markdown @@ -29,7 +29,7 @@ data "aws_iam_policy_document" "example" { ] } } - + resource "aws_sso_permission_set" "example" { name = "Example" description = "An example" From 2b483a39c4b22027bf5551eb73ef0f22a04a28c9 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Tue, 20 Oct 2020 13:19:48 -0500 Subject: [PATCH 0588/1212] make website-lint-fix --- .../docs/r/sso_permission_set.html.markdown | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/website/docs/r/sso_permission_set.html.markdown b/website/docs/r/sso_permission_set.html.markdown index 827dfdbda5e..ef3516c37e6 100644 --- a/website/docs/r/sso_permission_set.html.markdown +++ b/website/docs/r/sso_permission_set.html.markdown @@ -13,7 +13,7 @@ Provides an AWS Single Sign-On Permission Set resource ## Example Usage ```hcl -data "aws_sso_instance" "selected" { } +data "aws_sso_instance" "selected" {} data "aws_iam_policy_document" "example" { statement { @@ -31,14 +31,14 @@ data "aws_iam_policy_document" "example" { } resource "aws_sso_permission_set" "example" { - name = "Example" - description = "An example" - instance_arn = data.aws_sso_instance.selected.arn - session_duration = "PT1H" - relay_state = "https://console.aws.amazon.com/console/home" - inline_policy = data.aws_iam_policy_document.example.json + name = "Example" + description = "An example" + instance_arn = data.aws_sso_instance.selected.arn + session_duration = "PT1H" + relay_state = "https://console.aws.amazon.com/console/home" + inline_policy = data.aws_iam_policy_document.example.json managed_policy_arns = [ - "arn:aws:iam::aws:policy/ReadOnlyAccess", + "arn:aws:iam::aws:policy/ReadOnlyAccess", ] } ``` @@ -50,8 +50,8 @@ The following arguments are supported: * `instance_arn` - (Required) The AWS ARN associated with the AWS Single Sign-On Instance. * `name` - (Required) The name of the AWS Single Sign-On Permission Set. * `description` - (Optional) The description of the AWS Single Sign-On Permission Set. -* `session_duration` - (Optional) The session duration of the AWS Single Sign-On Permission Set in the ISO-8601 standard. The default value is `PT1H`. -* `relay_state` - (Optional) The relay state of AWS Single Sign-On Permission Set. +* `session_duration` - (Optional) The session duration of the AWS Single Sign-On Permission Set in the ISO-8601 standard. The default value is `PT1H`. +* `relay_state` - (Optional) The relay state of AWS Single Sign-On Permission Set. * `inline_policy` - (Optional) The inline policy of the AWS Single Sign-On Permission Set. * `managed_policy_arns` - (Optional) The managed policies attached to the AWS Single Sign-On Permission Set. * `tags` - (Optional) Key-value map of resource tags. 
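The assignment and permission set documents above now cover the full argument surface of both resources. For readers stitching the two together, here is a hedged end-to-end sketch; the group display name, permission set name, and target account ID are placeholders borrowed from the examples in this series, and attribute spellings follow the docs as of the lint fixes above:

```hcl
# End-to-end sketch: create a permission set and assign it to a group from
# the connected identity store. All literal values below are placeholders.
data "aws_sso_instance" "selected" {}

data "aws_identity_store_group" "example_group" {
  identity_store_id = data.aws_sso_instance.selected.identity_store_id
  display_name      = "Example Group@example.com"
}

resource "aws_sso_permission_set" "example" {
  name                = "Example"
  description         = "An example"
  instance_arn        = data.aws_sso_instance.selected.arn
  managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"]
}

resource "aws_sso_assignment" "example" {
  instance_arn       = data.aws_sso_instance.selected.arn
  permission_set_arn = aws_sso_permission_set.example.arn

  target_type = "AWS_ACCOUNT"
  target_id   = "012347678910"

  principal_type = "GROUP"
  principal_id   = data.aws_identity_store_group.example_group.group_id
}
```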
From f2af1ebd742aa7b61b056ddabee6dbc651f7b68a Mon Sep 17 00:00:00 2001 From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com> Date: Tue, 20 Oct 2020 14:05:02 -0500 Subject: [PATCH 0589/1212] add sso permission set datasource tests --- ...data_source_aws_sso_permission_set_test.go | 110 ++++++++++++++++++ aws/resource_aws_sso_permission_set_test.go | 24 ++-- 2 files changed, 119 insertions(+), 15 deletions(-) create mode 100644 aws/data_source_aws_sso_permission_set_test.go diff --git a/aws/data_source_aws_sso_permission_set_test.go b/aws/data_source_aws_sso_permission_set_test.go new file mode 100644 index 00000000000..4089637f819 --- /dev/null +++ b/aws/data_source_aws_sso_permission_set_test.go @@ -0,0 +1,110 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfawsresource" +) + +func TestAccDataSourceAwsSsoPermissionSetBasic(t *testing.T) { + datasourceName := "data.aws_sso_permission_set.test" + rName := acctest.RandomWithPrefix("tf-sso-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOPermissionSet(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsSsoPermissionSetConfigBasic(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(datasourceName, "managed_policy_arns.#", "1"), + tfawsresource.TestCheckTypeSetElemAttr(datasourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), + resource.TestCheckResourceAttr(datasourceName, "name", fmt.Sprintf("%s", rName)), + resource.TestCheckResourceAttr(datasourceName, "description", "testing"), + resource.TestCheckResourceAttr(datasourceName, "session_duration", "PT1H"), + resource.TestCheckResourceAttr(datasourceName, "relay_state", "https://console.aws.amazon.com/console/home"), + resource.TestCheckResourceAttr(datasourceName, "tags.%", "0"), + ), + }, + }, + }) +} + +func TestAccDataSourceAwsSsoPermissionSetByTags(t *testing.T) { + datasourceName := "data.aws_sso_permission_set.test" + rName := acctest.RandomWithPrefix("tf-sso-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOPermissionSet(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsSsoPermissionSetConfigByTags(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(datasourceName, "managed_policy_arns.#", "1"), + tfawsresource.TestCheckTypeSetElemAttr(datasourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), + resource.TestCheckResourceAttr(datasourceName, "name", fmt.Sprintf("%s", rName)), + resource.TestCheckResourceAttr(datasourceName, "description", "testing"), + resource.TestCheckResourceAttr(datasourceName, "session_duration", "PT1H"), + resource.TestCheckResourceAttr(datasourceName, "relay_state", "https://console.aws.amazon.com/console/home"), + resource.TestCheckResourceAttr(datasourceName, "tags.%", "3"), + ), + }, + }, + }) +} + +func testAccDataSourceAwsSsoPermissionSetConfigBasic(rName string) string { + return fmt.Sprintf(` +data "aws_sso_instance" "selected" {} + +resource "aws_sso_permission_set" "test" { + name = "%s" + description = "testing" + instance_arn = data.aws_sso_instance.selected.arn + session_duration = "PT1H" + 
relay_state = "https://console.aws.amazon.com/console/home" + managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] +} + +data "aws_sso_permission_set" "test" { + instance_arn = data.aws_sso_instance.selected.arn + name = aws_sso_permission_set.test.name +} +`, rName) +} + +func testAccDataSourceAwsSsoPermissionSetConfigByTags(rName string) string { + return fmt.Sprintf(` +data "aws_sso_instance" "selected" {} + +resource "aws_sso_permission_set" "test" { + name = "%s" + description = "testing" + instance_arn = data.aws_sso_instance.selected.arn + session_duration = "PT1H" + relay_state = "https://console.aws.amazon.com/console/home" + managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] + + tags = { + Key1 = "Value1" + Key2 = "Value2" + Key3 = "Value3" + } +} + +data "aws_sso_permission_set" "test" { + instance_arn = data.aws_sso_instance.selected.arn + name = aws_sso_permission_set.test.name + + tags = { + Key1 = "Value1" + Key2 = "Value2" + Key3 = "Value3" + } +} +`, rName) +} diff --git a/aws/resource_aws_sso_permission_set_test.go b/aws/resource_aws_sso_permission_set_test.go index c181f804b01..6e347700e40 100644 --- a/aws/resource_aws_sso_permission_set_test.go +++ b/aws/resource_aws_sso_permission_set_test.go @@ -30,7 +30,7 @@ func testAccPreCheckAWSSSOPermissionSet(t *testing.T) { } } -func TestAccAWSSSOPermissionSet_basic(t *testing.T) { +func TestAccAWSSSOPermissionSetBasic(t *testing.T) { var permissionSet, updatedPermissionSet ssoadmin.PermissionSet resourceName := "aws_sso_permission_set.example" name := acctest.RandString(5) @@ -160,34 +160,28 @@ func testAccCheckAWSSSOPermissionSetDestroy(s *terraform.State) error { func testAccAWSSSOPermissionSetBasicConfig(rName string) string { return fmt.Sprintf(` - data "aws_caller_identity" "current" {} - data "aws_sso_instance" "selected" { } resource "aws_sso_permission_set" "example" { - name = "Test_Permission_Set_%s" - description = "Just a test" - instance_arn = data.aws_sso_instance.selected.arn - managed_policy_arns = [ - "arn:aws:iam::aws:policy/ReadOnlyAccess", - ] + name = "Test_Permission_Set_%s" + description = "Just a test" + instance_arn = data.aws_sso_instance.selected.arn + managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] } `, rName) } func testAccAWSSSOPermissionSetBasicConfigUpdated(rName string) string { return fmt.Sprintf(` - data "aws_caller_identity" "current" {} - data "aws_sso_instance" "selected" { } resource "aws_sso_permission_set" "example" { - name = "Test_Permission_Set_Update_%s" - description = "Just a test update" - instance_arn = data.aws_sso_instance.selected.arn + name = "Test_Permission_Set_Update_%s" + description = "Just a test update" + instance_arn = data.aws_sso_instance.selected.arn managed_policy_arns = [ "arn:aws:iam::aws:policy/ReadOnlyAccess", - "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess", + "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess" ] } `, rName) From 1df4f8613191a04b6ceac808f9c08bb9beff6f2f Mon Sep 17 00:00:00 2001 From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com> Date: Tue, 20 Oct 2020 16:12:35 -0500 Subject: [PATCH 0590/1212] add sso instance datasource test --- aws/data_source_aws_sso_instance_test.go | 68 +++++++++++++++++++ ...data_source_aws_sso_permission_set_test.go | 4 +- aws/resource_aws_sso_permission_set_test.go | 18 +---- 3 files changed, 71 insertions(+), 19 deletions(-) create mode 100644 aws/data_source_aws_sso_instance_test.go diff --git a/aws/data_source_aws_sso_instance_test.go 
b/aws/data_source_aws_sso_instance_test.go new file mode 100644 index 00000000000..01e6fc73cc8 --- /dev/null +++ b/aws/data_source_aws_sso_instance_test.go @@ -0,0 +1,68 @@ +package aws + +import ( + "fmt" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func testAccPreCheckAWSSSOInstance(t *testing.T) { + ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn + + input := &ssoadmin.ListInstancesInput{} + + _, err := ssoadminconn.ListInstances(input) + + if testAccPreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +func TestAccDataSourceAwsSsoInstanceBasic(t *testing.T) { + datasourceName := "data.aws_sso_instance.selected" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsSsoInstanceConfigBasic(), + Check: resource.ComposeTestCheckFunc( + testAccMatchResourceAttrAwsSsoARN(datasourceName, "arn", regexp.MustCompile("instance/ssoins-[a-zA-Z0-9-.]{16}")), + resource.TestMatchResourceAttr(datasourceName, "identity_store_id", regexp.MustCompile("^[a-zA-Z0-9-]*")), + ), + }, + }, + }) +} + +func testAccDataSourceAwsSsoInstanceConfigBasic() string { + return `data "aws_sso_instance" "selected" {}` +} + +func testAccMatchResourceAttrAwsSsoARN(resourceName, attributeName string, arnResourceRegexp *regexp.Regexp) resource.TestCheckFunc { + return func(s *terraform.State) error { + arnRegexp := arn.ARN{ + Partition: testAccGetPartition(), + Resource: arnResourceRegexp.String(), + Service: "sso", + }.String() + + attributeMatch, err := regexp.Compile(arnRegexp) + + if err != nil { + return fmt.Errorf("Unable to compile ARN regexp (%s): %s", arnRegexp, err) + } + + return resource.TestMatchResourceAttr(resourceName, attributeName, attributeMatch)(s) + } +} diff --git a/aws/data_source_aws_sso_permission_set_test.go b/aws/data_source_aws_sso_permission_set_test.go index 4089637f819..8d2c7b6f45a 100644 --- a/aws/data_source_aws_sso_permission_set_test.go +++ b/aws/data_source_aws_sso_permission_set_test.go @@ -14,7 +14,7 @@ func TestAccDataSourceAwsSsoPermissionSetBasic(t *testing.T) { rName := acctest.RandomWithPrefix("tf-sso-test") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOPermissionSet(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { @@ -38,7 +38,7 @@ func TestAccDataSourceAwsSsoPermissionSetByTags(t *testing.T) { rName := acctest.RandomWithPrefix("tf-sso-test") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOPermissionSet(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { diff --git a/aws/resource_aws_sso_permission_set_test.go b/aws/resource_aws_sso_permission_set_test.go index 6e347700e40..7fe5ee73a80 100644 --- a/aws/resource_aws_sso_permission_set_test.go +++ b/aws/resource_aws_sso_permission_set_test.go @@ -14,29 +14,13 @@ import ( "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfawsresource" ) -func 
testAccPreCheckAWSSSOPermissionSet(t *testing.T) { - ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn - - input := &ssoadmin.ListInstancesInput{} - - _, err := ssoadminconn.ListInstances(input) - - if testAccPreCheckSkipError(err) { - t.Skipf("skipping acceptance testing: %s", err) - } - - if err != nil { - t.Fatalf("unexpected PreCheck error: %s", err) - } -} - func TestAccAWSSSOPermissionSetBasic(t *testing.T) { var permissionSet, updatedPermissionSet ssoadmin.PermissionSet resourceName := "aws_sso_permission_set.example" name := acctest.RandString(5) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOPermissionSet(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSSSOPermissionSetDestroy, Steps: []resource.TestStep{ From 7af14a246c41c6ae8fbe8b51c008c1851b71a127 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Tue, 20 Oct 2020 16:51:15 -0500 Subject: [PATCH 0591/1212] terrafmt --- ...data_source_aws_sso_permission_set_test.go | 52 +++++++++---------- aws/resource_aws_sso_permission_set_test.go | 38 +++++++------- 2 files changed, 45 insertions(+), 45 deletions(-) diff --git a/aws/data_source_aws_sso_permission_set_test.go b/aws/data_source_aws_sso_permission_set_test.go index 8d2c7b6f45a..b224bfb9171 100644 --- a/aws/data_source_aws_sso_permission_set_test.go +++ b/aws/data_source_aws_sso_permission_set_test.go @@ -62,17 +62,17 @@ func testAccDataSourceAwsSsoPermissionSetConfigBasic(rName string) string { data "aws_sso_instance" "selected" {} resource "aws_sso_permission_set" "test" { - name = "%s" - description = "testing" - instance_arn = data.aws_sso_instance.selected.arn - session_duration = "PT1H" - relay_state = "https://console.aws.amazon.com/console/home" - managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] + name = "%s" + description = "testing" + instance_arn = data.aws_sso_instance.selected.arn + session_duration = "PT1H" + relay_state = "https://console.aws.amazon.com/console/home" + managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] } data "aws_sso_permission_set" "test" { - instance_arn = data.aws_sso_instance.selected.arn - name = aws_sso_permission_set.test.name + instance_arn = data.aws_sso_instance.selected.arn + name = aws_sso_permission_set.test.name } `, rName) } @@ -82,29 +82,29 @@ func testAccDataSourceAwsSsoPermissionSetConfigByTags(rName string) string { data "aws_sso_instance" "selected" {} resource "aws_sso_permission_set" "test" { - name = "%s" - description = "testing" - instance_arn = data.aws_sso_instance.selected.arn - session_duration = "PT1H" - relay_state = "https://console.aws.amazon.com/console/home" - managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] + name = "%s" + description = "testing" + instance_arn = data.aws_sso_instance.selected.arn + session_duration = "PT1H" + relay_state = "https://console.aws.amazon.com/console/home" + managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] - tags = { - Key1 = "Value1" - Key2 = "Value2" - Key3 = "Value3" - } + tags = { + Key1 = "Value1" + Key2 = "Value2" + Key3 = "Value3" + } } data "aws_sso_permission_set" "test" { - instance_arn = data.aws_sso_instance.selected.arn - name = aws_sso_permission_set.test.name + instance_arn = data.aws_sso_instance.selected.arn + name = aws_sso_permission_set.test.name - tags = { - Key1 = "Value1" - Key2 = "Value2" - Key3 = "Value3" - } + tags = { + Key1 = "Value1" 
+ Key2 = "Value2" + Key3 = "Value3" + } } `, rName) } diff --git a/aws/resource_aws_sso_permission_set_test.go b/aws/resource_aws_sso_permission_set_test.go index 7fe5ee73a80..a06754ce228 100644 --- a/aws/resource_aws_sso_permission_set_test.go +++ b/aws/resource_aws_sso_permission_set_test.go @@ -144,29 +144,29 @@ func testAccCheckAWSSSOPermissionSetDestroy(s *terraform.State) error { func testAccAWSSSOPermissionSetBasicConfig(rName string) string { return fmt.Sprintf(` - data "aws_sso_instance" "selected" { } - - resource "aws_sso_permission_set" "example" { - name = "Test_Permission_Set_%s" - description = "Just a test" - instance_arn = data.aws_sso_instance.selected.arn - managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] - } +data "aws_sso_instance" "selected" {} + +resource "aws_sso_permission_set" "example" { + name = "Test_Permission_Set_%s" + description = "Just a test" + instance_arn = data.aws_sso_instance.selected.arn + managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] +} `, rName) } func testAccAWSSSOPermissionSetBasicConfigUpdated(rName string) string { return fmt.Sprintf(` - data "aws_sso_instance" "selected" { } - - resource "aws_sso_permission_set" "example" { - name = "Test_Permission_Set_Update_%s" - description = "Just a test update" - instance_arn = data.aws_sso_instance.selected.arn - managed_policy_arns = [ - "arn:aws:iam::aws:policy/ReadOnlyAccess", - "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess" - ] - } +data "aws_sso_instance" "selected" {} + +resource "aws_sso_permission_set" "example" { + name = "Test_Permission_Set_Update_%s" + description = "Just a test update" + instance_arn = data.aws_sso_instance.selected.arn + managed_policy_arns = [ + "arn:aws:iam::aws:policy/ReadOnlyAccess", + "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess" + ] +} `, rName) } From a5ca96d8062ac35e71588e5ec24596ffd4d65743 Mon Sep 17 00:00:00 2001 From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com> Date: Wed, 21 Oct 2020 17:58:43 -0500 Subject: [PATCH 0592/1212] add permission set tag and disappear tests --- ...data_source_aws_sso_permission_set_test.go | 4 +- aws/resource_aws_sso_permission_set.go | 12 ++ aws/resource_aws_sso_permission_set_test.go | 153 ++++++++++++++++-- 3 files changed, 156 insertions(+), 13 deletions(-) diff --git a/aws/data_source_aws_sso_permission_set_test.go b/aws/data_source_aws_sso_permission_set_test.go index b224bfb9171..5872d1ced90 100644 --- a/aws/data_source_aws_sso_permission_set_test.go +++ b/aws/data_source_aws_sso_permission_set_test.go @@ -9,7 +9,7 @@ import ( "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfawsresource" ) -func TestAccDataSourceAwsSsoPermissionSetBasic(t *testing.T) { +func TestAccDataSourceAwsSsoPermissionSet_basic(t *testing.T) { datasourceName := "data.aws_sso_permission_set.test" rName := acctest.RandomWithPrefix("tf-sso-test") @@ -33,7 +33,7 @@ func TestAccDataSourceAwsSsoPermissionSetBasic(t *testing.T) { }) } -func TestAccDataSourceAwsSsoPermissionSetByTags(t *testing.T) { +func TestAccDataSourceAwsSsoPermissionSet_byTags(t *testing.T) { datasourceName := "data.aws_sso_permission_set.test" rName := acctest.RandomWithPrefix("tf-sso-test") diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go index 2c43464756c..4473240d72a 100644 --- a/aws/resource_aws_sso_permission_set.go +++ b/aws/resource_aws_sso_permission_set.go @@ -287,6 +287,12 @@ func resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta 
interface{}) e PermissionSetArn: aws.String(permissionSetArn), }) + if isAWSErr(permissionerr, ssoadmin.ErrCodeResourceNotFoundException, "") { + log.Printf("[WARN] AWS SSO Permission Set (%s) not found, removing from state", permissionSetArn) + d.SetId("") + return nil + } + if permissionerr != nil { return fmt.Errorf("Error getting AWS SSO Permission Set: %s", permissionerr) } @@ -516,7 +522,13 @@ func resourceAwsSsoPermissionSetDelete(d *schema.ResourceData, meta interface{}) } _, err := ssoadminconn.DeletePermissionSet(params) + if err != nil { + if isAWSErr(err, ssoadmin.ErrCodeResourceNotFoundException, "") { + log.Printf("[DEBUG] AWS SSO Permission Set not found") + d.SetId("") + return nil + } return fmt.Errorf("Error deleting AWS SSO Permission Set (%s): %s", d.Id(), err) } diff --git a/aws/resource_aws_sso_permission_set_test.go b/aws/resource_aws_sso_permission_set_test.go index a06754ce228..4c4b7a3f0d1 100644 --- a/aws/resource_aws_sso_permission_set_test.go +++ b/aws/resource_aws_sso_permission_set_test.go @@ -14,10 +14,10 @@ import ( "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfawsresource" ) -func TestAccAWSSSOPermissionSetBasic(t *testing.T) { +func TestAccAWSSSOPermissionSet_basic(t *testing.T) { var permissionSet, updatedPermissionSet ssoadmin.PermissionSet resourceName := "aws_sso_permission_set.example" - name := acctest.RandString(5) + rName := acctest.RandomWithPrefix("tf-sso-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, @@ -25,12 +25,12 @@ func TestAccAWSSSOPermissionSetBasic(t *testing.T) { CheckDestroy: testAccCheckAWSSSOPermissionSetDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSSSOPermissionSetBasicConfig(name), + Config: testAccSSOPermissionSetBasicConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "1"), tfawsresource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), - resource.TestCheckResourceAttr(resourceName, "name", fmt.Sprintf("Test_Permission_Set_%s", name)), + resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "description", "Just a test"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), @@ -41,13 +41,13 @@ func TestAccAWSSSOPermissionSetBasic(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAWSSSOPermissionSetBasicConfigUpdated(name), + Config: testAccSSOPermissionSetBasicConfigUpdated(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSSOPermissionSetExists(resourceName, &updatedPermissionSet), resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "2"), tfawsresource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), tfawsresource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess"), - resource.TestCheckResourceAttr(resourceName, "name", fmt.Sprintf("Test_Permission_Set_Update_%s", name)), + resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "description", "Just a test update"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), @@ -56,6 +56,72 @@ func TestAccAWSSSOPermissionSetBasic(t *testing.T) { }) } +func 
TestAccAWSSSOPermissionSet_disappears(t *testing.T) { + var permissionSet ssoadmin.PermissionSet + resourceName := "aws_sso_permission_set.example" + rName := acctest.RandomWithPrefix("tf-sso-test") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOPermissionSetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSSOPermissionSetBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), + testAccCheckAWSSSOPermissionSetDisappears(&permissionSet), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccAWSSSOPermissionSet_tags(t *testing.T) { + var permissionSet ssoadmin.PermissionSet + resourceName := "aws_sso_permission_set.example" + rName := acctest.RandomWithPrefix("tf-sso-test") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOPermissionSetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSSOPermissionSetConfigTagsSingle(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccSSOPermissionSetConfigTagsMultiple(rName, "key1", "updatedvalue1", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "updatedvalue1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccSSOPermissionSetConfigTagsSingle(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + func testAccCheckAWSSSOPermissionSetExists(resourceName string, permissionSet *ssoadmin.PermissionSet) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] @@ -126,7 +192,7 @@ func testAccCheckAWSSSOPermissionSetDestroy(s *terraform.State) error { output, err := ssoadminconn.DescribePermissionSet(input) - if isAWSErr(err, "ResourceNotFoundException", "") { + if isAWSErr(err, ssoadmin.ErrCodeResourceNotFoundException, "") { continue } @@ -142,12 +208,42 @@ func testAccCheckAWSSSOPermissionSetDestroy(s *terraform.State) error { return nil } -func testAccAWSSSOPermissionSetBasicConfig(rName string) string { +func testAccCheckAWSSSOPermissionSetDisappears(permissionSet *ssoadmin.PermissionSet) resource.TestCheckFunc { + return func(s *terraform.State) error { + ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn + + permissionSetArn, permissionSetErr := arn.Parse(*permissionSet.PermissionSetArn) + if permissionSetErr != nil { + return permissionSetErr + } + + resourceParts := strings.Split(permissionSetArn.Resource, "/") + + // resourceParts = ["permissionSet","ins-123456A", "ps-56789B"] + instanceArn := 
arn.ARN{ + Partition: permissionSetArn.Partition, + Service: permissionSetArn.Service, + Resource: fmt.Sprintf("instance/%s", resourceParts[1]), + }.String() + + input := &ssoadmin.DeletePermissionSetInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: permissionSet.PermissionSetArn, + } + + _, err := ssoadminconn.DeletePermissionSet(input) + + return err + + } +} + +func testAccSSOPermissionSetBasicConfig(rName string) string { return fmt.Sprintf(` data "aws_sso_instance" "selected" {} resource "aws_sso_permission_set" "example" { - name = "Test_Permission_Set_%s" + name = "%s" description = "Just a test" instance_arn = data.aws_sso_instance.selected.arn managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] @@ -155,12 +251,12 @@ resource "aws_sso_permission_set" "example" { `, rName) } -func testAccAWSSSOPermissionSetBasicConfigUpdated(rName string) string { +func testAccSSOPermissionSetBasicConfigUpdated(rName string) string { return fmt.Sprintf(` data "aws_sso_instance" "selected" {} resource "aws_sso_permission_set" "example" { - name = "Test_Permission_Set_Update_%s" + name = "%s" description = "Just a test update" instance_arn = data.aws_sso_instance.selected.arn managed_policy_arns = [ @@ -170,3 +266,38 @@ resource "aws_sso_permission_set" "example" { } `, rName) } + +func testAccSSOPermissionSetConfigTagsSingle(rName, tagKey1, tagValue1 string) string { + return fmt.Sprintf(` +data "aws_sso_instance" "selected" {} + +resource "aws_sso_permission_set" "example" { + name = "%s" + description = "Just a test" + instance_arn = data.aws_sso_instance.selected.arn + managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1) +} + +func testAccSSOPermissionSetConfigTagsMultiple(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return fmt.Sprintf(` +data "aws_sso_instance" "selected" {} + +resource "aws_sso_permission_set" "example" { + name = "%s" + description = "Just a test" + instance_arn = data.aws_sso_instance.selected.arn + managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2) +} From c73bc9eca81258ed1fff0c6e2db7ea3aa65825c8 Mon Sep 17 00:00:00 2001 From: lawdhavmercy <24194810+lawdhavmercy@users.noreply.github.com> Date: Wed, 21 Oct 2020 21:42:52 -0500 Subject: [PATCH 0593/1212] add sso account assignment tests --- aws/data_source_aws_sso_instance_test.go | 21 +- aws/resource_aws_sso_assignment.go | 16 +- aws/resource_aws_sso_assignment_test.go | 302 +++++++++++++++++++++++ 3 files changed, 326 insertions(+), 13 deletions(-) create mode 100644 aws/resource_aws_sso_assignment_test.go diff --git a/aws/data_source_aws_sso_instance_test.go b/aws/data_source_aws_sso_instance_test.go index 01e6fc73cc8..906a5deab96 100644 --- a/aws/data_source_aws_sso_instance_test.go +++ b/aws/data_source_aws_sso_instance_test.go @@ -14,20 +14,31 @@ import ( func testAccPreCheckAWSSSOInstance(t *testing.T) { ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn - input := &ssoadmin.ListInstancesInput{} - - _, err := ssoadminconn.ListInstances(input) - + instances := []*ssoadmin.InstanceMetadata{} + err := ssoadminconn.ListInstancesPages(&ssoadmin.ListInstancesInput{}, func(page *ssoadmin.ListInstancesOutput, lastPage bool) bool { + if page != nil && len(page.Instances) != 0 { + instances = append(instances, page.Instances...) 
+ } + return !lastPage + }) if testAccPreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", err) } + if len(instances) == 0 { + t.Skip("skipping acceptance testing: No AWS SSO Instance found.") + } + + if len(instances) > 1 { + t.Skip("skipping acceptance testing: Found multiple AWS SSO Instances. Not sure which one to use.") + } + if err != nil { t.Fatalf("unexpected PreCheck error: %s", err) } } -func TestAccDataSourceAwsSsoInstanceBasic(t *testing.T) { +func TestAccDataSourceAwsSsoInstance_basic(t *testing.T) { datasourceName := "data.aws_sso_instance.selected" resource.ParallelTest(t, resource.TestCase{ diff --git a/aws/resource_aws_sso_assignment.go b/aws/resource_aws_sso_assignment.go index 4628d92da57..3bbc0b4cd37 100644 --- a/aws/resource_aws_sso_assignment.go +++ b/aws/resource_aws_sso_assignment.go @@ -9,7 +9,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ssoadmin" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -186,11 +185,9 @@ func resourceAwsSsoAssignmentRead(d *schema.ResourceData, meta interface{}) erro principalType, principalID, ) - if err != nil { - return err - } - if accountAssignment == nil { - log.Printf("[DEBUG] Account assignment not found for %s", map[string]string{ + + if isAWSErr(err, ssoadmin.ErrCodeResourceNotFoundException, "") || accountAssignment == nil { + log.Printf("[WARN] AWS SSO Account Assignment (%s) not found, removing from state", map[string]string{ "PrincipalType": principalType, "PrincipalId": principalID, }) @@ -198,6 +195,10 @@ func resourceAwsSsoAssignmentRead(d *schema.ResourceData, meta interface{}) erro return nil } + if err != nil { + return err + } + id, idErr := resourceAwsSsoAssignmentID(instanceArn, permissionSetArn, targetType, targetID, principalType, principalID) if idErr != nil { return idErr @@ -228,8 +229,7 @@ func resourceAwsSsoAssignmentDelete(d *schema.ResourceData, meta interface{}) er log.Printf("[INFO] Deleting AWS SSO Assignment") resp, err := conn.DeleteAccountAssignment(req) if err != nil { - aerr, ok := err.(awserr.Error) - if ok && aerr.Code() == ssoadmin.ErrCodeResourceNotFoundException { + if isAWSErr(err, ssoadmin.ErrCodeResourceNotFoundException, "") { log.Printf("[DEBUG] AWS SSO Assignment not found") d.SetId("") return nil diff --git a/aws/resource_aws_sso_assignment_test.go b/aws/resource_aws_sso_assignment_test.go new file mode 100644 index 00000000000..46487fff31d --- /dev/null +++ b/aws/resource_aws_sso_assignment_test.go @@ -0,0 +1,302 @@ +package aws + +import ( + "fmt" + "os" + "regexp" + "strings" + "testing" + + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func testAccPreCheckAWSSIdentityStoreGroup(t *testing.T, identityStoreGroup string) { + if identityStoreGroup == "" { + t.Skip("skipping acceptance testing: No Identity Store Group was provided") + } +} + +func testAccPreCheckAWSSIdentityStoreUser(t *testing.T, identityStoreUser string) { + if identityStoreUser == "" { + t.Skip("skipping acceptance testing: No Identity Store User was provided") + } +} + +func TestAccAWSSSOAssignmentGroup_basic(t *testing.T) { + var accountAssignment ssoadmin.AccountAssignment + resourceName 
:= "aws_sso_assignment.example" + rName := acctest.RandomWithPrefix("tf-sso-test") + + // Read identity store group from environment since they must exist in the caller's identity store + identityStoreGroup := os.Getenv("AWS_IDENTITY_STORE_GROUP") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckAWSSSOInstance(t) + testAccPreCheckAWSSIdentityStoreGroup(t, identityStoreGroup) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOAssignmentDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSSOAssignmentBasicGroupConfig(identityStoreGroup, rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOAssignmentExists(resourceName, &accountAssignment), + resource.TestCheckResourceAttr(resourceName, "target_type", "AWS_ACCOUNT"), + resource.TestCheckResourceAttr(resourceName, "principal_type", "GROUP"), + resource.TestMatchResourceAttr(resourceName, "principal_id", regexp.MustCompile("^([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}")), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSSOAssignmentUser_basic(t *testing.T) { + var accountAssignment ssoadmin.AccountAssignment + resourceName := "aws_sso_assignment.example" + rName := acctest.RandomWithPrefix("tf-sso-test") + + // Read identity store user from environment since they must exist in the caller's identity store + identityStoreUser := os.Getenv("AWS_IDENTITY_STORE_USER") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckAWSSSOInstance(t) + testAccPreCheckAWSSIdentityStoreUser(t, identityStoreUser) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOAssignmentDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSSOAssignmentBasicUserConfig(identityStoreUser, rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOAssignmentExists(resourceName, &accountAssignment), + resource.TestCheckResourceAttr(resourceName, "target_type", "AWS_ACCOUNT"), + resource.TestCheckResourceAttr(resourceName, "principal_type", "USER"), + resource.TestMatchResourceAttr(resourceName, "principal_id", regexp.MustCompile("^([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}")), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSSOAssignmentGroup_disappears(t *testing.T) { + var accountAssignment ssoadmin.AccountAssignment + resourceName := "aws_sso_assignment.example" + rName := acctest.RandomWithPrefix("tf-sso-test") + + // Read identity store group from environment since they must exist in the caller's identity store + identityStoreGroup := os.Getenv("AWS_IDENTITY_STORE_GROUP") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckAWSSSOInstance(t) + testAccPreCheckAWSSIdentityStoreGroup(t, identityStoreGroup) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOAssignmentDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSSOAssignmentBasicGroupConfig(identityStoreGroup, rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOAssignmentExists(resourceName, &accountAssignment), + testAccCheckResourceDisappears(testAccProvider, resourceAwsSsoAssignment(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) + +} + +func 
testAccCheckAWSSSOAssignmentExists(resourceName string, accountAssignment *ssoadmin.AccountAssignment) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("Resource (%s) ID not set", resourceName) + } + + // id = ${InstanceID}/${PermissionSetID}/${TargetType}/${TargetID}/${PrincipalType}/${PrincipalID} + idParts := strings.Split(rs.Primary.ID, "/") + if len(idParts) != 6 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" || idParts[3] == "" || idParts[4] == "" || idParts[5] == "" { + return fmt.Errorf("Unexpected format of id (%s), expected ${InstanceID}/${PermissionSetID}/${TargetType}/${TargetID}/${PrincipalType}/${PrincipalID}", rs.Primary.ID) + } + + instanceID := idParts[0] + permissionSetID := idParts[1] + targetType := idParts[2] + targetID := idParts[3] + principalType := idParts[4] + principalID := idParts[5] + + // arn:${Partition}:sso:::instance/${InstanceId} + instanceArn := arn.ARN{ + Partition: testAccProvider.Meta().(*AWSClient).partition, + Service: "sso", + Resource: fmt.Sprintf("instance/%s", instanceID), + }.String() + + // arn:${Partition}:sso:::permissionSet/${InstanceId}/${PermissionSetId} + permissionSetArn := arn.ARN{ + Partition: testAccProvider.Meta().(*AWSClient).partition, + Service: "sso", + Resource: fmt.Sprintf("permissionSet/%s/%s", instanceID, permissionSetID), + }.String() + + ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn + + accountAssignmentResp, getAccountAssignmentErr := resourceAwsSsoAssignmentGet( + ssoadminconn, + instanceArn, + permissionSetArn, + targetType, + targetID, + principalType, + principalID, + ) + if getAccountAssignmentErr != nil { + return getAccountAssignmentErr + } + + *accountAssignment = *accountAssignmentResp + return nil + } +} + +func testAccCheckAWSSSOAssignmentDestroy(s *terraform.State) error { + ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_sso_assignment" { + continue + } + + // id = ${InstanceID}/${PermissionSetID}/${TargetType}/${TargetID}/${PrincipalType}/${PrincipalID} + idParts := strings.Split(rs.Primary.ID, "/") + if len(idParts) != 6 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" || idParts[3] == "" || idParts[4] == "" || idParts[5] == "" { + return fmt.Errorf("Unexpected format of id (%s), expected ${InstanceID}/${PermissionSetID}/${TargetType}/${TargetID}/${PrincipalType}/${PrincipalID}", rs.Primary.ID) + } + + instanceID := idParts[0] + permissionSetID := idParts[1] + targetType := idParts[2] + targetID := idParts[3] + principalType := idParts[4] + principalID := idParts[5] + + // arn:${Partition}:sso:::instance/${InstanceId} + instanceArn := arn.ARN{ + Partition: testAccProvider.Meta().(*AWSClient).partition, + Service: "sso", + Resource: fmt.Sprintf("instance/%s", instanceID), + }.String() + + // arn:${Partition}:sso:::permissionSet/${InstanceId}/${PermissionSetId} + permissionSetArn := arn.ARN{ + Partition: testAccProvider.Meta().(*AWSClient).partition, + Service: "sso", + Resource: fmt.Sprintf("permissionSet/%s/%s", instanceID, permissionSetID), + }.String() + + accountAssignment, getAccountAssignmentErr := resourceAwsSsoAssignmentGet( + ssoadminconn, + instanceArn, + permissionSetArn, + targetType, + targetID, + principalType, + principalID, + ) + + if isAWSErr(getAccountAssignmentErr, 
"ResourceNotFoundException", "") { + continue + } + + if getAccountAssignmentErr != nil { + return getAccountAssignmentErr + } + + if accountAssignment != nil { + return fmt.Errorf("AWS SSO Account Assignment (%s) still exists", rs.Primary.ID) + } + } + + return nil +} + +func testAccSSOAssignmentBasicGroupConfig(identityStoreGroup, rName string) string { + return fmt.Sprintf(` +data "aws_sso_instance" "selected" {} +data "aws_caller_identity" "current" {} + +data "aws_identity_store_group" "example_group" { + identity_store_id = data.aws_sso_instance.selected.identity_store_id + display_name = "%s" +} + +resource "aws_sso_permission_set" "example" { + name = "%s" + description = "testing" + instance_arn = data.aws_sso_instance.selected.arn + managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] +} + +resource "aws_sso_assignment" "example" { + instance_arn = data.aws_sso_instance.selected.arn + permission_set_arn = aws_sso_permission_set.example.arn + target_type = "AWS_ACCOUNT" + target_id = data.aws_caller_identity.current.account_id + principal_type = "GROUP" + principal_id = data.aws_identity_store_group.example_group.group_id +} +`, identityStoreGroup, rName) +} + +func testAccSSOAssignmentBasicUserConfig(identityStoreUser, rName string) string { + return fmt.Sprintf(` +data "aws_sso_instance" "selected" {} +data "aws_caller_identity" "current" {} + +data "aws_identity_store_user" "example_user" { + identity_store_id = data.aws_sso_instance.selected.identity_store_id + user_name = "%s" +} + +resource "aws_sso_permission_set" "example" { + name = "%s" + description = "testing" + instance_arn = data.aws_sso_instance.selected.arn + managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] +} + +resource "aws_sso_assignment" "example" { + instance_arn = data.aws_sso_instance.selected.arn + permission_set_arn = aws_sso_permission_set.example.arn + target_type = "AWS_ACCOUNT" + target_id = data.aws_caller_identity.current.account_id + principal_type = "USER" + principal_id = data.aws_identity_store_user.example_user.user_id +} +`, identityStoreUser, rName) +} From 7d40a02053ff25c71d11ad2bd955251bcd313d86 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Wed, 21 Oct 2020 20:07:58 -0500 Subject: [PATCH 0594/1212] lint fixes --- aws/data_source_aws_sso_permission_set_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/data_source_aws_sso_permission_set_test.go b/aws/data_source_aws_sso_permission_set_test.go index 5872d1ced90..6a23cf6aa62 100644 --- a/aws/data_source_aws_sso_permission_set_test.go +++ b/aws/data_source_aws_sso_permission_set_test.go @@ -22,7 +22,7 @@ func TestAccDataSourceAwsSsoPermissionSet_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(datasourceName, "managed_policy_arns.#", "1"), tfawsresource.TestCheckTypeSetElemAttr(datasourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), - resource.TestCheckResourceAttr(datasourceName, "name", fmt.Sprintf("%s", rName)), + resource.TestCheckResourceAttr(datasourceName, "name", rName), resource.TestCheckResourceAttr(datasourceName, "description", "testing"), resource.TestCheckResourceAttr(datasourceName, "session_duration", "PT1H"), resource.TestCheckResourceAttr(datasourceName, "relay_state", "https://console.aws.amazon.com/console/home"), From a7dcf644fe94bf9c0c0041a6b588a4eff835238c Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Thu, 22 Oct 2020 11:21:51 -0500 Subject: [PATCH 0595/1212] remove created_date --- 
aws/resource_aws_sso_assignment.go | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/aws/resource_aws_sso_assignment.go b/aws/resource_aws_sso_assignment.go index 3bbc0b4cd37..0cd5b6fbc8c 100644 --- a/aws/resource_aws_sso_assignment.go +++ b/aws/resource_aws_sso_assignment.go @@ -89,11 +89,6 @@ func resourceAwsSsoAssignment() *schema.Resource { validation.StringMatch(regexp.MustCompile(`^([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}$`), "must match ([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}"), ), }, - - "created_date": { - Type: schema.TypeString, - Computed: true, - }, }, } } @@ -147,22 +142,12 @@ func resourceAwsSsoAssignmentCreate(d *schema.ResourceData, meta interface{}) er } status := resp.AccountAssignmentCreationStatus - - if status.CreatedDate != nil { - d.Set("created_date", status.CreatedDate.Format(time.RFC3339)) - } - waitResp, waitErr := waitForAssignmentCreation(conn, instanceArn, aws.StringValue(status.RequestId), d.Timeout(schema.TimeoutCreate)) if waitErr != nil { return waitErr } d.SetId(id) - - if waitResp.CreatedDate != nil { - d.Set("created_date", waitResp.CreatedDate.Format(time.RFC3339)) - } - return resourceAwsSsoAssignmentRead(d, meta) } From 0c7b1e30ce720de1a30da04196b4bbb2a90ef545 Mon Sep 17 00:00:00 2001 From: Alex Burck Date: Thu, 22 Oct 2020 11:25:39 -0500 Subject: [PATCH 0596/1212] fix lint --- aws/resource_aws_sso_assignment.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_sso_assignment.go b/aws/resource_aws_sso_assignment.go index 0cd5b6fbc8c..afeb4a81370 100644 --- a/aws/resource_aws_sso_assignment.go +++ b/aws/resource_aws_sso_assignment.go @@ -142,7 +142,7 @@ func resourceAwsSsoAssignmentCreate(d *schema.ResourceData, meta interface{}) er } status := resp.AccountAssignmentCreationStatus - waitResp, waitErr := waitForAssignmentCreation(conn, instanceArn, aws.StringValue(status.RequestId), d.Timeout(schema.TimeoutCreate)) + _, waitErr := waitForAssignmentCreation(conn, instanceArn, aws.StringValue(status.RequestId), d.Timeout(schema.TimeoutCreate)) if waitErr != nil { return waitErr } From ed6d69e6e13246157219f77144e9bf339439fe9f Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Mon, 11 Jan 2021 21:30:11 -0500 Subject: [PATCH 0597/1212] isolate only new data/resources --- aws/data_source_aws_sso_instance.go | 63 -- aws/data_source_aws_sso_instance_test.go | 79 --- aws/data_source_aws_sso_permission_set.go | 182 ----- ...data_source_aws_sso_permission_set_test.go | 110 --- aws/internal/keyvaluetags/sso_tags.go | 95 --- aws/provider.go | 2 +- aws/resource_aws_sso_assignment_test.go | 10 +- aws/resource_aws_sso_permission_set.go | 642 ------------------ aws/resource_aws_sso_permission_set_test.go | 303 --------- website/docs/d/sso_instance.html.markdown | 34 - .../docs/d/sso_permission_set.html.markdown | 47 -- .../docs/r/sso_permission_set.html.markdown | 73 -- 12 files changed, 7 insertions(+), 1633 deletions(-) delete mode 100644 aws/data_source_aws_sso_instance.go delete mode 100644 aws/data_source_aws_sso_instance_test.go delete mode 100644 aws/data_source_aws_sso_permission_set.go delete mode 100644 aws/data_source_aws_sso_permission_set_test.go delete mode 100644 aws/internal/keyvaluetags/sso_tags.go delete mode 100644 aws/resource_aws_sso_permission_set.go delete mode 100644 aws/resource_aws_sso_permission_set_test.go delete mode 100644 website/docs/d/sso_instance.html.markdown delete mode 100644 
website/docs/d/sso_permission_set.html.markdown delete mode 100644 website/docs/r/sso_permission_set.html.markdown diff --git a/aws/data_source_aws_sso_instance.go b/aws/data_source_aws_sso_instance.go deleted file mode 100644 index 2104b5f0cd0..00000000000 --- a/aws/data_source_aws_sso_instance.go +++ /dev/null @@ -1,63 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "time" - - "github.com/aws/aws-sdk-go/service/ssoadmin" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func dataSourceAwsSsoInstance() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsSsoInstanceRead, - - Schema: map[string]*schema.Schema{ - "arn": { - Type: schema.TypeString, - Computed: true, - }, - - "identity_store_id": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceAwsSsoInstanceRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ssoadminconn - - log.Printf("[DEBUG] Reading AWS SSO Instances") - instances := []*ssoadmin.InstanceMetadata{} - err := conn.ListInstancesPages(&ssoadmin.ListInstancesInput{}, func(page *ssoadmin.ListInstancesOutput, lastPage bool) bool { - if page != nil && len(page.Instances) != 0 { - instances = append(instances, page.Instances...) - } - return !lastPage - }) - if err != nil { - return fmt.Errorf("Error getting AWS SSO Instances: %s", err) - } - - if len(instances) == 0 { - log.Printf("[DEBUG] No AWS SSO Instance found") - d.SetId("") - return nil - } - - if len(instances) > 1 { - return fmt.Errorf("Found multiple AWS SSO Instances. Not sure which one to use. %s", instances) - } - - instance := instances[0] - log.Printf("[DEBUG] Received AWS SSO Instance: %s", instance) - - d.SetId(time.Now().UTC().String()) - d.Set("arn", instance.InstanceArn) - d.Set("identity_store_id", instance.IdentityStoreId) - - return nil -} diff --git a/aws/data_source_aws_sso_instance_test.go b/aws/data_source_aws_sso_instance_test.go deleted file mode 100644 index 906a5deab96..00000000000 --- a/aws/data_source_aws_sso_instance_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package aws - -import ( - "fmt" - "regexp" - "testing" - - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/ssoadmin" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" -) - -func testAccPreCheckAWSSSOInstance(t *testing.T) { - ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn - - instances := []*ssoadmin.InstanceMetadata{} - err := ssoadminconn.ListInstancesPages(&ssoadmin.ListInstancesInput{}, func(page *ssoadmin.ListInstancesOutput, lastPage bool) bool { - if page != nil && len(page.Instances) != 0 { - instances = append(instances, page.Instances...) - } - return !lastPage - }) - if testAccPreCheckSkipError(err) { - t.Skipf("skipping acceptance testing: %s", err) - } - - if len(instances) == 0 { - t.Skip("skipping acceptance testing: No AWS SSO Instance found.") - } - - if len(instances) > 1 { - t.Skip("skipping acceptance testing: Found multiple AWS SSO Instances. 
Not sure which one to use.") - } - - if err != nil { - t.Fatalf("unexpected PreCheck error: %s", err) - } -} - -func TestAccDataSourceAwsSsoInstance_basic(t *testing.T) { - datasourceName := "data.aws_sso_instance.selected" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsSsoInstanceConfigBasic(), - Check: resource.ComposeTestCheckFunc( - testAccMatchResourceAttrAwsSsoARN(datasourceName, "arn", regexp.MustCompile("instance/ssoins-[a-zA-Z0-9-.]{16}")), - resource.TestMatchResourceAttr(datasourceName, "identity_store_id", regexp.MustCompile("^[a-zA-Z0-9-]*")), - ), - }, - }, - }) -} - -func testAccDataSourceAwsSsoInstanceConfigBasic() string { - return `data "aws_sso_instance" "selected" {}` -} - -func testAccMatchResourceAttrAwsSsoARN(resourceName, attributeName string, arnResourceRegexp *regexp.Regexp) resource.TestCheckFunc { - return func(s *terraform.State) error { - arnRegexp := arn.ARN{ - Partition: testAccGetPartition(), - Resource: arnResourceRegexp.String(), - Service: "sso", - }.String() - - attributeMatch, err := regexp.Compile(arnRegexp) - - if err != nil { - return fmt.Errorf("Unable to compile ARN regexp (%s): %s", arnRegexp, err) - } - - return resource.TestMatchResourceAttr(resourceName, attributeName, attributeMatch)(s) - } -} diff --git a/aws/data_source_aws_sso_permission_set.go b/aws/data_source_aws_sso_permission_set.go deleted file mode 100644 index 58ac29a0ba8..00000000000 --- a/aws/data_source_aws_sso_permission_set.go +++ /dev/null @@ -1,182 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ssoadmin" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" -) - -func dataSourceAwsSsoPermissionSet() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsSsoPermissionSetRead, - - Schema: map[string]*schema.Schema{ - "arn": { - Type: schema.TypeString, - Computed: true, - }, - - "created_date": { - Type: schema.TypeString, - Computed: true, - }, - - "instance_arn": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.All( - validation.StringLenBetween(10, 1224), - validation.StringMatch(regexp.MustCompile(`^arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}$`), "must match arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}"), - ), - }, - - "name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 32), - validation.StringMatch(regexp.MustCompile(`^[\w+=,.@-]+$`), "must match [\\w+=,.@-]"), - ), - }, - - "description": { - Type: schema.TypeString, - Computed: true, - }, - - "session_duration": { - Type: schema.TypeString, - Computed: true, - }, - - "relay_state": { - Type: schema.TypeString, - Computed: true, - }, - - "inline_policy": { - Type: schema.TypeString, - Computed: true, - }, - - "managed_policy_arns": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "tags": tagsSchemaComputed(), - }, - } -} - -func dataSourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ssoadminconn - ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig - - instanceArn := 
d.Get("instance_arn").(string) - name := d.Get("name").(string) - - log.Printf("[DEBUG] Reading AWS SSO Permission Sets") - - var permissionSetArn string - var permissionSet *ssoadmin.PermissionSet - var permissionSetErr error - - req := &ssoadmin.ListPermissionSetsInput{ - InstanceArn: aws.String(instanceArn), - } - err := conn.ListPermissionSetsPages(req, func(page *ssoadmin.ListPermissionSetsOutput, lastPage bool) bool { - if page != nil && len(page.PermissionSets) != 0 { - for _, ps := range page.PermissionSets { - permissionSetArn = aws.StringValue(ps) - log.Printf("[DEBUG] Reading AWS SSO Permission Set: %v", permissionSetArn) - var permissionSetResp *ssoadmin.DescribePermissionSetOutput - permissionSetResp, permissionSetErr = conn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - }) - if permissionSetErr != nil { - return false - } - if aws.StringValue(permissionSetResp.PermissionSet.Name) == name { - permissionSet = permissionSetResp.PermissionSet - return false - } - } - } - return !lastPage - }) - - if err != nil { - return fmt.Errorf("Error getting AWS SSO Permission Sets: %s", err) - } - - if permissionSetErr != nil { - return fmt.Errorf("Error getting AWS SSO Permission Set: %s", permissionSetErr) - } - - if permissionSet == nil { - log.Printf("[DEBUG] AWS SSO Permission Set %v not found", name) - d.SetId("") - return nil - } - - log.Printf("[DEBUG] Found AWS SSO Permission Set: %s", permissionSet) - - log.Printf("[DEBUG] Getting Inline Policy for AWS SSO Permission Set") - inlinePolicyResp, inlinePolicyErr := conn.GetInlinePolicyForPermissionSet(&ssoadmin.GetInlinePolicyForPermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - }) - if inlinePolicyErr != nil { - return fmt.Errorf("Error getting Inline Policy for AWS SSO Permission Set: %s", inlinePolicyErr) - } - - log.Printf("[DEBUG] Getting Managed Policies for AWS SSO Permission Set") - var managedPolicyArns []string - managedPoliciesReq := &ssoadmin.ListManagedPoliciesInPermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - } - managedPoliciesErr := conn.ListManagedPoliciesInPermissionSetPages(managedPoliciesReq, func(page *ssoadmin.ListManagedPoliciesInPermissionSetOutput, lastPage bool) bool { - for _, managedPolicy := range page.AttachedManagedPolicies { - managedPolicyArns = append(managedPolicyArns, aws.StringValue(managedPolicy.Arn)) - } - return !lastPage - }) - if managedPoliciesErr != nil { - return fmt.Errorf("Error getting Managed Policies for AWS SSO Permission Set: %s", managedPoliciesErr) - } - - tags, tagsErr := keyvaluetags.SsoListTags(conn, permissionSetArn, instanceArn) - if tagsErr != nil { - return fmt.Errorf("Error listing tags for ASW SSO Permission Set (%s): %s", permissionSetArn, tagsErr) - } - - d.SetId(permissionSetArn) - d.Set("arn", permissionSetArn) - d.Set("created_date", permissionSet.CreatedDate.Format(time.RFC3339)) - d.Set("instance_arn", instanceArn) - d.Set("name", permissionSet.Name) - d.Set("description", permissionSet.Description) - d.Set("session_duration", permissionSet.SessionDuration) - d.Set("relay_state", permissionSet.RelayState) - d.Set("inline_policy", inlinePolicyResp.InlinePolicy) - d.Set("managed_policy_arns", managedPolicyArns) - tagsMapErr := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()) - if tagsMapErr != nil { - return 
fmt.Errorf("Error setting tags: %s", tagsMapErr) - } - - return nil -} diff --git a/aws/data_source_aws_sso_permission_set_test.go b/aws/data_source_aws_sso_permission_set_test.go deleted file mode 100644 index 6a23cf6aa62..00000000000 --- a/aws/data_source_aws_sso_permission_set_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfawsresource" -) - -func TestAccDataSourceAwsSsoPermissionSet_basic(t *testing.T) { - datasourceName := "data.aws_sso_permission_set.test" - rName := acctest.RandomWithPrefix("tf-sso-test") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsSsoPermissionSetConfigBasic(rName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(datasourceName, "managed_policy_arns.#", "1"), - tfawsresource.TestCheckTypeSetElemAttr(datasourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), - resource.TestCheckResourceAttr(datasourceName, "name", rName), - resource.TestCheckResourceAttr(datasourceName, "description", "testing"), - resource.TestCheckResourceAttr(datasourceName, "session_duration", "PT1H"), - resource.TestCheckResourceAttr(datasourceName, "relay_state", "https://console.aws.amazon.com/console/home"), - resource.TestCheckResourceAttr(datasourceName, "tags.%", "0"), - ), - }, - }, - }) -} - -func TestAccDataSourceAwsSsoPermissionSet_byTags(t *testing.T) { - datasourceName := "data.aws_sso_permission_set.test" - rName := acctest.RandomWithPrefix("tf-sso-test") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsSsoPermissionSetConfigByTags(rName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(datasourceName, "managed_policy_arns.#", "1"), - tfawsresource.TestCheckTypeSetElemAttr(datasourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), - resource.TestCheckResourceAttr(datasourceName, "name", fmt.Sprintf("%s", rName)), - resource.TestCheckResourceAttr(datasourceName, "description", "testing"), - resource.TestCheckResourceAttr(datasourceName, "session_duration", "PT1H"), - resource.TestCheckResourceAttr(datasourceName, "relay_state", "https://console.aws.amazon.com/console/home"), - resource.TestCheckResourceAttr(datasourceName, "tags.%", "3"), - ), - }, - }, - }) -} - -func testAccDataSourceAwsSsoPermissionSetConfigBasic(rName string) string { - return fmt.Sprintf(` -data "aws_sso_instance" "selected" {} - -resource "aws_sso_permission_set" "test" { - name = "%s" - description = "testing" - instance_arn = data.aws_sso_instance.selected.arn - session_duration = "PT1H" - relay_state = "https://console.aws.amazon.com/console/home" - managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] -} - -data "aws_sso_permission_set" "test" { - instance_arn = data.aws_sso_instance.selected.arn - name = aws_sso_permission_set.test.name -} -`, rName) -} - -func testAccDataSourceAwsSsoPermissionSetConfigByTags(rName string) string { - return fmt.Sprintf(` -data "aws_sso_instance" "selected" {} - -resource 
"aws_sso_permission_set" "test" { - name = "%s" - description = "testing" - instance_arn = data.aws_sso_instance.selected.arn - session_duration = "PT1H" - relay_state = "https://console.aws.amazon.com/console/home" - managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] - - tags = { - Key1 = "Value1" - Key2 = "Value2" - Key3 = "Value3" - } -} - -data "aws_sso_permission_set" "test" { - instance_arn = data.aws_sso_instance.selected.arn - name = aws_sso_permission_set.test.name - - tags = { - Key1 = "Value1" - Key2 = "Value2" - Key3 = "Value3" - } -} -`, rName) -} diff --git a/aws/internal/keyvaluetags/sso_tags.go b/aws/internal/keyvaluetags/sso_tags.go deleted file mode 100644 index 05391e0c734..00000000000 --- a/aws/internal/keyvaluetags/sso_tags.go +++ /dev/null @@ -1,95 +0,0 @@ -// +build !generate - -package keyvaluetags - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ssoadmin" -) - -// Custom SSO tag service functions using the same format as generated code. - -// SsoListTags lists sso service tags. -// The identifier is typically the Amazon Resource Name (ARN), although -// it may also be a different identifier depending on the service. -func SsoListTags(conn *ssoadmin.SSOAdmin, identifier string, instanceArn string) (KeyValueTags, error) { - input := &ssoadmin.ListTagsForResourceInput{ - InstanceArn: aws.String(instanceArn), - ResourceArn: aws.String(identifier), - } - - output, err := conn.ListTagsForResource(input) - - if err != nil { - return New(nil), err - } - - return SsoKeyValueTags(output.Tags), nil -} - -// SsoUpdateTags updates sso service tags. -// The identifier is typically the Amazon Resource Name (ARN), although -// it may also be a different identifier depending on the service. -func SsoUpdateTags(conn *ssoadmin.SSOAdmin, identifier string, instanceArn string, oldTagsMap interface{}, newTagsMap interface{}) error { - oldTags := New(oldTagsMap) - newTags := New(newTagsMap) - - if removedTags := oldTags.Removed(newTags); len(removedTags) > 0 { - input := &ssoadmin.UntagResourceInput{ - InstanceArn: aws.String(instanceArn), - ResourceArn: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.IgnoreAws().Keys()), - } - - _, err := conn.UntagResource(input) - - if err != nil { - return fmt.Errorf("error untagging resource (%s): %w", identifier, err) - } - } - - if updatedTags := oldTags.Updated(newTags); len(updatedTags) > 0 { - input := &ssoadmin.TagResourceInput{ - InstanceArn: aws.String(instanceArn), - ResourceArn: aws.String(identifier), - Tags: updatedTags.IgnoreAws().SsoTags(), - } - - _, err := conn.TagResource(input) - - if err != nil { - return fmt.Errorf("error tagging resource (%s): %w", identifier, err) - } - } - - return nil -} - -// SsoTags returns sso service tags. -func (tags KeyValueTags) SsoTags() []*ssoadmin.Tag { - result := make([]*ssoadmin.Tag, 0, len(tags)) - - for k, v := range tags.Map() { - tag := &ssoadmin.Tag{ - Key: aws.String(k), - Value: aws.String(v), - } - - result = append(result, tag) - } - - return result -} - -// SsoKeyValueTags creates KeyValueTags from sso service tags. 
-func SsoKeyValueTags(tags []*ssoadmin.Tag) KeyValueTags { - m := make(map[string]*string, len(tags)) - - for _, tag := range tags { - m[aws.StringValue(tag.Key)] = tag.Value - } - - return New(m) -} diff --git a/aws/provider.go b/aws/provider.go index 227b1f7b6ea..3b2c1d2e0a9 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -268,7 +268,7 @@ func Provider() *schema.Provider { "aws_iam_server_certificate": dataSourceAwsIAMServerCertificate(), "aws_iam_user": dataSourceAwsIAMUser(), "aws_identity_store_group": dataSourceAwsIdentityStoreGroup(), - "aws_identity_store_user": dataSourceAwsIdentityStoreUser(), + "aws_identity_store_user": dataSourceAwsIdentityStoreUser(), "aws_imagebuilder_component": dataSourceAwsImageBuilderComponent(), "aws_imagebuilder_distribution_configuration": datasourceAwsImageBuilderDistributionConfiguration(), "aws_imagebuilder_image_pipeline": dataSourceAwsImageBuilderImagePipeline(), diff --git a/aws/resource_aws_sso_assignment_test.go b/aws/resource_aws_sso_assignment_test.go index 46487fff31d..6c8461832b9 100644 --- a/aws/resource_aws_sso_assignment_test.go +++ b/aws/resource_aws_sso_assignment_test.go @@ -37,7 +37,7 @@ func TestAccAWSSSOAssignmentGroup_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) - testAccPreCheckAWSSSOInstance(t) + testAccPreCheckAWSSSOAdminInstances(t) testAccPreCheckAWSSIdentityStoreGroup(t, identityStoreGroup) }, Providers: testAccProviders, @@ -72,7 +72,7 @@ func TestAccAWSSSOAssignmentUser_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) - testAccPreCheckAWSSSOInstance(t) + testAccPreCheckAWSSSOAdminInstances(t) testAccPreCheckAWSSIdentityStoreUser(t, identityStoreUser) }, Providers: testAccProviders, @@ -107,7 +107,7 @@ func TestAccAWSSSOAssignmentGroup_disappears(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) - testAccPreCheckAWSSSOInstance(t) + testAccPreCheckAWSSSOAdminInstances(t) testAccPreCheckAWSSIdentityStoreGroup(t, identityStoreGroup) }, Providers: testAccProviders, @@ -248,8 +248,9 @@ func testAccCheckAWSSSOAssignmentDestroy(s *terraform.State) error { func testAccSSOAssignmentBasicGroupConfig(identityStoreGroup, rName string) string { return fmt.Sprintf(` data "aws_sso_instance" "selected" {} + data "aws_caller_identity" "current" {} - + data "aws_identity_store_group" "example_group" { identity_store_id = data.aws_sso_instance.selected.identity_store_id display_name = "%s" @@ -276,6 +277,7 @@ resource "aws_sso_assignment" "example" { func testAccSSOAssignmentBasicUserConfig(identityStoreUser, rName string) string { return fmt.Sprintf(` data "aws_sso_instance" "selected" {} + data "aws_caller_identity" "current" {} data "aws_identity_store_user" "example_user" { diff --git a/aws/resource_aws_sso_permission_set.go b/aws/resource_aws_sso_permission_set.go deleted file mode 100644 index 4473240d72a..00000000000 --- a/aws/resource_aws_sso_permission_set.go +++ /dev/null @@ -1,642 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/ssoadmin" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" -) - -const ( 
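-	// Clarifying note (not part of the original file): the create, update, and
-	// delete values below are the schema-level operation timeouts, while the
-	// retry delay and minimum timeout feed the resource.StateChangeConf loop
-	// in waitForPermissionSetProvisioning at the bottom of this file.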
- AWSSSOPermissionSetCreateTimeout = 5 * time.Minute - AWSSSOPermissionSetUpdateTimeout = 10 * time.Minute - AWSSSOPermissionSetDeleteTimeout = 5 * time.Minute - AWSSSOPermissionSetProvisioningRetryDelay = 5 * time.Second - AWSSSOPermissionSetProvisioningRetryMinTimeout = 3 * time.Second -) - -func resourceAwsSsoPermissionSet() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsSsoPermissionSetCreate, - Read: resourceAwsSsoPermissionSetRead, - Update: resourceAwsSsoPermissionSetUpdate, - Delete: resourceAwsSsoPermissionSetDelete, - Importer: &schema.ResourceImporter{ - State: resourceAwsSsoPermissionSetImport, - }, - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(AWSSSOPermissionSetCreateTimeout), - Update: schema.DefaultTimeout(AWSSSOPermissionSetUpdateTimeout), - Delete: schema.DefaultTimeout(AWSSSOPermissionSetDeleteTimeout), - }, - Schema: map[string]*schema.Schema{ - "arn": { - Type: schema.TypeString, - Computed: true, - }, - - "created_date": { - Type: schema.TypeString, - Computed: true, - }, - - "provisioning_created_date": { - Type: schema.TypeString, - Computed: true, - }, - - "provisioning_failure_reason": { - Type: schema.TypeString, - Computed: true, - }, - - "provisioning_request_id": { - Type: schema.TypeString, - Computed: true, - }, - - "provisioning_status": { - Type: schema.TypeString, - Computed: true, - }, - - "instance_arn": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.All( - validation.StringLenBetween(10, 1224), - validation.StringMatch(regexp.MustCompile(`^arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}$`), "must match arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}"), - ), - }, - - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 32), - validation.StringMatch(regexp.MustCompile(`^[\w+=,.@-]+$`), "must match [\\w+=,.@-]"), - ), - }, - - "description": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 700), - validation.StringMatch(regexp.MustCompile(`^[\p{L}\p{M}\p{Z}\p{S}\p{N}\p{P}]*$`), "must match [\\p{L}\\p{M}\\p{Z}\\p{S}\\p{N}\\p{P}]"), - ), - }, - - "session_duration": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(1, 100), - Default: "PT1H", - }, - - "relay_state": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 240), - validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9&$@#\\\/%?=~\-_'"|!:,.;*+\[\]\(\)\{\} ]+$`), "must match [a-zA-Z0-9&$@#\\\\\\/%?=~\\-_'\"|!:,.;*+\\[\\]\\(\\)\\{\\} ]"), - ), - }, - - "inline_policy": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateIAMPolicyJson, - DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, - }, - - "managed_policy_arns": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateArn, - }, - Set: schema.HashString, - }, - - "tags": tagsSchema(), - }, - } -} - -func resourceAwsSsoPermissionSetImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - permissionSetArn := d.Id() - instanceArn, err := resourceAwsSsoPermissionSetParseID(permissionSetArn) - if err != nil { - return []*schema.ResourceData{}, fmt.Errorf("Error parsing AWS Permission Set (%s) for import: %s", permissionSetArn, err) - } - - ssoadminconn := meta.(*AWSClient).ssoadminconn - ignoreTagsConfig 
:= meta.(*AWSClient).IgnoreTagsConfig - - permissionSetResp, permissionSetErr := ssoadminconn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - }) - - if permissionSetErr != nil { - return []*schema.ResourceData{}, permissionSetErr - } - - permissionSet := permissionSetResp.PermissionSet - - log.Printf("[DEBUG] Getting Inline Policy for AWS SSO Permission Set") - inlinePolicyResp, inlinePolicyErr := ssoadminconn.GetInlinePolicyForPermissionSet(&ssoadmin.GetInlinePolicyForPermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - }) - if inlinePolicyErr != nil { - return []*schema.ResourceData{}, fmt.Errorf("Error importing Inline Policy for AWS SSO Permission Set (%s): %s", permissionSetArn, inlinePolicyErr) - } - - log.Printf("[DEBUG] Getting Managed Policies for AWS SSO Permission Set") - managedPoliciesResp, managedPoliciesErr := ssoadminconn.ListManagedPoliciesInPermissionSet(&ssoadmin.ListManagedPoliciesInPermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - }) - if managedPoliciesErr != nil { - return []*schema.ResourceData{}, fmt.Errorf("Error importing Managed Policies for AWS SSO Permission Set (%s): %s", permissionSetArn, managedPoliciesErr) - } - var managedPolicyArns []string - for _, managedPolicy := range managedPoliciesResp.AttachedManagedPolicies { - managedPolicyArns = append(managedPolicyArns, aws.StringValue(managedPolicy.Arn)) - } - - tags, err := keyvaluetags.SsoListTags(ssoadminconn, permissionSetArn, instanceArn) - if err != nil { - return []*schema.ResourceData{}, fmt.Errorf("Error listing tags during AWS SSO Permission Set (%s) import: %s", permissionSetArn, err) - } - - err = d.Set("instance_arn", instanceArn) - if err != nil { - return []*schema.ResourceData{}, err - } - err = d.Set("arn", permissionSetArn) - if err != nil { - return []*schema.ResourceData{}, err - } - err = d.Set("created_date", permissionSet.CreatedDate.Format(time.RFC3339)) - if err != nil { - return []*schema.ResourceData{}, err - } - err = d.Set("name", permissionSet.Name) - if err != nil { - return []*schema.ResourceData{}, err - } - err = d.Set("description", permissionSet.Description) - if err != nil { - return []*schema.ResourceData{}, err - } - err = d.Set("session_duration", permissionSet.SessionDuration) - if err != nil { - return []*schema.ResourceData{}, err - } - err = d.Set("relay_state", permissionSet.RelayState) - if err != nil { - return []*schema.ResourceData{}, err - } - err = d.Set("inline_policy", inlinePolicyResp.InlinePolicy) - if err != nil { - return []*schema.ResourceData{}, err - } - err = d.Set("managed_policy_arns", managedPolicyArns) - if err != nil { - return []*schema.ResourceData{}, err - } - if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return []*schema.ResourceData{}, fmt.Errorf("Error importing AWS SSO Permission Set (%s) tags: %s", permissionSetArn, err) - } - d.SetId(permissionSetArn) - - return []*schema.ResourceData{d}, nil -} - -func resourceAwsSsoPermissionSetCreate(d *schema.ResourceData, meta interface{}) error { - ssoadminconn := meta.(*AWSClient).ssoadminconn - - log.Printf("[INFO] Creating AWS SSO Permission Set") - - instanceArn := aws.String(d.Get("instance_arn").(string)) - - params := &ssoadmin.CreatePermissionSetInput{ - InstanceArn: instanceArn, - Name: 
aws.String(d.Get("name").(string)), - } - - if v, ok := d.GetOk("description"); ok { - params.Description = aws.String(v.(string)) - } - - if v, ok := d.GetOk("relay_state"); ok { - params.RelayState = aws.String(v.(string)) - } - - if v, ok := d.GetOk("session_duration"); ok { - params.SessionDuration = aws.String(v.(string)) - } - - if v, ok := d.GetOk("tags"); ok { - params.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().SsoTags() - } - - createPermissionSetResp, createPermissionerr := ssoadminconn.CreatePermissionSet(params) - if createPermissionerr != nil { - return fmt.Errorf("Error creating AWS SSO Permission Set: %s", createPermissionerr) - } - - permissionSetArn := createPermissionSetResp.PermissionSet.PermissionSetArn - d.SetId(*permissionSetArn) - - if attachPoliciesErr := attachPoliciesToPermissionSet(ssoadminconn, d, permissionSetArn, instanceArn); attachPoliciesErr != nil { - return attachPoliciesErr - } - - return resourceAwsSsoPermissionSetRead(d, meta) -} - -func resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) error { - ssoadminconn := meta.(*AWSClient).ssoadminconn - ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig - - var permissionSet *ssoadmin.PermissionSet - permissionSetArn := d.Id() - instanceArn := d.Get("instance_arn").(string) - name := d.Get("name").(string) - - log.Printf("[DEBUG] Reading AWS SSO Permission Set: %s", permissionSetArn) - - permissionSetResp, permissionerr := ssoadminconn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - }) - - if isAWSErr(permissionerr, ssoadmin.ErrCodeResourceNotFoundException, "") { - log.Printf("[WARN] AWS SSO Permission Set (%s) not found, removing from state", permissionSetArn) - d.SetId("") - return nil - } - - if permissionerr != nil { - return fmt.Errorf("Error getting AWS SSO Permission Set: %s", permissionerr) - } - if aws.StringValue(permissionSetResp.PermissionSet.Name) == name { - permissionSet = permissionSetResp.PermissionSet - } - - if permissionSet == nil { - log.Printf("[WARN] AWS SSO Permission Set %s not found, removing from state", name) - d.SetId("") - return nil - } - - log.Printf("[DEBUG] Found AWS SSO Permission Set: %s", permissionSet) - - log.Printf("[DEBUG] Getting Inline Policy for AWS SSO Permission Set") - inlinePolicyResp, inlinePolicyErr := ssoadminconn.GetInlinePolicyForPermissionSet(&ssoadmin.GetInlinePolicyForPermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - }) - if inlinePolicyErr != nil { - return fmt.Errorf("Error getting Inline Policy for AWS SSO Permission Set: %s", inlinePolicyErr) - } - - log.Printf("[DEBUG] Getting Managed Policies for AWS SSO Permission Set") - managedPoliciesResp, managedPoliciesErr := ssoadminconn.ListManagedPoliciesInPermissionSet(&ssoadmin.ListManagedPoliciesInPermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - }) - if managedPoliciesErr != nil { - return fmt.Errorf("Error getting Managed Policies for AWS SSO Permission Set: %s", managedPoliciesErr) - } - var managedPolicyArns []string - for _, managedPolicy := range managedPoliciesResp.AttachedManagedPolicies { - managedPolicyArns = append(managedPolicyArns, aws.StringValue(managedPolicy.Arn)) - } - - tags, err := keyvaluetags.SsoListTags(ssoadminconn, permissionSetArn, instanceArn) - if err != nil { - return fmt.Errorf("Error listing tags for 
AWS SSO Permission Set (%s): %s", permissionSetArn, err) - } - - err = d.Set("arn", permissionSetArn) - if err != nil { - return err - } - err = d.Set("created_date", permissionSet.CreatedDate.Format(time.RFC3339)) - if err != nil { - return err - } - err = d.Set("instance_arn", instanceArn) - if err != nil { - return err - } - err = d.Set("name", permissionSet.Name) - if err != nil { - return err - } - err = d.Set("description", permissionSet.Description) - if err != nil { - return err - } - err = d.Set("session_duration", permissionSet.SessionDuration) - if err != nil { - return err - } - err = d.Set("relay_state", permissionSet.RelayState) - if err != nil { - return err - } - err = d.Set("inline_policy", inlinePolicyResp.InlinePolicy) - if err != nil { - return err - } - err = d.Set("managed_policy_arns", managedPolicyArns) - if err != nil { - return err - } - if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("Error setting tags: %s", err) - } - - return nil -} - -func resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) error { - ssoadminconn := meta.(*AWSClient).ssoadminconn - - permissionSetArn := d.Id() - instanceArn := d.Get("instance_arn").(string) - - log.Printf("[DEBUG] Updating AWS SSO Permission Set: %s", permissionSetArn) - - if d.HasChanges("description", "relay_state", "session_duration") { - input := &ssoadmin.UpdatePermissionSetInput{ - PermissionSetArn: aws.String(permissionSetArn), - InstanceArn: aws.String(instanceArn), - } - - if d.HasChange("description") { - input.Description = aws.String(d.Get("description").(string)) - } - - if d.HasChange("relay_state") { - input.RelayState = aws.String(d.Get("relay_state").(string)) - } - - if d.HasChange("session_duration") { - input.SessionDuration = aws.String(d.Get("session_duration").(string)) - } - - log.Printf("[DEBUG] Updating AWS SSO Permission Set: %s", input) - _, permissionerr := ssoadminconn.UpdatePermissionSet(input) - if permissionerr != nil { - return fmt.Errorf("Error updating AWS SSO Permission Set: %s", permissionerr) - } - } - - if d.HasChange("tags") { - oldTags, newTags := d.GetChange("tags") - if updateTagsErr := keyvaluetags.SsoUpdateTags(ssoadminconn, d.Get("arn").(string), d.Get("instance_arn").(string), oldTags, newTags); updateTagsErr != nil { - return fmt.Errorf("Error updating tags: %s", updateTagsErr) - } - } - - if v, ok := d.GetOk("inline_policy"); ok { - log.Printf("[DEBUG] AWS SSO Permission Set %s updating IAM inline policy", permissionSetArn) - - inlinePolicy := aws.String(v.(string)) - - updateInput := &ssoadmin.PutInlinePolicyToPermissionSetInput{ - InlinePolicy: inlinePolicy, - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - } - - _, inlinePolicyErr := ssoadminconn.PutInlinePolicyToPermissionSet(updateInput) - if inlinePolicyErr != nil { - return fmt.Errorf("Error attaching IAM inline policy to AWS SSO Permission Set: %s", inlinePolicyErr) - } - } else if d.HasChange("inline_policy") { - deleteInput := &ssoadmin.DeleteInlinePolicyFromPermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - } - - _, inlinePolicyErr := ssoadminconn.DeleteInlinePolicyFromPermissionSet(deleteInput) - if inlinePolicyErr != nil { - return fmt.Errorf("Error deleting IAM inline policy from AWS SSO Permission Set: %s", inlinePolicyErr) - } - } - - if d.HasChange("managed_policy_arns") { - o, n := d.GetChange("managed_policy_arns") - - 
os := o.(*schema.Set) - ns := n.(*schema.Set) - - removalList := os.Difference(ns) - for _, v := range removalList.List() { - input := &ssoadmin.DetachManagedPolicyFromPermissionSetInput{ - InstanceArn: aws.String(instanceArn), - ManagedPolicyArn: aws.String(v.(string)), - PermissionSetArn: aws.String(permissionSetArn), - } - - _, managedPoliciesErr := ssoadminconn.DetachManagedPolicyFromPermissionSet(input) - if managedPoliciesErr != nil { - return fmt.Errorf("Error detaching Managed Policy from AWS SSO Permission Set: %s", managedPoliciesErr) - } - } - - additionList := ns.Difference(os) - for _, v := range additionList.List() { - input := &ssoadmin.AttachManagedPolicyToPermissionSetInput{ - InstanceArn: aws.String(instanceArn), - ManagedPolicyArn: aws.String(v.(string)), - PermissionSetArn: aws.String(permissionSetArn), - } - - _, managedPoliciesErr := ssoadminconn.AttachManagedPolicyToPermissionSet(input) - if managedPoliciesErr != nil { - return fmt.Errorf("Error attaching Managed Policy to AWS SSO Permission Set: %s", managedPoliciesErr) - } - } - } - - // Reprovision if anything has changed - if d.HasChanges("description", "relay_state", "session_duration", "inline_policy", "managed_policy_arns", "tags") { - - // Auto provision all accounts - targetType := ssoadmin.ProvisionTargetTypeAllProvisionedAccounts - provisionInput := &ssoadmin.ProvisionPermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - TargetType: aws.String(targetType), - } - - log.Printf("[INFO] Provisioning AWS SSO Permission Set") - provisionResponse, err := ssoadminconn.ProvisionPermissionSet(provisionInput) - if err != nil { - return fmt.Errorf("Error provisioning AWS SSO Permission Set (%s): %w", d.Id(), err) - } - - status := provisionResponse.PermissionSetProvisioningStatus - - _, waitErr := waitForPermissionSetProvisioning(ssoadminconn, instanceArn, aws.StringValue(status.RequestId), d.Timeout(schema.TimeoutUpdate)) - if waitErr != nil { - return waitErr - } - } - - return resourceAwsSsoPermissionSetRead(d, meta) -} - -func resourceAwsSsoPermissionSetDelete(d *schema.ResourceData, meta interface{}) error { - ssoadminconn := meta.(*AWSClient).ssoadminconn - - permissionSetArn := d.Id() - instanceArn := d.Get("instance_arn").(string) - - log.Printf("[INFO] Deleting AWS SSO Permission Set: %s", permissionSetArn) - - params := &ssoadmin.DeletePermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - } - - _, err := ssoadminconn.DeletePermissionSet(params) - - if err != nil { - if isAWSErr(err, ssoadmin.ErrCodeResourceNotFoundException, "") { - log.Printf("[DEBUG] AWS SSO Permission Set not found") - d.SetId("") - return nil - } - return fmt.Errorf("Error deleting AWS SSO Permission Set (%s): %s", d.Id(), err) - } - - d.SetId("") - return nil -} - -func attachPoliciesToPermissionSet(ssoadminconn *ssoadmin.SSOAdmin, d *schema.ResourceData, permissionSetArn *string, instanceArn *string) error { - - if v, ok := d.GetOk("inline_policy"); ok { - log.Printf("[INFO] Attaching IAM inline policy to AWS SSO Permission Set") - - inlinePolicy := aws.String(v.(string)) - - input := &ssoadmin.PutInlinePolicyToPermissionSetInput{ - InlinePolicy: inlinePolicy, - InstanceArn: instanceArn, - PermissionSetArn: permissionSetArn, - } - - _, inlinePolicyErr := ssoadminconn.PutInlinePolicyToPermissionSet(input) - if inlinePolicyErr != nil { - return fmt.Errorf("Error attaching IAM inline policy to AWS SSO Permission Set: %s", 
inlinePolicyErr) - } - } - - if v, ok := d.GetOk("managed_policy_arns"); ok { - log.Printf("[INFO] Attaching Managed Policies to AWS SSO Permission Set") - - managedPolicies := expandStringSet(v.(*schema.Set)) - - for _, managedPolicyArn := range managedPolicies { - - input := &ssoadmin.AttachManagedPolicyToPermissionSetInput{ - InstanceArn: instanceArn, - ManagedPolicyArn: managedPolicyArn, - PermissionSetArn: permissionSetArn, - } - - _, managedPoliciesErr := ssoadminconn.AttachManagedPolicyToPermissionSet(input) - if managedPoliciesErr != nil { - return fmt.Errorf("Error attaching Managed Policy to AWS SSO Permission Set: %s", managedPoliciesErr) - } - } - } - - return nil -} - -func resourceAwsSsoPermissionSetParseID(id string) (string, error) { - // id = arn:${Partition}:sso:::permissionSet/${InstanceID}/${PermissionSetID} - idFormatErr := fmt.Errorf("Unexpected format of ARN (%s), expected arn:${Partition}:sso:::permissionSet/${InstanceId}/${PermissionSetId}", id) - permissionSetARN, err := arn.Parse(id) - if err != nil { - return "", idFormatErr - } - - // We need: - // * The InstanceID portion of the permission set ARN resource (arn:aws:sso:::permissionSet/${InstanceId}/${PermissionSetId}) - // Split up the resource of the permission set ARN - resourceParts := strings.Split(permissionSetARN.Resource, "/") - if len(resourceParts) != 3 || resourceParts[0] != "permissionSet" || resourceParts[1] == "" || resourceParts[2] == "" { - return "", idFormatErr - } - - // resourceParts = ["permissionSet","ins-123456A", "ps-56789B"] - instanceARN := &arn.ARN{ - Partition: permissionSetARN.Partition, - Service: permissionSetARN.Service, - Resource: fmt.Sprintf("instance/%s", resourceParts[1]), - } - - return instanceARN.String(), nil -} - -func waitForPermissionSetProvisioning(ssoadminconn *ssoadmin.SSOAdmin, instanceArn string, requestID string, timeout time.Duration) (*ssoadmin.PermissionSetProvisioningStatus, error) { - - stateConf := resource.StateChangeConf{ - Delay: AWSSSOPermissionSetProvisioningRetryDelay, - Pending: []string{ssoadmin.StatusValuesInProgress}, - Target: []string{ssoadmin.StatusValuesSucceeded}, - Timeout: timeout, - MinTimeout: AWSSSOPermissionSetProvisioningRetryMinTimeout, - Refresh: resourceAwsSsoPermissionSetProvisioningRefreshFunc(ssoadminconn, requestID, instanceArn), - } - status, err := stateConf.WaitForState() - if err != nil { - return nil, fmt.Errorf("Error waiting for AWS SSO Permission Set provisioning status: %s", err) - } - return status.(*ssoadmin.PermissionSetProvisioningStatus), nil -} - -func resourceAwsSsoPermissionSetProvisioningRefreshFunc(ssoadminconn *ssoadmin.SSOAdmin, requestID, instanceArn string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - input := &ssoadmin.DescribePermissionSetProvisioningStatusInput{ - InstanceArn: aws.String(instanceArn), - ProvisionPermissionSetRequestId: aws.String(requestID), - } - - resp, err := ssoadminconn.DescribePermissionSetProvisioningStatus(input) - if err != nil { - return resp, "", fmt.Errorf("Error describing permission set provisioning status: %s", err) - } - status := resp.PermissionSetProvisioningStatus - if aws.StringValue(status.Status) == ssoadmin.StatusValuesFailed { - return resp, ssoadmin.StatusValuesFailed, fmt.Errorf("Failed to provision AWS SSO Permission Set (%s): %s", aws.StringValue(status.PermissionSetArn), aws.StringValue(status.FailureReason)) - } - return status, aws.StringValue(status.Status), nil - - } -} diff --git 
a/aws/resource_aws_sso_permission_set_test.go b/aws/resource_aws_sso_permission_set_test.go deleted file mode 100644 index 4c4b7a3f0d1..00000000000 --- a/aws/resource_aws_sso_permission_set_test.go +++ /dev/null @@ -1,303 +0,0 @@ -package aws - -import ( - "fmt" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/ssoadmin" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfawsresource" -) - -func TestAccAWSSSOPermissionSet_basic(t *testing.T) { - var permissionSet, updatedPermissionSet ssoadmin.PermissionSet - resourceName := "aws_sso_permission_set.example" - rName := acctest.RandomWithPrefix("tf-sso-test") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSSOPermissionSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccSSOPermissionSetBasicConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), - resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "1"), - tfawsresource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), - resource.TestCheckResourceAttr(resourceName, "name", rName), - resource.TestCheckResourceAttr(resourceName, "description", "Just a test"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccSSOPermissionSetBasicConfigUpdated(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSOPermissionSetExists(resourceName, &updatedPermissionSet), - resource.TestCheckResourceAttr(resourceName, "managed_policy_arns.#", "2"), - tfawsresource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/ReadOnlyAccess"), - tfawsresource.TestCheckTypeSetElemAttr(resourceName, "managed_policy_arns.*", "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess"), - resource.TestCheckResourceAttr(resourceName, "name", rName), - resource.TestCheckResourceAttr(resourceName, "description", "Just a test update"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), - ), - }, - }, - }) -} - -func TestAccAWSSSOPermissionSet_disappears(t *testing.T) { - var permissionSet ssoadmin.PermissionSet - resourceName := "aws_sso_permission_set.example" - rName := acctest.RandomWithPrefix("tf-sso-test") - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSSOPermissionSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccSSOPermissionSetBasicConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), - testAccCheckAWSSSOPermissionSetDisappears(&permissionSet), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestAccAWSSSOPermissionSet_tags(t *testing.T) { - var permissionSet ssoadmin.PermissionSet - resourceName := "aws_sso_permission_set.example" - rName := acctest.RandomWithPrefix("tf-sso-test") - - resource.Test(t, resource.TestCase{ - PreCheck: func() 
{ testAccPreCheck(t); testAccPreCheckAWSSSOInstance(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSSOPermissionSetDestroy, - Steps: []resource.TestStep{ - { - Config: testAccSSOPermissionSetConfigTagsSingle(rName, "key1", "value1"), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), - resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), - resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccSSOPermissionSetConfigTagsMultiple(rName, "key1", "updatedvalue1", "key2", "value2"), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), - resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), - resource.TestCheckResourceAttr(resourceName, "tags.key1", "updatedvalue1"), - resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), - ), - }, - { - Config: testAccSSOPermissionSetConfigTagsSingle(rName, "key2", "value2"), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSOPermissionSetExists(resourceName, &permissionSet), - resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), - resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), - ), - }, - }, - }) -} - -func testAccCheckAWSSSOPermissionSetExists(resourceName string, permissionSet *ssoadmin.PermissionSet) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] - if !ok { - return fmt.Errorf("Not found: %s", resourceName) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Resource (%s) ID not set", resourceName) - } - - instanceArn, err := resourceAwsSsoPermissionSetParseID(rs.Primary.ID) - - if err != nil { - return err - } - - ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn - - permissionSetResp, permissionSetErr := ssoadminconn.DescribePermissionSet(&ssoadmin.DescribePermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(rs.Primary.ID), - }) - - if permissionSetErr != nil { - return permissionSetErr - } - - if *permissionSetResp.PermissionSet.PermissionSetArn == rs.Primary.ID { - *permissionSet = *permissionSetResp.PermissionSet - return nil - } - - return fmt.Errorf("AWS SSO Permission Set (%s) not found", rs.Primary.ID) - } -} - -func testAccCheckAWSSSOPermissionSetDestroy(s *terraform.State) error { - ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_sso_permission_set" { - continue - } - - idFormatErr := fmt.Errorf("Unexpected format of ARN (%s), expected arn:${Partition}:sso:::permissionSet/${InstanceId}/${PermissionSetId}", rs.Primary.ID) - permissionSetArn, err := arn.Parse(rs.Primary.ID) - if err != nil { - return err - } - - resourceParts := strings.Split(permissionSetArn.Resource, "/") - if len(resourceParts) != 3 || resourceParts[0] != "permissionSet" || resourceParts[1] == "" || resourceParts[2] == "" { - return idFormatErr - } - - // resourceParts = ["permissionSet","ins-123456A", "ps-56789B"] - instanceArn := arn.ARN{ - Partition: permissionSetArn.Partition, - Service: permissionSetArn.Service, - Resource: fmt.Sprintf("instance/%s", resourceParts[1]), - }.String() - - input := &ssoadmin.DescribePermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(rs.Primary.ID), - } - - 
output, err := ssoadminconn.DescribePermissionSet(input) - - if isAWSErr(err, ssoadmin.ErrCodeResourceNotFoundException, "") { - continue - } - - if err != nil { - return err - } - - if output != nil { - return fmt.Errorf("AWS SSO Permission Set (%s) still exists", rs.Primary.ID) - } - } - - return nil -} - -func testAccCheckAWSSSOPermissionSetDisappears(permissionSet *ssoadmin.PermissionSet) resource.TestCheckFunc { - return func(s *terraform.State) error { - ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn - - permissionSetArn, permissionSetErr := arn.Parse(*permissionSet.PermissionSetArn) - if permissionSetErr != nil { - return permissionSetErr - } - - resourceParts := strings.Split(permissionSetArn.Resource, "/") - - // resourceParts = ["permissionSet","ins-123456A", "ps-56789B"] - instanceArn := arn.ARN{ - Partition: permissionSetArn.Partition, - Service: permissionSetArn.Service, - Resource: fmt.Sprintf("instance/%s", resourceParts[1]), - }.String() - - input := &ssoadmin.DeletePermissionSetInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: permissionSet.PermissionSetArn, - } - - _, err := ssoadminconn.DeletePermissionSet(input) - - return err - - } -} - -func testAccSSOPermissionSetBasicConfig(rName string) string { - return fmt.Sprintf(` -data "aws_sso_instance" "selected" {} - -resource "aws_sso_permission_set" "example" { - name = "%s" - description = "Just a test" - instance_arn = data.aws_sso_instance.selected.arn - managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] -} -`, rName) -} - -func testAccSSOPermissionSetBasicConfigUpdated(rName string) string { - return fmt.Sprintf(` -data "aws_sso_instance" "selected" {} - -resource "aws_sso_permission_set" "example" { - name = "%s" - description = "Just a test update" - instance_arn = data.aws_sso_instance.selected.arn - managed_policy_arns = [ - "arn:aws:iam::aws:policy/ReadOnlyAccess", - "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess" - ] -} -`, rName) -} - -func testAccSSOPermissionSetConfigTagsSingle(rName, tagKey1, tagValue1 string) string { - return fmt.Sprintf(` -data "aws_sso_instance" "selected" {} - -resource "aws_sso_permission_set" "example" { - name = "%s" - description = "Just a test" - instance_arn = data.aws_sso_instance.selected.arn - managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] - - tags = { - %[2]q = %[3]q - } -} -`, rName, tagKey1, tagValue1) -} - -func testAccSSOPermissionSetConfigTagsMultiple(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return fmt.Sprintf(` -data "aws_sso_instance" "selected" {} - -resource "aws_sso_permission_set" "example" { - name = "%s" - description = "Just a test" - instance_arn = data.aws_sso_instance.selected.arn - managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] - - tags = { - %[2]q = %[3]q - %[4]q = %[5]q - } -} -`, rName, tagKey1, tagValue1, tagKey2, tagValue2) -} diff --git a/website/docs/d/sso_instance.html.markdown b/website/docs/d/sso_instance.html.markdown deleted file mode 100644 index d55f94c167d..00000000000 --- a/website/docs/d/sso_instance.html.markdown +++ /dev/null @@ -1,34 +0,0 @@ ---- -subcategory: "SSO Admin" -layout: "aws" -page_title: "AWS: aws_sso_instance" -description: |- - Get information on an AWS Single Sign-On Instance. ---- - -# Data Source: aws_sso_instance - -Use this data source to get the Single Sign-On Instance ARN and Identity Store ID. 
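The tag configuration builders above (`testAccSSOPermissionSetConfigTagsSingle` and `testAccSSOPermissionSetConfigTagsMultiple`) lean on Go's indexed printf verbs to feed one argument list into repeated placeholders. A minimal standalone illustration of `%[n]q`, with hypothetical values:

```go
package main

import "fmt"

func main() {
	// %[n]q selects the nth argument explicitly and quotes it, so a template
	// can reorder or reuse arguments, as the builders above do with
	// `%[2]q = %[3]q`. The name and tag values here are made up.
	out := fmt.Sprintf(`name = %[1]q
tags = {
  %[2]q = %[3]q
}`, "tf-sso-test", "key1", "value1")
	fmt.Println(out)
}
```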
- -## Example Usage - -```hcl -data "aws_sso_instance" "selected" {} - -output "arn" { - value = data.aws_sso_instance.selected.arn -} - -output "identity_store_id" { - value = data.aws_sso_instance.selected.identity_store_id -} -``` - -## Argument Reference - -There are no arguments available for this data source. - -## Attributes Reference - -* `arn` - The AWS ARN associated with the AWS Single Sign-On Instance. -* `identity_store_id` - The Identity Store ID associated with the AWS Single Sign-On Instance. diff --git a/website/docs/d/sso_permission_set.html.markdown b/website/docs/d/sso_permission_set.html.markdown deleted file mode 100644 index 8a2858bd3f7..00000000000 --- a/website/docs/d/sso_permission_set.html.markdown +++ /dev/null @@ -1,47 +0,0 @@ ---- -subcategory: "SSO Admin" -layout: "aws" -page_title: "AWS: aws_sso_permission_set" -description: |- - Get information on an AWS Single Sign-On Permission Set. ---- - -# Data Source: aws_sso_permission_set - -Use this data source to get the Single Sign-On Permission Set. - -## Example Usage - -```hcl -data "aws_sso_instance" "selected" {} - -data "aws_sso_permission_set" "example" { - instance_arn = data.aws_sso_instance.selected.arn - name = "Example" -} - -output "arn" { - value = data.aws_sso_permission_set.example.arn -} -``` - -## Argument Reference - -The following arguments are supported: - -* `instance_arn` - (Required) The AWS ARN associated with the AWS Single Sign-On Instance. -* `name` - (Required) The name of the AWS Single Sign-On Permission Set. - -## Attributes Reference - -In addition to all arguments above, the following attributes are exported: - -* `id` - The arn of the permission set. -* `arn` - The arn of the permission set. -* `created_date` - The created date of the permission set. -* `description` - The description of the permission set. -* `session_duration` - The session duration of the permission set. -* `relay_state` - The relay state of the permission set. -* `inline_policy` - The inline policy of the permission set. -* `managed_policy_arns` - The managed policies attached to the permission set. -* `tags` - The tags of the permission set. diff --git a/website/docs/r/sso_permission_set.html.markdown b/website/docs/r/sso_permission_set.html.markdown deleted file mode 100644 index ef3516c37e6..00000000000 --- a/website/docs/r/sso_permission_set.html.markdown +++ /dev/null @@ -1,73 +0,0 @@ ---- -subcategory: "SSO Admin" -layout: "aws" -page_title: "AWS: aws_sso_permission_set" -description: |- - Manages an AWS Single Sign-On permission set ---- - -# Resource: aws_sso_permission_set - -Provides an AWS Single Sign-On Permission Set resource - -## Example Usage - -```hcl -data "aws_sso_instance" "selected" {} - -data "aws_iam_policy_document" "example" { - statement { - sid = "1" - - actions = [ - "s3:ListAllMyBuckets", - "s3:GetBucketLocation", - ] - - resources = [ - "arn:aws:s3:::*", - ] - } -} - -resource "aws_sso_permission_set" "example" { - name = "Example" - description = "An example" - instance_arn = data.aws_sso_instance.selected.arn - session_duration = "PT1H" - relay_state = "https://console.aws.amazon.com/console/home" - inline_policy = data.aws_iam_policy_document.example.json - managed_policy_arns = [ - "arn:aws:iam::aws:policy/ReadOnlyAccess", - ] -} -``` - -## Argument Reference - -The following arguments are supported: - -* `instance_arn` - (Required) The AWS ARN associated with the AWS Single Sign-On Instance. -* `name` - (Required) The name of the AWS Single Sign-On Permission Set. 
-* `description` - (Optional) The description of the AWS Single Sign-On Permission Set. -* `session_duration` - (Optional) The session duration of the AWS Single Sign-On Permission Set in the ISO-8601 standard. The default value is `PT1H`. -* `relay_state` - (Optional) The relay state of AWS Single Sign-On Permission Set. -* `inline_policy` - (Optional) The inline policy of the AWS Single Sign-On Permission Set. -* `managed_policy_arns` - (Optional) The managed policies attached to the AWS Single Sign-On Permission Set. -* `tags` - (Optional) Key-value map of resource tags. - -## Attribute Reference - -In addition to all arguments above, the following attributes are exported: - -* `id` - The arn of the AWS Single Sign-On Permission Set. -* `arn` - The arn of the AWS Single Sign-On Permission Set. -* `created_date` - The created date of the AWS Single Sign-On Permission Set. - -## Import - -`aws_sso_permission_set` can be imported by using the AWS Single Sign-On Permission Set Resource Name (ARN), e.g. - -``` -$ terraform import aws_sso_permission_set.example arn:aws:sso:::permissionSet/ssoins-2938j0x8920sbj72/ps-80383020jr9302rk -``` From e6cc362756ccd59a9179f3e76ca4ed3b4a876404 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Mon, 11 Jan 2021 22:02:58 -0500 Subject: [PATCH 0598/1212] resource/aws_s3_bucket_inventory: Prevent crashes with empty destination, filter, and schedule configuration blocks (#17055) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16952 Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16953 Output from acceptance testing: ``` --- PASS: TestAccAWSS3BucketInventory_encryptWithSSEKMS (26.70s) --- PASS: TestAccAWSS3BucketInventory_basic (26.73s) --- PASS: TestAccAWSS3BucketInventory_encryptWithSSES3 (27.01s) ``` --- aws/resource_aws_s3_bucket_inventory.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_s3_bucket_inventory.go b/aws/resource_aws_s3_bucket_inventory.go index 0eec3467790..ea19b407e75 100644 --- a/aws/resource_aws_s3_bucket_inventory.go +++ b/aws/resource_aws_s3_bucket_inventory.go @@ -200,7 +200,7 @@ func resourceAwsS3BucketInventoryPut(d *schema.ResourceData, meta interface{}) e inventoryConfiguration.OptionalFields = expandStringList(v.(*schema.Set).List()) } - if v, ok := d.GetOk("schedule"); ok { + if v, ok := d.GetOk("schedule"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { scheduleList := v.([]interface{}) scheduleMap := scheduleList[0].(map[string]interface{}) inventoryConfiguration.Schedule = &s3.InventorySchedule{ @@ -208,13 +208,13 @@ func resourceAwsS3BucketInventoryPut(d *schema.ResourceData, meta interface{}) e } } - if v, ok := d.GetOk("filter"); ok { + if v, ok := d.GetOk("filter"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { filterList := v.([]interface{}) filterMap := filterList[0].(map[string]interface{}) inventoryConfiguration.Filter = expandS3InventoryFilter(filterMap) } - if v, ok := d.GetOk("destination"); ok { + if v, ok := d.GetOk("destination"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { destinationList := v.([]interface{}) destinationMap := destinationList[0].(map[string]interface{}) bucketList := destinationMap["bucket"].([]interface{}) From 93470d705cb32adf3d648843b9a65d1dca81e4de Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Mon, 11 Jan 2021 22:03:48 -0500 Subject: [PATCH 0599/1212] Update CHANGELOG for #17055 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git 
a/CHANGELOG.md b/CHANGELOG.md
index 9c1aca3b585..834d128b1db 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,7 @@ BUG FIXES
 * resource/aws_db_instance: Correctly validate `final_snapshot_identifier` argument at plan-time [GH-16885]
 * resource/aws_networkfirewall_rule_group: Prevent resource recreation due to `stateful_rule` changes after creation [GH-16884]
 * resource/aws_route53_zone_association: Prevent deletion errors for missing Hosted Zone or VPC association [GH-17023]
+* resource/aws_s3_bucket_inventory: Prevent crashes with empty `destination`, `filter`, and `schedule` configuration blocks [GH-17055]
 
 ## 3.23.0 (January 08, 2021)
 
From c5a862b09266bfebaceffeb4ff316d5d39c0db0f Mon Sep 17 00:00:00 2001
From: Brian Flad
Date: Tue, 12 Jan 2021 11:03:53 -0500
Subject: [PATCH 0600/1212] .github/workflows: Reduce goreleaser parallelism to
 match GitHub Actions hosted runners (#17067)

Most of the GitHub Actions logging is either missing or cut off without
explanation, but finally found one with a `signal: killed` entry. We may be
triggering oom-killer or another abuse protection mechanism, so this attempts
to reduce the footprint on the runner. I did not immediately see a way to
dynamically fetch the processor count via workflow information.
---
 .github/workflows/snapshot.yml           | 2 +-
 .github/workflows/terraform_provider.yml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/snapshot.yml b/.github/workflows/snapshot.yml
index b4accb34088..10f8d82710b 100644
--- a/.github/workflows/snapshot.yml
+++ b/.github/workflows/snapshot.yml
@@ -27,7 +27,7 @@ jobs:
       - name: goreleaser release
         uses: goreleaser/goreleaser-action@v2
         with:
-          args: release --rm-dist --skip-sign --snapshot --timeout 2h
+          args: release --parallelism 2 --rm-dist --skip-sign --snapshot --timeout 2h
       - name: artifact naming
         id: naming
         run: |
diff --git a/.github/workflows/terraform_provider.yml b/.github/workflows/terraform_provider.yml
index 857d829f980..1376c45dd32 100644
--- a/.github/workflows/terraform_provider.yml
+++ b/.github/workflows/terraform_provider.yml
@@ -300,7 +300,7 @@ jobs:
       - name: goreleaser build
         uses: goreleaser/goreleaser-action@v2
         with:
-          args: build --snapshot --timeout 2h
+          args: build --parallelism 2 --snapshot --timeout 2h
 
   semgrep:
     runs-on: ubuntu-latest
From ab3347c10736acdbd269ca74c4519fc4a87289a6 Mon Sep 17 00:00:00 2001
From: Graham Davison
Date: Tue, 12 Jan 2021 10:31:55 -0800
Subject: [PATCH 0601/1212] Updates to aws_elasticache_replication_group.test

---
 ...ource_aws_elasticache_replication_group.go |  4 ++-
 ..._aws_elasticache_replication_group_test.go | 34 +++++++++++++++++++
 2 files changed, 37 insertions(+), 1 deletion(-)

diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go
index bd130fe8495..1281ebab505 100644
--- a/aws/resource_aws_elasticache_replication_group.go
+++ b/aws/resource_aws_elasticache_replication_group.go
@@ -907,7 +907,9 @@ func deleteElasticacheReplicationGroup(replicationGroupID string, conn *elastica
 	input := &elasticache.DeleteReplicationGroupInput{
 		ReplicationGroupId: aws.String(replicationGroupID),
 	}
-	input.FinalSnapshotIdentifier = aws.String(finalSnapshotID)
+	if finalSnapshotID != "" {
+		input.FinalSnapshotIdentifier = aws.String(finalSnapshotID)
+	}
 
 	// 10 minutes should give any creating/deleting cache clusters or snapshots time to complete
 	err := resource.Retry(10*time.Minute, func() *resource.RetryError {
diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go
index ca452bd3355..4d710f0a5fa 100644
--- a/aws/resource_aws_elasticache_replication_group_test.go
+++ b/aws/resource_aws_elasticache_replication_group_test.go
@@ -839,6 +839,27 @@ func TestAccAWSElasticacheReplicationGroup_tags(t *testing.T) {
 	})
 }
 
+func TestAccAWSElasticacheReplicationGroup_FinalSnapshot(t *testing.T) {
+	var rg elasticache.ReplicationGroup
+	rName := acctest.RandomWithPrefix("tf-acc-test")
+	resourceName := "aws_elasticache_replication_group.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAWSElasticacheReplicationGroupConfigFinalSnapshot(rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg),
+					resource.TestCheckResourceAttr(resourceName, "final_snapshot_identifier", rName),
+				),
+			},
+		},
+	})
+}
+
 func TestResourceAWSElastiCacheReplicationGroupEngineValidation(t *testing.T) {
 	cases := []struct {
 		Value string
@@ -1761,3 +1782,16 @@ resource "aws_elasticache_replication_group" "test" {
 }
 `, rName, tagKey1, tagValue1, tagKey2, tagValue2)
 }
+
+func testAccAWSElasticacheReplicationGroupConfigFinalSnapshot(rName string) string {
+	return fmt.Sprintf(`
+resource "aws_elasticache_replication_group" "test" {
+  replication_group_id          = %[1]q
+  replication_group_description = "test description"
+  node_type                     = "cache.t3.small"
+  number_cache_clusters         = 1
+
+  final_snapshot_identifier = %[1]q
+}
+`, rName)
+}
From 7eb6172e0800dbcc91db8cbd55f7505c6125bdd0 Mon Sep 17 00:00:00 2001
From: Graham Davison
Date: Tue, 12 Jan 2021 10:44:12 -0800
Subject: [PATCH 0602/1212] Update CHANGELOG for #15592

---
 CHANGELOG.md | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 834d128b1db..6c7e71f402a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,11 @@ BUG FIXES
 * resource/aws_route53_zone_association: Prevent deletion errors for missing Hosted Zone or VPC association [GH-17023]
 * resource/aws_s3_bucket_inventory: Prevent crashes with empty `destination`, `filter`, and `schedule` configuration blocks [GH-17055]
 
+ENHANCEMENTS
+
+* resource/aws_elasticache_cluster: Add support for final snapshot with Redis engine [GH-15592]
+* resource/aws_elasticache_replication_group: Add support for final snapshot [GH-15592]
+
 ## 3.23.0 (January 08, 2021)
 
 FEATURES
From 189090746e4049d2e7510069fbfd650415f30857 Mon Sep 17 00:00:00 2001
From: Graham Davison
Date: Tue, 12 Jan 2021 10:47:52 -0800
Subject: [PATCH 0603/1212] Update aws/resource_aws_elasticache_parameter_group_test.go

Co-authored-by: Brian Flad
---
 aws/resource_aws_elasticache_parameter_group_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/aws/resource_aws_elasticache_parameter_group_test.go b/aws/resource_aws_elasticache_parameter_group_test.go
index cd8c5599006..41fd660ee7d 100644
--- a/aws/resource_aws_elasticache_parameter_group_test.go
+++ b/aws/resource_aws_elasticache_parameter_group_test.go
@@ -65,7 +65,7 @@ func TestAccAWSElasticacheParameterGroup_addParameter(t *testing.T) {
 				),
 			},
 			{
-				ResourceName:      "aws_elasticache_parameter_group.test",
+				ResourceName:      resourceName,
 				ImportState:       true,
 				ImportStateVerify: true,
 			},
From 3301df15f6b78929a46c3a3e6dbce2bdaaa2a946 Mon Sep 17 00:00:00 2001
From: Graham Davison
Date:
Tue, 12 Jan 2021 10:48:04 -0800 Subject: [PATCH 0604/1212] Update aws/resource_aws_elasticache_parameter_group_test.go Co-authored-by: Brian Flad --- aws/resource_aws_elasticache_parameter_group_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_elasticache_parameter_group_test.go b/aws/resource_aws_elasticache_parameter_group_test.go index 41fd660ee7d..a2feb62ac49 100644 --- a/aws/resource_aws_elasticache_parameter_group_test.go +++ b/aws/resource_aws_elasticache_parameter_group_test.go @@ -312,7 +312,7 @@ func TestAccAWSElasticacheParameterGroup_UppercaseName(t *testing.T) { Config: testAccAWSElasticacheParameterGroupConfigParameter1(rName, "redis2.8", "appendonly", "yes"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheParameterGroupExists(resourceName, &v), - resource.TestCheckResourceAttr("aws_elasticache_parameter_group.test", "name", fmt.Sprintf("tf-elastipg-%d", rInt)), + resource.TestCheckResourceAttr(resourceName, "name", fmt.Sprintf("tf-elastipg-%d", rInt)), ), }, { From ce729e1513d87d8ef37dc6a25fc033fce33789d9 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 12 Jan 2021 10:48:10 -0800 Subject: [PATCH 0605/1212] Update aws/resource_aws_elasticache_parameter_group_test.go Co-authored-by: Brian Flad --- aws/resource_aws_elasticache_parameter_group_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_elasticache_parameter_group_test.go b/aws/resource_aws_elasticache_parameter_group_test.go index a2feb62ac49..a83efdd88b6 100644 --- a/aws/resource_aws_elasticache_parameter_group_test.go +++ b/aws/resource_aws_elasticache_parameter_group_test.go @@ -316,7 +316,7 @@ func TestAccAWSElasticacheParameterGroup_UppercaseName(t *testing.T) { ), }, { - ResourceName: "aws_elasticache_parameter_group.test", + ResourceName: resourceName, ImportState: true, ImportStateVerify: true, }, From 559053de76061075206e9ddcc6263552f2ba470e Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Tue, 12 Jan 2021 15:32:26 -0500 Subject: [PATCH 0606/1212] test/resource/elasticache_cluster: Fix lint issue --- aws/resource_aws_elasticache_cluster_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_elasticache_cluster_test.go b/aws/resource_aws_elasticache_cluster_test.go index 0edea3ec325..24b307fbd51 100644 --- a/aws/resource_aws_elasticache_cluster_test.go +++ b/aws/resource_aws_elasticache_cluster_test.go @@ -1342,7 +1342,7 @@ resource "aws_elasticache_cluster" "test" { engine = "memcached" node_type = "cache.t3.small" num_cache_nodes = 1 - + final_snapshot_identifier = %[1]q } `, rName) @@ -1355,7 +1355,7 @@ resource "aws_elasticache_cluster" "test" { engine = "redis" node_type = "cache.t3.small" num_cache_nodes = 1 - + final_snapshot_identifier = %[1]q } `, rName) From 618af7825fdfd5625f32e21e06a0039afd4eb696 Mon Sep 17 00:00:00 2001 From: Kyrill Lebediev Date: Tue, 13 Oct 2020 16:54:08 +0300 Subject: [PATCH 0607/1212] resource/aws_instance: Add tags parameter to *_block_device block --- aws/resource_aws_instance.go | 118 ++++++++++++++++++-- aws/resource_aws_instance_test.go | 155 +++++++++++++++++++++++++- aws/tags.go | 9 ++ website/docs/r/instance.html.markdown | 4 +- 4 files changed, 275 insertions(+), 11 deletions(-) diff --git a/aws/resource_aws_instance.go b/aws/resource_aws_instance.go index 2ff3c7cbb51..c9fc5cb7e04 100644 --- a/aws/resource_aws_instance.go +++ b/aws/resource_aws_instance.go @@ -339,7 +339,7 @@ func resourceAwsInstance() *schema.Resource { 
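	// The change just below relaxes volume_tags from a computed map to a plain
	// optional map: volume tags are only read back into volume_tags when that
	// attribute is set and no per-device "tags" are configured on
	// root_block_device or ebs_block_device (see blockDeviceTagsDefined below).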
"tags": tagsSchema(), - "volume_tags": tagsSchemaComputed(), + "volume_tags": tagsSchema(), "ebs_block_device": { Type: schema.TypeSet, @@ -396,6 +396,7 @@ func resourceAwsInstance() *schema.Resource { ForceNew: true, DiffSuppressFunc: throughputDiffSuppressFunc, }, + "tags": tagsSchemaConflictsWith([]string{"volume_tags"}), "volume_size": { Type: schema.TypeInt, @@ -510,6 +511,7 @@ func resourceAwsInstance() *schema.Resource { Computed: true, DiffSuppressFunc: throughputDiffSuppressFunc, }, + "tags": tagsSchemaConflictsWith([]string{"volume_tags"}), "volume_size": { Type: schema.TypeInt, @@ -762,6 +764,39 @@ func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error { }) } + // tags in root_block_device and ebs_block_device + volumeTagsToCreate := map[string]map[string]interface{}{} + if v, ok := d.GetOk("root_block_device"); ok { + vL := v.([]interface{}) + for _, v := range vL { + bd := v.(map[string]interface{}) + if tagsm, ok := bd["tags"].(map[string]interface{}); ok && len(tagsm) > 0 { + if rootVolumeId := getRootVolumeId(instance); rootVolumeId != "" { + volumeTagsToCreate[rootVolumeId] = tagsm + } + } + } + } + + if v, ok := d.GetOk("ebs_block_device"); ok { + vL := v.(*schema.Set).List() + for _, v := range vL { + bd := v.(map[string]interface{}) + if tagsm, ok := bd["tags"].(map[string]interface{}); ok && len(tagsm) > 0 { + devName := bd["device_name"].(string) + if volumeId := getVolumeIdByDeviceName(instance, devName); volumeId != "" { + volumeTagsToCreate[volumeId] = tagsm + } + } + } + } + + for vol, tagsm := range volumeTagsToCreate { + if err := keyvaluetags.Ec2CreateTags(conn, vol, tagsm); err != nil { + log.Printf("[ERR] Error creating tags for EBS volume %s: %s", vol, err) + } + } + // Update if we need to return resourceAwsInstanceUpdate(d, meta) } @@ -936,13 +971,15 @@ func resourceAwsInstanceRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("error setting tags: %s", err) } - volumeTags, err := readVolumeTags(conn, d.Id()) - if err != nil { - return err - } + if _, ok := d.GetOk("volume_tags"); ok && !blockDeviceTagsDefined(d) { + volumeTags, err := readVolumeTags(conn, d.Id()) + if err != nil { + return err + } - if err := d.Set("volume_tags", keyvaluetags.Ec2KeyValueTags(volumeTags).IgnoreAws().Map()); err != nil { - return fmt.Errorf("error setting volume_tags: %s", err) + if err := d.Set("volume_tags", keyvaluetags.Ec2KeyValueTags(volumeTags).IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting volume_tags: %s", err) + } } if err := readSecurityGroups(d, instance, conn); err != nil { @@ -1522,6 +1559,14 @@ func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error { } } } + + if d.HasChange("root_block_device.0.tags") { + o, n := d.GetChange("root_block_device.0.tags") + + if err := keyvaluetags.Ec2UpdateTags(conn, volumeID, o, n); err != nil { + return fmt.Errorf("error updating tags for volume (%s): %s", volumeID, err) + } + } } // TODO(mitchellh): wait for the attributes we modified to @@ -1667,7 +1712,7 @@ func stringifyStateReason(sr *ec2.StateReason) string { } func readBlockDevices(d *schema.ResourceData, instance *ec2.Instance, conn *ec2.EC2) error { - ibds, err := readBlockDevicesFromInstance(instance, conn) + ibds, err := readBlockDevicesFromInstance(d, instance, conn) if err != nil { return err } @@ -1743,7 +1788,7 @@ func disassociateInstanceProfile(associationId *string, conn *ec2.EC2) error { return nil } -func readBlockDevicesFromInstance(instance *ec2.Instance, conn 
*ec2.EC2) (map[string]interface{}, error) { +func readBlockDevicesFromInstance(d *schema.ResourceData, instance *ec2.Instance, conn *ec2.EC2) (map[string]interface{}, error) { blockDevices := make(map[string]interface{}) blockDevices["ebs"] = make([]map[string]interface{}, 0) blockDevices["root"] = nil @@ -1804,6 +1849,9 @@ func readBlockDevicesFromInstance(instance *ec2.Instance, conn *ec2.EC2) (map[st if instanceBd.DeviceName != nil { bd["device_name"] = aws.StringValue(instanceBd.DeviceName) } + if _, ok := d.GetOk("volume_tags"); !ok && vol.Tags != nil { + bd["tags"] = keyvaluetags.Ec2KeyValueTags(vol.Tags).IgnoreAws().Map() + } if blockDeviceIsRoot(instanceBd, instance) { blockDevices["root"] = bd @@ -2521,6 +2569,58 @@ func getAwsInstanceVolumeIds(conn *ec2.EC2, instanceId string) ([]string, error) return volumeIds, nil } +func getRootVolumeId(instance *ec2.Instance) string { + rootVolumeId := "" + for _, bd := range instance.BlockDeviceMappings { + if bd.Ebs != nil && blockDeviceIsRoot(bd, instance) { + if bd.Ebs.VolumeId != nil { + rootVolumeId = aws.StringValue(bd.Ebs.VolumeId) + } + break + } + } + + return rootVolumeId +} + +func getVolumeIdByDeviceName(instance *ec2.Instance, deviceName string) string { + volumeId := "" + for _, bd := range instance.BlockDeviceMappings { + if aws.StringValue(bd.DeviceName) == deviceName { + if bd.Ebs != nil { + volumeId = aws.StringValue(bd.Ebs.VolumeId) + break + } + } + } + + return volumeId +} + +func blockDeviceTagsDefined(d *schema.ResourceData) bool { + if v, ok := d.GetOk("root_block_device"); ok { + vL := v.([]interface{}) + for _, v := range vL { + bd := v.(map[string]interface{}) + if tagsm, ok := bd["tags"].(map[string]interface{}); ok && len(tagsm) > 0 { + return true + } + } + } + + if v, ok := d.GetOk("ebs_block_device"); ok { + vL := v.(*schema.Set).List() + for _, v := range vL { + bd := v.(map[string]interface{}) + if tagsm, ok := bd["tags"].(map[string]interface{}); ok && len(tagsm) > 0 { + return true + } + } + } + + return false +} + func getCreditSpecifications(conn *ec2.EC2, instanceId string) ([]map[string]interface{}, error) { var creditSpecifications []map[string]interface{} creditSpecification := make(map[string]interface{}) diff --git a/aws/resource_aws_instance_test.go b/aws/resource_aws_instance_test.go index 1b38cd0e5cc..94fefefa9d8 100644 --- a/aws/resource_aws_instance_test.go +++ b/aws/resource_aws_instance_test.go @@ -1116,7 +1116,7 @@ func TestAccAWSInstance_volumeTags(t *testing.T) { }) } -func TestAccAWSInstance_volumeTagsComputed(t *testing.T) { +func TestAccAWSInstance_volumeTagsWithAttachedVolume(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" @@ -1140,6 +1140,53 @@ func TestAccAWSInstance_volumeTagsComputed(t *testing.T) { }) } +func TestAccAWSInstance_blockDeviceVolumeTags(t *testing.T) { + var v ec2.Instance + resourceName := "aws_instance.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckInstanceConfigBlockDeviceNoRootVolumeTags(), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "ebs_block_device.0.tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "ebs_block_device.0.tags.Name", "terraform-test-ebs"), + 
resource.TestCheckResourceAttr(resourceName, "ebs_block_device.1.tags.%", "0"), + ), + }, + { + Config: testAccCheckInstanceConfigBlockDeviceCreateVolumeTags(), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.tags.Name", "terraform-test-root"), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.tags.Purpose", "test"), + ), + }, + { + Config: testAccCheckInstanceConfigBlockDeviceUpdateVolumeTags(), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.tags.Name", "terraform-test-root-new"), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.tags.Env", "dev"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ephemeral_block_device"}, + }, + }, + }) +} + func TestAccAWSInstance_instanceProfileChange(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" @@ -4070,6 +4117,112 @@ resource "aws_instance" "test" { `) } +func testAccCheckInstanceConfigBlockDeviceNoRootVolumeTags() string { + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` +resource "aws_instance" "test" { + ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id + + instance_type = "t2.medium" + + root_block_device { + volume_type = "gp2" + } + + ebs_block_device { + device_name = "/dev/sdb" + volume_size = 1 + tags = { + Name = "terraform-test-ebs" + } + } + + ebs_block_device { + device_name = "/dev/sdc" + volume_size = 1 + } + + + ephemeral_block_device { + device_name = "/dev/sde" + virtual_name = "ephemeral0" + } +} +`)) +} + +func testAccCheckInstanceConfigBlockDeviceCreateVolumeTags() string { + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` +resource "aws_instance" "test" { + ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id + + instance_type = "t2.medium" + + root_block_device { + volume_type = "gp2" + tags = { + Name = "terraform-test-root" + Purpose = "test" + } + } + + ebs_block_device { + device_name = "/dev/sdb" + volume_size = 1 + tags = { + Name = "terraform-test-ebs" + } + } + + ebs_block_device { + device_name = "/dev/sdc" + volume_size = 1 + } + + ephemeral_block_device { + device_name = "/dev/sde" + virtual_name = "ephemeral0" + } +} +`)) +} + +func testAccCheckInstanceConfigBlockDeviceUpdateVolumeTags() string { + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` +resource "aws_instance" "test" { + ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id + + instance_type = "t2.medium" + + root_block_device { + volume_type = "gp2" + tags = { + Name = "terraform-test-root-new" + Env = "dev" + } + } + + ebs_block_device { + device_name = "/dev/sdb" + volume_size = 1 + tags = { + Name = "terraform-test-ebs" + } + } + + ebs_block_device { + device_name = "/dev/sdc" + volume_size = 1 + } + + + ephemeral_block_device { + device_name = "/dev/sde" + virtual_name = "ephemeral0" + } +} +`)) +} + var testAccCheckInstanceConfigEBSBlockDeviceInvalidIops = composeConfig(testAccAwsEc2InstanceAmiWithEbsRootVolume, ` resource "aws_instance" "test" { ami = data.aws_ami.ami.id diff --git a/aws/tags.go b/aws/tags.go index 8abb4db0c55..cf9ed21dfe9 100644 --- 
a/aws/tags.go +++ b/aws/tags.go @@ -35,6 +35,15 @@ func tagsSchemaForceNew() *schema.Schema { } } +func tagsSchemaConflictsWith(conflictsWith []string) *schema.Schema { + return &schema.Schema{ + ConflictsWith: conflictsWith, + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + } +} + // ec2TagsFromTagDescriptions returns the tags from the given tag descriptions. // No attempt is made to remove duplicates. func ec2TagsFromTagDescriptions(tds []*ec2.TagDescription) []*ec2.Tag { diff --git a/website/docs/r/instance.html.markdown b/website/docs/r/instance.html.markdown index f5520686d1b..4dbf799fc69 100644 --- a/website/docs/r/instance.html.markdown +++ b/website/docs/r/instance.html.markdown @@ -97,7 +97,7 @@ instances. See [Shutdown Behavior](https://docs.aws.amazon.com/AWSEC2/latest/Use * `ipv6_address_count`- (Optional) A number of IPv6 addresses to associate with the primary network interface. Amazon EC2 chooses the IPv6 addresses from the range of your subnet. * `ipv6_addresses` - (Optional) Specify one or more IPv6 addresses from the range of the subnet to associate with the primary network interface * `tags` - (Optional) A map of tags to assign to the resource. -* `volume_tags` - (Optional) A map of tags to assign to the devices created by the instance at launch time. +* `volume_tags` - (Optional) A map of tags to assign to the devices created by the instance at launch time. Not recommended to use in configurations with EBS volumes attached via `aws_volume_attachment` resource. Use `tags` in `root_block_device` parameter in such cases. * `root_block_device` - (Optional) Customize details about the root block device of the instance. See [Block Devices](#block-devices) below for details. * `ebs_block_device` - (Optional) Additional EBS block devices to attach to the @@ -136,6 +136,7 @@ The `root_block_device` mapping supports the following: on instance termination (Default: `true`). * `encrypted` - (Optional) Enable volume encryption. (Default: `false`). Must be configured to perform drift detection. * `kms_key_id` - (Optional) Amazon Resource Name (ARN) of the KMS Key to use when encrypting the volume. Must be configured to perform drift detection. +* `tags` - (Optional) A map of tags to assign to the device. Modifying any of the `root_block_device` settings other than `volume_size` requires resource replacement. @@ -156,6 +157,7 @@ Each `ebs_block_device` supports the following: encryption](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) on the volume (Default: `false`). Cannot be used with `snapshot_id`. Must be configured to perform drift detection. * `kms_key_id` - (Optional) Amazon Resource Name (ARN) of the KMS Key to use when encrypting the volume. Must be configured to perform drift detection. +* `tags` - (Optional) A map of tags to assign to the device. ~> **NOTE:** Currently, changes to the `ebs_block_device` configuration of _existing_ resources cannot be automatically detected by Terraform. To manage changes and attachments of an EBS block to an instance, use the `aws_ebs_volume` and `aws_volume_attachment` resources instead. If you use `ebs_block_device` on an `aws_instance`, Terraform will assume management over the full set of non-root EBS block devices for the instance, treating additional block devices as drift. For this reason, `ebs_block_device` cannot be mixed with external `aws_ebs_volume` and `aws_volume_attachment` resources for a given instance. 
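To make the conflict wiring above concrete in isolation, here is a minimal, self-contained sketch of the `ConflictsWith` pattern that `tagsSchemaConflictsWith` enables. It uses the plugin SDK v2 `helper/schema` package the provider already imports; the package name and `exampleResource` are hypothetical stand-ins, not part of the provider.

```go
package example

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// tagsSchemaConflictsWith mirrors the helper added to aws/tags.go above:
// an optional map of strings that fails validation whenever any attribute
// named in conflictsWith is also set in the configuration.
func tagsSchemaConflictsWith(conflictsWith []string) *schema.Schema {
	return &schema.Schema{
		ConflictsWith: conflictsWith,
		Type:          schema.TypeMap,
		Optional:      true,
		Elem:          &schema.Schema{Type: schema.TypeString},
	}
}

// exampleResource is a hypothetical resource showing the two tagging paths.
func exampleResource() *schema.Resource {
	return &schema.Resource{
		Schema: map[string]*schema.Schema{
			// Top-level map applied to every volume at launch time.
			"volume_tags": {
				Type:     schema.TypeMap,
				Optional: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},
			"root_block_device": {
				Type:     schema.TypeList,
				Optional: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						// Setting this together with volume_tags fails
						// plan-time validation.
						"tags": tagsSchemaConflictsWith([]string{"volume_tags"}),
					},
				},
			},
		},
	}
}
```

The conflict is declared only on the nested attribute, and the SDK reports the error against that declaring attribute, e.g. `"root_block_device.0.tags": conflicts with volume_tags`, which is the message the acceptance tests added later in this series expect.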
From 16160587ddce2a5ea9e18a328afd8c6a33b250b5 Mon Sep 17 00:00:00 2001 From: Kyrill Lebediev Date: Thu, 12 Nov 2020 17:12:10 +0200 Subject: [PATCH 0608/1212] resource/aws_instance: Add more test cases for volume tags --- aws/resource_aws_instance_test.go | 70 ++++++++++++++++++++++++++- website/docs/r/instance.html.markdown | 5 +- 2 files changed, 73 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_instance_test.go b/aws/resource_aws_instance_test.go index 94fefefa9d8..ca7b2697355 100644 --- a/aws/resource_aws_instance_test.go +++ b/aws/resource_aws_instance_test.go @@ -1149,6 +1149,14 @@ func TestAccAWSInstance_blockDeviceVolumeTags(t *testing.T) { Providers: testAccProviders, CheckDestroy: testAccCheckInstanceDestroy, Steps: []resource.TestStep{ + { + Config: testAccCheckInstanceConfigBlockDeviceRootTagsConflictWithVolumeTags(), + ExpectError: regexp.MustCompile(`"root_block_device\.0\.tags": conflicts with volume_tags`), + }, + { + Config: testAccCheckInstanceConfigBlockDeviceEbsTagsConflictWithVolumeTags(), + ExpectError: regexp.MustCompile(`"ebs_block_device\.0\.tags": conflicts with volume_tags`), + }, { Config: testAccCheckInstanceConfigBlockDeviceNoRootVolumeTags(), Check: resource.ComposeTestCheckFunc( @@ -4079,6 +4087,62 @@ resource "aws_volume_attachment" "test" { `) } +func testAccCheckInstanceConfigBlockDeviceRootTagsConflictWithVolumeTags() string { + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` +resource "aws_instance" "test" { + ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id + + instance_type = "t2.medium" + + root_block_device { + volume_type = "gp2" + volume_size = 11 + + tags = { + Name = "root-tag" + } + } + + ebs_block_device { + device_name = "/dev/sdb" + volume_size = 9 + } + + volume_tags = { + Name = "volume-tags" + } +} +`)) +} + +func testAccCheckInstanceConfigBlockDeviceEbsTagsConflictWithVolumeTags() string { + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` +resource "aws_instance" "test" { + ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id + + instance_type = "t2.medium" + + root_block_device { + volume_type = "gp2" + volume_size = 11 + } + + ebs_block_device { + device_name = "/dev/sdb" + volume_size = 9 + + tags = { + Name = "ebs-volume" + } + } + + volume_tags = { + Name = "volume-tags" + } +} +`)) +} + func testAccCheckInstanceConfigNoVolumeTags() string { return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { @@ -4131,6 +4195,7 @@ resource "aws_instance" "test" { ebs_block_device { device_name = "/dev/sdb" volume_size = 1 + tags = { Name = "terraform-test-ebs" } @@ -4141,7 +4206,6 @@ resource "aws_instance" "test" { volume_size = 1 } - ephemeral_block_device { device_name = "/dev/sde" virtual_name = "ephemeral0" @@ -4159,6 +4223,7 @@ resource "aws_instance" "test" { root_block_device { volume_type = "gp2" + tags = { Name = "terraform-test-root" Purpose = "test" @@ -4168,6 +4233,7 @@ resource "aws_instance" "test" { ebs_block_device { device_name = "/dev/sdb" volume_size = 1 + tags = { Name = "terraform-test-ebs" } @@ -4195,6 +4261,7 @@ resource "aws_instance" "test" { root_block_device { volume_type = "gp2" + tags = { Name = "terraform-test-root-new" Env = "dev" @@ -4204,6 +4271,7 @@ resource "aws_instance" "test" { ebs_block_device { device_name = "/dev/sdb" volume_size = 1 + tags = { Name = "terraform-test-ebs" } diff --git a/website/docs/r/instance.html.markdown b/website/docs/r/instance.html.markdown index 4dbf799fc69..f89cc6ee6e1 
100644 --- a/website/docs/r/instance.html.markdown +++ b/website/docs/r/instance.html.markdown @@ -97,7 +97,10 @@ instances. See [Shutdown Behavior](https://docs.aws.amazon.com/AWSEC2/latest/Use * `ipv6_address_count`- (Optional) A number of IPv6 addresses to associate with the primary network interface. Amazon EC2 chooses the IPv6 addresses from the range of your subnet. * `ipv6_addresses` - (Optional) Specify one or more IPv6 addresses from the range of the subnet to associate with the primary network interface * `tags` - (Optional) A map of tags to assign to the resource. -* `volume_tags` - (Optional) A map of tags to assign to the devices created by the instance at launch time. Not recommended to use in configurations with EBS volumes attached via `aws_volume_attachment` resource. Use `tags` in `root_block_device` parameter in such cases. +* `volume_tags` - (Optional) A map of tags to assign to the devices created by the instance at launch time. + +~> **NOTE:** Use `volume_tags` to apply the same tags to an instance's root and EBS devices. Using `volume_tags` is incompatible with other ways of tagging an instance's volumes such as using `tags` in `root_block_device` or `ebs_block_device` blocks, or using `tags` in an `aws_ebs_volume` resource attached via `aws_volume_attachment`. Using `volume_tags` together with other ways of tagging volumes will cause inconsistent behavior and resource cycling. + * `root_block_device` - (Optional) Customize details about the root block device of the instance. See [Block Devices](#block-devices) below for details. * `ebs_block_device` - (Optional) Additional EBS block devices to attach to the From ed08c300437fd32fa3c110ca37304aa3c011ecb0 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Tue, 12 Jan 2021 11:32:37 -0500 Subject: [PATCH 0609/1212] resource/instance: Fix linter issues --- aws/resource_aws_instance_test.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/aws/resource_aws_instance_test.go b/aws/resource_aws_instance_test.go index ca7b2697355..125d357bf71 100644 --- a/aws/resource_aws_instance_test.go +++ b/aws/resource_aws_instance_test.go @@ -4088,7 +4088,7 @@ resource "aws_volume_attachment" "test" { } func testAccCheckInstanceConfigBlockDeviceRootTagsConflictWithVolumeTags() string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id @@ -4112,11 +4112,11 @@ resource "aws_instance" "test" { Name = "volume-tags" } } -`)) +`) } func testAccCheckInstanceConfigBlockDeviceEbsTagsConflictWithVolumeTags() string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id @@ -4140,7 +4140,7 @@ resource "aws_instance" "test" { Name = "volume-tags" } } -`)) +`) } func testAccCheckInstanceConfigNoVolumeTags() string { @@ -4182,7 +4182,7 @@ resource "aws_instance" "test" { } func testAccCheckInstanceConfigBlockDeviceNoRootVolumeTags() string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id @@ -4211,11 +4211,11 @@ resource "aws_instance" "test" { virtual_name = "ephemeral0" } } -`)) +`) } func 
testAccCheckInstanceConfigBlockDeviceCreateVolumeTags() string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id @@ -4249,11 +4249,11 @@ resource "aws_instance" "test" { virtual_name = "ephemeral0" } } -`)) +`) } func testAccCheckInstanceConfigBlockDeviceUpdateVolumeTags() string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id @@ -4288,7 +4288,7 @@ resource "aws_instance" "test" { virtual_name = "ephemeral0" } } -`)) +`) } var testAccCheckInstanceConfigEBSBlockDeviceInvalidIops = composeConfig(testAccAwsEc2InstanceAmiWithEbsRootVolume, ` From 186bce7ec4c7201cfb66767d5d4a12969dd92879 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Tue, 12 Jan 2021 13:43:28 -0500 Subject: [PATCH 0610/1212] resource/instance: Organize attributes --- aws/resource_aws_instance.go | 682 ++++++++++++++++------------------- 1 file changed, 311 insertions(+), 371 deletions(-) diff --git a/aws/resource_aws_instance.go b/aws/resource_aws_instance.go index c9fc5cb7e04..1e637b5a743 100644 --- a/aws/resource_aws_instance.go +++ b/aws/resource_aws_instance.go @@ -52,295 +52,72 @@ func resourceAwsInstance() *schema.Resource { Required: true, ForceNew: true, }, - "arn": { Type: schema.TypeString, Computed: true, }, - "associate_public_ip_address": { Type: schema.TypeBool, ForceNew: true, Computed: true, Optional: true, }, - "availability_zone": { Type: schema.TypeString, Optional: true, Computed: true, ForceNew: true, }, - - "placement_group": { - Type: schema.TypeString, + "cpu_core_count": { + Type: schema.TypeInt, Optional: true, Computed: true, ForceNew: true, }, - - "instance_type": { - Type: schema.TypeString, - Required: true, - }, - - "key_name": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "get_password_data": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "password_data": { - Type: schema.TypeString, - Computed: true, - }, - - "subnet_id": { - Type: schema.TypeString, + "cpu_threads_per_core": { + Type: schema.TypeInt, Optional: true, Computed: true, ForceNew: true, }, - - "private_ip": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - ValidateFunc: validation.Any( - validation.StringIsEmpty, - validation.IsIPv4Address, - ), - }, - - "secondary_private_ips": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.IsIPv4Address, - }, - }, - - "source_dest_check": { - Type: schema.TypeBool, + "credit_specification": { + Type: schema.TypeList, Optional: true, - Default: true, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - // Suppress diff if network_interface is set - _, ok := d.GetOk("network_interface") - return ok - }, - }, - - "user_data": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"user_data_base64"}, + MaxItems: 1, DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - // Sometimes the EC2 API responds with the equivalent, empty SHA1 sum - // echo -n "" | shasum - if (old == "da39a3ee5e6b4b0d3255bfef95601890afd80709" && new == "") || - (old == "" && new == 
"da39a3ee5e6b4b0d3255bfef95601890afd80709") { + if old == "1" && new == "0" { return true } return false }, - StateFunc: func(v interface{}) string { - switch v := v.(type) { - case string: - return userDataHashSum(v) - default: - return "" - } - }, - ValidateFunc: validation.StringLenBetween(0, 16384), - }, - - "user_data_base64": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"user_data"}, - ValidateFunc: func(v interface{}, name string) (warns []string, errs []error) { - s := v.(string) - if !isBase64Encoded([]byte(s)) { - errs = append(errs, fmt.Errorf( - "%s: must be base64-encoded", name, - )) - } - return - }, - }, - - "security_groups": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "vpc_security_group_ids": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "public_dns": { - Type: schema.TypeString, - Computed: true, - }, - - "outpost_arn": { - Type: schema.TypeString, - Computed: true, - }, - - "primary_network_interface_id": { - Type: schema.TypeString, - Computed: true, - }, - - "network_interface": { - ConflictsWith: []string{"associate_public_ip_address", "subnet_id", "private_ip", "secondary_private_ips", "vpc_security_group_ids", "security_groups", "ipv6_addresses", "ipv6_address_count", "source_dest_check"}, - Type: schema.TypeSet, - Optional: true, - Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "delete_on_termination": { - Type: schema.TypeBool, - Default: false, - Optional: true, - ForceNew: true, - }, - "network_interface_id": { + "cpu_credits": { Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "device_index": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, + Optional: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + // Only work with existing instances + if d.Id() == "" { + return false + } + // Only work with missing configurations + if new != "" { + return false + } + // Only work when already set in Terraform state + if old == "" { + return false + } + return true + }, }, }, }, }, - - "public_ip": { - Type: schema.TypeString, - Computed: true, - }, - - "instance_state": { - Type: schema.TypeString, - Computed: true, - }, - - "private_dns": { - Type: schema.TypeString, - Computed: true, - }, - - "ebs_optimized": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - "disable_api_termination": { Type: schema.TypeBool, Optional: true, }, - - "instance_initiated_shutdown_behavior": { - Type: schema.TypeString, - Optional: true, - }, - - "hibernation": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - - "monitoring": { - Type: schema.TypeBool, - Optional: true, - }, - - "iam_instance_profile": { - Type: schema.TypeString, - Optional: true, - }, - - "ipv6_address_count": { - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "ipv6_addresses": { - Type: schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.IsIPv6Address, - }, - }, - - "tenancy": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{ - ec2.TenancyDedicated, - ec2.TenancyDefault, - ec2.TenancyHost, - }, false), - }, - 
"host_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "cpu_core_count": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "cpu_threads_per_core": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "tags": tagsSchema(), - - "volume_tags": tagsSchema(), - "ebs_block_device": { Type: schema.TypeSet, Optional: true, @@ -353,20 +130,17 @@ func resourceAwsInstance() *schema.Resource { Default: true, ForceNew: true, }, - "device_name": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "encrypted": { Type: schema.TypeBool, Optional: true, Computed: true, ForceNew: true, }, - "iops": { Type: schema.TypeInt, Optional: true, @@ -374,21 +148,19 @@ func resourceAwsInstance() *schema.Resource { ForceNew: true, DiffSuppressFunc: iopsDiffSuppressFunc, }, - "kms_key_id": { Type: schema.TypeString, Optional: true, Computed: true, ForceNew: true, }, - "snapshot_id": { Type: schema.TypeString, Optional: true, Computed: true, ForceNew: true, }, - + "tags": tagsSchemaConflictsWith([]string{"volume_tags"}), "throughput": { Type: schema.TypeInt, Optional: true, @@ -396,15 +168,16 @@ func resourceAwsInstance() *schema.Resource { ForceNew: true, DiffSuppressFunc: throughputDiffSuppressFunc, }, - "tags": tagsSchemaConflictsWith([]string{"volume_tags"}), - + "volume_id": { + Type: schema.TypeString, + Computed: true, + }, "volume_size": { Type: schema.TypeInt, Optional: true, Computed: true, ForceNew: true, }, - "volume_type": { Type: schema.TypeString, Optional: true, @@ -412,11 +185,6 @@ func resourceAwsInstance() *schema.Resource { ForceNew: true, ValidateFunc: validation.StringInSlice(ec2.VolumeType_Values(), false), }, - - "volume_id": { - Type: schema.TypeString, - Computed: true, - }, }, }, Set: func(v interface{}) int { @@ -427,7 +195,27 @@ func resourceAwsInstance() *schema.Resource { return hashcode.String(buf.String()) }, }, - + "ebs_optimized": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + "enclave_options": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + }, + }, "ephemeral_block_device": { Type: schema.TypeSet, Optional: true, @@ -435,34 +223,183 @@ func resourceAwsInstance() *schema.Resource { ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "device_name": { - Type: schema.TypeString, + "device_name": { + Type: schema.TypeString, + Required: true, + }, + "no_device": { + Type: schema.TypeBool, + Optional: true, + }, + "virtual_name": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["virtual_name"].(string))) + if v, ok := m["no_device"].(bool); ok && v { + buf.WriteString(fmt.Sprintf("%t-", v)) + } + return hashcode.String(buf.String()) + }, + }, + "get_password_data": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "hibernation": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + "host_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "iam_instance_profile": { + Type: schema.TypeString, + Optional: true, + }, + 
"instance_initiated_shutdown_behavior": { + Type: schema.TypeString, + Optional: true, + }, + "instance_state": { + Type: schema.TypeString, + Computed: true, + }, + "instance_type": { + Type: schema.TypeString, + Required: true, + }, + "ipv6_address_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + }, + "ipv6_addresses": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.IsIPv6Address, + }, + }, + "key_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "metadata_options": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "http_endpoint": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{ec2.InstanceMetadataEndpointStateEnabled, ec2.InstanceMetadataEndpointStateDisabled}, false), + }, + "http_put_response_hop_limit": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntBetween(1, 64), + }, + "http_tokens": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{ec2.HttpTokensStateOptional, ec2.HttpTokensStateRequired}, false), + }, + }, + }, + }, + "monitoring": { + Type: schema.TypeBool, + Optional: true, + }, + "network_interface": { + ConflictsWith: []string{"associate_public_ip_address", "subnet_id", "private_ip", "secondary_private_ips", "vpc_security_group_ids", "security_groups", "ipv6_addresses", "ipv6_address_count", "source_dest_check"}, + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "delete_on_termination": { + Type: schema.TypeBool, + Default: false, + Optional: true, + ForceNew: true, + }, + "device_index": { + Type: schema.TypeInt, Required: true, + ForceNew: true, }, - - "virtual_name": { + "network_interface_id": { Type: schema.TypeString, - Optional: true, - }, - - "no_device": { - Type: schema.TypeBool, - Optional: true, + Required: true, + ForceNew: true, }, }, }, - Set: func(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["device_name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["virtual_name"].(string))) - if v, ok := m["no_device"].(bool); ok && v { - buf.WriteString(fmt.Sprintf("%t-", v)) - } - return hashcode.String(buf.String()) - }, }, - + "outpost_arn": { + Type: schema.TypeString, + Computed: true, + }, + "password_data": { + Type: schema.TypeString, + Computed: true, + }, + "placement_group": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "primary_network_interface_id": { + Type: schema.TypeString, + Computed: true, + }, + "private_dns": { + Type: schema.TypeString, + Computed: true, + }, + "private_ip": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + ValidateFunc: validation.Any( + validation.StringIsEmpty, + validation.IsIPv4Address, + ), + }, + "public_dns": { + Type: schema.TypeString, + Computed: true, + }, + "public_ip": { + Type: schema.TypeString, + Computed: true, + }, "root_block_device": { Type: schema.TypeList, Optional: true, @@ -478,141 +415,144 @@ func resourceAwsInstance() *schema.Resource { Optional: true, Default: true, }, - "device_name": { Type: schema.TypeString, 
Computed: true, }, - "encrypted": { Type: schema.TypeBool, Optional: true, Computed: true, ForceNew: true, }, - - "kms_key_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "iops": { Type: schema.TypeInt, Optional: true, Computed: true, DiffSuppressFunc: iopsDiffSuppressFunc, }, - + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "tags": tagsSchemaConflictsWith([]string{"volume_tags"}), "throughput": { Type: schema.TypeInt, Optional: true, Computed: true, DiffSuppressFunc: throughputDiffSuppressFunc, }, - "tags": tagsSchemaConflictsWith([]string{"volume_tags"}), - + "volume_id": { + Type: schema.TypeString, + Computed: true, + }, "volume_size": { Type: schema.TypeInt, Optional: true, Computed: true, }, - "volume_type": { Type: schema.TypeString, Optional: true, Computed: true, ValidateFunc: validation.StringInSlice(ec2.VolumeType_Values(), false), }, - - "volume_id": { - Type: schema.TypeString, - Computed: true, - }, }, }, }, - - "credit_specification": { - Type: schema.TypeList, + "secondary_private_ips": { + Type: schema.TypeSet, Optional: true, - MaxItems: 1, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.IsIPv4Address, + }, + }, + "security_groups": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "source_dest_check": { + Type: schema.TypeBool, + Optional: true, + Default: true, DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if old == "1" && new == "0" { + // Suppress diff if network_interface is set + _, ok := d.GetOk("network_interface") + return ok + }, + }, + "subnet_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "tags": tagsSchema(), + "tenancy": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + ec2.TenancyDedicated, + ec2.TenancyDefault, + ec2.TenancyHost, + }, false), + }, + "user_data": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"user_data_base64"}, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + // Sometimes the EC2 API responds with the equivalent, empty SHA1 sum + // echo -n "" | shasum + if (old == "da39a3ee5e6b4b0d3255bfef95601890afd80709" && new == "") || + (old == "" && new == "da39a3ee5e6b4b0d3255bfef95601890afd80709") { return true } return false }, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu_credits": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - // Only work with existing instances - if d.Id() == "" { - return false - } - // Only work with missing configurations - if new != "" { - return false - } - // Only work when already set in Terraform state - if old == "" { - return false - } - return true - }, - }, - }, + StateFunc: func(v interface{}) string { + switch v := v.(type) { + case string: + return userDataHashSum(v) + default: + return "" + } }, + ValidateFunc: validation.StringLenBetween(0, 16384), }, - - "metadata_options": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "http_endpoint": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: 
validation.StringInSlice([]string{ec2.InstanceMetadataEndpointStateEnabled, ec2.InstanceMetadataEndpointStateDisabled}, false), - }, - "http_tokens": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice([]string{ec2.HttpTokensStateOptional, ec2.HttpTokensStateRequired}, false), - }, - "http_put_response_hop_limit": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ValidateFunc: validation.IntBetween(1, 64), - }, - }, + "user_data_base64": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"user_data"}, + ValidateFunc: func(v interface{}, name string) (warns []string, errs []error) { + s := v.(string) + if !isBase64Encoded([]byte(s)) { + errs = append(errs, fmt.Errorf( + "%s: must be base64-encoded", name, + )) + } + return }, }, - - "enclave_options": { - Type: schema.TypeList, + "volume_tags": tagsSchema(), + "vpc_security_group_ids": { + Type: schema.TypeSet, Optional: true, Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - }, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, }, }, } From 3c193f250db8e81dc262f6f232fad26c59ebf07e Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Tue, 12 Jan 2021 14:43:48 -0500 Subject: [PATCH 0611/1212] tests/resource/instance: Clean up composeConfig() use --- aws/resource_aws_instance_test.go | 358 ++++++++++++++++++++---------- 1 file changed, 245 insertions(+), 113 deletions(-) diff --git a/aws/resource_aws_instance_test.go b/aws/resource_aws_instance_test.go index 125d357bf71..fe45bdfafbf 100644 --- a/aws/resource_aws_instance_test.go +++ b/aws/resource_aws_instance_test.go @@ -3489,8 +3489,9 @@ func testAccAvailableAZsWavelengthZonesDefaultExcludeConfig() string { } func testAccInstanceConfigInDefaultVpcBySgName(rName string) string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + - testAccLatestAmazonLinuxHvmEbsAmiConfig() + + return composeConfig( + testAccAvailableAZsNoOptInDefaultExcludeConfig(), + testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` data "aws_vpc" "default" { default = true @@ -3508,12 +3509,13 @@ resource "aws_instance" "test" { security_groups = [aws_security_group.test.name] availability_zone = data.aws_availability_zones.available.names[0] } -`, rName) +`, rName)) } func testAccInstanceConfigInDefaultVpcBySgId(rName string) string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + - testAccLatestAmazonLinuxHvmEbsAmiConfig() + + return composeConfig( + testAccAvailableAZsNoOptInDefaultExcludeConfig(), + testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` data "aws_vpc" "default" { default = true @@ -3531,7 +3533,7 @@ resource "aws_instance" "test" { vpc_security_group_ids = [aws_security_group.test.id] availability_zone = data.aws_availability_zones.available.names[0] } -`, rName) +`, rName)) } func testAccInstanceConfigInEc2Classic() string { @@ -3562,7 +3564,10 @@ resource "aws_instance" "test" { } func testAccInstanceConfigAtLeastOneOtherEbsVolume(rName string) string { - return composeConfig(testAccLatestAmazonLinuxHvmInstanceStoreAmiConfig(), testAccAwsInstanceVpcConfig(rName, false), fmt.Sprintf(` + return composeConfig( + testAccLatestAmazonLinuxHvmInstanceStoreAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + fmt.Sprintf(` # Ensure that there is at least 1 EBS volume in the current region. 
# See https://github.com/hashicorp/terraform/issues/1249. resource "aws_ebs_volume" "test" { @@ -3592,7 +3597,10 @@ resource "aws_instance" "test" { } func testAccInstanceConfigWithUserDataBase64(rName string) string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), testAccAwsInstanceVpcConfig(rName, false), ` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id subnet_id = aws_subnet.test.id @@ -3604,7 +3612,10 @@ resource "aws_instance" "test" { } func testAccInstanceConfigWithSmallInstanceType(rName string) string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), testAccAwsInstanceVpcConfig(rName, false), ` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id subnet_id = aws_subnet.test.id @@ -3619,7 +3630,10 @@ resource "aws_instance" "test" { } func testAccInstanceConfigUpdateInstanceType(rName string) string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), testAccAwsInstanceVpcConfig(rName, false), ` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id subnet_id = aws_subnet.test.id @@ -3677,7 +3691,8 @@ resource "aws_instance" "test" { } func testAccInstanceConfigNoAMIEphemeralDevices() string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), testAccAvailableEc2InstanceTypeForRegion("t3.micro", "t2.micro"), ` resource "aws_instance" "test" { @@ -3720,8 +3735,7 @@ func testAccAwsEc2InstanceRootBlockDeviceWithIOPS(size, delete, volumeType, iops if iops == "" { iops = "null" } - return composeConfig(testAccAwsEc2InstanceAmiWithEbsRootVolume, - fmt.Sprintf(` + return composeConfig(testAccAwsEc2InstanceAmiWithEbsRootVolume, fmt.Sprintf(` resource "aws_instance" "test" { ami = data.aws_ami.ami.id @@ -3741,8 +3755,7 @@ func testAccAwsEc2InstanceRootBlockDeviceWithThroughput(size, delete, volumeType if throughput == "" { throughput = "null" } - return composeConfig(testAccAwsEc2InstanceAmiWithEbsRootVolume, - fmt.Sprintf(` + return composeConfig(testAccAwsEc2InstanceAmiWithEbsRootVolume, fmt.Sprintf(` resource "aws_instance" "test" { ami = data.aws_ami.ami.id @@ -3838,39 +3851,51 @@ resource "aws_instance" "test" { } func testAccInstanceConfigSourceDestEnable(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + ` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.small" subnet_id = aws_subnet.test.id } -` +`) } func testAccInstanceConfigSourceDestDisable(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + ` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.small" subnet_id = aws_subnet.test.id source_dest_check = false } -` +`) } func 
testAccInstanceConfigDisableAPITermination(rName string, val bool) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + fmt.Sprintf(` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + fmt.Sprintf(` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.small" subnet_id = aws_subnet.test.id disable_api_termination = %[1]t } -`, val) +`, val)) } func testAccEc2InstanceConfigDedicatedInstance(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + ` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id @@ -3882,13 +3907,11 @@ resource "aws_instance" "test" { # pre-encoded base64 data user_data = "3dc39dda39be1205215e776bad998da361a5955d" } -` +`) } func testAccInstanceConfigOutpost() string { - return composeConfig( - testAccLatestAmazonLinuxHvmEbsAmiConfig(), - ` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` data "aws_outposts_outposts" "test" {} data "aws_outposts_outpost" "test" { @@ -3924,7 +3947,10 @@ resource "aws_instance" "test" { } func testAccInstanceConfigPlacementGroup(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + fmt.Sprintf(` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + fmt.Sprintf(` resource "aws_placement_group" "test" { name = %[1]q strategy = "cluster" @@ -3941,11 +3967,14 @@ resource "aws_instance" "test" { # pre-encoded base64 data user_data = "3dc39dda39be1205215e776bad998da361a5955d" } -`, rName) +`, rName)) } func testAccInstanceConfigIpv6ErrorConfig(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcIpv6Config(rName) + fmt.Sprintf(` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcIpv6Config(rName), + fmt.Sprintf(` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" @@ -3957,11 +3986,14 @@ resource "aws_instance" "test" { Name = %[1]q } } -`, rName) +`, rName)) } func testAccInstanceConfigIpv6Support(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcIpv6Config(rName) + fmt.Sprintf(` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcIpv6Config(rName), + fmt.Sprintf(` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" @@ -3972,11 +4004,14 @@ resource "aws_instance" "test" { Name = %[1]q } } -`, rName) +`, rName)) } func testAccInstanceConfigIpv6SupportWithIpv4(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcIpv6Config(rName) + fmt.Sprintf(` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcIpv6Config(rName), + fmt.Sprintf(` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" @@ -3988,7 +4023,7 @@ resource "aws_instance" "test" { Name = %[1]q } } -`, rName) +`, rName)) } func testAccCheckInstanceConfigTags() string { @@ -4033,7 +4068,10 @@ resource "aws_instance" "test" { } func 
testAccInstanceConfigRootBlockDeviceKmsKeyArn(rName string) string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), testAccAwsInstanceVpcConfig(rName, false), ` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + ` resource "aws_kms_key" "test" { deletion_window_in_days = 7 } @@ -4420,7 +4458,7 @@ resource "aws_instance" "test" { } func testAccInstanceConfigWithoutInstanceProfile(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` resource "aws_iam_role" "test" { name = %[1]q @@ -4453,11 +4491,11 @@ resource "aws_instance" "test" { Name = %[1]q } } -`, rName) +`, rName)) } func testAccInstanceConfigWithInstanceProfile(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` resource "aws_iam_role" "test" { name = %[1]q @@ -4496,33 +4534,42 @@ resource "aws_instance" "test" { Name = %[1]q } } -`, rName) +`, rName)) } func testAccInstanceConfigPrivateIP(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + ` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" subnet_id = aws_subnet.test.id private_ip = "10.1.1.42" } -` +`) } func testAccInstanceConfigEmptyPrivateIP(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + ` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" subnet_id = aws_subnet.test.id private_ip = "" } -` +`) } func testAccInstanceConfigAssociatePublicIPAndPrivateIP(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + ` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" @@ -4530,14 +4577,15 @@ resource "aws_instance" "test" { associate_public_ip_address = true private_ip = "10.1.1.42" } -` +`) } func testAccInstanceNetworkInstanceSecurityGroups(rName string) string { return composeConfig( testAccLatestAmazonLinuxHvmEbsAmiConfig(), testAccAwsInstanceVpcConfig(rName, false), - testAccAwsInstanceVpcSecurityGroupConfig(rName), ` + testAccAwsInstanceVpcSecurityGroupConfig(rName), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" @@ -4559,7 +4607,8 @@ func testAccInstanceNetworkInstanceVPCSecurityGroupIDs(rName string) string { return composeConfig( testAccLatestAmazonLinuxHvmEbsAmiConfig(), testAccAwsInstanceVpcConfig(rName, false), - testAccAwsInstanceVpcSecurityGroupConfig(rName), ` + testAccAwsInstanceVpcSecurityGroupConfig(rName), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" @@ -4580,7 +4629,8 @@ func testAccInstanceNetworkInstanceVPCRemoveSecurityGroupIDs(rName string) strin return composeConfig( 
testAccLatestAmazonLinuxHvmEbsAmiConfig(), testAccAwsInstanceVpcConfig(rName, false), - testAccAwsInstanceVpcSecurityGroupConfig(rName), ` + testAccAwsInstanceVpcSecurityGroupConfig(rName), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" @@ -4598,7 +4648,7 @@ resource "aws_eip" "test" { } func testAccInstanceConfigKeyPair(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` resource "aws_key_pair" "test" { key_name = %[1]q public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD3F6tyPEFEzV0LX3X8BsXdMsQz1x2cEikKDEY0aIj41qgxMCP/iteneqXSIFZBp5vizPvaoIR3Um9xK7PGoW8giupGn+EPuxIA4cDM4vzOqOkiMPhz5XK0whEjkVzTo4+S0puvDZuwIsdiW9mxhJc7tgBNL0cYlWSYVkz4G/fslNfRPW5mYAM49f4fhtxPb5ok4Q2Lg9dPKVHO/Bgeu5woMc7RY0p1ej6D4CKFE6lymSDJpW0YHX/wqE9+cfEauh7xZcG0q9t2ta6F6fmX0agvpFyZo8aFbXeUBr7osSCJNgvavWbM/06niWrOvYX2xwWdhXmXSrbX8ZbabVohBK41 phodgson@thoughtworks.com" @@ -4613,11 +4663,11 @@ resource "aws_instance" "test" { Name = %[1]q } } -`, rName) +`, rName)) } func testAccInstanceConfigRootBlockDeviceMismatch(rName string) string { - return testAccAwsInstanceVpcConfig(rName, false) + ` + return composeConfig(testAccAwsInstanceVpcConfig(rName, false), ` resource "aws_instance" "test" { # This is an AMI in UsWest2 with RootDeviceName: "/dev/sda1"; actual root: "/dev/sda" ami = "ami-ef5b69df" @@ -4630,31 +4680,40 @@ resource "aws_instance" "test" { volume_size = 13 } } -` //lintignore:AWSAT002 +`) //lintignore:AWSAT002 } func testAccInstanceConfigForceNewAndTagsDrift(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + ` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.nano" subnet_id = aws_subnet.test.id } -` +`) } func testAccInstanceConfigForceNewAndTagsDrift_Update(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + ` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" subnet_id = aws_subnet.test.id } -` +`) } func testAccInstanceConfigPrimaryNetworkInterface(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + fmt.Sprintf(` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + fmt.Sprintf(` resource "aws_network_interface" "test" { subnet_id = aws_subnet.test.id private_ips = ["10.1.1.42"] @@ -4673,11 +4732,14 @@ resource "aws_instance" "test" { device_index = 0 } } -`, rName) +`, rName)) } func testAccInstanceConfigPrimaryNetworkInterfaceSourceDestCheck(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + fmt.Sprintf(` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + fmt.Sprintf(` resource "aws_network_interface" "test" { subnet_id = aws_subnet.test.id private_ips = ["10.1.1.42"] @@ -4697,11 +4759,14 @@ resource "aws_instance" "test" { device_index = 0 } } -`, rName) +`, rName)) } func 
testAccInstanceConfigAddSecondaryNetworkInterfaceBefore(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + fmt.Sprintf(` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + fmt.Sprintf(` resource "aws_network_interface" "primary" { subnet_id = aws_subnet.test.id private_ips = ["10.1.1.42"] @@ -4729,11 +4794,14 @@ resource "aws_instance" "test" { device_index = 0 } } -`, rName) +`, rName)) } func testAccInstanceConfigAddSecondaryNetworkInterfaceAfter(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + fmt.Sprintf(` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + fmt.Sprintf(` resource "aws_network_interface" "primary" { subnet_id = aws_subnet.test.id private_ips = ["10.1.1.42"] @@ -4767,11 +4835,14 @@ resource "aws_instance" "test" { device_index = 0 } } -`, rName) +`, rName)) } func testAccInstanceConfigAddSecurityGroupBefore(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + fmt.Sprintf(` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + fmt.Sprintf(` resource "aws_subnet" "test2" { cidr_block = "10.1.2.0/24" vpc_id = aws_vpc.test.id @@ -4824,11 +4895,14 @@ resource "aws_network_interface" "test" { Name = %[1]q } } -`, rName) +`, rName)) } func testAccInstanceConfigAddSecurityGroupAfter(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + fmt.Sprintf(` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + fmt.Sprintf(` resource "aws_subnet" "test2" { cidr_block = "10.1.2.0/24" vpc_id = aws_vpc.test.id @@ -4882,11 +4956,14 @@ resource "aws_network_interface" "test" { Name = %[1]q } } -`, rName) +`, rName)) } func testAccInstanceConfigPublicAndPrivateSecondaryIPs(rName string, isPublic bool) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + fmt.Sprintf(` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + fmt.Sprintf(` resource "aws_security_group" "test" { vpc_id = aws_vpc.test.id description = "%[1]s" @@ -4910,11 +4987,14 @@ resource "aws_instance" "test" { Name = %[1]q } } -`, rName, isPublic) +`, rName, isPublic)) } func testAccInstanceConfigPrivateIPAndSecondaryIPs(rName, privateIP, secondaryIPs string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + fmt.Sprintf(` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + fmt.Sprintf(` resource "aws_security_group" "test" { vpc_id = aws_vpc.test.id description = "%[1]s" @@ -4937,11 +5017,14 @@ resource "aws_instance" "test" { Name = %[1]q } } -`, rName, privateIP, secondaryIPs) +`, rName, privateIP, secondaryIPs)) } func testAccInstanceConfig_associatePublic_defaultPrivate(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + fmt.Sprintf(` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + fmt.Sprintf(` resource "aws_instance" "test" { ami = 
data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" @@ -4951,11 +5034,14 @@ resource "aws_instance" "test" { Name = %[1]q } } -`, rName) +`, rName)) } func testAccInstanceConfig_associatePublic_defaultPublic(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, true) + fmt.Sprintf(` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, true), + fmt.Sprintf(` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" @@ -4965,11 +5051,14 @@ resource "aws_instance" "test" { Name = %[1]q } } -`, rName) +`, rName)) } func testAccInstanceConfig_associatePublic_explicitPublic(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, true) + fmt.Sprintf(` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, true), + fmt.Sprintf(` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" @@ -4980,11 +5069,14 @@ resource "aws_instance" "test" { Name = %[1]q } } -`, rName) +`, rName)) } func testAccInstanceConfig_associatePublic_explicitPrivate(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, true) + fmt.Sprintf(` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, true), + fmt.Sprintf(` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" @@ -4995,11 +5087,14 @@ resource "aws_instance" "test" { Name = %[1]q } } -`, rName) +`, rName)) } func testAccInstanceConfig_associatePublic_overridePublic(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + fmt.Sprintf(` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + fmt.Sprintf(` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" @@ -5010,11 +5105,14 @@ resource "aws_instance" "test" { Name = %[1]q } } -`, rName) +`, rName)) } func testAccInstanceConfig_associatePublic_overridePrivate(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, true) + fmt.Sprintf(` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, true), + fmt.Sprintf(` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" @@ -5025,11 +5123,11 @@ resource "aws_instance" "test" { Name = %[1]q } } -`, rName) +`, rName)) } func testAccInstanceConfig_getPasswordData(rName string, val bool) string { - return testAccLatestWindowsServer2016CoreAmiConfig() + fmt.Sprintf(` + return composeConfig(testAccLatestWindowsServer2016CoreAmiConfig(), fmt.Sprintf(` resource "aws_key_pair" "test" { key_name = %[1]q public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAq6U3HQYC4g8WzU147gZZ7CKQH8TgYn3chZGRPxaGmHW1RUwsyEs0nmombmIhwxudhJ4ehjqXsDLoQpd6+c7BuLgTMvbv8LgE9LX53vnljFe1dsObsr/fYLvpU9LTlo8HgHAqO5ibNdrAUvV31ronzCZhms/Gyfdaue88Fd0/YnsZVGeOZPayRkdOHSpqme2CBrpa8myBeL1CWl0LkDG4+YCURjbaelfyZlIApLYKy3FcCan9XQFKaL32MJZwCgzfOvWIMtYcU8QtXMgnA3/I3gXk8YDUJv5P4lj0s/PJXuTM8DygVAUtebNwPuinS7wwonm5FXcWMuVGsVpG5K7FGQ== tf-acc-winpasswordtest" @@ -5042,11 +5140,14 @@ resource "aws_instance" "test" 
{ get_password_data = %[2]t } -`, rName, val) +`, rName, val)) } func testAccInstanceConfig_CreditSpecification_Empty_NonBurstable(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + ` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "m5.large" @@ -5054,41 +5155,53 @@ resource "aws_instance" "test" { credit_specification {} } -` +`) } func testAccInstanceConfig_CreditSpecification_Unspecified_NonBurstable(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + ` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "m5.large" subnet_id = aws_subnet.test.id } -` +`) } func testAccInstanceConfig_creditSpecification_unspecified(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + ` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" subnet_id = aws_subnet.test.id } -` +`) } func testAccInstanceConfig_creditSpecification_unspecified_t3(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + ` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t3.micro" subnet_id = aws_subnet.test.id } -` +`) } func testAccInstanceConfig_creditSpecification_standardCpuCredits(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + ` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" @@ -5098,11 +5211,14 @@ resource "aws_instance" "test" { cpu_credits = "standard" } } -` +`) } func testAccInstanceConfig_creditSpecification_standardCpuCredits_t3(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + ` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t3.micro" @@ -5112,11 +5228,14 @@ resource "aws_instance" "test" { cpu_credits = "standard" } } -` +`) } func testAccInstanceConfig_creditSpecification_unlimitedCpuCredits(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + ` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" @@ -5126,11 +5245,14 @@ resource "aws_instance" "test" { cpu_credits = "unlimited" } } -` +`) } func testAccInstanceConfig_creditSpecification_unlimitedCpuCredits_t3(rName string) string { - 
return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + ` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t3.micro" @@ -5140,11 +5262,14 @@ resource "aws_instance" "test" { cpu_credits = "unlimited" } } -` +`) } func testAccInstanceConfig_creditSpecification_isNotAppliedToNonBurstable(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + ` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.small" @@ -5154,11 +5279,14 @@ resource "aws_instance" "test" { cpu_credits = "standard" } } -` +`) } func testAccInstanceConfig_creditSpecification_unknownCpuCredits(rName, instanceType string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + fmt.Sprintf(` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + fmt.Sprintf(` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = %[1]q @@ -5166,28 +5294,34 @@ resource "aws_instance" "test" { credit_specification {} } -`, instanceType) +`, instanceType)) } func testAccInstanceConfig_UserData_Unspecified(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + ` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" subnet_id = aws_subnet.test.id } -` +`) } func testAccInstanceConfig_UserData_EmptyString(rName string) string { - return testAccLatestAmazonLinuxHvmEbsAmiConfig() + testAccAwsInstanceVpcConfig(rName, false) + ` + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAwsInstanceVpcConfig(rName, false), + ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" subnet_id = aws_subnet.test.id user_data = "" } -` +`) } // testAccLatestAmazonLinuxHvmEbsAmiConfig returns the configuration for a data source that @@ -5407,9 +5541,7 @@ resource "aws_subnet" "test" { } func testAccInstanceConfigHibernation(hibernation bool) string { - return composeConfig( - testAccLatestAmazonLinuxHvmEbsAmiConfig(), - fmt.Sprintf(` + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" From e0b6d42514847f5a3d6cce94c003498a2afab2de Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Tue, 12 Jan 2021 15:16:07 -0500 Subject: [PATCH 0612/1212] tests/resource/instance: Adjust naming --- aws/resource_aws_instance_test.go | 77 +++++++++++++++++++------------ 1 file changed, 47 insertions(+), 30 deletions(-) diff --git a/aws/resource_aws_instance_test.go b/aws/resource_aws_instance_test.go index fe45bdfafbf..8175fcdfecd 100644 --- a/aws/resource_aws_instance_test.go +++ b/aws/resource_aws_instance_test.go @@ -334,7 +334,7 @@ func TestAccAWSInstance_EbsBlockDevice_InvalidIopsForVolumeType(t *testing.T) { CheckDestroy: testAccCheckInstanceDestroy, Steps: []resource.TestStep{ { - Config: 
testAccCheckInstanceConfigEBSBlockDeviceInvalidIops, + Config: testAccInstanceConfigEBSBlockDeviceInvalidIops, ExpectError: regexp.MustCompile(`error creating resource: iops attribute not supported for ebs_block_device with volume_type gp2`), }, }, @@ -348,7 +348,7 @@ func TestAccAWSInstance_EbsBlockDevice_InvalidThroughputForVolumeType(t *testing CheckDestroy: testAccCheckInstanceDestroy, Steps: []resource.TestStep{ { - Config: testAccCheckInstanceConfigEBSBlockDeviceInvalidThroughput, + Config: testAccInstanceConfigEBSBlockDeviceInvalidThroughput, ExpectError: regexp.MustCompile(`error creating resource: throughput attribute not supported for ebs_block_device with volume_type gp2`), }, }, @@ -1042,7 +1042,7 @@ func TestAccAWSInstance_tags(t *testing.T) { CheckDestroy: testAccCheckInstanceDestroy, Steps: []resource.TestStep{ { - Config: testAccCheckInstanceConfigTags(), + Config: testAccInstanceConfigTags(), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), @@ -1055,7 +1055,7 @@ func TestAccAWSInstance_tags(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccCheckInstanceConfigTagsUpdate(), + Config: testAccInstanceConfigTagsUpdate(), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), @@ -1066,7 +1066,7 @@ func TestAccAWSInstance_tags(t *testing.T) { }) } -func TestAccAWSInstance_volumeTags(t *testing.T) { +func TestAccAWSInstance_blockDeviceTags_volumeTags(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" @@ -1076,7 +1076,7 @@ func TestAccAWSInstance_volumeTags(t *testing.T) { CheckDestroy: testAccCheckInstanceDestroy, Steps: []resource.TestStep{ { - Config: testAccCheckInstanceConfigNoVolumeTags(), + Config: testAccInstanceConfigBlockDeviceTagsNoVolumeTags(), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(resourceName, &v), resource.TestCheckNoResourceAttr(resourceName, "volume_tags"), @@ -1089,7 +1089,7 @@ func TestAccAWSInstance_volumeTags(t *testing.T) { ImportStateVerifyIgnore: []string{"ephemeral_block_device"}, }, { - Config: testAccCheckInstanceConfigWithVolumeTags(), + Config: testAccInstanceConfigBlockDeviceTagsVolumeTags(), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "volume_tags.%", "1"), @@ -1097,7 +1097,7 @@ func TestAccAWSInstance_volumeTags(t *testing.T) { ), }, { - Config: testAccCheckInstanceConfigWithVolumeTagsUpdate(), + Config: testAccInstanceConfigBlockDeviceTagsVolumeTagsUpdate(), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "volume_tags.%", "2"), @@ -1106,7 +1106,7 @@ func TestAccAWSInstance_volumeTags(t *testing.T) { ), }, { - Config: testAccCheckInstanceConfigNoVolumeTags(), + Config: testAccInstanceConfigBlockDeviceTagsNoVolumeTags(), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(resourceName, &v), resource.TestCheckNoResourceAttr(resourceName, "volume_tags"), @@ -1116,7 +1116,7 @@ func TestAccAWSInstance_volumeTags(t *testing.T) { }) } -func TestAccAWSInstance_volumeTagsWithAttachedVolume(t *testing.T) { +func TestAccAWSInstance_blockDeviceTags_withAttachedVolume(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" @@ -1126,7 +1126,7 @@ func TestAccAWSInstance_volumeTagsWithAttachedVolume(t *testing.T) { CheckDestroy: 
testAccCheckInstanceDestroy, Steps: []resource.TestStep{ { - Config: testAccCheckInstanceConfigWithAttachedVolume(), + Config: testAccInstanceConfigBlockDeviceTagsAttachedVolumeWithTags(), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(resourceName, &v), ), @@ -1140,7 +1140,24 @@ func TestAccAWSInstance_volumeTagsWithAttachedVolume(t *testing.T) { }) } -func TestAccAWSInstance_blockDeviceVolumeTags(t *testing.T) { +/* +TestAccAWSInstance_blockDeviceTags_volumeTags + testAccInstanceConfigBlockDeviceTagsVolumeTagsUpdate + testAccInstanceConfigBlockDeviceTagsVolumeTags + testAccInstanceConfigBlockDeviceTagsNoVolumeTags + +TestAccAWSInstance_blockDeviceTags_ebsAndRoot + testAccInstanceConfigBlockDeviceTagsEBSAndRootTagsUpdate + testAccInstanceConfigBlockDeviceTagsEBSAndRootTags + testAccInstanceConfigBlockDeviceTagsEBSTags + testAccInstanceConfigBlockDeviceTagsEBSTagsConflict + testAccInstanceConfigBlockDeviceTagsRootTagsConflict + +TestAccAWSInstance_blockDeviceTags_withAttachedVolume + testAccInstanceConfigBlockDeviceTagsAttachedVolumeWithTags +*/ + +func TestAccAWSInstance_blockDeviceTags_ebsAndRoot(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" @@ -1150,15 +1167,15 @@ func TestAccAWSInstance_blockDeviceVolumeTags(t *testing.T) { CheckDestroy: testAccCheckInstanceDestroy, Steps: []resource.TestStep{ { - Config: testAccCheckInstanceConfigBlockDeviceRootTagsConflictWithVolumeTags(), + Config: testAccInstanceConfigBlockDeviceTagsRootTagsConflict(), ExpectError: regexp.MustCompile(`"root_block_device\.0\.tags": conflicts with volume_tags`), }, { - Config: testAccCheckInstanceConfigBlockDeviceEbsTagsConflictWithVolumeTags(), + Config: testAccInstanceConfigBlockDeviceTagsEBSTagsConflict(), ExpectError: regexp.MustCompile(`"ebs_block_device\.0\.tags": conflicts with volume_tags`), }, { - Config: testAccCheckInstanceConfigBlockDeviceNoRootVolumeTags(), + Config: testAccInstanceConfigBlockDeviceTagsEBSTags(), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "root_block_device.0.tags.%", "0"), @@ -1168,7 +1185,7 @@ func TestAccAWSInstance_blockDeviceVolumeTags(t *testing.T) { ), }, { - Config: testAccCheckInstanceConfigBlockDeviceCreateVolumeTags(), + Config: testAccInstanceConfigBlockDeviceTagsEBSAndRootTags(), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "root_block_device.0.tags.%", "2"), @@ -1177,7 +1194,7 @@ func TestAccAWSInstance_blockDeviceVolumeTags(t *testing.T) { ), }, { - Config: testAccCheckInstanceConfigBlockDeviceUpdateVolumeTags(), + Config: testAccInstanceConfigBlockDeviceTagsEBSAndRootTagsUpdate(), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "root_block_device.0.tags.%", "2"), @@ -4026,7 +4043,7 @@ resource "aws_instance" "test" { `, rName)) } -func testAccCheckInstanceConfigTags() string { +func testAccInstanceConfigTags() string { return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id @@ -4090,7 +4107,7 @@ resource "aws_instance" "test" { `) } -func testAccCheckInstanceConfigWithAttachedVolume() string { +func testAccInstanceConfigBlockDeviceTagsAttachedVolumeWithTags() string { return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = 
data.aws_ami.amzn-ami-minimal-hvm-ebs.id @@ -4125,7 +4142,7 @@ resource "aws_volume_attachment" "test" { `) } -func testAccCheckInstanceConfigBlockDeviceRootTagsConflictWithVolumeTags() string { +func testAccInstanceConfigBlockDeviceTagsRootTagsConflict() string { return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id @@ -4153,7 +4170,7 @@ resource "aws_instance" "test" { `) } -func testAccCheckInstanceConfigBlockDeviceEbsTagsConflictWithVolumeTags() string { +func testAccInstanceConfigBlockDeviceTagsEBSTagsConflict() string { return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id @@ -4181,7 +4198,7 @@ resource "aws_instance" "test" { `) } -func testAccCheckInstanceConfigNoVolumeTags() string { +func testAccInstanceConfigBlockDeviceTagsNoVolumeTags() string { return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id @@ -4219,7 +4236,7 @@ resource "aws_instance" "test" { `) } -func testAccCheckInstanceConfigBlockDeviceNoRootVolumeTags() string { +func testAccInstanceConfigBlockDeviceTagsEBSTags() string { return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id @@ -4252,7 +4269,7 @@ resource "aws_instance" "test" { `) } -func testAccCheckInstanceConfigBlockDeviceCreateVolumeTags() string { +func testAccInstanceConfigBlockDeviceTagsEBSAndRootTags() string { return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id @@ -4290,7 +4307,7 @@ resource "aws_instance" "test" { `) } -func testAccCheckInstanceConfigBlockDeviceUpdateVolumeTags() string { +func testAccInstanceConfigBlockDeviceTagsEBSAndRootTagsUpdate() string { return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id @@ -4329,7 +4346,7 @@ resource "aws_instance" "test" { `) } -var testAccCheckInstanceConfigEBSBlockDeviceInvalidIops = composeConfig(testAccAwsEc2InstanceAmiWithEbsRootVolume, ` +var testAccInstanceConfigEBSBlockDeviceInvalidIops = composeConfig(testAccAwsEc2InstanceAmiWithEbsRootVolume, ` resource "aws_instance" "test" { ami = data.aws_ami.ami.id @@ -4344,7 +4361,7 @@ resource "aws_instance" "test" { } `) -var testAccCheckInstanceConfigEBSBlockDeviceInvalidThroughput = composeConfig(testAccAwsEc2InstanceAmiWithEbsRootVolume, ` +var testAccInstanceConfigEBSBlockDeviceInvalidThroughput = composeConfig(testAccAwsEc2InstanceAmiWithEbsRootVolume, ` resource "aws_instance" "test" { ami = data.aws_ami.ami.id @@ -4359,7 +4376,7 @@ resource "aws_instance" "test" { } `) -func testAccCheckInstanceConfigWithVolumeTags() string { +func testAccInstanceConfigBlockDeviceTagsVolumeTags() string { return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id @@ -4401,7 +4418,7 @@ resource "aws_instance" "test" { `) } -func testAccCheckInstanceConfigWithVolumeTagsUpdate() string { +func testAccInstanceConfigBlockDeviceTagsVolumeTagsUpdate() string { return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id @@ -4444,7 +4461,7 @@ resource 
"aws_instance" "test" { `) } -func testAccCheckInstanceConfigTagsUpdate() string { +func testAccInstanceConfigTagsUpdate() string { return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id From f573a9895c869adfcdee5474485f8168c21cc751 Mon Sep 17 00:00:00 2001 From: Lionel Martin Date: Tue, 12 Jan 2021 21:57:45 +0100 Subject: [PATCH 0613/1212] docs/resource/aws_api_gateway_method_settings: Use aws_api_gateway_resource path attribute for deeply nested resources (#5427) * Update api_gateway_method_settings.html.markdown As per AWS documentation, the method_path parameter must be the full method path relative to the API base URL, without the first forward slash. Current Terraform documentation only includes the last method path part, which fails to update settings on methods with deeper paths. * Update website/docs/r/api_gateway_method_settings.html.markdown Co-authored-by: Brian Flad Co-authored-by: Brian Flad --- website/docs/r/api_gateway_method_settings.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/api_gateway_method_settings.html.markdown b/website/docs/r/api_gateway_method_settings.html.markdown index 89471ae4006..b5e131bdfda 100644 --- a/website/docs/r/api_gateway_method_settings.html.markdown +++ b/website/docs/r/api_gateway_method_settings.html.markdown @@ -16,7 +16,7 @@ Provides an API Gateway Method Settings, e.g. logging or monitoring. resource "aws_api_gateway_method_settings" "s" { rest_api_id = aws_api_gateway_rest_api.test.id stage_name = aws_api_gateway_stage.test.stage_name - method_path = "${aws_api_gateway_resource.test.path_part}/${aws_api_gateway_method.test.http_method}" + method_path = "${trimprefix(aws_api_gateway_resource.test.path, "/")}/${aws_api_gateway_method.test.http_method}" settings { metrics_enabled = true From ec65ce8e2c1b17dac1db47524b2cf3cc32948e8c Mon Sep 17 00:00:00 2001 From: angie pinilla Date: Tue, 12 Jan 2021 15:59:25 -0500 Subject: [PATCH 0614/1212] Update CHANGELOG for #16827 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6c7e71f402a..4b9aaa13773 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,12 +7,14 @@ FEATURES BUX FIXES * resource/aws_db_instance: Correctly validate `final_snapshot_identifier` argument at plan-time [GH-16885] +* resource/aws_dms_endpoint: Support `extra_connection_attributes` for all engine names during create and read [GH-16827] * resource/aws_networkfirewall_rule_group: Prevent resource recreation due to `stateful_rule` changes after creation [GH-16884] * resource/aws_route53_zone_association: Prevent deletion errors for missing Hosted Zone or VPC association [GH-17023] * resource/aws_s3_bucket_inventory: Prevent crashes with empty `destination`, `filter`, and `schedule` configuration blocks [GH-17055] ENHANCEMENTS +* resource/aws_dms_endpoint: Add `s3_settings` `date_partition_enabled` argument [GH-16827] * resource/aws_elasticache_cluster: Add support for final snapshot with Redis engine [GH-15592] * resource/aws_elasticache_replication_group: Add support for final snapshot [GH-15592] From cea407afcc3503195367257a90f2b3ff0fdc01c2 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Tue, 12 Jan 2021 16:06:24 -0500 Subject: [PATCH 0615/1212] add new argument to resource documentation --- website/docs/r/dms_endpoint.html.markdown | 1 + 1 file changed, 1 insertion(+) diff --git a/website/docs/r/dms_endpoint.html.markdown 
b/website/docs/r/dms_endpoint.html.markdown index 140aaefb505..46d0dcffe5e 100644 --- a/website/docs/r/dms_endpoint.html.markdown +++ b/website/docs/r/dms_endpoint.html.markdown @@ -123,6 +123,7 @@ The `s3_settings` configuration block supports the following arguments: * `compression_type` - (Optional) Set to compress target files. Defaults to `NONE`. Valid values are `GZIP` and `NONE`. * `csv_delimiter` - (Optional) Delimiter used to separate columns in the source files. Defaults to `,`. * `csv_row_delimiter` - (Optional) Delimiter used to separate rows in the source files. Defaults to `\n`. +* `date_partition_enabled` - (Optional) Partition S3 bucket folders based on transaction commit dates. Defaults to `false`. * `external_table_definition` - (Optional) JSON document that describes how AWS DMS should interpret the data. * `service_access_role_arn` - (Optional) Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket. From ed00118a47f01f32e698568f99084b9e90c9d6b3 Mon Sep 17 00:00:00 2001 From: DJ Spatoulas Date: Tue, 12 Jan 2021 17:32:13 -0500 Subject: [PATCH 0616/1212] Properly handle response from Lambda API --- aws/resource_aws_lambda_function.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_lambda_function.go b/aws/resource_aws_lambda_function.go index d3f7ca7e29c..d16c3db9670 100644 --- a/aws/resource_aws_lambda_function.go +++ b/aws/resource_aws_lambda_function.go @@ -1302,13 +1302,15 @@ func expandLambdaFileSystemConfigs(fscMaps []interface{}) []*lambda.FileSystemCo func flattenLambdaImageConfig(response *lambda.ImageConfigResponse) []map[string]interface{} { settings := make(map[string]interface{}) - if response == nil || response.Error != nil { + imageConfig := response.ImageConfig + + if response == nil || response.Error != nil || imageConfig == nil { return nil } - settings["command"] = response.ImageConfig.Command - settings["entry_point"] = response.ImageConfig.EntryPoint - settings["working_directory"] = response.ImageConfig.WorkingDirectory + settings["command"] = imageConfig.Command + settings["entry_point"] = imageConfig.EntryPoint + settings["working_directory"] = imageConfig.WorkingDirectory return []map[string]interface{}{settings} } From 824d2d9be7cec6db8dcb8ab50e96ed40a1b5afb0 Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Wed, 13 Jan 2021 07:41:28 +0900 Subject: [PATCH 0617/1212] Replace %s with %w for the error messages --- aws/resource_aws_route53_resolver_dnssec_config.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/aws/resource_aws_route53_resolver_dnssec_config.go b/aws/resource_aws_route53_resolver_dnssec_config.go index 10f2881debe..9340620a274 100644 --- a/aws/resource_aws_route53_resolver_dnssec_config.go +++ b/aws/resource_aws_route53_resolver_dnssec_config.go @@ -65,7 +65,7 @@ func resourceAwsRoute53ResolverDnssecConfigCreate(d *schema.ResourceData, meta i log.Printf("[DEBUG] Creating Route53 Resolver DNSSEC config: %#v", req) resp, err := conn.UpdateResolverDnssecConfig(req) if err != nil { - return fmt.Errorf("error creating Route53 Resolver DNSSEC config: %s", err) + return fmt.Errorf("error creating Route53 Resolver DNSSEC config: %w", err) } d.SetId(aws.StringValue(resp.ResolverDNSSECConfig.ResourceId)) @@ -86,7 +86,7 @@ func resourceAwsRoute53ResolverDnssecConfigRead(d *schema.ResourceData, meta int vpc, err := vpcDescribe(ec2Conn, d.Id()) if err != nil { - return fmt.Errorf("error getting VPC associated with Route53 Resolver 
DNSSEC config (%s): %s", d.Id(), err) + return fmt.Errorf("error getting VPC associated with Route53 Resolver DNSSEC config (%s): %w", d.Id(), err) } // GetResolverDnssecConfig returns AccessDeniedException if sending a request with non-existing VPC id @@ -98,7 +98,7 @@ func resourceAwsRoute53ResolverDnssecConfigRead(d *schema.ResourceData, meta int raw, state, err := route53ResolverDnssecConfigRefresh(conn, d.Id())() if err != nil { - return fmt.Errorf("error getting Route53 Resolver DNSSEC config (%s): %s", d.Id(), err) + return fmt.Errorf("error getting Route53 Resolver DNSSEC config (%s): %w", d.Id(), err) } if state == route53ResolverDnssecConfigStatusNotFound || state == route53resolver.ResolverDNSSECValidationStatusDisabled { @@ -128,7 +128,7 @@ func resourceAwsRoute53ResolverDnssecConfigDelete(d *schema.ResourceData, meta i return nil } if err != nil { - return fmt.Errorf("error deleting Route53 Resolver DNSSEC config (%s): %s", d.Id(), err) + return fmt.Errorf("error deleting Route53 Resolver DNSSEC config (%s): %w", d.Id(), err) } err = route53ResolverDnssecConfigWait(conn, d.Id(), d.Timeout(schema.TimeoutDelete), @@ -151,7 +151,7 @@ func route53ResolverDnssecConfigWait(conn *route53resolver.Route53Resolver, id s MinTimeout: 5 * time.Second, } if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("error waiting for Route53 Resolver DNSSEC config (%s) to reach target state: %s", id, err) + return fmt.Errorf("error waiting for Route53 Resolver DNSSEC config (%s) to reach target state: %w", id, err) } return nil From 03bfc2c6ee6f6de711abc6e9710c36dff0100768 Mon Sep 17 00:00:00 2001 From: DJ Spatoulas Date: Tue, 12 Jan 2021 17:54:45 -0500 Subject: [PATCH 0618/1212] Fix failing CI test --- aws/resource_aws_lambda_function.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/aws/resource_aws_lambda_function.go b/aws/resource_aws_lambda_function.go index d16c3db9670..fe6f0116486 100644 --- a/aws/resource_aws_lambda_function.go +++ b/aws/resource_aws_lambda_function.go @@ -1302,15 +1302,13 @@ func expandLambdaFileSystemConfigs(fscMaps []interface{}) []*lambda.FileSystemCo func flattenLambdaImageConfig(response *lambda.ImageConfigResponse) []map[string]interface{} { settings := make(map[string]interface{}) - imageConfig := response.ImageConfig - - if response == nil || response.Error != nil || imageConfig == nil { + if response == nil || response.Error != nil || response.ImageConfig == nil { return nil } - settings["command"] = imageConfig.Command - settings["entry_point"] = imageConfig.EntryPoint - settings["working_directory"] = imageConfig.WorkingDirectory + settings["command"] = response.ImageConfig.Command + settings["entry_point"] = response.ImageConfig.EntryPoint + settings["working_directory"] = response.ImageConfig.WorkingDirectory return []map[string]interface{}{settings} } From 753dd9f01a33dc933135cf13a202b02b6751e4d4 Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Wed, 13 Jan 2021 08:13:07 +0900 Subject: [PATCH 0619/1212] Add ARN support for aws_route53_resolver_dnssec_config --- ...resource_aws_route53_resolver_dnssec_config.go | 15 +++++++++++++++ ...rce_aws_route53_resolver_dnssec_config_test.go | 2 ++ .../route53_resolver_dnssec_config.html.markdown | 1 + 3 files changed, 18 insertions(+) diff --git a/aws/resource_aws_route53_resolver_dnssec_config.go b/aws/resource_aws_route53_resolver_dnssec_config.go index 9340620a274..9afdf60c081 100644 --- a/aws/resource_aws_route53_resolver_dnssec_config.go +++ 
b/aws/resource_aws_route53_resolver_dnssec_config.go @@ -6,6 +6,7 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/route53resolver" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -25,6 +26,11 @@ func resourceAwsRoute53ResolverDnssecConfig() *schema.Resource { }, Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "id": { Type: schema.TypeString, Computed: true, @@ -113,6 +119,15 @@ func resourceAwsRoute53ResolverDnssecConfigRead(d *schema.ResourceData, meta int d.Set("resource_id", out.ResourceId) d.Set("validation_status", out.ValidationStatus) + configArn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "route53resolver", + Region: meta.(*AWSClient).region, + AccountID: aws.StringValue(out.OwnerId), + Resource: fmt.Sprintf("resolver-dnssec-config/%s", aws.StringValue(out.ResourceId)), + }.String() + d.Set("arn", configArn) + return nil } diff --git a/aws/resource_aws_route53_resolver_dnssec_config_test.go b/aws/resource_aws_route53_resolver_dnssec_config_test.go index 51bb8f3fc2a..b88ec552015 100644 --- a/aws/resource_aws_route53_resolver_dnssec_config_test.go +++ b/aws/resource_aws_route53_resolver_dnssec_config_test.go @@ -3,6 +3,7 @@ package aws import ( "fmt" "log" + "regexp" "testing" "time" @@ -87,6 +88,7 @@ func TestAccAWSRoute53ResolverDnssecConfig_basic(t *testing.T) { Config: testAccRoute53ResolverDnssecConfigConfigBasic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckRoute53ResolverDnssecConfigExists(resourceName, &config), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "route53resolver", regexp.MustCompile(`resolver-dnssec-config/.+$`)), resource.TestCheckResourceAttrSet(resourceName, "id"), resource.TestCheckResourceAttrSet(resourceName, "owner_id"), resource.TestCheckResourceAttrSet(resourceName, "resource_id"), diff --git a/website/docs/r/route53_resolver_dnssec_config.html.markdown b/website/docs/r/route53_resolver_dnssec_config.html.markdown index 878c152e4c8..7ae43953bbe 100644 --- a/website/docs/r/route53_resolver_dnssec_config.html.markdown +++ b/website/docs/r/route53_resolver_dnssec_config.html.markdown @@ -34,6 +34,7 @@ The following argument is supported: In addition to all arguments above, the following attributes are exported: +* `arn` - The ARN for a configuration for DNSSEC validation. * `id` - The ID for a configuration for DNSSEC validation. * `owner_id` - The owner account ID of the virtual private cloud (VPC) for a configuration for DNSSEC validation. * `validation_status` - The validation status for a DNSSEC configuration. The status can be one of the following: `ENABLING`, `ENABLED`, `DISABLING` and `DISABLED`. 
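For reviewers of PATCH 0619 above, here is a minimal, standalone sketch of the ARN shape that the new code in `resourceAwsRoute53ResolverDnssecConfigRead` assembles. It is illustrative only: the partition, region, account ID, and resource ID below are placeholder values, not values taken from the patch.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/arn"
)

func main() {
	// Mirrors the construction added in the patch:
	// arn:<partition>:route53resolver:<region>:<account-id>:resolver-dnssec-config/<resource-id>
	configArn := arn.ARN{
		Partition: "aws",             // placeholder partition
		Service:   "route53resolver", // service name used by the patch
		Region:    "us-east-1",       // placeholder region
		AccountID: "123456789012",    // placeholder account ID
		Resource:  fmt.Sprintf("resolver-dnssec-config/%s", "rdsc-0123456789abcdef0"), // placeholder resource ID
	}.String()

	fmt.Println(configArn)
	// arn:aws:route53resolver:us-east-1:123456789012:resolver-dnssec-config/rdsc-0123456789abcdef0
}
```

Building the value through the SDK's `arn.ARN` type rather than by hand keeps the colon-delimited layout consistent, and the resulting string is what the new acceptance test matches with its `resolver-dnssec-config/.+$` pattern.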
From bc9018ee157f2dd070e01ad3ec9d3e627ee24a4e Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 12 Jan 2021 18:44:26 -0500 Subject: [PATCH 0620/1212] resource/aws_api_gateway_rest_api: Refactor body_base_path to parameters argument Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAWSAPIGatewayRestApi_api_key_source (254.55s) --- PASS: TestAccAWSAPIGatewayRestApi_basic (55.86s) --- PASS: TestAccAWSAPIGatewayRestApi_disappears (11.43s) --- PASS: TestAccAWSAPIGatewayRestApi_EndpointConfiguration (211.19s) --- PASS: TestAccAWSAPIGatewayRestApi_EndpointConfiguration_Private (20.20s) --- PASS: TestAccAWSAPIGatewayRestApi_EndpointConfiguration_VPCEndpoint (438.68s) --- PASS: TestAccAWSAPIGatewayRestApi_openapi (433.40s) --- PASS: TestAccAWSAPIGatewayRestApi_Parameters (135.48s) --- PASS: TestAccAWSAPIGatewayRestApi_policy (46.59s) --- PASS: TestAccAWSAPIGatewayRestApi_tags (32.33s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- PASS: TestAccAWSAPIGatewayRestApi_EndpointConfiguration_Private (20.81s) --- PASS: TestAccAWSAPIGatewayRestApi_EndpointConfiguration_VPCEndpoint (347.68s) --- SKIP: TestAccAWSAPIGatewayRestApi_api_key_source (1.26s) --- SKIP: TestAccAWSAPIGatewayRestApi_basic (1.26s) --- SKIP: TestAccAWSAPIGatewayRestApi_disappears (1.26s) --- SKIP: TestAccAWSAPIGatewayRestApi_EndpointConfiguration (35.10s) --- SKIP: TestAccAWSAPIGatewayRestApi_openapi (0.00s) --- SKIP: TestAccAWSAPIGatewayRestApi_Parameters (1.26s) --- SKIP: TestAccAWSAPIGatewayRestApi_policy (0.00s) --- SKIP: TestAccAWSAPIGatewayRestApi_tags (0.00s) ``` --- aws/resource_aws_api_gateway_rest_api.go | 48 +-- aws/resource_aws_api_gateway_rest_api_test.go | 302 ++---------------- .../docs/r/api_gateway_rest_api.html.markdown | 6 +- 3 files changed, 46 insertions(+), 310 deletions(-) diff --git a/aws/resource_aws_api_gateway_rest_api.go b/aws/resource_aws_api_gateway_rest_api.go index ee1849222b0..8ad41302946 100644 --- a/aws/resource_aws_api_gateway_rest_api.go +++ b/aws/resource_aws_api_gateway_rest_api.go @@ -65,11 +65,10 @@ func resourceAwsApiGatewayRestApi() *schema.Resource { Optional: true, }, - "body_base_path": { - Type: schema.TypeString, - Default: "ignore", - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"ignore", "prepend", "split"}, true), + "parameters": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, }, "minimum_compression_size": { @@ -182,21 +181,21 @@ func resourceAwsApiGatewayRestApiCreate(d *schema.ResourceData, meta interface{} d.SetId(aws.StringValue(gateway.Id)) - bodyBasePathMode := d.Get("body_base_path").(string) - if body, ok := d.GetOk("body"); ok { log.Printf("[DEBUG] Initializing API Gateway from OpenAPI spec %s", d.Id()) - _, err := conn.PutRestApi(&apigateway.PutRestApiInput{ + + input := &apigateway.PutRestApiInput{ RestApiId: gateway.Id, Mode: aws.String(apigateway.PutModeOverwrite), Body: []byte(body.(string)), - Parameters: map[string]*string{ - // See https://docs.aws.amazon.com/cli/latest/reference/apigateway/import-rest-api.html - // At the moment of writing, according to aws support, the docs are incorrect - // and the parameter should be called 'basepath' and not 'basePath' - "basepath": &bodyBasePathMode, - }, - }) + } + + if v, ok := d.GetOk("parameters"); ok && len(v.(map[string]interface{})) > 0 { + input.Parameters = stringMapToPointers(v.(map[string]interface{})) + } + + _, err := conn.PutRestApi(input) + if err != nil { return fmt.Errorf("error creating API Gateway 
specification: %s", err) } @@ -425,19 +424,22 @@ func resourceAwsApiGatewayRestApiUpdate(d *schema.ResourceData, meta interface{} } } - if d.HasChange("body") { - bodyBasePathMode := d.Get("body_base_path").(string) - + if d.HasChanges("body", "parameters") { if body, ok := d.GetOk("body"); ok { log.Printf("[DEBUG] Updating API Gateway from OpenAPI spec: %s", d.Id()) - _, err := conn.PutRestApi(&apigateway.PutRestApiInput{ + + input := &apigateway.PutRestApiInput{ RestApiId: aws.String(d.Id()), Mode: aws.String(apigateway.PutModeOverwrite), Body: []byte(body.(string)), - Parameters: map[string]*string{ - "basepath": &bodyBasePathMode, - }, - }) + } + + if v, ok := d.GetOk("parameters"); ok && len(v.(map[string]interface{})) > 0 { + input.Parameters = stringMapToPointers(v.(map[string]interface{})) + } + + _, err := conn.PutRestApi(input) + if err != nil { return fmt.Errorf("error updating API Gateway specification: %s", err) } diff --git a/aws/resource_aws_api_gateway_rest_api_test.go b/aws/resource_aws_api_gateway_rest_api_test.go index 58072a6318f..73cc2135a3d 100644 --- a/aws/resource_aws_api_gateway_rest_api_test.go +++ b/aws/resource_aws_api_gateway_rest_api_test.go @@ -513,99 +513,34 @@ func TestAccAWSAPIGatewayRestApi_openapi(t *testing.T) { }) } -func TestAccAWSAPIGatewayRestApi_openapi_body_base_path_ignore(t *testing.T) { - var conf apigateway.RestApi - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSAPIGatewayRestAPIConfigOpenAPIBasePathIgnore, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayRestAPIExists("aws_api_gateway_rest_api.test", &conf), - testAccCheckAWSAPIGatewayRestAPIRoutes(&conf, []string{"/", "/test"}), - ), - }, - { - ResourceName: "aws_api_gateway_rest_api.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"body"}, - }, - { - Config: testAccAWSAPIGatewayRestAPIUpdateConfigOpenAPIBasePathIgnore, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayRestAPIExists("aws_api_gateway_rest_api.test", &conf), - testAccCheckAWSAPIGatewayRestAPINameAttribute(&conf, "test"), - testAccCheckAWSAPIGatewayRestAPIRoutes(&conf, []string{"/", "/update"}), - ), - }, - }, - }) -} - -func TestAccAWSAPIGatewayRestApi_openapi_body_base_path_prepend(t *testing.T) { - var conf apigateway.RestApi - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSAPIGatewayRestAPIConfigOpenAPIBasePathPrepend, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayRestAPIExists("aws_api_gateway_rest_api.test", &conf), - testAccCheckAWSAPIGatewayRestAPIRoutes(&conf, []string{"/", "/foo/bar/baz/test"}), - ), - }, - { - ResourceName: "aws_api_gateway_rest_api.test", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"body"}, - }, - { - Config: testAccAWSAPIGatewayRestAPIUpdateConfigOpenAPIBasePathPrepend, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayRestAPIExists("aws_api_gateway_rest_api.test", &conf), - testAccCheckAWSAPIGatewayRestAPINameAttribute(&conf, "test"), - testAccCheckAWSAPIGatewayRestAPIRoutes(&conf, []string{"/", "/foo/bar/baz/update"}), - ), - }, - }, - }) -} - -func 
TestAccAWSAPIGatewayRestApi_openapi_body_base_path_split(t *testing.T) {
+func TestAccAWSAPIGatewayRestApi_Parameters(t *testing.T) {
 	var conf apigateway.RestApi
+	rName := acctest.RandomWithPrefix("tf-acc-test")
+	resourceName := "aws_api_gateway_rest_api.test"
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:     func() { testAccPreCheck(t) },
+		PreCheck:     func() { testAccPreCheck(t); testAccAPIGatewayTypeEDGEPreCheck(t) },
 		Providers:    testAccProviders,
 		CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy,
 		Steps: []resource.TestStep{
 			{
-				Config: testAccAWSAPIGatewayRestAPIConfigOpenAPIBasePathSplit,
+				Config: testAccAWSAPIGatewayRestAPIConfigParameters1(rName, "basepath", "prepend"),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckAWSAPIGatewayRestAPIExists("aws_api_gateway_rest_api.test", &conf),
-					testAccCheckAWSAPIGatewayRestAPIRoutes(&conf, []string{"/", "/bar/baz/test"}),
+					testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf),
+					testAccCheckAWSAPIGatewayRestAPIRoutes(&conf, []string{"/", "/foo", "/foo/bar", "/foo/bar/baz", "/foo/bar/baz/test"}),
 				),
 			},
 			{
-				ResourceName:            "aws_api_gateway_rest_api.test",
+				ResourceName:            resourceName,
 				ImportState:             true,
 				ImportStateVerify:       true,
-				ImportStateVerifyIgnore: []string{"body"},
+				ImportStateVerifyIgnore: []string{"body", "parameters"},
 			},
 			{
-				Config: testAccAWSAPIGatewayRestAPIUpdateConfigOpenAPIBasePathSplit,
+				Config: testAccAWSAPIGatewayRestAPIConfigParameters1(rName, "basepath", "ignore"),
 				Check: resource.ComposeTestCheckFunc(
-					testAccCheckAWSAPIGatewayRestAPIExists("aws_api_gateway_rest_api.test", &conf),
-					testAccCheckAWSAPIGatewayRestAPINameAttribute(&conf, "test"),
-					testAccCheckAWSAPIGatewayRestAPIRoutes(&conf, []string{"/", "/bar/baz/update"}),
+					testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf),
+					testAccCheckAWSAPIGatewayRestAPIRoutes(&conf, []string{"/", "/test"}),
 				),
 			},
 		},
@@ -1120,180 +1055,18 @@ EOF
 `, rName, rName)
 }
 
-const testAccAWSAPIGatewayRestAPIConfigOpenAPIBasePathIgnore = `
-resource "aws_api_gateway_rest_api" "test" {
-  name = "test"
-  body_base_path = "ignore"
-  body = < Date: Tue, 12 Jan 2021 18:47:14 -0500
Subject: [PATCH 0621/1212] Update CHANGELOG for #7374

---
 CHANGELOG.md | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4b9aaa13773..05a4efea248 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,13 @@ FEATURES

 * **New Resource:** `aws_fms_policy` ([#9594](https://github.com/hashicorp/terraform-provider-aws/issues/9594))

+ENHANCEMENTS
+
+* resource/aws_api_gateway_rest_api: Add `parameters` argument [GH-7374]
+* resource/aws_dms_endpoint: Add `s3_settings` `date_partition_enabled` argument [GH-16827]
+* resource/aws_elasticache_cluster: Add support for final snapshot with Redis engine [GH-15592]
+* resource/aws_elasticache_replication_group: Add support for final snapshot [GH-15592]
+
 BUG FIXES

 * resource/aws_db_instance: Correctly validate `final_snapshot_identifier` argument at plan-time [GH-16885]
@@ -12,12 +19,6 @@ BUG FIXES
 * resource/aws_dms_endpoint: Support `extra_connection_attributes` for all engine names during create and read [GH-16827]
 * resource/aws_networkfirewall_rule_group: Prevent resource recreation due to `stateful_rule` changes after creation [GH-16884]
 * resource/aws_route53_zone_association: Prevent deletion errors for missing Hosted Zone or VPC association [GH-17023]
 * resource/aws_s3_bucket_inventory: Prevent crashes with empty `destination`, `filter`, and `schedule` configuration blocks [GH-17055]

-ENHANCEMENTS
-
-* resource/aws_dms_endpoint: Add `s3_settings` `date_partition_enabled` argument [GH-16827]
-* 
resource/aws_elasticache_replication_group: Add support for final snapshot [GH-15592] - ## 3.23.0 (January 08, 2021) FEATURES From 4de8164929db1ed0611d0f6faa327a95e332e588 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 12 Jan 2021 15:56:00 -0800 Subject: [PATCH 0622/1212] Restores diff suppression for hashed GitHub tokens --- aws/resource_aws_codepipeline.go | 35 +++++++++++++++++++++++++++++--- 1 file changed, 32 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_codepipeline.go b/aws/resource_aws_codepipeline.go index b66de0d158d..bc0ddb3732c 100644 --- a/aws/resource_aws_codepipeline.go +++ b/aws/resource_aws_codepipeline.go @@ -1,9 +1,12 @@ package aws import ( + "crypto/sha256" + "encoding/hex" "errors" "fmt" "log" + "strings" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/codepipeline" @@ -104,9 +107,10 @@ func resourceAwsCodePipeline() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "configuration": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + DiffSuppressFunc: suppressCodePipelineStageActionConfiguration, }, "category": { Type: schema.TypeString, @@ -621,3 +625,28 @@ func resourceAwsCodePipelineValidateActionProvider(i interface{}, path cty.Path) return nil } + +func suppressCodePipelineStageActionConfiguration(k, old, new string, d *schema.ResourceData) bool { + parts := strings.Split(k, ".") + parts = parts[:len(parts)-2] + providerAddr := strings.Join(append(parts, "provider"), ".") + provider := d.Get(providerAddr).(string) + + if provider == CodePipelineProviderGitHub && strings.HasSuffix(k, CodePipelineGitHubActionConfigurationOAuthToken) { + hash := hashCodePipelineGitHubToken(new) + return old == hash + } + + return false +} + +const codePipelineGitHubTokenHashPrefix = "hash-" + +func hashCodePipelineGitHubToken(token string) string { + // Without this check, the value was getting encoded twice + if strings.HasPrefix(token, codePipelineGitHubTokenHashPrefix) { + return token + } + sum := sha256.Sum256([]byte(token)) + return codePipelineGitHubTokenHashPrefix + hex.EncodeToString(sum[:]) +} From be7061c84e0f2d12ae73258da856b0b4e3f78f21 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 12 Jan 2021 20:54:53 -0500 Subject: [PATCH 0623/1212] resource/aws_api_gateway_usage_plan: Refactor out mutex in preference of client retries Output from acceptance testing: ``` --- PASS: TestAccAWSAPIGatewayUsagePlanKey_basic (49.70s) --- PASS: TestAccAWSAPIGatewayUsagePlanKey_KeyId_Concurrency (81.01s) --- PASS: TestAccAWSAPIGatewayUsagePlanKey_disappears (112.89s) ``` --- ...resource_aws_api_gateway_usage_plan_key.go | 19 +++---------------- ...rce_aws_api_gateway_usage_plan_key_test.go | 2 +- 2 files changed, 4 insertions(+), 17 deletions(-) diff --git a/aws/resource_aws_api_gateway_usage_plan_key.go b/aws/resource_aws_api_gateway_usage_plan_key.go index d6ddab617df..8802014cba2 100644 --- a/aws/resource_aws_api_gateway_usage_plan_key.go +++ b/aws/resource_aws_api_gateway_usage_plan_key.go @@ -3,19 +3,13 @@ package aws import ( "fmt" "log" -<<<<<<< HEAD "strings" -======= - "sync" ->>>>>>> d7b8088c3e963addb9bebc76947247ad056c4795 "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/apigateway" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -var resourceAwsApiGatewayUsagePlanKeyMutex = &sync.Mutex{} - func resourceAwsApiGatewayUsagePlanKey() 
*schema.Resource {
 	return &schema.Resource{
 		Create: resourceAwsApiGatewayUsagePlanKeyCreate,
@@ -78,20 +72,13 @@ func resourceAwsApiGatewayUsagePlanKeyCreate(d *schema.ResourceData, meta interf
 		UsagePlanId: aws.String(d.Get("usage_plan_id").(string)),
 	}
 
-	resourceAwsApiGatewayUsagePlanKeyMutex.Lock()
-	defer resourceAwsApiGatewayUsagePlanKeyMutex.Unlock()
-
-	o, err := retryOnAwsCode(apigateway.ErrCodeConflictException, func() (interface{}, error) {
-		return conn.CreateUsagePlanKey(params)
-	})
+	up, err := conn.CreateUsagePlanKey(params)
 
 	if err != nil {
-		return fmt.Errorf("Error creating API Gateway Usage Plan Key: %s", err)
+		return fmt.Errorf("error creating API Gateway Usage Plan Key: %w", err)
 	}
 
-	up := o.(*apigateway.UsagePlanKey)
-
-	d.SetId(*up.Id)
+	d.SetId(aws.StringValue(up.Id))
 
 	return resourceAwsApiGatewayUsagePlanKeyRead(d, meta)
 }
diff --git a/aws/resource_aws_api_gateway_usage_plan_key_test.go b/aws/resource_aws_api_gateway_usage_plan_key_test.go
index fa747fcc143..a76d39fa33a 100644
--- a/aws/resource_aws_api_gateway_usage_plan_key_test.go
+++ b/aws/resource_aws_api_gateway_usage_plan_key_test.go
@@ -33,7 +33,7 @@ func TestAccAWSAPIGatewayUsagePlanKey_basic(t *testing.T) {
 					resource.TestCheckResourceAttr(resourceName, "key_type", "API_KEY"),
 					resource.TestCheckResourceAttrSet(resourceName, "name"),
 					resource.TestCheckResourceAttrPair(resourceName, "usage_plan_id", apiGatewayUsagePlanResourceName, "id"),
-					resource.TestCheckResourceAttr(resourceName, "value", ""),
+					resource.TestCheckResourceAttrSet(resourceName, "value"),
 				),
 			},
 			{

From 91c76dfe32341327a0bf45dd870f46b1cd9515c8 Mon Sep 17 00:00:00 2001
From: Brian Flad
Date: Tue, 12 Jan 2021 20:57:21 -0500
Subject: [PATCH 0624/1212] Update CHANGELOG for #10092

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 05a4efea248..f39635cfd20 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,6 +13,7 @@ ENHANCEMENTS
 
 BUG FIXES
 
+* resource/aws_api_gateway_usage_plan_key: Automatically retry on retryable `ConflictException` errors during creation and deletion [GH-10092]
 * resource/aws_db_instance: Correctly validate `final_snapshot_identifier` argument at plan-time [GH-16885]
 * resource/aws_dms_endpoint: Support `extra_connection_attributes` for all engine names during create and read [GH-16827]
 * resource/aws_networkfirewall_rule_group: Prevent resource recreation due to `stateful_rule` changes after creation [GH-16884]

From 9dd1e48279fe9b0202eddb2f10990723591c30d6 Mon Sep 17 00:00:00 2001
From: Max Smolens
Date: Tue, 12 Jan 2021 21:14:40 -0500
Subject: [PATCH 0625/1212] docs/resource/aws_api_gateway_gateway_response: Fix
 invalid JSON in response template (#12820)

Strings in JSON must be enclosed in double quotes. Update the example
response template to be valid JSON.
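For illustration, a minimal sketch of the corrected pattern (the `example` resource names below are assumptions, not part of this patch): `$context.error.messageString` expands to an already double-quoted string, so only the surrounding braces and the key need escaped double quotes.

```hcl
resource "aws_api_gateway_gateway_response" "example" {
  rest_api_id   = aws_api_gateway_rest_api.example.id
  response_type = "UNAUTHORIZED"
  status_code   = "401"

  response_templates = {
    # Renders as {"message":"Unauthorized"}: valid JSON, because the
    # $context.error.messageString variable yields an already-quoted string.
    "application/json" = "{\"message\":$context.error.messageString}"
  }
}
```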
--- website/docs/r/api_gateway_gateway_response.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/api_gateway_gateway_response.markdown b/website/docs/r/api_gateway_gateway_response.markdown index 99241fd588e..1322b6dac30 100644 --- a/website/docs/r/api_gateway_gateway_response.markdown +++ b/website/docs/r/api_gateway_gateway_response.markdown @@ -23,7 +23,7 @@ resource "aws_api_gateway_gateway_response" "test" { response_type = "UNAUTHORIZED" response_templates = { - "application/json" = "{'message':$context.error.messageString}" + "application/json" = "{\"message\":$context.error.messageString}" } response_parameters = { From 6b256fc6e38b104e80959108ad94395f2d091693 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 12 Jan 2021 21:40:36 -0500 Subject: [PATCH 0626/1212] resource/aws_api_gateway_method: Minor refactoring and CI fixes Output from acceptance testing: ``` --- PASS: TestAccAWSAPIGatewayMethod_customrequestvalidator (27.83s) --- PASS: TestAccAWSAPIGatewayMethod_OperationName (27.86s) --- PASS: TestAccAWSAPIGatewayMethod_customauthorizer (89.63s) --- PASS: TestAccAWSAPIGatewayMethod_basic (119.56s) --- PASS: TestAccAWSAPIGatewayMethod_disappears (218.21s) --- PASS: TestAccAWSAPIGatewayMethod_cognitoauthorizer (394.87s) ``` --- aws/resource_aws_api_gateway_method.go | 11 ++- aws/resource_aws_api_gateway_method_test.go | 84 +++++-------------- .../docs/r/api_gateway_method.html.markdown | 2 +- 3 files changed, 27 insertions(+), 70 deletions(-) diff --git a/aws/resource_aws_api_gateway_method.go b/aws/resource_aws_api_gateway_method.go index 43d825c3205..e81ce364dd3 100644 --- a/aws/resource_aws_api_gateway_method.go +++ b/aws/resource_aws_api_gateway_method.go @@ -141,14 +141,14 @@ func resourceAwsApiGatewayMethodCreate(d *schema.ResourceData, meta interface{}) input.AuthorizationScopes = expandStringList(v.(*schema.Set).List()) } - if v, ok := d.GetOk("request_validator_id"); ok { - input.RequestValidatorId = aws.String(v.(string)) - } - if v, ok := d.GetOk("operation_name"); ok { input.OperationName = aws.String(v.(string)) } + if v, ok := d.GetOk("request_validator_id"); ok { + input.RequestValidatorId = aws.String(v.(string)) + } + _, err := conn.PutMethod(&input) if err != nil { return fmt.Errorf("Error creating API Gateway Method: %s", err) @@ -187,6 +187,7 @@ func resourceAwsApiGatewayMethodRead(d *schema.ResourceData, meta interface{}) e d.Set("authorization", out.AuthorizationType) d.Set("authorizer_id", out.AuthorizerId) + d.Set("operation_name", out.OperationName) if err := d.Set("request_models", aws.StringValueMap(out.RequestModels)); err != nil { return fmt.Errorf("error setting request_models: %s", err) @@ -198,8 +199,6 @@ func resourceAwsApiGatewayMethodRead(d *schema.ResourceData, meta interface{}) e d.Set("request_validator_id", out.RequestValidatorId) - d.Set("operation_name", out.OperationName) - return nil } diff --git a/aws/resource_aws_api_gateway_method_test.go b/aws/resource_aws_api_gateway_method_test.go index 017b5486cd9..72394641032 100644 --- a/aws/resource_aws_api_gateway_method_test.go +++ b/aws/resource_aws_api_gateway_method_test.go @@ -198,44 +198,34 @@ func TestAccAWSAPIGatewayMethod_disappears(t *testing.T) { }) } -func TestAccAWSAPIGatewayMethod_customoperationname(t *testing.T) { +func TestAccAWSAPIGatewayMethod_OperationName(t *testing.T) { var conf apigateway.Method rInt := acctest.RandInt() + resourceName := "aws_api_gateway_method.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: 
func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccAPIGatewayTypeEDGEPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSAPIGatewayMethodDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSAPIGatewayMethodConfigWithCustomOperationName(rInt), + Config: testAccAWSAPIGatewayMethodConfigOperationName(rInt, "getTest"), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayMethodExists("aws_api_gateway_method.test", &conf), - testAccCheckAWSAPIGatewayMethodAttributes(&conf), - resource.TestCheckResourceAttr( - "aws_api_gateway_method.test", "http_method", "GET"), - resource.TestCheckResourceAttr( - "aws_api_gateway_method.test", "authorization", "NONE"), - resource.TestCheckResourceAttr( - "aws_api_gateway_method.test", "request_models.application/json", "Error"), - resource.TestCheckResourceAttr( - "aws_api_gateway_method.test", "operation_name", "getTest"), + testAccCheckAWSAPIGatewayMethodExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "operation_name", "getTest"), ), }, { - ResourceName: "aws_api_gateway_method.test", + ResourceName: resourceName, ImportState: true, - ImportStateIdFunc: testAccAWSAPIGatewayMethodImportStateIdFunc("aws_api_gateway_method.test"), + ImportStateIdFunc: testAccAWSAPIGatewayMethodImportStateIdFunc(resourceName), ImportStateVerify: true, }, - { - Config: testAccAWSAPIGatewayMethodConfigWithCustomOperationNameUpdate(rInt), + Config: testAccAWSAPIGatewayMethodConfigOperationName(rInt, "describeTest"), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayMethodExists("aws_api_gateway_method.test", &conf), - testAccCheckAWSAPIGatewayMethodAttributesUpdate(&conf), - resource.TestCheckResourceAttr( - "aws_api_gateway_method.test", "operation_name", "describeTest"), + testAccCheckAWSAPIGatewayMethodExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "operation_name", "describeTest"), ), }, }, @@ -770,23 +760,24 @@ resource "aws_api_gateway_method" "test" { `, rInt) } -func testAccAWSAPIGatewayMethodConfigWithCustomOperationName(rInt int) string { +func testAccAWSAPIGatewayMethodConfigOperationName(rInt int, operationName string) string { return fmt.Sprintf(` resource "aws_api_gateway_rest_api" "test" { - name = "tf-acc-test-apig-method-custom-op-name-%d" + name = "tf-acc-test-apig-method-custom-op-name-%[1]d" } resource "aws_api_gateway_resource" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - parent_id = "${aws_api_gateway_rest_api.test.root_resource_id}" + rest_api_id = aws_api_gateway_rest_api.test.id + parent_id = aws_api_gateway_rest_api.test.root_resource_id path_part = "test" } resource "aws_api_gateway_method" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_resource.test.id}" - http_method = "GET" - authorization = "NONE" + authorization = "NONE" + http_method = "GET" + operation_name = %[2]q + resource_id = aws_api_gateway_resource.test.id + rest_api_id = aws_api_gateway_rest_api.test.id request_models = { "application/json" = "Error" @@ -796,39 +787,6 @@ resource "aws_api_gateway_method" "test" { "method.request.header.Content-Type" = false "method.request.querystring.page" = true } - - operation_name = "getTest" -} -`, rInt) -} - -func testAccAWSAPIGatewayMethodConfigWithCustomOperationNameUpdate(rInt int) string { - return fmt.Sprintf(` -resource "aws_api_gateway_rest_api" "test" { - name = "tf-acc-test-apig-method-custom-op-name-%d" -} - -resource 
"aws_api_gateway_resource" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - parent_id = "${aws_api_gateway_rest_api.test.root_resource_id}" - path_part = "test" -} - -resource "aws_api_gateway_method" "test" { - rest_api_id = "${aws_api_gateway_rest_api.test.id}" - resource_id = "${aws_api_gateway_resource.test.id}" - http_method = "GET" - authorization = "NONE" - - request_models = { - "application/json" = "Error" - } - - request_parameters = { - "method.request.querystring.page" = false - } - - operation_name = "describeTest" } -`, rInt) +`, rInt, operationName) } diff --git a/website/docs/r/api_gateway_method.html.markdown b/website/docs/r/api_gateway_method.html.markdown index a4b7b9d28d1..59917adb05d 100644 --- a/website/docs/r/api_gateway_method.html.markdown +++ b/website/docs/r/api_gateway_method.html.markdown @@ -82,13 +82,13 @@ The following arguments are supported: * `authorizer_id` - (Optional) The authorizer id to be used when the authorization is `CUSTOM` or `COGNITO_USER_POOLS` * `authorization_scopes` - (Optional) The authorization scopes used when the authorization is `COGNITO_USER_POOLS` * `api_key_required` - (Optional) Specify if the method requires an API key +* `operation_name` - (Optional) The function name that will be given to the method when generating an SDK through API Gateway. If omitted, API Gateway will generate a function name based on the resource path and HTTP verb. * `request_models` - (Optional) A map of the API models used for the request's content type where key is the content type (e.g. `application/json`) and value is either `Error`, `Empty` (built-in models) or `aws_api_gateway_model`'s `name`. * `request_validator_id` - (Optional) The ID of a `aws_api_gateway_request_validator` * `request_parameters` - (Optional) A map of request parameters (from the path, query string and headers) that should be passed to the integration. The boolean value indicates whether the parameter is required (`true`) or optional (`false`). For example: `request_parameters = {"method.request.header.X-Some-Header" = true "method.request.querystring.some-query-param" = true}` would define that the header `X-Some-Header` and the query string `some-query-param` must be provided in the request. -* `operation_name` - (Optional) The function name that will be given to the method when generating an SDK through API Gateway. If omitted, API Gateway will generate a function name based on the resource path and HTTP verb. 
## Import From b224066ee7f56df101474ed2a11865ef74f121a9 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 12 Jan 2021 21:42:00 -0500 Subject: [PATCH 0627/1212] Update CHANGELOG for #13232 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f39635cfd20..4019bbabd03 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ FEATURES ENHANCEMENTS +* resource/aws_api_gateway_method: Add `operation_name` argument [GH-13232] * resource/aws_api_gateway_rest_api: Add `parameters` argument [GH-7374] * resource/aws_dms_endpoint: Add `s3_settings` `date_partition_enabled` argument [GH-16827] * resource/aws_elasticache_cluster: Add support for final snapshot with Redis engine [GH-15592] From e1e033453434daf8f3ba31a1c59ff39d0280f561 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 12 Jan 2021 21:43:25 -0500 Subject: [PATCH 0628/1212] Update CHANGELOG for #13282 --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4019bbabd03..3736d4e7f60 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,7 @@ FEATURES ENHANCEMENTS -* resource/aws_api_gateway_method: Add `operation_name` argument [GH-13232] +* resource/aws_api_gateway_method: Add `operation_name` argument [GH-13282] * resource/aws_api_gateway_rest_api: Add `parameters` argument [GH-7374] * resource/aws_dms_endpoint: Add `s3_settings` `date_partition_enabled` argument [GH-16827] * resource/aws_elasticache_cluster: Add support for final snapshot with Redis engine [GH-15592] From 6b8918cf8d66fb38e5104411379dce7a5d95d46a Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 12 Jan 2021 22:14:39 -0500 Subject: [PATCH 0629/1212] data-source/aws_api_gateway_domain_name: Modernize to latest codebase Output from acceptance testing: ``` --- PASS: TestAccDataSourceAwsApiGatewayDomainName_basic (21.67s) ``` --- ...data_source_aws_api_gateway_domain_name.go | 35 +++++---- ...source_aws_api_gateway_domain_name_test.go | 77 +++++-------------- .../d/api_gateway_domain_name.html.markdown | 41 ++++------ 3 files changed, 55 insertions(+), 98 deletions(-) diff --git a/aws/data_source_aws_api_gateway_domain_name.go b/aws/data_source_aws_api_gateway_domain_name.go index 6cc7e97f5fa..dbb6e6df498 100644 --- a/aws/data_source_aws_api_gateway_domain_name.go +++ b/aws/data_source_aws_api_gateway_domain_name.go @@ -2,14 +2,12 @@ package aws import ( "fmt" - "log" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) @@ -85,20 +83,21 @@ func dataSourceAwsApiGatewayDomainName() *schema.Resource { func dataSourceAwsApiGatewayDomainNameRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).apigatewayconn - targetDomainName := d.Get("domain_name").(string) - log.Printf("[DEBUG] Reading API Gateway Domain Name %s", targetDomainName) - domainName, err := conn.GetDomainName(&apigateway.GetDomainNameInput{ - DomainName: aws.String(targetDomainName), - }) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == apigateway.ErrCodeNotFoundException { - return fmt.Errorf("API Gateway Domain Name (%s) not found", targetDomainName) - } - return err + input := &apigateway.GetDomainNameInput{} + + if v, ok := 
d.GetOk("domain_name"); ok { + input.DomainName = aws.String(v.(string)) + } + + domainName, err := conn.GetDomainName(input) + + if err != nil { + return fmt.Errorf("error getting API Gateway Domain Name: %w", err) } - d.SetId(*domainName.DomainName) + d.SetId(aws.StringValue(domainName.DomainName)) + arn := arn.ARN{ Partition: meta.(*AWSClient).partition, Service: "apigateway", @@ -108,13 +107,14 @@ func dataSourceAwsApiGatewayDomainNameRead(d *schema.ResourceData, meta interfac d.Set("arn", arn) d.Set("certificate_arn", domainName.CertificateArn) d.Set("certificate_name", domainName.CertificateName) - if err := d.Set("certificate_upload_date", domainName.CertificateUploadDate.Format(time.RFC3339)); err != nil { - log.Printf("[DEBUG] Error setting certificate_upload_date: %s", err) + + if domainName.CertificateUploadDate != nil { + d.Set("certificate_upload_date", domainName.CertificateUploadDate.Format(time.RFC3339)) } + d.Set("cloudfront_domain_name", domainName.DistributionDomainName) d.Set("cloudfront_zone_id", cloudFrontRoute53ZoneID) d.Set("domain_name", domainName.DomainName) - d.Set("security_policy", domainName.SecurityPolicy) if err := d.Set("endpoint_configuration", flattenApiGatewayEndpointConfiguration(domainName.EndpointConfiguration)); err != nil { return fmt.Errorf("error setting endpoint_configuration: %s", err) @@ -124,6 +124,7 @@ func dataSourceAwsApiGatewayDomainNameRead(d *schema.ResourceData, meta interfac d.Set("regional_certificate_name", domainName.RegionalCertificateName) d.Set("regional_domain_name", domainName.RegionalDomainName) d.Set("regional_zone_id", domainName.RegionalHostedZoneId) + d.Set("security_policy", domainName.SecurityPolicy) if err := d.Set("tags", keyvaluetags.ApigatewayKeyValueTags(domainName.Tags).IgnoreAws().Map()); err != nil { return fmt.Errorf("error setting tags: %s", err) diff --git a/aws/data_source_aws_api_gateway_domain_name_test.go b/aws/data_source_aws_api_gateway_domain_name_test.go index bea732b5f01..999c006a53e 100644 --- a/aws/data_source_aws_api_gateway_domain_name_test.go +++ b/aws/data_source_aws_api_gateway_domain_name_test.go @@ -2,56 +2,13 @@ package aws import ( "fmt" - "os" - "regexp" "testing" - "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) -func TestAccDataSourceAwsApiGatewayDomainName_CertificateArn(t *testing.T) { - certificateArn := os.Getenv("AWS_API_GATEWAY_DOMAIN_NAME_CERTIFICATE_ARN") - if certificateArn == "" { - t.Skip( - "Environment variable AWS_API_GATEWAY_DOMAIN_NAME_CERTIFICATE_ARN is not set. " + - "This environment variable must be set to the ARN of " + - "an ISSUED ACM certificate in us-east-1 to enable this test.") - } - - // This test must always run in us-east-1 - // BadRequestException: Invalid certificate ARN: arn:aws:acm:us-west-2:123456789012:certificate/xxxxx. Certificate must be in 'us-east-1'. 
- oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - - resourceName := "aws_api_gateway_domain_name.test" - dataSourceName := "data.aws_api_gateway_domain_name.test" - rName := fmt.Sprintf("tf-acc-%s.terraformtest.com", acctest.RandString(8)) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayDomainNameDestroy, - Steps: []resource.TestStep{ - { - Config: testAccDataSourceAwsApiGatewayDomainNameConfig_CertificateArn(rName, certificateArn), - Check: resource.ComposeTestCheckFunc( - testAccMatchResourceAttrRegionalARNNoAccount(dataSourceName, "arn", "apigateway", regexp.MustCompile(`/domainnames/+.`)), - resource.TestCheckResourceAttr(dataSourceName, "domain_name", rName), - resource.TestCheckResourceAttr(dataSourceName, "cloudfront_zone_id", "Z2FDTNDATAQYW2"), - resource.TestCheckResourceAttrPair(resourceName, "arn", dataSourceName, "arn"), - resource.TestCheckResourceAttrPair(resourceName, "domain_name", dataSourceName, "domain_name"), - resource.TestCheckResourceAttrPair(resourceName, "cloudfront_domain_name", dataSourceName, "cloudfront_domain_name"), - resource.TestCheckResourceAttrPair(resourceName, "cloudfront_zone_id", dataSourceName, "cloudfront_zone_id"), - resource.TestCheckResourceAttrPair(resourceName, "certificate_upload_date", dataSourceName, "certificate_upload_date"), - ), - }, - }, - }) -} - -func TestAccDataSourceAwsApiGatewayDomainName_RegionalCertificateArn(t *testing.T) { +func TestAccDataSourceAwsApiGatewayDomainName_basic(t *testing.T) { resourceName := "aws_api_gateway_domain_name.test" dataSourceName := "data.aws_api_gateway_domain_name.test" rName := fmt.Sprintf("tf-acc-%s.terraformtest.com", acctest.RandString(8)) @@ -67,12 +24,20 @@ func TestAccDataSourceAwsApiGatewayDomainName_RegionalCertificateArn(t *testing. 
{ Config: testAccDataSourceAwsApiGatewayDomainNameConfig_RegionalCertificateArn(rName, key, certificate), Check: resource.ComposeTestCheckFunc( - testAccMatchResourceAttrRegionalARNNoAccount(dataSourceName, "arn", "apigateway", regexp.MustCompile(`/domainnames/+.`)), resource.TestCheckResourceAttrPair(resourceName, "arn", dataSourceName, "arn"), + resource.TestCheckResourceAttrPair(resourceName, "certificate_arn", dataSourceName, "certificate_arn"), + resource.TestCheckResourceAttrPair(resourceName, "certificate_name", dataSourceName, "certificate_name"), + resource.TestCheckResourceAttrPair(resourceName, "certificate_upload_date", dataSourceName, "certificate_upload_date"), + resource.TestCheckResourceAttrPair(resourceName, "cloudfront_domain_name", dataSourceName, "cloudfront_domain_name"), + resource.TestCheckResourceAttrPair(resourceName, "cloudfront_zone_id", dataSourceName, "cloudfront_zone_id"), resource.TestCheckResourceAttrPair(resourceName, "domain_name", dataSourceName, "domain_name"), + resource.TestCheckResourceAttrPair(resourceName, "endpoint_configuration.#", dataSourceName, "endpoint_configuration.#"), + resource.TestCheckResourceAttrPair(resourceName, "regional_certificate_arn", dataSourceName, "regional_certificate_arn"), + resource.TestCheckResourceAttrPair(resourceName, "regional_certificate_name", dataSourceName, "regional_certificate_name"), resource.TestCheckResourceAttrPair(resourceName, "regional_domain_name", dataSourceName, "regional_domain_name"), resource.TestCheckResourceAttrPair(resourceName, "regional_zone_id", dataSourceName, "regional_zone_id"), - resource.TestCheckResourceAttrPair(resourceName, "certificate_upload_date", dataSourceName, "certificate_upload_date"), + resource.TestCheckResourceAttrPair(resourceName, "security_policy", dataSourceName, "security_policy"), + resource.TestCheckResourceAttrPair(resourceName, "tags.%", dataSourceName, "tags.%"), ), }, }, @@ -99,21 +64,21 @@ data "aws_api_gateway_domain_name" "test" { func testAccDataSourceAwsApiGatewayDomainNameConfig_RegionalCertificateArn(domainName, key, certificate string) string { return fmt.Sprintf(` resource "aws_acm_certificate" "test" { - certificate_body = "%[2]s" - private_key = "%[3]s" + certificate_body = "%[2]s" + private_key = "%[3]s" } resource "aws_api_gateway_domain_name" "test" { - domain_name = %[1]q - regional_certificate_arn = "${aws_acm_certificate.test.arn}" + domain_name = %[1]q + regional_certificate_arn = aws_acm_certificate.test.arn - endpoint_configuration { - types = ["REGIONAL"] - } + endpoint_configuration { + types = ["REGIONAL"] + } } data "aws_api_gateway_domain_name" "test" { - domain_name = "${aws_api_gateway_domain_name.test.domain_name}" + domain_name = aws_api_gateway_domain_name.test.domain_name } `, domainName, tlsPemEscapeNewlines(certificate), tlsPemEscapeNewlines(key)) } diff --git a/website/docs/d/api_gateway_domain_name.html.markdown b/website/docs/d/api_gateway_domain_name.html.markdown index c782ab76f42..0530a87c3ae 100644 --- a/website/docs/d/api_gateway_domain_name.html.markdown +++ b/website/docs/d/api_gateway_domain_name.html.markdown @@ -13,39 +13,30 @@ Use this data source to get the custom domain name for use with AWS API Gateway. ## Example Usage ```hcl -resource "aws_api_gateway_domain_name" "example" { +data "aws_api_gateway_domain_name" "example" { domain_name = "api.example.com" } ``` ## Argument Reference - * `domain_name` - (Required) The fully-qualified domain name to look up. - If no domain name is found, an error will be returned. 
+* `domain_name` - (Required) The fully-qualified domain name to look up. If no domain name is found, an error will be returned. ## Attributes Reference In addition to the arguments, the following attributes are exported: - * `arn` - The ARN of the found custom domain name. - * `certificate_arn` - The ARN for an AWS-managed certificate - that is used by edge-optimized endpoint for this domain name. - * `certificate_name` - The name of the certificate that is used by - edge-optimized endpoint for this domain name. - * `certificate_upload_date` - The upload date associated with - the domain certificate. - * `cloudfront_domain_name` - The hostname created by Cloudfront to represent - the distribution that implements this domain name mapping. - * `cloudfront_zone_id` - For convenience, the hosted zone ID (`Z2FDTNDATAQYW2`) - that can be used to create a Route53 alias record for the distribution. - * `endpoint_configuration` - The endpoint configuration of this domain name - showing the endpoint types of the domain name. - * `regional_certificate_arn` - The ARN for an AWS-managed certificate - that is used for validating the regional domain name. - * `regional_certificate_name` - The user-friendly name of the certificate - that is used by regional endpoint for this domain name. - * `regional_domain_name` - The hostname for the custom domain's - regional endpoint. - * `regional_zone_id` - The hosted zone ID that can be used to create - a Route53 alias record for the regional endpoint. - * `tags` - A mapping of tags for the resource. +* `arn` - The ARN of the found custom domain name. +* `certificate_arn` - The ARN for an AWS-managed certificate that is used by edge-optimized endpoint for this domain name. +* `certificate_name` - The name of the certificate that is used by edge-optimized endpoint for this domain name. +* `certificate_upload_date` - The upload date associated with the domain certificate. +* `cloudfront_domain_name` - The hostname created by Cloudfront to represent the distribution that implements this domain name mapping. +* `cloudfront_zone_id` - For convenience, the hosted zone ID (`Z2FDTNDATAQYW2`) that can be used to create a Route53 alias record for the distribution. +* `endpoint_configuration` - List of objects with the endpoint configuration of this domain name. + * `types` - List of endpoint types. +* `regional_certificate_arn` - The ARN for an AWS-managed certificate that is used for validating the regional domain name. +* `regional_certificate_name` - The user-friendly name of the certificate that is used by regional endpoint for this domain name. +* `regional_domain_name` - The hostname for the custom domain's regional endpoint. +* `regional_zone_id` - The hosted zone ID that can be used to create a Route53 alias record for the regional endpoint. +* `security_policy` - The security policy for the domain name. +* `tags` - Key-value map of tags for the resource. 
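As a usage sketch, the modernized data source pairs naturally with a Route53 alias record for an edge-optimized domain; the hosted zone ID and domain below are illustrative assumptions, while `cloudfront_domain_name` and `cloudfront_zone_id` come from the attributes documented above.

```hcl
data "aws_api_gateway_domain_name" "example" {
  domain_name = "api.example.com"
}

# Alias the custom domain to the CloudFront distribution backing it.
resource "aws_route53_record" "example" {
  zone_id = "Z0123456789ABCDEFGHIJ" # hypothetical Route53 hosted zone ID
  name    = data.aws_api_gateway_domain_name.example.domain_name
  type    = "A"

  alias {
    evaluate_target_health = false
    name                   = data.aws_api_gateway_domain_name.example.cloudfront_domain_name
    zone_id                = data.aws_api_gateway_domain_name.example.cloudfront_zone_id
  }
}
```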
From eb3abd675770dbd3f3abbcc1ac4e96024c1995b7 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 12 Jan 2021 22:15:56 -0500 Subject: [PATCH 0630/1212] Update CHANGELOG for #12489 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3736d4e7f60..e95d59a7f0a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ FEATURES +* **New Data Source:** `aws_api_gateway_domain_name` [GH-12489] * **New Resource:** `aws_fms_policy` ([#9594](https://github.com/hashicorp/terraform-provider-aws/issues/9594)) ENHANCEMENTS From dfe379ebaa50082fca7ccfdde624cbfcc7c268da Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 12 Jan 2021 22:17:12 -0500 Subject: [PATCH 0631/1212] tests/data-source/aws_api_gateway_domain_name: Remove unused function --- ...a_source_aws_api_gateway_domain_name_test.go | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/aws/data_source_aws_api_gateway_domain_name_test.go b/aws/data_source_aws_api_gateway_domain_name_test.go index 999c006a53e..830b69f9b6e 100644 --- a/aws/data_source_aws_api_gateway_domain_name_test.go +++ b/aws/data_source_aws_api_gateway_domain_name_test.go @@ -44,23 +44,6 @@ func TestAccDataSourceAwsApiGatewayDomainName_basic(t *testing.T) { }) } -func testAccDataSourceAwsApiGatewayDomainNameConfig_CertificateArn(domainName, certificateArn string) string { - return fmt.Sprintf(` -resource "aws_api_gateway_domain_name" "test" { - domain_name = "%s" - certificate_arn = "%s" - - endpoint_configuration { - types = ["EDGE"] - } -} - -data "aws_api_gateway_domain_name" "test" { - domain_name = "${aws_api_gateway_domain_name.test.domain_name}" -} -`, domainName, certificateArn) -} - func testAccDataSourceAwsApiGatewayDomainNameConfig_RegionalCertificateArn(domainName, key, certificate string) string { return fmt.Sprintf(` resource "aws_acm_certificate" "test" { From 10b67df45a07e16885031347eff4687edeb1cc84 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 12 Jan 2021 22:55:17 -0500 Subject: [PATCH 0632/1212] data-source/aws_api_gateway_domain_name: Add missing ignore tags configuration --- aws/data_source_aws_api_gateway_domain_name.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/aws/data_source_aws_api_gateway_domain_name.go b/aws/data_source_aws_api_gateway_domain_name.go index dbb6e6df498..34cfd5111c7 100644 --- a/aws/data_source_aws_api_gateway_domain_name.go +++ b/aws/data_source_aws_api_gateway_domain_name.go @@ -83,6 +83,7 @@ func dataSourceAwsApiGatewayDomainName() *schema.Resource { func dataSourceAwsApiGatewayDomainNameRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).apigatewayconn + ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig input := &apigateway.GetDomainNameInput{} @@ -126,7 +127,7 @@ func dataSourceAwsApiGatewayDomainNameRead(d *schema.ResourceData, meta interfac d.Set("regional_zone_id", domainName.RegionalHostedZoneId) d.Set("security_policy", domainName.SecurityPolicy) - if err := d.Set("tags", keyvaluetags.ApigatewayKeyValueTags(domainName.Tags).IgnoreAws().Map()); err != nil { + if err := d.Set("tags", keyvaluetags.ApigatewayKeyValueTags(domainName.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { return fmt.Errorf("error setting tags: %s", err) } From 616f49388ac232cbe453ed74e39c443a5181c2cd Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Jan 2021 23:09:24 -0500 Subject: [PATCH 0633/1212] resource/aws_api_gateway_domain_name: Support mutual TLS authentication (#15258) Output from 
acceptance testing: ``` --- PASS: TestAccAWSAPIGatewayDomainName_CertificateArn (965.23s) --- PASS: TestAccAWSAPIGatewayDomainName_disappears (198.12s) --- PASS: TestAccAWSAPIGatewayDomainName_MutualTlsAuthentication (160.02s) --- PASS: TestAccAWSAPIGatewayDomainName_RegionalCertificateArn (364.00s) --- PASS: TestAccAWSAPIGatewayDomainName_SecurityPolicy (58.65s) --- PASS: TestAccAWSAPIGatewayDomainName_Tags (151.50s) ``` --- aws/resource_aws_api_gateway_domain_name.go | 73 ++++++++++- ...source_aws_api_gateway_domain_name_test.go | 113 +++++++++++++++++- .../r/api_gateway_domain_name.html.markdown | 7 ++ 3 files changed, 186 insertions(+), 7 deletions(-) diff --git a/aws/resource_aws_api_gateway_domain_name.go b/aws/resource_aws_api_gateway_domain_name.go index 7febcfd1275..e48563eaadc 100644 --- a/aws/resource_aws_api_gateway_domain_name.go +++ b/aws/resource_aws_api_gateway_domain_name.go @@ -119,6 +119,25 @@ func resourceAwsApiGatewayDomainName() *schema.Resource { }, }, + "mutual_tls_authentication": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "truststore_uri": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "truststore_version": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "regional_certificate_arn": { Type: schema.TypeString, Optional: true, @@ -154,7 +173,8 @@ func resourceAwsApiGatewayDomainNameCreate(d *schema.ResourceData, meta interfac log.Printf("[DEBUG] Creating API Gateway Domain Name") params := &apigateway.CreateDomainNameInput{ - DomainName: aws.String(d.Get("domain_name").(string)), + DomainName: aws.String(d.Get("domain_name").(string)), + MutualTlsAuthentication: expandApiGatewayMutualTlsAuthentication(d.Get("mutual_tls_authentication").([]interface{})), } if v, ok := d.GetOk("certificate_arn"); ok { @@ -250,6 +270,10 @@ func resourceAwsApiGatewayDomainNameRead(d *schema.ResourceData, meta interface{ if err := d.Set("endpoint_configuration", flattenApiGatewayEndpointConfiguration(domainName.EndpointConfiguration)); err != nil { return fmt.Errorf("error setting endpoint_configuration: %s", err) } + err = d.Set("mutual_tls_authentication", flattenApiGatewayMutualTlsAuthentication(domainName.MutualTlsAuthentication)) + if err != nil { + return fmt.Errorf("error setting mutual_tls_authentication: %s", err) + } d.Set("regional_certificate_arn", domainName.RegionalCertificateArn) d.Set("regional_certificate_name", domainName.RegionalCertificateName) @@ -316,6 +340,25 @@ func resourceAwsApiGatewayDomainNameUpdateOperations(d *schema.ResourceData) []* } } + if d.HasChange("mutual_tls_authentication") { + vMutualTlsAuthentication := d.Get("mutual_tls_authentication").([]interface{}) + + if len(vMutualTlsAuthentication) == 0 || vMutualTlsAuthentication[0] == nil { + // To disable mutual TLS for a custom domain name, remove the truststore from your custom domain name. 
+ operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpReplace), + Path: aws.String("/mutualTlsAuthentication/truststoreUri"), + Value: aws.String(""), + }) + } else { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpReplace), + Path: aws.String("/mutualTlsAuthentication/truststoreVersion"), + Value: aws.String(vMutualTlsAuthentication[0].(map[string]interface{})["truststore_version"].(string)), + }) + } + } + return operations } @@ -360,3 +403,31 @@ func resourceAwsApiGatewayDomainNameDelete(d *schema.ResourceData, meta interfac return nil } + +func expandApiGatewayMutualTlsAuthentication(vMutualTlsAuthentication []interface{}) *apigateway.MutualTlsAuthenticationInput { + if len(vMutualTlsAuthentication) == 0 || vMutualTlsAuthentication[0] == nil { + return nil + } + mMutualTlsAuthentication := vMutualTlsAuthentication[0].(map[string]interface{}) + + mutualTlsAuthentication := &apigateway.MutualTlsAuthenticationInput{ + TruststoreUri: aws.String(mMutualTlsAuthentication["truststore_uri"].(string)), + } + + if vTruststoreVersion, ok := mMutualTlsAuthentication["truststore_version"].(string); ok && vTruststoreVersion != "" { + mutualTlsAuthentication.TruststoreVersion = aws.String(vTruststoreVersion) + } + + return mutualTlsAuthentication +} + +func flattenApiGatewayMutualTlsAuthentication(mutualTlsAuthentication *apigateway.MutualTlsAuthentication) []interface{} { + if mutualTlsAuthentication == nil { + return []interface{}{} + } + + return []interface{}{map[string]interface{}{ + "truststore_uri": aws.StringValue(mutualTlsAuthentication.TruststoreUri), + "truststore_version": aws.StringValue(mutualTlsAuthentication.TruststoreVersion), + }} +} diff --git a/aws/resource_aws_api_gateway_domain_name_test.go b/aws/resource_aws_api_gateway_domain_name_test.go index 3d94c887425..4dc3bb755ce 100644 --- a/aws/resource_aws_api_gateway_domain_name_test.go +++ b/aws/resource_aws_api_gateway_domain_name_test.go @@ -285,6 +285,50 @@ func TestAccAWSAPIGatewayDomainName_disappears(t *testing.T) { }) } +func TestAccAWSAPIGatewayDomainName_MutualTlsAuthentication(t *testing.T) { + rootDomain := testAccAwsAcmCertificateDomainFromEnv(t) + domain := testAccAwsAcmCertificateRandomSubDomain(rootDomain) + + var v apigateway.DomainName + resourceName := "aws_api_gateway_domain_name.test" + acmCertificateResourceName := "aws_acm_certificate.test" + s3BucketObjectResourceName := "aws_s3_bucket_object.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayDomainNameDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayDomainNameConfig_MutualTlsAuthentication(rootDomain, domain, rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayDomainNameExists(resourceName, &v), + testAccMatchResourceAttrRegionalARNNoAccount(resourceName, "arn", "apigateway", regexp.MustCompile(`/domainnames/+.`)), + resource.TestCheckResourceAttrPair(resourceName, "domain_name", acmCertificateResourceName, "domain_name"), + resource.TestCheckResourceAttr(resourceName, "mutual_tls_authentication.#", "1"), + resource.TestCheckResourceAttr(resourceName, "mutual_tls_authentication.0.truststore_uri", fmt.Sprintf("s3://%s/%s", rName, rName)), + resource.TestCheckResourceAttrPair(resourceName, "mutual_tls_authentication.0.truststore_version", s3BucketObjectResourceName, 
"version_id"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + // Test disabling mutual TLS authentication. + { + Config: testAccAWSAPIGatewayDomainNameConfig_MutualTlsAuthenticationMissing(rootDomain, domain), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayDomainNameExists(resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "domain_name", acmCertificateResourceName, "domain_name"), + resource.TestCheckResourceAttr(resourceName, "mutual_tls_authentication.#", "0"), + ), + }, + }, + }) +} + func testAccCheckAWSAPIGatewayDomainNameExists(n string, res *apigateway.DomainName) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -341,10 +385,8 @@ func testAccCheckAWSAPIGatewayDomainNameDestroy(s *terraform.State) error { return nil } -func testAccAWSAPIGatewayDomainNameConfig_CertificateArn(rootDomain string, domain string) string { - return composeConfig( - testAccApigatewayEdgeDomainNameRegionProviderConfig(), - fmt.Sprintf(` +func testAccAWSAPIGatewayDomainNameConfigPublicCert(rootDomain, domain string) string { + return fmt.Sprintf(` data "aws_route53_zone" "test" { name = %[1]q private_zone = false @@ -367,7 +409,6 @@ resource "aws_acm_certificate" "test" { # type = dvo.resource_record_type # } # } - # allow_overwrite = true # name = each.value.name # records = [each.value.record] @@ -389,7 +430,14 @@ resource "aws_acm_certificate_validation" "test" { certificate_arn = aws_acm_certificate.test.arn validation_record_fqdns = [aws_route53_record.test.fqdn] } +`, rootDomain, domain) +} +func testAccAWSAPIGatewayDomainNameConfig_CertificateArn(rootDomain string, domain string) string { + return composeConfig( + testAccApigatewayEdgeDomainNameRegionProviderConfig(), + testAccAWSAPIGatewayDomainNameConfigPublicCert(rootDomain, domain), + ` resource "aws_api_gateway_domain_name" "test" { domain_name = aws_acm_certificate.test.domain_name certificate_arn = aws_acm_certificate_validation.test.certificate_arn @@ -398,7 +446,7 @@ resource "aws_api_gateway_domain_name" "test" { types = ["EDGE"] } } -`, rootDomain, domain)) +`) } func testAccAWSAPIGatewayDomainNameConfig_CertificateName(domainName, key, certificate, chainCertificate string) string { @@ -510,3 +558,56 @@ resource "aws_api_gateway_domain_name" "test" { } `, domainName, tlsPemEscapeNewlines(certificate), tlsPemEscapeNewlines(key), tagKey1, tagValue1, tagKey2, tagValue2) } + +func testAccAWSAPIGatewayDomainNameConfig_MutualTlsAuthentication(rootDomain, domain, rName string) string { + return composeConfig( + testAccAWSAPIGatewayDomainNameConfigPublicCert(rootDomain, domain), + fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + force_destroy = true + + versioning { + enabled = true + } +} + +resource "aws_s3_bucket_object" "test" { + bucket = aws_s3_bucket.test.id + key = %[1]q + source = "test-fixtures/apigateway-domain-name-truststore-1.pem" +} + +resource "aws_api_gateway_domain_name" "test" { + domain_name = aws_acm_certificate.test.domain_name + regional_certificate_arn = aws_acm_certificate_validation.test.certificate_arn + security_policy = "TLS_1_2" + + endpoint_configuration { + types = ["REGIONAL"] + } + + mutual_tls_authentication { + truststore_uri = "s3://${aws_s3_bucket_object.test.bucket}/${aws_s3_bucket_object.test.key}" + truststore_version = aws_s3_bucket_object.test.version_id + } +} +`, rName)) +} + +func 
testAccAWSAPIGatewayDomainNameConfig_MutualTlsAuthenticationMissing(rootDomain, domain string) string { + return composeConfig( + testAccAWSAPIGatewayDomainNameConfigPublicCert(rootDomain, domain), + ` +resource "aws_api_gateway_domain_name" "test" { + domain_name = aws_acm_certificate.test.domain_name + regional_certificate_arn = aws_acm_certificate_validation.test.certificate_arn + security_policy = "TLS_1_2" + + endpoint_configuration { + types = ["REGIONAL"] + } +} +`) +} diff --git a/website/docs/r/api_gateway_domain_name.html.markdown b/website/docs/r/api_gateway_domain_name.html.markdown index eee2427b32b..004a4b9eb3e 100644 --- a/website/docs/r/api_gateway_domain_name.html.markdown +++ b/website/docs/r/api_gateway_domain_name.html.markdown @@ -154,6 +154,7 @@ The following arguments are supported: * `domain_name` - (Required) The fully-qualified domain name to register * `endpoint_configuration` - (Optional) Configuration block defining API endpoint information including type. Defined below. +* `mutual_tls_authentication` - (Optional) The mutual TLS authentication configuration for the domain name. Defined below. * `security_policy` - (Optional) The Transport Layer Security (TLS) version + cipher suite for this DomainName. The valid values are `TLS_1_0` and `TLS_1_2`. Must be configured to perform drift detection. * `tags` - (Optional) Key-value map of resource tags @@ -183,6 +184,12 @@ When uploading a certificate, the following arguments are supported: * `types` - (Required) A list of endpoint types. This resource currently only supports managing a single value. Valid values: `EDGE` or `REGIONAL`. If unspecified, defaults to `EDGE`. Must be declared as `REGIONAL` in non-Commercial partitions. Refer to the [documentation](https://docs.aws.amazon.com/apigateway/latest/developerguide/create-regional-api.html) for more information on the difference between edge-optimized and regional APIs. +### mutual_tls_authentication + +* `truststore_uri` - (Required) An Amazon S3 URL that specifies the truststore for mutual TLS authentication, for example, `s3://bucket-name/key-name`. +The truststore can contain certificates from public or private certificate authorities. To update the truststore, upload a new version to S3, and then update your custom domain name to use the new version. +* `truststore_version` - (Optional) The version of the S3 object that contains the truststore. To specify a version, you must have versioning enabled for the S3 bucket. 
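A minimal configuration sketch for the new block, adapted from the acceptance test above (bucket, key, and certificate names are illustrative): the truststore is a PEM bundle in S3, and enabling versioning on the bucket lets `truststore_version` pin a specific object version.

```hcl
resource "aws_api_gateway_domain_name" "example" {
  domain_name              = "api.example.com"
  regional_certificate_arn = aws_acm_certificate.example.arn
  security_policy          = "TLS_1_2"

  endpoint_configuration {
    types = ["REGIONAL"]
  }

  mutual_tls_authentication {
    # PEM bundle of CA certificates used to validate client certificates.
    truststore_uri = "s3://example-truststore-bucket/truststore.pem"
  }
}
```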
+ ## Attributes Reference In addition to all arguments above, the following attributes are exported: From 9b338a5491c5f9d770e63ca4b7663e761b35b28d Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 12 Jan 2021 23:10:30 -0500 Subject: [PATCH 0634/1212] Update CHANGELOG for #15258 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e95d59a7f0a..74050d89f27 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ FEATURES ENHANCEMENTS +* resource/aws_api_gateway_domain_name: Add `mutual_tls_authentication` configuration block [GH-15258] * resource/aws_api_gateway_method: Add `operation_name` argument [GH-13282] * resource/aws_api_gateway_rest_api: Add `parameters` argument [GH-7374] * resource/aws_dms_endpoint: Add `s3_settings` `date_partition_enabled` argument [GH-16827] From 801cd3627c79a92fc6fb603800f2e438d91f0900 Mon Sep 17 00:00:00 2001 From: Phil Nichol <35630607+philnichol@users.noreply.github.com> Date: Wed, 13 Jan 2021 04:31:00 +0000 Subject: [PATCH 0635/1212] resource/aws_api_gateway_rest_api: Add `disable_execute_api_endpoint` argument (#16198) Output from acceptance testing: ``` --- PASS: TestAccAWSAPIGatewayRestApi_api_key_source (49.76s) --- PASS: TestAccAWSAPIGatewayRestApi_basic (733.13s) --- PASS: TestAccAWSAPIGatewayRestApi_disable_execute_api_endpoint (29.79s) --- PASS: TestAccAWSAPIGatewayRestApi_disappears (30.64s) --- PASS: TestAccAWSAPIGatewayRestApi_EndpointConfiguration (237.77s) --- PASS: TestAccAWSAPIGatewayRestApi_EndpointConfiguration_Private (9.10s) --- PASS: TestAccAWSAPIGatewayRestApi_EndpointConfiguration_VPCEndpoint (222.18s) --- PASS: TestAccAWSAPIGatewayRestApi_openapi (17.50s) --- PASS: TestAccAWSAPIGatewayRestApi_Parameters (38.95s) --- PASS: TestAccAWSAPIGatewayRestApi_policy (437.20s) --- PASS: TestAccAWSAPIGatewayRestApi_tags (20.53s) ``` --- aws/resource_aws_api_gateway_rest_api.go | 20 ++++++++ aws/resource_aws_api_gateway_rest_api_test.go | 47 +++++++++++++++++++ .../docs/r/api_gateway_rest_api.html.markdown | 1 + 3 files changed, 68 insertions(+) diff --git a/aws/resource_aws_api_gateway_rest_api.go b/aws/resource_aws_api_gateway_rest_api.go index 8ad41302946..e8a1234c1c6 100644 --- a/aws/resource_aws_api_gateway_rest_api.go +++ b/aws/resource_aws_api_gateway_rest_api.go @@ -65,6 +65,12 @@ func resourceAwsApiGatewayRestApi() *schema.Resource { Optional: true, }, + "disable_execute_api_endpoint": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "parameters": { Type: schema.TypeMap, Optional: true, @@ -156,6 +162,10 @@ func resourceAwsApiGatewayRestApiCreate(d *schema.ResourceData, meta interface{} params.ApiKeySource = aws.String(v.(string)) } + if v, ok := d.GetOk("disable_execute_api_endpoint"); ok { + params.DisableExecuteApiEndpoint = aws.Bool(v.(bool)) + } + if v, ok := d.GetOk("policy"); ok { params.Policy = aws.String(v.(string)) } @@ -241,6 +251,7 @@ func resourceAwsApiGatewayRestApiRead(d *schema.ResourceData, meta interface{}) d.Set("name", api.Name) d.Set("description", api.Description) d.Set("api_key_source", api.ApiKeySource) + d.Set("disable_execute_api_endpoint", api.DisableExecuteApiEndpoint) // The API returns policy as an escaped JSON string // {\\\"Version\\\":\\\"2012-10-17\\\",...} @@ -324,6 +335,15 @@ func resourceAwsApiGatewayRestApiUpdateOperations(d *schema.ResourceData) []*api }) } + if d.HasChange("disable_execute_api_endpoint") { + value := strconv.FormatBool(d.Get("disable_execute_api_endpoint").(bool)) + operations = append(operations, 
&apigateway.PatchOperation{ + Op: aws.String(apigateway.OpReplace), + Path: aws.String("/disableExecuteApiEndpoint"), + Value: aws.String(value), + }) + } + if d.HasChange("policy") { operations = append(operations, &apigateway.PatchOperation{ Op: aws.String(apigateway.OpReplace), diff --git a/aws/resource_aws_api_gateway_rest_api_test.go b/aws/resource_aws_api_gateway_rest_api_test.go index 73cc2135a3d..820cefcb3b7 100644 --- a/aws/resource_aws_api_gateway_rest_api_test.go +++ b/aws/resource_aws_api_gateway_rest_api_test.go @@ -84,6 +84,7 @@ func TestAccAWSAPIGatewayRestApi_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "description", ""), resource.TestCheckResourceAttr(resourceName, "api_key_source", "HEADER"), + resource.TestCheckResourceAttr(resourceName, "disable_execute_api_endpoint", `false`), resource.TestCheckResourceAttr(resourceName, "minimum_compression_size", "0"), resource.TestCheckResourceAttrSet(resourceName, "created_date"), resource.TestCheckResourceAttrSet(resourceName, "execution_arn"), @@ -106,6 +107,7 @@ func TestAccAWSAPIGatewayRestApi_basic(t *testing.T) { testAccCheckAWSAPIGatewayRestAPIMinimumCompressionSizeAttribute(&conf, 10485760), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "description", "test"), + resource.TestCheckResourceAttr(resourceName, "disable_execute_api_endpoint", `false`), resource.TestCheckResourceAttr(resourceName, "minimum_compression_size", "10485760"), resource.TestCheckResourceAttrSet(resourceName, "created_date"), resource.TestCheckResourceAttrSet(resourceName, "execution_arn"), @@ -437,6 +439,42 @@ func TestAccAWSAPIGatewayRestApi_api_key_source(t *testing.T) { }) } +func TestAccAWSAPIGatewayRestApi_disable_execute_api_endpoint(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_api_gateway_rest_api.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccAPIGatewayTypeEDGEPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayRestAPIConfig_DisableExecuteApiEndpoint(rName, false), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, "disable_execute_api_endpoint", `false`), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSAPIGatewayRestAPIConfig_DisableExecuteApiEndpoint(rName, true), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, "disable_execute_api_endpoint", `true`), + ), + }, + { + Config: testAccAWSAPIGatewayRestAPIConfig_DisableExecuteApiEndpoint(rName, false), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, "disable_execute_api_endpoint", `false`), + ), + }, + }, + }) +} + func TestAccAWSAPIGatewayRestApi_policy(t *testing.T) { resourceName := "aws_api_gateway_rest_api.test" expectedPolicyText := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":"*"},"Action":"execute-api:Invoke","Resource":"*","Condition":{"IpAddress":{"aws:SourceIp":"123.123.123.123/32"}}}]}` @@ -704,6 +742,15 @@ resource "aws_api_gateway_rest_api" "test" { `, rName, endpointType) } +func testAccAWSAPIGatewayRestAPIConfig_DisableExecuteApiEndpoint(rName string, disabled bool) string { + return fmt.Sprintf(` +resource 
"aws_api_gateway_rest_api" "test" { + name = "%s" + disable_execute_api_endpoint = %t +} +`, rName, disabled) +} + func testAccAWSAPIGatewayRestAPIConfig_Name(rName string) string { return fmt.Sprintf(` resource "aws_api_gateway_rest_api" "test" { diff --git a/website/docs/r/api_gateway_rest_api.html.markdown b/website/docs/r/api_gateway_rest_api.html.markdown index c8a40b436db..35600d63fe6 100644 --- a/website/docs/r/api_gateway_rest_api.html.markdown +++ b/website/docs/r/api_gateway_rest_api.html.markdown @@ -48,6 +48,7 @@ The following arguments are supported: * `parameters` - (Optional) Map of customizations for importing the specification in the `body` argument. For example, to exclude DocumentationParts from an imported API, set `ignore` equal to `documentation`. Additional documentation, including other parameters such as `basepath`, can be found in the [API Gateway Developer Guide](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-import-api.html). * `policy` - (Optional) JSON formatted policy document that controls access to the API Gateway. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). Terraform will only perform drift detection of its value when present in a configuration. It is recommended to use the [`aws_api_gateway_rest_api_policy` resource](/docs/providers/aws/r/api_gateway_rest_api_policy.html) instead. * `api_key_source` - (Optional) The source of the API key for requests. Valid values are HEADER (default) and AUTHORIZER. +* `disable_execute_api_endpoint` - (Optional) Specifies whether clients can invoke your API by using the default execute-api endpoint. By default, clients can invoke your API with the default https://{api_id}.execute-api.{region}.amazonaws.com endpoint. To require that clients use a custom domain name to invoke your API, disable the default endpoint. Defaults to `false`. * `tags` - (Optional) Key-value map of resource tags __Note__: If the `body` argument is provided, the OpenAPI specification will be used to configure the resources, methods and integrations for the Rest API. 
If this argument is provided, the following resources should not be managed as separate ones, as updates may cause manual resource updates to be overwritten:

From 766117d72a42b5a14f734e9cce3ee9b6d7853894 Mon Sep 17 00:00:00 2001
From: Brian Flad
Date: Tue, 12 Jan 2021 23:32:20 -0500
Subject: [PATCH 0636/1212] Update CHANGELOG for #16198

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 74050d89f27..bd5cb34bdb8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,7 @@ ENHANCEMENTS
 
 * resource/aws_api_gateway_domain_name: Add `mutual_tls_authentication` configuration block [GH-15258]
 * resource/aws_api_gateway_method: Add `operation_name` argument [GH-13282]
+* resource/aws_api_gateway_rest_api: Add `disable_execute_api_endpoint` argument [GH-16198]
 * resource/aws_api_gateway_rest_api: Add `parameters` argument [GH-7374]
 * resource/aws_dms_endpoint: Add `s3_settings` `date_partition_enabled` argument [GH-16827]
 * resource/aws_elasticache_cluster: Add support for final snapshot with Redis engine [GH-15592]

From 706406f3331bbcb51d18e44bd69ad355864314cf Mon Sep 17 00:00:00 2001
From: vurt007 <449892+vurt007@users.noreply.github.com>
Date: Wed, 13 Jan 2021 04:58:19 +0000
Subject: [PATCH 0637/1212] resource/aws_api_gateway_base_path_mapping: Support
 in-place updates for `stage_name`, `api_id` and `base_path` (#16147)

* Use PATCH updates for aws_api_gateway_base_path_mapping where possible;
  this allows switching the mapped API and stage without destroying and
  recreating the mapping.

* Add more specific tests for in-place updates of the base path mapping.

* Apply suggestions from code review

Co-authored-by: Brian Flad
---
 ...ource_aws_api_gateway_base_path_mapping.go |  64 +++++++-
 ..._aws_api_gateway_base_path_mapping_test.go | 148 ++++++++++++++++++
 2 files changed, 209 insertions(+), 3 deletions(-)

diff --git a/aws/resource_aws_api_gateway_base_path_mapping.go b/aws/resource_aws_api_gateway_base_path_mapping.go
index 8586e624d66..30e1385174c 100644
--- a/aws/resource_aws_api_gateway_base_path_mapping.go
+++ b/aws/resource_aws_api_gateway_base_path_mapping.go
@@ -18,6 +18,7 @@ func resourceAwsApiGatewayBasePathMapping() *schema.Resource {
 	return &schema.Resource{
 		Create: resourceAwsApiGatewayBasePathMappingCreate,
 		Read:   resourceAwsApiGatewayBasePathMappingRead,
+		Update: resourceAwsApiGatewayBasePathMappingUpdate,
 		Delete: resourceAwsApiGatewayBasePathMappingDelete,
 		Importer: &schema.ResourceImporter{
 			State: schema.ImportStatePassthrough,
 		},
@@ -27,17 +28,14 @@
 			"api_id": {
 				Type:     schema.TypeString,
 				Required: true,
-				ForceNew: true,
 			},
 			"base_path": {
 				Type:     schema.TypeString,
 				Optional: true,
-				ForceNew: true,
 			},
 			"stage_name": {
 				Type:     schema.TypeString,
 				Optional: true,
-				ForceNew: true,
 			},
 			"domain_name": {
 				Type:     schema.TypeString,
@@ -87,6 +85,66 @@ func resourceAwsApiGatewayBasePathMappingCreate(d *schema.ResourceData, meta int
 	return resourceAwsApiGatewayBasePathMappingRead(d, meta)
 }
 
+func resourceAwsApiGatewayBasePathMappingUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).apigatewayconn
+
+	operations := make([]*apigateway.PatchOperation, 0)
+
+	if d.HasChange("stage_name") {
+		operations = append(operations, &apigateway.PatchOperation{
+			Op:    aws.String("replace"),
+			Path:  aws.String("/stage"),
+			Value: aws.String(d.Get("stage_name").(string)),
+		})
+	}
+
+	if d.HasChange("api_id") {
+		operations = append(operations, &apigateway.PatchOperation{
aws.String("replace"), + Path: aws.String("/restapiId"), + Value: aws.String(d.Get("api_id").(string)), + }) + } + + if d.HasChange("base_path") { + operations = append(operations, &apigateway.PatchOperation{ + Op: aws.String("replace"), + Path: aws.String("/basePath"), + Value: aws.String(d.Get("base_path").(string)), + }) + } + + domainName, basePath, decodeErr := decodeApiGatewayBasePathMappingId(d.Id()) + if decodeErr != nil { + return decodeErr + } + + input := apigateway.UpdateBasePathMappingInput{ + BasePath: aws.String(basePath), + DomainName: aws.String(domainName), + PatchOperations: operations, + } + + log.Printf("[INFO] Updating API Gateway base path mapping: %s", input) + + _, err := conn.UpdateBasePathMapping(&input) + + if err != nil { + if err != nil { + return fmt.Errorf("Updating API Gateway base path mapping failed: %s", err) + } + } + + if d.HasChange("base_path") { + id := fmt.Sprintf("%s/%s", d.Get("domain_name").(string), d.Get("base_path").(string)) + d.SetId(id) + } + + log.Printf("[DEBUG] API Gateway base path mapping updated: %s", d.Id()) + + return resourceAwsApiGatewayBasePathMappingRead(d, meta) +} + func resourceAwsApiGatewayBasePathMappingRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).apigatewayconn diff --git a/aws/resource_aws_api_gateway_base_path_mapping_test.go b/aws/resource_aws_api_gateway_base_path_mapping_test.go index 2a42b9d8284..0686ef72da0 100644 --- a/aws/resource_aws_api_gateway_base_path_mapping_test.go +++ b/aws/resource_aws_api_gateway_base_path_mapping_test.go @@ -120,6 +120,55 @@ func TestAccAWSAPIGatewayBasePathMapping_BasePath_Empty(t *testing.T) { }) } +func TestAccAWSAPIGatewayBasePathMapping_updates(t *testing.T) { + var confFirst, conf apigateway.BasePathMapping + resourceName := "aws_api_gateway_base_path_mapping.test" + name := fmt.Sprintf("tf-acc-%s.terraformtest.com", acctest.RandString(8)) + + key := tlsRsaPrivateKeyPem(2048) + certificate := tlsRsaX509SelfSignedCertificatePem(key, name) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayBasePathDestroy(name), + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayBasePathConfigBasePath(name, key, certificate, ""), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayBasePathExists(resourceName, &confFirst), + testAccCheckAWSAPIGatewayBasePathStageAttribute(&confFirst, "test"), + ), + }, + { + Config: testAccAWSAPIGatewayBasePathConfigBasePathAltStageAndAPI(name, key, certificate, ""), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayBasePathExists(resourceName, &conf), + testAccCheckAWSAPIGatewayBasePathBasePathAttribute(&conf, "(none)"), + testAccCheckAWSAPIGatewayBasePathStageAttribute(&conf, "test2"), + testAccCheckAWSAPIGatewayRestApiIdAttributeHasChanged(&conf, &confFirst), + resource.TestCheckResourceAttr(resourceName, "stage_name", "test2"), + ), + }, + { + Config: testAccAWSAPIGatewayBasePathConfigBasePathAltStageAndAPI(name, key, certificate, "thing"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayBasePathExists(resourceName, &conf), + testAccCheckAWSAPIGatewayBasePathBasePathAttribute(&conf, "thing"), + testAccCheckAWSAPIGatewayBasePathStageAttribute(&conf, "test2"), + resource.TestCheckResourceAttr(resourceName, "stage_name", "test2"), + resource.TestCheckResourceAttr(resourceName, "base_path", "thing"), + ), + }, + { + ResourceName: resourceName, + ImportState: 
+				ImportStateVerify: true,
+			},
+		},
+	})
+}
+
 func TestAccAWSAPIGatewayBasePathMapping_disappears(t *testing.T) {
 	var conf apigateway.BasePathMapping

@@ -213,6 +262,45 @@ func testAccCheckAWSAPIGatewayBasePathDestroy(name string) resource.TestCheckFun
 	}
 }

+func testAccCheckAWSAPIGatewayBasePathStageAttribute(conf *apigateway.BasePathMapping, stageName string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		if conf.Stage == nil {
+			return fmt.Errorf("attribute Stage should not be nil")
+		}
+		if *conf.Stage != stageName {
+			return fmt.Errorf("unexpected value Stage: %s", *conf.Stage)
+		}
+
+		return nil
+	}
+}
+
+func testAccCheckAWSAPIGatewayRestApiIdAttributeHasChanged(conf *apigateway.BasePathMapping, previousConf *apigateway.BasePathMapping) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		if conf.RestApiId == nil {
+			return fmt.Errorf("attribute RestApiId should not be nil")
+		}
+		if *conf.RestApiId == *previousConf.RestApiId {
+			return fmt.Errorf("expected RestApiId to have changed")
+		}
+
+		return nil
+	}
+}
+
+func testAccCheckAWSAPIGatewayBasePathBasePathAttribute(conf *apigateway.BasePathMapping, basePath string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		if conf.BasePath == nil {
+			return fmt.Errorf("attribute BasePath should not be nil")
+		}
+		if *conf.BasePath != basePath {
+			return fmt.Errorf("unexpected value BasePath: %s", *conf.BasePath)
+		}
+
+		return nil
+	}
+}
+
 func testAccAWSAPIGatewayBasePathConfigBase(domainName, key, certificate string) string {
 	return fmt.Sprintf(`
 resource "aws_acm_certificate" "test" {
@@ -277,3 +365,63 @@ resource "aws_api_gateway_base_path_mapping" "test" {
 }
 `, basePath)
 }
+
+func testAccAWSAPIGatewayBasePathConfigBasePathAltStageAndAPI(domainName, key, certificate, basePath string) string {
+	return testAccAWSAPIGatewayBasePathConfigBase(domainName, key, certificate) + fmt.Sprintf(`
+
+resource "aws_api_gateway_rest_api" "test2" {
+  name        = "tf-acc-apigateway-base-path-mapping-alt"
+  description = "Terraform Acceptance Tests"
+
+  endpoint_configuration {
+    types = ["REGIONAL"]
+  }
+}
+
+
+resource "aws_api_gateway_stage" "test2" {
+
+  depends_on = [
+    aws_api_gateway_deployment.test
+  ]
+
+  stage_name    = "test2"
+  rest_api_id   = aws_api_gateway_rest_api.test2.id
+  deployment_id = aws_api_gateway_deployment.test2.id
+}
+
+resource "aws_api_gateway_resource" "test2" {
+  rest_api_id = aws_api_gateway_rest_api.test2.id
+  parent_id   = aws_api_gateway_rest_api.test2.root_resource_id
+  path_part   = "tf-acc"
+}
+
+resource "aws_api_gateway_method" "test2" {
+  rest_api_id   = aws_api_gateway_rest_api.test2.id
+  resource_id   = aws_api_gateway_resource.test2.id
+  http_method   = "GET"
+  authorization = "NONE"
+}
+
+resource "aws_api_gateway_integration" "test2" {
+  rest_api_id = aws_api_gateway_rest_api.test2.id
+  resource_id = aws_api_gateway_resource.test2.id
+  http_method = aws_api_gateway_method.test2.http_method
+  type        = "MOCK"
+}
+
+
+resource "aws_api_gateway_deployment" "test2" {
+  rest_api_id = aws_api_gateway_rest_api.test2.id
+  stage_name  = "test"
+  depends_on  = [aws_api_gateway_integration.test2]
+}
+
+resource "aws_api_gateway_base_path_mapping" "test" {
+  api_id      = aws_api_gateway_rest_api.test2.id
+  base_path   = %[1]q
+  stage_name  = aws_api_gateway_stage.test2.stage_name
+  domain_name = aws_api_gateway_domain_name.test.domain_name
+}
+`, basePath)
+}

From 18c862a3ff0bb9c92ceaad5146edf7649cb1c3f0 Mon Sep 17 00:00:00 2001
From: Brian Flad
Date: Tue, 12 Jan 2021 23:59:10 -0500
Subject: [PATCH
0638/1212] Update CHANGELOG for #16147 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bd5cb34bdb8..644d46ec5db 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ FEATURES ENHANCEMENTS +* resource/aws_api_gateway_base_path_mapping: Support in-place updates for `api_id`, `base_path`, and `stage_name` [GH-16147] * resource/aws_api_gateway_domain_name: Add `mutual_tls_authentication` configuration block [GH-15258] * resource/aws_api_gateway_method: Add `operation_name` argument [GH-13282] * resource/aws_api_gateway_rest_api: Add `disable_execute_api_endpoint` argument [GH-16198] From 88361f054bf6436bb4f198a120917c6de896755a Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 13 Jan 2021 00:33:51 -0500 Subject: [PATCH 0639/1212] resource/aws_api_gateway_integration: Minor refactoring of new tls_config functionality and documentation Output from acceptance testing: ``` --- PASS: TestAccAWSAPIGatewayIntegration_basic (147.27s) --- PASS: TestAccAWSAPIGatewayIntegration_cache_key_parameters (10.00s) --- PASS: TestAccAWSAPIGatewayIntegration_contentHandling (43.45s) --- PASS: TestAccAWSAPIGatewayIntegration_disappears (84.91s) --- PASS: TestAccAWSAPIGatewayIntegration_integrationType (667.74s) --- PASS: TestAccAWSAPIGatewayIntegration_TlsConfig_InsecureSkipVerification (16.36s) ``` --- aws/resource_aws_api_gateway_integration.go | 6 +- ...source_aws_api_gateway_integration_test.go | 73 +++++++++++++++++-- .../r/api_gateway_integration.html.markdown | 12 +-- 3 files changed, 77 insertions(+), 14 deletions(-) diff --git a/aws/resource_aws_api_gateway_integration.go b/aws/resource_aws_api_gateway_integration.go index 8e5f1faf04e..bfea8d3029c 100644 --- a/aws/resource_aws_api_gateway_integration.go +++ b/aws/resource_aws_api_gateway_integration.go @@ -489,9 +489,7 @@ func resourceAwsApiGatewayIntegrationUpdate(d *schema.ResourceData, meta interfa }) } - if d.HasChange("tls_config.0.insecure_skip_verification") { - // The domain name must have an endpoint type. - // If attempting to remove the configuration, do nothing. 
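+	// Watching the whole tls_config block, rather than only the nested
+	// insecure_skip_verification attribute, also catches the block being
+	// added to or removed from the configuration.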
+ if d.HasChange("tls_config") { if v, ok := d.GetOk("tls_config"); ok && len(v.([]interface{})) > 0 { m := v.([]interface{})[0].(map[string]interface{}) @@ -557,7 +555,7 @@ func expandApiGatewayTlsConfig(vConfig []interface{}) *apigateway.TlsConfig { func flattenApiGatewayTlsConfig(config *apigateway.TlsConfig) []interface{} { if config == nil { - return []interface{}{} + return nil } return []interface{}{map[string]interface{}{ diff --git a/aws/resource_aws_api_gateway_integration_test.go b/aws/resource_aws_api_gateway_integration_test.go index 245c5a70ce7..4f07224dbf0 100644 --- a/aws/resource_aws_api_gateway_integration_test.go +++ b/aws/resource_aws_api_gateway_integration_test.go @@ -40,6 +40,7 @@ func TestAccAWSAPIGatewayIntegration_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "request_templates.application/json", ""), resource.TestCheckResourceAttr(resourceName, "request_templates.application/xml", "#set($inputRoot = $input.path('$'))\n{ }"), resource.TestCheckResourceAttr(resourceName, "timeout_milliseconds", "29000"), + resource.TestCheckResourceAttr(resourceName, "tls_config.#", "0"), ), }, @@ -271,9 +272,38 @@ func TestAccAWSAPIGatewayIntegration_integrationType(t *testing.T) { ), }, { - Config: testAccAWSAPIGatewayIntegrationConfig_IntegrationTLSConfig(rName), + Config: testAccAWSAPIGatewayIntegrationConfig_IntegrationTypeInternet(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayIntegrationExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "connection_type", "INTERNET"), + resource.TestCheckResourceAttr(resourceName, "connection_id", ""), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccAWSAPIGatewayIntegrationImportStateIdFunc(resourceName), + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSAPIGatewayIntegration_TlsConfig_InsecureSkipVerification(t *testing.T) { + var conf apigateway.Integration + rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(7)) + resourceName := "aws_api_gateway_integration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccAPIGatewayTypeEDGEPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayIntegrationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayIntegrationConfig_TlsConfig_InsecureSkipVerification(rName, true), Check: resource.ComposeTestCheckFunc( testAccCheckAWSAPIGatewayIntegrationExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "tls_config.#", "1"), resource.TestCheckResourceAttr(resourceName, "tls_config.0.insecure_skip_verification", "true"), ), }, @@ -283,6 +313,14 @@ func TestAccAWSAPIGatewayIntegration_integrationType(t *testing.T) { ImportStateIdFunc: testAccAWSAPIGatewayIntegrationImportStateIdFunc(resourceName), ImportStateVerify: true, }, + { + Config: testAccAWSAPIGatewayIntegrationConfig_TlsConfig_InsecureSkipVerification(rName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayIntegrationExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "tls_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "tls_config.0.insecure_skip_verification", "false"), + ), + }, }, }) } @@ -813,8 +851,33 @@ resource "aws_api_gateway_integration" "test" { ` } -func testAccAWSAPIGatewayIntegrationConfig_IntegrationTLSConfig(rName string) string { - return testAccAWSAPIGatewayIntegrationConfig_IntegrationTypeBase(rName) + ` 
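+// testAccAWSAPIGatewayIntegrationConfig_TlsConfig_InsecureSkipVerification returns an
+// acceptance test configuration whose tls_config block sets
+// insecure_skip_verification to the given value.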
+func testAccAWSAPIGatewayIntegrationConfig_TlsConfig_InsecureSkipVerification(rName string, insecureSkipVerification bool) string { + return fmt.Sprintf(` +resource "aws_api_gateway_rest_api" "test" { + name = %[1]q +} + +resource "aws_api_gateway_resource" "test" { + rest_api_id = aws_api_gateway_rest_api.test.id + parent_id = aws_api_gateway_rest_api.test.root_resource_id + path_part = "test" +} + +resource "aws_api_gateway_method" "test" { + rest_api_id = aws_api_gateway_rest_api.test.id + resource_id = aws_api_gateway_resource.test.id + http_method = "GET" + authorization = "NONE" + + request_models = { + "application/json" = "Error" + } + + request_parameters = { + "method.request.path.param" = true + } +} + resource "aws_api_gateway_integration" "test" { rest_api_id = aws_api_gateway_rest_api.test.id resource_id = aws_api_gateway_resource.test.id @@ -827,8 +890,8 @@ resource "aws_api_gateway_integration" "test" { content_handling = "CONVERT_TO_TEXT" tls_config { - insecure_skip_verification = true + insecure_skip_verification = %[2]t } } -` +`, rName, insecureSkipVerification) } diff --git a/website/docs/r/api_gateway_integration.html.markdown b/website/docs/r/api_gateway_integration.html.markdown index 61a9586f285..6f87ab65002 100644 --- a/website/docs/r/api_gateway_integration.html.markdown +++ b/website/docs/r/api_gateway_integration.html.markdown @@ -40,10 +40,6 @@ resource "aws_api_gateway_integration" "MyDemoIntegration" { cache_namespace = "foobar" timeout_milliseconds = 29000 - tls_config { - insecure_skip_verification = true - } - request_parameters = { "integration.request.header.X-Authorization" = "'static'" } @@ -233,7 +229,13 @@ The following arguments are supported: * `cache_namespace` - (Optional) The integration's cache namespace. * `content_handling` - (Optional) Specifies how to handle request payload content type conversions. Supported values are `CONVERT_TO_BINARY` and `CONVERT_TO_TEXT`. If this property is not defined, the request payload will be passed through from the method request to integration request without modification, provided that the passthroughBehaviors is configured to support payload pass-through. * `timeout_milliseconds` - (Optional) Custom timeout between 50 and 29,000 milliseconds. The default value is 29,000 milliseconds. -* `tls_config` - (Optional) Specifies the TLS configuration for an integration defined as a block. Supports `insecure_skip_verification` toggle. +* `tls_config` - (Optional) Configuration block specifying the TLS configuration for an integration. Defined below. + +### tls_config Configuration Block + +The `tls_config` configuration block supports the following arguments: + +* `insecure_skip_verification` - (Optional) Specifies whether or not API Gateway skips verification that the certificate for an integration endpoint is issued by a [supported certificate authority](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-supported-certificate-authorities-for-http-endpoints.html). This isn’t recommended, but it enables you to use certificates that are signed by private certificate authorities, or certificates that are self-signed. If enabled, API Gateway still performs basic certificate validation, which includes checking the certificate's expiration date, hostname, and presence of a root certificate authority. Supported only for `HTTP` and `HTTP_PROXY` integrations. 
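+
+As an illustrative sketch only (the `aws_api_gateway_rest_api.example`, `aws_api_gateway_resource.example`, and `aws_api_gateway_method.example` resources, along with the endpoint URI, are hypothetical placeholders):
+
+```hcl
+resource "aws_api_gateway_integration" "example" {
+  rest_api_id             = aws_api_gateway_rest_api.example.id
+  resource_id             = aws_api_gateway_resource.example.id
+  http_method             = aws_api_gateway_method.example.http_method
+  type                    = "HTTP"
+  integration_http_method = "GET"
+  uri                     = "https://internal.example.com/" # placeholder endpoint
+
+  # Accepts a certificate signed by a private or self-signed CA; API Gateway
+  # still checks expiration, hostname, and presence of a root CA.
+  tls_config {
+    insecure_skip_verification = true
+  }
+}
+```
+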
## Import From 4d91a6c57b5b41ecd498f3237b498068c259e93d Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 13 Jan 2021 00:36:42 -0500 Subject: [PATCH 0640/1212] Update CHANGELOG for #15499 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 644d46ec5db..a3b55865048 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ ENHANCEMENTS * resource/aws_api_gateway_base_path_mapping: Support in-place updates for `api_id`, `base_path`, and `stage_name` [GH-16147] * resource/aws_api_gateway_domain_name: Add `mutual_tls_authentication` configuration block [GH-15258] +* resource/aws_api_gateway_integration: Add `tls_config` configuration block [GH-15499] * resource/aws_api_gateway_method: Add `operation_name` argument [GH-13282] * resource/aws_api_gateway_rest_api: Add `disable_execute_api_endpoint` argument [GH-16198] * resource/aws_api_gateway_rest_api: Add `parameters` argument [GH-7374] From d906813c60bd8f5b664849498932f4473c4105fe Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 13 Jan 2021 00:58:55 -0500 Subject: [PATCH 0641/1212] service/apigateway: Extend retryable ConflictException handling to all operations and remove unreleased aws_api_gateway_method_settings mutexes Output from acceptance testing: ``` --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_LoggingLevel (28.05s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_ThrottlingRateLimitDisabledByDefault (60.30s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_UnauthorizedCacheControlHeaderStrategy (96.09s) --- PASS: TestAccAWSAPIGatewayMethodSettings_disappears (109.62s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_ThrottlingRateLimit (131.07s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_RequireAuthorizationForCacheControl (176.33s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_CachingEnabled (225.19s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_ThrottlingBurstLimitDisabledByDefault (257.27s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_Multiple (257.69s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_ThrottlingBurstLimit (360.28s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_DataTraceEnabled (365.22s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_CacheTtlInSeconds (486.19s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_MetricsEnabled (498.96s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_CacheDataEncrypted (661.38s) --- PASS: TestAccAWSAPIGatewayMethodSettings_basic (696.21s) ``` --- aws/config.go | 10 +++++----- ...esource_aws_api_gateway_method_settings.go | 19 ++----------------- 2 files changed, 7 insertions(+), 22 deletions(-) diff --git a/aws/config.go b/aws/config.go index 18fefeee06e..00333097e04 100644 --- a/aws/config.go +++ b/aws/config.go @@ -645,11 +645,11 @@ func (c *Config) Client() (interface{}, error) { client.shieldconn = shield.New(sess.Copy(shieldConfig)) client.apigatewayconn.Handlers.Retry.PushBack(func(r *request.Request) { - switch r.Operation.Name { - case "CreateUsagePlanKey", "DeleteUsagePlanKey": - if tfawserr.ErrMessageContains(r.Error, apigateway.ErrCodeConflictException, "try again later") { - r.Retryable = aws.Bool(true) - } + // Many operations can return an error such as: + // ConflictException: Unable to complete operation due to concurrent modification. Please try again later. + // Handle them all globally for the service client. 
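+		// ErrMessageContains matches on both the error code and a message
+		// substring, so only conflicts that ask the caller to retry are
+		// marked retryable.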
+		if tfawserr.ErrMessageContains(r.Error, apigateway.ErrCodeConflictException, "try again later") {
+			r.Retryable = aws.Bool(true)
 		}
 	})

diff --git a/aws/resource_aws_api_gateway_method_settings.go b/aws/resource_aws_api_gateway_method_settings.go
index 1c7e448d5ad..176ecb0bf14 100644
--- a/aws/resource_aws_api_gateway_method_settings.go
+++ b/aws/resource_aws_api_gateway_method_settings.go
@@ -5,17 +5,12 @@ import (
 	"log"
 	"strings"

-	"sync"
-
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/apigateway"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
 )

-var resourceAwsApiGatewayMethodSettingsUpdateMutex = &sync.Mutex{}
-var resourceAwsApiGatewayMethodSettingsDeleteMutex = &sync.Mutex{}
-
 func resourceAwsApiGatewayMethodSettings() *schema.Resource {
 	return &schema.Resource{
 		Create: resourceAwsApiGatewayMethodSettingsUpdate,
@@ -257,12 +252,7 @@ func resourceAwsApiGatewayMethodSettingsUpdate(d *schema.ResourceData, meta inte
 	}

 	log.Printf("[DEBUG] Updating API Gateway Stage: %s", input)
-	resourceAwsApiGatewayMethodSettingsUpdateMutex.Lock()
-	defer resourceAwsApiGatewayMethodSettingsUpdateMutex.Unlock()
-
-	_, err := retryOnAwsCode(apigateway.ErrCodeConflictException, func() (interface{}, error) {
-		return conn.UpdateStage(&input)
-	})
+	_, err := conn.UpdateStage(&input)

 	if err != nil {
 		return fmt.Errorf("updating API Gateway Stage failed: %w", err)
@@ -289,12 +279,7 @@ func resourceAwsApiGatewayMethodSettingsDelete(d *schema.ResourceData, meta inte
 	}

 	log.Printf("[DEBUG] Updating API Gateway Stage: %s", input)
-	resourceAwsApiGatewayMethodSettingsDeleteMutex.Lock()
-	defer resourceAwsApiGatewayMethodSettingsDeleteMutex.Unlock()
-
-	_, err := retryOnAwsCode(apigateway.ErrCodeConflictException, func() (interface{}, error) {
-		return conn.UpdateStage(&input)
-	})
+	_, err := conn.UpdateStage(&input)

 	if err != nil {
 		return fmt.Errorf("updating API Gateway Stage failed: %w", err)

From c0018f14e74290e770d6efb8289db86a43e34d42 Mon Sep 17 00:00:00 2001
From: Brian Flad
Date: Wed, 13 Jan 2021 01:01:39 -0500
Subject: [PATCH 0642/1212] Update CHANGELOG for #10092 #13497

---
 CHANGELOG.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a3b55865048..73658638e50 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -19,12 +19,12 @@ ENHANCEMENTS

 BUG FIXES

-* resource/aws_api_gateway_usage_plan_key: Automatically retry on retryable `ConflictException` errors during creation and deletion [GH-10092]
 * resource/aws_db_instance: Correctly validate `final_snapshot_identifier` argument at plan-time [GH-16885]
 * resource/aws_dms_endpoint: Support `extra_connection_attributes` for all engine names during create and read [GH-16827]
 * resource/aws_networkfirewall_rule_group: Prevent resource recreation due to `stateful_rule` changes after creation [GH-16884]
 * resource/aws_route53_zone_association: Prevent deletion errors for missing Hosted Zone or VPC association [GH-17023]
 * resource/aws_s3_bucket_inventory: Prevent crashes with empty `destination`, `filter`, and `schedule` configuration blocks [GH-17055]
+* service/apigateway: All operations will now automatically retry on `ConflictException: Unable to complete operation due to concurrent modification. Please try again later.` errors.
## 3.23.0 (January 08, 2021) From 518825907d3885737c94ce3afc7b7a997c98bffa Mon Sep 17 00:00:00 2001 From: Brent Harrison Date: Thu, 14 Jan 2021 00:14:49 +1100 Subject: [PATCH 0643/1212] fix autoscaling_group attribute 'instance_warmup' --- website/docs/r/autoscaling_group.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/autoscaling_group.html.markdown b/website/docs/r/autoscaling_group.html.markdown index e83ff3edc96..2e0354080da 100644 --- a/website/docs/r/autoscaling_group.html.markdown +++ b/website/docs/r/autoscaling_group.html.markdown @@ -402,7 +402,7 @@ This configuration block supports the following: * `strategy` - (Required) The strategy to use for instance refresh. The only allowed value is `Rolling`. See [StartInstanceRefresh Action](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_StartInstanceRefresh.html#API_StartInstanceRefresh_RequestParameters) for more information. * `preferences` - (Optional) Override default parameters for Instance Refresh. - * `instance_warmup_seconds` - (Optional) The number of seconds until a newly launched instance is configured and ready to use. Default behavior is to use the Auto Scaling Group's health check grace period. + * `instance_warmup` - (Optional) The number of seconds until a newly launched instance is configured and ready to use. Default behavior is to use the Auto Scaling Group's health check grace period. * `min_healthy_percentage` - (Optional) The amount of capacity in the Auto Scaling group that must remain healthy during an instance refresh to allow the operation to continue, as a percentage of the desired capacity of the Auto Scaling group. Defaults to `90`. * `triggers` - (Optional) Set of additional property names that will trigger an Instance Refresh. A refresh will always be triggered by a change in any of `launch_configuration`, `launch_template`, or `mixed_instances_policy`. From a0cccd77a3ade521bd7e88b68f59226c6f25bb72 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 13 Jan 2021 10:46:58 -0500 Subject: [PATCH 0644/1212] resource/instance: Pre-merge cleanup --- aws/resource_aws_instance.go | 21 ++--- aws/resource_aws_instance_test.go | 138 +++++++++++++++++++----------- 2 files changed, 98 insertions(+), 61 deletions(-) diff --git a/aws/resource_aws_instance.go b/aws/resource_aws_instance.go index 1e637b5a743..925861a4489 100644 --- a/aws/resource_aws_instance.go +++ b/aws/resource_aws_instance.go @@ -408,7 +408,8 @@ func resourceAwsInstance() *schema.Resource { Elem: &schema.Resource{ // "You can only modify the volume size, volume type, and Delete on // Termination flag on the block device mapping entry for the root - // device volume." - bit.ly/ec2bdmap + // device volume." 
+ // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html Schema: map[string]*schema.Schema{ "delete_on_termination": { Type: schema.TypeBool, @@ -705,14 +706,14 @@ func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error { } // tags in root_block_device and ebs_block_device - volumeTagsToCreate := map[string]map[string]interface{}{} + blockDeviceTagsToCreate := map[string]map[string]interface{}{} if v, ok := d.GetOk("root_block_device"); ok { vL := v.([]interface{}) for _, v := range vL { bd := v.(map[string]interface{}) - if tagsm, ok := bd["tags"].(map[string]interface{}); ok && len(tagsm) > 0 { + if blockDeviceTags, ok := bd["tags"].(map[string]interface{}); ok && len(blockDeviceTags) > 0 { if rootVolumeId := getRootVolumeId(instance); rootVolumeId != "" { - volumeTagsToCreate[rootVolumeId] = tagsm + blockDeviceTagsToCreate[rootVolumeId] = blockDeviceTags } } } @@ -722,17 +723,17 @@ func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error { vL := v.(*schema.Set).List() for _, v := range vL { bd := v.(map[string]interface{}) - if tagsm, ok := bd["tags"].(map[string]interface{}); ok && len(tagsm) > 0 { + if blockDeviceTags, ok := bd["tags"].(map[string]interface{}); ok && len(blockDeviceTags) > 0 { devName := bd["device_name"].(string) if volumeId := getVolumeIdByDeviceName(instance, devName); volumeId != "" { - volumeTagsToCreate[volumeId] = tagsm + blockDeviceTagsToCreate[volumeId] = blockDeviceTags } } } } - for vol, tagsm := range volumeTagsToCreate { - if err := keyvaluetags.Ec2CreateTags(conn, vol, tagsm); err != nil { + for vol, blockDeviceTags := range blockDeviceTagsToCreate { + if err := keyvaluetags.Ec2CreateTags(conn, vol, blockDeviceTags); err != nil { log.Printf("[ERR] Error creating tags for EBS volume %s: %s", vol, err) } } @@ -2542,7 +2543,7 @@ func blockDeviceTagsDefined(d *schema.ResourceData) bool { vL := v.([]interface{}) for _, v := range vL { bd := v.(map[string]interface{}) - if tagsm, ok := bd["tags"].(map[string]interface{}); ok && len(tagsm) > 0 { + if blockDeviceTags, ok := bd["tags"].(map[string]interface{}); ok && len(blockDeviceTags) > 0 { return true } } @@ -2552,7 +2553,7 @@ func blockDeviceTagsDefined(d *schema.ResourceData) bool { vL := v.(*schema.Set).List() for _, v := range vL { bd := v.(map[string]interface{}) - if tagsm, ok := bd["tags"].(map[string]interface{}); ok && len(tagsm) > 0 { + if blockDeviceTags, ok := bd["tags"].(map[string]interface{}); ok && len(blockDeviceTags) > 0 { return true } } diff --git a/aws/resource_aws_instance_test.go b/aws/resource_aws_instance_test.go index 8175fcdfecd..9b4011c7e63 100644 --- a/aws/resource_aws_instance_test.go +++ b/aws/resource_aws_instance_test.go @@ -1119,6 +1119,8 @@ func TestAccAWSInstance_blockDeviceTags_volumeTags(t *testing.T) { func TestAccAWSInstance_blockDeviceTags_withAttachedVolume(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" + ebsVolumeName := "aws_ebs_volume.test" + rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -1126,9 +1128,31 @@ func TestAccAWSInstance_blockDeviceTags_withAttachedVolume(t *testing.T) { CheckDestroy: testAccCheckInstanceDestroy, Steps: []resource.TestStep{ { - Config: testAccInstanceConfigBlockDeviceTagsAttachedVolumeWithTags(), + Config: testAccInstanceConfigBlockDeviceTagsAttachedVolumeWithTags(rName), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckInstanceExists(resourceName, &v), + resource.TestCheckResourceAttr(ebsVolumeName, "tags.%", "2"), + resource.TestCheckResourceAttr(ebsVolumeName, "tags.Name", rName), + resource.TestCheckResourceAttr(ebsVolumeName, "tags.Factum", "PerAsperaAdAstra"), + ), + }, + { + //https://github.com/hashicorp/terraform-provider-aws/issues/17074 + Config: testAccInstanceConfigBlockDeviceTagsAttachedVolumeWithTags(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists(resourceName, &v), + resource.TestCheckResourceAttr(ebsVolumeName, "tags.%", "2"), + resource.TestCheckResourceAttr(ebsVolumeName, "tags.Name", rName), + resource.TestCheckResourceAttr(ebsVolumeName, "tags.Factum", "PerAsperaAdAstra"), + ), + }, + { + Config: testAccInstanceConfigBlockDeviceTagsAttachedVolumeWithTagsUpdate(rName), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(resourceName, &v), + resource.TestCheckResourceAttr(ebsVolumeName, "tags.%", "2"), + resource.TestCheckResourceAttr(ebsVolumeName, "tags.Name", rName), + resource.TestCheckResourceAttr(ebsVolumeName, "tags.Factum", "VincitQuiSeVincit"), ), }, { @@ -1140,26 +1164,10 @@ func TestAccAWSInstance_blockDeviceTags_withAttachedVolume(t *testing.T) { }) } -/* -TestAccAWSInstance_blockDeviceTags_volumeTags - testAccInstanceConfigBlockDeviceTagsVolumeTagsUpdate - testAccInstanceConfigBlockDeviceTagsVolumeTags - testAccInstanceConfigBlockDeviceTagsNoVolumeTags - -TestAccAWSInstance_blockDeviceTags_ebsAndRoot - testAccInstanceConfigBlockDeviceTagsEBSAndRootTagsUpdate - testAccInstanceConfigBlockDeviceTagsEBSAndRootTags - testAccInstanceConfigBlockDeviceTagsEBSTags - testAccInstanceConfigBlockDeviceTagsEBSTagsConflict - testAccInstanceConfigBlockDeviceTagsRootTagsConflict - -TestAccAWSInstance_blockDeviceTags_withAttachedVolume - testAccInstanceConfigBlockDeviceTagsAttachedVolumeWithTags -*/ - func TestAccAWSInstance_blockDeviceTags_ebsAndRoot(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" + rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -1175,30 +1183,30 @@ func TestAccAWSInstance_blockDeviceTags_ebsAndRoot(t *testing.T) { ExpectError: regexp.MustCompile(`"ebs_block_device\.0\.tags": conflicts with volume_tags`), }, { - Config: testAccInstanceConfigBlockDeviceTagsEBSTags(), + Config: testAccInstanceConfigBlockDeviceTagsEBSTags(rName), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "root_block_device.0.tags.%", "0"), resource.TestCheckResourceAttr(resourceName, "ebs_block_device.0.tags.%", "1"), - resource.TestCheckResourceAttr(resourceName, "ebs_block_device.0.tags.Name", "terraform-test-ebs"), + resource.TestCheckResourceAttr(resourceName, "ebs_block_device.0.tags.Name", rName), resource.TestCheckResourceAttr(resourceName, "ebs_block_device.1.tags.%", "0"), ), }, { - Config: testAccInstanceConfigBlockDeviceTagsEBSAndRootTags(), + Config: testAccInstanceConfigBlockDeviceTagsEBSAndRootTags(rName), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "root_block_device.0.tags.%", "2"), - resource.TestCheckResourceAttr(resourceName, "root_block_device.0.tags.Name", "terraform-test-root"), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.tags.Name", rName), resource.TestCheckResourceAttr(resourceName, "root_block_device.0.tags.Purpose", "test"), ), 
}, { - Config: testAccInstanceConfigBlockDeviceTagsEBSAndRootTagsUpdate(), + Config: testAccInstanceConfigBlockDeviceTagsEBSAndRootTagsUpdate(rName), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "root_block_device.0.tags.%", "2"), - resource.TestCheckResourceAttr(resourceName, "root_block_device.0.tags.Name", "terraform-test-root-new"), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.tags.Name", rName), resource.TestCheckResourceAttr(resourceName, "root_block_device.0.tags.Env", "dev"), ), }, @@ -4107,30 +4115,59 @@ resource "aws_instance" "test" { `) } -func testAccInstanceConfigBlockDeviceTagsAttachedVolumeWithTags() string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` +func testAccInstanceConfigBlockDeviceTagsAttachedVolumeWithTags(rName string) string { + // https://github.com/hashicorp/terraform-provider-aws/issues/17074 + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAvailableAZsNoOptInConfig(), + testAccAvailableEc2InstanceTypeForAvailabilityZone("data.aws_availability_zones.available.names[0]", "t3.micro", "t2.micro"), + fmt.Sprintf(` resource "aws_instance" "test" { - ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id - instance_type = "t2.medium" + ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + availability_zone = data.aws_availability_zones.available.names[0] +} - root_block_device { - delete_on_termination = true - volume_size = "10" - volume_type = "standard" - } +resource "aws_ebs_volume" "test" { + availability_zone = aws_instance.test.availability_zone + size = "10" + type = "gp2" tags = { - Name = "test-terraform" + Name = %[1]q + Factum = "PerAsperaAdAstra" } } +resource "aws_volume_attachment" "test" { + device_name = "/dev/xvdg" + volume_id = aws_ebs_volume.test.id + instance_id = aws_instance.test.id +} +`, rName)) +} + +func testAccInstanceConfigBlockDeviceTagsAttachedVolumeWithTagsUpdate(rName string) string { + // https://github.com/hashicorp/terraform-provider-aws/issues/17074 + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAvailableAZsNoOptInConfig(), + testAccAvailableEc2InstanceTypeForAvailabilityZone("data.aws_availability_zones.available.names[0]", "t3.micro", "t2.micro"), + fmt.Sprintf(` +resource "aws_instance" "test" { + ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + availability_zone = data.aws_availability_zones.available.names[0] +} + resource "aws_ebs_volume" "test" { availability_zone = aws_instance.test.availability_zone size = "10" type = "gp2" tags = { - Name = "test-terraform" + Name = %[1]q + Factum = "VincitQuiSeVincit" } } @@ -4139,7 +4176,7 @@ resource "aws_volume_attachment" "test" { volume_id = aws_ebs_volume.test.id instance_id = aws_instance.test.id } -`) +`, rName)) } func testAccInstanceConfigBlockDeviceTagsRootTagsConflict() string { @@ -4236,8 +4273,8 @@ resource "aws_instance" "test" { `) } -func testAccInstanceConfigBlockDeviceTagsEBSTags() string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` +func testAccInstanceConfigBlockDeviceTagsEBSTags(rName string) string { + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id @@ -4252,7 +4289,7 @@ resource "aws_instance" "test" 
{ volume_size = 1 tags = { - Name = "terraform-test-ebs" + Name = %[1]q } } @@ -4266,11 +4303,11 @@ resource "aws_instance" "test" { virtual_name = "ephemeral0" } } -`) +`, rName)) } -func testAccInstanceConfigBlockDeviceTagsEBSAndRootTags() string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` +func testAccInstanceConfigBlockDeviceTagsEBSAndRootTags(rName string) string { + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id @@ -4280,7 +4317,7 @@ resource "aws_instance" "test" { volume_type = "gp2" tags = { - Name = "terraform-test-root" + Name = %[1]q Purpose = "test" } } @@ -4290,7 +4327,7 @@ resource "aws_instance" "test" { volume_size = 1 tags = { - Name = "terraform-test-ebs" + Name = %[1]q } } @@ -4304,11 +4341,11 @@ resource "aws_instance" "test" { virtual_name = "ephemeral0" } } -`) +`, rName)) } -func testAccInstanceConfigBlockDeviceTagsEBSAndRootTagsUpdate() string { - return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` +func testAccInstanceConfigBlockDeviceTagsEBSAndRootTagsUpdate(rName string) string { + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), fmt.Sprintf(` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id @@ -4318,7 +4355,7 @@ resource "aws_instance" "test" { volume_type = "gp2" tags = { - Name = "terraform-test-root-new" + Name = %[1]q Env = "dev" } } @@ -4328,7 +4365,7 @@ resource "aws_instance" "test" { volume_size = 1 tags = { - Name = "terraform-test-ebs" + Name = %[1]q } } @@ -4337,13 +4374,12 @@ resource "aws_instance" "test" { volume_size = 1 } - ephemeral_block_device { device_name = "/dev/sde" virtual_name = "ephemeral0" } } -`) +`, rName)) } var testAccInstanceConfigEBSBlockDeviceInvalidIops = composeConfig(testAccAwsEc2InstanceAmiWithEbsRootVolume, ` From 0f1fddcdf6cc40eec6162103b0cfbce73146de77 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 13 Jan 2021 12:19:29 -0500 Subject: [PATCH 0645/1212] docs/instance: Clean up documentation --- website/docs/r/instance.html.markdown | 335 +++++++++++--------------- 1 file changed, 140 insertions(+), 195 deletions(-) diff --git a/website/docs/r/instance.html.markdown b/website/docs/r/instance.html.markdown index f89cc6ee6e1..bbe39b65ede 100644 --- a/website/docs/r/instance.html.markdown +++ b/website/docs/r/instance.html.markdown @@ -8,18 +8,13 @@ description: |- # Resource: aws_instance -Provides an EC2 instance resource. This allows instances to be created, updated, -and deleted. Instances also support [provisioning](https://www.terraform.io/docs/provisioners/index.html). +Provides an EC2 instance resource. This allows instances to be created, updated, and deleted. Instances also support [provisioning](https://www.terraform.io/docs/provisioners/index.html). 
## Example Usage -```hcl -# Create a new instance of the latest Ubuntu 20.04 on an -# t3.micro node with an AWS Tag naming it "HelloWorld" -provider "aws" { - region = "us-west-2" -} +### Basic Example Using AMI Lookup +```hcl data "aws_ami" "ubuntu" { most_recent = true @@ -46,72 +41,100 @@ resource "aws_instance" "web" { } ``` +### Network and Credit Specification Example + +```hcl +resource "aws_vpc" "my_vpc" { + cidr_block = "172.16.0.0/16" + + tags = { + Name = "tf-example" + } +} + +resource "aws_subnet" "my_subnet" { + vpc_id = aws_vpc.my_vpc.id + cidr_block = "172.16.10.0/24" + availability_zone = "us-west-2a" + + tags = { + Name = "tf-example" + } +} + +resource "aws_network_interface" "foo" { + subnet_id = aws_subnet.my_subnet.id + private_ips = ["172.16.10.100"] + + tags = { + Name = "primary_network_interface" + } +} + +resource "aws_instance" "foo" { + ami = "ami-005e54dee72cc1d00" # us-west-2 + instance_type = "t2.micro" + + network_interface { + network_interface_id = aws_network_interface.foo.id + device_index = 0 + } + + credit_specification { + cpu_credits = "unlimited" + } +} +``` + ## Argument Reference The following arguments are supported: -* `ami` - (Required) The AMI to use for the instance. -* `availability_zone` - (Optional) The AZ to start the instance in. -* `placement_group` - (Optional) The Placement Group to start the instance in. -* `tenancy` - (Optional) The tenancy of the instance (if the instance is running in a VPC). An instance with a tenancy of dedicated runs on single-tenant hardware. The host tenancy is not supported for the import-instance command. -* `host_id` - (optional) The Id of a dedicated host that the instance will be assigned to. Use when an instance is to be launched on a specific dedicated host. -* `cpu_core_count` - (Optional) Sets the number of CPU cores for an instance. This option is - only supported on creation of instance type that support CPU Options - [CPU Cores and Threads Per CPU Core Per Instance Type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html#cpu-options-supported-instances-values) - specifying this option for unsupported instance types will return an error from the EC2 API. -* `cpu_threads_per_core` - (Optional - has no effect unless `cpu_core_count` is also set) If set to to 1, hyperthreading is disabled on the launched instance. Defaults to 2 if not set. See [Optimizing CPU Options](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for more information. +* `ami` - (Required) AMI to use for the instance. +* `associate_public_ip_address` - (Optional) Whether to associate a public IP address with an instance in a VPC. +* `availability_zone` - (Optional) AZ to start the instance in. -> **NOTE:** Changing `cpu_core_count` and/or `cpu_threads_per_core` will cause the resource to be destroyed and re-created. -* `ebs_optimized` - (Optional) If true, the launched EC2 instance will be EBS-optimized. - Note that if this is not set on an instance type that is optimized by default then - this will show as disabled but if the instance type is optimized by default then - there is no need to set this and there is no effect to disabling it. - See the [EBS Optimized section](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) of the AWS User Guide for more information. 
-* `disable_api_termination` - (Optional) If true, enables [EC2 Instance
-  Termination Protection](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/terminating-instances.html#Using_ChangingDisableAPITermination)
-* `instance_initiated_shutdown_behavior` - (Optional) Shutdown behavior for the
-instance. Amazon defaults this to `stop` for EBS-backed instances and
-`terminate` for instance-store instances. Cannot be set on instance-store
-instances. See [Shutdown Behavior](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/terminating-instances.html#Using_ChangingInstanceInitiatedShutdownBehavior) for more information.
-* `instance_type` - (Required) The type of instance to start. Updates to this field will trigger a stop/start of the EC2 instance.
-* `key_name` - (Optional) The key name of the Key Pair to use for the instance; which can be managed using [the `aws_key_pair` resource](key_pair.html).
-
+* `cpu_core_count` - (Optional) Sets the number of CPU cores for an instance. This option is only supported on creation of instance types that support CPU Options [CPU Cores and Threads Per CPU Core Per Instance Type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html#cpu-options-supported-instances-values) - specifying this option for unsupported instance types will return an error from the EC2 API.
+* `cpu_threads_per_core` - (Optional - has no effect unless `cpu_core_count` is also set) If set to 1, hyperthreading is disabled on the launched instance. Defaults to 2 if not set. See [Optimizing CPU Options](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for more information.
+* `credit_specification` - (Optional) Customize the credit specification of the instance. See [Credit Specification](#credit-specification) below for more details.
+* `disable_api_termination` - (Optional) If true, enables [EC2 Instance Termination Protection](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/terminating-instances.html#Using_ChangingDisableAPITermination).
+* `ebs_block_device` - (Optional) Additional EBS block devices to attach to the instance. Block device configurations only apply on resource creation. See [Block Devices](#block-devices) below for details on attributes and drift detection.
+* `ebs_optimized` - (Optional) If true, the launched EC2 instance will be EBS-optimized. Note that if this is not set on an instance type that is optimized by default then this will show as disabled but if the instance type is optimized by default then there is no need to set this and there is no effect to disabling it. See the [EBS Optimized section](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) of the AWS User Guide for more information.
+* `enclave_options` - (Optional) Enable Nitro Enclaves on launched instances. See [Enclave Options](#enclave-options) below for more details.
+* `ephemeral_block_device` - (Optional) Customize Ephemeral (also known as "Instance Store") volumes on the instance. See [Block Devices](#block-devices) below for details.
 * `get_password_data` - (Optional) If true, wait for password data to become available and retrieve it. Useful for getting the administrator password for instances running Microsoft Windows. The password data is exported to the `password_data` attribute. See [GetPasswordData](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetPasswordData.html) for more information.
+* `hibernation` - (Optional) If true, the launched EC2 instance will support hibernation.
+* `host_id` - (Optional) ID of a dedicated host that the instance will be assigned to. Use when an instance is to be launched on a specific dedicated host.
+* `iam_instance_profile` - (Optional) IAM Instance Profile to launch the instance with. Specified as the name of the Instance Profile. Ensure your credentials have the correct permission to assign the instance profile according to the [EC2 documentation](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html#roles-usingrole-ec2instance-permissions), notably `iam:PassRole`.
+* `instance_initiated_shutdown_behavior` - (Optional) Shutdown behavior for the instance. Amazon defaults this to `stop` for EBS-backed instances and `terminate` for instance-store instances. Cannot be set on instance-store instances. See [Shutdown Behavior](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/terminating-instances.html#Using_ChangingInstanceInitiatedShutdownBehavior) for more information.
+* `instance_type` - (Required) Type of instance to start. Updates to this field will trigger a stop/start of the EC2 instance.
+* `ipv6_address_count` - (Optional) A number of IPv6 addresses to associate with the primary network interface. Amazon EC2 chooses the IPv6 addresses from the range of your subnet.
+* `ipv6_addresses` - (Optional) Specify one or more IPv6 addresses from the range of the subnet to associate with the primary network interface.
+* `key_name` - (Optional) Key name of the Key Pair to use for the instance; which can be managed using [the `aws_key_pair` resource](key_pair.html).
+* `metadata_options` - (Optional) Customize the metadata options of the instance. See [Metadata Options](#metadata-options) below for more details.
 * `monitoring` - (Optional) If true, the launched EC2 instance will have detailed monitoring enabled. (Available since v0.6.0)
+* `network_interface` - (Optional) Customize network interfaces to be attached at instance boot time. See [Network Interfaces](#network-interfaces) below for more details.
+* `placement_group` - (Optional) Placement Group to start the instance in.
+* `private_ip` - (Optional) Private IP address to associate with the instance in a VPC.
+* `root_block_device` - (Optional) Customize details about the root block device of the instance. See [Block Devices](#block-devices) below for details.
+* `secondary_private_ips` - (Optional) A list of secondary private IPv4 addresses to assign to the instance's primary network interface (eth0) in a VPC. Can only be assigned to the primary network interface (eth0) attached at instance creation, not a pre-existing network interface i.e. referenced in a `network_interface` block. Refer to the [Elastic network interfaces documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) to see the maximum number of private IP addresses allowed per instance type.
 * `security_groups` - (Optional, EC2-Classic and default VPC only) A list of security group names (EC2-Classic) or IDs (default VPC) to associate with.
+* `source_dest_check` - (Optional) Controls if traffic is routed to the instance when the destination address does not match the instance. Used for NAT or VPNs. Defaults to `true`.
+* `subnet_id` - (Optional) VPC Subnet ID to launch in.
+* `tags` - (Optional) A map of tags to assign to the resource. Note that these tags apply to the instance and not block storage devices.
+* `tenancy` - (Optional) Tenancy of the instance (if the instance is running in a VPC).
An instance with a tenancy of dedicated runs on single-tenant hardware. The host tenancy is not supported for the import-instance command. +* `user_data` - (Optional) User data to provide when launching the instance. Do not pass gzip-compressed data via this argument; see `user_data_base64` instead. +* `user_data_base64` - (Optional) Can be used instead of `user_data` to pass base64-encoded binary data directly. Use this instead of `user_data` whenever the value is not a valid UTF-8 string. For example, gzip-encoded user data must be base64-encoded and passed via this argument to avoid corruption. --> **NOTE:** If you are creating Instances in a VPC, use `vpc_security_group_ids` instead. +~> **NOTE:** Do not use `volume_tags` if you plan to manage block device tags outside the `aws_instance` configuration, such as using `tags` in an [`aws_ebs_volume`](/docs/providers/aws/r/ebs_volume.html) resource attached via [`aws_volume_attachment`](/docs/providers/aws/r/volume_attachment.html). Doing so will result in resource cycling and inconsistent behavior. -* `vpc_security_group_ids` - (Optional, VPC only) A list of security group IDs to associate with. -* `subnet_id` - (Optional) The VPC Subnet ID to launch in. -* `associate_public_ip_address` - (Optional) Associate a public ip address with an instance in a VPC. Boolean value. -* `private_ip` - (Optional) Private IP address to associate with the - instance in a VPC. -* `secondary_private_ips` - (Optional) A list of secondary private IPv4 addresses to assign to the instance's primary network interface (eth0) in a VPC. Can only be assigned to the primary network interface (eth0) attached at instance creation, not a pre-existing network interface i.e. referenced in a `network_interface` block. Refer to the [Elastic network interfaces documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) to see the maximum number of private IP addresses allowed per instance type. -* `source_dest_check` - (Optional) Controls if traffic is routed to the instance when - the destination address does not match the instance. Used for NAT or VPNs. Defaults true. -* `user_data` - (Optional) The user data to provide when launching the instance. Do not pass gzip-compressed data via this argument; see `user_data_base64` instead. -* `user_data_base64` - (Optional) Can be used instead of `user_data` to pass base64-encoded binary data directly. Use this instead of `user_data` whenever the value is not a valid UTF-8 string. For example, gzip-encoded user data must be base64-encoded and passed via this argument to avoid corruption. -* `iam_instance_profile` - (Optional) The IAM Instance Profile to - launch the instance with. Specified as the name of the Instance Profile. Ensure your credentials have the correct permission to assign the instance profile according to the [EC2 documentation](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html#roles-usingrole-ec2instance-permissions), notably `iam:PassRole`. -* `ipv6_address_count`- (Optional) A number of IPv6 addresses to associate with the primary network interface. Amazon EC2 chooses the IPv6 addresses from the range of your subnet. -* `ipv6_addresses` - (Optional) Specify one or more IPv6 addresses from the range of the subnet to associate with the primary network interface -* `tags` - (Optional) A map of tags to assign to the resource. -* `volume_tags` - (Optional) A map of tags to assign to the devices created by the instance at launch time. 
+* `volume_tags` - (Optional) A map of tags to assign, at instance-creation time, to root and EBS volumes. -~> **NOTE:** Use `volume_tags` to apply the same tags to an instance's root and EBS devices. Using `volume_tags` is incompatible with other ways of tagging an instance's volumes such as using `tags` in `root_block_device` or `ebs_block_device` blocks, or using `tags` in an `aws_ebs_volume` resource attached via `aws_volume_attachment`. Using `volume_tags` together with other ways of tagging volumes will cause inconsistent behavior and resource cycling. +-> **NOTE:** If you are creating Instances in a VPC, use `vpc_security_group_ids` instead. -* `root_block_device` - (Optional) Customize details about the root block - device of the instance. See [Block Devices](#block-devices) below for details. -* `ebs_block_device` - (Optional) Additional EBS block devices to attach to the - instance. Block device configurations only apply on resource creation. See [Block Devices](#block-devices) below for details on attributes and drift detection. -* `ephemeral_block_device` - (Optional) Customize Ephemeral (also known as - "Instance Store") volumes on the instance. See [Block Devices](#block-devices) below for details. -* `network_interface` - (Optional) Customize network interfaces to be attached at instance boot time. See [Network Interfaces](#network-interfaces) below for more details. -* `credit_specification` - (Optional) Customize the credit specification of the instance. See [Credit Specification](#credit-specification) below for more details. -* `hibernation` - (Optional) If true, the launched EC2 instance will support hibernation. -* `metadata_options` - (Optional) Customize the metadata options of the instance. See [Metadata Options](#metadata-options) below for more details. -* `enclave_options` - (Optional) Enable Nitro Enclaves on launched instances. See [Enclave Options](#enclave-options) below for more details. +* `vpc_security_group_ids` - (Optional, VPC only) A list of security group IDs to associate with. ### Timeouts @@ -121,100 +144,56 @@ The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/d * `update` - (Defaults to 10 mins) Used when stopping and starting the instance when necessary during update - e.g. when changing instance type * `delete` - (Defaults to 20 mins) Used when terminating the instance -### Block devices +### Credit Specification + +~> **NOTE:** Removing this configuration on existing instances will only stop managing it. It will not change the configuration back to the default for the instance type. + +Credit specification can be applied/modified to the EC2 Instance at any time. + +The `credit_specification` block supports the following: + +* `cpu_credits` - (Optional) Credit option for CPU usage. Valid values include `standard` or `unlimited`. T3 instances are launched as unlimited by default. T2 instances are launched as standard by default. + +### EBS, Ephemeral, and Root Block Devices Each of the `*_block_device` attributes control a portion of the AWS -Instance's "Block Device Mapping". It's a good idea to familiarize yourself with [AWS's Block Device -Mapping docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html) -to understand the implications of using these attributes. +Instance's "Block Device Mapping". 
It's a good idea to familiarize yourself with [AWS's Block Device Mapping docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html) to understand the implications of using these attributes. The `root_block_device` mapping supports the following: -* `volume_type` - (Optional) The type of volume. Can be `"standard"`, `"gp2"`, `"gp3"`, `"io1"`, `"io2"`, `"sc1"`, or `"st1"`. (Default: `"gp2"`). -* `volume_size` - (Optional) The size of the volume in gibibytes (GiB). -* `iops` - (Optional) The amount of provisioned - [IOPS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-io-characteristics.html). Only valid for volume_type of `"io1"`, `"io2"` or `"gp3"`. -* `throughput` - (Optional) The throughput to provision for a volume in mebibytes per second (MiB/s). This is only valid for `volume_type` of `"gp3"`. -* `delete_on_termination` - (Optional) Whether the volume should be destroyed - on instance termination (Default: `true`). -* `encrypted` - (Optional) Enable volume encryption. (Default: `false`). Must be configured to perform drift detection. +* `delete_on_termination` - (Optional) Whether the volume should be destroyed on instance termination. Defaults to `true`. +* `encrypted` - (Optional) Whether to enable volume encryption. Defaults to `false`. Must be configured to perform drift detection. +* `iops` - (Optional) Amount of provisioned [IOPS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-io-characteristics.html). Only valid for volume_type of `io1`, `io2` or `gp3`. * `kms_key_id` - (Optional) Amazon Resource Name (ARN) of the KMS Key to use when encrypting the volume. Must be configured to perform drift detection. * `tags` - (Optional) A map of tags to assign to the device. +* `throughput` - (Optional) Throughput to provision for a volume in mebibytes per second (MiB/s). This is only valid for `volume_type` of `gp3`. +* `volume_size` - (Optional) Size of the volume in gibibytes (GiB). +* `volume_type` - (Optional) Type of volume. Valid values include `standard`, `gp2`, `gp3`, `io1`, `io2`, `sc1`, or `st1`. Defaults to `gp2`. -Modifying any of the `root_block_device` settings other than `volume_size` requires resource -replacement. +Modifying any of the `root_block_device` settings other than `volume_size` requires resource replacement. Each `ebs_block_device` supports the following: -* `device_name` - (Required) The name of the device to mount. -* `snapshot_id` - (Optional) The Snapshot ID to mount. -* `volume_type` - (Optional) The type of volume. Can be `"standard"`, `"gp2"`, `"gp3"`, `"io1"`, `"io2"`, `"sc1"`, or `"st1"`. (Default: `"gp2"`). -* `volume_size` - (Optional) The size of the volume in gibibytes (GiB). -* `iops` - (Optional) The amount of provisioned - [IOPS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-io-characteristics.html). - Only valid for volume_type of `"io1"`, `"io2"` or `"gp3"`. -* `throughput` - (Optional) The throughput to provision for a volume in mebibytes per second (MiB/s). This is only valid for `volume_type` of `"gp3"`. -* `delete_on_termination` - (Optional) Whether the volume should be destroyed - on instance termination (Default: `true`). -* `encrypted` - (Optional) Enables [EBS - encryption](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) - on the volume (Default: `false`). Cannot be used with `snapshot_id`. Must be configured to perform drift detection. +* `delete_on_termination` - (Optional) Whether the volume should be destroyed on instance termination. 
Defaults to `true`. +* `device_name` - (Required) Name of the device to mount. +* `encrypted` - (Optional) Enables [EBS encryption](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) on the volume. Defaults to `false`. Cannot be used with `snapshot_id`. Must be configured to perform drift detection. +* `iops` - (Optional) Amount of provisioned [IOPS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-io-characteristics.html). Only valid for volume_type of `io1`, `io2` or `gp3`. * `kms_key_id` - (Optional) Amazon Resource Name (ARN) of the KMS Key to use when encrypting the volume. Must be configured to perform drift detection. +* `snapshot_id` - (Optional) Snapshot ID to mount. * `tags` - (Optional) A map of tags to assign to the device. +* `throughput` - (Optional) Throughput to provision for a volume in mebibytes per second (MiB/s). This is only valid for `volume_type` of `gp3`. +* `volume_size` - (Optional) Size of the volume in gibibytes (GiB). +* `volume_type` - (Optional) Type of volume. Valid values include `standard`, `gp2`, `gp3`, `io1`, `io2`, `sc1`, or `st1`. Defaults to `gp2`. ~> **NOTE:** Currently, changes to the `ebs_block_device` configuration of _existing_ resources cannot be automatically detected by Terraform. To manage changes and attachments of an EBS block to an instance, use the `aws_ebs_volume` and `aws_volume_attachment` resources instead. If you use `ebs_block_device` on an `aws_instance`, Terraform will assume management over the full set of non-root EBS block devices for the instance, treating additional block devices as drift. For this reason, `ebs_block_device` cannot be mixed with external `aws_ebs_volume` and `aws_volume_attachment` resources for a given instance. Each `ephemeral_block_device` supports the following: * `device_name` - The name of the block device to mount on the instance. -* `virtual_name` - (Optional) The [Instance Store Device - Name](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html#InstanceStoreDeviceNames) - (e.g. `"ephemeral0"`). * `no_device` - (Optional) Suppresses the specified device included in the AMI's block device mapping. +* `virtual_name` - (Optional) [Instance Store Device Name](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html#InstanceStoreDeviceNames) (e.g. `ephemeral0`). -Each AWS Instance type has a different set of Instance Store block devices -available for attachment. AWS [publishes a -list](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html#StorageOnInstanceTypes) -of which ephemeral devices are available on each type. The devices are always -identified by the `virtual_name` in the format `"ephemeral{0..N}"`. - -### Network Interfaces - -Each of the `network_interface` blocks attach a network interface to an EC2 Instance during boot time. However, because -the network interface is attached at boot-time, replacing/modifying the network interface **WILL** trigger a recreation -of the EC2 Instance. If you should need at any point to detach/modify/re-attach a network interface to the instance, use -the `aws_network_interface` or `aws_network_interface_attachment` resources instead. - -The `network_interface` configuration block _does_, however, allow users to supply their own network interface to be used -as the default network interface on an EC2 Instance, attached at `eth0`. - -Each `network_interface` block supports the following: - -* `device_index` - (Required) The integer index of the network interface attachment. 
Limited by instance type. -* `network_interface_id` - (Required) The ID of the network interface to attach. -* `delete_on_termination` - (Optional) Whether or not to delete the network interface on instance termination. Defaults to `false`. Currently, the only valid value is `false`, as this is only supported when creating new network interfaces when launching an instance. - -### Credit Specification - -~> **NOTE:** Removing this configuration on existing instances will only stop managing it. It will not change the configuration back to the default for the instance type. - -Credit specification can be applied/modified to the EC2 Instance at any time. - -The `credit_specification` block supports the following: - -* `cpu_credits` - (Optional) The credit option for CPU usage. Can be `"standard"` or `"unlimited"`. T3 instances are launched as unlimited by default. T2 instances are launched as standard by default. - -### Metadata Options - -Metadata options can be applied/modified to the EC2 Instance at any time. - -The `metadata_options` block supports the following: - -* `http_endpoint` - (Optional) Whether the metadata service is available. Can be `"enabled"` or `"disabled"`. (Default: `"enabled"`). -* `http_tokens` - (Optional) Whether or not the metadata service requires session tokens, also referred to as _Instance Metadata Service Version 2 (IMDSv2)_. Can be `"optional"` or `"required"`. (Default: `"optional"`). -* `http_put_response_hop_limit` - (Optional) The desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel. Can be an integer from `1` to `64`. (Default: `1`). - -For more information, see the documentation on the [Instance Metadata Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). +Each AWS Instance type has a different set of Instance Store block devices available for attachment. AWS [publishes a list](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html#StorageOnInstanceTypes) of which ephemeral devices are available on each type. The devices are always identified by the `virtual_name` in the format `ephemeral{0..N}`. ### Enclave Options @@ -224,89 +203,55 @@ Enclave options apply to the instance at boot time. The `enclave_options` block supports the following: -* `enabled` - (Optional) Whether Nitro Enclaves will be enabled on the instance. (Default: `"false"`). +* `enabled` - (Optional) Whether Nitro Enclaves will be enabled on the instance. Defaults to `false`. For more information, see the documentation on [Nitro Enclaves](https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html). -### Example +### Metadata Options -```hcl -resource "aws_vpc" "my_vpc" { - cidr_block = "172.16.0.0/16" +Metadata options can be applied/modified to the EC2 Instance at any time. - tags = { - Name = "tf-example" - } -} +The `metadata_options` block supports the following: -resource "aws_subnet" "my_subnet" { - vpc_id = aws_vpc.my_vpc.id - cidr_block = "172.16.10.0/24" - availability_zone = "us-west-2a" +* `http_endpoint` - (Optional) Whether the metadata service is available. Valid values include `enabled` or `disabled`. Defaults to `enabled`. +* `http_put_response_hop_limit` - (Optional) Desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel. Valid values are integer from `1` to `64`. Defaults to `1`. 
+* `http_tokens` - (Optional) Whether or not the metadata service requires session tokens, also referred to as _Instance Metadata Service Version 2 (IMDSv2)_. Valid values include `optional` or `required`. Defaults to `optional`. - tags = { - Name = "tf-example" - } -} +For more information, see the documentation on the [Instance Metadata Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). -resource "aws_network_interface" "foo" { - subnet_id = aws_subnet.my_subnet.id - private_ips = ["172.16.10.100"] +### Network Interfaces - tags = { - Name = "primary_network_interface" - } -} +Each of the `network_interface` blocks attach a network interface to an EC2 Instance during boot time. However, because the network interface is attached at boot-time, replacing/modifying the network interface **WILL** trigger a recreation of the EC2 Instance. If you should need at any point to detach/modify/re-attach a network interface to the instance, use the `aws_network_interface` or `aws_network_interface_attachment` resources instead. -resource "aws_instance" "foo" { - ami = "ami-005e54dee72cc1d00" # us-west-2 - instance_type = "t2.micro" +The `network_interface` configuration block _does_, however, allow users to supply their own network interface to be used as the default network interface on an EC2 Instance, attached at `eth0`. - network_interface { - network_interface_id = aws_network_interface.foo.id - device_index = 0 - } +Each `network_interface` block supports the following: - credit_specification { - cpu_credits = "unlimited" - } -} -``` +* `delete_on_termination` - (Optional) Whether or not to delete the network interface on instance termination. Defaults to `false`. Currently, the only valid value is `false`, as this is only supported when creating new network interfaces when launching an instance. +* `device_index` - (Required) Integer index of the network interface attachment. Limited by instance type. +* `network_interface_id` - (Required) ID of the network interface to attach. ## Attributes Reference In addition to all arguments above, the following attributes are exported: -* `id` - The instance ID. * `arn` - The ARN of the instance. -* `availability_zone` - The availability zone of the instance. -* `placement_group` - The placement group of the instance. -* `key_name` - The key name of the instance -* `password_data` - Base-64 encoded encrypted password data for the instance. - Useful for getting the administrator password for instances running Microsoft Windows. - This attribute is only exported if `get_password_data` is true. - Note that this encrypted value will be stored in the state file, as with all exported attributes. - See [GetPasswordData](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetPasswordData.html) for more information. -* `public_dns` - The public DNS name assigned to the instance. For EC2-VPC, this - is only available if you've enabled DNS hostnames for your VPC -* `public_ip` - The public IP address assigned to the instance, if applicable. **NOTE**: If you are using an [`aws_eip`](/docs/providers/aws/r/eip.html) with your instance, you should refer to the EIP's address directly and not use `public_ip`, as this field will change after the EIP is attached. -* `ipv6_addresses` - A list of assigned IPv6 addresses, if any -* `primary_network_interface_id` - The ID of the instance's primary network interface. -* `private_dns` - The private DNS name assigned to the instance. 
Can only be - used inside the Amazon EC2, and only available if you've enabled DNS hostnames - for your VPC -* `private_ip` - The private IP address assigned to the instance -* `security_groups` - The associated security groups. -* `vpc_security_group_ids` - The associated security groups in non-default VPC -* `subnet_id` - The VPC subnet ID. -* `outpost_arn` - The ARN of the Outpost the instance is assigned to. -* `credit_specification` - Credit specification of instance. * `instance_state` - The state of the instance. One of: `pending`, `running`, `shutting-down`, `terminated`, `stopping`, `stopped`. See [Instance Lifecycle](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) for more information. +* `outpost_arn` - The ARN of the Outpost the instance is assigned to. +* `password_data` - Base-64 encoded encrypted password data for the instance. Useful for getting the administrator password for instances running Microsoft Windows. This attribute is only exported if `get_password_data` is true. Note that this encrypted value will be stored in the state file, as with all exported attributes. See [GetPasswordData](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetPasswordData.html) for more information. +* `primary_network_interface_id` - The ID of the instance's primary network interface. +* `private_dns` - The private DNS name assigned to the instance. Can only be used inside the Amazon EC2, and only available if you've enabled DNS hostnames for your VPC. +* `public_dns` - The public DNS name assigned to the instance. For EC2-VPC, this is only available if you've enabled DNS hostnames for your VPC. +* `public_ip` - The public IP address assigned to the instance, if applicable. **NOTE**: If you are using an [`aws_eip`](/docs/providers/aws/r/eip.html) with your instance, you should refer to the EIP's address directly and not use `public_ip` as this field will change after the EIP is attached. + +For `ebs_block_device`, in addition to the arguments above, the following attribute is exported: + +* `volume_id` - ID of the volume. For example, the ID can be accessed like this, `aws_instance.web.ebs_block_device.2.volume_id`. -For any `root_block_device` and `ebs_block_device` the `volume_id` is exported. -e.g. `aws_instance.web.root_block_device.0.volume_id` +For `root_block_device`, in addition to the arguments above, the following attributes are exported: -For the `root_block_device` the `device_name` is exported. +* `volume_id` - ID of the volume. For example, the ID can be accessed like this, `aws_instance.web.root_block_device.0.volume_id`. +* `device_name` - Device name, e.g. `/dev/sdh` or `xvdh`. 
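The per-device `tags` arguments documented above are easiest to see in context. The following is a minimal, hypothetical sketch (the AMI ID, device names, and sizes are illustrative placeholders, not values taken from this patch) that tags the root volume and one extra EBS volume directly; `volume_tags` is deliberately omitted, since it cannot be combined with per-device `tags`:

```hcl
# A minimal sketch of per-volume tagging; all names and IDs are placeholders.
resource "aws_instance" "example" {
  ami           = "ami-0123456789abcdef0" # hypothetical AMI ID
  instance_type = "t3.micro"

  # Tag the root volume directly instead of using volume_tags.
  root_block_device {
    volume_type = "gp3"
    volume_size = 20

    tags = {
      Name = "example-root"
    }
  }

  # An additional EBS volume with its own, different tags.
  ebs_block_device {
    device_name = "/dev/sdh"
    volume_size = 10

    tags = {
      Name = "example-data"
    }
  }
}
```

Under this assumption, each device's tags can diverge, and volumes managed through separate `aws_ebs_volume` resources keep their own `tags` without the interference noted for `volume_tags`.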
## Import From b2909d53710e80369acbb3559dafc81e74054fac Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 13 Jan 2021 12:22:49 -0500 Subject: [PATCH 0646/1212] resource/instance: Fix linting --- aws/resource_aws_instance_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_instance_test.go b/aws/resource_aws_instance_test.go index 9b4011c7e63..4fc3687923c 100644 --- a/aws/resource_aws_instance_test.go +++ b/aws/resource_aws_instance_test.go @@ -4134,8 +4134,8 @@ resource "aws_ebs_volume" "test" { type = "gp2" tags = { - Name = %[1]q - Factum = "PerAsperaAdAstra" + Name = %[1]q + Factum = "PerAsperaAdAstra" } } @@ -4166,8 +4166,8 @@ resource "aws_ebs_volume" "test" { type = "gp2" tags = { - Name = %[1]q - Factum = "VincitQuiSeVincit" + Name = %[1]q + Factum = "VincitQuiSeVincit" } } From e8ee05b6efcd928cb6a74fd1b00c47fd9351ef72 Mon Sep 17 00:00:00 2001 From: Dirk Avery <31492422+YakDriver@users.noreply.github.com> Date: Wed, 13 Jan 2021 13:04:51 -0500 Subject: [PATCH 0647/1212] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 73658638e50..47fa35f464e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,11 +16,13 @@ ENHANCEMENTS * resource/aws_dms_endpoint: Add `s3_settings` `date_partition_enabled` argument [GH-16827] * resource/aws_elasticache_cluster: Add support for final snapshot with Redis engine [GH-15592] * resource/aws_elasticache_replication_group: Add support for final snapshot [GH-15592] +* resource/aws_instance: Add `tags` parameter to `root_block_device`, `ebs_block_device` blocks [GH-15474] BUG FIXES * resource/aws_db_instance: Correctly validate `final_snapshot_identifier` argument at plan-time [GH-16885] * resource/aws_dms_endpoint: Support `extra_connection_attributes` for all engine names during create and read [GH-16827] +* resource/aws_instance: Prevent `volume_tags` from improperly interfering with `tags` in `aws_ebs_volume` [GH-15474] * resource/aws_networkfirewall_rule_group: Prevent resource recreation due to `stateful_rule` changes after creation [GH-16884] * resource/aws_route53_zone_association: Prevent deletion errors for missing Hosted Zone or VPC association [GH-17023] * resource/aws_s3_bucket_inventory: Prevent crashes with empty `destination`, `filter`, and `schedule` configuration blocks [GH-17055] From dd7322dcd1512c82a976ed36aa0637a7aeb07cc1 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 13 Jan 2021 14:14:23 -0800 Subject: [PATCH 0648/1212] Fixes temporarily disabling automatic failover when setting primary cluster id --- ...ource_aws_elasticache_replication_group.go | 95 +++++----- ..._aws_elasticache_replication_group_test.go | 164 +++++++++--------- 2 files changed, 126 insertions(+), 133 deletions(-) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index 876855e5d36..43e47db2bde 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -113,8 +113,7 @@ func resourceAwsElasticacheReplicationGroup() *schema.Resource { Optional: true, Computed: true, StateFunc: func(val interface{}) string { - // Elasticache always changes the maintenance - // to lowercase + // Elasticache always changes the maintenance to lowercase return strings.ToLower(val.(string)) }, ValidateFunc: validateOnceAWeekWindowFormat, @@ -686,36 +685,16 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta
i reEnableAutomaticFailover = true } - modifyReplicationGroupInput := &elasticache.ModifyReplicationGroupInput{ - ApplyImmediately: aws.Bool(true), - AutomaticFailoverEnabled: aws.Bool(false), - ReplicationGroupId: aws.String(d.Id()), - } - log.Printf("[DEBUG] Modifying Elasticache Replication Group: %s", modifyReplicationGroupInput) - _, err = conn.ModifyReplicationGroup(modifyReplicationGroupInput) - if err != nil { - return fmt.Errorf("error modifying Elasticache Replication Group (%s) to set new primary: %sw", d.Id(), err) - } - err = waitForModifyElasticacheReplicationGroup(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) + err = resourceAwsElasticacheReplicationGroupDisableAutomaticFailover(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) if err != nil { - return fmt.Errorf("error waiting for Elasticache Replication Group (%s) to be available: %w", d.Id(), err) + return fmt.Errorf("error disabling Elasticache Replication Group (%s) automatic failover: %w", d.Id(), err) } } // Set new primary - modifyReplicationGroupInput := &elasticache.ModifyReplicationGroupInput{ - ApplyImmediately: aws.Bool(true), - PrimaryClusterId: aws.String(newPrimaryClusterID), - ReplicationGroupId: aws.String(d.Id()), - } - log.Printf("[DEBUG] Modifying Elasticache Replication Group: %s", modifyReplicationGroupInput) - _, err = conn.ModifyReplicationGroup(modifyReplicationGroupInput) - if err != nil { - return fmt.Errorf("error modifying Elasticache Replication Group (%s) to set new primary: %w", d.Id(), err) - } - err = waitForModifyElasticacheReplicationGroup(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) + err = resourceAwsElasticacheReplicationGroupSetPrimaryClusterID(conn, d.Id(), newPrimaryClusterID, d.Timeout(schema.TimeoutUpdate)) if err != nil { - return fmt.Errorf("error waiting for Elasticache Replication Group (%s) to be available: %w", d.Id(), err) + return fmt.Errorf("error changing Elasticache Replication Group (%s) primary cluster: %w", d.Id(), err) } // Finally retry deleting the cache cluster @@ -737,15 +716,9 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i // Re-enable automatic failover if we needed to temporarily disable it if reEnableAutomaticFailover { - input := &elasticache.ModifyReplicationGroupInput{ - ApplyImmediately: aws.Bool(true), - AutomaticFailoverEnabled: aws.Bool(true), - ReplicationGroupId: aws.String(d.Id()), - } - log.Printf("[DEBUG] Modifying Elasticache Replication Group: %s", input) - _, err := conn.ModifyReplicationGroup(input) + err := resourceAwsElasticacheReplicationGroupEnableAutomaticFailover(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) if err != nil { - return fmt.Errorf("error modifying Elasticache Replication Group (%s) to re-enable automatic failover: %w", d.Id(), err) + return fmt.Errorf("error re-enabling Elasticache Replication Group (%s) automatic failover: %w", d.Id(), err) } } } @@ -833,15 +806,10 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i } if requestUpdate { - _, err := conn.ModifyReplicationGroup(params) + err := resourceAwsElasticacheReplicationGroupModify(conn, d.Timeout(schema.TimeoutUpdate), params) if err != nil { return fmt.Errorf("error updating Elasticache Replication Group (%s): %w", d.Id(), err) } - - err = waitForModifyElasticacheReplicationGroup(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("error waiting for Elasticache Replication Group (%s) to be updated: %w", d.Id(), err) - } } if d.HasChange("tags") { @@ -900,9 +868,9 
@@ func cacheReplicationGroupStateRefreshFunc(conn *elasticache.ElastiCache, replic var rg *elasticache.ReplicationGroup for _, replicationGroup := range resp.ReplicationGroups { - rgId := aws.StringValue(replicationGroup.ReplicationGroupId) - if rgId == replicationGroupId { - log.Printf("[DEBUG] Found matching ElastiCache Replication Group: %s", rgId) + rgID := aws.StringValue(replicationGroup.ReplicationGroupId) + if rgID == replicationGroupId { + log.Printf("[DEBUG] Found matching ElastiCache Replication Group: %s", rgID) rg = replicationGroup } } @@ -989,6 +957,40 @@ func flattenElasticacheNodeGroupsToClusterMode(nodeGroups []*elasticache.NodeGro return []map[string]interface{}{m} } +func resourceAwsElasticacheReplicationGroupDisableAutomaticFailover(conn *elasticache.ElastiCache, replicationGroupID string, timeout time.Duration) error { + return resourceAwsElasticacheReplicationGroupModify(conn, timeout, &elasticache.ModifyReplicationGroupInput{ + ReplicationGroupId: aws.String(replicationGroupID), + ApplyImmediately: aws.Bool(true), + AutomaticFailoverEnabled: aws.Bool(false), + MultiAZEnabled: aws.Bool(false), + }) +} + +func resourceAwsElasticacheReplicationGroupEnableAutomaticFailover(conn *elasticache.ElastiCache, replicationGroupID string, timeout time.Duration) error { + return resourceAwsElasticacheReplicationGroupModify(conn, timeout, &elasticache.ModifyReplicationGroupInput{ + ReplicationGroupId: aws.String(replicationGroupID), + ApplyImmediately: aws.Bool(true), + AutomaticFailoverEnabled: aws.Bool(true), + MultiAZEnabled: aws.Bool(true), + }) +} + +func resourceAwsElasticacheReplicationGroupSetPrimaryClusterID(conn *elasticache.ElastiCache, replicationGroupID, primaryClusterID string, timeout time.Duration) error { + return resourceAwsElasticacheReplicationGroupModify(conn, timeout, &elasticache.ModifyReplicationGroupInput{ + ReplicationGroupId: aws.String(replicationGroupID), + ApplyImmediately: aws.Bool(true), + PrimaryClusterId: aws.String(primaryClusterID), + }) +} + +func resourceAwsElasticacheReplicationGroupModify(conn *elasticache.ElastiCache, timeout time.Duration, input *elasticache.ModifyReplicationGroupInput) error { + _, err := conn.ModifyReplicationGroup(input) + if err != nil { + return err + } + return waitForModifyElasticacheReplicationGroup(conn, aws.StringValue(input.ReplicationGroupId), timeout) +} + func waitForModifyElasticacheReplicationGroup(conn *elasticache.ElastiCache, replicationGroupID string, timeout time.Duration) error { pending := []string{"creating", "modifying", "snapshotting"} stateConf := &resource.StateChangeConf{ @@ -1004,10 +1006,3 @@ func waitForModifyElasticacheReplicationGroup(conn *elasticache.ElastiCache, rep _, err := stateConf.WaitForState() return err } - -func validateAwsElastiCacheReplicationGroupEngine(v interface{}, k string) (ws []string, errors []error) { - if strings.ToLower(v.(string)) != "redis" { - errors = append(errors, fmt.Errorf("The only acceptable Engine type when using Replication Groups is Redis")) - } - return -} diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go index 4b1ec5444ef..127cdf3acf5 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -71,7 +71,10 @@ func TestAccAWSElasticacheReplicationGroup_basic(t *testing.T) { Config: testAccAWSElasticacheReplicationGroupConfig(rName), Check: resource.ComposeTestCheckFunc( 
testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), + resource.TestCheckResourceAttr(resourceName, "engine", "redis"), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), + resource.TestCheckResourceAttr(resourceName, "multi_az_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", "false"), resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "2"), resource.TestCheckResourceAttr(resourceName, "auto_minor_version_upgrade", "false"), resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.redis6.x"), @@ -360,6 +363,7 @@ func TestAccAWSElasticacheReplicationGroup_redisClusterInVpc2(t *testing.T) { testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "multi_az_enabled", "false"), resource.TestCheckResourceAttr(resourceName, "snapshot_window", "02:00-03:00"), resource.TestCheckResourceAttr(resourceName, "snapshot_retention_limit", "7"), resource.TestCheckResourceAttrSet(resourceName, "primary_endpoint_address"), @@ -638,10 +642,11 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters(t *testing.T) { CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSElasticacheReplicationGroupConfig_NumberCacheClusters(rName, 2, false), + Config: testAccAWSElasticacheReplicationGroupConfig_NumberCacheClusters(rName, 2), Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &replicationGroup), resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "multi_az_enabled", "false"), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), ), }, @@ -652,18 +657,20 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters(t *testing.T) { ImportStateVerifyIgnore: []string{"apply_immediately"}, }, { - Config: testAccAWSElasticacheReplicationGroupConfig_NumberCacheClusters(rName, 4, false), + Config: testAccAWSElasticacheReplicationGroupConfig_NumberCacheClusters(rName, 4), Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &replicationGroup), resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "multi_az_enabled", "false"), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "4"), ), }, { - Config: testAccAWSElasticacheReplicationGroupConfig_NumberCacheClusters(rName, 2, false), + Config: testAccAWSElasticacheReplicationGroupConfig_NumberCacheClusters(rName, 2), Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &replicationGroup), resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "multi_az_enabled", "false"), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), ), }, @@ -682,10 +689,11 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFail CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSElasticacheReplicationGroupConfig_NumberCacheClusters(rName, 3, false), + Config: 
testAccAWSElasticacheReplicationGroupConfig_FailoverMultiAZ(rName, 3, false, false), Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &replicationGroup), resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "multi_az_enabled", "false"), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "3"), ), }, @@ -697,24 +705,19 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFail }, { PreConfig: func() { - // Simulate failover so primary is on node we are trying to delete + // Ensure that primary is on the node we are trying to delete conn := testAccProvider.Meta().(*AWSClient).elasticacheconn - input := &elasticache.ModifyReplicationGroupInput{ - ApplyImmediately: aws.Bool(true), - PrimaryClusterId: aws.String(fmt.Sprintf("%s-003", rName)), - ReplicationGroupId: aws.String(rName), - } - if _, err := conn.ModifyReplicationGroup(input); err != nil { - t.Fatalf("error setting new primary cache cluster: %s", err) - } - if err := waitForModifyElasticacheReplicationGroup(conn, rName, 40*time.Minute); err != nil { - t.Fatalf("error waiting for new primary cache cluster: %s", err) + timeout := 40 * time.Minute + + if err := resourceAwsElasticacheReplicationGroupSetPrimaryClusterID(conn, rName, fmt.Sprintf("%s-003", rName), timeout); err != nil { + t.Fatalf("error changing primary cache cluster: %s", err) } }, - Config: testAccAWSElasticacheReplicationGroupConfig_NumberCacheClusters(rName, 2, false), + Config: testAccAWSElasticacheReplicationGroupConfig_FailoverMultiAZ(rName, 2, false, false), Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &replicationGroup), resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "multi_az_enabled", "false"), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), ), }, @@ -733,7 +736,7 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFail CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSElasticacheReplicationGroupConfig_NumberCacheClusters(rName, 3, true), + Config: testAccAWSElasticacheReplicationGroupConfig_FailoverMultiAZ(rName, 3, true, true), Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &replicationGroup), resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", "true"), @@ -743,51 +746,26 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFail }, { PreConfig: func() { - // Simulate failover so primary is on node we are trying to delete + // Ensure that primary is on the node we are trying to delete conn := testAccProvider.Meta().(*AWSClient).elasticacheconn + timeout := 40 * time.Minute // Must disable automatic failover first - var input *elasticache.ModifyReplicationGroupInput = &elasticache.ModifyReplicationGroupInput{ - ApplyImmediately: aws.Bool(true), - AutomaticFailoverEnabled: aws.Bool(false), - MultiAZEnabled: aws.Bool(false), - ReplicationGroupId: aws.String(rName), - } - if _, err := conn.ModifyReplicationGroup(input); err != nil { + if err := resourceAwsElasticacheReplicationGroupDisableAutomaticFailover(conn, rName, timeout); err != nil { t.Fatalf("error disabling automatic failover: %s", err) } - if err := 
waitForModifyElasticacheReplicationGroup(conn, rName, 40*time.Minute); err != nil { - t.Fatalf("error waiting for disabling automatic failover: %s", err) - } - // Failover - input = &elasticache.ModifyReplicationGroupInput{ - ApplyImmediately: aws.Bool(true), - PrimaryClusterId: aws.String(fmt.Sprintf("%s-003", rName)), - ReplicationGroupId: aws.String(rName), - } - if _, err := conn.ModifyReplicationGroup(input); err != nil { - t.Fatalf("error setting new primary cache cluster: %s", err) - } - if err := waitForModifyElasticacheReplicationGroup(conn, rName, 40*time.Minute); err != nil { - t.Fatalf("error waiting for new primary cache cluster: %s", err) + // Set primary + if err := resourceAwsElasticacheReplicationGroupSetPrimaryClusterID(conn, rName, fmt.Sprintf("%s-003", rName), timeout); err != nil { + t.Fatalf("error changing primary cache cluster: %s", err) } // Re-enable automatic failover like nothing ever happened - input = &elasticache.ModifyReplicationGroupInput{ - ApplyImmediately: aws.Bool(true), - AutomaticFailoverEnabled: aws.Bool(true), - MultiAZEnabled: aws.Bool(true), - ReplicationGroupId: aws.String(rName), - } - if _, err := conn.ModifyReplicationGroup(input); err != nil { - t.Fatalf("error enabled automatic failover: %s", err) - } - if err := waitForModifyElasticacheReplicationGroup(conn, rName, 40*time.Minute); err != nil { - t.Fatalf("error waiting for enabled automatic failover: %s", err) + if err := resourceAwsElasticacheReplicationGroupEnableAutomaticFailover(conn, rName, timeout); err != nil { + t.Fatalf("error re-enabling automatic failover: %s", err) } }, - Config: testAccAWSElasticacheReplicationGroupConfig_NumberCacheClusters(rName, 2, true), + Config: testAccAWSElasticacheReplicationGroupConfig_FailoverMultiAZ(rName, 2, true, true), Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &replicationGroup), resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", "true"), @@ -865,34 +843,6 @@ func TestAccAWSElasticacheReplicationGroup_FinalSnapshot(t *testing.T) { }) } -func TestResourceAWSElastiCacheReplicationGroupEngineValidation(t *testing.T) { - cases := []struct { - Value string - ErrCount int - }{ - { - Value: "Redis", - ErrCount: 0, - }, - { - Value: "REDIS", - ErrCount: 0, - }, - { - Value: "memcached", - ErrCount: 1, - }, - } - - for _, tc := range cases { - _, errors := validateAwsElastiCacheReplicationGroupEngine(tc.Value, "aws_elasticache_replication_group_engine") - - if len(errors) != tc.ErrCount { - t.Fatalf("Expected the ElastiCache Replication Group Engine to trigger a validation error") - } - } -} - func testAccCheckAWSElasticacheReplicationGroupExists(n string, v *elasticache.ReplicationGroup) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -1227,6 +1177,7 @@ resource "aws_elasticache_replication_group" "test" { security_group_ids = [aws_security_group.test.id] availability_zones = [data.aws_availability_zones.available.names[0], data.aws_availability_zones.available.names[1]] automatic_failover_enabled = true + multi_az_enabled = true snapshot_window = "02:00-03:00" snapshot_retention_limit = 7 } @@ -1698,7 +1649,53 @@ resource "aws_elasticache_replication_group" "test" { `, rInt, rInt, rString10, rString16) } -func testAccAWSElasticacheReplicationGroupConfig_NumberCacheClusters(rName string, numberCacheClusters int, autoFailover bool) string { +func testAccAWSElasticacheReplicationGroupConfig_NumberCacheClusters(rName 
string, numberCacheClusters int) string { + return fmt.Sprintf(` +data "aws_availability_zones" "available" { + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +resource "aws_vpc" "test" { + cidr_block = "192.168.0.0/16" + + tags = { + Name = "terraform-testacc-elasticache-replication-group-number-cache-clusters" + } +} + +resource "aws_subnet" "test" { + count = 2 + + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = "192.168.${count.index}.0/24" + vpc_id = aws_vpc.test.id + + tags = { + Name = "tf-acc-elasticache-replication-group-number-cache-clusters" + } +} + +resource "aws_elasticache_subnet_group" "test" { + name = "%[1]s" + subnet_ids = aws_subnet.test[*].id +} + +resource "aws_elasticache_replication_group" "test" { + node_type = "cache.t2.micro" + number_cache_clusters = %[2]d + replication_group_id = %[1]q + replication_group_description = "Terraform Acceptance Testing - number_cache_clusters" + subnet_group_name = aws_elasticache_subnet_group.test.name +} +`, rName, numberCacheClusters) +} + +func testAccAWSElasticacheReplicationGroupConfig_FailoverMultiAZ(rName string, numberCacheClusters int, autoFailover, multiAZ bool) string { return fmt.Sprintf(` data "aws_availability_zones" "available" { state = "available" @@ -1736,14 +1733,15 @@ resource "aws_elasticache_subnet_group" "test" { resource "aws_elasticache_replication_group" "test" { # InvalidParameterCombination: Automatic failover is not supported for T1 and T2 cache node types. - automatic_failover_enabled = %[2]t + automatic_failover_enabled = %[3]t + multi_az_enabled = %[4]t node_type = "cache.t3.medium" - number_cache_clusters = %[3]d + number_cache_clusters = %[2]d replication_group_id = "%[1]s" replication_group_description = "Terraform Acceptance Testing - number_cache_clusters" subnet_group_name = aws_elasticache_subnet_group.test.name } -`, rName, autoFailover, numberCacheClusters) +`, rName, numberCacheClusters, autoFailover, multiAZ) } func testAccAWSElasticacheReplicationGroupConfigTags1(rName, tagKey1, tagValue1 string) string { From 8617e5a92745099a9e70fdf6f2f9a14ba069aef9 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 13 Jan 2021 14:15:12 -0800 Subject: [PATCH 0649/1212] Uses standard validation function for engine parameter --- aws/resource_aws_elasticache_replication_group.go | 2 +- website/docs/r/elasticache_replication_group.html.markdown | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index 43e47db2bde..debae58817b 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -101,7 +101,7 @@ func resourceAwsElasticacheReplicationGroup() *schema.Resource { Optional: true, ForceNew: true, Default: "redis", - ValidateFunc: validateAwsElastiCacheReplicationGroupEngine, + ValidateFunc: validation.StringInSlice([]string{"redis"}, true), }, "engine_version": { Type: schema.TypeString, diff --git a/website/docs/r/elasticache_replication_group.html.markdown b/website/docs/r/elasticache_replication_group.html.markdown index bfe1a316fa1..e1d2b5b986f 100644 --- a/website/docs/r/elasticache_replication_group.html.markdown +++ b/website/docs/r/elasticache_replication_group.html.markdown @@ -108,7 +108,7 @@ The following arguments are supported: * `multi_az_enabled` - (Optional) Specifies whether to enable Multi-AZ Support for the 
replication group. Defaults to `false`. * `auto_minor_version_upgrade` - (Optional) Specifies whether minor engine upgrades will be applied automatically to the underlying Cache Cluster instances during the maintenance window. This parameter is currently not supported by the AWS API. Defaults to `true`. * `availability_zones` - (Optional) A list of EC2 availability zones in which the replication group's cache clusters will be created. The order of the availability zones in the list is not important. -* `engine` - (Optional) The name of the cache engine to be used for the clusters in this replication group. e.g. `redis` +* `engine` - (Optional) The name of the cache engine to be used for the clusters in this replication group. The only valid value is `redis`. * `at_rest_encryption_enabled` - (Optional) Whether to enable encryption at rest. * `transit_encryption_enabled` - (Optional) Whether to enable encryption in transit. * `auth_token` - (Optional) The password used to access a password protected server. Can be specified only if `transit_encryption_enabled = true`. From 44eb67ec19b41d186cd9fd1d6b3ad0a05d1934e2 Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Wed, 13 Jan 2021 14:52:29 -0800 Subject: [PATCH 0650/1212] Update CHANGELOG.md for #16688 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 47fa35f464e..b0cf969fe54 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,8 @@ ENHANCEMENTS * resource/aws_elasticache_cluster: Add support for final snapshot with Redis engine [GH-15592] * resource/aws_elasticache_replication_group: Add support for final snapshot [GH-15592] * resource/aws_instance: Add `tags` parameter to `root_block_device`, `ebs_block_device` blocks [GH-15474] +* resource/aws_workspaces_directory: Add access properties [GH-16688] +* datasource/aws_workspaces_directory: Add access properties [GH-16688] BUG FIXES From 793c05e431b8188ad2c689e117b152c8336fcf97 Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Wed, 13 Jan 2021 14:59:09 -0800 Subject: [PATCH 0651/1212] Update CHANGELOG.md for #16867 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b0cf969fe54..d5a4c7599db 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ ENHANCEMENTS BUG FIXES +* resource/aws_appmesh_route: Allow an empty `match` attribute to be specified for a `grpc_route`, indicating that any service should be matched [GH-16867] * resource/aws_db_instance: Correctly validate `final_snapshot_identifier` argument at plan-time [GH-16885] * resource/aws_dms_endpoint: Support `extra_connection_attributes` for all engine names during create and read [GH-16827] * resource/aws_instance: Prevent `volume_tags` from improperly interfering with `tags` in `aws_ebs_volume` [GH-15474] * resource/aws_networkfirewall_rule_group: Prevent resource recreation due to `stateful_rule` changes after creation [GH-16884] * resource/aws_route53_zone_association: Prevent deletion errors for missing Hosted Zone or VPC association [GH-17023] * resource/aws_s3_bucket_inventory: Prevent crashes with empty `destination`, `filter`, and `schedule` configuration blocks [GH-17055] * service/apigateway: All operations will now automatically retry on `ConflictException: Unable to complete operation due to concurrent modification. Please try again later.` errors. From c0dd376402c03018017e8e403e21168b8e7fab1c Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Wed, 13 Jan 2021 15:55:57 -0800 Subject: [PATCH 0652/1212] Update CHANGELOG.md for #16077 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d5a4c7599db..cd2a891860a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ FEATURES * **New Data Source:** `aws_api_gateway_domain_name` [GH-12489] * **New Resource:** `aws_fms_policy` ([#9594](https://github.com/hashicorp/terraform-provider-aws/issues/9594)) +* **New Resource:** `aws_sagemaker_domain` [GH-16077] ENHANCEMENTS @@ -28,6 +29,7 @@ BUG FIXES * resource/aws_instance: Prevent `volume_tags` from improperly interfering with
`tags` in `aws_ebs_volume` [GH-15474] * resource/aws_networkfirewall_rule_group: Prevent resource recreation due to `stateful_rule` changes after creation [GH-16884] * resource/aws_route53_zone_association: Prevent deletion errors for missing Hosted Zone or VPC association [GH-17023] +* resource/aws_sagemaker_image - fix error on wait for delete when image does not exist [GH-16077] * resource/aws_s3_bucket_inventory: Prevent crashes with empty `destination`, `filter`, and `schedule` configuration blocks [GH-17055] * service/apigateway: All operations will now automatically retry on `ConflictException: Unable to complete operation due to concurrent modification. Please try again later.` errors. From e22f4855e209b7e5d12a46c16011e0b20403631d Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Thu, 14 Jan 2021 09:21:56 +0900 Subject: [PATCH 0653/1212] Use id instead of resource id for route53_resolver_dnssec_config resource id --- .../service/route53resolver/finder/finder.go | 33 +++++++ .../service/route53resolver/waiter/status.go | 20 +++++ .../service/route53resolver/waiter/waiter.go | 42 +++++++++ ...urce_aws_route53_resolver_dnssec_config.go | 88 +++---------------- ...aws_route53_resolver_dnssec_config_test.go | 88 +++++++++---------- ...ute53_resolver_dnssec_config.html.markdown | 12 +-- 6 files changed, 153 insertions(+), 130 deletions(-) diff --git a/aws/internal/service/route53resolver/finder/finder.go b/aws/internal/service/route53resolver/finder/finder.go index ab246be9be3..2c056d947f6 100644 --- a/aws/internal/service/route53resolver/finder/finder.go +++ b/aws/internal/service/route53resolver/finder/finder.go @@ -42,3 +42,36 @@ func ResolverQueryLogConfigByID(conn *route53resolver.Route53Resolver, queryLogC return output.ResolverQueryLogConfig, nil } + +// ResolverDnssecConfigByID returns the dnssec configuration corresponding to the specified ID. +// Returns nil if no configuration is found. 
+func ResolverDnssecConfigByID(conn *route53resolver.Route53Resolver, dnssecConfigID string) (*route53resolver.ResolverDnssecConfig, error) { + input := &route53resolver.ListResolverDnssecConfigsInput{} + + var config *route53resolver.ResolverDnssecConfig + // GetResolverDnssecConfigs does not support query with id + err := conn.ListResolverDnssecConfigsPages(input, func(page *route53resolver.ListResolverDnssecConfigsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, c := range page.ResolverDnssecConfigs { + if aws.StringValue(c.Id) == dnssecConfigID { + config = c + return false + } + } + + return !lastPage + }) + + if err != nil { + return nil, err + } + + if config == nil { + return nil, nil + } + + return config, nil +} diff --git a/aws/internal/service/route53resolver/waiter/status.go b/aws/internal/service/route53resolver/waiter/status.go index 7c8c91dd5e1..2d902d562a7 100644 --- a/aws/internal/service/route53resolver/waiter/status.go +++ b/aws/internal/service/route53resolver/waiter/status.go @@ -14,6 +14,9 @@ const ( resolverQueryLogConfigStatusNotFound = "NotFound" resolverQueryLogConfigStatusUnknown = "Unknown" + + resolverDnssecConfigStatusNotFound = "NotFound" + resolverDnssecConfigStatusUnknown = "Unknown" ) // QueryLogConfigAssociationStatus fetches the QueryLogConfigAssociation and its Status @@ -57,3 +60,20 @@ func QueryLogConfigStatus(conn *route53resolver.Route53Resolver, queryLogConfigI return queryLogConfig, aws.StringValue(queryLogConfig.Status), nil } } + +// DnssecConfigStatus fetches the DnssecConfig and its Status +func DnssecConfigStatus(conn *route53resolver.Route53Resolver, dnssecConfigID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + dnssecConfig, err := finder.ResolverDnssecConfigByID(conn, dnssecConfigID) + + if err != nil { + return nil, resolverDnssecConfigStatusUnknown, err + } + + if dnssecConfig == nil { + return nil, resolverDnssecConfigStatusNotFound, nil + } + + return dnssecConfig, aws.StringValue(dnssecConfig.ValidationStatus), nil + } +} diff --git a/aws/internal/service/route53resolver/waiter/waiter.go b/aws/internal/service/route53resolver/waiter/waiter.go index 744ad94bac1..8bcfea75b9b 100644 --- a/aws/internal/service/route53resolver/waiter/waiter.go +++ b/aws/internal/service/route53resolver/waiter/waiter.go @@ -19,6 +19,12 @@ const ( // Maximum amount of time to wait for a QueryLogConfig to be deleted QueryLogConfigDeletedTimeout = 5 * time.Minute + + // Maximum amount of time to wait for a DnssecConfig to return ENABLED + DnssecConfigCreatedTimeout = 5 * time.Minute + + // Maximum amount of time to wait for a DnssecConfig to return DISABLED + DnssecConfigDeletedTimeout = 5 * time.Minute ) // QueryLogConfigAssociationCreated waits for a QueryLogConfig to return ACTIVE @@ -92,3 +98,39 @@ func QueryLogConfigDeleted(conn *route53resolver.Route53Resolver, queryLogConfig return nil, err } + +// DnssecConfigCreated waits for a DnssecConfig to return ENABLED +func DnssecConfigCreated(conn *route53resolver.Route53Resolver, dnssecConfigID string) (*route53resolver.ResolverDnssecConfig, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{route53resolver.ResolverDNSSECValidationStatusEnabling}, + Target: []string{route53resolver.ResolverDNSSECValidationStatusEnabled}, + Refresh: DnssecConfigStatus(conn, dnssecConfigID), + Timeout: DnssecConfigCreatedTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if v, ok := 
outputRaw.(*route53resolver.ResolverDnssecConfig); ok { + return v, err + } + + return nil, err +} + +// DnssecConfigDeleted waits for a DnssecConfig to return DISABLED +func DnssecConfigDeleted(conn *route53resolver.Route53Resolver, dnssecConfigID string) (*route53resolver.ResolverDnssecConfig, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{route53resolver.ResolverDNSSECValidationStatusDisabling}, + Target: []string{route53resolver.ResolverDNSSECValidationStatusDisabled}, + Refresh: DnssecConfigStatus(conn, dnssecConfigID), + Timeout: DnssecConfigDeletedTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if v, ok := outputRaw.(*route53resolver.ResolverDnssecConfig); ok { + return v, err + } + + return nil, err +} diff --git a/aws/resource_aws_route53_resolver_dnssec_config.go b/aws/resource_aws_route53_resolver_dnssec_config.go index 9afdf60c081..370f5d969a5 100644 --- a/aws/resource_aws_route53_resolver_dnssec_config.go +++ b/aws/resource_aws_route53_resolver_dnssec_config.go @@ -3,17 +3,13 @@ package aws import ( "fmt" "log" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/route53resolver" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -const ( - route53ResolverDnssecConfigStatusNotFound = "NOT_FOUND" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/route53resolver/finder" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/route53resolver/waiter" ) func resourceAwsRoute53ResolverDnssecConfig() *schema.Resource { @@ -52,11 +48,6 @@ func resourceAwsRoute53ResolverDnssecConfig() *schema.Resource { Computed: true, }, }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, } } @@ -74,11 +65,9 @@ func resourceAwsRoute53ResolverDnssecConfigCreate(d *schema.ResourceData, meta i return fmt.Errorf("error creating Route53 Resolver DNSSEC config: %w", err) } - d.SetId(aws.StringValue(resp.ResolverDNSSECConfig.ResourceId)) + d.SetId(aws.StringValue(resp.ResolverDNSSECConfig.Id)) - err = route53ResolverDnssecConfigWait(conn, d.Id(), d.Timeout(schema.TimeoutCreate), - []string{route53resolver.ResolverDNSSECValidationStatusEnabling}, - []string{route53resolver.ResolverDNSSECValidationStatusEnabled}) + _, err = waiter.DnssecConfigCreated(conn, d.Id()) if err != nil { return err } @@ -88,43 +77,30 @@ func resourceAwsRoute53ResolverDnssecConfigCreate(d *schema.ResourceData, meta i func resourceAwsRoute53ResolverDnssecConfigRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).route53resolverconn - ec2Conn := meta.(*AWSClient).ec2conn - - vpc, err := vpcDescribe(ec2Conn, d.Id()) - if err != nil { - return fmt.Errorf("error getting VPC associated with Route53 Resolver DNSSEC config (%s): %w", d.Id(), err) - } - // GetResolverDnssecConfig returns AccessDeniedException if sending a request with non-existing VPC id - if vpc == nil { - log.Printf("[WARN] VPC associated with Resolver DNSSEC config (%s) not found, removing from state", d.Id()) - d.SetId("") - return nil - } + config, err := finder.ResolverDnssecConfigByID(conn, d.Id()) - raw, state, err := route53ResolverDnssecConfigRefresh(conn, d.Id())() if err != nil { return fmt.Errorf("error getting Route53 Resolver DNSSEC config (%s): %w", d.Id(), err) } - if state ==
route53ResolverDnssecConfigStatusNotFound || state == route53resolver.ResolverDNSSECValidationStatusDisabled { + if config == nil || aws.StringValue(config.ValidationStatus) == route53resolver.ResolverDNSSECValidationStatusDisabled { log.Printf("[WARN] Route53 Resolver DNSSEC config (%s) not found, removing from state", d.Id()) d.SetId("") return nil } - out := raw.(*route53resolver.ResolverDnssecConfig) - d.Set("id", out.Id) - d.Set("owner_id", out.OwnerId) - d.Set("resource_id", out.ResourceId) - d.Set("validation_status", out.ValidationStatus) + d.Set("id", config.Id) + d.Set("owner_id", config.OwnerId) + d.Set("resource_id", config.ResourceId) + d.Set("validation_status", config.ValidationStatus) configArn := arn.ARN{ Partition: meta.(*AWSClient).partition, Service: "route53resolver", Region: meta.(*AWSClient).region, - AccountID: aws.StringValue(out.OwnerId), - Resource: fmt.Sprintf("resolver-dnssec-config/%s", aws.StringValue(out.ResourceId)), + AccountID: aws.StringValue(config.OwnerId), + Resource: fmt.Sprintf("resolver-dnssec-config/%s", aws.StringValue(config.ResourceId)), }.String() d.Set("arn", configArn) @@ -136,7 +112,7 @@ func resourceAwsRoute53ResolverDnssecConfigDelete(d *schema.ResourceData, meta i log.Printf("[DEBUG] Deleting Route53 Resolver DNSSEC config: %s", d.Id()) _, err := conn.UpdateResolverDnssecConfig(&route53resolver.UpdateResolverDnssecConfigInput{ - ResourceId: aws.String(d.Id()), + ResourceId: aws.String(d.Get("resource_id").(string)), Validation: aws.String(route53resolver.ValidationDisable), }) if isAWSErr(err, route53resolver.ErrCodeResourceNotFoundException, "") { @@ -146,46 +122,10 @@ func resourceAwsRoute53ResolverDnssecConfigDelete(d *schema.ResourceData, meta i return fmt.Errorf("error deleting Route53 Resolver DNSSEC config (%s): %w", d.Id(), err) } - err = route53ResolverDnssecConfigWait(conn, d.Id(), d.Timeout(schema.TimeoutDelete), - []string{route53resolver.ResolverDNSSECValidationStatusDisabling}, - []string{route53resolver.ResolverDNSSECValidationStatusDisabled}) + _, err = waiter.DnssecConfigDeleted(conn, d.Id()) if err != nil { return err } return nil } - -func route53ResolverDnssecConfigWait(conn *route53resolver.Route53Resolver, id string, timeout time.Duration, pending, target []string) error { - stateConf := &resource.StateChangeConf{ - Pending: pending, - Target: target, - Refresh: route53ResolverDnssecConfigRefresh(conn, id), - Timeout: timeout, - Delay: 10 * time.Second, - MinTimeout: 5 * time.Second, - } - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("error waiting for Route53 Resolver DNSSEC config (%s) to reach target state: %w", id, err) - } - - return nil -} - -func route53ResolverDnssecConfigRefresh(conn *route53resolver.Route53Resolver, id string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - resp, err := conn.GetResolverDnssecConfig(&route53resolver.GetResolverDnssecConfigInput{ - ResourceId: aws.String(id), - }) - - if isAWSErr(err, route53resolver.ErrCodeResourceNotFoundException, "") { - return &route53resolver.ResolverDnssecConfig{}, route53ResolverDnssecConfigStatusNotFound, nil - } - - if err != nil { - return nil, "", err - } - - return resp.ResolverDNSSECConfig, aws.StringValue(resp.ResolverDNSSECConfig.ValidationStatus), nil - } -} diff --git a/aws/resource_aws_route53_resolver_dnssec_config_test.go b/aws/resource_aws_route53_resolver_dnssec_config_test.go index b88ec552015..ad864458467 100644 --- a/aws/resource_aws_route53_resolver_dnssec_config_test.go +++ 
b/aws/resource_aws_route53_resolver_dnssec_config_test.go @@ -127,62 +127,38 @@ func TestAccAWSRoute53ResolverDnssecConfig_disappear(t *testing.T) { }) } -func TestAccAWSRoute53ResolverDnssecConfig_disappear_VPC(t *testing.T) { - var config route53resolver.ResolverDnssecConfig - resourceName := "aws_route53_resolver_dnssec_config.test" - vpcResourceName := "aws_vpc.test" - rName := acctest.RandomWithPrefix("tf-acc-test") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ErrorCheck: testAccErrorCheckSkipRoute53(t), - Providers: testAccProviders, - CheckDestroy: testAccCheckRoute53ResolverDnssecConfigDestroy, - Steps: []resource.TestStep{ - { - Config: testAccRoute53ResolverDnssecConfigConfigBasic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53ResolverDnssecConfigExists(resourceName, &config), - testAccCheckResourceDisappears(testAccProvider, resourceAwsVpc(), vpcResourceName), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - func testAccCheckRoute53ResolverDnssecConfigDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).route53resolverconn - ec2Conn := testAccProvider.Meta().(*AWSClient).ec2conn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_route53_resolver_dnssec_config" { continue } - vpc, err := vpcDescribe(ec2Conn, rs.Primary.ID) - if err != nil { - return err - } + input := &route53resolver.ListResolverDnssecConfigsInput{} - // The VPC has been deleted - if vpc == nil { - continue - } + var config *route53resolver.ResolverDnssecConfig + err := conn.ListResolverDnssecConfigsPages(input, func(page *route53resolver.ListResolverDnssecConfigsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } - // Try to find the resource - out, err := conn.GetResolverDnssecConfig(&route53resolver.GetResolverDnssecConfigInput{ - ResourceId: aws.String(rs.Primary.ID), + for _, c := range page.ResolverDnssecConfigs { + if aws.StringValue(c.Id) == rs.Primary.ID { + config = c + return false + } + } + + return !lastPage }) - // Verify the error is what we want - if isAWSErr(err, route53resolver.ErrCodeResourceNotFoundException, "") { - continue - } + if err != nil { return err } - if aws.StringValue(out.ResolverDNSSECConfig.ValidationStatus) == route53resolver.ResolverDNSSECValidationStatusDisabled { - continue + + if config == nil || aws.StringValue(config.ValidationStatus) == route53resolver.ResolverDNSSECValidationStatusDisabled { + return nil } return fmt.Errorf("Route 53 Resolver Dnssec config still exists: %s", rs.Primary.ID) @@ -191,7 +167,7 @@ func testAccCheckRoute53ResolverDnssecConfigDestroy(s *terraform.State) error { return nil } -func testAccCheckRoute53ResolverDnssecConfigExists(n string, c *route53resolver.ResolverDnssecConfig) resource.TestCheckFunc { +func testAccCheckRoute53ResolverDnssecConfigExists(n string, config *route53resolver.ResolverDnssecConfig) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -203,14 +179,34 @@ func testAccCheckRoute53ResolverDnssecConfigExists(n string, c *route53resolver. 
} conn := testAccProvider.Meta().(*AWSClient).route53resolverconn - resp, err := conn.GetResolverDnssecConfig(&route53resolver.GetResolverDnssecConfigInput{ - ResourceId: aws.String(rs.Primary.ID), + input := &route53resolver.ListResolverDnssecConfigsInput{} + + err := conn.ListResolverDnssecConfigsPages(input, func(page *route53resolver.ListResolverDnssecConfigsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, c := range page.ResolverDnssecConfigs { + if aws.StringValue(c.Id) == rs.Primary.ID { + config = c + return false + } + } + + return !lastPage }) + if err != nil { return err } - *c = *resp.ResolverDNSSECConfig + if config == nil { + return fmt.Errorf("No Route 53 Resolver Dnssec config found") + } + + if aws.StringValue(config.ValidationStatus) != route53resolver.ResolverDNSSECValidationStatusEnabled { + return fmt.Errorf("Route 53 Resolver Dnssec config (%s) is not enabled", aws.StringValue(config.Id)) + } return nil } diff --git a/website/docs/r/route53_resolver_dnssec_config.html.markdown b/website/docs/r/route53_resolver_dnssec_config.html.markdown index 7ae43953bbe..326efa867f0 100644 --- a/website/docs/r/route53_resolver_dnssec_config.html.markdown +++ b/website/docs/r/route53_resolver_dnssec_config.html.markdown @@ -39,18 +39,10 @@ In addition to all arguments above, the following attributes are exported: * `owner_id` - The owner account ID of the virtual private cloud (VPC) for a configuration for DNSSEC validation. * `validation_status` - The validation status for a DNSSEC configuration. The status can be one of the following: `ENABLING`, `ENABLED`, `DISABLING` and `DISABLED`. -## Timeouts - -`aws_route53_resolver_dnssec_config` provides the following -[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: - -- `create` - (Default `10 minutes`) Used for creating Route 53 Resolver DNSSEC config -- `delete` - (Default `10 minutes`) Used for destroying Route 53 Resolver DNSSEC config - ## Import - Route 53 Resolver DNSSEC configs can be imported using the VPC ID, e.g. + Route 53 Resolver DNSSEC configs can be imported using the Route 53 Resolver DNSSEC config ID, e.g. 
``` -$ terraform import aws_route53_resolver_dnssec_config.example vpc-7a190fdssf3 +$ terraform import aws_route53_resolver_dnssec_config.example rdsc-be1866ecc1683e95 ``` From c7ca14b18b0be60ad54fab5b1fbde14452221938 Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Wed, 13 Jan 2021 17:08:15 -0800 Subject: [PATCH 0654/1212] Update CHANGELOG.md for #17043 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cd2a891860a..db19a9d5b1f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ ENHANCEMENTS * resource/aws_api_gateway_method: Add `operation_name` argument [GH-13282] * resource/aws_api_gateway_rest_api: Add `disable_execute_api_endpoint` argument [GH-16198] * resource/aws_api_gateway_rest_api: Add `parameters` argument [GH-7374] +* resource/aws_apigatewayv2_integration: Add `response_parameters` attribute [GH-17043] * resource/aws_dms_endpoint: Add `s3_settings` `date_partition_enabled` argument [GH-16827] * resource/aws_elasticache_cluster: Add support for final snapshot with Redis engine [GH-15592] * resource/aws_elasticache_replication_group: Add support for final snapshot [GH-15592] From 757ee46f386336db950fc4ed715ac45fa96a42ad Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Wed, 13 Jan 2021 21:41:09 -0500 Subject: [PATCH 0655/1212] additional acctests; reduce logging --- .../service/cloudwatch/finder/finder.go | 6 +- ...resource_aws_cloudwatch_composite_alarm.go | 64 +- ...rce_aws_cloudwatch_composite_alarm_test.go | 663 ++++++++++++++---- .../cloudwatch_composite_alarm.html.markdown | 2 +- 4 files changed, 549 insertions(+), 186 deletions(-) diff --git a/aws/internal/service/cloudwatch/finder/finder.go b/aws/internal/service/cloudwatch/finder/finder.go index 7fa7069ea89..1de5bf2c9d6 100644 --- a/aws/internal/service/cloudwatch/finder/finder.go +++ b/aws/internal/service/cloudwatch/finder/finder.go @@ -1,17 +1,19 @@ package finder import ( + "context" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatch" ) -func CompositeAlarmByName(conn *cloudwatch.CloudWatch, name string) (*cloudwatch.CompositeAlarm, error) { +func CompositeAlarmByName(ctx context.Context, conn *cloudwatch.CloudWatch, name string) (*cloudwatch.CompositeAlarm, error) { input := cloudwatch.DescribeAlarmsInput{ AlarmNames: aws.StringSlice([]string{name}), AlarmTypes: aws.StringSlice([]string{cloudwatch.AlarmTypeCompositeAlarm}), } - output, err := conn.DescribeAlarms(&input) + output, err := conn.DescribeAlarmsWithContext(ctx, &input) if err != nil { return nil, err } diff --git a/aws/resource_aws_cloudwatch_composite_alarm.go b/aws/resource_aws_cloudwatch_composite_alarm.go index f5cbb0d9c0b..46ac4c36a43 100644 --- a/aws/resource_aws_cloudwatch_composite_alarm.go +++ b/aws/resource_aws_cloudwatch_composite_alarm.go @@ -30,6 +30,7 @@ func resourceAwsCloudWatchCompositeAlarm() *schema.Resource { Type: schema.TypeBool, Optional: true, Default: true, + ForceNew: true, }, "alarm_actions": { Type: schema.TypeSet, @@ -86,48 +87,46 @@ func resourceAwsCloudWatchCompositeAlarm() *schema.Resource { } } -func resourceAwsCloudWatchCompositeAlarmCreate( - ctx context.Context, - d *schema.ResourceData, - meta interface{}, -) diag.Diagnostics { +func resourceAwsCloudWatchCompositeAlarmCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { conn := meta.(*AWSClient).cloudwatchconn name := d.Get("alarm_name").(string) input := expandAwsCloudWatchPutCompositeAlarmInput(d) + _, err := 
conn.PutCompositeAlarmWithContext(ctx, &input) if err != nil { - return diag.Errorf("error creating composite alarm: %s", err) + return diag.Errorf("error creating CloudWatch Composite Alarm (%s): %s", name, err) } - log.Printf("[INFO] Created Composite Alarm %s.", name) d.SetId(name) return resourceAwsCloudWatchCompositeAlarmRead(ctx, d, meta) } -func resourceAwsCloudWatchCompositeAlarmRead( - ctx context.Context, - d *schema.ResourceData, - meta interface{}, -) diag.Diagnostics { +func resourceAwsCloudWatchCompositeAlarmRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { conn := meta.(*AWSClient).cloudwatchconn ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig name := d.Id() - alarm, err := finder.CompositeAlarmByName(conn, name) + alarm, err := finder.CompositeAlarmByName(ctx, conn, name) + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, cloudwatch.ErrCodeResourceNotFound) { + log.Printf("[WARN] CloudWatch Composite Alarm %s not found, removing from state", name) + d.SetId("") + return nil + } + if err != nil { - return diag.Errorf("error reading composite alarm (%s): %s", name, err) + return diag.Errorf("error reading CloudWatch Composite Alarm (%s): %s", name, err) } if alarm == nil { - if !d.IsNewResource() { - log.Printf("[WARN] CloudWatch Composite alarm %s not found, removing from state", name) - d.SetId("") - return nil + if d.IsNewResource() { + return diag.Errorf("error reading CloudWatch Composite Alarm (%s): not found", name) } - return diag.Errorf("error reading composite alarm (%s): alarm not filtered", name) + log.Printf("[WARN] CloudWatch Composite Alarm %s not found, removing from state", name) + d.SetId("") + return nil } d.Set("actions_enabled", alarm.ActionsEnabled) @@ -161,20 +160,15 @@ func resourceAwsCloudWatchCompositeAlarmRead( return nil } -func resourceAwsCloudWatchCompositeAlarmUpdate( - ctx context.Context, - d *schema.ResourceData, - meta interface{}, -) diag.Diagnostics { +func resourceAwsCloudWatchCompositeAlarmUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { conn := meta.(*AWSClient).cloudwatchconn name := d.Id() - log.Printf("[INFO] Updating Composite Alarm %s...", name) - input := expandAwsCloudWatchPutCompositeAlarmInput(d) + _, err := conn.PutCompositeAlarmWithContext(ctx, &input) if err != nil { - return diag.Errorf("error creating composite alarm: %s", err) + return diag.Errorf("error updating CloudWatch Composite Alarm (%s): %s", name, err) } arn := d.Get("arn").(string) @@ -189,16 +183,10 @@ func resourceAwsCloudWatchCompositeAlarmUpdate( return resourceAwsCloudWatchCompositeAlarmRead(ctx, d, meta) } -func resourceAwsCloudWatchCompositeAlarmDelete( - ctx context.Context, - d *schema.ResourceData, - meta interface{}, -) diag.Diagnostics { +func resourceAwsCloudWatchCompositeAlarmDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { conn := meta.(*AWSClient).cloudwatchconn name := d.Id() - log.Printf("[INFO] Deleting Composite Alarm %s...", name) - input := cloudwatch.DeleteAlarmsInput{ AlarmNames: aws.StringSlice([]string{name}), } @@ -208,17 +196,15 @@ func resourceAwsCloudWatchCompositeAlarmDelete( if tfawserr.ErrCodeEquals(err, cloudwatch.ErrCodeResourceNotFound) { return nil } - return diag.Errorf("error deleting composite alarm: %s", err) + return diag.Errorf("error deleting CloudWatch Composite Alarm (%s): %s", name, err) } return nil } func expandAwsCloudWatchPutCompositeAlarmInput(d *schema.ResourceData) 
cloudwatch.PutCompositeAlarmInput { - out := cloudwatch.PutCompositeAlarmInput{} - - if v, ok := d.GetOk("actions_enabled"); ok { - out.ActionsEnabled = aws.Bool(v.(bool)) + out := cloudwatch.PutCompositeAlarmInput{ + ActionsEnabled: aws.Bool(d.Get("actions_enabled").(bool)), } if v, ok := d.GetOk("alarm_actions"); ok { diff --git a/aws/resource_aws_cloudwatch_composite_alarm_test.go b/aws/resource_aws_cloudwatch_composite_alarm_test.go index 8a033cffebb..baf9bd788ad 100644 --- a/aws/resource_aws_cloudwatch_composite_alarm_test.go +++ b/aws/resource_aws_cloudwatch_composite_alarm_test.go @@ -1,40 +1,86 @@ package aws import ( + "context" "fmt" + "log" "regexp" "testing" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/cloudwatch/finder" ) -func testAccCheckAwsCloudWatchCompositeAlarmDestroy(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).cloudwatchconn +func init() { + resource.AddTestSweepers("aws_cloudwatch_composite_alarm", &resource.Sweeper{ + Name: "aws_cloudwatch_composite_alarm", + F: testSweepCloudWatchCompositeAlarms, + }) +} - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_cloudwatch_composite_alarm" { - continue - } +func testSweepCloudWatchCompositeAlarms(region string) error { + client, err := sharedClientForRegion(region) + if err != nil { + return fmt.Errorf("error getting client: %w", err) + } - params := cloudwatch.DescribeAlarmsInput{ - AlarmNames: []*string{aws.String(rs.Primary.ID)}, + conn := client.(*AWSClient).cloudwatchconn + ctx := context.Background() + + input := &cloudwatch.DescribeAlarmsInput{ + AlarmTypes: aws.StringSlice([]string{cloudwatch.AlarmTypeCompositeAlarm}), + } + + var sweeperErrs *multierror.Error + + err = conn.DescribeAlarmsPagesWithContext(ctx, input, func(page *cloudwatch.DescribeAlarmsOutput, isLast bool) bool { + if page == nil { + return !isLast } - resp, err := conn.DescribeAlarms(¶ms) + for _, compositeAlarm := range page.CompositeAlarms { + if compositeAlarm == nil { + continue + } + + name := aws.StringValue(compositeAlarm.AlarmName) + + log.Printf("[INFO] Deleting CloudWatch Composite Alarm: %s", name) + + r := resourceAwsCloudWatchCompositeAlarm() + d := r.Data(nil) + d.SetId(name) + + diags := r.DeleteContext(ctx, d, client) - if err == nil { - if len(resp.MetricAlarms) != 0 && - aws.StringValue(resp.MetricAlarms[0].AlarmName) == rs.Primary.ID { - return fmt.Errorf("Alarm Still Exists: %s", rs.Primary.ID) + for i := range diags { + if diags[i].Severity == diag.Error { + log.Printf("[ERROR] %s", diags[i].Summary) + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf(diags[i].Summary)) + continue + } } } + + return !isLast + }) + + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping CloudWatch Composite Alarms sweep for %s: %s", region, err) + return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors + } + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error retrieving CloudWatch Composite Alarms: %w", err)) } - return nil + return sweeperErrs.ErrorOrNil() } func 
TestAccAwsCloudWatchCompositeAlarm_basic(t *testing.T) { @@ -50,14 +96,15 @@ func TestAccAwsCloudWatchCompositeAlarm_basic(t *testing.T) { Config: testAccAwsCloudWatchCompositeAlarmConfig_basic(suffix), Check: resource.ComposeTestCheckFunc( testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "alarm_actions.#", "1"), - resource.TestCheckResourceAttr(resourceName, "alarm_description", "Test 1"), + resource.TestCheckResourceAttr(resourceName, "actions_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "alarm_actions.#", "0"), + resource.TestCheckResourceAttr(resourceName, "alarm_description", ""), resource.TestCheckResourceAttr(resourceName, "alarm_name", "tf-test-composite-"+suffix), resource.TestCheckResourceAttr(resourceName, "alarm_rule", fmt.Sprintf("ALARM(tf-test-metric-0-%[1]s) OR ALARM(tf-test-metric-1-%[1]s)", suffix)), testAccMatchResourceAttrRegionalARN(resourceName, "arn", "cloudwatch", regexp.MustCompile(`alarm:.+`)), - resource.TestCheckResourceAttr(resourceName, "insufficient_data_actions.#", "1"), - resource.TestCheckResourceAttr(resourceName, "ok_actions.#", "1"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "insufficient_data_actions.#", "0"), + resource.TestCheckResourceAttr(resourceName, "ok_actions.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, { @@ -69,46 +116,114 @@ func TestAccAwsCloudWatchCompositeAlarm_basic(t *testing.T) { }) } -func testAccAwsCloudWatchCompositeAlarmConfig_basic(suffix string) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_metric_alarm" "test" { - count = 2 - - alarm_name = "tf-test-metric-${count.index}-%[1]s" - comparison_operator = "GreaterThanOrEqualToThreshold" - evaluation_periods = 2 - metric_name = "CPUUtilization" - namespace = "AWS/EC2" - period = 120 - statistic = "Average" - threshold = 80 +func TestAccAwsCloudWatchCompositeAlarm_disappears(t *testing.T) { + suffix := acctest.RandString(8) + resourceName := "aws_cloudwatch_composite_alarm.test" - dimensions = { - InstanceId = "i-abc123" - } + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsCloudWatchCompositeAlarmDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsCloudWatchCompositeAlarmConfig_basic(suffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsCloudWatchCompositeAlarm(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) } -resource "aws_sns_topic" "test" { - count = 1 - name = "tf-test-alarms-${count.index}-%[1]s" +func TestAccAwsCloudWatchCompositeAlarm_actionsEnabled(t *testing.T) { + suffix := acctest.RandString(8) + resourceName := "aws_cloudwatch_composite_alarm.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsCloudWatchCompositeAlarmDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsCloudWatchCompositeAlarmConfig_actionsEnabled(false, suffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "actions_enabled", "false"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: 
testAccAwsCloudWatchCompositeAlarmConfig_actionsEnabled(true, suffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "actions_enabled", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) } -resource "aws_cloudwatch_composite_alarm" "test" { - alarm_actions = aws_sns_topic.test.*.arn - alarm_description = "Test 1" - alarm_name = "tf-test-composite-%[1]s" - alarm_rule = join(" OR ", formatlist("ALARM(%%s)", aws_cloudwatch_metric_alarm.test.*.alarm_name)) - insufficient_data_actions = aws_sns_topic.test.*.arn - ok_actions = aws_sns_topic.test.*.arn +func TestAccAwsCloudWatchCompositeAlarm_alarmActions(t *testing.T) { + suffix := acctest.RandString(8) + resourceName := "aws_cloudwatch_composite_alarm.test" - tags = { - Foo = "Bar" - } -} -`, suffix) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsCloudWatchCompositeAlarmDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsCloudWatchCompositeAlarmConfig_alarmActions(suffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "alarm_actions.#", "2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsCloudWatchCompositeAlarmConfig_updateAlarmActions(suffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "alarm_actions.#", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsCloudWatchCompositeAlarmConfig_basic(suffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "alarm_actions.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) } -func TestAccAwsCloudWatchCompositeAlarm_disappears(t *testing.T) { +func TestAccAwsCloudWatchCompositeAlarm_description(t *testing.T) { suffix := acctest.RandString(8) resourceName := "aws_cloudwatch_composite_alarm.test" @@ -118,42 +233,132 @@ func TestAccAwsCloudWatchCompositeAlarm_disappears(t *testing.T) { CheckDestroy: testAccCheckAwsCloudWatchCompositeAlarmDestroy, Steps: []resource.TestStep{ { - Config: testAccAwsCloudWatchCompositeAlarmConfig_disappears(suffix), + Config: testAccAwsCloudWatchCompositeAlarmConfig_description("Test 1", suffix), Check: resource.ComposeTestCheckFunc( testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName), - testAccCheckResourceDisappears(testAccProvider, resourceAwsCloudWatchCompositeAlarm(), resourceName), + resource.TestCheckResourceAttr(resourceName, "alarm_description", "Test 1"), ), - ExpectNonEmptyPlan: true, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsCloudWatchCompositeAlarmConfig_description("Test Updated", suffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "alarm_description", "Test Updated"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) } -func 
testAccAwsCloudWatchCompositeAlarmConfig_disappears(suffix string) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_metric_alarm" "test" { - alarm_name = "tf-test-metric-%[1]s" - comparison_operator = "GreaterThanOrEqualToThreshold" - evaluation_periods = 2 - metric_name = "CPUUtilization" - namespace = "AWS/EC2" - period = 120 - statistic = "Average" - threshold = 80 +func TestAccAwsCloudWatchCompositeAlarm_insufficientDataActions(t *testing.T) { + suffix := acctest.RandString(8) + resourceName := "aws_cloudwatch_composite_alarm.test" - dimensions = { - InstanceId = "i-abc123" - } + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsCloudWatchCompositeAlarmDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsCloudWatchCompositeAlarmConfig_insufficientDataActions(suffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "insufficient_data_actions.#", "2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsCloudWatchCompositeAlarmConfig_updateInsufficientDataActions(suffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "insufficient_data_actions.#", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsCloudWatchCompositeAlarmConfig_basic(suffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "insufficient_data_actions.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) } -resource "aws_cloudwatch_composite_alarm" "test" { - alarm_name = "tf-test-composite-%[1]s" - alarm_rule = "ALARM(${aws_cloudwatch_metric_alarm.test.alarm_name})" -} -`, suffix) +func TestAccAwsCloudWatchCompositeAlarm_okActions(t *testing.T) { + suffix := acctest.RandString(8) + resourceName := "aws_cloudwatch_composite_alarm.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsCloudWatchCompositeAlarmDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsCloudWatchCompositeAlarmConfig_okActions(suffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "ok_actions.#", "2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsCloudWatchCompositeAlarmConfig_updateOkActions(suffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "ok_actions.#", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsCloudWatchCompositeAlarmConfig_basic(suffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "ok_actions.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) } -func 
TestAccAwsCloudWatchCompositeAlarm_update(t *testing.T) { +func TestAccAwsCloudWatchCompositeAlarm_allActions(t *testing.T) { suffix := acctest.RandString(8) resourceName := "aws_cloudwatch_composite_alarm.test" @@ -163,17 +368,12 @@ func TestAccAwsCloudWatchCompositeAlarm_update(t *testing.T) { CheckDestroy: testAccCheckAwsCloudWatchCompositeAlarmDestroy, Steps: []resource.TestStep{ { - Config: testAccAwsCloudWatchCompositeAlarmConfig_update_before(suffix), + Config: testAccAwsCloudWatchCompositeAlarmConfig_allActions(suffix), Check: resource.ComposeTestCheckFunc( testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName), resource.TestCheckResourceAttr(resourceName, "alarm_actions.#", "1"), - resource.TestCheckResourceAttr(resourceName, "alarm_description", "Test 1"), - resource.TestCheckResourceAttr(resourceName, "alarm_name", "tf-test-composite-"+suffix), - resource.TestCheckResourceAttr(resourceName, "alarm_rule", fmt.Sprintf("ALARM(tf-test-metric-0-%[1]s) OR ALARM(tf-test-metric-1-%[1]s)", suffix)), - testAccMatchResourceAttrRegionalARN(resourceName, "arn", "cloudwatch", regexp.MustCompile(`alarm:.+`)), resource.TestCheckResourceAttr(resourceName, "insufficient_data_actions.#", "1"), resource.TestCheckResourceAttr(resourceName, "ok_actions.#", "1"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), ), }, { @@ -182,29 +382,117 @@ func TestAccAwsCloudWatchCompositeAlarm_update(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAwsCloudWatchCompositeAlarmConfig_update_after(suffix), + Config: testAccAwsCloudWatchCompositeAlarmConfig_basic(suffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "alarm_actions.#", "0"), + resource.TestCheckResourceAttr(resourceName, "insufficient_data_actions.#", "0"), + resource.TestCheckResourceAttr(resourceName, "ok_actions.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAwsCloudWatchCompositeAlarm_updateAlarmRule(t *testing.T) { + suffix := acctest.RandString(8) + resourceName := "aws_cloudwatch_composite_alarm.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsCloudWatchCompositeAlarmDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsCloudWatchCompositeAlarmConfig_basic(suffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsCloudWatchCompositeAlarmConfig_updateAlarmRule(suffix), Check: resource.ComposeTestCheckFunc( testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "alarm_actions.#", "2"), - resource.TestCheckResourceAttr(resourceName, "alarm_description", "Test 2"), - resource.TestCheckResourceAttr(resourceName, "alarm_name", "tf-test-composite-"+suffix), resource.TestCheckResourceAttr(resourceName, "alarm_rule", fmt.Sprintf("ALARM(tf-test-metric-0-%[1]s)", suffix)), - testAccMatchResourceAttrRegionalARN(resourceName, "arn", "cloudwatch", regexp.MustCompile(`alarm:.+`)), - resource.TestCheckResourceAttr(resourceName, "insufficient_data_actions.#", "2"), - resource.TestCheckResourceAttr(resourceName, "ok_actions.#", "2"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), ), }, 
+ { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } -func testAccAwsCloudWatchCompositeAlarmConfig_update_before(suffix string) string { +func testAccCheckAwsCloudWatchCompositeAlarmDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).cloudwatchconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_cloudwatch_composite_alarm" { + continue + } + + alarm, err := finder.CompositeAlarmByName(context.Background(), conn, rs.Primary.ID) + + if tfawserr.ErrCodeEquals(err, cloudwatch.ErrCodeResourceNotFound) { + continue + } + if err != nil { + return fmt.Errorf("error reading CloudWatch composite alarm (%s): %w", rs.Primary.ID, err) + } + + if alarm != nil { + return fmt.Errorf("CloudWatch composite alarm (%s) still exists", rs.Primary.ID) + } + } + + return nil +} + +func testAccCheckAwsCloudWatchCompositeAlarmExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("resource %s has not set its id", resourceName) + } + + conn := testAccProvider.Meta().(*AWSClient).cloudwatchconn + + alarm, err := finder.CompositeAlarmByName(context.Background(), conn, rs.Primary.ID) + + if err != nil { + return fmt.Errorf("error reading CloudWatch composite alarm (%s): %w", rs.Primary.ID, err) + } + + if alarm == nil { + return fmt.Errorf("CloudWatch composite alarm (%s) not found", rs.Primary.ID) + } + + return nil + } +} + +func testAccAwsCloudWatchCompositeAlarmBaseConfig(suffix string) string { return fmt.Sprintf(` resource "aws_cloudwatch_metric_alarm" "test" { count = 2 - alarm_name = "tf-test-metric-${count.index}-%[1]s" + alarm_name = "tf-test-metric-${count.index}-%s" comparison_operator = "GreaterThanOrEqualToThreshold" evaluation_periods = 2 metric_name = "CPUUtilization" @@ -217,85 +505,172 @@ resource "aws_cloudwatch_metric_alarm" "test" { InstanceId = "i-abc123" } } +`, suffix) +} + +func testAccAwsCloudWatchCompositeAlarmConfig_actionsEnabled(enabled bool, suffix string) string { + return composeConfig( + testAccAwsCloudWatchCompositeAlarmBaseConfig(suffix), + fmt.Sprintf(` +resource "aws_cloudwatch_composite_alarm" "test" { + actions_enabled = %t + alarm_name = "tf-test-composite-%s" + alarm_rule = join(" OR ", formatlist("ALARM(%%s)", aws_cloudwatch_metric_alarm.test.*.alarm_name)) +} +`, enabled, suffix)) +} + +func testAccAwsCloudWatchCompositeAlarmConfig_basic(suffix string) string { + return composeConfig( + testAccAwsCloudWatchCompositeAlarmBaseConfig(suffix), + fmt.Sprintf(` +resource "aws_cloudwatch_composite_alarm" "test" { + alarm_name = "tf-test-composite-%[1]s" + alarm_rule = join(" OR ", formatlist("ALARM(%%s)", aws_cloudwatch_metric_alarm.test.*.alarm_name)) +} +`, suffix)) +} +func testAccAwsCloudWatchCompositeAlarmConfig_description(description, suffix string) string { + return composeConfig( + testAccAwsCloudWatchCompositeAlarmBaseConfig(suffix), + fmt.Sprintf(` +resource "aws_cloudwatch_composite_alarm" "test" { + alarm_description = %q + alarm_name = "tf-test-composite-%s" + alarm_rule = join(" OR ", formatlist("ALARM(%%s)", aws_cloudwatch_metric_alarm.test.*.alarm_name)) +} +`, description, suffix)) +} + +func testAccAwsCloudWatchCompositeAlarmConfig_alarmActions(suffix string) string { + return composeConfig( + testAccAwsCloudWatchCompositeAlarmBaseConfig(suffix), + fmt.Sprintf(` 
resource "aws_sns_topic" "test" { - count = 1 + count = 2 name = "tf-test-alarms-${count.index}-%[1]s" } resource "aws_cloudwatch_composite_alarm" "test" { - alarm_actions = aws_sns_topic.test.*.arn - alarm_description = "Test 1" - alarm_name = "tf-test-composite-%[1]s" - alarm_rule = join(" OR ", formatlist("ALARM(%%s)", aws_cloudwatch_metric_alarm.test.*.alarm_name)) - insufficient_data_actions = aws_sns_topic.test.*.arn - ok_actions = aws_sns_topic.test.*.arn - - tags = { - Foo = "Bar" - } + alarm_actions = aws_sns_topic.test.*.arn + alarm_name = "tf-test-composite-%[1]s" + alarm_rule = "ALARM(${aws_cloudwatch_metric_alarm.test[0].alarm_name})" } -`, suffix) +`, suffix)) } -func testAccAwsCloudWatchCompositeAlarmConfig_update_after(suffix string) string { - return fmt.Sprintf(` -resource "aws_cloudwatch_metric_alarm" "test" { +func testAccAwsCloudWatchCompositeAlarmConfig_updateAlarmActions(suffix string) string { + return composeConfig( + testAccAwsCloudWatchCompositeAlarmBaseConfig(suffix), + fmt.Sprintf(` +resource "aws_sns_topic" "test" { count = 2 + name = "tf-test-alarms-${count.index}-%[1]s" +} - alarm_name = "tf-test-metric-${count.index}-%[1]s" - comparison_operator = "GreaterThanOrEqualToThreshold" - evaluation_periods = 2 - metric_name = "CPUUtilization" - namespace = "AWS/EC2" - period = 120 - statistic = "Average" - threshold = 80 +resource "aws_cloudwatch_composite_alarm" "test" { + alarm_actions = [aws_sns_topic.test[0].arn] + alarm_name = "tf-test-composite-%[1]s" + alarm_rule = "ALARM(${aws_cloudwatch_metric_alarm.test[0].alarm_name})" +} +`, suffix)) +} - dimensions = { - InstanceId = "i-abc123" - } +func testAccAwsCloudWatchCompositeAlarmConfig_updateAlarmRule(suffix string) string { + return composeConfig( + testAccAwsCloudWatchCompositeAlarmBaseConfig(suffix), + fmt.Sprintf(` +resource "aws_cloudwatch_composite_alarm" "test" { + alarm_name = "tf-test-composite-%[1]s" + alarm_rule = "ALARM(${aws_cloudwatch_metric_alarm.test[0].alarm_name})" +} +`, suffix)) } +func testAccAwsCloudWatchCompositeAlarmConfig_insufficientDataActions(suffix string) string { + return composeConfig( + testAccAwsCloudWatchCompositeAlarmBaseConfig(suffix), + fmt.Sprintf(` resource "aws_sns_topic" "test" { count = 2 name = "tf-test-alarms-${count.index}-%[1]s" } resource "aws_cloudwatch_composite_alarm" "test" { - alarm_actions = aws_sns_topic.test.*.arn - alarm_description = "Test 2" alarm_name = "tf-test-composite-%[1]s" alarm_rule = "ALARM(${aws_cloudwatch_metric_alarm.test[0].alarm_name})" insufficient_data_actions = aws_sns_topic.test.*.arn - ok_actions = aws_sns_topic.test.*.arn +} +`, suffix)) +} - tags = { - Foo = "Bar" - Bax = "Baf" - } +func testAccAwsCloudWatchCompositeAlarmConfig_updateInsufficientDataActions(suffix string) string { + return composeConfig( + testAccAwsCloudWatchCompositeAlarmBaseConfig(suffix), + fmt.Sprintf(` +resource "aws_sns_topic" "test" { + count = 2 + name = "tf-test-alarms-${count.index}-%[1]s" } -`, suffix) + +resource "aws_cloudwatch_composite_alarm" "test" { + alarm_name = "tf-test-composite-%[1]s" + alarm_rule = "ALARM(${aws_cloudwatch_metric_alarm.test[0].alarm_name})" + insufficient_data_actions = [aws_sns_topic.test[0].arn] +} +`, suffix)) } -func testAccCheckAwsCloudWatchCompositeAlarmExists(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - conn := testAccProvider.Meta().(*AWSClient).cloudwatchconn - params := 
cloudwatch.DescribeAlarmsInput{ - AlarmNames: []*string{aws.String(rs.Primary.ID)}, - AlarmTypes: []*string{aws.String(cloudwatch.AlarmTypeCompositeAlarm)}, - } - resp, err := conn.DescribeAlarms(¶ms) - if err != nil { - return err - } - if len(resp.CompositeAlarms) == 0 { - return fmt.Errorf("Alarm not found") - } - return nil - } +func testAccAwsCloudWatchCompositeAlarmConfig_okActions(suffix string) string { + return composeConfig( + testAccAwsCloudWatchCompositeAlarmBaseConfig(suffix), + fmt.Sprintf(` +resource "aws_sns_topic" "test" { + count = 2 + name = "tf-test-alarms-${count.index}-%[1]s" +} + +resource "aws_cloudwatch_composite_alarm" "test" { + alarm_name = "tf-test-composite-%[1]s" + alarm_rule = "ALARM(${aws_cloudwatch_metric_alarm.test[0].alarm_name})" + ok_actions = aws_sns_topic.test.*.arn +} +`, suffix)) +} + +func testAccAwsCloudWatchCompositeAlarmConfig_updateOkActions(suffix string) string { + return composeConfig( + testAccAwsCloudWatchCompositeAlarmBaseConfig(suffix), + fmt.Sprintf(` +resource "aws_sns_topic" "test" { + count = 2 + name = "tf-test-alarms-${count.index}-%[1]s" +} + +resource "aws_cloudwatch_composite_alarm" "test" { + alarm_name = "tf-test-composite-%[1]s" + alarm_rule = "ALARM(${aws_cloudwatch_metric_alarm.test[0].alarm_name})" + ok_actions = [aws_sns_topic.test[0].arn] +} +`, suffix)) +} + +func testAccAwsCloudWatchCompositeAlarmConfig_allActions(suffix string) string { + return composeConfig( + testAccAwsCloudWatchCompositeAlarmBaseConfig(suffix), + fmt.Sprintf(` +resource "aws_sns_topic" "test" { + count = 3 + name = "tf-test-alarms-${count.index}-%[1]s" +} + +resource "aws_cloudwatch_composite_alarm" "test" { + alarm_actions = [aws_sns_topic.test[0].arn] + alarm_name = "tf-test-composite-%[1]s" + alarm_rule = "ALARM(${aws_cloudwatch_metric_alarm.test[0].alarm_name})" + insufficient_data_actions = [aws_sns_topic.test[1].arn] + ok_actions = [aws_sns_topic.test[2].arn] +} +`, suffix)) } diff --git a/website/docs/r/cloudwatch_composite_alarm.html.markdown b/website/docs/r/cloudwatch_composite_alarm.html.markdown index 5190bd0b27a..0d74e77c16d 100644 --- a/website/docs/r/cloudwatch_composite_alarm.html.markdown +++ b/website/docs/r/cloudwatch_composite_alarm.html.markdown @@ -31,7 +31,7 @@ EOF ## Argument Reference -* `actions_enabled` - (Optional) Indicates whether actions should be executed during any changes to the alarm state of the composite alarm. Defaults to `true`. +* `actions_enabled` - (Optional, Forces new resource) Indicates whether actions should be executed during any changes to the alarm state of the composite alarm. Defaults to `true`. * `alarm_actions` - (Optional) The set of actions to execute when this alarm transitions to the `ALARM` state from any other state. Each action is specified as an ARN. Up to 5 actions are allowed. * `alarm_description` - (Optional) The description for the composite alarm. * `alarm_name` - (Required) The name for the composite alarm. This name must be unique within the region. 
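Note: the hunk above shows only the head of the reworked `finder.CompositeAlarmByName`; the tail of the function is elided. For readers following the not-found handling in the Read function, the lookup pattern reduces to roughly the following self-contained sketch. The function name and the nil-result-means-absent convention are assumptions about the elided tail, not verbatim provider code:

```go
// A minimal sketch of the context-aware finder pattern, assuming the
// conventional "return (nil, nil) when absent" contract; illustrative only.
package finder

import (
	"context"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatch"
)

// compositeAlarmByName scopes DescribeAlarms to composite alarms and returns
// (nil, nil) when no alarm with the given name exists, so callers can
// distinguish a missing alarm from a genuine API failure.
func compositeAlarmByName(ctx context.Context, conn *cloudwatch.CloudWatch, name string) (*cloudwatch.CompositeAlarm, error) {
	input := &cloudwatch.DescribeAlarmsInput{
		AlarmNames: aws.StringSlice([]string{name}),
		AlarmTypes: aws.StringSlice([]string{cloudwatch.AlarmTypeCompositeAlarm}),
	}

	output, err := conn.DescribeAlarmsWithContext(ctx, input)
	if err != nil {
		return nil, err
	}

	if output == nil || len(output.CompositeAlarms) == 0 {
		return nil, nil // not found; Read maps this to removal from state
	}

	return output.CompositeAlarms[0], nil
}
```

Returning nil rather than an error for a missing alarm is what lets the `!d.IsNewResource()` branch in the Read function above treat out-of-band deletion as drift (clearing the resource from state) instead of failing the refresh.
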
From 95f94addc338289e32589fc2c3c02f3bc121880d Mon Sep 17 00:00:00 2001 From: angie pinilla Date: Wed, 13 Jan 2021 21:55:57 -0500 Subject: [PATCH 0656/1212] Update CHANGELOG for #15023 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index db19a9d5b1f..03578cea0e8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ FEATURES * **New Data Source:** `aws_api_gateway_domain_name` [GH-12489] +* **New Resource:** `aws_cloudwatch_composite_alarm` [GH-15023] * **New Resource:** `aws_fms_policy` ([#9594](https://github.com/hashicorp/terraform-provider-aws/issues/9594)) * **New Resource:** `aws_sagemaker_domain` [GH-16077] From 469a9cfa1de9cf1c1b29c80b3b3cdbfea041f96d Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 14 Jan 2021 01:48:18 -0500 Subject: [PATCH 0657/1212] align identitystore data-sources with service design --- aws/data_source_aws_identity_store_group.go | 115 --------------- aws/data_source_aws_identity_store_user.go | 115 --------------- aws/data_source_aws_identitystore_group.go | 139 ++++++++++++++++++ ...ata_source_aws_identitystore_group_test.go | 131 +++++++++++++++++ aws/data_source_aws_identitystore_user.go | 111 ++++++++++++++ ...data_source_aws_identitystore_user_test.go | 131 +++++++++++++++++ aws/provider.go | 4 +- .../docs/d/identity_store_group.html.markdown | 42 ------ .../docs/d/identity_store_user.html.markdown | 42 ------ .../docs/d/identitystore_group.html.markdown | 52 +++++++ .../docs/d/identitystore_user.html.markdown | 52 +++++++ 11 files changed, 618 insertions(+), 316 deletions(-) delete mode 100644 aws/data_source_aws_identity_store_group.go delete mode 100644 aws/data_source_aws_identity_store_user.go create mode 100644 aws/data_source_aws_identitystore_group.go create mode 100644 aws/data_source_aws_identitystore_group_test.go create mode 100644 aws/data_source_aws_identitystore_user.go create mode 100644 aws/data_source_aws_identitystore_user_test.go delete mode 100644 website/docs/d/identity_store_group.html.markdown delete mode 100644 website/docs/d/identity_store_user.html.markdown create mode 100644 website/docs/d/identitystore_group.html.markdown create mode 100644 website/docs/d/identitystore_user.html.markdown diff --git a/aws/data_source_aws_identity_store_group.go b/aws/data_source_aws_identity_store_group.go deleted file mode 100644 index b69028d0cbb..00000000000 --- a/aws/data_source_aws_identity_store_group.go +++ /dev/null @@ -1,115 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/identitystore" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func dataSourceAwsIdentityStoreGroup() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsIdentityStoreGroupRead, - - Schema: map[string]*schema.Schema{ - "identity_store_id": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 64), - validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9-]*$`), "must match [a-zA-Z0-9-]"), - ), - }, - - "group_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ConflictsWith: []string{"display_name"}, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 47), - validation.StringMatch(regexp.MustCompile(`^([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}$`), 
"must match ([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}"), - ), - }, - - "display_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ConflictsWith: []string{"group_id"}, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 1024), - validation.StringMatch(regexp.MustCompile(`^[\p{L}\p{M}\p{S}\p{N}\p{P}\t\n\r ]+$`), "must match [\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}\\t\\n\\r ]"), - ), - }, - }, - } -} - -func dataSourceAwsIdentityStoreGroupRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).identitystoreconn - - identityStoreID := d.Get("identity_store_id").(string) - groupID := d.Get("group_id").(string) - displayName := d.Get("display_name").(string) - - if groupID != "" { - log.Printf("[DEBUG] Reading AWS Identity Store Group") - resp, err := conn.DescribeGroup(&identitystore.DescribeGroupInput{ - IdentityStoreId: aws.String(identityStoreID), - GroupId: aws.String(groupID), - }) - if err != nil { - aerr, ok := err.(awserr.Error) - if ok && aerr.Code() == identitystore.ErrCodeResourceNotFoundException { - log.Printf("[DEBUG] AWS Identity Store Group not found with the id %v", groupID) - d.SetId("") - return nil - } - return fmt.Errorf("Error getting AWS Identity Store Group: %s", err) - } - d.SetId(groupID) - d.Set("display_name", resp.DisplayName) - } else if displayName != "" { - log.Printf("[DEBUG] Reading AWS Identity Store Groups") - req := &identitystore.ListGroupsInput{ - IdentityStoreId: aws.String(identityStoreID), - Filters: []*identitystore.Filter{ - { - AttributePath: aws.String("DisplayName"), - AttributeValue: aws.String(displayName), - }, - }, - } - groups := []*identitystore.Group{} - err := conn.ListGroupsPages(req, func(page *identitystore.ListGroupsOutput, lastPage bool) bool { - if page != nil && len(page.Groups) != 0 { - groups = append(groups, page.Groups...) - } - return !lastPage - }) - if err != nil { - return fmt.Errorf("Error getting AWS Identity Store Groups: %s", err) - } - if len(groups) == 0 { - log.Printf("[DEBUG] No AWS Identity Store Groups found") - d.SetId("") - return nil - } - if len(groups) > 1 { - return fmt.Errorf("Found multiple AWS Identity Store Groups with the DisplayName %v. Not sure which one to use. 
%s", displayName, groups) - } - group := groups[0] - d.SetId(aws.StringValue(group.GroupId)) - d.Set("group_id", group.GroupId) - } else { - return fmt.Errorf("One of group_id or display_name is required") - } - - return nil -} diff --git a/aws/data_source_aws_identity_store_user.go b/aws/data_source_aws_identity_store_user.go deleted file mode 100644 index 766a0e518d2..00000000000 --- a/aws/data_source_aws_identity_store_user.go +++ /dev/null @@ -1,115 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/identitystore" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func dataSourceAwsIdentityStoreUser() *schema.Resource { - return &schema.Resource{ - Read: dataSourceAwsIdentityStoreUserRead, - - Schema: map[string]*schema.Schema{ - "identity_store_id": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 64), - validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9-]*$`), "must match [a-zA-Z0-9-]"), - ), - }, - - "user_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ConflictsWith: []string{"user_name"}, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 47), - validation.StringMatch(regexp.MustCompile(`^([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}$`), "must match ([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}"), - ), - }, - - "user_name": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ConflictsWith: []string{"user_id"}, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 128), - validation.StringMatch(regexp.MustCompile(`^[\p{L}\p{M}\p{S}\p{N}\p{P}]+$`), "must match [\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}]"), - ), - }, - }, - } -} - -func dataSourceAwsIdentityStoreUserRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).identitystoreconn - - identityStoreID := d.Get("identity_store_id").(string) - userID := d.Get("user_id").(string) - userName := d.Get("user_name").(string) - - if userID != "" { - log.Printf("[DEBUG] Reading AWS Identity Store User") - resp, err := conn.DescribeUser(&identitystore.DescribeUserInput{ - IdentityStoreId: aws.String(identityStoreID), - UserId: aws.String(userID), - }) - if err != nil { - aerr, ok := err.(awserr.Error) - if ok && aerr.Code() == identitystore.ErrCodeResourceNotFoundException { - log.Printf("[DEBUG] AWS Identity Store User not found with the id %v", userID) - d.SetId("") - return nil - } - return fmt.Errorf("Error getting AWS Identity Store User: %s", err) - } - d.SetId(userID) - d.Set("user_name", resp.UserName) - } else if userName != "" { - log.Printf("[DEBUG] Reading AWS Identity Store Users") - req := &identitystore.ListUsersInput{ - IdentityStoreId: aws.String(identityStoreID), - Filters: []*identitystore.Filter{ - { - AttributePath: aws.String("UserName"), - AttributeValue: aws.String(userName), - }, - }, - } - users := []*identitystore.User{} - err := conn.ListUsersPages(req, func(page *identitystore.ListUsersOutput, lastPage bool) bool { - if page != nil && len(page.Users) != 0 { - users = append(users, page.Users...) 
- } - return !lastPage - }) - if err != nil { - return fmt.Errorf("Error getting AWS Identity Store Users: %s", err) - } - if len(users) == 0 { - log.Printf("[DEBUG] No AWS Identity Store Users found") - d.SetId("") - return nil - } - if len(users) > 1 { - return fmt.Errorf("Found multiple AWS Identity Store Users with the UserName %v. Not sure which one to use. %s", userName, users) - } - user := users[0] - d.SetId(aws.StringValue(user.UserId)) - d.Set("user_id", user.UserId) - } else { - return fmt.Errorf("One of user_id or user_name is required") - } - - return nil -} diff --git a/aws/data_source_aws_identitystore_group.go b/aws/data_source_aws_identitystore_group.go new file mode 100644 index 00000000000..2cf37074ea9 --- /dev/null +++ b/aws/data_source_aws_identitystore_group.go @@ -0,0 +1,139 @@ +package aws + +import ( + "fmt" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/identitystore" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func dataSourceAwsIdentityStoreGroup() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsIdentityStoreGroupRead, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Computed: true, + }, + + "filter": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "attribute_path": { + Type: schema.TypeString, + Required: true, + }, + "attribute_value": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + + "group_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 47), + validation.StringMatch(regexp.MustCompile(`^([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}$`), "must match ([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}"), + ), + }, + + "identity_store_id": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 64), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9-]*$`), "must match [a-zA-Z0-9-]"), + ), + }, + }, + } +} + +func dataSourceAwsIdentityStoreGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).identitystoreconn + + input := &identitystore.ListGroupsInput{ + IdentityStoreId: aws.String(d.Get("identity_store_id").(string)), + Filters: expandIdentityStoreFilters(d.Get("filter").(*schema.Set).List()), + } + + var results []*identitystore.Group + + err := conn.ListGroupsPages(input, func(page *identitystore.ListGroupsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, group := range page.Groups { + if group == nil { + continue + } + + if v, ok := d.GetOk("group_id"); ok && v.(string) != aws.StringValue(group.GroupId) { + continue + } + + results = append(results, group) + } + + return !lastPage + }) + + if err != nil { + return fmt.Errorf("error listing Identity Store Groups: %w", err) + } + + if len(results) == 0 { + return fmt.Errorf("no Identity Store Group found matching criteria; try different search") + } + + if len(results) > 1 { + return fmt.Errorf("multiple Identity Store Groups found matching criteria; try different search") + } + + group := results[0] + + d.SetId(aws.StringValue(group.GroupId)) + d.Set("display_name", group.DisplayName) + d.Set("group_id", group.GroupId) + + return nil +} + +func 
expandIdentityStoreFilters(l []interface{}) []*identitystore.Filter { + if len(l) == 0 || l[0] == nil { + return nil + } + + filters := make([]*identitystore.Filter, 0, len(l)) + for _, v := range l { + tfMap, ok := v.(map[string]interface{}) + if !ok { + continue + } + + filter := &identitystore.Filter{} + + if v, ok := tfMap["attribute_path"].(string); ok && v != "" { + filter.AttributePath = aws.String(v) + } + + if v, ok := tfMap["attribute_value"].(string); ok && v != "" { + filter.AttributeValue = aws.String(v) + } + + filters = append(filters, filter) + } + + return filters +} diff --git a/aws/data_source_aws_identitystore_group_test.go b/aws/data_source_aws_identitystore_group_test.go new file mode 100644 index 00000000000..c5c2ce3780e --- /dev/null +++ b/aws/data_source_aws_identitystore_group_test.go @@ -0,0 +1,131 @@ +package aws + +import ( + "fmt" + "os" + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccAWSIdentityStoreGroupDataSource_DisplayName(t *testing.T) { + dataSourceName := "data.aws_identitystore_group.test" + name := os.Getenv("AWS_IDENTITY_STORE_GROUP_NAME") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckAWSSSOAdminInstances(t) + testAccPreCheckAWSIdentityStoreGroupName(t) + }, + Providers: testAccProviders, + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: testAccAWSIdentityStoreGroupDataSourceConfigDisplayName(name), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(dataSourceName, "group_id"), + resource.TestCheckResourceAttr(dataSourceName, "display_name", name), + ), + }, + }, + }) +} + +func TestAccAWSIdentityStoreGroupDataSource_GroupID(t *testing.T) { + dataSourceName := "data.aws_identitystore_group.test" + name := os.Getenv("AWS_IDENTITY_STORE_GROUP_NAME") + groupID := os.Getenv("AWS_IDENTITY_STORE_GROUP_ID") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckAWSSSOAdminInstances(t) + testAccPreCheckAWSIdentityStoreGroupName(t) + testAccPreCheckAWSIdentityStoreGroupID(t) + }, + Providers: testAccProviders, + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: testAccAWSIdentityStoreGroupDataSourceConfigGroupID(name, groupID), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "group_id", groupID), + resource.TestCheckResourceAttrSet(dataSourceName, "display_name"), + ), + }, + }, + }) +} + +func TestAccAWSIdentityStoreGroupDataSource_NonExistent(t *testing.T) { + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOAdminInstances(t) }, + Providers: testAccProviders, + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: testAccAWSIdentityStoreGroupDataSourceConfigNonExistent, + ExpectError: regexp.MustCompile(`no Identity Store Group found matching criteria`), + }, + }, + }) +} + +func testAccPreCheckAWSIdentityStoreGroupName(t *testing.T) { + if os.Getenv("AWS_IDENTITY_STORE_GROUP_NAME") == "" { + t.Skip("AWS_IDENTITY_STORE_GROUP_NAME env var must be set for AWS Identity Store Group acceptance test. " + + "This is required until ListGroups API returns results without filtering by name.") + } +} + +func testAccPreCheckAWSIdentityStoreGroupID(t *testing.T) { + if os.Getenv("AWS_IDENTITY_STORE_GROUP_ID") == "" { + t.Skip("AWS_IDENTITY_STORE_GROUP_ID env var must be set for AWS Identity Store Group acceptance test. 
" + + "This is required until ListGroups API returns results without filtering by name.") + } +} + +func testAccAWSIdentityStoreGroupDataSourceConfigDisplayName(name string) string { + return fmt.Sprintf(` +data "aws_ssoadmin_instances" "test" {} + +data "aws_identitystore_group" "test" { + filter { + attribute_path = "DisplayName" + attribute_value = %q + } + identity_store_id = tolist(data.aws_ssoadmin_instances.test.identity_store_ids)[0] +} +`, name) +} + +func testAccAWSIdentityStoreGroupDataSourceConfigGroupID(name, id string) string { + return fmt.Sprintf(` +data "aws_ssoadmin_instances" "test" {} + +data "aws_identitystore_group" "test" { + filter { + attribute_path = "DisplayName" + attribute_value = %q + } + + group_id = %q + + identity_store_id = tolist(data.aws_ssoadmin_instances.test.identity_store_ids)[0] +} +`, name, id) +} + +const testAccAWSIdentityStoreGroupDataSourceConfigNonExistent = ` +data "aws_ssoadmin_instances" "test" {} + +data "aws_identitystore_group" "test" { + filter { + attribute_path = "DisplayName" + attribute_value = "does-not-exist" + } + identity_store_id = tolist(data.aws_ssoadmin_instances.test.identity_store_ids)[0] +} +` diff --git a/aws/data_source_aws_identitystore_user.go b/aws/data_source_aws_identitystore_user.go new file mode 100644 index 00000000000..9cb95f18051 --- /dev/null +++ b/aws/data_source_aws_identitystore_user.go @@ -0,0 +1,111 @@ +package aws + +import ( + "fmt" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/identitystore" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func dataSourceAwsIdentityStoreUser() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsIdentityStoreUserRead, + + Schema: map[string]*schema.Schema{ + "filter": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "attribute_path": { + Type: schema.TypeString, + Required: true, + }, + "attribute_value": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + + "identity_store_id": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 64), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9-]*$`), "must match [a-zA-Z0-9-]"), + ), + }, + + "user_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 47), + validation.StringMatch(regexp.MustCompile(`^([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}$`), "must match ([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}"), + ), + }, + + "user_name": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAwsIdentityStoreUserRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).identitystoreconn + + input := &identitystore.ListUsersInput{ + IdentityStoreId: aws.String(d.Get("identity_store_id").(string)), + Filters: expandIdentityStoreFilters(d.Get("filter").(*schema.Set).List()), + } + + var results []*identitystore.User + + err := conn.ListUsersPages(input, func(page *identitystore.ListUsersOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, user := range page.Users { + if user == nil { + continue + } + + if v, ok := d.GetOk("user_id"); ok && v.(string) != aws.StringValue(user.UserId) { + continue + } + + 
results = append(results, user) + } + + return !lastPage + }) + + if err != nil { + return fmt.Errorf("error listing Identity Store Users: %w", err) + } + + if len(results) == 0 { + return fmt.Errorf("no Identity Store User found matching criteria; try different search") + } + + if len(results) > 1 { + return fmt.Errorf("multiple Identity Store Users found matching criteria; try different search") + } + + user := results[0] + + d.SetId(aws.StringValue(user.UserId)) + d.Set("user_id", user.UserId) + d.Set("user_name", user.UserName) + + return nil +} diff --git a/aws/data_source_aws_identitystore_user_test.go b/aws/data_source_aws_identitystore_user_test.go new file mode 100644 index 00000000000..346b81617f8 --- /dev/null +++ b/aws/data_source_aws_identitystore_user_test.go @@ -0,0 +1,131 @@ +package aws + +import ( + "fmt" + "os" + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccAWSIdentityStoreUserDataSource_UserName(t *testing.T) { + dataSourceName := "data.aws_identitystore_user.test" + name := os.Getenv("AWS_IDENTITY_STORE_USER_NAME") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckAWSSSOAdminInstances(t) + testAccPreCheckAWSIdentityStoreUserName(t) + }, + Providers: testAccProviders, + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: testAccAWSIdentityStoreUserDataSourceConfigDisplayName(name), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(dataSourceName, "user_id"), + resource.TestCheckResourceAttr(dataSourceName, "user_name", name), + ), + }, + }, + }) +} + +func TestAccAWSIdentityStoreUserDataSource_UserID(t *testing.T) { + dataSourceName := "data.aws_identitystore_user.test" + name := os.Getenv("AWS_IDENTITY_STORE_USER_NAME") + userID := os.Getenv("AWS_IDENTITY_STORE_USER_ID") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckAWSSSOAdminInstances(t) + testAccPreCheckAWSIdentityStoreUserName(t) + testAccPreCheckAWSIdentityStoreUserID(t) + }, + Providers: testAccProviders, + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: testAccAWSIdentityStoreUserDataSourceConfigUserID(name, userID), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "user_id", userID), + resource.TestCheckResourceAttrSet(dataSourceName, "user_name"), + ), + }, + }, + }) +} + +func TestAccAWSIdentityStoreUserDataSource_NonExistent(t *testing.T) { + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOAdminInstances(t) }, + Providers: testAccProviders, + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: testAccAWSIdentityStoreUserDataSourceConfigNonExistent, + ExpectError: regexp.MustCompile(`no Identity Store User found matching criteria`), + }, + }, + }) +} + +func testAccPreCheckAWSIdentityStoreUserName(t *testing.T) { + if os.Getenv("AWS_IDENTITY_STORE_USER_NAME") == "" { + t.Skip("AWS_IDENTITY_STORE_USER_NAME env var must be set for AWS Identity Store User acceptance test. " + + "This is required until ListUsers API returns results without filtering by name.") + } +} + +func testAccPreCheckAWSIdentityStoreUserID(t *testing.T) { + if os.Getenv("AWS_IDENTITY_STORE_USER_ID") == "" { + t.Skip("AWS_IDENTITY_STORE_USER_ID env var must be set for AWS Identity Store User acceptance test. 
" + + "This is required until ListUsers API returns results without filtering by name.") + } +} + +func testAccAWSIdentityStoreUserDataSourceConfigDisplayName(name string) string { + return fmt.Sprintf(` +data "aws_ssoadmin_instances" "test" {} + +data "aws_identitystore_user" "test" { + filter { + attribute_path = "UserName" + attribute_value = %q + } + identity_store_id = tolist(data.aws_ssoadmin_instances.test.identity_store_ids)[0] +} +`, name) +} + +func testAccAWSIdentityStoreUserDataSourceConfigUserID(name, id string) string { + return fmt.Sprintf(` +data "aws_ssoadmin_instances" "test" {} + +data "aws_identitystore_user" "test" { + filter { + attribute_path = "UserName" + attribute_value = %q + } + + user_id = %q + + identity_store_id = tolist(data.aws_ssoadmin_instances.test.identity_store_ids)[0] +} +`, name, id) +} + +const testAccAWSIdentityStoreUserDataSourceConfigNonExistent = ` +data "aws_ssoadmin_instances" "test" {} + +data "aws_identitystore_user" "test" { + filter { + attribute_path = "UserName" + attribute_value = "does-not-exist" + } + identity_store_id = tolist(data.aws_ssoadmin_instances.test.identity_store_ids)[0] +} +` diff --git a/aws/provider.go b/aws/provider.go index 3b2c1d2e0a9..22d7a4f9dc3 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -267,8 +267,8 @@ func Provider() *schema.Provider { "aws_iam_role": dataSourceAwsIAMRole(), "aws_iam_server_certificate": dataSourceAwsIAMServerCertificate(), "aws_iam_user": dataSourceAwsIAMUser(), - "aws_identity_store_group": dataSourceAwsIdentityStoreGroup(), - "aws_identity_store_user": dataSourceAwsIdentityStoreUser(), + "aws_identitystore_group": dataSourceAwsIdentityStoreGroup(), + "aws_identitystore_user": dataSourceAwsIdentityStoreUser(), "aws_imagebuilder_component": dataSourceAwsImageBuilderComponent(), "aws_imagebuilder_distribution_configuration": datasourceAwsImageBuilderDistributionConfiguration(), "aws_imagebuilder_image_pipeline": dataSourceAwsImageBuilderImagePipeline(), diff --git a/website/docs/d/identity_store_group.html.markdown b/website/docs/d/identity_store_group.html.markdown deleted file mode 100644 index 460166f4d4a..00000000000 --- a/website/docs/d/identity_store_group.html.markdown +++ /dev/null @@ -1,42 +0,0 @@ ---- -subcategory: "Identity Store" -layout: "aws" -page_title: "AWS: aws_identity_store_group" -description: |- - Get information on an AWS Identity Store Group ---- - -# Data Source: aws_identity_store_group - -Use this data source to get an Identity Store Group. - -## Example Usage - -```hcl -data "aws_sso_instance" "selected" {} - -data "aws_identity_store_group" "example" { - identity_store_id = data.aws_sso_instance.selected.identity_store_id - display_name = "ExampleGroup@example.com" -} - -output "group_id" { - value = data.aws_identity_store_group.example.group_id -} -``` - -## Argument Reference - -The following arguments are supported: - -* `identity_store_id` - (Required) The Identity Store ID associated with the AWS Single Sign-On Instance. -* `group_id` - (Optional) An Identity Store group ID. -* `display_name` - (Optional) An Identity Store group display name. - -## Attributes Reference - -In addition to all arguments above, the following attributes are exported: - -* `id` - The Identity Store group ID. -* `group_id` - The Identity Store group ID. -* `display_name` - The Identity Store group display name. 
diff --git a/website/docs/d/identity_store_user.html.markdown b/website/docs/d/identity_store_user.html.markdown deleted file mode 100644 index e5ca68f3b23..00000000000 --- a/website/docs/d/identity_store_user.html.markdown +++ /dev/null @@ -1,42 +0,0 @@ ---- -subcategory: "Identity Store" -layout: "aws" -page_title: "AWS: aws_identity_store_user" -description: |- - Get information on an AWS Identity Store User ---- - -# Data Source: aws_identity_store_user - -Use this data source to get an Identity Store User. - -## Example Usage - -```hcl -data "aws_sso_instance" "selected" {} - -data "aws_identity_store_user" "example" { - identity_store_id = data.aws_sso_instance.selected.identity_store_id - user_name = "example@example.com" -} - -output "user_id" { - value = data.aws_identity_store_user.example.user_id -} -``` - -## Argument Reference - -The following arguments are supported: - -* `identity_store_id` - (Required) The Identity Store ID associated with the AWS Single Sign-On Instance. -* `user_id` - (Optional) An Identity Store user ID. -* `user_name` - (Optional) An Identity Store user name. - -## Attributes Reference - -In addition to all arguments above, the following attributes are exported: - -* `id` - The Identity Store user ID. -* `user_id` - The Identity Store user ID. -* `user_name` - The Identity Store user name. diff --git a/website/docs/d/identitystore_group.html.markdown b/website/docs/d/identitystore_group.html.markdown new file mode 100644 index 00000000000..274e2606504 --- /dev/null +++ b/website/docs/d/identitystore_group.html.markdown @@ -0,0 +1,52 @@ +--- +subcategory: "Identity Store" +layout: "aws" +page_title: "AWS: aws_identitystore_group" +description: |- + Get information on an Identity Store Group +--- + +# Data Source: aws_identitystore_group + +Use this data source to get an Identity Store Group. + +## Example Usage + +```hcl +data "aws_ssoadmin_instances" "example" {} + +data "aws_identitystore_group" "example" { + identity_store_id = tolist(data.aws_ssoadmin_instances.example.identity_store_ids)[0] + + filter { + attribute_path = "DisplayName" + attribute_value = "ExampleGroup" + } +} + +output "group_id" { + value = data.aws_identitystore_group.example.group_id +} +``` + +## Argument Reference + +The following arguments are supported: + +* `filter` - (Required) Configuration block(s) for filtering. Currently, the AWS Identity Store API supports only 1 filter. Detailed below. +* `group_id` - (Optional) The identifier for a group in the Identity Store. +* `identity_store_id` - (Required) The Identity Store ID associated with the Single Sign-On Instance. + +### `filter` Configuration Block + +The following arguments are supported by the `filter` configuration block: + +* `attribute_path` - (Required) The attribute path that is used to specify which attribute name to search. Currently, `DisplayName` is the only valid attribute path. +* `attribute_value` - (Required) The value for an attribute. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - The identifier of the group in the Identity Store. +* `display_name` - The group's display name value. 
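The read path behind this data source is not shown in the hunks above, but it presumably mirrors `dataSourceAwsIdentityStoreUserRead`: each `filter` block maps one-to-one onto an `identitystore.Filter` (see `expandIdentityStoreFilters` earlier in this series), the optional `group_id` is matched client-side while paging, and anything other than exactly one match is rejected. A minimal sketch under those assumptions:

```go
// Sketch only: assumes the group read mirrors the user read shown above.
input := &identitystore.ListGroupsInput{
	IdentityStoreId: aws.String(d.Get("identity_store_id").(string)),
	Filters:         expandIdentityStoreFilters(d.Get("filter").(*schema.Set).List()),
}

var results []*identitystore.Group

err := conn.ListGroupsPages(input, func(page *identitystore.ListGroupsOutput, lastPage bool) bool {
	for _, group := range page.Groups {
		if group == nil {
			continue
		}

		// group_id is not a supported filter attribute, so match it client-side.
		if v, ok := d.GetOk("group_id"); ok && v.(string) != aws.StringValue(group.GroupId) {
			continue
		}

		results = append(results, group)
	}

	return !lastPage
})
```

Zero matches would then surface as the `no Identity Store Group found matching criteria` error that the `_NonExistent` acceptance test expects.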
diff --git a/website/docs/d/identitystore_user.html.markdown b/website/docs/d/identitystore_user.html.markdown new file mode 100644 index 00000000000..f5275130e0a --- /dev/null +++ b/website/docs/d/identitystore_user.html.markdown @@ -0,0 +1,52 @@ +--- +subcategory: "Identity Store" +layout: "aws" +page_title: "AWS: aws_identitystore_user" +description: |- + Get information on an Identity Store User +--- + +# Data Source: aws_identitystore_user + +Use this data source to get an Identity Store User. + +## Example Usage + +```hcl +data "aws_ssoadmin_instances" "example" {} + +data "aws_identitystore_user" "example" { + identity_store_id = tolist(data.aws_ssoadmin_instances.example.identity_store_ids)[0] + + filter { + attribute_path = "UserName" + attribute_value = "ExampleUser" + } +} + +output "user_id" { + value = data.aws_identitystore_user.example.user_id +} +``` + +## Argument Reference + +The following arguments are supported: + +* `filter` - (Required) Configuration block(s) for filtering. Currently, the AWS Identity Store API supports only 1 filter. Detailed below. +* `user_id` - (Optional) The identifier for a user in the Identity Store. +* `identity_store_id` - (Required) The Identity Store ID associated with the Single Sign-On Instance. + +### `filter` Configuration Block + +The following arguments are supported by the `filter` configuration block: + +* `attribute_path` - (Required) The attribute path that is used to specify which attribute name to search. Currently, `UserName` is the only valid attribute path. +* `attribute_value` - (Required) The value for an attribute. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - The identifier of the user in the Identity Store. +* `user_name` - The user's user name value. From 5e2e798436469a6ee7c956dc9a7e4ed3822644b8 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Wed, 13 Jan 2021 23:42:23 -0800 Subject: [PATCH 0658/1212] Allow additional resource types. A full list of resource types is needed so that any security policy can be used. 
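The fixed whitelist keeps `terraform plan` failing fast on unsupported types, at the cost of tracking FMS's supported-type list by hand. An alternative (a sketch only, not what this patch does) would be to validate just the CloudFormation type shape and let the FMS API reject anything it does not support:

```go
// Sketch: shape-based validation instead of a hand-maintained whitelist.
ValidateFunc: validation.StringMatch(
	regexp.MustCompile(`^AWS::[A-Za-z0-9]+::[A-Za-z0-9]+$`),
	"must be a CloudFormation resource type such as AWS::EC2::SecurityGroup",
),
```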
--- aws/resource_aws_fms_policy.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_fms_policy.go b/aws/resource_aws_fms_policy.go index fccddfa43b4..2f90cd32dbf 100644 --- a/aws/resource_aws_fms_policy.go +++ b/aws/resource_aws_fms_policy.go @@ -97,7 +97,7 @@ func resourceAwsFmsPolicy() *schema.Resource { Required: true, Elem: &schema.Schema{ Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"AWS::ApiGateway::Stage", "AWS::ElasticLoadBalancingV2::LoadBalancer", "AWS::CloudFront::Distribution"}, false), + ValidateFunc: validation.StringInSlice([]string{"AWS::ApiGateway::Stage", "AWS::ElasticLoadBalancingV2::LoadBalancer", "AWS::CloudFront::Distribution", "AWS::EC2::NetworkInterface", "AWS::EC2::Instance", "AWS::EC2::SecurityGroup"}, false), }, Set: schema.HashString, }, From a199be98bf32b6f41eff5c2e9638b03ed5a4ddd1 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 11 Dec 2020 15:57:21 +0200 Subject: [PATCH 0659/1212] initial commit --- .../service/sagemaker/finder/finder.go | 19 + .../service/sagemaker/waiter/status.go | 23 + .../service/sagemaker/waiter/waiter.go | 38 ++ aws/provider.go | 2 + aws/resource_aws_sagemaker_feature_group.go | 504 ++++++++++++++++++ ...source_aws_sagemaker_feature_group_test.go | 190 +++++++ 6 files changed, 776 insertions(+) create mode 100644 aws/resource_aws_sagemaker_feature_group.go create mode 100644 aws/resource_aws_sagemaker_feature_group_test.go diff --git a/aws/internal/service/sagemaker/finder/finder.go b/aws/internal/service/sagemaker/finder/finder.go index 06d23f1f24f..9c0a55f460c 100644 --- a/aws/internal/service/sagemaker/finder/finder.go +++ b/aws/internal/service/sagemaker/finder/finder.go @@ -61,3 +61,22 @@ func DomainByName(conn *sagemaker.SageMaker, domainID string) (*sagemaker.Descri return output, nil } + +// FeatureGroupByName returns the feature group corresponding to the specified name. +// Returns nil if no feature group is found. 
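+// Note that the SageMaker API reports a missing feature group as a
+// ResourceNotFound error from DescribeFeatureGroup rather than a nil output;
+// callers such as the status waiter are expected to translate that error.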
+func FeatureGroupByName(conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeFeatureGroupOutput, error) { + input := &sagemaker.DescribeFeatureGroupInput{ + FeatureGroupName: aws.String(name), + } + + output, err := conn.DescribeFeatureGroup(input) + if err != nil { + return nil, err + } + + if output == nil { + return nil, nil + } + + return output, nil +} diff --git a/aws/internal/service/sagemaker/waiter/status.go b/aws/internal/service/sagemaker/waiter/status.go index cb55b5ee3b3..8a9baadc920 100644 --- a/aws/internal/service/sagemaker/waiter/status.go +++ b/aws/internal/service/sagemaker/waiter/status.go @@ -7,6 +7,7 @@ import ( "github.com/aws/aws-sdk-go/service/sagemaker" "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/finder" ) const ( @@ -14,6 +15,8 @@ const ( SagemakerImageStatusNotFound = "NotFound" SagemakerImageStatusFailed = "Failed" SagemakerDomainStatusNotFound = "NotFound" + SagemakerFeatureGroupStatusNotFound = "NotFound" + SagemakerFeatureGroupStatusUnknown = "Unknown" ) // NotebookInstanceStatus fetches the NotebookInstance and its Status @@ -94,3 +97,23 @@ func DomainStatus(conn *sagemaker.SageMaker, domainID string) resource.StateRefr return output, aws.StringValue(output.Status), nil } } + +// FeatureGroupStatus fetches the Feature Group and its Status +func FeatureGroupStatus(conn *sagemaker.SageMaker, name string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := finder.FeatureGroupByName(conn, name) + if tfawserr.ErrCodeEquals(err, sagemaker.ErrCodeResourceNotFound) { + return nil, SagemakerFeatureGroupStatusNotFound, nil + } + + if err != nil { + return nil, SagemakerFeatureGroupStatusUnknown, err + } + + if output == nil { + return nil, SagemakerFeatureGroupStatusNotFound, nil + } + + return output, aws.StringValue(output.FeatureGroupStatus), nil + } +} diff --git a/aws/internal/service/sagemaker/waiter/waiter.go b/aws/internal/service/sagemaker/waiter/waiter.go index decbbab95bb..b34bf3fcf73 100644 --- a/aws/internal/service/sagemaker/waiter/waiter.go +++ b/aws/internal/service/sagemaker/waiter/waiter.go @@ -15,6 +15,8 @@ const ( ImageDeletedTimeout = 10 * time.Minute DomainInServiceTimeout = 10 * time.Minute DomainDeletedTimeout = 10 * time.Minute + FeatureGroupCreatedTimeout = 10 * time.Minute + FeatureGroupDeletedTimeout = 10 * time.Minute ) // NotebookInstanceInService waits for a NotebookInstance to return InService @@ -160,3 +162,39 @@ func DomainDeleted(conn *sagemaker.SageMaker, domainID string) (*sagemaker.Descr return nil, err } + +// FeatureGroupCreated waits for a Feature Group to return Created +func FeatureGroupCreated(conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeFeatureGroupOutput, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{sagemaker.FeatureGroupStatusCreating}, + Target: []string{sagemaker.FeatureGroupStatusCreated}, + Refresh: FeatureGroupStatus(conn, name), + Timeout: FeatureGroupCreatedTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*sagemaker.DescribeFeatureGroupOutput); ok { + return output, err + } + + return nil, err +} + +// FeatureGroupDeleted waits for a Feature Group to return Deleted +func FeatureGroupDeleted(conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeFeatureGroupOutput, error) { + stateConf := &resource.StateChangeConf{ + Pending: 
[]string{sagemaker.FeatureGroupStatusDeleting}, + Target: []string{}, + Refresh: FeatureGroupStatus(conn, name), + Timeout: FeatureGroupDeletedTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*sagemaker.DescribeFeatureGroupOutput); ok { + return output, err + } + + return nil, err +} diff --git a/aws/provider.go b/aws/provider.go index 84de3c32fc4..8e0a31ad6a1 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -874,6 +874,8 @@ func Provider() *schema.Provider { "aws_route_table_association": resourceAwsRouteTableAssociation(), "aws_sagemaker_code_repository": resourceAwsSagemakerCodeRepository(), "aws_sagemaker_domain": resourceAwsSagemakerDomain(), + "aws_sagemaker_feature_group": resourceAwsSagemakerFeatureGroup(), + "aws_sagemaker_model": resourceAwsSagemakerModel(), "aws_sagemaker_endpoint_configuration": resourceAwsSagemakerEndpointConfiguration(), "aws_sagemaker_image": resourceAwsSagemakerImage(), "aws_sagemaker_endpoint": resourceAwsSagemakerEndpoint(), diff --git a/aws/resource_aws_sagemaker_feature_group.go b/aws/resource_aws_sagemaker_feature_group.go new file mode 100644 index 00000000000..47fd9cf59e1 --- /dev/null +++ b/aws/resource_aws_sagemaker_feature_group.go @@ -0,0 +1,504 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/sagemaker" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/finder" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/waiter" +) + +func resourceAwsSagemakerFeatureGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSagemakerFeatureGroupCreate, + Read: resourceAwsSagemakerFeatureGroupRead, + Update: resourceAwsSagemakerFeatureGroupUpdate, + Delete: resourceAwsSagemakerFeatureGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "feature_group_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 63), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9](-*[a-zA-Z0-9])$`), + "Must start and end with an alphanumeric character and Can only contain alphanumeric character and hyphens. Spaces are not allowed."), + ), + }, + "record_identifier_feature_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 63), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9]([-_]*[a-zA-Z0-9])$`), + "Must start and end with an alphanumeric character and Can only contains alphanumeric characters, hyphens, underscores. Spaces are not allowed."), + ), + }, + "event_time_feature_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 63), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9]([-_]*[a-zA-Z0-9])$`), + "Must start and end with an alphanumeric character and Can only contains alphanumeric characters, hyphens, underscores. 
Spaces are not allowed."), + ), + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(0, 128), + }, + "role_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + "feature_definition": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + MinItems: 1, + MaxItems: 2500, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "feature_name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 63), + validation.StringNotInSlice([]string{"is_deleted", "write_time", "api_invocation_time"}, false), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9]([-_]*[a-zA-Z0-9])$`), + "Must start and end with an alphanumeric character and Can only contains alphanumeric characters, hyphens, underscores. Spaces are not allowed."), + ), + }, + "feature_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(sagemaker.FeatureType_Values(), false), + }, + }, + }, + }, + "offline_store_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + // AtLeastOneOf: []string{"offline_store_config", "online_store_config"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data_catalog_config": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "catalog": { + Type: schema.TypeString, + Required: true, + }, + "database": { + Type: schema.TypeString, + Required: true, + }, + "table_name": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "s3_storage_config": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, + }, + "s3_uri": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "disable_glue_table_creation": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + "online_store_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + // AtLeastOneOf: []string{"offline_store_config", "online_store_config"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "security_config": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, + }, + }, + }, + }, + "enable_online_store": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, + "tags": tagsSchema(), + }, + } +} + +func resourceAwsSagemakerFeatureGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sagemakerconn + + name := d.Get("feature_group_name").(string) + + input := &sagemaker.CreateFeatureGroupInput{ + FeatureGroupName: aws.String(name), + EventTimeFeatureName: aws.String(d.Get("event_time_feature_name").(string)), + RecordIdentifierFeatureName: aws.String(d.Get("record_identifier_feature_name").(string)), + RoleArn: aws.String(d.Get("role_arn").(string)), + FeatureDefinitions: expandSagemakerFeatureGroupFeatureDefinition(d.Get("feature_definition").([]interface{})), + } + + if v, ok := d.GetOk("description"); ok { + input.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("tags"); ok { + input.Tags = 
keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().SagemakerTags()
+	}
+
+	if v, ok := d.GetOk("offline_store_config"); ok {
+		input.OfflineStoreConfig = expandSagemakerFeatureGroupOfflineStoreConfig(v.([]interface{}))
+	}
+
+	if v, ok := d.GetOk("online_store_config"); ok {
+		input.OnlineStoreConfig = expandSagemakerFeatureGroupOnlineStoreConfig(v.([]interface{}))
+	}
+
+	log.Printf("[DEBUG] Sagemaker Feature Group create config: %#v", *input)
+	_, err := conn.CreateFeatureGroup(input)
+	if err != nil {
+		return fmt.Errorf("error creating SageMaker Feature Group: %w", err)
+	}
+
+	d.SetId(name)
+
+	if _, err := waiter.FeatureGroupCreated(conn, d.Id()); err != nil {
+		return fmt.Errorf("error waiting for SageMaker Feature Group (%s) to create: %w", d.Id(), err)
+	}
+
+	return resourceAwsSagemakerFeatureGroupRead(d, meta)
+}
+
+func resourceAwsSagemakerFeatureGroupRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).sagemakerconn
+
+	output, err := finder.FeatureGroupByName(conn, d.Id())
+	if err != nil {
+		if isAWSErr(err, "ValidationException", "Cannot find FeatureGroup") {
+			d.SetId("")
+			log.Printf("[WARN] Unable to find SageMaker Feature Group (%s); removing from state", d.Id())
+			return nil
+		}
+		return fmt.Errorf("error reading SageMaker Feature Group (%s): %w", d.Id(), err)
+
+	}
+
+	d.Set("feature_group_name", output.FeatureGroupName)
+	d.Set("event_time_feature_name", output.EventTimeFeatureName)
+	d.Set("description", output.Description)
+	d.Set("record_identifier_feature_name", output.RecordIdentifierFeatureName)
+	d.Set("role_arn", output.RoleArn)
+	d.Set("arn", output.FeatureGroupArn)
+
+	if err := d.Set("feature_definition", flattenSagemakerFeatureGroupFeatureDefinition(output.FeatureDefinitions)); err != nil {
+		return fmt.Errorf("error setting feature_definition for Sagemaker Feature Group (%s): %w", d.Id(), err)
+	}
+
+	if err := d.Set("online_store_config", flattenSagemakerFeatureGroupOnlineStoreConfig(output.OnlineStoreConfig)); err != nil {
+		return fmt.Errorf("error setting online_store_config for Sagemaker Feature Group (%s): %w", d.Id(), err)
+	}
+
+	if err := d.Set("offline_store_config", flattenSagemakerFeatureGroupOfflineStoreConfig(output.OfflineStoreConfig)); err != nil {
+		return fmt.Errorf("error setting offline_store_config for Sagemaker Feature Group (%s): %w", d.Id(), err)
+	}
+
+	return nil
+}
+
+func resourceAwsSagemakerFeatureGroupUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).sagemakerconn
+
+	if d.HasChange("tags") {
+		o, n := d.GetChange("tags")
+
+		if err := keyvaluetags.SagemakerUpdateTags(conn, d.Get("arn").(string), o, n); err != nil {
+			return fmt.Errorf("error updating Sagemaker Feature Group (%s) tags: %w", d.Id(), err)
+		}
+	}
+
+	return resourceAwsSagemakerFeatureGroupRead(d, meta)
+}
+
+func resourceAwsSagemakerFeatureGroupDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).sagemakerconn
+
+	input := &sagemaker.DeleteFeatureGroupInput{
+		FeatureGroupName: aws.String(d.Id()),
+	}
+
+	if _, err := conn.DeleteFeatureGroup(input); err != nil {
+		if isAWSErr(err, "ValidationException", "Cannot find FeatureGroup") {
+			return nil
+		}
+		return fmt.Errorf("error deleting SageMaker Feature Group (%s): %w", d.Id(), err)
+	}
+
+	if _, err := waiter.FeatureGroupDeleted(conn, d.Id()); err != nil {
+		if isAWSErr(err, "ValidationException", "RecordNotFound") {
+			return nil
+		}
+		return fmt.Errorf("error waiting for SageMaker Feature Group (%s) to 
delete: %w", d.Id(), err) + } + + return nil +} + +func expandSagemakerFeatureGroupFeatureDefinition(l []interface{}) []*sagemaker.FeatureDefinition { + featureDefs := make([]*sagemaker.FeatureDefinition, 0, len(l)) + + for _, lRaw := range l { + data := lRaw.(map[string]interface{}) + + featureDef := &sagemaker.FeatureDefinition{ + FeatureName: aws.String(data["feature_name"].(string)), + FeatureType: aws.String(data["feature_type"].(string)), + } + + featureDefs = append(featureDefs, featureDef) + } + + return featureDefs +} + +func flattenSagemakerFeatureGroupFeatureDefinition(config []*sagemaker.FeatureDefinition) []map[string]interface{} { + features := make([]map[string]interface{}, 0, len(config)) + + for _, i := range config { + feature := map[string]interface{}{ + "feature_name": aws.StringValue(i.FeatureName), + "feature_type": aws.StringValue(i.FeatureType), + } + + features = append(features, feature) + } + return features +} + +func expandSagemakerFeatureGroupOnlineStoreConfig(l []interface{}) *sagemaker.OnlineStoreConfig { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + config := &sagemaker.OnlineStoreConfig{ + EnableOnlineStore: aws.Bool(m["enable_online_store"].(bool)), + } + + if v, ok := m["security_config"].([]interface{}); ok && len(v) > 0 { + config.SecurityConfig = expandSagemakerFeatureGroupOnlineStoreConfigSecurityConfig(v) + } + + return config +} + +func flattenSagemakerFeatureGroupOnlineStoreConfig(config *sagemaker.OnlineStoreConfig) []map[string]interface{} { + if config == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "enable_online_store": aws.BoolValue(config.EnableOnlineStore), + } + + if config.SecurityConfig != nil { + m["security_config"] = flattenSagemakerFeatureGroupOnlineStoreConfigSecurityConfig(config.SecurityConfig) + } + + return []map[string]interface{}{m} +} + +func expandSagemakerFeatureGroupOnlineStoreConfigSecurityConfig(l []interface{}) *sagemaker.OnlineStoreSecurityConfig { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + config := &sagemaker.OnlineStoreSecurityConfig{ + KmsKeyId: aws.String(m["kms_key_id"].(string)), + } + + return config +} + +func flattenSagemakerFeatureGroupOnlineStoreConfigSecurityConfig(config *sagemaker.OnlineStoreSecurityConfig) []map[string]interface{} { + if config == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "kms_key_id": aws.StringValue(config.KmsKeyId), + } + + return []map[string]interface{}{m} +} + +func expandSagemakerFeatureGroupOfflineStoreConfig(l []interface{}) *sagemaker.OfflineStoreConfig { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + config := &sagemaker.OfflineStoreConfig{} + + if v, ok := m["s3_storage_config"].([]interface{}); ok && len(v) > 0 { + config.S3StorageConfig = expandSagemakerFeatureGroupOfflineStoreConfigS3StorageConfig(v) + } + + if v, ok := m["data_catalog_config"].([]interface{}); ok && len(v) > 0 { + config.DataCatalogConfig = expandSagemakerFeatureGroupOfflineStoreConfigDataCatalogConfig(v) + } + + if v, ok := m["disable_glue_table_creation"].(bool); ok { + config.DisableGlueTableCreation = aws.Bool(v) + } + + return config +} + +func flattenSagemakerFeatureGroupOfflineStoreConfig(config *sagemaker.OfflineStoreConfig) []map[string]interface{} { + if config == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "disable_glue_table_creation": 
aws.BoolValue(config.DisableGlueTableCreation), + } + + if config.DataCatalogConfig != nil { + m["data_catalog_config"] = flattenSagemakerFeatureGroupOfflineStoreConfigDataCatalogConfig(config.DataCatalogConfig) + } + + if config.S3StorageConfig != nil { + m["s3_storage_config"] = flattenSagemakerFeatureGroupOfflineStoreConfigS3StorageConfig(config.S3StorageConfig) + } + + return []map[string]interface{}{m} +} + +func expandSagemakerFeatureGroupOfflineStoreConfigS3StorageConfig(l []interface{}) *sagemaker.S3StorageConfig { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + config := &sagemaker.S3StorageConfig{ + S3Uri: aws.String(m["s3_uri"].(string)), + } + + if v, ok := m["kms_key_id"].(string); ok && v != "" { + config.KmsKeyId = aws.String(m["kms_key_id"].(string)) + } + + return config +} + +func flattenSagemakerFeatureGroupOfflineStoreConfigS3StorageConfig(config *sagemaker.S3StorageConfig) []map[string]interface{} { + if config == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "s3_uri": aws.StringValue(config.S3Uri), + } + + if config.KmsKeyId != nil { + m["kms_key_id"] = aws.StringValue(config.KmsKeyId) + } + + return []map[string]interface{}{m} +} + +func expandSagemakerFeatureGroupOfflineStoreConfigDataCatalogConfig(l []interface{}) *sagemaker.DataCatalogConfig { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + config := &sagemaker.DataCatalogConfig{ + Catalog: aws.String(m["catalog"].(string)), + Database: aws.String(m["database"].(string)), + TableName: aws.String(m["table_name"].(string)), + } + + return config +} + +func flattenSagemakerFeatureGroupOfflineStoreConfigDataCatalogConfig(config *sagemaker.DataCatalogConfig) []map[string]interface{} { + if config == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "catalog": aws.StringValue(config.Catalog), + "database": aws.StringValue(config.Database), + "table_name": aws.StringValue(config.TableName), + } + + return []map[string]interface{}{m} +} diff --git a/aws/resource_aws_sagemaker_feature_group_test.go b/aws/resource_aws_sagemaker_feature_group_test.go new file mode 100644 index 00000000000..e94b9ac79fb --- /dev/null +++ b/aws/resource_aws_sagemaker_feature_group_test.go @@ -0,0 +1,190 @@ +package aws + +import ( + "fmt" + "log" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/sagemaker" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/finder" +) + +func init() { + resource.AddTestSweepers("aws_sagemaker_feature_group", &resource.Sweeper{ + Name: "aws_sagemaker_feature_group", + F: testSweepSagemakerFeatureGroups, + }) +} + +func testSweepSagemakerFeatureGroups(region string) error { + client, err := sharedClientForRegion(region) + if err != nil { + return fmt.Errorf("error getting client: %s", err) + } + conn := client.(*AWSClient).sagemakerconn + + err = conn.ListFeatureGroupsPages(&sagemaker.ListFeatureGroupsInput{}, func(page *sagemaker.ListFeatureGroupsOutput, lastPage bool) bool { + for _, group := range page.FeatureGroupSummaries { + name := aws.StringValue(group.FeatureGroupName) + + input := &sagemaker.DeleteFeatureGroupInput{ + FeatureGroupName: group.FeatureGroupName, + } + + log.Printf("[INFO] Deleting 
SageMaker Feature Group: %s", name)
+			if _, err := conn.DeleteFeatureGroup(input); err != nil {
+				log.Printf("[ERROR] Error deleting SageMaker Feature Group (%s): %s", name, err)
+				continue
+			}
+		}
+
+		return !lastPage
+	})
+
+	if testSweepSkipSweepError(err) {
+		log.Printf("[WARN] Skipping SageMaker Feature Group sweep for %s: %s", region, err)
+		return nil
+	}
+
+	if err != nil {
+		return fmt.Errorf("Error retrieving SageMaker Feature Groups: %w", err)
+	}
+
+	return nil
+}
+
+func TestAccAWSSagemakerFeatureGroup_basic(t *testing.T) {
+	var notebook sagemaker.DescribeFeatureGroupOutput
+	rName := acctest.RandomWithPrefix("tf-acc-test")
+	resourceName := "aws_sagemaker_feature_group.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSSagemakerFeatureGroupDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAWSSagemakerFeatureGroupBasicConfig(rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSSagemakerFeatureGroupExists(resourceName, &notebook),
+					resource.TestCheckResourceAttr(resourceName, "feature_group_name", rName),
+					// testAccCheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("code-repository/%s", rName)),
+					// resource.TestCheckResourceAttr(resourceName, "git_config.#", "1"),
+					// resource.TestCheckResourceAttr(resourceName, "git_config.0.repository_url", "https://github.com/hashicorp/terraform-provider-aws.git"),
+				),
+			},
+			{
+				ResourceName:      resourceName,
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+}
+
+func TestAccAWSSagemakerFeatureGroup_disappears(t *testing.T) {
+	var notebook sagemaker.DescribeFeatureGroupOutput
+	rName := acctest.RandomWithPrefix("tf-acc-test")
+	resourceName := "aws_sagemaker_feature_group.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSSagemakerFeatureGroupDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAWSSagemakerFeatureGroupBasicConfig(rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSSagemakerFeatureGroupExists(resourceName, &notebook),
+					testAccCheckResourceDisappears(testAccProvider, resourceAwsSagemakerFeatureGroup(), resourceName),
+				),
+				ExpectNonEmptyPlan: true,
+			},
+		},
+	})
+}
+
+func testAccCheckAWSSagemakerFeatureGroupDestroy(s *terraform.State) error {
+	conn := testAccProvider.Meta().(*AWSClient).sagemakerconn
+
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "aws_sagemaker_feature_group" {
+			continue
+		}
+
+		featureGroup, err := finder.FeatureGroupByName(conn, rs.Primary.ID)
+		if err != nil {
+			return nil
+		}
+
+		if aws.StringValue(featureGroup.FeatureGroupName) == rs.Primary.ID {
+			return fmt.Errorf("SageMaker Feature Group %q still exists", rs.Primary.ID)
+		}
+	}
+
+	return nil
+}
+
+func testAccCheckAWSSagemakerFeatureGroupExists(n string, featureGroup *sagemaker.DescribeFeatureGroupOutput) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[n]
+		if !ok {
+			return fmt.Errorf("Not found: %s", n)
+		}
+
+		if rs.Primary.ID == "" {
+			return fmt.Errorf("No SageMaker Feature Group ID is set")
+		}
+
+		conn := testAccProvider.Meta().(*AWSClient).sagemakerconn
+		resp, err := finder.FeatureGroupByName(conn, rs.Primary.ID)
+		if err != nil {
+			return err
+		}
+
+		*featureGroup = *resp
+
+		return nil
+	}
+}
+
+func testAccAWSSagemakerFeatureGroupBaseConfig(rName string) string {
+	return 
fmt.Sprintf(` +resource "aws_iam_role" "test" { + name = %[1]q + path = "/" + assume_role_policy = data.aws_iam_policy_document.test.json +} + +data "aws_iam_policy_document" "test" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["sagemaker.amazonaws.com"] + } + } +} +`, rName) +} + +func testAccAWSSagemakerFeatureGroupBasicConfig(rName string) string { + return testAccAWSSagemakerFeatureGroupBaseConfig(rName) + fmt.Sprintf(` +resource "aws_sagemaker_feature_group" "test" { + feature_group_name = %[1]q + record_identifier_feature_name = %[1]q + event_time_feature_name = %[1]q + role_arn = aws_iam_role.test.arn + + feature_definition { + feature_name = %[1]q + feature_type = "String" + } +} +`, rName) +} From 938dc49117abd6d399aec11c651f23961c4d81ce Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 11 Dec 2020 18:18:13 +0200 Subject: [PATCH 0660/1212] fix regex and resource deletion --- aws/resource_aws_sagemaker_feature_group.go | 45 ++++++++++--------- ...source_aws_sagemaker_feature_group_test.go | 17 +++++-- 2 files changed, 36 insertions(+), 26 deletions(-) diff --git a/aws/resource_aws_sagemaker_feature_group.go b/aws/resource_aws_sagemaker_feature_group.go index 47fd9cf59e1..0c4f4c1ad74 100644 --- a/aws/resource_aws_sagemaker_feature_group.go +++ b/aws/resource_aws_sagemaker_feature_group.go @@ -7,6 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/sagemaker" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" @@ -34,8 +35,8 @@ func resourceAwsSagemakerFeatureGroup() *schema.Resource { Required: true, ForceNew: true, ValidateFunc: validation.All( - validation.StringLenBetween(1, 63), - validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9](-*[a-zA-Z0-9])$`), + validation.StringLenBetween(1, 64), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,63}`), "Must start and end with an alphanumeric character and Can only contain alphanumeric character and hyphens. Spaces are not allowed."), ), }, @@ -44,8 +45,8 @@ func resourceAwsSagemakerFeatureGroup() *schema.Resource { Required: true, ForceNew: true, ValidateFunc: validation.All( - validation.StringLenBetween(1, 63), - validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9]([-_]*[a-zA-Z0-9])$`), + validation.StringLenBetween(1, 64), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9]([-_]*[a-zA-Z0-9]){0,63}`), "Must start and end with an alphanumeric character and Can only contains alphanumeric characters, hyphens, underscores. Spaces are not allowed."), ), }, @@ -54,8 +55,8 @@ func resourceAwsSagemakerFeatureGroup() *schema.Resource { Required: true, ForceNew: true, ValidateFunc: validation.All( - validation.StringLenBetween(1, 63), - validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9]([-_]*[a-zA-Z0-9])$`), + validation.StringLenBetween(1, 64), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9]([-_]*[a-zA-Z0-9]){0,63}`), "Must start and end with an alphanumeric character and Can only contains alphanumeric characters, hyphens, underscores. 
Spaces are not allowed."), ), }, @@ -83,9 +84,9 @@ func resourceAwsSagemakerFeatureGroup() *schema.Resource { Type: schema.TypeString, Optional: true, ValidateFunc: validation.All( - validation.StringLenBetween(1, 63), + validation.StringLenBetween(1, 64), validation.StringNotInSlice([]string{"is_deleted", "write_time", "api_invocation_time"}, false), - validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9]([-_]*[a-zA-Z0-9])$`), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9]([-_]*[a-zA-Z0-9]){0,63}`), "Must start and end with an alphanumeric character and Can only contains alphanumeric characters, hyphens, underscores. Spaces are not allowed."), ), }, @@ -98,11 +99,11 @@ func resourceAwsSagemakerFeatureGroup() *schema.Resource { }, }, "offline_store_config": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - // AtLeastOneOf: []string{"offline_store_config", "online_store_config"}, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + AtLeastOneOf: []string{"offline_store_config", "online_store_config"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "data_catalog_config": { @@ -152,16 +153,16 @@ func resourceAwsSagemakerFeatureGroup() *schema.Resource { }, }, "online_store_config": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - // AtLeastOneOf: []string{"offline_store_config", "online_store_config"}, + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + AtLeastOneOf: []string{"offline_store_config", "online_store_config"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "security_config": { Type: schema.TypeList, - Required: true, + Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -235,7 +236,7 @@ func resourceAwsSagemakerFeatureGroupRead(d *schema.ResourceData, meta interface output, err := finder.FeatureGroupByName(conn, d.Id()) if err != nil { - if isAWSErr(err, "ValidationException", "Cannot find FeatureGroup") { + if tfawserr.ErrCodeEquals(err, sagemaker.ErrCodeResourceNotFound) { d.SetId("") log.Printf("[WARN] Unable to find SageMaker Feature Group (%s); removing from state", d.Id()) return nil @@ -288,14 +289,14 @@ func resourceAwsSagemakerFeatureGroupDelete(d *schema.ResourceData, meta interfa } if _, err := conn.DeleteFeatureGroup(input); err != nil { - if isAWSErr(err, "ValidationException", "Cannot find FeatureGroup") { + if tfawserr.ErrCodeEquals(err, sagemaker.ErrCodeResourceNotFound) { return nil } return fmt.Errorf("error deleting SageMaker Feature Group (%s): %w", d.Id(), err) } if _, err := waiter.FeatureGroupDeleted(conn, d.Id()); err != nil { - if isAWSErr(err, "ValidationException", "RecordNotFound") { + if tfawserr.ErrCodeEquals(err, sagemaker.ErrCodeResourceNotFound) { return nil } return fmt.Errorf("error waiting for SageMaker Feature Group (%s) to delete: %w", d.Id(), err) diff --git a/aws/resource_aws_sagemaker_feature_group_test.go b/aws/resource_aws_sagemaker_feature_group_test.go index e94b9ac79fb..2c5a6fffa7d 100644 --- a/aws/resource_aws_sagemaker_feature_group_test.go +++ b/aws/resource_aws_sagemaker_feature_group_test.go @@ -72,9 +72,14 @@ func TestAccAWSSagemakerFeatureGroup_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAWSSagemakerFeatureGroupExists(resourceName, ¬ebook), resource.TestCheckResourceAttr(resourceName, "feature_group_name", rName), - // testAccCheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", 
fmt.Sprintf("code-repository/%s", rName)), - // resource.TestCheckResourceAttr(resourceName, "git_config.#", "1"), - // resource.TestCheckResourceAttr(resourceName, "git_config.0.repository_url", "https://github.com/hashicorp/terraform-provider-aws.git"), + resource.TestCheckResourceAttr(resourceName, "event_time_feature_name", rName), + resource.TestCheckResourceAttr(resourceName, "record_identifier_feature_name", rName), + resource.TestCheckResourceAttr(resourceName, "online_store_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "online_store_config.0.enable_online_store", "true"), + resource.TestCheckResourceAttr(resourceName, "feature_definition.#", "1"), + resource.TestCheckResourceAttr(resourceName, "feature_definition.0.feature_name", rName), + resource.TestCheckResourceAttr(resourceName, "feature_definition.0.feature_type", "String"), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("feature-group/%s", rName)), ), }, { @@ -184,7 +189,11 @@ resource "aws_sagemaker_feature_group" "test" { feature_definition { feature_name = %[1]q feature_type = "String" - } + } + + online_store_config { + enable_online_store = true + } } `, rName) } From e43e8af90fe348cef61ddd1dc4d2f9b6f0211255 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 11 Dec 2020 20:34:24 +0200 Subject: [PATCH 0661/1212] add tests for multi feature, desc, online kms --- ...source_aws_sagemaker_feature_group_test.go | 163 ++++++++++++++++++ 1 file changed, 163 insertions(+) diff --git a/aws/resource_aws_sagemaker_feature_group_test.go b/aws/resource_aws_sagemaker_feature_group_test.go index 2c5a6fffa7d..ca716189871 100644 --- a/aws/resource_aws_sagemaker_feature_group_test.go +++ b/aws/resource_aws_sagemaker_feature_group_test.go @@ -91,6 +91,94 @@ func TestAccAWSSagemakerFeatureGroup_basic(t *testing.T) { }) } +func TestAccAWSSagemakerFeatureGroup_description(t *testing.T) { + var notebook sagemaker.DescribeFeatureGroupOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_feature_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerFeatureGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerFeatureGroupDescriptionConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerFeatureGroupExists(resourceName, ¬ebook), + resource.TestCheckResourceAttr(resourceName, "feature_group_name", rName), + resource.TestCheckResourceAttr(resourceName, "description", rName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSagemakerFeatureGroup_multipleFeatures(t *testing.T) { + var notebook sagemaker.DescribeFeatureGroupOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_feature_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerFeatureGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerFeatureGroupConfigMultiFeature(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerFeatureGroupExists(resourceName, ¬ebook), + resource.TestCheckResourceAttr(resourceName, "feature_group_name", rName), + resource.TestCheckResourceAttr(resourceName, "feature_definition.#", "2"), + resource.TestCheckResourceAttr(resourceName, 
"feature_definition.0.feature_name", rName), + resource.TestCheckResourceAttr(resourceName, "feature_definition.0.feature_type", "String"), + resource.TestCheckResourceAttr(resourceName, "feature_definition.1.feature_name", fmt.Sprintf("%s-2", rName)), + resource.TestCheckResourceAttr(resourceName, "feature_definition.1.feature_type", "Integral"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSagemakerFeatureGroup_onlineConfigSecurityConfig(t *testing.T) { + var notebook sagemaker.DescribeFeatureGroupOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_feature_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerFeatureGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerFeatureGroupOnlineSecurityConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerFeatureGroupExists(resourceName, ¬ebook), + resource.TestCheckResourceAttr(resourceName, "feature_group_name", rName), + resource.TestCheckResourceAttr(resourceName, "online_store_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "online_store_config.0.enable_online_store", "true"), + resource.TestCheckResourceAttr(resourceName, "online_store_config.0.security_config.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "online_store_config.0.security_config.0.kms_key_id", "aws_kms_key.test", "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAWSSagemakerFeatureGroup_disappears(t *testing.T) { var notebook sagemaker.DescribeFeatureGroupOutput rName := acctest.RandomWithPrefix("tf-acc-test") @@ -197,3 +285,78 @@ resource "aws_sagemaker_feature_group" "test" { } `, rName) } + +func testAccAWSSagemakerFeatureGroupDescriptionConfig(rName string) string { + return testAccAWSSagemakerFeatureGroupBaseConfig(rName) + fmt.Sprintf(` +resource "aws_sagemaker_feature_group" "test" { + feature_group_name = %[1]q + record_identifier_feature_name = %[1]q + event_time_feature_name = %[1]q + role_arn = aws_iam_role.test.arn + description = %[1]q + + feature_definition { + feature_name = %[1]q + feature_type = "String" + } + + online_store_config { + enable_online_store = true + } +} +`, rName) +} + +func testAccAWSSagemakerFeatureGroupConfigMultiFeature(rName string) string { + return testAccAWSSagemakerFeatureGroupBaseConfig(rName) + fmt.Sprintf(` +resource "aws_sagemaker_feature_group" "test" { + feature_group_name = %[1]q + record_identifier_feature_name = %[1]q + event_time_feature_name = %[1]q + role_arn = aws_iam_role.test.arn + + feature_definition { + feature_name = %[1]q + feature_type = "String" + } + + feature_definition { + feature_name = "%[1]s-2" + feature_type = "Integral" + } + + online_store_config { + enable_online_store = true + } +} +`, rName) +} + +func testAccAWSSagemakerFeatureGroupOnlineSecurityConfig(rName string) string { + return testAccAWSSagemakerFeatureGroupBaseConfig(rName) + fmt.Sprintf(` +resource "aws_kms_key" "test" { + description = %[1]q + deletion_window_in_days = 7 +} + +resource "aws_sagemaker_feature_group" "test" { + feature_group_name = %[1]q + record_identifier_feature_name = %[1]q + event_time_feature_name = %[1]q + role_arn = aws_iam_role.test.arn + + feature_definition { + feature_name = %[1]q + feature_type = "String" + } + + 
online_store_config { + enable_online_store = true + + security_config { + kms_key_id = aws_kms_key.test.arn + } + } +} +`, rName) +} From e000f739007e465dd74713ae5886236552d109c3 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 11 Dec 2020 23:01:23 +0200 Subject: [PATCH 0662/1212] offline config changes and test --- aws/resource_aws_sagemaker_feature_group.go | 12 +- ...source_aws_sagemaker_feature_group_test.go | 236 ++++++++++++++++++ 2 files changed, 244 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_sagemaker_feature_group.go b/aws/resource_aws_sagemaker_feature_group.go index 0c4f4c1ad74..402b87aebcb 100644 --- a/aws/resource_aws_sagemaker_feature_group.go +++ b/aws/resource_aws_sagemaker_feature_group.go @@ -108,21 +108,25 @@ func resourceAwsSagemakerFeatureGroup() *schema.Resource { Schema: map[string]*schema.Schema{ "data_catalog_config": { Type: schema.TypeList, - Required: true, + Optional: true, + Computed: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "catalog": { Type: schema.TypeString, - Required: true, + Optional: true, + Computed: true, }, "database": { Type: schema.TypeString, - Required: true, + Optional: true, + Computed: true, }, "table_name": { Type: schema.TypeString, - Required: true, + Optional: true, + Computed: true, }, }, }, diff --git a/aws/resource_aws_sagemaker_feature_group_test.go b/aws/resource_aws_sagemaker_feature_group_test.go index ca716189871..9d6bd0df09d 100644 --- a/aws/resource_aws_sagemaker_feature_group_test.go +++ b/aws/resource_aws_sagemaker_feature_group_test.go @@ -3,6 +3,7 @@ package aws import ( "fmt" "log" + "regexp" "testing" "github.com/aws/aws-sdk-go/aws" @@ -80,6 +81,7 @@ func TestAccAWSSagemakerFeatureGroup_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "feature_definition.0.feature_name", rName), resource.TestCheckResourceAttr(resourceName, "feature_definition.0.feature_type", "String"), testAccCheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("feature-group/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "offline_store_config.#", "0"), ), }, { @@ -179,6 +181,107 @@ func TestAccAWSSagemakerFeatureGroup_onlineConfigSecurityConfig(t *testing.T) { }) } +func TestAccAWSSagemakerFeatureGroup_offlineConfig_basic(t *testing.T) { + var notebook sagemaker.DescribeFeatureGroupOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_feature_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerFeatureGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerFeatureGroupOfflineBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerFeatureGroupExists(resourceName, ¬ebook), + resource.TestCheckResourceAttr(resourceName, "feature_group_name", rName), + resource.TestCheckResourceAttr(resourceName, "offline_store_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "offline_store_config.0.disable_glue_table_creation", "true"), + resource.TestCheckResourceAttr(resourceName, "offline_store_config.0.s3_storage_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "offline_store_config.0.s3_storage_config.0.s3_uri", fmt.Sprintf("s3://%s/prefix/", rName)), + resource.TestCheckResourceAttr(resourceName, "offline_store_config.0.data_catalog_config.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + 
ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSagemakerFeatureGroup_offlineConfig_createCatalog(t *testing.T) { + var notebook sagemaker.DescribeFeatureGroupOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_feature_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerFeatureGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerFeatureGroupOfflineCreateGlueCatalogConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerFeatureGroupExists(resourceName, ¬ebook), + resource.TestCheckResourceAttr(resourceName, "feature_group_name", rName), + resource.TestCheckResourceAttr(resourceName, "offline_store_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "offline_store_config.0.disable_glue_table_creation", "false"), + resource.TestCheckResourceAttr(resourceName, "offline_store_config.0.s3_storage_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "offline_store_config.0.s3_storage_config.0.s3_uri", fmt.Sprintf("s3://%s/prefix/", rName)), + resource.TestCheckResourceAttr(resourceName, "offline_store_config.0.data_catalog_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "offline_store_config.0.data_catalog_config.0.catalog", "AwsDataCatalog"), + resource.TestCheckResourceAttr(resourceName, "offline_store_config.0.data_catalog_config.0.database", "sagemaker_featurestore"), + resource.TestMatchResourceAttr(resourceName, "offline_store_config.0.data_catalog_config.0.table_name", regexp.MustCompile(fmt.Sprintf("^%s-", rName))), + // testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSagemakerFeatureGroup_offlineConfig_providedCatalog(t *testing.T) { + var notebook sagemaker.DescribeFeatureGroupOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_feature_group.test" + glueTableResourceName := "aws_glue_catalog_table.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerFeatureGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerFeatureGroupOfflineCreateGlueCatalogConfigProvidedCatalog(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerFeatureGroupExists(resourceName, ¬ebook), + resource.TestCheckResourceAttr(resourceName, "feature_group_name", rName), + resource.TestCheckResourceAttr(resourceName, "offline_store_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "offline_store_config.0.disable_glue_table_creation", "true"), + resource.TestCheckResourceAttr(resourceName, "offline_store_config.0.s3_storage_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "offline_store_config.0.s3_storage_config.0.s3_uri", fmt.Sprintf("s3://%s/prefix/", rName)), + resource.TestCheckResourceAttr(resourceName, "offline_store_config.0.data_catalog_config.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "offline_store_config.0.data_catalog_config.0.catalog", glueTableResourceName, "catalog_id"), + resource.TestCheckResourceAttrPair(resourceName, "offline_store_config.0.data_catalog_config.0.database", glueTableResourceName, "database_name"), + resource.TestCheckResourceAttrPair(resourceName, 
"offline_store_config.0.data_catalog_config.0.table_name", glueTableResourceName, "name"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAWSSagemakerFeatureGroup_disappears(t *testing.T) { var notebook sagemaker.DescribeFeatureGroupOutput rName := acctest.RandomWithPrefix("tf-acc-test") @@ -247,6 +350,8 @@ func testAccCheckAWSSagemakerFeatureGroupExists(n string, codeRepo *sagemaker.De func testAccAWSSagemakerFeatureGroupBaseConfig(rName string) string { return fmt.Sprintf(` +data "aws_partition" "current" {} + resource "aws_iam_role" "test" { name = %[1]q path = "/" @@ -263,6 +368,26 @@ data "aws_iam_policy_document" "test" { } } } + +resource "aws_iam_role_policy_attachment" "test" { + role = aws_iam_role.test.name + policy_arn = aws_iam_policy.test.arn +} + +resource "aws_iam_policy" "test" { + policy = < Date: Fri, 11 Dec 2020 23:20:49 +0200 Subject: [PATCH 0663/1212] add tags test --- ...source_aws_sagemaker_feature_group_test.go | 96 +++++++++++++++++++ 1 file changed, 96 insertions(+) diff --git a/aws/resource_aws_sagemaker_feature_group_test.go b/aws/resource_aws_sagemaker_feature_group_test.go index 9d6bd0df09d..7359ea15f07 100644 --- a/aws/resource_aws_sagemaker_feature_group_test.go +++ b/aws/resource_aws_sagemaker_feature_group_test.go @@ -120,6 +120,53 @@ func TestAccAWSSagemakerFeatureGroup_description(t *testing.T) { }) } +func TestAccAWSSagemakerFeatureGroup_tags(t *testing.T) { + var notebook sagemaker.DescribeFeatureGroupOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_feature_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerFeatureGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerFeatureGroupTags1(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerFeatureGroupExists(resourceName, ¬ebook), + resource.TestCheckResourceAttr(resourceName, "feature_group_name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSSagemakerFeatureGroupTags2(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerFeatureGroupExists(resourceName, ¬ebook), + resource.TestCheckResourceAttr(resourceName, "feature_group_name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccAWSSagemakerFeatureGroupTags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerFeatureGroupExists(resourceName, ¬ebook), + resource.TestCheckResourceAttr(resourceName, "feature_group_name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + func TestAccAWSSagemakerFeatureGroup_multipleFeatures(t *testing.T) { var notebook sagemaker.DescribeFeatureGroupOutput rName := acctest.RandomWithPrefix("tf-acc-test") @@ -596,3 +643,52 @@ resource "aws_sagemaker_feature_group" "test" { } `, rName) } + +func 
testAccAWSSagemakerFeatureGroupTags1(rName, tag1Key, tag1Value string) string { + return testAccAWSSagemakerFeatureGroupBaseConfig(rName) + fmt.Sprintf(` +resource "aws_sagemaker_feature_group" "test" { + feature_group_name = %[1]q + record_identifier_feature_name = %[1]q + event_time_feature_name = %[1]q + role_arn = aws_iam_role.test.arn + + feature_definition { + feature_name = %[1]q + feature_type = "String" + } + + online_store_config { + enable_online_store = true + } + + tags = { + %[2]q = %[3]q + } +} +`, rName, tag1Key, tag1Value) +} + +func testAccAWSSagemakerFeatureGroupTags2(rName, tag1Key, tag1Value, tag2Key, tag2Value string) string { + return testAccAWSSagemakerFeatureGroupBaseConfig(rName) + fmt.Sprintf(` +resource "aws_sagemaker_feature_group" "test" { + feature_group_name = %[1]q + record_identifier_feature_name = %[1]q + event_time_feature_name = %[1]q + role_arn = aws_iam_role.test.arn + + feature_definition { + feature_name = %[1]q + feature_type = "String" + } + + online_store_config { + enable_online_store = true + } + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tag1Key, tag1Value, tag2Key, tag2Value) +} From b4966b1ff4405fb01624621124de82942f908a00 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 11 Dec 2020 23:29:08 +0200 Subject: [PATCH 0664/1212] fix tags + fmt --- aws/resource_aws_sagemaker_feature_group.go | 13 ++++++++- ...source_aws_sagemaker_feature_group_test.go | 28 +++++++++---------- 2 files changed, 26 insertions(+), 15 deletions(-) diff --git a/aws/resource_aws_sagemaker_feature_group.go b/aws/resource_aws_sagemaker_feature_group.go index 402b87aebcb..c40ea0727bd 100644 --- a/aws/resource_aws_sagemaker_feature_group.go +++ b/aws/resource_aws_sagemaker_feature_group.go @@ -237,6 +237,7 @@ func resourceAwsSagemakerFeatureGroupCreate(d *schema.ResourceData, meta interfa func resourceAwsSagemakerFeatureGroupRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).sagemakerconn + ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig output, err := finder.FeatureGroupByName(conn, d.Id()) if err != nil { @@ -249,12 +250,13 @@ func resourceAwsSagemakerFeatureGroupRead(d *schema.ResourceData, meta interface } + arn := aws.StringValue(output.FeatureGroupArn) d.Set("feature_group_name", output.FeatureGroupName) d.Set("event_time_feature_name", output.EventTimeFeatureName) d.Set("description", output.Description) d.Set("record_identifier_feature_name", output.RecordIdentifierFeatureName) d.Set("role_arn", output.RoleArn) - d.Set("arn", output.FeatureGroupArn) + d.Set("arn", arn) if err := d.Set("feature_definition", flattenSagemakerFeatureGroupFeatureDefinition(output.FeatureDefinitions)); err != nil { return fmt.Errorf("error setting feature_definition for Sagemaker Feature Group (%s): %w", d.Id(), err) @@ -268,6 +270,15 @@ func resourceAwsSagemakerFeatureGroupRead(d *schema.ResourceData, meta interface return fmt.Errorf("error setting offline_store_config for Sagemaker Feature Group (%s): %w", d.Id(), err) } + tags, err := keyvaluetags.SagemakerListTags(conn, arn) + if err != nil { + return fmt.Errorf("error listing tags for Sagemaker Feature Group (%s): %w", d.Id(), err) + } + + if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + return fmt.Errorf("error setting tags: %w", err) + } + return nil } diff --git a/aws/resource_aws_sagemaker_feature_group_test.go b/aws/resource_aws_sagemaker_feature_group_test.go index 7359ea15f07..c88c38a7087 100644 --- 
a/aws/resource_aws_sagemaker_feature_group_test.go +++ b/aws/resource_aws_sagemaker_feature_group_test.go @@ -447,13 +447,13 @@ resource "aws_sagemaker_feature_group" "test" { role_arn = aws_iam_role.test.arn feature_definition { - feature_name = %[1]q + feature_name = %[1]q feature_type = "String" } online_store_config { - enable_online_store = true - } + enable_online_store = true + } } `, rName) } @@ -468,13 +468,13 @@ resource "aws_sagemaker_feature_group" "test" { description = %[1]q feature_definition { - feature_name = %[1]q + feature_name = %[1]q feature_type = "String" } online_store_config { enable_online_store = true - } + } } `, rName) } @@ -488,18 +488,18 @@ resource "aws_sagemaker_feature_group" "test" { role_arn = aws_iam_role.test.arn feature_definition { - feature_name = %[1]q + feature_name = %[1]q feature_type = "String" } feature_definition { - feature_name = "%[1]s-2" + feature_name = "%[1]s-2" feature_type = "Integral" } online_store_config { enable_online_store = true - } + } } `, rName) } @@ -518,7 +518,7 @@ resource "aws_sagemaker_feature_group" "test" { role_arn = aws_iam_role.test.arn feature_definition { - feature_name = %[1]q + feature_name = %[1]q feature_type = "String" } @@ -548,7 +548,7 @@ resource "aws_sagemaker_feature_group" "test" { role_arn = aws_iam_role.test.arn feature_definition { - feature_name = %[1]q + feature_name = %[1]q feature_type = "String" } @@ -580,7 +580,7 @@ resource "aws_sagemaker_feature_group" "test" { role_arn = aws_iam_role.test.arn feature_definition { - feature_name = %[1]q + feature_name = %[1]q feature_type = "String" } @@ -621,7 +621,7 @@ resource "aws_sagemaker_feature_group" "test" { role_arn = aws_iam_role.test.arn feature_definition { - feature_name = %[1]q + feature_name = %[1]q feature_type = "String" } @@ -653,7 +653,7 @@ resource "aws_sagemaker_feature_group" "test" { role_arn = aws_iam_role.test.arn feature_definition { - feature_name = %[1]q + feature_name = %[1]q feature_type = "String" } @@ -677,7 +677,7 @@ resource "aws_sagemaker_feature_group" "test" { role_arn = aws_iam_role.test.arn feature_definition { - feature_name = %[1]q + feature_name = %[1]q feature_type = "String" } From 5ca9d936d14ef8de334b8806110ab66db39e7d7f Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 11 Dec 2020 23:35:33 +0200 Subject: [PATCH 0665/1212] fmt --- ...source_aws_sagemaker_feature_group_test.go | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/aws/resource_aws_sagemaker_feature_group_test.go b/aws/resource_aws_sagemaker_feature_group_test.go index c88c38a7087..0275231c318 100644 --- a/aws/resource_aws_sagemaker_feature_group_test.go +++ b/aws/resource_aws_sagemaker_feature_group_test.go @@ -585,11 +585,11 @@ resource "aws_sagemaker_feature_group" "test" { } offline_store_config { - disable_glue_table_creation = false + disable_glue_table_creation = false - s3_storage_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/prefix/" - } + s3_storage_config { + s3_uri = "s3://${aws_s3_bucket.test.bucket}/prefix/" + } } depends_on = [aws_iam_role_policy_attachment.test] @@ -626,17 +626,17 @@ resource "aws_sagemaker_feature_group" "test" { } offline_store_config { - disable_glue_table_creation = true + disable_glue_table_creation = true - s3_storage_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/prefix/" - } + s3_storage_config { + s3_uri = "s3://${aws_s3_bucket.test.bucket}/prefix/" + } - data_catalog_config { - catalog = aws_glue_catalog_table.test.catalog_id - database = 
aws_glue_catalog_table.test.database_name - table_name = aws_glue_catalog_table.test.name - } + data_catalog_config { + catalog = aws_glue_catalog_table.test.catalog_id + database = aws_glue_catalog_table.test.database_name + table_name = aws_glue_catalog_table.test.name + } } depends_on = [aws_iam_role_policy_attachment.test] @@ -658,7 +658,7 @@ resource "aws_sagemaker_feature_group" "test" { } online_store_config { - enable_online_store = true + enable_online_store = true } tags = { @@ -682,11 +682,11 @@ resource "aws_sagemaker_feature_group" "test" { } online_store_config { - enable_online_store = true + enable_online_store = true } tags = { - %[2]q = %[3]q + %[2]q = %[3]q %[4]q = %[5]q } } From 0eccbe273f67130a2763b8d811a19a995cf38a5a Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 11 Dec 2020 23:40:42 +0200 Subject: [PATCH 0666/1212] fmt --- ...esource_aws_sagemaker_feature_group_test.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/aws/resource_aws_sagemaker_feature_group_test.go b/aws/resource_aws_sagemaker_feature_group_test.go index 0275231c318..3aac12f30ba 100644 --- a/aws/resource_aws_sagemaker_feature_group_test.go +++ b/aws/resource_aws_sagemaker_feature_group_test.go @@ -498,7 +498,7 @@ resource "aws_sagemaker_feature_group" "test" { } online_store_config { - enable_online_store = true + enable_online_store = true } } `, rName) @@ -523,11 +523,11 @@ resource "aws_sagemaker_feature_group" "test" { } online_store_config { - enable_online_store = true + enable_online_store = true - security_config { - kms_key_id = aws_kms_key.test.arn - } + security_config { + kms_key_id = aws_kms_key.test.arn + } } } `, rName) @@ -553,11 +553,11 @@ resource "aws_sagemaker_feature_group" "test" { } offline_store_config { - disable_glue_table_creation = true + disable_glue_table_creation = true - s3_storage_config { - s3_uri = "s3://${aws_s3_bucket.test.bucket}/prefix/" - } + s3_storage_config { + s3_uri = "s3://${aws_s3_bucket.test.bucket}/prefix/" + } } depends_on = [aws_iam_role_policy_attachment.test] From 08fd185240728fa46ca260911d51aa16aa4eb3a2 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 11 Dec 2020 23:42:21 +0200 Subject: [PATCH 0667/1212] fmt 2 --- aws/resource_aws_sagemaker_feature_group_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_sagemaker_feature_group_test.go b/aws/resource_aws_sagemaker_feature_group_test.go index 3aac12f30ba..4971aff8b5e 100644 --- a/aws/resource_aws_sagemaker_feature_group_test.go +++ b/aws/resource_aws_sagemaker_feature_group_test.go @@ -473,7 +473,7 @@ resource "aws_sagemaker_feature_group" "test" { } online_store_config { - enable_online_store = true + enable_online_store = true } } `, rName) From f6b03c33d95d412757e9c4167481b66adda30fef Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 11 Dec 2020 23:48:24 +0200 Subject: [PATCH 0668/1212] fmt 3 --- aws/resource_aws_sagemaker_feature_group_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_sagemaker_feature_group_test.go b/aws/resource_aws_sagemaker_feature_group_test.go index 4971aff8b5e..7edc9712456 100644 --- a/aws/resource_aws_sagemaker_feature_group_test.go +++ b/aws/resource_aws_sagemaker_feature_group_test.go @@ -528,7 +528,7 @@ resource "aws_sagemaker_feature_group" "test" { security_config { kms_key_id = aws_kms_key.test.arn } - } + } } `, rName) } From 5e29bf895d706fb66fbc60adfccf41598e548482 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 11 Dec 2020 23:58:53 
+0200 Subject: [PATCH 0669/1212] fmt 4 --- aws/resource_aws_sagemaker_feature_group_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_sagemaker_feature_group_test.go b/aws/resource_aws_sagemaker_feature_group_test.go index 7edc9712456..ea6a6af4991 100644 --- a/aws/resource_aws_sagemaker_feature_group_test.go +++ b/aws/resource_aws_sagemaker_feature_group_test.go @@ -528,7 +528,7 @@ resource "aws_sagemaker_feature_group" "test" { security_config { kms_key_id = aws_kms_key.test.arn } - } + } } `, rName) } From 262273b94e601f1c5047e9fecdace38624b45140 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 12 Dec 2020 00:03:40 +0200 Subject: [PATCH 0670/1212] fmt 5 --- aws/resource_aws_sagemaker_feature_group_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_sagemaker_feature_group_test.go b/aws/resource_aws_sagemaker_feature_group_test.go index ea6a6af4991..0094493bc01 100644 --- a/aws/resource_aws_sagemaker_feature_group_test.go +++ b/aws/resource_aws_sagemaker_feature_group_test.go @@ -687,7 +687,7 @@ resource "aws_sagemaker_feature_group" "test" { tags = { %[2]q = %[3]q - %[4]q = %[5]q + %[4]q = %[5]q } } `, rName, tag1Key, tag1Value, tag2Key, tag2Value) From a66bdcc363ffb6b50c78bd3753f76cd051f26854 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 20 Dec 2020 10:10:44 +0200 Subject: [PATCH 0671/1212] add docs --- .../r/sagemaker_feature_group.html.markdown | 92 +++++++++++++++++++ 1 file changed, 92 insertions(+) create mode 100644 website/docs/r/sagemaker_feature_group.html.markdown diff --git a/website/docs/r/sagemaker_feature_group.html.markdown b/website/docs/r/sagemaker_feature_group.html.markdown new file mode 100644 index 00000000000..45474cd7398 --- /dev/null +++ b/website/docs/r/sagemaker_feature_group.html.markdown @@ -0,0 +1,92 @@ +--- +subcategory: "Sagemaker" +layout: "aws" +page_title: "AWS: aws_sagemaker_feature_group" +description: |- + Provides a SageMaker Feature Group resource. +--- + +# Resource: aws_sagemaker_feature_group + +Provides a SageMaker Feature Group resource. + +## Example Usage + +Basic usage: + +```hcl +resource "aws_sagemaker_feature_group" "example" { + feature_group_name = "example" + record_identifier_feature_name = "example" + event_time_feature_name = "example" + role_arn = aws_iam_role.test.arn + + feature_definition { + feature_name = "example" + feature_type = "String" + } + + online_store_config { + enable_online_store = true + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `feature_group_name` - (Required) The name of the Feature Group. The name must be unique within an AWS Region in an AWS account. +* `record_identifier_feature_name` - (Required) The name of the Feature whose value uniquely identifies a Record defined in the Feature Store. Only the latest record per identifier value will be stored in the Online Store. +* `event_time_feature_name` - (Required) The name of the feature that stores the EventTime of a Record in a Feature Group. +* `description` (Optional) - A free-form description of a Feature Group. +* `role_arn` (Required) - The Amazon Resource Name (ARN) of the IAM execution role used to persist data into the Offline Store if an `offline_store_config` is provided. +* `feature_definition` (Optional) - A list of Feature names and types. See [Feature Definition](#feature-definition) Below. +* `offline_store_config` (Optional) - The Offline Feature Store Configuration. 
See [Offline Store Config](#offline-store-config) Below.
+* `online_store_config` (Optional) - The Online Feature Store Configuration. See [Online Store Config](#online-store-config) Below.
+
+### Feature Definition
+
+* `feature_name` - (Required) The name of a feature. `feature_name` cannot be any of the following: `is_deleted`, `write_time`, `api_invocation_time`.
+* `feature_type` - (Required) The value type of a feature. Valid values are `Integral`, `Fractional`, or `String`.
+
+### Offline Store Config
+
+* `disable_glue_table_creation` - (Optional) Set to `true` to disable the automatic creation of an AWS Glue table when configuring an OfflineStore.
+* `s3_storage_config` - (Required) The Amazon Simple Storage Service (Amazon S3) location of OfflineStore. See [S3 Storage Config](#s3-storage-config) Below.
+* `data_catalog_config` - (Optional) The metadata of the Glue table that is autogenerated when an OfflineStore is created. See [Data Catalog Config](#data-catalog-config) Below.
+
+### Online Store Config
+
+* `enable_online_store` - (Optional) Set to `true` to turn the Online Store on.
+* `security_config` - (Optional) Security config for at-rest encryption of your OnlineStore. See [Security Config](#security-config) Below.
+
+#### S3 Storage Config
+
+* `kms_key_id` - (Optional) The AWS Key Management Service (KMS) key ID of the key used to encrypt any objects written into the OfflineStore S3 location.
+* `s3_uri` - (Required) The S3 URI, or location in Amazon S3, of OfflineStore.
+
+#### Data Catalog Config
+
+* `catalog` - (Optional) The name of the Glue table catalog.
+* `database` - (Optional) The name of the Glue table database.
+* `table_name` - (Optional) The name of the Glue table.
+
+#### Security Config
+
+* `kms_key_id` - (Optional) The ID of the AWS Key Management Service (AWS KMS) key that SageMaker Feature Store uses to encrypt the Amazon S3 objects at rest using Amazon S3 server-side encryption.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `name` - The name of the Feature Group.
+* `arn` - The Amazon Resource Name (ARN) assigned by AWS to this Feature Group.
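+
+As a further illustration of the arguments above, a Feature Group backed by an Offline Store might be configured as in the following sketch; the `aws_iam_role.test` and `aws_s3_bucket.test` resources are placeholders assumed to be defined elsewhere in the configuration:
+
+```hcl
+resource "aws_sagemaker_feature_group" "example" {
+  feature_group_name             = "example"
+  record_identifier_feature_name = "example"
+  event_time_feature_name        = "example"
+  role_arn                       = aws_iam_role.test.arn
+
+  feature_definition {
+    feature_name = "example"
+    feature_type = "String"
+  }
+
+  offline_store_config {
+    disable_glue_table_creation = true
+
+    s3_storage_config {
+      s3_uri = "s3://${aws_s3_bucket.test.bucket}/prefix/"
+    }
+  }
+}
+```
+
+## Import
+
+Feature Groups can be imported using the `name`, e.g.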
+ +``` +$ terraform import aws_sagemaker_feature_group.test_feature_group feature_group-foo +``` From c56a806cc211d5af10ff53ad38aea64659dc6fda Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 20 Dec 2020 10:12:28 +0200 Subject: [PATCH 0672/1212] fmt --- website/docs/r/sagemaker_feature_group.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/sagemaker_feature_group.html.markdown b/website/docs/r/sagemaker_feature_group.html.markdown index 45474cd7398..e27c72bef4b 100644 --- a/website/docs/r/sagemaker_feature_group.html.markdown +++ b/website/docs/r/sagemaker_feature_group.html.markdown @@ -25,7 +25,7 @@ resource "aws_sagemaker_feature_group" "example" { feature_name = "example" feature_type = "String" } - + online_store_config { enable_online_store = true } From 3432bf67de46e07d30c1f3b91ae795f9bc67ba0c Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Tue, 5 Jan 2021 22:08:32 +0200 Subject: [PATCH 0673/1212] sagemaker alpahbet --- aws/provider.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/provider.go b/aws/provider.go index 8e0a31ad6a1..711d160d639 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -875,10 +875,10 @@ func Provider() *schema.Provider { "aws_sagemaker_code_repository": resourceAwsSagemakerCodeRepository(), "aws_sagemaker_domain": resourceAwsSagemakerDomain(), "aws_sagemaker_feature_group": resourceAwsSagemakerFeatureGroup(), - "aws_sagemaker_model": resourceAwsSagemakerModel(), + "aws_sagemaker_endpoint": resourceAwsSagemakerEndpoint(), "aws_sagemaker_endpoint_configuration": resourceAwsSagemakerEndpointConfiguration(), + "aws_sagemaker_feature_group": resourceAwsSagemakerFeatureGroup(), "aws_sagemaker_image": resourceAwsSagemakerImage(), - "aws_sagemaker_endpoint": resourceAwsSagemakerEndpoint(), "aws_sagemaker_model": resourceAwsSagemakerModel(), "aws_sagemaker_notebook_instance_lifecycle_configuration": resourceAwsSagemakerNotebookInstanceLifeCycleConfiguration(), "aws_sagemaker_notebook_instance": resourceAwsSagemakerNotebookInstance(), From f5412a0d4ebb94ea2055b11c6db50f1783f41bbc Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 8 Jan 2021 20:54:30 +0200 Subject: [PATCH 0674/1212] retry on iam error --- aws/resource_aws_sagemaker_feature_group.go | 20 +++++++++- ...source_aws_sagemaker_feature_group_test.go | 40 +++++++++---------- 2 files changed, 38 insertions(+), 22 deletions(-) diff --git a/aws/resource_aws_sagemaker_feature_group.go b/aws/resource_aws_sagemaker_feature_group.go index c40ea0727bd..7c3911eff52 100644 --- a/aws/resource_aws_sagemaker_feature_group.go +++ b/aws/resource_aws_sagemaker_feature_group.go @@ -8,9 +8,11 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/sagemaker" "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + iamwaiter "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/iam/waiter" "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/finder" "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/waiter" ) @@ -221,9 +223,23 @@ func resourceAwsSagemakerFeatureGroupCreate(d *schema.ResourceData, meta interfa } log.Printf("[DEBUG] Sagemaker Feature Group create config: %#v", 
*input) - _, err := conn.CreateFeatureGroup(input) + err := resource.Retry(iamwaiter.PropagationTimeout, func() *resource.RetryError { + _, err := conn.CreateFeatureGroup(input) + if err != nil { + if isAWSErr(err, "ValidationException", "The execution role ARN is invalid.") { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + + return nil + }) + if isResourceTimeoutError(err) { + _, err = conn.CreateFeatureGroup(input) + } + if err != nil { - return fmt.Errorf("error creating SageMaker Feature Group: %w", err) + return fmt.Errorf("Error creating SageMaker Feature Group: %w", err) } d.SetId(name) diff --git a/aws/resource_aws_sagemaker_feature_group_test.go b/aws/resource_aws_sagemaker_feature_group_test.go index 0094493bc01..cf2151b8d19 100644 --- a/aws/resource_aws_sagemaker_feature_group_test.go +++ b/aws/resource_aws_sagemaker_feature_group_test.go @@ -59,7 +59,7 @@ func testSweepSagemakerFeatureGroups(region string) error { } func TestAccAWSSagemakerFeatureGroup_basic(t *testing.T) { - var notebook sagemaker.DescribeFeatureGroupOutput + var featureGroup sagemaker.DescribeFeatureGroupOutput rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_sagemaker_feature_group.test" @@ -71,7 +71,7 @@ func TestAccAWSSagemakerFeatureGroup_basic(t *testing.T) { { Config: testAccAWSSagemakerFeatureGroupBasicConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerFeatureGroupExists(resourceName, ¬ebook), + testAccCheckAWSSagemakerFeatureGroupExists(resourceName, &featureGroup), resource.TestCheckResourceAttr(resourceName, "feature_group_name", rName), resource.TestCheckResourceAttr(resourceName, "event_time_feature_name", rName), resource.TestCheckResourceAttr(resourceName, "record_identifier_feature_name", rName), @@ -94,7 +94,7 @@ func TestAccAWSSagemakerFeatureGroup_basic(t *testing.T) { } func TestAccAWSSagemakerFeatureGroup_description(t *testing.T) { - var notebook sagemaker.DescribeFeatureGroupOutput + var featureGroup sagemaker.DescribeFeatureGroupOutput rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_sagemaker_feature_group.test" @@ -106,7 +106,7 @@ func TestAccAWSSagemakerFeatureGroup_description(t *testing.T) { { Config: testAccAWSSagemakerFeatureGroupDescriptionConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerFeatureGroupExists(resourceName, ¬ebook), + testAccCheckAWSSagemakerFeatureGroupExists(resourceName, &featureGroup), resource.TestCheckResourceAttr(resourceName, "feature_group_name", rName), resource.TestCheckResourceAttr(resourceName, "description", rName), ), @@ -121,7 +121,7 @@ func TestAccAWSSagemakerFeatureGroup_description(t *testing.T) { } func TestAccAWSSagemakerFeatureGroup_tags(t *testing.T) { - var notebook sagemaker.DescribeFeatureGroupOutput + var featureGroup sagemaker.DescribeFeatureGroupOutput rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_sagemaker_feature_group.test" @@ -133,7 +133,7 @@ func TestAccAWSSagemakerFeatureGroup_tags(t *testing.T) { { Config: testAccAWSSagemakerFeatureGroupTags1(rName, "key1", "value1"), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerFeatureGroupExists(resourceName, ¬ebook), + testAccCheckAWSSagemakerFeatureGroupExists(resourceName, &featureGroup), resource.TestCheckResourceAttr(resourceName, "feature_group_name", rName), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), @@ -147,7 +147,7 @@ 
func TestAccAWSSagemakerFeatureGroup_tags(t *testing.T) { { Config: testAccAWSSagemakerFeatureGroupTags2(rName, "key1", "value1updated", "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerFeatureGroupExists(resourceName, ¬ebook), + testAccCheckAWSSagemakerFeatureGroupExists(resourceName, &featureGroup), resource.TestCheckResourceAttr(resourceName, "feature_group_name", rName), resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), @@ -157,7 +157,7 @@ func TestAccAWSSagemakerFeatureGroup_tags(t *testing.T) { { Config: testAccAWSSagemakerFeatureGroupTags1(rName, "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerFeatureGroupExists(resourceName, ¬ebook), + testAccCheckAWSSagemakerFeatureGroupExists(resourceName, &featureGroup), resource.TestCheckResourceAttr(resourceName, "feature_group_name", rName), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), @@ -168,7 +168,7 @@ func TestAccAWSSagemakerFeatureGroup_tags(t *testing.T) { } func TestAccAWSSagemakerFeatureGroup_multipleFeatures(t *testing.T) { - var notebook sagemaker.DescribeFeatureGroupOutput + var featureGroup sagemaker.DescribeFeatureGroupOutput rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_sagemaker_feature_group.test" @@ -180,7 +180,7 @@ func TestAccAWSSagemakerFeatureGroup_multipleFeatures(t *testing.T) { { Config: testAccAWSSagemakerFeatureGroupConfigMultiFeature(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerFeatureGroupExists(resourceName, ¬ebook), + testAccCheckAWSSagemakerFeatureGroupExists(resourceName, &featureGroup), resource.TestCheckResourceAttr(resourceName, "feature_group_name", rName), resource.TestCheckResourceAttr(resourceName, "feature_definition.#", "2"), resource.TestCheckResourceAttr(resourceName, "feature_definition.0.feature_name", rName), @@ -199,7 +199,7 @@ func TestAccAWSSagemakerFeatureGroup_multipleFeatures(t *testing.T) { } func TestAccAWSSagemakerFeatureGroup_onlineConfigSecurityConfig(t *testing.T) { - var notebook sagemaker.DescribeFeatureGroupOutput + var featureGroup sagemaker.DescribeFeatureGroupOutput rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_sagemaker_feature_group.test" @@ -211,7 +211,7 @@ func TestAccAWSSagemakerFeatureGroup_onlineConfigSecurityConfig(t *testing.T) { { Config: testAccAWSSagemakerFeatureGroupOnlineSecurityConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerFeatureGroupExists(resourceName, ¬ebook), + testAccCheckAWSSagemakerFeatureGroupExists(resourceName, &featureGroup), resource.TestCheckResourceAttr(resourceName, "feature_group_name", rName), resource.TestCheckResourceAttr(resourceName, "online_store_config.#", "1"), resource.TestCheckResourceAttr(resourceName, "online_store_config.0.enable_online_store", "true"), @@ -229,7 +229,7 @@ func TestAccAWSSagemakerFeatureGroup_onlineConfigSecurityConfig(t *testing.T) { } func TestAccAWSSagemakerFeatureGroup_offlineConfig_basic(t *testing.T) { - var notebook sagemaker.DescribeFeatureGroupOutput + var featureGroup sagemaker.DescribeFeatureGroupOutput rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_sagemaker_feature_group.test" @@ -241,7 +241,7 @@ func TestAccAWSSagemakerFeatureGroup_offlineConfig_basic(t *testing.T) { { Config: testAccAWSSagemakerFeatureGroupOfflineBasicConfig(rName), Check: 
resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerFeatureGroupExists(resourceName, ¬ebook), + testAccCheckAWSSagemakerFeatureGroupExists(resourceName, &featureGroup), resource.TestCheckResourceAttr(resourceName, "feature_group_name", rName), resource.TestCheckResourceAttr(resourceName, "offline_store_config.#", "1"), resource.TestCheckResourceAttr(resourceName, "offline_store_config.0.disable_glue_table_creation", "true"), @@ -260,7 +260,7 @@ func TestAccAWSSagemakerFeatureGroup_offlineConfig_basic(t *testing.T) { } func TestAccAWSSagemakerFeatureGroup_offlineConfig_createCatalog(t *testing.T) { - var notebook sagemaker.DescribeFeatureGroupOutput + var featureGroup sagemaker.DescribeFeatureGroupOutput rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_sagemaker_feature_group.test" @@ -272,7 +272,7 @@ func TestAccAWSSagemakerFeatureGroup_offlineConfig_createCatalog(t *testing.T) { { Config: testAccAWSSagemakerFeatureGroupOfflineCreateGlueCatalogConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerFeatureGroupExists(resourceName, ¬ebook), + testAccCheckAWSSagemakerFeatureGroupExists(resourceName, &featureGroup), resource.TestCheckResourceAttr(resourceName, "feature_group_name", rName), resource.TestCheckResourceAttr(resourceName, "offline_store_config.#", "1"), resource.TestCheckResourceAttr(resourceName, "offline_store_config.0.disable_glue_table_creation", "false"), @@ -295,7 +295,7 @@ func TestAccAWSSagemakerFeatureGroup_offlineConfig_createCatalog(t *testing.T) { } func TestAccAWSSagemakerFeatureGroup_offlineConfig_providedCatalog(t *testing.T) { - var notebook sagemaker.DescribeFeatureGroupOutput + var featureGroup sagemaker.DescribeFeatureGroupOutput rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_sagemaker_feature_group.test" glueTableResourceName := "aws_glue_catalog_table.test" @@ -308,7 +308,7 @@ func TestAccAWSSagemakerFeatureGroup_offlineConfig_providedCatalog(t *testing.T) { Config: testAccAWSSagemakerFeatureGroupOfflineCreateGlueCatalogConfigProvidedCatalog(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerFeatureGroupExists(resourceName, ¬ebook), + testAccCheckAWSSagemakerFeatureGroupExists(resourceName, &featureGroup), resource.TestCheckResourceAttr(resourceName, "feature_group_name", rName), resource.TestCheckResourceAttr(resourceName, "offline_store_config.#", "1"), resource.TestCheckResourceAttr(resourceName, "offline_store_config.0.disable_glue_table_creation", "true"), @@ -330,7 +330,7 @@ func TestAccAWSSagemakerFeatureGroup_offlineConfig_providedCatalog(t *testing.T) } func TestAccAWSSagemakerFeatureGroup_disappears(t *testing.T) { - var notebook sagemaker.DescribeFeatureGroupOutput + var featureGroup sagemaker.DescribeFeatureGroupOutput rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_sagemaker_feature_group.test" @@ -342,7 +342,7 @@ func TestAccAWSSagemakerFeatureGroup_disappears(t *testing.T) { { Config: testAccAWSSagemakerFeatureGroupBasicConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerFeatureGroupExists(resourceName, ¬ebook), + testAccCheckAWSSagemakerFeatureGroupExists(resourceName, &featureGroup), testAccCheckResourceDisappears(testAccProvider, resourceAwsSagemakerFeatureGroup(), resourceName), ), ExpectNonEmptyPlan: true, From 0ae30519972ed7a8d69d84cc836deab798673314 Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Fri, 8 Jan 2021 20:55:32 +0200 Subject: [PATCH 0675/1212] Update 
aws/resource_aws_sagemaker_feature_group.go Co-authored-by: Kit Ewbank --- aws/resource_aws_sagemaker_feature_group.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_sagemaker_feature_group.go b/aws/resource_aws_sagemaker_feature_group.go index 7c3911eff52..e6a05649b0d 100644 --- a/aws/resource_aws_sagemaker_feature_group.go +++ b/aws/resource_aws_sagemaker_feature_group.go @@ -305,7 +305,7 @@ func resourceAwsSagemakerFeatureGroupUpdate(d *schema.ResourceData, meta interfa o, n := d.GetChange("tags") if err := keyvaluetags.SagemakerUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { - return fmt.Errorf("error updating Sagemaker Feature Store (%s) tags: %w", d.Id(), err) + return fmt.Errorf("error updating SageMaker Feature Group (%s) tags: %w", d.Id(), err) } } From 3c94f1170c24866dc346e454c2dd8ebb764ef6fc Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 14 Jan 2021 09:50:09 +0200 Subject: [PATCH 0676/1212] order resources --- aws/provider.go | 1 - 1 file changed, 1 deletion(-) diff --git a/aws/provider.go b/aws/provider.go index 711d160d639..b13cd87515f 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -874,7 +874,6 @@ func Provider() *schema.Provider { "aws_route_table_association": resourceAwsRouteTableAssociation(), "aws_sagemaker_code_repository": resourceAwsSagemakerCodeRepository(), "aws_sagemaker_domain": resourceAwsSagemakerDomain(), - "aws_sagemaker_feature_group": resourceAwsSagemakerFeatureGroup(), "aws_sagemaker_endpoint": resourceAwsSagemakerEndpoint(), "aws_sagemaker_endpoint_configuration": resourceAwsSagemakerEndpointConfiguration(), "aws_sagemaker_feature_group": resourceAwsSagemakerFeatureGroup(), From a7232267e3910d14dd4b7f65314496814a14b696 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 14 Jan 2021 05:00:41 -0500 Subject: [PATCH 0677/1212] align account assignment resource w/service design --- .../service/ssoadmin/finder/finder.go | 31 ++ .../service/ssoadmin/waiter/status.go | 44 ++ .../service/ssoadmin/waiter/waiter.go | 40 ++ aws/provider.go | 2 +- aws/resource_aws_sso_assignment.go | 429 ------------------ aws/resource_aws_sso_assignment_test.go | 304 ------------- ...esource_aws_ssoadmin_account_assignment.go | 229 ++++++++++ ...ce_aws_ssoadmin_account_assignment_test.go | 242 ++++++++++ aws/resource_aws_ssoadmin_permission_set.go | 2 +- website/docs/r/sso_assignment.html.markdown | 63 --- .../ssoadmin_account_assignment.html.markdown | 67 +++ 11 files changed, 655 insertions(+), 798 deletions(-) delete mode 100644 aws/resource_aws_sso_assignment.go delete mode 100644 aws/resource_aws_sso_assignment_test.go create mode 100644 aws/resource_aws_ssoadmin_account_assignment.go create mode 100644 aws/resource_aws_ssoadmin_account_assignment_test.go delete mode 100644 website/docs/r/sso_assignment.html.markdown create mode 100644 website/docs/r/ssoadmin_account_assignment.html.markdown diff --git a/aws/internal/service/ssoadmin/finder/finder.go b/aws/internal/service/ssoadmin/finder/finder.go index 0592c5775d8..dc3465d8f3e 100644 --- a/aws/internal/service/ssoadmin/finder/finder.go +++ b/aws/internal/service/ssoadmin/finder/finder.go @@ -5,6 +5,37 @@ import ( "github.com/aws/aws-sdk-go/service/ssoadmin" ) +// AccountAssignment returns the account assigned to a permission set within a specified SSO instance. +// Returns an error if no account assignment is found. 
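+// The lookup pages through ListAccountAssignments for the given account and
+// permission set, stopping at the first assignment whose PrincipalType and
+// PrincipalId both match the requested principal.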
+func AccountAssignment(conn *ssoadmin.SSOAdmin, principalId, principalType, accountId, permissionSetArn, instanceArn string) (*ssoadmin.AccountAssignment, error) { + input := &ssoadmin.ListAccountAssignmentsInput{ + AccountId: aws.String(accountId), + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + } + + var accountAssignment *ssoadmin.AccountAssignment + err := conn.ListAccountAssignmentsPages(input, func(page *ssoadmin.ListAccountAssignmentsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, a := range page.AccountAssignments { + if aws.StringValue(a.PrincipalType) != principalType { + continue + } + if aws.StringValue(a.PrincipalId) == principalId { + accountAssignment = a + return false + } + } + + return !lastPage + }) + + return accountAssignment, err +} + // ManagedPolicy returns the managed policy attached to a permission set within a specified SSO instance. // Returns an error if no managed policy is found. func ManagedPolicy(conn *ssoadmin.SSOAdmin, managedPolicyArn, permissionSetArn, instanceArn string) (*ssoadmin.AttachedManagedPolicy, error) { diff --git a/aws/internal/service/ssoadmin/waiter/status.go b/aws/internal/service/ssoadmin/waiter/status.go index cd0b8e10bbe..15a8e85dbfc 100644 --- a/aws/internal/service/ssoadmin/waiter/status.go +++ b/aws/internal/service/ssoadmin/waiter/status.go @@ -7,10 +7,54 @@ import ( ) const ( + AccountAssignmentStatusUnknown = "Unknown" + AccountAssignmentStatusNotFound = "NotFound" PermissionSetProvisioningStatusUnknown = "Unknown" PermissionSetProvisioningStatusNotFound = "NotFound" ) +func AccountAssignmentCreationStatus(conn *ssoadmin.SSOAdmin, instanceArn, requestID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + input := &ssoadmin.DescribeAccountAssignmentCreationStatusInput{ + AccountAssignmentCreationRequestId: aws.String(requestID), + InstanceArn: aws.String(instanceArn), + } + + resp, err := conn.DescribeAccountAssignmentCreationStatus(input) + + if err != nil { + return nil, AccountAssignmentStatusUnknown, err + } + + if resp == nil || resp.AccountAssignmentCreationStatus == nil { + return nil, AccountAssignmentStatusNotFound, nil + } + + return resp.AccountAssignmentCreationStatus, aws.StringValue(resp.AccountAssignmentCreationStatus.Status), nil + } +} + +func AccountAssignmentDeletionStatus(conn *ssoadmin.SSOAdmin, instanceArn, requestID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + input := &ssoadmin.DescribeAccountAssignmentDeletionStatusInput{ + AccountAssignmentDeletionRequestId: aws.String(requestID), + InstanceArn: aws.String(instanceArn), + } + + resp, err := conn.DescribeAccountAssignmentDeletionStatus(input) + + if err != nil { + return nil, AccountAssignmentStatusUnknown, err + } + + if resp == nil || resp.AccountAssignmentDeletionStatus == nil { + return nil, AccountAssignmentStatusNotFound, nil + } + + return resp.AccountAssignmentDeletionStatus, aws.StringValue(resp.AccountAssignmentDeletionStatus.Status), nil + } +} + func PermissionSetProvisioningStatus(conn *ssoadmin.SSOAdmin, instanceArn, requestID string) resource.StateRefreshFunc { return func() (interface{}, string, error) { input := &ssoadmin.DescribePermissionSetProvisioningStatusInput{ diff --git a/aws/internal/service/ssoadmin/waiter/waiter.go b/aws/internal/service/ssoadmin/waiter/waiter.go index 48d7e111abf..fb63f04721a 100644 --- a/aws/internal/service/ssoadmin/waiter/waiter.go +++ 
b/aws/internal/service/ssoadmin/waiter/waiter.go @@ -8,10 +8,50 @@ import ( ) const ( + AWSSSOAdminAccountAssignmentCreateTimeout = 5 * time.Minute + AWSSSOAdminAccountAssignmentDeleteTimeout = 5 * time.Minute + AWSSSOAdminAccountAssignmentDelay = 5 * time.Second + AWSSSOAdminAccountAssignmentMinTimeout = 3 * time.Second AWSSSOAdminPermissionSetProvisioningRetryDelay = 5 * time.Second AWSSSOAdminPermissionSetProvisionTimeout = 10 * time.Minute ) +func AccountAssignmentCreated(conn *ssoadmin.SSOAdmin, instanceArn, requestID string) (*ssoadmin.AccountAssignmentOperationStatus, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ssoadmin.StatusValuesInProgress}, + Target: []string{ssoadmin.StatusValuesSucceeded}, + Refresh: AccountAssignmentCreationStatus(conn, instanceArn, requestID), + Timeout: AWSSSOAdminAccountAssignmentCreateTimeout, + Delay: AWSSSOAdminAccountAssignmentDelay, + MinTimeout: AWSSSOAdminAccountAssignmentMinTimeout, + } + + outputRaw, err := stateConf.WaitForState() + if v, ok := outputRaw.(*ssoadmin.AccountAssignmentOperationStatus); ok { + return v, err + } + + return nil, err +} + +func AccountAssignmentDeleted(conn *ssoadmin.SSOAdmin, instanceArn, requestID string) (*ssoadmin.AccountAssignmentOperationStatus, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ssoadmin.StatusValuesInProgress}, + Target: []string{ssoadmin.StatusValuesSucceeded}, + Refresh: AccountAssignmentDeletionStatus(conn, instanceArn, requestID), + Timeout: AWSSSOAdminAccountAssignmentDeleteTimeout, + Delay: AWSSSOAdminAccountAssignmentDelay, + MinTimeout: AWSSSOAdminAccountAssignmentMinTimeout, + } + + outputRaw, err := stateConf.WaitForState() + if v, ok := outputRaw.(*ssoadmin.AccountAssignmentOperationStatus); ok { + return v, err + } + + return nil, err +} + func PermissionSetProvisioned(conn *ssoadmin.SSOAdmin, instanceArn, requestID string) (*ssoadmin.PermissionSetProvisioningStatus, error) { stateConf := resource.StateChangeConf{ Delay: AWSSSOAdminPermissionSetProvisioningRetryDelay, diff --git a/aws/provider.go b/aws/provider.go index 22d7a4f9dc3..80285f13c7c 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -942,7 +942,7 @@ func Provider() *schema.Provider { "aws_ssm_patch_group": resourceAwsSsmPatchGroup(), "aws_ssm_parameter": resourceAwsSsmParameter(), "aws_ssm_resource_data_sync": resourceAwsSsmResourceDataSync(), - "aws_sso_assignment": resourceAwsSsoAssignment(), + "aws_ssoadmin_account_assignment": resourceAwsSsoAdminAccountAssignment(), "aws_ssoadmin_managed_policy_attachment": resourceAwsSsoAdminManagedPolicyAttachment(), "aws_ssoadmin_permission_set": resourceAwsSsoAdminPermissionSet(), "aws_ssoadmin_permission_set_inline_policy": resourceAwsSsoAdminPermissionSetInlinePolicy(), diff --git a/aws/resource_aws_sso_assignment.go b/aws/resource_aws_sso_assignment.go deleted file mode 100644 index afeb4a81370..00000000000 --- a/aws/resource_aws_sso_assignment.go +++ /dev/null @@ -1,429 +0,0 @@ -package aws - -import ( - "fmt" - "log" - "regexp" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/ssoadmin" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -const ( - AWSSSOAssignmentCreateRetryTimeout = 5 * time.Minute - AWSSSOAssignmentDeleteRetryTimeout = 5 * time.Minute - AWSSSOAssignmentRetryDelay = 5 * time.Second - 
AWSSSOAssignmentRetryMinTimeout = 3 * time.Second -) - -func resourceAwsSsoAssignment() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsSsoAssignmentCreate, - Read: resourceAwsSsoAssignmentRead, - Delete: resourceAwsSsoAssignmentDelete, - - Importer: &schema.ResourceImporter{ - State: resourceAwsSsoAssignmentImport, - }, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(AWSSSOAssignmentCreateRetryTimeout), - Delete: schema.DefaultTimeout(AWSSSOAssignmentDeleteRetryTimeout), - }, - - Schema: map[string]*schema.Schema{ - "instance_arn": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.All( - validation.StringLenBetween(10, 1224), - validation.StringMatch(regexp.MustCompile(`^arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}$`), "must match arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}"), - ), - }, - - "permission_set_arn": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.All( - validation.StringLenBetween(10, 1224), - validation.StringMatch(regexp.MustCompile(`^arn:aws:sso:::permissionSet/(sso)?ins-[a-zA-Z0-9-.]{16}/ps-[a-zA-Z0-9-./]{16}$`), "must match arn:aws:sso:::permissionSet/(sso)?ins-[a-zA-Z0-9-.]{16}/ps-[a-zA-Z0-9-./]{16}"), - ), - }, - - "target_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: ssoadmin.TargetTypeAwsAccount, - ValidateFunc: validation.StringInSlice([]string{ssoadmin.TargetTypeAwsAccount}, false), - }, - - "target_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateAwsAccountId, - }, - - "principal_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{ssoadmin.PrincipalTypeUser, ssoadmin.PrincipalTypeGroup}, false), - }, - - "principal_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 47), - validation.StringMatch(regexp.MustCompile(`^([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}$`), "must match ([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}"), - ), - }, - }, - } -} - -func resourceAwsSsoAssignmentCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ssoadminconn - - instanceArn := d.Get("instance_arn").(string) - permissionSetArn := d.Get("permission_set_arn").(string) - targetType := d.Get("target_type").(string) - targetID := d.Get("target_id").(string) - principalType := d.Get("principal_type").(string) - principalID := d.Get("principal_id").(string) - - id, idErr := resourceAwsSsoAssignmentID(instanceArn, permissionSetArn, targetType, targetID, principalType, principalID) - if idErr != nil { - return idErr - } - - // We need to check if the assignment exists before creating it - // since the AWS SSO API doesn't prevent us from creating duplicates - accountAssignment, getAccountAssignmentErr := resourceAwsSsoAssignmentGet( - conn, - instanceArn, - permissionSetArn, - targetType, - targetID, - principalType, - principalID, - ) - if getAccountAssignmentErr != nil { - return getAccountAssignmentErr - } - if accountAssignment != nil { - return fmt.Errorf("AWS SSO Assignment already exists. 
Import the resource by calling: terraform import %s", id) - } - - req := &ssoadmin.CreateAccountAssignmentInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - TargetType: aws.String(targetType), - TargetId: aws.String(targetID), - PrincipalType: aws.String(principalType), - PrincipalId: aws.String(principalID), - } - - log.Printf("[INFO] Creating AWS SSO Assignment") - resp, err := conn.CreateAccountAssignment(req) - if err != nil { - return fmt.Errorf("Error creating AWS SSO Assignment: %s", err) - } - - status := resp.AccountAssignmentCreationStatus - _, waitErr := waitForAssignmentCreation(conn, instanceArn, aws.StringValue(status.RequestId), d.Timeout(schema.TimeoutCreate)) - if waitErr != nil { - return waitErr - } - - d.SetId(id) - return resourceAwsSsoAssignmentRead(d, meta) -} - -func resourceAwsSsoAssignmentRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ssoadminconn - - instanceArn := d.Get("instance_arn").(string) - permissionSetArn := d.Get("permission_set_arn").(string) - targetType := d.Get("target_type").(string) - targetID := d.Get("target_id").(string) - principalType := d.Get("principal_type").(string) - principalID := d.Get("principal_id").(string) - - accountAssignment, err := resourceAwsSsoAssignmentGet( - conn, - instanceArn, - permissionSetArn, - targetType, - targetID, - principalType, - principalID, - ) - - if isAWSErr(err, ssoadmin.ErrCodeResourceNotFoundException, "") || accountAssignment == nil { - log.Printf("[WARN] AWS SSO Account Assignment (%s) not found, removing from state", map[string]string{ - "PrincipalType": principalType, - "PrincipalId": principalID, - }) - d.SetId("") - return nil - } - - if err != nil { - return err - } - - id, idErr := resourceAwsSsoAssignmentID(instanceArn, permissionSetArn, targetType, targetID, principalType, principalID) - if idErr != nil { - return idErr - } - d.SetId(id) - return nil -} - -func resourceAwsSsoAssignmentDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).ssoadminconn - - instanceArn := d.Get("instance_arn").(string) - permissionSetArn := d.Get("permission_set_arn").(string) - targetType := d.Get("target_type").(string) - targetID := d.Get("target_id").(string) - principalType := d.Get("principal_type").(string) - principalID := d.Get("principal_id").(string) - - req := &ssoadmin.DeleteAccountAssignmentInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - TargetType: aws.String(targetType), - TargetId: aws.String(targetID), - PrincipalType: aws.String(principalType), - PrincipalId: aws.String(principalID), - } - - log.Printf("[INFO] Deleting AWS SSO Assignment") - resp, err := conn.DeleteAccountAssignment(req) - if err != nil { - if isAWSErr(err, ssoadmin.ErrCodeResourceNotFoundException, "") { - log.Printf("[DEBUG] AWS SSO Assignment not found") - d.SetId("") - return nil - } - return fmt.Errorf("Error deleting AWS SSO Assignment: %s", err) - } - - status := resp.AccountAssignmentDeletionStatus - - _, waitErr := waitForAssignmentDeletion(conn, instanceArn, aws.StringValue(status.RequestId), d.Timeout(schema.TimeoutDelete)) - if waitErr != nil { - return waitErr - } - - d.SetId("") - return nil -} - -func resourceAwsSsoAssignmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - // id = ${InstanceID}/${PermissionSetID}/${TargetType}/${TargetID}/${PrincipalType}/${PrincipalID} - idParts := strings.Split(d.Id(), "/") - if 
len(idParts) != 6 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" || idParts[3] == "" || idParts[4] == "" || idParts[5] == "" { - return nil, fmt.Errorf("Unexpected format of id (%s), expected ${InstanceID}/${PermissionSetID}/${TargetType}/${TargetID}/${PrincipalType}/${PrincipalID}", d.Id()) - } - - instanceID := idParts[0] - permissionSetID := idParts[1] - targetType := idParts[2] - targetID := idParts[3] - principalType := idParts[4] - principalID := idParts[5] - - var err error - - // arn:${Partition}:sso:::instance/${InstanceId} - instanceArn := arn.ARN{ - Partition: meta.(*AWSClient).partition, - Service: "sso", - Resource: fmt.Sprintf("instance/%s", instanceID), - }.String() - - // arn:${Partition}:sso:::permissionSet/${InstanceId}/${PermissionSetId} - permissionSetArn := arn.ARN{ - Partition: meta.(*AWSClient).partition, - Service: "sso", - Resource: fmt.Sprintf("permissionSet/%s/%s", instanceID, permissionSetID), - }.String() - - err = d.Set("instance_arn", instanceArn) - if err != nil { - return nil, err - } - err = d.Set("permission_set_arn", permissionSetArn) - if err != nil { - return nil, err - } - err = d.Set("target_type", targetType) - if err != nil { - return nil, err - } - err = d.Set("target_id", targetID) - if err != nil { - return nil, err - } - err = d.Set("principal_type", principalType) - if err != nil { - return nil, err - } - err = d.Set("principal_id", principalID) - if err != nil { - return nil, err - } - - id, idErr := resourceAwsSsoAssignmentID(instanceArn, permissionSetArn, targetType, targetID, principalType, principalID) - if idErr != nil { - return nil, idErr - } - d.SetId(id) - - return []*schema.ResourceData{d}, nil -} - -func resourceAwsSsoAssignmentID( - instanceArn string, - permissionSetArn string, - targetType string, - targetID string, - principalType string, - principalID string, -) (string, error) { - // arn:${Partition}:sso:::instance/${InstanceId} - iArn, err := arn.Parse(instanceArn) - if err != nil { - return "", err - } - iArnResourceParts := strings.Split(iArn.Resource, "/") - if len(iArnResourceParts) != 2 || iArnResourceParts[0] != "instance" || iArnResourceParts[1] == "" { - return "", fmt.Errorf("Unexpected format of ARN (%s), expected arn:${Partition}:sso:::instance/${InstanceId}", instanceArn) - } - instanceID := iArnResourceParts[1] - - // arn:${Partition}:sso:::permissionSet/${InstanceId}/${PermissionSetId} - pArn, err := arn.Parse(permissionSetArn) - if err != nil { - return "", err - } - pArnResourceParts := strings.Split(pArn.Resource, "/") - if len(pArnResourceParts) != 3 || pArnResourceParts[0] != "permissionSet" || pArnResourceParts[1] == "" || pArnResourceParts[2] == "" { - return "", fmt.Errorf("Unexpected format of ARN (%s), expected arn:${Partition}:sso:::permissionSet/${InstanceId}/${PermissionSetId}", permissionSetArn) - } - permissionSetID := pArnResourceParts[2] - - vars := []string{ - instanceID, - permissionSetID, - targetType, - targetID, - principalType, - principalID, - } - return strings.Join(vars, "/"), nil -} - -func resourceAwsSsoAssignmentGet( - conn *ssoadmin.SSOAdmin, - instanceArn string, - permissionSetArn string, - targetType string, - targetID string, - principalType string, - principalID string, -) (*ssoadmin.AccountAssignment, error) { - if targetType != ssoadmin.TargetTypeAwsAccount { - return nil, fmt.Errorf("Invalid AWS SSO Assignments Target type %s. 
Only %s is supported", targetType, ssoadmin.TargetTypeAwsAccount) - } - req := &ssoadmin.ListAccountAssignmentsInput{ - InstanceArn: aws.String(instanceArn), - PermissionSetArn: aws.String(permissionSetArn), - AccountId: aws.String(targetID), - } - log.Printf("[DEBUG] Reading AWS SSO Assignments for %s", req) - var accountAssignment *ssoadmin.AccountAssignment - err := conn.ListAccountAssignmentsPages(req, func(page *ssoadmin.ListAccountAssignmentsOutput, lastPage bool) bool { - if page != nil && len(page.AccountAssignments) != 0 { - for _, a := range page.AccountAssignments { - if aws.StringValue(a.PrincipalType) == principalType { - if aws.StringValue(a.PrincipalId) == principalID { - accountAssignment = a - return false - } - } - } - } - return !lastPage - }) - if err != nil { - return nil, fmt.Errorf("Error getting AWS SSO Assignments: %s", err) - } - return accountAssignment, nil -} - -func waitForAssignmentCreation(conn *ssoadmin.SSOAdmin, instanceArn string, requestID string, timeout time.Duration) (*ssoadmin.AccountAssignmentOperationStatus, error) { - stateConf := &resource.StateChangeConf{ - Pending: []string{ssoadmin.StatusValuesInProgress}, - Target: []string{ssoadmin.StatusValuesSucceeded}, - Refresh: func() (interface{}, string, error) { - resp, err := conn.DescribeAccountAssignmentCreationStatus(&ssoadmin.DescribeAccountAssignmentCreationStatusInput{ - InstanceArn: aws.String(instanceArn), - AccountAssignmentCreationRequestId: aws.String(requestID), - }) - if err != nil { - return resp, "", fmt.Errorf("Error describing account assignment creation status: %s", err) - } - status := resp.AccountAssignmentCreationStatus - return status, aws.StringValue(status.Status), nil - }, - Timeout: timeout, - Delay: AWSSSOAssignmentRetryDelay, - MinTimeout: AWSSSOAssignmentRetryMinTimeout, - } - status, err := stateConf.WaitForState() - if err != nil { - return nil, fmt.Errorf("Error waiting for account assignment to be created: %s", err) - } - return status.(*ssoadmin.AccountAssignmentOperationStatus), nil -} - -func waitForAssignmentDeletion(conn *ssoadmin.SSOAdmin, instanceArn string, requestID string, timeout time.Duration) (*ssoadmin.AccountAssignmentOperationStatus, error) { - stateConf := &resource.StateChangeConf{ - Pending: []string{ssoadmin.StatusValuesInProgress}, - Target: []string{ssoadmin.StatusValuesSucceeded}, - Refresh: func() (interface{}, string, error) { - resp, err := conn.DescribeAccountAssignmentDeletionStatus(&ssoadmin.DescribeAccountAssignmentDeletionStatusInput{ - InstanceArn: aws.String(instanceArn), - AccountAssignmentDeletionRequestId: aws.String(requestID), - }) - if err != nil { - return resp, "", fmt.Errorf("Error describing account assignment deletion status: %s", err) - } - status := resp.AccountAssignmentDeletionStatus - return status, aws.StringValue(status.Status), nil - }, - Timeout: timeout, - Delay: AWSSSOAssignmentRetryDelay, - MinTimeout: AWSSSOAssignmentRetryMinTimeout, - } - status, err := stateConf.WaitForState() - if err != nil { - return nil, fmt.Errorf("Error waiting for account assignment to be deleted: %s", err) - } - return status.(*ssoadmin.AccountAssignmentOperationStatus), nil -} diff --git a/aws/resource_aws_sso_assignment_test.go b/aws/resource_aws_sso_assignment_test.go deleted file mode 100644 index 6c8461832b9..00000000000 --- a/aws/resource_aws_sso_assignment_test.go +++ /dev/null @@ -1,304 +0,0 @@ -package aws - -import ( - "fmt" - "os" - "regexp" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws/arn" - 
"github.com/aws/aws-sdk-go/service/ssoadmin" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" -) - -func testAccPreCheckAWSSIdentityStoreGroup(t *testing.T, identityStoreGroup string) { - if identityStoreGroup == "" { - t.Skip("skipping acceptance testing: No Identity Store Group was provided") - } -} - -func testAccPreCheckAWSSIdentityStoreUser(t *testing.T, identityStoreUser string) { - if identityStoreUser == "" { - t.Skip("skipping acceptance testing: No Identity Store User was provided") - } -} - -func TestAccAWSSSOAssignmentGroup_basic(t *testing.T) { - var accountAssignment ssoadmin.AccountAssignment - resourceName := "aws_sso_assignment.example" - rName := acctest.RandomWithPrefix("tf-sso-test") - - // Read identity store group from environment since they must exist in the caller's identity store - identityStoreGroup := os.Getenv("AWS_IDENTITY_STORE_GROUP") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - testAccPreCheckAWSSSOAdminInstances(t) - testAccPreCheckAWSSIdentityStoreGroup(t, identityStoreGroup) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSSOAssignmentDestroy, - Steps: []resource.TestStep{ - { - Config: testAccSSOAssignmentBasicGroupConfig(identityStoreGroup, rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSOAssignmentExists(resourceName, &accountAssignment), - resource.TestCheckResourceAttr(resourceName, "target_type", "AWS_ACCOUNT"), - resource.TestCheckResourceAttr(resourceName, "principal_type", "GROUP"), - resource.TestMatchResourceAttr(resourceName, "principal_id", regexp.MustCompile("^([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}")), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAWSSSOAssignmentUser_basic(t *testing.T) { - var accountAssignment ssoadmin.AccountAssignment - resourceName := "aws_sso_assignment.example" - rName := acctest.RandomWithPrefix("tf-sso-test") - - // Read identity store user from environment since they must exist in the caller's identity store - identityStoreUser := os.Getenv("AWS_IDENTITY_STORE_USER") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - testAccPreCheckAWSSSOAdminInstances(t) - testAccPreCheckAWSSIdentityStoreUser(t, identityStoreUser) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSSOAssignmentDestroy, - Steps: []resource.TestStep{ - { - Config: testAccSSOAssignmentBasicUserConfig(identityStoreUser, rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSOAssignmentExists(resourceName, &accountAssignment), - resource.TestCheckResourceAttr(resourceName, "target_type", "AWS_ACCOUNT"), - resource.TestCheckResourceAttr(resourceName, "principal_type", "USER"), - resource.TestMatchResourceAttr(resourceName, "principal_id", regexp.MustCompile("^([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}")), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAWSSSOAssignmentGroup_disappears(t *testing.T) { - var accountAssignment ssoadmin.AccountAssignment - resourceName := "aws_sso_assignment.example" - rName := acctest.RandomWithPrefix("tf-sso-test") - - // Read identity store group from environment since they must 
exist in the caller's identity store - identityStoreGroup := os.Getenv("AWS_IDENTITY_STORE_GROUP") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - testAccPreCheckAWSSSOAdminInstances(t) - testAccPreCheckAWSSIdentityStoreGroup(t, identityStoreGroup) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSSOAssignmentDestroy, - Steps: []resource.TestStep{ - { - Config: testAccSSOAssignmentBasicGroupConfig(identityStoreGroup, rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSSOAssignmentExists(resourceName, &accountAssignment), - testAccCheckResourceDisappears(testAccProvider, resourceAwsSsoAssignment(), resourceName), - ), - ExpectNonEmptyPlan: true, - }, - }, - }) - -} - -func testAccCheckAWSSSOAssignmentExists(resourceName string, accountAssignment *ssoadmin.AccountAssignment) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] - if !ok { - return fmt.Errorf("Not found: %s", resourceName) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Resource (%s) ID not set", resourceName) - } - - // id = ${InstanceID}/${PermissionSetID}/${TargetType}/${TargetID}/${PrincipalType}/${PrincipalID} - idParts := strings.Split(rs.Primary.ID, "/") - if len(idParts) != 6 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" || idParts[3] == "" || idParts[4] == "" || idParts[5] == "" { - return fmt.Errorf("Unexpected format of id (%s), expected ${InstanceID}/${PermissionSetID}/${TargetType}/${TargetID}/${PrincipalType}/${PrincipalID}", rs.Primary.ID) - } - - instanceID := idParts[0] - permissionSetID := idParts[1] - targetType := idParts[2] - targetID := idParts[3] - principalType := idParts[4] - principalID := idParts[5] - - // arn:${Partition}:sso:::instance/${InstanceId} - instanceArn := arn.ARN{ - Partition: testAccProvider.Meta().(*AWSClient).partition, - Service: "sso", - Resource: fmt.Sprintf("instance/%s", instanceID), - }.String() - - // arn:${Partition}:sso:::permissionSet/${InstanceId}/${PermissionSetId} - permissionSetArn := arn.ARN{ - Partition: testAccProvider.Meta().(*AWSClient).partition, - Service: "sso", - Resource: fmt.Sprintf("permissionSet/%s/%s", instanceID, permissionSetID), - }.String() - - ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn - - accountAssignmentResp, getAccountAssignmentErr := resourceAwsSsoAssignmentGet( - ssoadminconn, - instanceArn, - permissionSetArn, - targetType, - targetID, - principalType, - principalID, - ) - if getAccountAssignmentErr != nil { - return getAccountAssignmentErr - } - - *accountAssignment = *accountAssignmentResp - return nil - } -} - -func testAccCheckAWSSSOAssignmentDestroy(s *terraform.State) error { - ssoadminconn := testAccProvider.Meta().(*AWSClient).ssoadminconn - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_sso_assignment" { - continue - } - - // id = ${InstanceID}/${PermissionSetID}/${TargetType}/${TargetID}/${PrincipalType}/${PrincipalID} - idParts := strings.Split(rs.Primary.ID, "/") - if len(idParts) != 6 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" || idParts[3] == "" || idParts[4] == "" || idParts[5] == "" { - return fmt.Errorf("Unexpected format of id (%s), expected ${InstanceID}/${PermissionSetID}/${TargetType}/${TargetID}/${PrincipalType}/${PrincipalID}", rs.Primary.ID) - } - - instanceID := idParts[0] - permissionSetID := idParts[1] - targetType := idParts[2] - targetID := idParts[3] - principalType := idParts[4] - principalID := 
idParts[5] - - // arn:${Partition}:sso:::instance/${InstanceId} - instanceArn := arn.ARN{ - Partition: testAccProvider.Meta().(*AWSClient).partition, - Service: "sso", - Resource: fmt.Sprintf("instance/%s", instanceID), - }.String() - - // arn:${Partition}:sso:::permissionSet/${InstanceId}/${PermissionSetId} - permissionSetArn := arn.ARN{ - Partition: testAccProvider.Meta().(*AWSClient).partition, - Service: "sso", - Resource: fmt.Sprintf("permissionSet/%s/%s", instanceID, permissionSetID), - }.String() - - accountAssignment, getAccountAssignmentErr := resourceAwsSsoAssignmentGet( - ssoadminconn, - instanceArn, - permissionSetArn, - targetType, - targetID, - principalType, - principalID, - ) - - if isAWSErr(getAccountAssignmentErr, "ResourceNotFoundException", "") { - continue - } - - if getAccountAssignmentErr != nil { - return getAccountAssignmentErr - } - - if accountAssignment != nil { - return fmt.Errorf("AWS SSO Account Assignment (%s) still exists", rs.Primary.ID) - } - } - - return nil -} - -func testAccSSOAssignmentBasicGroupConfig(identityStoreGroup, rName string) string { - return fmt.Sprintf(` -data "aws_sso_instance" "selected" {} - -data "aws_caller_identity" "current" {} - -data "aws_identity_store_group" "example_group" { - identity_store_id = data.aws_sso_instance.selected.identity_store_id - display_name = "%s" -} - -resource "aws_sso_permission_set" "example" { - name = "%s" - description = "testing" - instance_arn = data.aws_sso_instance.selected.arn - managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] -} - -resource "aws_sso_assignment" "example" { - instance_arn = data.aws_sso_instance.selected.arn - permission_set_arn = aws_sso_permission_set.example.arn - target_type = "AWS_ACCOUNT" - target_id = data.aws_caller_identity.current.account_id - principal_type = "GROUP" - principal_id = data.aws_identity_store_group.example_group.group_id -} -`, identityStoreGroup, rName) -} - -func testAccSSOAssignmentBasicUserConfig(identityStoreUser, rName string) string { - return fmt.Sprintf(` -data "aws_sso_instance" "selected" {} - -data "aws_caller_identity" "current" {} - -data "aws_identity_store_user" "example_user" { - identity_store_id = data.aws_sso_instance.selected.identity_store_id - user_name = "%s" -} - -resource "aws_sso_permission_set" "example" { - name = "%s" - description = "testing" - instance_arn = data.aws_sso_instance.selected.arn - managed_policy_arns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"] -} - -resource "aws_sso_assignment" "example" { - instance_arn = data.aws_sso_instance.selected.arn - permission_set_arn = aws_sso_permission_set.example.arn - target_type = "AWS_ACCOUNT" - target_id = data.aws_caller_identity.current.account_id - principal_type = "USER" - principal_id = data.aws_identity_store_user.example_user.user_id -} -`, identityStoreUser, rName) -} diff --git a/aws/resource_aws_ssoadmin_account_assignment.go b/aws/resource_aws_ssoadmin_account_assignment.go new file mode 100644 index 00000000000..244e3cc80d4 --- /dev/null +++ b/aws/resource_aws_ssoadmin_account_assignment.go @@ -0,0 +1,229 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ssoadmin/finder" + 
"github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ssoadmin/waiter" +) + +func resourceAwsSsoAdminAccountAssignment() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSsoAdminAccountAssignmentCreate, + Read: resourceAwsSsoAdminAccountAssignmentRead, + Delete: resourceAwsSsoAdminAccountAssignmentDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "instance_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + + "permission_set_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + + "principal_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 47), + validation.StringMatch(regexp.MustCompile(`^([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}$`), "must match ([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}"), + ), + }, + + "principal_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice(ssoadmin.PrincipalType_Values(), false), + }, + + "target_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateAwsAccountId, + }, + + "target_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice(ssoadmin.TargetType_Values(), false), + }, + }, + } +} + +func resourceAwsSsoAdminAccountAssignmentCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + + instanceArn := d.Get("instance_arn").(string) + permissionSetArn := d.Get("permission_set_arn").(string) + principalID := d.Get("principal_id").(string) + principalType := d.Get("principal_type").(string) + targetID := d.Get("target_id").(string) + targetType := d.Get("target_type").(string) + + // We need to check if the assignment exists before creating it + // since the AWS SSO API doesn't prevent us from creating duplicates + accountAssignment, err := finder.AccountAssignment(conn, principalID, principalType, targetID, permissionSetArn, instanceArn) + if err != nil { + return fmt.Errorf("error listing SSO Account Assignments for AccountId (%s) PermissionSet (%s): %w", targetID, permissionSetArn, err) + } + + if accountAssignment != nil { + return fmt.Errorf("error creating SSO Account Assignment for %s (%s): already exists", principalType, principalID) + } + + input := &ssoadmin.CreateAccountAssignmentInput{ + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + PrincipalId: aws.String(principalID), + PrincipalType: aws.String(principalType), + TargetId: aws.String(targetID), + TargetType: aws.String(targetType), + } + + output, err := conn.CreateAccountAssignment(input) + if err != nil { + return fmt.Errorf("error creating SSO Account Assignment for %s (%s): %w", principalType, principalID, err) + } + + if output == nil || output.AccountAssignmentCreationStatus == nil { + return fmt.Errorf("error creating SSO Account Assignment for %s (%s): empty output", principalType, principalID) + + } + + status := output.AccountAssignmentCreationStatus + + _, err = waiter.AccountAssignmentCreated(conn, instanceArn, aws.StringValue(status.RequestId)) + if err != nil { + return fmt.Errorf("error waiting for SSO Account Assignment for %s (%s) to 
be created: %w", principalType, principalID, err) + } + + d.SetId(fmt.Sprintf("%s,%s,%s,%s,%s,%s", principalID, principalType, targetID, targetType, permissionSetArn, instanceArn)) + + return resourceAwsSsoAdminAccountAssignmentRead(d, meta) +} + +func resourceAwsSsoAdminAccountAssignmentRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + + idParts, err := parseSsoAdminAccountAssignmentID(d.Id()) + if err != nil { + return fmt.Errorf("error parsing SSO Account Assignment ID: %w", err) + } + + principalID := idParts[0] + principalType := idParts[1] + targetID := idParts[2] + targetType := idParts[3] + permissionSetArn := idParts[4] + instanceArn := idParts[5] + + accountAssignment, err := finder.AccountAssignment(conn, principalID, principalType, targetID, permissionSetArn, instanceArn) + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, ssoadmin.ErrCodeResourceNotFoundException) { + log.Printf("[WARN] SSO Account Assignment for Principal (%s) not found, removing from state", principalID) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error reading SSO Account Assignment for Principal (%s): %w", principalID, err) + } + + if accountAssignment == nil { + if d.IsNewResource() { + return fmt.Errorf("error reading SSO Account Assignment for Principal (%s): not found", principalID) + } + + log.Printf("[WARN] SSO Account Assignment for Principal (%s) not found, removing from state", principalID) + d.SetId("") + return nil + } + + d.Set("instance_arn", instanceArn) + d.Set("permission_set_arn", accountAssignment.PermissionSetArn) + d.Set("principal_id", accountAssignment.PrincipalId) + d.Set("principal_type", accountAssignment.PrincipalType) + d.Set("target_id", accountAssignment.AccountId) + d.Set("target_type", targetType) + + return nil +} + +func resourceAwsSsoAdminAccountAssignmentDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ssoadminconn + + idParts, err := parseSsoAdminAccountAssignmentID(d.Id()) + if err != nil { + return fmt.Errorf("error parsing SSO Account Assignment ID: %w", err) + } + + principalID := idParts[0] + principalType := idParts[1] + targetID := idParts[2] + targetType := idParts[3] + permissionSetArn := idParts[4] + instanceArn := idParts[5] + + input := &ssoadmin.DeleteAccountAssignmentInput{ + PrincipalId: aws.String(principalID), + InstanceArn: aws.String(instanceArn), + PermissionSetArn: aws.String(permissionSetArn), + TargetType: aws.String(targetType), + TargetId: aws.String(targetID), + PrincipalType: aws.String(principalType), + } + + output, err := conn.DeleteAccountAssignment(input) + if err != nil { + if tfawserr.ErrCodeEquals(err, ssoadmin.ErrCodeResourceNotFoundException) { + return nil + } + return fmt.Errorf("error deleting SSO Account Assignment for Principal (%s): %w", principalID, err) + } + + if output == nil || output.AccountAssignmentDeletionStatus == nil { + return fmt.Errorf("error deleting SSO Account Assignment for Principal (%s): empty output", principalID) + } + + status := output.AccountAssignmentDeletionStatus + + _, err = waiter.AccountAssignmentDeleted(conn, instanceArn, aws.StringValue(status.RequestId)) + if err != nil { + return fmt.Errorf("error waiting for SSO Account Assignment for Principal (%s) to be deleted: %w", principalID, err) + } + + return nil +} + +func parseSsoAdminAccountAssignmentID(id string) ([]string, error) { + idParts := strings.Split(id, ",") + if len(idParts) != 6 || idParts[0] == "" || idParts[1] == "" || 
idParts[2] == "" || + idParts[3] == "" || idParts[4] == "" || idParts[5] == "" { + return nil, fmt.Errorf("unexpected format for ID (%q), expected PRINCIPAL_ID,PRINCIPAL_TYPE,TARGET_ID,TARGET_TYPE,PERMISSION_SET_ARN,INSTANCE_ARN", id) + } + return idParts, nil +} diff --git a/aws/resource_aws_ssoadmin_account_assignment_test.go b/aws/resource_aws_ssoadmin_account_assignment_test.go new file mode 100644 index 00000000000..c604b749c9f --- /dev/null +++ b/aws/resource_aws_ssoadmin_account_assignment_test.go @@ -0,0 +1,242 @@ +package aws + +import ( + "fmt" + "os" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/service/ssoadmin" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ssoadmin/finder" +) + +func TestAccAWSSSOAdminAccountAssignment_Basic_Group(t *testing.T) { + resourceName := "aws_ssoadmin_account_assignment.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + groupName := os.Getenv("AWS_IDENTITY_STORE_GROUP_NAME") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckAWSSSOAdminInstances(t) + testAccPreCheckAWSIdentityStoreGroupName(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOAdminAccountAssignmentDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSSOAdminAccountAssignmentBasicGroupConfig(groupName, rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOAdminAccountAssignmentExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "target_type", "AWS_ACCOUNT"), + resource.TestCheckResourceAttr(resourceName, "principal_type", "GROUP"), + resource.TestMatchResourceAttr(resourceName, "principal_id", regexp.MustCompile("^([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}")), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSSOAdminAccountAssignment_Basic_User(t *testing.T) { + resourceName := "aws_ssoadmin_account_assignment.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + userName := os.Getenv("AWS_IDENTITY_STORE_USER_NAME") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckAWSSSOAdminInstances(t) + testAccPreCheckAWSIdentityStoreUserName(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOAdminAccountAssignmentDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSSOAdminAccountAssignmentBasicUserConfig(userName, rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOAdminAccountAssignmentExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "target_type", "AWS_ACCOUNT"), + resource.TestCheckResourceAttr(resourceName, "principal_type", "USER"), + resource.TestMatchResourceAttr(resourceName, "principal_id", regexp.MustCompile("^([0-9a-f]{10}-|)[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}")), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSSOAdminAccountAssignment_Disappears(t *testing.T) { + resourceName := "aws_ssoadmin_account_assignment.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + groupName := 
os.Getenv("AWS_IDENTITY_STORE_GROUP_NAME") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckAWSSSOAdminInstances(t) + testAccPreCheckAWSIdentityStoreGroupName(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOAdminAccountAssignmentDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSSOAdminAccountAssignmentBasicGroupConfig(groupName, rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSOAdminAccountAssignmentExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsSsoAdminAccountAssignment(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) + +} + +func testAccCheckAWSSSOAdminAccountAssignmentDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).ssoadminconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_ssoadmin_account_assignment" { + continue + } + + idParts, err := parseSsoAdminAccountAssignmentID(rs.Primary.ID) + + if err != nil { + return fmt.Errorf("error parsing SSO Account Assignment ID (%s): %w", rs.Primary.ID, err) + } + + principalID := idParts[0] + principalType := idParts[1] + targetID := idParts[2] + permissionSetArn := idParts[4] + instanceArn := idParts[5] + + accountAssignment, err := finder.AccountAssignment(conn, principalID, principalType, targetID, permissionSetArn, instanceArn) + + if tfawserr.ErrCodeEquals(err, ssoadmin.ErrCodeResourceNotFoundException) { + continue + } + + if err != nil { + return fmt.Errorf("error reading SSO Account Assignment for Principal (%s): %w", principalID, err) + } + + if accountAssignment != nil { + return fmt.Errorf("SSO Account Assignment for Principal (%s) still exists", principalID) + } + } + + return nil +} + +func testAccCheckAWSSSOAdminAccountAssignmentExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("Resource (%s) ID not set", resourceName) + } + + conn := testAccProvider.Meta().(*AWSClient).ssoadminconn + + idParts, err := parseSsoAdminAccountAssignmentID(rs.Primary.ID) + + if err != nil { + return fmt.Errorf("error parsing SSO Account Assignment ID (%s): %w", rs.Primary.ID, err) + } + + principalID := idParts[0] + principalType := idParts[1] + targetID := idParts[2] + permissionSetArn := idParts[4] + instanceArn := idParts[5] + + accountAssignment, err := finder.AccountAssignment(conn, principalID, principalType, targetID, permissionSetArn, instanceArn) + + if err != nil { + return err + } + + if accountAssignment == nil { + return fmt.Errorf("Account Assignment for Principal (%s) not found", principalID) + } + + return nil + } +} + +func testAccAWSSSOAdminAccountAssignmentBaseConfig(rName string) string { + return fmt.Sprintf(` +data "aws_ssoadmin_instances" "test" {} + +data "aws_caller_identity" "current" {} + +resource "aws_ssoadmin_permission_set" "test" { + name = %q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] +} +`, rName) +} + +func testAccAWSSSOAdminAccountAssignmentBasicGroupConfig(groupName, rName string) string { + return composeConfig( + testAccAWSSSOAdminAccountAssignmentBaseConfig(rName), + fmt.Sprintf(` +data "aws_identitystore_group" "test" { + identity_store_id = tolist(data.aws_ssoadmin_instances.test.identity_store_ids)[0] + filter { + attribute_path = "DisplayName" + attribute_value 
= %q + } +} + +resource "aws_ssoadmin_account_assignment" "test" { + instance_arn = aws_ssoadmin_permission_set.test.instance_arn + permission_set_arn = aws_ssoadmin_permission_set.test.arn + target_type = "AWS_ACCOUNT" + target_id = data.aws_caller_identity.current.account_id + principal_type = "GROUP" + principal_id = data.aws_identitystore_group.test.group_id +} +`, groupName)) +} + +func testAccAWSSSOAdminAccountAssignmentBasicUserConfig(userName, rName string) string { + return composeConfig( + testAccAWSSSOAdminAccountAssignmentBaseConfig(rName), + fmt.Sprintf(` +data "aws_identitystore_user" "test" { + identity_store_id = tolist(data.aws_ssoadmin_instances.test.identity_store_ids)[0] + filter { + attribute_path = "UserName" + attribute_value = %q + } +} + +resource "aws_ssoadmin_account_assignment" "test" { + instance_arn = aws_ssoadmin_permission_set.test.instance_arn + permission_set_arn = aws_ssoadmin_permission_set.test.arn + target_type = "AWS_ACCOUNT" + target_id = data.aws_caller_identity.current.account_id + principal_type = "USER" + principal_id = data.aws_identitystore_user.test.user_id +} +`, userName)) +} diff --git a/aws/resource_aws_ssoadmin_permission_set.go b/aws/resource_aws_ssoadmin_permission_set.go index 676129a7f0a..5d2b7542f14 100644 --- a/aws/resource_aws_ssoadmin_permission_set.go +++ b/aws/resource_aws_ssoadmin_permission_set.go @@ -288,7 +288,7 @@ func provisionSsoAdminPermissionSet(conn *ssoadmin.SSOAdmin, arn, instanceArn st return fmt.Errorf("error provisioning SSO Permission Set (%s): %w", arn, err) } - if output == nil && output.PermissionSetProvisioningStatus == nil { + if output == nil || output.PermissionSetProvisioningStatus == nil { return fmt.Errorf("error provisioning SSO Permission Set (%s): empty output", arn) } diff --git a/website/docs/r/sso_assignment.html.markdown b/website/docs/r/sso_assignment.html.markdown deleted file mode 100644 index ffd29b66233..00000000000 --- a/website/docs/r/sso_assignment.html.markdown +++ /dev/null @@ -1,63 +0,0 @@ ---- -subcategory: "SSO Admin" -layout: "aws" -page_title: "AWS: sso_assignment" -description: |- - Manages an AWS Single Sign-On assignment ---- - -# Resource: sso_assignment - -Provides an AWS Single Sign-On Assignment resource - -## Example Usage - -```hcl -data "aws_sso_permission_set" "example" { - instance_arn = data.aws_sso_instance.selected.arn - name = "AWSReadOnlyAccess" -} - -data "aws_identity_store_group" "example_group" { - identity_store_id = data.aws_sso_instance.selected.identity_store_id - display_name = "Example Group@example.com" -} - -resource "aws_sso_assignment" "example" { - instance_arn = data.aws_sso_instance.selected.arn - permission_set_arn = data.aws_sso_permission_set.example.arn - - target_type = "AWS_ACCOUNT" - target_id = "012347678910" - - principal_type = "GROUP" - principal_id = data.aws_identity_store_group.example_group.group_id -} -``` - -## Argument Reference - -The following arguments are supported: - -* `instance_arn` - (Required) The AWS ARN associated with the AWS Single Sign-On Instance. -* `permission_set_arn` - (Required) The AWS ARN associated with the AWS Single Sign-On Permission Set. -* `target_id` - (Required) The identifier of the AWS account to assign to the AWS Single Sign-On Permission Set. -* `principal_type` - (Required) The entity type for which the assignment will be created. Valid values: `USER`, `GROUP`. -* `principal_id` - (Required) An identifier for an object in AWS SSO, such as a user or group. 
PrincipalIds are GUIDs (For example, f81d4fae-7dec-11d0-a765-00a0c91e6bf6).
-* `target_type` - (Optional) Type of AWS Single Sign-On Assignment. Valid values: `AWS_ACCOUNT`.
-
-## Attribute Reference
-
-In addition to all arguments above, the following attributes are exported:
-
-* `id` - Identifier of the AWS Single Sign-On Assignment.
-* `created_date` - The created date of the AWS Single Sign-On Assignment.
-
-## Import
-
-`aws_sso_assignment` can be imported by using the identifier of the AWS Single Sign-On Assignment, e.g.
-identifier = ${InstanceID}/${PermissionSetID}/${TargetType}/${TargetID}/${PrincipalType}/${PrincipalID}
-
-```
-$ terraform import aws_sso_assignment.example ssoins-0123456789abcdef/ps-0123456789abcdef/AWS_ACCOUNT/012347678910/GROUP/51b3755f39-e945c18b-e449-4a93-3e95-12231cb7ef96
-```
diff --git a/website/docs/r/ssoadmin_account_assignment.html.markdown b/website/docs/r/ssoadmin_account_assignment.html.markdown
new file mode 100644
index 00000000000..5d4d5d8f592
--- /dev/null
+++ b/website/docs/r/ssoadmin_account_assignment.html.markdown
@@ -0,0 +1,67 @@
+---
+subcategory: "SSO Admin"
+layout: "aws"
+page_title: "AWS: aws_ssoadmin_account_assignment"
+description: |-
+  Manages a Single Sign-On (SSO) Account Assignment
+---
+
+# Resource: aws_ssoadmin_account_assignment
+
+Provides a Single Sign-On (SSO) Account Assignment resource
+
+## Example Usage
+
+```hcl
+data "aws_ssoadmin_instances" "example" {}
+
+data "aws_ssoadmin_permission_set" "example" {
+  instance_arn = tolist(data.aws_ssoadmin_instances.example.arns)[0]
+  name         = "AWSReadOnlyAccess"
+}
+
+data "aws_identitystore_group" "example" {
+  identity_store_id = tolist(data.aws_ssoadmin_instances.example.identity_store_ids)[0]
+
+  filter {
+    attribute_path  = "DisplayName"
+    attribute_value = "ExampleGroup"
+  }
+}
+
+resource "aws_ssoadmin_account_assignment" "example" {
+  instance_arn       = data.aws_ssoadmin_permission_set.example.instance_arn
+  permission_set_arn = data.aws_ssoadmin_permission_set.example.arn
+
+  principal_id   = data.aws_identitystore_group.example.group_id
+  principal_type = "GROUP"
+
+  target_id   = "012347678910"
+  target_type = "AWS_ACCOUNT"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `instance_arn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the SSO Instance.
+* `permission_set_arn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of the Permission Set that the admin wants to grant the principal access to.
+* `principal_id` - (Required, Forces new resource) An identifier for an object in SSO, such as a user or group. PrincipalIds are GUIDs (For example, `f81d4fae-7dec-11d0-a765-00a0c91e6bf6`).
+* `principal_type` - (Required, Forces new resource) The entity type for which the assignment will be created. Valid values: `USER`, `GROUP`.
+* `target_id` - (Required, Forces new resource) An AWS account identifier, typically a 10-12 digit string.
+* `target_type` - (Optional, Forces new resource) The entity type for which the assignment will be created. Valid values: `AWS_ACCOUNT`.
+
+## Attribute Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - The identifier of the Account Assignment, i.e. `principal_id`, `principal_type`, `target_id`, `target_type`, `permission_set_arn`, `instance_arn` separated by commas (`,`).
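The composite `id` described above is the same string the provider joins on create and splits on read, delete, and import. A minimal, self-contained sketch of that round-trip — field order taken from `parseSsoAdminAccountAssignmentID` earlier in this patch; the helper names and sample values here are illustrative only:

```go
// Sketch of composing and parsing the 6-part account assignment ID.
package main

import (
	"fmt"
	"strings"
)

// composeAssignmentID mirrors the d.SetId(fmt.Sprintf("%s,%s,%s,%s,%s,%s", ...)) call in Create.
func composeAssignmentID(principalID, principalType, targetID, targetType, permissionSetArn, instanceArn string) string {
	return strings.Join([]string{principalID, principalType, targetID, targetType, permissionSetArn, instanceArn}, ",")
}

// parseAssignmentID mirrors parseSsoAdminAccountAssignmentID: exactly six
// non-empty, comma-separated fields.
func parseAssignmentID(id string) ([]string, error) {
	parts := strings.Split(id, ",")
	if len(parts) != 6 {
		return nil, fmt.Errorf("unexpected format for ID (%q), expected 6 comma-separated fields", id)
	}
	for _, p := range parts {
		if p == "" {
			return nil, fmt.Errorf("unexpected format for ID (%q), fields may not be empty", id)
		}
	}
	return parts, nil
}

func main() {
	id := composeAssignmentID(
		"f81d4fae-7dec-11d0-a765-00a0c91e6bf6", "GROUP", "012347678910", "AWS_ACCOUNT",
		"arn:aws:sso:::permissionSet/ssoins-0123456789abcdef/ps-0123456789abcdef",
		"arn:aws:sso:::instance/ssoins-0123456789abcdef",
	)
	parts, err := parseAssignmentID(id)
	fmt.Println(parts, err)
}
```

Splitting on `,` is safe here because none of the six fields, including the two ARNs, can contain a comma.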
+ +## Import + +SSO Account Assignments can be imported using the `principal_id`, `principal_type`, `target_id`, `target_type`, `permission_set_arn`, `instance_arn` separated by commas (`,`) e.g. + +``` +$ terraform import aws_ssoadmin_account_assignment.example f81d4fae-7dec-11d0-a765-00a0c91e6bf6,GROUP,1234567890,AWS_ACCOUNT,arn:aws:sso:::permissionSet/ssoins-0123456789abcdef/ps-0123456789abcdef,arn:aws:sso:::instance/ssoins-0123456789abcdef +``` From b9359a8ae7a054d32d6d21b7d1b89bdd703f2287 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 14 Jan 2021 05:46:40 -0500 Subject: [PATCH 0678/1212] implement account assignment sweeper --- .../service/ssoadmin/finder/finder.go | 4 + ...ce_aws_ssoadmin_account_assignment_test.go | 118 ++++++++++++++++++ ...source_aws_ssoadmin_permission_set_test.go | 3 + 3 files changed, 125 insertions(+) diff --git a/aws/internal/service/ssoadmin/finder/finder.go b/aws/internal/service/ssoadmin/finder/finder.go index dc3465d8f3e..f7eeecb255a 100644 --- a/aws/internal/service/ssoadmin/finder/finder.go +++ b/aws/internal/service/ssoadmin/finder/finder.go @@ -21,6 +21,10 @@ func AccountAssignment(conn *ssoadmin.SSOAdmin, principalId, principalType, acco } for _, a := range page.AccountAssignments { + if a == nil { + continue + } + if aws.StringValue(a.PrincipalType) != principalType { continue } diff --git a/aws/resource_aws_ssoadmin_account_assignment_test.go b/aws/resource_aws_ssoadmin_account_assignment_test.go index c604b749c9f..360dd829360 100644 --- a/aws/resource_aws_ssoadmin_account_assignment_test.go +++ b/aws/resource_aws_ssoadmin_account_assignment_test.go @@ -2,18 +2,136 @@ package aws import ( "fmt" + "log" "os" "regexp" "testing" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ssoadmin" "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ssoadmin/finder" ) +func init() { + resource.AddTestSweepers("aws_ssoadmin_account_assignment", &resource.Sweeper{ + Name: "aws_ssoadmin_account_assignment", + F: testSweepSsoAdminAccountAssignments, + }) +} + +func testSweepSsoAdminAccountAssignments(region string) error { + client, err := sharedClientForRegion(region) + if err != nil { + return fmt.Errorf("error getting client: %w", err) + } + + conn := client.(*AWSClient).ssoadminconn + var sweeperErrs *multierror.Error + + // Need to Read the SSO Instance first; assumes the first instance returned + // is where the permission sets exist as AWS SSO currently supports only 1 instance + ds := dataSourceAwsSsoAdminInstances() + dsData := ds.Data(nil) + + err = ds.Read(dsData, client) + + if testSweepSkipResourceError(err) { + log.Printf("[WARN] Skipping SSO Account Assignment sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return err + } + + instanceArn := dsData.Get("arns").(*schema.Set).List()[0].(string) + + // To sweep account assignments, we need to first determine which Permission Sets + // are available and then search for their respective assignments + input := &ssoadmin.ListPermissionSetsInput{ + InstanceArn: aws.String(instanceArn), + } + + err = conn.ListPermissionSetsPages(input, func(page *ssoadmin.ListPermissionSetsOutput, isLast bool) bool { + if page 
== nil { + return !isLast + } + + for _, permissionSet := range page.PermissionSets { + if permissionSet == nil { + continue + } + + permissionSetArn := aws.StringValue(permissionSet) + + input := &ssoadmin.ListAccountAssignmentsInput{ + AccountId: aws.String(client.(*AWSClient).accountid), + InstanceArn: aws.String(instanceArn), + PermissionSetArn: permissionSet, + } + + err := conn.ListAccountAssignmentsPages(input, func(page *ssoadmin.ListAccountAssignmentsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, a := range page.AccountAssignments { + if a == nil { + continue + } + + principalID := aws.StringValue(a.PrincipalId) + principalType := aws.StringValue(a.PrincipalType) + targetID := aws.StringValue(a.AccountId) + targetType := ssoadmin.TargetTypeAwsAccount // only valid value currently accepted by API + + r := resourceAwsSsoAdminAccountAssignment() + d := r.Data(nil) + d.SetId(fmt.Sprintf("%s,%s,%s,%s,%s,%s", principalID, principalType, targetID, targetType, permissionSetArn, instanceArn)) + + err = r.Delete(d, client) + + if err != nil { + log.Printf("[ERROR] %s", err) + sweeperErrs = multierror.Append(sweeperErrs, err) + continue + } + } + + return !lastPage + }) + + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping SSO Account Assignment sweep (PermissionSet %s) for %s: %s", permissionSetArn, region, err) + continue + } + + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error retrieving SSO Account Assignments for Permission Set (%s): %w", permissionSetArn, err)) + } + } + + return !isLast + }) + + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping SSO Account Assignment sweep for %s: %s", region, err) + return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors + } + + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error retrieving SSO Permission Sets for Account Assignment sweep: %w", err)) + } + + return sweeperErrs.ErrorOrNil() +} + func TestAccAWSSSOAdminAccountAssignment_Basic_Group(t *testing.T) { resourceName := "aws_ssoadmin_account_assignment.test" rName := acctest.RandomWithPrefix("tf-acc-test") diff --git a/aws/resource_aws_ssoadmin_permission_set_test.go b/aws/resource_aws_ssoadmin_permission_set_test.go index 47f4406471f..315af037111 100644 --- a/aws/resource_aws_ssoadmin_permission_set_test.go +++ b/aws/resource_aws_ssoadmin_permission_set_test.go @@ -19,6 +19,9 @@ func init() { resource.AddTestSweepers("aws_ssoadmin_permission_set", &resource.Sweeper{ Name: "aws_ssoadmin_permission_set", F: testSweepSsoAdminPermissionSets, + Dependencies: []string{ + "aws_ssoadmin_account_assignment", + }, }) } From f25dc32763adf73d669f1063c0035f9cf5ae2634 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 14 Jan 2021 09:11:38 -0500 Subject: [PATCH 0679/1212] tests/resource/fsx_lustre_file_system: Make GovCloud compatible --- ...esource_aws_fsx_lustre_file_system_test.go | 44 ++++++++++++------- 1 file changed, 28 insertions(+), 16 deletions(-) diff --git a/aws/resource_aws_fsx_lustre_file_system_test.go b/aws/resource_aws_fsx_lustre_file_system_test.go index 831cd36a812..328fc31a38c 100644 --- a/aws/resource_aws_fsx_lustre_file_system_test.go +++ b/aws/resource_aws_fsx_lustre_file_system_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/fsx" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -86,7 +87,7 @@ func TestAccAWSFsxLustreFileSystem_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "export_path", ""), resource.TestCheckResourceAttr(resourceName, "import_path", ""), resource.TestCheckResourceAttr(resourceName, "imported_file_chunk_size", "0"), - resource.TestCheckResourceAttr(resourceName, "mount_name", "fsx"), + resource.TestCheckResourceAttrSet(resourceName, "mount_name"), resource.TestCheckResourceAttr(resourceName, "network_interface_ids.#", "2"), testAccCheckResourceAttrAccountID(resourceName, "owner_id"), resource.TestCheckResourceAttr(resourceName, "storage_capacity", "1200"), @@ -95,7 +96,7 @@ func TestAccAWSFsxLustreFileSystem_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestMatchResourceAttr(resourceName, "vpc_id", regexp.MustCompile(`^vpc-.+`)), resource.TestMatchResourceAttr(resourceName, "weekly_maintenance_start_time", regexp.MustCompile(`^\d:\d\d:\d\d$`)), - resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.LustreDeploymentTypeScratch1), + resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.LustreDeploymentTypeScratch2), resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", "0"), resource.TestCheckResourceAttr(resourceName, "storage_type", fsx.StorageTypeSsd), resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", "false"), @@ -107,10 +108,6 @@ func TestAccAWSFsxLustreFileSystem_basic(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"security_group_ids"}, }, - { - Config: testAccAwsFsxLustreFileSystemDeploymentType(fsx.LustreDeploymentTypeScratch1), - PlanOnly: true, - }, }, }) } @@ -538,22 +535,26 @@ func TestAccAWSFsxLustreFileSystem_KmsKeyId(t *testing.T) { }) } -func TestAccAWSFsxLustreFileSystem_DeploymentTypeScratch2(t *testing.T) { +func TestAccAWSFsxLustreFileSystem_DeploymentTypeScratch1(t *testing.T) { var filesystem fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(fsx.EndpointsID, t) }, + PreCheck: func() { + testAccPreCheck(t) + testAccPartitionPreCheck(endpoints.AwsPartitionID, t) // SCRATCH_1 not supported in GovCloud + testAccPartitionHasServicePreCheck(fsx.EndpointsID, t) + }, Providers: testAccProviders, CheckDestroy: testAccCheckFsxLustreFileSystemDestroy, Steps: []resource.TestStep{ { - Config: testAccAwsFsxLustreFileSystemDeploymentType(fsx.LustreDeploymentTypeScratch2), + Config: testAccAwsFsxLustreFileSystemDeploymentType(), Check: resource.ComposeTestCheckFunc( testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem), - resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.LustreDeploymentTypeScratch2), - // We don't know the randomly generated mount_name ahead of time like for SCRATCH_1 deployment types. - resource.TestCheckResourceAttrSet(resourceName, "mount_name"), + resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.LustreDeploymentTypeScratch1), + // We know the mount_name ahead of time unlike for SCRATCH_2, PERSISTENT_1 deployment types. 
+ resource.TestCheckResourceAttr(resourceName, "mount_name", "fsx"), ), }, { @@ -785,6 +786,7 @@ resource "aws_fsx_lustre_file_system" "test" { import_path = "s3://${aws_s3_bucket.test.bucket}" storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] + deployment_type = "SCRATCH_2" } `, rName, exportPrefix) } @@ -800,6 +802,7 @@ resource "aws_fsx_lustre_file_system" "test" { import_path = "s3://${aws_s3_bucket.test.bucket}%[2]s" storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] + deployment_type = "SCRATCH_2" } `, rName, importPrefix) } @@ -816,6 +819,7 @@ resource "aws_fsx_lustre_file_system" "test" { imported_file_chunk_size = %[2]d storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] + deployment_type = "SCRATCH_2" } `, rName, importedFileChunkSize) } @@ -845,6 +849,7 @@ resource "aws_fsx_lustre_file_system" "test" { security_group_ids = [aws_security_group.test1.id] storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] + deployment_type = "SCRATCH_2" } ` } @@ -893,6 +898,7 @@ resource "aws_fsx_lustre_file_system" "test" { security_group_ids = [aws_security_group.test1.id, aws_security_group.test2.id] storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] + deployment_type = "SCRATCH_2" } ` } @@ -902,6 +908,7 @@ func testAccAwsFsxLustreFileSystemConfigStorageCapacity(storageCapacity int) str resource "aws_fsx_lustre_file_system" "test" { storage_capacity = %[1]d subnet_ids = [aws_subnet.test1.id] + deployment_type = "SCRATCH_2" } `, storageCapacity) } @@ -911,6 +918,7 @@ func testAccAwsFsxLustreFileSystemConfigSubnetIds1() string { resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] + deployment_type = "SCRATCH_2" } ` } @@ -920,6 +928,7 @@ func testAccAwsFsxLustreFileSystemConfigTags1(tagKey1, tagValue1 string) string resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] + deployment_type = "SCRATCH_2" tags = { %[1]q = %[2]q @@ -933,6 +942,7 @@ func testAccAwsFsxLustreFileSystemConfigTags2(tagKey1, tagValue1, tagKey2, tagVa resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] + deployment_type = "SCRATCH_2" tags = { %[1]q = %[2]q @@ -948,6 +958,7 @@ resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] weekly_maintenance_start_time = %[1]q + deployment_type = "SCRATCH_2" } `, weeklyMaintenanceStartTime) } @@ -977,14 +988,14 @@ resource "aws_fsx_lustre_file_system" "test" { `, retention) } -func testAccAwsFsxLustreFileSystemDeploymentType(deploymentType string) string { - return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` +func testAccAwsFsxLustreFileSystemDeploymentType() string { + return testAccAwsFsxLustreFileSystemConfigBase() + ` resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] - deployment_type = %[1]q + deployment_type = "SCRATCH_1" } -`, deploymentType) +` } func testAccAwsFsxLustreFileSystemPersistentDeploymentType(perUnitStorageThroughput int) string { @@ -1058,6 +1069,7 @@ resource "aws_fsx_lustre_file_system" "test" { auto_import_policy = %[3]q storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] + deployment_type = "SCRATCH_2" } `, rName, exportPrefix, policy) } From 9a2eeb824a36f4f630b84130c0d333b1efaa4de4 Mon Sep 17 00:00:00 2001 From: brent-au Date: Fri, 15 Jan 2021 02:17:02 +1100 Subject: [PATCH 0680/1212] Adds timeouts to Global Accelerator resources --- 
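The pattern repeated across the files below is to replace hard-coded waiter durations with the resource-level `Timeouts` block and thread the caller's chosen duration into the existing `resource.StateChangeConf` waiters. A minimal sketch of that plumbing, assuming terraform-plugin-sdk v2 and aws-sdk-go — the function names here are illustrative, not the provider's exact ones:

```go
// Sketch: user-configurable operation timeouts feeding a state waiter.
package example

import (
	"time"

	"github.com/aws/aws-sdk-go/service/globalaccelerator"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// timeoutsBlock declares the defaults a practitioner can override with a
// `timeouts` block in configuration.
func timeoutsBlock() *schema.ResourceTimeout {
	return &schema.ResourceTimeout{
		Create: schema.DefaultTimeout(30 * time.Minute),
		Update: schema.DefaultTimeout(30 * time.Minute),
		Delete: schema.DefaultTimeout(30 * time.Minute),
	}
}

// waitDeployed waits for the accelerator to reach DEPLOYED using the
// caller-supplied timeout instead of a fixed duration.
func waitDeployed(refresh resource.StateRefreshFunc, timeout time.Duration) error {
	stateConf := &resource.StateChangeConf{
		Pending: []string{globalaccelerator.AcceleratorStatusInProgress},
		Target:  []string{globalaccelerator.AcceleratorStatusDeployed},
		Refresh: refresh,
		Timeout: timeout,
	}
	_, err := stateConf.WaitForState()
	return err
}
```

The create, update, and delete paths then pass `d.Timeout(schema.TimeoutCreate)`, `d.Timeout(schema.TimeoutUpdate)`, or `d.Timeout(schema.TimeoutDelete)` respectively, so a configured `timeouts` block takes effect without further code changes.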
...ource_aws_globalaccelerator_accelerator.go | 25 +++++++++++++------ ..._aws_globalaccelerator_accelerator_test.go | 3 ++- ...ce_aws_globalaccelerator_endpoint_group.go | 13 +++++++--- ...resource_aws_globalaccelerator_listener.go | 12 ++++++--- .../r/globalaccelerator_accelerator.markdown | 8 ++++++ ...alaccelerator_endpoint_group.html.markdown | 7 ++++++ .../r/globalaccelerator_listener.markdown | 7 ++++++ 7 files changed, 60 insertions(+), 15 deletions(-) diff --git a/aws/resource_aws_globalaccelerator_accelerator.go b/aws/resource_aws_globalaccelerator_accelerator.go index a1fefbd9d5a..8ce9f01801f 100644 --- a/aws/resource_aws_globalaccelerator_accelerator.go +++ b/aws/resource_aws_globalaccelerator_accelerator.go @@ -29,6 +29,11 @@ func resourceAwsGlobalAcceleratorAccelerator() *schema.Resource { State: schema.ImportStatePassthrough, }, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + }, + Schema: map[string]*schema.Schema{ "name": { Type: schema.TypeString, @@ -128,13 +133,13 @@ func resourceAwsGlobalAcceleratorAcceleratorCreate(d *schema.ResourceData, meta d.SetId(aws.StringValue(resp.Accelerator.AcceleratorArn)) - err = resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, d.Id()) + err = resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, d.Id(), d.Timeout(schema.TimeoutCreate)) if err != nil { return err } if v := d.Get("attributes").([]interface{}); len(v) > 0 { - err = resourceAwsGlobalAcceleratorAcceleratorUpdateAttributes(conn, d.Id(), v[0].(map[string]interface{})) + err = resourceAwsGlobalAcceleratorAcceleratorUpdateAttributes(conn, d.Id(), d.Timeout(schema.TimeoutUpdate), v[0].(map[string]interface{})) if err != nil { return err } @@ -279,7 +284,7 @@ func resourceAwsGlobalAcceleratorAcceleratorUpdate(d *schema.ResourceData, meta return fmt.Errorf("Error updating Global Accelerator accelerator: %s", err) } - err = resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, d.Id()) + err = resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) if err != nil { return err } @@ -287,7 +292,7 @@ func resourceAwsGlobalAcceleratorAcceleratorUpdate(d *schema.ResourceData, meta if d.HasChange("attributes") { if v := d.Get("attributes").([]interface{}); len(v) > 0 { - err := resourceAwsGlobalAcceleratorAcceleratorUpdateAttributes(conn, d.Id(), v[0].(map[string]interface{})) + err := resourceAwsGlobalAcceleratorAcceleratorUpdateAttributes(conn, d.Id(), d.Timeout(schema.TimeoutUpdate), v[0].(map[string]interface{})) if err != nil { return err } @@ -305,12 +310,12 @@ func resourceAwsGlobalAcceleratorAcceleratorUpdate(d *schema.ResourceData, meta return resourceAwsGlobalAcceleratorAcceleratorRead(d, meta) } -func resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn *globalaccelerator.GlobalAccelerator, acceleratorArn string) error { +func resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn *globalaccelerator.GlobalAccelerator, acceleratorArn string, timeout time.Duration) error { stateConf := &resource.StateChangeConf{ Pending: []string{globalaccelerator.AcceleratorStatusInProgress}, Target: []string{globalaccelerator.AcceleratorStatusDeployed}, Refresh: resourceAwsGlobalAcceleratorAcceleratorStateRefreshFunc(conn, acceleratorArn), - Timeout: 10 * time.Minute, + Timeout: timeout, } log.Printf("[DEBUG] Waiting for Global Accelerator accelerator (%s) availability", acceleratorArn) @@ -322,7 
+327,7 @@ func resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn *globalacc return nil } -func resourceAwsGlobalAcceleratorAcceleratorUpdateAttributes(conn *globalaccelerator.GlobalAccelerator, acceleratorArn string, attributes map[string]interface{}) error { +func resourceAwsGlobalAcceleratorAcceleratorUpdateAttributes(conn *globalaccelerator.GlobalAccelerator, acceleratorArn string, timeout time.Duration, attributes map[string]interface{}) error { opts := &globalaccelerator.UpdateAcceleratorAttributesInput{ AcceleratorArn: aws.String(acceleratorArn), FlowLogsEnabled: aws.Bool(attributes["flow_logs_enabled"].(bool)), @@ -342,6 +347,10 @@ func resourceAwsGlobalAcceleratorAcceleratorUpdateAttributes(conn *globalacceler if err != nil { return fmt.Errorf("Error updating Global Accelerator accelerator attributes: %s", err) } + err = resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, acceleratorArn, timeout) + if err != nil { + return err + } return nil } @@ -362,7 +371,7 @@ func resourceAwsGlobalAcceleratorAcceleratorDelete(d *schema.ResourceData, meta return fmt.Errorf("Error disabling Global Accelerator accelerator: %s", err) } - err = resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, d.Id()) + err = resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) if err != nil { return err } diff --git a/aws/resource_aws_globalaccelerator_accelerator_test.go b/aws/resource_aws_globalaccelerator_accelerator_test.go index 096669879cb..9007ddcbf85 100644 --- a/aws/resource_aws_globalaccelerator_accelerator_test.go +++ b/aws/resource_aws_globalaccelerator_accelerator_test.go @@ -5,6 +5,7 @@ import ( "log" "regexp" "testing" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/globalaccelerator" @@ -71,7 +72,7 @@ func testSweepGlobalAcceleratorAccelerators(region string) error { // Global Accelerator accelerators need to be in `DEPLOYED` state before they can be deleted. // Removing listeners or disabling can both set the state to `IN_PROGRESS`. 
- if err := resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, arn); err != nil { + if err := resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, arn, 60*time.Minute); err != nil { sweeperErr := fmt.Errorf("error waiting for Global Accelerator Accelerator (%s): %s", arn, err) log.Printf("[ERROR] %s", sweeperErr) sweeperErrs = multierror.Append(sweeperErrs, sweeperErr) diff --git a/aws/resource_aws_globalaccelerator_endpoint_group.go b/aws/resource_aws_globalaccelerator_endpoint_group.go index fc93313263b..dc74e2cf745 100644 --- a/aws/resource_aws_globalaccelerator_endpoint_group.go +++ b/aws/resource_aws_globalaccelerator_endpoint_group.go @@ -4,6 +4,7 @@ import ( "fmt" "log" "strings" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/globalaccelerator" @@ -24,6 +25,12 @@ func resourceAwsGlobalAcceleratorEndpointGroup() *schema.Resource { State: schema.ImportStatePassthrough, }, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + Schema: map[string]*schema.Schema{ "arn": { Type: schema.TypeString, @@ -199,7 +206,7 @@ func resourceAwsGlobalAcceleratorEndpointGroupCreate(d *schema.ResourceData, met return err } - err = resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, acceleratorArn) + err = resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, acceleratorArn, d.Timeout(schema.TimeoutCreate)) if err != nil { return err @@ -311,7 +318,7 @@ func resourceAwsGlobalAcceleratorEndpointGroupUpdate(d *schema.ResourceData, met return err } - err = resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, acceleratorArn) + err = resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, acceleratorArn, d.Timeout(schema.TimeoutUpdate)) if err != nil { return err @@ -343,7 +350,7 @@ func resourceAwsGlobalAcceleratorEndpointGroupDelete(d *schema.ResourceData, met return err } - err = resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, acceleratorArn) + err = resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, acceleratorArn, d.Timeout(schema.TimeoutDelete)) if err != nil { return err diff --git a/aws/resource_aws_globalaccelerator_listener.go b/aws/resource_aws_globalaccelerator_listener.go index 54d89e761df..3957e7d1b99 100644 --- a/aws/resource_aws_globalaccelerator_listener.go +++ b/aws/resource_aws_globalaccelerator_listener.go @@ -24,6 +24,12 @@ func resourceAwsGlobalAcceleratorListener() *schema.Resource { State: schema.ImportStatePassthrough, }, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + Schema: map[string]*schema.Schema{ "accelerator_arn": { Type: schema.TypeString, @@ -96,7 +102,7 @@ func resourceAwsGlobalAcceleratorListenerCreate(d *schema.ResourceData, meta int Pending: []string{globalaccelerator.AcceleratorStatusInProgress}, Target: []string{globalaccelerator.AcceleratorStatusDeployed}, Refresh: resourceAwsGlobalAcceleratorAcceleratorStateRefreshFunc(conn, d.Get("accelerator_arn").(string)), - Timeout: 5 * time.Minute, + Timeout: d.Timeout(schema.TimeoutCreate), } log.Printf("[DEBUG] Waiting for Global Accelerator listener (%s) availability", d.Id()) @@ -211,7 +217,7 @@ func resourceAwsGlobalAcceleratorListenerUpdate(d *schema.ResourceData, meta int } // Creating a listener 
triggers the accelerator to change status to InPending - err = resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, d.Get("accelerator_arn").(string)) + err = resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, d.Get("accelerator_arn").(string), d.Timeout(schema.TimeoutUpdate)) if err != nil { return err } @@ -236,7 +242,7 @@ func resourceAwsGlobalAcceleratorListenerDelete(d *schema.ResourceData, meta int // Deleting a listener triggers the accelerator to change status to InPending // } - err = resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, d.Get("accelerator_arn").(string)) + err = resourceAwsGlobalAcceleratorAcceleratorWaitForDeployedState(conn, d.Get("accelerator_arn").(string), d.Timeout(schema.TimeoutDelete)) if err != nil { return err } diff --git a/website/docs/r/globalaccelerator_accelerator.markdown b/website/docs/r/globalaccelerator_accelerator.markdown index f58e2fbeca5..5249b087f96 100644 --- a/website/docs/r/globalaccelerator_accelerator.markdown +++ b/website/docs/r/globalaccelerator_accelerator.markdown @@ -60,6 +60,14 @@ In addition to all arguments above, the following attributes are exported: [1]: https://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html +## Timeouts + +`aws_globalaccelerator_accelerator` provides the following +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: + +* `create` - (Default `30 minutes`) How long to wait for the Global Accelerator Accelerator to be created. +* `update` - (Default `30 minutes`) How long to wait for the Global Accelerator Accelerator to be updated. + ## Import Global Accelerator accelerators can be imported using the `id`, e.g. diff --git a/website/docs/r/globalaccelerator_endpoint_group.html.markdown b/website/docs/r/globalaccelerator_endpoint_group.html.markdown index 629d59434af..d4bbc011aa8 100644 --- a/website/docs/r/globalaccelerator_endpoint_group.html.markdown +++ b/website/docs/r/globalaccelerator_endpoint_group.html.markdown @@ -58,6 +58,13 @@ In addition to all arguments above, the following attributes are exported: * `id` - The Amazon Resource Name (ARN) of the endpoint group. * `arn` - The Amazon Resource Name (ARN) of the endpoint group. +`aws_globalaccelerator_endpoint_group` provides the following +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: + +* `create` - (Default `30 minutes`) How long to wait for the Global Accelerator Endpoint Group to be created. +* `update` - (Default `30 minutes`) How long to wait for the Global Accelerator Endpoint Group to be updated. +* `delete` - (Default `30 minutes`) How long to wait for the Global Accelerator Endpoint Group to be deleted. + ## Import Global Accelerator endpoint groups can be imported using the `id`, e.g. diff --git a/website/docs/r/globalaccelerator_listener.markdown b/website/docs/r/globalaccelerator_listener.markdown index 259001aa4e0..24b3580c986 100644 --- a/website/docs/r/globalaccelerator_listener.markdown +++ b/website/docs/r/globalaccelerator_listener.markdown @@ -57,6 +57,13 @@ In addition to all arguments above, the following attributes are exported: * `id` - The Amazon Resource Name (ARN) of the listener. 
+`aws_globalaccelerator_listener` provides the following +[Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) configuration options: + +* `create` - (Default `30 minutes`) How long to wait for the Global Accelerator Listener to be created. +* `update` - (Default `30 minutes`) How long to wait for the Global Accelerator Listener to be updated. +* `delete` - (Default `30 minutes`) How long to wait for the Global Accelerator Listener to be deleted. + ## Import Global Accelerator listeners can be imported using the `id`, e.g. From cfd33e11de1dd3b0e000fb781a655ba4129064df Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 14 Jan 2021 11:18:45 -0500 Subject: [PATCH 0681/1212] tests/resource/fsx_lustre_file_system: Adjust GovCloud compatibility --- ...esource_aws_fsx_lustre_file_system_test.go | 86 +++++++++++-------- 1 file changed, 49 insertions(+), 37 deletions(-) diff --git a/aws/resource_aws_fsx_lustre_file_system_test.go b/aws/resource_aws_fsx_lustre_file_system_test.go index 328fc31a38c..a75a386ecbb 100644 --- a/aws/resource_aws_fsx_lustre_file_system_test.go +++ b/aws/resource_aws_fsx_lustre_file_system_test.go @@ -73,6 +73,11 @@ func TestAccAWSFsxLustreFileSystem_basic(t *testing.T) { var filesystem fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" + deploymentType := fsx.LustreDeploymentTypeScratch1 + if testAccGetPartition() == endpoints.AwsUsGovPartitionID { + deploymentType = fsx.LustreDeploymentTypeScratch2 // SCRATCH_1 not supported in GovCloud + } + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(fsx.EndpointsID, t) }, Providers: testAccProviders, @@ -96,7 +101,7 @@ func TestAccAWSFsxLustreFileSystem_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestMatchResourceAttr(resourceName, "vpc_id", regexp.MustCompile(`^vpc-.+`)), resource.TestMatchResourceAttr(resourceName, "weekly_maintenance_start_time", regexp.MustCompile(`^\d:\d\d:\d\d$`)), - resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.LustreDeploymentTypeScratch2), + resource.TestCheckResourceAttr(resourceName, "deployment_type", deploymentType), resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", "0"), resource.TestCheckResourceAttr(resourceName, "storage_type", fsx.StorageTypeSsd), resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", "false"), @@ -535,26 +540,22 @@ func TestAccAWSFsxLustreFileSystem_KmsKeyId(t *testing.T) { }) } -func TestAccAWSFsxLustreFileSystem_DeploymentTypeScratch1(t *testing.T) { +func TestAccAWSFsxLustreFileSystem_DeploymentTypeScratch2(t *testing.T) { var filesystem fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - testAccPartitionPreCheck(endpoints.AwsPartitionID, t) // SCRATCH_1 not supported in GovCloud - testAccPartitionHasServicePreCheck(fsx.EndpointsID, t) - }, + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(fsx.EndpointsID, t) }, Providers: testAccProviders, CheckDestroy: testAccCheckFsxLustreFileSystemDestroy, Steps: []resource.TestStep{ { - Config: testAccAwsFsxLustreFileSystemDeploymentType(), + Config: testAccAwsFsxLustreFileSystemDeploymentType(fsx.LustreDeploymentTypeScratch2), Check: resource.ComposeTestCheckFunc( testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem), - 
resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.LustreDeploymentTypeScratch1), - // We know the mount_name ahead of time unlike for SCRATCH_2, PERSISTENT_1 deployment types. - resource.TestCheckResourceAttr(resourceName, "mount_name", "fsx"), + resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.LustreDeploymentTypeScratch2), + // We don't know the randomly generated mount_name ahead of time like for SCRATCH_1 deployment types. + resource.TestCheckResourceAttrSet(resourceName, "mount_name"), ), }, { @@ -752,16 +753,7 @@ func testAccCheckFsxLustreFileSystemRecreated(i, j *fsx.FileSystem) resource.Tes } func testAccAwsFsxLustreFileSystemConfigBase() string { - return ` -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} - + return composeConfig(testAccAvailableAZsNoOptInConfig(), ` resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" } @@ -771,7 +763,7 @@ resource "aws_subnet" "test1" { cidr_block = "10.0.1.0/24" availability_zone = data.aws_availability_zones.available.names[0] } -` +`) } func testAccAwsFsxLustreFileSystemConfigExportPath(rName, exportPrefix string) string { @@ -781,12 +773,14 @@ resource "aws_s3_bucket" "test" { bucket = %[1]q } +data "aws_partition" "current" {} + resource "aws_fsx_lustre_file_system" "test" { export_path = "s3://${aws_s3_bucket.test.bucket}%[2]s" import_path = "s3://${aws_s3_bucket.test.bucket}" storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] - deployment_type = "SCRATCH_2" + deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 } `, rName, exportPrefix) } @@ -798,11 +792,13 @@ resource "aws_s3_bucket" "test" { bucket = %[1]q } +data "aws_partition" "current" {} + resource "aws_fsx_lustre_file_system" "test" { import_path = "s3://${aws_s3_bucket.test.bucket}%[2]s" storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] - deployment_type = "SCRATCH_2" + deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 } `, rName, importPrefix) } @@ -814,12 +810,14 @@ resource "aws_s3_bucket" "test" { bucket = %[1]q } +data "aws_partition" "current" {} + resource "aws_fsx_lustre_file_system" "test" { import_path = "s3://${aws_s3_bucket.test.bucket}" imported_file_chunk_size = %[2]d storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] - deployment_type = "SCRATCH_2" + deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 } `, rName, importedFileChunkSize) } @@ -845,11 +843,13 @@ resource "aws_security_group" "test1" { } } +data "aws_partition" "current" {} + resource "aws_fsx_lustre_file_system" "test" { security_group_ids = [aws_security_group.test1.id] storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] - deployment_type = "SCRATCH_2" + deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 } ` } @@ -894,21 +894,25 @@ resource "aws_security_group" "test2" { } } +data "aws_partition" "current" {} + resource "aws_fsx_lustre_file_system" "test" { security_group_ids = [aws_security_group.test1.id, aws_security_group.test2.id] storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] - deployment_type = "SCRATCH_2" + deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? 
"SCRATCH_2" : null # GovCloud does not support SCRATCH_1 } ` } func testAccAwsFsxLustreFileSystemConfigStorageCapacity(storageCapacity int) string { return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` +data "aws_partition" "current" {} + resource "aws_fsx_lustre_file_system" "test" { storage_capacity = %[1]d subnet_ids = [aws_subnet.test1.id] - deployment_type = "SCRATCH_2" + deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 } `, storageCapacity) } @@ -918,17 +922,19 @@ func testAccAwsFsxLustreFileSystemConfigSubnetIds1() string { resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] - deployment_type = "SCRATCH_2" + deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 } ` } func testAccAwsFsxLustreFileSystemConfigTags1(tagKey1, tagValue1 string) string { return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` +data "aws_partition" "current" {} + resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] - deployment_type = "SCRATCH_2" + deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 tags = { %[1]q = %[2]q @@ -939,10 +945,12 @@ resource "aws_fsx_lustre_file_system" "test" { func testAccAwsFsxLustreFileSystemConfigTags2(tagKey1, tagValue1, tagKey2, tagValue2 string) string { return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` +data "aws_partition" "current" {} + resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] - deployment_type = "SCRATCH_2" + deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 tags = { %[1]q = %[2]q @@ -954,11 +962,13 @@ resource "aws_fsx_lustre_file_system" "test" { func testAccAwsFsxLustreFileSystemConfigWeeklyMaintenanceStartTime(weeklyMaintenanceStartTime string) string { return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` +data "aws_partition" "current" {} + resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] weekly_maintenance_start_time = %[1]q - deployment_type = "SCRATCH_2" + deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? 
"SCRATCH_2" : null # GovCloud does not support SCRATCH_1 } `, weeklyMaintenanceStartTime) } @@ -988,14 +998,14 @@ resource "aws_fsx_lustre_file_system" "test" { `, retention) } -func testAccAwsFsxLustreFileSystemDeploymentType() string { - return testAccAwsFsxLustreFileSystemConfigBase() + ` +func testAccAwsFsxLustreFileSystemDeploymentType(deploymentType string) string { + return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] - deployment_type = "SCRATCH_1" + deployment_type = %[1]q } -` +`, deploymentType) } func testAccAwsFsxLustreFileSystemPersistentDeploymentType(perUnitStorageThroughput int) string { @@ -1063,13 +1073,15 @@ resource "aws_s3_bucket" "test" { bucket = %[1]q } +data "aws_partition" "current" {} + resource "aws_fsx_lustre_file_system" "test" { export_path = "s3://${aws_s3_bucket.test.bucket}%[2]s" import_path = "s3://${aws_s3_bucket.test.bucket}" auto_import_policy = %[3]q storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] - deployment_type = "SCRATCH_2" + deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 } `, rName, exportPrefix, policy) } From 03ada13dd5bc97cb9596fa877e1ee5b7aa1ccd59 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 14 Jan 2021 11:31:09 -0500 Subject: [PATCH 0682/1212] tests/resource/fsx_lustre_file_system: Use composeConfig() --- ...esource_aws_fsx_lustre_file_system_test.go | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/aws/resource_aws_fsx_lustre_file_system_test.go b/aws/resource_aws_fsx_lustre_file_system_test.go index a75a386ecbb..6d7beac7902 100644 --- a/aws/resource_aws_fsx_lustre_file_system_test.go +++ b/aws/resource_aws_fsx_lustre_file_system_test.go @@ -767,7 +767,7 @@ resource "aws_subnet" "test1" { } func testAccAwsFsxLustreFileSystemConfigExportPath(rName, exportPrefix string) string { - return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` + return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), fmt.Sprintf(` resource "aws_s3_bucket" "test" { acl = "private" bucket = %[1]q @@ -782,11 +782,11 @@ resource "aws_fsx_lustre_file_system" "test" { subnet_ids = [aws_subnet.test1.id] deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 } -`, rName, exportPrefix) +`, rName, exportPrefix)) } func testAccAwsFsxLustreFileSystemConfigImportPath(rName, importPrefix string) string { - return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` + return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), fmt.Sprintf(` resource "aws_s3_bucket" "test" { acl = "private" bucket = %[1]q @@ -800,11 +800,11 @@ resource "aws_fsx_lustre_file_system" "test" { subnet_ids = [aws_subnet.test1.id] deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? 
"SCRATCH_2" : null # GovCloud does not support SCRATCH_1 } -`, rName, importPrefix) +`, rName, importPrefix)) } func testAccAwsFsxLustreFileSystemConfigImportedFileChunkSize(rName string, importedFileChunkSize int) string { - return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` + return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), fmt.Sprintf(` resource "aws_s3_bucket" "test" { acl = "private" bucket = %[1]q @@ -819,11 +819,11 @@ resource "aws_fsx_lustre_file_system" "test" { subnet_ids = [aws_subnet.test1.id] deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 } -`, rName, importedFileChunkSize) +`, rName, importedFileChunkSize)) } func testAccAwsFsxLustreFileSystemConfigSecurityGroupIds1() string { - return testAccAwsFsxLustreFileSystemConfigBase() + ` + return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), ` resource "aws_security_group" "test1" { description = "security group for FSx testing" vpc_id = aws_vpc.test.id @@ -851,11 +851,11 @@ resource "aws_fsx_lustre_file_system" "test" { subnet_ids = [aws_subnet.test1.id] deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 } -` +`) } func testAccAwsFsxLustreFileSystemConfigSecurityGroupIds2() string { - return testAccAwsFsxLustreFileSystemConfigBase() + ` + return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), ` resource "aws_security_group" "test1" { description = "security group for FSx testing" vpc_id = aws_vpc.test.id @@ -902,11 +902,11 @@ resource "aws_fsx_lustre_file_system" "test" { subnet_ids = [aws_subnet.test1.id] deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 } -` +`) } func testAccAwsFsxLustreFileSystemConfigStorageCapacity(storageCapacity int) string { - return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` + return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), fmt.Sprintf(` data "aws_partition" "current" {} resource "aws_fsx_lustre_file_system" "test" { @@ -914,21 +914,21 @@ resource "aws_fsx_lustre_file_system" "test" { subnet_ids = [aws_subnet.test1.id] deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 } -`, storageCapacity) +`, storageCapacity)) } func testAccAwsFsxLustreFileSystemConfigSubnetIds1() string { - return testAccAwsFsxLustreFileSystemConfigBase() + ` + return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), ` resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? 
"SCRATCH_2" : null # GovCloud does not support SCRATCH_1 } -` +`) } func testAccAwsFsxLustreFileSystemConfigTags1(tagKey1, tagValue1 string) string { - return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` + return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), fmt.Sprintf(` data "aws_partition" "current" {} resource "aws_fsx_lustre_file_system" "test" { @@ -940,11 +940,11 @@ resource "aws_fsx_lustre_file_system" "test" { %[1]q = %[2]q } } -`, tagKey1, tagValue1) +`, tagKey1, tagValue1)) } func testAccAwsFsxLustreFileSystemConfigTags2(tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` + return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), fmt.Sprintf(` data "aws_partition" "current" {} resource "aws_fsx_lustre_file_system" "test" { @@ -957,11 +957,11 @@ resource "aws_fsx_lustre_file_system" "test" { %[3]q = %[4]q } } -`, tagKey1, tagValue1, tagKey2, tagValue2) +`, tagKey1, tagValue1, tagKey2, tagValue2)) } func testAccAwsFsxLustreFileSystemConfigWeeklyMaintenanceStartTime(weeklyMaintenanceStartTime string) string { - return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` + return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), fmt.Sprintf(` data "aws_partition" "current" {} resource "aws_fsx_lustre_file_system" "test" { @@ -970,11 +970,11 @@ resource "aws_fsx_lustre_file_system" "test" { weekly_maintenance_start_time = %[1]q deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? "SCRATCH_2" : null # GovCloud does not support SCRATCH_1 } -`, weeklyMaintenanceStartTime) +`, weeklyMaintenanceStartTime)) } func testAccAwsFsxLustreFileSystemConfigDailyAutomaticBackupStartTime(dailyAutomaticBackupStartTime string) string { - return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` + return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] @@ -983,11 +983,11 @@ resource "aws_fsx_lustre_file_system" "test" { daily_automatic_backup_start_time = %[1]q automatic_backup_retention_days = 1 } -`, dailyAutomaticBackupStartTime) +`, dailyAutomaticBackupStartTime)) } func testAccAwsFsxLustreFileSystemConfigAutomaticBackupRetentionDays(retention int) string { - return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` + return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 subnet_ids = ["${aws_subnet.test1.id}"] @@ -995,32 +995,32 @@ resource "aws_fsx_lustre_file_system" "test" { per_unit_storage_throughput = 50 automatic_backup_retention_days = %[1]d } -`, retention) +`, retention)) } func testAccAwsFsxLustreFileSystemDeploymentType(deploymentType string) string { - return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` + return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] deployment_type = %[1]q } -`, deploymentType) +`, deploymentType)) } func testAccAwsFsxLustreFileSystemPersistentDeploymentType(perUnitStorageThroughput int) string { - return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` + return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] 
deployment_type = "PERSISTENT_1" per_unit_storage_throughput = %[1]d } -`, perUnitStorageThroughput) +`, perUnitStorageThroughput)) } func testAccAwsFsxLustreFileSystemConfigKmsKeyId1() string { - return testAccAwsFsxLustreFileSystemConfigBase() + ` + return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), ` resource "aws_kms_key" "test1" { description = "FSx KMS Testing key" deletion_window_in_days = 7 @@ -1033,11 +1033,11 @@ resource "aws_fsx_lustre_file_system" "test" { per_unit_storage_throughput = 50 kms_key_id = aws_kms_key.test1.arn } -` +`) } func testAccAwsFsxLustreFileSystemConfigKmsKeyId2() string { - return testAccAwsFsxLustreFileSystemConfigBase() + ` + return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), ` resource "aws_kms_key" "test2" { description = "FSx KMS Testing key" deletion_window_in_days = 7 @@ -1050,11 +1050,11 @@ resource "aws_fsx_lustre_file_system" "test" { per_unit_storage_throughput = 50 kms_key_id = aws_kms_key.test2.arn } -` +`) } func testAccAwsFsxLustreFileSystemHddStorageType(drive_cache_type string) string { - return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` + return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), fmt.Sprintf(` resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 6000 subnet_ids = [aws_subnet.test1.id] @@ -1063,11 +1063,11 @@ resource "aws_fsx_lustre_file_system" "test" { storage_type = "HDD" drive_cache_type = %[1]q } -`, drive_cache_type) +`, drive_cache_type)) } func testAccAwsFsxLustreFileSystemAutoImportPolicyConfig(rName, exportPrefix, policy string) string { - return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` + return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), fmt.Sprintf(` resource "aws_s3_bucket" "test" { acl = "private" bucket = %[1]q @@ -1083,11 +1083,11 @@ resource "aws_fsx_lustre_file_system" "test" { subnet_ids = [aws_subnet.test1.id] deployment_type = data.aws_partition.current.partition == "aws-us-gov" ? 
"SCRATCH_2" : null # GovCloud does not support SCRATCH_1 } -`, rName, exportPrefix, policy) +`, rName, exportPrefix, policy)) } func testAccAwsFsxLustreFileSystemCopyTagsToBackups() string { - return testAccAwsFsxLustreFileSystemConfigBase() + ` + return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), ` resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 deployment_type = "PERSISTENT_1" @@ -1095,5 +1095,5 @@ resource "aws_fsx_lustre_file_system" "test" { per_unit_storage_throughput = 50 copy_tags_to_backups = true } -` +`) } From 9717f6b0f6d903f956913350fe1ef5b7d71462f2 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 14 Jan 2021 11:40:42 -0500 Subject: [PATCH 0683/1212] tests/resource/fsx_lustre_file_system: Add partition to basic --- ...esource_aws_fsx_lustre_file_system_test.go | 22 ++----------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/aws/resource_aws_fsx_lustre_file_system_test.go b/aws/resource_aws_fsx_lustre_file_system_test.go index 6d7beac7902..efcc8a09a01 100644 --- a/aws/resource_aws_fsx_lustre_file_system_test.go +++ b/aws/resource_aws_fsx_lustre_file_system_test.go @@ -754,6 +754,8 @@ func testAccCheckFsxLustreFileSystemRecreated(i, j *fsx.FileSystem) resource.Tes func testAccAwsFsxLustreFileSystemConfigBase() string { return composeConfig(testAccAvailableAZsNoOptInConfig(), ` +data "aws_partition" "current" {} + resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" } @@ -773,8 +775,6 @@ resource "aws_s3_bucket" "test" { bucket = %[1]q } -data "aws_partition" "current" {} - resource "aws_fsx_lustre_file_system" "test" { export_path = "s3://${aws_s3_bucket.test.bucket}%[2]s" import_path = "s3://${aws_s3_bucket.test.bucket}" @@ -792,8 +792,6 @@ resource "aws_s3_bucket" "test" { bucket = %[1]q } -data "aws_partition" "current" {} - resource "aws_fsx_lustre_file_system" "test" { import_path = "s3://${aws_s3_bucket.test.bucket}%[2]s" storage_capacity = 1200 @@ -810,8 +808,6 @@ resource "aws_s3_bucket" "test" { bucket = %[1]q } -data "aws_partition" "current" {} - resource "aws_fsx_lustre_file_system" "test" { import_path = "s3://${aws_s3_bucket.test.bucket}" imported_file_chunk_size = %[2]d @@ -843,8 +839,6 @@ resource "aws_security_group" "test1" { } } -data "aws_partition" "current" {} - resource "aws_fsx_lustre_file_system" "test" { security_group_ids = [aws_security_group.test1.id] storage_capacity = 1200 @@ -894,8 +888,6 @@ resource "aws_security_group" "test2" { } } -data "aws_partition" "current" {} - resource "aws_fsx_lustre_file_system" "test" { security_group_ids = [aws_security_group.test1.id, aws_security_group.test2.id] storage_capacity = 1200 @@ -907,8 +899,6 @@ resource "aws_fsx_lustre_file_system" "test" { func testAccAwsFsxLustreFileSystemConfigStorageCapacity(storageCapacity int) string { return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), fmt.Sprintf(` -data "aws_partition" "current" {} - resource "aws_fsx_lustre_file_system" "test" { storage_capacity = %[1]d subnet_ids = [aws_subnet.test1.id] @@ -929,8 +919,6 @@ resource "aws_fsx_lustre_file_system" "test" { func testAccAwsFsxLustreFileSystemConfigTags1(tagKey1, tagValue1 string) string { return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), fmt.Sprintf(` -data "aws_partition" "current" {} - resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] @@ -945,8 +933,6 @@ resource "aws_fsx_lustre_file_system" "test" { func testAccAwsFsxLustreFileSystemConfigTags2(tagKey1, tagValue1, 
tagKey2, tagValue2 string) string { return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), fmt.Sprintf(` -data "aws_partition" "current" {} - resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] @@ -962,8 +948,6 @@ resource "aws_fsx_lustre_file_system" "test" { func testAccAwsFsxLustreFileSystemConfigWeeklyMaintenanceStartTime(weeklyMaintenanceStartTime string) string { return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), fmt.Sprintf(` -data "aws_partition" "current" {} - resource "aws_fsx_lustre_file_system" "test" { storage_capacity = 1200 subnet_ids = [aws_subnet.test1.id] @@ -1073,8 +1057,6 @@ resource "aws_s3_bucket" "test" { bucket = %[1]q } -data "aws_partition" "current" {} - resource "aws_fsx_lustre_file_system" "test" { export_path = "s3://${aws_s3_bucket.test.bucket}%[2]s" import_path = "s3://${aws_s3_bucket.test.bucket}" From 678b364c513ae93be2cbd6add5fe3f369c45612d Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Thu, 14 Jan 2021 08:44:44 -0800 Subject: [PATCH 0684/1212] Update CHANGELOG.md for #17012 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 03578cea0e8..7a910090575 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ FEATURES * **New Data Source:** `aws_api_gateway_domain_name` [GH-12489] * **New Resource:** `aws_cloudwatch_composite_alarm` [GH-15023] * **New Resource:** `aws_fms_policy` ([#9594](https://github.com/hashicorp/terraform-provider-aws/issues/9594)) +* **New Resource:** `aws_route53_resolver_dnssec_config` [GH-17012] * **New Resource:** `aws_sagemaker_domain` [GH-16077] ENHANCEMENTS From 7ea565a21b57ad9acbe29e09a87d5cc9aff59c71 Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Thu, 14 Jan 2021 09:02:56 -0800 Subject: [PATCH 0685/1212] add prechecks for SimpleAD --- aws/data_source_aws_workspaces_directory_test.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/aws/data_source_aws_workspaces_directory_test.go b/aws/data_source_aws_workspaces_directory_test.go index a742dd21587..68eb7e95d12 100644 --- a/aws/data_source_aws_workspaces_directory_test.go +++ b/aws/data_source_aws_workspaces_directory_test.go @@ -15,7 +15,12 @@ func TestAccDataSourceAwsWorkspacesDirectory_basic(t *testing.T) { dataSourceName := "data.aws_workspaces_directory.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckHasIAMRole(t, "workspaces_DefaultRole") }, + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckWorkspacesDirectory(t) + testAccPreCheckAWSDirectoryServiceSimpleDirectory(t) + testAccPreCheckHasIAMRole(t, "workspaces_DefaultRole") + }, Providers: testAccProviders, Steps: []resource.TestStep{ { From 4a17f24d4f8b8142c8f8c6e33dd4175fc264b516 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 14 Jan 2021 13:06:09 -0500 Subject: [PATCH 0686/1212] Update Data Handling and Conversion guide: Fix typo and encourage error checking (#17116) --- docs/contributing/data-handling-and-conversion.md | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/docs/contributing/data-handling-and-conversion.md b/docs/contributing/data-handling-and-conversion.md index a8244309263..452515269a5 100644 --- a/docs/contributing/data-handling-and-conversion.md +++ b/docs/contributing/data-handling-and-conversion.md @@ -40,7 +40,7 @@ At the bottom of this documentation is a [Glossary section](#glossary), which ma ## Data Conversions in Terraform Providers -Before 
getting into highly specifc documentation about the Terraform AWS Provider handling of data, it may be helpful to briefly highlight how Terraform Plugins (Terraform Providers in this case) interact with Terraform CLI and the Terraform State in general and where this documentation fits into the whole process. +Before getting into highly specific documentation about the Terraform AWS Provider handling of data, it may be helpful to briefly highlight how Terraform Plugins (Terraform Providers in this case) interact with Terraform CLI and the Terraform State in general and where this documentation fits into the whole process. There are two primary data flows that are typically handled by resources within a Terraform Provider. Data is either being converted from a planned new Terraform State into making a remote system request or a remote system response is being converted into a applied new Terraform State. The semantics of how the data of the planned new Terraform State is surfaced to the resource implementation is determined by where a resource is in its lifecycle and mainly handled by Terraform CLI. This concept can be explored further in the [Terraform Resource Instance Change Lifecycle documentation](https://github.com/hashicorp/terraform/blob/master/docs/resource-instance-change-lifecycle.md), with the caveat that some additional behaviors occur within the Terraform Plugin SDK as well (if the Terraform Plugin uses that implementation detail). @@ -289,7 +289,9 @@ if v, ok := d.GetOk("attribute_name"); ok && len(v.([]interface{})) > 0 { To write: ```go -d.Set("attribute_name", flattenServiceStructures(output.Thing.AttributeName)) +if err := d.Set("attribute_name", flattenServiceStructures(output.Thing.AttributeName)); err != nil { + return fmt.Errorf("error setting attribute_name: %w", err) +} ``` ### Root TypeList of Resource and AWS Structure @@ -308,7 +310,9 @@ To write (_likely to have helper function introduced soon_): ```go if output.Thing.AttributeName != nil { - d.Set("attribute_name", []interface{}{flattenServiceStructure(output.Thing.AttributeName)}) + if err := d.Set("attribute_name", []interface{}{flattenServiceStructure(output.Thing.AttributeName)}); err != nil { + return fmt.Errorf("error setting attribute_name: %w", err) + } } else { d.Set("attribute_name", nil) } @@ -365,7 +369,9 @@ if v, ok := d.GetOk("attribute_name"); ok && v.(*schema.Set).Len() > 0 { To write: ```go -d.Set("attribute_name", flattenServiceStructures(output.Thing.AttributeNames)) +if err := d.Set("attribute_name", flattenServiceStructures(output.Thing.AttributeNames)); err != nil { + return fmt.Errorf("error setting attribute_name: %w", err) +} ``` ### Root TypeSet of TypeString and AWS List of String From de0c2ef39fecea8fe0f4f4b6db8893ac692205e5 Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Thu, 14 Jan 2021 10:45:40 -0800 Subject: [PATCH 0687/1212] Update CHANGELOG.md for #17112 --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7a910090575..c621d7b6b40 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,9 @@ ENHANCEMENTS * resource/aws_dms_endpoint: Add `s3_settings` `date_partition_enabled` argument [GH-16827] * resource/aws_elasticache_cluster: Add support for final snapshot with Redis engine [GH-15592] * resource/aws_elasticache_replication_group: Add support for final snapshot [GH-15592] +* resource/aws_globalaccelerator_accelerator: Add custom timeouts [GH-17112] +* resource/aws_globalaccelerator_endpoint_group: Add custom timeouts 
[GH-17112]
+* resource/aws_globalaccelerator_listener: Add custom timeouts [GH-17112]
* resource/aws_instance: Add `tags` parameter to `root_block_device`, `ebs_block_device` blocks. [GH-15474]
* resource/aws_workspaces_directory: Add access properties [GH-16688]

BUG FIXES

From 86533574266cb3ea1824efc7be7a9b571b43d0e3 Mon Sep 17 00:00:00 2001
From: Graham Davison
Date: Thu, 14 Jan 2021 10:48:04 -0800
Subject: [PATCH 0688/1212] Update CHANGELOG for #16959

---
 CHANGELOG.md | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c621d7b6b40..e1533923e3e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,7 @@ ENHANCEMENTS

+* data-source/aws_workspaces_directory: Add access properties [GH-16688]
* resource/aws_api_gateway_base_path_mapping: Support in-place updates for `api_id`, `base_path`, and `stage_name` [GH-16147]
* resource/aws_api_gateway_domain_name: Add `mutual_tls_authentication` configuration block [GH-15258]
* resource/aws_api_gateway_integration: Add `tls_config` configuration block [GH-15499]
@@ -17,6 +18,8 @@ ENHANCEMENTS
* resource/aws_api_gateway_rest_api: Add `disable_execute_api_endpoint` argument [GH-16198]
* resource/aws_api_gateway_rest_api: Add `parameters` argument [GH-7374]
* resource/aws_apigatewayv2_integration: Add `response_parameters` attribute [GH-17043]
+* resource/aws_codepipeline: Deprecates GitHub v1 (OAuth token) authentication and removes hashing of GitHub token [GH-16959]
+* resource/aws_codepipeline: Adds GitHub v2 (CodeStar Connection) authentication [GH-16959]
* resource/aws_dms_endpoint: Add `s3_settings` `date_partition_enabled` argument [GH-16827]
* resource/aws_elasticache_cluster: Add support for final snapshot with Redis engine [GH-15592]
* resource/aws_elasticache_replication_group: Add support for final snapshot [GH-15592]
@@ -25,7 +28,6 @@ ENHANCEMENTS
 * resource/aws_workspaces_directory: Add access properties [GH-16688]
-* datasource/aws_workspaces_directory: Add access properties [GH-16688]

BUG FIXES

From 2f586613142b5f0bcbc19b7981078293222f01e8 Mon Sep 17 00:00:00 2001
From: angie pinilla
Date: Thu, 14 Jan 2021 14:36:19 -0500
Subject: [PATCH 0689/1212] Update CHANGELOG for #15322

---
 CHANGELOG.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e1533923e3e..6e9b5cc9ace 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,10 +3,13 @@
 FEATURES

 * **New Data Source:** `aws_api_gateway_domain_name` [GH-12489]
+* **New Data Source:** `aws_identitystore_group` [GH-15322]
+* **New Data Source:** `aws_identitystore_user` [GH-15322]
 * **New Resource:** `aws_cloudwatch_composite_alarm` [GH-15023]
 * **New Resource:** `aws_fms_policy` ([#9594](https://github.com/hashicorp/terraform-provider-aws/issues/9594))
 * **New Resource:** `aws_route53_resolver_dnssec_config` [GH-17012]
 * **New Resource:** `aws_sagemaker_domain` [GH-16077]
+* **New Resource:** `aws_ssoadmin_account_assignment` [GH-15322]

 ENHANCEMENTS

From 3fffe3edb929a3af0798c763f846cfaf65277123 Mon Sep 17 00:00:00 2001
From: Graham Davison
Date: Thu, 14 Jan 2021 14:55:59 -0800
Subject: [PATCH 0690/1212] Upgrade golangci-lint

---
 tools/go.mod |  2 +-
 tools/go.sum | 80 ++++++++++++++++++++++++++++++++++++----------------
 2 files changed, 56 
insertions(+), 26 deletions(-) diff --git a/tools/go.mod b/tools/go.mod index 748e315d1bb..4318f9f49ea 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -5,7 +5,7 @@ go 1.15 require ( github.com/bflad/tfproviderdocs v0.8.0 github.com/client9/misspell v0.3.4 - github.com/golangci/golangci-lint v1.33.0 + github.com/golangci/golangci-lint v1.35.2 github.com/katbyte/terrafmt v0.2.1-0.20200913185704-5ff4421407b4 github.com/terraform-linters/tflint v0.20.3 ) diff --git a/tools/go.sum b/tools/go.sum index 1384a0fce44..2a15d22a255 100644 --- a/tools/go.sum +++ b/tools/go.sum @@ -105,6 +105,10 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/ashanbrown/forbidigo v1.0.0 h1:QdNXBduDUopc3GW+YVYZn8jzmIMklQiCfdN2N5+dQeE= +github.com/ashanbrown/forbidigo v1.0.0/go.mod h1:PH+zMRWE15yW69fYfe7Kn8nYR6yYyafc3ntEGh2BBAg= +github.com/ashanbrown/makezero v0.0.0-20201205152432-7b7cdbb3025a h1:/U9tbJzDRof4fOR51vwzWdIBsIH6R2yU0KG1MBRM2Js= +github.com/ashanbrown/makezero v0.0.0-20201205152432-7b7cdbb3025a/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU= github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.31.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= @@ -149,14 +153,14 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/daixiang0/gci v0.2.4 h1:BUCKk5nlK2m+kRIsoj+wb/5hazHvHeZieBKWd9Afa8Q= -github.com/daixiang0/gci v0.2.4/go.mod h1:+AV8KmHTGxxwp/pY84TLQfFKp2vuKXXJVzF3kD/hfR4= +github.com/daixiang0/gci v0.2.8 h1:1mrIGMBQsBu0P7j7m1M8Lb+ZeZxsZL+jyGX4YoMJJpg= +github.com/daixiang0/gci v0.2.8/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc= github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denis-tingajkin/go-header v0.3.1 h1:ymEpSiFjeItCy1FOP+x0M2KdCELdEAHUsNa8F+hHc6w= -github.com/denis-tingajkin/go-header v0.3.1/go.mod h1:sq/2IxMhaZX+RRcgHfCRx/m0M5na0fBt4/CRe7Lrji0= +github.com/denis-tingajkin/go-header v0.4.2 h1:jEeSF4sdv8/3cT/WY8AgDHUoItNSoEZ7qg9dX7pc218= +github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= @@ -187,8 +191,8 @@ github.com/gdavison/terrafmt v0.2.1-0.20201026181004-a896893cd6af/go.mod 
h1:dV20 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/go-critic/go-critic v0.5.2 h1:3RJdgf6u4NZUumoP8nzbqiiNT8e1tC2Oc7jlgqre/IA= -github.com/go-critic/go-critic v0.5.2/go.mod h1:cc0+HvdE3lFpqLecgqMaJcvWWH77sLdBp+wLGPM1Yyo= +github.com/go-critic/go-critic v0.5.3 h1:xQEweNxzBNpSqI3wotXZAixRarETng3PTG4pkcrLCOA= +github.com/go-critic/go-critic v0.5.3/go.mod h1:2Lrs1m4jtOnnG/EdezbSpAoL0F2pRW+9HWJUZ+QaktY= github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= @@ -289,8 +293,8 @@ github.com/golangci/gocyclo v0.0.0-20180528144436-0a533e8fa43d h1:pXTK/gkVNs7Zyy github.com/golangci/gocyclo v0.0.0-20180528144436-0a533e8fa43d/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.33.0 h1:/o4OtOR3Idim4FHKBJXcy+6ZjNDm82gwK/v6+gWyH9U= -github.com/golangci/golangci-lint v1.33.0/go.mod h1:zMnMLSCaDlrXExYsuq2LOweE9CHVqYk5jexk23UsjYM= +github.com/golangci/golangci-lint v1.35.2 h1:hD1999/sq3tCPXhhI4UpunxpAAdH9pK7kDIObqoGuWA= +github.com/golangci/golangci-lint v1.35.2/go.mod h1:Sg5fFp5oLLI1B8gXfUVUSePju8XF0uWefMkuZuGIHUo= github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc h1:gLLhTLMk2/SutryVJ6D4VZCU3CUqr8YloG7FPIBWFpI= github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= @@ -317,6 +321,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= @@ -357,8 +363,12 @@ github.com/gostaticanalysis/analysisutil v0.0.3 h1:iwp+5/UAyzQSFgQ4uR2sni99sJ8Eo github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.1.0 h1:E4c8Y1EQURbBEAHoXc/jBTK7Np14ArT8NPUiSFOl9yc= github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= +github.com/gostaticanalysis/analysisutil v0.4.1 h1:/7clKqrVfiVwiBQLM0Uke4KvXnO6JcCTS7HwF2D6wG8= +github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0= 
github.com/gostaticanalysis/comment v1.3.0 h1:wTVgynbFu8/nz6SGgywA0TcyIoAVsYc7ai/Zp5xNGlw= github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= +github.com/gostaticanalysis/comment v1.4.1 h1:xHopR5L2lRz6OsjH4R2HG5wRhW9ySl3FsHIvi5pcXwc= +github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= @@ -513,6 +523,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kulti/thelper v0.2.1 h1:H4rSHiB3ALx//SXr+k9OPqKoOw2cAZpIQwVNH1RL5T4= +github.com/kulti/thelper v0.2.1/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= github.com/kunwardeep/paralleltest v1.0.2 h1:/jJRv0TiqPoEy/Y8dQxCFJhD56uS/pnvtatgTZBHokU= github.com/kunwardeep/paralleltest v1.0.2/go.mod h1:ZPqNm1fVHPllh5LPVujzbVz1JN2GhLxSfY+oqUsvG30= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= @@ -618,6 +630,8 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWb github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nishanths/exhaustive v0.1.0 h1:kVlMw8h2LHPMGUVqUj6230oQjjTMFjwcZrnkhXzFfl8= github.com/nishanths/exhaustive v0.1.0/go.mod h1:S1j9110vxV1ECdCudXRkeMnFQ/DQk9ajLT0Uf2MYZQQ= +github.com/nishanths/predeclared v0.2.1 h1:1TXtjmy4f3YCFjTxRd8zcFHOmoUir+gp0ESzjFzG2sw= +github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= @@ -657,8 +671,8 @@ github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZ github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v0.0.0-20201006195004-351e25ade6e3 h1:Amgs0nbayPhBNGh1qPqqr2e7B2qNAcBgRjnBH/lmn8k= -github.com/polyfloyd/go-errorlint v0.0.0-20201006195004-351e25ade6e3/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= +github.com/polyfloyd/go-errorlint v0.0.0-20201127212506-19bd8db6546f h1:xAw10KgJqG5NJDfmRqJ05Z0IFblKumjtMeyiOLxj3+4= +github.com/polyfloyd/go-errorlint v0.0.0-20201127212506-19bd8db6546f/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.1 h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI= github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E= @@ -678,17 +692,17 @@ 
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= -github.com/quasilyte/go-ruleguard v0.2.0 h1:UOVMyH2EKkxIfzrULvA9n/tO+HtEhqD9mrLSWMr5FwU= -github.com/quasilyte/go-ruleguard v0.2.0/go.mod h1:2RT/tf0Ce0UDj5y243iWKosQogJd8+1G3Rs2fxmlYnw= +github.com/quasilyte/go-ruleguard v0.2.1-0.20201030093329-408e96760278 h1:5gcJ7tORNCNB2QjOJF+MYjzS9aiWpxhP3gntf7RVrOQ= +github.com/quasilyte/go-ruleguard v0.2.1-0.20201030093329-408e96760278/go.mod h1:2RT/tf0Ce0UDj5y243iWKosQogJd8+1G3Rs2fxmlYnw= github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY= github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryancurrah/gomodguard v1.1.0 h1:DWbye9KyMgytn8uYpuHkwf0RHqAYO6Ay/D0TbCpPtVU= -github.com/ryancurrah/gomodguard v1.1.0/go.mod h1:4O8tr7hBODaGE6VIhfJDHcwzh5GUccKSJBU0UMXJFVM= +github.com/ryancurrah/gomodguard v1.2.0 h1:YWfhGOrXwLGiqcC/u5EqG6YeS8nh+1fw0HEc85CVZro= +github.com/ryancurrah/gomodguard v1.2.0/go.mod h1:rNqbC4TOIdUDcVMSIpNNAzTbzXAZa6W5lnUepvuMMgQ= github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw= github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -784,8 +798,8 @@ github.com/terraform-linters/tflint-plugin-sdk v0.5.0 h1:wnVl1oaGoKWhwJCkok82Dpi github.com/terraform-linters/tflint-plugin-sdk v0.5.0/go.mod h1:xbvHhlyCO/04nM+PBTERWP6VOIYGG5QLZNIgvjxi3xc= github.com/terraform-providers/terraform-provider-aws v1.60.1-0.20201015205411-546f68d4a935 h1:PbobnAeVvdzE1/qqTYxaB9h/YIpHCZXbCRBaXNIi0qA= github.com/terraform-providers/terraform-provider-aws v1.60.1-0.20201015205411-546f68d4a935/go.mod h1:DdjydHaAmjsZl+uZ4QLwfx9iP+trTBMjEqLeAV9/OFE= -github.com/tetafro/godot v1.3.0 h1:rKXb6aAz2AnwS98jYlU3snCFFXnIInQdaGiftNwpj+k= -github.com/tetafro/godot v1.3.0/go.mod h1:/7NLHhv08H1+8DNj0MElpAACw1ajsCuf3TKNQxA5S+0= +github.com/tetafro/godot v1.3.2 h1:HzWC3XjadkyeuBZxkfAFNY20UVvle0YD51I6zf6RKlU= +github.com/tetafro/godot v1.3.2/go.mod h1:ah7jjYmOMnIjS9ku2krapvGQrFNtTLo9Z/qB3dGU1eU= github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e h1:RumXZ56IrCj4CL+g1b9OL/oH0QnsF976bC8xQFYUD5Q= github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ 
-793,8 +807,8 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1 github.com/tomarrell/wrapcheck v0.0.0-20200807122107-df9e8bcb914d h1:3EZyvNUMsGD1QA8cu0STNn1L7I77rvhf2IhOcHYQhSw= github.com/tomarrell/wrapcheck v0.0.0-20200807122107-df9e8bcb914d/go.mod h1:yiFB6fFoV7saXirUGfuK+cPtUh4NX/Hf5y2WC2lehu0= github.com/tombuildsstuff/giovanni v0.12.0/go.mod h1:qJ5dpiYWkRsuOSXO8wHbee7+wElkLNfWVolcf59N84E= -github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa h1:RC4maTWLKKwb7p1cnoygsbKIgNlJqSYBeAFON3Ar8As= -github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= +github.com/tommy-muehle/go-mnd v1.3.1-0.20201008215730-16041ac3fe65 h1:Y0bLA422kvb32uZI4fy/Plop/Tbld0l9pSzl+j1FWok= +github.com/tommy-muehle/go-mnd v1.3.1-0.20201008215730-16041ac3fe65/go.mod h1:T22e7iRN4LsFPZGyRLRXeF+DWVXFuV9thsyO7NjbbTI= github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= @@ -898,6 +912,8 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0 h1:8pl+sMODzuvGJkmj2W4kZihvVb5mKm8pB/X44PIQHv8= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -942,6 +958,8 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgN golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -956,6 +974,7 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1006,6 +1025,7 @@ golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 h1:OjiUf46hAmXblsZdnoSXsEUSK golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666 h1:gVCS+QOncANNPlmlO1AhlU3oxs4V9z+gTtPwIk3p2N8= golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634 h1:bNEHhJCnrwMKNMmOx3yAynp5vs5/gRy+XWFtZFu7NBM= golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1016,6 +1036,8 @@ golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5f golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1026,7 +1048,6 @@ golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190221204921-83362c3779f5/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -1044,6 +1065,7 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1068,6 +1090,7 @@ golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200410194907-79a7a3126eef/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -1083,18 +1106,23 @@ golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed h1:+qzWo37K31KxduIYaBeMqJ8MUOyTayOQKpH9aDPLMSY= golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200731060945-b5fad4ed8dd6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0 h1:SQvH+DjrwqD1hyyQU+K7JegHz1KEZgEwt17p9d6R2eg= golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200817023811-d00afeaade8f h1:33yHANSyO/TeglgY9rBhUpX43wtonTXoFOsMRtNB6qE= golang.org/x/tools v0.0.0-20200817023811-d00afeaade8f/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201007032633-0806396f153e/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201011145850-ed2f50202694/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201013201025-64a9e34f3752 h1:2ntEwh02rqo2jSsrYmp4yKHHjh0CbXP3ZtSUetSB+q8= -golang.org/x/tools v0.0.0-20201013201025-64a9e34f3752/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201030010431-2feb2bb1ff51/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105210202-9ed45478a130 h1:8qSBr5nyKsEgkP918Pu5FFDZpTtLIjXSo6mrtdVOFfk= +golang.org/x/tools v0.0.0-20210105210202-9ed45478a130/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= @@ -1206,6 +1234,8 @@ gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1230,8 +1260,8 @@ k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/utils v0.0.0-20200411171748-3d5a2fe318e4/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -mvdan.cc/gofumpt v0.0.0-20200802201014-ab5a8192947d h1:t8TAw9WgTLghti7RYkpPmqk4JtQ3+wcP5GgZqgWeWLQ= -mvdan.cc/gofumpt v0.0.0-20200802201014-ab5a8192947d/go.mod h1:bzrjFmaD6+xqohD3KYP0H2FEuxknnBmyyOxdhLdaIws= +mvdan.cc/gofumpt v0.1.0 h1:hsVv+Y9UsZ/mFZTxJZuHVI6shSQCtzZ11h1JEFPAZLw= +mvdan.cc/gofumpt v0.1.0/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= From 5bb0ab125c011351ed2ff7b3475b974e157943a2 Mon Sep 17 00:00:00 2001 From: tf-release-bot Date: Thu, 14 Jan 2021 23:51:29 +0000 Subject: [PATCH 0691/1212] v3.24.0 --- CHANGELOG.md | 68 ++++++++++++++++++++++++++-------------------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6e9b5cc9ace..a5ce9208e5f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,47 +1,47 @@ -## 3.24.0 (Unreleased) +## 3.24.0 (January 14, 2021) FEATURES -* **New Data Source:** `aws_api_gateway_domain_name` [GH-12489] -* **New Data Source:** `aws_identitystore_group` [GH-15322] -* **New Data Source:** `aws_identitystore_user` [GH-15322] -* **New Resource:** `aws_cloudwatch_composite_alarm` [GH-15023] +* **New Data Source:** `aws_api_gateway_domain_name` ([#12489](https://github.com/hashicorp/terraform-provider-aws/issues/12489)) +* **New Data Source:** `aws_identitystore_group` ([#15322](https://github.com/hashicorp/terraform-provider-aws/issues/15322)) +* **New Data Source:** `aws_identitystore_user` ([#15322](https://github.com/hashicorp/terraform-provider-aws/issues/15322)) +* **New Resource:** `aws_cloudwatch_composite_alarm` ([#15023](https://github.com/hashicorp/terraform-provider-aws/issues/15023)) * **New Resource:** 
`aws_fms_policy` ([#9594](https://github.com/hashicorp/terraform-provider-aws/issues/9594)) -* **New Resource:** `aws_route53_resolver_dnssec_config` [GH-17012] -* **New Resource:** `aws_sagemaker_domain` [GH-16077] -* **New Resource:** `aws_ssoadmin_account_assignment` [GH-15322] +* **New Resource:** `aws_route53_resolver_dnssec_config` ([#17012](https://github.com/hashicorp/terraform-provider-aws/issues/17012)) +* **New Resource:** `aws_sagemaker_domain` ([#16077](https://github.com/hashicorp/terraform-provider-aws/issues/16077)) +* **New Resource:** `aws_ssoadmin_account_assignment` ([#15322](https://github.com/hashicorp/terraform-provider-aws/issues/15322)) ENHANCEMENTS -* data-source/aws_workspaces_directory: Add access properties [GH-16688] -* resource/aws_api_gateway_base_path_mapping: Support in-place updates for `api_id`, `base_path`, and `stage_name` [GH-16147] -* resource/aws_api_gateway_domain_name: Add `mutual_tls_authentication` configuration block [GH-15258] -* resource/aws_api_gateway_integration: Add `tls_config` configuration block [GH-15499] -* resource/aws_api_gateway_method: Add `operation_name` argument [GH-13282] -* resource/aws_api_gateway_rest_api: Add `disable_execute_api_endpoint` argument [GH-16198] -* resource/aws_api_gateway_rest_api: Add `parameters` argument [GH-7374] -* resource/aws_apigatewayv2_integration: Add `response_parameters` attribute [GH-17043] -* resource/aws_codepipeline: Deprecates GitHub v1 (OAuth token) authentication and removes hashing of GitHub token [GH-16959] -* resource/aws_codepipeline: Adds GitHub v2 (CodeStar Connetion) authentication [GH-16959] -* resource/aws_dms_endpoint: Add `s3_settings` `date_partition_enabled` argument [GH-16827] -* resource/aws_elasticache_cluster: Add support for final snapshot with Redis engine [GH-15592] -* resource/aws_elasticache_replication_group: Add support for final snapshot [GH-15592] -* resource/aws_globalaccelerator_accelerator: Add custom timeouts [GH-17112] -* resource/aws_globalaccelerator_endpoint_group: Add custom timeouts [GH-17112] -* resource/aws_globalaccelerator_endpoint_listener: Add custom timeouts [GH-17112] -* resource/aws_instance: Add `tags` parameter to `root_block_device`, `ebs_block_device` blocks.[GH-15474] -* resource/aws_workspaces_directory: Add access properties [GH-16688] +* data-source/aws_workspaces_directory: Add access properties ([#16688](https://github.com/hashicorp/terraform-provider-aws/issues/16688)) +* resource/aws_api_gateway_base_path_mapping: Support in-place updates for `api_id`, `base_path`, and `stage_name` ([#16147](https://github.com/hashicorp/terraform-provider-aws/issues/16147)) +* resource/aws_api_gateway_domain_name: Add `mutual_tls_authentication` configuration block ([#15258](https://github.com/hashicorp/terraform-provider-aws/issues/15258)) +* resource/aws_api_gateway_integration: Add `tls_config` configuration block ([#15499](https://github.com/hashicorp/terraform-provider-aws/issues/15499)) +* resource/aws_api_gateway_method: Add `operation_name` argument ([#13282](https://github.com/hashicorp/terraform-provider-aws/issues/13282)) +* resource/aws_api_gateway_rest_api: Add `disable_execute_api_endpoint` argument ([#16198](https://github.com/hashicorp/terraform-provider-aws/issues/16198)) +* resource/aws_api_gateway_rest_api: Add `parameters` argument ([#7374](https://github.com/hashicorp/terraform-provider-aws/issues/7374)) +* resource/aws_apigatewayv2_integration: Add `response_parameters` attribute 
([#17043](https://github.com/hashicorp/terraform-provider-aws/issues/17043)) +* resource/aws_codepipeline: Deprecates GitHub v1 (OAuth token) authentication and removes hashing of GitHub token ([#16959](https://github.com/hashicorp/terraform-provider-aws/issues/16959)) +* resource/aws_codepipeline: Adds GitHub v2 (CodeStar Connetion) authentication ([#16959](https://github.com/hashicorp/terraform-provider-aws/issues/16959)) +* resource/aws_dms_endpoint: Add `s3_settings` `date_partition_enabled` argument ([#16827](https://github.com/hashicorp/terraform-provider-aws/issues/16827)) +* resource/aws_elasticache_cluster: Add support for final snapshot with Redis engine ([#15592](https://github.com/hashicorp/terraform-provider-aws/issues/15592)) +* resource/aws_elasticache_replication_group: Add support for final snapshot ([#15592](https://github.com/hashicorp/terraform-provider-aws/issues/15592)) +* resource/aws_globalaccelerator_accelerator: Add custom timeouts ([#17112](https://github.com/hashicorp/terraform-provider-aws/issues/17112)) +* resource/aws_globalaccelerator_endpoint_group: Add custom timeouts ([#17112](https://github.com/hashicorp/terraform-provider-aws/issues/17112)) +* resource/aws_globalaccelerator_endpoint_listener: Add custom timeouts ([#17112](https://github.com/hashicorp/terraform-provider-aws/issues/17112)) +* resource/aws_instance: Add `tags` parameter to `root_block_device`, `ebs_block_device` blocks.([#15474](https://github.com/hashicorp/terraform-provider-aws/issues/15474)) +* resource/aws_workspaces_directory: Add access properties ([#16688](https://github.com/hashicorp/terraform-provider-aws/issues/16688)) BUX FIXES -* resource/aws_appmesh_route: Allow an empty `match` attribute to specified for a `grpc_route`, indicating that any service should be matched [GH-16867] -* resource/aws_db_instance: Correctly validate `final_snapshot_identifier` argument at plan-time [GH-16885] -* resource/aws_dms_endpoint: Support `extra_connection_attributes` for all engine names during create and read [GH-16827] -* resource/aws_instance: Prevent `volume_tags` from improperly interfering with `tags` in `aws_ebs_volume` [GH-15474] -* resource/aws_networkfirewall_rule_group: Prevent resource recreation due to `stateful_rule` changes after creation [GH-16884] -* resource/aws_route53_zone_association: Prevent deletion errors for missing Hosted Zone or VPC association [GH-17023] -* resource/aws_sagemaker_image - fix error on wait for delete when image does not exist [GH-16077] -* resource/aws_s3_bucket_inventory: Prevent crashes with empty `destination`, `filter`, and `schedule` configuration blocks [GH-17055] +* resource/aws_appmesh_route: Allow an empty `match` attribute to specified for a `grpc_route`, indicating that any service should be matched ([#16867](https://github.com/hashicorp/terraform-provider-aws/issues/16867)) +* resource/aws_db_instance: Correctly validate `final_snapshot_identifier` argument at plan-time ([#16885](https://github.com/hashicorp/terraform-provider-aws/issues/16885)) +* resource/aws_dms_endpoint: Support `extra_connection_attributes` for all engine names during create and read ([#16827](https://github.com/hashicorp/terraform-provider-aws/issues/16827)) +* resource/aws_instance: Prevent `volume_tags` from improperly interfering with `tags` in `aws_ebs_volume` ([#15474](https://github.com/hashicorp/terraform-provider-aws/issues/15474)) +* resource/aws_networkfirewall_rule_group: Prevent resource recreation due to `stateful_rule` changes after creation 
([#16884](https://github.com/hashicorp/terraform-provider-aws/issues/16884)) +* resource/aws_route53_zone_association: Prevent deletion errors for missing Hosted Zone or VPC association ([#17023](https://github.com/hashicorp/terraform-provider-aws/issues/17023)) +* resource/aws_sagemaker_image - fix error on wait for delete when image does not exist ([#16077](https://github.com/hashicorp/terraform-provider-aws/issues/16077)) +* resource/aws_s3_bucket_inventory: Prevent crashes with empty `destination`, `filter`, and `schedule` configuration blocks ([#17055](https://github.com/hashicorp/terraform-provider-aws/issues/17055)) * service/apigateway: All operations will now automatically retry on `ConflictException: Unable to complete operation due to concurrent modification. Please try again later.` errors. ## 3.23.0 (January 08, 2021) From 3e0f2cdf38591601871d678c511c314d563fbdf4 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 14 Jan 2021 16:34:04 -0800 Subject: [PATCH 0692/1212] Fixes slice initialization issues --- aws/internal/keyvaluetags/key_value_tags.go | 2 +- aws/resource_aws_autoscaling_group.go | 8 +-- aws/resource_aws_instance.go | 12 ++--- aws/structure.go | 56 ++++++++++----------- 4 files changed, 36 insertions(+), 42 deletions(-) diff --git a/aws/internal/keyvaluetags/key_value_tags.go b/aws/internal/keyvaluetags/key_value_tags.go index 00b80bc572c..f9af201a7c0 100644 --- a/aws/internal/keyvaluetags/key_value_tags.go +++ b/aws/internal/keyvaluetags/key_value_tags.go @@ -242,7 +242,7 @@ func (tags KeyValueTags) Keys() []string { // ListofMap returns a list of flattened tags. // Compatible with setting Terraform state for strongly typed configuration blocks. func (tags KeyValueTags) ListofMap() []map[string]interface{} { - result := make([]map[string]interface{}, len(tags)) + result := make([]map[string]interface{}, 0, len(tags)) for k, v := range tags { m := map[string]interface{}{ diff --git a/aws/resource_aws_autoscaling_group.go b/aws/resource_aws_autoscaling_group.go index c92d698178c..dfaa19d8a8f 100644 --- a/aws/resource_aws_autoscaling_group.go +++ b/aws/resource_aws_autoscaling_group.go @@ -1231,9 +1231,9 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{}) strs[i] = a.(string) } if attrsSet.Contains("tag") && !attrsSet.Contains("tags") { - strs = append(strs, "tags") + strs = append(strs, "tags") // nozero } else if !attrsSet.Contains("tag") && attrsSet.Contains("tags") { - strs = append(strs, "tag") + strs = append(strs, "tag") // nozero } shouldRefreshInstances = d.HasChanges(strs...) 
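			// The `// nozero` markers above exempt these two appends from the
			// makezero linter enabled later in this series: strs was sized
			// with make([]string, len(attrs)) and fully written by index, so
			// growing it past that length here is deliberate. The bug the
			// linter targets is appending to a slice that still holds its
			// zero values, which silently duplicates entries.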
} @@ -1595,8 +1595,8 @@ func getTargetGroupInstanceStates(g *autoscaling.Group, meta interface{}) (map[s func expandVpcZoneIdentifiers(list []interface{}) *string { strs := make([]string, len(list)) - for _, s := range list { - strs = append(strs, s.(string)) + for i, s := range list { + strs[i] = s.(string) } return aws.String(strings.Join(strs, ",")) } diff --git a/aws/resource_aws_instance.go b/aws/resource_aws_instance.go index 925861a4489..1c5b948070c 100644 --- a/aws/resource_aws_instance.go +++ b/aws/resource_aws_instance.go @@ -1913,12 +1913,10 @@ func buildNetworkInterfaceOpts(d *schema.ResourceData, groups []*string, nInterf if v, ok := d.GetOk("ipv6_addresses"); ok { ipv6Addresses := make([]*ec2.InstanceIpv6Address, len(v.([]interface{}))) - for _, address := range v.([]interface{}) { - ipv6Address := &ec2.InstanceIpv6Address{ + for i, address := range v.([]interface{}) { + ipv6Addresses[i] = &ec2.InstanceIpv6Address{ Ipv6Address: aws.String(address.(string)), } - - ipv6Addresses = append(ipv6Addresses, ipv6Address) } ni.Ipv6Addresses = ipv6Addresses @@ -2379,12 +2377,10 @@ func buildAwsInstanceOpts(d *schema.ResourceData, meta interface{}) (*awsInstanc if v, ok := d.GetOk("ipv6_addresses"); ok { ipv6Addresses := make([]*ec2.InstanceIpv6Address, len(v.([]interface{}))) - for _, address := range v.([]interface{}) { - ipv6Address := &ec2.InstanceIpv6Address{ + for i, address := range v.([]interface{}) { + ipv6Addresses[i] = &ec2.InstanceIpv6Address{ Ipv6Address: aws.String(address.(string)), } - - ipv6Addresses = append(ipv6Addresses, ipv6Address) } opts.Ipv6Addresses = ipv6Addresses diff --git a/aws/structure.go b/aws/structure.go index 69bd8d1b26e..a9f7f382b69 100644 --- a/aws/structure.go +++ b/aws/structure.go @@ -3987,24 +3987,23 @@ func flattenAwsDynamoDbTableResource(d *schema.ResourceData, table *dynamodb.Tab d.Set("write_capacity", table.ProvisionedThroughput.WriteCapacityUnits) d.Set("read_capacity", table.ProvisionedThroughput.ReadCapacityUnits) - attributes := []interface{}{} - for _, attrdef := range table.AttributeDefinitions { - attribute := map[string]string{ - "name": *attrdef.AttributeName, - "type": *attrdef.AttributeType, + attributes := make([]interface{}, len(table.AttributeDefinitions)) + for i, attrdef := range table.AttributeDefinitions { + attributes[i] = map[string]string{ + "name": aws.StringValue(attrdef.AttributeName), + "type": aws.StringValue(attrdef.AttributeType), } - attributes = append(attributes, attribute) } d.Set("attribute", attributes) d.Set("name", table.TableName) for _, attribute := range table.KeySchema { - if *attribute.KeyType == dynamodb.KeyTypeHash { + if aws.StringValue(attribute.KeyType) == dynamodb.KeyTypeHash { d.Set("hash_key", attribute.AttributeName) } - if *attribute.KeyType == dynamodb.KeyTypeRange { + if aws.StringValue(attribute.KeyType) == dynamodb.KeyTypeRange { d.Set("range_key", attribute.AttributeName) } } @@ -4012,19 +4011,18 @@ func flattenAwsDynamoDbTableResource(d *schema.ResourceData, table *dynamodb.Tab lsiList := make([]map[string]interface{}, 0, len(table.LocalSecondaryIndexes)) for _, lsiObject := range table.LocalSecondaryIndexes { lsi := map[string]interface{}{ - "name": *lsiObject.IndexName, - "projection_type": *lsiObject.Projection.ProjectionType, + "name": aws.StringValue(lsiObject.IndexName), + "projection_type": aws.StringValue(lsiObject.Projection.ProjectionType), } for _, attribute := range lsiObject.KeySchema { - - if *attribute.KeyType == dynamodb.KeyTypeRange { - lsi["range_key"] = 
*attribute.AttributeName + if aws.StringValue(attribute.KeyType) == dynamodb.KeyTypeRange { + lsi["range_key"] = aws.StringValue(attribute.AttributeName) } } nkaList := make([]string, len(lsiObject.Projection.NonKeyAttributes)) - for _, nka := range lsiObject.Projection.NonKeyAttributes { - nkaList = append(nkaList, *nka) + for i, nka := range lsiObject.Projection.NonKeyAttributes { + nkaList[i] = aws.StringValue(nka) } lsi["non_key_attributes"] = nkaList @@ -4036,33 +4034,33 @@ func flattenAwsDynamoDbTableResource(d *schema.ResourceData, table *dynamodb.Tab return err } - gsiList := make([]map[string]interface{}, 0, len(table.GlobalSecondaryIndexes)) - for _, gsiObject := range table.GlobalSecondaryIndexes { + gsiList := make([]map[string]interface{}, len(table.GlobalSecondaryIndexes)) + for i, gsiObject := range table.GlobalSecondaryIndexes { gsi := map[string]interface{}{ - "write_capacity": *gsiObject.ProvisionedThroughput.WriteCapacityUnits, - "read_capacity": *gsiObject.ProvisionedThroughput.ReadCapacityUnits, - "name": *gsiObject.IndexName, + "write_capacity": aws.Int64Value(gsiObject.ProvisionedThroughput.WriteCapacityUnits), + "read_capacity": aws.Int64Value(gsiObject.ProvisionedThroughput.ReadCapacityUnits), + "name": aws.StringValue(gsiObject.IndexName), } for _, attribute := range gsiObject.KeySchema { - if *attribute.KeyType == dynamodb.KeyTypeHash { - gsi["hash_key"] = *attribute.AttributeName + if aws.StringValue(attribute.KeyType) == dynamodb.KeyTypeHash { + gsi["hash_key"] = aws.StringValue(attribute.AttributeName) } - if *attribute.KeyType == dynamodb.KeyTypeRange { - gsi["range_key"] = *attribute.AttributeName + if aws.StringValue(attribute.KeyType) == dynamodb.KeyTypeRange { + gsi["range_key"] = aws.StringValue(attribute.AttributeName) } } - gsi["projection_type"] = *(gsiObject.Projection.ProjectionType) + gsi["projection_type"] = aws.StringValue(gsiObject.Projection.ProjectionType) - nonKeyAttrs := make([]string, 0, len(gsiObject.Projection.NonKeyAttributes)) - for _, nonKeyAttr := range gsiObject.Projection.NonKeyAttributes { - nonKeyAttrs = append(nonKeyAttrs, *nonKeyAttr) + nonKeyAttrs := make([]string, len(gsiObject.Projection.NonKeyAttributes)) + for i, nonKeyAttr := range gsiObject.Projection.NonKeyAttributes { + nonKeyAttrs[i] = aws.StringValue(nonKeyAttr) } gsi["non_key_attributes"] = nonKeyAttrs - gsiList = append(gsiList, gsi) + gsiList[i] = gsi } if table.StreamSpecification != nil { From 59dab0eda015f4126149feb774e364de50663038 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 14 Jan 2021 16:34:26 -0800 Subject: [PATCH 0693/1212] Enables makezero linter --- .golangci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.golangci.yml b/.golangci.yml index ace701c8fbb..8a6b2d0e9fd 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -20,6 +20,7 @@ linters: - gofmt - gosimple - ineffassign + - makezero - misspell - nakedret - staticcheck From b3d0745fb34a97c03c295c1c634c28960db6e531 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 14 Jan 2021 16:55:10 -0800 Subject: [PATCH 0694/1212] Adds validation and additional tests --- ...ource_aws_elasticache_replication_group.go | 36 ++++ ..._aws_elasticache_replication_group_test.go | 164 ++++++++++++++++++ 2 files changed, 200 insertions(+) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index debae58817b..b34acb1eda8 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ 
-1,6 +1,8 @@ package aws import ( + "context" + "errors" "fmt" "log" "regexp" @@ -10,6 +12,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -273,6 +276,39 @@ func resourceAwsElasticacheReplicationGroup() *schema.Resource { Delete: schema.DefaultTimeout(40 * time.Minute), Update: schema.DefaultTimeout(40 * time.Minute), }, + + CustomizeDiff: customdiff.Sequence( + func(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { + if v := diff.Get("multi_az_enabled").(bool); !v { + return nil + } + if v := diff.Get("automatic_failover_enabled").(bool); !v { + return errors.New(`automatic_failover_enabled must be true if multi_az_enabled is true`) + } + return nil + }, + func(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { + if v := diff.Get("automatic_failover_enabled").(bool); !v { + return nil + } + + if v, ok := diff.GetOkExists("number_cache_clusters"); ok { + if v.(int) > 1 { + return nil + } + return errors.New(`if automatic_failover_enabled is true, number_cache_clusters must be greater than 1`) + } + + if v, ok := diff.GetOkExists("cluster_mode.0.replicas_per_node_group"); ok { + if v.(int) > 0 { + return nil + } + return errors.New(`if automatic_failover_enabled is true, cluster_mode[0].replicas_per_node_group must be greater than 0`) + } + + return nil + }, + ), } } diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go index 127cdf3acf5..91fa1fd9b8b 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -308,6 +308,53 @@ func TestAccAWSElasticacheReplicationGroup_vpc(t *testing.T) { }) } +func TestAccAWSElasticacheReplicationGroup_multiAzNotInVpc(t *testing.T) { + var rg elasticache.ReplicationGroup + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_elasticache_replication_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSElasticacheReplicationGroupConfig_MultiAZNotInVPC_Basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), + resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), + resource.TestCheckResourceAttr(resourceName, "multi_az_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately"}, + }, + { + Config: testAccAWSElasticacheReplicationGroupConfig_MultiAZNotInVPC_AvailabilityZones(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), + resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), + resource.TestCheckResourceAttr(resourceName, "multi_az_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", "true"), + 
resource.TestCheckResourceAttr(resourceName, "availability_zones.#", "2"), + resource.TestCheckResourceAttrPair(resourceName, "availability_zones.0", "data.aws_availability_zones.available", "names.0"), + resource.TestCheckResourceAttrPair(resourceName, "availability_zones.1", "data.aws_availability_zones.available", "names.1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately", "availability_zones"}, + }, + }, + }) +} + func TestAccAWSElasticacheReplicationGroup_multiAzInVpc(t *testing.T) { var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") @@ -347,6 +394,42 @@ func TestAccAWSElasticacheReplicationGroup_multiAzInVpc(t *testing.T) { }) } +func TestAccAWSElasticacheReplicationGroup_multiAz_NoAutomaticFailover(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSElasticacheReplicationGroupConfig_MultiAZ_NoAutomaticFailover(rName), + ExpectError: regexp.MustCompile("automatic_failover_enabled must be true if multi_az_enabled is true"), + }, + }, + }) +} + +func TestAccAWSElasticacheReplicationGroup_AutomaticFailover_OneCacheCluster(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSElasticacheReplicationGroupConfig_MultiAZOneCacheCluster_SingleNodeGroup(rName), + ExpectError: regexp.MustCompile(`if automatic_failover_enabled is true, number_cache_clusters must be greater than 1`), + }, + { + Config: testAccAWSElasticacheReplicationGroupConfig_MultiAZOneCacheCluster_ClusterMode(rName), + ExpectError: regexp.MustCompile(`if automatic_failover_enabled is true, cluster_mode\[0\].replicas_per_node_group must be greater than 0`), + }, + }, + }) +} + func TestAccAWSElasticacheReplicationGroup_redisClusterInVpc2(t *testing.T) { var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") @@ -404,6 +487,8 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_Basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "1"), resource.TestCheckResourceAttr(resourceName, "port", "6379"), resource.TestCheckResourceAttrSet(resourceName, "configuration_endpoint_address"), + resource.TestCheckResourceAttr(resourceName, "multi_az_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", "true"), ), }, { @@ -1107,6 +1192,42 @@ resource "aws_elasticache_replication_group" "test" { } `, acctest.RandInt(), acctest.RandInt(), acctest.RandString(10)) +func testAccAWSElasticacheReplicationGroupConfig_MultiAZNotInVPC_Basic(rName string) string { + return fmt.Sprintf(` +resource "aws_elasticache_replication_group" "test" { + replication_group_id = %[1]q + replication_group_description = "test description" + number_cache_clusters = 2 + node_type = "cache.t3.small" + automatic_failover_enabled = true + multi_az_enabled = true +} +`, rName) +} + +func testAccAWSElasticacheReplicationGroupConfig_MultiAZNotInVPC_AvailabilityZones(rName string) string { + return fmt.Sprintf(` +data 
"aws_availability_zones" "available" { + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +resource "aws_elasticache_replication_group" "test" { + replication_group_id = %[1]q + replication_group_description = "test description" + number_cache_clusters = 2 + node_type = "cache.t3.small" + automatic_failover_enabled = true + multi_az_enabled = true + availability_zones = [data.aws_availability_zones.available.names[0], data.aws_availability_zones.available.names[1]] +} +`, rName) +} + func testAccAWSElasticacheReplicationGroupMultiAZInVPCConfig(rName string) string { return fmt.Sprintf(` data "aws_availability_zones" "available" { @@ -1184,6 +1305,49 @@ resource "aws_elasticache_replication_group" "test" { `, rName) } +func testAccAWSElasticacheReplicationGroupConfig_MultiAZ_NoAutomaticFailover(rName string) string { + return fmt.Sprintf(` +resource "aws_elasticache_replication_group" "test" { + replication_group_id = %[1]q + replication_group_description = "test description" + number_cache_clusters = 1 + node_type = "cache.t3.small" + automatic_failover_enabled = false + multi_az_enabled = true +} +`, rName) +} + +func testAccAWSElasticacheReplicationGroupConfig_MultiAZOneCacheCluster_SingleNodeGroup(rName string) string { + return fmt.Sprintf(` +resource "aws_elasticache_replication_group" "test" { + replication_group_id = %[1]q + replication_group_description = "test description" + number_cache_clusters = 1 + node_type = "cache.t3.small" + automatic_failover_enabled = true + multi_az_enabled = true +} +`, rName) +} + +func testAccAWSElasticacheReplicationGroupConfig_MultiAZOneCacheCluster_ClusterMode(rName string) string { + return fmt.Sprintf(` +resource "aws_elasticache_replication_group" "test" { + replication_group_id = %[1]q + replication_group_description = "test description" + node_type = "cache.t3.small" + automatic_failover_enabled = true + multi_az_enabled = true + + cluster_mode { + num_node_groups = 1 + replicas_per_node_group = 0 + } +} +`, rName) +} + func testAccAWSElasticacheReplicationGroupRedisClusterInVPCConfig(rName string) string { return fmt.Sprintf(` data "aws_availability_zones" "available" { From b34ae51947b8c8b05ee237b1ea15b8b9ac576d3e Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 14 Jan 2021 16:56:51 -0800 Subject: [PATCH 0695/1212] Updates resource documentation --- website/docs/r/elasticache_replication_group.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/elasticache_replication_group.html.markdown b/website/docs/r/elasticache_replication_group.html.markdown index e1d2b5b986f..70c5e35e315 100644 --- a/website/docs/r/elasticache_replication_group.html.markdown +++ b/website/docs/r/elasticache_replication_group.html.markdown @@ -105,7 +105,7 @@ The following arguments are supported: * `number_cache_clusters` - (Optional) The number of cache clusters (primary and replicas) this replication group will have. If Multi-AZ is enabled, the value of this parameter must be at least 2. Updates will occur before other modifications. One of `number_cache_clusters` or `cluster_mode` is required. * `node_type` - (Required) The compute and memory capacity of the nodes in the node group. * `automatic_failover_enabled` - (Optional) Specifies whether a read-only replica will be automatically promoted to read/write primary if the existing primary fails. If true, Multi-AZ is enabled for this replication group. If false, Multi-AZ is disabled for this replication group. 
Must be enabled for Redis (cluster mode enabled) replication groups. Defaults to `false`. -* `multi_az_enabled` - (Optional) Specifies whether to enable Multi-AZ Support for the replication group. Defaults to `false`. +* `multi_az_enabled` - (Optional) Specifies whether to enable Multi-AZ Support for the replication group. If `true`, `automatic_failover_enabled` must also be enabled. Defaults to `false`. * `auto_minor_version_upgrade` - (Optional) Specifies whether a minor engine upgrades will be applied automatically to the underlying Cache Cluster instances during the maintenance window. This parameter is currently not supported by the AWS API. Defaults to `true`. * `availability_zones` - (Optional) A list of EC2 availability zones in which the replication group's cache clusters will be created. The order of the availability zones in the list is not important. * `engine` - (Optional) The name of the cache engine to be used for the clusters in this replication group. The only valid value is `redis`. From 3e063765c858aab327517abb301923aa33c50db0 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 14 Jan 2021 16:57:18 -0800 Subject: [PATCH 0696/1212] Updates data source --- ...ource_aws_elasticache_replication_group.go | 21 +++++++++- ..._aws_elasticache_replication_group_test.go | 39 +++++++++++++++++++ ...lasticache_replication_group.html.markdown | 3 +- 3 files changed, 60 insertions(+), 3 deletions(-) diff --git a/aws/data_source_aws_elasticache_replication_group.go b/aws/data_source_aws_elasticache_replication_group.go index 2cc8a21a47e..d7cbb780750 100644 --- a/aws/data_source_aws_elasticache_replication_group.go +++ b/aws/data_source_aws_elasticache_replication_group.go @@ -3,6 +3,7 @@ package aws import ( "fmt" "log" + "strings" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elasticache" @@ -54,6 +55,10 @@ func dataSourceAwsElasticacheReplicationGroup() *schema.Resource { Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + "multi_az_enabled": { + Type: schema.TypeBool, + Computed: true, + }, "node_type": { Type: schema.TypeString, Computed: true, @@ -78,7 +83,6 @@ func dataSourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta i ReplicationGroupId: aws.String(groupID), } - log.Printf("[DEBUG] Reading ElastiCache Replication Group: %s", input) resp, err := conn.DescribeReplicationGroups(input) if err != nil { if isAWSErr(err, elasticache.ErrCodeReplicationGroupNotFoundFault, "") { @@ -96,6 +100,7 @@ func dataSourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta i d.SetId(aws.StringValue(rg.ReplicationGroupId)) d.Set("replication_group_description", rg.Description) d.Set("auth_token_enabled", rg.AuthTokenEnabled) + if rg.AutomaticFailover != nil { switch aws.StringValue(rg.AutomaticFailover) { case elasticache.AutomaticFailoverStatusDisabled, elasticache.AutomaticFailoverStatusDisabling: @@ -104,13 +109,25 @@ func dataSourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta i d.Set("automatic_failover_enabled", true) } } + + if rg.MultiAZ != nil { + switch strings.ToLower(aws.StringValue(rg.MultiAZ)) { + case elasticache.MultiAZStatusEnabled: + d.Set("multi_az_enabled", true) + case elasticache.MultiAZStatusDisabled: + d.Set("multi_az_enabled", false) + default: + log.Printf("Unknown MultiAZ state %q", aws.StringValue(rg.MultiAZ)) + } + } + if rg.ConfigurationEndpoint != nil { d.Set("port", rg.ConfigurationEndpoint.Port) d.Set("configuration_endpoint_address", rg.ConfigurationEndpoint.Address) } else 
{ if rg.NodeGroups == nil { d.SetId("") - return fmt.Errorf("Elasticache Replication Group (%s) doesn't have node groups.", aws.StringValue(rg.ReplicationGroupId)) + return fmt.Errorf("Elasticache Replication Group (%s) doesn't have node groups", aws.StringValue(rg.ReplicationGroupId)) } d.Set("port", rg.NodeGroups[0].PrimaryEndpoint.Port) d.Set("primary_endpoint_address", rg.NodeGroups[0].PrimaryEndpoint.Address) diff --git a/aws/data_source_aws_elasticache_replication_group_test.go b/aws/data_source_aws_elasticache_replication_group_test.go index 5e04cb7d75b..58c09cd398c 100644 --- a/aws/data_source_aws_elasticache_replication_group_test.go +++ b/aws/data_source_aws_elasticache_replication_group_test.go @@ -23,6 +23,7 @@ func TestAccDataSourceAwsElasticacheReplicationGroup_basic(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(dataSourceName, "auth_token_enabled", "false"), resource.TestCheckResourceAttrPair(dataSourceName, "automatic_failover_enabled", resourceName, "automatic_failover_enabled"), + resource.TestCheckResourceAttrPair(dataSourceName, "multi_az_enabled", resourceName, "multi_az_enabled"), resource.TestCheckResourceAttrPair(dataSourceName, "member_clusters.#", resourceName, "member_clusters.#"), resource.TestCheckResourceAttrPair(dataSourceName, "node_type", resourceName, "node_type"), resource.TestCheckResourceAttrPair(dataSourceName, "number_cache_clusters", resourceName, "number_cache_clusters"), @@ -52,6 +53,7 @@ func TestAccDataSourceAwsElasticacheReplicationGroup_ClusterMode(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(dataSourceName, "auth_token_enabled", "false"), resource.TestCheckResourceAttrPair(dataSourceName, "automatic_failover_enabled", resourceName, "automatic_failover_enabled"), + resource.TestCheckResourceAttrPair(dataSourceName, "multi_az_enabled", resourceName, "multi_az_enabled"), resource.TestCheckResourceAttrPair(dataSourceName, "configuration_endpoint_address", resourceName, "configuration_endpoint_address"), resource.TestCheckResourceAttrPair(dataSourceName, "node_type", resourceName, "node_type"), resource.TestCheckResourceAttrPair(dataSourceName, "port", resourceName, "port"), @@ -63,6 +65,26 @@ func TestAccDataSourceAwsElasticacheReplicationGroup_ClusterMode(t *testing.T) { }) } +func TestAccDataSourceAwsElasticacheReplicationGroup_MultiAZ(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_elasticache_replication_group.test" + dataSourceName := "data.aws_elasticache_replication_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsElasticacheReplicationGroupConfig_MultiAZ(rName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "automatic_failover_enabled", resourceName, "automatic_failover_enabled"), + resource.TestCheckResourceAttrPair(dataSourceName, "multi_az_enabled", resourceName, "multi_az_enabled"), + ), + }, + }, + }) +} + func TestAccDataSourceAwsElasticacheReplicationGroup_NonExistent(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ @@ -117,6 +139,23 @@ data "aws_elasticache_replication_group" "test" { `, rName) } +func testAccDataSourceAwsElasticacheReplicationGroupConfig_MultiAZ(rName string) string { + return fmt.Sprintf(` +resource "aws_elasticache_replication_group" "test" { + 
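+  # Multi-AZ requires automatic_failover_enabled = true and, outside
+  # cluster mode, at least two cache clusters (enforced by the
+  # CustomizeDiff added earlier in this series).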
replication_group_id = %[1]q + replication_group_description = "test description" + node_type = "cache.t3.small" + number_cache_clusters = 2 + automatic_failover_enabled = true + multi_az_enabled = true +} + +data "aws_elasticache_replication_group" "test" { + replication_group_id = aws_elasticache_replication_group.test.replication_group_id +} +`, rName) +} + const testAccDataSourceAwsElasticacheReplicationGroupConfig_NonExistent = ` data "aws_elasticache_replication_group" "test" { replication_group_id = "tf-acc-test-nonexistent" diff --git a/website/docs/d/elasticache_replication_group.html.markdown b/website/docs/d/elasticache_replication_group.html.markdown index 85edc90efb8..238ce83713a 100644 --- a/website/docs/d/elasticache_replication_group.html.markdown +++ b/website/docs/d/elasticache_replication_group.html.markdown @@ -30,11 +30,12 @@ In addition to all arguments above, the following attributes are exported: * `replication_group_id` - The identifier for the replication group. * `replication_group_description` - The description of the replication group. -* `auth_token_enabled` - A flag that enables using an AuthToken (password) when issuing Redis commands. +* `auth_token_enabled` - Specifies whether an AuthToken (password) is enabled. * `automatic_failover_enabled` - A flag whether a read-only replica will be automatically promoted to read/write primary if the existing primary fails. * `node_type` – The cluster node type. * `number_cache_clusters` – The number of cache clusters that the replication group has. * `member_clusters` - The identifiers of all the nodes that are part of this replication group. +* `multi_az_enabled` - Specifies whether Multi-AZ Support is enabled for the replication group. * `snapshot_window` - The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard). * `snapshot_retention_limit` - The number of days for which ElastiCache retains automatic cache cluster snapshots before deleting them. * `port` – The port number on which the configuration endpoint will accept connections. 
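The MultiAZ-to-boolean switch now appears in both the resource and the data source read functions. A shared helper along the following lines could remove that duplication — a minimal sketch only, assuming the aws, elasticache, and strings imports already used in these files; the name flattenElastiCacheMultiAZEnabled is hypothetical and does not exist in the provider:

    // flattenElastiCacheMultiAZEnabled maps the API's MultiAZ status string
    // onto the boolean stored in Terraform state. The second return value
    // reports whether the status was recognized, so callers can log unknown
    // values instead of silently defaulting.
    func flattenElastiCacheMultiAZEnabled(status *string) (bool, bool) {
    	switch strings.ToLower(aws.StringValue(status)) {
    	case elasticache.MultiAZStatusEnabled:
    		return true, true
    	case elasticache.MultiAZStatusDisabled:
    		return false, true
    	default:
    		return false, false
    	}
    }

Both read paths would then collapse to a single call plus one log statement for the unrecognized case.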
From b0a7eace725f11573f767410b968baf655411259 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 14 Jan 2021 16:57:32 -0800 Subject: [PATCH 0697/1212] Cleanup --- ...ource_aws_elasticache_replication_group.go | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index b34acb1eda8..9c92335d473 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -486,18 +486,18 @@ func resourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta int case elasticache.AutomaticFailoverStatusEnabled, elasticache.AutomaticFailoverStatusEnabling: d.Set("automatic_failover_enabled", true) default: - log.Printf("Unknown AutomaticFailover state %s", aws.StringValue(rgp.AutomaticFailover)) + log.Printf("Unknown AutomaticFailover state %q", aws.StringValue(rgp.AutomaticFailover)) } } if rgp.MultiAZ != nil { - switch strings.ToLower(*rgp.MultiAZ) { - case "enabled": + switch strings.ToLower(aws.StringValue(rgp.MultiAZ)) { + case elasticache.MultiAZStatusEnabled: d.Set("multi_az_enabled", true) - case "disabled": + case elasticache.MultiAZStatusDisabled: d.Set("multi_az_enabled", false) default: - log.Printf("Unknown MultiAZ state %s", *rgp.MultiAZ) + log.Printf("Unknown MultiAZ state %q", aws.StringValue(rgp.MultiAZ)) } } @@ -883,10 +883,10 @@ func resourceAwsElasticacheReplicationGroupDelete(d *schema.ResourceData, meta i return nil } -func cacheReplicationGroupStateRefreshFunc(conn *elasticache.ElastiCache, replicationGroupId string, pending []string) resource.StateRefreshFunc { +func cacheReplicationGroupStateRefreshFunc(conn *elasticache.ElastiCache, replicationGroupID string, pending []string) resource.StateRefreshFunc { return func() (interface{}, string, error) { resp, err := conn.DescribeReplicationGroups(&elasticache.DescribeReplicationGroupsInput{ - ReplicationGroupId: aws.String(replicationGroupId), + ReplicationGroupId: aws.String(replicationGroupID), }) if err != nil { if isAWSErr(err, elasticache.ErrCodeReplicationGroupNotFoundFault, "") { @@ -899,27 +899,27 @@ func cacheReplicationGroupStateRefreshFunc(conn *elasticache.ElastiCache, replic } if len(resp.ReplicationGroups) == 0 { - return nil, "", fmt.Errorf("Error: no Cache Replication Groups found for id (%s)", replicationGroupId) + return nil, "", fmt.Errorf("Error: no Cache Replication Groups found for id (%s)", replicationGroupID) } var rg *elasticache.ReplicationGroup for _, replicationGroup := range resp.ReplicationGroups { rgID := aws.StringValue(replicationGroup.ReplicationGroupId) - if rgID == replicationGroupId { + if rgID == replicationGroupID { log.Printf("[DEBUG] Found matching ElastiCache Replication Group: %s", rgID) rg = replicationGroup } } if rg == nil { - return nil, "", fmt.Errorf("Error: no matching ElastiCache Replication Group for id (%s)", replicationGroupId) + return nil, "", fmt.Errorf("Error: no matching ElastiCache Replication Group for id (%s)", replicationGroupID) } - log.Printf("[DEBUG] ElastiCache Replication Group (%s) status: %v", replicationGroupId, aws.StringValue(rg.Status)) + log.Printf("[DEBUG] ElastiCache Replication Group (%s) status: %v", replicationGroupID, aws.StringValue(rg.Status)) // return the current state if it's in the pending array for _, p := range pending { - log.Printf("[DEBUG] ElastiCache: checking pending state (%s) for Replication Group (%s), Replication Group status: %s", 
pending, replicationGroupId, aws.StringValue(rg.Status)) + log.Printf("[DEBUG] ElastiCache: checking pending state (%s) for Replication Group (%s), Replication Group status: %s", pending, replicationGroupID, aws.StringValue(rg.Status)) s := aws.StringValue(rg.Status) if p == s { log.Printf("[DEBUG] Return with status: %v", aws.StringValue(rg.Status)) From 4e6427ba3ced8d407e03511bc84dd824d0e64f6c Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Fri, 15 Jan 2021 19:55:47 +0900 Subject: [PATCH 0698/1212] Add owner_id to resource_aws_ec2_capacity_reservation --- aws/resource_aws_ec2_capacity_reservation.go | 4 ++++ aws/resource_aws_ec2_capacity_reservation_test.go | 1 + website/docs/r/ec2_capacity_reservation.html.markdown | 1 + 3 files changed, 6 insertions(+) diff --git a/aws/resource_aws_ec2_capacity_reservation.go b/aws/resource_aws_ec2_capacity_reservation.go index d07ca865ab0..bcff136b996 100644 --- a/aws/resource_aws_ec2_capacity_reservation.go +++ b/aws/resource_aws_ec2_capacity_reservation.go @@ -97,6 +97,10 @@ func resourceAwsEc2CapacityReservation() *schema.Resource { Required: true, ForceNew: true, }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, "tags": tagsSchema(), "tenancy": { Type: schema.TypeString, diff --git a/aws/resource_aws_ec2_capacity_reservation_test.go b/aws/resource_aws_ec2_capacity_reservation_test.go index 103be132942..2354ba70d53 100644 --- a/aws/resource_aws_ec2_capacity_reservation_test.go +++ b/aws/resource_aws_ec2_capacity_reservation_test.go @@ -88,6 +88,7 @@ func TestAccAWSEc2CapacityReservation_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "instance_match_criteria", "open"), resource.TestCheckResourceAttr(resourceName, "instance_platform", "Linux/UNIX"), resource.TestCheckResourceAttr(resourceName, "instance_type", "t2.micro"), + resource.TestCheckResourceAttrSet(resourceName, "owner_id"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestCheckResourceAttr(resourceName, "tenancy", "default"), ), diff --git a/website/docs/r/ec2_capacity_reservation.html.markdown b/website/docs/r/ec2_capacity_reservation.html.markdown index 085442978a1..7ebd333c696 100644 --- a/website/docs/r/ec2_capacity_reservation.html.markdown +++ b/website/docs/r/ec2_capacity_reservation.html.markdown @@ -42,6 +42,7 @@ The following arguments are supported: In addition to all arguments above, the following attributes are exported: * `id` - The Capacity Reservation ID. +* `owner_id` - The ID of the AWS account that owns the Capacity Reservation. * `arn` - The ARN of the Capacity Reservation. 
## Import From 5faea324204a1fd66e450fd627f61f73c5f0efc6 Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Fri, 15 Jan 2021 19:56:16 +0900 Subject: [PATCH 0699/1212] Fix resource_aws_ec2_capacity_reservation to set ARN from its API response --- aws/resource_aws_ec2_capacity_reservation.go | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/aws/resource_aws_ec2_capacity_reservation.go b/aws/resource_aws_ec2_capacity_reservation.go index bcff136b996..c7afe55c551 100644 --- a/aws/resource_aws_ec2_capacity_reservation.go +++ b/aws/resource_aws_ec2_capacity_reservation.go @@ -6,7 +6,6 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -209,22 +208,14 @@ func resourceAwsEc2CapacityReservationRead(d *schema.ResourceData, meta interfac d.Set("instance_match_criteria", reservation.InstanceMatchCriteria) d.Set("instance_platform", reservation.InstancePlatform) d.Set("instance_type", reservation.InstanceType) + d.Set("owner_id", reservation.OwnerId) if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(reservation.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { return fmt.Errorf("error setting tags: %s", err) } d.Set("tenancy", reservation.Tenancy) - - arn := arn.ARN{ - Partition: meta.(*AWSClient).partition, - Service: "ec2", - Region: meta.(*AWSClient).region, - AccountID: meta.(*AWSClient).accountid, - Resource: fmt.Sprintf("capacity-reservation/%s", d.Id()), - }.String() - - d.Set("arn", arn) + d.Set("arn", reservation.CapacityReservationArn) return nil } From 5f831021061910146b75b46c4045f226aca3e60b Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 15 Jan 2021 11:14:58 -0500 Subject: [PATCH 0700/1212] resource/instance: Fix invalid address set --- aws/resource_aws_instance_test.go | 44 +++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/aws/resource_aws_instance_test.go b/aws/resource_aws_instance_test.go index 4fc3687923c..84e10fdaebf 100644 --- a/aws/resource_aws_instance_test.go +++ b/aws/resource_aws_instance_test.go @@ -1220,6 +1220,33 @@ func TestAccAWSInstance_blockDeviceTags_ebsAndRoot(t *testing.T) { }) } +func TestAccAWSInstance_blockDeviceTags_noDevices(t *testing.T) { + // https://github.com/hashicorp/terraform-provider-aws/issues/17125 + var v ec2.Instance + resourceName := "aws_instance.test" + rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccInstanceConfigBlockDeviceTagsNoTags(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists(resourceName, &v), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ephemeral_block_device"}, + }, + }, + }) +} + func TestAccAWSInstance_instanceProfileChange(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" @@ -4207,6 +4234,23 @@ resource "aws_instance" "test" { `) } +func testAccInstanceConfigBlockDeviceTagsNoTags(rName string) string { + // https://github.com/hashicorp/terraform-provider-aws/issues/17125 + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + 
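+		// testAccAvailableEc2InstanceTypeForRegion emits an
+		// aws_ec2_instance_type_offering data source that resolves to one of
+		// the listed instance types offered in the current region, keeping
+		// this acceptance test portable across regions.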
testAccAvailableEc2InstanceTypeForRegion("t1.micro", "m1.small", "t3.micro", "t2.micro"), + fmt.Sprintf(` +resource "aws_instance" "test" { + ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + + tags = { + Name = %[1]q + } +} +`, rName)) +} + func testAccInstanceConfigBlockDeviceTagsEBSTagsConflict() string { return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { From 3d22c961bc2ce420354eb5d4a4e8aa12fbaeba39 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 15 Jan 2021 12:55:12 -0500 Subject: [PATCH 0701/1212] data source/instance: Add block device tags --- aws/data_source_aws_instance.go | 4 ++ aws/data_source_aws_instance_test.go | 56 ++++++++++++++++++++++++++++ aws/resource_aws_instance.go | 2 +- aws/resource_aws_instance_test.go | 44 ---------------------- 4 files changed, 61 insertions(+), 45 deletions(-) diff --git a/aws/data_source_aws_instance.go b/aws/data_source_aws_instance.go index ad5f55c6d53..171de746cef 100644 --- a/aws/data_source_aws_instance.go +++ b/aws/data_source_aws_instance.go @@ -208,6 +208,8 @@ func dataSourceAwsInstance() *schema.Resource { Computed: true, }, + "tags": tagsSchema(), + "throughput": { Type: schema.TypeInt, Computed: true, @@ -268,6 +270,8 @@ func dataSourceAwsInstance() *schema.Resource { Computed: true, }, + "tags": tagsSchema(), + "throughput": { Type: schema.TypeInt, Computed: true, diff --git a/aws/data_source_aws_instance_test.go b/aws/data_source_aws_instance_test.go index ae488ca058f..ae5831ebbce 100644 --- a/aws/data_source_aws_instance_test.go +++ b/aws/data_source_aws_instance_test.go @@ -533,6 +533,25 @@ func TestAccAWSInstanceDataSource_enclaveOptions(t *testing.T) { }) } +func TestAccAWSInstanceDataSource_blockDeviceTags(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_instance.test" + datasourceName := "data.aws_instance.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccInstanceDataSourceConfig_blockDeviceTags(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(datasourceName, "instance_type", resourceName, "instance_type"), + ), + }, + }, + }) +} + // Lookup based on InstanceID var testAccInstanceDataSourceConfig = testAccLatestAmazonLinuxHvmEbsAmiConfig() + ` resource "aws_instance" "test" { @@ -1016,3 +1035,40 @@ data "aws_instance" "test" { } `, rName)) } + +func testAccInstanceDataSourceConfig_blockDeviceTags(rName string) string { + return composeConfig( + testAccLatestAmazonLinuxHvmEbsAmiConfig(), + testAccAvailableEc2InstanceTypeForRegion("t3.micro", "t2.micro"), + fmt.Sprintf(` +resource "aws_instance" "test" { + ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id + instance_type = data.aws_ec2_instance_type_offering.available.instance_type + + tags = { + Name = %[1]q + } + + ebs_block_device { + device_name = "/dev/xvdc" + volume_size = 10 + + tags = { + Name = %[1]q + Factum = "SapereAude" + } + } + + root_block_device { + tags = { + Name = %[1]q + Factum = "VincitQuiSeVincit" + } + } +} + +data "aws_instance" "test" { + instance_id = aws_instance.test.id +} +`, rName)) +} diff --git a/aws/resource_aws_instance.go b/aws/resource_aws_instance.go index 925861a4489..bc5a81d0711 100644 --- a/aws/resource_aws_instance.go +++ b/aws/resource_aws_instance.go @@ -1790,7 +1790,7 @@ func readBlockDevicesFromInstance(d 
*schema.ResourceData, instance *ec2.Instance if instanceBd.DeviceName != nil { bd["device_name"] = aws.StringValue(instanceBd.DeviceName) } - if _, ok := d.GetOk("volume_tags"); !ok && vol.Tags != nil { + if v, ok := d.GetOk("volume_tags"); (!ok || v == nil) && vol.Tags != nil { bd["tags"] = keyvaluetags.Ec2KeyValueTags(vol.Tags).IgnoreAws().Map() } diff --git a/aws/resource_aws_instance_test.go b/aws/resource_aws_instance_test.go index 84e10fdaebf..4fc3687923c 100644 --- a/aws/resource_aws_instance_test.go +++ b/aws/resource_aws_instance_test.go @@ -1220,33 +1220,6 @@ func TestAccAWSInstance_blockDeviceTags_ebsAndRoot(t *testing.T) { }) } -func TestAccAWSInstance_blockDeviceTags_noDevices(t *testing.T) { - // https://github.com/hashicorp/terraform-provider-aws/issues/17125 - var v ec2.Instance - resourceName := "aws_instance.test" - rName := fmt.Sprintf("tf-testacc-instance-%s", acctest.RandString(12)) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceConfigBlockDeviceTagsNoTags(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resourceName, &v), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"ephemeral_block_device"}, - }, - }, - }) -} - func TestAccAWSInstance_instanceProfileChange(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" @@ -4234,23 +4207,6 @@ resource "aws_instance" "test" { `) } -func testAccInstanceConfigBlockDeviceTagsNoTags(rName string) string { - // https://github.com/hashicorp/terraform-provider-aws/issues/17125 - return composeConfig( - testAccLatestAmazonLinuxHvmEbsAmiConfig(), - testAccAvailableEc2InstanceTypeForRegion("t1.micro", "m1.small", "t3.micro", "t2.micro"), - fmt.Sprintf(` -resource "aws_instance" "test" { - ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id - instance_type = data.aws_ec2_instance_type_offering.available.instance_type - - tags = { - Name = %[1]q - } -} -`, rName)) -} - func testAccInstanceConfigBlockDeviceTagsEBSTagsConflict() string { return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` resource "aws_instance" "test" { From 7e8995108ab85d5a0c1fbb1947b05396cb1c60d1 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 15 Jan 2021 13:04:27 -0500 Subject: [PATCH 0702/1212] resource/instance: Revert volume_tags logic --- aws/resource_aws_instance.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_instance.go b/aws/resource_aws_instance.go index bc5a81d0711..925861a4489 100644 --- a/aws/resource_aws_instance.go +++ b/aws/resource_aws_instance.go @@ -1790,7 +1790,7 @@ func readBlockDevicesFromInstance(d *schema.ResourceData, instance *ec2.Instance if instanceBd.DeviceName != nil { bd["device_name"] = aws.StringValue(instanceBd.DeviceName) } - if v, ok := d.GetOk("volume_tags"); (!ok || v == nil) && vol.Tags != nil { + if _, ok := d.GetOk("volume_tags"); !ok && vol.Tags != nil { bd["tags"] = keyvaluetags.Ec2KeyValueTags(vol.Tags).IgnoreAws().Map() } From 6202907fa47eb1e5624f4711023eed61497da735 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 15 Jan 2021 13:11:39 -0500 Subject: [PATCH 0703/1212] data source/instance: Fix tags to be computed --- aws/data_source_aws_instance.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/data_source_aws_instance.go 
b/aws/data_source_aws_instance.go index 171de746cef..3eb17e5c359 100644 --- a/aws/data_source_aws_instance.go +++ b/aws/data_source_aws_instance.go @@ -208,7 +208,7 @@ func dataSourceAwsInstance() *schema.Resource { Computed: true, }, - "tags": tagsSchema(), + "tags": tagsSchemaComputed(), "throughput": { Type: schema.TypeInt, @@ -270,7 +270,7 @@ func dataSourceAwsInstance() *schema.Resource { Computed: true, }, - "tags": tagsSchema(), + "tags": tagsSchemaComputed(), "throughput": { Type: schema.TypeInt, From 14c19280a95e6cadf6aa6d5dfbff79bed9a87ca4 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 15 Jan 2021 13:29:07 -0500 Subject: [PATCH 0704/1212] resource/instance: Adjust volume tags logic --- aws/resource_aws_instance.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_instance.go b/aws/resource_aws_instance.go index 925861a4489..9cd45cf3818 100644 --- a/aws/resource_aws_instance.go +++ b/aws/resource_aws_instance.go @@ -1790,7 +1790,7 @@ func readBlockDevicesFromInstance(d *schema.ResourceData, instance *ec2.Instance if instanceBd.DeviceName != nil { bd["device_name"] = aws.StringValue(instanceBd.DeviceName) } - if _, ok := d.GetOk("volume_tags"); !ok && vol.Tags != nil { + if v, ok := d.GetOk("volume_tags"); (!ok || v == nil || len(v.(map[string]interface{})) == 0) && vol.Tags != nil { bd["tags"] = keyvaluetags.Ec2KeyValueTags(vol.Tags).IgnoreAws().Map() } From 31539ee62d8c58d6c3116aaaf6938da20ba6fd3c Mon Sep 17 00:00:00 2001 From: Dirk Avery <31492422+YakDriver@users.noreply.github.com> Date: Fri, 15 Jan 2021 13:48:28 -0500 Subject: [PATCH 0705/1212] Update CHANGELOG.md --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a5ce9208e5f..ab390e670bf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +## 3.24.1 (Unreleased) + +BUX FIXES + +* data-source/instance: Fix EBS and root block device tags issue with "Invalid address to set" ([#17136](https://github.com/hashicorp/terraform-provider-aws/issues/17136)) + ## 3.24.0 (January 14, 2021) FEATURES From 9a94e4fe535ba7986c1e4985aa68f656d76194c0 Mon Sep 17 00:00:00 2001 From: tf-release-bot Date: Fri, 15 Jan 2021 18:58:59 +0000 Subject: [PATCH 0706/1212] v3.24.1 --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ab390e670bf..9ebdc1746d5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## 3.24.1 (Unreleased) +## 3.24.1 (January 15, 2021) BUX FIXES From aa957b71dbb0e4560a183a981521be67137c8c99 Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Fri, 15 Jan 2021 11:21:14 -0800 Subject: [PATCH 0707/1212] Update CHANGELOG.md after release --- CHANGELOG.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9ebdc1746d5..2e6bab32377 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,8 @@ +## 3.25.0 (Unreleased) + ## 3.24.1 (January 15, 2021) -BUX FIXES +BUG FIXES * data-source/instance: Fix EBS and root block device tags issue with "Invalid address to set" ([#17136](https://github.com/hashicorp/terraform-provider-aws/issues/17136)) @@ -38,7 +40,7 @@ ENHANCEMENTS * resource/aws_instance: Add `tags` parameter to `root_block_device`, `ebs_block_device` blocks.([#15474](https://github.com/hashicorp/terraform-provider-aws/issues/15474)) * resource/aws_workspaces_directory: Add access properties ([#16688](https://github.com/hashicorp/terraform-provider-aws/issues/16688)) -BUX FIXES +BUG FIXES * 
resource/aws_appmesh_route: Allow an empty `match` attribute to specified for a `grpc_route`, indicating that any service should be matched ([#16867](https://github.com/hashicorp/terraform-provider-aws/issues/16867)) * resource/aws_db_instance: Correctly validate `final_snapshot_identifier` argument at plan-time ([#16885](https://github.com/hashicorp/terraform-provider-aws/issues/16885)) From 7021388e1deaacd014adf314e51aaaec08196150 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 16 Jan 2021 00:54:44 +0200 Subject: [PATCH 0708/1212] descope s3 permissions + retry on s3 invalid issue --- aws/resource_aws_sagemaker_feature_group.go | 3 + ...source_aws_sagemaker_feature_group_test.go | 63 +++++++++---------- 2 files changed, 32 insertions(+), 34 deletions(-) diff --git a/aws/resource_aws_sagemaker_feature_group.go b/aws/resource_aws_sagemaker_feature_group.go index e6a05649b0d..56638c201c9 100644 --- a/aws/resource_aws_sagemaker_feature_group.go +++ b/aws/resource_aws_sagemaker_feature_group.go @@ -229,6 +229,9 @@ func resourceAwsSagemakerFeatureGroupCreate(d *schema.ResourceData, meta interfa if isAWSErr(err, "ValidationException", "The execution role ARN is invalid.") { return resource.RetryableError(err) } + if isAWSErr(err, "ValidationException", "Invalid S3Uri provided") { + return resource.RetryableError(err) + } return resource.NonRetryableError(err) } diff --git a/aws/resource_aws_sagemaker_feature_group_test.go b/aws/resource_aws_sagemaker_feature_group_test.go index cf2151b8d19..c2a8a5a8d58 100644 --- a/aws/resource_aws_sagemaker_feature_group_test.go +++ b/aws/resource_aws_sagemaker_feature_group_test.go @@ -282,7 +282,6 @@ func TestAccAWSSagemakerFeatureGroup_offlineConfig_createCatalog(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "offline_store_config.0.data_catalog_config.0.catalog", "AwsDataCatalog"), resource.TestCheckResourceAttr(resourceName, "offline_store_config.0.data_catalog_config.0.database", "sagemaker_featurestore"), resource.TestMatchResourceAttr(resourceName, "offline_store_config.0.data_catalog_config.0.table_name", regexp.MustCompile(fmt.Sprintf("^%s-", rName))), - // testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), ), }, { @@ -415,6 +414,16 @@ data "aws_iam_policy_document" "test" { } } } +`, rName) +} + +func testAccAWSSagemakerFeatureGroupOfflineBaseConfig(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = "private" + force_destroy = true +} resource "aws_iam_role_policy_attachment" "test" { role = aws_iam_role.test.name @@ -422,18 +431,19 @@ resource "aws_iam_role_policy_attachment" "test" { } resource "aws_iam_policy" "test" { - policy = < Date: Fri, 15 Jan 2021 15:52:10 -0800 Subject: [PATCH 0709/1212] Uses actual ARN API parameter and adds attribute to data source --- aws/data_source_aws_elasticache_replication_group.go | 5 +++++ ...a_source_aws_elasticache_replication_group_test.go | 1 + aws/resource_aws_elasticache_replication_group.go | 11 +---------- ...resource_aws_elasticache_replication_group_test.go | 1 + .../d/elasticache_replication_group.html.markdown | 2 +- .../r/elasticache_replication_group.html.markdown | 2 +- 6 files changed, 10 insertions(+), 12 deletions(-) diff --git a/aws/data_source_aws_elasticache_replication_group.go b/aws/data_source_aws_elasticache_replication_group.go index 2cc8a21a47e..1880a52ad6e 100644 --- a/aws/data_source_aws_elasticache_replication_group.go +++ b/aws/data_source_aws_elasticache_replication_group.go @@ -21,6 +21,10 @@ 
func dataSourceAwsElasticacheReplicationGroup() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, "auth_token_enabled": { Type: schema.TypeBool, Computed: true, @@ -95,6 +99,7 @@ func dataSourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta i d.SetId(aws.StringValue(rg.ReplicationGroupId)) d.Set("replication_group_description", rg.Description) + d.Set("arn", rg.ARN) d.Set("auth_token_enabled", rg.AuthTokenEnabled) if rg.AutomaticFailover != nil { switch aws.StringValue(rg.AutomaticFailover) { diff --git a/aws/data_source_aws_elasticache_replication_group_test.go b/aws/data_source_aws_elasticache_replication_group_test.go index 5e04cb7d75b..5a4726745d5 100644 --- a/aws/data_source_aws_elasticache_replication_group_test.go +++ b/aws/data_source_aws_elasticache_replication_group_test.go @@ -22,6 +22,7 @@ func TestAccDataSourceAwsElasticacheReplicationGroup_basic(t *testing.T) { Config: testAccDataSourceAwsElasticacheReplicationGroupConfig_basic(rName), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(dataSourceName, "auth_token_enabled", "false"), + resource.TestCheckResourceAttrPair(dataSourceName, "arn", resourceName, "arn"), resource.TestCheckResourceAttrPair(dataSourceName, "automatic_failover_enabled", resourceName, "automatic_failover_enabled"), resource.TestCheckResourceAttrPair(dataSourceName, "member_clusters.#", resourceName, "member_clusters.#"), resource.TestCheckResourceAttrPair(dataSourceName, "node_type", resourceName, "node_type"), diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index b6d6137feaf..4ec2da537c4 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -462,6 +462,7 @@ func resourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta int } d.Set("cluster_enabled", rgp.ClusterEnabled) d.Set("replication_group_id", rgp.ReplicationGroupId) + d.Set("arn", rgp.ARN) if rgp.NodeGroups != nil { if len(rgp.NodeGroups[0].NodeGroupMembers) == 0 { @@ -534,16 +535,6 @@ func resourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta int } } - arn := arn.ARN{ - Partition: meta.(*AWSClient).partition, - Service: "elasticache", - Region: meta.(*AWSClient).region, - AccountID: meta.(*AWSClient).accountid, - Resource: fmt.Sprintf("replicationgroup:%s", d.Id()), - }.String() - - d.Set("arn", arn) - return nil } diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go index 4d710f0a5fa..59d623616e2 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -71,6 +71,7 @@ func TestAccAWSElasticacheReplicationGroup_basic(t *testing.T) { Config: testAccAWSElasticacheReplicationGroupConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "elasticache", fmt.Sprintf("replicationgroup:%s", rName)), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "2"), resource.TestCheckResourceAttr(resourceName, "auto_minor_version_upgrade", "false"), diff --git a/website/docs/d/elasticache_replication_group.html.markdown b/website/docs/d/elasticache_replication_group.html.markdown 
index 85edc90efb8..f820b2511b0 100644 --- a/website/docs/d/elasticache_replication_group.html.markdown +++ b/website/docs/d/elasticache_replication_group.html.markdown @@ -28,8 +28,8 @@ The following arguments are supported: In addition to all arguments above, the following attributes are exported: -* `replication_group_id` - The identifier for the replication group. * `replication_group_description` - The description of the replication group. +* `arn` - The Amazon Resource Name (ARN) of the created ElastiCache Replication Group. * `auth_token_enabled` - A flag that enables using an AuthToken (password) when issuing Redis commands. * `automatic_failover_enabled` - A flag whether a read-only replica will be automatically promoted to read/write primary if the existing primary fails. * `node_type` – The cluster node type. diff --git a/website/docs/r/elasticache_replication_group.html.markdown b/website/docs/r/elasticache_replication_group.html.markdown index c80fcd4771c..9aa04cf0c14 100644 --- a/website/docs/r/elasticache_replication_group.html.markdown +++ b/website/docs/r/elasticache_replication_group.html.markdown @@ -149,7 +149,7 @@ Cluster Mode (`cluster_mode`) supports the following: In addition to all arguments above, the following attributes are exported: -* `arn` - The ARN of the created ElastiCache Replication Group. +* `arn` - The Amazon Resource Name (ARN) of the created ElastiCache Replication Group. * `id` - The ID of the ElastiCache Replication Group. * `cluster_enabled` - Indicates if cluster mode is enabled. * `configuration_endpoint_address` - The address of the replication group configuration endpoint when cluster mode is enabled. From 73e0fe06df88bba281b08d073e81708c53c5a2b0 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Fri, 15 Jan 2021 16:08:48 -0800 Subject: [PATCH 0710/1212] Add changelog entry for #15348 --- .changelog/15348.txt | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .changelog/15348.txt diff --git a/.changelog/15348.txt b/.changelog/15348.txt new file mode 100644 index 00000000000..2479b5f65d8 --- /dev/null +++ b/.changelog/15348.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +data-source/aws_elasticache_replication_group: Adds `arn` attribute +``` + +```release-note:enhancement +resource/aws_elasticache_replication_group: Adds `arn` attribute +``` \ No newline at end of file From 5766c039557ef7b9ccae6fcaa9535116fe5e03d1 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Fri, 15 Jan 2021 21:18:10 -0500 Subject: [PATCH 0711/1212] update rule_variables documentation per AWS API docs --- website/docs/r/networkfirewall_rule_group.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/networkfirewall_rule_group.html.markdown b/website/docs/r/networkfirewall_rule_group.html.markdown index dff13ff2094..82873eb8b66 100644 --- a/website/docs/r/networkfirewall_rule_group.html.markdown +++ b/website/docs/r/networkfirewall_rule_group.html.markdown @@ -166,7 +166,7 @@ The following arguments are supported: The `rule_group` block supports the following argument: -* `rule_variables` - (Optional) A configuration block that defines additional settings available to use in the rules defined in the rule group. See [Rule Variables](#rule-variables) below for details. +* `rule_variables` - (Optional) A configuration block that defines additional settings available to use in the rules defined in the rule group. Can only be specified for **stateful** rule groups. See [Rule Variables](#rule-variables) below for details. 
* `rules_source` - (Required) A configuration block that defines the stateful or stateless rules for the rule group. See [Rules Source](#rules-source) below for details. From f9ee8799f7eb6d7bf8c477130cad3ffe885f6700 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 16 Jan 2021 13:01:07 +0200 Subject: [PATCH 0712/1212] use arn --- aws/resource_aws_sagemaker_feature_group_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_sagemaker_feature_group_test.go b/aws/resource_aws_sagemaker_feature_group_test.go index c2a8a5a8d58..774b44dd8c9 100644 --- a/aws/resource_aws_sagemaker_feature_group_test.go +++ b/aws/resource_aws_sagemaker_feature_group_test.go @@ -436,8 +436,8 @@ resource "aws_iam_policy" "test" { "Statement" : [{ "Effect" : "Allow", "Resource" : [ - "arn:aws:s3:::${aws_s3_bucket.test.bucket}", - "arn:aws:s3:::${aws_s3_bucket.test.bucket}/*" + "${aws_s3_bucket.test.arn}", + "${aws_s3_bucket.test.arn}/*" ], "Action" : [ "s3:*" From 027aad0482bba31abeb7907abf6f443238291532 Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Sun, 17 Jan 2021 09:14:37 +0900 Subject: [PATCH 0713/1212] Use testAccCheckResourceAttrAccountID test helper for ec2_capacity_reservation test --- aws/resource_aws_ec2_capacity_reservation_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_ec2_capacity_reservation_test.go b/aws/resource_aws_ec2_capacity_reservation_test.go index 2354ba70d53..3a64e9d5107 100644 --- a/aws/resource_aws_ec2_capacity_reservation_test.go +++ b/aws/resource_aws_ec2_capacity_reservation_test.go @@ -88,7 +88,7 @@ func TestAccAWSEc2CapacityReservation_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "instance_match_criteria", "open"), resource.TestCheckResourceAttr(resourceName, "instance_platform", "Linux/UNIX"), resource.TestCheckResourceAttr(resourceName, "instance_type", "t2.micro"), - resource.TestCheckResourceAttrSet(resourceName, "owner_id"), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestCheckResourceAttr(resourceName, "tenancy", "default"), ), From de1654b992875a6e5e17eb4bc17de8a989441ae6 Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Mon, 18 Jan 2021 09:15:41 -0800 Subject: [PATCH 0714/1212] Update CHANGELOG.md for #16728 --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2e6bab32377..d256b06ded8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## 3.25.0 (Unreleased) +FEATURES + +* **New Resource:** `aws_sagemaker_feature_group` [GH-16728] + ## 3.24.1 (January 15, 2021) BUG FIXES From 87f25e511b0e90efa9150c6a60f65da0ad578bd8 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 14 Jan 2021 23:06:35 +0200 Subject: [PATCH 0715/1212] initial commit --- .../service/sagemaker/finder/finder.go | 20 + .../service/sagemaker/waiter/status.go | 29 +- .../service/sagemaker/waiter/waiter.go | 44 ++ aws/provider.go | 1 + aws/resource_aws_sagemaker_domain.go | 3 + aws/resource_aws_sagemaker_domain_test.go | 121 ----- aws/resource_aws_sagemaker_user_profile.go | 378 +++++++++++++ ...esource_aws_sagemaker_user_profile_test.go | 496 ++++++++++++++++++ 8 files changed, 970 insertions(+), 122 deletions(-) create mode 100644 aws/resource_aws_sagemaker_user_profile.go create mode 100644 aws/resource_aws_sagemaker_user_profile_test.go diff --git a/aws/internal/service/sagemaker/finder/finder.go b/aws/internal/service/sagemaker/finder/finder.go index 
9c0a55f460c..ae122f1c9ae 100644 --- a/aws/internal/service/sagemaker/finder/finder.go +++ b/aws/internal/service/sagemaker/finder/finder.go @@ -80,3 +80,23 @@ func FeatureGroupByName(conn *sagemaker.SageMaker, name string) (*sagemaker.Desc return output, nil } + +// UserProfileByName returns the user profile corresponding to the specified domain ID and user profile name. +// Returns nil if no user profile is found. +func UserProfileByName(conn *sagemaker.SageMaker, domainID, userProfileName string) (*sagemaker.DescribeUserProfileOutput, error) { + input := &sagemaker.DescribeUserProfileInput{ + DomainId: aws.String(domainID), + UserProfileName: aws.String(userProfileName), + } + + output, err := conn.DescribeUserProfile(input) + if err != nil { + return nil, err + } + + if output == nil { + return nil, nil + } + + return output, nil +} diff --git a/aws/internal/service/sagemaker/waiter/status.go b/aws/internal/service/sagemaker/waiter/status.go index 8a9baadc920..2f411a00682 100644 --- a/aws/internal/service/sagemaker/waiter/status.go +++ b/aws/internal/service/sagemaker/waiter/status.go @@ -17,6 +17,7 @@ const ( SagemakerDomainStatusNotFound = "NotFound" SagemakerFeatureGroupStatusNotFound = "NotFound" SagemakerFeatureGroupStatusUnknown = "Unknown" + SagemakerUserProfileStatusNotFound = "NotFound" ) // NotebookInstanceStatus fetches the NotebookInstance and its Status @@ -83,7 +84,7 @@ func DomainStatus(conn *sagemaker.SageMaker, domainID string) resource.StateRefr output, err := conn.DescribeDomain(input) if tfawserr.ErrMessageContains(err, "ValidationException", "RecordNotFound") { - return nil, SagemakerDomainStatusNotFound, nil + return nil, sagemaker.UserProfileStatusFailed, nil } if err != nil { @@ -117,3 +118,29 @@ func FeatureGroupStatus(conn *sagemaker.SageMaker, name string) resource.StateRe return output, aws.StringValue(output.FeatureGroupStatus), nil } } + +// UserProfileStatus fetches the UserProfile and its Status +func UserProfileStatus(conn *sagemaker.SageMaker, domainID, userProfileName string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + input := &sagemaker.DescribeUserProfileInput{ + DomainId: aws.String(domainID), + UserProfileName: aws.String(userProfileName), + } + + output, err := conn.DescribeUserProfile(input) + + if tfawserr.ErrMessageContains(err, "ValidationException", "RecordNotFound") { + return nil, SagemakerUserProfileStatusNotFound, nil + } + + if err != nil { + return nil, sagemaker.UserProfileStatusFailed, err + } + + if output == nil { + return nil, SagemakerUserProfileStatusNotFound, nil + } + + return output, aws.StringValue(output.Status), nil + } +} diff --git a/aws/internal/service/sagemaker/waiter/waiter.go b/aws/internal/service/sagemaker/waiter/waiter.go index b34bf3fcf73..5b5afa95b64 100644 --- a/aws/internal/service/sagemaker/waiter/waiter.go +++ b/aws/internal/service/sagemaker/waiter/waiter.go @@ -17,6 +17,8 @@ const ( DomainDeletedTimeout = 10 * time.Minute FeatureGroupCreatedTimeout = 10 * time.Minute FeatureGroupDeletedTimeout = 10 * time.Minute + UserProfileInServiceTimeout = 10 * time.Minute + UserProfileDeletedTimeout = 10 * time.Minute ) // NotebookInstanceInService waits for a NotebookInstance to return InService @@ -198,3 +200,45 @@ func FeatureGroupDeleted(conn *sagemaker.SageMaker, name string) (*sagemaker.Des return nil, err } + +// UserProfileInService waits for a UserProfile to return InService +func UserProfileInService(conn *sagemaker.SageMaker, domainID, userProfileName string) (*sagemaker.DescribeUserProfileOutput, error) {
+ stateConf := &resource.StateChangeConf{ + Pending: []string{ + SagemakerUserProfileStatusNotFound, + sagemaker.UserProfileStatusPending, + sagemaker.UserProfileStatusUpdating, + }, + Target: []string{sagemaker.UserProfileStatusInService}, + Refresh: UserProfileStatus(conn, domainID, userProfileName), + Timeout: UserProfileInServiceTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*sagemaker.DescribeUserProfileOutput); ok { + return output, err + } + + return nil, err +} + +// UserProfileDeleted waits for a UserProfile to return Deleted +func UserProfileDeleted(conn *sagemaker.SageMaker, domainID, userProfileName string) (*sagemaker.DescribeUserProfileOutput, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + sagemaker.UserProfileStatusDeleting, + }, + Target: []string{}, + Refresh: UserProfileStatus(conn, domainID, userProfileName), + Timeout: UserProfileDeletedTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*sagemaker.DescribeUserProfileOutput); ok { + return output, err + } + + return nil, err +} diff --git a/aws/provider.go b/aws/provider.go index 316ed6f74fd..405a3614203 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -884,6 +884,7 @@ func Provider() *schema.Provider { "aws_sagemaker_model": resourceAwsSagemakerModel(), "aws_sagemaker_notebook_instance_lifecycle_configuration": resourceAwsSagemakerNotebookInstanceLifeCycleConfiguration(), "aws_sagemaker_notebook_instance": resourceAwsSagemakerNotebookInstance(), + "aws_sagemaker_user_profile": resourceAwsSagemakerUserProfile(), "aws_secretsmanager_secret": resourceAwsSecretsManagerSecret(), "aws_secretsmanager_secret_policy": resourceAwsSecretsManagerSecretPolicy(), "aws_secretsmanager_secret_version": resourceAwsSecretsManagerSecretVersion(), diff --git a/aws/resource_aws_sagemaker_domain.go b/aws/resource_aws_sagemaker_domain.go index baea64f0eed..15191c3e613 100644 --- a/aws/resource_aws_sagemaker_domain.go +++ b/aws/resource_aws_sagemaker_domain.go @@ -360,6 +360,9 @@ func resourceAwsSagemakerDomainDelete(d *schema.ResourceData, meta interface{}) input := &sagemaker.DeleteDomainInput{ DomainId: aws.String(d.Id()), + RetentionPolicy: &sagemaker.RetentionPolicy{ + HomeEfsFileSystem: aws.String(sagemaker.RetentionTypeDelete), + }, } if _, err := conn.DeleteDomain(input); err != nil { diff --git a/aws/resource_aws_sagemaker_domain_test.go b/aws/resource_aws_sagemaker_domain_test.go index a1b7c62f819..63eeea8ede3 100644 --- a/aws/resource_aws_sagemaker_domain_test.go +++ b/aws/resource_aws_sagemaker_domain_test.go @@ -4,12 +4,9 @@ import ( "fmt" "log" "regexp" - "strings" "testing" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/efs" "github.com/aws/aws-sdk-go/service/sagemaker" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -93,7 +90,6 @@ func TestAccAWSSagemakerDomain_basic(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "vpc_id", "aws_vpc.test", "id"), resource.TestCheckResourceAttrSet(resourceName, "url"), resource.TestCheckResourceAttrSet(resourceName, "home_efs_file_system_id"), - testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), ), }, { @@ -120,7 +116,6 @@ func TestAccAWSSagemakerDomain_kms(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAWSSagemakerDomainExists(resourceName, &domain), 
resource.TestCheckResourceAttrPair(resourceName, "kms_key_id", "aws_kms_key.test", "arn"), - testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), ), }, { @@ -170,7 +165,6 @@ func TestAccAWSSagemakerDomain_tags(t *testing.T) { testAccCheckAWSSagemakerDomainExists(resourceName, &domain), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), - testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), ), }, }, @@ -206,7 +200,6 @@ func TestAccAWSSagemakerDomain_securityGroup(t *testing.T) { testAccCheckAWSSagemakerDomainExists(resourceName, &domain), resource.TestCheckResourceAttr(resourceName, "default_user_settings.#", "1"), resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.security_groups.#", "2"), - testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), ), }, }, @@ -232,7 +225,6 @@ func TestAccAWSSagemakerDomain_sharingSettings(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.sharing_settings.0.notebook_output_option", "Allowed"), resource.TestCheckResourceAttrPair(resourceName, "default_user_settings.0.sharing_settings.0.s3_kms_key_id", "aws_kms_key.test", "arn"), resource.TestCheckResourceAttrSet(resourceName, "default_user_settings.0.sharing_settings.0.s3_output_path"), - testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), ), }, { @@ -262,7 +254,6 @@ func TestAccAWSSagemakerDomain_tensorboardAppSettings(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.tensor_board_app_settings.#", "1"), resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.tensor_board_app_settings.0.default_resource_spec.#", "1"), resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.tensor_board_app_settings.0.default_resource_spec.0.instance_type", "ml.t3.micro"), - testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), ), }, { @@ -293,7 +284,6 @@ func TestAccAWSSagemakerDomain_tensorboardAppSettingsWithImage(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.tensor_board_app_settings.0.default_resource_spec.#", "1"), resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.tensor_board_app_settings.0.default_resource_spec.0.instance_type", "ml.t3.micro"), resource.TestCheckResourceAttrPair(resourceName, "default_user_settings.0.tensor_board_app_settings.0.default_resource_spec.0.sagemaker_image_arn", "aws_sagemaker_image.test", "arn"), - testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), ), }, { @@ -323,7 +313,6 @@ func TestAccAWSSagemakerDomain_kernelGatewayAppSettings(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.kernel_gateway_app_settings.#", "1"), resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.kernel_gateway_app_settings.0.default_resource_spec.#", "1"), resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.kernel_gateway_app_settings.0.default_resource_spec.0.instance_type", "ml.t3.micro"), - testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), ), }, { @@ -353,7 +342,6 @@ func TestAccAWSSagemakerDomain_jupyterServerAppSettings(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.jupyter_server_app_settings.#", "1"), resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.jupyter_server_app_settings.0.default_resource_spec.#", "1"), 
resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.jupyter_server_app_settings.0.default_resource_spec.0.instance_type", "ml.t3.micro"), - testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), ), }, { @@ -379,7 +367,6 @@ func TestAccAWSSagemakerDomain_disappears(t *testing.T) { Config: testAccAWSSagemakerDomainBasicConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSagemakerDomainExists(resourceName, &domain), - testAccCheckAWSSagemakerDomainDeleteImplicitResources(resourceName), testAccCheckResourceDisappears(testAccProvider, resourceAwsSagemakerDomain(), resourceName), ), ExpectNonEmptyPlan: true, @@ -438,114 +425,6 @@ func testAccCheckAWSSagemakerDomainExists(n string, codeRepo *sagemaker.Describe } } -func testAccCheckAWSSagemakerDomainDeleteImplicitResources(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Sagemaker domain not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("Sagemaker domain name not set") - } - - conn := testAccProvider.Meta().(*AWSClient).efsconn - efsFsID := rs.Primary.Attributes["home_efs_file_system_id"] - vpcID := rs.Primary.Attributes["vpc_id"] - - resp, err := conn.DescribeMountTargets(&efs.DescribeMountTargetsInput{ - FileSystemId: aws.String(efsFsID), - }) - - if err != nil { - return fmt.Errorf("Sagemaker domain EFS mount targets for EFS FS (%s) not found: %w", efsFsID, err) - } - - //reusing EFS mount target delete for wait logic - mountTargets := resp.MountTargets - for _, mt := range mountTargets { - r := resourceAwsEfsMountTarget() - d := r.Data(nil) - mtID := aws.StringValue(mt.MountTargetId) - d.SetId(mtID) - err := r.Delete(d, testAccProvider.Meta()) - if err != nil { - return fmt.Errorf("Sagemaker domain EFS mount target (%s) failed to delete: %w", mtID, err) - } - } - - r := resourceAwsEfsFileSystem() - d := r.Data(nil) - d.SetId(efsFsID) - err = r.Delete(d, testAccProvider.Meta()) - if err != nil { - return fmt.Errorf("Sagemaker domain EFS file system (%s) failed to delete: %w", efsFsID, err) - } - - var filters []*ec2.Filter - filters = append(filters, &ec2.Filter{ - Name: aws.String("vpc-id"), - Values: aws.StringSlice([]string{vpcID}), - }) - - req := &ec2.DescribeSecurityGroupsInput{ - Filters: filters, - } - - ec2conn := testAccProvider.Meta().(*AWSClient).ec2conn - - sgResp, err := ec2conn.DescribeSecurityGroups(req) - if err != nil { - return fmt.Errorf("error reading security groups: %w", err) - } - - //revoke permissions - for _, sg := range sgResp.SecurityGroups { - sgID := aws.StringValue(sg.GroupId) - - if len(sg.IpPermissions) > 0 { - req := &ec2.RevokeSecurityGroupIngressInput{ - GroupId: sg.GroupId, - IpPermissions: sg.IpPermissions, - } - _, err = ec2conn.RevokeSecurityGroupIngress(req) - - if err != nil { - return fmt.Errorf("Error revoking security group %s rules: %w", sgID, err) - } - } - - if len(sg.IpPermissionsEgress) > 0 { - req := &ec2.RevokeSecurityGroupEgressInput{ - GroupId: sg.GroupId, - IpPermissions: sg.IpPermissionsEgress, - } - _, err = ec2conn.RevokeSecurityGroupEgress(req) - - if err != nil { - return fmt.Errorf("Error revoking security group %s rules: %w", sgID, err) - } - } - } - - for _, sg := range sgResp.SecurityGroups { - sgID := aws.StringValue(sg.GroupId) - sgName := aws.StringValue(sg.GroupName) - if sgName != "default" && !strings.HasPrefix(sgName, "tf-acc-test") { - r := resourceAwsSecurityGroup() - d := r.Data(nil) - d.SetId(sgID) - 
err = r.Delete(d, testAccProvider.Meta()) - if err != nil { - return fmt.Errorf("Sagemaker domain EFS file system sg (%s) failed to delete: %w", sgID, err) - } - } - } - - return nil - } -} - func testAccAWSSagemakerDomainConfigBase(rName string) string { return fmt.Sprintf(` resource "aws_vpc" "test" { diff --git a/aws/resource_aws_sagemaker_user_profile.go b/aws/resource_aws_sagemaker_user_profile.go new file mode 100644 index 00000000000..3d74b87fa7d --- /dev/null +++ b/aws/resource_aws_sagemaker_user_profile.go @@ -0,0 +1,378 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/service/sagemaker" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/finder" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/waiter" +) + +func resourceAwsSagemakerUserProfile() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSagemakerUserProfileCreate, + Read: resourceAwsSagemakerUserProfileRead, + Update: resourceAwsSagemakerUserProfileUpdate, + Delete: resourceAwsSagemakerUserProfileDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "user_profile_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 63), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}`), "Valid characters are a-z, A-Z, 0-9, and - (hyphen)."), + ), + }, + "domain_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "single_sign_on_user_indentifier": { + Type: schema.TypeString, + Optional: true, + }, + "single_sign_on_user_value": { + Type: schema.TypeString, + Optional: true, + }, + "user_settings": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "security_groups": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 5, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "execution_role": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, + }, + "sharing_settings": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "notebook_output_option": { + Type: schema.TypeString, + Optional: true, + Default: sagemaker.NotebookOutputOptionDisabled, + ValidateFunc: validation.StringInSlice(sagemaker.NotebookOutputOption_Values(), false), + }, + "s3_kms_key_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, + }, + "s3_output_path": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "tensor_board_app_settings": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default_resource_spec": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: 
validation.StringInSlice(sagemaker.AppInstanceType_Values(), false), + }, + "sagemaker_image_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, + }, + }, + }, + }, + }, + }, + }, + "jupyter_server_app_settings": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default_resource_spec": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(sagemaker.AppInstanceType_Values(), false), + }, + "sagemaker_image_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, + }, + }, + }, + }, + }, + }, + }, + "kernel_gateway_app_settings": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default_resource_spec": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(sagemaker.AppInstanceType_Values(), false), + }, + "sagemaker_image_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, + }, + }, + }, + }, + "custom_image": { + Type: schema.TypeList, + Optional: true, + MaxItems: 30, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "app_image_config_name": { + Type: schema.TypeString, + Required: true, + }, + "image_name": { + Type: schema.TypeString, + Required: true, + }, + "image_version_number": { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "tags": tagsSchema(), + "home_efs_file_system_uid": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsSagemakerUserProfileCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sagemakerconn + + input := &sagemaker.CreateUserProfileInput{ + UserProfileName: aws.String(d.Get("user_profile_name").(string)), + DomainId: aws.String(d.Get("domain_id").(string)), + } + + if v, ok := d.GetOk("user_settings"); ok { + input.UserSettings = expandSagemakerDomainDefaultUserSettings(v.([]interface{})) + } + + if v, ok := d.GetOk("single_sign_on_user_indentifier"); ok { + input.SingleSignOnUserIdentifier = aws.String(v.(string)) + } + + if v, ok := d.GetOk("single_sign_on_user_value"); ok { + input.SingleSignOnUserValue = aws.String(v.(string)) + } + + if v, ok := d.GetOk("tags"); ok { + input.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().SagemakerTags() + } + + log.Printf("[DEBUG] SageMaker User Profile create config: %#v", *input) + output, err := conn.CreateUserProfile(input) + if err != nil { + return fmt.Errorf("error creating SageMaker User Profile: %w", err) + } + + userProfileArn := aws.StringValue(output.UserProfileArn) + domainID, userProfileName, err := decodeSagemakerUserProfileName(userProfileArn) + if err != nil { + return err + } + + d.SetId(userProfileArn) + + if _, err := waiter.UserProfileInService(conn, domainID, userProfileName); err != nil { + return fmt.Errorf("error waiting for SageMaker User Profile (%s) to create: %w", d.Id(), err) + } + + return resourceAwsSagemakerUserProfileRead(d, meta) +} + +func resourceAwsSagemakerUserProfileRead(d *schema.ResourceData, meta interface{}) error { + conn := 
meta.(*AWSClient).sagemakerconn + ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig + + domainID, userProfileName, err := decodeSagemakerUserProfileName(d.Id()) + if err != nil { + return err + } + + UserProfile, err := finder.UserProfileByName(conn, domainID, userProfileName) + if err != nil { + if isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "") { + d.SetId("") + log.Printf("[WARN] Unable to find SageMaker UserProfile (%s), removing from state", d.Id()) + return nil + } + return fmt.Errorf("error reading SageMaker UserProfile (%s): %w", d.Id(), err) + } + + arn := aws.StringValue(UserProfile.UserProfileArn) + d.Set("user_profile_name", UserProfile.UserProfileName) + d.Set("domain_id", UserProfile.DomainId) + d.Set("single_sign_on_user_indentifier", UserProfile.SingleSignOnUserIdentifier) + d.Set("single_sign_on_user_value", UserProfile.SingleSignOnUserValue) + d.Set("arn", arn) + d.Set("home_efs_file_system_uid", UserProfile.HomeEfsFileSystemUid) + + if err := d.Set("user_settings", flattenSagemakerDomainDefaultUserSettings(UserProfile.UserSettings)); err != nil { + return fmt.Errorf("error setting user_settings for SageMaker UserProfile (%s): %w", d.Id(), err) + } + + tags, err := keyvaluetags.SagemakerListTags(conn, arn) + + if err != nil { + return fmt.Errorf("error listing tags for SageMaker UserProfile (%s): %w", d.Id(), err) + } + + if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + return fmt.Errorf("error setting tags: %w", err) + } + + return nil +} + +func resourceAwsSagemakerUserProfileUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sagemakerconn + + if d.HasChange("user_settings") { + input := &sagemaker.UpdateUserProfileInput{ + UserProfileName: aws.String(d.Get("user_profile_name").(string)), + DomainId: aws.String(d.Get("domain_id").(string)), + UserSettings: expandSagemakerDomainDefaultUserSettings(d.Get("user_settings").([]interface{})), + } + + log.Printf("[DEBUG] SageMaker User Profile update config: %#v", *input) + _, err := conn.UpdateUserProfile(input) + if err != nil { + return fmt.Errorf("error updating SageMaker UserProfile: %w", err) + } + } + + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.SagemakerUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating SageMaker UserProfile (%s) tags: %w", d.Id(), err) + } + } + + return resourceAwsSagemakerUserProfileRead(d, meta) +} + +func resourceAwsSagemakerUserProfileDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sagemakerconn + + userProfileName := d.Get("user_profile_name").(string) + domainID := d.Get("domain_id").(string) + + input := &sagemaker.DeleteUserProfileInput{ + UserProfileName: aws.String(userProfileName), + DomainId: aws.String(domainID), + } + + if _, err := conn.DeleteUserProfile(input); err != nil { + if !isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "") { + return fmt.Errorf("error deleting SageMaker UserProfile (%s): %w", d.Id(), err) + } + } + + if _, err := waiter.UserProfileDeleted(conn, domainID, userProfileName); err != nil { + if !isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "") { + return fmt.Errorf("error waiting for SageMaker UserProfile (%s) to delete: %w", d.Id(), err) + } + } + + return nil +} + +func decodeSagemakerUserProfileName(id string) (string, string, error) { + userProfileARN, err := arn.Parse(id) + if err != nil { + return "", "", err + } + + userProfileResourceNameName := 
strings.TrimPrefix(userProfileARN.Resource, "user-profile/") + parts := strings.Split(userProfileResourceNameName, "/") + + if len(parts) != 2 { + return "", "", fmt.Errorf("Unexpected format of ID (%q), expected DOMAIN-ID/USER-PROFILE-NAME", userProfileResourceNameName) + } + + domainID := parts[0] + userProfileName := parts[1] + + return domainID, userProfileName, nil +} diff --git a/aws/resource_aws_sagemaker_user_profile_test.go b/aws/resource_aws_sagemaker_user_profile_test.go new file mode 100644 index 00000000000..31547dfd60a --- /dev/null +++ b/aws/resource_aws_sagemaker_user_profile_test.go @@ -0,0 +1,496 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/sagemaker" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/finder" +) + +func init() { + resource.AddTestSweepers("aws_sagemaker_user_profile", &resource.Sweeper{ + Name: "aws_sagemaker_user_profile", + F: testSweepSagemakerUserProfiles, + Dependencies: []string{ + "aws_efs_mount_target", + "aws_efs_file_system", + }, + }) +} + +func testSweepSagemakerUserProfiles(region string) error { + client, err := sharedClientForRegion(region) + if err != nil { + return fmt.Errorf("error getting client: %s", err) + } + conn := client.(*AWSClient).sagemakerconn + + err = conn.ListUserProfilesPages(&sagemaker.ListUserProfilesInput{}, func(page *sagemaker.ListUserProfilesOutput, lastPage bool) bool { + for _, instance := range page.UserProfiles { + input := &sagemaker.DeleteUserProfileInput{ + UserProfileName: instance.UserProfileName, + DomainId: instance.DomainId, + } + + userProfile := aws.StringValue(instance.UserProfileName) + log.Printf("[INFO] Deleting SageMaker User Profile: %s", userProfile) + if _, err := conn.DeleteUserProfile(input); err != nil { + log.Printf("[ERROR] Error deleting SageMaker User Profile (%s): %s", userProfile, err) + continue + } + } + + return !lastPage + }) + + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping SageMaker User Profile sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("Error retrieving SageMaker User Profiles: %w", err) + } + + return nil +} + +func TestAccAWSSagemakerUserProfile_basic(t *testing.T) { + var domain sagemaker.DescribeUserProfileOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_user_profile.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerUserProfileDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerUserProfileBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerUserProfileExists(resourceName, &domain), + resource.TestCheckResourceAttr(resourceName, "user_profile_name", rName), + resource.TestCheckResourceAttrPair(resourceName, "domain_id", "aws_sagemaker_domain.test", "id"), + resource.TestCheckResourceAttr(resourceName, "user_settings.#", "0"), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "sagemaker", regexp.MustCompile(`user-profile/.+`)), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrSet(resourceName, "home_efs_file_system_uid"), + ), + }, + {
ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSagemakerUserProfile_tags(t *testing.T) { + var domain sagemaker.DescribeUserProfileOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_user_profile.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerUserProfileDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerUserProfileConfigTags1(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerUserProfileExists(resourceName, &domain), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSSagemakerUserProfileConfigTags2(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerUserProfileExists(resourceName, &domain), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccAWSSagemakerUserProfileConfigTags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerUserProfileExists(resourceName, &domain), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + +func TestAccAWSSagemakerUserProfile_tensorboardAppSettings(t *testing.T) { + var domain sagemaker.DescribeUserProfileOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_user_profile.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerUserProfileDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerUserProfileConfigTensorBoardAppSettings(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerUserProfileExists(resourceName, &domain), + resource.TestCheckResourceAttr(resourceName, "user_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "user_settings.0.tensor_board_app_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "user_settings.0.tensor_board_app_settings.0.default_resource_spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "user_settings.0.tensor_board_app_settings.0.default_resource_spec.0.instance_type", "ml.t3.micro"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSagemakerUserProfile_tensorboardAppSettingsWithImage(t *testing.T) { + var domain sagemaker.DescribeUserProfileOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_user_profile.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerUserProfileDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerUserProfileConfigTensorBoardAppSettingsWithImage(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerUserProfileExists(resourceName, 
&domain), + resource.TestCheckResourceAttr(resourceName, "user_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "user_settings.0.tensor_board_app_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "user_settings.0.tensor_board_app_settings.0.default_resource_spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "user_settings.0.tensor_board_app_settings.0.default_resource_spec.0.instance_type", "ml.t3.micro"), + resource.TestCheckResourceAttrPair(resourceName, "user_settings.0.tensor_board_app_settings.0.default_resource_spec.0.sagemaker_image_arn", "aws_sagemaker_image.test", "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSagemakerUserProfile_kernelGatewayAppSettings(t *testing.T) { + var domain sagemaker.DescribeUserProfileOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_user_profile.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerUserProfileDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerUserProfileConfigKernelGatewayAppSettings(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerUserProfileExists(resourceName, &domain), + resource.TestCheckResourceAttr(resourceName, "user_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "user_settings.0.kernel_gateway_app_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "user_settings.0.kernel_gateway_app_settings.0.default_resource_spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "user_settings.0.kernel_gateway_app_settings.0.default_resource_spec.0.instance_type", "ml.t3.micro"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSagemakerUserProfile_jupyterServerAppSettings(t *testing.T) { + var domain sagemaker.DescribeUserProfileOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_user_profile.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerUserProfileDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerUserProfileConfigJupyterServerAppSettings(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerUserProfileExists(resourceName, &domain), + resource.TestCheckResourceAttr(resourceName, "user_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "user_settings.0.jupyter_server_app_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "user_settings.0.jupyter_server_app_settings.0.default_resource_spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "user_settings.0.jupyter_server_app_settings.0.default_resource_spec.0.instance_type", "ml.t3.micro"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSagemakerUserProfile_disappears(t *testing.T) { + var domain sagemaker.DescribeUserProfileOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_user_profile.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerUserProfileDestroy, + Steps: 
[]resource.TestStep{ + { + Config: testAccAWSSagemakerUserProfileBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerUserProfileExists(resourceName, &domain), + testAccCheckResourceDisappears(testAccProvider, resourceAwsSagemakerUserProfile(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckAWSSagemakerUserProfileDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).sagemakerconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_sagemaker_user_profile" { + continue + } + + domainID := rs.Primary.Attributes["domain_id"] + userProfileName := rs.Primary.Attributes["user_profile_name"] + + userProfile, err := finder.UserProfileByName(conn, domainID, userProfileName) + if err != nil { + return nil + } + + userProfileArn := aws.StringValue(userProfile.UserProfileArn) + if userProfileArn == rs.Primary.ID { + return fmt.Errorf("SageMaker User Profile %q still exists", rs.Primary.ID) + } + } + + return nil +} + +func testAccCheckAWSSagemakerUserProfileExists(n string, userProfile *sagemaker.DescribeUserProfileOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No SageMaker User Profile ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).sagemakerconn + + domainID := rs.Primary.Attributes["domain_id"] + userProfileName := rs.Primary.Attributes["user_profile_name"] + + resp, err := finder.UserProfileByName(conn, domainID, userProfileName) + if err != nil { + return err + } + + *userProfile = *resp + + return nil + } +} + +func testAccAWSSagemakerUserProfileConfigBase(rName string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = %[1]q + } +} + +resource "aws_subnet" "test" { + vpc_id = aws_vpc.test.id + cidr_block = "10.0.1.0/24" + + tags = { + Name = %[1]q + } +} + +resource "aws_iam_role" "test" { + name = %[1]q + path = "/" + assume_role_policy = data.aws_iam_policy_document.test.json +} + +data "aws_iam_policy_document" "test" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["sagemaker.amazonaws.com"] + } + } +} + +resource "aws_sagemaker_domain" "test" { + domain_name = %[1]q + auth_mode = "IAM" + vpc_id = aws_vpc.test.id + subnet_ids = [aws_subnet.test.id] + + default_user_settings { + execution_role = aws_iam_role.test.arn + } +} +`, rName) +} + +func testAccAWSSagemakerUserProfileBasicConfig(rName string) string { + return testAccAWSSagemakerUserProfileConfigBase(rName) + fmt.Sprintf(` +resource "aws_sagemaker_user_profile" "test" { + domain_id = aws_sagemaker_domain.test.id + user_profile_name = %[1]q +} +`, rName) +} + +func testAccAWSSagemakerUserProfileConfigTags1(rName, tagKey1, tagValue1 string) string { + return testAccAWSSagemakerUserProfileConfigBase(rName) + fmt.Sprintf(` +resource "aws_sagemaker_user_profile" "test" { + domain_id = aws_sagemaker_domain.test.id + user_profile_name = %[1]q + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1) +} + +func testAccAWSSagemakerUserProfileConfigTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return testAccAWSSagemakerUserProfileConfigBase(rName) + fmt.Sprintf(` +resource "aws_sagemaker_user_profile" "test" { + domain_id = aws_sagemaker_domain.test.id + user_profile_name = %[1]q + + tags = { + %[2]q =
%[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2) +} + +func testAccAWSSagemakerUserProfileConfigTensorBoardAppSettings(rName string) string { + return testAccAWSSagemakerUserProfileConfigBase(rName) + fmt.Sprintf(` +resource "aws_sagemaker_user_profile" "test" { + domain_id = aws_sagemaker_domain.test.id + user_profile_name = %[1]q + + user_settings { + execution_role = aws_iam_role.test.arn + + tensor_board_app_settings { + default_resource_spec { + instance_type = "ml.t3.micro" + } + } + } +} +`, rName) +} + +func testAccAWSSagemakerUserProfileConfigTensorBoardAppSettingsWithImage(rName string) string { + return testAccAWSSagemakerUserProfileConfigBase(rName) + fmt.Sprintf(` +resource "aws_sagemaker_image" "test" { + image_name = %[1]q + role_arn = aws_iam_role.test.arn +} + +resource "aws_sagemaker_user_profile" "test" { + domain_id = aws_sagemaker_domain.test.id + user_profile_name = %[1]q + + user_settings { + execution_role = aws_iam_role.test.arn + + tensor_board_app_settings { + default_resource_spec { + instance_type = "ml.t3.micro" + sagemaker_image_arn = aws_sagemaker_image.test.arn + } + } + } +} +`, rName) +} + +func testAccAWSSagemakerUserProfileConfigJupyterServerAppSettings(rName string) string { + return testAccAWSSagemakerUserProfileConfigBase(rName) + fmt.Sprintf(` +resource "aws_sagemaker_user_profile" "test" { + domain_id = aws_sagemaker_domain.test.id + user_profile_name = %[1]q + + user_settings { + execution_role = aws_iam_role.test.arn + + jupyter_server_app_settings { + default_resource_spec { + instance_type = "ml.t3.micro" + } + } + } +} +`, rName) +} + +func testAccAWSSagemakerUserProfileConfigKernelGatewayAppSettings(rName string) string { + return testAccAWSSagemakerUserProfileConfigBase(rName) + fmt.Sprintf(` +resource "aws_sagemaker_user_profile" "test" { + domain_id = aws_sagemaker_domain.test.id + user_profile_name = %[1]q + + user_settings { + execution_role = aws_iam_role.test.arn + + kernel_gateway_app_settings { + default_resource_spec { + instance_type = "ml.t3.micro" + } + } + } +} +`, rName) +} From 1c2c249bad4f85ce3b4a85356645ba6db1008513 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 15 Jan 2021 00:30:36 +0200 Subject: [PATCH 0716/1212] docs --- .../r/sagemaker_user_profile.html.markdown | 106 ++++++++++++++++++ 1 file changed, 106 insertions(+) create mode 100644 website/docs/r/sagemaker_user_profile.html.markdown diff --git a/website/docs/r/sagemaker_user_profile.html.markdown b/website/docs/r/sagemaker_user_profile.html.markdown new file mode 100644 index 00000000000..a58405524d2 --- /dev/null +++ b/website/docs/r/sagemaker_user_profile.html.markdown @@ -0,0 +1,106 @@ +--- +subcategory: "Sagemaker" +layout: "aws" +page_title: "AWS: aws_sagemaker_user_profile" +description: |- + Provides a Sagemaker User Profile resource. +--- + +# Resource: aws_sagemaker_user_profile + +Provides a Sagemaker User Profile resource. 
+ +## Example Usage + +### Basic usage + +```hcl +resource "aws_sagemaker_user_profile" "test" { + domain_id = aws_sagemaker_domain.test.id + user_profile_name = %[1]q +} + +resource "aws_iam_role" "example" { + name = "example" + path = "/" + assume_role_policy = data.aws_iam_policy_document.example.json +} + +data "aws_iam_policy_document" "example" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["sagemaker.amazonaws.com"] + } + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `user_profile_name` - (Required) The name for the User Profile. +* `domain_id` - (Required) The ID of the associated Domain. +* `single_sign_on_user_indentifier` - (Optional) A specifier for the type of value specified in `single_sign_on_user_value`. Currently, the only supported value is `UserName`. If the Domain's AuthMode is SSO, this field is required. If the Domain's AuthMode is not SSO, this field cannot be specified. +* `single_sign_on_user_value` - (Required) The username of the associated AWS Single Sign-On User for this UserProfile. If the Domain's AuthMode is SSO, this field is required, and must match a valid username of a user in your directory. If the Domain's AuthMode is not SSO, this field cannot be specified. +* `user_settings` - (Required) The user settings. See [User Settings](#user-settings) below. +* `tags` - (Optional) A map of tags to assign to the resource. + +### User Settings + +* `execution_role` - (Required) The execution role ARN for the user. +* `security_groups` - (Optional) The security groups. +* `sharing_settings` - (Optional) The sharing settings. See [Sharing Settings](#sharing-settings) below. +* `tensor_board_app_settings` - (Optional) The TensorBoard app settings. See [TensorBoard App Settings](#tensorboard-app-settings) below. +* `jupyter_server_app_settings` - (Optional) The Jupyter server's app settings. See [Jupyter Server App Settings](#jupyter-server-app-settings) below. +* `kernel_gateway_app_settings` - (Optional) The kernel gateway app settings. See [Kernel Gateway App Settings](#kernel-gateway-app-settings) below. + +#### Sharing Settings + +* `notebook_output_option` - (Optional) Whether to include the notebook cell output when sharing the notebook. The default is `Disabled`. Valid values are `Allowed` and `Disabled`. +* `s3_kms_key_id` - (Optional) When `notebook_output_option` is Allowed, the AWS Key Management Service (KMS) encryption key ID used to encrypt the notebook cell output in the Amazon S3 bucket. +* `s3_output_path` - (Optional) When `notebook_output_option` is Allowed, the Amazon S3 bucket used to save the notebook cell output. + +#### TensorBoard App Settings + +* `default_resource_spec` - (Optional) The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see [Default Resource Spec](#default-resource-spec) below. + +#### Kernel Gateway App Settings + +* `default_resource_spec` - (Optional) The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see [Default Resource Spec](#default-resource-spec) below. +* `custom_image` - (Optional) A list of custom SageMaker images that are configured to run as a KernelGateway app. see [Custom Image](#custom-image) below. + +#### Jupyter Server App Settings + +* `default_resource_spec` - (Optional) The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance.
see [Default Resource Spec](#default-resource-spec) below. + +##### Default Resource Spec + +* `instance_type` - (Optional) The instance type. +* `sagemaker_image_arn` - (Optional) The Amazon Resource Name (ARN) of the SageMaker image created on the instance. + +##### Custom Image + +* `app_image_config_name` - (Required) The name of the App Image Config. +* `image_name` - (Required) The name of the Custom Image. +* `image_version_number` - (Optional) The version number of the Custom Image. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The user profile Amazon Resource Name (ARN). +* `arn` - The user profile Amazon Resource Name (ARN). +* `home_efs_file_system_uid` - The ID of the user's profile in the Amazon Elastic File System (EFS) volume. + + +## Import + +Sagemaker Code User Profiles can be imported using the `arn`, e.g. + +``` +$ terraform import aws_sagemaker_user_profile.test_user_profile arn:aws:sagemaker:us-west-2:123456789012:user-profile/domain-id/profile-name +``` From 36c54eb8e2433f8ec60144ffa85fe3b005b3cd78 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 15 Jan 2021 00:34:07 +0200 Subject: [PATCH 0717/1212] fmt --- aws/resource_aws_sagemaker_user_profile_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_sagemaker_user_profile_test.go b/aws/resource_aws_sagemaker_user_profile_test.go index 31547dfd60a..d15199d2bc2 100644 --- a/aws/resource_aws_sagemaker_user_profile_test.go +++ b/aws/resource_aws_sagemaker_user_profile_test.go @@ -369,7 +369,7 @@ resource "aws_sagemaker_domain" "test" { auth_mode = "IAM" vpc_id = aws_vpc.test.id subnet_ids = [aws_subnet.test.id] - + default_user_settings { execution_role = aws_iam_role.test.arn } @@ -482,7 +482,7 @@ resource "aws_sagemaker_user_profile" "test" { domain_id = aws_sagemaker_domain.test.id user_profile_name = %[1]q - user_settings { + user_settings { execution_role = aws_iam_role.test.arn kernel_gateway_app_settings { From 4d0c6c010c394386faad26e69a76b56d9fd9e469 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 15 Jan 2021 00:38:55 +0200 Subject: [PATCH 0718/1212] docs --- .../docs/r/sagemaker_user_profile.html.markdown | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/website/docs/r/sagemaker_user_profile.html.markdown b/website/docs/r/sagemaker_user_profile.html.markdown index a58405524d2..186c1b886b6 100644 --- a/website/docs/r/sagemaker_user_profile.html.markdown +++ b/website/docs/r/sagemaker_user_profile.html.markdown @@ -19,23 +19,6 @@ resource "aws_sagemaker_user_profile" "test" { domain_id = aws_sagemaker_domain.test.id user_profile_name = %[1]q } - -resource "aws_iam_role" "example" { - name = "example" - path = "/" - assume_role_policy = data.aws_iam_policy_document.example.json -} - -data "aws_iam_policy_document" "example" { - statement { - actions = ["sts:AssumeRole"] - - principals { - type = "Service" - identifiers = ["sagemaker.amazonaws.com"] - } - } -} ``` ## Argument Reference From 71f8348fbc6239608e3756d6ec637926bfafef41 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 15 Jan 2021 00:42:11 +0200 Subject: [PATCH 0719/1212] doc fmt --- website/docs/r/sagemaker_user_profile.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/r/sagemaker_user_profile.html.markdown b/website/docs/r/sagemaker_user_profile.html.markdown index 186c1b886b6..03dbad50f2c 100644 --- a/website/docs/r/sagemaker_user_profile.html.markdown +++ 
b/website/docs/r/sagemaker_user_profile.html.markdown @@ -15,9 +15,9 @@ Provides a Sagemaker User Profile resource. ### Basic usage ```hcl -resource "aws_sagemaker_user_profile" "test" { +resource "aws_sagemaker_user_profile" "example" { domain_id = aws_sagemaker_domain.test.id - user_profile_name = %[1]q + user_profile_name = "example" } ``` From a4ccf92596950add1217a5912b94567a74170430 Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Sat, 16 Jan 2021 00:27:12 +0200 Subject: [PATCH 0720/1212] Apply suggestions from code review Co-authored-by: Kit Ewbank --- aws/resource_aws_sagemaker_user_profile.go | 22 ++++++++++--------- .../r/sagemaker_user_profile.html.markdown | 4 ++-- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/aws/resource_aws_sagemaker_user_profile.go b/aws/resource_aws_sagemaker_user_profile.go index 3d74b87fa7d..e1b1558a997 100644 --- a/aws/resource_aws_sagemaker_user_profile.go +++ b/aws/resource_aws_sagemaker_user_profile.go @@ -45,13 +45,15 @@ func resourceAwsSagemakerUserProfile() *schema.Resource { Required: true, ForceNew: true, }, - "single_sign_on_user_indentifier": { + "single_sign_on_user_identifier": { Type: schema.TypeString, Optional: true, + ForceNew: true, }, "single_sign_on_user_value": { Type: schema.TypeString, Optional: true, + ForceNew: true, }, "user_settings": { Type: schema.TypeList, @@ -227,7 +229,7 @@ func resourceAwsSagemakerUserProfileCreate(d *schema.ResourceData, meta interfac input.UserSettings = expandSagemakerDomainDefaultUserSettings(v.([]interface{})) } - if v, ok := d.GetOk("single_sign_on_user_indentifier"); ok { + if v, ok := d.GetOk("single_sign_on_user_identifier"); ok { input.SingleSignOnUserIdentifier = aws.String(v.(string)) } @@ -273,28 +275,28 @@ func resourceAwsSagemakerUserProfileRead(d *schema.ResourceData, meta interface{ if err != nil { if isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "") { d.SetId("") - log.Printf("[WARN] Unable to find SageMaker UserProfile (%s), removing from state", d.Id()) + log.Printf("[WARN] Unable to find SageMaker User Profile (%s), removing from state", d.Id()) return nil } - return fmt.Errorf("error reading SageMaker UserProfile (%s): %w", d.Id(), err) + return fmt.Errorf("error reading SageMaker User Profile (%s): %w", d.Id(), err) } arn := aws.StringValue(UserProfile.UserProfileArn) d.Set("user_profile_name", UserProfile.UserProfileName) d.Set("domain_id", UserProfile.DomainId) - d.Set("single_sign_on_user_indentifier", UserProfile.SingleSignOnUserIdentifier) + d.Set("single_sign_on_user_identifier", UserProfile.SingleSignOnUserIdentifier) d.Set("single_sign_on_user_value", UserProfile.SingleSignOnUserValue) d.Set("arn", arn) d.Set("home_efs_file_system_uid", UserProfile.HomeEfsFileSystemUid) if err := d.Set("user_settings", flattenSagemakerDomainDefaultUserSettings(UserProfile.UserSettings)); err != nil { - return fmt.Errorf("error setting user_settings for SageMaker UserProfile (%s): %w", d.Id(), err) + return fmt.Errorf("error setting user_settings for SageMaker User Profile (%s): %w", d.Id(), err) } tags, err := keyvaluetags.SagemakerListTags(conn, arn) if err != nil { - return fmt.Errorf("error listing tags for SageMaker UserProfile (%s): %w", d.Id(), err) + return fmt.Errorf("error listing tags for SageMaker User Profile (%s): %w", d.Id(), err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { @@ -317,7 +319,7 @@ func resourceAwsSagemakerUserProfileUpdate(d *schema.ResourceData, meta interfac log.Printf("[DEBUG] SageMaker 
User Profile update config: %#v", *input) _, err := conn.UpdateUserProfile(input) if err != nil { - return fmt.Errorf("error updating SageMaker UserProfile: %w", err) + return fmt.Errorf("error updating SageMaker User Profile: %w", err) } } @@ -345,13 +347,13 @@ func resourceAwsSagemakerUserProfileDelete(d *schema.ResourceData, meta interfac if _, err := conn.DeleteUserProfile(input); err != nil { if !isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "") { - return fmt.Errorf("error deleting SageMaker UserProfile (%s): %w", d.Id(), err) + return fmt.Errorf("error deleting SageMaker User Profile (%s): %w", d.Id(), err) } } if _, err := waiter.UserProfileDeleted(conn, domainID, userProfileName); err != nil { if !isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "") { - return fmt.Errorf("error waiting for SageMaker UserProfile (%s) to delete: %w", d.Id(), err) + return fmt.Errorf("error waiting for SageMaker User Profile (%s) to delete: %w", d.Id(), err) } } diff --git a/website/docs/r/sagemaker_user_profile.html.markdown b/website/docs/r/sagemaker_user_profile.html.markdown index 03dbad50f2c..fe8d26dfed3 100644 --- a/website/docs/r/sagemaker_user_profile.html.markdown +++ b/website/docs/r/sagemaker_user_profile.html.markdown @@ -27,8 +27,8 @@ The following arguments are supported: * `user_profile_name` - (Required) The name for the User Profile. * `domain_id` - (Required) The ID of the associated Domain. -* `single_sign_on_user_indentifier` - (Optional) A specifier for the type of value specified in `single_sign_on_user_value`. Currently, the only supported value is `UserName`. If the Domain's AuthMode is SSO, this field is required. If the Domain's AuthMode is not SSO, this field cannot be specified. -* `single_sign_on_user_value` - (Required) The username of the associated AWS Single Sign-On User for this UserProfile. If the Domain's AuthMode is SSO, this field is required, and must match a valid username of a user in your directory. If the Domain's AuthMode is not SSO, this field cannot be specified. +* `single_sign_on_user_identifier` - (Optional) A specifier for the type of value specified in `single_sign_on_user_value`. Currently, the only supported value is `UserName`. If the Domain's AuthMode is SSO, this field is required. If the Domain's AuthMode is not SSO, this field cannot be specified. +* `single_sign_on_user_value` - (Required) The username of the associated AWS Single Sign-On User for this User Profile. If the Domain's AuthMode is SSO, this field is required, and must match a valid username of a user in your directory. If the Domain's AuthMode is not SSO, this field cannot be specified. * `user_settings` - (Required) The user settings. See [User Settings](#user-settings) below. * `tags` - (Optional) A map of tags to assign to the resource. 
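
The rename above ties `single_sign_on_user_identifier` to `single_sign_on_user_value`: both are only meaningful when the owning Domain's AuthMode is SSO, and both now carry `ForceNew`, so changing either replaces the profile instead of updating it in place. A minimal sketch of the intended usage (illustrative only; the `example` resource names and the SSO username are assumptions, not taken from the patch):

```hcl
resource "aws_sagemaker_user_profile" "example" {
  # Assumes aws_sagemaker_domain.example was created with auth_mode = "SSO".
  domain_id         = aws_sagemaker_domain.example.id
  user_profile_name = "example"

  # Only valid on SSO-mode domains; "UserName" is currently the only
  # supported identifier type, per the documentation above.
  single_sign_on_user_identifier = "UserName"
  single_sign_on_user_value      = "example-sso-user"

  user_settings {
    execution_role = aws_iam_role.example.arn # assumed pre-existing execution role
  }
}
```
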
From 50fb175af6aecf21de26bbb6b17813a79e27ceba Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 15 Jan 2021 23:33:25 +0200 Subject: [PATCH 0721/1212] initial commit --- .../service/sagemaker/finder/finder.go | 23 +- .../service/sagemaker/waiter/status.go | 31 +++ .../service/sagemaker/waiter/waiter.go | 40 ++++ aws/provider.go | 1 + aws/resource_aws_sagemaker_image_version.go | 124 ++++++++++ ...source_aws_sagemaker_image_version_test.go | 215 ++++++++++++++++++ 6 files changed, 432 insertions(+), 2 deletions(-) create mode 100644 aws/resource_aws_sagemaker_image_version.go create mode 100644 aws/resource_aws_sagemaker_image_version_test.go diff --git a/aws/internal/service/sagemaker/finder/finder.go b/aws/internal/service/sagemaker/finder/finder.go index 9c0a55f460c..1119faada07 100644 --- a/aws/internal/service/sagemaker/finder/finder.go +++ b/aws/internal/service/sagemaker/finder/finder.go @@ -24,8 +24,8 @@ func CodeRepositoryByName(conn *sagemaker.SageMaker, name string) (*sagemaker.De return output, nil } -// ImageByName returns the code repository corresponding to the specified name. -// Returns nil if no code repository is found. +// ImageByName returns the Image corresponding to the specified name. +// Returns nil if no Image is found. func ImageByName(conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeImageOutput, error) { input := &sagemaker.DescribeImageInput{ ImageName: aws.String(name), @@ -43,6 +43,25 @@ func ImageByName(conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeIma return output, nil } +// ImageVersionByName returns the Image Version corresponding to the specified name. +// Returns nil if no Image Version is found. +func ImageVersionByName(conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeImageVersionOutput, error) { + input := &sagemaker.DescribeImageVersionInput{ + ImageName: aws.String(name), + } + + output, err := conn.DescribeImageVersion(input) + if err != nil { + return nil, err + } + + if output == nil { + return nil, nil + } + + return output, nil +} + // DomainByName returns the domain corresponding to the specified domain id. // Returns nil if no domain is found. 
func DomainByName(conn *sagemaker.SageMaker, domainID string) (*sagemaker.DescribeDomainOutput, error) { diff --git a/aws/internal/service/sagemaker/waiter/status.go b/aws/internal/service/sagemaker/waiter/status.go index 8a9baadc920..7b2d08f3b34 100644 --- a/aws/internal/service/sagemaker/waiter/status.go +++ b/aws/internal/service/sagemaker/waiter/status.go @@ -14,6 +14,8 @@ const ( SagemakerNotebookInstanceStatusNotFound = "NotFound" SagemakerImageStatusNotFound = "NotFound" SagemakerImageStatusFailed = "Failed" + SagemakerImageVersionStatusNotFound = "NotFound" + SagemakerImageVersionStatusFailed = "Failed" SagemakerDomainStatusNotFound = "NotFound" SagemakerFeatureGroupStatusNotFound = "NotFound" SagemakerFeatureGroupStatusUnknown = "Unknown" @@ -73,6 +75,35 @@ func ImageStatus(conn *sagemaker.SageMaker, name string) resource.StateRefreshFu } } +// ImageVersionStatus fetches the ImageVersion and its Status +func ImageVersionStatus(conn *sagemaker.SageMaker, name string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + input := &sagemaker.DescribeImageVersionInput{ + ImageName: aws.String(name), + } + + output, err := conn.DescribeImageVersion(input) + + if tfawserr.ErrMessageContains(err, sagemaker.ErrCodeResourceNotFound, "No ImageVersion with the name") { + return nil, SagemakerImageVersionStatusNotFound, nil + } + + if err != nil { + return nil, SagemakerImageVersionStatusFailed, err + } + + if output == nil { + return nil, SagemakerImageVersionStatusNotFound, nil + } + + if aws.StringValue(output.ImageVersionStatus) == sagemaker.ImageVersionStatusCreateFailed { + return output, sagemaker.ImageVersionStatusCreateFailed, fmt.Errorf("%s", aws.StringValue(output.FailureReason)) + } + + return output, aws.StringValue(output.ImageVersionStatus), nil + } +} + // DomainStatus fetches the Domain and its Status func DomainStatus(conn *sagemaker.SageMaker, domainID string) resource.StateRefreshFunc { return func() (interface{}, string, error) { diff --git a/aws/internal/service/sagemaker/waiter/waiter.go b/aws/internal/service/sagemaker/waiter/waiter.go index b34bf3fcf73..f66cd4cda71 100644 --- a/aws/internal/service/sagemaker/waiter/waiter.go +++ b/aws/internal/service/sagemaker/waiter/waiter.go @@ -13,6 +13,8 @@ const ( NotebookInstanceDeletedTimeout = 10 * time.Minute ImageCreatedTimeout = 10 * time.Minute ImageDeletedTimeout = 10 * time.Minute + ImageVersionCreatedTimeout = 10 * time.Minute + ImageVersionDeletedTimeout = 10 * time.Minute DomainInServiceTimeout = 10 * time.Minute DomainDeletedTimeout = 10 * time.Minute FeatureGroupCreatedTimeout = 10 * time.Minute @@ -122,6 +124,44 @@ func ImageDeleted(conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeIm return nil, err } +// ImageVersionCreated waits for a ImageVersion to return Created +func ImageVersionCreated(conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeImageVersionOutput, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + sagemaker.ImageVersionStatusCreating, + }, + Target: []string{sagemaker.ImageVersionStatusCreated}, + Refresh: ImageVersionStatus(conn, name), + Timeout: ImageVersionCreatedTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*sagemaker.DescribeImageVersionOutput); ok { + return output, err + } + + return nil, err +} + +// ImageVersionDeleted waits for a ImageVersion to return Deleted +func ImageVersionDeleted(conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeImageVersionOutput, error) { + 
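+	// Editorial note, not part of the original patch: the empty Target list
+	// below is the plugin SDK convention for "wait until gone": once the
+	// refresh function reports the image version as no longer found (a nil
+	// result), StateChangeConf treats that as success rather than an error.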
stateConf := &resource.StateChangeConf{ + Pending: []string{sagemaker.ImageVersionStatusDeleting}, + Target: []string{}, + Refresh: ImageVersionStatus(conn, name), + Timeout: ImageVersionDeletedTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*sagemaker.DescribeImageVersionOutput); ok { + return output, err + } + + return nil, err +} + // DomainInService waits for a Domain to return InService func DomainInService(conn *sagemaker.SageMaker, domainID string) (*sagemaker.DescribeDomainOutput, error) { stateConf := &resource.StateChangeConf{ diff --git a/aws/provider.go b/aws/provider.go index 316ed6f74fd..39af7271bda 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -881,6 +881,7 @@ func Provider() *schema.Provider { "aws_sagemaker_endpoint_configuration": resourceAwsSagemakerEndpointConfiguration(), "aws_sagemaker_feature_group": resourceAwsSagemakerFeatureGroup(), "aws_sagemaker_image": resourceAwsSagemakerImage(), + "aws_sagemaker_image_version": resourceAwsSagemakerImageVersion(), "aws_sagemaker_model": resourceAwsSagemakerModel(), "aws_sagemaker_notebook_instance_lifecycle_configuration": resourceAwsSagemakerNotebookInstanceLifeCycleConfiguration(), "aws_sagemaker_notebook_instance": resourceAwsSagemakerNotebookInstance(), diff --git a/aws/resource_aws_sagemaker_image_version.go b/aws/resource_aws_sagemaker_image_version.go new file mode 100644 index 00000000000..30938b488b3 --- /dev/null +++ b/aws/resource_aws_sagemaker_image_version.go @@ -0,0 +1,124 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/sagemaker" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/finder" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/waiter" +) + +func resourceAwsSagemakerImageVersion() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSagemakerImageVersionCreate, + Read: resourceAwsSagemakerImageVersionRead, + Update: resourceAwsSagemakerImageVersionCreate, + Delete: resourceAwsSagemakerImageVersionDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "image_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "base_image": { + Type: schema.TypeString, + Required: true, + }, + "image_arn": { + Type: schema.TypeString, + Computed: true, + }, + "container_image": { + Type: schema.TypeString, + Computed: true, + }, + "version": { + Type: schema.TypeInt, + Computed: true, + }, + }, + } +} + +func resourceAwsSagemakerImageVersionCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sagemakerconn + + name := d.Get("image_name").(string) + input := &sagemaker.CreateImageVersionInput{ + ImageName: aws.String(name), + BaseImage: aws.String(d.Get("base_image").(string)), + } + + _, err := conn.CreateImageVersion(input) + if err != nil { + return fmt.Errorf("error creating Sagemaker Image Version %s: %w", name, err) + } + + d.SetId(name) + + if _, err := waiter.ImageVersionCreated(conn, d.Id()); err != nil { + return fmt.Errorf("error waiting for SageMaker Image Version (%s) to be created: %w", d.Id(), err) + } + + return resourceAwsSagemakerImageVersionRead(d, meta) +} + +func resourceAwsSagemakerImageVersionRead(d *schema.ResourceData, meta interface{}) 
error { + conn := meta.(*AWSClient).sagemakerconn + + image, err := finder.ImageVersionByName(conn, d.Id()) + if err != nil { + if isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "does not exist") { + d.SetId("") + log.Printf("[WARN] Unable to find Sagemaker Image Version (%s); removing from state", d.Id()) + return nil + } + return fmt.Errorf("error reading Sagemaker Image Version (%s): %w", d.Id(), err) + + } + + d.Set("arn", image.ImageVersionArn) + d.Set("base_image", image.BaseImage) + d.Set("image_arn", image.ImageArn) + d.Set("container_image", image.ContainerImage) + d.Set("version", image.Version) + d.Set("image_name", d.Id()) + + return nil +} + +func resourceAwsSagemakerImageVersionDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sagemakerconn + + input := &sagemaker.DeleteImageVersionInput{ + ImageName: aws.String(d.Id()), + Version: aws.Int64(int64(d.Get("version").(int))), + } + + if _, err := conn.DeleteImageVersion(input); err != nil { + if isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "does not exist") { + return nil + } + return fmt.Errorf("error deleting Sagemaker Image Version (%s): %w", d.Id(), err) + } + + if _, err := waiter.ImageVersionDeleted(conn, d.Id()); err != nil { + if isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "does not exist") { + return nil + } + return fmt.Errorf("error waiting for SageMaker Image Version (%s) to delete: %w", d.Id(), err) + } + + return nil +} diff --git a/aws/resource_aws_sagemaker_image_version_test.go b/aws/resource_aws_sagemaker_image_version_test.go new file mode 100644 index 00000000000..310a628586f --- /dev/null +++ b/aws/resource_aws_sagemaker_image_version_test.go @@ -0,0 +1,215 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/sagemaker" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/finder" +) + +// func init() { +// resource.AddTestSweepers("aws_sagemaker_image", &resource.Sweeper{ +// Name: "aws_sagemaker_image", +// F: testSweepSagemakerImages, +// }) +// } + +// func testSweepSagemakerImages(region string) error { +// client, err := sharedClientForRegion(region) +// if err != nil { +// return fmt.Errorf("error getting client: %s", err) +// } +// conn := client.(*AWSClient).sagemakerconn + +// err = conn.ListImagesPages(&sagemaker.ListImagesInput{}, func(page *sagemaker.ListImagesOutput, lastPage bool) bool { +// for _, Image := range page.Images { +// name := aws.StringValue(Image.ImageName) + +// input := &sagemaker.DeleteImageInput{ +// ImageName: Image.ImageName, +// } + +// log.Printf("[INFO] Deleting SageMaker Image: %s", name) +// if _, err := conn.DeleteImage(input); err != nil { +// log.Printf("[ERROR] Error deleting SageMaker Image (%s): %s", name, err) +// continue +// } +// } + +// return !lastPage +// }) + +// if testSweepSkipSweepError(err) { +// log.Printf("[WARN] Skipping SageMaker Image sweep for %s: %s", region, err) +// return nil +// } + +// if err != nil { +// return fmt.Errorf("Error retrieving SageMaker Images: %w", err) +// } + +// return nil +// } + +func TestAccAWSSagemakerImageVersion_basic(t *testing.T) { + var image sagemaker.DescribeImageVersionOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_image_version.test" + 
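+	// Editorial note, not part of the original patch: the base image used in
+	// the config below is a hard-coded, account-specific ECR path, so this
+	// revision of the test can only pass in an account and region where that
+	// image already exists.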
+ resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerImageVersionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerImageVersionBasicConfig(rName, "544685987707.dkr.ecr.us-west-2.amazonaws.com/smstudio-custom:latest"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerImageVersionExists(resourceName, &image), + resource.TestCheckResourceAttr(resourceName, "image_name", rName), + resource.TestCheckResourceAttr(resourceName, "base_image", "544685987707.dkr.ecr.us-west-2.amazonaws.com/smstudio-custom:latest"), + resource.TestCheckResourceAttr(resourceName, "version", "1"), + testAccCheckResourceAttrRegionalARN(resourceName, "image_arn", "sagemaker", fmt.Sprintf("image/%s", rName)), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("image-version/%s/1", rName)), + resource.TestCheckResourceAttrSet(resourceName, "container_image"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSagemakerImageVersion_disappears(t *testing.T) { + var image sagemaker.DescribeImageVersionOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_image_version.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerImageVersionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerImageVersionBasicConfig(rName, "544685987707.dkr.ecr.us-west-2.amazonaws.com/smstudio-custom:latest"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerImageVersionExists(resourceName, &image), + testAccCheckResourceDisappears(testAccProvider, resourceAwsSagemakerImageVersion(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccAWSSagemakerImageVersion_disappears_image(t *testing.T) { + var image sagemaker.DescribeImageVersionOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_image_version.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerImageVersionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerImageVersionBasicConfig(rName, "544685987707.dkr.ecr.us-west-2.amazonaws.com/smstudio-custom:latest"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerImageVersionExists(resourceName, &image), + testAccCheckResourceDisappears(testAccProvider, resourceAwsSagemakerImage(), "aws_sagemaker_image.test"), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckAWSSagemakerImageVersionDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).sagemakerconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_sagemaker_image_version" { + continue + } + + imageVersion, err := finder.ImageVersionByName(conn, rs.Primary.ID) + if err != nil { + return nil + } + + if aws.StringValue(imageVersion.ImageVersionArn) == rs.Primary.Attributes["arn"] { + return fmt.Errorf("SageMaker Image Version %q still exists", rs.Primary.ID) + } + } + + return nil +} + +func testAccCheckAWSSagemakerImageVersionExists(n string, image *sagemaker.DescribeImageVersionOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := 
s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No sagmaker Image ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).sagemakerconn + resp, err := finder.ImageVersionByName(conn, rs.Primary.ID) + if err != nil { + return err + } + + *image = *resp + + return nil + } +} + +func testAccAWSSagemakerImageVersionBasicConfig(rName, baseImage string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_iam_role" "test" { + name = %[1]q + assume_role_policy = data.aws_iam_policy_document.test.json +} + +data "aws_iam_policy_document" "test" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["sagemaker.${data.aws_partition.current.dns_suffix}"] + } + } +} + +resource "aws_iam_role_policy_attachment" "test" { + role = aws_iam_role.test.name + policy_arn = "arn:aws:iam::aws:policy/AmazonSageMakerFullAccess" +} + +resource "aws_sagemaker_image" "test" { + image_name = %[1]q + role_arn = aws_iam_role.test.arn + + depends_on = [aws_iam_role_policy_attachment.test] +} + +resource "aws_sagemaker_image_version" "test" { + image_name = aws_sagemaker_image.test.id + base_image = %[2]q +} +`, rName, baseImage) +} From 1fd281bddcbb72231abc1d2a2c684f089fdf368b Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 15 Jan 2021 23:55:34 +0200 Subject: [PATCH 0722/1212] skip tests + fix image does not exist error for image --- aws/resource_aws_sagemaker_image.go | 2 +- ...source_aws_sagemaker_image_version_test.go | 71 ++++++------------- 2 files changed, 24 insertions(+), 49 deletions(-) diff --git a/aws/resource_aws_sagemaker_image.go b/aws/resource_aws_sagemaker_image.go index 104da90c363..d8d7afd5b62 100644 --- a/aws/resource_aws_sagemaker_image.go +++ b/aws/resource_aws_sagemaker_image.go @@ -104,7 +104,7 @@ func resourceAwsSagemakerImageRead(d *schema.ResourceData, meta interface{}) err image, err := finder.ImageByName(conn, d.Id()) if err != nil { - if isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "No Image with the name") { + if isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "does not exist") { d.SetId("") log.Printf("[WARN] Unable to find SageMaker Image (%s); removing from state", d.Id()) return nil diff --git a/aws/resource_aws_sagemaker_image_version_test.go b/aws/resource_aws_sagemaker_image_version_test.go index 310a628586f..d972801cd0d 100644 --- a/aws/resource_aws_sagemaker_image_version_test.go +++ b/aws/resource_aws_sagemaker_image_version_test.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "os" "testing" "github.com/aws/aws-sdk-go/aws" @@ -12,54 +13,16 @@ import ( "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/finder" ) -// func init() { -// resource.AddTestSweepers("aws_sagemaker_image", &resource.Sweeper{ -// Name: "aws_sagemaker_image", -// F: testSweepSagemakerImages, -// }) -// } - -// func testSweepSagemakerImages(region string) error { -// client, err := sharedClientForRegion(region) -// if err != nil { -// return fmt.Errorf("error getting client: %s", err) -// } -// conn := client.(*AWSClient).sagemakerconn - -// err = conn.ListImagesPages(&sagemaker.ListImagesInput{}, func(page *sagemaker.ListImagesOutput, lastPage bool) bool { -// for _, Image := range page.Images { -// name := aws.StringValue(Image.ImageName) - -// input := &sagemaker.DeleteImageInput{ -// ImageName: Image.ImageName, -// } - -// log.Printf("[INFO] Deleting SageMaker Image: %s", name) -// if _, err := 
conn.DeleteImage(input); err != nil { -// log.Printf("[ERROR] Error deleting SageMaker Image (%s): %s", name, err) -// continue -// } -// } - -// return !lastPage -// }) - -// if testSweepSkipSweepError(err) { -// log.Printf("[WARN] Skipping SageMaker Image sweep for %s: %s", region, err) -// return nil -// } - -// if err != nil { -// return fmt.Errorf("Error retrieving SageMaker Images: %w", err) -// } - -// return nil -// } - func TestAccAWSSagemakerImageVersion_basic(t *testing.T) { + + if os.Getenv("SAGEMAKER_IMAGE_VERSION_BAES_IMAGE") == "" { + t.Skip("Environment variable SAGEMAKER_IMAGE_VERSION_BAES_IMAGE is not set") + } + var image sagemaker.DescribeImageVersionOutput rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_sagemaker_image_version.test" + baseImage := os.Getenv("SAGEMAKER_IMAGE_VERSION_BAES_IMAGE") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -67,11 +30,11 @@ func TestAccAWSSagemakerImageVersion_basic(t *testing.T) { CheckDestroy: testAccCheckAWSSagemakerImageVersionDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSSagemakerImageVersionBasicConfig(rName, "544685987707.dkr.ecr.us-west-2.amazonaws.com/smstudio-custom:latest"), + Config: testAccAWSSagemakerImageVersionBasicConfig(rName, baseImage), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSagemakerImageVersionExists(resourceName, &image), resource.TestCheckResourceAttr(resourceName, "image_name", rName), - resource.TestCheckResourceAttr(resourceName, "base_image", "544685987707.dkr.ecr.us-west-2.amazonaws.com/smstudio-custom:latest"), + resource.TestCheckResourceAttr(resourceName, "base_image", baseImage), resource.TestCheckResourceAttr(resourceName, "version", "1"), testAccCheckResourceAttrRegionalARN(resourceName, "image_arn", "sagemaker", fmt.Sprintf("image/%s", rName)), testAccCheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("image-version/%s/1", rName)), @@ -88,9 +51,15 @@ func TestAccAWSSagemakerImageVersion_basic(t *testing.T) { } func TestAccAWSSagemakerImageVersion_disappears(t *testing.T) { + + if os.Getenv("SAGEMAKER_IMAGE_VERSION_BAES_IMAGE") == "" { + t.Skip("Environment variable SAGEMAKER_IMAGE_VERSION_BAES_IMAGE is not set") + } + var image sagemaker.DescribeImageVersionOutput rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_sagemaker_image_version.test" + baseImage := os.Getenv("SAGEMAKER_IMAGE_VERSION_BAES_IMAGE") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -98,7 +67,7 @@ func TestAccAWSSagemakerImageVersion_disappears(t *testing.T) { CheckDestroy: testAccCheckAWSSagemakerImageVersionDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSSagemakerImageVersionBasicConfig(rName, "544685987707.dkr.ecr.us-west-2.amazonaws.com/smstudio-custom:latest"), + Config: testAccAWSSagemakerImageVersionBasicConfig(rName, baseImage), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSagemakerImageVersionExists(resourceName, &image), testAccCheckResourceDisappears(testAccProvider, resourceAwsSagemakerImageVersion(), resourceName), @@ -110,9 +79,15 @@ func TestAccAWSSagemakerImageVersion_disappears(t *testing.T) { } func TestAccAWSSagemakerImageVersion_disappears_image(t *testing.T) { + + if os.Getenv("SAGEMAKER_IMAGE_VERSION_BAES_IMAGE") == "" { + t.Skip("Environment variable SAGEMAKER_IMAGE_VERSION_BAES_IMAGE is not set") + } + var image sagemaker.DescribeImageVersionOutput rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := 
"aws_sagemaker_image_version.test" + baseImage := os.Getenv("SAGEMAKER_IMAGE_VERSION_BAES_IMAGE") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -120,7 +95,7 @@ func TestAccAWSSagemakerImageVersion_disappears_image(t *testing.T) { CheckDestroy: testAccCheckAWSSagemakerImageVersionDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSSagemakerImageVersionBasicConfig(rName, "544685987707.dkr.ecr.us-west-2.amazonaws.com/smstudio-custom:latest"), + Config: testAccAWSSagemakerImageVersionBasicConfig(rName, baseImage), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSagemakerImageVersionExists(resourceName, &image), testAccCheckResourceDisappears(testAccProvider, resourceAwsSagemakerImage(), "aws_sagemaker_image.test"), From 04afda342216da528561e14581749f0a6dac0452 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 16 Jan 2021 00:04:40 +0200 Subject: [PATCH 0723/1212] docs --- .../r/sagemaker_image_version.html.markdown | 46 +++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 website/docs/r/sagemaker_image_version.html.markdown diff --git a/website/docs/r/sagemaker_image_version.html.markdown b/website/docs/r/sagemaker_image_version.html.markdown new file mode 100644 index 00000000000..391b456e80f --- /dev/null +++ b/website/docs/r/sagemaker_image_version.html.markdown @@ -0,0 +1,46 @@ +--- +subcategory: "Sagemaker" +layout: "aws" +page_title: "AWS: aws_sagemaker_image_version" +description: |- + Provides a Sagemaker Image Version resource. +--- + +# Resource: aws_sagemaker_image_version + +Provides a Sagemaker Image Version resource. + +## Example Usage + +### Basic usage + +```hcl +resource "aws_sagemaker_image_version" "test" { + image_name = aws_sagemaker_image.test.id + base_image = "012345678912.dkr.ecr.us-west-2.amazonaws.com/image:latest +} +``` + +## Argument Reference + +The following arguments are supported: + +* `image_name` - (Required) The name of the image. Must be unique to your account. +* `base_image` - (Required) The registry path of the container image on which this image version is based. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The name of the Image. +* `arn` - The Amazon Resource Name (ARN) assigned by AWS to this Image Version. +* `image_arn`- The Amazon Resource Name (ARN) of the image the version is based on. +* `container_image` - The registry path of the container image that contains this image version. + +## Import + +Sagemaker Image Versions can be imported using the `name`, e.g. + +``` +$ terraform import aws_sagemaker_image_version.test_image my-code-repo +``` From 1ff9c2625d96ca9cdd10da0d6eb67db8bb8652f4 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 16 Jan 2021 00:10:36 +0200 Subject: [PATCH 0724/1212] docs fmt --- website/docs/r/sagemaker_image_version.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/sagemaker_image_version.html.markdown b/website/docs/r/sagemaker_image_version.html.markdown index 391b456e80f..ad86e2fac40 100644 --- a/website/docs/r/sagemaker_image_version.html.markdown +++ b/website/docs/r/sagemaker_image_version.html.markdown @@ -17,7 +17,7 @@ Provides a Sagemaker Image Version resource. 
```hcl resource "aws_sagemaker_image_version" "test" { image_name = aws_sagemaker_image.test.id - base_image = "012345678912.dkr.ecr.us-west-2.amazonaws.com/image:latest + base_image = "012345678912.dkr.ecr.us-west-2.amazonaws.com/image:latest" } ``` From 4df0bc4661392fe1577db77c5e938b05368c65a7 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 16 Jan 2021 00:21:00 +0200 Subject: [PATCH 0725/1212] hardcoded partition --- aws/resource_aws_sagemaker_image_version_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_sagemaker_image_version_test.go b/aws/resource_aws_sagemaker_image_version_test.go index d972801cd0d..6989e1e9a62 100644 --- a/aws/resource_aws_sagemaker_image_version_test.go +++ b/aws/resource_aws_sagemaker_image_version_test.go @@ -172,7 +172,7 @@ data "aws_iam_policy_document" "test" { resource "aws_iam_role_policy_attachment" "test" { role = aws_iam_role.test.name - policy_arn = "arn:aws:iam::aws:policy/AmazonSageMakerFullAccess" + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonSageMakerFullAccess" } resource "aws_sagemaker_image" "test" { From 1416fd0779e5f566577c686505942bafe78555a6 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 17 Jan 2021 23:54:07 +0200 Subject: [PATCH 0726/1212] ordering --- aws/resource_aws_sagemaker_image_version.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/aws/resource_aws_sagemaker_image_version.go b/aws/resource_aws_sagemaker_image_version.go index 30938b488b3..39239808317 100644 --- a/aws/resource_aws_sagemaker_image_version.go +++ b/aws/resource_aws_sagemaker_image_version.go @@ -26,22 +26,22 @@ func resourceAwsSagemakerImageVersion() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "image_name": { + "base_image": { Type: schema.TypeString, Required: true, - ForceNew: true, }, - "base_image": { + "container_image": { Type: schema.TypeString, - Required: true, + Computed: true, }, "image_arn": { Type: schema.TypeString, Computed: true, }, - "container_image": { + "image_name": { Type: schema.TypeString, - Computed: true, + Required: true, + ForceNew: true, }, "version": { Type: schema.TypeInt, From bf686bfbc5637d66ea8fab407492b3e54ff73dbb Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 17 Jan 2021 23:54:32 +0200 Subject: [PATCH 0727/1212] spelling --- ...esource_aws_sagemaker_image_version_test.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/aws/resource_aws_sagemaker_image_version_test.go b/aws/resource_aws_sagemaker_image_version_test.go index 6989e1e9a62..0b39ba99661 100644 --- a/aws/resource_aws_sagemaker_image_version_test.go +++ b/aws/resource_aws_sagemaker_image_version_test.go @@ -15,14 +15,14 @@ import ( func TestAccAWSSagemakerImageVersion_basic(t *testing.T) { - if os.Getenv("SAGEMAKER_IMAGE_VERSION_BAES_IMAGE") == "" { - t.Skip("Environment variable SAGEMAKER_IMAGE_VERSION_BAES_IMAGE is not set") + if os.Getenv("SAGEMAKER_IMAGE_VERSION_BASE_IMAGE") == "" { + t.Skip("Environment variable SAGEMAKER_IMAGE_VERSION_BASE_IMAGE is not set") } var image sagemaker.DescribeImageVersionOutput rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_sagemaker_image_version.test" - baseImage := os.Getenv("SAGEMAKER_IMAGE_VERSION_BAES_IMAGE") + baseImage := os.Getenv("SAGEMAKER_IMAGE_VERSION_BASE_IMAGE") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -52,14 +52,14 @@ func TestAccAWSSagemakerImageVersion_basic(t *testing.T) { 
func TestAccAWSSagemakerImageVersion_disappears(t *testing.T) { - if os.Getenv("SAGEMAKER_IMAGE_VERSION_BAES_IMAGE") == "" { - t.Skip("Environment variable SAGEMAKER_IMAGE_VERSION_BAES_IMAGE is not set") + if os.Getenv("SAGEMAKER_IMAGE_VERSION_BASE_IMAGE") == "" { + t.Skip("Environment variable SAGEMAKER_IMAGE_VERSION_BASE_IMAGE is not set") } var image sagemaker.DescribeImageVersionOutput rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_sagemaker_image_version.test" - baseImage := os.Getenv("SAGEMAKER_IMAGE_VERSION_BAES_IMAGE") + baseImage := os.Getenv("SAGEMAKER_IMAGE_VERSION_BASE_IMAGE") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -80,14 +80,14 @@ func TestAccAWSSagemakerImageVersion_disappears(t *testing.T) { func TestAccAWSSagemakerImageVersion_disappears_image(t *testing.T) { - if os.Getenv("SAGEMAKER_IMAGE_VERSION_BAES_IMAGE") == "" { - t.Skip("Environment variable SAGEMAKER_IMAGE_VERSION_BAES_IMAGE is not set") + if os.Getenv("SAGEMAKER_IMAGE_VERSION_BASE_IMAGE") == "" { + t.Skip("Environment variable SAGEMAKER_IMAGE_VERSION_BASE_IMAGE is not set") } var image sagemaker.DescribeImageVersionOutput rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_sagemaker_image_version.test" - baseImage := os.Getenv("SAGEMAKER_IMAGE_VERSION_BAES_IMAGE") + baseImage := os.Getenv("SAGEMAKER_IMAGE_VERSION_BASE_IMAGE") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, From 9a21963b08b6675fed588b1e23bdca2f0ee7851d Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 17 Jan 2021 23:57:22 +0200 Subject: [PATCH 0728/1212] maint docs --- docs/MAINTAINING.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/MAINTAINING.md b/docs/MAINTAINING.md index 696f8574ded..1d32db37258 100644 --- a/docs/MAINTAINING.md +++ b/docs/MAINTAINING.md @@ -410,6 +410,7 @@ Environment variables (beyond standard AWS Go SDK ones) used by acceptance testi | `GCM_API_KEY` | API Key for Google Cloud Messaging in Pinpoint and SNS Platform Application testing. | | `GITHUB_TOKEN` | GitHub token for CodePipeline testing. | | `MACIE_MEMBER_ACCOUNT_ID` | Identifier of AWS Account for Macie Member testing. **DEPRECATED:** Should be replaced with standard alternate account handling for tests. | +| `SAGEMAKER_IMAGE_VERSION_BASE_IMAGE` | Sagemaker base image to use for tests. | | `SERVICEQUOTAS_INCREASE_ON_CREATE_QUOTA_CODE` | Quota Code for Service Quotas testing (submits support case). | | `SERVICEQUOTAS_INCREASE_ON_CREATE_SERVICE_CODE` | Service Code for Service Quotas testing (submits support case). | | `SERVICEQUOTAS_INCREASE_ON_CREATE_VALUE` | Value of quota increase for Service Quotas testing (submits support case). 
| From 626e869844d85580065b4a219ed819de5a68bd30 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Mon, 18 Jan 2021 19:26:03 +0200 Subject: [PATCH 0729/1212] remove update --- aws/resource_aws_sagemaker_image_version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_sagemaker_image_version.go b/aws/resource_aws_sagemaker_image_version.go index 39239808317..e97c3a3edc1 100644 --- a/aws/resource_aws_sagemaker_image_version.go +++ b/aws/resource_aws_sagemaker_image_version.go @@ -15,7 +15,6 @@ func resourceAwsSagemakerImageVersion() *schema.Resource { return &schema.Resource{ Create: resourceAwsSagemakerImageVersionCreate, Read: resourceAwsSagemakerImageVersionRead, - Update: resourceAwsSagemakerImageVersionCreate, Delete: resourceAwsSagemakerImageVersionDelete, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, @@ -29,6 +28,7 @@ func resourceAwsSagemakerImageVersion() *schema.Resource { "base_image": { Type: schema.TypeString, Required: true, + ForceNew: true, }, "container_image": { Type: schema.TypeString, From 3942716ea278819d3b696bcb14c8583cec2b55f3 Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Mon, 18 Jan 2021 09:26:04 -0800 Subject: [PATCH 0730/1212] Update CHANGELOG.md for #16475 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d256b06ded8..8d5b7f4557a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ FEATURES +* **New Resource:** `aws_backup_global_settings` [GH-16475] * **New Resource:** `aws_sagemaker_feature_group` [GH-16728] ## 3.24.1 (January 15, 2021) From bea47b6c852a87410492e886d1f21677f02adcda Mon Sep 17 00:00:00 2001 From: shuheiktgw Date: Tue, 19 Jan 2021 07:55:48 +0900 Subject: [PATCH 0731/1212] Add arn and owner_id to License Manager license configuration --- ...esource_aws_licensemanager_license_configuration.go | 10 ++++++++++ ...ce_aws_licensemanager_license_configuration_test.go | 3 +++ .../r/licensemanager_license_configuration.markdown | 2 ++ 3 files changed, 15 insertions(+) diff --git a/aws/resource_aws_licensemanager_license_configuration.go b/aws/resource_aws_licensemanager_license_configuration.go index 8c8ca3216c9..3a38bbf8a34 100644 --- a/aws/resource_aws_licensemanager_license_configuration.go +++ b/aws/resource_aws_licensemanager_license_configuration.go @@ -23,6 +23,10 @@ func resourceAwsLicenseManagerLicenseConfiguration() *schema.Resource { }, Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, "description": { Type: schema.TypeString, Optional: true, @@ -60,6 +64,10 @@ func resourceAwsLicenseManagerLicenseConfiguration() *schema.Resource { Type: schema.TypeString, Required: true, }, + "owner_account_id": { + Type: schema.TypeString, + Computed: true, + }, "tags": tagsSchema(), }, } @@ -120,6 +128,7 @@ func resourceAwsLicenseManagerLicenseConfigurationRead(d *schema.ResourceData, m return fmt.Errorf("Error reading License Manager license configuration: %s", err) } + d.Set("arn", resp.LicenseConfigurationArn) d.Set("description", resp.Description) d.Set("license_count", resp.LicenseCount) d.Set("license_count_hard_limit", resp.LicenseCountHardLimit) @@ -128,6 +137,7 @@ func resourceAwsLicenseManagerLicenseConfigurationRead(d *schema.ResourceData, m return fmt.Errorf("error setting license_rules: %s", err) } d.Set("name", resp.Name) + d.Set("owner_account_id", resp.OwnerAccountId) if err := d.Set("tags", 
keyvaluetags.LicensemanagerKeyValueTags(resp.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { return fmt.Errorf("error setting tags: %s", err) diff --git a/aws/resource_aws_licensemanager_license_configuration_test.go b/aws/resource_aws_licensemanager_license_configuration_test.go index 56dfe03fdad..0948bdee40d 100644 --- a/aws/resource_aws_licensemanager_license_configuration_test.go +++ b/aws/resource_aws_licensemanager_license_configuration_test.go @@ -3,6 +3,7 @@ package aws import ( "fmt" "log" + "regexp" "testing" "github.com/aws/aws-sdk-go/aws" @@ -72,6 +73,7 @@ func TestAccAWSLicenseManagerLicenseConfiguration_basic(t *testing.T) { Config: testAccLicenseManagerLicenseConfigurationConfig_basic, Check: resource.ComposeTestCheckFunc( testAccCheckLicenseManagerLicenseConfigurationExists(resourceName, &licenseConfiguration), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "license-manager", regexp.MustCompile(`license-configuration:lic-.+`)), resource.TestCheckResourceAttr(resourceName, "name", "Example"), resource.TestCheckResourceAttr(resourceName, "description", "Example"), resource.TestCheckResourceAttr(resourceName, "license_count", "10"), @@ -79,6 +81,7 @@ func TestAccAWSLicenseManagerLicenseConfiguration_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "license_counting_type", "Socket"), resource.TestCheckResourceAttr(resourceName, "license_rules.#", "1"), resource.TestCheckResourceAttr(resourceName, "license_rules.0", "#minimumSockets=3"), + testAccCheckResourceAttrAccountID(resourceName, "owner_account_id"), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.foo", "barr"), ), diff --git a/website/docs/r/licensemanager_license_configuration.markdown b/website/docs/r/licensemanager_license_configuration.markdown index 09a6ab8edd5..e7b3162bffe 100644 --- a/website/docs/r/licensemanager_license_configuration.markdown +++ b/website/docs/r/licensemanager_license_configuration.markdown @@ -60,7 +60,9 @@ License rules should be in the format of `#RuleType=RuleValue`. Supported rule t In addition to all arguments above, the following attributes are exported: +* `arn` - The license configuration ARN. * `id` - The license configuration ARN. +* `owner_account_id` - Account ID of the owner of the license configuration. 
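
The newly exported attributes can then be referenced like any other (a hedged sketch, not part of the patch; the resource label `example` is an assumption):

```hcl
output "license_configuration_arn" {
  value = aws_licensemanager_license_configuration.example.arn
}

output "license_configuration_owner_account_id" {
  value = aws_licensemanager_license_configuration.example.owner_account_id
}
```
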
## Import From d49e28735aa39b38bfe67deb58f46ac3a42e8ebc Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 18 Jan 2021 18:05:17 -0800 Subject: [PATCH 0732/1212] Adds waiter functions for ElastiCache Replication Group --- .../service/elasticache/finder/finder.go | 34 ++++ .../service/elasticache/waiter/status.go | 33 ++++ .../service/elasticache/waiter/waiter.go | 52 ++++++ ...ource_aws_elasticache_replication_group.go | 160 +++++------------- ..._aws_elasticache_replication_group_test.go | 25 +-- 5 files changed, 170 insertions(+), 134 deletions(-) create mode 100644 aws/internal/service/elasticache/finder/finder.go create mode 100644 aws/internal/service/elasticache/waiter/status.go create mode 100644 aws/internal/service/elasticache/waiter/waiter.go diff --git a/aws/internal/service/elasticache/finder/finder.go b/aws/internal/service/elasticache/finder/finder.go new file mode 100644 index 00000000000..cc344d9d97d --- /dev/null +++ b/aws/internal/service/elasticache/finder/finder.go @@ -0,0 +1,34 @@ +package finder + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +// ReplicationGroupByID retrieves an ElastiCache Replication Group by id. +func ReplicationGroupByID(conn *elasticache.ElastiCache, id string) (*elasticache.ReplicationGroup, error) { + input := &elasticache.DescribeReplicationGroupsInput{ + ReplicationGroupId: aws.String(id), + } + result, err := conn.DescribeReplicationGroups(input) + if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeReplicationGroupNotFoundFault) { + return nil, &resource.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + if err != nil { + return nil, err + } + + if result == nil || len(result.ReplicationGroups) == 0 || result.ReplicationGroups[0] == nil { + return nil, &resource.NotFoundError{ + Message: "Empty result", + LastRequest: input, + } + } + + return result.ReplicationGroups[0], nil +} diff --git a/aws/internal/service/elasticache/waiter/status.go b/aws/internal/service/elasticache/waiter/status.go new file mode 100644 index 00000000000..bdffde8ec0c --- /dev/null +++ b/aws/internal/service/elasticache/waiter/status.go @@ -0,0 +1,33 @@ +package waiter + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/elasticache/finder" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" +) + +const ( + ReplicationGroupStatusCreating = "creating" + ReplicationGroupStatusAvailable = "available" + ReplicationGroupStatusModifying = "modifying" + ReplicationGroupStatusDeleting = "deleting" + ReplicationGroupStatusCreateFailed = "create-failed" + ReplicationGroupStatusSnapshotting = "snapshotting" +) + +// ReplicationGroupStatus fetches the ReplicationGroup and its Status +func ReplicationGroupStatus(conn *elasticache.ElastiCache, replicationGroupID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + rg, err := finder.ReplicationGroupByID(conn, replicationGroupID) + if tfresource.NotFound(err) { + return nil, "", nil + } + if err != nil { + return nil, "", err + } + + return rg, aws.StringValue(rg.Status), nil + } +} diff --git a/aws/internal/service/elasticache/waiter/waiter.go b/aws/internal/service/elasticache/waiter/waiter.go new file 
mode 100644 index 00000000000..37864fa1e89 --- /dev/null +++ b/aws/internal/service/elasticache/waiter/waiter.go @@ -0,0 +1,52 @@ +package waiter + +import ( + "time" + + "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const ( + replicationGroupAvailableMinTimeout = 10 * time.Second + replicationGroupAvailableDelay = 30 * time.Second + + replicationGroupDeletedMinTimeout = 10 * time.Second + replicationGroupDeletedDelay = 30 * time.Second +) + +// ReplicationGroupAvailable waits for a ReplicationGroup to return Available +func ReplicationGroupAvailable(conn *elasticache.ElastiCache, replicationGroupID string, timeout time.Duration) (*elasticache.ReplicationGroup, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ReplicationGroupStatusCreating, ReplicationGroupStatusModifying, ReplicationGroupStatusSnapshotting}, + Target: []string{ReplicationGroupStatusAvailable}, + Refresh: ReplicationGroupStatus(conn, replicationGroupID), + Timeout: timeout, + MinTimeout: replicationGroupAvailableMinTimeout, + Delay: replicationGroupAvailableDelay, + } + + outputRaw, err := stateConf.WaitForState() + if v, ok := outputRaw.(*elasticache.ReplicationGroup); ok { + return v, err + } + return nil, err +} + +// ReplicationGroupDeleted waits for a ReplicationGroup to be deleted +func ReplicationGroupDeleted(conn *elasticache.ElastiCache, replicationGroupID string, timeout time.Duration) (*elasticache.ReplicationGroup, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ReplicationGroupStatusCreating, ReplicationGroupStatusAvailable, ReplicationGroupStatusDeleting}, + Target: []string{}, + Refresh: ReplicationGroupStatus(conn, replicationGroupID), + Timeout: timeout, + MinTimeout: replicationGroupDeletedMinTimeout, + Delay: replicationGroupDeletedDelay, + } + + outputRaw, err := stateConf.WaitForState() + if v, ok := outputRaw.(*elasticache.ReplicationGroup); ok { + return v, err + } + return nil, err +} diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index 4ec2da537c4..12c4bd57166 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/elasticache/waiter" ) func resourceAwsElasticacheReplicationGroup() *schema.Resource { @@ -117,7 +118,7 @@ func resourceAwsElasticacheReplicationGroup() *schema.Resource { Optional: true, Computed: true, StateFunc: func(val interface{}) string { - // Elasticache always changes the maintenance + // ElastiCache always changes the maintenance // to lowercase return strings.ToLower(val.(string)) }, @@ -377,25 +378,14 @@ func resourceAwsElasticacheReplicationGroupCreate(d *schema.ResourceData, meta i resp, err := conn.CreateReplicationGroup(params) if err != nil { - return fmt.Errorf("Error creating Elasticache Replication Group: %w", err) + return fmt.Errorf("Error creating ElastiCache Replication Group: %w", err) } d.SetId(aws.StringValue(resp.ReplicationGroup.ReplicationGroupId)) - pending := []string{"creating", "modifying", "restoring", "snapshotting"} - stateConf := &resource.StateChangeConf{ - Pending: pending, - Target: 
[]string{"available"}, - Refresh: cacheReplicationGroupStateRefreshFunc(conn, d.Id(), pending), - Timeout: d.Timeout(schema.TimeoutCreate), - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - log.Printf("[DEBUG] Waiting for state to become available: %v", d.Id()) - _, sterr := stateConf.WaitForState() - if sterr != nil { - return fmt.Errorf("Error waiting for elasticache replication group (%s) to be created: %s", d.Id(), sterr) + _, err = waiter.ReplicationGroupAvailable(conn, d.Id(), d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("error waiting for ElastiCache Replication Group (%s) to be created: %w", d.Id(), err) } return resourceAwsElasticacheReplicationGroupRead(d, meta) @@ -412,7 +402,7 @@ func resourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta int res, err := conn.DescribeReplicationGroups(req) if err != nil { if isAWSErr(err, elasticache.ErrCodeReplicationGroupNotFoundFault, "") { - log.Printf("[WARN] Elasticache Replication Group (%s) not found", d.Id()) + log.Printf("[WARN] ElastiCache Replication Group (%s) not found", d.Id()) d.SetId("") return nil } @@ -563,15 +553,15 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i input.NodeGroupsToRemove = aws.StringSlice(nodeGroupsToRemove) } - log.Printf("[DEBUG] Modifying Elasticache Replication Group (%s) shard configuration: %s", d.Id(), input) + log.Printf("[DEBUG] Modifying ElastiCache Replication Group (%s) shard configuration: %s", d.Id(), input) _, err := conn.ModifyReplicationGroupShardConfiguration(input) if err != nil { - return fmt.Errorf("error modifying Elasticache Replication Group shard configuration: %w", err) + return fmt.Errorf("error modifying ElastiCache Replication Group shard configuration: %w", err) } - err = waitForModifyElasticacheReplicationGroup(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) + _, err = waiter.ReplicationGroupAvailable(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) if err != nil { - return fmt.Errorf("error waiting for Elasticache Replication Group (%s) shard reconfiguration completion: %w", d.Id(), err) + return fmt.Errorf("error waiting for ElastiCache Replication Group (%s) shard reconfiguration completion: %w", d.Id(), err) } } @@ -600,7 +590,7 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i if err != nil { // Future enhancement: we could retry creation with random ID on naming collision // if isAWSErr(err, elasticache.ErrCodeCacheClusterAlreadyExistsFault, "") { ... } - return fmt.Errorf("error creating Elasticache Cache Cluster (adding replica): %w", err) + return fmt.Errorf("error creating ElastiCache Cache Cluster (adding replica): %w", err) } } @@ -608,7 +598,7 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i for _, cacheClusterID := range addClusterIDs { err := waitForCreateElasticacheCacheCluster(conn, cacheClusterID, d.Timeout(schema.TimeoutUpdate)) if err != nil { - return fmt.Errorf("error waiting for Elasticache Cache Cluster (%s) to be created (adding replica): %w", cacheClusterID, err) + return fmt.Errorf("error waiting for ElastiCache Cache Cluster (%s) to be created (adding replica): %w", cacheClusterID, err) } } } @@ -626,7 +616,7 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i // Future enhancement: we could retry deletion with random existing ID on missing name // if isAWSErr(err, elasticache.ErrCodeCacheClusterNotFoundFault, "") { ... 
} if !isAWSErr(err, elasticache.ErrCodeInvalidCacheClusterStateFault, "serving as primary") { - return fmt.Errorf("error deleting Elasticache Cache Cluster (%s) (removing replica): %w", cacheClusterID, err) + return fmt.Errorf("error deleting ElastiCache Cache Cluster (%s) (removing replica): %w", cacheClusterID, err) } // Use Replication Group MemberClusters to find a new primary cache cluster ID @@ -636,13 +626,13 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i describeReplicationGroupInput := &elasticache.DescribeReplicationGroupsInput{ ReplicationGroupId: aws.String(d.Id()), } - log.Printf("[DEBUG] Reading Elasticache Replication Group: %s", describeReplicationGroupInput) + log.Printf("[DEBUG] Reading ElastiCache Replication Group: %s", describeReplicationGroupInput) output, err := conn.DescribeReplicationGroups(describeReplicationGroupInput) if err != nil { - return fmt.Errorf("error reading Elasticache Replication Group (%s) to determine new primary: %w", d.Id(), err) + return fmt.Errorf("error reading ElastiCache Replication Group (%s) to determine new primary: %w", d.Id(), err) } if output == nil || len(output.ReplicationGroups) == 0 || len(output.ReplicationGroups[0].MemberClusters) == 0 { - return fmt.Errorf("error reading Elasticache Replication Group (%s) to determine new primary: missing replication group information", d.Id()) + return fmt.Errorf("error reading ElastiCache Replication Group (%s) to determine new primary: missing replication group information", d.Id()) } for _, memberClusterPtr := range output.ReplicationGroups[0].MemberClusters { @@ -660,7 +650,7 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i } } if newPrimaryClusterID == "" { - return fmt.Errorf("error reading Elasticache Replication Group (%s) to determine new primary: unable to assign new primary", d.Id()) + return fmt.Errorf("error reading ElastiCache Replication Group (%s) to determine new primary: unable to assign new primary", d.Id()) } // Disable automatic failover if enabled @@ -677,14 +667,14 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i AutomaticFailoverEnabled: aws.Bool(false), ReplicationGroupId: aws.String(d.Id()), } - log.Printf("[DEBUG] Modifying Elasticache Replication Group: %s", modifyReplicationGroupInput) + log.Printf("[DEBUG] Modifying ElastiCache Replication Group: %s", modifyReplicationGroupInput) _, err = conn.ModifyReplicationGroup(modifyReplicationGroupInput) if err != nil { - return fmt.Errorf("error modifying Elasticache Replication Group (%s) to set new primary: %sw", d.Id(), err) + return fmt.Errorf("error modifying ElastiCache Replication Group (%s) to set new primary: %w", d.Id(), err) } - err = waitForModifyElasticacheReplicationGroup(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) + _, err = waiter.ReplicationGroupAvailable(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) if err != nil { - return fmt.Errorf("error waiting for Elasticache Replication Group (%s) to be available: %w", d.Id(), err) + return fmt.Errorf("error waiting for ElastiCache Replication Group (%s) to be available: %w", d.Id(), err) } } @@ -694,21 +684,21 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i PrimaryClusterId: aws.String(newPrimaryClusterID), ReplicationGroupId: aws.String(d.Id()), } - log.Printf("[DEBUG] Modifying Elasticache Replication Group: %s", modifyReplicationGroupInput) + log.Printf("[DEBUG] Modifying ElastiCache Replication Group: %s", 
modifyReplicationGroupInput) _, err = conn.ModifyReplicationGroup(modifyReplicationGroupInput) if err != nil { - return fmt.Errorf("error modifying Elasticache Replication Group (%s) to set new primary: %w", d.Id(), err) + return fmt.Errorf("error modifying ElastiCache Replication Group (%s) to set new primary: %w", d.Id(), err) } - err = waitForModifyElasticacheReplicationGroup(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) + _, err = waiter.ReplicationGroupAvailable(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) if err != nil { - return fmt.Errorf("error waiting for Elasticache Replication Group (%s) to be available: %w", d.Id(), err) + return fmt.Errorf("error waiting for ElastiCache Replication Group (%s) to be available: %w", d.Id(), err) } // Finally retry deleting the cache cluster var finalSnapshotID = d.Get("final_snapshot_identifier").(string) err = deleteElasticacheCacheCluster(conn, cacheClusterID, finalSnapshotID) if err != nil { - return fmt.Errorf("error deleting Elasticache Cache Cluster (%s) (removing replica after setting new primary): %w", cacheClusterID, err) + return fmt.Errorf("error deleting ElastiCache Cache Cluster (%s) (removing replica after setting new primary): %w", cacheClusterID, err) } } } @@ -717,7 +707,7 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i for _, cacheClusterID := range removeClusterIDs { err := waitForDeleteElasticacheCacheCluster(conn, cacheClusterID, d.Timeout(schema.TimeoutUpdate)) if err != nil { - return fmt.Errorf("error waiting for Elasticache Cache Cluster (%s) to be deleted (removing replica): %w", cacheClusterID, err) + return fmt.Errorf("error waiting for ElastiCache Cache Cluster (%s) to be deleted (removing replica): %w", cacheClusterID, err) } } @@ -728,10 +718,10 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i AutomaticFailoverEnabled: aws.Bool(true), ReplicationGroupId: aws.String(d.Id()), } - log.Printf("[DEBUG] Modifying Elasticache Replication Group: %s", input) + log.Printf("[DEBUG] Modifying ElastiCache Replication Group: %s", input) _, err := conn.ModifyReplicationGroup(input) if err != nil { - return fmt.Errorf("error modifying Elasticache Replication Group (%s) to re-enable automatic failover: %w", d.Id(), err) + return fmt.Errorf("error modifying ElastiCache Replication Group (%s) to re-enable automatic failover: %w", d.Id(), err) } } } @@ -816,12 +806,12 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i if requestUpdate { _, err := conn.ModifyReplicationGroup(params) if err != nil { - return fmt.Errorf("error updating Elasticache Replication Group (%s): %w", d.Id(), err) + return fmt.Errorf("error updating ElastiCache Replication Group (%s): %w", d.Id(), err) } - err = waitForModifyElasticacheReplicationGroup(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) + _, err = waiter.ReplicationGroupAvailable(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) if err != nil { - return fmt.Errorf("error waiting for Elasticache Replication Group (%s) to be updated: %w", d.Id(), err) + return fmt.Errorf("error waiting for ElastiCache Replication Group (%s) to be updated: %w", d.Id(), err) } } @@ -852,63 +842,15 @@ func resourceAwsElasticacheReplicationGroupDelete(d *schema.ResourceData, meta i conn := meta.(*AWSClient).elasticacheconn var finalSnapshotID = d.Get("final_snapshot_identifier").(string) - err := deleteElasticacheReplicationGroup(d.Id(), conn, finalSnapshotID) + err := deleteElasticacheReplicationGroup(d.Id(), conn, 
finalSnapshotID, d.Timeout(schema.TimeoutDelete)) if err != nil { - return fmt.Errorf("error deleting Elasticache Replication Group (%s): %w", d.Id(), err) + return fmt.Errorf("error deleting ElastiCache Replication Group (%s): %w", d.Id(), err) } return nil } -func cacheReplicationGroupStateRefreshFunc(conn *elasticache.ElastiCache, replicationGroupId string, pending []string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - resp, err := conn.DescribeReplicationGroups(&elasticache.DescribeReplicationGroupsInput{ - ReplicationGroupId: aws.String(replicationGroupId), - }) - if err != nil { - if isAWSErr(err, elasticache.ErrCodeReplicationGroupNotFoundFault, "") { - log.Printf("[DEBUG] Replication Group Not Found") - return nil, "", nil - } - - log.Printf("[ERROR] cacheClusterReplicationGroupStateRefreshFunc: %s", err) - return nil, "", err - } - - if len(resp.ReplicationGroups) == 0 { - return nil, "", fmt.Errorf("Error: no Cache Replication Groups found for id (%s)", replicationGroupId) - } - - var rg *elasticache.ReplicationGroup - for _, replicationGroup := range resp.ReplicationGroups { - rgId := aws.StringValue(replicationGroup.ReplicationGroupId) - if rgId == replicationGroupId { - log.Printf("[DEBUG] Found matching ElastiCache Replication Group: %s", rgId) - rg = replicationGroup - } - } - - if rg == nil { - return nil, "", fmt.Errorf("Error: no matching ElastiCache Replication Group for id (%s)", replicationGroupId) - } - - log.Printf("[DEBUG] ElastiCache Replication Group (%s) status: %v", replicationGroupId, aws.StringValue(rg.Status)) - - // return the current state if it's in the pending array - for _, p := range pending { - log.Printf("[DEBUG] ElastiCache: checking pending state (%s) for Replication Group (%s), Replication Group status: %s", pending, replicationGroupId, aws.StringValue(rg.Status)) - s := aws.StringValue(rg.Status) - if p == s { - log.Printf("[DEBUG] Return with status: %v", aws.StringValue(rg.Status)) - return s, p, nil - } - } - - return rg, aws.StringValue(rg.Status), nil - } -} - -func deleteElasticacheReplicationGroup(replicationGroupID string, conn *elasticache.ElastiCache, finalSnapshotID string) error { +func deleteElasticacheReplicationGroup(replicationGroupID string, conn *elasticache.ElastiCache, finalSnapshotID string, timeout time.Duration) error { input := &elasticache.DeleteReplicationGroupInput{ ReplicationGroupId: aws.String(replicationGroupID), } @@ -941,20 +883,10 @@ func deleteElasticacheReplicationGroup(replicationGroupID string, conn *elastica } if err != nil { - return fmt.Errorf("error deleting Elasticache Replication Group: %w", err) + return fmt.Errorf("error deleting ElastiCache Replication Group: %w", err) } - log.Printf("[DEBUG] Waiting for deletion: %s", replicationGroupID) - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating", "available", "deleting"}, - Target: []string{}, - Refresh: cacheReplicationGroupStateRefreshFunc(conn, replicationGroupID, []string{}), - Timeout: 40 * time.Minute, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - _, err = stateConf.WaitForState() + _, err = waiter.ReplicationGroupDeleted(conn, replicationGroupID, timeout) return err } @@ -970,22 +902,6 @@ func flattenElasticacheNodeGroupsToClusterMode(nodeGroups []*elasticache.NodeGro return []map[string]interface{}{m} } -func waitForModifyElasticacheReplicationGroup(conn *elasticache.ElastiCache, replicationGroupID string, timeout time.Duration) error { - pending := []string{"creating", 
"modifying", "snapshotting"} - stateConf := &resource.StateChangeConf{ - Pending: pending, - Target: []string{"available"}, - Refresh: cacheReplicationGroupStateRefreshFunc(conn, replicationGroupID, pending), - Timeout: timeout, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - log.Printf("[DEBUG] Waiting for Elasticache Replication Group (%s) to become available", replicationGroupID) - _, err := stateConf.WaitForState() - return err -} - func validateAwsElastiCacheReplicationGroupEngine(v interface{}, k string) (ws []string, errors []error) { if strings.ToLower(v.(string)) != "redis" { errors = append(errors, fmt.Errorf("The only acceptable Engine type when using Replication Groups is Redis")) diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go index 59d623616e2..3650a69c66c 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/elasticache/waiter" ) func init() { @@ -32,27 +33,27 @@ func testSweepElasticacheReplicationGroups(region string) error { err = conn.DescribeReplicationGroupsPages(&elasticache.DescribeReplicationGroupsInput{}, func(page *elasticache.DescribeReplicationGroupsOutput, isLast bool) bool { if len(page.ReplicationGroups) == 0 { - log.Print("[DEBUG] No Elasticache Replicaton Groups to sweep") + log.Print("[DEBUG] No ElastiCache Replication Groups to sweep") return false } for _, replicationGroup := range page.ReplicationGroups { id := aws.StringValue(replicationGroup.ReplicationGroupId) - log.Printf("[INFO] Deleting Elasticache Replication Group: %s", id) - err := deleteElasticacheReplicationGroup(id, conn, "") + log.Printf("[INFO] Deleting ElastiCache Replication Group: %s", id) + err := deleteElasticacheReplicationGroup(id, conn, "", 40*time.Minute) if err != nil { - log.Printf("[ERROR] Failed to delete Elasticache Replication Group (%s): %s", id, err) + log.Printf("[ERROR] Failed to delete ElastiCache Replication Group (%s): %s", id, err) } } return !isLast }) if err != nil { if testSweepSkipSweepError(err) { - log.Printf("[WARN] Skipping Elasticache Replication Group sweep for %s: %s", region, err) + log.Printf("[WARN] Skipping ElastiCache Replication Group sweep for %s: %s", region, err) return nil } - return fmt.Errorf("Error retrieving Elasticache Replication Groups: %w", err) + return fmt.Errorf("Error retrieving ElastiCache Replication Groups: %w", err) } return nil } @@ -707,7 +708,7 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFail if _, err := conn.ModifyReplicationGroup(input); err != nil { t.Fatalf("error setting new primary cache cluster: %s", err) } - if err := waitForModifyElasticacheReplicationGroup(conn, rName, 40*time.Minute); err != nil { + if _, err := waiter.ReplicationGroupAvailable(conn, rName, 40*time.Minute); err != nil { t.Fatalf("error waiting for new primary cache cluster: %s", err) } }, @@ -754,7 +755,7 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFail if _, err := conn.ModifyReplicationGroup(input); err != nil { t.Fatalf("error disabling automatic failover: %s", err) } - if err := waitForModifyElasticacheReplicationGroup(conn, 
rName, 40*time.Minute); err != nil { + if _, err := waiter.ReplicationGroupAvailable(conn, rName, 40*time.Minute); err != nil { t.Fatalf("error waiting for disabling automatic failover: %s", err) } @@ -767,7 +768,7 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFail if _, err := conn.ModifyReplicationGroup(input); err != nil { t.Fatalf("error setting new primary cache cluster: %s", err) } - if err := waitForModifyElasticacheReplicationGroup(conn, rName, 40*time.Minute); err != nil { + if _, err := waiter.ReplicationGroupAvailable(conn, rName, 40*time.Minute); err != nil { t.Fatalf("error waiting for new primary cache cluster: %s", err) } @@ -780,7 +781,7 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFail if _, err := conn.ModifyReplicationGroup(input); err != nil { t.Fatalf("error enabled automatic failover: %s", err) } - if err := waitForModifyElasticacheReplicationGroup(conn, rName, 40*time.Minute); err != nil { + if _, err := waiter.ReplicationGroupAvailable(conn, rName, 40*time.Minute); err != nil { t.Fatalf("error waiting for enabled automatic failover: %s", err) } }, @@ -905,7 +906,7 @@ func testAccCheckAWSElasticacheReplicationGroupExists(n string, v *elasticache.R ReplicationGroupId: aws.String(rs.Primary.ID), }) if err != nil { - return fmt.Errorf("Elasticache error: %v", err) + return fmt.Errorf("ElastiCache error: %v", err) } for _, rg := range res.ReplicationGroups { @@ -1027,7 +1028,7 @@ func testAccAWSElasticacheReplicationGroupConfigParameterGroupName(rName string, resource "aws_elasticache_parameter_group" "test" { count = 2 - # We do not have a data source for "latest" Elasticache family + # We do not have a data source for "latest" ElastiCache family # so unfortunately we must hardcode this for now family = "redis6.x" From 59d3866152a1eb7c1ab8cc1bf6d440bf24299f99 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 19 Jan 2021 08:05:17 -0500 Subject: [PATCH 0733/1212] service/ec2: AMI throughput handling (#16631) Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAWSAMI_basic (64.21s) --- PASS: TestAccAWSAMI_description (76.85s) --- PASS: TestAccAWSAMI_disappears (60.98s) --- PASS: TestAccAWSAMI_EphemeralBlockDevices (64.91s) --- PASS: TestAccAWSAMI_Gp3BlockDevice (48.76s) --- PASS: TestAccAWSAMI_tags (73.71s) --- PASS: TestAccAWSAMICopy_basic (378.01s) --- PASS: TestAccAWSAMICopy_Description (391.97s) --- PASS: TestAccAWSAMICopy_EnaSupport (377.97s) --- PASS: TestAccAWSAMICopy_tags (404.08s) --- PASS: TestAccAWSAmiDataSource_Gp3BlockDevice (48.44s) --- PASS: TestAccAWSAmiDataSource_instanceStore (27.12s) --- PASS: TestAccAWSAmiDataSource_localNameFilter (30.76s) --- PASS: TestAccAWSAmiDataSource_natInstance (26.80s) --- PASS: TestAccAWSAmiDataSource_windowsInstance (28.30s) --- PASS: TestAccAWSAMIFromInstance_basic (444.76s) --- PASS: TestAccAWSAMIFromInstance_tags (294.34s) --- PASS: TestAccAWSAMILaunchPermission_basic (344.81s) --- PASS: TestAccAWSAMILaunchPermission_Disappears_AMI (359.20s) --- PASS: TestAccAWSAMILaunchPermission_Disappears_LaunchPermission (336.46s) --- PASS: TestAccAWSAMILaunchPermission_Disappears_LaunchPermission_Public (348.74s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- PASS: TestAccAWSAMI_basic (68.61s) --- PASS: TestAccAWSAMI_description (76.31s) --- PASS: TestAccAWSAMI_disappears (65.32s) --- PASS: TestAccAWSAMI_EphemeralBlockDevices (69.63s) --- PASS: TestAccAWSAMI_Gp3BlockDevice (67.43s) --- PASS: TestAccAWSAMI_tags (99.68s) --- PASS: 
TestAccAWSAMICopy_basic (391.97s) --- PASS: TestAccAWSAMICopy_Description (409.20s) --- PASS: TestAccAWSAMICopy_EnaSupport (392.72s) --- PASS: TestAccAWSAMICopy_tags (413.77s) --- PASS: TestAccAWSAmiDataSource_Gp3BlockDevice (67.39s) --- PASS: TestAccAWSAmiDataSource_instanceStore (27.26s) --- PASS: TestAccAWSAmiDataSource_localNameFilter (30.92s) --- PASS: TestAccAWSAmiDataSource_natInstance (27.50s) --- PASS: TestAccAWSAmiDataSource_windowsInstance (27.77s) --- PASS: TestAccAWSAMIFromInstance_basic (255.31s) --- PASS: TestAccAWSAMIFromInstance_tags (359.49s) --- PASS: TestAccAWSAMILaunchPermission_basic (356.36s) --- PASS: TestAccAWSAMILaunchPermission_Disappears_AMI (371.79s) --- PASS: TestAccAWSAMILaunchPermission_Disappears_LaunchPermission (354.32s) --- PASS: TestAccAWSAMILaunchPermission_Disappears_LaunchPermission_Public (353.67s) ``` --- aws/data_source_aws_ami.go | 1 + aws/data_source_aws_ami_test.go | 50 ++++ aws/resource_aws_ami.go | 321 ++++++++++++++++----- aws/resource_aws_ami_copy.go | 12 +- aws/resource_aws_ami_from_instance.go | 12 +- aws/resource_aws_ami_test.go | 385 ++++++++++++++++++-------- website/docs/d/ami.html.markdown | 1 + website/docs/r/ami.html.markdown | 6 +- 8 files changed, 594 insertions(+), 194 deletions(-) diff --git a/aws/data_source_aws_ami.go b/aws/data_source_aws_ami.go index a1d1aba2f54..7f53f605fc5 100644 --- a/aws/data_source_aws_ami.go +++ b/aws/data_source_aws_ami.go @@ -329,6 +329,7 @@ func amiBlockDeviceMappings(m []*ec2.BlockDeviceMapping) *schema.Set { "delete_on_termination": fmt.Sprintf("%t", aws.BoolValue(v.Ebs.DeleteOnTermination)), "encrypted": fmt.Sprintf("%t", aws.BoolValue(v.Ebs.Encrypted)), "iops": fmt.Sprintf("%d", aws.Int64Value(v.Ebs.Iops)), + "throughput": fmt.Sprintf("%d", aws.Int64Value(v.Ebs.Throughput)), "volume_size": fmt.Sprintf("%d", aws.Int64Value(v.Ebs.VolumeSize)), "snapshot_id": aws.StringValue(v.Ebs.SnapshotId), "volume_type": aws.StringValue(v.Ebs.VolumeType), diff --git a/aws/data_source_aws_ami_test.go b/aws/data_source_aws_ami_test.go index e17e87aad73..9def968d79c 100644 --- a/aws/data_source_aws_ami_test.go +++ b/aws/data_source_aws_ami_test.go @@ -5,6 +5,7 @@ import ( "regexp" "testing" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) @@ -55,6 +56,7 @@ func TestAccAWSAmiDataSource_natInstance(t *testing.T) { }, }) } + func TestAccAWSAmiDataSource_windowsInstance(t *testing.T) { resourceName := "data.aws_ami.windows_ami" resource.ParallelTest(t, resource.TestCase{ @@ -147,6 +149,37 @@ func TestAccAWSAmiDataSource_localNameFilter(t *testing.T) { }) } +func TestAccAWSAmiDataSource_Gp3BlockDevice(t *testing.T) { + resourceName := "aws_ami.test" + datasourceName := "data.aws_ami.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccAmiDataSourceConfigGp3BlockDevice(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsAmiDataSourceID(datasourceName), + resource.TestCheckResourceAttrPair(datasourceName, "architecture", resourceName, "architecture"), + resource.TestCheckResourceAttrPair(datasourceName, "arn", resourceName, "arn"), + resource.TestCheckResourceAttrPair(datasourceName, "block_device_mappings.#", resourceName, "ebs_block_device.#"), + resource.TestCheckResourceAttrPair(datasourceName, 
"description", resourceName, "description"), + resource.TestCheckResourceAttrPair(datasourceName, "image_id", resourceName, "id"), + testAccCheckResourceAttrAccountID(datasourceName, "owner_id"), + resource.TestCheckResourceAttrPair(datasourceName, "root_device_name", resourceName, "root_device_name"), + resource.TestCheckResourceAttr(datasourceName, "root_device_type", "ebs"), + resource.TestCheckResourceAttrPair(datasourceName, "root_snapshot_id", resourceName, "root_snapshot_id"), + resource.TestCheckResourceAttrPair(datasourceName, "sriov_net_support", resourceName, "sriov_net_support"), + resource.TestCheckResourceAttrPair(datasourceName, "tags.%", resourceName, "tags.%"), + resource.TestCheckResourceAttrPair(datasourceName, "virtualization_type", resourceName, "virtualization_type"), + ), + }, + }, + }) +} + func testAccCheckAwsAmiDataSourceID(n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -234,3 +267,20 @@ data "aws_ami" "name_regex_filtered_ami" { name_regex = "^amzn-ami-min[a-z]{4}-hvm" } ` + +func testAccAmiDataSourceConfigGp3BlockDevice(rName string) string { + return composeConfig( + testAccAmiConfigGp3BlockDevice(rName), + ` +data "aws_caller_identity" "current" {} + +data "aws_ami" "test" { + owners = [data.aws_caller_identity.current.account_id] + + filter { + name = "image-id" + values = [aws_ami.test.id] + } +} +`) +} diff --git a/aws/resource_aws_ami.go b/aws/resource_aws_ami.go index 43d94dbfc83..1fe711e8fd3 100644 --- a/aws/resource_aws_ami.go +++ b/aws/resource_aws_ami.go @@ -110,6 +110,13 @@ func resourceAwsAmi() *schema.Resource { ForceNew: true, }, + "throughput": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + "volume_size": { Type: schema.TypeInt, Optional: true, @@ -243,45 +250,36 @@ func resourceAwsAmiCreate(d *schema.ResourceData, meta interface{}) error { req.RamdiskId = aws.String(ramdiskId) } - ebsBlockDevsSet := d.Get("ebs_block_device").(*schema.Set) - ephemeralBlockDevsSet := d.Get("ephemeral_block_device").(*schema.Set) - for _, ebsBlockDevI := range ebsBlockDevsSet.List() { - ebsBlockDev := ebsBlockDevI.(map[string]interface{}) - blockDev := &ec2.BlockDeviceMapping{ - DeviceName: aws.String(ebsBlockDev["device_name"].(string)), - Ebs: &ec2.EbsBlockDevice{ - DeleteOnTermination: aws.Bool(ebsBlockDev["delete_on_termination"].(bool)), - VolumeType: aws.String(ebsBlockDev["volume_type"].(string)), - }, - } - if iops, ok := ebsBlockDev["iops"]; ok { - if iop := iops.(int); iop != 0 { - blockDev.Ebs.Iops = aws.Int64(int64(iop)) + if v, ok := d.GetOk("ebs_block_device"); ok && v.(*schema.Set).Len() > 0 { + for _, tfMapRaw := range v.(*schema.Set).List() { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue } - } - if size, ok := ebsBlockDev["volume_size"]; ok { - if s := size.(int); s != 0 { - blockDev.Ebs.VolumeSize = aws.Int64(int64(s)) + + var encrypted bool + + if v, ok := tfMap["encrypted"].(bool); ok { + encrypted = v } - } - encrypted := ebsBlockDev["encrypted"].(bool) - if snapshotId := ebsBlockDev["snapshot_id"].(string); snapshotId != "" { - blockDev.Ebs.SnapshotId = aws.String(snapshotId) - if encrypted { + + var snapshot string + + if v, ok := tfMap["snapshot_id"].(string); ok && v != "" { + snapshot = v + } + + if snapshot != "" && encrypted { return errors.New("can't set both 'snapshot_id' and 'encrypted'") } - } else if encrypted { - blockDev.Ebs.Encrypted = aws.Bool(true) } - req.BlockDeviceMappings = 
append(req.BlockDeviceMappings, blockDev) + + req.BlockDeviceMappings = expandEc2BlockDeviceMappingsForAmiEbsBlockDevice(v.(*schema.Set).List()) } - for _, ephemeralBlockDevI := range ephemeralBlockDevsSet.List() { - ephemeralBlockDev := ephemeralBlockDevI.(map[string]interface{}) - blockDev := &ec2.BlockDeviceMapping{ - DeviceName: aws.String(ephemeralBlockDev["device_name"].(string)), - VirtualName: aws.String(ephemeralBlockDev["virtual_name"].(string)), - } - req.BlockDeviceMappings = append(req.BlockDeviceMappings, blockDev) + + if v, ok := d.GetOk("ephemeral_block_device"); ok && v.(*schema.Set).Len() > 0 { + req.BlockDeviceMappings = append(req.BlockDeviceMappings, expandEc2BlockDeviceMappingsForAmiEphemeralBlockDevice(v.(*schema.Set).List())...) } res, err := client.RegisterImage(req) @@ -394,40 +392,16 @@ func resourceAwsAmiRead(d *schema.ResourceData, meta interface{}) error { d.Set("arn", imageArn) - var ebsBlockDevs []map[string]interface{} - var ephemeralBlockDevs []map[string]interface{} - - for _, blockDev := range image.BlockDeviceMappings { - if blockDev.Ebs != nil { - ebsBlockDev := map[string]interface{}{ - "device_name": *blockDev.DeviceName, - "delete_on_termination": *blockDev.Ebs.DeleteOnTermination, - "encrypted": *blockDev.Ebs.Encrypted, - "iops": 0, - "volume_size": int(*blockDev.Ebs.VolumeSize), - "volume_type": *blockDev.Ebs.VolumeType, - } - if blockDev.Ebs.Iops != nil { - ebsBlockDev["iops"] = int(*blockDev.Ebs.Iops) - } - // The snapshot ID might not be set. - if blockDev.Ebs.SnapshotId != nil { - ebsBlockDev["snapshot_id"] = *blockDev.Ebs.SnapshotId - } - ebsBlockDevs = append(ebsBlockDevs, ebsBlockDev) - } else { - ephemeralBlockDevs = append(ephemeralBlockDevs, map[string]interface{}{ - "device_name": *blockDev.DeviceName, - "virtual_name": *blockDev.VirtualName, - }) - } + if err := d.Set("ebs_block_device", flattenEc2BlockDeviceMappingsForAmiEbsBlockDevice(image.BlockDeviceMappings)); err != nil { + return fmt.Errorf("error setting ebs_block_device: %w", err) } - d.Set("ebs_block_device", ebsBlockDevs) - d.Set("ephemeral_block_device", ephemeralBlockDevs) + if err := d.Set("ephemeral_block_device", flattenEc2BlockDeviceMappingsForAmiEphemeralBlockDevice(image.BlockDeviceMappings)); err != nil { + return fmt.Errorf("error setting ephemeral_block_device: %w", err) + } if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(image.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil @@ -568,3 +542,224 @@ func resourceAwsAmiWaitForAvailable(timeout time.Duration, id string, client *ec } return info.(*ec2.Image), nil } + +func expandEc2BlockDeviceMappingForAmiEbsBlockDevice(tfMap map[string]interface{}) *ec2.BlockDeviceMapping { + if tfMap == nil { + return nil + } + + apiObject := &ec2.BlockDeviceMapping{ + Ebs: &ec2.EbsBlockDevice{}, + } + + if v, ok := tfMap["delete_on_termination"].(bool); ok { + apiObject.Ebs.DeleteOnTermination = aws.Bool(v) + } + + if v, ok := tfMap["device_name"].(string); ok && v != "" { + apiObject.DeviceName = aws.String(v) + } + + if v, ok := tfMap["iops"].(int); ok && v != 0 { + apiObject.Ebs.Iops = aws.Int64(int64(v)) + } + + // "Parameter encrypted is invalid. You cannot specify the encrypted flag if specifying a snapshot id in a block device mapping." 
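+ // Accordingly, the Encrypted flag is only sent to the API when no snapshot ID is supplied below; a volume restored from a snapshot keeps the snapshot's encryption state.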
+ if v, ok := tfMap["snapshot_id"].(string); ok && v != "" { + apiObject.Ebs.SnapshotId = aws.String(v) + } else if v, ok := tfMap["encrypted"].(bool); ok { + apiObject.Ebs.Encrypted = aws.Bool(v) + } + + if v, ok := tfMap["throughput"].(int); ok && v != 0 { + apiObject.Ebs.Throughput = aws.Int64(int64(v)) + } + + if v, ok := tfMap["volume_size"].(int); ok && v != 0 { + apiObject.Ebs.VolumeSize = aws.Int64(int64(v)) + } + + if v, ok := tfMap["volume_type"].(string); ok && v != "" { + apiObject.Ebs.VolumeType = aws.String(v) + } + + return apiObject +} + +func expandEc2BlockDeviceMappingsForAmiEbsBlockDevice(tfList []interface{}) []*ec2.BlockDeviceMapping { + if len(tfList) == 0 { + return nil + } + + var apiObjects []*ec2.BlockDeviceMapping + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandEc2BlockDeviceMappingForAmiEbsBlockDevice(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, apiObject) + } + + return apiObjects +} + +func flattenEc2BlockDeviceMappingForAmiEbsBlockDevice(apiObject *ec2.BlockDeviceMapping) map[string]interface{} { + if apiObject == nil { + return nil + } + + if apiObject.Ebs == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.Ebs.DeleteOnTermination; v != nil { + tfMap["delete_on_termination"] = aws.BoolValue(v) + } + + if v := apiObject.DeviceName; v != nil { + tfMap["device_name"] = aws.StringValue(v) + } + + if v := apiObject.Ebs.Encrypted; v != nil { + tfMap["encrypted"] = aws.BoolValue(v) + } + + if v := apiObject.Ebs.Iops; v != nil { + tfMap["iops"] = aws.Int64Value(v) + } + + if v := apiObject.Ebs.SnapshotId; v != nil { + tfMap["snapshot_id"] = aws.StringValue(v) + } + + if v := apiObject.Ebs.Throughput; v != nil { + tfMap["throughput"] = aws.Int64Value(v) + } + + if v := apiObject.Ebs.VolumeSize; v != nil { + tfMap["volume_size"] = aws.Int64Value(v) + } + + if v := apiObject.Ebs.VolumeType; v != nil { + tfMap["volume_type"] = aws.StringValue(v) + } + + return tfMap +} + +func flattenEc2BlockDeviceMappingsForAmiEbsBlockDevice(apiObjects []*ec2.BlockDeviceMapping) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var tfList []interface{} + + for _, apiObject := range apiObjects { + if apiObject == nil { + continue + } + + if apiObject.Ebs == nil { + continue + } + + tfList = append(tfList, flattenEc2BlockDeviceMappingForAmiEbsBlockDevice(apiObject)) + } + + return tfList +} + +func expandEc2BlockDeviceMappingForAmiEphemeralBlockDevice(tfMap map[string]interface{}) *ec2.BlockDeviceMapping { + if tfMap == nil { + return nil + } + + apiObject := &ec2.BlockDeviceMapping{} + + if v, ok := tfMap["device_name"].(string); ok && v != "" { + apiObject.DeviceName = aws.String(v) + } + + if v, ok := tfMap["virtual_name"].(string); ok && v != "" { + apiObject.VirtualName = aws.String(v) + } + + return apiObject +} + +func expandEc2BlockDeviceMappingsForAmiEphemeralBlockDevice(tfList []interface{}) []*ec2.BlockDeviceMapping { + if len(tfList) == 0 { + return nil + } + + var apiObjects []*ec2.BlockDeviceMapping + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandEc2BlockDeviceMappingForAmiEphemeralBlockDevice(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, apiObject) + } + + return apiObjects +} + +func flattenEc2BlockDeviceMappingForAmiEphemeralBlockDevice(apiObject *ec2.BlockDeviceMapping) 
map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.DeviceName; v != nil { + tfMap["device_name"] = aws.StringValue(v) + } + + if v := apiObject.VirtualName; v != nil { + tfMap["virtual_name"] = aws.StringValue(v) + } + + return tfMap +} + +func flattenEc2BlockDeviceMappingsForAmiEphemeralBlockDevice(apiObjects []*ec2.BlockDeviceMapping) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var tfList []interface{} + + for _, apiObject := range apiObjects { + if apiObject == nil { + continue + } + + if apiObject.Ebs != nil { + continue + } + + tfList = append(tfList, flattenEc2BlockDeviceMappingForAmiEphemeralBlockDevice(apiObject)) + } + + return tfList +} diff --git a/aws/resource_aws_ami_copy.go b/aws/resource_aws_ami_copy.go index 1ed5bb2ea93..07e0e92ebec 100644 --- a/aws/resource_aws_ami_copy.go +++ b/aws/resource_aws_ami_copy.go @@ -67,6 +67,11 @@ func resourceAwsAmiCopy() *schema.Resource { Computed: true, }, + "throughput": { + Type: schema.TypeInt, + Computed: true, + }, + "volume_size": { Type: schema.TypeInt, Computed: true, @@ -215,17 +220,16 @@ func resourceAwsAmiCopyCreate(d *schema.ResourceData, meta interface{}) error { return err } - id := *res.ImageId - d.SetId(id) + d.SetId(aws.StringValue(res.ImageId)) d.Set("manage_ebs_snapshots", true) if v := d.Get("tags").(map[string]interface{}); len(v) > 0 { - if err := keyvaluetags.Ec2CreateTags(client, id, v); err != nil { + if err := keyvaluetags.Ec2CreateTags(client, d.Id(), v); err != nil { return fmt.Errorf("error adding tags: %s", err) } } - _, err = resourceAwsAmiWaitForAvailable(d.Timeout(schema.TimeoutCreate), id, client) + _, err = resourceAwsAmiWaitForAvailable(d.Timeout(schema.TimeoutCreate), d.Id(), client) if err != nil { return err } diff --git a/aws/resource_aws_ami_from_instance.go b/aws/resource_aws_ami_from_instance.go index 2dc7dbbfb9c..9cdadc90815 100644 --- a/aws/resource_aws_ami_from_instance.go +++ b/aws/resource_aws_ami_from_instance.go @@ -67,6 +67,11 @@ func resourceAwsAmiFromInstance() *schema.Resource { Computed: true, }, + "throughput": { + Type: schema.TypeInt, + Computed: true, + }, + "volume_size": { Type: schema.TypeInt, Computed: true, @@ -197,17 +202,16 @@ func resourceAwsAmiFromInstanceCreate(d *schema.ResourceData, meta interface{}) return err } - id := *res.ImageId - d.SetId(id) + d.SetId(aws.StringValue(res.ImageId)) d.Set("manage_ebs_snapshots", true) if v := d.Get("tags").(map[string]interface{}); len(v) > 0 { - if err := keyvaluetags.Ec2CreateTags(client, id, v); err != nil { + if err := keyvaluetags.Ec2CreateTags(client, d.Id(), v); err != nil { return fmt.Errorf("error adding tags: %s", err) } } - _, err = resourceAwsAmiWaitForAvailable(d.Timeout(schema.TimeoutCreate), id, client) + _, err = resourceAwsAmiWaitForAvailable(d.Timeout(schema.TimeoutCreate), d.Id(), client) if err != nil { return err } diff --git a/aws/resource_aws_ami_test.go b/aws/resource_aws_ami_test.go index 3e6e399629a..730c052688d 100644 --- a/aws/resource_aws_ami_test.go +++ b/aws/resource_aws_ami_test.go @@ -17,6 +17,7 @@ import ( func TestAccAWSAMI_basic(t *testing.T) { var ami ec2.Image resourceName := "aws_ami.test" + snapshotResourceName := "aws_ebs_snapshot.test" rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ @@ -25,15 +26,32 @@ func TestAccAWSAMI_basic(t *testing.T) { CheckDestroy: testAccCheckAmiDestroy, Steps: []resource.TestStep{ { - Config: testAccAmiConfigBasic(rName, 8), + 
Config: testAccAmiConfigBasic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAmiExists(resourceName, &ami), + resource.TestCheckResourceAttr(resourceName, "architecture", "x86_64"), + testAccMatchResourceAttrRegionalARNNoAccount(resourceName, "arn", "ec2", regexp.MustCompile(`image/ami-.+`)), + resource.TestCheckResourceAttr(resourceName, "description", ""), + resource.TestCheckResourceAttr(resourceName, "ebs_block_device.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "ebs_block_device.*", map[string]string{ + "delete_on_termination": "true", + "device_name": "/dev/sda1", + "encrypted": "false", + "iops": "0", + "throughput": "0", + "volume_size": "8", + "volume_type": "standard", + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "ebs_block_device.*.snapshot_id", snapshotResourceName, "id"), resource.TestCheckResourceAttr(resourceName, "ena_support", "true"), + resource.TestCheckResourceAttr(resourceName, "ephemeral_block_device.#", "0"), + resource.TestCheckResourceAttr(resourceName, "kernel_id", ""), resource.TestCheckResourceAttr(resourceName, "name", rName), - testAccMatchResourceAttrRegionalARNNoAccount(resourceName, "arn", "ec2", regexp.MustCompile(`image/ami-.+`)), + resource.TestCheckResourceAttr(resourceName, "ramdisk_id", ""), resource.TestCheckResourceAttr(resourceName, "root_device_name", "/dev/sda1"), + resource.TestCheckResourceAttrPair(resourceName, "root_snapshot_id", snapshotResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "sriov_net_support", "simple"), resource.TestCheckResourceAttr(resourceName, "virtualization_type", "hvm"), - resource.TestMatchResourceAttr(resourceName, "root_snapshot_id", regexp.MustCompile("^snap-")), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, @@ -52,6 +70,7 @@ func TestAccAWSAMI_basic(t *testing.T) { func TestAccAWSAMI_description(t *testing.T) { var ami ec2.Image resourceName := "aws_ami.test" + snapshotResourceName := "aws_ebs_snapshot.test" rName := acctest.RandomWithPrefix("tf-acc-test") desc := acctest.RandomWithPrefix("desc") descUpdated := acctest.RandomWithPrefix("desc-updated") @@ -62,10 +81,33 @@ func TestAccAWSAMI_description(t *testing.T) { CheckDestroy: testAccCheckAmiDestroy, Steps: []resource.TestStep{ { - Config: testAccAmiConfigDesc(rName, desc, 8), + Config: testAccAmiConfigDesc(rName, desc), Check: resource.ComposeTestCheckFunc( testAccCheckAmiExists(resourceName, &ami), + resource.TestCheckResourceAttr(resourceName, "architecture", "x86_64"), + testAccMatchResourceAttrRegionalARNNoAccount(resourceName, "arn", "ec2", regexp.MustCompile(`image/ami-.+`)), resource.TestCheckResourceAttr(resourceName, "description", desc), + resource.TestCheckResourceAttr(resourceName, "ebs_block_device.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "ebs_block_device.*", map[string]string{ + "delete_on_termination": "true", + "device_name": "/dev/sda1", + "encrypted": "false", + "iops": "0", + "throughput": "0", + "volume_size": "8", + "volume_type": "standard", + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "ebs_block_device.*.snapshot_id", snapshotResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "ena_support", "true"), + resource.TestCheckResourceAttr(resourceName, "ephemeral_block_device.#", "0"), + resource.TestCheckResourceAttr(resourceName, "kernel_id", ""), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "ramdisk_id", ""), + 
resource.TestCheckResourceAttr(resourceName, "root_device_name", "/dev/sda1"), + resource.TestCheckResourceAttrPair(resourceName, "root_snapshot_id", snapshotResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "sriov_net_support", "simple"), + resource.TestCheckResourceAttr(resourceName, "virtualization_type", "hvm"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, { @@ -77,10 +119,33 @@ func TestAccAWSAMI_description(t *testing.T) { }, }, { - Config: testAccAmiConfigDesc(rName, descUpdated, 8), + Config: testAccAmiConfigDesc(rName, descUpdated), Check: resource.ComposeTestCheckFunc( testAccCheckAmiExists(resourceName, &ami), + resource.TestCheckResourceAttr(resourceName, "architecture", "x86_64"), + testAccMatchResourceAttrRegionalARNNoAccount(resourceName, "arn", "ec2", regexp.MustCompile(`image/ami-.+`)), resource.TestCheckResourceAttr(resourceName, "description", descUpdated), + resource.TestCheckResourceAttr(resourceName, "ebs_block_device.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "ebs_block_device.*", map[string]string{ + "delete_on_termination": "true", + "device_name": "/dev/sda1", + "encrypted": "false", + "iops": "0", + "throughput": "0", + "volume_size": "8", + "volume_type": "standard", + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "ebs_block_device.*.snapshot_id", snapshotResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "ena_support", "true"), + resource.TestCheckResourceAttr(resourceName, "ephemeral_block_device.#", "0"), + resource.TestCheckResourceAttr(resourceName, "kernel_id", ""), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "ramdisk_id", ""), + resource.TestCheckResourceAttr(resourceName, "root_device_name", "/dev/sda1"), + resource.TestCheckResourceAttrPair(resourceName, "root_snapshot_id", snapshotResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "sriov_net_support", "simple"), + resource.TestCheckResourceAttr(resourceName, "virtualization_type", "hvm"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, }, @@ -98,7 +163,7 @@ func TestAccAWSAMI_disappears(t *testing.T) { CheckDestroy: testAccCheckAmiDestroy, Steps: []resource.TestStep{ { - Config: testAccAmiConfigBasic(rName, 8), + Config: testAccAmiConfigBasic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAmiExists(resourceName, &ami), testAccCheckResourceDisappears(testAccProvider, resourceAwsAmi(), resourceName), @@ -109,9 +174,10 @@ func TestAccAWSAMI_disappears(t *testing.T) { }) } -func TestAccAWSAMI_tags(t *testing.T) { +func TestAccAWSAMI_EphemeralBlockDevices(t *testing.T) { var ami ec2.Image resourceName := "aws_ami.test" + snapshotResourceName := "aws_ebs_snapshot.test" rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ @@ -120,11 +186,40 @@ func TestAccAWSAMI_tags(t *testing.T) { CheckDestroy: testAccCheckAmiDestroy, Steps: []resource.TestStep{ { - Config: testAccAmiConfigTags1(rName, "key1", "value1", 8), + Config: testAccAmiConfigEphemeralBlockDevices(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAmiExists(resourceName, &ami), - resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), - resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + resource.TestCheckResourceAttr(resourceName, "architecture", "x86_64"), + testAccMatchResourceAttrRegionalARNNoAccount(resourceName, "arn", "ec2", regexp.MustCompile(`image/ami-.+`)), + 
resource.TestCheckResourceAttr(resourceName, "description", ""), + resource.TestCheckResourceAttr(resourceName, "ebs_block_device.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "ebs_block_device.*", map[string]string{ + "delete_on_termination": "true", + "device_name": "/dev/sda1", + "encrypted": "false", + "iops": "0", + "throughput": "0", + "volume_size": "8", + "volume_type": "standard", + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "ebs_block_device.*.snapshot_id", snapshotResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "ena_support", "true"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "ephemeral_block_device.*", map[string]string{ + "device_name": "/dev/sdb", + "virtual_name": "ephemeral0", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "ephemeral_block_device.*", map[string]string{ + "device_name": "/dev/sdc", + "virtual_name": "ephemeral1", + }), + resource.TestCheckResourceAttr(resourceName, "kernel_id", ""), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "ramdisk_id", ""), + resource.TestCheckResourceAttr(resourceName, "root_device_name", "/dev/sda1"), + resource.TestCheckResourceAttrPair(resourceName, "root_snapshot_id", snapshotResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "sriov_net_support", "simple"), + resource.TestCheckResourceAttr(resourceName, "virtualization_type", "hvm"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, { @@ -135,54 +230,88 @@ func TestAccAWSAMI_tags(t *testing.T) { "manage_ebs_snapshots", }, }, + }, + }) +} + +func TestAccAWSAMI_Gp3BlockDevice(t *testing.T) { + var ami ec2.Image + resourceName := "aws_ami.test" + snapshotResourceName := "aws_ebs_snapshot.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAmiDestroy, + Steps: []resource.TestStep{ { - Config: testAccAmiConfigTags2(rName, "key1", "value1updated", "key2", "value2", 8), + Config: testAccAmiConfigGp3BlockDevice(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAmiExists(resourceName, &ami), - resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), - resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), - resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + resource.TestCheckResourceAttr(resourceName, "architecture", "x86_64"), + testAccMatchResourceAttrRegionalARNNoAccount(resourceName, "arn", "ec2", regexp.MustCompile(`image/ami-.+`)), + resource.TestCheckResourceAttr(resourceName, "description", ""), + resource.TestCheckResourceAttr(resourceName, "ebs_block_device.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "ebs_block_device.*", map[string]string{ + "delete_on_termination": "true", + "device_name": "/dev/sda1", + "encrypted": "false", + "iops": "0", + "throughput": "0", + "volume_size": "8", + "volume_type": "standard", + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "ebs_block_device.*.snapshot_id", snapshotResourceName, "id"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "ebs_block_device.*", map[string]string{ + "delete_on_termination": "false", + "device_name": "/dev/sdb", + "encrypted": "true", + "iops": "100", + "throughput": "500", + "volume_size": "10", + "volume_type": "gp3", + }), + resource.TestCheckResourceAttr(resourceName, "ena_support", 
"false"), + resource.TestCheckResourceAttr(resourceName, "ephemeral_block_device.#", "0"), + resource.TestCheckResourceAttr(resourceName, "kernel_id", ""), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "ramdisk_id", ""), + resource.TestCheckResourceAttr(resourceName, "root_device_name", "/dev/sda1"), + resource.TestCheckResourceAttrPair(resourceName, "root_snapshot_id", snapshotResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "sriov_net_support", "simple"), + resource.TestCheckResourceAttr(resourceName, "virtualization_type", "hvm"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, { - Config: testAccAmiConfigTags1(rName, "key2", "value2", 8), - Check: resource.ComposeTestCheckFunc( - testAccCheckAmiExists(resourceName, &ami), - resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), - resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), - ), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "manage_ebs_snapshots", + }, }, }, }) } -func TestAccAWSAMI_snapshotSize(t *testing.T) { +func TestAccAWSAMI_tags(t *testing.T) { var ami ec2.Image - var bd ec2.BlockDeviceMapping resourceName := "aws_ami.test" rName := acctest.RandomWithPrefix("tf-acc-test") - expectedDevice := &ec2.EbsBlockDevice{ - DeleteOnTermination: aws.Bool(true), - Encrypted: aws.Bool(false), - Iops: aws.Int64(0), - VolumeSize: aws.Int64(20), - VolumeType: aws.String("standard"), - } - resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAmiDestroy, Steps: []resource.TestStep{ { - Config: testAccAmiConfigBasic(rName, 20), + Config: testAccAmiConfigTags1(rName, "key1", "value1"), Check: resource.ComposeTestCheckFunc( testAccCheckAmiExists(resourceName, &ami), - testAccCheckAmiBlockDevice(&ami, &bd, "/dev/sda1"), - testAccCheckAmiEbsBlockDevice(&bd, expectedDevice), - resource.TestCheckResourceAttr(resourceName, "architecture", "x86_64"), - resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), ), }, { @@ -193,6 +322,23 @@ func TestAccAWSAMI_snapshotSize(t *testing.T) { "manage_ebs_snapshots", }, }, + { + Config: testAccAmiConfigTags2(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAmiExists(resourceName, &ami), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccAmiConfigTags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAmiExists(resourceName, &ami), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, }, }) } @@ -270,80 +416,16 @@ func testAccCheckAmiExists(n string, ami *ec2.Image) resource.TestCheckFunc { } } -func testAccCheckAmiBlockDevice(ami *ec2.Image, blockDevice *ec2.BlockDeviceMapping, n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - devices := make(map[string]*ec2.BlockDeviceMapping) - for _, device := range ami.BlockDeviceMappings { - devices[*device.DeviceName] = device - } - - // Check if the block device 
exists - if _, ok := devices[n]; !ok { - return fmt.Errorf("block device doesn't exist: %s", n) - } - - *blockDevice = *devices[n] - return nil - } -} - -func testAccCheckAmiEbsBlockDevice(bd *ec2.BlockDeviceMapping, ed *ec2.EbsBlockDevice) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Test for things that ed has, don't care about unset values - cd := bd.Ebs - if ed.VolumeType != nil { - if *ed.VolumeType != *cd.VolumeType { - return fmt.Errorf("Volume type mismatch. Expected: %s Got: %s", - *ed.VolumeType, *cd.VolumeType) - } - } - if ed.DeleteOnTermination != nil { - if *ed.DeleteOnTermination != *cd.DeleteOnTermination { - return fmt.Errorf("DeleteOnTermination mismatch. Expected: %t Got: %t", - *ed.DeleteOnTermination, *cd.DeleteOnTermination) - } - } - if ed.Encrypted != nil { - if *ed.Encrypted != *cd.Encrypted { - return fmt.Errorf("Encrypted mismatch. Expected: %t Got: %t", - *ed.Encrypted, *cd.Encrypted) - } - } - // Integer defaults need to not be `0` so we don't get a panic - if ed.Iops != nil && *ed.Iops != 0 { - if *ed.Iops != *cd.Iops { - return fmt.Errorf("IOPS mismatch. Expected: %d Got: %d", - *ed.Iops, *cd.Iops) - } - } - if ed.VolumeSize != nil && *ed.VolumeSize != 0 { - if *ed.VolumeSize != *cd.VolumeSize { - return fmt.Errorf("Volume Size mismatch. Expected: %d Got: %d", - *ed.VolumeSize, *cd.VolumeSize) - } - } - - return nil - } -} - -func testAccAmiConfigBase(rName string, size int) string { - return fmt.Sprintf(` -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} - +func testAccAmiConfigBase(rName string) string { + return composeConfig( + testAccAvailableAZsNoOptInConfig(), + fmt.Sprintf(` resource "aws_ebs_volume" "test" { availability_zone = data.aws_availability_zones.available.names[0] - size = %d + size = 8 tags = { - Name = "%[2]s" + Name = %[1]q } } @@ -351,15 +433,16 @@ resource "aws_ebs_snapshot" "test" { volume_id = aws_ebs_volume.test.id tags = { - Name = "%[2]s" + Name = %[1]q } } - -`, size, rName) +`, rName)) } -func testAccAmiConfigBasic(rName string, size int) string { - return testAccAmiConfigBase(rName, size) + fmt.Sprintf(` +func testAccAmiConfigBasic(rName string) string { + return composeConfig( + testAccAmiConfigBase(rName), + fmt.Sprintf(` resource "aws_ami" "test" { ena_support = true name = %[1]q @@ -371,11 +454,13 @@ resource "aws_ami" "test" { snapshot_id = aws_ebs_snapshot.test.id } } -`, rName) +`, rName)) } -func testAccAmiConfigDesc(rName, desc string, size int) string { - return testAccAmiConfigBase(rName, size) + fmt.Sprintf(` +func testAccAmiConfigDesc(rName, desc string) string { + return composeConfig( + testAccAmiConfigBase(rName), + fmt.Sprintf(` resource "aws_ami" "test" { ena_support = true name = %[1]q @@ -388,11 +473,69 @@ resource "aws_ami" "test" { snapshot_id = aws_ebs_snapshot.test.id } } -`, rName, desc) +`, rName, desc)) +} + +func testAccAmiConfigEphemeralBlockDevices(rName string) string { + return composeConfig( + testAccAmiConfigBase(rName), + fmt.Sprintf(` +resource "aws_ami" "test" { + ena_support = true + name = %[1]q + root_device_name = "/dev/sda1" + virtualization_type = "hvm" + + ebs_block_device { + device_name = "/dev/sda1" + snapshot_id = aws_ebs_snapshot.test.id + } + + ephemeral_block_device { + device_name = "/dev/sdb" + virtual_name = "ephemeral0" + } + + ephemeral_block_device { + device_name = "/dev/sdc" + virtual_name = "ephemeral1" + } +} +`, rName)) +} + +func 
testAccAmiConfigGp3BlockDevice(rName string) string { + return composeConfig( + testAccAmiConfigBase(rName), + fmt.Sprintf(` +resource "aws_ami" "test" { + ena_support = false + name = %[1]q + root_device_name = "/dev/sda1" + virtualization_type = "hvm" + + ebs_block_device { + device_name = "/dev/sda1" + snapshot_id = aws_ebs_snapshot.test.id + } + + ebs_block_device { + delete_on_termination = false + device_name = "/dev/sdb" + encrypted = true + iops = 100 + throughput = 500 + volume_size = 10 + volume_type = "gp3" + } +} +`, rName)) } -func testAccAmiConfigTags1(rName, tagKey1, tagValue1 string, size int) string { - return testAccAmiConfigBase(rName, size) + fmt.Sprintf(` +func testAccAmiConfigTags1(rName, tagKey1, tagValue1 string) string { + return composeConfig( + testAccAmiConfigBase(rName), + fmt.Sprintf(` resource "aws_ami" "test" { ena_support = true name = %[1]q @@ -408,11 +551,13 @@ resource "aws_ami" "test" { %[2]q = %[3]q } } -`, rName, tagKey1, tagValue1) +`, rName, tagKey1, tagValue1)) } -func testAccAmiConfigTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string, size int) string { - return testAccAmiConfigBase(rName, size) + fmt.Sprintf(` +func testAccAmiConfigTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return composeConfig( + testAccAmiConfigBase(rName), + fmt.Sprintf(` resource "aws_ami" "test" { ena_support = true name = %[1]q @@ -429,5 +574,5 @@ resource "aws_ami" "test" { %[4]q = %[5]q } } -`, rName, tagKey1, tagValue1, tagKey2, tagValue2) +`, rName, tagKey1, tagValue1, tagKey2, tagValue2)) } diff --git a/website/docs/d/ami.html.markdown b/website/docs/d/ami.html.markdown index e8e99a13444..4f39df2df54 100644 --- a/website/docs/d/ami.html.markdown +++ b/website/docs/d/ami.html.markdown @@ -82,6 +82,7 @@ interpolation. not a provisioned IOPS image, otherwise the supported IOPS count. * `block_device_mappings.#.ebs.snapshot_id` - The ID of the snapshot. * `block_device_mappings.#.ebs.volume_size` - The size of the volume, in GiB. + * `block_device_mappings.#.ebs.throughput` - The throughput that the EBS volume supports, in MiB/s. * `block_device_mappings.#.ebs.volume_type` - The volume type. * `block_device_mappings.#.no_device` - Suppresses the specified device included in the block device mapping of the AMI. diff --git a/website/docs/r/ami.html.markdown b/website/docs/r/ami.html.markdown index 90b0a833661..10bb0d6d634 100644 --- a/website/docs/r/ami.html.markdown +++ b/website/docs/r/ami.html.markdown @@ -74,16 +74,16 @@ Nested `ebs_block_device` blocks have the following structure: * `delete_on_termination` - (Optional) Boolean controlling whether the EBS volumes created to support each created instance will be deleted once that instance is terminated. * `encrypted` - (Optional) Boolean controlling whether the created EBS volumes will be encrypted. Can't be used with `snapshot_id`. -* `iops` - (Required only when `volume_type` is "io1/io2") Number of I/O operations per second the +* `iops` - (Required only when `volume_type` is `io1` or `io2`) Number of I/O operations per second the created volumes will support. * `snapshot_id` - (Optional) The id of an EBS snapshot that will be used to initialize the created EBS volumes. If set, the `volume_size` attribute must be at least as large as the referenced snapshot. +* `throughput` - (Optional) The throughput that the EBS volume supports, in MiB/s. Only valid for `volume_type` of `gp3`. * `volume_size` - (Required unless `snapshot_id` is set) The size of created volumes in GiB. 
If `snapshot_id` is set and `volume_size` is omitted then the volume will have the same size as the selected snapshot. -* `volume_type` - (Optional) The type of EBS volume to create. Can be one of "standard" (the - default), "io1", "io2" or "gp2". +* `volume_type` - (Optional) The type of EBS volume to create. Can be `standard`, `gp2`, `gp3`, `io1`, `io2`, `sc1` or `st1` (Default: `standard`). * `kms_key_id` - (Optional) The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when encrypting the snapshots of an image during a copy operation. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used From 79fe9fae92affad773b1bc732bc89bdb9d2e3555 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 19 Jan 2021 08:07:42 -0500 Subject: [PATCH 0734/1212] Update CHANGELOG for #16631 --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8d5b7f4557a..134664bfaed 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,13 @@ FEATURES * **New Resource:** `aws_backup_global_settings` [GH-16475] * **New Resource:** `aws_sagemaker_feature_group` [GH-16728] +ENHANCEMENTS + +* data-source/aws_ami: Add `throughput` attribute to `block_device_mappings` `ebs` attribute [GH-16631] +* resource/aws_ami: Support `volume_type` value of `gp3` and add `throughput` argument to `ebs_block_device` configuration block [GH-16631] +* resource/aws_ami_copy: Add `throughput` argument to `ebs_block_device` configuration block [GH-16631] +* resource/aws_ami_from_instance: Add `throughput` argument to `ebs_block_device` configuration block [GH-16631] + ## 3.24.1 (January 15, 2021) BUG FIXES From ed86205ea92157a419504f94923c7632a3bc7f0d Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 19 Jan 2021 09:49:44 -0500 Subject: [PATCH 0735/1212] service/ec2: Add gp3 throughput (#16517) Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAWSEBSVolume_basic (66.62s) --- PASS: TestAccAWSEBSVolume_disappears (56.99s) --- PASS: TestAccAWSEBSVolume_gp3_basic (63.55s) --- PASS: TestAccAWSEBSVolume_gp3_iops (91.99s) --- PASS: TestAccAWSEBSVolume_gp3_throughput (90.69s) --- PASS: TestAccAWSEBSVolume_InvalidIopsForType (9.87s) --- PASS: TestAccAWSEBSVolume_InvalidThroughputForType (10.03s) --- PASS: TestAccAWSEBSVolume_kmsKey (65.84s) --- PASS: TestAccAWSEBSVolume_multiAttach (66.28s) --- PASS: TestAccAWSEBSVolume_NoIops (63.62s) --- PASS: TestAccAWSEBSVolume_updateAttachedEbsVolume (170.50s) --- PASS: TestAccAWSEBSVolume_updateIops_Io1 (91.00s) --- PASS: TestAccAWSEBSVolume_updateIops_Io2 (91.30s) --- PASS: TestAccAWSEBSVolume_updateSize (82.58s) --- PASS: TestAccAWSEBSVolume_updateType (92.09s) --- PASS: TestAccAWSEBSVolume_withTags (66.49s) --- SKIP: TestAccAWSEBSVolume_outpost (1.68s) --- PASS: TestAccAWSEbsVolumeDataSource_basic (57.85s) --- PASS: TestAccAWSEbsVolumeDataSource_multipleFilters (57.39s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- FAIL: TestAccAWSEBSVolume_multiAttach (12.16s) # unrelated; will ensure there is covering issue resource_aws_ebs_volume_test.go:398: Step 1/2 error: Error running apply: Error: Error creating EC2 volume: InvalidParameterValue: The specified zone does not support multi-attach-enabled volumes. 
status code: 400, request id: 13ded026-b104-49b4-921e-f3cc20abfdc5 --- FAIL: TestAccAWSEBSVolume_updateIops_Io2 (12.52s) # unrelated; will ensure there is covering issue resource_aws_ebs_volume_test.go:248: Step 1/3 error: Error running apply: Error: Error creating EC2 volume: UnknownVolumeType: Unsupported volume type 'io2' for volume creation. status code: 400, request id: 2e43e591-c913-491c-a9dd-4bf69031059c --- PASS: TestAccAWSEBSVolume_basic (37.24s) --- PASS: TestAccAWSEBSVolume_disappears (31.88s) --- PASS: TestAccAWSEBSVolume_gp3_basic (38.94s) --- PASS: TestAccAWSEBSVolume_gp3_iops (64.65s) --- PASS: TestAccAWSEBSVolume_gp3_throughput (64.05s) --- PASS: TestAccAWSEBSVolume_InvalidIopsForType (5.91s) --- PASS: TestAccAWSEBSVolume_InvalidThroughputForType (7.92s) --- PASS: TestAccAWSEBSVolume_kmsKey (41.00s) --- PASS: TestAccAWSEBSVolume_NoIops (35.33s) --- PASS: TestAccAWSEBSVolume_updateAttachedEbsVolume (172.84s) --- PASS: TestAccAWSEBSVolume_updateIops_Io1 (64.67s) --- PASS: TestAccAWSEBSVolume_updateSize (65.03s) --- PASS: TestAccAWSEBSVolume_updateType (64.96s) --- PASS: TestAccAWSEBSVolume_withTags (38.69s) --- SKIP: TestAccAWSEBSVolume_outpost (2.43s) --- PASS: TestAccAWSEbsVolumeDataSource_basic (29.45s) --- PASS: TestAccAWSEbsVolumeDataSource_multipleFilters (29.91s) ``` --- aws/data_source_aws_ebs_volume.go | 5 + aws/data_source_aws_ebs_volume_test.go | 1 + aws/resource_aws_ebs_volume.go | 160 +++++++++------ aws/resource_aws_ebs_volume_test.go | 255 +++++++++++++++++++++++- website/docs/d/ebs_volume.html.markdown | 1 + website/docs/r/ebs_volume.html.markdown | 7 +- 6 files changed, 369 insertions(+), 60 deletions(-) diff --git a/aws/data_source_aws_ebs_volume.go b/aws/data_source_aws_ebs_volume.go index 0389b9881a6..6279bd9de6a 100644 --- a/aws/data_source_aws_ebs_volume.go +++ b/aws/data_source_aws_ebs_volume.go @@ -68,6 +68,10 @@ func dataSourceAwsEbsVolume() *schema.Resource { Computed: true, }, "tags": tagsSchemaComputed(), + "throughput": { + Type: schema.TypeInt, + Computed: true, + }, }, } } @@ -151,6 +155,7 @@ func volumeDescriptionAttributes(d *schema.ResourceData, client *AWSClient, volu d.Set("volume_type", volume.VolumeType) d.Set("outpost_arn", volume.OutpostArn) d.Set("multi_attach_enabled", volume.MultiAttachEnabled) + d.Set("throughput", volume.Throughput) if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(volume.Tags).IgnoreAws().IgnoreConfig(client.IgnoreTagsConfig).Map()); err != nil { return fmt.Errorf("error setting tags: %s", err) diff --git a/aws/data_source_aws_ebs_volume_test.go b/aws/data_source_aws_ebs_volume_test.go index 6ceaef16bf6..a956929510d 100644 --- a/aws/data_source_aws_ebs_volume_test.go +++ b/aws/data_source_aws_ebs_volume_test.go @@ -25,6 +25,7 @@ func TestAccAWSEbsVolumeDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName, "tags", resourceName, "tags"), resource.TestCheckResourceAttrPair(dataSourceName, "outpost_arn", resourceName, "outpost_arn"), resource.TestCheckResourceAttrPair(dataSourceName, "multi_attach_enabled", resourceName, "multi_attach_enabled"), + resource.TestCheckResourceAttrPair(dataSourceName, "throughput", resourceName, "throughput"), ), }, }, diff --git a/aws/resource_aws_ebs_volume.go b/aws/resource_aws_ebs_volume.go index 871890bdd75..47c6ae8122f 100644 --- a/aws/resource_aws_ebs_volume.go +++ b/aws/resource_aws_ebs_volume.go @@ -1,6 +1,7 @@ package aws import ( + "context" "fmt" "log" "time" @@ -11,6 +12,7 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) @@ -24,6 +26,8 @@ func resourceAwsEbsVolume() *schema.Resource { State: schema.ImportStatePassthrough, }, + CustomizeDiff: resourceAwsEbsVolumeCustomizeDiff, + Schema: map[string]*schema.Schema{ "arn": { Type: schema.TypeString, @@ -44,9 +48,6 @@ func resourceAwsEbsVolume() *schema.Resource { Type: schema.TypeInt, Optional: true, Computed: true, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - return (d.Get("type").(string) != ec2.VolumeTypeIo1 && new == "0") || (d.Get("type").(string) != ec2.VolumeTypeIo2 && new == "0") - }, }, "kms_key_id": { Type: schema.TypeString, @@ -61,15 +62,17 @@ func resourceAwsEbsVolume() *schema.Resource { ForceNew: true, }, "size": { - Type: schema.TypeInt, - Optional: true, - Computed: true, + Type: schema.TypeInt, + Optional: true, + Computed: true, + ExactlyOneOf: []string{"size", "snapshot_id"}, }, "snapshot_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ExactlyOneOf: []string{"size", "snapshot_id"}, }, "outpost_arn": { Type: schema.TypeString, @@ -83,6 +86,12 @@ func resourceAwsEbsVolume() *schema.Resource { Computed: true, }, "tags": tagsSchema(), + "throughput": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntBetween(125, 1000), + }, }, } } @@ -97,6 +106,9 @@ func resourceAwsEbsVolumeCreate(d *schema.ResourceData, meta interface{}) error if value, ok := d.GetOk("encrypted"); ok { request.Encrypted = aws.Bool(value.(bool)) } + if value, ok := d.GetOk("iops"); ok { + request.Iops = aws.Int64(int64(value.(int))) + } if value, ok := d.GetOk("kms_key_id"); ok { request.KmsKeyId = aws.String(value.(string)) } @@ -112,28 +124,11 @@ func resourceAwsEbsVolumeCreate(d *schema.ResourceData, meta interface{}) error if value, ok := d.GetOk("outpost_arn"); ok { request.OutpostArn = aws.String(value.(string)) } - - // IOPs are only valid, and required for, storage type io1 and io2. The current minimum - // is 100. Hard validation in place to return an error if IOPs are provided - // for an unsupported storage type. 
- // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/12667 - var t string - if value, ok := d.GetOk("type"); ok { - t = value.(string) - request.VolumeType = aws.String(t) + if value, ok := d.GetOk("throughput"); ok { + request.Throughput = aws.Int64(int64(value.(int))) } - - if iops := d.Get("iops").(int); iops > 0 { - if t != ec2.VolumeTypeIo1 && t != ec2.VolumeTypeIo2 { - if t == "" { - // Volume creation would default to gp2 - t = ec2.VolumeTypeGp2 - } - return fmt.Errorf("error creating ebs_volume: iops attribute not supported for type %s", t) - } - // We add the iops value without validating it's size, to allow AWS to - // enforce a size requirement (currently 100) - request.Iops = aws.Int64(int64(iops)) + if value, ok := d.GetOk("type"); ok { + request.VolumeType = aws.String(value.(string)) } log.Printf("[DEBUG] EBS Volume create opts: %s", request) @@ -168,27 +163,29 @@ func resourceAwsEbsVolumeCreate(d *schema.ResourceData, meta interface{}) error func resourceAWSEbsVolumeUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn - requestUpdate := false - params := &ec2.ModifyVolumeInput{ - VolumeId: aws.String(d.Id()), - } + if d.HasChangesExcept("tags") { + params := &ec2.ModifyVolumeInput{ + VolumeId: aws.String(d.Id()), + } - if d.HasChange("size") { - requestUpdate = true - params.Size = aws.Int64(int64(d.Get("size").(int))) - } + if d.HasChange("size") { + params.Size = aws.Int64(int64(d.Get("size").(int))) + } - if d.HasChange("type") { - requestUpdate = true - params.VolumeType = aws.String(d.Get("type").(string)) - } + if d.HasChange("type") { + params.VolumeType = aws.String(d.Get("type").(string)) + } - if d.HasChange("iops") { - requestUpdate = true - params.Iops = aws.Int64(int64(d.Get("iops").(int))) - } + if d.HasChange("iops") { + params.Iops = aws.Int64(int64(d.Get("iops").(int))) + } + + // "If no throughput value is specified, the existing value is retained." + // Not currently correct, so always specify any non-zero throughput value. 
+ if v := d.Get("throughput").(int); v > 0 { + params.Throughput = aws.Int64(int64(v)) + } - if requestUpdate { result, err := conn.ModifyVolume(params) if err != nil { return err @@ -278,20 +275,21 @@ func resourceAwsEbsVolumeRead(d *schema.ResourceData, meta interface{}) error { Service: "ec2", } d.Set("arn", arn.String()) - d.Set("availability_zone", aws.StringValue(volume.AvailabilityZone)) - d.Set("encrypted", aws.BoolValue(volume.Encrypted)) - d.Set("iops", aws.Int64Value(volume.Iops)) - d.Set("kms_key_id", aws.StringValue(volume.KmsKeyId)) - d.Set("size", aws.Int64Value(volume.Size)) - d.Set("snapshot_id", aws.StringValue(volume.SnapshotId)) - d.Set("outpost_arn", aws.StringValue(volume.OutpostArn)) + d.Set("availability_zone", volume.AvailabilityZone) + d.Set("encrypted", volume.Encrypted) + d.Set("iops", volume.Iops) + d.Set("kms_key_id", volume.KmsKeyId) + d.Set("size", volume.Size) + d.Set("snapshot_id", volume.SnapshotId) + d.Set("outpost_arn", volume.OutpostArn) d.Set("multi_attach_enabled", volume.MultiAttachEnabled) + d.Set("throughput", volume.Throughput) if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(volume.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { return fmt.Errorf("error setting tags: %s", err) } - d.Set("type", aws.StringValue(volume.VolumeType)) + d.Set("type", volume.VolumeType) return nil } @@ -373,3 +371,53 @@ func resourceAwsEbsVolumeDelete(d *schema.ResourceData, meta interface{}) error return nil } + +func resourceAwsEbsVolumeCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { + iops := diff.Get("iops").(int) + multiAttachEnabled := diff.Get("multi_attach_enabled").(bool) + throughput := diff.Get("throughput").(int) + volumeType := diff.Get("type").(string) + + if diff.Id() == "" { + // Create. + + // Iops is required for io1 and io2 volumes. + // The default for gp3 volumes is 3,000 IOPS. + // This parameter is not supported for gp2, st1, sc1, or standard volumes. + // Hard validation in place to return an error if IOPs are provided + // for an unsupported storage type. + // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/12667 + switch volumeType { + case ec2.VolumeTypeIo1, ec2.VolumeTypeIo2: + if iops == 0 { + return fmt.Errorf("'iops' must be set when 'type' is '%s'", volumeType) + } + + case ec2.VolumeTypeGp3: + + default: + if iops != 0 { + return fmt.Errorf("'iops' must not be set when 'type' is '%s'", volumeType) + } + } + + // MultiAttachEnabled is supported with io1 volumes only. + if multiAttachEnabled && volumeType != ec2.VolumeTypeIo1 { + return fmt.Errorf("'multi_attach_enabled' must not be set when 'type' is '%s'", volumeType) + } + + // Throughput is valid only for gp3 volumes. + if throughput > 0 && volumeType != ec2.VolumeTypeGp3 { + return fmt.Errorf("'throughput' must not be set when 'type' is '%s'", volumeType) + } + } else { + // Update. + + // Setting 'iops = 0' is a no-op if the volume type does not require Iops to be specified. 
+ if diff.HasChange("iops") && volumeType != ec2.VolumeTypeIo1 && volumeType != ec2.VolumeTypeIo2 && iops == 0 { + return diff.Clear("iops") + } + } + + return nil +} diff --git a/aws/resource_aws_ebs_volume_test.go b/aws/resource_aws_ebs_volume_test.go index 36214365714..8550bbebe5e 100644 --- a/aws/resource_aws_ebs_volume_test.go +++ b/aws/resource_aws_ebs_volume_test.go @@ -89,6 +89,7 @@ func TestAccAWSEBSVolume_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "type", "gp2"), resource.TestCheckResourceAttr(resourceName, "outpost_arn", ""), resource.TestCheckResourceAttr(resourceName, "multi_attach_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "throughput", "0"), ), }, { @@ -115,6 +116,7 @@ func TestAccAWSEBSVolume_updateAttachedEbsVolume(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckVolumeExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "size", "10"), + resource.TestCheckResourceAttr(resourceName, "throughput", "0"), ), }, { @@ -127,6 +129,7 @@ func TestAccAWSEBSVolume_updateAttachedEbsVolume(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckVolumeExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "size", "20"), + resource.TestCheckResourceAttr(resourceName, "throughput", "0"), ), }, }, @@ -148,6 +151,7 @@ func TestAccAWSEBSVolume_updateSize(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckVolumeExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "size", "1"), + resource.TestCheckResourceAttr(resourceName, "throughput", "0"), ), }, { @@ -160,6 +164,7 @@ func TestAccAWSEBSVolume_updateSize(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckVolumeExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "size", "10"), + resource.TestCheckResourceAttr(resourceName, "throughput", "0"), ), }, }, @@ -181,6 +186,7 @@ func TestAccAWSEBSVolume_updateType(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckVolumeExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "type", "gp2"), + resource.TestCheckResourceAttr(resourceName, "throughput", "0"), ), }, { @@ -193,6 +199,7 @@ func TestAccAWSEBSVolume_updateType(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckVolumeExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "type", "sc1"), + resource.TestCheckResourceAttr(resourceName, "throughput", "0"), ), }, }, @@ -214,6 +221,7 @@ func TestAccAWSEBSVolume_updateIops_Io1(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckVolumeExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "iops", "100"), + resource.TestCheckResourceAttr(resourceName, "throughput", "0"), ), }, { @@ -226,6 +234,7 @@ func TestAccAWSEBSVolume_updateIops_Io1(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckVolumeExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "iops", "200"), + resource.TestCheckResourceAttr(resourceName, "throughput", "0"), ), }, }, @@ -247,6 +256,7 @@ func TestAccAWSEBSVolume_updateIops_Io2(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckVolumeExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "iops", "100"), + resource.TestCheckResourceAttr(resourceName, "throughput", "0"), ), }, { @@ -259,6 +269,7 @@ func TestAccAWSEBSVolume_updateIops_Io2(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckVolumeExists(resourceName, &v), 
resource.TestCheckResourceAttr(resourceName, "iops", "200"), + resource.TestCheckResourceAttr(resourceName, "throughput", "0"), ), }, }, @@ -284,6 +295,7 @@ func TestAccAWSEBSVolume_kmsKey(t *testing.T) { testAccCheckVolumeExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "encrypted", "true"), resource.TestCheckResourceAttrPair(resourceName, "kms_key_id", kmsKeyResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "throughput", "0"), ), }, { @@ -308,6 +320,7 @@ func TestAccAWSEBSVolume_NoIops(t *testing.T) { Config: testAccAwsEbsVolumeConfigWithNoIops, Check: resource.ComposeTestCheckFunc( testAccCheckVolumeExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "throughput", "0"), ), }, { @@ -329,7 +342,22 @@ func TestAccAWSEBSVolume_InvalidIopsForType(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccAwsEbsVolumeConfigWithInvalidIopsForType, - ExpectError: regexp.MustCompile(`error creating ebs_volume: iops attribute not supported for type gp2`), + ExpectError: regexp.MustCompile(`'iops' must not be set when 'type' is`), + }, + }, + }) +} + +func TestAccAWSEBSVolume_InvalidThroughputForType(t *testing.T) { + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEbsVolumeConfigWithInvalidThroughputForType, + ExpectError: regexp.MustCompile(`'throughput' must not be set when 'type' is`), }, }, }) @@ -378,6 +406,7 @@ func TestAccAWSEBSVolume_multiAttach(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckVolumeExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "multi_attach_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "throughput", "0"), ), }, { @@ -416,6 +445,156 @@ func TestAccAWSEBSVolume_outpost(t *testing.T) { }) } +func TestAccAWSEBSVolume_gp3_basic(t *testing.T) { + var v ec2.Volume + resourceName := "aws_ebs_volume.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: resourceName, + Providers: testAccProviders, + CheckDestroy: testAccCheckVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEbsVolumeConfigSizeType(rName, 10, "gp3"), + Check: resource.ComposeTestCheckFunc( + testAccCheckVolumeExists(resourceName, &v), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "ec2", regexp.MustCompile(`volume/vol-.+`)), + resource.TestCheckResourceAttr(resourceName, "encrypted", "false"), + resource.TestCheckResourceAttr(resourceName, "iops", "3000"), + resource.TestCheckResourceAttr(resourceName, "kms_key_id", ""), + resource.TestCheckResourceAttr(resourceName, "multi_attach_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "outpost_arn", ""), + resource.TestCheckResourceAttr(resourceName, "size", "10"), + resource.TestCheckResourceAttr(resourceName, "snapshot_id", ""), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + resource.TestCheckResourceAttr(resourceName, "throughput", "125"), + resource.TestCheckResourceAttr(resourceName, "type", "gp3"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSEBSVolume_gp3_iops(t *testing.T) { + var v ec2.Volume + resourceName := "aws_ebs_volume.test" + rName := 
acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: resourceName, + Providers: testAccProviders, + CheckDestroy: testAccCheckVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEbsVolumeConfigGp3Iops(rName, 4000), + Check: resource.ComposeTestCheckFunc( + testAccCheckVolumeExists(resourceName, &v), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "ec2", regexp.MustCompile(`volume/vol-.+`)), + resource.TestCheckResourceAttr(resourceName, "encrypted", "false"), + resource.TestCheckResourceAttr(resourceName, "iops", "4000"), + resource.TestCheckResourceAttr(resourceName, "kms_key_id", ""), + resource.TestCheckResourceAttr(resourceName, "multi_attach_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "outpost_arn", ""), + resource.TestCheckResourceAttr(resourceName, "size", "10"), + resource.TestCheckResourceAttr(resourceName, "snapshot_id", ""), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + resource.TestCheckResourceAttr(resourceName, "throughput", "200"), + resource.TestCheckResourceAttr(resourceName, "type", "gp3"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsEbsVolumeConfigGp3Iops(rName, 5000), + Check: resource.ComposeTestCheckFunc( + testAccCheckVolumeExists(resourceName, &v), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "ec2", regexp.MustCompile(`volume/vol-.+`)), + resource.TestCheckResourceAttr(resourceName, "encrypted", "false"), + resource.TestCheckResourceAttr(resourceName, "iops", "5000"), + resource.TestCheckResourceAttr(resourceName, "kms_key_id", ""), + resource.TestCheckResourceAttr(resourceName, "multi_attach_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "outpost_arn", ""), + resource.TestCheckResourceAttr(resourceName, "size", "10"), + resource.TestCheckResourceAttr(resourceName, "snapshot_id", ""), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + resource.TestCheckResourceAttr(resourceName, "throughput", "200"), + resource.TestCheckResourceAttr(resourceName, "type", "gp3"), + ), + }, + }, + }) +} + +func TestAccAWSEBSVolume_gp3_throughput(t *testing.T) { + var v ec2.Volume + resourceName := "aws_ebs_volume.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: resourceName, + Providers: testAccProviders, + CheckDestroy: testAccCheckVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEbsVolumeConfigGp3Throughput(rName, 400), + Check: resource.ComposeTestCheckFunc( + testAccCheckVolumeExists(resourceName, &v), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "ec2", regexp.MustCompile(`volume/vol-.+`)), + resource.TestCheckResourceAttr(resourceName, "encrypted", "false"), + resource.TestCheckResourceAttr(resourceName, "iops", "3000"), + resource.TestCheckResourceAttr(resourceName, "kms_key_id", ""), + resource.TestCheckResourceAttr(resourceName, "multi_attach_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "outpost_arn", ""), + resource.TestCheckResourceAttr(resourceName, "size", "10"), + resource.TestCheckResourceAttr(resourceName, "snapshot_id", ""), + resource.TestCheckResourceAttr(resourceName, "tags.%", 
"1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + resource.TestCheckResourceAttr(resourceName, "throughput", "400"), + resource.TestCheckResourceAttr(resourceName, "type", "gp3"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsEbsVolumeConfigGp3Throughput(rName, 600), + Check: resource.ComposeTestCheckFunc( + testAccCheckVolumeExists(resourceName, &v), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "ec2", regexp.MustCompile(`volume/vol-.+`)), + resource.TestCheckResourceAttr(resourceName, "encrypted", "false"), + resource.TestCheckResourceAttr(resourceName, "iops", "3000"), + resource.TestCheckResourceAttr(resourceName, "kms_key_id", ""), + resource.TestCheckResourceAttr(resourceName, "multi_attach_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "outpost_arn", ""), + resource.TestCheckResourceAttr(resourceName, "size", "10"), + resource.TestCheckResourceAttr(resourceName, "snapshot_id", ""), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + resource.TestCheckResourceAttr(resourceName, "throughput", "600"), + resource.TestCheckResourceAttr(resourceName, "type", "gp3"), + ), + }, + }, + }) +} + func TestAccAWSEBSVolume_disappears(t *testing.T) { var v ec2.Volume resourceName := "aws_ebs_volume.test" @@ -869,6 +1048,29 @@ resource "aws_ebs_volume" "test" { } ` +const testAccAwsEbsVolumeConfigWithInvalidThroughputForType = ` +data "aws_availability_zones" "available" { + state = "available" + + filter { + name = "opt-in-status" + values = ["opt-in-not-required"] + } +} + +resource "aws_ebs_volume" "test" { + availability_zone = data.aws_availability_zones.available.names[0] + size = 10 + iops = 100 + throughput = 500 + type = "io1" + + tags = { + Name = "TerraformTest" + } +} +` + func testAccAwsEbsVolumeConfigOutpost() string { return ` data "aws_outposts_outposts" "test" {} @@ -913,3 +1115,54 @@ resource "aws_ebs_volume" "test" { } `, rName) } + +func testAccAwsEbsVolumeConfigSizeType(rName string, size int, volumeType string) string { + return composeConfig( + testAccAvailableAZsNoOptInConfig(), + fmt.Sprintf(` +resource "aws_ebs_volume" "test" { + availability_zone = data.aws_availability_zones.available.names[0] + type = %[3]q + size = %[2]d + + tags = { + Name = %[1]q + } +} +`, rName, size, volumeType)) +} + +func testAccAwsEbsVolumeConfigGp3Throughput(rName string, throughput int) string { + return composeConfig( + testAccAvailableAZsNoOptInConfig(), + fmt.Sprintf(` +resource "aws_ebs_volume" "test" { + availability_zone = data.aws_availability_zones.available.names[0] + type = "gp3" + size = 10 + throughput = %[2]d + + tags = { + Name = %[1]q + } +} +`, rName, throughput)) +} + +func testAccAwsEbsVolumeConfigGp3Iops(rName string, iops int) string { + return composeConfig( + testAccAvailableAZsNoOptInConfig(), + fmt.Sprintf(` +resource "aws_ebs_volume" "test" { + availability_zone = data.aws_availability_zones.available.names[0] + type = "gp3" + iops = %[2]d + size = 10 + throughput = 200 + + tags = { + Name = %[1]q + } +} +`, rName, iops)) +} diff --git a/website/docs/d/ebs_volume.html.markdown b/website/docs/d/ebs_volume.html.markdown index 0a4d9d5c72b..a3d74495dc7 100644 --- a/website/docs/d/ebs_volume.html.markdown +++ b/website/docs/d/ebs_volume.html.markdown @@ -57,5 +57,6 @@ In addition to all arguments above, the following attributes are exported: * `volume_type` - 
The type of EBS volume. * `kms_key_id` - The ARN for the KMS encryption key. * `tags` - A map of tags for the resource. +* `throughput` - The throughput that the volume supports, in MiB/s. [1]: http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-volumes.html diff --git a/website/docs/r/ebs_volume.html.markdown b/website/docs/r/ebs_volume.html.markdown index 4bf9dced640..e0c1e716ead 100644 --- a/website/docs/r/ebs_volume.html.markdown +++ b/website/docs/r/ebs_volume.html.markdown @@ -31,16 +31,17 @@ The following arguments are supported: * `availability_zone` - (Required) The AZ where the EBS volume will exist. * `encrypted` - (Optional) If true, the disk will be encrypted. -* `iops` - (Optional) The amount of IOPS to provision for the disk. Only valid for `type` of `io1` or `io2`. +* `iops` - (Optional) The amount of IOPS to provision for the disk. Only valid for `type` of `io1`, `io2` or `gp3`. * `multi_attach_enabled` - (Optional) Specifies whether to enable Amazon EBS Multi-Attach. Multi-Attach is supported exclusively on `io1` volumes. * `size` - (Optional) The size of the drive in GiBs. * `snapshot_id` (Optional) A snapshot to base the EBS volume off of. * `outpost_arn` - (Optional) The Amazon Resource Name (ARN) of the Outpost. -* `type` - (Optional) The type of EBS volume. Can be "standard", "gp2", "io1", "io2", "sc1" or "st1" (Default: "gp2"). +* `type` - (Optional) The type of EBS volume. Can be `standard`, `gp2`, `gp3`, `io1`, `io2`, `sc1` or `st1` (Default: `gp2`). * `kms_key_id` - (Optional) The ARN for the KMS encryption key. When specifying `kms_key_id`, `encrypted` needs to be set to true. * `tags` - (Optional) A map of tags to assign to the resource. +* `throughput` - (Optional) The throughput that the volume supports, in MiB/s. Only valid for `type` of `gp3`. -~> **NOTE**: When changing the `size`, `iops` or `type` of an instance, there are [considerations](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/considerations.html) to be aware of that Amazon have written about this. +~> **NOTE**: When changing the `size`, `iops` or `type` of an instance, there are [considerations](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/considerations.html) to be aware of. 
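
The argument constraints documented above (`iops` only meaningful for `io1`, `io2`, or `gp3`; `throughput` only valid for `gp3`) are enforced at plan time by the `resourceAwsEbsVolumeCustomizeDiff` function added earlier in this patch, rather than at apply time as before. A minimal standalone sketch of that validation logic; the `volumeSpec` struct and `validateVolumeSpec` helper are illustrative names, not provider code, and the multi-attach check is omitted for brevity:

```go
package main

import "fmt"

// volumeSpec is a hypothetical stand-in for the plan values that the
// CustomizeDiff function reads via diff.Get.
type volumeSpec struct {
	Type       string
	Iops       int
	Throughput int
}

// validateVolumeSpec mirrors the create-time checks in this patch:
// io1/io2 require iops, gp3 accepts optional iops and throughput,
// and every other type rejects both.
func validateVolumeSpec(v volumeSpec) error {
	switch v.Type {
	case "io1", "io2":
		if v.Iops == 0 {
			return fmt.Errorf("'iops' must be set when 'type' is '%s'", v.Type)
		}
	case "gp3":
		// gp3 accepts both arguments; AWS defaults them to 3000 IOPS
		// and 125 MiB/s when unset (see the gp3_basic test earlier).
	default:
		if v.Iops != 0 {
			return fmt.Errorf("'iops' must not be set when 'type' is '%s'", v.Type)
		}
	}

	// Throughput is valid only for gp3 volumes.
	if v.Throughput > 0 && v.Type != "gp3" {
		return fmt.Errorf("'throughput' must not be set when 'type' is '%s'", v.Type)
	}
	return nil
}

func main() {
	fmt.Println(validateVolumeSpec(volumeSpec{Type: "gp2", Iops: 100}))       // 'iops' must not be set...
	fmt.Println(validateVolumeSpec(volumeSpec{Type: "gp3", Throughput: 400})) // <nil>
}
```

Moving these checks into `CustomizeDiff` surfaces the error during `terraform plan` instead of failing the `CreateVolume` call during apply, which is why the old in-line validation in the create function is deleted in this patch.
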
## Attributes Reference From 4ccea18c4ac7aeb926332c53bde786a9b38fbc7b Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 19 Jan 2021 09:51:26 -0500 Subject: [PATCH 0736/1212] Update CHANGELOG for #16517 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 134664bfaed..0427f1e676f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,9 +8,11 @@ FEATURES ENHANCEMENTS * data-source/aws_ami: Add `throughput` attribute to `block_device_mappings` `ebs` attribute [GH-16631] +* data-source/aws_ebs_volume: Add `throughput` attribute [GH-16517] * resource/aws_ami: Support `volume_type` value of `gp3` and add `throughput` argument to `ebs_block_device` configuration block [GH-16631] * resource/aws_ami_copy: Add `throughput` argument to `ebs_block_device` configuration block [GH-16631] * resource/aws_ami_from_instance: Add `throughput` argument to `ebs_block_device` configuration block [GH-16631] +* resource/aws_ebs_volume: Add `throughput` argument [GH-16517] ## 3.24.1 (January 15, 2021) From cd3341e15afd4a82a569ede9924c0c0eb9446c1e Mon Sep 17 00:00:00 2001 From: Pavels Veretennikovs Date: Tue, 19 Jan 2021 16:56:48 +0200 Subject: [PATCH 0737/1212] docs/resource/aws_api_gateway_rest_api_policy: Use execution_arn instead of arn (#17132) --- website/docs/r/api_gateway_rest_api_policy.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/r/api_gateway_rest_api_policy.html.markdown b/website/docs/r/api_gateway_rest_api_policy.html.markdown index 3e18dbc0d7f..ffdad097ec3 100644 --- a/website/docs/r/api_gateway_rest_api_policy.html.markdown +++ b/website/docs/r/api_gateway_rest_api_policy.html.markdown @@ -34,7 +34,7 @@ resource "aws_api_gateway_rest_api_policy" "test" { "AWS": "*" }, "Action": "execute-api:Invoke", - "Resource": "${aws_api_gateway_rest_api.test.arn}", + "Resource": "${aws_api_gateway_rest_api.test.execution_arn}", "Condition": { "IpAddress": { "aws:SourceIp": "123.123.123.123/32" @@ -66,4 +66,4 @@ In addition to all arguments above, the following attributes are exported: ``` $ terraform import aws_api_gateway_rest_api_policy.example 12345abcde -``` \ No newline at end of file +``` From cfa41147c0b993395d00ec68df84a1477dee8e85 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Jan 2021 10:09:21 -0500 Subject: [PATCH 0738/1212] build(deps): bump github.com/aws/aws-sdk-go in /awsproviderlint (#17154) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.36.19 to 1.36.28. 
- [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.36.19...v1.36.28) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- awsproviderlint/go.mod | 2 +- awsproviderlint/go.sum | 4 +- .../github.com/aws/aws-sdk-go/aws/config.go | 32 +++++++++++--- .../stscreds/assume_role_provider.go | 12 +++-- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 6 +++ .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/private/protocol/host.go | 44 +++++++++++++++++-- awsproviderlint/vendor/modules.txt | 2 +- 8 files changed, 84 insertions(+), 20 deletions(-) diff --git a/awsproviderlint/go.mod b/awsproviderlint/go.mod index db9c1276796..8d08dcde54e 100644 --- a/awsproviderlint/go.mod +++ b/awsproviderlint/go.mod @@ -3,7 +3,7 @@ module github.com/terraform-providers/terraform-provider-aws/awsproviderlint go 1.15 require ( - github.com/aws/aws-sdk-go v1.36.19 + github.com/aws/aws-sdk-go v1.36.28 github.com/bflad/tfproviderlint v0.21.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.0 golang.org/x/tools v0.0.0-20200928201943-a0ef9b62deab diff --git a/awsproviderlint/go.sum b/awsproviderlint/go.sum index 03b33ba6b9c..237bfe97c47 100644 --- a/awsproviderlint/go.sum +++ b/awsproviderlint/go.sum @@ -56,8 +56,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.36.19 h1:zbJZKkxeDiYxUYFjymjWxPye+qa1G2gRVyhIzZrB9zA= -github.com/aws/aws-sdk-go v1.36.19/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.36.28 h1:JVRN7BZgwQ31SQCBwG5QM445+ynJU0ruKu+miFIijYY= +github.com/aws/aws-sdk-go v1.36.28/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/bflad/gopaniccheck v0.1.0 h1:tJftp+bv42ouERmUMWLoUn/5bi/iQZjHPznM00cP/bU= github.com/bflad/gopaniccheck v0.1.0/go.mod h1:ZCj2vSr7EqVeDaqVsWN4n2MwdROx1YL+LFo47TSWtsA= github.com/bflad/tfproviderlint v0.21.0 h1:iSNU4khz+55oYA+5aXXMrz5Max4Mytb0JwPGhOwTIJo= diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/config.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/config.go index 3b809e8478c..39fa6d5fe74 100644 --- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -438,13 +438,6 @@ func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config { return c } -// MergeIn merges the passed in configs into the existing config object. -func (c *Config) MergeIn(cfgs ...*Config) { - for _, other := range cfgs { - mergeInConfig(c, other) - } -} - // WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag // when resolving the endpoint for a service func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config { @@ -459,6 +452,27 @@ func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEn return c } +// WithLowerCaseHeaderMaps sets a config LowerCaseHeaderMaps value +// returning a Config pointer for chaining. 
+func (c *Config) WithLowerCaseHeaderMaps(t bool) *Config { + c.LowerCaseHeaderMaps = &t + return c +} + +// WithDisableRestProtocolURICleaning sets a config DisableRestProtocolURICleaning value +// returning a Config pointer for chaining. +func (c *Config) WithDisableRestProtocolURICleaning(t bool) *Config { + c.DisableRestProtocolURICleaning = &t + return c +} + +// MergeIn merges the passed in configs into the existing config object. +func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + func mergeInConfig(dst *Config, other *Config) { if other == nil { return @@ -571,6 +585,10 @@ func mergeInConfig(dst *Config, other *Config) { if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint { dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint } + + if other.LowerCaseHeaderMaps != nil { + dst.LowerCaseHeaderMaps = other.LowerCaseHeaderMaps + } } // Copy will return a shallow copy of the Config object. If any additional diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go index 6846ef6f808..e42c5cdbb2e 100644 --- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -244,9 +244,11 @@ type AssumeRoleProvider struct { MaxJitterFrac float64 } -// NewCredentials returns a pointer to a new Credentials object wrapping the +// NewCredentials returns a pointer to a new Credentials value wrapping the // AssumeRoleProvider. The credentials will expire every 15 minutes and the -// role will be named after a nanosecond timestamp of this operation. +// role will be named after a nanosecond timestamp of this operation. The +// Credentials value will attempt to refresh the credentials using the provider +// when Credentials.Get is called, if the cached credentials are expiring. // // Takes a Config provider to create the STS client. The ConfigProvider is // satisfied by the session.Session type. @@ -268,9 +270,11 @@ func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*As return credentials.NewCredentials(p) } -// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the +// NewCredentialsWithClient returns a pointer to a new Credentials value wrapping the // AssumeRoleProvider. The credentials will expire every 15 minutes and the -// role will be named after a nanosecond timestamp of this operation. +// role will be named after a nanosecond timestamp of this operation. The +// Credentials value will attempt to refresh the credentials using the provider +// when Credentials.Get is called, if the cached credentials are expiring. // // Takes an AssumeRoler which can be satisfied by the STS client. 
// diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 7ea175ecd11..013ccec4a5b 100644 --- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -581,6 +581,12 @@ var awsPartition = partition{ }, }, }, + "api.fleethub.iot": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, "api.mediatailor": service{ Endpoints: endpoints{ diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go index d2fbb55f6dc..609aa89c084 100644 --- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.36.19" +const SDKVersion = "1.36.28" diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go index d7d42db0a6a..1f1d27aea49 100644 --- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go @@ -1,9 +1,10 @@ package protocol import ( - "strings" - "github.com/aws/aws-sdk-go/aws/request" + "net" + "strconv" + "strings" ) // ValidateEndpointHostHandler is a request handler that will validate the @@ -22,8 +23,26 @@ var ValidateEndpointHostHandler = request.NamedHandler{ // 3986 host. Returns error if the host is not valid. func ValidateEndpointHost(opName, host string) error { paramErrs := request.ErrInvalidParams{Context: opName} - labels := strings.Split(host, ".") + var hostname string + var port string + var err error + + if strings.Contains(host, ":") { + hostname, port, err = net.SplitHostPort(host) + + if err != nil { + paramErrs.Add(request.NewErrParamFormat("endpoint", err.Error(), host)) + } + + if !ValidPortNumber(port) { + paramErrs.Add(request.NewErrParamFormat("endpoint port number", "[0-65535]", port)) + } + } else { + hostname = host + } + + labels := strings.Split(hostname, ".") for i, label := range labels { if i == len(labels)-1 && len(label) == 0 { // Allow trailing dot for FQDN hosts. 
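
The hunk above routes any `host:port` endpoint through `net.SplitHostPort` before label validation, and the second hunk of this file (below) adds the `ValidPortNumber` helper it calls. A self-contained sketch of that flow, assuming only the standard library; `validPortNumber` here is a stand-in with the same semantics as the SDK helper:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

// validPortNumber mirrors the ValidPortNumber helper added below: a
// port is valid if it parses as an integer in [0, 65535].
func validPortNumber(port string) bool {
	i, err := strconv.Atoi(port)
	if err != nil {
		return false
	}
	return i >= 0 && i <= 65535
}

func main() {
	for _, endpoint := range []string{"example.com", "example.com:8443", "example.com:99999"} {
		hostname := endpoint
		if strings.Contains(endpoint, ":") {
			host, port, err := net.SplitHostPort(endpoint)
			if err != nil || !validPortNumber(port) {
				fmt.Printf("%s: invalid endpoint port\n", endpoint)
				continue
			}
			hostname = host
		}
		// Only the hostname portion proceeds to RFC 3986 label checks.
		fmt.Printf("%s: hostname %q passes to label validation\n", endpoint, hostname)
	}
}
```
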
@@ -36,7 +55,11 @@ func ValidateEndpointHost(opName, host string) error {
 		}
 	}
 
-	if len(host) > 255 {
+	if len(hostname) == 0 {
+		paramErrs.Add(request.NewErrParamMinLen("endpoint host", 1))
+	}
+
+	if len(hostname) > 255 {
 		paramErrs.Add(request.NewErrParamMaxLen(
 			"endpoint host", 255, host,
 		))
@@ -66,3 +89,16 @@ func ValidHostLabel(label string) bool {
 
 	return true
 }
+
+// ValidPortNumber return if the port is valid RFC 3986 port
+func ValidPortNumber(port string) bool {
+	i, err := strconv.Atoi(port)
+	if err != nil {
+		return false
+	}
+
+	if i < 0 || i > 65535 {
+		return false
+	}
+	return true
+}
diff --git a/awsproviderlint/vendor/modules.txt b/awsproviderlint/vendor/modules.txt
index a020a1a2d01..18a6f81bdae 100644
--- a/awsproviderlint/vendor/modules.txt
+++ b/awsproviderlint/vendor/modules.txt
@@ -12,7 +12,7 @@ cloud.google.com/go/storage
 github.com/agext/levenshtein
 # github.com/apparentlymart/go-textseg v1.0.0
 github.com/apparentlymart/go-textseg/textseg
-# github.com/aws/aws-sdk-go v1.36.19
+# github.com/aws/aws-sdk-go v1.36.28
 ## explicit
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/arn

From ad5271033f059c3858c42f0c2f5e5c5e0c9f98bb Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 19 Jan 2021 11:40:10 -0500
Subject: [PATCH 0739/1212] build(deps): bump github.com/aws/aws-sdk-go from 1.36.19 to 1.36.28 (#17155)

* build(deps): bump github.com/aws/aws-sdk-go from 1.36.19 to 1.36.28

Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.36.19 to 1.36.28.
- [Release notes](https://github.com/aws/aws-sdk-go/releases)
- [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md)
- [Commits](https://github.com/aws/aws-sdk-go/compare/v1.36.19...v1.36.28)

Signed-off-by: dependabot[bot]

* resource/aws_lightsail_instance: Deprecate ipv6_address attribute and add ipv6_addresses attribute

Reference: https://github.com/aws/aws-sdk-go/pull/3735/files?file-filters%5B%5D=.json#diff-5a6179b4e5f5d9255890a587d6df77227a75e539120b585f57091b3be60a138cL5246-R5268

AWS Go SDK version 1.36.25 introduced a breaking API model change by removing a string `Ipv6Address` field and replacing it with a list `Ipv6Addresses` field. This offers a best effort replacement for the expected value of the existing attribute by taking the first element of the new list field, while marking it deprecated to nudge practitioners to use the list instead.
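
A minimal sketch of that first-element fallback, separate from the schema plumbing shown in the diff below; the `firstOrEmpty` helper is illustrative, not provider code:

```go
package main

import "fmt"

// firstOrEmpty yields the value for the deprecated singular attribute,
// falling back to the empty string when the API returns no IPv6
// addresses; the resource read function applies the same best effort.
func firstOrEmpty(addresses []*string) string {
	if len(addresses) == 0 || addresses[0] == nil {
		return ""
	}
	return *addresses[0]
}

func main() {
	addr := "2600:1f18::1" // example address
	fmt.Println(firstOrEmpty([]*string{&addr})) // 2600:1f18::1
	fmt.Println(firstOrEmpty(nil) == "")        // true
}
```
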
Output from acceptance testing: ``` --- PASS: TestAccAWSLightsailInstance_disapear (50.09s) --- PASS: TestAccAWSLightsailInstance_basic (51.05s) --- PASS: TestAccAWSLightsailInstance_Tags (76.09s) --- PASS: TestAccAWSLightsailInstance_Name (122.35s) ``` Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Brian Flad --- aws/resource_aws_lightsail_instance.go | 16 ++++++++++++++-- aws/resource_aws_lightsail_instance_test.go | 2 ++ go.mod | 2 +- go.sum | 4 ++-- website/docs/r/lightsail_instance.html.markdown | 7 ++----- 5 files changed, 21 insertions(+), 10 deletions(-) diff --git a/aws/resource_aws_lightsail_instance.go b/aws/resource_aws_lightsail_instance.go index 9f4cb4a2a14..09e1be108f4 100644 --- a/aws/resource_aws_lightsail_instance.go +++ b/aws/resource_aws_lightsail_instance.go @@ -92,8 +92,14 @@ func resourceAwsLightsailInstance() *schema.Resource { Computed: true, }, "ipv6_address": { - Type: schema.TypeString, + Type: schema.TypeString, + Computed: true, + Deprecated: "use `ipv6_addresses` attribute instead", + }, + "ipv6_addresses": { + Type: schema.TypeList, Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, }, "is_static_ip": { Type: schema.TypeBool, @@ -209,7 +215,13 @@ func resourceAwsLightsailInstanceRead(d *schema.ResourceData, meta interface{}) d.Set("created_at", i.CreatedAt.Format(time.RFC3339)) d.Set("cpu_count", i.Hardware.CpuCount) d.Set("ram_size", i.Hardware.RamSizeInGb) - d.Set("ipv6_address", i.Ipv6Address) + + // Deprecated: AWS Go SDK v1.36.25 removed Ipv6Address field + if len(i.Ipv6Addresses) > 0 { + d.Set("ipv6_address", aws.StringValue(i.Ipv6Addresses[0])) + } + + d.Set("ipv6_addresses", aws.StringValueSlice(i.Ipv6Addresses)) d.Set("is_static_ip", i.IsStaticIp) d.Set("private_ip_address", i.PrivateIpAddress) d.Set("public_ip_address", i.PublicIpAddress) diff --git a/aws/resource_aws_lightsail_instance_test.go b/aws/resource_aws_lightsail_instance_test.go index 19ca8bcff23..a4dce503afe 100644 --- a/aws/resource_aws_lightsail_instance_test.go +++ b/aws/resource_aws_lightsail_instance_test.go @@ -93,6 +93,8 @@ func TestAccAWSLightsailInstance_basic(t *testing.T) { resource.TestCheckResourceAttrSet("aws_lightsail_instance.lightsail_instance_test", "availability_zone"), resource.TestCheckResourceAttrSet("aws_lightsail_instance.lightsail_instance_test", "blueprint_id"), resource.TestCheckResourceAttrSet("aws_lightsail_instance.lightsail_instance_test", "bundle_id"), + resource.TestMatchResourceAttr("aws_lightsail_instance.lightsail_instance_test", "ipv6_address", regexp.MustCompile(`([a-f0-9]{1,4}:){7}[a-f0-9]{1,4}`)), + resource.TestCheckResourceAttr("aws_lightsail_instance.lightsail_instance_test", "ipv6_addresses.#", "1"), resource.TestCheckResourceAttrSet("aws_lightsail_instance.lightsail_instance_test", "key_pair_name"), resource.TestCheckResourceAttr("aws_lightsail_instance.lightsail_instance_test", "tags.%", "0"), resource.TestMatchResourceAttr("aws_lightsail_instance.lightsail_instance_test", "ram_size", regexp.MustCompile(`\d+(.\d+)?`)), diff --git a/go.mod b/go.mod index 8487cdce11c..d2f5b45e7fe 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/terraform-providers/terraform-provider-aws go 1.15 require ( - github.com/aws/aws-sdk-go v1.36.19 + github.com/aws/aws-sdk-go v1.36.28 github.com/beevik/etree v1.1.0 github.com/fatih/color v1.9.0 // indirect github.com/hashicorp/aws-sdk-go-base v0.7.0 diff --git a/go.sum b/go.sum index b4e8f6da350..13177949ffa 100644 --- a/go.sum +++ 
b/go.sum @@ -65,8 +65,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.31.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.36.19 h1:zbJZKkxeDiYxUYFjymjWxPye+qa1G2gRVyhIzZrB9zA= -github.com/aws/aws-sdk-go v1.36.19/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.36.28 h1:JVRN7BZgwQ31SQCBwG5QM445+ynJU0ruKu+miFIijYY= +github.com/aws/aws-sdk-go v1.36.28/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs= github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= diff --git a/website/docs/r/lightsail_instance.html.markdown b/website/docs/r/lightsail_instance.html.markdown index e427329ed52..7c15aafe52d 100644 --- a/website/docs/r/lightsail_instance.html.markdown +++ b/website/docs/r/lightsail_instance.html.markdown @@ -102,11 +102,8 @@ In addition to all arguments above, the following attributes are exported: * `id` - The ARN of the Lightsail instance (matches `arn`). * `arn` - The ARN of the Lightsail instance (matches `id`). * `created_at` - The timestamp when the instance was created. -* `availability_zone` -* `blueprint_id` -* `bundle_id` -* `key_pair_name` -* `user_data` +* `ipv6_address` - (**Deprecated**) The first IPv6 address of the Lightsail instance. Use `ipv6_addresses` attribute instead. +* `ipv6_addresses` - List of IPv6 addresses for the Lightsail instance. ## Import From 502cca37e9386e411ad09cdc668b6c65d6479a37 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 19 Jan 2021 11:56:30 -0500 Subject: [PATCH 0740/1212] Update CHANGELOG for #17155 --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0427f1e676f..8b40e4cfc63 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## 3.25.0 (Unreleased) +NOTES + +* resource/aws_lightsail_instance: The `ipv6_address` attribute has been deprecated. Use the `ipv6_addresses` attribute instead. This is due to a backwards incompatible change in the Lightsail API. 
[GH-17155] + FEATURES * **New Resource:** `aws_backup_global_settings` [GH-16475] @@ -13,6 +17,7 @@ ENHANCEMENTS * resource/aws_ami_copy: Add `throughput` argument to `ebs_block_device` configuration block [GH-16631] * resource/aws_ami_from_instance: Add `throughput` argument to `ebs_block_device` configuration block [GH-16631] * resource/aws_ebs_volume: Add `throughput` argument [GH-16517] +* resource/aws_lightsail_instance: Add `ipv6_addresses` attribute [GH-17155] ## 3.24.1 (January 15, 2021) From d7717edb04f1942fe045c9434593a70bdb2fcee9 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 19 Jan 2021 09:51:42 -0800 Subject: [PATCH 0741/1212] Use finder for data source and acceptance test --- ...ource_aws_elasticache_replication_group.go | 21 +++----------- ..._aws_elasticache_replication_group_test.go | 28 +++++-------------- 2 files changed, 11 insertions(+), 38 deletions(-) diff --git a/aws/data_source_aws_elasticache_replication_group.go b/aws/data_source_aws_elasticache_replication_group.go index 1880a52ad6e..43e44fe30b5 100644 --- a/aws/data_source_aws_elasticache_replication_group.go +++ b/aws/data_source_aws_elasticache_replication_group.go @@ -2,11 +2,11 @@ package aws import ( "fmt" - "log" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elasticache" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/elasticache/finder" ) func dataSourceAwsElasticacheReplicationGroup() *schema.Resource { @@ -78,25 +78,12 @@ func dataSourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta i conn := meta.(*AWSClient).elasticacheconn groupID := d.Get("replication_group_id").(string) - input := &elasticache.DescribeReplicationGroupsInput{ - ReplicationGroupId: aws.String(groupID), - } - log.Printf("[DEBUG] Reading ElastiCache Replication Group: %s", input) - resp, err := conn.DescribeReplicationGroups(input) + rg, err := finder.ReplicationGroupByID(conn, groupID) if err != nil { - if isAWSErr(err, elasticache.ErrCodeReplicationGroupNotFoundFault, "") { - return fmt.Errorf("ElastiCache Replication Group (%s) not found", groupID) - } - return fmt.Errorf("error reading replication group (%s): %w", groupID, err) + return fmt.Errorf("error reading ElastiCache Replication Group (%s): %w", groupID, err) } - if resp == nil || len(resp.ReplicationGroups) == 0 { - return fmt.Errorf("error reading replication group (%s): empty output", groupID) - } - - rg := resp.ReplicationGroups[0] - d.SetId(aws.StringValue(rg.ReplicationGroupId)) d.Set("replication_group_description", rg.Description) d.Set("arn", rg.ARN) @@ -115,7 +102,7 @@ func dataSourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta i } else { if rg.NodeGroups == nil { d.SetId("") - return fmt.Errorf("Elasticache Replication Group (%s) doesn't have node groups.", aws.StringValue(rg.ReplicationGroupId)) + return fmt.Errorf("ElastiCache Replication Group (%s) doesn't have node groups", aws.StringValue(rg.ReplicationGroupId)) } d.Set("port", rg.NodeGroups[0].PrimaryEndpoint.Port) d.Set("primary_endpoint_address", rg.NodeGroups[0].PrimaryEndpoint.Address) diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go index 3650a69c66c..6b193f80781 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -9,12 +9,13 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" - 
"github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/elasticache" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/elasticache/finder" "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/elasticache/waiter" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" ) func init() { @@ -902,18 +903,12 @@ func testAccCheckAWSElasticacheReplicationGroupExists(n string, v *elasticache.R } conn := testAccProvider.Meta().(*AWSClient).elasticacheconn - res, err := conn.DescribeReplicationGroups(&elasticache.DescribeReplicationGroupsInput{ - ReplicationGroupId: aws.String(rs.Primary.ID), - }) + rg, err := finder.ReplicationGroupByID(conn, rs.Primary.ID) if err != nil { - return fmt.Errorf("ElastiCache error: %v", err) + return fmt.Errorf("ElastiCache error: %w", err) } - for _, rg := range res.ReplicationGroups { - if *rg.ReplicationGroupId == rs.Primary.ID { - *v = *rg - } - } + *v = *rg return nil } @@ -926,19 +921,10 @@ func testAccCheckAWSElasticacheReplicationDestroy(s *terraform.State) error { if rs.Type != "aws_elasticache_replication_group" { continue } - res, err := conn.DescribeReplicationGroups(&elasticache.DescribeReplicationGroupsInput{ - ReplicationGroupId: aws.String(rs.Primary.ID), - }) - if err != nil { - // Verify the error is what we want - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ReplicationGroupNotFoundFault" { - continue - } + _, err := finder.ReplicationGroupByID(conn, rs.Primary.ID) + if !tfresource.NotFound(err) { return err } - if len(res.ReplicationGroups) > 0 { - return fmt.Errorf("still exist.") - } } return nil } From 03c3ddb794908f87daef78630187819eca8887d4 Mon Sep 17 00:00:00 2001 From: angie pinilla Date: Tue, 19 Jan 2021 15:44:44 -0500 Subject: [PATCH 0742/1212] Update CHANGELOG for #16544 --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8b40e4cfc63..c62242ea29d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,10 @@ ENHANCEMENTS * resource/aws_ebs_volume: Add `throughput` argument [GH-16517] * resource/aws_lightsail_instance: Add `ipv6_addresses` attribute [GH-17155] +BUG FIXES + +* resource/aws_lambda_function: Prevent panic with missing `FunctionConfiguration` `PackageType` attribute in API response [GH-16544] + ## 3.24.1 (January 15, 2021) BUG FIXES From 62da40cab5376b7d5128cc91a32d7a42a6b47d7b Mon Sep 17 00:00:00 2001 From: angie pinilla Date: Tue, 19 Jan 2021 16:02:36 -0500 Subject: [PATCH 0743/1212] Update CHANGELOG for #16614 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c62242ea29d..0b5b274773f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ ENHANCEMENTS BUG FIXES +* resource/aws_api_gateway_authorizer: Ensure `authorizer_credentials` are configured when `type` is `COGNITO_USER_POOLS` [GH-16614] * resource/aws_lambda_function: Prevent panic with missing `FunctionConfiguration` `PackageType` attribute in API response [GH-16544] ## 3.24.1 (January 15, 2021) From e43c9a1e2cd446cd048e74731c1d1348b8718d11 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 19 Jan 2021 13:56:33 -0800 Subject: [PATCH 0744/1212] Prefer tfresource.NotFound() over checking error type --- .semgrep.yml | 20 +++++++++++++++++++ 
...source_aws_cloudwatch_log_metric_filter.go | 3 ++- aws/resource_aws_lambda_permission.go | 5 +++-- 3 files changed, 25 insertions(+), 3 deletions(-) diff --git a/.semgrep.yml b/.semgrep.yml index 2d40fae922b..d4999b9f43d 100644 --- a/.semgrep.yml +++ b/.semgrep.yml @@ -128,3 +128,23 @@ rules: ... return ... severity: WARNING + + - id: is-not-found-error + languages: [go] + message: Check for resource.NotFoundError errors with tfresource.NotFound() + paths: + include: + - aws/ + patterns: + - pattern-either: + - patterns: + - pattern: | + var $CAST *resource.NotFoundError + ... + errors.As($ERR, &$CAST) + - pattern-not-inside: func NotFound(err error) bool { ... } + - patterns: + - pattern: | + $X, $Y := $ERR.(*resource.NotFoundError) + - pattern-not-inside: func isResourceNotFoundError(err error) bool { ... } + severity: WARNING diff --git a/aws/resource_aws_cloudwatch_log_metric_filter.go b/aws/resource_aws_cloudwatch_log_metric_filter.go index 41a9f4cd2be..fdef50cd8fa 100644 --- a/aws/resource_aws_cloudwatch_log_metric_filter.go +++ b/aws/resource_aws_cloudwatch_log_metric_filter.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" ) func resourceAwsCloudWatchLogMetricFilter() *schema.Resource { @@ -125,7 +126,7 @@ func resourceAwsCloudWatchLogMetricFilterRead(d *schema.ResourceData, meta inter mf, err := lookupCloudWatchLogMetricFilter(conn, d.Get("name").(string), d.Get("log_group_name").(string), nil) if err != nil { - if _, ok := err.(*resource.NotFoundError); ok { + if tfresource.NotFound(err) { log.Printf("[WARN] Removing CloudWatch Log Metric Filter as it is gone") d.SetId("") return nil diff --git a/aws/resource_aws_lambda_permission.go b/aws/resource_aws_lambda_permission.go index d7dd2365543..3fc9ef3011d 100644 --- a/aws/resource_aws_lambda_permission.go +++ b/aws/resource_aws_lambda_permission.go @@ -13,6 +13,7 @@ import ( "github.com/aws/aws-sdk-go/service/lambda" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" ) var LambdaFunctionRegexp = `^(arn:[\w-]+:lambda:)?([a-z]{2}-(?:[a-z]+-){1,2}\d{1}:)?(\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\$LATEST|[a-zA-Z0-9-_]+))?$` @@ -252,8 +253,8 @@ func resourceAwsLambdaPermissionRead(d *schema.ResourceData, meta interface{}) e } // Missing permission inside valid policy - if nfErr, ok := err.(*resource.NotFoundError); ok { - log.Printf("[WARN] %s", nfErr) + if tfresource.NotFound(err) { + log.Printf("[WARN] %s", err) d.SetId("") return nil } From a87c95a4c3a54ec70b232524a325fdd70b8e1cf5 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 19 Jan 2021 14:36:02 -0800 Subject: [PATCH 0745/1212] Use waiters and finders for ElastiCache Cache Clusters --- aws/data_source_aws_elasticache_cluster.go | 41 +-- ..._aws_elasticache_replication_group_test.go | 2 +- .../service/elasticache/finder/finder.go | 40 +++ .../service/elasticache/waiter/status.go | 27 ++ .../service/elasticache/waiter/waiter.go | 73 ++++- aws/resource_aws_elasticache_cluster.go | 268 +++++------------- aws/resource_aws_elasticache_cluster_test.go | 48 ++-- ...ource_aws_elasticache_replication_group.go | 4 +- ..._aws_elasticache_replication_group_test.go | 12 +- 9 files 
changed, 251 insertions(+), 264 deletions(-) diff --git a/aws/data_source_aws_elasticache_cluster.go b/aws/data_source_aws_elasticache_cluster.go index b8a9805f162..0ff5810fb17 100644 --- a/aws/data_source_aws_elasticache_cluster.go +++ b/aws/data_source_aws_elasticache_cluster.go @@ -2,14 +2,13 @@ package aws import ( "fmt" - "log" "strings" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/elasticache" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/elasticache/finder" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" ) func dataSourceAwsElastiCacheCluster() *schema.Resource { @@ -154,26 +153,15 @@ func dataSourceAwsElastiCacheClusterRead(d *schema.ResourceData, meta interface{ conn := meta.(*AWSClient).elasticacheconn ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig - req := &elasticache.DescribeCacheClustersInput{ - CacheClusterId: aws.String(d.Get("cluster_id").(string)), - ShowCacheNodeInfo: aws.Bool(true), + clusterID := d.Get("cluster_id").(string) + cluster, err := finder.CacheClusterWithNodeInfoByID(conn, clusterID) + if tfresource.NotFound(err) { + return fmt.Errorf("Your query returned no results. Please change your search criteria and try again") } - - log.Printf("[DEBUG] Reading ElastiCache Cluster: %s", req) - resp, err := conn.DescribeCacheClusters(req) if err != nil { - return err - } - - if len(resp.CacheClusters) < 1 { - return fmt.Errorf("Your query returned no results. Please change your search criteria and try again.") - } - if len(resp.CacheClusters) > 1 { - return fmt.Errorf("Your query returned more than one result. 
Please try a more specific search criteria.") + return fmt.Errorf("error reading ElastiCache Cache Cluster (%s): %w", clusterID, err) } - cluster := resp.CacheClusters[0] - d.SetId(aws.StringValue(cluster.CacheClusterId)) d.Set("cluster_id", cluster.CacheClusterId) @@ -214,23 +202,16 @@ func dataSourceAwsElastiCacheClusterRead(d *schema.ResourceData, meta interface{ return err } - arn := arn.ARN{ - Partition: meta.(*AWSClient).partition, - Service: "elasticache", - Region: meta.(*AWSClient).region, - AccountID: meta.(*AWSClient).accountid, - Resource: fmt.Sprintf("cluster:%s", d.Id()), - }.String() - d.Set("arn", arn) + d.Set("arn", cluster.ARN) - tags, err := keyvaluetags.ElasticacheListTags(conn, arn) + tags, err := keyvaluetags.ElasticacheListTags(conn, aws.StringValue(cluster.ARN)) if err != nil { - return fmt.Errorf("error listing tags for Elasticache Cluster (%s): %s", arn, err) + return fmt.Errorf("error listing tags for Elasticache Cluster (%s): %w", d.Id(), err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_elasticache_replication_group_test.go b/aws/data_source_aws_elasticache_replication_group_test.go index 5a4726745d5..5e9fcc4bfb1 100644 --- a/aws/data_source_aws_elasticache_replication_group_test.go +++ b/aws/data_source_aws_elasticache_replication_group_test.go @@ -72,7 +72,7 @@ func TestAccDataSourceAwsElasticacheReplicationGroup_NonExistent(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccDataSourceAwsElasticacheReplicationGroupConfig_NonExistent, - ExpectError: regexp.MustCompile(`not found`), + ExpectError: regexp.MustCompile(`couldn't find resource`), }, }, }) diff --git a/aws/internal/service/elasticache/finder/finder.go b/aws/internal/service/elasticache/finder/finder.go index cc344d9d97d..196c9219117 100644 --- a/aws/internal/service/elasticache/finder/finder.go +++ b/aws/internal/service/elasticache/finder/finder.go @@ -32,3 +32,43 @@ func ReplicationGroupByID(conn *elasticache.ElastiCache, id string) (*elasticach return result.ReplicationGroups[0], nil } + +// CacheClusterByID retrieves an ElastiCache Cache Cluster by id. +func CacheClusterByID(conn *elasticache.ElastiCache, id string) (*elasticache.CacheCluster, error) { + input := &elasticache.DescribeCacheClustersInput{ + CacheClusterId: aws.String(id), + } + return CacheCluster(conn, input) +} + +// CacheClusterWithNodeInfoByID retrieves an ElastiCache Cache Cluster with Node Info by id. +func CacheClusterWithNodeInfoByID(conn *elasticache.ElastiCache, id string) (*elasticache.CacheCluster, error) { + input := &elasticache.DescribeCacheClustersInput{ + CacheClusterId: aws.String(id), + ShowCacheNodeInfo: aws.Bool(true), + } + return CacheCluster(conn, input) +} + +// CacheCluster retrieves an ElastiCache Cache Cluster using DescribeCacheClustersInput. 
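+// When no matching cluster exists, it returns a resource.NotFoundError so callers can
+// test for absence with tfresource.NotFound(err) instead of matching AWS error codes.
+// An illustrative sketch of the intended call pattern (mirroring the resource Read function):
+//
+//	cluster, err := finder.CacheClusterByID(conn, d.Id())
+//	if tfresource.NotFound(err) {
+//		d.SetId("") // cluster is gone; remove it from state
+//		return nil
+//	}
+//	if err != nil {
+//		return err
+//	}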
+func CacheCluster(conn *elasticache.ElastiCache, input *elasticache.DescribeCacheClustersInput) (*elasticache.CacheCluster, error) { + result, err := conn.DescribeCacheClusters(input) + if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeCacheClusterNotFoundFault) { + return nil, &resource.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + if err != nil { + return nil, err + } + + if result == nil || len(result.CacheClusters) == 0 || result.CacheClusters[0] == nil { + return nil, &resource.NotFoundError{ + Message: "Empty result", + LastRequest: input, + } + } + + return result.CacheClusters[0], nil +} diff --git a/aws/internal/service/elasticache/waiter/status.go b/aws/internal/service/elasticache/waiter/status.go index bdffde8ec0c..dc1b6ff886f 100644 --- a/aws/internal/service/elasticache/waiter/status.go +++ b/aws/internal/service/elasticache/waiter/status.go @@ -31,3 +31,30 @@ func ReplicationGroupStatus(conn *elasticache.ElastiCache, replicationGroupID st return rg, aws.StringValue(rg.Status), nil } } + +const ( + CacheClusterStatusAvailable = "available" + CacheClusterStatusCreating = "creating" + CacheClusterStatusDeleted = "deleted" + CacheClusterStatusDeleting = "deleting" + CacheClusterStatusIncompatibleNetwork = "incompatible-network" + CacheClusterStatusModifying = "modifying" + CacheClusterStatusRebootingClusterNodes = "rebooting cluster nodes" + CacheClusterStatusRestoreFailed = "restore-failed" + CacheClusterStatusSnapshotting = "snapshotting" +) + +// CacheClusterStatus fetches the CacheCluster and its Status +func CacheClusterStatus(conn *elasticache.ElastiCache, cacheClusterID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + c, err := finder.CacheClusterByID(conn, cacheClusterID) + if tfresource.NotFound(err) { + return nil, "", nil + } + if err != nil { + return nil, "", err + } + + return c, aws.StringValue(c.CacheClusterStatus), nil + } +} diff --git a/aws/internal/service/elasticache/waiter/waiter.go b/aws/internal/service/elasticache/waiter/waiter.go index 37864fa1e89..d01788ad108 100644 --- a/aws/internal/service/elasticache/waiter/waiter.go +++ b/aws/internal/service/elasticache/waiter/waiter.go @@ -18,7 +18,11 @@ const ( // ReplicationGroupAvailable waits for a ReplicationGroup to return Available func ReplicationGroupAvailable(conn *elasticache.ElastiCache, replicationGroupID string, timeout time.Duration) (*elasticache.ReplicationGroup, error) { stateConf := &resource.StateChangeConf{ - Pending: []string{ReplicationGroupStatusCreating, ReplicationGroupStatusModifying, ReplicationGroupStatusSnapshotting}, + Pending: []string{ + ReplicationGroupStatusCreating, + ReplicationGroupStatusModifying, + ReplicationGroupStatusSnapshotting, + }, Target: []string{ReplicationGroupStatusAvailable}, Refresh: ReplicationGroupStatus(conn, replicationGroupID), Timeout: timeout, @@ -36,7 +40,11 @@ func ReplicationGroupAvailable(conn *elasticache.ElastiCache, replicationGroupID // ReplicationGroupDeleted waits for a ReplicationGroup to be deleted func ReplicationGroupDeleted(conn *elasticache.ElastiCache, replicationGroupID string, timeout time.Duration) (*elasticache.ReplicationGroup, error) { stateConf := &resource.StateChangeConf{ - Pending: []string{ReplicationGroupStatusCreating, ReplicationGroupStatusAvailable, ReplicationGroupStatusDeleting}, + Pending: []string{ + ReplicationGroupStatusCreating, + ReplicationGroupStatusAvailable, + ReplicationGroupStatusDeleting, + }, Target: []string{}, Refresh: ReplicationGroupStatus(conn, 
replicationGroupID), Timeout: timeout, @@ -50,3 +58,64 @@ func ReplicationGroupDeleted(conn *elasticache.ElastiCache, replicationGroupID s } return nil, err } + +const ( + CacheClusterCreatedTimeout = 40 * time.Minute + CacheClusterUpdatedTimeout = 80 * time.Minute + CacheClusterDeletedTimeout = 40 * time.Minute + + cacheClusterAvailableMinTimeout = 10 * time.Second + cacheClusterAvailableDelay = 30 * time.Second + + cacheClusterDeletedMinTimeout = 10 * time.Second + cacheClusterDeletedDelay = 30 * time.Second +) + +// CacheClusterAvailable waits for a Cache Cluster to return Available +func CacheClusterAvailable(conn *elasticache.ElastiCache, cacheClusterID string, timeout time.Duration) (*elasticache.CacheCluster, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + CacheClusterStatusCreating, + CacheClusterStatusModifying, + CacheClusterStatusSnapshotting, + CacheClusterStatusRebootingClusterNodes, + }, + Target: []string{CacheClusterStatusAvailable}, + Refresh: CacheClusterStatus(conn, cacheClusterID), + Timeout: timeout, + MinTimeout: cacheClusterAvailableMinTimeout, + Delay: cacheClusterAvailableDelay, + } + + outputRaw, err := stateConf.WaitForState() + if v, ok := outputRaw.(*elasticache.CacheCluster); ok { + return v, err + } + return nil, err +} + +// CacheClusterDeleted waits for a Cache Cluster to be deleted +func CacheClusterDeleted(conn *elasticache.ElastiCache, cacheClusterID string, timeout time.Duration) (*elasticache.CacheCluster, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + CacheClusterStatusCreating, + CacheClusterStatusAvailable, + CacheClusterStatusModifying, + CacheClusterStatusDeleting, + CacheClusterStatusIncompatibleNetwork, + CacheClusterStatusRestoreFailed, + CacheClusterStatusSnapshotting, + }, + Target: []string{}, + Refresh: CacheClusterStatus(conn, cacheClusterID), + Timeout: timeout, + MinTimeout: cacheClusterDeletedMinTimeout, + Delay: cacheClusterDeletedDelay, + } + + outputRaw, err := stateConf.WaitForState() + if v, ok := outputRaw.(*elasticache.CacheCluster); ok { + return v, err + } + return nil, err +} diff --git a/aws/resource_aws_elasticache_cluster.go b/aws/resource_aws_elasticache_cluster.go index e7fae6a9ef3..9e4a6e6e11e 100644 --- a/aws/resource_aws_elasticache_cluster.go +++ b/aws/resource_aws_elasticache_cluster.go @@ -11,7 +11,6 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/elasticache" gversion "github.com/hashicorp/go-version" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" @@ -19,6 +18,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/elasticache/finder" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/elasticache/waiter" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" ) func resourceAwsElasticacheCluster() *schema.Resource { @@ -89,7 +91,7 @@ func resourceAwsElasticacheCluster() *schema.Resource { Required: true, ForceNew: true, StateFunc: func(val interface{}) string { - // Elasticache normalizes cluster ids to lowercase, + // ElastiCache normalizes cluster ids to lowercase, // so we have to do this too or else we can end up // with non-converging diffs.
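+				// For example, a configured value of "Tf-Acc-Test" is stored and compared as "tf-acc-test".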
return strings.ToLower(val.(string)) @@ -122,7 +124,7 @@ func resourceAwsElasticacheCluster() *schema.Resource { Optional: true, Computed: true, StateFunc: func(val interface{}) string { - // Elasticache always changes the maintenance + // ElastiCache always changes the maintenance // to lowercase return strings.ToLower(val.(string)) }, @@ -408,14 +410,14 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{ id, err := createElasticacheCacheCluster(conn, req) if err != nil { - return fmt.Errorf("error creating Elasticache Cache Cluster: %s", err) + return fmt.Errorf("error creating ElastiCache Cache Cluster: %w", err) } d.SetId(id) - err = waitForCreateElasticacheCacheCluster(conn, d.Id(), 40*time.Minute) + _, err = waiter.CacheClusterAvailable(conn, d.Id(), 40*time.Minute) if err != nil { - return fmt.Errorf("error waiting for Elasticache Cache Cluster (%s) to be created: %s", d.Id(), err) + return fmt.Errorf("error waiting for ElastiCache Cache Cluster (%s) to be created: %w", d.Id(), err) } return resourceAwsElasticacheClusterRead(d, meta) @@ -425,85 +427,68 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) conn := meta.(*AWSClient).elasticacheconn ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig - req := &elasticache.DescribeCacheClustersInput{ - CacheClusterId: aws.String(d.Id()), - ShowCacheNodeInfo: aws.Bool(true), + c, err := finder.CacheClusterWithNodeInfoByID(conn, d.Id()) + if tfresource.NotFound(err) { + log.Printf("[WARN] ElastiCache Cache Cluster (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil } - - res, err := conn.DescribeCacheClusters(req) if err != nil { - if isAWSErr(err, elasticache.ErrCodeCacheClusterNotFoundFault, "") { - log.Printf("[WARN] ElastiCache Cluster (%s) not found", d.Id()) - d.SetId("") - return nil + return fmt.Errorf("error reading ElastiCache Cache Cluster (%s): %w", d.Id(), err) + } + + d.Set("cluster_id", c.CacheClusterId) + d.Set("node_type", c.CacheNodeType) + d.Set("num_cache_nodes", c.NumCacheNodes) + d.Set("engine", c.Engine) + d.Set("engine_version", c.EngineVersion) + if c.ConfigurationEndpoint != nil { + d.Set("port", c.ConfigurationEndpoint.Port) + d.Set("configuration_endpoint", aws.String(fmt.Sprintf("%s:%d", *c.ConfigurationEndpoint.Address, *c.ConfigurationEndpoint.Port))) + d.Set("cluster_address", aws.String((*c.ConfigurationEndpoint.Address))) + } else if len(c.CacheNodes) > 0 { + d.Set("port", int(aws.Int64Value(c.CacheNodes[0].Endpoint.Port))) + } + + if c.ReplicationGroupId != nil { + d.Set("replication_group_id", c.ReplicationGroupId) + } + + d.Set("subnet_group_name", c.CacheSubnetGroupName) + d.Set("security_group_names", flattenElastiCacheSecurityGroupNames(c.CacheSecurityGroups)) + d.Set("security_group_ids", flattenElastiCacheSecurityGroupIds(c.SecurityGroups)) + if c.CacheParameterGroup != nil { + d.Set("parameter_group_name", c.CacheParameterGroup.CacheParameterGroupName) + } + d.Set("maintenance_window", c.PreferredMaintenanceWindow) + d.Set("snapshot_window", c.SnapshotWindow) + d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit) + if c.NotificationConfiguration != nil { + if *c.NotificationConfiguration.TopicStatus == "active" { + d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn) } + } + d.Set("availability_zone", c.PreferredAvailabilityZone) + if *c.PreferredAvailabilityZone == "Multiple" { + d.Set("az_mode", "cross-az") + } else { + d.Set("az_mode", "single-az") + } + if err := setCacheNodeData(d, 
c); err != nil { return err } - if len(res.CacheClusters) == 1 { - c := res.CacheClusters[0] - d.Set("cluster_id", c.CacheClusterId) - d.Set("node_type", c.CacheNodeType) - d.Set("num_cache_nodes", c.NumCacheNodes) - d.Set("engine", c.Engine) - d.Set("engine_version", c.EngineVersion) - if c.ConfigurationEndpoint != nil { - d.Set("port", c.ConfigurationEndpoint.Port) - d.Set("configuration_endpoint", aws.String(fmt.Sprintf("%s:%d", *c.ConfigurationEndpoint.Address, *c.ConfigurationEndpoint.Port))) - d.Set("cluster_address", aws.String((*c.ConfigurationEndpoint.Address))) - } else if len(c.CacheNodes) > 0 { - d.Set("port", int(aws.Int64Value(c.CacheNodes[0].Endpoint.Port))) - } - - if c.ReplicationGroupId != nil { - d.Set("replication_group_id", c.ReplicationGroupId) - } - - d.Set("subnet_group_name", c.CacheSubnetGroupName) - d.Set("security_group_names", flattenElastiCacheSecurityGroupNames(c.CacheSecurityGroups)) - d.Set("security_group_ids", flattenElastiCacheSecurityGroupIds(c.SecurityGroups)) - if c.CacheParameterGroup != nil { - d.Set("parameter_group_name", c.CacheParameterGroup.CacheParameterGroupName) - } - d.Set("maintenance_window", c.PreferredMaintenanceWindow) - d.Set("snapshot_window", c.SnapshotWindow) - d.Set("snapshot_retention_limit", c.SnapshotRetentionLimit) - if c.NotificationConfiguration != nil { - if *c.NotificationConfiguration.TopicStatus == "active" { - d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn) - } - } - d.Set("availability_zone", c.PreferredAvailabilityZone) - if *c.PreferredAvailabilityZone == "Multiple" { - d.Set("az_mode", "cross-az") - } else { - d.Set("az_mode", "single-az") - } - - if err := setCacheNodeData(d, c); err != nil { - return err - } - - arn := arn.ARN{ - Partition: meta.(*AWSClient).partition, - Service: "elasticache", - Region: meta.(*AWSClient).region, - AccountID: meta.(*AWSClient).accountid, - Resource: fmt.Sprintf("cluster:%s", d.Id()), - }.String() - - d.Set("arn", arn) + d.Set("arn", c.ARN) - tags, err := keyvaluetags.ElasticacheListTags(conn, arn) + tags, err := keyvaluetags.ElasticacheListTags(conn, aws.StringValue(c.ARN)) - if err != nil { - return fmt.Errorf("error listing tags for Elasticache Cluster (%s): %s", arn, err) - } + if err != nil { + return fmt.Errorf("error listing tags for ElastiCache Cluster (%s): %w", d.Id(), err) + } - if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) - } + if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + return fmt.Errorf("error setting tags: %w", err) } return nil @@ -516,7 +501,7 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{ o, n := d.GetChange("tags") if err := keyvaluetags.ElasticacheUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { - return fmt.Errorf("error updating Elasticache Cluster (%s) tags: %s", d.Get("arn").(string), err) + return fmt.Errorf("error updating ElastiCache Cluster (%s) tags: %w", d.Get("arn").(string), err) } } @@ -613,23 +598,12 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{ log.Printf("[DEBUG] Modifying ElastiCache Cluster (%s), opts:\n%s", d.Id(), req) _, err := conn.ModifyCacheCluster(req) if err != nil { - return fmt.Errorf("Error updating ElastiCache cluster (%s), error: %s", d.Id(), err) - } - - log.Printf("[DEBUG] Waiting for update: %s", d.Id()) - pending := []string{"modifying", "rebooting cache cluster nodes", 
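+	// All attributes are now set from the API response; note the tag lookup above uses the ARN returned by the API rather than one constructed client-side.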
"snapshotting"} - stateConf := &resource.StateChangeConf{ - Pending: pending, - Target: []string{"available"}, - Refresh: cacheClusterStateRefreshFunc(conn, d.Id(), "available", pending), - Timeout: 80 * time.Minute, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, + return fmt.Errorf("Error updating ElastiCache cluster (%s), error: %w", d.Id(), err) } - _, sterr := stateConf.WaitForState() - if sterr != nil { - return fmt.Errorf("Error waiting for elasticache (%s) to update: %s", d.Id(), sterr) + _, err = waiter.CacheClusterAvailable(conn, d.Id(), waiter.CacheClusterUpdatedTimeout) + if err != nil { + return fmt.Errorf("error waiting for ElastiCache Cache Cluster (%s) to update: %w", d.Id(), err) } } @@ -686,89 +660,18 @@ func resourceAwsElasticacheClusterDelete(d *schema.ResourceData, meta interface{ if isAWSErr(err, elasticache.ErrCodeCacheClusterNotFoundFault, "") { return nil } - return fmt.Errorf("error deleting Elasticache Cache Cluster (%s): %s", d.Id(), err) + return fmt.Errorf("error deleting ElastiCache Cache Cluster (%s): %w", d.Id(), err) } - err = waitForDeleteElasticacheCacheCluster(conn, d.Id(), 40*time.Minute) + _, err = waiter.CacheClusterDeleted(conn, d.Id(), waiter.CacheClusterDeletedTimeout) if err != nil { - return fmt.Errorf("error waiting for Elasticache Cache Cluster (%s) to be deleted: %s", d.Id(), err) + return fmt.Errorf("error waiting for ElastiCache Cache Cluster (%s) to be deleted: %w", d.Id(), err) } return nil } -func cacheClusterStateRefreshFunc(conn *elasticache.ElastiCache, clusterID, givenState string, pending []string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - resp, err := conn.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{ - CacheClusterId: aws.String(clusterID), - ShowCacheNodeInfo: aws.Bool(true), - }) - if err != nil { - if isAWSErr(err, elasticache.ErrCodeCacheClusterNotFoundFault, "") { - log.Printf("[DEBUG] Detect deletion") - return nil, "", nil - } - - log.Printf("[ERROR] CacheClusterStateRefreshFunc: %s", err) - return nil, "", err - } - - if len(resp.CacheClusters) == 0 { - return nil, "", fmt.Errorf("Error: no Cache Clusters found for id (%s)", clusterID) - } - - var c *elasticache.CacheCluster - for _, cluster := range resp.CacheClusters { - if *cluster.CacheClusterId == clusterID { - log.Printf("[DEBUG] Found matching ElastiCache cluster: %s", *cluster.CacheClusterId) - c = cluster - } - } - - if c == nil { - return nil, "", fmt.Errorf("Error: no matching Elastic Cache cluster for id (%s)", clusterID) - } - - log.Printf("[DEBUG] ElastiCache Cluster (%s) status: %v", clusterID, *c.CacheClusterStatus) - - // return the current state if it's in the pending array - for _, p := range pending { - log.Printf("[DEBUG] ElastiCache: checking pending state (%s) for cluster (%s), cluster status: %s", pending, clusterID, *c.CacheClusterStatus) - s := *c.CacheClusterStatus - if p == s { - log.Printf("[DEBUG] Return with status: %v", *c.CacheClusterStatus) - return c, p, nil - } - } - - // return given state if it's not in pending - if givenState != "" { - log.Printf("[DEBUG] ElastiCache: checking given state (%s) of cluster (%s) against cluster status (%s)", givenState, clusterID, *c.CacheClusterStatus) - // check to make sure we have the node count we're expecting - if int64(len(c.CacheNodes)) != *c.NumCacheNodes { - log.Printf("[DEBUG] Node count is not what is expected: %d found, %d expected", len(c.CacheNodes), *c.NumCacheNodes) - return nil, "creating", nil - } - - log.Printf("[DEBUG] 
Node count matched (%d)", len(c.CacheNodes)) - // loop the nodes and check their status as well - for _, n := range c.CacheNodes { - log.Printf("[DEBUG] Checking cache node for status: %s", n) - if n.CacheNodeStatus != nil && *n.CacheNodeStatus != "available" { - log.Printf("[DEBUG] Node (%s) is not yet available, status: %s", *n.CacheNodeId, *n.CacheNodeStatus) - return nil, "creating", nil - } - log.Printf("[DEBUG] Cache node not in expected state") - } - log.Printf("[DEBUG] ElastiCache returning given state (%s), cluster: %s", givenState, c) - return c, givenState, nil - } - log.Printf("[DEBUG] current status: %v", *c.CacheClusterStatus) - return c, *c.CacheClusterStatus, nil - } -} - func createElasticacheCacheCluster(conn *elasticache.ElastiCache, input *elasticache.CreateCacheClusterInput) (string, error) { - log.Printf("[DEBUG] Creating Elasticache Cache Cluster: %s", input) + log.Printf("[DEBUG] Creating ElastiCache Cache Cluster: %s", input) output, err := conn.CreateCacheCluster(input) if err != nil { return "", err @@ -782,22 +685,6 @@ func createElasticacheCacheCluster(conn *elasticache.ElastiCache, input *elastic return strings.ToLower(aws.StringValue(output.CacheCluster.CacheClusterId)), nil } -func waitForCreateElasticacheCacheCluster(conn *elasticache.ElastiCache, cacheClusterID string, timeout time.Duration) error { - pending := []string{"creating", "modifying", "restoring", "snapshotting"} - stateConf := &resource.StateChangeConf{ - Pending: pending, - Target: []string{"available"}, - Refresh: cacheClusterStateRefreshFunc(conn, cacheClusterID, "available", pending), - Timeout: timeout, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - log.Printf("[DEBUG] Waiting for Elasticache Cache Cluster (%s) to be created", cacheClusterID) - _, err := stateConf.WaitForState() - return err -} - func deleteElasticacheCacheCluster(conn *elasticache.ElastiCache, cacheClusterID string, finalSnapshotID string) error { input := &elasticache.DeleteCacheClusterInput{ CacheClusterId: aws.String(cacheClusterID), @@ -806,7 +693,7 @@ func deleteElasticacheCacheCluster(conn *elasticache.ElastiCache, cacheClusterID input.FinalSnapshotIdentifier = aws.String(finalSnapshotID) } - log.Printf("[DEBUG] Deleting Elasticache Cache Cluster: %s", input) + log.Printf("[DEBUG] Deleting ElastiCache Cache Cluster: %s", input) err := resource.Retry(5*time.Minute, func() *resource.RetryError { _, err := conn.DeleteCacheCluster(input) if err != nil { @@ -828,18 +715,3 @@ func deleteElasticacheCacheCluster(conn *elasticache.ElastiCache, cacheClusterID return err } - -func waitForDeleteElasticacheCacheCluster(conn *elasticache.ElastiCache, cacheClusterID string, timeout time.Duration) error { - stateConf := &resource.StateChangeConf{ - Pending: []string{"creating", "available", "deleting", "incompatible-parameters", "incompatible-network", "restore-failed", "snapshotting"}, - Target: []string{}, - Refresh: cacheClusterStateRefreshFunc(conn, cacheClusterID, "", []string{}), - Timeout: timeout, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - log.Printf("[DEBUG] Waiting for Elasticache Cache Cluster deletion: %v", cacheClusterID) - - _, err := stateConf.WaitForState() - return err -} diff --git a/aws/resource_aws_elasticache_cluster_test.go b/aws/resource_aws_elasticache_cluster_test.go index 24b307fbd51..df8f91956c2 100644 --- a/aws/resource_aws_elasticache_cluster_test.go +++ b/aws/resource_aws_elasticache_cluster_test.go @@ -8,14 +8,15 @@ import ( "strconv" "strings" "testing" - 
"time" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/elasticache" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/elasticache/finder" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/elasticache/waiter" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" ) func init() { @@ -31,37 +32,37 @@ func init() { func testSweepElasticacheClusters(region string) error { client, err := sharedClientForRegion(region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("error getting client: %w", err) } conn := client.(*AWSClient).elasticacheconn err = conn.DescribeCacheClustersPages(&elasticache.DescribeCacheClustersInput{}, func(page *elasticache.DescribeCacheClustersOutput, isLast bool) bool { if len(page.CacheClusters) == 0 { - log.Print("[DEBUG] No Elasticache Replicaton Groups to sweep") + log.Print("[DEBUG] No ElastiCache Replicaton Groups to sweep") return false } for _, cluster := range page.CacheClusters { id := aws.StringValue(cluster.CacheClusterId) - log.Printf("[INFO] Deleting Elasticache Cluster: %s", id) + log.Printf("[INFO] Deleting ElastiCache Cluster: %s", id) err := deleteElasticacheCacheCluster(conn, id, "") if err != nil { - log.Printf("[ERROR] Failed to delete Elasticache Cache Cluster (%s): %s", id, err) + log.Printf("[ERROR] Failed to delete ElastiCache Cache Cluster (%s): %s", id, err) } - err = waitForDeleteElasticacheCacheCluster(conn, id, 40*time.Minute) + _, err = waiter.CacheClusterDeleted(conn, id, waiter.CacheClusterDeletedTimeout) if err != nil { - log.Printf("[ERROR] Failed waiting for Elasticache Cache Cluster (%s) to be deleted: %s", id, err) + log.Printf("[ERROR] Failed waiting for ElastiCache Cache Cluster (%s) to be deleted: %s", id, err) } } return !isLast }) if err != nil { if testSweepSkipSweepError(err) { - log.Printf("[WARN] Skipping Elasticache Cluster sweep for %s: %s", region, err) + log.Printf("[WARN] Skipping ElastiCache Cluster sweep for %s: %s", region, err) return nil } - return fmt.Errorf("Error retrieving Elasticache Clusters: %s", err) + return fmt.Errorf("Error retrieving ElastiCache Clusters: %s", err) } return nil } @@ -758,7 +759,7 @@ func testAccCheckAWSElasticacheClusterReplicationGroupIDAttribute(cluster *elast func testAccCheckAWSElasticacheClusterNotRecreated(i, j *elasticache.CacheCluster) resource.TestCheckFunc { return func(s *terraform.State) error { if aws.TimeValue(i.CacheClusterCreateTime) != aws.TimeValue(j.CacheClusterCreateTime) { - return errors.New("Elasticache Cluster was recreated") + return errors.New("ElastiCache Cluster was recreated") } return nil @@ -768,7 +769,7 @@ func testAccCheckAWSElasticacheClusterNotRecreated(i, j *elasticache.CacheCluste func testAccCheckAWSElasticacheClusterRecreated(i, j *elasticache.CacheCluster) resource.TestCheckFunc { return func(s *terraform.State) error { if aws.TimeValue(i.CacheClusterCreateTime) == aws.TimeValue(j.CacheClusterCreateTime) { - return errors.New("Elasticache Cluster was not recreated") + return errors.New("ElastiCache Cluster was not recreated") } return nil @@ -782,19 +783,14 @@ func testAccCheckAWSElasticacheClusterDestroy(s *terraform.State) error { if rs.Type != "aws_elasticache_cluster" { continue } - res, 
err := conn.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{ - CacheClusterId: aws.String(rs.Primary.ID), - }) + _, err := finder.CacheClusterByID(conn, rs.Primary.ID) + if tfresource.NotFound(err) { + continue + } if err != nil { - // Verify the error is what we want - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "CacheClusterNotFound" { - continue - } return err } - if len(res.CacheClusters) > 0 { - return fmt.Errorf("still exist.") - } + return fmt.Errorf("ElastiCache Cache Cluster (%s) still exists", rs.Primary.ID) } return nil } @@ -815,7 +811,7 @@ func testAccCheckAWSElasticacheClusterExists(n string, v *elasticache.CacheClust CacheClusterId: aws.String(rs.Primary.ID), }) if err != nil { - return fmt.Errorf("Elasticache error: %v", err) + return fmt.Errorf("ElastiCache error: %v", err) } for _, c := range resp.CacheClusters { @@ -848,7 +844,7 @@ func testAccCheckAWSElasticacheClusterEc2ClassicExists(n string, v *elasticache. output, err := conn.DescribeCacheClusters(input) if err != nil { - return fmt.Errorf("error describing Elasticache Cluster (%s): %w", rs.Primary.ID, err) + return fmt.Errorf("error describing ElastiCache Cluster (%s): %w", rs.Primary.ID, err) } for _, c := range output.CacheClusters { @@ -859,7 +855,7 @@ func testAccCheckAWSElasticacheClusterEc2ClassicExists(n string, v *elasticache. } } - return fmt.Errorf("Elasticache Cluster (%s) not found", rs.Primary.ID) + return fmt.Errorf("ElastiCache Cluster (%s) not found", rs.Primary.ID) } } @@ -926,7 +922,7 @@ resource "aws_security_group" "test" { } tags = { - Name = "TestAccAWSElasticacheCluster_basic" + Name = "TestAccAWSElastiCacheCluster_basic" } } diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index 12c4bd57166..fa3269a5a3d 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -596,7 +596,7 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i // Wait for all Cache Cluster creations for _, cacheClusterID := range addClusterIDs { - err := waitForCreateElasticacheCacheCluster(conn, cacheClusterID, d.Timeout(schema.TimeoutUpdate)) + _, err := waiter.CacheClusterAvailable(conn, cacheClusterID, d.Timeout(schema.TimeoutUpdate)) if err != nil { return fmt.Errorf("error waiting for ElastiCache Cache Cluster (%s) to be created (adding replica): %w", cacheClusterID, err) } @@ -705,7 +705,7 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i // Wait for all Cache Cluster deletions for _, cacheClusterID := range removeClusterIDs { - err := waitForDeleteElasticacheCacheCluster(conn, cacheClusterID, d.Timeout(schema.TimeoutUpdate)) + _, err := waiter.CacheClusterDeleted(conn, cacheClusterID, d.Timeout(schema.TimeoutUpdate)) if err != nil { return fmt.Errorf("error waiting for ElastiCache Cache Cluster (%s) to be deleted (removing replica): %w", cacheClusterID, err) } diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go index 6b193f80781..d8746080454 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -292,10 +292,8 @@ func TestAccAWSElasticacheReplicationGroup_vpc(t *testing.T) { Config: testAccAWSElasticacheReplicationGroupInVPCConfig, Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), - 
resource.TestCheckResourceAttr( - resourceName, "number_cache_clusters", "1"), - resource.TestCheckResourceAttr( - resourceName, "auto_minor_version_upgrade", "false"), + resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "1"), + resource.TestCheckResourceAttr(resourceName, "auto_minor_version_upgrade", "false"), ), }, { @@ -922,9 +920,13 @@ func testAccCheckAWSElasticacheReplicationDestroy(s *terraform.State) error { continue } _, err := finder.ReplicationGroupByID(conn, rs.Primary.ID) - if !tfresource.NotFound(err) { + if tfresource.NotFound(err) { + continue + } + if err != nil { return err } + return fmt.Errorf("ElastiCache Replication Group (%s) still exists", rs.Primary.ID) } return nil } From 8f2e38127b1bc364e8bf3249fe326765fdc5a3c1 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 11 Jan 2021 10:04:40 -0800 Subject: [PATCH 0746/1212] Updates ElastiCache documentation for AWS documentation split and corrects some validation --- aws/resource_aws_elasticache_cluster.go | 19 +++++---- ...ource_aws_elasticache_replication_group.go | 14 +++---- .../docs/r/elasticache_cluster.html.markdown | 42 +++++++++---------- ...lasticache_replication_group.html.markdown | 14 ++++--- 4 files changed, 44 insertions(+), 45 deletions(-) diff --git a/aws/resource_aws_elasticache_cluster.go b/aws/resource_aws_elasticache_cluster.go index 47c755d89dd..8a6d936408c 100644 --- a/aws/resource_aws_elasticache_cluster.go +++ b/aws/resource_aws_elasticache_cluster.go @@ -204,18 +204,19 @@ func resourceAwsElasticacheCluster() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - // A single-element string list containing an Amazon Resource Name (ARN) that - // uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot - // file will be used to populate the node group. 
- // - // See also: - // https://github.com/aws/aws-sdk-go/blob/4862a174f7fc92fb523fc39e68f00b87d91d2c3d/service/elasticache/api.go#L2079 "snapshot_arns": { Type: schema.TypeSet, Optional: true, ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + MaxItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.All( + validateArn, + validation.StringDoesNotContainAny(","), + ), + }, + Set: schema.HashString, }, "snapshot_retention_limit": { Type: schema.TypeInt, @@ -293,7 +294,7 @@ func resourceAwsElasticacheCluster() *schema.Resource { func(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { // Engine memcached does not currently support vertical scaling // InvalidParameterCombination: Scaling is not supported for engine memcached - // https://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Scaling.Memcached.html#Scaling.Memcached.Vertically + // https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/Scaling.html#Scaling.Memcached.Vertically if diff.Id() == "" || !diff.HasChange("node_type") { return nil } diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index 4ec2da537c4..abebe7db8a9 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -204,19 +204,17 @@ func resourceAwsElasticacheReplicationGroup() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - // A single-element string list containing an Amazon Resource Name (ARN) that - // uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot - // file will be used to populate the node group. - // - // See also: - // https://github.com/aws/aws-sdk-go/blob/4862a174f7fc92fb523fc39e68f00b87d91d2c3d/service/elasticache/api.go#L2079 "snapshot_arns": { Type: schema.TypeSet, Optional: true, ForceNew: true, + // Note: Unlike aws_elasticache_cluster, this does not have a limit of 1 item. Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateArn, + Type: schema.TypeString, + ValidateFunc: validation.All( + validateArn, + validation.StringDoesNotContainAny(","), + ), }, Set: schema.HashString, }, diff --git a/website/docs/r/elasticache_cluster.html.markdown b/website/docs/r/elasticache_cluster.html.markdown index 1fdfb49847f..24ad808b4cb 100644 --- a/website/docs/r/elasticache_cluster.html.markdown +++ b/website/docs/r/elasticache_cluster.html.markdown @@ -8,7 +8,11 @@ description: |- # Resource: aws_elasticache_cluster -Provides an ElastiCache Cluster resource, which manages a Memcached cluster or Redis instance. +Provides an ElastiCache Cluster resource, which manages either a +[Memcached cluster](https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/WhatIs.html), a +[single-node Redis instance](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/WhatIs.html), or a +read replica in a Redis (Cluster Mode Enabled) replication group. + For working with Redis (Cluster Mode Enabled) replication groups, see the [`aws_elasticache_replication_group` resource](/docs/providers/aws/r/elasticache_replication_group.html). @@ -17,7 +21,10 @@ it is applied in the next maintenance window. Because of this, Terraform may rep a difference in its planning phase because the actual modification has not yet taken place. You can use the `apply_immediately` flag to instruct the service to apply the change immediately.
Using `apply_immediately` can result in a brief downtime as the server reboots. -See the AWS Docs on [Modifying an ElastiCache Cache Cluster][2] for more information. +See the AWS Documentation on Modifying an ElastiCache Cache Cluster for +[ElastiCache for Memcached](https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/Clusters.Modify.html) or +[ElastiCache for Redis](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.Modify.html) +for more information. ## Example Usage @@ -80,38 +87,32 @@ on the cache cluster is performed. The format is `ddd:hh24:mi-ddd:hh24:mi` (24H The minimum maintenance window is a 60 minute period. Example: `sun:05:00-sun:09:00` * `node_type` – (Required unless `replication_group_id` is provided) The compute and memory capacity of the nodes. See -[Available Cache Node Types](https://aws.amazon.com/elasticache/details#Available_Cache_Node_Types) for -supported node types +[Available Cache Node Types](https://aws.amazon.com/elasticache/pricing/#Available_node_types) for +supported node types. For Memcached, changing this value will re-create the resource. * `num_cache_nodes` – (Required unless `replication_group_id` is provided) The initial number of cache nodes that the -cache cluster will have. For Redis, this value must be 1. For Memcache, this +cache cluster will have. For Redis, this value must be 1. For Memcached, this value must be between 1 and 20. If this number is reduced on subsequent runs, the highest numbered nodes will be removed. * `parameter_group_name` – (Required unless `replication_group_id` is provided) Name of the parameter group to associate with this cache cluster -* `port` – (Optional) The port number on which each of the cache nodes will accept connections. For Memcache the default is 11211, and for Redis the default port is 6379. Cannot be provided with `replication_group_id`. +* `port` – (Optional) The port number on which each of the cache nodes will accept connections. For Memcached the default is 11211, and for Redis the default port is 6379. Cannot be provided with `replication_group_id`. -* `subnet_group_name` – (Optional, VPC only) Name of the subnet group to be used -for the cache cluster. +* `subnet_group_name` – (Optional, VPC only) Name of the subnet group to be used for the cache cluster. -* `security_group_names` – (Optional, EC2 Classic only) List of security group -names to associate with this cache cluster +* `security_group_names` – (Optional, EC2 Classic only) List of security group names to associate with this cache cluster -* `security_group_ids` – (Optional, VPC only) One or more VPC security groups associated - with the cache cluster +* `security_group_ids` – (Optional, VPC only) One or more VPC security groups associated with the cache cluster * `apply_immediately` - (Optional) Specifies whether any database modifications are applied immediately, or during the next maintenance window. Default is - `false`. See [Amazon ElastiCache Documentation for more information.][1] - (Available since v0.6.0) + `false`. See [Amazon ElastiCache Documentation for more information.](https://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_ModifyCacheCluster.html) -* `snapshot_arns` – (Optional) A single-element string list containing an -Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3. 
-Example: `arn:aws:s3:::my_bucket/snapshot1.rdb` +* `snapshot_arns` – (Optional, Redis only) A single-element string list containing an Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3. The object name cannot contain any commas. Changing `snapshot_arns` forces a new resource. -* `snapshot_name` - (Optional) The name of a snapshot from which to restore data into the new node group. Changing the `snapshot_name` forces a new resource. +* `snapshot_name` - (Optional, Redis only) The name of a snapshot from which to restore data into the new node group. Changing `snapshot_name` forces a new resource. * `snapshot_window` - (Optional, Redis only) The daily time range (in UTC) during which ElastiCache will begin taking a daily snapshot of your cache cluster. Example: 05:00-09:00 @@ -142,14 +143,9 @@ In addition to all arguments above, the following attributes are exported: * `arn` - The ARN of the created ElastiCache Cluster. * `cache_nodes` - List of node objects including `id`, `address`, `port` and `availability_zone`. - Referenceable e.g. as `${aws_elasticache_cluster.bar.cache_nodes.0.address}` * `configuration_endpoint` - (Memcached only) The configuration endpoint to allow host discovery. * `cluster_address` - (Memcached only) The DNS name of the cache cluster without the port appended. -[1]: https://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_ModifyCacheCluster.html -[2]: https://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Clusters.Modify.html - - ## Import ElastiCache Clusters can be imported using the `cluster_id`, e.g. diff --git a/website/docs/r/elasticache_replication_group.html.markdown b/website/docs/r/elasticache_replication_group.html.markdown index 9aa04cf0c14..e91000fea3d 100644 --- a/website/docs/r/elasticache_replication_group.html.markdown +++ b/website/docs/r/elasticache_replication_group.html.markdown @@ -9,8 +9,10 @@ description: |- # Resource: aws_elasticache_replication_group Provides an ElastiCache Replication Group resource. -For working with Memcached or single primary Redis instances (Cluster Mode Disabled), see the -[`aws_elasticache_cluster` resource](/docs/providers/aws/r/elasticache_cluster.html). + +For working with a [Memcached cluster](https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/WhatIs.html) or a +[single-node Redis instance (Cluster Mode Disabled)](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/WhatIs.html), +see the [`aws_elasticache_cluster` resource](/docs/providers/aws/r/elasticache_cluster.html). ~> **Note:** When you change an attribute, such as `engine_version`, by default the ElastiCache API applies it in the next maintenance window. Because @@ -19,6 +21,9 @@ actual modification has not yet taken place. You can use the `apply_immediately` flag to instruct the service to apply the change immediately. Using `apply_immediately` can result in a brief downtime as servers reboots. +See the AWS Documentation on +[Modifying an ElastiCache Cache Cluster](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.Modify.html) +for more information. ~> **Note:** Be aware of the terminology collision around "cluster" for `aws_elasticache_replication_group`. For example, it is possible to create a ["Cluster Mode Disabled [Redis] Cluster"](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.Create.CON.Redis.html). With "Cluster Mode Enabled", the data will be stored in shards (called "node groups"). 
See [Redis Cluster Configuration](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/cluster-create-determine-requirements.html#redis-cluster-configuration) for a diagram of the differences. To enable cluster mode, use a parameter group that has cluster mode enabled. The default parameter groups provided by AWS end with ".cluster.on", for example `default.redis6.x.cluster.on`. @@ -118,9 +123,8 @@ The following arguments are supported: * `subnet_group_name` - (Optional) The name of the cache subnet group to be used for the replication group. * `security_group_names` - (Optional) A list of cache security group names to associate with this replication group. * `security_group_ids` - (Optional) One or more Amazon VPC security groups associated with this replication group. Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud -* `snapshot_arns` – (Optional) A single-element string list containing an -Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3. -Example: `arn:aws:s3:::my_bucket/snapshot1.rdb` +* `snapshot_arns` – (Optional) A list of +Amazon Resource Names (ARNs) that identify Redis RDB snapshot files stored in Amazon S3. The object names cannot contain any commas. * `snapshot_name` - (Optional) The name of a snapshot from which to restore data into the new node group. Changing the `snapshot_name` forces a new resource. * `maintenance_window` – (Optional) Specifies the weekly time range for when maintenance on the cache cluster is performed. The format is `ddd:hh24:mi-ddd:hh24:mi` (24H Clock UTC). From bfb57b21a207f8cfdba6f317a1a3fb0b6f1c0c1c Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 11 Jan 2021 10:26:04 -0800 Subject: [PATCH 0747/1212] Removes unneeded Memcached validation from aws_elasticache_replication_group --- aws/resource_aws_elasticache_replication_group.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index abebe7db8a9..dc440d7688f 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -155,8 +155,8 @@ func resourceAwsElasticacheReplicationGroup() *schema.Resource { Optional: true, ForceNew: true, DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - // Suppress default memcached/redis ports when not defined - if !d.IsNewResource() && new == "0" && (old == "6379" || old == "11211") { + // Suppress default Redis ports when not defined + if !d.IsNewResource() && new == "0" && old == elasticacheDefaultRedisPort { return true } return false From 7186a33328142d6cc7c5e5fe387a75a7bef46204 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 11 Jan 2021 10:26:38 -0800 Subject: [PATCH 0748/1212] Cleanup --- aws/resource_aws_elasticache_cluster.go | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/aws/resource_aws_elasticache_cluster.go b/aws/resource_aws_elasticache_cluster.go index 8a6d936408c..e80b5a34977 100644 --- a/aws/resource_aws_elasticache_cluster.go +++ b/aws/resource_aws_elasticache_cluster.go @@ -21,6 +21,11 @@ import ( "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) +const ( + elasticacheDefaultRedisPort = "6379" + elasticacheDefaultMemcachedPort = "11211" +) + func resourceAwsElasticacheCluster() *schema.Resource { return &schema.Resource{ Create: resourceAwsElasticacheClusterCreate,
@@ -154,7 +159,7 @@ func resourceAwsElasticacheCluster() *schema.Resource { ForceNew: true, DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { // Suppress default memcached/redis ports when not defined - if !d.IsNewResource() && new == "0" && (old == "6379" || old == "11211") { + if !d.IsNewResource() && new == "0" && (old == elasticacheDefaultRedisPort || old == elasticacheDefaultMemcachedPort) { return true } return false @@ -205,7 +210,7 @@ func resourceAwsElasticacheCluster() *schema.Resource { Set: schema.HashString, }, "snapshot_arns": { - Type: schema.TypeSet, + Type: schema.TypeList, Optional: true, ForceNew: true, MaxItems: 1, @@ -216,7 +221,6 @@ func resourceAwsElasticacheCluster() *schema.Resource { validation.StringDoesNotContainAny(","), ), }, - Set: schema.HashString, }, "snapshot_retention_limit": { Type: schema.TypeInt, @@ -324,15 +328,9 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{ if v, ok := d.GetOk("replication_group_id"); ok { req.ReplicationGroupId = aws.String(v.(string)) } else { - securityNameSet := d.Get("security_group_names").(*schema.Set) - securityIdSet := d.Get("security_group_ids").(*schema.Set) - securityNames := expandStringSet(securityNameSet) - securityIds := expandStringSet(securityIdSet) - tags := keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().ElasticacheTags() - - req.CacheSecurityGroupNames = securityNames - req.SecurityGroupIds = securityIds - req.Tags = tags + req.CacheSecurityGroupNames = expandStringSet(d.Get("security_group_names").(*schema.Set)) + req.SecurityGroupIds = expandStringSet(d.Get("security_group_ids").(*schema.Set)) + req.Tags = keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().ElasticacheTags() } if v, ok := d.GetOk("cluster_id"); ok { From 9b35e301ea5fe5df51b570f19fd74d9a689a3061 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 12 Jan 2021 17:14:22 -0800 Subject: [PATCH 0749/1212] Fixes snapshot_arns parameter handling --- aws/resource_aws_elasticache_cluster.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/aws/resource_aws_elasticache_cluster.go b/aws/resource_aws_elasticache_cluster.go index e80b5a34977..ca2b1f66566 100644 --- a/aws/resource_aws_elasticache_cluster.go +++ b/aws/resource_aws_elasticache_cluster.go @@ -382,11 +382,10 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{ req.NotificationTopicArn = aws.String(v.(string)) } - snaps := d.Get("snapshot_arns").(*schema.Set) - if snaps.Len() > 0 { - s := expandStringSet(snaps) - req.SnapshotArns = s - log.Printf("[DEBUG] Restoring Redis cluster from S3 snapshot: %#v", s) + snaps := d.Get("snapshot_arns").([]interface{}) + if len(snaps) > 0 { + req.SnapshotArns = expandStringList(snaps) + log.Printf("[DEBUG] Restoring Redis cluster from S3 snapshot: %#v", snaps) } if v, ok := d.GetOk("snapshot_name"); ok { From 04b3e9d230e1413dfdc4632883ea5f7995fe8099 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 13 Jan 2021 15:03:22 -0800 Subject: [PATCH 0750/1212] Clarifies that re-creation of the resource will not be deferred until the maintenance window --- .../docs/r/elasticache_cluster.html.markdown | 17 +++++++++-------- .../elasticache_replication_group.html.markdown | 2 ++ 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/website/docs/r/elasticache_cluster.html.markdown b/website/docs/r/elasticache_cluster.html.markdown index 24ad808b4cb..b3194a664ed 100644 --- 
a/website/docs/r/elasticache_cluster.html.markdown +++ b/website/docs/r/elasticache_cluster.html.markdown @@ -16,7 +16,7 @@ Provides an ElastiCache Cluster resource, which manages either a For working with Redis (Cluster Mode Enabled) replication groups, see the [`aws_elasticache_replication_group` resource](/docs/providers/aws/r/elasticache_replication_group.html). -~> **Note:** When you change an attribute, such as `node_type`, by default +~> **Note:** When you change an attribute, such as `num_cache_nodes`, by default it is applied in the next maintenance window. Because of this, Terraform may report a difference in its planning phase because the actual modification has not yet taken place. You can use the `apply_immediately` flag to instruct the service to apply the @@ -26,6 +26,8 @@ See the AWS Documentation on Modifying an ElastiCache Cache Cluster for [ElastiCache for Redis](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.Modify.html) for more information. +~> **Note:** Any attribute changes that re-create the resource will be applied immediately, regardless of the value of `apply_immediately`. + ## Example Usage ### Memcached Cluster @@ -71,12 +73,11 @@ resource "aws_elasticache_cluster" "replica" { The following arguments are supported: * `cluster_id` – (Required) Group identifier. ElastiCache converts - this name to lowercase + this name to lowercase. Changing this value will re-create the resource. * `replication_group_id` - (Optional) The ID of the replication group to which this cluster should belong. If this parameter is specified, the cluster is added to the specified replication group as a read replica; otherwise, the cluster is a standalone primary that is not part of any replication group. -* `engine` – (Required unless `replication_group_id` is provided) Name of the cache engine to be used for this cache cluster. - Valid values for this parameter are `memcached` or `redis` +* `engine` – (Required unless `replication_group_id` is provided) Name of the cache engine to be used for this cache cluster. Valid values are `memcached` or `redis`. * `engine_version` – (Optional) Version number of the cache engine to be used. See [Describe Cache Engine Versions](https://docs.aws.amazon.com/cli/latest/reference/elasticache/describe-cache-engine-versions.html) @@ -98,11 +99,11 @@ the highest numbered nodes will be removed. * `parameter_group_name` – (Required unless `replication_group_id` is provided) Name of the parameter group to associate with this cache cluster -* `port` – (Optional) The port number on which each of the cache nodes will accept connections. For Memcached the default is 11211, and for Redis the default port is 6379. Cannot be provided with `replication_group_id`. +* `port` – (Optional) The port number on which each of the cache nodes will accept connections. For Memcached the default is 11211, and for Redis the default port is 6379. Cannot be provided with `replication_group_id`. Changing this value will re-create the resource. -* `subnet_group_name` – (Optional, VPC only) Name of the subnet group to be used for the cache cluster. +* `subnet_group_name` – (Optional, VPC only) Name of the subnet group to be used for the cache cluster. Changing this value will re-create the resource. -* `security_group_names` – (Optional, EC2 Classic only) List of security group names to associate with this cache cluster +* `security_group_names` – (Optional, EC2 Classic only) List of security group names to associate with this cache cluster. 
Changing this value will re-create the resource. * `security_group_ids` – (Optional, VPC only) One or more VPC security groups associated with the cache cluster @@ -129,7 +130,7 @@ SNS topic to send ElastiCache notifications to. Example: * `az_mode` - (Optional, Memcached only) Specifies whether the nodes in this Memcached node group are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region. Valid values for this parameter are `single-az` or `cross-az`, default is `single-az`. If you want to choose `cross-az`, `num_cache_nodes` must be greater than `1` -* `availability_zone` - (Optional) The Availability Zone for the cache cluster. If you want to create cache nodes in multi-az, use `preferred_availability_zones` instead. Default: System chosen Availability Zone. +* `availability_zone` - (Optional) The Availability Zone for the cache cluster. If you want to create cache nodes in multi-az, use `preferred_availability_zones` instead. Default: System chosen Availability Zone. Changing this value will re-create the resource. * `preferred_availability_zones` - (Optional, Memcached only) A list of the Availability Zones in which cache nodes are created. If you are creating your cluster in an Amazon VPC you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group. The number of Availability Zones listed must equal the value of `num_cache_nodes`. If you want all the nodes in the same Availability Zone, use `availability_zone` instead, or repeat the Availability Zone multiple times in the list. Default: System chosen Availability Zones. Detecting drift of existing node availability zone is not currently supported. Updating this argument by itself to migrate existing node availability zones is not currently supported and will show a perpetual difference. diff --git a/website/docs/r/elasticache_replication_group.html.markdown b/website/docs/r/elasticache_replication_group.html.markdown index e91000fea3d..b7375664190 100644 --- a/website/docs/r/elasticache_replication_group.html.markdown +++ b/website/docs/r/elasticache_replication_group.html.markdown @@ -25,6 +25,8 @@ See the AWS Documentation on [Modifying an ElastiCache Cache Cluster](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.Modify.html) for more information. +~> **Note:** Any attribute changes that re-create the resource will be applied immediately, regardless of the value of `apply_immediately`. + ~> **Note:** Be aware of the terminology collision around "cluster" for `aws_elasticache_replication_group`. For example, it is possible to create a ["Cluster Mode Disabled [Redis] Cluster"](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.Create.CON.Redis.html). With "Cluster Mode Enabled", the data will be stored in shards (called "node groups"). See [Redis Cluster Configuration](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/cluster-create-determine-requirements.html#redis-cluster-configuration) for a diagram of the differences. To enable cluster mode, use a parameter group that has cluster mode enabled. The default parameter groups provided by AWS end with ".cluster.on", for example `default.redis6.x.cluster.on`. 
## Example Usage From f77454ed0e48849a44331de1212401af5d758039 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 19 Jan 2021 15:27:59 -0800 Subject: [PATCH 0751/1212] Markdown linter fix --- website/docs/r/elasticache_cluster.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/elasticache_cluster.html.markdown b/website/docs/r/elasticache_cluster.html.markdown index b3194a664ed..2c721d49c90 100644 --- a/website/docs/r/elasticache_cluster.html.markdown +++ b/website/docs/r/elasticache_cluster.html.markdown @@ -77,7 +77,7 @@ The following arguments are supported: * `replication_group_id` - (Optional) The ID of the replication group to which this cluster should belong. If this parameter is specified, the cluster is added to the specified replication group as a read replica; otherwise, the cluster is a standalone primary that is not part of any replication group. -* `engine` – (Required unless `replication_group_id` is provided) Name of the cache engine to be used for this cache cluster. Valid values are `memcached` or `redis`. +* `engine` – (Required unless `replication_group_id` is provided) Name of the cache engine to be used for this cache cluster. Valid values are `memcached` or `redis`. * `engine_version` – (Optional) Version number of the cache engine to be used. See [Describe Cache Engine Versions](https://docs.aws.amazon.com/cli/latest/reference/elasticache/describe-cache-engine-versions.html) From c6ab255d22cf1e4307d25b012726517d7ff8e688 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 19 Jan 2021 17:46:31 -0800 Subject: [PATCH 0752/1212] Adds Replication Group default timeouts --- aws/internal/service/elasticache/waiter/waiter.go | 4 ++++ aws/resource_aws_elasticache_replication_group.go | 6 +++--- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/aws/internal/service/elasticache/waiter/waiter.go b/aws/internal/service/elasticache/waiter/waiter.go index d01788ad108..4dac2c3aae5 100644 --- a/aws/internal/service/elasticache/waiter/waiter.go +++ b/aws/internal/service/elasticache/waiter/waiter.go @@ -8,6 +8,10 @@ import ( ) const ( + ReplicationGroupDefaultCreatedTimeout = 60 * time.Minute + ReplicationGroupDefaultUpdatedTimeout = 40 * time.Minute + ReplicationGroupDefaultDeletedTimeout = 40 * time.Minute + replicationGroupAvailableMinTimeout = 10 * time.Second replicationGroupAvailableDelay = 30 * time.Second diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index fa3269a5a3d..18f0e418b55 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -270,9 +270,9 @@ func resourceAwsElasticacheReplicationGroup() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(60 * time.Minute), - Delete: schema.DefaultTimeout(40 * time.Minute), - Update: schema.DefaultTimeout(40 * time.Minute), + Create: schema.DefaultTimeout(waiter.ReplicationGroupDefaultCreatedTimeout), + Delete: schema.DefaultTimeout(waiter.ReplicationGroupDefaultDeletedTimeout), + Update: schema.DefaultTimeout(waiter.ReplicationGroupDefaultUpdatedTimeout), }, } } From addd4e37a52b2b660979d59badf862f1513372c7 Mon Sep 17 00:00:00 2001 From: Brian Zoetewey Date: Tue, 12 May 2020 13:17:26 -0400 Subject: [PATCH 0753/1212] Add tags to aws_iam_user datasource. 
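A hypothetical configuration exercising the new attribute (the user name is a placeholder; the `user_name` argument and `tags` attribute are those added in the diff below):

```hcl
data "aws_iam_user" "example" {
  user_name = "an-existing-user" # placeholder
}

# The data source now exposes the user's tags as a map.
output "user_tags" {
  value = data.aws_iam_user.example.tags
}
```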
--- aws/data_source_aws_iam_user.go | 5 +++ aws/data_source_aws_iam_user_test.go | 45 +++++++++++++++++++++++++++ website/docs/d/iam_user.html.markdown | 1 + 3 files changed, 51 insertions(+) diff --git a/aws/data_source_aws_iam_user.go b/aws/data_source_aws_iam_user.go index 6f699908887..5d5a2849a56 100644 --- a/aws/data_source_aws_iam_user.go +++ b/aws/data_source_aws_iam_user.go @@ -4,6 +4,8 @@ import ( "fmt" "log" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -34,12 +36,14 @@ func dataSourceAwsIAMUser() *schema.Resource { Type: schema.TypeString, Required: true, }, + "tags": tagsSchemaComputed(), }, } } func dataSourceAwsIAMUserRead(d *schema.ResourceData, meta interface{}) error { iamconn := meta.(*AWSClient).iamconn + ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig userName := d.Get("user_name").(string) req := &iam.GetUserInput{ UserName: aws.String(userName), @@ -60,6 +64,7 @@ func dataSourceAwsIAMUserRead(d *schema.ResourceData, meta interface{}) error { d.Set("permissions_boundary", user.PermissionsBoundary.PermissionsBoundaryArn) } d.Set("user_id", user.UserId) + d.Set("tags", keyvaluetags.IamKeyValueTags(user.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()) return nil } diff --git a/aws/data_source_aws_iam_user_test.go b/aws/data_source_aws_iam_user_test.go index 1f5b6fc4b92..6d1c72e9606 100644 --- a/aws/data_source_aws_iam_user_test.go +++ b/aws/data_source_aws_iam_user_test.go @@ -25,6 +25,33 @@ func TestAccAWSDataSourceIAMUser_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "permissions_boundary", ""), resource.TestCheckResourceAttr(resourceName, "user_name", userName), resource.TestCheckResourceAttrPair(resourceName, "arn", "aws_iam_user.user", "arn"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + }, + }) +} + +func TestAccAWSDataSourceIAMUser_tags(t *testing.T) { + resourceName := "data.aws_iam_user.test" + + userName := fmt.Sprintf("test-datasource-user-%d", acctest.RandInt()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccAwsDataSourceIAMUserConfig_tags(userName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "user_id", "aws_iam_user.user", "unique_id"), + resource.TestCheckResourceAttr(resourceName, "path", "/"), + resource.TestCheckResourceAttr(resourceName, "permissions_boundary", ""), + resource.TestCheckResourceAttr(resourceName, "user_name", userName), + resource.TestCheckResourceAttrPair(resourceName, "arn", "aws_iam_user.user", "arn"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.tag1", "test-value1"), + resource.TestCheckResourceAttr(resourceName, "tags.tag2", "test-value2"), ), }, }, @@ -43,3 +70,21 @@ data "aws_iam_user" "test" { } `, name) } + +func testAccAwsDataSourceIAMUserConfig_tags(name string) string { + return fmt.Sprintf(` +resource "aws_iam_user" "user" { + name = "%s" + path = "/" + + tags = { + tag1 = "test-value1" + tag2 = "test-value2" + } +} + +data "aws_iam_user" "test" { + user_name = "${aws_iam_user.user.name}" +} +`, name) +} diff --git a/website/docs/d/iam_user.html.markdown b/website/docs/d/iam_user.html.markdown index f0be94f2d06..33d8eb96e73 100644 --- 
a/website/docs/d/iam_user.html.markdown +++ b/website/docs/d/iam_user.html.markdown @@ -31,3 +31,4 @@ data "aws_iam_user" "example" { * `permissions_boundary` - The ARN of the policy that is used to set the permissions boundary for the user. * `user_id` - The unique ID assigned by AWS for this user. * `user_name` - The name associated to this User +* `tags` - The tags attached to the user. From c1aa7177254d6fa2414506cc9225627333c71340 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Tue, 19 Jan 2021 21:19:51 -0500 Subject: [PATCH 0754/1212] import lint and error check tags on d.Set; update tests --- aws/data_source_aws_iam_user.go | 8 +++--- aws/data_source_aws_iam_user_test.go | 35 ++++++++++++--------------- website/docs/d/iam_user.html.markdown | 2 +- 3 files changed, 21 insertions(+), 24 deletions(-) diff --git a/aws/data_source_aws_iam_user.go b/aws/data_source_aws_iam_user.go index 5d5a2849a56..a8aa35e2dae 100644 --- a/aws/data_source_aws_iam_user.go +++ b/aws/data_source_aws_iam_user.go @@ -4,11 +4,10 @@ import ( "fmt" "log" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func dataSourceAwsIAMUser() *schema.Resource { @@ -44,6 +43,7 @@ func dataSourceAwsIAMUser() *schema.Resource { func dataSourceAwsIAMUserRead(d *schema.ResourceData, meta interface{}) error { iamconn := meta.(*AWSClient).iamconn ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig + userName := d.Get("user_name").(string) req := &iam.GetUserInput{ UserName: aws.String(userName), @@ -64,7 +64,9 @@ func dataSourceAwsIAMUserRead(d *schema.ResourceData, meta interface{}) error { d.Set("permissions_boundary", user.PermissionsBoundary.PermissionsBoundaryArn) } d.Set("user_id", user.UserId) - d.Set("tags", keyvaluetags.IamKeyValueTags(user.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()) + if err := d.Set("tags", keyvaluetags.IamKeyValueTags(user.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + return fmt.Errorf("error setting tags: %w", err) + } return nil } diff --git a/aws/data_source_aws_iam_user_test.go b/aws/data_source_aws_iam_user_test.go index 6d1c72e9606..8c14098822e 100644 --- a/aws/data_source_aws_iam_user_test.go +++ b/aws/data_source_aws_iam_user_test.go @@ -9,7 +9,8 @@ import ( ) func TestAccAWSDataSourceIAMUser_basic(t *testing.T) { - resourceName := "data.aws_iam_user.test" + resourceName := "aws_iam_user.test" + dataSourceName := "data.aws_iam_user.test" userName := fmt.Sprintf("test-datasource-user-%d", acctest.RandInt()) @@ -20,12 +21,12 @@ func TestAccAWSDataSourceIAMUser_basic(t *testing.T) { { Config: testAccAwsDataSourceIAMUserConfig(userName), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair(resourceName, "user_id", "aws_iam_user.user", "unique_id"), - resource.TestCheckResourceAttr(resourceName, "path", "/"), - resource.TestCheckResourceAttr(resourceName, "permissions_boundary", ""), - resource.TestCheckResourceAttr(resourceName, "user_name", userName), - resource.TestCheckResourceAttrPair(resourceName, "arn", "aws_iam_user.user", "arn"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrPair(dataSourceName, "user_id", resourceName, "unique_id"), + resource.TestCheckResourceAttrPair(dataSourceName, "path", resourceName, "path"), + 
resource.TestCheckResourceAttr(dataSourceName, "permissions_boundary", ""), + resource.TestCheckResourceAttrPair(dataSourceName, "user_name", resourceName, "name"), + resource.TestCheckResourceAttrPair(dataSourceName, "arn", resourceName, "arn"), + resource.TestCheckResourceAttrPair(dataSourceName, "tags", resourceName, "tags"), ), }, }, @@ -33,7 +34,8 @@ func TestAccAWSDataSourceIAMUser_basic(t *testing.T) { } func TestAccAWSDataSourceIAMUser_tags(t *testing.T) { - resourceName := "data.aws_iam_user.test" + resourceName := "aws_iam_user.test" + dataSourceName := "data.aws_iam_user.test" userName := fmt.Sprintf("test-datasource-user-%d", acctest.RandInt()) @@ -44,14 +46,7 @@ func TestAccAWSDataSourceIAMUser_tags(t *testing.T) { { Config: testAccAwsDataSourceIAMUserConfig_tags(userName), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair(resourceName, "user_id", "aws_iam_user.user", "unique_id"), - resource.TestCheckResourceAttr(resourceName, "path", "/"), - resource.TestCheckResourceAttr(resourceName, "permissions_boundary", ""), - resource.TestCheckResourceAttr(resourceName, "user_name", userName), - resource.TestCheckResourceAttrPair(resourceName, "arn", "aws_iam_user.user", "arn"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), - resource.TestCheckResourceAttr(resourceName, "tags.tag1", "test-value1"), - resource.TestCheckResourceAttr(resourceName, "tags.tag2", "test-value2"), + resource.TestCheckResourceAttrPair(dataSourceName, "tags", resourceName, "tags"), ), }, }, @@ -60,20 +55,20 @@ func TestAccAWSDataSourceIAMUser_tags(t *testing.T) { func testAccAwsDataSourceIAMUserConfig(name string) string { return fmt.Sprintf(` -resource "aws_iam_user" "user" { +resource "aws_iam_user" "test" { name = "%s" path = "/" } data "aws_iam_user" "test" { - user_name = aws_iam_user.user.name + user_name = aws_iam_user.test.name } `, name) } func testAccAwsDataSourceIAMUserConfig_tags(name string) string { return fmt.Sprintf(` -resource "aws_iam_user" "user" { +resource "aws_iam_user" "test" { name = "%s" path = "/" @@ -84,7 +79,7 @@ resource "aws_iam_user" "user" { } data "aws_iam_user" "test" { - user_name = "${aws_iam_user.user.name}" + user_name = aws_iam_user.test.name } `, name) } diff --git a/website/docs/d/iam_user.html.markdown b/website/docs/d/iam_user.html.markdown index 33d8eb96e73..803020737bb 100644 --- a/website/docs/d/iam_user.html.markdown +++ b/website/docs/d/iam_user.html.markdown @@ -31,4 +31,4 @@ data "aws_iam_user" "example" { * `permissions_boundary` - The ARN of the policy that is used to set the permissions boundary for the user. * `user_id` - The unique ID assigned by AWS for this user. * `user_name` - The name associated to this User -* `tags` - The tags attached to the user. +* `tags` - Map of key-value pairs associated with the user. 
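Because the read path in this patch filters the result through the provider's `IgnoreTagsConfig`, tags excluded at the provider level will not surface in the data source output. A sketch of that provider-level setting, assuming the standard `ignore_tags` block and using placeholder key names:

```hcl
provider "aws" {
  ignore_tags {
    keys         = ["LastScanned"]    # placeholder key
    key_prefixes = ["kubernetes.io/"] # placeholder prefix
  }
}
```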
From 6e1126c0be312017ce46616b326af8a669bce1cd Mon Sep 17 00:00:00 2001 From: angie pinilla Date: Tue, 19 Jan 2021 22:09:50 -0500 Subject: [PATCH 0755/1212] Update CHANGELOG for #13287 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0b5b274773f..02d5278db38 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ ENHANCEMENTS * data-source/aws_ami: Add `throughput` attribute to `block_device_mappings` `ebs` attribute [GH-16631] * data-source/aws_ebs_volume: Add `throughput` attribute [GH-16517] +* data-source/aws_iam_user: Add `tags` attribute [GH-13287] * resource/aws_ami: Support `volume_type` value of `gp3` and add `throughput` argument to `ebs_block_device` configuration block [GH-16631] * resource/aws_ami_copy: Add `throughput` argument to `ebs_block_device` configuration block [GH-16631] * resource/aws_ami_from_instance: Add `throughput` argument to `ebs_block_device` configuration block [GH-16631] From c7220e27ac992c8057ba2b4887851c68015d0a21 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Wed, 20 Jan 2021 01:03:41 -0500 Subject: [PATCH 0756/1212] only call GetFunctionCodeSigningConfig for zip type funcs --- aws/data_source_aws_lambda_function.go | 29 ++++++------ aws/data_source_aws_lambda_function_test.go | 50 +++++++++++++++++++++ 2 files changed, 66 insertions(+), 13 deletions(-) diff --git a/aws/data_source_aws_lambda_function.go b/aws/data_source_aws_lambda_function.go index 5174f9e9bd0..2079a23b30c 100644 --- a/aws/data_source_aws_lambda_function.go +++ b/aws/data_source_aws_lambda_function.go @@ -313,23 +313,26 @@ func dataSourceAwsLambdaFunctionRead(d *schema.ResourceData, meta interface{}) e return nil } - // Get Code Signing Config Output - // If code signing config output exists, set it to that value, otherwise set it empty. - codeSigningConfigInput := &lambda.GetFunctionCodeSigningConfigInput{ - FunctionName: aws.String(d.Get("function_name").(string)), - } + // Get Code Signing Config Output. + // Code Signing is only supported on zip packaged lambda functions. 
+ var codeSigningConfigArn string - getCodeSigningConfigOutput, err := conn.GetFunctionCodeSigningConfig(codeSigningConfigInput) - if err != nil { - return fmt.Errorf("error getting Lambda Function (%s) Code Signing Config: %w", aws.StringValue(function.FunctionName), err) - } + if aws.StringValue(function.PackageType) == lambda.PackageTypeZip { + codeSigningConfigInput := &lambda.GetFunctionCodeSigningConfigInput{ + FunctionName: function.FunctionName, + } + getCodeSigningConfigOutput, err := conn.GetFunctionCodeSigningConfig(codeSigningConfigInput) + if err != nil { + return fmt.Errorf("error getting Lambda Function (%s) Code Signing Config: %w", aws.StringValue(function.FunctionName), err) + } - if getCodeSigningConfigOutput == nil || getCodeSigningConfigOutput.CodeSigningConfigArn == nil { - d.Set("code_signing_config_arn", "") - } else { - d.Set("code_signing_config_arn", getCodeSigningConfigOutput.CodeSigningConfigArn) + if getCodeSigningConfigOutput != nil { + codeSigningConfigArn = aws.StringValue(getCodeSigningConfigOutput.CodeSigningConfigArn) + } } + d.Set("code_signing_config_arn", codeSigningConfigArn) + d.SetId(aws.StringValue(function.FunctionName)) return nil diff --git a/aws/data_source_aws_lambda_function_test.go b/aws/data_source_aws_lambda_function_test.go index 4a2557ba05a..a39366c13d1 100644 --- a/aws/data_source_aws_lambda_function_test.go +++ b/aws/data_source_aws_lambda_function_test.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "os" "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" @@ -180,6 +181,27 @@ func TestAccDataSourceAWSLambdaFunction_fileSystemConfig(t *testing.T) { }) } +func TestAccDataSourceAWSLambdaFunction_imageConfig(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + dataSourceName := "data.aws_lambda_function.test" + resourceName := "aws_lambda_function.test" + + imageLatestID := os.Getenv("AWS_LAMBDA_IMAGE_LATEST_ID") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccDataSourceLambdaImagePreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAWSLambdaFunctionConfigImageConfig(rName, imageLatestID), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "code_signing_config_arn", resourceName, "code_signing_config_arn"), + ), + }, + }, + }) +} + func testAccDataSourceAWSLambdaFunctionConfigBase(rName string) string { return fmt.Sprintf(` resource "aws_iam_role" "lambda" { @@ -495,3 +517,31 @@ data "aws_lambda_function" "test" { } `, rName) } + +func testAccDataSourceAWSLambdaFunctionConfigImageConfig(rName, imageID string) string { + return composeConfig( + testAccDataSourceAWSLambdaFunctionConfigBase(rName), + fmt.Sprintf(` +resource "aws_lambda_function" "test" { + image_uri = %q + function_name = %q + role = aws_iam_role.lambda.arn + package_type = "Image" + image_config { + entry_point = ["/bootstrap-with-handler"] + command = ["app.lambda_handler"] + working_directory = "/var/task" + } +} + +data "aws_lambda_function" "test" { + function_name = aws_lambda_function.test.function_name +} +`, imageID, rName)) +} + +func testAccDataSourceLambdaImagePreCheck(t *testing.T) { + if os.Getenv("AWS_LAMBDA_IMAGE_LATEST_ID") == "" { + t.Skip("AWS_LAMBDA_IMAGE_LATEST_ID env var must be set for Lambda Function Data Source Image Support acceptance tests.") + } +} From ee57dc37dc48587d688b202421425307d2854161 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 20 Jan 2021 08:59:05 
-0500 Subject: [PATCH 0757/1212] .github/workflows: Remove goreleaser parallelism (#17159) Reference: https://github.com/hashicorp/terraform-provider-aws/pull/17067 Reference: https://github.com/goreleaser/goreleaser/issues/2013 Reference: https://github.com/goreleaser/goreleaser-action#inputs Upstream `goreleaser` now includes parallelism that defaults to the number of CPUs. The `goreleaser/goreleaser-action` GitHub Action defaults to using the latest available version so no additional update required. ```console $ system_profiler -detailLevel mini SPHardwareDataType Hardware: Hardware Overview: Model Name: MacBook Pro Model Identifier: MacBookPro15,1 Processor Name: 8-Core Intel Core i9 Processor Speed: 2.4 GHz Number of Processors: 1 Total Number of Cores: 8 L2 Cache (per Core): 256 KB L3 Cache: 16 MB Hyper-Threading Technology: Enabled Memory: 32 GB Boot ROM Version: 1037.147.4.0.0 (iBridge: 17.16.16610.0.0,0) $ goreleaser --version goreleaser version 0.155.0 commit: ba82f43c5f2eefcc82d8d14634fe21b37ffc1799 built by: homebrew $ goreleaser build --help Builds the current project ... -p, --parallelism int Amount tasks to run concurrently (default 16) ``` --- .github/workflows/snapshot.yml | 2 +- .github/workflows/terraform_provider.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/snapshot.yml b/.github/workflows/snapshot.yml index 10f8d82710b..b4accb34088 100644 --- a/.github/workflows/snapshot.yml +++ b/.github/workflows/snapshot.yml @@ -27,7 +27,7 @@ jobs: - name: goreleaser release uses: goreleaser/goreleaser-action@v2 with: - args: release --parallelism 2 --rm-dist --skip-sign --snapshot --timeout 2h + args: release --rm-dist --skip-sign --snapshot --timeout 2h - name: artifact naming id: naming run: | diff --git a/.github/workflows/terraform_provider.yml b/.github/workflows/terraform_provider.yml index 1376c45dd32..857d829f980 100644 --- a/.github/workflows/terraform_provider.yml +++ b/.github/workflows/terraform_provider.yml @@ -300,7 +300,7 @@ jobs: - name: goreleaser build uses: goreleaser/goreleaser-action@v2 with: - args: build --parallelism 2 --snapshot --timeout 2h + args: build --snapshot --timeout 2h semgrep: runs-on: ubuntu-latest From 03757c12b167250823af03ffe709180757596854 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 20 Jan 2021 09:11:23 -0500 Subject: [PATCH 0758/1212] resource/aws_lambda_function: Prevent crash with missing environment variable value (#17056) Previously if the environment variable value became `nil` (likely via AWS console update), the resource could panic: ``` { "Configuration": { ... "Environment": { "Variables": { "XXX": null, "YYY": "ZZZ" }, "Error": null }, ... }, ... 
} ``` ``` panic: runtime error: invalid memory address or nil pointer dereference 2020-12-10T19:59:19.412Z [DEBUG] plugin.terraform-provider-aws_v3.0.0_x5: [signal SIGSEGV: segmentation violation code=0x1 addr=0x0 pc=0x401dab7] 2020-12-10T19:59:19.413Z [DEBUG] plugin.terraform-provider-aws_v3.0.0_x5: 2020-12-10T19:59:19.413Z [DEBUG] plugin.terraform-provider-aws_v3.0.0_x5: goroutine 485 [running]: 2020-12-10T19:59:19.413Z [DEBUG] plugin.terraform-provider-aws_v3.0.0_x5: github.com/terraform-providers/terraform-provider-aws/aws.flattenLambdaEnvironment(0xc00162daa0, 0x57add72, 0xa, 0x44b4460) 2020-12-10T19:59:19.413Z [DEBUG] plugin.terraform-provider-aws_v3.0.0_x5: /opt/teamcity-agent/work/5d79fe75d4460a2f/src/github.com/terraform-providers/terraform-provider-aws/aws/structure.go:1594 +0xc7 2020-12-10T19:59:19.413Z [DEBUG] plugin.terraform-provider-aws_v3.0.0_x5: github.com/terraform-providers/terraform-provider-aws/aws.resourceAwsLambdaFunctionRead(0xc00016ee00, 0x4c948a0, 0xc000160a00, 0xc00016ee00, 0x0) 2020-12-10T19:59:19.413Z [DEBUG] plugin.terraform-provider-aws_v3.0.0_x5: /opt/teamcity-agent/work/5d79fe75d4460a2f/src/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_lambda_function.go:599 +0x107a ``` This replaces the unsafe dereferencing with the AWS Go SDK conversion function. Output from acceptance testing: ``` --- PASS: TestAccAWSLambdaFunction_basic (93.45s) --- PASS: TestAccAWSLambdaFunction_codeSigningConfig (143.84s) --- PASS: TestAccAWSLambdaFunction_concurrency (119.18s) --- PASS: TestAccAWSLambdaFunction_concurrencyCycle (80.17s) --- PASS: TestAccAWSLambdaFunction_DeadLetterConfig (130.84s) --- PASS: TestAccAWSLambdaFunction_DeadLetterConfigUpdated (139.36s) --- PASS: TestAccAWSLambdaFunction_disablePublish (290.84s) --- PASS: TestAccAWSLambdaFunction_disappears (76.76s) --- PASS: TestAccAWSLambdaFunction_EmptyVpcConfig (303.87s) --- PASS: TestAccAWSLambdaFunction_enablePublish (301.82s) --- PASS: TestAccAWSLambdaFunction_encryptedEnvVariables (152.33s) --- PASS: TestAccAWSLambdaFunction_Environment_Variables_NoValue (59.03s) --- PASS: TestAccAWSLambdaFunction_envVariables (135.67s) --- PASS: TestAccAWSLambdaFunction_expectFilenameAndS3Attributes (19.70s) --- PASS: TestAccAWSLambdaFunction_FileSystemConfig (2369.47s) --- PASS: TestAccAWSLambdaFunction_KmsKeyArn_NoEnvironmentVariables (1001.01s) --- PASS: TestAccAWSLambdaFunction_Layers (830.27s) --- PASS: TestAccAWSLambdaFunction_LayersUpdate (1218.31s) --- PASS: TestAccAWSLambdaFunction_localUpdate (1253.77s) --- PASS: TestAccAWSLambdaFunction_localUpdate_nameOnly (318.24s) --- PASS: TestAccAWSLambdaFunction_nilDeadLetterConfig (75.02s) --- PASS: TestAccAWSLambdaFunction_runtimes (434.13s) --- PASS: TestAccAWSLambdaFunction_s3 (39.17s) --- PASS: TestAccAWSLambdaFunction_s3Update_basic (65.26s) --- PASS: TestAccAWSLambdaFunction_s3Update_unversioned (66.94s) --- PASS: TestAccAWSLambdaFunction_tags (111.80s) --- PASS: TestAccAWSLambdaFunction_tracingConfig (1022.97s) --- PASS: TestAccAWSLambdaFunction_UnpublishedCodeUpdate (326.86s) --- PASS: TestAccAWSLambdaFunction_versioned (816.85s) --- PASS: TestAccAWSLambdaFunction_versionedUpdate (1283.13s) --- PASS: TestAccAWSLambdaFunction_VPC (810.42s) --- PASS: TestAccAWSLambdaFunction_VPC_withInvocation (1415.48s) --- PASS: TestAccAWSLambdaFunction_VpcConfig_ProperIamDependencies (346.28s) --- PASS: TestAccAWSLambdaFunction_VPCRemoval (2383.24s) --- PASS: TestAccAWSLambdaFunction_VPCUpdate (2409.76s) ``` --- aws/resource_aws_lambda_function.go | 14 +++++++ 
aws/resource_aws_lambda_function_test.go | 47 ++++++++++++++++++++++++ aws/structure.go | 18 --------- 3 files changed, 61 insertions(+), 18 deletions(-) diff --git a/aws/resource_aws_lambda_function.go b/aws/resource_aws_lambda_function.go index 7f987cc458f..1486b02f4f9 100644 --- a/aws/resource_aws_lambda_function.go +++ b/aws/resource_aws_lambda_function.go @@ -1274,6 +1274,20 @@ func waitForLambdaFunctionUpdate(conn *lambda.Lambda, functionName string, timeo return err } +func flattenLambdaEnvironment(apiObject *lambda.EnvironmentResponse) []interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.Variables; v != nil { + tfMap["variables"] = aws.StringValueMap(v) + } + + return []interface{}{tfMap} +} + func flattenLambdaFileSystemConfigs(fscList []*lambda.FileSystemConfig) []map[string]interface{} { results := make([]map[string]interface{}, 0, len(fscList)) for _, fsc := range fscList { diff --git a/aws/resource_aws_lambda_function_test.go b/aws/resource_aws_lambda_function_test.go index a2c9e2c0936..262dce47fc6 100644 --- a/aws/resource_aws_lambda_function_test.go +++ b/aws/resource_aws_lambda_function_test.go @@ -416,6 +416,33 @@ func TestAccAWSLambdaFunction_envVariables(t *testing.T) { }) } +func TestAccAWSLambdaFunction_Environment_Variables_NoValue(t *testing.T) { + var conf lambda.GetFunctionOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_lambda_function.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckLambdaFunctionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLambdaConfigEnvironmentVariablesNoValue(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsLambdaFunctionExists(resourceName, rName, &conf), + resource.TestCheckResourceAttr(resourceName, "environment.0.variables.key1", ""), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"filename", "publish"}, + }, + }, + }) +} + func TestAccAWSLambdaFunction_encryptedEnvVariables(t *testing.T) { var conf lambda.GetFunctionOutput @@ -2241,6 +2268,26 @@ resource "aws_lambda_function" "test" { `, funcName) } +func testAccAWSLambdaConfigEnvironmentVariablesNoValue(rName string) string { + return composeConfig( + baseAccAWSLambdaConfig(rName, rName, rName), + fmt.Sprintf(` +resource "aws_lambda_function" "test" { + filename = "test-fixtures/lambdatest.zip" + function_name = %[1]q + handler = "exports.example" + role = aws_iam_role.iam_for_lambda.arn + runtime = "nodejs12.x" + + environment { + variables = { + key1 = "" + } + } +} +`, rName)) +} + func testAccAWSLambdaConfigEncryptedEnvVariables(keyDesc, funcName, policyName, roleName, sgName string) string { return fmt.Sprintf(baseAccAWSLambdaConfig(policyName, roleName, sgName)+` resource "aws_kms_key" "foo" { diff --git a/aws/structure.go b/aws/structure.go index a9f7f382b69..1781aa7bb81 100644 --- a/aws/structure.go +++ b/aws/structure.go @@ -1587,24 +1587,6 @@ func flattenDSVpcSettings( return []map[string]interface{}{settings} } -func flattenLambdaEnvironment(lambdaEnv *lambda.EnvironmentResponse) []interface{} { - envs := make(map[string]interface{}) - en := make(map[string]string) - - if lambdaEnv == nil { - return nil - } - - for k, v := range lambdaEnv.Variables { - en[k] = *v - } - if len(en) > 0 { - envs["variables"] = en - } - - return []interface{}{envs} -} - func 
expandLambdaEventSourceMappingDestinationConfig(vDest []interface{}) *lambda.DestinationConfig { if len(vDest) == 0 { return nil From 8ae1af29abd92dce7f9ead75353b394e8bf3739c Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 20 Jan 2021 09:12:02 -0500 Subject: [PATCH 0759/1212] Update CHANGELOG for #17056 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 02d5278db38..66eee7a0bc9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ BUG FIXES * resource/aws_api_gateway_authorizer: Ensure `authorizer_credentials` are configured when `type` is `COGNITO_USER_POOLS` [GH-16614] * resource/aws_lambda_function: Prevent panic with missing `FunctionConfiguration` `PackageType` attribute in API response [GH-16544] +* resource/aws_lambda_function: Prevent panic with missing environment variable value [GH-17056] ## 3.24.1 (January 15, 2021) From 7a74dac0b55ffed2ab71871a1a4d957fe74a572a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Jan 2021 13:35:46 -0500 Subject: [PATCH 0760/1212] build(deps): bump github.com/mitchellh/go-testing-interface (#16859) Bumps [github.com/mitchellh/go-testing-interface](https://github.com/mitchellh/go-testing-interface) from 1.0.4 to 1.14.1. - [Release notes](https://github.com/mitchellh/go-testing-interface/releases) - [Commits](https://github.com/mitchellh/go-testing-interface/compare/v1.0.4...v1.14.1) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index d2f5b45e7fe..f9e2fc55c58 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/mattn/go-colorable v0.1.7 // indirect github.com/mitchellh/copystructure v1.0.0 github.com/mitchellh/go-homedir v1.1.0 - github.com/mitchellh/go-testing-interface v1.0.4 + github.com/mitchellh/go-testing-interface v1.14.1 github.com/pquerna/otp v1.3.0 gopkg.in/yaml.v2 v2.3.0 ) diff --git a/go.sum b/go.sum index 13177949ffa..82e535a7230 100644 --- a/go.sum +++ b/go.sum @@ -274,6 +274,8 @@ github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go. github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.0.4 h1:ZU1VNC02qyufSZsjjs7+khruk2fKvbQ3TwRV/IBCeFA= github.com/mitchellh/go-testing-interface v1.0.4/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= From ed541766bd8b75cb4e917b97ff8d5f601ca7fffa Mon Sep 17 00:00:00 2001 From: Shrikanth Shetty Date: Wed, 20 Jan 2021 13:40:19 -0500 Subject: [PATCH 0761/1212] Correct block-devices linking. 
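For reference, a minimal sketch of the block device arguments the corrected links point at; the AMI ID, device name, and volume sizes are placeholders, not a tested configuration:

```hcl
resource "aws_instance" "example" {
  ami           = "ami-0123456789abcdef0" # placeholder AMI ID
  instance_type = "t3.micro"

  # Root volume settings; covered by the EBS, Ephemeral, and Root Block Devices section.
  root_block_device {
    volume_size = 20
  }

  # An additional EBS volume attached at instance creation.
  ebs_block_device {
    device_name = "/dev/sdf" # placeholder device name
    volume_size = 50
  }
}
```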
--- website/docs/r/instance.html.markdown | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/docs/r/instance.html.markdown b/website/docs/r/instance.html.markdown index bbe39b65ede..9e899d97fb0 100644 --- a/website/docs/r/instance.html.markdown +++ b/website/docs/r/instance.html.markdown @@ -100,10 +100,10 @@ The following arguments are supported: * `cpu_threads_per_core` - (Optional - has no effect unless `cpu_core_count` is also set) If set to 1, hyperthreading is disabled on the launched instance. Defaults to 2 if not set. See [Optimizing CPU Options](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for more information. * `credit_specification` - (Optional) Customize the credit specification of the instance. See [Credit Specification](#credit-specification) below for more details. * `disable_api_termination` - (Optional) If true, enables [EC2 Instance Termination Protection](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/terminating-instances.html#Using_ChangingDisableAPITermination). -* `ebs_block_device` - (Optional) Additional EBS block devices to attach to the instance. Block device configurations only apply on resource creation. See [Block Devices](#block-devices) below for details on attributes and drift detection. +* `ebs_block_device` - (Optional) Additional EBS block devices to attach to the instance. Block device configurations only apply on resource creation. See [Block Devices](#ebs-ephemeral-and-root-block-devices) below for details on attributes and drift detection. * `ebs_optimized` - (Optional) If true, the launched EC2 instance will be EBS-optimized. Note that if this is not set on an instance type that is optimized by default, then this will show as disabled; if the instance type is optimized by default, then there is no need to set this and there is no effect to disabling it. See the [EBS Optimized section](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) of the AWS User Guide for more information. * `enclave_options` - (Optional) Enable Nitro Enclaves on launched instances. See [Enclave Options](#enclave-options) below for more details. -* `ephemeral_block_device` - (Optional) Customize Ephemeral (also known as "Instance Store") volumes on the instance. See [Block Devices](#block-devices) below for details. +* `ephemeral_block_device` - (Optional) Customize Ephemeral (also known as "Instance Store") volumes on the instance. See [Block Devices](#ebs-ephemeral-and-root-block-devices) below for details. * `get_password_data` - (Optional) If true, wait for password data to become available and retrieve it. Useful for getting the administrator password for instances running Microsoft Windows. The password data is exported to the `password_data` attribute. See [GetPasswordData](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetPasswordData.html) for more information. * `hibernation` - (Optional) If true, the launched EC2 instance will support hibernation. * `host_id` - (Optional) ID of a dedicated host that the instance will be assigned to. Use when an instance is to be launched on a specific dedicated host. @@ -118,7 +118,7 @@ The following arguments are supported: * `network_interface` - (Optional) Customize network interfaces to be attached at instance boot time. See [Network Interfaces](#network-interfaces) below for more details. * `placement_group` - (Optional) Placement Group to start the instance in.
* `private_ip` - (Optional) Private IP address to associate with the instance in a VPC. -* `root_block_device` - (Optional) Customize details about the root block device of the instance. See [Block Devices](#block-devices) below for details. +* `root_block_device` - (Optional) Customize details about the root block device of the instance. See [Block Devices](#ebs-ephemeral-and-root-block-devices) below for details. * `secondary_private_ips` - (Optional) A list of secondary private IPv4 addresses to assign to the instance's primary network interface (eth0) in a VPC. Can only be assigned to the primary network interface (eth0) attached at instance creation, not a pre-existing network interface i.e. referenced in a `network_interface` block. Refer to the [Elastic network interfaces documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) to see the maximum number of private IP addresses allowed per instance type. * `security_groups` - (Optional, EC2-Classic and default VPC only) A list of security group names (EC2-Classic) or IDs (default VPC) to associate with. * `source_dest_check` - (Optional) Controls if traffic is routed to the instance when the destination address does not match the instance. Used for NAT or VPNs. Defaults true. From b0184a0f88143df94bce83b6fe819790281a6d42 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 20 Jan 2021 11:49:08 -0800 Subject: [PATCH 0762/1212] Adds semgrep rule for expandStringSet() --- .semgrep.yml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/.semgrep.yml b/.semgrep.yml index 2d40fae922b..d02961855ea 100644 --- a/.semgrep.yml +++ b/.semgrep.yml @@ -69,6 +69,23 @@ rules: - pattern: schema.NewSet(schema.HashString, flattenStringList($APIOBJECT)) - pattern-not-inside: func flattenStringSet(list []*string) *schema.Set { ... } severity: WARNING + + - id: helper-schema-Set-extraneous-expandStringList-with-List + languages: [go] + message: Prefer `expandStringSet()` function for casting a set to a list of string pointers + paths: + include: + - aws/ + patterns: + - pattern-either: + - pattern: expandStringList($SET.List()) + - pattern: | + $LIST := $SET.List() + ... + expandStringList($LIST) + - pattern-not-inside: func expandStringSet(configured *schema.Set) []*string { ... 
} + severity: WARNING + - id: helper-schema-ResourceData-GetOk-with-extraneous-conditional languages: [go] From 7daf6daaba7a3abedd77e188bfe91d0506a9f25a Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 8 Jan 2021 14:55:39 -0500 Subject: [PATCH 0763/1212] tests/serverlessapplicationrespository_application: Fix hardcoded regions --- ...sapplicationrepository_application_test.go | 22 ++++++++----- ...ionrepository_cloudformation_stack_test.go | 31 ++++++++++--------- 2 files changed, 32 insertions(+), 21 deletions(-) diff --git a/aws/data_source_aws_serverlessapplicationrepository_application_test.go b/aws/data_source_aws_serverlessapplicationrepository_application_test.go index 9cff7bd4251..232358384ea 100644 --- a/aws/data_source_aws_serverlessapplicationrepository_application_test.go +++ b/aws/data_source_aws_serverlessapplicationrepository_application_test.go @@ -17,7 +17,7 @@ func TestAccDataSourceAwsServerlessApplicationRepositoryApplication_Basic(t *tes Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig, + Config: testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig(), Check: resource.ComposeTestCheckFunc( testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceID(datasourceName), resource.TestCheckResourceAttr(datasourceName, "name", "SecretsManagerRDSPostgreSQLRotationSingleUser"), @@ -71,7 +71,7 @@ func TestAccDataSourceAwsServerlessApplicationRepositoryApplication_Versioned(t ), }, { - Config: testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig_Versioned_NonExistent, + Config: testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig_Versioned_NonExistent(), ExpectError: regexp.MustCompile(`error getting Serverless Application Repository application`), }, }, @@ -92,11 +92,15 @@ func testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceID(n str } } -const testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig = testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication + ` +func testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig() string { + return composeConfig( + testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), + fmt.Sprintf(` data "aws_serverlessapplicationrepository_application" "secrets_manager_postgres_single_user_rotator" { application_id = local.postgres_single_user_rotator_arn } -` +`)) +} const testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig_NonExistent = ` data "aws_serverlessapplicationrepository_application" "no_such_function" { @@ -110,7 +114,7 @@ data "aws_region" "current" {} func testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig_Versioned(version string) string { return composeConfig( - testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication, + testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), fmt.Sprintf(` data "aws_serverlessapplicationrepository_application" "secrets_manager_postgres_single_user_rotator" { application_id = local.postgres_single_user_rotator_arn @@ -119,9 +123,13 @@ data "aws_serverlessapplicationrepository_application" "secrets_manager_postgres `, version)) } -const testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig_Versioned_NonExistent = testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication + ` +func 
testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig_Versioned_NonExistent() string { + return composeConfig( + testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), + fmt.Sprintf(` data "aws_serverlessapplicationrepository_application" "secrets_manager_postgres_single_user_rotator" { application_id = local.postgres_single_user_rotator_arn semantic_version = "42.13.7" } -` +`)) +} diff --git a/aws/resource_aws_serverlessapplicationrepository_cloudformation_stack_test.go b/aws/resource_aws_serverlessapplicationrepository_cloudformation_stack_test.go index bd96fd7bc8e..382b9af3de5 100644 --- a/aws/resource_aws_serverlessapplicationrepository_cloudformation_stack_test.go +++ b/aws/resource_aws_serverlessapplicationrepository_cloudformation_stack_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/cloudformation" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -299,7 +300,7 @@ func testAccAwsServerlessApplicationRepositoryCloudFormationStackNameNoPrefixImp func testAccAwsServerlessApplicationRepositoryCloudFormationStackConfig(stackName string) string { return composeConfig( - testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication, + testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), fmt.Sprintf(` resource "aws_serverlessapplicationrepository_cloudformation_stack" "postgres-rotator" { name = "%[1]s" @@ -320,7 +321,7 @@ data "aws_region" "current" {} func testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_updateInitial(stackName, functionName string) string { return composeConfig( - testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication, + testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), fmt.Sprintf(` resource "aws_serverlessapplicationrepository_cloudformation_stack" "postgres-rotator" { name = "%[1]s" @@ -344,7 +345,7 @@ data "aws_region" "current" {} func testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_updateUpdated(stackName, functionName string) string { return composeConfig( - testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication, + testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), fmt.Sprintf(` resource "aws_serverlessapplicationrepository_cloudformation_stack" "postgres-rotator" { name = "%[1]s" @@ -368,7 +369,7 @@ data "aws_region" "current" {} func testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_versioned(stackName, version string) string { return composeConfig( - testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication, + testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), fmt.Sprintf(` resource "aws_serverlessapplicationrepository_cloudformation_stack" "postgres-rotator" { name = "%[1]s" @@ -389,7 +390,7 @@ data "aws_region" "current" {} func testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_versioned2(stackName, version string) string { return composeConfig( - testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication, + testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), fmt.Sprintf(` resource "aws_serverlessapplicationrepository_cloudformation_stack" "postgres-rotator" { 
name = "%[1]s" @@ -411,7 +412,7 @@ data "aws_region" "current" {} func testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_versionedPaired(stackName, version string) string { return composeConfig( - testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication, + testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), fmt.Sprintf(` resource "aws_serverlessapplicationrepository_cloudformation_stack" "postgres-rotator" { name = "%[1]s" @@ -435,7 +436,7 @@ data "aws_region" "current" {} func testAccAwsServerlessApplicationRepositoryCloudFormationStackConfigTags1(rName, tagKey1, tagValue1 string) string { return composeConfig( - testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication, + testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), fmt.Sprintf(` resource "aws_serverlessapplicationrepository_cloudformation_stack" "postgres-rotator" { name = "%[1]s" @@ -459,7 +460,7 @@ data "aws_region" "current" {} func testAccAwsServerlessApplicationRepositoryCloudFormationStackConfigTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { return composeConfig( - testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication, + testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), fmt.Sprintf(` resource "aws_serverlessapplicationrepository_cloudformation_stack" "postgres-rotator" { name = "%[1]s" @@ -482,7 +483,8 @@ data "aws_region" "current" {} `, rName, tagKey1, tagValue1, tagKey2, tagValue2)) } -const testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication = ` +func testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication() string { + return fmt.Sprintf(` data "aws_partition" "current" {} locals { @@ -492,12 +494,13 @@ locals { application_account = local.security_manager_accounts[data.aws_partition.current.partition] security_manager_regions = { - "aws" = "us-east-1", - "aws-us-gov" = "us-gov-west-1", + %[1]q = %[3]q, + %[2]q = %[4]q, } security_manager_accounts = { - "aws" = "297356227824", - "aws-us-gov" = "023102451235", + %[1]q = "297356227824", + %[2]q = "023102451235", } } -` +`, endpoints.AwsPartitionID, endpoints.AwsUsGovPartitionID, endpoints.UsEast1RegionID, endpoints.UsGovWest1RegionID) +} From c0d983cf9818071ff3c61e868f604320409ddae5 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 8 Jan 2021 15:02:20 -0500 Subject: [PATCH 0764/1212] tests/serverlessapplicationrepository: Lint fix --- ...essapplicationrepository_cloudformation_stack_test.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_serverlessapplicationrepository_cloudformation_stack_test.go b/aws/resource_aws_serverlessapplicationrepository_cloudformation_stack_test.go index 382b9af3de5..2d7e1bd3306 100644 --- a/aws/resource_aws_serverlessapplicationrepository_cloudformation_stack_test.go +++ b/aws/resource_aws_serverlessapplicationrepository_cloudformation_stack_test.go @@ -494,12 +494,13 @@ locals { application_account = local.security_manager_accounts[data.aws_partition.current.partition] security_manager_regions = { - %[1]q = %[3]q, - %[2]q = %[4]q, + %[1]q = %[3]q + %[2]q = %[4]q } + security_manager_accounts = { - %[1]q = "297356227824", - %[2]q = "023102451235", + %[1]q = "297356227824" + %[2]q = "023102451235" } } `, endpoints.AwsPartitionID, endpoints.AwsUsGovPartitionID, endpoints.UsEast1RegionID, endpoints.UsGovWest1RegionID) From 
6c6db8f11500e3d00acd30e6a5ebca7bd3778d5f Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 8 Jan 2021 15:15:13 -0500 Subject: [PATCH 0765/1212] tests/serverlessapplicationrepository: Fix lint --- ...ws_serverlessapplicationrepository_application_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/aws/data_source_aws_serverlessapplicationrepository_application_test.go b/aws/data_source_aws_serverlessapplicationrepository_application_test.go index 232358384ea..c3a709a20ee 100644 --- a/aws/data_source_aws_serverlessapplicationrepository_application_test.go +++ b/aws/data_source_aws_serverlessapplicationrepository_application_test.go @@ -95,11 +95,11 @@ func testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceID(n str func testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig() string { return composeConfig( testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), - fmt.Sprintf(` + ` data "aws_serverlessapplicationrepository_application" "secrets_manager_postgres_single_user_rotator" { application_id = local.postgres_single_user_rotator_arn } -`)) +`) } const testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig_NonExistent = ` @@ -126,10 +126,10 @@ data "aws_serverlessapplicationrepository_application" "secrets_manager_postgres func testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig_Versioned_NonExistent() string { return composeConfig( testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), - fmt.Sprintf(` + ` data "aws_serverlessapplicationrepository_application" "secrets_manager_postgres_single_user_rotator" { application_id = local.postgres_single_user_rotator_arn semantic_version = "42.13.7" } -`)) +`) } From 92a33dda792586144ddc19fd3cc43b4eb5a5c42c Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 20 Jan 2021 15:52:04 -0500 Subject: [PATCH 0766/1212] tests/serverlessapplicationrepository: Rework test ARN boilerplate --- ...sapplicationrepository_application_test.go | 58 ++--- ...ionrepository_cloudformation_stack_test.go | 246 ++++++++++-------- 2 files changed, 160 insertions(+), 144 deletions(-) diff --git a/aws/data_source_aws_serverlessapplicationrepository_application_test.go b/aws/data_source_aws_serverlessapplicationrepository_application_test.go index c3a709a20ee..a22b68bdb52 100644 --- a/aws/data_source_aws_serverlessapplicationrepository_application_test.go +++ b/aws/data_source_aws_serverlessapplicationrepository_application_test.go @@ -11,13 +11,14 @@ import ( func TestAccDataSourceAwsServerlessApplicationRepositoryApplication_Basic(t *testing.T) { datasourceName := "data.aws_serverlessapplicationrepository_application.secrets_manager_postgres_single_user_rotator" + appARN := testAccAwsServerlessApplicationRepositoryCloudFormationApplicationID() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig(), + Config: testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig(appARN), Check: resource.ComposeTestCheckFunc( testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceID(datasourceName), resource.TestCheckResourceAttr(datasourceName, "name", "SecretsManagerRDSPostgreSQLRotationSingleUser"), @@ -28,7 +29,7 @@ func TestAccDataSourceAwsServerlessApplicationRepositoryApplication_Basic(t *tes ), }, { - Config: 
testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig_NonExistent, + Config: testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig_NonExistent(), ExpectError: regexp.MustCompile(`error getting Serverless Application Repository application`), }, }, @@ -36,6 +37,7 @@ func TestAccDataSourceAwsServerlessApplicationRepositoryApplication_Basic(t *tes } func TestAccDataSourceAwsServerlessApplicationRepositoryApplication_Versioned(t *testing.T) { datasourceName := "data.aws_serverlessapplicationrepository_application.secrets_manager_postgres_single_user_rotator" + appARN := testAccAwsServerlessApplicationRepositoryCloudFormationApplicationID() const ( version1 = "1.0.13" @@ -47,7 +49,7 @@ func TestAccDataSourceAwsServerlessApplicationRepositoryApplication_Versioned(t Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig_Versioned(version1), + Config: testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig_Versioned(appARN, version1), Check: resource.ComposeTestCheckFunc( testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceID(datasourceName), resource.TestCheckResourceAttr(datasourceName, "name", "SecretsManagerRDSPostgreSQLRotationSingleUser"), @@ -58,7 +60,7 @@ func TestAccDataSourceAwsServerlessApplicationRepositoryApplication_Versioned(t ), }, { - Config: testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig_Versioned(version2), + Config: testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig_Versioned(appARN, version2), Check: resource.ComposeTestCheckFunc( testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceID(datasourceName), resource.TestCheckResourceAttr(datasourceName, "name", "SecretsManagerRDSPostgreSQLRotationSingleUser"), @@ -71,7 +73,7 @@ func TestAccDataSourceAwsServerlessApplicationRepositoryApplication_Versioned(t ), }, { - Config: testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig_Versioned_NonExistent(), + Config: testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig_Versioned_NonExistent(appARN), ExpectError: regexp.MustCompile(`error getting Serverless Application Repository application`), }, }, @@ -92,44 +94,42 @@ func testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceID(n str } } -func testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig() string { - return composeConfig( - testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), - ` +func testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig(appARN string) string { + return fmt.Sprintf(` data "aws_serverlessapplicationrepository_application" "secrets_manager_postgres_single_user_rotator" { - application_id = local.postgres_single_user_rotator_arn + application_id = %[1]q } -`) -} - -const testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig_NonExistent = ` -data "aws_serverlessapplicationrepository_application" "no_such_function" { - application_id = "arn:${data.aws_partition.current.partition}:serverlessrepo:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:applications/ThisFunctionDoesNotExist" +`, appARN) } +func testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig_NonExistent() string { + return ` data "aws_caller_identity" "current" {} + data "aws_partition" "current" {} + data "aws_region" "current" {} + 
+data "aws_serverlessapplicationrepository_application" "no_such_function" { + application_id = "arn:${data.aws_partition.current.partition}:serverlessrepo:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:applications/ThisFunctionDoesNotExist" +} ` +} -func testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig_Versioned(version string) string { - return composeConfig( - testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), - fmt.Sprintf(` +func testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig_Versioned(appARN, version string) string { + return fmt.Sprintf(` data "aws_serverlessapplicationrepository_application" "secrets_manager_postgres_single_user_rotator" { - application_id = local.postgres_single_user_rotator_arn - semantic_version = "%[1]s" + application_id = %[1]q + semantic_version = %[2]q } -`, version)) +`, appARN, version) } -func testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig_Versioned_NonExistent() string { - return composeConfig( - testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), - ` +func testAccCheckAwsServerlessApplicationRepositoryApplicationDataSourceConfig_Versioned_NonExistent(appARN string) string { + return fmt.Sprintf(` data "aws_serverlessapplicationrepository_application" "secrets_manager_postgres_single_user_rotator" { - application_id = local.postgres_single_user_rotator_arn + application_id = %[1]q semantic_version = "42.13.7" } -`) +`, appARN) } diff --git a/aws/resource_aws_serverlessapplicationrepository_cloudformation_stack_test.go b/aws/resource_aws_serverlessapplicationrepository_cloudformation_stack_test.go index 2d7e1bd3306..8fec4456245 100644 --- a/aws/resource_aws_serverlessapplicationrepository_cloudformation_stack_test.go +++ b/aws/resource_aws_serverlessapplicationrepository_cloudformation_stack_test.go @@ -5,8 +5,10 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/cloudformation" + serverlessrepository "github.com/aws/aws-sdk-go/service/serverlessapplicationrepository" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" @@ -18,7 +20,7 @@ import ( func TestAccAwsServerlessApplicationRepositoryCloudFormationStack_basic(t *testing.T) { var stack cloudformation.Stack stackName := acctest.RandomWithPrefix("tf-acc-test") - + appARN := testAccAwsServerlessApplicationRepositoryCloudFormationApplicationID() resourceName := "aws_serverlessapplicationrepository_cloudformation_stack.postgres-rotator" resource.ParallelTest(t, resource.TestCase{ @@ -27,7 +29,7 @@ func TestAccAwsServerlessApplicationRepositoryCloudFormationStack_basic(t *testi CheckDestroy: testAccCheckAWSCloudFormationDestroy, Steps: []resource.TestStep{ { - Config: testAccAwsServerlessApplicationRepositoryCloudFormationStackConfig(stackName), + Config: testAccAwsServerlessApplicationRepositoryCloudFormationStackConfig(stackName, appARN), Check: resource.ComposeTestCheckFunc( testAccCheckServerlessApplicationRepositoryCloudFormationStackExists(resourceName, &stack), resource.TestCheckResourceAttr(resourceName, "name", stackName), @@ -68,7 +70,7 @@ func TestAccAwsServerlessApplicationRepositoryCloudFormationStack_basic(t *testi func 
TestAccAwsServerlessApplicationRepositoryCloudFormationStack_disappears(t *testing.T) { var stack cloudformation.Stack stackName := acctest.RandomWithPrefix("tf-acc-test") - + appARN := testAccAwsServerlessApplicationRepositoryCloudFormationApplicationID() resourceName := "aws_serverlessapplicationrepository_cloudformation_stack.postgres-rotator" resource.ParallelTest(t, resource.TestCase{ @@ -77,7 +79,7 @@ func TestAccAwsServerlessApplicationRepositoryCloudFormationStack_disappears(t * CheckDestroy: testAccCheckAmiDestroy, Steps: []resource.TestStep{ { - Config: testAccAwsServerlessApplicationRepositoryCloudFormationStackConfig(stackName), + Config: testAccAwsServerlessApplicationRepositoryCloudFormationStackConfig(stackName, appARN), Check: resource.ComposeTestCheckFunc( testAccCheckServerlessApplicationRepositoryCloudFormationStackExists(resourceName, &stack), testAccCheckResourceDisappears(testAccProvider, resourceAwsServerlessApplicationRepositoryCloudFormationStack(), resourceName), @@ -91,21 +93,21 @@ func TestAccAwsServerlessApplicationRepositoryCloudFormationStack_disappears(t * func TestAccAwsServerlessApplicationRepositoryCloudFormationStack_versioned(t *testing.T) { var stack1, stack2, stack3 cloudformation.Stack stackName := acctest.RandomWithPrefix("tf-acc-test") + appARN := testAccAwsServerlessApplicationRepositoryCloudFormationApplicationID() + resourceName := "aws_serverlessapplicationrepository_cloudformation_stack.postgres-rotator" const ( version1 = "1.0.13" version2 = "1.1.36" ) - resourceName := "aws_serverlessapplicationrepository_cloudformation_stack.postgres-rotator" - resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCloudFormationDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_versioned(stackName, version1), + Config: testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_versioned(stackName, appARN, version1), Check: resource.ComposeTestCheckFunc( testAccCheckServerlessApplicationRepositoryCloudFormationStackExists(resourceName, &stack1), resource.TestCheckResourceAttr(resourceName, "semantic_version", version1), @@ -119,7 +121,7 @@ func TestAccAwsServerlessApplicationRepositoryCloudFormationStack_versioned(t *t ImportStateVerify: true, }, { - Config: testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_versioned2(stackName, version2), + Config: testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_versioned2(stackName, appARN, version2), Check: resource.ComposeTestCheckFunc( testAccCheckServerlessApplicationRepositoryCloudFormationStackExists(resourceName, &stack2), testAccCheckCloudFormationStackNotRecreated(&stack1, &stack2), @@ -131,7 +133,7 @@ func TestAccAwsServerlessApplicationRepositoryCloudFormationStack_versioned(t *t }, { // Confirm removal of "CAPABILITY_RESOURCE_POLICY" is handled properly - Config: testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_versioned(stackName, version1), + Config: testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_versioned(stackName, appARN, version1), Check: resource.ComposeTestCheckFunc( testAccCheckServerlessApplicationRepositoryCloudFormationStackExists(resourceName, &stack3), testAccCheckCloudFormationStackNotRecreated(&stack2, &stack3), @@ -147,18 +149,18 @@ func TestAccAwsServerlessApplicationRepositoryCloudFormationStack_versioned(t *t func 
TestAccAwsServerlessApplicationRepositoryCloudFormationStack_paired(t *testing.T) { var stack cloudformation.Stack stackName := acctest.RandomWithPrefix("tf-acc-test") + appARN := testAccAwsServerlessApplicationRepositoryCloudFormationApplicationID() + resourceName := "aws_serverlessapplicationrepository_cloudformation_stack.postgres-rotator" const version = "1.1.36" - resourceName := "aws_serverlessapplicationrepository_cloudformation_stack.postgres-rotator" - resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCloudFormationDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_versionedPaired(stackName, version), + Config: testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_versionedPaired(stackName, appARN, version), Check: resource.ComposeTestCheckFunc( testAccCheckServerlessApplicationRepositoryCloudFormationStackExists(resourceName, &stack), resource.TestCheckResourceAttr(resourceName, "semantic_version", version), @@ -174,7 +176,7 @@ func TestAccAwsServerlessApplicationRepositoryCloudFormationStack_paired(t *test func TestAccAwsServerlessApplicationRepositoryCloudFormationStack_Tags(t *testing.T) { var stack cloudformation.Stack stackName := acctest.RandomWithPrefix("tf-acc-test") - + appARN := testAccAwsServerlessApplicationRepositoryCloudFormationApplicationID() resourceName := "aws_serverlessapplicationrepository_cloudformation_stack.postgres-rotator" resource.ParallelTest(t, resource.TestCase{ @@ -183,7 +185,7 @@ func TestAccAwsServerlessApplicationRepositoryCloudFormationStack_Tags(t *testin CheckDestroy: testAccCheckAWSCloudFormationDestroy, Steps: []resource.TestStep{ { - Config: testAccAwsServerlessApplicationRepositoryCloudFormationStackConfigTags1(stackName, "key1", "value1"), + Config: testAccAwsServerlessApplicationRepositoryCloudFormationStackConfigTags1(stackName, appARN, "key1", "value1"), Check: resource.ComposeTestCheckFunc( testAccCheckServerlessApplicationRepositoryCloudFormationStackExists(resourceName, &stack), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), @@ -196,7 +198,7 @@ func TestAccAwsServerlessApplicationRepositoryCloudFormationStack_Tags(t *testin ImportStateVerify: true, }, { - Config: testAccAwsServerlessApplicationRepositoryCloudFormationStackConfigTags2(stackName, "key1", "value1updated", "key2", "value2"), + Config: testAccAwsServerlessApplicationRepositoryCloudFormationStackConfigTags2(stackName, appARN, "key1", "value1updated", "key2", "value2"), Check: resource.ComposeTestCheckFunc( testAccCheckServerlessApplicationRepositoryCloudFormationStackExists(resourceName, &stack), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), @@ -204,7 +206,7 @@ func TestAccAwsServerlessApplicationRepositoryCloudFormationStack_Tags(t *testin ), }, { - Config: testAccAwsServerlessApplicationRepositoryCloudFormationStackConfigTags1(stackName, "key2", "value2"), + Config: testAccAwsServerlessApplicationRepositoryCloudFormationStackConfigTags1(stackName, appARN, "key2", "value2"), Check: resource.ComposeTestCheckFunc( testAccCheckServerlessApplicationRepositoryCloudFormationStackExists(resourceName, &stack), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), @@ -220,7 +222,7 @@ func TestAccAwsServerlessApplicationRepositoryCloudFormationStack_update(t *test stackName := acctest.RandomWithPrefix("tf-acc-test") initialName := 
acctest.RandomWithPrefix("FuncName1") updatedName := acctest.RandomWithPrefix("FuncName2") - + appARN := testAccAwsServerlessApplicationRepositoryCloudFormationApplicationID() resourceName := "aws_serverlessapplicationrepository_cloudformation_stack.postgres-rotator" resource.ParallelTest(t, resource.TestCase{ @@ -229,7 +231,7 @@ func TestAccAwsServerlessApplicationRepositoryCloudFormationStack_update(t *test CheckDestroy: testAccCheckAWSCloudFormationDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_updateInitial(stackName, initialName), + Config: testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_updateInitial(stackName, appARN, initialName), Check: resource.ComposeTestCheckFunc( testAccCheckServerlessApplicationRepositoryCloudFormationStackExists(resourceName, &stack), testAccCheckResourceAttrRegionalARNIgnoreRegionAndAccount(resourceName, "application_id", "serverlessrepo", "applications/SecretsManagerRDSPostgreSQLRotationSingleUser"), @@ -239,7 +241,7 @@ func TestAccAwsServerlessApplicationRepositoryCloudFormationStack_update(t *test ), }, { - Config: testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_updateUpdated(stackName, updatedName), + Config: testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_updateUpdated(stackName, appARN, updatedName), Check: resource.ComposeTestCheckFunc( testAccCheckServerlessApplicationRepositoryCloudFormationStackExists(resourceName, &stack), resource.TestCheckResourceAttr(resourceName, "parameters.functionName", updatedName), @@ -298,127 +300,160 @@ func testAccAwsServerlessApplicationRepositoryCloudFormationStackNameNoPrefixImp } } -func testAccAwsServerlessApplicationRepositoryCloudFormationStackConfig(stackName string) string { - return composeConfig( - testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), - fmt.Sprintf(` +func testAccAwsServerlessApplicationRepositoryCloudFormationApplicationID() string { + arnRegion := endpoints.UsEast1RegionID + arnAccountID := "297356227824" + if testAccGetPartition() == endpoints.AwsUsGovPartitionID { + arnRegion = endpoints.UsGovWest1RegionID + arnAccountID = "023102451235" + } + + return arn.ARN{ + Partition: testAccGetPartition(), + Service: serverlessrepository.ServiceName, + Region: arnRegion, + AccountID: arnAccountID, + Resource: "applications/SecretsManagerRDSPostgreSQLRotationSingleUser", + }.String() +} + +func testAccAwsServerlessApplicationRepositoryCloudFormationStackConfig(stackName, appARN string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + +data "aws_region" "current" {} + resource "aws_serverlessapplicationrepository_cloudformation_stack" "postgres-rotator" { - name = "%[1]s" - application_id = local.postgres_single_user_rotator_arn + name = %[1]q + application_id = %[2]q + capabilities = [ "CAPABILITY_IAM", "CAPABILITY_RESOURCE_POLICY", ] + parameters = { functionName = "func-%[1]s" endpoint = "secretsmanager.${data.aws_region.current.name}.${data.aws_partition.current.dns_suffix}" } } +`, stackName, appARN) +} + +func testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_updateInitial(stackName, appARN, functionName string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} data "aws_region" "current" {} -`, stackName)) -} -func testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_updateInitial(stackName, functionName string) string { - return composeConfig( - 
testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), - fmt.Sprintf(` resource "aws_serverlessapplicationrepository_cloudformation_stack" "postgres-rotator" { - name = "%[1]s" - application_id = local.postgres_single_user_rotator_arn + name = %[1]q + application_id = %[2]q + capabilities = [ "CAPABILITY_IAM", "CAPABILITY_RESOURCE_POLICY", ] + parameters = { - functionName = "%[2]s" + functionName = %[3]q endpoint = "secretsmanager.${data.aws_region.current.name}.${data.aws_partition.current.dns_suffix}" } + tags = { key = "value" } } +`, stackName, appARN, functionName) +} + +func testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_updateUpdated(stackName, appARN, functionName string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} data "aws_region" "current" {} -`, stackName, functionName)) -} -func testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_updateUpdated(stackName, functionName string) string { - return composeConfig( - testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), - fmt.Sprintf(` resource "aws_serverlessapplicationrepository_cloudformation_stack" "postgres-rotator" { - name = "%[1]s" - application_id = local.postgres_single_user_rotator_arn + name = %[1]q + application_id = %[2]q + capabilities = [ "CAPABILITY_IAM", "CAPABILITY_RESOURCE_POLICY", ] + parameters = { - functionName = "%[2]s" + functionName = %[3]q endpoint = "secretsmanager.${data.aws_region.current.name}.${data.aws_partition.current.dns_suffix}" } + tags = { key = "value" } } +`, stackName, appARN, functionName) +} + +func testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_versioned(stackName, appARN, version string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} data "aws_region" "current" {} -`, stackName, functionName)) -} -func testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_versioned(stackName, version string) string { - return composeConfig( - testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), - fmt.Sprintf(` resource "aws_serverlessapplicationrepository_cloudformation_stack" "postgres-rotator" { - name = "%[1]s" - application_id = local.postgres_single_user_rotator_arn - semantic_version = "%[2]s" + name = %[1]q + application_id = %[2]q + semantic_version = %[3]q + capabilities = [ "CAPABILITY_IAM", ] + parameters = { functionName = "func-%[1]s" endpoint = "secretsmanager.${data.aws_region.current.name}.${data.aws_partition.current.dns_suffix}" } } +`, stackName, appARN, version) +} + +func testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_versioned2(stackName, appARN, version string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} data "aws_region" "current" {} -`, stackName, version)) -} -func testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_versioned2(stackName, version string) string { - return composeConfig( - testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), - fmt.Sprintf(` resource "aws_serverlessapplicationrepository_cloudformation_stack" "postgres-rotator" { - name = "%[1]s" - application_id = local.postgres_single_user_rotator_arn + name = %[1]q + application_id = %[2]q + capabilities = [ "CAPABILITY_IAM", "CAPABILITY_RESOURCE_POLICY", ] - semantic_version = "%[2]s" + + semantic_version = %[3]q + parameters = { functionName = "func-%[1]s" endpoint = 
"secretsmanager.${data.aws_region.current.name}.${data.aws_partition.current.dns_suffix}" } } +`, stackName, appARN, version) +} + +func testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_versionedPaired(stackName, appARN, version string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} data "aws_region" "current" {} -`, stackName, version)) -} -func testAccAWSServerlessApplicationRepositoryCloudFormationStackConfig_versionedPaired(stackName, version string) string { - return composeConfig( - testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), - fmt.Sprintf(` resource "aws_serverlessapplicationrepository_cloudformation_stack" "postgres-rotator" { - name = "%[1]s" + name = %[1]q application_id = data.aws_serverlessapplicationrepository_application.secrets_manager_postgres_single_user_rotator.application_id semantic_version = data.aws_serverlessapplicationrepository_application.secrets_manager_postgres_single_user_rotator.semantic_version capabilities = data.aws_serverlessapplicationrepository_application.secrets_manager_postgres_single_user_rotator.required_capabilities + parameters = { functionName = "func-%[1]s" endpoint = "secretsmanager.${data.aws_region.current.name}.${data.aws_partition.current.dns_suffix}" @@ -426,82 +461,63 @@ resource "aws_serverlessapplicationrepository_cloudformation_stack" "postgres-ro } data "aws_serverlessapplicationrepository_application" "secrets_manager_postgres_single_user_rotator" { - application_id = local.postgres_single_user_rotator_arn - semantic_version = "%[2]s" + application_id = %[2]q + semantic_version = %[3]q +} +`, stackName, appARN, version) } +func testAccAwsServerlessApplicationRepositoryCloudFormationStackConfigTags1(rName, appARN, tagKey1, tagValue1 string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + data "aws_region" "current" {} -`, stackName, version)) -} -func testAccAwsServerlessApplicationRepositoryCloudFormationStackConfigTags1(rName, tagKey1, tagValue1 string) string { - return composeConfig( - testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), - fmt.Sprintf(` resource "aws_serverlessapplicationrepository_cloudformation_stack" "postgres-rotator" { - name = "%[1]s" - application_id = local.postgres_single_user_rotator_arn + name = %[1]q + application_id = %[2]q + capabilities = [ "CAPABILITY_IAM", "CAPABILITY_RESOURCE_POLICY", ] + parameters = { functionName = "func-%[1]s" endpoint = "secretsmanager.${data.aws_region.current.name}.${data.aws_partition.current.dns_suffix}" } + tags = { - %[2]q = %[3]q + %[3]q = %[4]q } } +`, rName, appARN, tagKey1, tagValue1) +} + +func testAccAwsServerlessApplicationRepositoryCloudFormationStackConfigTags2(rName, appARN, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} data "aws_region" "current" {} -`, rName, tagKey1, tagValue1)) -} -func testAccAwsServerlessApplicationRepositoryCloudFormationStackConfigTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return composeConfig( - testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication(), - fmt.Sprintf(` resource "aws_serverlessapplicationrepository_cloudformation_stack" "postgres-rotator" { - name = "%[1]s" - application_id = local.postgres_single_user_rotator_arn + name = %[1]q + application_id = %[2]q + capabilities = [ "CAPABILITY_IAM", "CAPABILITY_RESOURCE_POLICY", ] + parameters = { functionName = "func-%[1]s" 
endpoint = "secretsmanager.${data.aws_region.current.name}.${data.aws_partition.current.dns_suffix}" } - tags = { - %[2]q = %[3]q - %[4]q = %[5]q - } -} -data "aws_region" "current" {} -`, rName, tagKey1, tagValue1, tagKey2, tagValue2)) -} - -func testAccCheckAwsServerlessApplicationRepositoryPostgresSingleUserRotatorApplication() string { - return fmt.Sprintf(` -data "aws_partition" "current" {} - -locals { - postgres_single_user_rotator_arn = "arn:${data.aws_partition.current.partition}:serverlessrepo:${local.application_region}:${local.application_account}:applications/SecretsManagerRDSPostgreSQLRotationSingleUser" - - application_region = local.security_manager_regions[data.aws_partition.current.partition] - application_account = local.security_manager_accounts[data.aws_partition.current.partition] - - security_manager_regions = { - %[1]q = %[3]q - %[2]q = %[4]q - } - - security_manager_accounts = { - %[1]q = "297356227824" - %[2]q = "023102451235" + tags = { + %[3]q = %[4]q + %[5]q = %[6]q } } -`, endpoints.AwsPartitionID, endpoints.AwsUsGovPartitionID, endpoints.UsEast1RegionID, endpoints.UsGovWest1RegionID) +`, rName, appARN, tagKey1, tagValue1, tagKey2, tagValue2) } From 3880f976f8fbf3952289882f3665d06b949b5bcb Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 20 Jan 2021 14:32:13 -0800 Subject: [PATCH 0767/1212] Adds CustomizeDiff to update computed member_clusters --- ...ource_aws_elasticache_replication_group.go | 8 ++ ..._aws_elasticache_replication_group_test.go | 126 ++++++++++++++++-- 2 files changed, 124 insertions(+), 10 deletions(-) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index 4ec2da537c4..ba083b35fc6 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -1,6 +1,7 @@ package aws import ( + "context" "fmt" "log" "regexp" @@ -10,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -273,6 +275,12 @@ func resourceAwsElasticacheReplicationGroup() *schema.Resource { Delete: schema.DefaultTimeout(40 * time.Minute), Update: schema.DefaultTimeout(40 * time.Minute), }, + + CustomizeDiff: customdiff.Sequence( + customdiff.ComputedIf("member_clusters", func(ctx context.Context, diff *schema.ResourceDiff, meta interface{}) bool { + return diff.HasChange("number_cache_clusters") || diff.HasChange("cluster_mode.0.num_node_groups") + }), + ), } } diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go index 59d623616e2..004d1c6e2b3 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -74,6 +74,8 @@ func TestAccAWSElasticacheReplicationGroup_basic(t *testing.T) { testAccCheckResourceAttrRegionalARN(resourceName, "arn", "elasticache", fmt.Sprintf("replicationgroup:%s", rName)), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "2"), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-001", rName)), + 
resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-002", rName)), resource.TestCheckResourceAttr(resourceName, "auto_minor_version_upgrade", "false"), resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.redis6.x"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.#", "1"), @@ -392,7 +394,6 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_Basic(t *testing.T) { Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig(rName, 2, 1), Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), - resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "4"), resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.redis6.x.cluster.on"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.#", "1"), @@ -400,6 +401,12 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_Basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "1"), resource.TestCheckResourceAttr(resourceName, "port", "6379"), resource.TestCheckResourceAttrSet(resourceName, "configuration_endpoint_address"), + resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "4"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "4"), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0001-001", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0001-002", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0002-001", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0002-002", rName)), ), }, { @@ -432,8 +439,11 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_NonClusteredParameterGrou resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.num_node_groups", "1"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "1"), resource.TestMatchResourceAttr(resourceName, "primary_endpoint_address", regexp.MustCompile(fmt.Sprintf("%s\\..+\\.%s", rName, testAccGetPartitionDNSSuffix()))), - resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), resource.TestCheckNoResourceAttr(resourceName, "configuration_endpoint_address"), + resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "2"), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-001", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-002", rName)), ), }, { @@ -460,12 +470,19 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_NumNodeGroups(t *testing. 
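The member identifiers asserted in these steps follow a pattern inferred from the IDs ElastiCache assigns to cluster-mode groups: the replication group name, a four-digit node-group index, and a three-digit member index. A small helper that reproduces the expectations (an observation from the test data, not a documented API contract):

```go
package main

import "fmt"

// expectedMemberClusters generates the member IDs these checks assert
// for a cluster-mode replication group: one primary plus
// replicasPerNodeGroup replicas in each node group.
func expectedMemberClusters(group string, nodeGroups, replicasPerNodeGroup int) []string {
	var ids []string
	for ng := 1; ng <= nodeGroups; ng++ {
		for member := 1; member <= replicasPerNodeGroup+1; member++ {
			ids = append(ids, fmt.Sprintf("%s-%04d-%03d", group, ng, member))
		}
	}
	return ids
}

func main() {
	fmt.Println(expectedMemberClusters("tf-acc-test", 2, 1))
	// [tf-acc-test-0001-001 tf-acc-test-0001-002 tf-acc-test-0002-001 tf-acc-test-0002-002]
}
```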
Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig(rName, 3, 1), Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), - resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "6"), resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.redis6.x.cluster.on"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.#", "1"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.num_node_groups", "3"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "1"), + resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "6"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "6"), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0001-001", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0001-002", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0002-001", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0002-002", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0003-001", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0003-002", rName)), ), }, { @@ -478,24 +495,88 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_NumNodeGroups(t *testing. Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig(rName, 1, 1), Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), - resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.redis6.x.cluster.on"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.#", "1"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.num_node_groups", "1"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "1"), + resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "2"), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0001-001", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0001-002", rName)), ), }, { Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig(rName, 2, 1), Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), + resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.redis6.x.cluster.on"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.num_node_groups", "2"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "1"), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "4"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "4"), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0001-001", rName)), + 
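Because `member_clusters` is a set, its elements sit under hashed indexes in state, so these tests assert membership with `resource.TestCheckTypeSetElemAttr` and the `member_clusters.*` wildcard rather than positional indexes. A sketch of how the repeated checks could be folded into one helper (illustrative only; the patch deliberately spells the checks out inline):

```go
package example

import (
	"strconv"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

// memberClusterChecks asserts the set length and that each expected ID
// is present; the "*" wildcard matches any element of the set, so the
// check is independent of the element's internal hash index.
func memberClusterChecks(resourceName string, ids ...string) resource.TestCheckFunc {
	checks := []resource.TestCheckFunc{
		resource.TestCheckResourceAttr(resourceName, "member_clusters.#", strconv.Itoa(len(ids))),
	}
	for _, id := range ids {
		checks = append(checks, resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", id))
	}
	return resource.ComposeTestCheckFunc(checks...)
}
```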
resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0001-002", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0002-001", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0002-002", rName)), + ), + }, + }, + }) +} + +func TestAccAWSElasticacheReplicationGroup_ClusterMode_ReplicasPerNodeGroup(t *testing.T) { + var rg elasticache.ReplicationGroup + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_elasticache_replication_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig(rName, 2, 2), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), + resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.redis6.x.cluster.on"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.num_node_groups", "2"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "2"), + resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "6"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "6"), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0001-001", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0001-002", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0001-003", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0002-001", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0002-002", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0002-003", rName)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately"}, + }, + { + Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig(rName, 2, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.redis6.x.cluster.on"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.#", "1"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.num_node_groups", "2"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "1"), + resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "4"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "4"), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0001-001", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0001-002", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0002-001", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", 
fmt.Sprintf("%s-0002-002", rName)), ), }, }, @@ -627,7 +708,7 @@ func TestAccAWSElasticacheReplicationGroup_useCmkKmsKeyId(t *testing.T) { }) } -func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters(t *testing.T) { +func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Basic(t *testing.T) { var replicationGroup elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" @@ -643,6 +724,9 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters(t *testing.T) { testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &replicationGroup), resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", "false"), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "2"), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-001", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-002", rName)), ), }, { @@ -657,6 +741,11 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters(t *testing.T) { testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &replicationGroup), resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", "false"), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "4"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "4"), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-001", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-002", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-003", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-004", rName)), ), }, { @@ -665,6 +754,9 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters(t *testing.T) { testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &replicationGroup), resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", "false"), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "2"), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-001", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-002", rName)), ), }, }, @@ -687,6 +779,10 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFail testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &replicationGroup), resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", "false"), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "3"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "3"), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-001", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-002", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-003", rName)), ), }, { @@ -716,6 +812,9 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFail testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &replicationGroup), resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", 
"false"), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "2"), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-001", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-002", rName)), ), }, }, @@ -738,6 +837,10 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFail testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &replicationGroup), resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", "true"), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "3"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "3"), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-001", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-002", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-003", rName)), ), }, { @@ -789,6 +892,9 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFail testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &replicationGroup), resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", "true"), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "2"), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-001", rName)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-002", rName)), ), }, }, @@ -1430,7 +1536,7 @@ resource "aws_subnet" "test2" { } resource "aws_elasticache_subnet_group" "test" { - name = "tf-test-%[1]s" + name = %[1]q description = "tf-test-cache-subnet-group-descr" subnet_ids = [ @@ -1440,7 +1546,7 @@ resource "aws_elasticache_subnet_group" "test" { } resource "aws_security_group" "test" { - name = "tf-test-%[1]s" + name = %[1]q description = "tf-test-security-group-descr" vpc_id = aws_vpc.test.id @@ -1453,7 +1559,7 @@ resource "aws_security_group" "test" { } resource "aws_elasticache_replication_group" "test" { - replication_group_id = "tf-%[1]s" + replication_group_id = %[1]q replication_group_description = "test description" node_type = "cache.t2.micro" port = 6379 @@ -1462,8 +1568,8 @@ resource "aws_elasticache_replication_group" "test" { automatic_failover_enabled = true cluster_mode { - num_node_groups = %d - replicas_per_node_group = %d + num_node_groups = %[2]d + replicas_per_node_group = %[3]d } } `, rName, numNodeGroups, replicasPerNodeGroup) From 1abeba0b065e8db197fed8b92604ec15578950aa Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Wed, 20 Jan 2021 17:46:33 -0500 Subject: [PATCH 0768/1212] document lambda image related env vars in guide --- docs/MAINTAINING.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/MAINTAINING.md b/docs/MAINTAINING.md index 696f8574ded..f315205fd01 100644 --- a/docs/MAINTAINING.md +++ b/docs/MAINTAINING.md @@ -404,6 +404,9 @@ Environment variables (beyond standard AWS Go SDK ones) used by acceptance testi | `AWS_EC2_EIP_PUBLIC_IPV4_POOL` | Identifier for EC2 Public IPv4 Pool for EC2 EIP testing. | | `AWS_GUARDDUTY_MEMBER_ACCOUNT_ID` | Identifier of AWS Account for GuardDuty Member testing. **DEPRECATED:** Should be replaced with standard alternate account handling for tests. 
| | `AWS_GUARDDUTY_MEMBER_EMAIL` | Email address for GuardDuty Member testing. **DEPRECATED:** It may be possible to use a placeholder email address instead. | +| `AWS_LAMBDA_IMAGE_LATEST_ID` | ECR repository image URI (tagged as `latest`) for Lambda container image acceptance tests. +| `AWS_LAMBDA_IMAGE_V1_ID` | ECR repository image URI (tagged as `v1`) for Lambda container image acceptance tests. +| `AWS_LAMBDA_IMAGE_V2_ID` | ECR repository image URI (tagged as `v2`) for Lambda container image acceptance tests. | `DX_CONNECTION_ID` | Identifier for Direct Connect Connection testing. | | `DX_VIRTUAL_INTERFACE_ID` | Identifier for Direct Connect Virtual Interface testing. | | `EC2_SECURITY_GROUP_RULES_PER_GROUP_LIMIT` | EC2 Quota for Rules per Security Group. Defaults to 50. **DEPRECATED:** Can be augmented or replaced with Service Quotas lookup. | From 18dce77bf148f81613888581f765c29c2a7fcc04 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 20 Jan 2021 14:46:55 -0800 Subject: [PATCH 0769/1212] Update aws/resource_aws_elasticache_cluster.go Co-authored-by: Brian Flad --- aws/resource_aws_elasticache_cluster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_elasticache_cluster.go b/aws/resource_aws_elasticache_cluster.go index 9e4a6e6e11e..353c912630d 100644 --- a/aws/resource_aws_elasticache_cluster.go +++ b/aws/resource_aws_elasticache_cluster.go @@ -428,7 +428,7 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig c, err := finder.CacheClusterWithNodeInfoByID(conn, d.Id()) - if tfresource.NotFound(err) { + if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] ElastiCache Cache Cluster (%s) not found, removing from state", d.Id()) d.SetId("") return nil From 1481bf466d4a75666f94aa252f89fd06a28e280d Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 20 Jan 2021 14:47:15 -0800 Subject: [PATCH 0770/1212] Update aws/resource_aws_elasticache_cluster.go Co-authored-by: Brian Flad --- aws/resource_aws_elasticache_cluster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_elasticache_cluster.go b/aws/resource_aws_elasticache_cluster.go index 353c912630d..8f67de1fc09 100644 --- a/aws/resource_aws_elasticache_cluster.go +++ b/aws/resource_aws_elasticache_cluster.go @@ -444,7 +444,7 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) d.Set("engine_version", c.EngineVersion) if c.ConfigurationEndpoint != nil { d.Set("port", c.ConfigurationEndpoint.Port) - d.Set("configuration_endpoint", aws.String(fmt.Sprintf("%s:%d", *c.ConfigurationEndpoint.Address, *c.ConfigurationEndpoint.Port))) + d.Set("configuration_endpoint", aws.String(fmt.Sprintf("%s:%d", aws.StringValue(c.ConfigurationEndpoint.Address), aws.Int64Value(c.ConfigurationEndpoint.Port)))) d.Set("cluster_address", aws.String((*c.ConfigurationEndpoint.Address))) } else if len(c.CacheNodes) > 0 { d.Set("port", int(aws.Int64Value(c.CacheNodes[0].Endpoint.Port))) From 33d79a7d09abddbb7bf659638227751954de6206 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 20 Jan 2021 14:48:48 -0800 Subject: [PATCH 0771/1212] Update aws/resource_aws_elasticache_cluster.go Co-authored-by: Brian Flad --- aws/resource_aws_elasticache_cluster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_elasticache_cluster.go b/aws/resource_aws_elasticache_cluster.go index 8f67de1fc09..ad1f3820c23 100644 --- 
a/aws/resource_aws_elasticache_cluster.go +++ b/aws/resource_aws_elasticache_cluster.go @@ -445,7 +445,7 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) if c.ConfigurationEndpoint != nil { d.Set("port", c.ConfigurationEndpoint.Port) d.Set("configuration_endpoint", aws.String(fmt.Sprintf("%s:%d", aws.StringValue(c.ConfigurationEndpoint.Address), aws.Int64Value(c.ConfigurationEndpoint.Port)))) - d.Set("cluster_address", aws.String((*c.ConfigurationEndpoint.Address))) + d.Set("cluster_address", c.ConfigurationEndpoint.Address) } else if len(c.CacheNodes) > 0 { d.Set("port", int(aws.Int64Value(c.CacheNodes[0].Endpoint.Port))) } From 7392574bdf93e5c243765d75033a672f7daf1383 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 20 Jan 2021 14:49:01 -0800 Subject: [PATCH 0772/1212] Update aws/resource_aws_elasticache_cluster.go Co-authored-by: Brian Flad --- aws/resource_aws_elasticache_cluster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_elasticache_cluster.go b/aws/resource_aws_elasticache_cluster.go index ad1f3820c23..8ed42b300df 100644 --- a/aws/resource_aws_elasticache_cluster.go +++ b/aws/resource_aws_elasticache_cluster.go @@ -469,7 +469,7 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) } } d.Set("availability_zone", c.PreferredAvailabilityZone) - if *c.PreferredAvailabilityZone == "Multiple" { + if aws.StringValue(c.PreferredAvailabilityZone) == "Multiple" { d.Set("az_mode", "cross-az") } else { d.Set("az_mode", "single-az") From 66447e94f9ba0ceba24981f28f6e1b41a2dd7213 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 20 Jan 2021 14:51:53 -0800 Subject: [PATCH 0773/1212] Update aws/resource_aws_elasticache_replication_group.go Co-authored-by: Brian Flad --- aws/resource_aws_elasticache_replication_group.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index 18f0e418b55..b27e8e879e5 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -887,7 +887,11 @@ func deleteElasticacheReplicationGroup(replicationGroupID string, conn *elastica } _, err = waiter.ReplicationGroupDeleted(conn, replicationGroupID, timeout) - return err + if err != nil { + return fmt.Errorf("error waiting for ElastiCache Replication Group (%s) to delete: %w", err) + } + + return nil } func flattenElasticacheNodeGroupsToClusterMode(nodeGroups []*elasticache.NodeGroup) []map[string]interface{} { From c577fbeba8d12345dea30d6ba2258b7b27d6909f Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Wed, 20 Jan 2021 14:52:15 -0800 Subject: [PATCH 0774/1212] Update CHANGELOG.md for #17123 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 66eee7a0bc9..6c31f98e1bb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ FEATURES * **New Resource:** `aws_backup_global_settings` [GH-16475] * **New Resource:** `aws_sagemaker_feature_group` [GH-16728] +* **New Resource:** `aws_sagemaker_user_profile` [GH-17123] ENHANCEMENTS @@ -19,6 +20,7 @@ ENHANCEMENTS * resource/aws_ami_from_instance: Add `throughput` argument to `ebs_block_device` configuration block [GH-16631] * resource/aws_ebs_volume: Add `throughput` argument [GH-16517] * resource/aws_lightsail_instance: Add `ipv6_addresses` attribute [GH-17155] +* resource/aws_sagemaker_domain: Delete implicit EFS file system 
[GH-17123] BUG FIXES From 69ce52471beab3b8003d62224762d1f178acf51a Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 20 Jan 2021 16:06:41 -0500 Subject: [PATCH 0775/1212] tests: Enable AWSAT003 (hardcoded regions) --- GNUmakefile | 1 - 1 file changed, 1 deletion(-) diff --git a/GNUmakefile b/GNUmakefile index 8c6994b2392..dc9080296ef 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -87,7 +87,6 @@ golangci-lint: awsproviderlint: @awsproviderlint \ -c 1 \ - -AWSAT003=false \ -AWSAT006=false \ -AWSV001=false \ -R001=false \ From 96ad2909a8e2e2f1e3de4e9c703a08b2a5c18544 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 20 Jan 2021 15:29:03 -0800 Subject: [PATCH 0776/1212] Removes redundant error wrapping --- aws/resource_aws_elasticache_replication_group.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index b27e8e879e5..d5092095f62 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -881,14 +881,13 @@ func deleteElasticacheReplicationGroup(replicationGroupID string, conn *elastica if isAWSErr(err, elasticache.ErrCodeReplicationGroupNotFoundFault, "") { return nil } - if err != nil { - return fmt.Errorf("error deleting ElastiCache Replication Group: %w", err) + return err } _, err = waiter.ReplicationGroupDeleted(conn, replicationGroupID, timeout) if err != nil { - return fmt.Errorf("error waiting for ElastiCache Replication Group (%s) to delete: %w", err) + return err } return nil From c2b2fda41fbcb6db9375e6e8de1a0301d68394f4 Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Wed, 20 Jan 2021 15:58:09 -0800 Subject: [PATCH 0777/1212] Update CHANGELOG.md for #17141 --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6c31f98e1bb..99350a4e3e1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ FEATURES * **New Resource:** `aws_backup_global_settings` [GH-16475] * **New Resource:** `aws_sagemaker_feature_group` [GH-16728] +* **New Resource:** `aws_sagemaker_image_version` [GH-17141] * **New Resource:** `aws_sagemaker_user_profile` [GH-17123] ENHANCEMENTS @@ -27,6 +28,8 @@ BUG FIXES * resource/aws_api_gateway_authorizer: Ensure `authorizer_credentials` are configured when `type` is `COGNITO_USER_POOLS` [GH-16614] * resource/aws_lambda_function: Prevent panic with missing `FunctionConfiguration` `PackageType` attribute in API response [GH-16544] * resource/aws_lambda_function: Prevent panic with missing environment variable value [GH-17056] +* resource/aws_sagemaker_image: Fix catching image not found on read error [GH-17141] + ## 3.24.1 (January 15, 2021) From 61dfaa9415779b76b24327552849354045a05e9e Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 20 Jan 2021 18:58:23 -0500 Subject: [PATCH 0778/1212] resource/aws_api_gateway_rest_api: Additional OpenAPI specification acceptance testing and fix various attributes after import (#17099) * fix bug with rest api openapi removing policy * resource/aws_api_gateway_rest_api: Additional OpenAPI specification acceptance testing and fix various attributes after import Reference: https://github.com/hashicorp/terraform-provider-aws/issues/5364 Reference: https://github.com/hashicorp/terraform-provider-aws/issues/7161 Reference: https://github.com/hashicorp/terraform-provider-aws/issues/9722 Reference: https://github.com/hashicorp/terraform-provider-aws/issues/10766 Reference: 
https://github.com/hashicorp/terraform-provider-aws/pull/12432 Reference: https://github.com/hashicorp/terraform-provider-aws/issues/13841 Reference: https://github.com/hashicorp/terraform-provider-aws/issues/14290 Reference: https://github.com/hashicorp/terraform-provider-aws/issues/14660 Changes: ``` * resource/aws_api_gateway_rest_api: Ensure `api_key_source`, `binary_media_types`, `description`, `minimum_compression_size`, `name`, and `policy` configuration values are correctly applied as an override after OpenAPI specification import (`body` argument) * resource/aws_api_gateway_rest_api: Allow `api_key_source`, `binary_media_types`, and `description` arguments to be omitted from configuration with OpenAPI specification import (`body` argument) ``` The overall testing changes are: * Ensuring the basic test covers all attributes * Refactoring the basic test into per-attribute testing * Adding per-attribute tests to cover OpenAPI specifications (`body` argument) being set without Terraform configurations -- these should be allowed with Terraform showing a planned difference * Adding per-attribute tests to cover OpenAPI specifications (`body` argument) being set with Terraform configurations -- these should be allowed with the Terraform configuration value overriding the OpenAPI specification * Removing extraneous API object `TestCheckFunc` (covered by `ImportStateVerify` testing) It is worth mentioning that this does not cover the `disable_execute_api_endpoint` or `tags` attributes that can also be specified by OpenAPI since this change set is already very large. The `minimum_compression_size` attribute also needs an additional update to support OpenAPI-only configuration. Further updates can improve on this effort. Before code updates, these new acceptance tests show how the Terraform configuration value would not be applied if an OpenAPI specification was imported: ``` === CONT TestAccAWSAPIGatewayRestApi_ApiKeySource_OverrideBody resource_aws_api_gateway_rest_api_test.go:428: Step 1/4 error: Check failed: 1 error occurred: * Check 2/2 error: aws_api_gateway_rest_api.test: Attribute 'api_key_source' expected "AUTHORIZER", got "HEADER" --- FAIL: TestAccAWSAPIGatewayRestApi_ApiKeySource_OverrideBody (8.82s) === CONT TestAccAWSAPIGatewayRestApi_BinaryMediaTypes_OverrideBody resource_aws_api_gateway_rest_api_test.go:464: Step 1/4 error: Check failed: 1 error occurred: * Check 3/3 error: aws_api_gateway_rest_api.test: Attribute 'binary_media_types.0' expected "application/octet-stream", got "image/jpeg" === CONT TestAccAWSAPIGatewayRestApi_Description_OverrideBody resource_aws_api_gateway_rest_api_test.go:527: Step 1/4 error: Check failed: 1 error occurred: * Check 2/2 error: aws_api_gateway_rest_api.test: Attribute 'description' expected "tfdescription1", got "oasdescription1" --- FAIL: TestAccAWSAPIGatewayRestApi_Description_OverrideBody (9.60s) === CONT TestAccAWSAPIGatewayRestApi_MinimumCompressionSize_OverrideBody resource_aws_api_gateway_rest_api_test.go:688: Step 1/4 error: Check failed: 1 error occurred: * Check 2/2 error: aws_api_gateway_rest_api.test: Attribute 'minimum_compression_size' expected "1", got "5242880" --- FAIL: TestAccAWSAPIGatewayRestApi_MinimumCompressionSize_OverrideBody (8.41s) === CONT TestAccAWSAPIGatewayRestApi_Name_OverrideBody resource_aws_api_gateway_rest_api_test.go:528: Step 1/4 error: Check failed: 1 error occurred: * Check 2/2 error: aws_api_gateway_rest_api.test: Attribute 'name' expected "tf-acc-test-4252368909257291928", got "title1" --- FAIL: 
TestAccAWSAPIGatewayRestApi_Name_OverrideBody (8.57s) === CONT TestAccAWSAPIGatewayRestApi_Policy_OverrideBody resource_aws_api_gateway_rest_api_test.go:593: Step 1/4 error: Check failed: 1 error occurred: * Check 4/4 error: aws_api_gateway_rest_api.test: Attribute 'policy' didn't match "\"Allow\"", got "" --- FAIL: TestAccAWSAPIGatewayRestApi_Policy_OverrideBody (9.37s) ``` Before code updates, these acceptance tests show how the Terraform resource would report an unexpected difference for missing configurations that were imported by the OpenAPI specification: ``` === CONT TestAccAWSAPIGatewayRestApi_ApiKeySource_SetByBody resource_aws_api_gateway_rest_api_test.go:471: Step 1/2 error: After applying this test step, the plan was not empty. stdout: An execution plan has been generated and is shown below. Resource actions are indicated with the following symbols: ~ update in-place Terraform will perform the following actions: # aws_api_gateway_rest_api.test will be updated in-place ~ resource "aws_api_gateway_rest_api" "test" { ~ api_key_source = "AUTHORIZER" -> "HEADER" id = "5ja4mnzxta" name = "tf-acc-test-4415455482847955650" # (8 unchanged attributes hidden) # (1 unchanged block hidden) } Plan: 0 to add, 1 to change, 0 to destroy. --- FAIL: TestAccAWSAPIGatewayRestApi_ApiKeySource_SetByBody (20.65s) === CONT TestAccAWSAPIGatewayRestApi_BinaryMediaTypes_SetByBody resource_aws_api_gateway_rest_api_test.go:510: Step 1/2 error: After applying this test step, the plan was not empty. stdout: An execution plan has been generated and is shown below. Resource actions are indicated with the following symbols: ~ update in-place Terraform will perform the following actions: # aws_api_gateway_rest_api.test will be updated in-place ~ resource "aws_api_gateway_rest_api" "test" { ~ binary_media_types = [ - "application/octet-stream", ] id = "7we4bv4s8b" name = "tf-acc-test-2053199682951305540" # (8 unchanged attributes hidden) # (1 unchanged block hidden) } Plan: 0 to add, 1 to change, 0 to destroy. === CONT TestAccAWSAPIGatewayRestApi_Description_SetByBody resource_aws_api_gateway_rest_api_test.go:570: Step 1/2 error: After applying this test step, the plan was not empty. stdout: An execution plan has been generated and is shown below. Resource actions are indicated with the following symbols: ~ update in-place Terraform will perform the following actions: # aws_api_gateway_rest_api.test will be updated in-place ~ resource "aws_api_gateway_rest_api" "test" { - description = "oasdescription1" -> null id = "3k0fykhp76" name = "tf-acc-test-2107985362088533117" # (8 unchanged attributes hidden) # (1 unchanged block hidden) } Plan: 0 to add, 1 to change, 0 to destroy. --- FAIL: TestAccAWSAPIGatewayRestApi_Description_SetByBody (10.02s) === CONT TestAccAWSAPIGatewayRestApi_MinimumCompressionSize_SetByBody resource_aws_api_gateway_rest_api_test.go:731: Step 1/2 error: After applying this test step, the plan was not empty. stdout: An execution plan has been generated and is shown below. Resource actions are indicated with the following symbols: ~ update in-place Terraform will perform the following actions: # aws_api_gateway_rest_api.test will be updated in-place ~ resource "aws_api_gateway_rest_api" "test" { id = "bcmvzz0jfi" ~ minimum_compression_size = 1048576 -> -1 name = "tf-acc-test-2006611344091675720" # (7 unchanged attributes hidden) # (1 unchanged block hidden) } Plan: 0 to add, 1 to change, 0 to destroy. 
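One detail of the fix shown in the diff below: the `binary_media_types` patch operations embed media types such as `image/jpeg` in JSON Pointer paths (`/binaryMediaTypes/...`), so the values must be escaped per RFC 6901 before use. A sketch of what the provider's `escapeJsonPointer` helper presumably does (the helper itself is not part of this diff):

```go
package main

import (
	"fmt"
	"strings"
)

// escapeJSONPointer applies RFC 6901 escaping: "~" becomes "~0" and
// "/" becomes "~1". The "~" replacement must run first so that the
// "~1" sequences produced by the "/" replacement are not re-escaped.
func escapeJSONPointer(s string) string {
	s = strings.ReplaceAll(s, "~", "~0")
	return strings.ReplaceAll(s, "/", "~1")
}

func main() {
	fmt.Println("/binaryMediaTypes/" + escapeJSONPointer("image/jpeg"))
	// Output: /binaryMediaTypes/image~1jpeg
}
```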
--- FAIL: TestAccAWSAPIGatewayRestApi_MinimumCompressionSize_SetByBody (10.99s) ``` Additionally these new acceptance tests show how the Terraform resource already respected missing configurations that were imported by the OpenAPI specification: ``` --- PASS: TestAccAWSAPIGatewayRestApi_Policy_SetByBody (15.03s) ``` Output from acceptance testing: ``` --- PASS: TestAccAWSAPIGatewayRestApi_ApiKeySource (28.57s) --- PASS: TestAccAWSAPIGatewayRestApi_ApiKeySource_OverrideBody (52.53s) --- PASS: TestAccAWSAPIGatewayRestApi_ApiKeySource_SetByBody (25.48s) --- PASS: TestAccAWSAPIGatewayRestApi_basic (23.16s) --- PASS: TestAccAWSAPIGatewayRestApi_BinaryMediaTypes (80.33s) --- PASS: TestAccAWSAPIGatewayRestApi_BinaryMediaTypes_OverrideBody (34.45s) --- PASS: TestAccAWSAPIGatewayRestApi_BinaryMediaTypes_SetByBody (24.16s) --- PASS: TestAccAWSAPIGatewayRestApi_Body (26.69s) --- PASS: TestAccAWSAPIGatewayRestApi_Description (765.29s) --- PASS: TestAccAWSAPIGatewayRestApi_Description_OverrideBody (32.87s) --- PASS: TestAccAWSAPIGatewayRestApi_Description_SetByBody (51.31s) --- PASS: TestAccAWSAPIGatewayRestApi_DisableExecuteApiEndpoint (30.21s) --- PASS: TestAccAWSAPIGatewayRestApi_disappears (38.64s) --- PASS: TestAccAWSAPIGatewayRestApi_EndpointConfiguration (58.23s) --- PASS: TestAccAWSAPIGatewayRestApi_EndpointConfiguration_Private (15.02s) --- PASS: TestAccAWSAPIGatewayRestApi_EndpointConfiguration_VPCEndpoint (305.78s) --- PASS: TestAccAWSAPIGatewayRestApi_MinimumCompressionSize (42.89s) --- PASS: TestAccAWSAPIGatewayRestApi_MinimumCompressionSize_OverrideBody (35.97s) --- PASS: TestAccAWSAPIGatewayRestApi_MinimumCompressionSize_SetByBody (106.39s) --- PASS: TestAccAWSAPIGatewayRestApi_Name_OverrideBody (86.16s) --- PASS: TestAccAWSAPIGatewayRestApi_Parameters (39.90s) --- PASS: TestAccAWSAPIGatewayRestApi_Policy (683.47s) --- PASS: TestAccAWSAPIGatewayRestApi_Policy_OverrideBody (905.68s) --- PASS: TestAccAWSAPIGatewayRestApi_Policy_SetByBody (28.12s) --- PASS: TestAccAWSAPIGatewayRestApi_tags (32.94s) ``` * tests/resource/aws_api_gateway_rest_api: terrafmt fixes * tests/resource/aws_api_gateway_rest_api: Remove extraneous minimum_compression_size testing from basic test * docs/resource/aws_api_gateway_rest_api: Fix misspell * Apply suggestions from code review Co-authored-by: Dirk Avery <31492422+YakDriver@users.noreply.github.com> Co-authored-by: james.warren Co-authored-by: Dirk Avery <31492422+YakDriver@users.noreply.github.com> --- aws/resource_aws_api_gateway_rest_api.go | 168 +- aws/resource_aws_api_gateway_rest_api_test.go | 1396 +++++++++++++---- .../docs/r/api_gateway_rest_api.html.markdown | 14 +- 3 files changed, 1290 insertions(+), 288 deletions(-) diff --git a/aws/resource_aws_api_gateway_rest_api.go b/aws/resource_aws_api_gateway_rest_api.go index e8a1234c1c6..72c3f0355cb 100644 --- a/aws/resource_aws_api_gateway_rest_api.go +++ b/aws/resource_aws_api_gateway_rest_api.go @@ -34,16 +34,14 @@ func resourceAwsApiGatewayRestApi() *schema.Resource { "description": { Type: schema.TypeString, Optional: true, + Computed: true, }, "api_key_source": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{ - apigateway.ApiKeySourceTypeAuthorizer, - apigateway.ApiKeySourceTypeHeader, - }, true), - Default: apigateway.ApiKeySourceTypeHeader, + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(apigateway.ApiKeySourceType_Values(), false), }, "policy": { @@ -57,6 +55,7 @@ func 
resourceAwsApiGatewayRestApi() *schema.Resource { "binary_media_types": { Type: schema.TypeList, Optional: true, + Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, @@ -204,11 +203,84 @@ func resourceAwsApiGatewayRestApiCreate(d *schema.ResourceData, meta interface{} input.Parameters = stringMapToPointers(v.(map[string]interface{})) } - _, err := conn.PutRestApi(input) + output, err := conn.PutRestApi(input) if err != nil { return fmt.Errorf("error creating API Gateway specification: %s", err) } + + // Using PutRestApi with mode overwrite will remove any configuration + // that was done with CreateRestApi. Reconcile these changes by having + // any Terraform configured values overwrite imported configuration. + + updateInput := &apigateway.UpdateRestApiInput{ + RestApiId: aws.String(d.Id()), + PatchOperations: []*apigateway.PatchOperation{}, + } + + if v, ok := d.GetOk("api_key_source"); ok && v.(string) != aws.StringValue(output.ApiKeySource) { + updateInput.PatchOperations = append(updateInput.PatchOperations, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpReplace), + Path: aws.String("/apiKeySource"), + Value: aws.String(v.(string)), + }) + } + + if v, ok := d.GetOk("binary_media_types"); ok && len(v.([]interface{})) > 0 { + for _, elem := range aws.StringValueSlice(output.BinaryMediaTypes) { + updateInput.PatchOperations = append(updateInput.PatchOperations, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpRemove), + Path: aws.String("/binaryMediaTypes/" + escapeJsonPointer(elem)), + }) + } + + for _, elem := range v.([]interface{}) { + updateInput.PatchOperations = append(updateInput.PatchOperations, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpAdd), + Path: aws.String("/binaryMediaTypes/" + escapeJsonPointer(elem.(string))), + }) + } + } + + if v, ok := d.GetOk("description"); ok && v.(string) != aws.StringValue(output.Description) { + updateInput.PatchOperations = append(updateInput.PatchOperations, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpReplace), + Path: aws.String("/description"), + Value: aws.String(v.(string)), + }) + } + + if v := d.Get("minimum_compression_size").(int); v > -1 && int64(v) != aws.Int64Value(output.MinimumCompressionSize) { + updateInput.PatchOperations = append(updateInput.PatchOperations, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpReplace), + Path: aws.String("/minimumCompressionSize"), + Value: aws.String(strconv.Itoa(v)), + }) + } + + if v, ok := d.GetOk("name"); ok && v.(string) != aws.StringValue(output.Name) { + updateInput.PatchOperations = append(updateInput.PatchOperations, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpReplace), + Path: aws.String("/name"), + Value: aws.String(v.(string)), + }) + } + + if v, ok := d.GetOk("policy"); ok && v.(string) != aws.StringValue(output.Policy) { + updateInput.PatchOperations = append(updateInput.PatchOperations, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpReplace), + Path: aws.String("/policy"), + Value: aws.String(v.(string)), + }) + } + + if len(updateInput.PatchOperations) > 0 { + _, err := conn.UpdateRestApi(updateInput) + + if err != nil { + return fmt.Errorf("error updating REST API (%s) after OpenAPI import: %w", d.Id(), err) + } + } } return resourceAwsApiGatewayRestApiRead(d, meta) @@ -458,11 +530,86 @@ func resourceAwsApiGatewayRestApiUpdate(d *schema.ResourceData, meta interface{} input.Parameters = stringMapToPointers(v.(map[string]interface{})) } - _, err := conn.PutRestApi(input) + output, 
err := conn.PutRestApi(input) if err != nil { return fmt.Errorf("error updating API Gateway specification: %s", err) } + + // Using PutRestApi with mode overwrite will remove any configuration + // that was done previously. Reconcile these changes by having + // any Terraform configured values overwrite imported configuration. + + updateInput := &apigateway.UpdateRestApiInput{ + RestApiId: aws.String(d.Id()), + PatchOperations: []*apigateway.PatchOperation{}, + } + + if v, ok := d.GetOk("api_key_source"); ok && v.(string) != aws.StringValue(output.ApiKeySource) { + updateInput.PatchOperations = append(updateInput.PatchOperations, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpReplace), + Path: aws.String("/apiKeySource"), + Value: aws.String(v.(string)), + }) + } + + if v, ok := d.GetOk("binary_media_types"); ok && len(v.([]interface{})) > 0 { + for _, elem := range aws.StringValueSlice(output.BinaryMediaTypes) { + updateInput.PatchOperations = append(updateInput.PatchOperations, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpRemove), + Path: aws.String("/binaryMediaTypes/" + escapeJsonPointer(elem)), + }) + } + + for _, elem := range v.([]interface{}) { + updateInput.PatchOperations = append(updateInput.PatchOperations, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpAdd), + Path: aws.String("/binaryMediaTypes/" + escapeJsonPointer(elem.(string))), + }) + } + } + + if v, ok := d.GetOk("description"); ok && v.(string) != aws.StringValue(output.Description) { + updateInput.PatchOperations = append(updateInput.PatchOperations, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpReplace), + Path: aws.String("/description"), + Value: aws.String(v.(string)), + }) + } + + if v := d.Get("minimum_compression_size").(int); v > -1 && int64(v) != aws.Int64Value(output.MinimumCompressionSize) { + updateInput.PatchOperations = append(updateInput.PatchOperations, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpReplace), + Path: aws.String("/minimumCompressionSize"), + Value: aws.String(strconv.Itoa(v)), + }) + } + + if v, ok := d.GetOk("name"); ok && v.(string) != aws.StringValue(output.Name) { + updateInput.PatchOperations = append(updateInput.PatchOperations, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpReplace), + Path: aws.String("/name"), + Value: aws.String(v.(string)), + }) + } + + if v, ok := d.GetOk("policy"); ok && v.(string) != aws.StringValue(output.Policy) { + updateInput.PatchOperations = append(updateInput.PatchOperations, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpReplace), + Path: aws.String("/policy"), + Value: aws.String(v.(string)), + }) + } + + if len(updateInput.PatchOperations) > 0 { + _, err := conn.UpdateRestApi(updateInput) + + if err != nil { + return fmt.Errorf("error updating REST API (%s) after OpenAPI import: %w", d.Id(), err) + } + } + + return resourceAwsApiGatewayRestApiRead(d, meta) } } @@ -472,9 +619,8 @@ func resourceAwsApiGatewayRestApiUpdate(d *schema.ResourceData, meta interface{} }) if err != nil { - return err + return fmt.Errorf("error updating REST API (%s): %w", d.Id(), err) } - log.Printf("[DEBUG] Updated API Gateway %s", d.Id()) return resourceAwsApiGatewayRestApiRead(d, meta) } diff --git a/aws/resource_aws_api_gateway_rest_api_test.go b/aws/resource_aws_api_gateway_rest_api_test.go index 820cefcb3b7..ab157922eb7 100644 --- a/aws/resource_aws_api_gateway_rest_api_test.go +++ b/aws/resource_aws_api_gateway_rest_api_test.go @@ -75,21 +75,23 @@ func TestAccAWSAPIGatewayRestApi_basic(t 
*testing.T) { CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSAPIGatewayRestAPIConfig(rName), + Config: testAccAWSAPIGatewayRestAPIConfigName(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "api_key_source", "HEADER"), testAccMatchResourceAttrRegionalARNNoAccount(resourceName, "arn", "apigateway", regexp.MustCompile(`/restapis/+.`)), - testAccCheckAWSAPIGatewayRestAPINameAttribute(&conf, rName), - testAccCheckAWSAPIGatewayRestAPIMinimumCompressionSizeAttribute(&conf, 0), - resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "binary_media_types.#", "0"), + resource.TestCheckNoResourceAttr(resourceName, "body"), + testAccCheckResourceAttrRfc3339(resourceName, "created_date"), resource.TestCheckResourceAttr(resourceName, "description", ""), - resource.TestCheckResourceAttr(resourceName, "api_key_source", "HEADER"), - resource.TestCheckResourceAttr(resourceName, "disable_execute_api_endpoint", `false`), - resource.TestCheckResourceAttr(resourceName, "minimum_compression_size", "0"), - resource.TestCheckResourceAttrSet(resourceName, "created_date"), - resource.TestCheckResourceAttrSet(resourceName, "execution_arn"), - resource.TestCheckNoResourceAttr(resourceName, "binary_media_types"), + resource.TestCheckResourceAttr(resourceName, "disable_execute_api_endpoint", "false"), resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.#", "1"), + testAccMatchResourceAttrRegionalARN(resourceName, "execution_arn", "execute-api", regexp.MustCompile(`[a-z0-9]+`)), + resource.TestCheckResourceAttr(resourceName, "minimum_compression_size", "-1"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "parameters.%", "0"), + resource.TestMatchResourceAttr(resourceName, "root_resource_id", regexp.MustCompile(`[a-z0-9]+`)), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, { @@ -97,33 +99,6 @@ func TestAccAWSAPIGatewayRestApi_basic(t *testing.T) { ImportState: true, ImportStateVerify: true, }, - - { - Config: testAccAWSAPIGatewayRestAPIUpdateConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), - testAccCheckAWSAPIGatewayRestAPINameAttribute(&conf, rName), - testAccCheckAWSAPIGatewayRestAPIDescriptionAttribute(&conf, "test"), - testAccCheckAWSAPIGatewayRestAPIMinimumCompressionSizeAttribute(&conf, 10485760), - resource.TestCheckResourceAttr(resourceName, "name", rName), - resource.TestCheckResourceAttr(resourceName, "description", "test"), - resource.TestCheckResourceAttr(resourceName, "disable_execute_api_endpoint", `false`), - resource.TestCheckResourceAttr(resourceName, "minimum_compression_size", "10485760"), - resource.TestCheckResourceAttrSet(resourceName, "created_date"), - resource.TestCheckResourceAttrSet(resourceName, "execution_arn"), - resource.TestCheckResourceAttr(resourceName, "binary_media_types.#", "1"), - resource.TestCheckResourceAttr(resourceName, "binary_media_types.0", "application/octet-stream"), - ), - }, - - { - Config: testAccAWSAPIGatewayRestAPIDisableCompressionConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), - testAccCheckAWSAPIGatewayRestAPIMinimumCompressionSizeAttributeIsNil(&conf), - resource.TestCheckResourceAttr(resourceName, 
"minimum_compression_size", "-1"), - ), - }, }, }) } @@ -188,7 +163,7 @@ func TestAccAWSAPIGatewayRestApi_disappears(t *testing.T) { CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSAPIGatewayRestAPIConfig(rName), + Config: testAccAWSAPIGatewayRestAPIConfigName(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &restApi), testAccCheckResourceDisappears(testAccProvider, resourceAwsApiGatewayRestApi(), resourceName), @@ -401,9 +376,7 @@ func TestAccAWSAPIGatewayRestApi_EndpointConfiguration_VPCEndpoint(t *testing.T) }) } -func TestAccAWSAPIGatewayRestApi_api_key_source(t *testing.T) { - expectedAPIKeySource := "HEADER" - expectedUpdateAPIKeySource := "AUTHORIZER" +func TestAccAWSAPIGatewayRestApi_ApiKeySource(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_api_gateway_rest_api.test" @@ -413,9 +386,9 @@ func TestAccAWSAPIGatewayRestApi_api_key_source(t *testing.T) { CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSAPIGatewayRestAPIConfigWithAPIKeySource(rName), + Config: testAccAWSAPIGatewayRestAPIConfigApiKeySource(rName, "AUTHORIZER"), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "api_key_source", expectedAPIKeySource), + resource.TestCheckResourceAttr(resourceName, "api_key_source", "AUTHORIZER"), ), }, { @@ -424,22 +397,23 @@ func TestAccAWSAPIGatewayRestApi_api_key_source(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAWSAPIGatewayRestAPIConfigWithUpdateAPIKeySource(rName), + Config: testAccAWSAPIGatewayRestAPIConfigApiKeySource(rName, "HEADER"), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "api_key_source", expectedUpdateAPIKeySource), + resource.TestCheckResourceAttr(resourceName, "api_key_source", "HEADER"), ), }, { - Config: testAccAWSAPIGatewayRestAPIConfig(rName), + Config: testAccAWSAPIGatewayRestAPIConfigName(rName), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "api_key_source", expectedAPIKeySource), + resource.TestCheckResourceAttr(resourceName, "api_key_source", "HEADER"), ), }, }, }) } -func TestAccAWSAPIGatewayRestApi_disable_execute_api_endpoint(t *testing.T) { +func TestAccAWSAPIGatewayRestApi_ApiKeySource_OverrideBody(t *testing.T) { + var conf apigateway.RestApi rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_api_gateway_rest_api.test" @@ -449,37 +423,69 @@ func TestAccAWSAPIGatewayRestApi_disable_execute_api_endpoint(t *testing.T) { CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSAPIGatewayRestAPIConfig_DisableExecuteApiEndpoint(rName, false), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "disable_execute_api_endpoint", `false`), + Config: testAccAWSAPIGatewayRestAPIConfigApiKeySourceOverrideBody(rName, "AUTHORIZER", "HEADER"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "api_key_source", "AUTHORIZER"), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"body"}, }, + // Verify updated API key source still overrides { - Config: 
testAccAWSAPIGatewayRestAPIConfig_DisableExecuteApiEndpoint(rName, true), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "disable_execute_api_endpoint", `true`), + Config: testAccAWSAPIGatewayRestAPIConfigApiKeySourceOverrideBody(rName, "HEADER", "HEADER"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "api_key_source", "HEADER"), ), }, + // Verify updated body API key source is still overridden { - Config: testAccAWSAPIGatewayRestAPIConfig_DisableExecuteApiEndpoint(rName, false), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "disable_execute_api_endpoint", `false`), + Config: testAccAWSAPIGatewayRestAPIConfigApiKeySourceOverrideBody(rName, "HEADER", "AUTHORIZER"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "api_key_source", "HEADER"), ), }, }, }) } -func TestAccAWSAPIGatewayRestApi_policy(t *testing.T) { +func TestAccAWSAPIGatewayRestApi_ApiKeySource_SetByBody(t *testing.T) { + var conf apigateway.RestApi + rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_api_gateway_rest_api.test" - expectedPolicyText := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":"*"},"Action":"execute-api:Invoke","Resource":"*","Condition":{"IpAddress":{"aws:SourceIp":"123.123.123.123/32"}}}]}` - expectedUpdatePolicyText := `{"Version":"2012-10-17","Statement":[{"Effect":"Deny","Principal":{"AWS":"*"},"Action":"execute-api:Invoke","Resource":"*"}]}` + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccAPIGatewayTypeEDGEPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayRestAPIConfigApiKeySourceSetByBody(rName, "AUTHORIZER"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "api_key_source", "AUTHORIZER"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"body"}, + }, + }, + }) +} + +func TestAccAWSAPIGatewayRestApi_BinaryMediaTypes(t *testing.T) { + var conf apigateway.RestApi rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_api_gateway_rest_api.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccAPIGatewayTypeEDGEPreCheck(t) }, @@ -487,27 +493,106 @@ func TestAccAWSAPIGatewayRestApi_policy(t *testing.T) { CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSAPIGatewayRestAPIConfigWithPolicy(rName), + Config: testAccAWSAPIGatewayRestAPIConfigBinaryMediaTypes1(rName, "application/octet-stream"), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "policy", expectedPolicyText), + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "binary_media_types.#", "1"), + resource.TestCheckResourceAttr(resourceName, "binary_media_types.0", "application/octet-stream"), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"body"}, }, { - Config: testAccAWSAPIGatewayRestAPIConfigUpdatePolicy(rName), + Config: testAccAWSAPIGatewayRestAPIConfigBinaryMediaTypes1(rName, "application/octet"), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "policy", expectedUpdatePolicyText), + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "binary_media_types.#", "1"), + resource.TestCheckResourceAttr(resourceName, "binary_media_types.0", "application/octet"), + ), + }, + }, + }) +} + +func TestAccAWSAPIGatewayRestApi_BinaryMediaTypes_OverrideBody(t *testing.T) { + var conf apigateway.RestApi + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_api_gateway_rest_api.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccAPIGatewayTypeEDGEPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayRestAPIConfigBinaryMediaTypes1OverrideBody(rName, "application/octet-stream", "image/jpeg"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "binary_media_types.#", "1"), + resource.TestCheckResourceAttr(resourceName, "binary_media_types.0", "application/octet-stream"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"body"}, + }, + // Verify updated minimum compression size still overrides + { + Config: testAccAWSAPIGatewayRestAPIConfigBinaryMediaTypes1OverrideBody(rName, "application/octet", "image/jpeg"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "binary_media_types.#", "1"), + resource.TestCheckResourceAttr(resourceName, "binary_media_types.0", "application/octet"), + ), + }, + // Verify updated body minimum compression size is still overridden + { + Config: testAccAWSAPIGatewayRestAPIConfigBinaryMediaTypes1OverrideBody(rName, "application/octet", "image/png"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "binary_media_types.#", "1"), + resource.TestCheckResourceAttr(resourceName, "binary_media_types.0", "application/octet"), + ), + }, + }, + }) +} + +func TestAccAWSAPIGatewayRestApi_BinaryMediaTypes_SetByBody(t *testing.T) { + var conf apigateway.RestApi + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_api_gateway_rest_api.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccAPIGatewayTypeEDGEPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayRestAPIConfigBinaryMediaTypes1SetByBody(rName, "application/octet-stream"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "binary_media_types.#", "1"), + resource.TestCheckResourceAttr(resourceName, "binary_media_types.0", "application/octet-stream"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"body"}, 
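+ // The OpenAPI body is never read back from API Gateway, so ImportStateVerify cannot compare it.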
+ }, }, }) } -func TestAccAWSAPIGatewayRestApi_openapi(t *testing.T) { +func TestAccAWSAPIGatewayRestApi_Body(t *testing.T) { var conf apigateway.RestApi rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_api_gateway_rest_api.test" @@ -517,11 +602,11 @@ func TestAccAWSAPIGatewayRestApi_openapi(t *testing.T) { Providers: testAccProviders, CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, Steps: []resource.TestStep{ + // The body is expected to only set a title (name) and one route { - Config: testAccAWSAPIGatewayRestAPIConfigOpenAPI(rName), + Config: testAccAWSAPIGatewayRestAPIConfigBody(rName, "/test"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), - testAccCheckAWSAPIGatewayRestAPINameAttribute(&conf, rName), testAccCheckAWSAPIGatewayRestAPIRoutes(&conf, []string{"/", "/test"}), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "description", ""), @@ -537,10 +622,9 @@ func TestAccAWSAPIGatewayRestApi_openapi(t *testing.T) { ImportStateVerifyIgnore: []string{"body"}, }, { - Config: testAccAWSAPIGatewayRestAPIUpdateConfigOpenAPI(rName), + Config: testAccAWSAPIGatewayRestAPIConfigBody(rName, "/update"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), - testAccCheckAWSAPIGatewayRestAPINameAttribute(&conf, rName), testAccCheckAWSAPIGatewayRestAPIRoutes(&conf, []string{"/", "/update"}), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttrSet(resourceName, "created_date"), @@ -551,7 +635,7 @@ func TestAccAWSAPIGatewayRestApi_openapi(t *testing.T) { }) } -func TestAccAWSAPIGatewayRestApi_Parameters(t *testing.T) { +func TestAccAWSAPIGatewayRestApi_Description(t *testing.T) { var conf apigateway.RestApi rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_api_gateway_rest_api.test" @@ -562,92 +646,451 @@ func TestAccAWSAPIGatewayRestApi_Parameters(t *testing.T) { CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSAPIGatewayRestAPIConfigParameters1(rName, "basepath", "prepend"), - Check: resource.ComposeTestCheckFunc( + Config: testAccAWSAPIGatewayRestAPIConfigDescription(rName, "description1"), + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), - testAccCheckAWSAPIGatewayRestAPIRoutes(&conf, []string{"/", "/foo", "/foo/bar", "/foo/bar/baz", "/foo/bar/baz/test"}), + resource.TestCheckResourceAttr(resourceName, "description", "description1"), ), }, { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"body", "parameters"}, + ImportStateVerifyIgnore: []string{"body"}, }, { - Config: testAccAWSAPIGatewayRestAPIConfigParameters1(rName, "basepath", "ignore"), - Check: resource.ComposeTestCheckFunc( + Config: testAccAWSAPIGatewayRestAPIConfigDescription(rName, "description2"), + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), - testAccCheckAWSAPIGatewayRestAPIRoutes(&conf, []string{"/", "/test"}), + resource.TestCheckResourceAttr(resourceName, "description", "description2"), ), }, }, }) } -func testAccCheckAWSAPIGatewayRestAPINameAttribute(conf *apigateway.RestApi, name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *conf.Name != name { - return fmt.Errorf("Wrong Name: %q instead of %s", *conf.Name, name) - } - 
- return nil - } -} - -func testAccCheckAWSAPIGatewayRestAPIDescriptionAttribute(conf *apigateway.RestApi, description string) resource.TestCheckFunc { - return func(s *terraform.State) error { - if *conf.Description != description { - return fmt.Errorf("Wrong Description: %q", *conf.Description) - } - - return nil - } -} - -func testAccCheckAWSAPIGatewayRestAPIMinimumCompressionSizeAttribute(conf *apigateway.RestApi, minimumCompressionSize int64) resource.TestCheckFunc { - return func(s *terraform.State) error { - if conf.MinimumCompressionSize == nil { - return fmt.Errorf("MinimumCompressionSize should not be nil") - } - if *conf.MinimumCompressionSize != minimumCompressionSize { - return fmt.Errorf("Wrong MinimumCompressionSize: %d", *conf.MinimumCompressionSize) - } +func TestAccAWSAPIGatewayRestApi_Description_OverrideBody(t *testing.T) { + var conf apigateway.RestApi + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_api_gateway_rest_api.test" - return nil - } + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccAPIGatewayTypeEDGEPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayRestAPIConfigDescriptionOverrideBody(rName, "tfdescription1", "oasdescription1"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "description", "tfdescription1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"body"}, + }, + // Verify updated description still overrides + { + Config: testAccAWSAPIGatewayRestAPIConfigDescriptionOverrideBody(rName, "tfdescription2", "oasdescription1"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "description", "tfdescription2"), + ), + }, + // Verify updated body description is still overridden + { + Config: testAccAWSAPIGatewayRestAPIConfigDescriptionOverrideBody(rName, "tfdescription2", "oasdescription2"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "description", "tfdescription2"), + ), + }, + }, + }) } -func testAccCheckAWSAPIGatewayRestAPIMinimumCompressionSizeAttributeIsNil(conf *apigateway.RestApi) resource.TestCheckFunc { - return func(s *terraform.State) error { - if conf.MinimumCompressionSize != nil { - return fmt.Errorf("MinimumCompressionSize should be nil: %d", *conf.MinimumCompressionSize) - } +func TestAccAWSAPIGatewayRestApi_Description_SetByBody(t *testing.T) { + var conf apigateway.RestApi + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_api_gateway_rest_api.test" - return nil - } + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccAPIGatewayTypeEDGEPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayRestAPIConfigDescriptionSetByBody(rName, "oasdescription1"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "description", "oasdescription1"), + ), + }, + { + ResourceName: 
resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"body"}, + }, + }, + }) } -func testAccCheckAWSAPIGatewayRestAPIRoutes(conf *apigateway.RestApi, routes []string) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).apigatewayconn - - resp, err := conn.GetResources(&apigateway.GetResourcesInput{ - RestApiId: conf.Id, - }) - if err != nil { - return err - } - - actualRoutePaths := map[string]bool{} - for _, resource := range resp.Items { - actualRoutePaths[*resource.Path] = true - } +func TestAccAWSAPIGatewayRestApi_DisableExecuteApiEndpoint(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_api_gateway_rest_api.test" - for _, route := range routes { - if _, ok := actualRoutePaths[route]; !ok { - return fmt.Errorf("Expected path %v but did not find it in %v", route, actualRoutePaths) - } + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccAPIGatewayTypeEDGEPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayRestAPIConfig_DisableExecuteApiEndpoint(rName, false), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, "disable_execute_api_endpoint", `false`), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSAPIGatewayRestAPIConfig_DisableExecuteApiEndpoint(rName, true), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, "disable_execute_api_endpoint", `true`), + ), + }, + { + Config: testAccAWSAPIGatewayRestAPIConfig_DisableExecuteApiEndpoint(rName, false), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, "disable_execute_api_endpoint", `false`), + ), + }, + }, + }) +} + +func TestAccAWSAPIGatewayRestApi_MinimumCompressionSize(t *testing.T) { + var conf apigateway.RestApi + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_api_gateway_rest_api.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccAPIGatewayTypeEDGEPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayRestAPIConfigMinimumCompressionSize(rName, 0), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "minimum_compression_size", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"body"}, + }, + { + Config: testAccAWSAPIGatewayRestAPIConfigMinimumCompressionSize(rName, -1), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "minimum_compression_size", "-1"), + ), + }, + { + Config: testAccAWSAPIGatewayRestAPIConfigMinimumCompressionSize(rName, 5242880), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "minimum_compression_size", "5242880"), + ), + }, + }, + }) +} + +func TestAccAWSAPIGatewayRestApi_MinimumCompressionSize_OverrideBody(t *testing.T) { + var conf apigateway.RestApi + rName := 
acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_api_gateway_rest_api.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccAPIGatewayTypeEDGEPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayRestAPIConfigMinimumCompressionSizeOverrideBody(rName, 1, 5242880), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "minimum_compression_size", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"body"}, + }, + // Verify updated minimum compression size still overrides + { + Config: testAccAWSAPIGatewayRestAPIConfigMinimumCompressionSizeOverrideBody(rName, 2, 5242880), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "minimum_compression_size", "2"), + ), + }, + // Verify updated body minimum compression size is still overridden + { + Config: testAccAWSAPIGatewayRestAPIConfigMinimumCompressionSizeOverrideBody(rName, 2, 1048576), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "minimum_compression_size", "2"), + ), + }, + }, + }) +} + +func TestAccAWSAPIGatewayRestApi_MinimumCompressionSize_SetByBody(t *testing.T) { + var conf apigateway.RestApi + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_api_gateway_rest_api.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccAPIGatewayTypeEDGEPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayRestAPIConfigMinimumCompressionSizeSetByBody(rName, 1048576), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "minimum_compression_size", "1048576"), + ), + // TODO: The attribute type must be changed to NullableTypeInt so it can be Computed properly. 
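+ // Until then, a refresh plans the body-provided minimumCompressionSize back to the schema's -1 default (see the earlier SetByBody failure output), hence the expected non-empty plan.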
+ ExpectNonEmptyPlan: true, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"body"}, + }, + }, + }) +} + +func TestAccAWSAPIGatewayRestApi_Name_OverrideBody(t *testing.T) { + var conf apigateway.RestApi + rName := acctest.RandomWithPrefix("tf-acc-test") + rName2 := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_api_gateway_rest_api.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccAPIGatewayTypeEDGEPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayRestAPIConfigNameOverrideBody(rName, "title1"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", rName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"body"}, + }, + // Verify updated name still overrides + { + Config: testAccAWSAPIGatewayRestAPIConfigNameOverrideBody(rName2, "title1"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", rName2), + ), + }, + // Verify updated title still overrides + { + Config: testAccAWSAPIGatewayRestAPIConfigNameOverrideBody(rName2, "title2"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", rName2), + ), + }, + }, + }) +} + +func TestAccAWSAPIGatewayRestApi_Parameters(t *testing.T) { + var conf apigateway.RestApi + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_api_gateway_rest_api.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccAPIGatewayTypeEDGEPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayRestAPIConfigParameters1(rName, "basepath", "prepend"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + testAccCheckAWSAPIGatewayRestAPIRoutes(&conf, []string{"/", "/foo", "/foo/bar", "/foo/bar/baz", "/foo/bar/baz/test"}), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"body", "parameters"}, + }, + { + Config: testAccAWSAPIGatewayRestAPIConfigParameters1(rName, "basepath", "ignore"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + testAccCheckAWSAPIGatewayRestAPIRoutes(&conf, []string{"/", "/test"}), + ), + }, + }, + }) +} + +func TestAccAWSAPIGatewayRestApi_Policy(t *testing.T) { + resourceName := "aws_api_gateway_rest_api.test" + expectedPolicyText := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":"*"},"Action":"execute-api:Invoke","Resource":"*","Condition":{"IpAddress":{"aws:SourceIp":"123.123.123.123/32"}}}]}` + expectedUpdatePolicyText := `{"Version":"2012-10-17","Statement":[{"Effect":"Deny","Principal":{"AWS":"*"},"Action":"execute-api:Invoke","Resource":"*"}]}` + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); 
testAccAPIGatewayTypeEDGEPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayRestAPIConfigWithPolicy(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, "policy", expectedPolicyText), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSAPIGatewayRestAPIConfigUpdatePolicy(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, "policy", expectedUpdatePolicyText), + ), + }, + }, + }) +} + +func TestAccAWSAPIGatewayRestApi_Policy_OverrideBody(t *testing.T) { + var conf apigateway.RestApi + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_api_gateway_rest_api.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccAPIGatewayTypeEDGEPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayRestAPIConfigPolicyOverrideBody(rName, "/test", "Allow"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + testAccCheckAWSAPIGatewayRestAPIRoutes(&conf, []string{"/", "/test"}), + resource.TestMatchResourceAttr(resourceName, "policy", regexp.MustCompile(`"Allow"`)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"body"}, + }, + // Verify updated body still has override policy + { + Config: testAccAWSAPIGatewayRestAPIConfigPolicyOverrideBody(rName, "/test2", "Allow"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + testAccCheckAWSAPIGatewayRestAPIRoutes(&conf, []string{"/", "/test2"}), + resource.TestMatchResourceAttr(resourceName, "policy", regexp.MustCompile(`"Allow"`)), + ), + }, + // Verify updated policy still overrides body + { + Config: testAccAWSAPIGatewayRestAPIConfigPolicyOverrideBody(rName, "/test2", "Deny"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + testAccCheckAWSAPIGatewayRestAPIRoutes(&conf, []string{"/", "/test2"}), + resource.TestMatchResourceAttr(resourceName, "policy", regexp.MustCompile(`"Deny"`)), + ), + }, + }, + }) +} + +func TestAccAWSAPIGatewayRestApi_Policy_SetByBody(t *testing.T) { + var conf apigateway.RestApi + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_api_gateway_rest_api.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccAPIGatewayTypeEDGEPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayRestAPIConfigPolicySetByBody(rName, "Allow"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestMatchResourceAttr(resourceName, "policy", regexp.MustCompile(`"Allow"`)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"body"}, + }, + }, + }) +} + +func testAccCheckAWSAPIGatewayRestAPIRoutes(conf *apigateway.RestApi, routes []string) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := 
testAccProvider.Meta().(*AWSClient).apigatewayconn + + resp, err := conn.GetResources(&apigateway.GetResourcesInput{ + RestApiId: conf.Id, + }) + if err != nil { + return err + } + + actualRoutePaths := map[string]bool{} + for _, resource := range resp.Items { + actualRoutePaths[*resource.Path] = true + } + + for _, route := range routes { + if _, ok := actualRoutePaths[route]; !ok { + return fmt.Errorf("Expected path %v but did not find it in %v", route, actualRoutePaths) + } delete(actualRoutePaths, route) } @@ -721,15 +1164,6 @@ func testAccAPIGatewayTypeEDGEPreCheck(t *testing.T) { } } -func testAccAWSAPIGatewayRestAPIConfig(rName string) string { - return fmt.Sprintf(` -resource "aws_api_gateway_rest_api" "test" { - name = "%s" - minimum_compression_size = 0 -} -`, rName) -} - func testAccAWSAPIGatewayRestAPIConfig_EndpointConfiguration(rName, endpointType string) string { return fmt.Sprintf(` resource "aws_api_gateway_rest_api" "test" { @@ -927,30 +1361,11 @@ resource "aws_api_gateway_rest_api" "test" { `, rName, tagKey1, tagValue1, tagKey2, tagValue2) } -func testAccAWSAPIGatewayRestAPIConfigWithAPIKeySource(rName string) string { - return fmt.Sprintf(` -resource "aws_api_gateway_rest_api" "test" { - name = "%s" - api_key_source = "HEADER" -} -`, rName) -} - -func testAccAWSAPIGatewayRestAPIConfigWithUpdateAPIKeySource(rName string) string { - return fmt.Sprintf(` -resource "aws_api_gateway_rest_api" "test" { - name = "%s" - api_key_source = "AUTHORIZER" -} -`, rName) -} - func testAccAWSAPIGatewayRestAPIConfigWithPolicy(rName string) string { return fmt.Sprintf(` resource "aws_api_gateway_rest_api" "test" { - name = "%s" - minimum_compression_size = 0 - policy = < Date: Wed, 20 Jan 2021 16:00:46 -0800 Subject: [PATCH 0779/1212] Separate mutually exclusive update steps --- ...ource_aws_elasticache_replication_group.go | 428 ++++++++++-------- 1 file changed, 231 insertions(+), 197 deletions(-) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index d5092095f62..ad78efef8f9 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -532,198 +532,14 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i conn := meta.(*AWSClient).elasticacheconn if d.HasChange("cluster_mode.0.num_node_groups") { - o, n := d.GetChange("cluster_mode.0.num_node_groups") - oldNumNodeGroups := o.(int) - newNumNodeGroups := n.(int) - - input := &elasticache.ModifyReplicationGroupShardConfigurationInput{ - ApplyImmediately: aws.Bool(true), - NodeGroupCount: aws.Int64(int64(newNumNodeGroups)), - ReplicationGroupId: aws.String(d.Id()), - } - - if oldNumNodeGroups > newNumNodeGroups { - // Node Group IDs are 1 indexed: 0001 through 0015 - // Loop from highest old ID until we reach highest new ID - nodeGroupsToRemove := []string{} - for i := oldNumNodeGroups; i > newNumNodeGroups; i-- { - nodeGroupID := fmt.Sprintf("%04d", i) - nodeGroupsToRemove = append(nodeGroupsToRemove, nodeGroupID) - } - input.NodeGroupsToRemove = aws.StringSlice(nodeGroupsToRemove) - } - - log.Printf("[DEBUG] Modifying ElastiCache Replication Group (%s) shard configuration: %s", d.Id(), input) - _, err := conn.ModifyReplicationGroupShardConfiguration(input) + err := elasticacheReplicationGroupModifyShardConfiguration(conn, d) if err != nil { - return fmt.Errorf("error modifying ElastiCache Replication Group shard configuration: %w", err) + return fmt.Errorf("error modifying ElastiCache 
Replication Group (%s) shard configuration: %w", d.Id(), err) } - - _, err = waiter.ReplicationGroupAvailable(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) + } else if d.HasChange("number_cache_clusters") { + err := elasticacheReplicationGroupModifyNumCacheClusters(conn, d) if err != nil { - return fmt.Errorf("error waiting for ElastiCache Replication Group (%s) shard reconfiguration completion: %w", d.Id(), err) - } - } - - if d.HasChange("number_cache_clusters") { - o, n := d.GetChange("number_cache_clusters") - oldNumberCacheClusters := o.(int) - newNumberCacheClusters := n.(int) - - // We will try to use similar naming to the console, which are 1 indexed: RGID-001 through RGID-006 - var addClusterIDs, removeClusterIDs []string - for clusterID := oldNumberCacheClusters + 1; clusterID <= newNumberCacheClusters; clusterID++ { - addClusterIDs = append(addClusterIDs, fmt.Sprintf("%s-%03d", d.Id(), clusterID)) - } - for clusterID := oldNumberCacheClusters; clusterID >= (newNumberCacheClusters + 1); clusterID-- { - removeClusterIDs = append(removeClusterIDs, fmt.Sprintf("%s-%03d", d.Id(), clusterID)) - } - - if len(addClusterIDs) > 0 { - // Kick off all the Cache Cluster creations - for _, cacheClusterID := range addClusterIDs { - input := &elasticache.CreateCacheClusterInput{ - CacheClusterId: aws.String(cacheClusterID), - ReplicationGroupId: aws.String(d.Id()), - } - _, err := createElasticacheCacheCluster(conn, input) - if err != nil { - // Future enhancement: we could retry creation with random ID on naming collision - // if isAWSErr(err, elasticache.ErrCodeCacheClusterAlreadyExistsFault, "") { ... } - return fmt.Errorf("error creating ElastiCache Cache Cluster (adding replica): %w", err) - } - } - - // Wait for all Cache Cluster creations - for _, cacheClusterID := range addClusterIDs { - _, err := waiter.CacheClusterAvailable(conn, cacheClusterID, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("error waiting for ElastiCache Cache Cluster (%s) to be created (adding replica): %w", cacheClusterID, err) - } - } - } - - if len(removeClusterIDs) > 0 { - // Cannot reassign primary cluster ID while automatic failover is enabled - // If we temporarily disable automatic failover, ensure we re-enable it - reEnableAutomaticFailover := false - - // Kick off all the Cache Cluster deletions - for _, cacheClusterID := range removeClusterIDs { - var finalSnapshotID = d.Get("final_snapshot_identifier").(string) - err := deleteElasticacheCacheCluster(conn, cacheClusterID, finalSnapshotID) - if err != nil { - // Future enhancement: we could retry deletion with random existing ID on missing name - // if isAWSErr(err, elasticache.ErrCodeCacheClusterNotFoundFault, "") { ... 
} - if !isAWSErr(err, elasticache.ErrCodeInvalidCacheClusterStateFault, "serving as primary") { - return fmt.Errorf("error deleting ElastiCache Cache Cluster (%s) (removing replica): %w", cacheClusterID, err) - } - - // Use Replication Group MemberClusters to find a new primary cache cluster ID - // that is not in removeClusterIDs - newPrimaryClusterID := "" - - describeReplicationGroupInput := &elasticache.DescribeReplicationGroupsInput{ - ReplicationGroupId: aws.String(d.Id()), - } - log.Printf("[DEBUG] Reading ElastiCache Replication Group: %s", describeReplicationGroupInput) - output, err := conn.DescribeReplicationGroups(describeReplicationGroupInput) - if err != nil { - return fmt.Errorf("error reading ElastiCache Replication Group (%s) to determine new primary: %w", d.Id(), err) - } - if output == nil || len(output.ReplicationGroups) == 0 || len(output.ReplicationGroups[0].MemberClusters) == 0 { - return fmt.Errorf("error reading ElastiCache Replication Group (%s) to determine new primary: missing replication group information", d.Id()) - } - - for _, memberClusterPtr := range output.ReplicationGroups[0].MemberClusters { - memberCluster := aws.StringValue(memberClusterPtr) - memberClusterInRemoveClusterIDs := false - for _, removeClusterID := range removeClusterIDs { - if memberCluster == removeClusterID { - memberClusterInRemoveClusterIDs = true - break - } - } - if !memberClusterInRemoveClusterIDs { - newPrimaryClusterID = memberCluster - break - } - } - if newPrimaryClusterID == "" { - return fmt.Errorf("error reading ElastiCache Replication Group (%s) to determine new primary: unable to assign new primary", d.Id()) - } - - // Disable automatic failover if enabled - // Must be applied previous to trying to set new primary - // InvalidReplicationGroupState: Cannot manually promote a new master cache cluster while autofailover is enabled - if aws.StringValue(output.ReplicationGroups[0].AutomaticFailover) == elasticache.AutomaticFailoverStatusEnabled { - // Be kind and rewind - if d.Get("automatic_failover_enabled").(bool) { - reEnableAutomaticFailover = true - } - - modifyReplicationGroupInput := &elasticache.ModifyReplicationGroupInput{ - ApplyImmediately: aws.Bool(true), - AutomaticFailoverEnabled: aws.Bool(false), - ReplicationGroupId: aws.String(d.Id()), - } - log.Printf("[DEBUG] Modifying ElastiCache Replication Group: %s", modifyReplicationGroupInput) - _, err = conn.ModifyReplicationGroup(modifyReplicationGroupInput) - if err != nil { - return fmt.Errorf("error modifying ElastiCache Replication Group (%s) to set new primary: %sw", d.Id(), err) - } - _, err = waiter.ReplicationGroupAvailable(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("error waiting for ElastiCache Replication Group (%s) to be available: %w", d.Id(), err) - } - } - - // Set new primary - modifyReplicationGroupInput := &elasticache.ModifyReplicationGroupInput{ - ApplyImmediately: aws.Bool(true), - PrimaryClusterId: aws.String(newPrimaryClusterID), - ReplicationGroupId: aws.String(d.Id()), - } - log.Printf("[DEBUG] Modifying ElastiCache Replication Group: %s", modifyReplicationGroupInput) - _, err = conn.ModifyReplicationGroup(modifyReplicationGroupInput) - if err != nil { - return fmt.Errorf("error modifying ElastiCache Replication Group (%s) to set new primary: %w", d.Id(), err) - } - _, err = waiter.ReplicationGroupAvailable(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("error waiting for ElastiCache Replication Group (%s) to be 
available: %w", d.Id(), err) - } - - // Finally retry deleting the cache cluster - var finalSnapshotID = d.Get("final_snapshot_identifier").(string) - err = deleteElasticacheCacheCluster(conn, cacheClusterID, finalSnapshotID) - if err != nil { - return fmt.Errorf("error deleting ElastiCache Cache Cluster (%s) (removing replica after setting new primary): %w", cacheClusterID, err) - } - } - } - - // Wait for all Cache Cluster deletions - for _, cacheClusterID := range removeClusterIDs { - _, err := waiter.CacheClusterDeleted(conn, cacheClusterID, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("error waiting for ElastiCache Cache Cluster (%s) to be deleted (removing replica): %w", cacheClusterID, err) - } - } - - // Re-enable automatic failover if we needed to temporarily disable it - if reEnableAutomaticFailover { - input := &elasticache.ModifyReplicationGroupInput{ - ApplyImmediately: aws.Bool(true), - AutomaticFailoverEnabled: aws.Bool(true), - ReplicationGroupId: aws.String(d.Id()), - } - log.Printf("[DEBUG] Modifying ElastiCache Replication Group: %s", input) - _, err := conn.ModifyReplicationGroup(input) - if err != nil { - return fmt.Errorf("error modifying ElastiCache Replication Group (%s) to re-enable automatic failover: %w", d.Id(), err) - } - } + return fmt.Errorf("error modifying ElastiCache Replication Group (%s) clusters: %w", d.Id(), err) } } @@ -861,15 +677,15 @@ func deleteElasticacheReplicationGroup(replicationGroupID string, conn *elastica // 10 minutes should give any creating/deleting cache clusters or snapshots time to complete err := resource.Retry(10*time.Minute, func() *resource.RetryError { _, err := conn.DeleteReplicationGroup(input) + if isAWSErr(err, elasticache.ErrCodeReplicationGroupNotFoundFault, "") { + return nil + } + // Cache Cluster is creating/deleting or Replication Group is snapshotting + // InvalidReplicationGroupState: Cache cluster tf-acc-test-uqhe-003 is not in a valid state to be deleted + if isAWSErr(err, elasticache.ErrCodeInvalidReplicationGroupStateFault, "") { + return resource.RetryableError(err) + } if err != nil { - if isAWSErr(err, elasticache.ErrCodeReplicationGroupNotFoundFault, "") { - return nil - } - // Cache Cluster is creating/deleting or Replication Group is snapshotting - // InvalidReplicationGroupState: Cache cluster tf-acc-test-uqhe-003 is not in a valid state to be deleted - if isAWSErr(err, elasticache.ErrCodeInvalidReplicationGroupStateFault, "") { - return resource.RetryableError(err) - } return resource.NonRetryableError(err) } return nil @@ -911,3 +727,221 @@ func validateAwsElastiCacheReplicationGroupEngine(v interface{}, k string) (ws [ } return } + +func elasticacheReplicationGroupModifyShardConfiguration(conn *elasticache.ElastiCache, d *schema.ResourceData) error { + o, n := d.GetChange("cluster_mode.0.num_node_groups") + oldNumNodeGroups := o.(int) + newNumNodeGroups := n.(int) + + input := &elasticache.ModifyReplicationGroupShardConfigurationInput{ + ApplyImmediately: aws.Bool(true), + NodeGroupCount: aws.Int64(int64(newNumNodeGroups)), + ReplicationGroupId: aws.String(d.Id()), + } + + if oldNumNodeGroups > newNumNodeGroups { + // Node Group IDs are 1 indexed: 0001 through 0015 + // Loop from highest old ID until we reach highest new ID + nodeGroupsToRemove := []string{} + for i := oldNumNodeGroups; i > newNumNodeGroups; i-- { + nodeGroupID := fmt.Sprintf("%04d", i) + nodeGroupsToRemove = append(nodeGroupsToRemove, nodeGroupID) + } + input.NodeGroupsToRemove = 
aws.StringSlice(nodeGroupsToRemove) + } + + log.Printf("[DEBUG] Modifying ElastiCache Replication Group (%s) shard configuration: %s", d.Id(), input) + _, err := conn.ModifyReplicationGroupShardConfiguration(input) + if err != nil { + return fmt.Errorf("error modifying ElastiCache Replication Group shard configuration: %w", err) + } + + _, err = waiter.ReplicationGroupAvailable(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf("error waiting for ElastiCache Replication Group (%s) shard reconfiguration completion: %w", d.Id(), err) + } + + return nil +} + +func elasticacheReplicationGroupModifyNumCacheClusters(conn *elasticache.ElastiCache, d *schema.ResourceData) error { + o, n := d.GetChange("number_cache_clusters") + oldNumberCacheClusters := o.(int) + newNumberCacheClusters := n.(int) + + // var err error + if newNumberCacheClusters > oldNumberCacheClusters { + err := elasticacheReplicationGroupIncreaseNumCacheClusters(conn, d.Id(), oldNumberCacheClusters, newNumberCacheClusters, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } else { + err := elasticacheReplicationGroupReduceNumCacheClusters(conn, d.Id(), oldNumberCacheClusters, newNumberCacheClusters, d.Timeout(schema.TimeoutUpdate), d) + if err != nil { + return err + } + } + + return nil +} + +func elasticacheReplicationGroupIncreaseNumCacheClusters(conn *elasticache.ElastiCache, replicationGroupID string, o, n int, timeout time.Duration) error { + var addClusterIDs []string + for clusterID := o + 1; clusterID <= n; clusterID++ { + addClusterIDs = append(addClusterIDs, fmt.Sprintf("%s-%03d", replicationGroupID, clusterID)) + } + + // Kick off all the Cache Cluster creations + for _, cacheClusterID := range addClusterIDs { + input := &elasticache.CreateCacheClusterInput{ + CacheClusterId: aws.String(cacheClusterID), + ReplicationGroupId: aws.String(replicationGroupID), + } + _, err := createElasticacheCacheCluster(conn, input) + if err != nil { + // Future enhancement: we could retry creation with random ID on naming collision + // if isAWSErr(err, elasticache.ErrCodeCacheClusterAlreadyExistsFault, "") { ... 
} + return fmt.Errorf("error creating ElastiCache Cache Cluster (adding replica): %w", err) + } + } + + // Wait for all Cache Cluster creations + for _, cacheClusterID := range addClusterIDs { + _, err := waiter.CacheClusterAvailable(conn, cacheClusterID, timeout) + if err != nil { + return fmt.Errorf("error waiting for ElastiCache Cache Cluster (%s) to be created (adding replica): %w", cacheClusterID, err) + } + } + + return nil +} + +func elasticacheReplicationGroupReduceNumCacheClusters(conn *elasticache.ElastiCache, replicationGroupID string, o, n int, timeout time.Duration, d *schema.ResourceData) error { + var removeClusterIDs []string + for clusterID := o; clusterID >= (n + 1); clusterID-- { + removeClusterIDs = append(removeClusterIDs, fmt.Sprintf("%s-%03d", d.Id(), clusterID)) + } + + // Cannot reassign primary cluster ID while automatic failover is enabled + // If we temporarily disable automatic failover, ensure we re-enable it + reEnableAutomaticFailover := false + + // Kick off all the Cache Cluster deletions + for _, cacheClusterID := range removeClusterIDs { + var finalSnapshotID = d.Get("final_snapshot_identifier").(string) + err := deleteElasticacheCacheCluster(conn, cacheClusterID, finalSnapshotID) + if err != nil { + // Future enhancement: we could retry deletion with random existing ID on missing name + // if isAWSErr(err, elasticache.ErrCodeCacheClusterNotFoundFault, "") { ... } + if !isAWSErr(err, elasticache.ErrCodeInvalidCacheClusterStateFault, "serving as primary") { + return fmt.Errorf("error deleting ElastiCache Cache Cluster (%s) (removing replica): %w", cacheClusterID, err) + } + + // Use Replication Group MemberClusters to find a new primary cache cluster ID + // that is not in removeClusterIDs + newPrimaryClusterID := "" + + describeReplicationGroupInput := &elasticache.DescribeReplicationGroupsInput{ + ReplicationGroupId: aws.String(d.Id()), + } + log.Printf("[DEBUG] Reading ElastiCache Replication Group: %s", describeReplicationGroupInput) + output, err := conn.DescribeReplicationGroups(describeReplicationGroupInput) + if err != nil { + return fmt.Errorf("error reading ElastiCache Replication Group (%s) to determine new primary: %w", d.Id(), err) + } + if output == nil || len(output.ReplicationGroups) == 0 || len(output.ReplicationGroups[0].MemberClusters) == 0 { + return fmt.Errorf("error reading ElastiCache Replication Group (%s) to determine new primary: missing replication group information", d.Id()) + } + + for _, memberClusterPtr := range output.ReplicationGroups[0].MemberClusters { + memberCluster := aws.StringValue(memberClusterPtr) + memberClusterInRemoveClusterIDs := false + for _, removeClusterID := range removeClusterIDs { + if memberCluster == removeClusterID { + memberClusterInRemoveClusterIDs = true + break + } + } + if !memberClusterInRemoveClusterIDs { + newPrimaryClusterID = memberCluster + break + } + } + if newPrimaryClusterID == "" { + return fmt.Errorf("error reading ElastiCache Replication Group (%s) to determine new primary: unable to assign new primary", d.Id()) + } + + // Disable automatic failover if enabled + // Must be applied previous to trying to set new primary + // InvalidReplicationGroupState: Cannot manually promote a new master cache cluster while autofailover is enabled + if aws.StringValue(output.ReplicationGroups[0].AutomaticFailover) == elasticache.AutomaticFailoverStatusEnabled { + // Be kind and rewind + if d.Get("automatic_failover_enabled").(bool) { + reEnableAutomaticFailover = true + } + + 
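// Disabling automatic failover must fully apply (the replication group returns
+ // to `available`) before the new primary can be promoted below.
+ 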
modifyReplicationGroupInput := &elasticache.ModifyReplicationGroupInput{ + ApplyImmediately: aws.Bool(true), + AutomaticFailoverEnabled: aws.Bool(false), + ReplicationGroupId: aws.String(d.Id()), + } + log.Printf("[DEBUG] Modifying ElastiCache Replication Group: %s", modifyReplicationGroupInput) + _, err = conn.ModifyReplicationGroup(modifyReplicationGroupInput) + if err != nil { + return fmt.Errorf("error modifying ElastiCache Replication Group (%s) to set new primary: %w", d.Id(), err) + } + _, err = waiter.ReplicationGroupAvailable(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf("error waiting for ElastiCache Replication Group (%s) to be available: %w", d.Id(), err) + } + } + + // Set new primary + modifyReplicationGroupInput := &elasticache.ModifyReplicationGroupInput{ + ApplyImmediately: aws.Bool(true), + PrimaryClusterId: aws.String(newPrimaryClusterID), + ReplicationGroupId: aws.String(d.Id()), + } + log.Printf("[DEBUG] Modifying ElastiCache Replication Group: %s", modifyReplicationGroupInput) + _, err = conn.ModifyReplicationGroup(modifyReplicationGroupInput) + if err != nil { + return fmt.Errorf("error modifying ElastiCache Replication Group (%s) to set new primary: %w", d.Id(), err) + } + _, err = waiter.ReplicationGroupAvailable(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf("error waiting for ElastiCache Replication Group (%s) to be available: %w", d.Id(), err) + } + + // Finally retry deleting the cache cluster + var finalSnapshotID = d.Get("final_snapshot_identifier").(string) + err = deleteElasticacheCacheCluster(conn, cacheClusterID, finalSnapshotID) + if err != nil { + return fmt.Errorf("error deleting ElastiCache Cache Cluster (%s) (removing replica after setting new primary): %w", cacheClusterID, err) + } + } + } + + // Wait for all Cache Cluster deletions + for _, cacheClusterID := range removeClusterIDs { + _, err := waiter.CacheClusterDeleted(conn, cacheClusterID, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf("error waiting for ElastiCache Cache Cluster (%s) to be deleted (removing replica): %w", cacheClusterID, err) + } + } + + // Re-enable automatic failover if we needed to temporarily disable it + if reEnableAutomaticFailover { + input := &elasticache.ModifyReplicationGroupInput{ + ApplyImmediately: aws.Bool(true), + AutomaticFailoverEnabled: aws.Bool(true), + ReplicationGroupId: aws.String(d.Id()), + } + log.Printf("[DEBUG] Modifying ElastiCache Replication Group: %s", input) + _, err := conn.ModifyReplicationGroup(input) + if err != nil { + return fmt.Errorf("error modifying ElastiCache Replication Group (%s) to re-enable automatic failover: %w", d.Id(), err) + } + } + + return nil +} From cf228f12fadfcda837388f5c236d2d41d348cc00 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 20 Jan 2021 19:12:04 -0500 Subject: [PATCH 0780/1212] Update CHANGELOG for #17099 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 99350a4e3e1..2815d0f8718 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,8 @@ ENHANCEMENTS BUG FIXES * resource/aws_api_gateway_authorizer: Ensure `authorizer_credentials` are configured when `type` is `COGNITO_USER_POOLS` [GH-16614] +* resource/aws_api_gateway_rest_api: Ensure `api_key_source`, `binary_media_types`, `description`, `minimum_compression_size`, `name`, and `policy` configuration values are correctly applied as an override after OpenAPI specification import (`body` argument) 
[GH-17099] +* resource/aws_api_gateway_rest_api: Allow `api_key_source`, `binary_media_types`, and `description` arguments to be omitted from configuration with OpenAPI specification import (`body` argument) [GH-17099] * resource/aws_lambda_function: Prevent panic with missing `FunctionConfiguration` `PackageType` attribute in API response [GH-16544] * resource/aws_lambda_function: Prevent panic with missing environment variable value [GH-17056] * resource/aws_sagemaker_image: Fix catching image not found on read error [GH-17141] From 7e6d7b4ed8e36c0d4b4a145179f0556a9169e9cc Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 20 Jan 2021 16:39:51 -0800 Subject: [PATCH 0781/1212] Add CHANGELOG entry --- .changelog/17201.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/17201.txt diff --git a/.changelog/17201.txt b/.changelog/17201.txt new file mode 100644 index 00000000000..67a284a809b --- /dev/null +++ b/.changelog/17201.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_elasticache_replication_group: Correctly update computed `member_clusters` values +``` From fa6a528d2ddd1fcb3135494d5f6370d90a387998 Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Wed, 20 Jan 2021 16:51:32 -0800 Subject: [PATCH 0782/1212] bump version to correctly handle multiple issues --- .github/workflows/milestone.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/milestone.yml b/.github/workflows/milestone.yml index 8e93930efe1..aae0e3dd9f3 100644 --- a/.github/workflows/milestone.yml +++ b/.github/workflows/milestone.yml @@ -18,7 +18,7 @@ jobs: run: | echo ::set-output name=milestone_id::$(curl -H "Authorization: Bearer ${{secrets.GITHUB_TOKEN}}" https://api.github.com/repos/${{ github.repository_owner }}/${{ github.event.repository.name }}/milestones | jq 'map(select(.title == "${{ steps.get-current-milestone.outputs.current_milestone }}"))[0].number') - run: echo ${{ steps.get-milestone-id.outputs.milestone_id }} - - uses: breathingdust/current-milestone-action@v3 + - uses: breathingdust/current-milestone-action@v4 with: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} pull_number: ${{ github.event.pull_request.number }} From feb41d411412d74f4a7d30727936f438a2808d68 Mon Sep 17 00:00:00 2001 From: Tommy Callahan Jr Date: Wed, 20 Jan 2021 20:06:29 -0500 Subject: [PATCH 0783/1212] Updated aws_db_proxy_target documentation example as suggested in issue #17172 --- website/docs/r/db_proxy_target.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/r/db_proxy_target.html.markdown b/website/docs/r/db_proxy_target.html.markdown index 11decffdaf7..68bb78bd4c9 100644 --- a/website/docs/r/db_proxy_target.html.markdown +++ b/website/docs/r/db_proxy_target.html.markdown @@ -50,8 +50,8 @@ resource "aws_db_proxy_default_target_group" "example" { resource "aws_db_proxy_target" "example" { db_instance_identifier = aws_db_instance.example.id - db_proxy_name = aws_db_proxy.example.db_proxy_name - target_group_name = aws_db_proxy_default_target_group.example.name + db_proxy_name = aws_db_proxy.example.name + target_group_name = aws_db_proxy_default_target_group.example.db_proxy_name } ``` From 58353b9f73ee9efc6071b77642c451ea6f8d8ca7 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 20 Jan 2021 17:17:24 -0800 Subject: [PATCH 0784/1212] Refactor modification methods and use finders --- ...ource_aws_elasticache_replication_group.go | 150 ++++++++---------- ..._aws_elasticache_replication_group_test.go | 56 ++----- 2 files 
changed, 76 insertions(+), 130 deletions(-) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index ad78efef8f9..29b81287737 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -15,7 +15,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/elasticache/finder" "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/elasticache/waiter" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" ) func resourceAwsElasticacheReplicationGroup() *schema.Resource { @@ -395,36 +397,18 @@ func resourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta int conn := meta.(*AWSClient).elasticacheconn ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig - req := &elasticache.DescribeReplicationGroupsInput{ - ReplicationGroupId: aws.String(d.Id()), + rgp, err := finder.ReplicationGroupByID(conn, d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] ElastiCache Replication Group (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil } - - res, err := conn.DescribeReplicationGroups(req) if err != nil { - if isAWSErr(err, elasticache.ErrCodeReplicationGroupNotFoundFault, "") { - log.Printf("[WARN] ElastiCache Replication Group (%s) not found", d.Id()) - d.SetId("") - return nil - } - return err } - var rgp *elasticache.ReplicationGroup - for _, r := range res.ReplicationGroups { - if aws.StringValue(r.ReplicationGroupId) == d.Id() { - rgp = r - } - } - - if rgp == nil { - log.Printf("[WARN] Replication Group (%s) not found", d.Id()) - d.SetId("") - return nil - } - if aws.StringValue(rgp.Status) == "deleting" { - log.Printf("[WARN] The Replication Group %q is currently in the `deleting` state", d.Id()) + log.Printf("[WARN] ElastiCache Replication Group (%s) is currently in the `deleting` status, removing from state", d.Id()) d.SetId("") return nil } @@ -620,15 +604,10 @@ func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta i } if requestUpdate { - _, err := conn.ModifyReplicationGroup(params) + err := resourceAwsElasticacheReplicationGroupModify(conn, d.Timeout(schema.TimeoutUpdate), params) if err != nil { return fmt.Errorf("error updating ElastiCache Replication Group (%s): %w", d.Id(), err) } - - _, err = waiter.ReplicationGroupAvailable(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("error waiting for ElastiCache Replication Group (%s) to be updated: %w", d.Id(), err) - } } if d.HasChange("tags") { @@ -769,26 +748,23 @@ func elasticacheReplicationGroupModifyNumCacheClusters(conn *elasticache.ElastiC oldNumberCacheClusters := o.(int) newNumberCacheClusters := n.(int) - // var err error + var err error if newNumberCacheClusters > oldNumberCacheClusters { - err := elasticacheReplicationGroupIncreaseNumCacheClusters(conn, d.Id(), oldNumberCacheClusters, newNumberCacheClusters, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return err - } + err = elasticacheReplicationGroupIncreaseNumCacheClusters(conn, d.Id(), oldNumberCacheClusters, newNumberCacheClusters, d.Timeout(schema.TimeoutUpdate)) } else { - err := elasticacheReplicationGroupReduceNumCacheClusters(conn, 
d.Id(), oldNumberCacheClusters, newNumberCacheClusters, d.Timeout(schema.TimeoutUpdate), d) - if err != nil { - return err - } + err = elasticacheReplicationGroupReduceNumCacheClusters(conn, d.Id(), oldNumberCacheClusters, newNumberCacheClusters, d.Timeout(schema.TimeoutUpdate), d) } + return err +} - return nil +func formatReplicationGroupClusterID(replicationGroupID string, clusterID int) string { + return fmt.Sprintf("%s-%03d", replicationGroupID, clusterID) } func elasticacheReplicationGroupIncreaseNumCacheClusters(conn *elasticache.ElastiCache, replicationGroupID string, o, n int, timeout time.Duration) error { var addClusterIDs []string for clusterID := o + 1; clusterID <= n; clusterID++ { - addClusterIDs = append(addClusterIDs, fmt.Sprintf("%s-%03d", replicationGroupID, clusterID)) + addClusterIDs = append(addClusterIDs, formatReplicationGroupClusterID(replicationGroupID, clusterID)) } // Kick off all the Cache Cluster creations @@ -819,7 +795,7 @@ func elasticacheReplicationGroupIncreaseNumCacheClusters(conn *elasticache.Elast func elasticacheReplicationGroupReduceNumCacheClusters(conn *elasticache.ElastiCache, replicationGroupID string, o, n int, timeout time.Duration, d *schema.ResourceData) error { var removeClusterIDs []string for clusterID := o; clusterID >= (n + 1); clusterID-- { - removeClusterIDs = append(removeClusterIDs, fmt.Sprintf("%s-%03d", d.Id(), clusterID)) + removeClusterIDs = append(removeClusterIDs, formatReplicationGroupClusterID(replicationGroupID, clusterID)) } // Cannot reassign primary cluster ID while automatic failover is enabled @@ -841,19 +817,12 @@ func elasticacheReplicationGroupReduceNumCacheClusters(conn *elasticache.ElastiC // that is not in removeClusterIDs newPrimaryClusterID := "" - describeReplicationGroupInput := &elasticache.DescribeReplicationGroupsInput{ - ReplicationGroupId: aws.String(d.Id()), - } - log.Printf("[DEBUG] Reading ElastiCache Replication Group: %s", describeReplicationGroupInput) - output, err := conn.DescribeReplicationGroups(describeReplicationGroupInput) + rg, err := finder.ReplicationGroupByID(conn, replicationGroupID) if err != nil { - return fmt.Errorf("error reading ElastiCache Replication Group (%s) to determine new primary: %w", d.Id(), err) - } - if output == nil || len(output.ReplicationGroups) == 0 || len(output.ReplicationGroups[0].MemberClusters) == 0 { - return fmt.Errorf("error reading ElastiCache Replication Group (%s) to determine new primary: missing replication group information", d.Id()) + return fmt.Errorf("error reading ElastiCache Replication Group (%s) to determine new primary: %w", replicationGroupID, err) } - for _, memberClusterPtr := range output.ReplicationGroups[0].MemberClusters { + for _, memberClusterPtr := range rg.MemberClusters { memberCluster := aws.StringValue(memberClusterPtr) memberClusterInRemoveClusterIDs := false for _, removeClusterID := range removeClusterIDs { @@ -868,48 +837,28 @@ func elasticacheReplicationGroupReduceNumCacheClusters(conn *elasticache.ElastiC } } if newPrimaryClusterID == "" { - return fmt.Errorf("error reading ElastiCache Replication Group (%s) to determine new primary: unable to assign new primary", d.Id()) + return fmt.Errorf("error reading ElastiCache Replication Group (%s) to determine new primary: unable to assign new primary", replicationGroupID) } // Disable automatic failover if enabled // Must be applied previous to trying to set new primary // InvalidReplicationGroupState: Cannot manually promote a new master cache cluster while autofailover is 
enabled - if aws.StringValue(output.ReplicationGroups[0].AutomaticFailover) == elasticache.AutomaticFailoverStatusEnabled { + if aws.StringValue(rg.AutomaticFailover) == elasticache.AutomaticFailoverStatusEnabled { // Be kind and rewind if d.Get("automatic_failover_enabled").(bool) { reEnableAutomaticFailover = true } - modifyReplicationGroupInput := &elasticache.ModifyReplicationGroupInput{ - ApplyImmediately: aws.Bool(true), - AutomaticFailoverEnabled: aws.Bool(false), - ReplicationGroupId: aws.String(d.Id()), - } - log.Printf("[DEBUG] Modifying ElastiCache Replication Group: %s", modifyReplicationGroupInput) - _, err = conn.ModifyReplicationGroup(modifyReplicationGroupInput) + err = resourceAwsElasticacheReplicationGroupDisableAutomaticFailover(conn, replicationGroupID, timeout) if err != nil { - return fmt.Errorf("error modifying ElastiCache Replication Group (%s) to set new primary: %w", d.Id(), err) - } - _, err = waiter.ReplicationGroupAvailable(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("error waiting for ElastiCache Replication Group (%s) to be available: %w", d.Id(), err) + return fmt.Errorf("error disabling Elasticache Replication Group (%s) automatic failover: %w", replicationGroupID, err) } } // Set new primary - modifyReplicationGroupInput := &elasticache.ModifyReplicationGroupInput{ - ApplyImmediately: aws.Bool(true), - PrimaryClusterId: aws.String(newPrimaryClusterID), - ReplicationGroupId: aws.String(d.Id()), - } - log.Printf("[DEBUG] Modifying ElastiCache Replication Group: %s", modifyReplicationGroupInput) - _, err = conn.ModifyReplicationGroup(modifyReplicationGroupInput) + err = resourceAwsElasticacheReplicationGroupSetPrimaryClusterID(conn, replicationGroupID, newPrimaryClusterID, timeout) if err != nil { - return fmt.Errorf("error modifying ElastiCache Replication Group (%s) to set new primary: %w", d.Id(), err) - } - _, err = waiter.ReplicationGroupAvailable(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("error waiting for ElastiCache Replication Group (%s) to be available: %w", d.Id(), err) + return fmt.Errorf("error changing Elasticache Replication Group (%s) primary cluster: %w", replicationGroupID, err) } // Finally retry deleting the cache cluster @@ -923,7 +872,7 @@ func elasticacheReplicationGroupReduceNumCacheClusters(conn *elasticache.ElastiC // Wait for all Cache Cluster deletions for _, cacheClusterID := range removeClusterIDs { - _, err := waiter.CacheClusterDeleted(conn, cacheClusterID, d.Timeout(schema.TimeoutUpdate)) + _, err := waiter.CacheClusterDeleted(conn, cacheClusterID, timeout) if err != nil { return fmt.Errorf("error waiting for ElastiCache Cache Cluster (%s) to be deleted (removing replica): %w", cacheClusterID, err) } @@ -931,17 +880,44 @@ func elasticacheReplicationGroupReduceNumCacheClusters(conn *elasticache.ElastiC // Re-enable automatic failover if we needed to temporarily disable it if reEnableAutomaticFailover { - input := &elasticache.ModifyReplicationGroupInput{ - ApplyImmediately: aws.Bool(true), - AutomaticFailoverEnabled: aws.Bool(true), - ReplicationGroupId: aws.String(d.Id()), - } - log.Printf("[DEBUG] Modifying ElastiCache Replication Group: %s", input) - _, err := conn.ModifyReplicationGroup(input) + err := resourceAwsElasticacheReplicationGroupEnableAutomaticFailover(conn, replicationGroupID, timeout) if err != nil { - return fmt.Errorf("error modifying ElastiCache Replication Group (%s) to re-enable automatic failover: %w", d.Id(), err) + 
return fmt.Errorf("error re-enabling Elasticache Replication Group (%s) automatic failover: %w", replicationGroupID, err) } } return nil } + +func resourceAwsElasticacheReplicationGroupDisableAutomaticFailover(conn *elasticache.ElastiCache, replicationGroupID string, timeout time.Duration) error { + return resourceAwsElasticacheReplicationGroupModify(conn, timeout, &elasticache.ModifyReplicationGroupInput{ + ReplicationGroupId: aws.String(replicationGroupID), + ApplyImmediately: aws.Bool(true), + AutomaticFailoverEnabled: aws.Bool(false), + }) +} + +func resourceAwsElasticacheReplicationGroupEnableAutomaticFailover(conn *elasticache.ElastiCache, replicationGroupID string, timeout time.Duration) error { + return resourceAwsElasticacheReplicationGroupModify(conn, timeout, &elasticache.ModifyReplicationGroupInput{ + ReplicationGroupId: aws.String(replicationGroupID), + ApplyImmediately: aws.Bool(true), + AutomaticFailoverEnabled: aws.Bool(true), + }) +} + +func resourceAwsElasticacheReplicationGroupSetPrimaryClusterID(conn *elasticache.ElastiCache, replicationGroupID, primaryClusterID string, timeout time.Duration) error { + return resourceAwsElasticacheReplicationGroupModify(conn, timeout, &elasticache.ModifyReplicationGroupInput{ + ReplicationGroupId: aws.String(replicationGroupID), + ApplyImmediately: aws.Bool(true), + PrimaryClusterId: aws.String(primaryClusterID), + }) +} + +func resourceAwsElasticacheReplicationGroupModify(conn *elasticache.ElastiCache, timeout time.Duration, input *elasticache.ModifyReplicationGroupInput) error { + _, err := conn.ModifyReplicationGroup(input) + if err != nil { + return err + } + _, err = waiter.ReplicationGroupAvailable(conn, aws.StringValue(input.ReplicationGroupId), timeout) + return err +} diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go index d8746080454..4695eb3bba9 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -14,7 +14,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/elasticache/finder" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/elasticache/waiter" "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" ) @@ -697,18 +696,12 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFail }, { PreConfig: func() { - // Simulate failover so primary is on node we are trying to delete + // Ensure that primary is on the node we are trying to delete conn := testAccProvider.Meta().(*AWSClient).elasticacheconn - input := &elasticache.ModifyReplicationGroupInput{ - ApplyImmediately: aws.Bool(true), - PrimaryClusterId: aws.String(fmt.Sprintf("%s-003", rName)), - ReplicationGroupId: aws.String(rName), - } - if _, err := conn.ModifyReplicationGroup(input); err != nil { - t.Fatalf("error setting new primary cache cluster: %s", err) - } - if _, err := waiter.ReplicationGroupAvailable(conn, rName, 40*time.Minute); err != nil { - t.Fatalf("error waiting for new primary cache cluster: %s", err) + timeout := 40 * time.Minute + + if err := resourceAwsElasticacheReplicationGroupSetPrimaryClusterID(conn, rName, formatReplicationGroupClusterID(rName, 3), timeout); err != nil { + t.Fatalf("error changing primary cache cluster: %s", err) } }, Config: 
testAccAWSElasticacheReplicationGroupConfig_NumberCacheClusters(rName, 2, false), @@ -742,46 +735,23 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFail }, { PreConfig: func() { - // Simulate failover so primary is on node we are trying to delete + // Ensure that primary is on the node we are trying to delete conn := testAccProvider.Meta().(*AWSClient).elasticacheconn + timeout := 40 * time.Minute // Must disable automatic failover first - var input *elasticache.ModifyReplicationGroupInput = &elasticache.ModifyReplicationGroupInput{ - ApplyImmediately: aws.Bool(true), - AutomaticFailoverEnabled: aws.Bool(false), - ReplicationGroupId: aws.String(rName), - } - if _, err := conn.ModifyReplicationGroup(input); err != nil { + if err := resourceAwsElasticacheReplicationGroupDisableAutomaticFailover(conn, rName, timeout); err != nil { t.Fatalf("error disabling automatic failover: %s", err) } - if _, err := waiter.ReplicationGroupAvailable(conn, rName, 40*time.Minute); err != nil { - t.Fatalf("error waiting for disabling automatic failover: %s", err) - } - // Failover - input = &elasticache.ModifyReplicationGroupInput{ - ApplyImmediately: aws.Bool(true), - PrimaryClusterId: aws.String(fmt.Sprintf("%s-003", rName)), - ReplicationGroupId: aws.String(rName), - } - if _, err := conn.ModifyReplicationGroup(input); err != nil { - t.Fatalf("error setting new primary cache cluster: %s", err) - } - if _, err := waiter.ReplicationGroupAvailable(conn, rName, 40*time.Minute); err != nil { - t.Fatalf("error waiting for new primary cache cluster: %s", err) + // Set primary + if err := resourceAwsElasticacheReplicationGroupSetPrimaryClusterID(conn, rName, formatReplicationGroupClusterID(rName, 3), timeout); err != nil { + t.Fatalf("error changing primary cache cluster: %s", err) } // Re-enable automatic failover like nothing ever happened - input = &elasticache.ModifyReplicationGroupInput{ - ApplyImmediately: aws.Bool(true), - AutomaticFailoverEnabled: aws.Bool(true), - ReplicationGroupId: aws.String(rName), - } - if _, err := conn.ModifyReplicationGroup(input); err != nil { - t.Fatalf("error enabled automatic failover: %s", err) - } - if _, err := waiter.ReplicationGroupAvailable(conn, rName, 40*time.Minute); err != nil { - t.Fatalf("error waiting for enabled automatic failover: %s", err) + if err := resourceAwsElasticacheReplicationGroupEnableAutomaticFailover(conn, rName, timeout); err != nil { + t.Fatalf("error re-enabling automatic failover: %s", err) } }, Config: testAccAWSElasticacheReplicationGroupConfig_NumberCacheClusters(rName, 2, true), From e28560d8ef27a7a9ea14f0cec168e63a8c7e3ee9 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 20 Jan 2021 22:06:58 -0800 Subject: [PATCH 0785/1212] Moves function --- aws/resource_aws_elasticache_replication_group.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index 29b81287737..1074dcf6e71 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -757,10 +757,6 @@ func elasticacheReplicationGroupModifyNumCacheClusters(conn *elasticache.ElastiC return err } -func formatReplicationGroupClusterID(replicationGroupID string, clusterID int) string { - return fmt.Sprintf("%s-%03d", replicationGroupID, clusterID) -} - func elasticacheReplicationGroupIncreaseNumCacheClusters(conn *elasticache.ElastiCache, replicationGroupID string, o, n int, 
timeout time.Duration) error { var addClusterIDs []string for clusterID := o + 1; clusterID <= n; clusterID++ { @@ -921,3 +917,7 @@ func resourceAwsElasticacheReplicationGroupModify(conn *elasticache.ElastiCache, _, err = waiter.ReplicationGroupAvailable(conn, aws.StringValue(input.ReplicationGroupId), timeout) return err } + +func formatReplicationGroupClusterID(replicationGroupID string, clusterID int) string { + return fmt.Sprintf("%s-%03d", replicationGroupID, clusterID) +} From 0ba00f592ebf9c5ccd118c8fb10cede0b07c8e89 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 14 Jan 2021 20:07:40 -0500 Subject: [PATCH 0786/1212] resource/lakeformation_permissions: Address multiple permissions --- aws/resource_aws_lakeformation_permissions.go | 3 ++- aws/resource_aws_lakeformation_permissions_test.go | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_lakeformation_permissions.go b/aws/resource_aws_lakeformation_permissions.go index 151b83f9ef9..f13649e8766 100644 --- a/aws/resource_aws_lakeformation_permissions.go +++ b/aws/resource_aws_lakeformation_permissions.go @@ -300,7 +300,8 @@ func resourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta interf } if len(principalResourcePermissions) > 1 { - return fmt.Errorf("error reading Lake Formation permissions: %s", "multiple permissions found") + //return fmt.Errorf("error reading Lake Formation permissions: %s", "multiple permissions found") + return fmt.Errorf("error reading Lake Formation permissions: %v\nINPUT: %v", principalResourcePermissions, input) } for _, permissions := range principalResourcePermissions { diff --git a/aws/resource_aws_lakeformation_permissions_test.go b/aws/resource_aws_lakeformation_permissions_test.go index 2f96730804d..876ce6cdb7e 100644 --- a/aws/resource_aws_lakeformation_permissions_test.go +++ b/aws/resource_aws_lakeformation_permissions_test.go @@ -558,8 +558,8 @@ resource "aws_lakeformation_data_lake_settings" "test" { } resource "aws_lakeformation_permissions" "test" { - permissions = ["SELECT"] - principal = aws_iam_role.test.arn + permissions = ["ALTER", "SELECT"] + principal = data.aws_caller_identity.current.arn table_with_columns { database_name = aws_glue_catalog_table.test.database_name From 332ac25c45d0a0e9ccd43b841408dee294f49f0a Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 20 Jan 2021 11:08:27 -0500 Subject: [PATCH 0787/1212] resource/lakeformation_permissions: Work out bug with multiple --- ...urce_aws_lakeformation_permissions_test.go | 2 + ...urce_aws_lakeformation_permissions_test.go | 130 +++++++++++++++--- aws/resource_aws_lakeformation_test.go | 1 + 3 files changed, 114 insertions(+), 19 deletions(-) diff --git a/aws/data_source_aws_lakeformation_permissions_test.go b/aws/data_source_aws_lakeformation_permissions_test.go index 4b59b28c8c8..c6299c985c3 100644 --- a/aws/data_source_aws_lakeformation_permissions_test.go +++ b/aws/data_source_aws_lakeformation_permissions_test.go @@ -228,6 +228,8 @@ resource "aws_lakeformation_permissions" "test" { data_location { arn = aws_s3_bucket.test.arn } + + depends_on = ["aws_lakeformation_data_lake_settings.test"] } data "aws_lakeformation_permissions" "test" { diff --git a/aws/resource_aws_lakeformation_permissions_test.go b/aws/resource_aws_lakeformation_permissions_test.go index 876ce6cdb7e..6c4cd131956 100644 --- a/aws/resource_aws_lakeformation_permissions_test.go +++ b/aws/resource_aws_lakeformation_permissions_test.go @@ -28,7 +28,7 @@ func testAccAWSLakeFormationPermissions_basic(t 
*testing.T) { testAccCheckAWSLakeFormationPermissionsExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "principal", roleName, "arn"), resource.TestCheckResourceAttr(resourceName, "permissions.#", "1"), - resource.TestCheckResourceAttr(resourceName, "permissions.0", "CREATE_DATABASE"), + resource.TestCheckResourceAttr(resourceName, "permissions.0", lakeformation.PermissionCreateDatabase), resource.TestCheckResourceAttr(resourceName, "catalog_resource", "true"), ), }, @@ -53,7 +53,7 @@ func testAccAWSLakeFormationPermissions_dataLocation(t *testing.T) { testAccCheckAWSLakeFormationPermissionsExists(resourceName), resource.TestCheckResourceAttrPair(resourceName, "principal", roleName, "arn"), resource.TestCheckResourceAttr(resourceName, "permissions.#", "1"), - resource.TestCheckResourceAttr(resourceName, "permissions.0", "DATA_LOCATION_ACCESS"), + resource.TestCheckResourceAttr(resourceName, "permissions.0", lakeformation.PermissionDataLocationAccess), resource.TestCheckResourceAttr(resourceName, "catalog_resource", "false"), resource.TestCheckResourceAttr(resourceName, "data_location.#", "1"), resource.TestCheckResourceAttrPair(resourceName, "data_location.0.arn", bucketName, "arn"), @@ -85,10 +85,10 @@ func testAccAWSLakeFormationPermissions_database(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "database.0.name", dbName, "name"), resource.TestCheckResourceAttr(resourceName, "permissions.#", "3"), resource.TestCheckResourceAttr(resourceName, "permissions.0", "ALTER"), - resource.TestCheckResourceAttr(resourceName, "permissions.1", "CREATE_TABLE"), - resource.TestCheckResourceAttr(resourceName, "permissions.2", "DROP"), + resource.TestCheckResourceAttr(resourceName, "permissions.1", lakeformation.PermissionCreateTable), + resource.TestCheckResourceAttr(resourceName, "permissions.2", lakeformation.PermissionDrop), resource.TestCheckResourceAttr(resourceName, "permissions_with_grant_option.#", "1"), - resource.TestCheckResourceAttr(resourceName, "permissions_with_grant_option.0", "CREATE_TABLE"), + resource.TestCheckResourceAttr(resourceName, "permissions_with_grant_option.0", lakeformation.PermissionCreateTable), ), }, }, @@ -114,8 +114,10 @@ func testAccAWSLakeFormationPermissions_table(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "table.#", "1"), resource.TestCheckResourceAttrPair(resourceName, "table.0.database_name", tableName, "database_name"), resource.TestCheckResourceAttrPair(resourceName, "table.0.name", tableName, "name"), - resource.TestCheckResourceAttr(resourceName, "permissions.#", "1"), - resource.TestCheckResourceAttr(resourceName, "permissions.0", "ALL"), + resource.TestCheckResourceAttr(resourceName, "permissions.#", "3"), + resource.TestCheckResourceAttr(resourceName, "permissions.0", lakeformation.PermissionAlter), + resource.TestCheckResourceAttr(resourceName, "permissions.1", lakeformation.PermissionDelete), + resource.TestCheckResourceAttr(resourceName, "permissions.2", lakeformation.PermissionDescribe), ), }, }, @@ -145,7 +147,37 @@ func testAccAWSLakeFormationPermissions_tableWithColumns(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "table_with_columns.0.column_names.0", "event"), resource.TestCheckResourceAttr(resourceName, "table_with_columns.0.column_names.1", "timestamp"), resource.TestCheckResourceAttr(resourceName, "permissions.#", "1"), - resource.TestCheckResourceAttr(resourceName, "permissions.0", "SELECT"), + resource.TestCheckResourceAttr(resourceName, "permissions.0", 
lakeformation.PermissionSelect), + ), + }, + }, + }) +} + +func testAccAWSLakeFormationPermissions_tableWithColumnsAndTable(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_lakeformation_permissions.test" + roleName := "aws_iam_role.test" + tableName := "aws_glue_catalog_table.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLakeFormationPermissionsDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLakeFormationPermissionsConfig_tableWithColumnsAndTable(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLakeFormationPermissionsExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "principal", roleName, "arn"), + resource.TestCheckResourceAttr(resourceName, "table_with_columns.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "table_with_columns.0.database_name", tableName, "database_name"), + resource.TestCheckResourceAttrPair(resourceName, "table_with_columns.0.name", tableName, "name"), + resource.TestCheckResourceAttr(resourceName, "table_with_columns.0.column_names.#", "2"), + resource.TestCheckResourceAttr(resourceName, "table_with_columns.0.column_names.0", "event"), + resource.TestCheckResourceAttr(resourceName, "table_with_columns.0.column_names.1", "timestamp"), + resource.TestCheckResourceAttr(resourceName, "permissions.#", "1"), + resource.TestCheckResourceAttr(resourceName, "permissions.0", lakeformation.PermissionSelect), ), }, }, @@ -420,10 +452,6 @@ EOF data "aws_caller_identity" "current" {} -resource "aws_s3_bucket" "test" { - bucket = %[1]q -} - resource "aws_glue_catalog_database" "test" { name = %[1]q } @@ -473,10 +501,6 @@ EOF data "aws_caller_identity" "current" {} -resource "aws_s3_bucket" "test" { - bucket = %[1]q -} - resource "aws_glue_catalog_database" "test" { name = %[1]q } @@ -491,7 +515,7 @@ resource "aws_lakeformation_data_lake_settings" "test" { } resource "aws_lakeformation_permissions" "test" { - permissions = ["ALL"] + permissions = ["ALTER", "DELETE", "DESCRIBE"] principal = aws_iam_role.test.arn table { @@ -558,8 +582,8 @@ resource "aws_lakeformation_data_lake_settings" "test" { } resource "aws_lakeformation_permissions" "test" { - permissions = ["ALTER", "SELECT"] - principal = data.aws_caller_identity.current.arn + permissions = ["SELECT"] + principal = aws_iam_role.test.arn table_with_columns { database_name = aws_glue_catalog_table.test.database_name @@ -571,3 +595,71 @@ resource "aws_lakeformation_permissions" "test" { } `, rName) } + +func testAccAWSLakeFormationPermissionsConfig_tableWithColumnsAndTable(rName string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_iam_role" "test" { + name = %[1]q + path = "/" + + assume_role_policy = < Date: Wed, 20 Jan 2021 18:19:40 -0500 Subject: [PATCH 0788/1212] lakeformation_permissions: Work out bug --- aws/resource_aws_lakeformation_permissions.go | 81 ++++++++++++++----- ...urce_aws_lakeformation_permissions_test.go | 10 +++ 2 files changed, 70 insertions(+), 21 deletions(-) diff --git a/aws/resource_aws_lakeformation_permissions.go b/aws/resource_aws_lakeformation_permissions.go index f13649e8766..ffea1211742 100644 --- a/aws/resource_aws_lakeformation_permissions.go +++ b/aws/resource_aws_lakeformation_permissions.go @@ -251,6 +251,7 @@ func resourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, 
meta interf } input.Resource = expandLakeFormationResource(d, true) + resourceType := expandLakeFormationResourceType(d) log.Printf("[DEBUG] Reading Lake Formation permissions: %v", input) var principalResourcePermissions []*lakeformation.PrincipalResourcePermissions @@ -258,11 +259,7 @@ func resourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta interf err := resource.Retry(2*time.Minute, func() *resource.RetryError { err := conn.ListPermissionsPages(input, func(resp *lakeformation.ListPermissionsOutput, lastPage bool) bool { for _, permission := range resp.PrincipalResourcePermissions { - if permission == nil { - continue - } - - principalResourcePermissions = append(principalResourcePermissions, permission) + principalResourcePermissions = resourceAwsLakeFormationPermissionsAppend(resourceType, principalResourcePermissions, permission) } return !lastPage }) @@ -279,11 +276,7 @@ func resourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta interf if isResourceTimeoutError(err) { err = conn.ListPermissionsPages(input, func(resp *lakeformation.ListPermissionsOutput, lastPage bool) bool { for _, permission := range resp.PrincipalResourcePermissions { - if permission == nil { - continue - } - - principalResourcePermissions = append(principalResourcePermissions, permission) + principalResourcePermissions = resourceAwsLakeFormationPermissionsAppend(resourceType, principalResourcePermissions, permission) } return !lastPage }) @@ -386,34 +379,80 @@ func resourceAwsLakeFormationPermissionsDelete(d *schema.ResourceData, meta inte return nil } -func expandLakeFormationResource(d *schema.ResourceData, squashTableWithColumns bool) *lakeformation.Resource { - res := &lakeformation.Resource{} +func resourceAwsLakeFormationPermissionsAppend(resourceType string, perms []*lakeformation.PrincipalResourcePermissions, newPerm *lakeformation.PrincipalResourcePermissions) []*lakeformation.PrincipalResourcePermissions { + if newPerm == nil { + return perms + } - if v, ok := d.GetOk("catalog_resource"); ok { - if v.(bool) { - res.Catalog = &lakeformation.CatalogResource{} + switch resourceType { + case lakeformation.DataLakeResourceTypeCatalog: + if newPerm.Resource.Catalog != nil { + perms = append(perms, newPerm) + } + case lakeformation.DataLakeResourceTypeDataLocation: + if newPerm.Resource.DataLocation != nil { + perms = append(perms, newPerm) + } + case lakeformation.DataLakeResourceTypeDatabase: + if newPerm.Resource.Database != nil { + perms = append(perms, newPerm) + } + case lakeformation.DataLakeResourceTypeTable: + if newPerm.Resource.Table != nil { + perms = append(perms, newPerm) + } + case DataLakeResourceTypeTableWithColumns: + if newPerm.Resource.TableWithColumns != nil { + perms = append(perms, newPerm) } } + return perms +} + +// expandLakeFormationResourceType returns the Lake Formation resource type represented by the resource. +// This is helpful in distinguishing between TABLE and TABLE_WITH_COLUMNS types when filtering ListPermissions results. 
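+//
+// As an illustrative mapping (mirroring the switch below): `catalog_resource = true`
+// maps to CATALOG, a `data_location` block to DATA_LOCATION, a `database` block to
+// DATABASE, a `table` block to TABLE, and a `table_with_columns` block to
+// TABLE_WITH_COLUMNS.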
+func expandLakeFormationResourceType(d *schema.ResourceData) string { + if v, ok := d.GetOk("catalog_resource"); ok && v.(bool) { + return lakeformation.DataLakeResourceTypeCatalog + } + if v, ok := d.GetOk("data_location"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - res.DataLocation = expandLakeFormationDataLocationResource(v.([]interface{})[0].(map[string]interface{})) + return lakeformation.DataLakeResourceTypeDataLocation } if v, ok := d.GetOk("database"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - res.Database = expandLakeFormationDatabaseResource(v.([]interface{})[0].(map[string]interface{})) + return lakeformation.DataLakeResourceTypeDatabase } if v, ok := d.GetOk("table"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - res.Table = expandLakeFormationTableResource(v.([]interface{})[0].(map[string]interface{})) + return lakeformation.DataLakeResourceTypeTable } - if v, ok := d.GetOk("table_with_columns"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + return DataLakeResourceTypeTableWithColumns +} + +const DataLakeResourceTypeTableWithColumns = "TABLE_WITH_COLUMNS" // no lakeformation package enum value for this type + +func expandLakeFormationResource(d *schema.ResourceData, squashTableWithColumns bool) *lakeformation.Resource { + res := &lakeformation.Resource{} + + switch expandLakeFormationResourceType(d) { + case lakeformation.DataLakeResourceTypeCatalog: + res.Catalog = &lakeformation.CatalogResource{} + case lakeformation.DataLakeResourceTypeDataLocation: + res.DataLocation = expandLakeFormationDataLocationResource(d.Get("data_location").([]interface{})[0].(map[string]interface{})) + case lakeformation.DataLakeResourceTypeDatabase: + res.Database = expandLakeFormationDatabaseResource(d.Get("database").([]interface{})[0].(map[string]interface{})) + case lakeformation.DataLakeResourceTypeTable: + res.Table = expandLakeFormationTableResource(d.Get("table").([]interface{})[0].(map[string]interface{})) + case DataLakeResourceTypeTableWithColumns: if squashTableWithColumns { // ListPermissions does not support getting privileges by tables with columns. Instead, // use the table which will return both table and table with columns. 
- res.Table = expandLakeFormationTableResource(v.([]interface{})[0].(map[string]interface{})) + res.Table = expandLakeFormationTableResource(d.Get("table_with_columns").([]interface{})[0].(map[string]interface{})) } else { - res.TableWithColumns = expandLakeFormationTableWithColumnsResource(v.([]interface{})[0].(map[string]interface{})) + res.TableWithColumns = expandLakeFormationTableWithColumnsResource(d.Get("table_with_columns").([]interface{})[0].(map[string]interface{})) } } diff --git a/aws/resource_aws_lakeformation_permissions_test.go b/aws/resource_aws_lakeformation_permissions_test.go index 6c4cd131956..367eaeeaf36 100644 --- a/aws/resource_aws_lakeformation_permissions_test.go +++ b/aws/resource_aws_lakeformation_permissions_test.go @@ -661,5 +661,15 @@ resource "aws_lakeformation_permissions" "test" { column_names = ["event", "timestamp"] } } + +resource "aws_lakeformation_permissions" "table" { + permissions = ["ALL"] + principal = aws_lakeformation_permissions.test.principal + + table { + database_name = aws_glue_catalog_table.test.database_name + name = aws_glue_catalog_table.test.name + } +} `, rName) } From 0439a4951614e124f04d1193cededf1813b38505 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 21 Jan 2021 09:24:52 -0500 Subject: [PATCH 0789/1212] build(deps): bump github.com/hashicorp/terraform-plugin-sdk/v2 (#17211) Bumps [github.com/hashicorp/terraform-plugin-sdk/v2](https://github.com/hashicorp/terraform-plugin-sdk) from 2.4.0 to 2.4.1. - [Release notes](https://github.com/hashicorp/terraform-plugin-sdk/releases) - [Changelog](https://github.com/hashicorp/terraform-plugin-sdk/blob/master/CHANGELOG.md) - [Commits](https://github.com/hashicorp/terraform-plugin-sdk/compare/v2.4.0...v2.4.1) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index f9e2fc55c58..38649ac4693 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/go-multierror v1.1.0 github.com/hashicorp/go-version v1.2.1 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.0 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.1 github.com/jen20/awspolicyequivalence v1.1.0 github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba github.com/mattn/go-colorable v0.1.7 // indirect diff --git a/go.sum b/go.sum index 82e535a7230..13fafac0a13 100644 --- a/go.sum +++ b/go.sum @@ -209,10 +209,10 @@ github.com/hashicorp/terraform-exec v0.12.0 h1:Tb1VC2gqArl9EJziJjoazep2MyxMk00tn github.com/hashicorp/terraform-exec v0.12.0/go.mod h1:SGhto91bVRlgXQWcJ5znSz+29UZIa8kpBbkGwQ+g9E8= github.com/hashicorp/terraform-json v0.8.0 h1:XObQ3PgqU52YLQKEaJ08QtUshAfN3yu4u8ebSW0vztc= github.com/hashicorp/terraform-json v0.8.0/go.mod h1:3defM4kkMfttwiE7VakJDwCd4R+umhSQnvJwORXbprE= -github.com/hashicorp/terraform-plugin-go v0.1.0 h1:kyXZ0nkHxiRev/q18N40IbRRk4AV0zE/MDJkDM3u8dY= -github.com/hashicorp/terraform-plugin-go v0.1.0/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.0 h1:2c+vG46celrDCsfYEIzaXxvBaAXCqlVG77LwtFz8cfs= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.0/go.mod h1:JBItawj+j8Ssla5Ib6BC/W9VQkOucBfnX7VRtyx1vw8= +github.com/hashicorp/terraform-plugin-go v0.2.1 h1:EW/R8bB2Zbkjmugzsy1d27yS8/0454b3MtYHkzOknqA= 
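+# A second, table-level grant to the same principal on the same table exercises
+# the "multiple permissions" filtering added to the read function in this patch.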
+github.com/hashicorp/terraform-plugin-go v0.2.1/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.1 h1:k2rpom9wG2cdi5iLRH80EdQB7UX/E6UzYzUfzgsNLuU= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.1/go.mod h1:jgCWyjKf1BRqzuA3IPJb6PJ2YY86ePJurX9xfJtuYNU= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= From beca542b863294b5fb21a49422b6823b74a5c205 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 21 Jan 2021 09:28:11 -0500 Subject: [PATCH 0790/1212] Update CHANGELOG for #17211 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2815d0f8718..f3fc825d6d2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,8 @@ ENHANCEMENTS BUG FIXES +* provider: Prevent panic when sending Ctrl-C (SIGINT) to Terraform [GH-17211] +* provider: Fix error messages for missing required blocks not including the block name [GH-17211] * resource/aws_api_gateway_authorizer: Ensure `authorizer_credentials` are configured when `type` is `COGNITO_USER_POOLS` [GH-16614] * resource/aws_api_gateway_rest_api: Ensure `api_key_source`, `binary_media_types`, `description`, `minimum_compression_size`, `name`, and `policy` configuration values are correctly applied as an override after OpenAPI specification import (`body` argument) [GH-17099] * resource/aws_api_gateway_rest_api: Allow `api_key_source`, `binary_media_types`, and `description` arguments to be omitted from configuration with OpenAPI specification import (`body` argument) [GH-17099] From f28b7abf3f602e21c5f06a2e331f136c4376fe5d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 21 Jan 2021 15:10:28 +0000 Subject: [PATCH 0791/1212] build(deps): bump github.com/hashicorp/terraform-plugin-sdk/v2 (#17210) --- awsproviderlint/go.mod | 2 +- awsproviderlint/go.sum | 8 +- .../tfprotov5/server/server.go | 1 + .../terraform-plugin-go/tfprotov5/state.go | 64 +++++++ .../tfprotov5/tftypes/value.go | 179 +++++++++++++++++- .../v2/helper/resource/testing_sets.go | 2 +- .../v2/helper/schema/schema.go | 6 +- .../terraform-plugin-sdk/v2/meta/meta.go | 2 +- awsproviderlint/vendor/modules.txt | 4 +- 9 files changed, 251 insertions(+), 17 deletions(-) diff --git a/awsproviderlint/go.mod b/awsproviderlint/go.mod index 8d08dcde54e..d177937aafa 100644 --- a/awsproviderlint/go.mod +++ b/awsproviderlint/go.mod @@ -5,6 +5,6 @@ go 1.15 require ( github.com/aws/aws-sdk-go v1.36.28 github.com/bflad/tfproviderlint v0.21.0 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.0 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.1 golang.org/x/tools v0.0.0-20200928201943-a0ef9b62deab ) diff --git a/awsproviderlint/go.sum b/awsproviderlint/go.sum index 237bfe97c47..377c1e6d855 100644 --- a/awsproviderlint/go.sum +++ b/awsproviderlint/go.sum @@ -211,14 +211,14 @@ github.com/hashicorp/terraform-json v0.5.0 h1:7TV3/F3y7QVSuN4r9BEXqnWqrAyeOtON8f github.com/hashicorp/terraform-json v0.5.0/go.mod h1:eAbqb4w0pSlRmdvl8fOyHAi/+8jnkVYN28gJkSJrLhU= github.com/hashicorp/terraform-json v0.8.0 h1:XObQ3PgqU52YLQKEaJ08QtUshAfN3yu4u8ebSW0vztc= github.com/hashicorp/terraform-json v0.8.0/go.mod h1:3defM4kkMfttwiE7VakJDwCd4R+umhSQnvJwORXbprE= 
-github.com/hashicorp/terraform-plugin-go v0.1.0 h1:kyXZ0nkHxiRev/q18N40IbRRk4AV0zE/MDJkDM3u8dY=
-github.com/hashicorp/terraform-plugin-go v0.1.0/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4=
+github.com/hashicorp/terraform-plugin-go v0.2.1 h1:EW/R8bB2Zbkjmugzsy1d27yS8/0454b3MtYHkzOknqA=
+github.com/hashicorp/terraform-plugin-go v0.2.1/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4=
github.com/hashicorp/terraform-plugin-sdk v1.9.0 h1:WBHHIX/RgF6/lbfMCzx0qKl96BbQy3bexWFvDqt1bhE=
github.com/hashicorp/terraform-plugin-sdk v1.9.0/go.mod h1:C/AXwmDHqbc3h6URiHpIsVKrwV4PS0Sh0+VTaeEkShw=
github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.0 h1:jPPqctLDg75CilV3IpypAz6on3MSMOiUMzXNz+Xex6E=
github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.0/go.mod h1:xOf85UtHJ0/9/EF3eKgZFlJ6feN8sDtjQRWRHhimCUw=
-github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.0 h1:2c+vG46celrDCsfYEIzaXxvBaAXCqlVG77LwtFz8cfs=
-github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.0/go.mod h1:JBItawj+j8Ssla5Ib6BC/W9VQkOucBfnX7VRtyx1vw8=
+github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.1 h1:k2rpom9wG2cdi5iLRH80EdQB7UX/E6UzYzUfzgsNLuU=
+github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.1/go.mod h1:jgCWyjKf1BRqzuA3IPJb6PJ2YY86ePJurX9xfJtuYNU=
github.com/hashicorp/terraform-plugin-test v1.2.0 h1:AWFdqyfnOj04sxTdaAF57QqvW7XXrT8PseUHkbKsE8I=
github.com/hashicorp/terraform-plugin-test v1.2.0/go.mod h1:QIJHYz8j+xJtdtLrFTlzQVC0ocr3rf/OjIpgZLK56Hs=
github.com/hashicorp/terraform-plugin-test/v2 v2.0.0-20200724200815-faa9931ac59e h1:Q8lNGrk3SVdXEbLuUJD03jghIjykJT9pu1aReKgb858=
diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/server/server.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/server/server.go
index 9e59c3bee06..34da72a4e64 100644
--- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/server/server.go
+++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/server/server.go
@@ -135,6 +135,7 @@ func (s *server) stoppableContext(ctx context.Context) context.Context { func New(serve tfprotov5.ProviderServer) tfplugin5.ProviderServer { return &server{ downstream: serve, + stopCh: make(chan struct{}), } } diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/state.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/state.go index 4c0bc454fac..d66ed5c5dd6 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/state.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/state.go @@ -1,5 +1,25 @@ package tfprotov5 +import ( + "errors" + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes" +) + +// ErrUnknownRawStateType is returned when a RawState has no Flatmap or JSON +// bytes set. This should never be returned during the normal operation of a +// provider, and indicates one of the following: +// +// 1. terraform-plugin-go is out of sync with the protocol and should be +// updated. +// +// 2. terraform-plugin-go has a bug. +// +// 3. The `RawState` was generated or modified by something other than +// terraform-plugin-go and is no longer a valid value. +var ErrUnknownRawStateType = errors.New("RawState had no JSON or flatmap data set") + // RawState is the raw, undecoded state for providers to upgrade. 
It is // undecoded as Terraform, for whatever reason, doesn't have the previous // schema available to it, and so cannot decode the state itself and pushes @@ -13,3 +33,47 @@ type RawState struct { JSON []byte Flatmap map[string]string } + +// Unmarshal returns a `tftypes.Value` that represents the information +// contained in the RawState in an easy-to-interact-with way. It is the +// main purpose of the RawState type, and is how provider developers should +// obtain state values from the UpgradeResourceState RPC call. +// +// Pass in the type you want the `Value` to be interpreted as. Terraform's type +// system encodes in a lossy manner, meaning the type information is not +// preserved losslessly when going over the wire. Sets, lists, and tuples all +// look the same. Objects and maps all look the same, as well, as do +// user-specified values when DynamicPseudoType is used in the schema. +// Fortunately, the provider should already know the type; it should be the +// type of the schema, or DynamicPseudoType if that's what's in the schema. +// `Unmarshal` will then parse the value as though it belongs to that type, if +// possible, and return a `tftypes.Value` with the appropriate information. If +// the data can't be interpreted as that type, an error will be returned saying +// so. In these cases, double check to make sure the schema is declaring the +// same type being passed into `Unmarshal`. +// +// In the event an ErrUnknownRawStateType is returned, one of three things +// has happened: +// +// 1. terraform-plugin-go is out of date and out of sync with the protocol, and +// an issue should be opened on its repo to get it updated. +// +// 2. terraform-plugin-go has a bug somewhere, and an issue should be opened on +// its repo to get it fixed. +// +// 3. The provider or a dependency has modified the `RawState` in an +// unsupported way, or has created one from scratch, and should treat it as +// opaque and not modify it, only calling `Unmarshal` on `RawState`s received +// from RPC requests. +// +// State files written before Terraform 0.12 that haven't been upgraded yet +// cannot be unmarshaled, and must have their Flatmap property read directly. 
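+//
+// An illustrative call from an UpgradeResourceState implementation (the request
+// variable and the object type shown here are hypothetical, not part of this
+// patch):
+//
+//   val, err := req.RawState.Unmarshal(tftypes.Object{
+//       AttributeTypes: map[string]tftypes.Type{"id": tftypes.String},
+//   })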
+func (s RawState) Unmarshal(typ tftypes.Type) (tftypes.Value, error) { + if s.JSON != nil { + return jsonUnmarshal(s.JSON, typ, tftypes.AttributePath{}) + } + if s.Flatmap != nil { + return tftypes.Value{}, fmt.Errorf("flatmap states cannot be unmarshaled, only states written by Terraform 0.12 and higher can be unmarshaled") + } + return tftypes.Value{}, ErrUnknownRawStateType +} diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/value.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/value.go index 040405ea119..2b882d099d0 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/value.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes/value.go @@ -58,15 +58,17 @@ type Value struct { // // The builtin Value representations are: // -// * string for String +// * String: string, *string // -// * *big.Float for Number +// * Number: *big.Float, int64, *int64, int32, *int32, int16, *int16, int8, *int8, int, *int, +// uint64, *uint64, uint32, *uint32, uint16, *uint16, uint8, *uint8, byte, *byte, uint, *uint, +// float64, *float64, float32, *float32 // -// * bool for Bool +// * Bool: bool, *bool // -// * map[string]Value for Map and Object +// * Map and Object: map[string]Value // -// * []Value for Tuple, List, and Set +// * Tuple, List, and Set: []Value func NewValue(t Type, val interface{}) Value { if val == nil || val == UnknownValue { return Value{ @@ -81,7 +83,172 @@ func NewValue(t Type, val interface{}) Value { panic("error creating tftypes.Value: " + err.Error()) } } - switch val.(type) { + + switch val := val.(type) { + case *string: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + return Value{ + typ: t, + value: *val, + } + case *bool: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + return Value{ + typ: t, + value: *val, + } + case *uint: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + f := new(big.Float).SetUint64(uint64(*val)) + return Value{ + typ: t, + value: f, + } + case *uint64: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + f := new(big.Float).SetUint64(uint64(*val)) + return Value{ + typ: t, + value: f, + } + case *uint8: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + f := new(big.Float).SetInt64(int64(*val)) + return Value{ + typ: t, + value: f, + } + case *uint16: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + f := new(big.Float).SetInt64(int64(*val)) + return Value{ + typ: t, + value: f, + } + case *uint32: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + f := new(big.Float).SetInt64(int64(*val)) + return Value{ + typ: t, + value: f, + } + case *int: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + f := new(big.Float).SetInt64(int64(*val)) + return Value{ + typ: t, + value: f, + } + case *int8: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + f := new(big.Float).SetInt64(int64(*val)) + return Value{ + typ: t, + value: f, + } + case *int16: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + f := new(big.Float).SetInt64(int64(*val)) + return Value{ + typ: t, + value: f, + } + case *int32: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + f := new(big.Float).SetInt64(int64(*val)) + return Value{ + typ: t, + value: f, + } + case *int64: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + f := 
new(big.Float).SetInt64(*val) + return Value{ + typ: t, + value: f, + } + case *float32: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + return Value{ + typ: t, + value: big.NewFloat(float64(*val)), + } + case *float64: + if val == nil { + return Value{ + typ: t, + value: nil, + } + } + return Value{ + typ: t, + value: big.NewFloat(*val), + } case string, *big.Float, bool, map[string]Value, []Value: return Value{ typ: t, diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_sets.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_sets.go index e01d9ef5f53..ada1d6e7eca 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_sets.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_sets.go @@ -214,7 +214,7 @@ func testCheckTypeSetElemNestedAttrsInState(is *terraform.InstanceState, attrPar // a Set/List item with nested attrs would have a flatmap address of // at least length 3 // foo.0.name = "bar" - if len(stateKeyParts) < 3 { + if len(stateKeyParts) < 3 || len(attrParts) > len(stateKeyParts) { continue } var pathMatch bool diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go index 26e736bf5a0..f3df1bb4eea 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go @@ -1447,7 +1447,8 @@ func (m schemaMap) validate( if schema.Required { return append(diags, diag.Diagnostic{ Severity: diag.Error, - Summary: "Required attribute is not set", + Summary: "Missing required argument", + Detail: fmt.Sprintf("The argument %q is required, but no definition was found.", k), AttributePath: path, }) } @@ -1458,7 +1459,8 @@ func (m schemaMap) validate( // This is a computed-only field return append(diags, diag.Diagnostic{ Severity: diag.Error, - Summary: "Computed attribute cannot be set", + Summary: "Computed attributes cannot be set", + Detail: fmt.Sprintf("Computed attributes cannot be set, but a value was set for %q.", k), AttributePath: path, }) } diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go index 97353430aaf..c6084c41d3f 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go @@ -11,7 +11,7 @@ import ( ) // The main version number that is being run at the moment. -var SDKVersion = "2.4.0" +var SDKVersion = "2.4.1" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. 
Otherwise, this is a pre-release diff --git a/awsproviderlint/vendor/modules.txt b/awsproviderlint/vendor/modules.txt index 18a6f81bdae..a7795dbf411 100644 --- a/awsproviderlint/vendor/modules.txt +++ b/awsproviderlint/vendor/modules.txt @@ -263,14 +263,14 @@ github.com/hashicorp/terraform-exec/tfexec github.com/hashicorp/terraform-exec/tfinstall # github.com/hashicorp/terraform-json v0.8.0 github.com/hashicorp/terraform-json -# github.com/hashicorp/terraform-plugin-go v0.1.0 +# github.com/hashicorp/terraform-plugin-go v0.2.1 github.com/hashicorp/terraform-plugin-go/tfprotov5 github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5 github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto github.com/hashicorp/terraform-plugin-go/tfprotov5/server github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes -# github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.0 +# github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.1 ## explicit github.com/hashicorp/terraform-plugin-sdk/v2/diag github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging From 72637cc0a2b2e7d74322213ca2087e5f91c0ccbf Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 21 Jan 2021 10:35:18 -0500 Subject: [PATCH 0792/1212] lakeformation_permissions: Address multiple permissions --- ...ta_source_aws_lakeformation_permissions.go | 12 +++- aws/resource_aws_lakeformation_permissions.go | 59 ++++++++++++++++--- ...urce_aws_lakeformation_permissions_test.go | 13 +--- 3 files changed, 61 insertions(+), 23 deletions(-) diff --git a/aws/data_source_aws_lakeformation_permissions.go b/aws/data_source_aws_lakeformation_permissions.go index 7773b8a552e..395233c5d81 100644 --- a/aws/data_source_aws_lakeformation_permissions.go +++ b/aws/data_source_aws_lakeformation_permissions.go @@ -3,6 +3,7 @@ package aws import ( "fmt" "log" + "reflect" "time" "github.com/aws/aws-sdk-go/aws" @@ -176,7 +177,8 @@ func dataSourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta inte input.CatalogId = aws.String(v.(string)) } - input.Resource = expandLakeFormationResource(d, true) + input.Resource = expandLakeFormationResource(d, meta, true) + matchResource := expandLakeFormationResource(d, meta, false) log.Printf("[DEBUG] Reading Lake Formation permissions: %v", input) var principalResourcePermissions []*lakeformation.PrincipalResourcePermissions @@ -188,7 +190,9 @@ func dataSourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta inte continue } - principalResourcePermissions = append(principalResourcePermissions, permission) + if reflect.DeepEqual(matchResource, permission.Resource) { + principalResourcePermissions = append(principalResourcePermissions, permission) + } } return !lastPage }) @@ -209,7 +213,9 @@ func dataSourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta inte continue } - principalResourcePermissions = append(principalResourcePermissions, permission) + if reflect.DeepEqual(matchResource, permission.Resource) { + principalResourcePermissions = append(principalResourcePermissions, permission) + } } return !lastPage }) diff --git a/aws/resource_aws_lakeformation_permissions.go b/aws/resource_aws_lakeformation_permissions.go index ffea1211742..3e3d6c41b90 100644 --- a/aws/resource_aws_lakeformation_permissions.go +++ b/aws/resource_aws_lakeformation_permissions.go @@ -3,10 +3,12 @@ package aws import ( "fmt" "log" + "reflect" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lakeformation" + 
"github.com/aws/aws-sdk-go/service/sts" "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -192,7 +194,7 @@ func resourceAwsLakeFormationPermissionsCreate(d *schema.ResourceData, meta inte input.PermissionsWithGrantOption = expandStringList(v.([]interface{})) } - input.Resource = expandLakeFormationResource(d, false) + input.Resource = expandLakeFormationResource(d, meta, false) var output *lakeformation.GrantPermissionsOutput err := resource.Retry(2*time.Minute, func() *resource.RetryError { @@ -250,8 +252,8 @@ func resourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta interf input.CatalogId = aws.String(v.(string)) } - input.Resource = expandLakeFormationResource(d, true) - resourceType := expandLakeFormationResourceType(d) + input.Resource = expandLakeFormationResource(d, meta, true) + matchResource := expandLakeFormationResource(d, meta, false) log.Printf("[DEBUG] Reading Lake Formation permissions: %v", input) var principalResourcePermissions []*lakeformation.PrincipalResourcePermissions @@ -259,7 +261,13 @@ func resourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta interf err := resource.Retry(2*time.Minute, func() *resource.RetryError { err := conn.ListPermissionsPages(input, func(resp *lakeformation.ListPermissionsOutput, lastPage bool) bool { for _, permission := range resp.PrincipalResourcePermissions { - principalResourcePermissions = resourceAwsLakeFormationPermissionsAppend(resourceType, principalResourcePermissions, permission) + if permission == nil { + continue + } + + if reflect.DeepEqual(matchResource, permission.Resource) { + principalResourcePermissions = append(principalResourcePermissions, permission) + } } return !lastPage }) @@ -276,7 +284,13 @@ func resourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta interf if isResourceTimeoutError(err) { err = conn.ListPermissionsPages(input, func(resp *lakeformation.ListPermissionsOutput, lastPage bool) bool { for _, permission := range resp.PrincipalResourcePermissions { - principalResourcePermissions = resourceAwsLakeFormationPermissionsAppend(resourceType, principalResourcePermissions, permission) + if permission == nil { + continue + } + + if reflect.DeepEqual(matchResource, permission.Resource) { + principalResourcePermissions = append(principalResourcePermissions, permission) + } } return !lastPage }) @@ -293,8 +307,7 @@ func resourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta interf } if len(principalResourcePermissions) > 1 { - //return fmt.Errorf("error reading Lake Formation permissions: %s", "multiple permissions found") - return fmt.Errorf("error reading Lake Formation permissions: %v\nINPUT: %v", principalResourcePermissions, input) + return fmt.Errorf("error reading Lake Formation permissions: %s", "multiple permissions found for same resource") } for _, permissions := range principalResourcePermissions { @@ -350,7 +363,7 @@ func resourceAwsLakeFormationPermissionsDelete(d *schema.ResourceData, meta inte input.PermissionsWithGrantOption = expandStringList(v.([]interface{})) } - input.Resource = expandLakeFormationResource(d, false) + input.Resource = expandLakeFormationResource(d, meta, false) err := resource.Retry(2*time.Minute, func() *resource.RetryError { var err error @@ -379,6 +392,7 @@ func resourceAwsLakeFormationPermissionsDelete(d *schema.ResourceData, meta inte return nil } +/* func 
resourceAwsLakeFormationPermissionsAppend(resourceType string, perms []*lakeformation.PrincipalResourcePermissions, newPerm *lakeformation.PrincipalResourcePermissions) []*lakeformation.PrincipalResourcePermissions { if newPerm == nil { return perms @@ -409,6 +423,7 @@ func resourceAwsLakeFormationPermissionsAppend(resourceType string, perms []*lak return perms } +*/ // expandLakeFormationResourceType returns the Lake Formation resource type represented by the resource. // This is helpful in distinguishing between TABLE and TABLE_WITH_COLUMNS types when filtering ListPermission results. @@ -434,7 +449,7 @@ func expandLakeFormationResourceType(d *schema.ResourceData) string { const DataLakeResourceTypeTableWithColumns = "TABLE_WITH_COLUMNS" // no lakeformation package enum value for this type -func expandLakeFormationResource(d *schema.ResourceData, squashTableWithColumns bool) *lakeformation.Resource { +func expandLakeFormationResource(d *schema.ResourceData, meta interface{}, squashTableWithColumns bool) *lakeformation.Resource { res := &lakeformation.Resource{} switch expandLakeFormationResourceType(d) { @@ -442,23 +457,49 @@ func expandLakeFormationResource(d *schema.ResourceData, squashTableWithColumns res.Catalog = &lakeformation.CatalogResource{} case lakeformation.DataLakeResourceTypeDataLocation: res.DataLocation = expandLakeFormationDataLocationResource(d.Get("data_location").([]interface{})[0].(map[string]interface{})) + if res.DataLocation.CatalogId == nil { + res.DataLocation.SetCatalogId(resourceAwsLakeFormationPermissionsDefaultCatalogID(meta)) + } case lakeformation.DataLakeResourceTypeDatabase: res.Database = expandLakeFormationDatabaseResource(d.Get("database").([]interface{})[0].(map[string]interface{})) + if res.Database.CatalogId == nil { + res.Database.SetCatalogId(resourceAwsLakeFormationPermissionsDefaultCatalogID(meta)) + } case lakeformation.DataLakeResourceTypeTable: res.Table = expandLakeFormationTableResource(d.Get("table").([]interface{})[0].(map[string]interface{})) + if res.Table.CatalogId == nil { + res.Table.SetCatalogId(resourceAwsLakeFormationPermissionsDefaultCatalogID(meta)) + } case DataLakeResourceTypeTableWithColumns: if squashTableWithColumns { // ListPermissions does not support getting privileges by tables with columns. Instead, // use the table which will return both table and table with columns. 
res.Table = expandLakeFormationTableResource(d.Get("table_with_columns").([]interface{})[0].(map[string]interface{})) + if res.Table.CatalogId == nil { + res.Table.SetCatalogId(resourceAwsLakeFormationPermissionsDefaultCatalogID(meta)) + } } else { res.TableWithColumns = expandLakeFormationTableWithColumnsResource(d.Get("table_with_columns").([]interface{})[0].(map[string]interface{})) + if res.TableWithColumns.CatalogId == nil { + res.TableWithColumns.SetCatalogId(resourceAwsLakeFormationPermissionsDefaultCatalogID(meta)) + } } } return res } +func resourceAwsLakeFormationPermissionsDefaultCatalogID(meta interface{}) string { + stsClient := meta.(*AWSClient).stsconn + + res, err := stsClient.GetCallerIdentity(&sts.GetCallerIdentityInput{}) + if err != nil { + return "" + } + + return aws.StringValue(res.Account) +} + func expandLakeFormationDataLocationResource(tfMap map[string]interface{}) *lakeformation.DataLocationResource { if tfMap == nil { return nil diff --git a/aws/resource_aws_lakeformation_permissions_test.go b/aws/resource_aws_lakeformation_permissions_test.go index 367eaeeaf36..25db4a1ab59 100644 --- a/aws/resource_aws_lakeformation_permissions_test.go +++ b/aws/resource_aws_lakeformation_permissions_test.go @@ -648,7 +648,8 @@ resource "aws_glue_catalog_table" "test" { } resource "aws_lakeformation_data_lake_settings" "test" { - admins = [data.aws_caller_identity.current.arn, aws_iam_role.test.arn] + // this will result in multiple permissions for iam role + admins = [aws_iam_role.test.arn, data.aws_caller_identity.current.arn] } resource "aws_lakeformation_permissions" "test" { @@ -661,15 +662,5 @@ resource "aws_lakeformation_permissions" "test" { column_names = ["event", "timestamp"] } } - -resource "aws_lakeformation_permissions" "table" { - permissions = ["ALL"] - principal = aws_lakeformation_permissions.test.principal - - table { - database_name = aws_glue_catalog_table.test.database_name - name = aws_glue_catalog_table.test.name - } -} `, rName) } From 9015d6ca941097e4079d9ee497dbb57c5af883c5 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 21 Jan 2021 11:42:44 -0500 Subject: [PATCH 0793/1212] lakeformation_settings: Move admins to TypeSet --- ...source_aws_lakeformation_data_lake_settings.go | 2 +- ...source_aws_lakeformation_data_lake_settings.go | 15 ++++++++------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/aws/data_source_aws_lakeformation_data_lake_settings.go b/aws/data_source_aws_lakeformation_data_lake_settings.go index 27244797a9c..3d7f18f7b1b 100644 --- a/aws/data_source_aws_lakeformation_data_lake_settings.go +++ b/aws/data_source_aws_lakeformation_data_lake_settings.go @@ -17,7 +17,7 @@ func dataSourceAwsLakeFormationDataLakeSettings() *schema.Resource { Schema: map[string]*schema.Schema{ "admins": { - Type: schema.TypeList, + Type: schema.TypeSet, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, diff --git a/aws/resource_aws_lakeformation_data_lake_settings.go b/aws/resource_aws_lakeformation_data_lake_settings.go index 8e3c18d01f6..795a723ee74 100644 --- a/aws/resource_aws_lakeformation_data_lake_settings.go +++ b/aws/resource_aws_lakeformation_data_lake_settings.go @@ -26,7 +26,7 @@ func resourceAwsLakeFormationDataLakeSettings() *schema.Resource { Schema: map[string]*schema.Schema{ "admins": { - Type: schema.TypeList, + Type: schema.TypeSet, Computed: true, Optional: true, Elem: &schema.Schema{ @@ -122,7 +122,7 @@ func resourceAwsLakeFormationDataLakeSettingsCreate(d *schema.ResourceData, meta } if v, ok := 
d.GetOk("admins"); ok { - settings.DataLakeAdmins = expandDataLakeSettingsAdmins(v.([]interface{})) + settings.DataLakeAdmins = expandDataLakeSettingsAdmins(v.(*schema.Set)) } if v, ok := d.GetOk("trusted_resource_owners"); ok { @@ -282,7 +282,8 @@ func flattenDataLakeSettingsCreateDefaultPermission(apiObject *lakeformation.Pri return tfMap } -func expandDataLakeSettingsAdmins(tfSlice []interface{}) []*lakeformation.DataLakePrincipal { +func expandDataLakeSettingsAdmins(tfSet *schema.Set) []*lakeformation.DataLakePrincipal { + tfSlice := tfSet.List() apiObjects := make([]*lakeformation.DataLakePrincipal, 0, len(tfSlice)) for _, tfItem := range tfSlice { @@ -297,16 +298,16 @@ func expandDataLakeSettingsAdmins(tfSlice []interface{}) []*lakeformation.DataLa return apiObjects } -func flattenDataLakeSettingsAdmins(apiObjects []*lakeformation.DataLakePrincipal) []interface{} { +func flattenDataLakeSettingsAdmins(apiObjects []*lakeformation.DataLakePrincipal) *schema.Set { if apiObjects == nil { return nil } - tfSlice := make([]interface{}, 0, len(apiObjects)) + tfSlice := make([]*string, 0, len(apiObjects)) for _, apiObject := range apiObjects { - tfSlice = append(tfSlice, *apiObject.DataLakePrincipalIdentifier) + tfSlice = append(tfSlice, apiObject.DataLakePrincipalIdentifier) } - return tfSlice + return flattenStringSet(tfSlice) } From 2ad761d2043c3387c5adcb0ef6a8f03a0a2af2b0 Mon Sep 17 00:00:00 2001 From: angie pinilla Date: Thu, 21 Jan 2021 12:41:56 -0500 Subject: [PATCH 0794/1212] Update CHANGELOG for #17180 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f3fc825d6d2..64f7307e070 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,7 @@ BUG FIXES * provider: Prevent panic when sending Ctrl-C (SIGINT) to Terraform [GH-17211] * provider: Fix error messages for missing required blocks not including the block name [GH-17211] * resource/aws_api_gateway_authorizer: Ensure `authorizer_credentials` are configured when `type` is `COGNITO_USER_POOLS` [GH-16614] +* data-source/aws_lambda_function: Prevent error when getting Code Signing Config for container image based lambdas during read [GH-17180] * resource/aws_api_gateway_rest_api: Ensure `api_key_source`, `binary_media_types`, `description`, `minimum_compression_size`, `name`, and `policy` configuration values are correctly applied as an override after OpenAPI specification import (`body` argument) [GH-17099] * resource/aws_api_gateway_rest_api: Allow `api_key_source`, `binary_media_types`, and `description` arguments to be omitted from configuration with OpenAPI specification import (`body` argument) [GH-17099] * resource/aws_lambda_function: Prevent panic with missing `FunctionConfiguration` `PackageType` attribute in API response [GH-16544] From ec9b35807878b56c1e605f8adf85b5222a09382f Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 21 Jan 2021 12:58:17 -0500 Subject: [PATCH 0795/1212] lakeformation_permissions: Obviate sts need --- ...ta_source_aws_lakeformation_permissions.go | 4 +- aws/resource_aws_lakeformation_permissions.go | 120 ++++++------------ ...formation_data_lake_settings.html.markdown | 2 +- 3 files changed, 45 insertions(+), 81 deletions(-) diff --git a/aws/data_source_aws_lakeformation_permissions.go b/aws/data_source_aws_lakeformation_permissions.go index 395233c5d81..d5f3db1a7cf 100644 --- a/aws/data_source_aws_lakeformation_permissions.go +++ b/aws/data_source_aws_lakeformation_permissions.go @@ -177,8 +177,8 @@ func dataSourceAwsLakeFormationPermissionsRead(d 
*schema.ResourceData, meta inte input.CatalogId = aws.String(v.(string)) } - input.Resource = expandLakeFormationResource(d, meta, true) - matchResource := expandLakeFormationResource(d, meta, false) + input.Resource = expandLakeFormationResource(d, true) + matchResource := expandLakeFormationResource(d, false) log.Printf("[DEBUG] Reading Lake Formation permissions: %v", input) var principalResourcePermissions []*lakeformation.PrincipalResourcePermissions diff --git a/aws/resource_aws_lakeformation_permissions.go b/aws/resource_aws_lakeformation_permissions.go index 3e3d6c41b90..3a9aaf86145 100644 --- a/aws/resource_aws_lakeformation_permissions.go +++ b/aws/resource_aws_lakeformation_permissions.go @@ -8,7 +8,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lakeformation" - "github.com/aws/aws-sdk-go/service/sts" "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -36,10 +35,11 @@ func resourceAwsLakeFormationPermissions() *schema.Resource { Default: false, }, "data_location": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + ConflictsWith: []string{"database", "table", "table_with_columns"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "arn": { @@ -57,10 +57,11 @@ func resourceAwsLakeFormationPermissions() *schema.Resource { }, }, "database": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + ConflictsWith: []string{"data_location", "table", "table_with_columns"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "catalog_id": { @@ -103,10 +104,11 @@ func resourceAwsLakeFormationPermissions() *schema.Resource { ValidateFunc: validatePrincipal, }, "table": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + ConflictsWith: []string{"data_location", "database", "table_with_columns"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "catalog_id": { @@ -133,10 +135,11 @@ func resourceAwsLakeFormationPermissions() *schema.Resource { }, }, "table_with_columns": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + ConflictsWith: []string{"data_location", "database", "table"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "catalog_id": { @@ -194,7 +197,7 @@ func resourceAwsLakeFormationPermissionsCreate(d *schema.ResourceData, meta inte input.PermissionsWithGrantOption = expandStringList(v.([]interface{})) } - input.Resource = expandLakeFormationResource(d, meta, false) + input.Resource = expandLakeFormationResource(d, false) var output *lakeformation.GrantPermissionsOutput err := resource.Retry(2*time.Minute, func() *resource.RetryError { @@ -252,8 +255,8 @@ func resourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta interf input.CatalogId = aws.String(v.(string)) } - input.Resource = expandLakeFormationResource(d, meta, true) - matchResource := expandLakeFormationResource(d, meta, false) + input.Resource = expandLakeFormationResource(d, true) + matchResource := expandLakeFormationResource(d, false) log.Printf("[DEBUG] Reading Lake Formation permissions: %v", input) var 
principalResourcePermissions []*lakeformation.PrincipalResourcePermissions @@ -265,7 +268,7 @@ func resourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta interf continue } - if reflect.DeepEqual(matchResource, permission.Resource) { + if resourceAwsLakeFormationPermissionsCompareResource(*matchResource, *permission.Resource) { principalResourcePermissions = append(principalResourcePermissions, permission) } } @@ -288,7 +291,7 @@ func resourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta interf continue } - if reflect.DeepEqual(matchResource, permission.Resource) { + if resourceAwsLakeFormationPermissionsCompareResource(*matchResource, *permission.Resource) { principalResourcePermissions = append(principalResourcePermissions, permission) } } @@ -363,7 +366,7 @@ func resourceAwsLakeFormationPermissionsDelete(d *schema.ResourceData, meta inte input.PermissionsWithGrantOption = expandStringList(v.([]interface{})) } - input.Resource = expandLakeFormationResource(d, meta, false) + input.Resource = expandLakeFormationResource(d, false) err := resource.Retry(2*time.Minute, func() *resource.RetryError { var err error @@ -392,43 +395,30 @@ func resourceAwsLakeFormationPermissionsDelete(d *schema.ResourceData, meta inte return nil } -/* -func resourceAwsLakeFormationPermissionsAppend(resourceType string, perms []*lakeformation.PrincipalResourcePermissions, newPerm *lakeformation.PrincipalResourcePermissions) []*lakeformation.PrincipalResourcePermissions { - if newPerm == nil { - return perms +func resourceAwsLakeFormationPermissionsCompareResource(in, out lakeformation.Resource) bool { + if in.DataLocation != nil && out.DataLocation != nil && in.DataLocation.CatalogId == nil { + out.DataLocation.CatalogId = nil } - switch resourceType { - case lakeformation.DataLakeResourceTypeCatalog: - if newPerm.Resource.Catalog != nil { - perms = append(perms, newPerm) - } - case lakeformation.DataLakeResourceTypeDataLocation: - if newPerm.Resource.DataLocation != nil { - perms = append(perms, newPerm) - } - case lakeformation.DataLakeResourceTypeDatabase: - if newPerm.Resource.Database != nil { - perms = append(perms, newPerm) - } - case lakeformation.DataLakeResourceTypeTable: - if newPerm.Resource.Table != nil { - perms = append(perms, newPerm) - } - case DataLakeResourceTypeTableWithColumns: - if newPerm.Resource.TableWithColumns != nil { - perms = append(perms, newPerm) - } + if in.Database != nil && out.Database != nil && in.Database.CatalogId == nil { + out.Database.CatalogId = nil + } + + if in.Table != nil && out.Table != nil && in.Table.CatalogId == nil { + out.Table.CatalogId = nil } - return perms + if in.TableWithColumns != nil && out.TableWithColumns != nil && in.TableWithColumns.CatalogId == nil { + out.TableWithColumns.CatalogId = nil + } + + return reflect.DeepEqual(in, out) } -*/ // expandLakeFormationResourceType returns the Lake Formation resource type represented by the resource. // This is helpful in distinguishing between TABLE and TABLE_WITH_COLUMNS types when filtering ListPermission results. 
func expandLakeFormationResourceType(d *schema.ResourceData) string { - if v, ok := d.GetOk("catalog_resource"); ok && v.(bool) { + if v, ok := d.GetOk("catalog_resource"); ok && v.(bool) == true { return lakeformation.DataLakeResourceTypeCatalog } @@ -449,7 +439,7 @@ func expandLakeFormationResourceType(d *schema.ResourceData) string { const DataLakeResourceTypeTableWithColumns = "TABLE_WITH_COLUMNS" // no lakeformation package enum value for this type -func expandLakeFormationResource(d *schema.ResourceData, meta interface{}, squashTableWithColumns bool) *lakeformation.Resource { +func expandLakeFormationResource(d *schema.ResourceData, squashTableWithColumns bool) *lakeformation.Resource { res := &lakeformation.Resource{} switch expandLakeFormationResourceType(d) { @@ -457,49 +447,23 @@ func expandLakeFormationResource(d *schema.ResourceData, meta interface{}, squas res.Catalog = &lakeformation.CatalogResource{} case lakeformation.DataLakeResourceTypeDataLocation: res.DataLocation = expandLakeFormationDataLocationResource(d.Get("data_location").([]interface{})[0].(map[string]interface{})) - if res.DataLocation.CatalogId == nil { - res.DataLocation.SetCatalogId(resourceAwsLakeFormationPermissionsDefaultCatalogID(meta)) - } case lakeformation.DataLakeResourceTypeDatabase: res.Database = expandLakeFormationDatabaseResource(d.Get("database").([]interface{})[0].(map[string]interface{})) - if res.Database.CatalogId == nil { - res.Database.SetCatalogId(resourceAwsLakeFormationPermissionsDefaultCatalogID(meta)) - } case lakeformation.DataLakeResourceTypeTable: res.Table = expandLakeFormationTableResource(d.Get("table").([]interface{})[0].(map[string]interface{})) - if res.Table.CatalogId == nil { - res.Table.SetCatalogId(resourceAwsLakeFormationPermissionsDefaultCatalogID(meta)) - } case DataLakeResourceTypeTableWithColumns: if squashTableWithColumns { // ListPermissions does not support getting privileges by tables with columns. Instead, // use the table which will return both table and table with columns. res.Table = expandLakeFormationTableResource(d.Get("table_with_columns").([]interface{})[0].(map[string]interface{})) - if res.Table.CatalogId == nil { - res.Table.SetCatalogId(resourceAwsLakeFormationPermissionsDefaultCatalogID(meta)) - } } else { res.TableWithColumns = expandLakeFormationTableWithColumnsResource(d.Get("table_with_columns").([]interface{})[0].(map[string]interface{})) - if res.TableWithColumns.CatalogId == nil { - res.TableWithColumns.SetCatalogId(resourceAwsLakeFormationPermissionsDefaultCatalogID(meta)) - } } } return res } -func resourceAwsLakeFormationPermissionsDefaultCatalogID(meta interface{}) string { - stsClient := meta.(*AWSClient).stsconn - - res, err := stsClient.GetCallerIdentity(&sts.GetCallerIdentityInput{}) - if err != nil { - return "" - } - - return aws.StringValue(res.Account) -} - func expandLakeFormationDataLocationResource(tfMap map[string]interface{}) *lakeformation.DataLocationResource { if tfMap == nil { return nil diff --git a/website/docs/r/lakeformation_data_lake_settings.html.markdown b/website/docs/r/lakeformation_data_lake_settings.html.markdown index 0b4123bc10e..768efe4400d 100644 --- a/website/docs/r/lakeformation_data_lake_settings.html.markdown +++ b/website/docs/r/lakeformation_data_lake_settings.html.markdown @@ -44,7 +44,7 @@ resource "aws_lakeformation_data_lake_settings" "example" { The following arguments are optional: -* `admins` – (Optional) List of ARNs of AWS Lake Formation principals (IAM users or roles). 
+* `admins` – (Optional) Set of ARNs of AWS Lake Formation principals (IAM users or roles). * `catalog_id` – (Optional) Identifier for the Data Catalog. By default, the account ID. * `create_database_default_permissions` - (Optional) Up to three configuration blocks of principal permissions for default create database permissions. Detailed below. * `create_table_default_permissions` - (Optional) Up to three configuration blocks of principal permissions for default create table permissions. Detailed below. From 135d2f0ccadf8765606182e66b6f0a4c68bca786 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 21 Jan 2021 13:16:21 -0500 Subject: [PATCH 0796/1212] lakeformation_permissions: Fix data source issue --- aws/data_source_aws_lakeformation_permissions.go | 5 ++--- aws/resource_aws_lakeformation_permissions.go | 8 ++++---- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/aws/data_source_aws_lakeformation_permissions.go b/aws/data_source_aws_lakeformation_permissions.go index d5f3db1a7cf..71fb02d8721 100644 --- a/aws/data_source_aws_lakeformation_permissions.go +++ b/aws/data_source_aws_lakeformation_permissions.go @@ -3,7 +3,6 @@ package aws import ( "fmt" "log" - "reflect" "time" "github.com/aws/aws-sdk-go/aws" @@ -190,7 +189,7 @@ func dataSourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta inte continue } - if reflect.DeepEqual(matchResource, permission.Resource) { + if resourceAwsLakeFormationPermissionsCompareResource(*matchResource, *permission.Resource) { principalResourcePermissions = append(principalResourcePermissions, permission) } } @@ -213,7 +212,7 @@ func dataSourceAwsLakeFormationPermissionsRead(d *schema.ResourceData, meta inte continue } - if reflect.DeepEqual(matchResource, permission.Resource) { + if resourceAwsLakeFormationPermissionsCompareResource(*matchResource, *permission.Resource) { principalResourcePermissions = append(principalResourcePermissions, permission) } } diff --git a/aws/resource_aws_lakeformation_permissions.go b/aws/resource_aws_lakeformation_permissions.go index 3a9aaf86145..a9fdc513407 100644 --- a/aws/resource_aws_lakeformation_permissions.go +++ b/aws/resource_aws_lakeformation_permissions.go @@ -397,19 +397,19 @@ func resourceAwsLakeFormationPermissionsDelete(d *schema.ResourceData, meta inte func resourceAwsLakeFormationPermissionsCompareResource(in, out lakeformation.Resource) bool { if in.DataLocation != nil && out.DataLocation != nil && in.DataLocation.CatalogId == nil { - out.DataLocation.CatalogId = nil + in.DataLocation.CatalogId = out.DataLocation.CatalogId } if in.Database != nil && out.Database != nil && in.Database.CatalogId == nil { - out.Database.CatalogId = nil + in.Database.CatalogId = out.Database.CatalogId } if in.Table != nil && out.Table != nil && in.Table.CatalogId == nil { - out.Table.CatalogId = nil + in.Table.CatalogId = out.Table.CatalogId } if in.TableWithColumns != nil && out.TableWithColumns != nil && in.TableWithColumns.CatalogId == nil { - out.TableWithColumns.CatalogId = nil + in.TableWithColumns.CatalogId = out.TableWithColumns.CatalogId } return reflect.DeepEqual(in, out) From 9b7f8696bfb1419370ff534e8dbc2db54c073bf6 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 21 Jan 2021 13:26:58 -0500 Subject: [PATCH 0797/1212] lakeformation_data_lake_settings: Switch to interface --- aws/resource_aws_lakeformation_data_lake_settings.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_lakeformation_data_lake_settings.go 
b/aws/resource_aws_lakeformation_data_lake_settings.go index 795a723ee74..9ba78871847 100644 --- a/aws/resource_aws_lakeformation_data_lake_settings.go +++ b/aws/resource_aws_lakeformation_data_lake_settings.go @@ -298,16 +298,16 @@ func expandDataLakeSettingsAdmins(tfSet *schema.Set) []*lakeformation.DataLakePr return apiObjects } -func flattenDataLakeSettingsAdmins(apiObjects []*lakeformation.DataLakePrincipal) *schema.Set { +func flattenDataLakeSettingsAdmins(apiObjects []*lakeformation.DataLakePrincipal) []interface{} { if apiObjects == nil { return nil } - tfSlice := make([]*string, 0, len(apiObjects)) + tfSlice := make([]interface{}, 0, len(apiObjects)) for _, apiObject := range apiObjects { - tfSlice = append(tfSlice, apiObject.DataLakePrincipalIdentifier) + tfSlice = append(tfSlice, *apiObject.DataLakePrincipalIdentifier) } - return flattenStringSet(tfSlice) + return tfSlice } From f6205278da28dd85667fbd38ae2b08120260a263 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 21 Jan 2021 13:41:56 -0500 Subject: [PATCH 0798/1212] lakeformation_permissions: Fix lint issue --- aws/resource_aws_lakeformation_permissions.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_lakeformation_permissions.go b/aws/resource_aws_lakeformation_permissions.go index a9fdc513407..beeba8b41e3 100644 --- a/aws/resource_aws_lakeformation_permissions.go +++ b/aws/resource_aws_lakeformation_permissions.go @@ -418,7 +418,7 @@ func resourceAwsLakeFormationPermissionsCompareResource(in, out lakeformation.Re // expandLakeFormationResourceType returns the Lake Formation resource type represented by the resource. // This is helpful in distinguishing between TABLE and TABLE_WITH_COLUMNS types when filtering ListPermission results. func expandLakeFormationResourceType(d *schema.ResourceData) string { - if v, ok := d.GetOk("catalog_resource"); ok && v.(bool) == true { + if d.Get("catalog_resource").(bool) { return lakeformation.DataLakeResourceTypeCatalog } From bfda64cc876b992a1a5a104f644f48eb4bddab97 Mon Sep 17 00:00:00 2001 From: Peter Lavigne <31020403+Peter-Lavigne@users.noreply.github.com> Date: Thu, 21 Jan 2021 14:30:57 -0500 Subject: [PATCH 0799/1212] Update elasticsearch argument reference --- website/docs/r/elasticsearch_domain.html.markdown | 1 + 1 file changed, 1 insertion(+) diff --git a/website/docs/r/elasticsearch_domain.html.markdown b/website/docs/r/elasticsearch_domain.html.markdown index ddaca8b3f36..b8cdc88a62a 100644 --- a/website/docs/r/elasticsearch_domain.html.markdown +++ b/website/docs/r/elasticsearch_domain.html.markdown @@ -223,6 +223,7 @@ The following arguments are supported: * `snapshot_options` - (Optional) Snapshot related options, see below. * `vpc_options` - (Optional) VPC related options, see below. Adding or removing this configuration forces a new resource ([documentation](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html#es-vpc-limitations)). * `log_publishing_options` - (Optional) Options for publishing slow and application logs to CloudWatch Logs. This block can be declared multiple times, for each log_type, within the same resource. +* `cognito_options` - (Optional) Options for authenticating Kibana with Cognito. See below. * `elasticsearch_version` - (Optional) The version of Elasticsearch to deploy. Defaults to `1.5` * `domain_endpoint_options` - (Optional) Domain endpoint HTTP(S) related options. See below. 
* `tags` - (Optional) A map of tags to assign to the resource From 8dfc01907eb7e12bf791f42d3e27aa684aa7e4d0 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 21 Jan 2021 14:53:58 -0500 Subject: [PATCH 0800/1212] resource/aws_api_gateway_rest_api: Fix disable_execute_api_endpoint and endpoint_configuration vpc_endpoint_ids handling with OpenAPI specification import (body argument) (#17209) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/13841 Before code updates, these new acceptance tests show how the Terraform configuration value would not be applied if an OpenAPI specification was imported: ``` === CONT TestAccAWSAPIGatewayRestApi_DisableExecuteApiEndpoint_OverrideBody resource_aws_api_gateway_rest_api_test.go:847: Step 1/4 error: Check failed: 1 error occurred: * Check 2/2 error: aws_api_gateway_rest_api.test: Attribute 'disable_execute_api_endpoint' expected "false", got "true" --- FAIL: TestAccAWSAPIGatewayRestApi_DisableExecuteApiEndpoint_OverrideBody (10.30s) === CONT TestAccAWSAPIGatewayRestApi_EndpointConfiguration_VpcEndpointIds_OverrideBody resource_aws_api_gateway_rest_api_test.go:369: Step 1/2 error: After applying this test step and performing a `terraform refresh`, the plan was not empty. stdout An execution plan has been generated and is shown below. Resource actions are indicated with the following symbols: ~ update in-place Terraform will perform the following actions: # aws_api_gateway_rest_api.test will be updated in-place ~ resource "aws_api_gateway_rest_api" "test" { id = "m9ajz6izjl" name = "tf-acc-test-6139822644948363723" tags = {} # (9 unchanged attributes hidden) ~ endpoint_configuration { ~ vpc_endpoint_ids = [ + "vpce-0ba0b61be45886a6f", - "vpce-0ee1a2ccd6af8f011", ] # (1 unchanged attribute hidden) } } Plan: 0 to add, 1 to change, 0 to destroy. --- FAIL: TestAccAWSAPIGatewayRestApi_EndpointConfiguration_VpcEndpointIds_OverrideBody (190.95s) ``` Before code updates, these new acceptance tests show how the Terraform resource would report an unexpected difference for missing configurations that were imported by the OpenAPI specification: ``` === CONT TestAccAWSAPIGatewayRestApi_DisableExecuteApiEndpoint_SetByBody resource_aws_api_gateway_rest_api_test.go:890: Step 1/2 error: After applying this test step, the plan was not empty. stdout: An execution plan has been generated and is shown below. Resource actions are indicated with the following symbols: ~ update in-place Terraform will perform the following actions: # aws_api_gateway_rest_api.test will be updated in-place ~ resource "aws_api_gateway_rest_api" "test" { ~ disable_execute_api_endpoint = true -> false id = "c2t6iky152" name = "tf-acc-test-8877194198775672292" # (8 unchanged attributes hidden) # (1 unchanged block hidden) } Plan: 0 to add, 1 to change, 0 to destroy. --- FAIL: TestAccAWSAPIGatewayRestApi_DisableExecuteApiEndpoint_SetByBody (11.03s) === CONT TestAccAWSAPIGatewayRestApi_EndpointConfiguration_VpcEndpointIds_SetByBody resource_aws_api_gateway_rest_api_test.go:415: Step 1/2 error: After applying this test step, the plan was not empty. stdout: An execution plan has been generated and is shown below. 
Resource actions are indicated with the following symbols: ~ update in-place Terraform will perform the following actions: # aws_api_gateway_rest_api.test will be updated in-place ~ resource "aws_api_gateway_rest_api" "test" { id = "adeg2zcky2" name = "tf-acc-test-7848512816247428885" # (9 unchanged attributes hidden) ~ endpoint_configuration { ~ vpc_endpoint_ids = [ - "vpce-0653ca468e4c6ba4a", ] # (1 unchanged attribute hidden) } } Plan: 0 to add, 1 to change, 0 to destroy. --- FAIL: TestAccAWSAPIGatewayRestApi_EndpointConfiguration_VpcEndpointIds_SetByBody (150.67s) ``` Output from acceptance testing: ``` --- PASS: TestAccAWSAPIGatewayRestApi_ApiKeySource (30.53s) --- PASS: TestAccAWSAPIGatewayRestApi_ApiKeySource_OverrideBody (37.78s) --- PASS: TestAccAWSAPIGatewayRestApi_ApiKeySource_SetByBody (15.25s) --- PASS: TestAccAWSAPIGatewayRestApi_basic (669.92s) --- PASS: TestAccAWSAPIGatewayRestApi_BinaryMediaTypes (34.56s) --- PASS: TestAccAWSAPIGatewayRestApi_BinaryMediaTypes_OverrideBody (34.23s) --- PASS: TestAccAWSAPIGatewayRestApi_BinaryMediaTypes_SetByBody (514.96s) --- PASS: TestAccAWSAPIGatewayRestApi_Body (38.01s) --- PASS: TestAccAWSAPIGatewayRestApi_Description (23.87s) --- PASS: TestAccAWSAPIGatewayRestApi_Description_OverrideBody (60.58s) --- PASS: TestAccAWSAPIGatewayRestApi_Description_SetByBody (92.79s) --- PASS: TestAccAWSAPIGatewayRestApi_DisableExecuteApiEndpoint (3442.94s) --- PASS: TestAccAWSAPIGatewayRestApi_DisableExecuteApiEndpoint_OverrideBody (2292.47s) --- PASS: TestAccAWSAPIGatewayRestApi_DisableExecuteApiEndpoint_SetByBody (1083.96s) --- PASS: TestAccAWSAPIGatewayRestApi_disappears (529.45s) --- PASS: TestAccAWSAPIGatewayRestApi_EndpointConfiguration (1034.47s) --- PASS: TestAccAWSAPIGatewayRestApi_EndpointConfiguration_Private (15.42s) --- PASS: TestAccAWSAPIGatewayRestApi_EndpointConfiguration_VpcEndpointIds (330.11s) --- PASS: TestAccAWSAPIGatewayRestApi_EndpointConfiguration_VpcEndpointIds_OverrideBody (205.73s) --- PASS: TestAccAWSAPIGatewayRestApi_EndpointConfiguration_VpcEndpointIds_SetByBody (159.85s) --- PASS: TestAccAWSAPIGatewayRestApi_MinimumCompressionSize (39.32s) --- PASS: TestAccAWSAPIGatewayRestApi_MinimumCompressionSize_OverrideBody (1278.64s) --- PASS: TestAccAWSAPIGatewayRestApi_MinimumCompressionSize_SetByBody (753.00s) --- PASS: TestAccAWSAPIGatewayRestApi_Name_OverrideBody (2212.45s) --- PASS: TestAccAWSAPIGatewayRestApi_Parameters (948.67s) --- PASS: TestAccAWSAPIGatewayRestApi_Policy (28.00s) --- PASS: TestAccAWSAPIGatewayRestApi_Policy_OverrideBody (80.78s) --- PASS: TestAccAWSAPIGatewayRestApi_Policy_SetByBody (1967.74s) --- PASS: TestAccAWSAPIGatewayRestApi_tags (1413.12s) ``` --- ...ta_source_aws_api_gateway_rest_api_test.go | 5 +- aws/resource_aws_api_gateway_rest_api.go | 67 +- aws/resource_aws_api_gateway_rest_api_test.go | 642 ++++++++++++++---- .../docs/r/api_gateway_rest_api.html.markdown | 6 +- 4 files changed, 568 insertions(+), 152 deletions(-) diff --git a/aws/data_source_aws_api_gateway_rest_api_test.go b/aws/data_source_aws_api_gateway_rest_api_test.go index 71684c7e072..3489e700e5d 100644 --- a/aws/data_source_aws_api_gateway_rest_api_test.go +++ b/aws/data_source_aws_api_gateway_rest_api_test.go @@ -48,7 +48,7 @@ func TestAccDataSourceAwsApiGatewayRestApi_EndpointConfiguration_VpcEndpointIds( Steps: []resource.TestStep{ { Config: composeConfig( - testAccAWSAPIGatewayRestAPIConfig_VPCEndpointConfiguration(rName), + testAccAWSAPIGatewayRestAPIConfigEndpointConfigurationVpcEndpointIds1(rName), 
testAccDataSourceAwsApiGatewayRestApiConfigName(), ), Check: resource.ComposeTestCheckFunc( @@ -61,7 +61,8 @@ func TestAccDataSourceAwsApiGatewayRestApi_EndpointConfiguration_VpcEndpointIds( resource.TestCheckResourceAttrPair(dataSourceName, "api_key_source", resourceName, "api_key_source"), resource.TestCheckResourceAttrPair(dataSourceName, "minimum_compression_size", resourceName, "minimum_compression_size"), resource.TestCheckResourceAttrPair(dataSourceName, "binary_media_types", resourceName, "binary_media_types"), - resource.TestCheckResourceAttrPair(dataSourceName, "endpoint_configuration", resourceName, "endpoint_configuration"), + resource.TestCheckResourceAttrPair(dataSourceName, "endpoint_configuration.#", resourceName, "endpoint_configuration.#"), + resource.TestCheckResourceAttrPair(dataSourceName, "endpoint_configuration.0.vpc_endpoint_ids.#", resourceName, "endpoint_configuration.0.vpc_endpoint_ids.#"), resource.TestCheckResourceAttrPair(dataSourceName, "execution_arn", resourceName, "execution_arn"), ), }, diff --git a/aws/resource_aws_api_gateway_rest_api.go b/aws/resource_aws_api_gateway_rest_api.go index 72c3f0355cb..f48ef8551b9 100644 --- a/aws/resource_aws_api_gateway_rest_api.go +++ b/aws/resource_aws_api_gateway_rest_api.go @@ -67,7 +67,7 @@ func resourceAwsApiGatewayRestApi() *schema.Resource { "disable_execute_api_endpoint": { Type: schema.TypeBool, Optional: true, - Default: false, + Computed: true, }, "parameters": { @@ -122,6 +122,7 @@ func resourceAwsApiGatewayRestApi() *schema.Resource { "vpc_endpoint_ids": { Type: schema.TypeSet, Optional: true, + Computed: true, MinItems: 1, Elem: &schema.Schema{ Type: schema.TypeString, @@ -250,6 +251,38 @@ func resourceAwsApiGatewayRestApiCreate(d *schema.ResourceData, meta interface{} }) } + if v, ok := d.GetOk("disable_execute_api_endpoint"); ok && v.(bool) != aws.BoolValue(output.DisableExecuteApiEndpoint) { + updateInput.PatchOperations = append(updateInput.PatchOperations, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpReplace), + Path: aws.String("/disableExecuteApiEndpoint"), + Value: aws.String(strconv.FormatBool(v.(bool))), + }) + } + + if v, ok := d.GetOk("endpoint_configuration"); ok { + endpointConfiguration := expandApiGatewayEndpointConfiguration(v.([]interface{})) + + if endpointConfiguration != nil && len(endpointConfiguration.VpcEndpointIds) > 0 { + if output.EndpointConfiguration != nil { + for _, elem := range output.EndpointConfiguration.VpcEndpointIds { + updateInput.PatchOperations = append(updateInput.PatchOperations, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpRemove), + Path: aws.String("/endpointConfiguration/vpcEndpointIds"), + Value: elem, + }) + } + } + + for _, elem := range endpointConfiguration.VpcEndpointIds { + updateInput.PatchOperations = append(updateInput.PatchOperations, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpAdd), + Path: aws.String("/endpointConfiguration/vpcEndpointIds"), + Value: elem, + }) + } + } + } + if v := d.Get("minimum_compression_size").(int); v > -1 && int64(v) != aws.Int64Value(output.MinimumCompressionSize) { updateInput.PatchOperations = append(updateInput.PatchOperations, &apigateway.PatchOperation{ Op: aws.String(apigateway.OpReplace), @@ -577,6 +610,38 @@ func resourceAwsApiGatewayRestApiUpdate(d *schema.ResourceData, meta interface{} }) } + if v, ok := d.GetOk("disable_execute_api_endpoint"); ok && v.(bool) != aws.BoolValue(output.DisableExecuteApiEndpoint) { + updateInput.PatchOperations = 
append(updateInput.PatchOperations, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpReplace), + Path: aws.String("/disableExecuteApiEndpoint"), + Value: aws.String(strconv.FormatBool(v.(bool))), + }) + } + + if v, ok := d.GetOk("endpoint_configuration"); ok { + endpointConfiguration := expandApiGatewayEndpointConfiguration(v.([]interface{})) + + if endpointConfiguration != nil && len(endpointConfiguration.VpcEndpointIds) > 0 { + if output.EndpointConfiguration != nil { + for _, elem := range output.EndpointConfiguration.VpcEndpointIds { + updateInput.PatchOperations = append(updateInput.PatchOperations, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpRemove), + Path: aws.String("/endpointConfiguration/vpcEndpointIds"), + Value: elem, + }) + } + } + + for _, elem := range endpointConfiguration.VpcEndpointIds { + updateInput.PatchOperations = append(updateInput.PatchOperations, &apigateway.PatchOperation{ + Op: aws.String(apigateway.OpAdd), + Path: aws.String("/endpointConfiguration/vpcEndpointIds"), + Value: elem, + }) + } + } + } + if v := d.Get("minimum_compression_size").(int); v > -1 && int64(v) != aws.Int64Value(output.MinimumCompressionSize) { updateInput.PatchOperations = append(updateInput.PatchOperations, &apigateway.PatchOperation{ Op: aws.String(apigateway.OpReplace), diff --git a/aws/resource_aws_api_gateway_rest_api_test.go b/aws/resource_aws_api_gateway_rest_api_test.go index ab157922eb7..2efa40cd621 100644 --- a/aws/resource_aws_api_gateway_rest_api_test.go +++ b/aws/resource_aws_api_gateway_rest_api_test.go @@ -302,80 +302,6 @@ func TestAccAWSAPIGatewayRestApi_EndpointConfiguration_Private(t *testing.T) { }) } -func TestAccAWSAPIGatewayRestApi_EndpointConfiguration_VPCEndpoint(t *testing.T) { - var restApi apigateway.RestApi - rName := acctest.RandomWithPrefix("tf-acc-test") - resourceName := "aws_api_gateway_rest_api.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, - Steps: []resource.TestStep{ - { - PreConfig: func() { - // Ensure region supports PRIVATE endpoint - // This can eventually be moved to a PreCheck function - conn := testAccProvider.Meta().(*AWSClient).apigatewayconn - output, err := conn.CreateRestApi(&apigateway.CreateRestApiInput{ - Name: aws.String(acctest.RandomWithPrefix("tf-acc-test-private-endpoint-precheck")), - EndpointConfiguration: &apigateway.EndpointConfiguration{ - Types: []*string{aws.String("PRIVATE")}, - }, - }) - if err != nil { - if isAWSErr(err, apigateway.ErrCodeBadRequestException, "Endpoint Configuration type PRIVATE is not supported in this region") { - t.Skip("Region does not support PRIVATE endpoint type") - } - t.Fatal(err) - } - - // Be kind and rewind. 
:) - _, err = conn.DeleteRestApi(&apigateway.DeleteRestApiInput{ - RestApiId: output.Id, - }) - if err != nil { - t.Fatal(err) - } - }, - Config: testAccAWSAPIGatewayRestAPIConfig_VPCEndpointConfiguration(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &restApi), - resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.0.types.#", "1"), - resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.0.types.0", "PRIVATE"), - resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.0.vpc_endpoint_ids.#", "1"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccAWSAPIGatewayRestAPIConfig_VPCEndpointConfiguration2(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &restApi), - resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.0.types.#", "1"), - resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.0.types.0", "PRIVATE"), - resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.0.vpc_endpoint_ids.#", "2"), - ), - }, - { - Config: testAccAWSAPIGatewayRestAPIConfig_VPCEndpointConfiguration(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &restApi), - resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.0.types.#", "1"), - resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.0.types.0", "PRIVATE"), - resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.0.vpc_endpoint_ids.#", "1"), - ), - }, - }, - }) -} - func TestAccAWSAPIGatewayRestApi_ApiKeySource(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_api_gateway_rest_api.test" @@ -749,7 +675,7 @@ func TestAccAWSAPIGatewayRestApi_DisableExecuteApiEndpoint(t *testing.T) { CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSAPIGatewayRestAPIConfig_DisableExecuteApiEndpoint(rName, false), + Config: testAccAWSAPIGatewayRestAPIConfigDisableExecuteApiEndpoint(rName, false), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "disable_execute_api_endpoint", `false`), ), @@ -760,13 +686,13 @@ func TestAccAWSAPIGatewayRestApi_DisableExecuteApiEndpoint(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAWSAPIGatewayRestAPIConfig_DisableExecuteApiEndpoint(rName, true), + Config: testAccAWSAPIGatewayRestAPIConfigDisableExecuteApiEndpoint(rName, true), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "disable_execute_api_endpoint", `true`), ), }, { - Config: testAccAWSAPIGatewayRestAPIConfig_DisableExecuteApiEndpoint(rName, false), + Config: testAccAWSAPIGatewayRestAPIConfigDisableExecuteApiEndpoint(rName, false), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "disable_execute_api_endpoint", `false`), ), @@ -775,6 +701,214 @@ func TestAccAWSAPIGatewayRestApi_DisableExecuteApiEndpoint(t *testing.T) { }) } +func TestAccAWSAPIGatewayRestApi_DisableExecuteApiEndpoint_OverrideBody(t *testing.T) { + var conf apigateway.RestApi + rName := acctest.RandomWithPrefix("tf-acc-test") + 
resourceName := "aws_api_gateway_rest_api.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccAPIGatewayTypeEDGEPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayRestAPIConfigDisableExecuteApiEndpointOverrideBody(rName, true, false), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "disable_execute_api_endpoint", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"body"}, + }, + // Verify override can be unset (only for body set to false) + { + Config: testAccAWSAPIGatewayRestAPIConfigDisableExecuteApiEndpointOverrideBody(rName, false, false), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "disable_execute_api_endpoint", "false"), + ), + }, + // Verify override can be reset + { + Config: testAccAWSAPIGatewayRestAPIConfigDisableExecuteApiEndpointOverrideBody(rName, true, false), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "disable_execute_api_endpoint", "true"), + ), + }, + }, + }) +} + +func TestAccAWSAPIGatewayRestApi_DisableExecuteApiEndpoint_SetByBody(t *testing.T) { + var conf apigateway.RestApi + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_api_gateway_rest_api.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccAPIGatewayTypeEDGEPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayRestAPIConfigDisableExecuteApiEndpointSetByBody(rName, true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "disable_execute_api_endpoint", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"body"}, + }, + }, + }) +} + +func TestAccAWSAPIGatewayRestApi_EndpointConfiguration_VpcEndpointIds(t *testing.T) { + var restApi apigateway.RestApi + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_api_gateway_rest_api.test" + vpcEndpointResourceName1 := "aws_vpc_endpoint.test" + vpcEndpointResourceName2 := "aws_vpc_endpoint.test2" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayRestAPIConfigEndpointConfigurationVpcEndpointIds1(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &restApi), + resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.0.types.#", "1"), + resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.0.types.0", "PRIVATE"), + resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.0.vpc_endpoint_ids.#", "1"), + 
resource.TestCheckTypeSetElemAttrPair(resourceName, "endpoint_configuration.0.vpc_endpoint_ids.*", vpcEndpointResourceName1, "id"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"body"}, + }, + { + Config: testAccAWSAPIGatewayRestAPIConfigEndpointConfigurationVpcEndpointIds2(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &restApi), + resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.0.types.#", "1"), + resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.0.types.0", "PRIVATE"), + resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.0.vpc_endpoint_ids.#", "2"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "endpoint_configuration.0.vpc_endpoint_ids.*", vpcEndpointResourceName1, "id"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "endpoint_configuration.0.vpc_endpoint_ids.*", vpcEndpointResourceName2, "id"), + ), + }, + { + Config: testAccAWSAPIGatewayRestAPIConfigEndpointConfigurationVpcEndpointIds1(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &restApi), + resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.0.types.#", "1"), + resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.0.types.0", "PRIVATE"), + resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.0.vpc_endpoint_ids.#", "1"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "endpoint_configuration.0.vpc_endpoint_ids.*", vpcEndpointResourceName1, "id"), + ), + }, + }, + }) +} + +func TestAccAWSAPIGatewayRestApi_EndpointConfiguration_VpcEndpointIds_OverrideBody(t *testing.T) { + var conf apigateway.RestApi + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_api_gateway_rest_api.test" + vpcEndpointResourceName1 := "aws_vpc_endpoint.test.0" + vpcEndpointResourceName2 := "aws_vpc_endpoint.test.1" + vpcEndpointResourceName3 := "aws_vpc_endpoint.test.2" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayRestAPIConfigEndpointConfigurationVpcEndpointIdsOverrideBody(rName, vpcEndpointResourceName1, vpcEndpointResourceName2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.0.vpc_endpoint_ids.#", "1"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "endpoint_configuration.0.vpc_endpoint_ids.*", vpcEndpointResourceName1, "id"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"body"}, + }, + // Verify updated configuration value still overrides + { + Config: testAccAWSAPIGatewayRestAPIConfigEndpointConfigurationVpcEndpointIdsOverrideBody(rName, vpcEndpointResourceName3, vpcEndpointResourceName2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, 
"endpoint_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.0.vpc_endpoint_ids.#", "1"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "endpoint_configuration.0.vpc_endpoint_ids.*", vpcEndpointResourceName3, "id"), + ), + }, + // Verify updated body value is still overridden + { + Config: testAccAWSAPIGatewayRestAPIConfigEndpointConfigurationVpcEndpointIdsOverrideBody(rName, vpcEndpointResourceName3, vpcEndpointResourceName1), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.0.vpc_endpoint_ids.#", "1"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "endpoint_configuration.0.vpc_endpoint_ids.*", vpcEndpointResourceName3, "id"), + ), + }, + }, + }) +} + +func TestAccAWSAPIGatewayRestApi_EndpointConfiguration_VpcEndpointIds_SetByBody(t *testing.T) { + var conf apigateway.RestApi + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_api_gateway_rest_api.test" + vpcEndpointResourceName := "aws_vpc_endpoint.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayRestAPIDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSAPIGatewayRestAPIConfigEndpointConfigurationVpcEndpointIdsSetByBody(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSAPIGatewayRestAPIExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "endpoint_configuration.0.vpc_endpoint_ids.#", "1"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "endpoint_configuration.0.vpc_endpoint_ids.*", vpcEndpointResourceName, "id"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"body"}, + }, + }, + }) +} + func TestAccAWSAPIGatewayRestApi_MinimumCompressionSize(t *testing.T) { var conf apigateway.RestApi rName := acctest.RandomWithPrefix("tf-acc-test") @@ -1176,13 +1310,96 @@ resource "aws_api_gateway_rest_api" "test" { `, rName, endpointType) } -func testAccAWSAPIGatewayRestAPIConfig_DisableExecuteApiEndpoint(rName string, disabled bool) string { +func testAccAWSAPIGatewayRestAPIConfigDisableExecuteApiEndpoint(rName string, disableExecuteApiEndpoint bool) string { + return fmt.Sprintf(` +resource "aws_api_gateway_rest_api" "test" { + disable_execute_api_endpoint = %[2]t + name = %[1]q +} +`, rName, disableExecuteApiEndpoint) +} + +func testAccAWSAPIGatewayRestAPIConfigDisableExecuteApiEndpointOverrideBody(rName string, configDisableExecuteApiEndpoint bool, bodyDisableExecuteApiEndpoint bool) string { + return fmt.Sprintf(` +resource "aws_api_gateway_rest_api" "test" { + disable_execute_api_endpoint = %[2]t + name = %[1]q + + body = jsonencode({ + swagger = "2.0" + info = { + title = "test" + version = "2017-04-20T04:08:08Z" + } + schemes = ["https"] + paths = { + "/test" = { + get = { + responses = { + "200" = { + description = "OK" + } + } + x-amazon-apigateway-integration = { + httpMethod = "GET" + type = "HTTP" + responses = { + default = { + statusCode = 200 + } + } + uri = "https://aws.amazon.com/" + } + } + } + } + x-amazon-apigateway-endpoint-configuration = { + disableExecuteApiEndpoint = %[3]t + } + }) +} +`, 
rName, configDisableExecuteApiEndpoint, bodyDisableExecuteApiEndpoint) +} + +func testAccAWSAPIGatewayRestAPIConfigDisableExecuteApiEndpointSetByBody(rName string, bodyDisableExecuteApiEndpoint bool) string { return fmt.Sprintf(` resource "aws_api_gateway_rest_api" "test" { - name = "%s" - disable_execute_api_endpoint = %t + name = %[1]q + + body = jsonencode({ + swagger = "2.0" + info = { + title = "test" + version = "2017-04-20T04:08:08Z" + } + schemes = ["https"] + paths = { + "/test" = { + get = { + responses = { + "200" = { + description = "OK" + } + } + x-amazon-apigateway-integration = { + httpMethod = "GET" + type = "HTTP" + responses = { + default = { + statusCode = 200 + } + } + uri = "https://aws.amazon.com/" + } + } + } + } + x-amazon-apigateway-endpoint-configuration = { + disableExecuteApiEndpoint = %[2]t + } + }) } -`, rName, disabled) +`, rName, bodyDisableExecuteApiEndpoint) } func testAccAWSAPIGatewayRestAPIConfig_Name(rName string) string { @@ -1193,10 +1410,14 @@ resource "aws_api_gateway_rest_api" "test" { `, rName) } -func testAccAWSAPIGatewayRestAPIConfig_VPCEndpointConfiguration(rName string) string { - return fmt.Sprintf(` +func testAccAWSAPIGatewayRestAPIConfigEndpointConfigurationVpcEndpointIds1(rName string) string { + return composeConfig( + testAccAvailableAZsNoOptInConfig(), + fmt.Sprintf(` +data "aws_region" "current" {} + resource "aws_vpc" "test" { - cidr_block = "11.0.0.0/16" + cidr_block = "10.0.0.0/16" enable_dns_support = true enable_dns_hostnames = true @@ -1205,45 +1426,27 @@ resource "aws_vpc" "test" { } } -data "aws_security_group" "test" { +resource "aws_default_security_group" "test" { vpc_id = aws_vpc.test.id - name = "default" -} - -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } } resource "aws_subnet" "test" { - vpc_id = aws_vpc.test.id - cidr_block = aws_vpc.test.cidr_block availability_zone = data.aws_availability_zones.available.names[0] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + vpc_id = aws_vpc.test.id tags = { Name = %[1]q } } -data "aws_region" "current" {} - resource "aws_vpc_endpoint" "test" { - vpc_id = aws_vpc.test.id + private_dns_enabled = false + security_group_ids = [aws_default_security_group.test.id] service_name = "com.amazonaws.${data.aws_region.current.name}.execute-api" + subnet_ids = [aws_subnet.test.id] vpc_endpoint_type = "Interface" - private_dns_enabled = false - - subnet_ids = [ - aws_subnet.test.id, - ] - - security_group_ids = [ - data.aws_security_group.test.id, - ] + vpc_id = aws_vpc.test.id } resource "aws_api_gateway_rest_api" "test" { @@ -1254,13 +1457,17 @@ resource "aws_api_gateway_rest_api" "test" { vpc_endpoint_ids = [aws_vpc_endpoint.test.id] } } -`, rName) +`, rName)) } -func testAccAWSAPIGatewayRestAPIConfig_VPCEndpointConfiguration2(rName string) string { - return fmt.Sprintf(` +func testAccAWSAPIGatewayRestAPIConfigEndpointConfigurationVpcEndpointIds2(rName string) string { + return composeConfig( + testAccAvailableAZsNoOptInConfig(), + fmt.Sprintf(` +data "aws_region" "current" {} + resource "aws_vpc" "test" { - cidr_block = "11.0.0.0/16" + cidr_block = "10.0.0.0/16" enable_dns_support = true enable_dns_hostnames = true @@ -1269,71 +1476,214 @@ resource "aws_vpc" "test" { } } -data "aws_security_group" "test" { +resource "aws_default_security_group" "test" { vpc_id = aws_vpc.test.id - name = "default" } -data "aws_availability_zones" "available" { - state = "available" +resource 
"aws_subnet" "test" { + availability_zone = data.aws_availability_zones.available.names[0] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } +} + +resource "aws_vpc_endpoint" "test" { + private_dns_enabled = false + security_group_ids = [aws_default_security_group.test.id] + service_name = "com.amazonaws.${data.aws_region.current.name}.execute-api" + subnet_ids = [aws_subnet.test.id] + vpc_endpoint_type = "Interface" + vpc_id = aws_vpc.test.id +} + +resource "aws_vpc_endpoint" "test2" { + private_dns_enabled = false + security_group_ids = [aws_default_security_group.test.id] + service_name = "com.amazonaws.${data.aws_region.current.name}.execute-api" + subnet_ids = [aws_subnet.test.id] + vpc_endpoint_type = "Interface" + vpc_id = aws_vpc.test.id +} + +resource "aws_api_gateway_rest_api" "test" { + name = %[1]q + + endpoint_configuration { + types = ["PRIVATE"] + vpc_endpoint_ids = [aws_vpc_endpoint.test.id, aws_vpc_endpoint.test2.id] + } +} +`, rName)) +} + +func testAccAWSAPIGatewayRestAPIConfigEndpointConfigurationVpcEndpointIdsOverrideBody(rName string, configVpcEndpointResourceName string, bodyVpcEndpointResourceName string) string { + return composeConfig( + testAccAvailableAZsNoOptInConfig(), + fmt.Sprintf(` +data "aws_region" "current" {} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + enable_dns_support = true + enable_dns_hostnames = true - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] + tags = { + Name = %[1]q } } +resource "aws_default_security_group" "test" { + vpc_id = aws_vpc.test.id +} + resource "aws_subnet" "test" { - vpc_id = aws_vpc.test.id - cidr_block = aws_vpc.test.cidr_block availability_zone = data.aws_availability_zones.available.names[0] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + vpc_id = aws_vpc.test.id tags = { Name = %[1]q } } -data "aws_region" "current" {} - resource "aws_vpc_endpoint" "test" { - vpc_id = aws_vpc.test.id + count = 3 + + private_dns_enabled = false + security_group_ids = [aws_default_security_group.test.id] service_name = "com.amazonaws.${data.aws_region.current.name}.execute-api" + subnet_ids = [aws_subnet.test.id] vpc_endpoint_type = "Interface" - private_dns_enabled = false + vpc_id = aws_vpc.test.id +} - subnet_ids = [ - aws_subnet.test.id, - ] +resource "aws_api_gateway_rest_api" "test" { + name = %[1]q - security_group_ids = [ - data.aws_security_group.test.id, - ] + endpoint_configuration { + types = ["PRIVATE"] + vpc_endpoint_ids = [%[2]s] + } + + body = jsonencode({ + swagger = "2.0" + info = { + title = "test" + version = "2017-04-20T04:08:08Z" + } + schemes = ["https"] + paths = { + "/test" = { + get = { + responses = { + "200" = { + description = "OK" + } + } + x-amazon-apigateway-integration = { + httpMethod = "GET" + type = "HTTP" + responses = { + default = { + statusCode = 200 + } + } + uri = "https://aws.amazon.com/" + } + } + } + } + x-amazon-apigateway-endpoint-configuration = { + vpcEndpointIds = [%[3]s] + } + }) +} +`, rName, configVpcEndpointResourceName+".id", bodyVpcEndpointResourceName+".id")) } -resource "aws_vpc_endpoint" "test2" { - vpc_id = aws_vpc.test.id - service_name = "com.amazonaws.${data.aws_region.current.name}.execute-api" - vpc_endpoint_type = "Interface" - private_dns_enabled = false +func testAccAWSAPIGatewayRestAPIConfigEndpointConfigurationVpcEndpointIdsSetByBody(rName string) string { + return composeConfig( + testAccAvailableAZsNoOptInConfig(), + fmt.Sprintf(` +data "aws_region" 
"current" {} - subnet_ids = [ - aws_subnet.test.id, - ] +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + enable_dns_support = true + enable_dns_hostnames = true - security_group_ids = [ - data.aws_security_group.test.id, - ] + tags = { + Name = %[1]q + } +} + +resource "aws_default_security_group" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_subnet" "test" { + availability_zone = data.aws_availability_zones.available.names[0] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } +} + +resource "aws_vpc_endpoint" "test" { + private_dns_enabled = false + security_group_ids = [aws_default_security_group.test.id] + service_name = "com.amazonaws.${data.aws_region.current.name}.execute-api" + subnet_ids = [aws_subnet.test.id] + vpc_endpoint_type = "Interface" + vpc_id = aws_vpc.test.id } resource "aws_api_gateway_rest_api" "test" { name = %[1]q endpoint_configuration { - types = ["PRIVATE"] - vpc_endpoint_ids = [aws_vpc_endpoint.test.id, aws_vpc_endpoint.test2.id] + types = ["PRIVATE"] } + + body = jsonencode({ + swagger = "2.0" + info = { + title = "test" + version = "2017-04-20T04:08:08Z" + } + schemes = ["https"] + paths = { + "/test" = { + get = { + responses = { + "200" = { + description = "OK" + } + } + x-amazon-apigateway-integration = { + httpMethod = "GET" + type = "HTTP" + responses = { + default = { + statusCode = 200 + } + } + uri = "https://aws.amazon.com/" + } + } + } + } + x-amazon-apigateway-endpoint-configuration = { + vpcEndpointIds = [aws_vpc_endpoint.test.id] + } + }) } -`, rName) +`, rName)) } func testAccAWSAPIGatewayRestAPIConfigTags1(rName, tagKey1, tagValue1 string) string { diff --git a/website/docs/r/api_gateway_rest_api.html.markdown b/website/docs/r/api_gateway_rest_api.html.markdown index 1df9f5ebf6b..136635e4407 100644 --- a/website/docs/r/api_gateway_rest_api.html.markdown +++ b/website/docs/r/api_gateway_rest_api.html.markdown @@ -41,14 +41,14 @@ The following arguments are supported: * `name` - (Required) Name of the REST API. If importing an OpenAPI specification via the `body` argument, this corresponds to the `info.title` field. If the argument value is different than the OpenAPI value, the argument value will override the OpenAPI value. * `description` - (Optional) Description of the REST API. If importing an OpenAPI specification via the `body` argument, this corresponds to the `info.description` field. If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. -* `endpoint_configuration` - (Optional) Nested argument defining API endpoint configuration including endpoint type. Defined below. +* `endpoint_configuration` - (Optional) Configuration block defining API endpoint configuration including endpoint type. Defined below. * `binary_media_types` - (Optional) List of binary media types supported by the REST API. By default, the REST API supports only UTF-8-encoded text payloads. If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-binary-media-types` extension](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions-binary-media-types.html). If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. * `minimum_compression_size` - (Optional) Minimum response size to compress for the REST API. Integer between `-1` and `10485760` (10MB). 
Setting a value greater than `-1` will enable compression, `-1` disables compression (default). If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-minimum-compression-size` extension](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-openapi-minimum-compression-size.html). If the argument value (_except_ `-1`) is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. * `body` - (Optional) OpenAPI specification that defines the set of routes and integrations to create as part of the REST API. This configuration, and any updates to it, will replace all REST API configuration except values overridden in this resource configuration and other resource updates applied after this resource but before any `aws_api_gateway_deployment` creation. More information about REST API OpenAPI support can be found in the [API Gateway Developer Guide](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-import-api.html). * `parameters` - (Optional) Map of customizations for importing the specification in the `body` argument. For example, to exclude DocumentationParts from an imported API, set `ignore` equal to `documentation`. Additional documentation, including other parameters such as `basepath`, can be found in the [API Gateway Developer Guide](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-import-api.html). * `policy` - (Optional) JSON formatted policy document that controls access to the API Gateway. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). Terraform will only perform drift detection of its value when present in a configuration. It is recommended to use the [`aws_api_gateway_rest_api_policy` resource](/docs/providers/aws/r/api_gateway_rest_api_policy.html) instead. If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-policy` extension](https://docs.aws.amazon.com/apigateway/latest/developerguide/openapi-extensions-policy.html). If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. * `api_key_source` - (Optional) Source of the API key for requests. Valid values are `HEADER` (default) and `AUTHORIZER`. If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-api-key-source` extension](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions-api-key-source.html). If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. -* `disable_execute_api_endpoint` - (Optional) Specifies whether clients can invoke your API by using the default execute-api endpoint. By default, clients can invoke your API with the default https://{api_id}.execute-api.{region}.amazonaws.com endpoint. To require that clients use a custom domain name to invoke your API, disable the default endpoint. Defaults to `false`. +* `disable_execute_api_endpoint` - (Optional) Specifies whether clients can invoke your API by using the default execute-api endpoint. By default, clients can invoke your API with the default https://{api_id}.execute-api.{region}.amazonaws.com endpoint. 
To require that clients use a custom domain name to invoke your API, disable the default endpoint. Defaults to `false`. If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-endpoint-configuration` extension `disableExecuteApiEndpoint` property](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions-endpoint-configuration.html). If the argument value is `true` and is different than the OpenAPI value, the argument value will override the OpenAPI value. * `tags` - (Optional) Key-value map of resource tags __Note__: If the `body` argument is provided, the OpenAPI specification will be used to configure the resources, methods and integrations for the Rest API. If this argument is provided, the following resources should not be managed as separate ones, as updates may cause manual resource updates to be overwritten: @@ -65,7 +65,7 @@ __Note__: If the `body` argument is provided, the OpenAPI specification will be ### endpoint_configuration * `types` - (Required) A list of endpoint types. This resource currently only supports managing a single value. Valid values: `EDGE`, `REGIONAL` or `PRIVATE`. If unspecified, defaults to `EDGE`. Must be declared as `REGIONAL` in non-Commercial partitions. Refer to the [documentation](https://docs.aws.amazon.com/apigateway/latest/developerguide/create-regional-api.html) for more information on the difference between edge-optimized and regional APIs. -* `vpc_endpoint_ids` - (Optional) A list of VPC Endpoint Ids. It is only supported for PRIVATE endpoint type. +* `vpc_endpoint_ids` - (Optional) Set of VPC Endpoint identifiers. It is only supported for `PRIVATE` endpoint type. If importing an OpenAPI specification via the `body` argument, this corresponds to the [`x-amazon-apigateway-endpoint-configuration` extension `vpcEndpointIds` property](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions-endpoint-configuration.html). If the argument value is provided and is different than the OpenAPI value, the argument value will override the OpenAPI value. 
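
As a minimal sketch of the private-endpoint arguments described above (the resource names and the referenced `aws_vpc_endpoint.example` are illustrative placeholders, not part of this patch), a REST API pinned to a single VPC endpoint looks like:

```hcl
resource "aws_api_gateway_rest_api" "example" {
  name = "example"

  # PRIVATE APIs may pin resolution to specific Interface VPC endpoints.
  endpoint_configuration {
    types            = ["PRIVATE"]
    vpc_endpoint_ids = [aws_vpc_endpoint.example.id]
  }
}
```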
## Attributes Reference From 510d8b956eb9d0dc96e6e3abb6ec8a9a4d6892cb Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 21 Jan 2021 14:58:05 -0500 Subject: [PATCH 0801/1212] Update CHANGELOG for #17209 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 64f7307e070..b723fae9950 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ BUG FIXES * data-source/aws_lambda_function: Prevent error when getting Code Signing Config for container image based lambdas during read [GH-17180] * resource/aws_api_gateway_rest_api: Ensure `api_key_source`, `binary_media_types`, `description`, `minimum_compression_size`, `name`, and `policy` configuration values are correctly applied as an override after OpenAPI specification import (`body` argument) [GH-17099] * resource/aws_api_gateway_rest_api: Allow `api_key_source`, `binary_media_types`, and `description` arguments to be omitted from configuration with OpenAPI specification import (`body` argument) [GH-17099] +* resource/aws_api_gateway_rest_api: Fix `disable_execute_api_endpoint` and `endpoint_configuration` `vpc_endpoint_ids` handling with OpenAPI specification import (`body` argument) [GH-17209] * resource/aws_lambda_function: Prevent panic with missing `FunctionConfiguration` `PackageType` attribute in API response [GH-16544] * resource/aws_lambda_function: Prevent panic with missing environment variable value [GH-17056] * resource/aws_sagemaker_image: Fix catching image not found on read error [GH-17141] From 42727bca4cf1e034077d3539c4b1c4d571cc7f14 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 21 Jan 2021 14:59:11 -0500 Subject: [PATCH 0802/1212] docs/resource/aws_api_gateway_integration_response: Remove "-" selection pattern documentation (#17212) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/10222 Reference: https://github.com/hashicorp/terraform-provider-aws/issues/14204 Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16950 API Gateway now expects the selection pattern to not be configured to set the default mapping. The current AWS documentation makes no mention of the "-" value. Verified in the AWS Console and with the AWS CLI that the API response is empty with no selectionPattern or no value for the default mapping. --- website/docs/r/api_gateway_integration_response.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/api_gateway_integration_response.html.markdown b/website/docs/r/api_gateway_integration_response.html.markdown index 14958396ac6..9347801b5e5 100644 --- a/website/docs/r/api_gateway_integration_response.html.markdown +++ b/website/docs/r/api_gateway_integration_response.html.markdown @@ -76,7 +76,7 @@ The following arguments are supported: * `http_method` - (Required) The HTTP method (`GET`, `POST`, `PUT`, `DELETE`, `HEAD`, `OPTIONS`, `ANY`) * `status_code` - (Required) The HTTP status code * `selection_pattern` - (Optional) Specifies the regular expression pattern used to choose - an integration response based on the response from the backend. Setting this to `-` makes the integration the default one. + an integration response based on the response from the backend. Omit configuring this to make the integration the default one. If the backend is an `AWS` Lambda function, the AWS Lambda function error header is matched. For all other `HTTP` and `AWS` backends, the HTTP status code is matched. 
* `response_templates` - (Optional) A map specifying the templates used to transform the integration response body From 446c10aabf2a917a0405009703e110df377784f7 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 21 Jan 2021 12:39:57 -0800 Subject: [PATCH 0803/1212] Handles gaps in member cluster IDs --- ...ource_aws_elasticache_replication_group.go | 27 +++++++--- ..._aws_elasticache_replication_group_test.go | 52 +++++++++++++++++++ 2 files changed, 73 insertions(+), 6 deletions(-) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index df0a7e3aff3..2884f692ad7 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -758,18 +758,17 @@ func elasticacheReplicationGroupModifyNumCacheClusters(conn *elasticache.ElastiC var err error if newNumberCacheClusters > oldNumberCacheClusters { - err = elasticacheReplicationGroupIncreaseNumCacheClusters(conn, d.Id(), oldNumberCacheClusters, newNumberCacheClusters, d.Timeout(schema.TimeoutUpdate)) + currentClusterIDs := d.Get("member_clusters").(*schema.Set) + countToAdd := newNumberCacheClusters - oldNumberCacheClusters + err = elasticacheReplicationGroupIncreaseNumCacheClusters(conn, d.Id(), countToAdd, currentClusterIDs, d.Timeout(schema.TimeoutUpdate)) } else { err = elasticacheReplicationGroupReduceNumCacheClusters(conn, d.Id(), oldNumberCacheClusters, newNumberCacheClusters, d.Timeout(schema.TimeoutUpdate), d) } return err } -func elasticacheReplicationGroupIncreaseNumCacheClusters(conn *elasticache.ElastiCache, replicationGroupID string, o, n int, timeout time.Duration) error { - var addClusterIDs []string - for clusterID := o + 1; clusterID <= n; clusterID++ { - addClusterIDs = append(addClusterIDs, formatReplicationGroupClusterID(replicationGroupID, clusterID)) - } +func elasticacheReplicationGroupIncreaseNumCacheClusters(conn *elasticache.ElastiCache, replicationGroupID string, countToAdd int, currentClusterIDs *schema.Set, timeout time.Duration) error { + addClusterIDs := elasticacheReplicationGroupIncreaseCacheClusterIDs(replicationGroupID, countToAdd, currentClusterIDs) // Kick off all the Cache Cluster creations for _, cacheClusterID := range addClusterIDs { @@ -796,6 +795,22 @@ func elasticacheReplicationGroupIncreaseNumCacheClusters(conn *elasticache.Elast return nil } +func elasticacheReplicationGroupIncreaseCacheClusterIDs(replicationGroupID string, countToAdd int, currentClusterIDs *schema.Set) []string { + var addClusterIDs []string + ci := 1 + for c := 1; c <= countToAdd; c++ { + for { + clusterID := formatReplicationGroupClusterID(replicationGroupID, ci) + ci++ + if !currentClusterIDs.Contains(clusterID) { + addClusterIDs = append(addClusterIDs, clusterID) + break + } + } + } + return addClusterIDs +} + func elasticacheReplicationGroupReduceNumCacheClusters(conn *elasticache.ElastiCache, replicationGroupID string, o, n int, timeout time.Duration, d *schema.ResourceData) error { var removeClusterIDs []string for clusterID := o; clusterID >= (n + 1); clusterID-- { diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go index 5957e1c9a07..4baa83d4217 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" 
"github.com/terraform-providers/terraform-provider-aws/aws/internal/service/elasticache/finder" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/elasticache/waiter" "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" ) @@ -871,6 +872,57 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFail }) } +func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_MemberClusterDisappears_NoChange(t *testing.T) { + var replicationGroup elasticache.ReplicationGroup + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_elasticache_replication_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSElasticacheReplicationGroupConfig_NumberCacheClusters(rName, 3, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &replicationGroup), + resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "3"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "3"), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", formatReplicationGroupClusterID(rName, 1)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", formatReplicationGroupClusterID(rName, 2)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", formatReplicationGroupClusterID(rName, 3)), + ), + }, + { + PreConfig: func() { + // Remove one of the Cache Clusters + conn := testAccProvider.Meta().(*AWSClient).elasticacheconn + timeout := 40 * time.Minute + + cacheClusterID := formatReplicationGroupClusterID(rName, 2) + + if err := deleteElasticacheCacheCluster(conn, cacheClusterID, ""); err != nil { + t.Fatalf("error deleting Cache Cluster (%s): %s", cacheClusterID, err) + } + + if _, err := waiter.CacheClusterDeleted(conn, cacheClusterID, timeout); err != nil { + t.Fatalf("error deleting Cache Cluster (%s): %s", cacheClusterID, err) + } + }, + Config: testAccAWSElasticacheReplicationGroupConfig_NumberCacheClusters(rName, 3, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &replicationGroup), + resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "3"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "3"), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", formatReplicationGroupClusterID(rName, 1)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", formatReplicationGroupClusterID(rName, 2)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", formatReplicationGroupClusterID(rName, 3)), + ), + }, + }, + }) +} + func TestAccAWSElasticacheReplicationGroup_tags(t *testing.T) { var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") From 31678a163d3e43eba7a40d875370862ca7117a32 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 21 Jan 2021 15:48:50 -0500 Subject: [PATCH 0804/1212] lakeformation: Add changelog file --- .changelog/17189.txt | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .changelog/17189.txt diff --git a/.changelog/17189.txt b/.changelog/17189.txt new file mode 100644 index 00000000000..45a7187b60b --- /dev/null +++ b/.changelog/17189.txt @@ -0,0 +1,7 @@ +```release-note:bug 
+resource/lakeformation_permissions: Handle resources with multiple permissions +``` + +```release-note:bug +resource/lakeformation_data_lake_settings: Avoid unnecessary resource cycling +``` \ No newline at end of file From 4bd169a4786a8a35674fa7b93b4dadc17d8d9090 Mon Sep 17 00:00:00 2001 From: Dirk Avery <31492422+YakDriver@users.noreply.github.com> Date: Thu, 21 Jan 2021 16:09:05 -0500 Subject: [PATCH 0805/1212] Update CHANGELOG.md --- CHANGELOG.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b723fae9950..d890b2e9692 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,13 +25,15 @@ ENHANCEMENTS BUG FIXES -* provider: Prevent panic when sending Ctrl-C (SIGINT) to Terraform [GH-17211] +* data-source/aws_lambda_function: Prevent error when getting Code Signing Config for container image based lambdas during read [GH-17180] * provider: Fix error messages for missing required blocks not including the block name [GH-17211] +* provider: Prevent panic when sending Ctrl-C (SIGINT) to Terraform [GH-17211] * resource/aws_api_gateway_authorizer: Ensure `authorizer_credentials` are configured when `type` is `COGNITO_USER_POOLS` [GH-16614] -* data-source/aws_lambda_function: Prevent error when getting Code Signing Config for container image based lambdas during read [GH-17180] -* resource/aws_api_gateway_rest_api: Ensure `api_key_source`, `binary_media_types`, `description`, `minimum_compression_size`, `name`, and `policy` configuration values are correctly applied as an override after OpenAPI specification import (`body` argument) [GH-17099] * resource/aws_api_gateway_rest_api: Allow `api_key_source`, `binary_media_types`, and `description` arguments to be omitted from configuration with OpenAPI specification import (`body` argument) [GH-17099] +* resource/aws_api_gateway_rest_api: Ensure `api_key_source`, `binary_media_types`, `description`, `minimum_compression_size`, `name`, and `policy` configuration values are correctly applied as an override after OpenAPI specification import (`body` argument) [GH-17099] * resource/aws_api_gateway_rest_api: Fix `disable_execute_api_endpoint` and `endpoint_configuration` `vpc_endpoint_ids` handling with OpenAPI specification import (`body` argument) [GH-17209] +* resource/aws_lakeformation_data_lake_settings: Avoid unnecessary resource cycling [GH-17189] +* resource/aws_lakeformation_permissions: Handle resources with multiple permissions [GH-17189] * resource/aws_lambda_function: Prevent panic with missing `FunctionConfiguration` `PackageType` attribute in API response [GH-16544] * resource/aws_lambda_function: Prevent panic with missing environment variable value [GH-17056] * resource/aws_sagemaker_image: Fix catching image not found on read error [GH-17141] From 114e43d74d0b848895f255534e94e9f6aadb34de Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 21 Jan 2021 13:58:33 -0800 Subject: [PATCH 0806/1212] Handle gaps when adding member clusters --- ...ource_aws_elasticache_replication_group.go | 2 +- ..._aws_elasticache_replication_group_test.go | 52 +++++++++++++++++++ 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index 2884f692ad7..62eac70b2d2 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -758,8 +758,8 @@ func elasticacheReplicationGroupModifyNumCacheClusters(conn *elasticache.ElastiC var err error 
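	// Illustrative note (editorial sketch, not part of the upstream hunk):
	// with existing member suffixes 001 and 003 and one cluster to add, the
	// increase path below fills the 002 gap rather than reusing a suffix
	// that is already taken; the exact ID text is an assumption here and
	// comes from formatReplicationGroupClusterID.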
if newNumberCacheClusters > oldNumberCacheClusters { - currentClusterIDs := d.Get("member_clusters").(*schema.Set) countToAdd := newNumberCacheClusters - oldNumberCacheClusters + currentClusterIDs := d.Get("member_clusters").(*schema.Set) err = elasticacheReplicationGroupIncreaseNumCacheClusters(conn, d.Id(), countToAdd, currentClusterIDs, d.Timeout(schema.TimeoutUpdate)) } else { err = elasticacheReplicationGroupReduceNumCacheClusters(conn, d.Id(), oldNumberCacheClusters, newNumberCacheClusters, d.Timeout(schema.TimeoutUpdate), d) diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go index 4baa83d4217..b8cf39bc364 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -923,6 +923,58 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_MemberClusterDisa }) } +func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_MemberClusterDisappears_AddMemberCluster(t *testing.T) { + var replicationGroup elasticache.ReplicationGroup + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_elasticache_replication_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSElasticacheReplicationGroupConfig_NumberCacheClusters(rName, 3, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &replicationGroup), + resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "3"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "3"), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", formatReplicationGroupClusterID(rName, 1)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", formatReplicationGroupClusterID(rName, 2)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", formatReplicationGroupClusterID(rName, 3)), + ), + }, + { + PreConfig: func() { + // Remove one of the Cache Clusters + conn := testAccProvider.Meta().(*AWSClient).elasticacheconn + timeout := 40 * time.Minute + + cacheClusterID := formatReplicationGroupClusterID(rName, 2) + + if err := deleteElasticacheCacheCluster(conn, cacheClusterID, ""); err != nil { + t.Fatalf("error deleting Cache Cluster (%s): %s", cacheClusterID, err) + } + + if _, err := waiter.CacheClusterDeleted(conn, cacheClusterID, timeout); err != nil { + t.Fatalf("error deleting Cache Cluster (%s): %s", cacheClusterID, err) + } + }, + Config: testAccAWSElasticacheReplicationGroupConfig_NumberCacheClusters(rName, 4, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &replicationGroup), + resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "4"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "4"), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", formatReplicationGroupClusterID(rName, 1)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", formatReplicationGroupClusterID(rName, 2)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", formatReplicationGroupClusterID(rName, 3)), + resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", formatReplicationGroupClusterID(rName, 4)), + ), + }, + }, + 

})
+}
+
 func TestAccAWSElasticacheReplicationGroup_tags(t *testing.T) {
 	var rg elasticache.ReplicationGroup
 	rName := acctest.RandomWithPrefix("tf-acc-test")

From cd7288cdd350d48b808f03c9ddc576859a5ed849 Mon Sep 17 00:00:00 2001
From: Shell Xu <41312778+shell-autonomic-sh@users.noreply.github.com>
Date: Fri, 22 Jan 2021 06:01:09 +0800
Subject: [PATCH 0807/1212] Update data_source_aws_elb_hosted_zone_id

Corrects the ELB hosted zone IDs for the cn-north-1 and cn-northwest-1
regions.

Fixes https://github.com/hashicorp/terraform-provider-aws/issues/17225
---
 aws/data_source_aws_elb_hosted_zone_id.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/aws/data_source_aws_elb_hosted_zone_id.go b/aws/data_source_aws_elb_hosted_zone_id.go
index da6c8a3877e..e21767ee043 100644
--- a/aws/data_source_aws_elb_hosted_zone_id.go
+++ b/aws/data_source_aws_elb_hosted_zone_id.go
@@ -19,8 +19,8 @@ var elbHostedZoneIdPerRegionMap = map[string]string{
 	endpoints.ApSoutheast1RegionID: "Z1LMS91P8CMLE5",
 	endpoints.ApSoutheast2RegionID: "Z1GM3OXH4ZPM65",
 	endpoints.CaCentral1RegionID:   "ZQSVJUPU6J1EY",
-	endpoints.CnNorth1RegionID:     "Z3BX2TMKNYI13Y",
-	endpoints.CnNorthwest1RegionID: "Z3BX2TMKNYI13Y",
+	endpoints.CnNorth1RegionID:     "Z1GDH35T77C1KE",
+	endpoints.CnNorthwest1RegionID: "ZM7IZAIOVVDZF",
 	endpoints.EuCentral1RegionID:   "Z215JYRZR1TBD5",
 	endpoints.EuNorth1RegionID:     "Z23TAZ6LKFMNIO",
 	endpoints.EuSouth1RegionID:     "Z3ULH7SSC9OV64",

From 97a96d777b460f89c8925cd0d4547073fdefa030 Mon Sep 17 00:00:00 2001
From: Graham Davison
Date: Thu, 21 Jan 2021 14:35:21 -0800
Subject: [PATCH 0808/1212] Update CHANGELOG for #15348

---
 CHANGELOG.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d890b2e9692..8b55aca45a3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,11 +15,13 @@ ENHANCEMENTS

 * data-source/aws_ami: Add `throughput` attribute to `block_device_mappings` `ebs` attribute [GH-16631]
 * data-source/aws_ebs_volume: Add `throughput` attribute [GH-16517]
+* data-source/aws_elasticache_replication_group: Adds `arn` attribute [GH-15348]
 * data-source/aws_iam_user: Add `tags` attribute [GH-13287]
 * resource/aws_ami: Support `volume_type` value of `gp3` and add `throughput` argument to `ebs_block_device` configuration block [GH-16631]
 * resource/aws_ami_copy: Add `throughput` argument to `ebs_block_device` configuration block [GH-16631]
 * resource/aws_ami_from_instance: Add `throughput` argument to `ebs_block_device` configuration block [GH-16631]
 * resource/aws_ebs_volume: Add `throughput` argument [GH-16517]
+* resource/aws_elasticache_replication_group: Adds `arn` attribute [GH-15348]
 * resource/aws_lightsail_instance: Add `ipv6_addresses` attribute [GH-17155]
 * resource/aws_sagemaker_domain: Delete implicit EFS file system [GH-17123]

From 431cf300646399be24796e304033f620f65f0a5c Mon Sep 17 00:00:00 2001
From: tf-release-bot
Date: Fri, 22 Jan 2021 00:41:09 +0000
Subject: [PATCH 0809/1212] v3.25.0

---
 CHANGELOG.md | 58 ++++++++++++++++++++++++++--------------------------
 1 file changed, 29 insertions(+), 29 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8b55aca45a3..eaf556c0e6a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,44 +1,44 @@
-## 3.25.0 (Unreleased)
+## 3.25.0 (January 22, 2021)

 NOTES

-* resource/aws_lightsail_instance: The `ipv6_address` attribute has been deprecated.
Use the `ipv6_addresses` attribute instead. This is due to a backwards incompatible change in the Lightsail API. ([#17155](https://github.com/hashicorp/terraform-provider-aws/issues/17155)) FEATURES -* **New Resource:** `aws_backup_global_settings` [GH-16475] -* **New Resource:** `aws_sagemaker_feature_group` [GH-16728] -* **New Resource:** `aws_sagemaker_image_version` [GH-17141] -* **New Resource:** `aws_sagemaker_user_profile` [GH-17123] +* **New Resource:** `aws_backup_global_settings` ([#16475](https://github.com/hashicorp/terraform-provider-aws/issues/16475)) +* **New Resource:** `aws_sagemaker_feature_group` ([#16728](https://github.com/hashicorp/terraform-provider-aws/issues/16728)) +* **New Resource:** `aws_sagemaker_image_version` ([#17141](https://github.com/hashicorp/terraform-provider-aws/issues/17141)) +* **New Resource:** `aws_sagemaker_user_profile` ([#17123](https://github.com/hashicorp/terraform-provider-aws/issues/17123)) ENHANCEMENTS -* data-source/aws_ami: Add `throughput` attribute to `block_device_mappings` `ebs` attribute [GH-16631] -* data-source/aws_ebs_volume: Add `throughput` attribute [GH-16517] -* data-source/aws_elasticache_replication_group: Adds `arn` attribute [GH-15348] -* data-source/aws_iam_user: Add `tags` attribute [GH-13287] -* resource/aws_ami: Support `volume_type` value of `gp3` and add `throughput` argument to `ebs_block_device` configuration block [GH-16631] -* resource/aws_ami_copy: Add `throughput` argument to `ebs_block_device` configuration block [GH-16631] -* resource/aws_ami_from_instance: Add `throughput` argument to `ebs_block_device` configuration block [GH-16631] -* resource/aws_ebs_volume: Add `throughput` argument [GH-16517] -* resource/aws_elasticache_replication_group: Adds `arn` attribute [GH-15348] -* resource/aws_lightsail_instance: Add `ipv6_addresses` attribute [GH-17155] -* resource/aws_sagemaker_domain: Delete implicit EFS file system [GH-17123] +* data-source/aws_ami: Add `throughput` attribute to `block_device_mappings` `ebs` attribute ([#16631](https://github.com/hashicorp/terraform-provider-aws/issues/16631)) +* data-source/aws_ebs_volume: Add `throughput` attribute ([#16517](https://github.com/hashicorp/terraform-provider-aws/issues/16517)) +* data-source/aws_elasticache_replication_group: Adds `arn` attribute ([#15348](https://github.com/hashicorp/terraform-provider-aws/issues/15348)) +* data-source/aws_iam_user: Add `tags` attribute ([#13287](https://github.com/hashicorp/terraform-provider-aws/issues/13287)) +* resource/aws_ami: Support `volume_type` value of `gp3` and add `throughput` argument to `ebs_block_device` configuration block ([#16631](https://github.com/hashicorp/terraform-provider-aws/issues/16631)) +* resource/aws_ami_copy: Add `throughput` argument to `ebs_block_device` configuration block ([#16631](https://github.com/hashicorp/terraform-provider-aws/issues/16631)) +* resource/aws_ami_from_instance: Add `throughput` argument to `ebs_block_device` configuration block ([#16631](https://github.com/hashicorp/terraform-provider-aws/issues/16631)) +* resource/aws_ebs_volume: Add `throughput` argument ([#16517](https://github.com/hashicorp/terraform-provider-aws/issues/16517)) +* resource/aws_elasticache_replication_group: Adds `arn` attribute ([#15348](https://github.com/hashicorp/terraform-provider-aws/issues/15348)) +* resource/aws_lightsail_instance: Add `ipv6_addresses` attribute ([#17155](https://github.com/hashicorp/terraform-provider-aws/issues/17155)) +* resource/aws_sagemaker_domain: Delete implicit 
EFS file system ([#17123](https://github.com/hashicorp/terraform-provider-aws/issues/17123)) BUG FIXES -* data-source/aws_lambda_function: Prevent error when getting Code Signing Config for container image based lambdas during read [GH-17180] -* provider: Fix error messages for missing required blocks not including the block name [GH-17211] -* provider: Prevent panic when sending Ctrl-C (SIGINT) to Terraform [GH-17211] -* resource/aws_api_gateway_authorizer: Ensure `authorizer_credentials` are configured when `type` is `COGNITO_USER_POOLS` [GH-16614] -* resource/aws_api_gateway_rest_api: Allow `api_key_source`, `binary_media_types`, and `description` arguments to be omitted from configuration with OpenAPI specification import (`body` argument) [GH-17099] -* resource/aws_api_gateway_rest_api: Ensure `api_key_source`, `binary_media_types`, `description`, `minimum_compression_size`, `name`, and `policy` configuration values are correctly applied as an override after OpenAPI specification import (`body` argument) [GH-17099] -* resource/aws_api_gateway_rest_api: Fix `disable_execute_api_endpoint` and `endpoint_configuration` `vpc_endpoint_ids` handling with OpenAPI specification import (`body` argument) [GH-17209] -* resource/aws_lakeformation_data_lake_settings: Avoid unnecessary resource cycling [GH-17189] -* resource/aws_lakeformation_permissions: Handle resources with multiple permissions [GH-17189] -* resource/aws_lambda_function: Prevent panic with missing `FunctionConfiguration` `PackageType` attribute in API response [GH-16544] -* resource/aws_lambda_function: Prevent panic with missing environment variable value [GH-17056] -* resource/aws_sagemaker_image: Fix catching image not found on read error [GH-17141] +* data-source/aws_lambda_function: Prevent error when getting Code Signing Config for container image based lambdas during read ([#17180](https://github.com/hashicorp/terraform-provider-aws/issues/17180)) +* provider: Fix error messages for missing required blocks not including the block name ([#17211](https://github.com/hashicorp/terraform-provider-aws/issues/17211)) +* provider: Prevent panic when sending Ctrl-C (SIGINT) to Terraform ([#17211](https://github.com/hashicorp/terraform-provider-aws/issues/17211)) +* resource/aws_api_gateway_authorizer: Ensure `authorizer_credentials` are configured when `type` is `COGNITO_USER_POOLS` ([#16614](https://github.com/hashicorp/terraform-provider-aws/issues/16614)) +* resource/aws_api_gateway_rest_api: Allow `api_key_source`, `binary_media_types`, and `description` arguments to be omitted from configuration with OpenAPI specification import (`body` argument) ([#17099](https://github.com/hashicorp/terraform-provider-aws/issues/17099)) +* resource/aws_api_gateway_rest_api: Ensure `api_key_source`, `binary_media_types`, `description`, `minimum_compression_size`, `name`, and `policy` configuration values are correctly applied as an override after OpenAPI specification import (`body` argument) ([#17099](https://github.com/hashicorp/terraform-provider-aws/issues/17099)) +* resource/aws_api_gateway_rest_api: Fix `disable_execute_api_endpoint` and `endpoint_configuration` `vpc_endpoint_ids` handling with OpenAPI specification import (`body` argument) ([#17209](https://github.com/hashicorp/terraform-provider-aws/issues/17209)) +* resource/aws_lakeformation_data_lake_settings: Avoid unnecessary resource cycling ([#17189](https://github.com/hashicorp/terraform-provider-aws/issues/17189)) +* resource/aws_lakeformation_permissions: Handle resources 
with multiple permissions ([#17189](https://github.com/hashicorp/terraform-provider-aws/issues/17189)) +* resource/aws_lambda_function: Prevent panic with missing `FunctionConfiguration` `PackageType` attribute in API response ([#16544](https://github.com/hashicorp/terraform-provider-aws/issues/16544)) +* resource/aws_lambda_function: Prevent panic with missing environment variable value ([#17056](https://github.com/hashicorp/terraform-provider-aws/issues/17056)) +* resource/aws_sagemaker_image: Fix catching image not found on read error ([#17141](https://github.com/hashicorp/terraform-provider-aws/issues/17141)) ## 3.24.1 (January 15, 2021) From 605c6a127589a1e9d144a249d112ba25347b9680 Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Thu, 21 Jan 2021 17:08:06 -0800 Subject: [PATCH 0810/1212] Update CHANGELOG.md after v3.25.0 release --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index eaf556c0e6a..e9cc05fc998 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,5 @@ +## 3.26.0 (Unreleased) + ## 3.25.0 (January 22, 2021) NOTES From 9adaeee23e6650af9fd4545634ddc48fc5d1d225 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Thu, 21 Jan 2021 17:23:16 -0800 Subject: [PATCH 0811/1212] IPv6 subnet mapping for lb Add support for IPv6 subnet mapping to the aws_lb resource. Resolves #17150 --- aws/resource_aws_lb.go | 11 +++++ aws/resource_aws_lb_test.go | 80 +++++++++++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+) diff --git a/aws/resource_aws_lb.go b/aws/resource_aws_lb.go index 9084b7f555d..dd978663a9b 100644 --- a/aws/resource_aws_lb.go +++ b/aws/resource_aws_lb.go @@ -109,6 +109,12 @@ func resourceAwsLb() *schema.Resource { Required: true, ForceNew: true, }, + "ipv6_address": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IsIPv6Address, + }, "outpost_id": { Type: schema.TypeString, Computed: true, @@ -298,6 +304,10 @@ func resourceAwsLbCreate(d *schema.ResourceData, meta interface{}) error { if subnetMap["private_ipv4_address"].(string) != "" { elbOpts.SubnetMappings[i].PrivateIPv4Address = aws.String(subnetMap["private_ipv4_address"].(string)) } + + if subnetMap["ipv6_address"].(string) != "" { + elbOpts.SubnetMappings[i].IPv6Address = aws.String(subnetMap["ipv6_address"].(string)) + } } } @@ -668,6 +678,7 @@ func flattenSubnetMappingsFromAvailabilityZones(availabilityZones []*elbv2.Avail for _, loadBalancerAddress := range availabilityZone.LoadBalancerAddresses { m["allocation_id"] = aws.StringValue(loadBalancerAddress.AllocationId) m["private_ipv4_address"] = aws.StringValue(loadBalancerAddress.PrivateIPv4Address) + m["ipv6_address"] = aws.StringValue(loadBalancerAddress.IPv6Address) } l = append(l, m) diff --git a/aws/resource_aws_lb_test.go b/aws/resource_aws_lb_test.go index fc7691a697d..f2521e5cd1b 100644 --- a/aws/resource_aws_lb_test.go +++ b/aws/resource_aws_lb_test.go @@ -194,6 +194,37 @@ func TestAccAWSLB_LoadBalancerType_Gateway(t *testing.T) { }) } +func TestAccAWSLB_IPv6SubnetMapping(t *testing.T) { + var conf elbv2.LoadBalancer + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_lb.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckElbv2GatewayLoadBalancer(t) }, + ProviderFactories: testAccProviderFactories, + CheckDestroy: testAccCheckAWSLBDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLBConfig_IPv6(rName), + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckAWSLBExists(resourceName, &conf), + resource.TestMatchResourceAttr(resourceName, "subnet_mapping.0.ipv6_address", regexp.MustCompile("[a-f0-6]+:[a-f0-6:]+")), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "drop_invalid_header_fields", + "enable_http2", + "idle_timeout", + }, + }, + }, + }) +} + func TestAccAWSLB_LoadBalancerType_Gateway_EnableCrossZoneLoadBalancing(t *testing.T) { var conf elbv2.LoadBalancer rName := acctest.RandomWithPrefix("tf-acc-test") @@ -1956,6 +1987,55 @@ resource "aws_lb" "test" { `, rName)) } +func testAccAWSLBConfig_IPv6(rName string) string { + return composeConfig( + testAccAvailableAZsNoOptInConfig(), + fmt.Sprintf(` +resource "aws_vpc" "test" { + assign_generated_ipv6_cidr_block = true + cidr_block = "10.10.10.0/25" + + tags = { + Name = "tf-acc-test-load-balancer" + } +} + +resource "aws_internet_gateway" "gw" { + vpc_id = aws_vpc.test.id + + tags = { + Name = "main" + } +} + +resource "aws_subnet" "test" { + availability_zone = data.aws_availability_zones.available.names[0] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 2, 0) + ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, 16) + vpc_id = aws_vpc.test.id + + tags = { + Name = "tf-acc-test-load-balancer" + } +} + +resource "aws_lb" "test" { + name = %[1]q + load_balancer_type = "network" + enable_deletion_protection = false + + subnet_mapping { + subnet_id = aws_subnet.test.id + ipv6_address = cidrhost(cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, 16), 5) + } + + tags = { + Name = "TestAccAWSALB_ipv6address" + } +} +`, rName)) +} + func testAccAWSLBConfig_LoadBalancerType_Gateway_EnableCrossZoneLoadBalancing(rName string, enableCrossZoneLoadBalancing bool) string { return composeConfig( testAccAvailableAZsNoOptInConfig(), From f3a52a89f1865e013dbb39f6f8805223f808b0ec Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Thu, 21 Jan 2021 18:00:57 -0800 Subject: [PATCH 0812/1212] Changelog Automation (#16215) * add-changelog-automation --- .github/PULL_REQUEST_TEMPLATE.md | 9 -- .github/workflows/generate_changelog.yml | 20 +++ .gitignore | 1 + GNUmakefile | 7 +- docs/MAINTAINING.md | 23 ---- .../pullrequest-submission-and-lifecycle.md | 128 +++++++++++++++++- scripts/changelog.tmpl | 41 ++++++ scripts/generate-changelog.sh | 53 ++++++++ scripts/release-note.tmpl | 11 ++ tools/go.mod | 1 + tools/go.sum | 28 ++++ tools/main.go | 1 + 12 files changed, 288 insertions(+), 35 deletions(-) create mode 100644 .github/workflows/generate_changelog.yml create mode 100644 scripts/changelog.tmpl create mode 100755 scripts/generate-changelog.sh create mode 100644 scripts/release-note.tmpl diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 14938b37499..0c1adb8701e 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -12,15 +12,6 @@ Relates OR Closes #0000 -Release note for [CHANGELOG](https://github.com/hashicorp/terraform-provider-aws/blob/master/CHANGELOG.md): - - -```release-note - -``` - Output from acceptance testing: + diff --git a/.github/workflows/acctest-terraform-lint.yml b/.github/workflows/acctest-terraform-lint.yml index 8dce20eb365..7a24e1ae113 100644 --- a/.github/workflows/acctest-terraform-lint.yml +++ b/.github/workflows/acctest-terraform-lint.yml @@ -2,7 +2,7 @@ name: Acceptance Test Linting on: push: branches: - - master + - main - "release/**" pull_request: paths: diff --git a/.github/workflows/changelog.yml 
b/.github/workflows/changelog.yml
index 9e80f9fd261..2086b017faf 100644
--- a/.github/workflows/changelog.yml
+++ b/.github/workflows/changelog.yml
@@ -2,7 +2,7 @@ name: CHANGELOG Checks
 on:
   push:
     branches:
-      - master
+      - main
      - 'release/**'
   pull_request:
     paths:
@@ -51,7 +51,7 @@ jobs:
           body: |-
             Thank you for your contribution! :rocket:
 
-            Please note that the `CHANGELOG.md` file contents are handled by the maintainers during merge. This is to prevent pull request merge conflicts, especially for contributions which may not be merged immediately. Please see the [Contributing Guide](https://github.com/hashicorp/terraform-provider-aws/blob/master/docs/CONTRIBUTING.md) for additional pull request review items.
+            Please note that the `CHANGELOG.md` file contents are handled by the maintainers during merge. This is to prevent pull request merge conflicts, especially for contributions which may not be merged immediately. Please see the [Contributing Guide](https://github.com/hashicorp/terraform-provider-aws/blob/main/docs/CONTRIBUTING.md) for additional pull request review items.
 
             Remove any changes to the `CHANGELOG.md` file and commit them in this pull request to prevent delays with reviewing and potentially merging this pull request.
   misspell:
diff --git a/.github/workflows/dependencies.yml b/.github/workflows/dependencies.yml
index 0674c4983be..99a472fd44c 100644
--- a/.github/workflows/dependencies.yml
+++ b/.github/workflows/dependencies.yml
@@ -3,7 +3,7 @@ name: Dependency Checks
 on:
   push:
     branches:
-      - master
+      - main
       - 'release/**'
 
   pull_request_target:
@@ -54,7 +54,7 @@ jobs:
             * Check [open pull requests with the `dependencies` label](https://github.com/hashicorp/terraform-provider-aws/pulls?q=is%3Aopen+is%3Apr+label%3Adependencies) to view other dependency updates.
             * If this pull request includes an update to the AWS Go SDK (or any other dependency) version, only updates submitted via dependabot will be merged. This pull request will need to remove these changes and will need to be rebased after the existing dependency update via dependabot has been merged for this pull request to be reviewed.
             * If this pull request is for supporting a new AWS service:
-              * Ensure the new AWS service changes are following the [Contributing Guide section on new services](https://github.com/hashicorp/terraform-provider-aws/blob/master/docs/contributing/contribution-checklists.md#new-service), in particular that the dependency addition and initial provider support are in a separate pull request from other changes (e.g. new resources). Contributions not following this item will not be reviewed until the changes are split.
+              * Ensure the new AWS service changes are following the [Contributing Guide section on new services](https://github.com/hashicorp/terraform-provider-aws/blob/main/docs/contributing/contribution-checklists.md#new-service), in particular that the dependency addition and initial provider support are in a separate pull request from other changes (e.g. new resources). Contributions not following this item will not be reviewed until the changes are split.
               * If this pull request is already a separate pull request from the above item, you can ignore this message.
go_mod: name: go mod diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index f05b37d19df..0eef549dfee 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -2,7 +2,7 @@ name: Documentation Checks on: push: branches: - - master + - main pull_request: paths: - .markdownlinkcheck.json diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index e6cf40feee5..181d896fae6 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -2,7 +2,7 @@ name: Examples Checks on: push: branches: - - master + - main pull_request: paths: - .github/workflows/examples.yml diff --git a/.github/workflows/pull_requests.yml b/.github/workflows/pull_requests.yml index 1f3f7da3a42..7b400f74c05 100644 --- a/.github/workflows/pull_requests.yml +++ b/.github/workflows/pull_requests.yml @@ -23,8 +23,8 @@ jobs: pr-message: |- Welcome @${{github.actor}} :wave: - It looks like this is your first Pull Request submission to the [Terraform AWS Provider](https://github.com/terraform-providers/terraform-provider-aws)! If you haven’t already done so please make sure you have checked out our [CONTRIBUTING](https://github.com/terraform-providers/terraform-provider-aws/blob/master/docs/CONTRIBUTING.md) guide and [FAQ](https://github.com/terraform-providers/terraform-provider-aws/blob/master/docs/FAQ.md) to make sure your contribution is adhering to best practice and has all the necessary elements in place for a successful approval. + It looks like this is your first Pull Request submission to the [Terraform AWS Provider](https://github.com/terraform-providers/terraform-provider-aws)! If you haven’t already done so please make sure you have checked out our [CONTRIBUTING](https://github.com/terraform-providers/terraform-provider-aws/blob/main/docs/CONTRIBUTING.md) guide and [FAQ](https://github.com/terraform-providers/terraform-provider-aws/blob/main/docs/FAQ.md) to make sure your contribution is adhering to best practice and has all the necessary elements in place for a successful approval. - Also take a look at our [FAQ](https://github.com/terraform-providers/terraform-provider-aws/blob/master/docs/FAQ.md) which details how we prioritize Pull Requests for inclusion. + Also take a look at our [FAQ](https://github.com/terraform-providers/terraform-provider-aws/blob/main/docs/FAQ.md) which details how we prioritize Pull Requests for inclusion. Thanks again, and welcome to the community! :smiley: diff --git a/.github/workflows/terraform_provider.yml b/.github/workflows/terraform_provider.yml index 857d829f980..b62824e2dd7 100644 --- a/.github/workflows/terraform_provider.yml +++ b/.github/workflows/terraform_provider.yml @@ -3,7 +3,7 @@ name: Terraform Provider Checks on: push: branches: - - master + - main - 'release/**' pull_request: paths: diff --git a/.github/workflows/website.yml b/.github/workflows/website.yml index a3ba7eeb3d3..2de93dd19af 100644 --- a/.github/workflows/website.yml +++ b/.github/workflows/website.yml @@ -5,7 +5,7 @@ name: Website Checks on: push: branches: - - master + - main - 'release/**' pull_request: paths: diff --git a/.hashibot.hcl b/.hashibot.hcl index fbf7a56a9ce..e50c88fb991 100644 --- a/.hashibot.hcl +++ b/.hashibot.hcl @@ -22,14 +22,14 @@ behavior "deprecated_import_commenter" "hashicorp_terraform" { This pull request appears to include the Go import path `${var.import_path}`, which was from the older SDK. 
The newer SDK uses import paths beginning with `github.com/hashicorp/terraform-plugin-sdk/`. - To resolve this situation without losing any existing work, you may be able to Git rebase your branch against the current master branch (example below); replacing any remaining old import paths with the newer ones. + To resolve this situation without losing any existing work, you may be able to Git rebase your branch against the current default (main) branch (example below); replacing any remaining old import paths with the newer ones. ```console $ git fetch --all - $ git rebase origin/master + $ git rebase origin/main ``` - Another option is to create a new branch from the current master with the same code changes (replacing the import paths), submit a new pull request, and close this existing pull request. + Another option is to create a new branch from the current default (main) with the same code changes (replacing the import paths), submit a new pull request, and close this existing pull request. We apologize for this inconvenience and appreciate your effort. Thank you for contributing and helping make the Terraform AWS Provider better for everyone. EOF @@ -46,14 +46,14 @@ behavior "deprecated_import_commenter" "sdkv1" { This pull request appears to include at least one V1 import path of the SDK (`${var.import_path}`). Please import the V2 path `github.com/hashicorp/terraform-plugin-sdk/v2/helper/PACKAGE` - To resolve this situation without losing any existing work, you may be able to Git rebase your branch against the current master branch (example below); replacing any remaining old import paths with the newer ones. + To resolve this situation without losing any existing work, you may be able to Git rebase your branch against the current default (main) branch (example below); replacing any remaining old import paths with the newer ones. ```console $ git fetch --all - $ git rebase origin/master + $ git rebase origin/main ``` - Another option is to create a new branch from the current master with the same code changes (replacing the import paths), submit a new pull request, and close this existing pull request. + Another option is to create a new branch from the current default (main) with the same code changes (replacing the import paths), submit a new pull request, and close this existing pull request. We apologize for this inconvenience and appreciate your effort. Thank you for contributing and helping make the Terraform AWS Provider better for everyone. 
EOF diff --git a/GNUmakefile b/GNUmakefile index fb5c9704f02..cc657a9d3de 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -31,7 +31,7 @@ testacc: fmtcheck echo "For example if updating aws/resource_aws_acm_certificate.go, use the test names in aws/resource_aws_acm_certificate_test.go starting with TestAcc and up to the underscore:"; \ echo "make testacc TESTARGS='-run=TestAccAWSAcmCertificate_'"; \ echo ""; \ - echo "See the contributing guide for more information: https://github.com/hashicorp/terraform-provider-aws/blob/master/docs/contributing/running-and-writing-acceptance-tests.md"; \ + echo "See the contributing guide for more information: https://github.com/hashicorp/terraform-provider-aws/blob/main/docs/contributing/running-and-writing-acceptance-tests.md"; \ exit 1; \ fi TF_ACC=1 go test ./$(PKG_NAME) -v -count $(TEST_COUNT) -parallel $(ACCTEST_PARALLELISM) $(TESTARGS) -timeout $(ACCTEST_TIMEOUT) diff --git a/docs/MAINTAINING.md b/docs/MAINTAINING.md index 2efac296872..bcb27a651a9 100644 --- a/docs/MAINTAINING.md +++ b/docs/MAINTAINING.md @@ -47,11 +47,11 @@ Notes for each type of pull request are (or will be) available in subsections be This pull request appears to be related to/solve #1234, so I have edited the pull request description to denote the issue reference. ``` -- Review the contents of the pull request and ensure the change follows the relevant section of the [Contributing Guide](https://github.com/hashicorp/terraform-provider-aws/blob/master/docs/contributing/contribution-checklists.md) +- Review the contents of the pull request and ensure the change follows the relevant section of the [Contributing Guide](https://github.com/hashicorp/terraform-provider-aws/blob/main/docs/contributing/contribution-checklists.md) - If the change is not acceptable, leave a long form comment about the reasoning and close the pull request - If the change is acceptable with modifications, leave a pull request review marked using the `Request Changes` option (for maintainer pull requests with minor modification requests, giving feedback with the `Approve` option is recommended so they do not need to wait for another round of review) - If the author is unresponsive for changes (by default we give two weeks), determine importance and level of effort to finish the pull request yourself including their commits or close the pull request -- Run relevant acceptance testing ([locally](https://github.com/hashicorp/terraform-provider-aws/blob/master/docs/contributing/running-and-writing-acceptance-tests.md) or in TeamCity) against AWS Commercial and AWS GovCloud (US) to ensure no new failures are being introduced +- Run relevant acceptance testing ([locally](https://github.com/hashicorp/terraform-provider-aws/blob/main/docs/contributing/running-and-writing-acceptance-tests.md) or in TeamCity) against AWS Commercial and AWS GovCloud (US) to ensure no new failures are being introduced - Approve the pull request with a comment outlining what steps you took that ensure the change is acceptable, e.g. acceptance testing output ``````markdown @@ -304,7 +304,7 @@ Run the acceptance testing pattern, `TestAccAWSCloudFormationStack(_dataSource)? - Add any linked issues that will be closed by the pull request to the same upcoming release milestone - Merge the pull request - Delete the branch (if the branch is on this repository) -- Determine if the pull request should have a CHANGELOG entry by reviewing the [Pull Request Types to CHANGELOG section](#pull-request-types-to-changelog). 
If so, update the repository `CHANGELOG.md` by directly committing to the `master` branch (e.g. editing the file in the GitHub web interface). See also the [Extending Terraform documentation](https://www.terraform.io/docs/extend/best-practices/versioning.html) for more information about the expected CHANGELOG format.
+- Determine if the pull request should have a CHANGELOG entry by reviewing the [Pull Request Types to CHANGELOG section](#pull-request-types-to-changelog), and follow the CHANGELOG specification [here](https://github.com/hashicorp/terraform-provider-aws/blob/main/docs/contributing/pullrequest-submission-and-lifecycle.md#changelog-process)
 - Leave a comment on any issues closed by the pull request noting that it has been merged and when to expect the release containing it, e.g.
 
 ```markdown
@@ -325,7 +325,7 @@ The following branch conventions are used:
 
 | Branch | Example | Description |
 |--------|---------|-------------|
-| `master` | `master` | Main, unreleased code branch. |
+| `main` | `main` | Main, unreleased code branch. |
 | `release/*` | `release/2.x` | Backport branches for previous major releases. |
 
 Additional branch naming recommendations can be found in the [Pull Request Submission and Lifecycle documentation](contributing/pullrequest-submission-and-lifecycle.md#branch-prefixes).
diff --git a/docs/contributing/data-handling-and-conversion.md b/docs/contributing/data-handling-and-conversion.md
index 452515269a5..3c26f5a2692 100644
--- a/docs/contributing/data-handling-and-conversion.md
+++ b/docs/contributing/data-handling-and-conversion.md
@@ -42,7 +42,7 @@ At the bottom of this documentation is a [Glossary section](#glossary), which ma
 
 Before getting into highly specific documentation about the Terraform AWS Provider handling of data, it may be helpful to briefly highlight how Terraform Plugins (Terraform Providers in this case) interact with Terraform CLI and the Terraform State in general and where this documentation fits into the whole process.
 
-There are two primary data flows that are typically handled by resources within a Terraform Provider. Data is either being converted from a planned new Terraform State into making a remote system request or a remote system response is being converted into an applied new Terraform State. The semantics of how the data of the planned new Terraform State is surfaced to the resource implementation is determined by where a resource is in its lifecycle and mainly handled by Terraform CLI. This concept can be explored further in the [Terraform Resource Instance Change Lifecycle documentation](https://github.com/hashicorp/terraform/blob/master/docs/resource-instance-change-lifecycle.md), with the caveat that some additional behaviors occur within the Terraform Plugin SDK as well (if the Terraform Plugin uses that implementation detail).
+There are two primary data flows that are typically handled by resources within a Terraform Provider. Data is either being converted from a planned new Terraform State into making a remote system request or a remote system response is being converted into an applied new Terraform State. The semantics of how the data of the planned new Terraform State is surfaced to the resource implementation is determined by where a resource is in its lifecycle and mainly handled by Terraform CLI.
This concept can be explored further in the [Terraform Resource Instance Change Lifecycle documentation](https://github.com/hashicorp/terraform/blob/main/docs/resource-instance-change-lifecycle.md), with the caveat that some additional behaviors occur within the Terraform Plugin SDK as well (if the Terraform Plugin uses that implementation detail). As a generic walkthrough, the following data handling occurs when creating a Terraform Resource: diff --git a/website/docs/guides/version-2-upgrade.html.md b/website/docs/guides/version-2-upgrade.html.md index a90c5ae0a3c..090f198a800 100644 --- a/website/docs/guides/version-2-upgrade.html.md +++ b/website/docs/guides/version-2-upgrade.html.md @@ -10,7 +10,7 @@ description: |- Version 2.0.0 of the AWS provider for Terraform is a major release and includes some changes that you will need to consider when upgrading. This guide is intended to help with that process and focuses only on changes from version 1.60.0 to version 2.0.0. -Most of the changes outlined in this guide have been previously marked as deprecated in the Terraform plan/apply output throughout previous provider releases. These changes, such as deprecation notices, can always be found in the [Terraform AWS Provider CHANGELOG](https://github.com/hashicorp/terraform-provider-aws/blob/master/CHANGELOG.md). +Most of the changes outlined in this guide have been previously marked as deprecated in the Terraform plan/apply output throughout previous provider releases. These changes, such as deprecation notices, can always be found in the [Terraform AWS Provider CHANGELOG](https://github.com/hashicorp/terraform-provider-aws/blob/main/CHANGELOG.md). Upgrade topics: diff --git a/website/docs/guides/version-3-upgrade.html.md b/website/docs/guides/version-3-upgrade.html.md index ba3c4896032..435c5ccd0fe 100644 --- a/website/docs/guides/version-3-upgrade.html.md +++ b/website/docs/guides/version-3-upgrade.html.md @@ -10,7 +10,7 @@ description: |- Version 3.0.0 of the AWS provider for Terraform is a major release and includes some changes that you will need to consider when upgrading. This guide is intended to help with that process and focuses only on changes from version 2.X to version 3.0.0. See the [Version 2 Upgrade Guide](/docs/providers/aws/guides/version-2-upgrade.html) for information about upgrading from 1.X to version 2.0.0. -Most of the changes outlined in this guide have been previously marked as deprecated in the Terraform plan/apply output throughout previous provider releases. These changes, such as deprecation notices, can always be found in the [Terraform AWS Provider CHANGELOG](https://github.com/hashicorp/terraform-provider-aws/blob/master/CHANGELOG.md). +Most of the changes outlined in this guide have been previously marked as deprecated in the Terraform plan/apply output throughout previous provider releases. These changes, such as deprecation notices, can always be found in the [Terraform AWS Provider CHANGELOG](https://github.com/hashicorp/terraform-provider-aws/blob/main/CHANGELOG.md). ~> **NOTE:** Version 3.0.0 and later of the AWS Provider can only be automatically installed on Terraform 0.12 and later. 
From 9e55235d893c73ed628f5c0e30fde3522ac37f47 Mon Sep 17 00:00:00 2001
From: changelogbot
Date: Sat, 23 Jan 2021 01:03:59 +0000
Subject: [PATCH 0822/1212] Update CHANGELOG.md for #17246

---
 CHANGELOG.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e9cc05fc998..f443ab5c510 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,9 @@
 ## 3.26.0 (Unreleased)
 
+BUG FIXES:
+
+* resource/aws_elasticache_replication_group: Correctly update computed `member_clusters` values ([#17201](https://github.com/hashicorp/terraform-provider-aws/issues/17201))
+
 ## 3.25.0 (January 22, 2021)
 
 NOTES

From 9bf8acdd538d28059de4dfbbe668de8bb299c0e3 Mon Sep 17 00:00:00 2001
From: bill-rich
Date: Fri, 22 Jan 2021 17:18:23 -0800
Subject: [PATCH 0823/1212] Add ipv6 to docs and data source

---
 aws/data_source_aws_lb.go       | 4 ++++
 website/docs/r/lb.html.markdown | 1 +
 2 files changed, 5 insertions(+)

diff --git a/aws/data_source_aws_lb.go b/aws/data_source_aws_lb.go
index 9f4b4ebbde7..ed9e42ab3a6 100644
--- a/aws/data_source_aws_lb.go
+++ b/aws/data_source_aws_lb.go
@@ -74,6 +74,10 @@ func dataSourceAwsLb() *schema.Resource {
 							Type:     schema.TypeString,
 							Computed: true,
 						},
+						"ipv6_address": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
 					},
 				},
 			},
diff --git a/website/docs/r/lb.html.markdown b/website/docs/r/lb.html.markdown
index d6c35626706..168cf5e824b 100644
--- a/website/docs/r/lb.html.markdown
+++ b/website/docs/r/lb.html.markdown
@@ -135,6 +135,7 @@ Subnet Mapping (`subnet_mapping`) blocks support the following:
 * `subnet_id` - (Required) The id of the subnet of which to attach to the load balancer. You can specify only one subnet per Availability Zone.
 * `allocation_id` - (Optional) The allocation ID of the Elastic IP address.
 * `private_ipv4_address` - (Optional) A private ipv4 address within the subnet to assign to the internal-facing load balancer.
+* `ipv6_address` - (Optional) An ipv6 address within the subnet to assign to the internal-facing load balancer.
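+
+For example, a network load balancer can be pinned to a specific IPv6 address from a dual-stack subnet. This is a minimal sketch mirroring this change's acceptance test configuration; it assumes `aws_subnet.example` already has an `ipv6_cidr_block` assigned, and the host number `5` is an arbitrary illustrative choice:
+
+```hcl
+resource "aws_lb" "example" {
+  name               = "example"
+  load_balancer_type = "network"
+
+  subnet_mapping {
+    subnet_id = aws_subnet.example.id
+
+    # Pick host 5 out of the subnet's IPv6 block (hypothetical address choice).
+    ipv6_address = cidrhost(aws_subnet.example.ipv6_cidr_block, 5)
+  }
+}
+```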
## Attributes Reference From a8d235b204393ad10374a575eb8165c4c97910f5 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Fri, 22 Jan 2021 23:48:56 -0800 Subject: [PATCH 0824/1212] Adds test for Multi-AZ flag --- ..._aws_elasticache_replication_group_test.go | 57 +++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go index 082c86341ab..7424d7e7238 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -928,6 +928,63 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFail }) } +func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_MultiAZEnabled(t *testing.T) { + var replicationGroup elasticache.ReplicationGroup + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_elasticache_replication_group.test" + + autoFailoverEnabled := true + multiAZEnabled := true + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSElasticacheReplicationGroupConfig_FailoverMultiAZ(rName, 3, autoFailoverEnabled, multiAZEnabled), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &replicationGroup), + resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", strconv.FormatBool(autoFailoverEnabled)), + resource.TestCheckResourceAttr(resourceName, "multi_az_enabled", strconv.FormatBool(multiAZEnabled)), + resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "3"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "3"), + ), + }, + { + PreConfig: func() { + // Ensure that primary is on the node we are trying to delete + conn := testAccProvider.Meta().(*AWSClient).elasticacheconn + timeout := 40 * time.Minute + + // Must disable automatic failover first + if err := resourceAwsElasticacheReplicationGroupDisableAutomaticFailover(conn, rName, timeout); err != nil { + t.Fatalf("error disabling automatic failover: %s", err) + } + + // Set primary + if err := resourceAwsElasticacheReplicationGroupSetPrimaryClusterID(conn, rName, formatReplicationGroupClusterID(rName, 3), timeout); err != nil { + t.Fatalf("error changing primary cache cluster: %s", err) + } + + // Re-enable automatic failover like nothing ever happened + if err := resourceAwsElasticacheReplicationGroupEnableAutomaticFailover(conn, rName, multiAZEnabled, timeout); err != nil { + t.Fatalf("error re-enabling automatic failover: %s", err) + } + }, + Config: testAccAWSElasticacheReplicationGroupConfig_FailoverMultiAZ(rName, 2, autoFailoverEnabled, multiAZEnabled), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &replicationGroup), + resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", strconv.FormatBool(autoFailoverEnabled)), + resource.TestCheckResourceAttr(resourceName, "multi_az_enabled", strconv.FormatBool(multiAZEnabled)), + resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "2"), + ), + }, + }, + }) +} + func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_MemberClusterDisappears_NoChange(t *testing.T) { var replicationGroup 
elasticache.ReplicationGroup
	rName := acctest.RandomWithPrefix("tf-acc-test")

From 960c2d59710e43e052aec400ff0648fa28ea0f9b Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Mon, 18 Jan 2021 19:04:39 +0200
Subject: [PATCH 0825/1212] add app image config resource

---
 .../service/sagemaker/finder/finder.go        |  19 ++
 ...resource_aws_sagemaker_app_image_config.go | 315 ++++++++++++++++++
 2 files changed, 334 insertions(+)
 create mode 100644 aws/resource_aws_sagemaker_app_image_config.go

diff --git a/aws/internal/service/sagemaker/finder/finder.go b/aws/internal/service/sagemaker/finder/finder.go
index 37249d612eb..f7d201d2bb6 100644
--- a/aws/internal/service/sagemaker/finder/finder.go
+++ b/aws/internal/service/sagemaker/finder/finder.go
@@ -119,3 +119,22 @@ func UserProfileByName(conn *sagemaker.SageMaker, domainID, userProfileName stri
 
 	return output, nil
 }
+
+// AppImageConfigByName returns the App Image Config corresponding to the specified App Image Config ID.
+// Returns nil if no App Image Config is found.
+func AppImageConfigByName(conn *sagemaker.SageMaker, appImageConfigID string) (*sagemaker.DescribeAppImageConfigOutput, error) {
+	input := &sagemaker.DescribeAppImageConfigInput{
+		AppImageConfigName: aws.String(appImageConfigID),
+	}
+
+	output, err := conn.DescribeAppImageConfig(input)
+	if err != nil {
+		return nil, err
+	}
+
+	if output == nil {
+		return nil, nil
+	}
+
+	return output, nil
+}
diff --git a/aws/resource_aws_sagemaker_app_image_config.go b/aws/resource_aws_sagemaker_app_image_config.go
new file mode 100644
index 00000000000..c9e202da81a
--- /dev/null
+++ b/aws/resource_aws_sagemaker_app_image_config.go
@@ -0,0 +1,315 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"regexp"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/sagemaker"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+	"github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags"
+	"github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/finder"
+)
+
+func resourceAwsSagemakerAppImageConfig() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsSagemakerAppImageConfigCreate,
+		Read:   resourceAwsSagemakerAppImageConfigRead,
+		Update: resourceAwsSagemakerAppImageConfigUpdate,
+		Delete: resourceAwsSagemakerAppImageConfigDelete,
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},
+
+		Schema: map[string]*schema.Schema{
+			"arn": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"app_image_config_name": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+				ValidateFunc: validation.All(
+					validation.StringLenBetween(1, 63),
+					validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9](-*[a-zA-Z0-9])*$`), "Valid characters are a-z, A-Z, 0-9, and - (hyphen)."),
+				),
+			},
+			"kernel_gateway_image_config": {
+				Type:     schema.TypeList,
+				Optional: true,
+				MaxItems: 1,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"file_system_config": {
+							Type:     schema.TypeList,
+							Optional: true,
+							MaxItems: 1,
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"default_gid": {
+										Type:         schema.TypeInt,
+										Optional:     true,
+										Default:      100,
+										ValidateFunc: validation.IntBetween(0, 65535),
+									},
+									"default_uid": {
+										Type:         schema.TypeInt,
+										Optional:     true,
+										Default:      1000,
+										ValidateFunc: validation.IntBetween(0, 65535),
+									},
+									"mount_path": {
+										Type:     schema.TypeString,
+										Optional: true,
+										Default:
"/home/sagemaker-user", + ValidateFunc: validation.All( + validation.StringLenBetween(1, 1024), + validation.StringMatch(regexp.MustCompile(`^\/.*`), "Must start with `/`."), + ), + }, + }, + }, + }, + "kernal_spec": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 1024), + }, + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 1024), + }, + }, + }, + }, + }, + }, + }, + "tags": tagsSchema(), + }, + } +} + +func resourceAwsSagemakerAppImageConfigCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sagemakerconn + + name := d.Get("app_image_config_name").(string) + input := &sagemaker.CreateAppImageConfigInput{ + AppImageConfigName: aws.String(name), + } + + if v, ok := d.GetOk("kernel_gateway_image_config"); ok && len(v.([]interface{})) > 0 { + input.KernelGatewayImageConfig = expandSagemakerAppImageConfigKernelGatewayImageConfig(v.([]interface{})) + } + + if v, ok := d.GetOk("tags"); ok { + input.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().SagemakerTags() + } + + _, err := conn.CreateAppImageConfig(input) + if err != nil { + return fmt.Errorf("error creating SageMaker App Image Config %s: %w", name, err) + } + + d.SetId(name) + + return resourceAwsSagemakerAppImageConfigRead(d, meta) +} + +func resourceAwsSagemakerAppImageConfigRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sagemakerconn + ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig + + image, err := finder.AppImageConfigByName(conn, d.Id()) + if err != nil { + if isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "No Image with the name") { + d.SetId("") + log.Printf("[WARN] Unable to find SageMaker App Image Config (%s); removing from state", d.Id()) + return nil + } + return fmt.Errorf("error reading SageMaker App Image Config (%s): %w", d.Id(), err) + + } + + arn := aws.StringValue(image.AppImageConfigArn) + d.Set("app_image_config_name", image.AppImageConfigName) + d.Set("arn", arn) + + if err := d.Set("kernel_gateway_image_config", flattenSagemakerAppImageConfigKernelGatewayImageConfig(image.KernelGatewayImageConfig)); err != nil { + return fmt.Errorf("error setting kernel_gateway_image_config: %w", err) + } + + tags, err := keyvaluetags.SagemakerListTags(conn, arn) + + if err != nil { + return fmt.Errorf("error listing tags for SageMaker App Image Config (%s): %w", d.Id(), err) + } + + if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + return fmt.Errorf("error setting tags: %w", err) + } + + return nil +} + +func resourceAwsSagemakerAppImageConfigUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sagemakerconn + + if d.HasChangeExcept("tags") { + + input := &sagemaker.UpdateAppImageConfigInput{ + AppImageConfigName: aws.String(d.Id()), + } + + if v, ok := d.GetOk("kernel_gateway_image_config"); ok && len(v.([]interface{})) > 0 { + input.KernelGatewayImageConfig = expandSagemakerAppImageConfigKernelGatewayImageConfig(v.([]interface{})) + } + + log.Printf("[DEBUG] Sagemaker App Image Config update config: %#v", *input) + _, err := conn.UpdateAppImageConfig(input) + if err != nil { + return fmt.Errorf("error updating SageMaker App Image Config: %w", err) + } + + } + + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if 
err := keyvaluetags.SagemakerUpdateTags(conn, d.Get("arn").(string), o, n); err != nil {
+			return fmt.Errorf("error updating SageMaker App Image Config (%s) tags: %s", d.Id(), err)
+		}
+	}
+
+	return resourceAwsSagemakerAppImageConfigRead(d, meta)
+}
+
+func resourceAwsSagemakerAppImageConfigDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).sagemakerconn
+
+	input := &sagemaker.DeleteAppImageConfigInput{
+		AppImageConfigName: aws.String(d.Id()),
+	}
+
+	if _, err := conn.DeleteAppImageConfig(input); err != nil {
+		if isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "No Image with the name") {
+			return nil
+		}
+		return fmt.Errorf("error deleting SageMaker App Image Config (%s): %w", d.Id(), err)
+	}
+
+	return nil
+}
+
+func expandSagemakerAppImageConfigKernelGatewayImageConfig(l []interface{}) *sagemaker.KernelGatewayImageConfig {
+	if len(l) == 0 || l[0] == nil {
+		return nil
+	}
+
+	m := l[0].(map[string]interface{})
+
+	config := &sagemaker.KernelGatewayImageConfig{
+		KernelSpecs: expandSagemakerAppImageConfigKernelGatewayImageConfigKernelSpecs(m["kernel_spec"].([]interface{})),
+	}
+
+	if v, ok := m["file_system_config"].([]interface{}); ok && len(v) > 0 {
+		config.FileSystemConfig = expandSagemakerAppImageConfigKernelGatewayImageConfigFileSystemConfig(v)
+	}
+
+	return config
+}
+
+func expandSagemakerAppImageConfigKernelGatewayImageConfigFileSystemConfig(l []interface{}) *sagemaker.FileSystemConfig {
+	if len(l) == 0 || l[0] == nil {
+		return nil
+	}
+
+	m := l[0].(map[string]interface{})
+
+	config := &sagemaker.FileSystemConfig{
+		DefaultGid: aws.Int64(int64(m["default_gid"].(int))),
+		DefaultUid: aws.Int64(int64(m["default_uid"].(int))),
+		MountPath:  aws.String(m["mount_path"].(string)),
+	}
+
+	return config
+}
+
+func expandSagemakerAppImageConfigKernelGatewayImageConfigKernelSpecs(l []interface{}) []*sagemaker.KernelSpec {
+	if len(l) == 0 || l[0] == nil {
+		return nil
+	}
+
+	m := l[0].(map[string]interface{})
+
+	config := &sagemaker.KernelSpec{
+		Name: aws.String(m["name"].(string)),
+	}
+
+	if v, ok := m["display_name"].(string); ok && v != "" {
+		config.DisplayName = aws.String(v)
+	}
+
+	return []*sagemaker.KernelSpec{config}
+}
+
+func flattenSagemakerAppImageConfigKernelGatewayImageConfig(config *sagemaker.KernelGatewayImageConfig) []map[string]interface{} {
+	if config == nil {
+		return []map[string]interface{}{}
+	}
+
+	m := map[string]interface{}{
+		"kernel_spec": flattenSagemakerAppImageConfigKernelGatewayImageConfigKernelSpecs(config.KernelSpecs),
+	}
+
+	if config.FileSystemConfig != nil {
+		m["file_system_config"] = flattenSagemakerAppImageConfigKernelGatewayImageConfigFileSystemConfig(config.FileSystemConfig)
+	}
+
+	return []map[string]interface{}{m}
+}
+
+func flattenSagemakerAppImageConfigKernelGatewayImageConfigFileSystemConfig(config *sagemaker.FileSystemConfig) []map[string]interface{} {
+	if config == nil {
+		return []map[string]interface{}{}
+	}
+
+	m := map[string]interface{}{
+		"mount_path":  aws.StringValue(config.MountPath),
+		"default_gid": aws.Int64Value(config.DefaultGid),
+		"default_uid": aws.Int64Value(config.DefaultUid),
+	}
+
+	return []map[string]interface{}{m}
+}
+
+func flattenSagemakerAppImageConfigKernelGatewayImageConfigKernelSpecs(config []*sagemaker.KernelSpec) []map[string]interface{} {
+	if config == nil {
+		return []map[string]interface{}{}
+	}
+
+	kernel := config[0]
+	if kernel == nil {
+		return []map[string]interface{}{}
+	}
+
+	m := map[string]interface{}{
+		"name": aws.StringValue(kernel.Name),
+ } + + if kernel.DisplayName != nil { + m["display_name"] = aws.StringValue(kernel.DisplayName) + } + + return []map[string]interface{}{m} +} From 261eedba050a0b009c2524f772688d9baa207412 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Mon, 18 Jan 2021 19:05:29 +0200 Subject: [PATCH 0826/1212] add resource to provider --- aws/provider.go | 1 + 1 file changed, 1 insertion(+) diff --git a/aws/provider.go b/aws/provider.go index 367c9c4bf61..1721e92f683 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -876,6 +876,7 @@ func Provider() *schema.Provider { "aws_route_table": resourceAwsRouteTable(), "aws_default_route_table": resourceAwsDefaultRouteTable(), "aws_route_table_association": resourceAwsRouteTableAssociation(), + "aws_sagemaker_app_image_config": resourceAwsSagemakerAppImageConfig(), "aws_sagemaker_code_repository": resourceAwsSagemakerCodeRepository(), "aws_sagemaker_domain": resourceAwsSagemakerDomain(), "aws_sagemaker_endpoint": resourceAwsSagemakerEndpoint(), From 0a3e3dd1973885ff62a84b7c1efab0f030fa5b8b Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Mon, 18 Jan 2021 19:20:08 +0200 Subject: [PATCH 0827/1212] basic test --- ...rce_aws_sagemaker_app_image_config_test.go | 248 ++++++++++++++++++ 1 file changed, 248 insertions(+) create mode 100644 aws/resource_aws_sagemaker_app_image_config_test.go diff --git a/aws/resource_aws_sagemaker_app_image_config_test.go b/aws/resource_aws_sagemaker_app_image_config_test.go new file mode 100644 index 00000000000..a3bf0ad15f6 --- /dev/null +++ b/aws/resource_aws_sagemaker_app_image_config_test.go @@ -0,0 +1,248 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/sagemaker" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/finder" +) + +// func init() { +// resource.AddTestSweepers("aws_sagemaker_app_image_config", &resource.Sweeper{ +// Name: "aws_sagemaker_app_image_config", +// F: testSweepSagemakerAppImageConfigs, +// }) +// } + +// func testSweepSagemakerAppImageConfigs(region string) error { +// client, err := sharedClientForRegion(region) +// if err != nil { +// return fmt.Errorf("error getting client: %s", err) +// } +// conn := client.(*AWSClient).sagemakerconn + +// err = conn.ListAppImageConfigs(&sagemaker.ListAppImageConfigsInput{}, func(page *sagemaker.ListAppImageConfigsOutput) bool { +// for _, instance := range page.AppImageConfigs { +// name := aws.StringValue(instance.AppImageConfigName) + +// input := &sagemaker.DeleteAppImageConfigInput{ +// AppImageConfigName: instance.AppImageConfigName, +// } + +// log.Printf("[INFO] Deleting SageMaker App Image Config: %s", name) +// if _, err := conn.DeleteAppImageConfig(input); err != nil { +// log.Printf("[ERROR] Error deleting SageMaker App Image Config (%s): %s", name, err) +// continue +// } +// } +// }) + +// if testSweepSkipSweepError(err) { +// log.Printf("[WARN] Skipping SageMaker App Image Config sweep for %s: %s", region, err) +// return nil +// } + +// if err != nil { +// return fmt.Errorf("Error retrieving SageMaker App Image Configs: %w", err) +// } + +// return nil +// } + +func TestAccAWSSagemakerAppImageConfig_basic(t *testing.T) { + var notebook sagemaker.DescribeAppImageConfigOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := 
"aws_sagemaker_app_image_config.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerAppImageConfigDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerAppImageConfigBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerAppImageConfigExists(resourceName, ¬ebook), + resource.TestCheckResourceAttr(resourceName, "app_image_config_name", rName), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("app-image-config/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSagemakerAppImageConfig_gitConfig_branch(t *testing.T) { + var notebook sagemaker.DescribeAppImageConfigOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_app_image_config.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerAppImageConfigDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerAppImageConfigGitConfigBranchConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerAppImageConfigExists(resourceName, ¬ebook), + resource.TestCheckResourceAttr(resourceName, "app_image_config_name", rName), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("code-repository/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "git_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "git_config.0.repository_url", "https://github.com/hashicorp/terraform-provider-aws.git"), + resource.TestCheckResourceAttr(resourceName, "git_config.0.branch", "master"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSagemakerAppImageConfig_disappears(t *testing.T) { + var notebook sagemaker.DescribeAppImageConfigOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_app_image_config.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerAppImageConfigDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerAppImageConfigBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerAppImageConfigExists(resourceName, ¬ebook), + testAccCheckResourceDisappears(testAccProvider, resourceAwsSagemakerAppImageConfig(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckAWSSagemakerAppImageConfigDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).sagemakerconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_sagemaker_app_image_config" { + continue + } + + codeRepository, err := finder.AppImageConfigByName(conn, rs.Primary.ID) + if err != nil { + return nil + } + + if aws.StringValue(codeRepository.AppImageConfigName) == rs.Primary.ID { + return fmt.Errorf("Sagemaker App Image Config %q still exists", rs.Primary.ID) + } + } + + return nil +} + +func testAccCheckAWSSagemakerAppImageConfigExists(n string, codeRepo *sagemaker.DescribeAppImageConfigOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := 
s.RootModule().Resources[n]
+		if !ok {
+			return fmt.Errorf("Not found: %s", n)
+		}
+
+		if rs.Primary.ID == "" {
+			return fmt.Errorf("No SageMaker App Image Config ID is set")
+		}
+
+		conn := testAccProvider.Meta().(*AWSClient).sagemakerconn
+		resp, err := finder.AppImageConfigByName(conn, rs.Primary.ID)
+		if err != nil {
+			return err
+		}
+
+		*codeRepo = *resp
+
+		return nil
+	}
+}
+
+func testAccAWSSagemakerAppImageConfigBasicConfig(rName string) string {
+	return fmt.Sprintf(`
+resource "aws_sagemaker_app_image_config" "test" {
+  app_image_config_name = %[1]q
+}
+`, rName)
+}
+
+func testAccAWSSagemakerAppImageConfigGitConfigBranchConfig(rName string) string {
+	return fmt.Sprintf(`
+resource "aws_sagemaker_app_image_config" "test" {
+  app_image_config_name = %[1]q
+
+  git_config {
+    repository_url = "https://github.com/hashicorp/terraform-provider-aws.git"
+    branch         = "master"
+  }
+}
+`, rName)
+}
+
+func testAccAWSSagemakerAppImageConfigGitConfigSecretConfig(rName string) string {
+	return fmt.Sprintf(`
+resource "aws_secretsmanager_secret" "test" {
+  name = %[1]q
+}
+
+resource "aws_secretsmanager_secret_version" "test" {
+  secret_id     = aws_secretsmanager_secret.test.id
+  secret_string = jsonencode({ username = "example", password = "example" })
+}
+
+resource "aws_sagemaker_app_image_config" "test" {
+  app_image_config_name = %[1]q
+
+  git_config {
+    repository_url = "https://github.com/hashicorp/terraform-provider-aws.git"
+    secret_arn     = aws_secretsmanager_secret.test.arn
+  }
+
+  depends_on = [aws_secretsmanager_secret_version.test]
+}
+`, rName)
+}
+
+func testAccAWSSagemakerAppImageConfigGitConfigSecretUpdatedConfig(rName string) string {
+	return fmt.Sprintf(`
+resource "aws_secretsmanager_secret" "test2" {
+  name = "%[1]s-2"
+}
+
+resource "aws_secretsmanager_secret_version" "test2" {
+  secret_id     = aws_secretsmanager_secret.test2.id
+  secret_string = jsonencode({ username = "example", password = "example" })
+}
+
+resource "aws_sagemaker_app_image_config" "test" {
+  app_image_config_name = %[1]q
+
+  git_config {
+    repository_url = "https://github.com/hashicorp/terraform-provider-aws.git"
+    secret_arn     = aws_secretsmanager_secret.test2.arn
+  }
+
+  depends_on = [aws_secretsmanager_secret_version.test2]
+}
+`, rName)
+}

From a85c0f81a8de3919efbfb69cf960c669748c8b09 Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Thu, 21 Jan 2021 18:23:33 +0200
Subject: [PATCH 0828/1212] app image config

---
 ...resource_aws_sagemaker_app_image_config.go | 99 +++++++++++++------
 ...rce_aws_sagemaker_app_image_config_test.go | 79 ++++++---------
 2 files changed, 100 insertions(+), 78 deletions(-)

diff --git a/aws/resource_aws_sagemaker_app_image_config.go b/aws/resource_aws_sagemaker_app_image_config.go
index c9e202da81a..0f79af4b5af 100644
--- a/aws/resource_aws_sagemaker_app_image_config.go
+++ b/aws/resource_aws_sagemaker_app_image_config.go
@@ -132,7 +132,7 @@ func resourceAwsSagemakerAppImageConfigRead(d *schema.ResourceData, meta interfa
 
 	image, err := finder.AppImageConfigByName(conn, d.Id())
 	if err != nil {
-		if isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "No Image with the name") {
+		if isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "does not exist") {
 			d.SetId("")
 			log.Printf("[WARN] Unable to find SageMaker App Image Config (%s); removing from state", d.Id())
 			return nil
@@ -202,7 +202,7 @@ func resourceAwsSagemakerAppImageConfigDelete(d *schema.ResourceData, meta inter
 	}
 
 	if _, err := conn.DeleteAppImageConfig(input); err != nil {
-		if isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "No Image
with the name") { + if isAWSErr(err, sagemaker.ErrCodeResourceNotFound, "does not exist") { return nil } return fmt.Errorf("error deleting SageMaker App Image Config (%s): %w", d.Id(), err) @@ -218,8 +218,10 @@ func expandSagemakerAppImageConfigKernelGatewayImageConfig(l []interface{}) *sag m := l[0].(map[string]interface{}) - config := &sagemaker.KernelGatewayImageConfig{ - KernelSpecs: expandSagemakerAppImageConfigKernelGatewayImageConfigKernelSpecs(m["kernel_spec"].([]interface{})), + config := &sagemaker.KernelGatewayImageConfig{} + + if v, ok := m["kernal_spec"].([]interface{}); ok && len(v) > 0 { + config.KernelSpecs = expandSagemakerAppImageConfigKernelGatewayImageConfigKernelSpecs(v) } if v, ok := m["file_system_config"].([]interface{}); ok && len(v) > 0 { @@ -245,22 +247,36 @@ func expandSagemakerAppImageConfigKernelGatewayImageConfigFileSystemConfig(l []i return config } -func expandSagemakerAppImageConfigKernelGatewayImageConfigKernelSpecs(l []interface{}) []*sagemaker.KernelSpec { - if len(l) == 0 || l[0] == nil { +func expandSagemakerAppImageConfigKernelGatewayImageConfigKernelSpecs(tfList []interface{}) []*sagemaker.KernelSpec { + if len(tfList) == 0 { return nil } - m := l[0].(map[string]interface{}) + var kernelSpecs []*sagemaker.KernelSpec - config := &sagemaker.KernelSpec{ - Name: aws.String(m["name"].(string)), - } + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + kernelSpec := &sagemaker.KernelSpec{ + Name: aws.String(tfMap["name"].(string)), + } - if v, ok := m["display_name"].(string); ok && v != "" { - config.DisplayName = aws.String(v) + if v, ok := tfMap["display_name"].(string); ok && v != "" { + kernelSpec.DisplayName = aws.String(v) + } + + if kernelSpec == nil { + continue + } + + kernelSpecs = append(kernelSpecs, kernelSpec) } - return []*sagemaker.KernelSpec{config} + return kernelSpecs } func flattenSagemakerAppImageConfigKernelGatewayImageConfig(config *sagemaker.KernelGatewayImageConfig) []map[string]interface{} { @@ -268,8 +284,10 @@ func flattenSagemakerAppImageConfigKernelGatewayImageConfig(config *sagemaker.Ke return []map[string]interface{}{} } - m := map[string]interface{}{ - "kernel_spec": flattenSagemakerAppImageConfigKernelGatewayImageConfigKernelSpecs(config.KernelSpecs), + m := map[string]interface{}{} + + if config.KernelSpecs != nil { + m["kernel_spec"] = flattenSagemakerAppImageConfigKernelGatewayImageConfigKernelSpecs(config.KernelSpecs) } if config.FileSystemConfig != nil { @@ -293,23 +311,46 @@ func flattenSagemakerAppImageConfigKernelGatewayImageConfigFileSystemConfig(conf return []map[string]interface{}{m} } -func flattenSagemakerAppImageConfigKernelGatewayImageConfigKernelSpecs(config []*sagemaker.KernelSpec) []map[string]interface{} { - if config == nil { - return []map[string]interface{}{} - } +func flattenSagemakerAppImageConfigKernelGatewayImageConfigKernelSpecs(kernelSpecs []*sagemaker.KernelSpec) []map[string]interface{} { + res := make([]map[string]interface{}, 0, len(kernelSpecs)) - kernel := config[0] - if kernel == nil { - return []map[string]interface{}{} - } + for _, raw := range kernelSpecs { + kernelSpec := make(map[string]interface{}) - m := map[string]interface{}{ - "name": aws.StringValue(kernel.Name), - } + kernelSpec["name"] = aws.StringValue(raw.Name) + + if raw.DisplayName != nil { + kernelSpec["display_name"] = aws.StringValue(raw.DisplayName) + } - if kernel.DisplayName != nil { - m["display_name"] = aws.StringValue(kernel.DisplayName) + res = 
append(res, kernelSpec)
 	}
 
-	return []map[string]interface{}{m}
+	return res
 }
+
+// func flattenSagemakerAppImageConfigKernelGatewayImageConfigKernelSpecs(kernelSpecs []*sagemaker.KernelSpec) []interface{} {
+// 	if len(kernelSpecs) == 0 {
+// 		return nil
+// 	}
+
+// 	var tfList []interface{}
+
+// 	for _, kernelSpec := range kernelSpecs {
+// 		if kernelSpec == nil {
+// 			continue
+// 		}
+
+// 		m := map[string]interface{}{
+// 			"name": aws.StringValue(kernelSpec.Name),
+// 		}
+
+// 		if kernelSpec.DisplayName != nil {
+// 			m["display_name"] = aws.StringValue(kernelSpec.DisplayName)
+// 		}
+
+// 		tfList = append(tfList, m)
+// 	}
+
+// 	return tfList
+// }
diff --git a/aws/resource_aws_sagemaker_app_image_config_test.go b/aws/resource_aws_sagemaker_app_image_config_test.go
index a3bf0ad15f6..12f2f20f024 100644
--- a/aws/resource_aws_sagemaker_app_image_config_test.go
+++ b/aws/resource_aws_sagemaker_app_image_config_test.go
@@ -70,6 +70,7 @@ func TestAccAWSSagemakerAppImageConfig_basic(t *testing.T) {
 					testAccCheckAWSSagemakerAppImageConfigExists(resourceName, &notebook),
 					resource.TestCheckResourceAttr(resourceName, "app_image_config_name", rName),
 					testAccCheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("app-image-config/%s", rName)),
+					resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.#", "0"),
 					resource.TestCheckResourceAttr(resourceName, "tags.%", "0"),
 				),
 			},
@@ -82,7 +83,7 @@
 	})
 }
 
-func TestAccAWSSagemakerAppImageConfig_gitConfig_branch(t *testing.T) {
+func TestAccAWSSagemakerAppImageConfig_kernelGatewayImageConfig_kernalSpecs(t *testing.T) {
 	var notebook sagemaker.DescribeAppImageConfigOutput
 	rName := acctest.RandomWithPrefix("tf-acc-test")
 	resourceName := "aws_sagemaker_app_image_config.test"
@@ -93,14 +94,14 @@
 		CheckDestroy: testAccCheckAWSSagemakerAppImageConfigDestroy,
 		Steps: []resource.TestStep{
 			{
-				Config: testAccAWSSagemakerAppImageConfigGitConfigBranchConfig(rName),
+				Config: testAccAWSSagemakerAppImageConfigKernelGatewayImageConfigKernalSpecs1(rName),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSagemakerAppImageConfigExists(resourceName, &notebook),
 					resource.TestCheckResourceAttr(resourceName, "app_image_config_name", rName),
-					testAccCheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("code-repository/%s", rName)),
-					resource.TestCheckResourceAttr(resourceName, "git_config.#", "1"),
-					resource.TestCheckResourceAttr(resourceName, "git_config.0.repository_url", "https://github.com/hashicorp/terraform-provider-aws.git"),
-					resource.TestCheckResourceAttr(resourceName, "git_config.0.branch", "master"),
+					resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.kernel_spec.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.file_system_config.#", "0"),
+					resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.kernel_spec.0.name", rName),
 				),
 			},
 			{
@@ -108,6 +109,18 @@
 				ImportState:       true,
 				ImportStateVerify: true,
 			},
+			{
+				Config: testAccAWSSagemakerAppImageConfigKernelGatewayImageConfigKernalSpecs2(rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSSagemakerAppImageConfigExists(resourceName, &notebook),
+					resource.TestCheckResourceAttr(resourceName,
"app_image_config_name", rName), + resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.kernel_sepcs.#", "1"), + resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.file_system_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.kernel_sepcs.0.name", fmt.Sprintf("%s-2", rName)), + resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.kernel_sepcs.0.display_name", rName), + ), + }, }, }) } @@ -186,63 +199,31 @@ resource "aws_sagemaker_app_image_config" "test" { `, rName) } -func testAccAWSSagemakerAppImageConfigGitConfigBranchConfig(rName string) string { +func testAccAWSSagemakerAppImageConfigKernelGatewayImageConfigKernalSpecs1(rName string) string { return fmt.Sprintf(` resource "aws_sagemaker_app_image_config" "test" { app_image_config_name = %[1]q - git_config { - repository_url = "https://github.com/hashicorp/terraform-provider-aws.git" - branch = "master" - } -} -`, rName) -} - -func testAccAWSSagemakerAppImageConfigGitConfigSecretConfig(rName string) string { - return fmt.Sprintf(` -resource "aws_secretsmanager_secret" "test" { - name = %[1]q -} - -resource "aws_secretsmanager_secret_version" "test" { - secret_id = aws_secretsmanager_secret.test.id - secret_string = jsonencode({ username = "example", password = "example" }) -} - -resource "aws_sagemaker_app_image_config" "test" { - app_image_config_name = %[1]q - - git_config { - repository_url = "https://github.com/hashicorp/terraform-provider-aws.git" - secret_arn = aws_secretsmanager_secret.test.arn + kernel_gateway_image_config { + kernal_spec { + name = %[1]q + } } - - depends_on = [aws_secretsmanager_secret_version.test] } `, rName) } -func testAccAWSSagemakerAppImageConfigGitConfigSecretUpdatedConfig(rName string) string { +func testAccAWSSagemakerAppImageConfigKernelGatewayImageConfigKernalSpecs2(rName string) string { return fmt.Sprintf(` -resource "aws_secretsmanager_secret" "test2" { - name = "%[1]s-2" -} - -resource "aws_secretsmanager_secret_version" "test2" { - secret_id = aws_secretsmanager_secret.test2.id - secret_string = jsonencode({ username = "example", password = "example" }) -} - resource "aws_sagemaker_app_image_config" "test" { app_image_config_name = %[1]q - git_config { - repository_url = "https://github.com/hashicorp/terraform-provider-aws.git" - secret_arn = aws_secretsmanager_secret.test2.arn + kernel_gateway_image_config { + kernal_spec { + name = "%[1]s-2" + display_name = %[1]q + } } - - depends_on = [aws_secretsmanager_secret_version.test2] } `, rName) } From e28405919774bb51909ac96ff55504abe5e19dda Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 21 Jan 2021 19:36:33 +0200 Subject: [PATCH 0829/1212] fix argument name --- ...resource_aws_sagemaker_app_image_config.go | 30 ++----------------- 1 file changed, 2 insertions(+), 28 deletions(-) diff --git a/aws/resource_aws_sagemaker_app_image_config.go b/aws/resource_aws_sagemaker_app_image_config.go index 0f79af4b5af..7efaee25588 100644 --- a/aws/resource_aws_sagemaker_app_image_config.go +++ b/aws/resource_aws_sagemaker_app_image_config.go @@ -73,7 +73,7 @@ func resourceAwsSagemakerAppImageConfig() *schema.Resource { }, }, }, - "kernal_spec": { + "kernel_spec": { Type: schema.TypeList, Required: true, MaxItems: 1, @@ -220,7 +220,7 @@ func expandSagemakerAppImageConfigKernelGatewayImageConfig(l []interface{}) *sag config := 
&sagemaker.KernelGatewayImageConfig{} - if v, ok := m["kernal_spec"].([]interface{}); ok && len(v) > 0 { + if v, ok := m["kernel_spec"].([]interface{}); ok && len(v) > 0 { config.KernelSpecs = expandSagemakerAppImageConfigKernelGatewayImageConfigKernelSpecs(v) } @@ -328,29 +328,3 @@ func flattenSagemakerAppImageConfigKernelGatewayImageConfigKernelSpecs(kernelSpe return res } - -// func flattenSagemakerAppImageConfigKernelGatewayImageConfigKernelSpecs(kernelSpecs []*sagemaker.KernelSpec) []interface{} { -// if len(kernelSpecs) == 0 { -// return nil -// } - -// var tfList []interface{} - -// for _, kernelSpec := range kernelSpecs { -// if kernelSpec == nil { -// continue -// } - -// m := map[string]interface{}{ -// "name": aws.StringValue(kernelSpec.Name), -// } - -// if kernelSpec.DisplayName != nil { -// m["display_name"] = aws.StringValue(kernelSpec.DisplayName) -// } - -// tfList = append(tfList, m) -// } - -// return tfList -// } From 2dfb264952a449231c8bddedbbae4330d27f8de5 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 21 Jan 2021 19:36:44 +0200 Subject: [PATCH 0830/1212] add sweeper --- ...rce_aws_sagemaker_app_image_config_test.go | 104 ++++++++++-------- 1 file changed, 61 insertions(+), 43 deletions(-) diff --git a/aws/resource_aws_sagemaker_app_image_config_test.go b/aws/resource_aws_sagemaker_app_image_config_test.go index 12f2f20f024..07f01a918fe 100644 --- a/aws/resource_aws_sagemaker_app_image_config_test.go +++ b/aws/resource_aws_sagemaker_app_image_config_test.go @@ -2,57 +2,75 @@ package aws import ( "fmt" + "log" "testing" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/sagemaker" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/finder" ) -// func init() { -// resource.AddTestSweepers("aws_sagemaker_app_image_config", &resource.Sweeper{ -// Name: "aws_sagemaker_app_image_config", -// F: testSweepSagemakerAppImageConfigs, -// }) -// } - -// func testSweepSagemakerAppImageConfigs(region string) error { -// client, err := sharedClientForRegion(region) -// if err != nil { -// return fmt.Errorf("error getting client: %s", err) -// } -// conn := client.(*AWSClient).sagemakerconn - -// err = conn.ListAppImageConfigs(&sagemaker.ListAppImageConfigsInput{}, func(page *sagemaker.ListAppImageConfigsOutput) bool { -// for _, instance := range page.AppImageConfigs { -// name := aws.StringValue(instance.AppImageConfigName) - -// input := &sagemaker.DeleteAppImageConfigInput{ -// AppImageConfigName: instance.AppImageConfigName, -// } - -// log.Printf("[INFO] Deleting SageMaker App Image Config: %s", name) -// if _, err := conn.DeleteAppImageConfig(input); err != nil { -// log.Printf("[ERROR] Error deleting SageMaker App Image Config (%s): %s", name, err) -// continue -// } -// } -// }) - -// if testSweepSkipSweepError(err) { -// log.Printf("[WARN] Skipping SageMaker App Image Config sweep for %s: %s", region, err) -// return nil -// } - -// if err != nil { -// return fmt.Errorf("Error retrieving SageMaker App Image Configs: %w", err) -// } - -// return nil -// } +func init() { + resource.AddTestSweepers("aws_sagemaker_app_image_config", &resource.Sweeper{ + Name: "aws_sagemaker_app_image_config", + F: testSweepSagemakerAppImageConfigs, + }) +} + +func testSweepSagemakerAppImageConfigs(region string) error { + 
client, err := sharedClientForRegion(region)
+
+	if err != nil {
+		return fmt.Errorf("error getting client: %w", err)
+	}
+
+	conn := client.(*AWSClient).sagemakerconn
+	input := &sagemaker.ListAppImageConfigsInput{}
+	var sweeperErrs *multierror.Error
+
+	for {
+		output, err := conn.ListAppImageConfigs(input)
+
+		if testSweepSkipSweepError(err) {
+			log.Printf("[WARN] Skipping SageMaker App Image Config sweep for %s: %s", region, err)
+			return sweeperErrs.ErrorOrNil()
+		}
+
+		if err != nil {
+			sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error retrieving SageMaker App Image Configs: %w", err))
+			return sweeperErrs
+		}
+
+		for _, config := range output.AppImageConfigs {
+			name := aws.StringValue(config.AppImageConfigName)
+			input := &sagemaker.DeleteAppImageConfigInput{
+				AppImageConfigName: aws.String(name),
+			}
+
+			log.Printf("[INFO] Deleting SageMaker App Image Config: %s", name)
+			_, err := conn.DeleteAppImageConfig(input)
+
+			if err != nil {
+				sweeperErr := fmt.Errorf("error deleting SageMaker App Image Config (%s): %w", name, err)
+				log.Printf("[ERROR] %s", sweeperErr)
+				sweeperErrs = multierror.Append(sweeperErrs, sweeperErr)
+				continue
+			}
+		}
+
+		if aws.StringValue(output.NextToken) == "" {
+			break
+		}
+
+		input.NextToken = output.NextToken
+	}
+
+	return sweeperErrs.ErrorOrNil()
+}

 func TestAccAWSSagemakerAppImageConfig_basic(t *testing.T) {
 	var notebook sagemaker.DescribeAppImageConfigOutput
 	rName := acctest.RandomWithPrefix("tf-acc-test")
 	resourceName := "aws_sagemaker_app_image_config.test"

From f5f58a45ee096fd80767501678e45fd35d95efd1 Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Thu, 21 Jan 2021 19:53:12 +0200
Subject: [PATCH 0831/1212] add tags test

---
 ...resource_aws_sagemaker_app_image_config.go |  2 +-
 ...rce_aws_sagemaker_app_image_config_test.go | 69 +++++++++++++++++++
 2 files changed, 70 insertions(+), 1 deletion(-)

diff --git a/aws/resource_aws_sagemaker_app_image_config.go b/aws/resource_aws_sagemaker_app_image_config.go
index 7efaee25588..4103dfa75f8 100644
--- a/aws/resource_aws_sagemaker_app_image_config.go
+++ b/aws/resource_aws_sagemaker_app_image_config.go
@@ -187,7 +187,7 @@ func resourceAwsSagemakerAppImageConfigUpdate(d *schema.ResourceData, meta inter
 		o, n := d.GetChange("tags")
 
 		if err := keyvaluetags.SagemakerUpdateTags(conn, d.Get("arn").(string), o, n); err != nil {
-			return fmt.Errorf("error updating SageMaker App Image Config (%s) tags: %s", d.Id(), err)
+			return fmt.Errorf("error updating SageMaker App Image Config (%s) tags: %w", d.Id(), err)
 		}
 	}
 
diff --git a/aws/resource_aws_sagemaker_app_image_config_test.go b/aws/resource_aws_sagemaker_app_image_config_test.go
index 07f01a918fe..0600c50edb8 100644
--- a/aws/resource_aws_sagemaker_app_image_config_test.go
+++ b/aws/resource_aws_sagemaker_app_image_config_test.go
@@ -143,6 +143,50 @@ func TestAccAWSSagemakerAppImageConfig_kernelGatewayImageConfig_kernalSpecs(t *t
 	})
 }
 
+func TestAccAWSSagemakerAppImageConfig_tags(t *testing.T) {
+	var notebook sagemaker.DescribeAppImageConfigOutput
+	rName := acctest.RandomWithPrefix("tf-acc-test")
+	resourceName := "aws_sagemaker_app_image_config.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: 
testAccCheckAWSSagemakerAppImageConfigDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerAppImageConfigConfigTags1(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerAppImageConfigExists(resourceName, ¬ebook), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSSagemakerAppImageConfigConfigTags2(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerAppImageConfigExists(resourceName, ¬ebook), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccAWSSagemakerAppImageConfigConfigTags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerAppImageConfigExists(resourceName, ¬ebook), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + func TestAccAWSSagemakerAppImageConfig_disappears(t *testing.T) { var notebook sagemaker.DescribeAppImageConfigOutput rName := acctest.RandomWithPrefix("tf-acc-test") @@ -245,3 +289,28 @@ resource "aws_sagemaker_app_image_config" "test" { } `, rName) } + +func testAccAWSSagemakerAppImageConfigConfigTags1(rName, tagKey1, tagValue1 string) string { + return fmt.Sprintf(` +resource "aws_sagemaker_app_image_config" "test" { + app_image_config_name = %[1]q + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1) +} + +func testAccAWSSagemakerAppImageConfigConfigTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return fmt.Sprintf(` +resource "aws_sagemaker_app_image_config" "test" { + app_image_config_name = %[1]q + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2) +} From eb13fe166d9c21533d940ff844f2500930f49e42 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 21 Jan 2021 22:35:34 +0200 Subject: [PATCH 0832/1212] docs --- .../sagemaker_app_image_config.html.markdown | 66 +++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 website/docs/r/sagemaker_app_image_config.html.markdown diff --git a/website/docs/r/sagemaker_app_image_config.html.markdown b/website/docs/r/sagemaker_app_image_config.html.markdown new file mode 100644 index 00000000000..5a20c890dc6 --- /dev/null +++ b/website/docs/r/sagemaker_app_image_config.html.markdown @@ -0,0 +1,66 @@ +--- +subcategory: "Sagemaker" +layout: "aws" +page_title: "AWS: aws_sagemaker_app_image_config" +description: |- + Provides a Sagemaker App Image Config resource. +--- + +# Resource: aws_sagemaker_app_image_config + +Provides a Sagemaker App Image Config resource. + +## Example Usage + +### Basic usage + +```hcl +resource "aws_sagemaker_app_image_config" "test" { + app_image_config_name = "example" + + kernel_gateway_image_config { + kernel_spec { + name = "example" + } + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `app_image_config_name` - (Required) The name of the App Image Config. +* `kernel_gateway_image_config` - (Optional) The configuration for the file system and kernels in a SageMaker image running as a KernelGateway app. 
See [Kernel Gateway Image Config](#kernel-gateway-image-config) details below.
+* `tags` - (Optional) A map of tags to assign to the resource.
+
+### Kernel Gateway Image Config
+
+* `file_system_config` - (Optional) The Elastic File System (EFS) storage configuration for the image, including the default POSIX IDs and the mount path. See [File System Config](#file-system-config) details below.
+* `kernel_spec` - (Required) The specification of the Jupyter kernel in the image. See [Kernel Spec](#kernel-spec) details below.
+
+#### File System Config
+
+* `default_gid` - (Required) The default POSIX group ID (GID). If not specified, defaults to `100`.
+* `default_uid` - (Optional) The default POSIX user ID (UID). If not specified, defaults to `1000`.
+* `mount_path` - (Optional) The path within the image to mount the user's EFS home directory. The directory should be empty. If not specified, defaults to `/home/sagemaker-user`.
+
+#### Kernel Spec
+
+* `name` - (Required) The name of the kernel.
+* `display_name` - (Optional) The display name of the kernel.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - The name of the App Image Config.
+* `arn` - The Amazon Resource Name (ARN) assigned by AWS to this App Image Config.
+
+## Import
+
+Sagemaker App Image Configs can be imported using the `name`, e.g.
+
+```
+$ terraform import aws_sagemaker_app_image_config.example example
+```

From 90a667e560b8f136bbf42c1c9c38acc4676dc5ab Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Thu, 21 Jan 2021 22:35:44 +0200
Subject: [PATCH 0833/1212] fix code repo doc file name

---
 ...itory.htm.markdown => sagemaker_code_repository.html.markdown} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename website/docs/r/{sagemaker_code_repository.htm.markdown => sagemaker_code_repository.html.markdown} (100%)

diff --git a/website/docs/r/sagemaker_code_repository.htm.markdown b/website/docs/r/sagemaker_code_repository.html.markdown
similarity index 100%
rename from website/docs/r/sagemaker_code_repository.htm.markdown
rename to website/docs/r/sagemaker_code_repository.html.markdown

From d92690f3830294c8dbaf3bbdbd59cf7840bdb041 Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Sat, 23 Jan 2021 09:57:34 +0200
Subject: [PATCH 0834/1212] fmt

---
 aws/resource_aws_sagemaker_app_image_config_test.go | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/aws/resource_aws_sagemaker_app_image_config_test.go b/aws/resource_aws_sagemaker_app_image_config_test.go
index 0600c50edb8..812c5b1b0ad 100644
--- a/aws/resource_aws_sagemaker_app_image_config_test.go
+++ b/aws/resource_aws_sagemaker_app_image_config_test.go
@@ -269,7 +269,7 @@ resource "aws_sagemaker_app_image_config" "test" {
   kernel_gateway_image_config {
     kernel_spec {
       name = %[1]q
-      }
+    }
   }
 }
 `, rName)
@@ -282,9 +282,9 @@ resource "aws_sagemaker_app_image_config" "test" {
 
   kernel_gateway_image_config {
     kernel_spec {
-      name = "%[1]s-2"
-      display_name = %[1]q
-      }
+      name         = "%[1]s-2"
+      display_name = %[1]q
+    }
   }
 }
 `, rName)

From 4f3205c1a2ac2852b416008ea108a5f63e53173e Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Sat, 23 Jan 2021 09:59:29 +0200
Subject: [PATCH 0835/1212] changelog

---
 .changelog/17221.txt | 7 +++++++
 1 file changed, 7 insertions(+)
 create mode 100644 .changelog/17221.txt

diff --git a/.changelog/17221.txt b/.changelog/17221.txt
new file mode 100644
index 00000000000..eb796c34396
--- /dev/null
+++ b/.changelog/17221.txt
@@ -0,0 +1,7 @@
+```release-note:new-resource
+aws_sagemaker_app_image_config
+```
+
+```release-note:bug 
+resource/aws_sagemaker_code_repository: fix doc name +``` \ No newline at end of file From f53e5bf7b331c086d16d5ff15aa3138c3c2a70ac Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 23 Jan 2021 10:02:25 +0200 Subject: [PATCH 0836/1212] fmt --- aws/resource_aws_sagemaker_app_image_config_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_sagemaker_app_image_config_test.go b/aws/resource_aws_sagemaker_app_image_config_test.go index 812c5b1b0ad..59cb3b547af 100644 --- a/aws/resource_aws_sagemaker_app_image_config_test.go +++ b/aws/resource_aws_sagemaker_app_image_config_test.go @@ -283,7 +283,7 @@ resource "aws_sagemaker_app_image_config" "test" { kernel_gateway_image_config { kernel_spec { name = "%[1]s-2" - display_name = %[1]q + display_name = %[1]q } } } From 7ac07e62c1528fa82ea09be28b1b65857f47a5c5 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 23 Jan 2021 10:06:41 +0200 Subject: [PATCH 0837/1212] fmt and fix tests --- aws/resource_aws_sagemaker_app_image_config_test.go | 12 ++++++------ .../docs/r/sagemaker_app_image_config.html.markdown | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/aws/resource_aws_sagemaker_app_image_config_test.go b/aws/resource_aws_sagemaker_app_image_config_test.go index 59cb3b547af..c3d69bf5459 100644 --- a/aws/resource_aws_sagemaker_app_image_config_test.go +++ b/aws/resource_aws_sagemaker_app_image_config_test.go @@ -117,9 +117,9 @@ func TestAccAWSSagemakerAppImageConfig_kernelGatewayImageConfig_kernalSpecs(t *t testAccCheckAWSSagemakerAppImageConfigExists(resourceName, ¬ebook), resource.TestCheckResourceAttr(resourceName, "app_image_config_name", rName), resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.kernel_sepcs.#", "1"), - resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.file_system_config.#", "0"), - resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.kernel_sepcs.0.name", rName), + resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.kernel_spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.kernel_spec.0.file_system_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.kernel_spec.0.name", rName), ), }, { @@ -133,10 +133,10 @@ func TestAccAWSSagemakerAppImageConfig_kernelGatewayImageConfig_kernalSpecs(t *t testAccCheckAWSSagemakerAppImageConfigExists(resourceName, ¬ebook), resource.TestCheckResourceAttr(resourceName, "app_image_config_name", rName), resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.#", "1"), - resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.kernel_sepcs.#", "1"), + resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.kernel_spec.#", "1"), resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.file_system_config.#", "0"), - resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.kernel_sepcs.0.name", fmt.Sprintf("%s-2", rName)), - resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.kernel_sepcs.0.display_name", rName), + resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.kernel_spec.0.name", fmt.Sprintf("%s-2", rName)), + resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.kernel_spec.0.display_name", rName), ), }, }, 
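The attribute paths corrected in the hunk above follow Terraform's state flattening: each nested block becomes a zero-indexed key under its parent, so a check can only pass when the key name mirrors the schema attribute exactly, which the misspelled `kernel_sepcs` segments never did. A minimal sketch of the mapping (the values here are placeholders, not taken from the real test):

```hcl
resource "aws_sagemaker_app_image_config" "example" {
  app_image_config_name = "example"

  kernel_gateway_image_config {
    kernel_spec {
      name         = "example-2"
      display_name = "example"
    }
  }
}

# Flattened state keys produced by the nested blocks above:
#   kernel_gateway_image_config.#                            = "1"
#   kernel_gateway_image_config.0.kernel_spec.#              = "1"
#   kernel_gateway_image_config.0.kernel_spec.0.name         = "example-2"
#   kernel_gateway_image_config.0.kernel_spec.0.display_name = "example"
```
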
diff --git a/website/docs/r/sagemaker_app_image_config.html.markdown b/website/docs/r/sagemaker_app_image_config.html.markdown index 5a20c890dc6..65235e87a71 100644 --- a/website/docs/r/sagemaker_app_image_config.html.markdown +++ b/website/docs/r/sagemaker_app_image_config.html.markdown @@ -21,7 +21,7 @@ resource "aws_sagemaker_app_image_config" "test" { kernel_gateway_image_config { kernel_spec { name = "example" - } + } } } ``` From 9d3357b548c50e991c3437e4ac8ea25ae679a0bf Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 23 Jan 2021 10:10:17 +0200 Subject: [PATCH 0838/1212] fix test + rename --- ...rce_aws_sagemaker_app_image_config_test.go | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/aws/resource_aws_sagemaker_app_image_config_test.go b/aws/resource_aws_sagemaker_app_image_config_test.go index c3d69bf5459..be6085a9b9a 100644 --- a/aws/resource_aws_sagemaker_app_image_config_test.go +++ b/aws/resource_aws_sagemaker_app_image_config_test.go @@ -73,7 +73,7 @@ func testSweepSagemakerAppImageConfigs(region string) error { } func TestAccAWSSagemakerAppImageConfig_basic(t *testing.T) { - var notebook sagemaker.DescribeAppImageConfigOutput + var config sagemaker.DescribeAppImageConfigOutput rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_sagemaker_app_image_config.test" @@ -85,7 +85,7 @@ func TestAccAWSSagemakerAppImageConfig_basic(t *testing.T) { { Config: testAccAWSSagemakerAppImageConfigBasicConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerAppImageConfigExists(resourceName, ¬ebook), + testAccCheckAWSSagemakerAppImageConfigExists(resourceName, &config), resource.TestCheckResourceAttr(resourceName, "app_image_config_name", rName), testAccCheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("app-image-config/%s", rName)), resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.#", "0"), @@ -102,7 +102,7 @@ func TestAccAWSSagemakerAppImageConfig_basic(t *testing.T) { } func TestAccAWSSagemakerAppImageConfig_kernelGatewayImageConfig_kernalSpecs(t *testing.T) { - var notebook sagemaker.DescribeAppImageConfigOutput + var config sagemaker.DescribeAppImageConfigOutput rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_sagemaker_app_image_config.test" @@ -114,7 +114,7 @@ func TestAccAWSSagemakerAppImageConfig_kernelGatewayImageConfig_kernalSpecs(t *t { Config: testAccAWSSagemakerAppImageConfigKernelGatewayImageConfigKernalSpecs1(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerAppImageConfigExists(resourceName, ¬ebook), + testAccCheckAWSSagemakerAppImageConfigExists(resourceName, &config), resource.TestCheckResourceAttr(resourceName, "app_image_config_name", rName), resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.#", "1"), resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.kernel_spec.#", "1"), @@ -128,9 +128,9 @@ func TestAccAWSSagemakerAppImageConfig_kernelGatewayImageConfig_kernalSpecs(t *t ImportStateVerify: true, }, { - Config: testAccAWSSagemakerAppImageConfigKernelGatewayImageConfigKernalSpecs1(rName), + Config: testAccAWSSagemakerAppImageConfigKernelGatewayImageConfigKernalSpecs2(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerAppImageConfigExists(resourceName, ¬ebook), + testAccCheckAWSSagemakerAppImageConfigExists(resourceName, &config), resource.TestCheckResourceAttr(resourceName, "app_image_config_name", rName), 
resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.#", "1"), resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.kernel_spec.#", "1"), @@ -144,7 +144,7 @@ func TestAccAWSSagemakerAppImageConfig_kernelGatewayImageConfig_kernalSpecs(t *t } func TestAccAWSSagemakerAppImageConfig_tags(t *testing.T) { - var notebook sagemaker.DescribeAppImageConfigOutput + var config sagemaker.DescribeAppImageConfigOutput rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_sagemaker_app_image_config.test" @@ -156,7 +156,7 @@ func TestAccAWSSagemakerAppImageConfig_tags(t *testing.T) { { Config: testAccAWSSagemakerAppImageConfigConfigTags1(rName, "key1", "value1"), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerAppImageConfigExists(resourceName, ¬ebook), + testAccCheckAWSSagemakerAppImageConfigExists(resourceName, &config), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), ), @@ -169,7 +169,7 @@ func TestAccAWSSagemakerAppImageConfig_tags(t *testing.T) { { Config: testAccAWSSagemakerAppImageConfigConfigTags2(rName, "key1", "value1updated", "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerAppImageConfigExists(resourceName, ¬ebook), + testAccCheckAWSSagemakerAppImageConfigExists(resourceName, &config), resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), @@ -178,7 +178,7 @@ func TestAccAWSSagemakerAppImageConfig_tags(t *testing.T) { { Config: testAccAWSSagemakerAppImageConfigConfigTags1(rName, "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerAppImageConfigExists(resourceName, ¬ebook), + testAccCheckAWSSagemakerAppImageConfigExists(resourceName, &config), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), ), @@ -188,7 +188,7 @@ func TestAccAWSSagemakerAppImageConfig_tags(t *testing.T) { } func TestAccAWSSagemakerAppImageConfig_disappears(t *testing.T) { - var notebook sagemaker.DescribeAppImageConfigOutput + var config sagemaker.DescribeAppImageConfigOutput rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_sagemaker_app_image_config.test" @@ -200,7 +200,7 @@ func TestAccAWSSagemakerAppImageConfig_disappears(t *testing.T) { { Config: testAccAWSSagemakerAppImageConfigBasicConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerAppImageConfigExists(resourceName, ¬ebook), + testAccCheckAWSSagemakerAppImageConfigExists(resourceName, &config), testAccCheckResourceDisappears(testAccProvider, resourceAwsSagemakerAppImageConfig(), resourceName), ), ExpectNonEmptyPlan: true, From 086d2ec1e067e10618572f3aacadf5e61a08b97c Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 23 Jan 2021 12:05:06 +0200 Subject: [PATCH 0839/1212] docs --- .../sagemaker_app_image_config.html.markdown | 22 +++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/website/docs/r/sagemaker_app_image_config.html.markdown b/website/docs/r/sagemaker_app_image_config.html.markdown index 65235e87a71..a3cd5b657d4 100644 --- a/website/docs/r/sagemaker_app_image_config.html.markdown +++ b/website/docs/r/sagemaker_app_image_config.html.markdown @@ -26,6 +26,22 @@ resource "aws_sagemaker_app_image_config" "test" { } ``` +### Default File 
System Config
+
+```hcl
+resource "aws_sagemaker_app_image_config" "test" {
+  app_image_config_name = "example"
+
+  kernel_gateway_image_config {
+    kernel_spec {
+      name = "example"
+    }
+
+    file_system_config {}
+  }
+}
+```
+
 ## Argument Reference
 
 The following arguments are supported:
@@ -41,10 +57,12 @@ The following arguments are supported:
 
 #### File System Config
 
-* `default_gid` - (Required) The default POSIX group ID (GID). If not specified, defaults to `100`.
-* `default_uid` - (Optional) The default POSIX user ID (UID). If not specified, defaults to `1000`.
+* `default_gid` - (Optional) The default POSIX group ID (GID). If not specified, defaults to `100`. Valid values are `0` and `100`.
+* `default_uid` - (Optional) The default POSIX user ID (UID). If not specified, defaults to `1000`. Valid values are `0` and `1000`.
 * `mount_path` - (Optional) The path within the image to mount the user's EFS home directory. The directory should be empty. If not specified, defaults to `/home/sagemaker-user`.
 
+~> **Note:** When specifying `default_gid` and `default_uid`, the valid value pairs are [`0`, `0`] and [`100`, `1000`].
+
 #### Kernel Spec

From fea4363aeb88008b8686f4316861b8b08d83e539 Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Sat, 23 Jan 2021 12:05:36 +0200
Subject: [PATCH 0840/1212] add file system config test and fix validation

---
 ...resource_aws_sagemaker_app_image_config.go |  6 +-
 ...rce_aws_sagemaker_app_image_config_test.go | 83 ++++++++++++++++++-
 2 files changed, 85 insertions(+), 4 deletions(-)

diff --git a/aws/resource_aws_sagemaker_app_image_config.go b/aws/resource_aws_sagemaker_app_image_config.go
index 4103dfa75f8..be27f13c7d0 100644
--- a/aws/resource_aws_sagemaker_app_image_config.go
+++ b/aws/resource_aws_sagemaker_app_image_config.go
@@ -53,13 +53,13 @@ func resourceAwsSagemakerAppImageConfig() *schema.Resource {
 						Type:         schema.TypeInt,
 						Optional:     true,
 						Default:      100,
-						ValidateFunc: validation.IntBetween(0, 65535),
+						ValidateFunc: validation.IntInSlice([]int{0, 100}),
 					},
 					"default_uid": {
 						Type:         schema.TypeInt,
 						Optional:     true,
 						Default:      1000,
-						ValidateFunc: validation.IntBetween(0, 65535),
+						ValidateFunc: validation.IntInSlice([]int{0, 1000}),
 					},
 					"mount_path": {
 						Type:         schema.TypeString,
@@ -303,7 +303,7 @@ func flattenSagemakerAppImageConfigKernelGatewayImageConfigFileSystemConfig(conf
 	}
 
 	m := map[string]interface{}{
-		"mountPath":   aws.StringValue(config.MountPath),
+		"mount_path":  aws.StringValue(config.MountPath),
 		"default_gid": aws.Int64Value(config.DefaultGid),
 		"default_uid": aws.Int64Value(config.DefaultUid),
 	}

diff --git a/aws/resource_aws_sagemaker_app_image_config_test.go b/aws/resource_aws_sagemaker_app_image_config_test.go
index be6085a9b9a..fb9738c9a64 100644
--- a/aws/resource_aws_sagemaker_app_image_config_test.go
+++ b/aws/resource_aws_sagemaker_app_image_config_test.go
@@ -118,7 +118,7 @@ func TestAccAWSSagemakerAppImageConfig_kernelGatewayImageConfig_kernalSpecs(t *t
 				resource.TestCheckResourceAttr(resourceName, "app_image_config_name", rName),
 				resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.#", "1"),
 				resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.kernel_spec.#", "1"),
-				resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.kernel_spec.0.file_system_config.#", "0"),
+				resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.file_system_config.#", "0"),
 				resource.TestCheckResourceAttr(resourceName, 
"kernel_gateway_image_config.0.kernel_spec.0.name", rName), ), }, @@ -143,6 +143,51 @@ func TestAccAWSSagemakerAppImageConfig_kernelGatewayImageConfig_kernalSpecs(t *t }) } +func TestAccAWSSagemakerAppImageConfig_kernelGatewayImageConfig_fileSystemConfig(t *testing.T) { + var config sagemaker.DescribeAppImageConfigOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_app_image_config.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerAppImageConfigDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerAppImageConfigKernelGatewayImageConfigFileSystemConfig1(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerAppImageConfigExists(resourceName, &config), + resource.TestCheckResourceAttr(resourceName, "app_image_config_name", rName), + resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.kernel_spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.file_system_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.file_system_config.0.default_gid", "100"), + resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.file_system_config.0.default_uid", "1000"), + resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.file_system_config.0.mount_path", "/home/sagemaker-user"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSSagemakerAppImageConfigKernelGatewayImageConfigFileSystemConfig2(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerAppImageConfigExists(resourceName, &config), + resource.TestCheckResourceAttr(resourceName, "app_image_config_name", rName), + resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.kernel_spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.file_system_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.file_system_config.0.default_gid", "0"), + resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.file_system_config.0.default_uid", "0"), + resource.TestCheckResourceAttr(resourceName, "kernel_gateway_image_config.0.file_system_config.0.mount_path", "/test"), + ), + }, + }, + }) +} + func TestAccAWSSagemakerAppImageConfig_tags(t *testing.T) { var config sagemaker.DescribeAppImageConfigOutput rName := acctest.RandomWithPrefix("tf-acc-test") @@ -290,6 +335,42 @@ resource "aws_sagemaker_app_image_config" "test" { `, rName) } +func testAccAWSSagemakerAppImageConfigKernelGatewayImageConfigFileSystemConfig1(rName string) string { + return fmt.Sprintf(` +resource "aws_sagemaker_app_image_config" "test" { + app_image_config_name = %[1]q + + kernel_gateway_image_config { + kernel_spec { + name = %[1]q + } + + file_system_config {} + } +} +`, rName) +} + +func testAccAWSSagemakerAppImageConfigKernelGatewayImageConfigFileSystemConfig2(rName string) string { + return fmt.Sprintf(` +resource "aws_sagemaker_app_image_config" "test" { + app_image_config_name = %[1]q + + kernel_gateway_image_config { + kernel_spec { + name = %[1]q + } + + file_system_config { + 
default_gid = 0 + default_uid = 0 + mount_path = "/test" + } + } +} +`, rName) +} + func testAccAWSSagemakerAppImageConfigConfigTags1(rName, tagKey1, tagValue1 string) string { return fmt.Sprintf(` resource "aws_sagemaker_app_image_config" "test" { From bd0ca606c1dd36b829bf16d1c3de0b47adb85f74 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 23 Jan 2021 12:26:00 +0200 Subject: [PATCH 0841/1212] fmt --- aws/resource_aws_sagemaker_app_image_config_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_sagemaker_app_image_config_test.go b/aws/resource_aws_sagemaker_app_image_config_test.go index fb9738c9a64..a451d706503 100644 --- a/aws/resource_aws_sagemaker_app_image_config_test.go +++ b/aws/resource_aws_sagemaker_app_image_config_test.go @@ -262,12 +262,12 @@ func testAccCheckAWSSagemakerAppImageConfigDestroy(s *terraform.State) error { continue } - codeRepository, err := finder.AppImageConfigByName(conn, rs.Primary.ID) + config, err := finder.AppImageConfigByName(conn, rs.Primary.ID) if err != nil { return nil } - if aws.StringValue(codeRepository.AppImageConfigName) == rs.Primary.ID { + if aws.StringValue(config.AppImageConfigName) == rs.Primary.ID { return fmt.Errorf("Sagemaker App Image Config %q still exists", rs.Primary.ID) } } @@ -275,7 +275,7 @@ func testAccCheckAWSSagemakerAppImageConfigDestroy(s *terraform.State) error { return nil } -func testAccCheckAWSSagemakerAppImageConfigExists(n string, codeRepo *sagemaker.DescribeAppImageConfigOutput) resource.TestCheckFunc { +func testAccCheckAWSSagemakerAppImageConfigExists(n string, config *sagemaker.DescribeAppImageConfigOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -292,7 +292,7 @@ func testAccCheckAWSSagemakerAppImageConfigExists(n string, codeRepo *sagemaker. 
return err } - *codeRepo = *resp + *config = *resp return nil } From 887a927ea4598a23946daa9310ed3ce32683e58e Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 23 Jan 2021 12:26:56 +0200 Subject: [PATCH 0842/1212] fmt --- aws/resource_aws_sagemaker_app_image_config_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_sagemaker_app_image_config_test.go b/aws/resource_aws_sagemaker_app_image_config_test.go index a451d706503..afcdcf41347 100644 --- a/aws/resource_aws_sagemaker_app_image_config_test.go +++ b/aws/resource_aws_sagemaker_app_image_config_test.go @@ -343,9 +343,9 @@ resource "aws_sagemaker_app_image_config" "test" { kernel_gateway_image_config { kernel_spec { name = %[1]q - } + } - file_system_config {} + file_system_config {} } } `, rName) From cff12265e4b93942af713723f08746a7abaf753b Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 23 Jan 2021 12:27:41 +0200 Subject: [PATCH 0843/1212] fmt --- aws/resource_aws_sagemaker_app_image_config_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_sagemaker_app_image_config_test.go b/aws/resource_aws_sagemaker_app_image_config_test.go index afcdcf41347..dfc8f4cee8d 100644 --- a/aws/resource_aws_sagemaker_app_image_config_test.go +++ b/aws/resource_aws_sagemaker_app_image_config_test.go @@ -359,7 +359,7 @@ resource "aws_sagemaker_app_image_config" "test" { kernel_gateway_image_config { kernel_spec { name = %[1]q - } + } file_system_config { default_gid = 0 From 4804ddbe81811e829ae9161d315163a036060284 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 23 Jan 2021 13:26:05 +0200 Subject: [PATCH 0844/1212] skip tags --- aws/resource_aws_sagemaker_app_image_config_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/aws/resource_aws_sagemaker_app_image_config_test.go b/aws/resource_aws_sagemaker_app_image_config_test.go index dfc8f4cee8d..bf78cac7d23 100644 --- a/aws/resource_aws_sagemaker_app_image_config_test.go +++ b/aws/resource_aws_sagemaker_app_image_config_test.go @@ -189,6 +189,9 @@ func TestAccAWSSagemakerAppImageConfig_kernelGatewayImageConfig_fileSystemConfig } func TestAccAWSSagemakerAppImageConfig_tags(t *testing.T) { + + t.Skip("Flaky Test, possibly related to https://github.com/hashicorp/terraform-provider-aws/issues/15572") + var config sagemaker.DescribeAppImageConfigOutput rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_sagemaker_app_image_config.test" From d88f9ea857a24392552e1737d1bc6a9c7cb2935a Mon Sep 17 00:00:00 2001 From: Jacob Parker Date: Fri, 9 Oct 2020 09:32:11 -0400 Subject: [PATCH 0845/1212] CodeArtifact encryption_key argument should be optional --- aws/resource_aws_codeartifact_domain.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_codeartifact_domain.go b/aws/resource_aws_codeartifact_domain.go index 612d46edde6..1698e63787a 100644 --- a/aws/resource_aws_codeartifact_domain.go +++ b/aws/resource_aws_codeartifact_domain.go @@ -35,7 +35,7 @@ func resourceAwsCodeArtifactDomain() *schema.Resource { }, "encryption_key": { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, ValidateFunc: validateArn, }, From a250ee9e0d12aaf8d8020337faca15dd05d8cd93 Mon Sep 17 00:00:00 2001 From: Jacob Parker Date: Wed, 14 Oct 2020 11:52:25 -0400 Subject: [PATCH 0846/1212] Set encryption_key to be a computed attribute --- aws/resource_aws_codeartifact_domain.go | 1 + 1 file changed, 1 insertion(+) diff --git a/aws/resource_aws_codeartifact_domain.go 
b/aws/resource_aws_codeartifact_domain.go index 1698e63787a..63159d9785b 100644 --- a/aws/resource_aws_codeartifact_domain.go +++ b/aws/resource_aws_codeartifact_domain.go @@ -36,6 +36,7 @@ func resourceAwsCodeArtifactDomain() *schema.Resource { "encryption_key": { Type: schema.TypeString, Optional: true, + Computed: true, ForceNew: true, ValidateFunc: validateArn, }, From 6f9f0c3aab5161f341440b254813ead216036216 Mon Sep 17 00:00:00 2001 From: Jacob Parker Date: Wed, 14 Oct 2020 13:53:54 -0400 Subject: [PATCH 0847/1212] Add acceptance test + docs --- aws/resource_aws_codeartifact_domain_test.go | 41 +++++++++++++++++++ .../docs/r/codeartifact_domain.html.markdown | 7 +--- 2 files changed, 42 insertions(+), 6 deletions(-) diff --git a/aws/resource_aws_codeartifact_domain_test.go b/aws/resource_aws_codeartifact_domain_test.go index b7817c0d098..18132b1dce3 100644 --- a/aws/resource_aws_codeartifact_domain_test.go +++ b/aws/resource_aws_codeartifact_domain_test.go @@ -98,7 +98,41 @@ func TestAccAWSCodeArtifactDomain_basic(t *testing.T) { }) } +<<<<<<< HEAD func TestAccAWSCodeArtifactDomain_tags(t *testing.T) { +======= +func TestAccAWSCodeArtifactDomain_defaultencryptionkey(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_codeartifact_domain.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck("codeartifact", t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCodeArtifactDomainDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCodeArtifactDomainDefaultEncryptionKeyConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSCodeArtifactDomainExists(resourceName), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "codeartifact", fmt.Sprintf("domain/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "domain", rName), + resource.TestCheckResourceAttr(resourceName, "asset_size_bytes", "0"), + resource.TestCheckResourceAttr(resourceName, "repository_count", "0"), + resource.TestCheckResourceAttrSet(resourceName, "created_time"), + testAccCheckResourceAttrAccountID(resourceName, "owner"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSCodeArtifactDomain_disappears(t *testing.T) { +>>>>>>> a4649a922 (Add acceptance test + docs) rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_codeartifact_domain.test" @@ -271,4 +305,11 @@ resource "aws_codeartifact_domain" "test" { } } `, rName, tagKey1, tagValue1, tagKey2, tagValue2) + +func testAccAWSCodeArtifactDomainDefaultEncryptionKeyConfig(rName string) string { + return fmt.Sprintf(` +resource "aws_codeartifact_domain" "test" { + domain = %[1]q +} +`, rName) } diff --git a/website/docs/r/codeartifact_domain.html.markdown b/website/docs/r/codeartifact_domain.html.markdown index 83274cec4ba..b875cc10c69 100644 --- a/website/docs/r/codeartifact_domain.html.markdown +++ b/website/docs/r/codeartifact_domain.html.markdown @@ -13,13 +13,8 @@ Provides a CodeArtifact Domain Resource. ## Example Usage ```hcl -resource "aws_kms_key" "example" { - description = "domain key" -} - resource "aws_codeartifact_domain" "example" { domain = "example" - encryption_key = aws_kms_key.example.arn } ``` @@ -28,7 +23,7 @@ resource "aws_codeartifact_domain" "example" { The following arguments are supported: * `domain` - (Required) The name of the domain to create. 
All domain names in an AWS Region that are in the same AWS account must be unique. The domain name is used as the prefix in DNS hostnames. Do not use sensitive information in a domain name because it is publicly discoverable. -* `encryption_key` - (Required) The encryption key for the domain. This is used to encrypt content stored in a domain. The KMS Key Amazon Resource Name (ARN). +* `encryption_key` - (Optional) The encryption key for the domain. This is used to encrypt content stored in a domain. The KMS Key Amazon Resource Name (ARN). The default aws/codeartifact AWS KMS master key is used if this element is absent. * `tags` - (Optional) Key-value map of resource tags. ## Attributes Reference From 340600b6c97e6f2143facd1994170b43f9198b7f Mon Sep 17 00:00:00 2001 From: Jacob Parker Date: Wed, 14 Oct 2020 14:27:29 -0400 Subject: [PATCH 0848/1212] Fix format in docs --- website/docs/r/codeartifact_domain.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/codeartifact_domain.html.markdown b/website/docs/r/codeartifact_domain.html.markdown index b875cc10c69..e8832269a9e 100644 --- a/website/docs/r/codeartifact_domain.html.markdown +++ b/website/docs/r/codeartifact_domain.html.markdown @@ -14,7 +14,7 @@ Provides a CodeArtifact Domain Resource. ```hcl resource "aws_codeartifact_domain" "example" { - domain = "example" + domain = "example" } ``` From 63050b5d3d6777e0d0f8ddb4f8004f7cbdfccc5f Mon Sep 17 00:00:00 2001 From: Jacob Parker Date: Fri, 6 Nov 2020 11:32:48 -0500 Subject: [PATCH 0849/1212] Fix broken merge --- aws/resource_aws_codeartifact_domain_test.go | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/aws/resource_aws_codeartifact_domain_test.go b/aws/resource_aws_codeartifact_domain_test.go index 18132b1dce3..189a43e9f39 100644 --- a/aws/resource_aws_codeartifact_domain_test.go +++ b/aws/resource_aws_codeartifact_domain_test.go @@ -98,9 +98,6 @@ func TestAccAWSCodeArtifactDomain_basic(t *testing.T) { }) } -<<<<<<< HEAD -func TestAccAWSCodeArtifactDomain_tags(t *testing.T) { -======= func TestAccAWSCodeArtifactDomain_defaultencryptionkey(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_codeartifact_domain.test" @@ -131,8 +128,7 @@ func TestAccAWSCodeArtifactDomain_defaultencryptionkey(t *testing.T) { }) } -func TestAccAWSCodeArtifactDomain_disappears(t *testing.T) { ->>>>>>> a4649a922 (Add acceptance test + docs) +func TestAccAWSCodeArtifactDomain_tags(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_codeartifact_domain.test" @@ -305,6 +301,7 @@ resource "aws_codeartifact_domain" "test" { } } `, rName, tagKey1, tagValue1, tagKey2, tagValue2) +} func testAccAWSCodeArtifactDomainDefaultEncryptionKeyConfig(rName string) string { return fmt.Sprintf(` From f3c4876e4c316f96669152efdf2d4cdcc75b051b Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 23 Jan 2021 13:47:55 +0200 Subject: [PATCH 0850/1212] optional kms --- aws/resource_aws_codeartifact_domain.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_codeartifact_domain.go b/aws/resource_aws_codeartifact_domain.go index 63159d9785b..7d789b0dbbd 100644 --- a/aws/resource_aws_codeartifact_domain.go +++ b/aws/resource_aws_codeartifact_domain.go @@ -66,9 +66,12 @@ func resourceAwsCodeArtifactDomainCreate(d *schema.ResourceData, meta interface{ log.Print("[DEBUG] Creating CodeArtifact Domain") params := &codeartifact.CreateDomainInput{ - Domain: 
aws.String(d.Get("domain").(string)), - EncryptionKey: aws.String(d.Get("encryption_key").(string)), - Tags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().CodeartifactTags(), + Domain: aws.String(d.Get("domain").(string)), + Tags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().CodeartifactTags(), + } + + if v, ok := d.GetOk("encryption_key"); ok { + params.EncryptionKey = aws.String(v.(string)) } domain, err := conn.CreateDomain(params) From 1d15079a1f61b24fef035c67fe59a58adc257d39 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 23 Jan 2021 13:48:20 +0200 Subject: [PATCH 0851/1212] refactor tests to not explictly create kms keys unless needed --- aws/resource_aws_codeartifact_domain_test.go | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/aws/resource_aws_codeartifact_domain_test.go b/aws/resource_aws_codeartifact_domain_test.go index 189a43e9f39..67db2241c86 100644 --- a/aws/resource_aws_codeartifact_domain_test.go +++ b/aws/resource_aws_codeartifact_domain_test.go @@ -3,6 +3,7 @@ package aws import ( "fmt" "log" + "regexp" "testing" "github.com/aws/aws-sdk-go/aws" @@ -112,6 +113,7 @@ func TestAccAWSCodeArtifactDomain_defaultencryptionkey(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAWSCodeArtifactDomainExists(resourceName), testAccCheckResourceAttrRegionalARN(resourceName, "arn", "codeartifact", fmt.Sprintf("domain/%s", rName)), + testAccMatchResourceAttrRegionalARN(resourceName, "encryption_key", "kms", regexp.MustCompile(`key/.+`)), resource.TestCheckResourceAttr(resourceName, "domain", rName), resource.TestCheckResourceAttr(resourceName, "asset_size_bytes", "0"), resource.TestCheckResourceAttr(resourceName, "repository_count", "0"), @@ -180,7 +182,7 @@ func TestAccAWSCodeArtifactDomain_disappears(t *testing.T) { CheckDestroy: testAccCheckAWSCodeArtifactDomainDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSCodeArtifactDomainBasicConfig(rName), + Config: testAccAWSCodeArtifactDomainDefaultEncryptionKeyConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSCodeArtifactDomainExists(resourceName), testAccCheckResourceDisappears(testAccProvider, resourceAwsCodeArtifactDomain(), resourceName), @@ -268,14 +270,8 @@ resource "aws_codeartifact_domain" "test" { func testAccAWSCodeArtifactDomainConfigTags1(rName, tagKey1, tagValue1 string) string { return fmt.Sprintf(` -resource "aws_kms_key" "test" { - description = %[1]q - deletion_window_in_days = 7 -} - resource "aws_codeartifact_domain" "test" { domain = %[1]q - encryption_key = aws_kms_key.test.arn tags = { %[2]q = %[3]q @@ -286,14 +282,8 @@ resource "aws_codeartifact_domain" "test" { func testAccAWSCodeArtifactDomainConfigTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { return fmt.Sprintf(` -resource "aws_kms_key" "test" { - description = %[1]q - deletion_window_in_days = 7 -} - resource "aws_codeartifact_domain" "test" { domain = %[1]q - encryption_key = aws_kms_key.test.arn tags = { %[2]q = %[3]q From 2cc584713a017e36b6b3e4ee0f28ec6b7243d57e Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 23 Jan 2021 13:58:29 +0200 Subject: [PATCH 0852/1212] changelog --- .changelog/17262.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/17262.txt diff --git a/.changelog/17262.txt b/.changelog/17262.txt new file mode 100644 index 00000000000..01928c213a2 --- /dev/null +++ b/.changelog/17262.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_codeartifact_domain: make 
`encryption_key` optional +``` \ No newline at end of file From 3024eb3a5e2590d2907642b1a7e4b1474562b399 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 23 Jan 2021 14:02:00 +0200 Subject: [PATCH 0853/1212] fmt --- aws/resource_aws_codeartifact_domain_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_codeartifact_domain_test.go b/aws/resource_aws_codeartifact_domain_test.go index 67db2241c86..2ef97e4a645 100644 --- a/aws/resource_aws_codeartifact_domain_test.go +++ b/aws/resource_aws_codeartifact_domain_test.go @@ -271,7 +271,7 @@ resource "aws_codeartifact_domain" "test" { func testAccAWSCodeArtifactDomainConfigTags1(rName, tagKey1, tagValue1 string) string { return fmt.Sprintf(` resource "aws_codeartifact_domain" "test" { - domain = %[1]q + domain = %[1]q tags = { %[2]q = %[3]q @@ -283,7 +283,7 @@ resource "aws_codeartifact_domain" "test" { func testAccAWSCodeArtifactDomainConfigTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { return fmt.Sprintf(` resource "aws_codeartifact_domain" "test" { - domain = %[1]q + domain = %[1]q tags = { %[2]q = %[3]q From 84d377812e7a31d407f37e0c5c7faa76a32c8903 Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Sat, 23 Jan 2021 23:48:13 +0200 Subject: [PATCH 0854/1212] Update .changelog/17262.txt Co-authored-by: Kit Ewbank --- .changelog/17262.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.changelog/17262.txt b/.changelog/17262.txt index 01928c213a2..a1453e12534 100644 --- a/.changelog/17262.txt +++ b/.changelog/17262.txt @@ -1,3 +1,3 @@ ```release-note:enhancement -resource/aws_codeartifact_domain: make `encryption_key` optional -``` \ No newline at end of file +resource/aws_codeartifact_domain: Make `encryption_key` optional +``` From f57f5f47857a25e761b06aacfa42cc91b9c82de0 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sun, 24 Jan 2021 09:12:50 +0200 Subject: [PATCH 0855/1212] remove tags logic --- ...resource_aws_sagemaker_app_image_config.go | 27 +------ ...rce_aws_sagemaker_app_image_config_test.go | 72 ------------------- 2 files changed, 1 insertion(+), 98 deletions(-) diff --git a/aws/resource_aws_sagemaker_app_image_config.go b/aws/resource_aws_sagemaker_app_image_config.go index be27f13c7d0..b6fb91fccf2 100644 --- a/aws/resource_aws_sagemaker_app_image_config.go +++ b/aws/resource_aws_sagemaker_app_image_config.go @@ -9,7 +9,6 @@ import ( "github.com/aws/aws-sdk-go/service/sagemaker" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/finder" ) @@ -95,7 +94,6 @@ func resourceAwsSagemakerAppImageConfig() *schema.Resource { }, }, }, - "tags": tagsSchema(), }, } } @@ -112,10 +110,6 @@ func resourceAwsSagemakerAppImageConfigCreate(d *schema.ResourceData, meta inter input.KernelGatewayImageConfig = expandSagemakerAppImageConfigKernelGatewayImageConfig(v.([]interface{})) } - if v, ok := d.GetOk("tags"); ok { - input.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().SagemakerTags() - } - _, err := conn.CreateAppImageConfig(input) if err != nil { return fmt.Errorf("error creating SageMaker App Image Config %s: %w", name, err) @@ -128,7 +122,6 @@ func resourceAwsSagemakerAppImageConfigCreate(d *schema.ResourceData, meta inter func resourceAwsSagemakerAppImageConfigRead(d *schema.ResourceData, meta 
interface{}) error { conn := meta.(*AWSClient).sagemakerconn - ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig image, err := finder.AppImageConfigByName(conn, d.Id()) if err != nil { @@ -149,23 +142,13 @@ func resourceAwsSagemakerAppImageConfigRead(d *schema.ResourceData, meta interfa return fmt.Errorf("error setting kernel_gateway_image_config: %w", err) } - tags, err := keyvaluetags.SagemakerListTags(conn, arn) - - if err != nil { - return fmt.Errorf("error listing tags for SageMaker App Image Config (%s): %w", d.Id(), err) - } - - if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %w", err) - } - return nil } func resourceAwsSagemakerAppImageConfigUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).sagemakerconn - if d.HasChangeExcept("tags") { + if d.HasChange("kernel_gateway_image_config") { input := &sagemaker.UpdateAppImageConfigInput{ AppImageConfigName: aws.String(d.Id()), @@ -183,14 +166,6 @@ func resourceAwsSagemakerAppImageConfigUpdate(d *schema.ResourceData, meta inter } - if d.HasChange("tags") { - o, n := d.GetChange("tags") - - if err := keyvaluetags.SagemakerUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { - return fmt.Errorf("error updating SageMaker App Image Config (%s) tags: %w", d.Id(), err) - } - } - return resourceAwsSagemakerAppImageConfigRead(d, meta) } diff --git a/aws/resource_aws_sagemaker_app_image_config_test.go b/aws/resource_aws_sagemaker_app_image_config_test.go index bf78cac7d23..69427a74cf9 100644 --- a/aws/resource_aws_sagemaker_app_image_config_test.go +++ b/aws/resource_aws_sagemaker_app_image_config_test.go @@ -188,53 +188,6 @@ func TestAccAWSSagemakerAppImageConfig_kernelGatewayImageConfig_fileSystemConfig }) } -func TestAccAWSSagemakerAppImageConfig_tags(t *testing.T) { - - t.Skip("Flaky Test, possibly related to https://github.com/hashicorp/terraform-provider-aws/issues/15572") - - var config sagemaker.DescribeAppImageConfigOutput - rName := acctest.RandomWithPrefix("tf-acc-test") - resourceName := "aws_sagemaker_app_image_config.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSagemakerAppImageConfigDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSagemakerAppImageConfigConfigTags1(rName, "key1", "value1"), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerAppImageConfigExists(resourceName, &config), - resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), - resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccAWSSagemakerAppImageConfigConfigTags2(rName, "key1", "value1updated", "key2", "value2"), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerAppImageConfigExists(resourceName, &config), - resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), - resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), - resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), - ), - }, - { - Config: testAccAWSSagemakerAppImageConfigConfigTags1(rName, "key2", "value2"), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSagemakerAppImageConfigExists(resourceName, &config), - resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), - resource.TestCheckResourceAttr(resourceName, "tags.key2", 
"value2"), - ), - }, - }, - }) -} - func TestAccAWSSagemakerAppImageConfig_disappears(t *testing.T) { var config sagemaker.DescribeAppImageConfigOutput rName := acctest.RandomWithPrefix("tf-acc-test") @@ -373,28 +326,3 @@ resource "aws_sagemaker_app_image_config" "test" { } `, rName) } - -func testAccAWSSagemakerAppImageConfigConfigTags1(rName, tagKey1, tagValue1 string) string { - return fmt.Sprintf(` -resource "aws_sagemaker_app_image_config" "test" { - app_image_config_name = %[1]q - - tags = { - %[2]q = %[3]q - } -} -`, rName, tagKey1, tagValue1) -} - -func testAccAWSSagemakerAppImageConfigConfigTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return fmt.Sprintf(` -resource "aws_sagemaker_app_image_config" "test" { - app_image_config_name = %[1]q - - tags = { - %[2]q = %[3]q - %[4]q = %[5]q - } -} -`, rName, tagKey1, tagValue1, tagKey2, tagValue2) -} From cc846034828a646a7997ffbdd06ff20231004d4d Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Mon, 25 Jan 2021 10:02:08 -0500 Subject: [PATCH 0856/1212] data-source/aws_vpc_peering_connection: Finalize cidr_block_set and peer_cidr_block-set additions, refactor testing for less failures Output from acceptance testing: ``` --- PASS: TestAccDataSourceAwsVpcPeeringConnection_PeerVpcId (23.55s) --- PASS: TestAccDataSourceAwsVpcPeeringConnection_VpcId (23.89s) --- PASS: TestAccDataSourceAwsVpcPeeringConnection_Id (24.30s) --- PASS: TestAccDataSourceAwsVpcPeeringConnection_CidrBlock (29.94s) --- PASS: TestAccDataSourceAwsVpcPeeringConnection_PeerCidrBlock (30.37s) ``` --- .changelog/13420.txt | 3 + aws/data_source_aws_vpc_peering_connection.go | 6 +- ..._source_aws_vpc_peering_connection_test.go | 314 ++++++++++++------ .../d/vpc_peering_connection.html.markdown | 8 +- 4 files changed, 219 insertions(+), 112 deletions(-) create mode 100644 .changelog/13420.txt diff --git a/.changelog/13420.txt b/.changelog/13420.txt new file mode 100644 index 00000000000..f9bd084bb8c --- /dev/null +++ b/.changelog/13420.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +data-source/aws_vpc_peering_connection: Add `cidr_block_set` and `peer_cidr_block_set` attributes +``` diff --git a/aws/data_source_aws_vpc_peering_connection.go b/aws/data_source_aws_vpc_peering_connection.go index 3adaeb8b2ba..a77747bd07a 100644 --- a/aws/data_source_aws_vpc_peering_connection.go +++ b/aws/data_source_aws_vpc_peering_connection.go @@ -42,7 +42,6 @@ func dataSourceAwsVpcPeeringConnection() *schema.Resource { }, "cidr_block_set": { Type: schema.TypeList, - Optional: true, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -75,7 +74,6 @@ func dataSourceAwsVpcPeeringConnection() *schema.Resource { }, "peer_cidr_block_set": { Type: schema.TypeList, - Optional: true, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -172,7 +170,7 @@ func dataSourceAwsVpcPeeringConnectionRead(d *schema.ResourceData, meta interfac cidrBlockSet = append(cidrBlockSet, association) } if err := d.Set("cidr_block_set", cidrBlockSet); err != nil { - return fmt.Errorf("error setting cidr_block_set: %s", err) + return fmt.Errorf("error setting cidr_block_set: %w", err) } d.Set("region", pcx.RequesterVpcInfo.Region) d.Set("peer_vpc_id", pcx.AccepterVpcInfo.VpcId) @@ -186,7 +184,7 @@ func dataSourceAwsVpcPeeringConnectionRead(d *schema.ResourceData, meta interfac peerCidrBlockSet = append(peerCidrBlockSet, association) } if err := d.Set("peer_cidr_block_set", peerCidrBlockSet); err != nil { - return fmt.Errorf("error setting 
peer_cidr_block_set: %s", err) + return fmt.Errorf("error setting peer_cidr_block_set: %w", err) } d.Set("peer_region", pcx.AccepterVpcInfo.Region) if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(pcx.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { diff --git a/aws/data_source_aws_vpc_peering_connection_test.go b/aws/data_source_aws_vpc_peering_connection_test.go index 19258a3cb68..ab4bff7c2fd 100644 --- a/aws/data_source_aws_vpc_peering_connection_test.go +++ b/aws/data_source_aws_vpc_peering_connection_test.go @@ -1,199 +1,305 @@ package aws import ( - "fmt" "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func TestAccDataSourceAwsVpcPeeringConnection_basic(t *testing.T) { +func TestAccDataSourceAwsVpcPeeringConnection_CidrBlock(t *testing.T) { + dataSourceName := "data.aws_vpc_peering_connection.test" + resourceName := "aws_vpc_peering_connection.test" + requesterVpcResourceName := "aws_vpc.requester" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsVpcPeeringConnectionConfigCidrBlock(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "id", resourceName, "id"), + resource.TestCheckResourceAttrPair(dataSourceName, "cidr_block", requesterVpcResourceName, "cidr_block"), + ), + }, + }, + }) +} + +func TestAccDataSourceAwsVpcPeeringConnection_Id(t *testing.T) { + dataSourceName := "data.aws_vpc_peering_connection.test" + resourceName := "aws_vpc_peering_connection.test" + accepterVpcResourceName := "aws_vpc.accepter" + requesterVpcResourceName := "aws_vpc.requester" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsVpcPeeringConnectionConfigId(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "id", resourceName, "id"), + // resource.TestCheckResourceAttrPair(dataSourceName, "cidr_block", resourceName, "cidr_block"), // not in resource + resource.TestCheckResourceAttrPair(dataSourceName, "cidr_block", requesterVpcResourceName, "cidr_block"), + // resource.TestCheckResourceAttrPair(dataSourceName, "cidr_block_set.#", resourceName, "cidr_block_set.#"), // not in resource + resource.TestCheckResourceAttr(dataSourceName, "cidr_block_set.#", "1"), + resource.TestCheckTypeSetElemAttrPair(dataSourceName, "cidr_block_set.*.cidr_block", requesterVpcResourceName, "cidr_block"), + // resource.TestCheckResourceAttrPair(dataSourceName, "region", resourceName, "region"), // not in resource + // resource.TestCheckResourceAttrPair(dataSourceName, "peer_cidr_block", resourceName, "peer_cidr_block"), // not in resource + resource.TestCheckResourceAttrPair(dataSourceName, "peer_cidr_block", accepterVpcResourceName, "cidr_block"), + // resource.TestCheckResourceAttrPair(dataSourceName, "peer_cidr_block_set.#", resourceName, "peer_cidr_block_set.#"), // not in resource + resource.TestCheckResourceAttr(dataSourceName, "peer_cidr_block_set.#", "1"), + resource.TestCheckTypeSetElemAttrPair(dataSourceName, "peer_cidr_block_set.*.cidr_block", accepterVpcResourceName, "cidr_block"), + resource.TestCheckResourceAttrPair(dataSourceName, "peer_owner_id", resourceName, "peer_owner_id"), + // 
resource.TestCheckResourceAttrPair(dataSourceName, "peer_region", resourceName, "peer_region"), //not in resource + resource.TestCheckResourceAttrPair(dataSourceName, "peer_vpc_id", resourceName, "peer_vpc_id"), + // resource.TestCheckResourceAttrPair(dataSourceName, "owner_id", resourceName, "owner_id"), // not in resource + // resource.TestCheckResourceAttrPair(dataSourceName, "region", resourceName, "region"), // not in resource + resource.TestCheckResourceAttrPair(dataSourceName, "tags.%", resourceName, "tags.%"), + resource.TestCheckResourceAttrPair(dataSourceName, "vpc_id", resourceName, "vpc_id"), + ), + }, + }, + }) +} + +func TestAccDataSourceAwsVpcPeeringConnection_PeerCidrBlock(t *testing.T) { + dataSourceName := "data.aws_vpc_peering_connection.test" + resourceName := "aws_vpc_peering_connection.test" + accepterVpcResourceName := "aws_vpc.accepter" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccDataSourceAwsVpcPeeringConnectionConfig, - Check: resource.ComposeTestCheckFunc( - testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_id"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "id"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "cidr_block"), - testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_requester_vpc_id"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_vpc_id", "id"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_vpc_id", "cidr_block"), - testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_accepter_vpc_id"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_vpc_id", "id"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_vpc_id", "cidr_block"), - testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_requester_cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_cidr_block", "id"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_cidr_block", "cidr_block"), - testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_accepter_cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_cidr_block", "id"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_cidr_block", "cidr_block"), - testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_owner_ids"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_owner_ids", "id"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_owner_ids", "cidr_block"), + Config: testAccDataSourceAwsVpcPeeringConnectionConfigPeerCidrBlock(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "id", resourceName, "id"), + resource.TestCheckResourceAttrPair(dataSourceName, "peer_cidr_block", accepterVpcResourceName, "cidr_block"), ), - ExpectNonEmptyPlan: true, }, }, }) } -func TestAccDataSourceAwsVpcPeeringConnection_cidrBlockSets(t *testing.T) { +func TestAccDataSourceAwsVpcPeeringConnection_PeerVpcId(t *testing.T) { + dataSourceName := 
"data.aws_vpc_peering_connection.test" + resourceName := "aws_vpc_peering_connection.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccDataSourceAwsVpcPeeringConnectionCidrBlockSetConfig, - Check: resource.ComposeTestCheckFunc( - testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_id"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "cidr_block_set.0.cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "cidr_block_set.1.cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "peer_cidr_block_set.0.cidr_block"), - resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "peer_cidr_block_set.1.cidr_block"), + Config: testAccDataSourceAwsVpcPeeringConnectionConfigPeerVpcId(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "id", resourceName, "id"), + resource.TestCheckResourceAttrPair(dataSourceName, "peer_vpc_id", resourceName, "peer_vpc_id"), ), }, }, }) } -func testAccDataSourceAwsVpcPeeringConnectionCheck(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return fmt.Errorf("root module has no resource called %s", name) - } +func TestAccDataSourceAwsVpcPeeringConnection_VpcId(t *testing.T) { + dataSourceName := "data.aws_vpc_peering_connection.test" + resourceName := "aws_vpc_peering_connection.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsVpcPeeringConnectionConfigVpcId(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "id", resourceName, "id"), + resource.TestCheckResourceAttrPair(dataSourceName, "vpc_id", resourceName, "vpc_id"), + ), + }, + }, + }) +} + +func testAccDataSourceAwsVpcPeeringConnectionConfigCidrBlock() string { + return ` +resource "aws_vpc" "requester" { + cidr_block = "10.250.0.0/16" # CIDR must be different than other tests + + tags = { + Name = "terraform-testacc-vpc-peering-connection-data-source" + } +} + +resource "aws_vpc" "accepter" { + cidr_block = "10.251.0.0/16" # CIDR must be different than other tests + + tags = { + Name = "terraform-testacc-vpc-peering-connection-data-source" + } +} - pcxRs, ok := s.RootModule().Resources["aws_vpc_peering_connection.test"] - if !ok { - return fmt.Errorf("can't find aws_vpc_peering_connection.test in state") - } +resource "aws_vpc_peering_connection" "test" { + vpc_id = aws_vpc.requester.id + peer_vpc_id = aws_vpc.accepter.id + auto_accept = true - attr := rs.Primary.Attributes + tags = { + Name = "terraform-testacc-vpc-peering-connection-data-source" + } +} - if attr["id"] != pcxRs.Primary.Attributes["id"] { - return fmt.Errorf( - "id is %s; want %s", - attr["id"], - pcxRs.Primary.Attributes["id"], - ) - } +# aws_vpc_peering_connection does not have cidr_block +# Defer read of aws_vpc_peering_connection data source until after resource +data "aws_vpc" "requester" { + id = aws_vpc_peering_connection.test.vpc_id +} - return nil - } +data "aws_vpc_peering_connection" "test" { + cidr_block = data.aws_vpc.requester.cidr_block +} +` } -const testAccDataSourceAwsVpcPeeringConnectionConfig = ` -resource 
"aws_vpc" "foo" { +func testAccDataSourceAwsVpcPeeringConnectionConfigId() string { + return ` +resource "aws_vpc" "requester" { cidr_block = "10.1.0.0/16" tags = { - Name = "terraform-testacc-vpc-peering-connection-data-source-foo" + Name = "terraform-testacc-vpc-peering-connection-data-source" } } -resource "aws_vpc" "bar" { +resource "aws_vpc" "accepter" { cidr_block = "10.2.0.0/16" tags = { - Name = "terraform-testacc-vpc-peering-connection-data-source-bar" + Name = "terraform-testacc-vpc-peering-connection-data-source" } } resource "aws_vpc_peering_connection" "test" { - vpc_id = aws_vpc.foo.id - peer_vpc_id = aws_vpc.bar.id + vpc_id = aws_vpc.requester.id + peer_vpc_id = aws_vpc.accepter.id auto_accept = true tags = { - Name = "terraform-testacc-vpc-peering-connection-data-source-foo-to-bar" + Name = "terraform-testacc-vpc-peering-connection-data-source" } } -data "aws_caller_identity" "current" {} - -data "aws_vpc_peering_connection" "test_by_id" { +data "aws_vpc_peering_connection" "test" { id = aws_vpc_peering_connection.test.id } +` +} -data "aws_vpc_peering_connection" "test_by_requester_vpc_id" { - vpc_id = aws_vpc.foo.id +func testAccDataSourceAwsVpcPeeringConnectionConfigPeerCidrBlock() string { + return ` +resource "aws_vpc" "requester" { + cidr_block = "10.252.0.0/16" # CIDR must be different than other tests - depends_on = [aws_vpc_peering_connection.test] + tags = { + Name = "terraform-testacc-vpc-peering-connection-data-source" + } } -data "aws_vpc_peering_connection" "test_by_accepter_vpc_id" { - peer_vpc_id = aws_vpc.bar.id +resource "aws_vpc" "accepter" { + cidr_block = "10.253.0.0/16" # CIDR must be different than other tests - depends_on = [aws_vpc_peering_connection.test] + tags = { + Name = "terraform-testacc-vpc-peering-connection-data-source" + } } -data "aws_vpc_peering_connection" "test_by_requester_cidr_block" { - cidr_block = "10.1.0.0/16" - status = "active" +resource "aws_vpc_peering_connection" "test" { + vpc_id = aws_vpc.requester.id + peer_vpc_id = aws_vpc.accepter.id + auto_accept = true + + tags = { + Name = "terraform-testacc-vpc-peering-connection-data-source" + } +} - depends_on = [aws_vpc_peering_connection.test] +# aws_vpc_peering_connection does not have cidr_block +# Defer read of aws_vpc_peering_connection data source until after resource +data "aws_vpc" "accepter" { + id = aws_vpc_peering_connection.test.peer_vpc_id } -data "aws_vpc_peering_connection" "test_by_accepter_cidr_block" { - peer_cidr_block = "10.2.0.0/16" - status = "active" +data "aws_vpc_peering_connection" "test" { + peer_cidr_block = data.aws_vpc.accepter.cidr_block +} +` +} - depends_on = [aws_vpc_peering_connection.test] +func testAccDataSourceAwsVpcPeeringConnectionConfigPeerVpcId() string { + return ` +resource "aws_vpc" "requester" { + cidr_block = "10.1.0.0/16" + + tags = { + Name = "terraform-testacc-vpc-peering-connection-data-source" + } } -data "aws_vpc_peering_connection" "test_by_owner_ids" { - owner_id = data.aws_caller_identity.current.account_id - peer_owner_id = data.aws_caller_identity.current.account_id - status = "active" +resource "aws_vpc" "accepter" { + cidr_block = "10.2.0.0/16" - depends_on = [aws_vpc_peering_connection.test] + tags = { + Name = "terraform-testacc-vpc-peering-connection-data-source" + } } -` -const testAccDataSourceAwsVpcPeeringConnectionCidrBlockSetConfig = ` -resource "aws_vpc" "foo" { - cidr_block = "10.4.0.0/16" +resource "aws_vpc_peering_connection" "test" { + vpc_id = aws_vpc.requester.id + peer_vpc_id = aws_vpc.accepter.id + 
auto_accept = true tags = { - Name = "terraform-testacc-vpc-peering-connection-data-source-foo-cidr-block-set" + Name = "terraform-testacc-vpc-peering-connection-data-source" } } -resource "aws_vpc_ipv4_cidr_block_association" "foo_secondary_cidr" { - vpc_id = aws_vpc.foo.id - cidr_block = "10.5.0.0/16" +data "aws_vpc_peering_connection" "test" { + peer_vpc_id = aws_vpc_peering_connection.test.peer_vpc_id +} +` } -resource "aws_vpc" "bar" { - cidr_block = "10.6.0.0/16" +func testAccDataSourceAwsVpcPeeringConnectionConfigVpcId() string { + return ` +resource "aws_vpc" "requester" { + cidr_block = "10.1.0.0/16" tags = { - Name = "terraform-testacc-vpc-peering-connection-data-source-bar-cidr-block-set" + Name = "terraform-testacc-vpc-peering-connection-data-source" } } -resource "aws_vpc_ipv4_cidr_block_association" "bar_secondary_cidr" { - vpc_id = aws_vpc.bar.id - cidr_block = "10.7.0.0/16" +resource "aws_vpc" "accepter" { + cidr_block = "10.2.0.0/16" + + tags = { + Name = "terraform-testacc-vpc-peering-connection-data-source" + } } resource "aws_vpc_peering_connection" "test" { - vpc_id = aws_vpc.foo.id - peer_vpc_id = aws_vpc.bar.id - auto_accept = true + vpc_id = aws_vpc.requester.id + peer_vpc_id = aws_vpc.accepter.id + auto_accept = true tags = { - Name = "terraform-testacc-vpc-peering-connection-data-source-foo-to-bar-cidr-block-set" + Name = "terraform-testacc-vpc-peering-connection-data-source" } - - depends_on = ["aws_vpc_ipv4_cidr_block_association.foo_secondary_cidr", "aws_vpc_ipv4_cidr_block_association.bar_secondary_cidr"] } -data "aws_vpc_peering_connection" "test_by_id" { - id = aws_vpc_peering_connection.test.id +data "aws_vpc_peering_connection" "test" { + vpc_id = aws_vpc_peering_connection.test.vpc_id } ` +} diff --git a/website/docs/d/vpc_peering_connection.html.markdown b/website/docs/d/vpc_peering_connection.html.markdown index 1df602ea429..568e6095685 100644 --- a/website/docs/d/vpc_peering_connection.html.markdown +++ b/website/docs/d/vpc_peering_connection.html.markdown @@ -79,12 +79,12 @@ All of the argument attributes except `filter` are also exported as result attri * `accepter` - A configuration block that describes [VPC Peering Connection] (https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the accepter VPC. -* `requester` - A configuration block that describes [VPC Peering Connection] -(https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the requester VPC. +* `cidr_block_set` - List of objects with CIDR blocks of the requester VPC. -* `cidr_block_set` - (Optional) The list of all CIDR blocks of the requester VPC of the specific VPC Peering Connection to retrieve. +* `peer_cidr_block_set` - List of objects with CIDR blocks of the accepter VPC. -* `peer_cidr_block_set` - (Optional) The list of all CIDR blocks of the accepter VPC of the specific VPC Peering Connection to retrieve. +* `requester` - A configuration block that describes [VPC Peering Connection] +(https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html) options set for the requester VPC. 
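As a usage illustration of the `cidr_block_set` and `peer_cidr_block_set` attributes finalized above, here is a minimal configuration sketch; the peering connection ID and output names are hypothetical, and each set element is an object with a single `cidr_block` attribute, matching the schema in this patch:

```hcl
# Look up an existing VPC peering connection (hypothetical ID).
data "aws_vpc_peering_connection" "example" {
  id = "pcx-0123456789abcdef0"
}

# Each element of cidr_block_set / peer_cidr_block_set is an object holding a
# cidr_block attribute, so a splat expression flattens them into plain lists.
output "requester_cidr_blocks" {
  value = data.aws_vpc_peering_connection.example.cidr_block_set[*].cidr_block
}

output "accepter_cidr_blocks" {
  value = data.aws_vpc_peering_connection.example.peer_cidr_block_set[*].cidr_block
}
```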
#### Accepter and Requester Attributes Reference From ac4f1178f26133e37f49d610a699f67ef347d438 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 25 Jan 2021 10:05:01 -0500 Subject: [PATCH 0857/1212] Fix formatting error in 'tools/main.go' (#17242) --- tools/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/main.go b/tools/main.go index 1e8cedff106..f16d5ded289 100644 --- a/tools/main.go +++ b/tools/main.go @@ -6,7 +6,7 @@ import ( _ "github.com/bflad/tfproviderdocs" _ "github.com/client9/misspell/cmd/misspell" _ "github.com/golangci/golangci-lint/cmd/golangci-lint" + _ "github.com/hashicorp/go-changelog/cmd/changelog-build" _ "github.com/katbyte/terrafmt" _ "github.com/terraform-linters/tflint" - _ "github.com/hashicorp/go-changelog/cmd/changelog-build" ) From 46a538d0f5c3fdbc41b1637ee6948db3fddb4163 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 25 Jan 2021 12:10:10 -0800 Subject: [PATCH 0858/1212] Allows changing replica count without re-creating --- ...ource_aws_elasticache_replication_group.go | 71 +++++++++++++-- ..._aws_elasticache_replication_group_test.go | 88 ++++++++++++++----- ...lasticache_replication_group.html.markdown | 2 +- 3 files changed, 129 insertions(+), 32 deletions(-) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index df0a7e3aff3..124f41538d8 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -78,12 +78,8 @@ func resourceAwsElasticacheReplicationGroup() *schema.Resource { Computed: true, }, "cluster_mode": { - Type: schema.TypeList, - Optional: true, - // We allow Computed: true here since using number_cache_clusters - // and a cluster mode enabled parameter_group_name will create - // a single shard replication group with number_cache_clusters - 1 - // read replicas. Otherwise, the resource is marked ForceNew. 
+ Type: schema.TypeList, + Optional: true, Computed: true, MaxItems: 1, ExactlyOneOf: []string{"cluster_mode", "number_cache_clusters"}, @@ -92,7 +88,6 @@ func resourceAwsElasticacheReplicationGroup() *schema.Resource { "replicas_per_node_group": { Type: schema.TypeInt, Required: true, - ForceNew: true, }, "num_node_groups": { Type: schema.TypeInt, @@ -281,7 +276,9 @@ func resourceAwsElasticacheReplicationGroup() *schema.Resource { CustomizeDiff: customdiff.Sequence( customdiff.ComputedIf("member_clusters", func(ctx context.Context, diff *schema.ResourceDiff, meta interface{}) bool { - return diff.HasChange("number_cache_clusters") || diff.HasChange("cluster_mode.0.num_node_groups") + return diff.HasChange("number_cache_clusters") || + diff.HasChange("cluster_mode.0.num_node_groups") || + diff.HasChange("cluster_mode.0.replicas_per_node_group") }), ), } @@ -523,7 +520,7 @@ func resourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta int func resourceAwsElasticacheReplicationGroupUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).elasticacheconn - if d.HasChange("cluster_mode.0.num_node_groups") { + if d.HasChanges("cluster_mode.0.num_node_groups", "cluster_mode.0.replicas_per_node_group") { err := elasticacheReplicationGroupModifyShardConfiguration(conn, d) if err != nil { return fmt.Errorf("error modifying ElastiCache Replication Group (%s) shard configuration: %w", d.Id(), err) @@ -716,6 +713,24 @@ func validateAwsElastiCacheReplicationGroupEngine(v interface{}, k string) (ws [ } func elasticacheReplicationGroupModifyShardConfiguration(conn *elasticache.ElastiCache, d *schema.ResourceData) error { + if d.HasChange("cluster_mode.0.num_node_groups") { + err := elasticacheReplicationGroupModifyShardConfigurationNumNodeGroups(conn, d) + if err != nil { + return err + } + } + + if d.HasChange("cluster_mode.0.replicas_per_node_group") { + err := elasticacheReplicationGroupModifyShardConfigurationReplicasPerNodeGroup(conn, d) + if err != nil { + return err + } + } + + return nil +} + +func elasticacheReplicationGroupModifyShardConfigurationNumNodeGroups(conn *elasticache.ElastiCache, d *schema.ResourceData) error { o, n := d.GetChange("cluster_mode.0.num_node_groups") oldNumNodeGroups := o.(int) newNumNodeGroups := n.(int) @@ -751,6 +766,44 @@ func elasticacheReplicationGroupModifyShardConfiguration(conn *elasticache.Elast return nil } +func elasticacheReplicationGroupModifyShardConfigurationReplicasPerNodeGroup(conn *elasticache.ElastiCache, d *schema.ResourceData) error { + o, n := d.GetChange("cluster_mode.0.replicas_per_node_group") + oldReplicas := o.(int) + newReplicas := n.(int) + + if newReplicas > oldReplicas { + input := &elasticache.IncreaseReplicaCountInput{ + ApplyImmediately: aws.Bool(true), + NewReplicaCount: aws.Int64(int64(newReplicas)), + ReplicationGroupId: aws.String(d.Id()), + } + _, err := conn.IncreaseReplicaCount(input) + if err != nil { + return fmt.Errorf("error adding ElastiCache Replication Group (%s) replicas: %w", d.Id(), err) + } + _, err = waiter.ReplicationGroupAvailable(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf("error waiting for ElastiCache Replication Group (%s) replica addition: %w", d.Id(), err) + } + } else { + input := &elasticache.DecreaseReplicaCountInput{ + ApplyImmediately: aws.Bool(true), + NewReplicaCount: aws.Int64(int64(newReplicas)), + ReplicationGroupId: aws.String(d.Id()), + } + _, err := conn.DecreaseReplicaCount(input) + if err != nil { + return 
fmt.Errorf("error removing ElastiCache Replication Group (%s) replicas: %w", d.Id(), err) + } + _, err = waiter.ReplicationGroupAvailable(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf("error waiting for ElastiCache Replication Group (%s) replica removal: %w", d.Id(), err) + } + } + + return nil +} + func elasticacheReplicationGroupModifyNumCacheClusters(conn *elasticache.ElastiCache, d *schema.ResourceData) error { o, n := d.GetChange("number_cache_clusters") oldNumberCacheClusters := o.(int) diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go index 5957e1c9a07..161e92b611c 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -455,7 +455,7 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_NonClusteredParameterGrou }) } -func TestAccAWSElasticacheReplicationGroup_ClusterMode_NumNodeGroups(t *testing.T) { +func TestAccAWSElasticacheReplicationGroup_ClusterMode_UpdateNumNodeGroups(t *testing.T) { var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" @@ -484,12 +484,6 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_NumNodeGroups(t *testing. resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0003-002", rName)), ), }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"apply_immediately"}, - }, { Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig(rName, 1, 1), Check: resource.ComposeTestCheckFunc( @@ -526,7 +520,7 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_NumNodeGroups(t *testing. 
}) } -func TestAccAWSElasticacheReplicationGroup_ClusterMode_ReplicasPerNodeGroup(t *testing.T) { +func TestAccAWSElasticacheReplicationGroup_ClusterMode_UpdateReplicasPerNodeGroup(t *testing.T) { var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" @@ -536,6 +530,32 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_ReplicasPerNodeGroup(t *t Providers: testAccProviders, CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, Steps: []resource.TestStep{ + { + Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig(rName, 2, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), + resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.redis6.x.cluster.on"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.num_node_groups", "2"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "1"), + resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "4"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "4"), + ), + }, + { + Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig(rName, 2, 3), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), + resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.redis6.x.cluster.on"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.num_node_groups", "2"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "3"), + resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "8"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "8"), + ), + }, { Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig(rName, 2, 2), Check: resource.ComposeTestCheckFunc( @@ -547,19 +567,47 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_ReplicasPerNodeGroup(t *t resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "2"), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "6"), resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "6"), - resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0001-001", rName)), - resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0001-002", rName)), - resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0001-003", rName)), - resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0002-001", rName)), - resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0002-002", rName)), - resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0002-003", rName)), + ), + }, + }, + }) +} + +func TestAccAWSElasticacheReplicationGroup_ClusterMode_UpdateNumNodeGroupsAndReplicasPerNodeGroup(t *testing.T) { + var rg elasticache.ReplicationGroup + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_elasticache_replication_group.test" + + 
resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig(rName, 2, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), + resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.redis6.x.cluster.on"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.num_node_groups", "2"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "1"), + resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "4"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "4"), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"apply_immediately"}, + Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig(rName, 3, 2), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), + resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.redis6.x.cluster.on"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.num_node_groups", "3"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "2"), + resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "9"), + resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "9"), + ), }, { Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig(rName, 2, 1), @@ -572,10 +620,6 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_ReplicasPerNodeGroup(t *t resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "1"), resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "4"), resource.TestCheckResourceAttr(resourceName, "member_clusters.#", "4"), - resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0001-001", rName)), - resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0001-002", rName)), - resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0002-001", rName)), - resource.TestCheckTypeSetElemAttr(resourceName, "member_clusters.*", fmt.Sprintf("%s-0002-002", rName)), ), }, }, diff --git a/website/docs/r/elasticache_replication_group.html.markdown b/website/docs/r/elasticache_replication_group.html.markdown index 9aa04cf0c14..e5c4eb9739a 100644 --- a/website/docs/r/elasticache_replication_group.html.markdown +++ b/website/docs/r/elasticache_replication_group.html.markdown @@ -142,7 +142,7 @@ Please note that setting a `snapshot_retention_limit` is not supported on cache. Cluster Mode (`cluster_mode`) supports the following: -* `replicas_per_node_group` - (Required) Specify the number of replica nodes in each node group. Valid values are 0 to 5. Changing this number will force a new resource. +* `replicas_per_node_group` - (Required) Specify the number of replica nodes in each node group. Valid values are 0 to 5. 
Changing this number will trigger an online resizing operation before other settings modifications. * `num_node_groups` - (Required) Specify the number of node groups (shards) for this Redis replication group. Changing this number will trigger an online resizing operation before other settings modifications. ## Attributes Reference From c18639afefb43b508364028b67f3c65ed1689974 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 25 Jan 2021 13:37:03 -0800 Subject: [PATCH 0859/1212] Separates NumNodeGroups scale up and down tests --- ..._aws_elasticache_replication_group_test.go | 40 ++++++++++++++----- 1 file changed, 30 insertions(+), 10 deletions(-) diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go index 4695eb3bba9..a859a5f9645 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -445,7 +445,7 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_NonClusteredParameterGrou }) } -func TestAccAWSElasticacheReplicationGroup_ClusterMode_NumNodeGroups(t *testing.T) { +func TestAccAWSElasticacheReplicationGroup_ClusterMode_UpdateNumNodeGroups_ScaleUp(t *testing.T) { var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" @@ -455,6 +455,18 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_NumNodeGroups(t *testing. Providers: testAccProviders, CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, Steps: []resource.TestStep{ + { + Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig(rName, 2, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), + resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "4"), + resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.redis6.x.cluster.on"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.num_node_groups", "2"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "1"), + ), + }, { Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig(rName, 3, 1), Check: resource.ComposeTestCheckFunc( @@ -467,21 +479,29 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_NumNodeGroups(t *testing. 
resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "1"), ), }, + }, + }) +} + +func TestAccAWSElasticacheReplicationGroup_ClusterMode_UpdateNumNodeGroups_ScaleDown(t *testing.T) { + var rg elasticache.ReplicationGroup + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_elasticache_replication_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, + Steps: []resource.TestStep{ { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"apply_immediately"}, - }, - { - Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig(rName, 1, 1), + Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig(rName, 3, 1), Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), - resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), + resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "6"), resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.redis6.x.cluster.on"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.#", "1"), - resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.num_node_groups", "1"), + resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.num_node_groups", "3"), resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "1"), ), }, From b7197d362b3e9a66c0e904bcc73eb74bb3384bda Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 26 Jan 2021 08:28:12 -0500 Subject: [PATCH 0860/1212] Fix Terraform repository link --- docs/contributing/data-handling-and-conversion.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/contributing/data-handling-and-conversion.md b/docs/contributing/data-handling-and-conversion.md index 3c26f5a2692..452515269a5 100644 --- a/docs/contributing/data-handling-and-conversion.md +++ b/docs/contributing/data-handling-and-conversion.md @@ -42,7 +42,7 @@ At the bottom of this documentation is a [Glossary section](#glossary), which ma Before getting into highly specific documentation about the Terraform AWS Provider handling of data, it may be helpful to briefly highlight how Terraform Plugins (Terraform Providers in this case) interact with Terraform CLI and the Terraform State in general and where this documentation fits into the whole process. -There are two primary data flows that are typically handled by resources within a Terraform Provider. Data is either being converted from a planned new Terraform State into making a remote system request or a remote system response is being converted into a applied new Terraform State. The semantics of how the data of the planned new Terraform State is surfaced to the resource implementation is determined by where a resource is in its lifecycle and mainly handled by Terraform CLI. This concept can be explored further in the [Terraform Resource Instance Change Lifecycle documentation](https://github.com/hashicorp/terraform/blob/main/docs/resource-instance-change-lifecycle.md), with the caveat that some additional behaviors occur within the Terraform Plugin SDK as well (if the Terraform Plugin uses that implementation detail). 
+There are two primary data flows that are typically handled by resources within a Terraform Provider. Data is either being converted from a planned new Terraform State into making a remote system request or a remote system response is being converted into an applied new Terraform State. The semantics of how the data of the planned new Terraform State is surfaced to the resource implementation is determined by where a resource is in its lifecycle and mainly handled by Terraform CLI. This concept can be explored further in the [Terraform Resource Instance Change Lifecycle documentation](https://github.com/hashicorp/terraform/blob/master/docs/resource-instance-change-lifecycle.md), with the caveat that some additional behaviors occur within the Terraform Plugin SDK as well (if the Terraform Plugin uses that implementation detail).
 
 As a generic walkthrough, the following data handling occurs when creating a Terraform Resource:

From 93afeacb1a70a62ed2e78592f4c2e1191b610f51 Mon Sep 17 00:00:00 2001
From: Brian Flad
Date: Tue, 26 Jan 2021 08:33:58 -0500
Subject: [PATCH 0861/1212] docs/contributing: Initial Provider Design page (#17034)

* docs/contributing: Initial Provider Design page

The goal for this Contributing Guide page is to capture high-level design rationale that is applicable across the provider. This serves as a home for long-form details about why certain design considerations exist such as not implementing raw HTTP clients, separating out certain resource types from others, and keeping resources to one AWS service API.

* Apply suggestions from code review

Co-authored-by: Kit Ewbank

* docs/contributing: Try to clarify Authorization and Acceptance Resources better when there is implicit acceptance

* Apply suggestions from code review

Co-authored-by: Dirk Avery <31492422+YakDriver@users.noreply.github.com>

* Apply suggestions from code review

Co-authored-by: Dirk Avery <31492422+YakDriver@users.noreply.github.com>

* Apply suggestions from code review

Co-authored-by: Kit Ewbank

Co-authored-by: Dirk Avery <31492422+YakDriver@users.noreply.github.com>
---
 docs/CONTRIBUTING.md | 1 +
 docs/contributing/contribution-checklists.md | 12 +-
 docs/contributing/provider-design.md | 129 +++++++++++++++++++
 3 files changed, 133 insertions(+), 9 deletions(-)
 create mode 100644 docs/contributing/provider-design.md

diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md
index 8c0cabf9d4a..9d96f08339c 100644
--- a/docs/CONTRIBUTING.md
+++ b/docs/CONTRIBUTING.md
@@ -18,6 +18,7 @@ ability to merge PRs and respond to issues.
 
 This documentation also contains reference material specific to certain functionality:
 
+- [Provider Design](contributing/provider-design.md)
 - [Running and Writing Acceptance Tests](contributing/running-and-writing-acceptance-tests.md)
 - [Data Handling and Conversion](contributing/data-handling-and-conversion.md)
 - [Error Handling](contributing/error-handling.md)
diff --git a/docs/contributing/contribution-checklists.md b/docs/contributing/contribution-checklists.md
index 6b359cce4bf..75e6167dfdb 100644
--- a/docs/contributing/contribution-checklists.md
+++ b/docs/contributing/contribution-checklists.md
@@ -216,15 +216,7 @@ resource "aws_service_thing" "test" {
 
 ## Adding Resource Policy Support
 
-Some AWS components support [resource-based IAM policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_identity-vs-resource.html) to control permissions.
When implementing this support in the Terraform AWS Provider, we typically prefer creating a separate resource, `aws_{SERVICE}_{THING}_policy` (e.g. `aws_s3_bucket_policy`) for a few reasons: - -- Many of these policies require the Amazon Resource Name (ARN) of the resource in the policy itself. It is difficult to workaround this requirement with custom difference handling within a self-contained resource. -- Sometimes policies between two resources need to be written where they cross-reference each other resource's ARN within each policy. Without a separate resource, this introduces a configuration cycle. -- Splitting the resources allows operators to logically split their infrastructure on purely operational and security boundaries with separate configurations/modules. -- Splitting the resources prevents any separate policy API calls from needing to be permitted in the main resource in environments with restrictive IAM permissions, which can be undesirable. - -See the [New Resource section](#new-resource) for more information about implementing the separate resource. - +Some AWS components support [resource-based IAM policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_identity-vs-resource.html) to control permissions. When implementing this support in the Terraform AWS Provider, we typically prefer creating a separate resource, `aws_{SERVICE}_{THING}_policy` (e.g. `aws_s3_bucket_policy`). See the [New Resource section](#new-resource) for more information about implementing the separate resource and the [Provider Design page](provider-design.md) for rationale. ## Adding Resource Tagging Support AWS provides key-value metadata across many services and resources, which can be used for a variety of use cases including billing, ownership, and more. See the [AWS Tagging Strategy page](https://aws.amazon.com/answers/account-management/aws-tagging-strategies/) for more information about tagging at a high level. @@ -447,6 +439,8 @@ More details about this code generation, including fixes for potential error mes ## New Resource +_Before submitting this type of contribution, it is highly recommended to read and understand the other pages of the [Contributing Guide](../CONTRIBUTING.md)._ + Implementing a new resource is a good way to learn more about how Terraform interacts with upstream APIs. There are plenty of examples to draw from in the existing resources, but you still get to implement something completely new. diff --git a/docs/contributing/provider-design.md b/docs/contributing/provider-design.md new file mode 100644 index 00000000000..2a183901aea --- /dev/null +++ b/docs/contributing/provider-design.md @@ -0,0 +1,129 @@ +# Provider Design + +_Please Note: This documentation is intended for Terraform AWS Provider code developers. Typical operators writing and applying Terraform configurations do not need to read or understand this material._ + +The Terraform AWS Provider follows the guidelines established in the [HashiCorp Provider Design Principles](https://www.terraform.io/docs/extend/hashicorp-provider-design-principles.html). That general documentation provides many high-level design points gleaned from years of experience with Terraform's design and implementation concepts. Sections below will expand on specific design details between that documentation and this provider, while others will capture other pertinent information that may not be covered there. 
Other pages of the contributing guide cover implementation details such as code, testing, and documentation specifics.
+
+- [API and SDK Boundary](#api-and-sdk-boundary)
+- [Infrastructure as Code Suitability](#infrastructure-as-code-suitability)
+- [Resource Type Considerations](#resource-type-considerations)
+    - [Authorization and Acceptance Resources](#authorization-and-acceptance-resources)
+    - [Cross-Service Functionality](#cross-service-functionality)
+    - [IAM Resource-Based Policy Resources](#iam-resource-based-policy-resources)
+    - [Managing Resource Running State](#managing-resource-running-state)
+    - [Task Execution and Waiter Resources](#task-execution-and-waiter-resources)
+- [Other Considerations](#other-considerations)
+    - [AWS Credential Exfiltration](#aws-credential-exfiltration)
+
+## API and SDK Boundary
+
+The AWS provider implements support for the [AWS](https://aws.amazon.com/) service APIs using the [AWS Go SDK](https://aws.amazon.com/sdk-for-go/). The API and SDK limits extend to the provider. In general, SDK operations manage the lifecycle of AWS components, such as creating, describing, updating, and deleting a database. Operations do not usually handle functionality within those components, such as executing a query on a database. If you are interested in other APIs/SDKs, we invite you to view the many Terraform Providers available, as each has a community of domain expertise.
+
+Some examples of functionality that is not expected in this provider:
+
+* Raw HTTP(S) handling. See the [Terraform HTTP Provider](https://registry.terraform.io/providers/hashicorp/http/latest) and [Terraform TLS Provider](https://registry.terraform.io/providers/hashicorp/tls/latest) instead.
+* Kubernetes resource management beyond the EKS service APIs. See the [Terraform Kubernetes Provider](https://registry.terraform.io/providers/hashicorp/kubernetes/latest) instead.
+* Active Directory or other protocol clients. See the [Terraform Active Directory Provider](https://registry.terraform.io/providers/hashicorp/ad/latest/docs) and other available providers instead.
+* Functionality that requires additional software beyond the Terraform AWS Provider to be installed on the host executing Terraform. This currently includes the AWS CLI. See the [Terraform External Provider](https://registry.terraform.io/providers/hashicorp/external/latest) and other available providers instead.
+
+## Infrastructure as Code Suitability
+
+The provider maintainers' design goal is to cover as much of the AWS API as pragmatically possible. However, not every aspect of the API is compatible with an infrastructure-as-code (IaC) conception. If such limits affect you, we recommend that you open an AWS Support case and encourage others to do the same. Request that AWS components be made more self-contained and compatible with IaC. These AWS Support cases can also yield insights into the AWS service and API that are not well documented.
+
+## Resource Type Considerations
+
+Terraform resources work best as the smallest infrastructure blocks on which practitioners can build more complex configurations and abstractions, such as [Terraform Modules](https://www.terraform.io/docs/modules/). The general heuristic guiding when to implement a new Terraform resource for an aspect of AWS is whether the AWS service API provides create, read, update, and delete (CRUD) operations. However, not all AWS service API functionality falls cleanly into CRUD lifecycle management.
In these situations, there is extra consideration necessary for properly mapping API operations to Terraform resources.
+
+This section highlights design patterns for when to consider an implementation within a single Terraform resource or as separate Terraform resources.
+
+Please note: the overall design and implementation across all AWS functionality is federated: individual services may implement concepts and use terminology differently. As such, this guide is not exhaustive. The aim is to provide general concepts and basic terminology that point contributors in the right direction, especially in understanding previous implementations.
+
+### Authorization and Acceptance Resources
+
+Some AWS services use an authorization-acceptance model for cross-account associations or access. Examples include:
+
+* Direct Connect Association Proposals
+* GuardDuty Member Invitations
+* RAM Resource Share Associations
+* Route 53 VPC Associations
+* Security Hub Member Invitations
+
+Depending on the API and components, AWS uses two basic ways of creating cross-region and cross-account associations. One way is to generate an invitation (or proposal) identifier from one AWS account to another. Then in the other AWS account, that identifier is used to accept the invitation. The second way is configuring a reference to another AWS account identifier. These may not require explicit acceptance on the receiving account to finish creating the association or begin working.
+
+To model creating an association using an invitation or proposal, follow these guidelines.
+
+* Follow the naming in the AWS service API to determine whether to use the term "invitation" or "proposal."
+* For the originating account, create an "invitation" or "proposal" resource. Make sure that the AWS service API has operations for creating and reading invitations.
+* For the responding account, create an "accepter" resource. Ensure that the API has operations for accepting, reading, and rejecting invitations in the responding account. Map the operations as follows:
+    * Create: Accepts the invitation.
+    * Read: Reads the invitation to determine its status. Note that in some APIs, invitations expire and disappear, complicating associations. If a resource does not find an invitation, the developer should implement a fallback to read the API resource associated with the invitation/proposal.
+    * Delete: Rejects or otherwise deletes the invitation.
+
+To model the second type of association, implicit associations, create an "association" resource and, optionally, an "authorization" resource. Map create, read, and delete to the corresponding operations in the AWS service API.
+
+### Cross-Service Functionality
+
+Many AWS service APIs build on top of other AWS services. Some examples of these include:
+
+* EKS Node Groups managing Auto Scaling Groups
+* Lambda Functions managing EC2 ENIs
+* Transfer Servers managing EC2 VPC Endpoints
+
+Some cross-service API implementations lack the management or description capabilities of the other service. This lack can make the Terraform resource implementation seem incomplete or unsuccessful in end-to-end configurations. Given the overall “resources should represent a single API object” goal from the [HashiCorp Provider Design Principles](https://www.terraform.io/docs/extend/hashicorp-provider-design-principles.html), a resource must only communicate with a single AWS service API. As such, maintainers will not approve cross-service resources.
+ The rationale behind this design decision includes the following:
+
+* Unexpected IAM permissions being necessary for the resource. In high-security environments, not all of the service permissions may be available or acceptable.
+* Unexpected services generating CloudTrail logs for the resource.
+* Needing extra and unexpected API endpoint configuration for organizations using custom endpoints, such as VPC endpoints.
+* Unexpected changes to the AWS service internals for the cross-service implementations. Given that this functionality is not part of the primary service API, these details can change over time and may not be considered a breaking change by the service team for an API upgrade.
+
+A poignant real-world example of the last point involved a Lambda resource. The resource helped clean up extra resources (ENIs) due to a common misconfiguration. Practitioners found the functionality helpful since the issue was hard to diagnose. Years later, AWS updated the Lambda API. Immediately, practitioners reported that Terraform executions were failing. Downgrading the provider was not possible since many configurations depended on recent releases. For environments running many versions behind, forcing an upgrade with the fix would likely cause unrelated and unexpected changes. In the end, HashiCorp and AWS performed a large-scale outreach to help upgrade and fix the misconfigurations. Provider maintainers and practitioners lost considerable time.
+
+### IAM Resource-Based Policy Resources
+
+For some AWS components, the AWS API allows specifying an [IAM resource-based policy](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_identity-vs-resource.html), the IAM policy to associate with a component. Some examples include:
+
+* ECR Repository Policies
+* EFS File System Policies
+* SNS Topic Policies
+
+Provider developers should implement this capability in a new resource rather than adding it to the associated resource. Reasons for this include:
+
+* Many of the policies must include the Amazon Resource Name (ARN) of the resource. Working around this requirement with custom difference handling within a self-contained resource is unnecessarily cumbersome.
+* Some policies involving multiple resources need to cross-reference each other's ARNs. Without a separate resource, this introduces a configuration cycle.
+* Splitting the resources allows operators to logically split their configurations into purely operational and security boundaries. This allows environments to have distinct practitioner roles and permissions for IAM versus infrastructure changes.
+
+One rare exception to this guideline is where the policy is _required_ during resource creation.
+
+### Managing Resource Running State
+
+The AWS API provides the ability to start, stop, enable, or disable some AWS components. Some examples include:
+
+* Batch Job Queues
+* CloudFront Distributions
+* RDS DB Event Subscriptions
+
+In this situation, provider developers should implement this ability within the resource instead of creating a separate resource. Since a practitioner cannot practically manage interaction with a resource's states in Terraform's declarative configuration, developers should implement the state management in the resource. This design provides consistency and future-proofing even where updating a resource in the current API is not problematic.
+
+### Task Execution and Waiter Resources
+
+Some AWS operations are asynchronous. Terraform requests that AWS perform a task.
Initially, AWS only notifies Terraform that it received the request. Terraform then requests the status while awaiting completion. Examples of this include:
+
+* ACM Certificate validation
+* EC2 AMI copying
+* RDS DB Cluster Snapshot management
+
+In this situation, provider developers should create a separate resource representing the task, assuming that the AWS service API provides operations to start the task and read its status. Adding the task functionality to the parent resource muddies its infrastructure-management purpose. The maintainers prefer this approach even though there is some duplication of an existing resource. For example, the provider has a resource for copying an EC2 AMI in addition to the EC2 AMI resource itself. This modularity allows practitioners to manage the result of the task resource with another resource.
+
+For a related consideration, see the [Managing Resource Running State section](#managing-resource-running-state).
+
+## Other Considerations
+
+### AWS Credential Exfiltration
+
+In the interest of security, the maintainers will not approve data sources that provide the ability to reference or export the AWS credentials of the running provider. There are valid use cases for this information, such as to execute AWS CLI calls as part of the same Terraform configuration. However, this mechanism may allow credentials to be discovered and used outside of Terraform. Some specific concerns include:
+
+* The values may be visible in Terraform user interface output or logging, allowing anyone with user interface or log access to see the credentials.
+* The values are currently stored in plaintext in the Terraform state, allowing anyone with access to the state file, or to another Terraform configuration that references the state, to access the credentials.
+* Any new related functionality, while opt-in to implement, is also opt-in to prevent via security controls or policies. Adopting a weaker default security posture requires advance notice and prevents organizations that implement those controls from updating to a version with any such functionality.

From dfbee2cd0afe386fa90851456d1dffeeae8e8192 Mon Sep 17 00:00:00 2001
From: changelogbot
Date: Tue, 26 Jan 2021 13:42:43 +0000
Subject: [PATCH 0862/1212] Update CHANGELOG.md for #17034

---
 CHANGELOG.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f443ab5c510..d4cc3cd3308 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,9 @@
 ## 3.26.0 (Unreleased)
 
+ENHANCEMENTS:
+
+* data-source/aws_vpc_peering_connection: Add `cidr_block_set` and `peer_cidr_block_set` attributes (https://github.com/hashicorp/terraform-provider-aws/issues/13420)
+
 BUG FIXES:
 
 * resource/aws_elasticache_replication_group: Correctly update computed `member_clusters` values (https://github.com/hashicorp/terraform-provider-aws/issues/17201)

From 2de9854f693e92264ddfe609f54832d488d130e2 Mon Sep 17 00:00:00 2001
From: Lucas Cantor
Date: Tue, 26 Jan 2021 06:53:29 -0800
Subject: [PATCH 0863/1212] docs/resource/aws_ssoadmin_account_assignment: correct example aws_ssoadmin_permission_set name (#17263)

The previously used name of `selected` appears to be invalid. Perhaps this is a remnant from copying [other examples from the terraform-provider-aws docs](https://github.com/hashicorp/terraform-provider-aws/search?q=%22selected%22). Correcting this to `example` resolves the error.
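Ahead of the diff below, a minimal sketch of the corrected reference pattern; it assumes an `aws_ssoadmin_instances` data source declared with the label `example`, and the group display name is a hypothetical value:

```hcl
data "aws_ssoadmin_instances" "example" {}

data "aws_identitystore_group" "example" {
  # The reference must use the label the data source was declared with
  # ("example" here), not an undeclared label such as "selected".
  identity_store_id = tolist(data.aws_ssoadmin_instances.example.identity_store_ids)[0]

  filter {
    attribute_path  = "DisplayName"
    attribute_value = "ExampleGroup" # hypothetical group name
  }
}
```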
--- website/docs/r/ssoadmin_account_assignment.html.markdown | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/docs/r/ssoadmin_account_assignment.html.markdown b/website/docs/r/ssoadmin_account_assignment.html.markdown
index 5d4d5d8f592..d3fe7164466 100644
--- a/website/docs/r/ssoadmin_account_assignment.html.markdown
+++ b/website/docs/r/ssoadmin_account_assignment.html.markdown
@@ -21,7 +21,7 @@ data "aws_ssoadmin_permission_set" "example" {
 }
 
 data "aws_identitystore_group" "example" {
-  identity_store_id = tolist(data.aws_ssoadmin_instances.selected.identity_store_ids)[0]
+  identity_store_id = tolist(data.aws_ssoadmin_instances.example.identity_store_ids)[0]
 
   filter {
     attribute_path = "DisplayName"

From 74fe84d001f283173fd5234fd74db2e44535d67e Mon Sep 17 00:00:00 2001
From: Lucas Maxwell
Date: Wed, 27 Jan 2021 01:59:05 +1100
Subject: [PATCH 0864/1212] docs/resource/aws_ssm_patch_baseline: Clarify valid operating_system/patch_filter key combinations (#17231)

* Clarify valid patch_filter combinations

Some of the key types are Windows only; link to the API docs

* Add note for valid patch_filter values

Values can be exact or wildcards, described by the PatchFilter reference: https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_PatchFilter.html

---
 website/docs/r/ssm_patch_baseline.html.markdown | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/docs/r/ssm_patch_baseline.html.markdown b/website/docs/r/ssm_patch_baseline.html.markdown
index 259a3437d77..48a02bbd29d 100644
--- a/website/docs/r/ssm_patch_baseline.html.markdown
+++ b/website/docs/r/ssm_patch_baseline.html.markdown
@@ -136,7 +136,7 @@ The following arguments are supported:
 The `approval_rule` block supports:
 
 * `approve_after_days` - (Required) The number of days after the release date of each patch matched by the rule the patch is marked as approved in the patch baseline. Valid Range: 0 to 100.
-* `patch_filter` - (Required) The patch filter group that defines the criteria for the rule. Up to 5 patch filters can be specified per approval rule using Key/Value pairs. Valid Keys are `PATCH_SET | PRODUCT | CLASSIFICATION | MSRC_SEVERITY | PATCH_ID`.
+* `patch_filter` - (Required) The patch filter group that defines the criteria for the rule. Up to 5 patch filters can be specified per approval rule using Key/Value pairs. Valid Keys are `PATCH_SET | PRODUCT | CLASSIFICATION | MSRC_SEVERITY | PATCH_ID`. Valid combinations of these Keys and the `operating_system` value can be found in the [SSM DescribePatchProperties API Reference](https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_DescribePatchProperties.html). Valid Values are exact values for the patch property given as the key, or a wildcard `*`, which matches all values.
   * `PATCH_SET` defaults to `OS` if unspecified
 * `compliance_level` - (Optional) Defines the compliance level for patches approved by this rule. Valid compliance levels include the following: `CRITICAL`, `HIGH`, `MEDIUM`, `LOW`, `INFORMATIONAL`, `UNSPECIFIED`. The default value is `UNSPECIFIED`.
 * `enable_non_security` - (Optional) Boolean enabling the application of non-security updates. The default value is 'false'. Valid for Linux instances only.

From a72c8de433c71c2112e4c4936306f7e580dadd4f Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Tue, 26 Jan 2021 09:55:50 -0500
Subject: [PATCH 0865/1212] r/aws_imagebuilder_image_recipe: Add support for 'volume_type = "gp3"'.
Acceptance test output: $ make testacc TEST=./aws TESTARGS='-run=TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_VolumeType' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_VolumeType -timeout 120m === RUN TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_VolumeTypeGp2 === PAUSE TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_VolumeTypeGp2 === RUN TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_VolumeTypeGp3 === PAUSE TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_VolumeTypeGp3 === CONT TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_VolumeTypeGp2 === CONT TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_VolumeTypeGp3 --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_VolumeTypeGp3 (22.45s) --- PASS: TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_VolumeTypeGp2 (22.86s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 22.956s --- aws/internal/service/imagebuilder/id.go | 7 +++++ aws/resource_aws_imagebuilder_image_recipe.go | 10 +++--- ...urce_aws_imagebuilder_image_recipe_test.go | 31 ++++++++++++++++++- 3 files changed, 43 insertions(+), 5 deletions(-) create mode 100644 aws/internal/service/imagebuilder/id.go diff --git a/aws/internal/service/imagebuilder/id.go b/aws/internal/service/imagebuilder/id.go new file mode 100644 index 00000000000..1000e75f720 --- /dev/null +++ b/aws/internal/service/imagebuilder/id.go @@ -0,0 +1,7 @@ +package imagebuilder + +const ( + // Missing from upstream aws-sdk-go. + // https://github.com/aws/aws-sdk-go/issues/3751. + EbsVolumeTypeGp3 = "gp3" +) diff --git a/aws/resource_aws_imagebuilder_image_recipe.go b/aws/resource_aws_imagebuilder_image_recipe.go index d37b44cc2c0..2f5687a4543 100644 --- a/aws/resource_aws_imagebuilder_image_recipe.go +++ b/aws/resource_aws_imagebuilder_image_recipe.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + tfimagebuilder "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/imagebuilder" ) func resourceAwsImageBuilderImageRecipe() *schema.Resource { @@ -94,10 +95,11 @@ func resourceAwsImageBuilderImageRecipe() *schema.Resource { ValidateFunc: validation.IntBetween(1, 16000), }, "volume_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(imagebuilder.EbsVolumeType_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + // https://github.com/hashicorp/terraform-provider-aws/issues/17274. 
+ ValidateFunc: validation.StringInSlice(append(imagebuilder.EbsVolumeType_Values(), tfimagebuilder.EbsVolumeTypeGp3), false), }, }, }, diff --git a/aws/resource_aws_imagebuilder_image_recipe_test.go b/aws/resource_aws_imagebuilder_image_recipe_test.go index 39a19483cbf..97321b489a1 100644 --- a/aws/resource_aws_imagebuilder_image_recipe_test.go +++ b/aws/resource_aws_imagebuilder_image_recipe_test.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + tfimagebuilder "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/imagebuilder" ) func init() { @@ -326,7 +327,7 @@ func TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_VolumeSize(t *test }) } -func TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_VolumeType(t *testing.T) { +func TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_VolumeTypeGp2(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_imagebuilder_image_recipe.test" @@ -354,6 +355,34 @@ func TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_VolumeType(t *test }) } +func TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_Ebs_VolumeTypeGp3(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_imagebuilder_image_recipe.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsImageBuilderImageRecipeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsImageBuilderImageRecipeConfigBlockDeviceMappingEbsVolumeType(rName, tfimagebuilder.EbsVolumeTypeGp3), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsImageBuilderImageRecipeExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "block_device_mapping.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "block_device_mapping.*", map[string]string{ + "ebs.0.volume_type": tfimagebuilder.EbsVolumeTypeGp3, + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAwsImageBuilderImageRecipe_BlockDeviceMapping_NoDevice(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_imagebuilder_image_recipe.test" From 64670fb75c7c9784df3fbfe60060e6cc38ab51cb Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 26 Jan 2021 10:01:26 -0500 Subject: [PATCH 0866/1212] Add CHANGELOG entry. 
--- .changelog/17286.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/17286.txt diff --git a/.changelog/17286.txt b/.changelog/17286.txt new file mode 100644 index 00000000000..596ef924c92 --- /dev/null +++ b/.changelog/17286.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_imagebuilder_image_recipe: Add `gp3` as a valid value for the `volume_type` attribute +``` From 77fab4e48afd8959d402817f86459d667d0d4c53 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 26 Jan 2021 10:13:40 -0500 Subject: [PATCH 0867/1212] Update CHANGELOG for #17226 --- .changelog/17226.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/17226.txt diff --git a/.changelog/17226.txt b/.changelog/17226.txt new file mode 100644 index 00000000000..fff1cf4c30b --- /dev/null +++ b/.changelog/17226.txt @@ -0,0 +1,3 @@ +```release-note:bug +data-source/aws_elb_hosted_zone_id: Correct values for `cn-north-1` and `cn-northwest-1` regions +``` From 970a3be105a11d3794c961fb2fe3224ef4d9b2ae Mon Sep 17 00:00:00 2001 From: Ivan Zhamoidzin Date: Tue, 26 Jan 2021 18:27:29 +0300 Subject: [PATCH 0868/1212] Add "linkTag" SES event dimension options to docs (#17173) Add "linkTag" option to the documentation of SES events CloudWatch destination dimensions. --- website/docs/r/ses_event_destination.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/ses_event_destination.markdown b/website/docs/r/ses_event_destination.markdown index 48f4638bb33..ef701579f94 100644 --- a/website/docs/r/ses_event_destination.markdown +++ b/website/docs/r/ses_event_destination.markdown @@ -78,7 +78,7 @@ The following arguments are supported: * `default_value` - (Required) The default value for the event * `dimension_name` - (Required) The name for the dimension -* `value_source` - (Required) The source for the value. It can be either `"messageTag"` or `"emailHeader"` +* `value_source` - (Required) The source for the value. May be any of `"messageTag"`, `"emailHeader"` or `"linkTag"`. ### kinesis_destination Argument Reference From 3cae7e8fccae6b8cb3d99a0f43f7fc8d1bc43dad Mon Sep 17 00:00:00 2001 From: John Date: Tue, 26 Jan 2021 10:29:40 -0500 Subject: [PATCH 0869/1212] docs/resource/aws_instance: Update documentation for #15474 (#17174) --- website/docs/r/instance.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/instance.html.markdown b/website/docs/r/instance.html.markdown index 9e899d97fb0..de57fcfd861 100644 --- a/website/docs/r/instance.html.markdown +++ b/website/docs/r/instance.html.markdown @@ -170,7 +170,7 @@ The `root_block_device` mapping supports the following: * `volume_size` - (Optional) Size of the volume in gibibytes (GiB). * `volume_type` - (Optional) Type of volume. Valid values include `standard`, `gp2`, `gp3`, `io1`, `io2`, `sc1`, or `st1`. Defaults to `gp2`. -Modifying any of the `root_block_device` settings other than `volume_size` requires resource replacement. +Modifying any of the `root_block_device` settings other than `volume_size` or `tags` requires resource replacement. 
Each `ebs_block_device` supports the following: From 2bb2457c61561b6f07d977e557eaf98e58f661dc Mon Sep 17 00:00:00 2001 From: andrew quartey Date: Tue, 26 Jan 2021 10:35:45 -0500 Subject: [PATCH 0870/1212] docs/resource/aws_transfer_user: update resource documentation example (#17162) --- website/docs/r/transfer_user.html.markdown | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/website/docs/r/transfer_user.html.markdown b/website/docs/r/transfer_user.html.markdown index a6dfc63f118..e5c12663b74 100644 --- a/website/docs/r/transfer_user.html.markdown +++ b/website/docs/r/transfer_user.html.markdown @@ -64,6 +64,12 @@ resource "aws_transfer_user" "foo" { server_id = aws_transfer_server.foo.id user_name = "tftestuser" role = aws_iam_role.foo.arn + + home_directory_type = "LOGICAL" + home_directory_mappings { + entry = "/test.pdf" + target = "/bucket3/test-path/tftestuser.pdf" + } } ``` From 73d1aaa1c533317de24649fb9d66de2dc17c2f92 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 26 Jan 2021 10:50:00 -0500 Subject: [PATCH 0871/1212] resource/aws_ebs_volume: Allow both 'snapshot_id' and 'size' (#17243) * resource/aws_ebs_volume: Allow both 'snapshot_id' and 'size'. Acceptance test output: $ make testacc TEST=./aws TESTARGS='-run=TestAccAWSEBSVolume_snapshotID' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccAWSEBSVolume_snapshotID -timeout 120m === RUN TestAccAWSEBSVolume_snapshotID === PAUSE TestAccAWSEBSVolume_snapshotID === RUN TestAccAWSEBSVolume_snapshotIDAndSize === PAUSE TestAccAWSEBSVolume_snapshotIDAndSize === CONT TestAccAWSEBSVolume_snapshotID === CONT TestAccAWSEBSVolume_snapshotIDAndSize --- PASS: TestAccAWSEBSVolume_snapshotIDAndSize (44.60s) --- PASS: TestAccAWSEBSVolume_snapshotID (46.74s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 46.829s * Fix formatting error in 'tools/main.go' * Add CHANGELOG entry. * resource/aws_ebs_volume: Ensure at least one of 'snapshot_id' or 'size'. Acceptance test output: $ make testacc TEST=./aws TESTARGS='-run=TestAccAWSEBSVolume_' ==> Checking that code complies with gofmt requirements... 
TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccAWSEBSVolume_ -timeout 120m === RUN TestAccAWSEBSVolume_basic === PAUSE TestAccAWSEBSVolume_basic === RUN TestAccAWSEBSVolume_updateAttachedEbsVolume === PAUSE TestAccAWSEBSVolume_updateAttachedEbsVolume === RUN TestAccAWSEBSVolume_updateSize === PAUSE TestAccAWSEBSVolume_updateSize === RUN TestAccAWSEBSVolume_updateType === PAUSE TestAccAWSEBSVolume_updateType === RUN TestAccAWSEBSVolume_updateIops_Io1 === PAUSE TestAccAWSEBSVolume_updateIops_Io1 === RUN TestAccAWSEBSVolume_updateIops_Io2 === PAUSE TestAccAWSEBSVolume_updateIops_Io2 === RUN TestAccAWSEBSVolume_kmsKey === PAUSE TestAccAWSEBSVolume_kmsKey === RUN TestAccAWSEBSVolume_NoIops === PAUSE TestAccAWSEBSVolume_NoIops === RUN TestAccAWSEBSVolume_InvalidIopsForType === PAUSE TestAccAWSEBSVolume_InvalidIopsForType === RUN TestAccAWSEBSVolume_InvalidThroughputForType === PAUSE TestAccAWSEBSVolume_InvalidThroughputForType === RUN TestAccAWSEBSVolume_withTags === PAUSE TestAccAWSEBSVolume_withTags === RUN TestAccAWSEBSVolume_multiAttach === PAUSE TestAccAWSEBSVolume_multiAttach === RUN TestAccAWSEBSVolume_outpost === PAUSE TestAccAWSEBSVolume_outpost === RUN TestAccAWSEBSVolume_gp3_basic === PAUSE TestAccAWSEBSVolume_gp3_basic === RUN TestAccAWSEBSVolume_gp3_iops === PAUSE TestAccAWSEBSVolume_gp3_iops === RUN TestAccAWSEBSVolume_gp3_throughput === PAUSE TestAccAWSEBSVolume_gp3_throughput === RUN TestAccAWSEBSVolume_snapshotID === PAUSE TestAccAWSEBSVolume_snapshotID === RUN TestAccAWSEBSVolume_snapshotIDAndSize === PAUSE TestAccAWSEBSVolume_snapshotIDAndSize === RUN TestAccAWSEBSVolume_disappears === PAUSE TestAccAWSEBSVolume_disappears === CONT TestAccAWSEBSVolume_basic === CONT TestAccAWSEBSVolume_withTags === CONT TestAccAWSEBSVolume_disappears === CONT TestAccAWSEBSVolume_snapshotIDAndSize === CONT TestAccAWSEBSVolume_snapshotID === CONT TestAccAWSEBSVolume_gp3_throughput === CONT TestAccAWSEBSVolume_gp3_iops === CONT TestAccAWSEBSVolume_gp3_basic === CONT TestAccAWSEBSVolume_outpost === CONT TestAccAWSEBSVolume_multiAttach === CONT TestAccAWSEBSVolume_updateIops_Io2 === CONT TestAccAWSEBSVolume_InvalidThroughputForType === CONT TestAccAWSEBSVolume_InvalidIopsForType === CONT TestAccAWSEBSVolume_NoIops === CONT TestAccAWSEBSVolume_kmsKey === CONT TestAccAWSEBSVolume_updateType === CONT TestAccAWSEBSVolume_updateIops_Io1 === CONT TestAccAWSEBSVolume_updateSize === CONT TestAccAWSEBSVolume_updateAttachedEbsVolume === CONT TestAccAWSEBSVolume_outpost data_source_aws_outposts_outposts_test.go:66: skipping since no Outposts found --- SKIP: TestAccAWSEBSVolume_outpost (2.98s) --- PASS: TestAccAWSEBSVolume_InvalidThroughputForType (20.53s) --- PASS: TestAccAWSEBSVolume_InvalidIopsForType (20.60s) --- PASS: TestAccAWSEBSVolume_disappears (57.32s) --- PASS: TestAccAWSEBSVolume_NoIops (65.27s) --- PASS: TestAccAWSEBSVolume_multiAttach (72.48s) --- PASS: TestAccAWSEBSVolume_withTags (72.54s) --- PASS: TestAccAWSEBSVolume_basic (72.60s) --- PASS: TestAccAWSEBSVolume_gp3_basic (73.47s) --- PASS: TestAccAWSEBSVolume_kmsKey (74.54s) --- PASS: TestAccAWSEBSVolume_snapshotIDAndSize (80.58s) --- PASS: TestAccAWSEBSVolume_snapshotID (84.31s) --- PASS: TestAccAWSEBSVolume_updateIops_Io2 (84.79s) --- PASS: TestAccAWSEBSVolume_updateType (97.39s) --- PASS: TestAccAWSEBSVolume_updateIops_Io1 (100.53s) --- PASS: TestAccAWSEBSVolume_gp3_throughput (101.28s) --- PASS: TestAccAWSEBSVolume_updateSize (101.72s) --- PASS: TestAccAWSEBSVolume_gp3_iops (102.74s) --- PASS: 
TestAccAWSEBSVolume_updateAttachedEbsVolume (189.96s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 190.076s --- .changelog/17243.txt | 3 + aws/resource_aws_ebs_volume.go | 4 +- aws/resource_aws_ebs_volume_test.go | 143 ++++++++++++++++++++++++ website/docs/r/ebs_volume.html.markdown | 2 +- 4 files changed, 149 insertions(+), 3 deletions(-) create mode 100644 .changelog/17243.txt diff --git a/.changelog/17243.txt b/.changelog/17243.txt new file mode 100644 index 00000000000..0d66d523ba5 --- /dev/null +++ b/.changelog/17243.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_ebs_volume: Allow both `size` and `snapshot_id` attributes to be specified +``` diff --git a/aws/resource_aws_ebs_volume.go b/aws/resource_aws_ebs_volume.go index 47c6ae8122f..6b57529222d 100644 --- a/aws/resource_aws_ebs_volume.go +++ b/aws/resource_aws_ebs_volume.go @@ -65,14 +65,14 @@ func resourceAwsEbsVolume() *schema.Resource { Type: schema.TypeInt, Optional: true, Computed: true, - ExactlyOneOf: []string{"size", "snapshot_id"}, + AtLeastOneOf: []string{"size", "snapshot_id"}, }, "snapshot_id": { Type: schema.TypeString, Optional: true, Computed: true, ForceNew: true, - ExactlyOneOf: []string{"size", "snapshot_id"}, + AtLeastOneOf: []string{"size", "snapshot_id"}, }, "outpost_arn": { Type: schema.TypeString, diff --git a/aws/resource_aws_ebs_volume_test.go b/aws/resource_aws_ebs_volume_test.go index 8550bbebe5e..2d5f2919518 100644 --- a/aws/resource_aws_ebs_volume_test.go +++ b/aws/resource_aws_ebs_volume_test.go @@ -595,6 +595,84 @@ func TestAccAWSEBSVolume_gp3_throughput(t *testing.T) { }) } +func TestAccAWSEBSVolume_snapshotID(t *testing.T) { + var v ec2.Volume + resourceName := "aws_ebs_volume.test" + snapshotResourceName := "aws_ebs_snapshot.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: resourceName, + Providers: testAccProviders, + CheckDestroy: testAccCheckVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEbsVolumeConfigSnapshotId(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckVolumeExists(resourceName, &v), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "ec2", regexp.MustCompile(`volume/vol-.+`)), + resource.TestCheckResourceAttr(resourceName, "encrypted", "false"), + resource.TestCheckResourceAttr(resourceName, "iops", "100"), + resource.TestCheckResourceAttr(resourceName, "kms_key_id", ""), + resource.TestCheckResourceAttr(resourceName, "multi_attach_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "outpost_arn", ""), + resource.TestCheckResourceAttr(resourceName, "size", "1"), + resource.TestCheckResourceAttrPair(resourceName, "snapshot_id", snapshotResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + resource.TestCheckResourceAttr(resourceName, "throughput", "0"), + resource.TestCheckResourceAttr(resourceName, "type", "gp2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSEBSVolume_snapshotIDAndSize(t *testing.T) { + var v ec2.Volume + resourceName := "aws_ebs_volume.test" + snapshotResourceName := "aws_ebs_snapshot.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: resourceName, + Providers: testAccProviders, 
+ CheckDestroy: testAccCheckVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEbsVolumeConfigSnapshotIdAndSize(rName, 20), + Check: resource.ComposeTestCheckFunc( + testAccCheckVolumeExists(resourceName, &v), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "ec2", regexp.MustCompile(`volume/vol-.+`)), + resource.TestCheckResourceAttr(resourceName, "encrypted", "false"), + resource.TestCheckResourceAttr(resourceName, "iops", "100"), + resource.TestCheckResourceAttr(resourceName, "kms_key_id", ""), + resource.TestCheckResourceAttr(resourceName, "multi_attach_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "outpost_arn", ""), + resource.TestCheckResourceAttr(resourceName, "size", "20"), + resource.TestCheckResourceAttrPair(resourceName, "snapshot_id", snapshotResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + resource.TestCheckResourceAttr(resourceName, "throughput", "0"), + resource.TestCheckResourceAttr(resourceName, "type", "gp2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAWSEBSVolume_disappears(t *testing.T) { var v ec2.Volume resourceName := "aws_ebs_volume.test" @@ -1166,3 +1244,68 @@ resource "aws_ebs_volume" "test" { } `, rName, iops)) } + +func testAccAwsEbsVolumeConfigSnapshotId(rName string) string { + return composeConfig( + testAccAvailableAZsNoOptInConfig(), + fmt.Sprintf(` +resource "aws_ebs_volume" "source" { + availability_zone = data.aws_availability_zones.available.names[0] + size = 1 + + tags = { + Name = %[1]q + } +} + +resource "aws_ebs_snapshot" "test" { + volume_id = aws_ebs_volume.source.id + + tags = { + Name = %[1]q + } +} + +resource "aws_ebs_volume" "test" { + availability_zone = data.aws_availability_zones.available.names[0] + snapshot_id = aws_ebs_snapshot.test.id + + tags = { + Name = %[1]q + } +} +`, rName)) +} + +func testAccAwsEbsVolumeConfigSnapshotIdAndSize(rName string, size int) string { + return composeConfig( + testAccAvailableAZsNoOptInConfig(), + fmt.Sprintf(` +resource "aws_ebs_volume" "source" { + availability_zone = data.aws_availability_zones.available.names[0] + size = 10 + + tags = { + Name = %[1]q + } +} + +resource "aws_ebs_snapshot" "test" { + volume_id = aws_ebs_volume.source.id + + tags = { + Name = %[1]q + } +} + +resource "aws_ebs_volume" "test" { + availability_zone = data.aws_availability_zones.available.names[0] + snapshot_id = aws_ebs_snapshot.test.id + size = %[2]d + + tags = { + Name = %[1]q + } +} +`, rName, size)) +} diff --git a/website/docs/r/ebs_volume.html.markdown b/website/docs/r/ebs_volume.html.markdown index e0c1e716ead..f5746c4142d 100644 --- a/website/docs/r/ebs_volume.html.markdown +++ b/website/docs/r/ebs_volume.html.markdown @@ -23,7 +23,7 @@ resource "aws_ebs_volume" "example" { } ``` -~> **NOTE**: One of `size` or `snapshot_id` is required when specifying an EBS volume +~> **NOTE**: At least one of `size` or `snapshot_id` is required when specifying an EBS volume ## Argument Reference From 698dd314d8cb2b1060ce87dd1c3ecf81724f28e8 Mon Sep 17 00:00:00 2001 From: Shuhei Kitagawa Date: Wed, 27 Jan 2021 00:54:41 +0900 Subject: [PATCH 0872/1212] provider: Add emrcontainers service (#17065) * Add emrcontainers service * Apply suggestions from code review Co-authored-by: Brian Flad --- .hashibot.hcl | 8 ++++++++ aws/config.go | 3 +++ aws/provider.go | 1 + 
infrastructure/repository/labels-service.tf | 1 + website/allowed-subcategories.txt | 1 + website/docs/guides/custom-service-endpoints.html.md | 1 + 6 files changed, 15 insertions(+) diff --git a/.hashibot.hcl b/.hashibot.hcl index e50c88fb991..06d6e092463 100644 --- a/.hashibot.hcl +++ b/.hashibot.hcl @@ -316,6 +316,9 @@ behavior "regexp_issue_labeler_v2" "service_labels" { "service/emr" = [ "aws_emr_", ], + "service/emrcontainers" = [ + "aws_emrcontainers_", + ], "service/eventbridge" = [ # EventBridge is rebranded CloudWatch Events "aws_cloudwatch_event_", @@ -1078,6 +1081,11 @@ behavior "pull_request_path_labeler" "service_labels" { "**/*_emr_*", "**/emr_*" ] + "service/emrcontainers" = [ + "aws/internal/service/emrcontainers/**/*", + "**/*_emrcontainers_*", + "**/emrcontainers_*" + ] "service/eventbridge" = [ # EventBridge is rebranded CloudWatch Events "aws/internal/service/cloudwatchevents/**/*", diff --git a/aws/config.go b/aws/config.go index 00333097e04..9bc18b0aa60 100644 --- a/aws/config.go +++ b/aws/config.go @@ -71,6 +71,7 @@ import ( "github.com/aws/aws-sdk-go/service/elb" "github.com/aws/aws-sdk-go/service/elbv2" "github.com/aws/aws-sdk-go/service/emr" + "github.com/aws/aws-sdk-go/service/emrcontainers" "github.com/aws/aws-sdk-go/service/firehose" "github.com/aws/aws-sdk-go/service/fms" "github.com/aws/aws-sdk-go/service/forecastservice" @@ -267,6 +268,7 @@ type AWSClient struct { elbconn *elb.ELB elbv2conn *elbv2.ELBV2 emrconn *emr.EMR + emrcontainersconn *emrcontainers.EMRContainers esconn *elasticsearch.ElasticsearchService firehoseconn *firehose.Firehose fmsconn *fms.FMS @@ -507,6 +509,7 @@ func (c *Config) Client() (interface{}, error) { elbconn: elb.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["elb"])})), elbv2conn: elbv2.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["elb"])})), emrconn: emr.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["emr"])})), + emrcontainersconn: emrcontainers.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["emrcontainers"])})), esconn: elasticsearch.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["es"])})), firehoseconn: firehose.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["firehose"])})), fmsconn: fms.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["fms"])})), diff --git a/aws/provider.go b/aws/provider.go index 367c9c4bf61..ef76a2851e7 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -1206,6 +1206,7 @@ func init() { "elastictranscoder", "elb", "emr", + "emrcontainers", "es", "firehose", "fms", diff --git a/infrastructure/repository/labels-service.tf b/infrastructure/repository/labels-service.tf index b525fc0fe4a..57d9e43f2ad 100644 --- a/infrastructure/repository/labels-service.tf +++ b/infrastructure/repository/labels-service.tf @@ -85,6 +85,7 @@ variable "service_labels" { "elb", "elbv2", "emr", + "emrcontainers", "eventbridge", "firehose", "fms", diff --git a/website/allowed-subcategories.txt b/website/allowed-subcategories.txt index 6b36cececc6..b45b2e9eadc 100644 --- a/website/allowed-subcategories.txt +++ b/website/allowed-subcategories.txt @@ -51,6 +51,7 @@ Elastic Beanstalk Elastic Load Balancing (ELB Classic) Elastic Load Balancing v2 (ALB/NLB) Elastic Map Reduce (EMR) +Elastic Map Reduce Containers Elastic Transcoder ElasticSearch EventBridge (CloudWatch Events) diff --git a/website/docs/guides/custom-service-endpoints.html.md b/website/docs/guides/custom-service-endpoints.html.md index 318e4ec4334..f91175ce951 100644 --- 
a/website/docs/guides/custom-service-endpoints.html.md +++ b/website/docs/guides/custom-service-endpoints.html.md @@ -113,6 +113,7 @@ The Terraform AWS Provider allows the following endpoints to be customized:
* elastictranscoder
* elb
* emr
+* emrcontainers
* es
* firehose
* fms
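With the service wired into this list, a practitioner can point the new client at a nonstandard URL. A hedged sketch in the repository's usual test-configuration style — the helper name and the localhost URL are placeholders for illustration only:

```go
package main

import "fmt"

// testAccEmrContainersEndpointConfig renders a provider block that overrides
// the emrcontainers endpoint, e.g. to target a local mock of the service.
func testAccEmrContainersEndpointConfig(url string) string {
	return fmt.Sprintf(`
provider "aws" {
  endpoints {
    emrcontainers = %[1]q
  }
}
`, url)
}

func main() {
	fmt.Println(testAccEmrContainersEndpointConfig("http://localhost:4566"))
}
```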
From 69c4b2951aad594a7108ce75402877ef638c398d Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 26 Jan 2021 11:43:56 -0500 Subject: [PATCH 0873/1212] data-source/aws_lb_listener: Add acceptance test and CHANGELOG for #17238 Output from acceptance testing: ``` --- PASS: TestAccDataSourceAWSLBListener_DefaultAction_Forward (1065.87s) ``` --- .changelog/17238.txt | 3 + aws/data_source_aws_lb_listener_test.go | 93 +++++++++++++++++++++++++ 2 files changed, 96 insertions(+) create mode 100644 .changelog/17238.txt diff --git a/.changelog/17238.txt b/.changelog/17238.txt new file mode 100644 index 00000000000..435b238b818 --- /dev/null +++ b/.changelog/17238.txt @@ -0,0 +1,3 @@ +```release-note:bug +data-source/aws_lb_listener: Prevent error when retrieving a listener whose default action contains weighted target groups +``` diff --git a/aws/data_source_aws_lb_listener_test.go b/aws/data_source_aws_lb_listener_test.go index 437bb9d806e..8c717c4f4de 100644 --- a/aws/data_source_aws_lb_listener_test.go +++ b/aws/data_source_aws_lb_listener_test.go @@ -107,6 +107,26 @@ func TestAccDataSourceAWSLBListener_https(t *testing.T) { }) } +func TestAccDataSourceAWSLBListener_DefaultAction_Forward(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + dataSourceName := "data.aws_lb_listener.test" + resourceName := "aws_lb_listener.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAWSLBListenerConfigDefaultActionForward(rName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "default_action.#", resourceName, "default_action.#"), + resource.TestCheckResourceAttrPair(dataSourceName, "default_action.0.forward.#", resourceName, "default_action.0.forward.#"), + ), + }, + }, + }) +} + func testAccDataSourceAWSLBListenerConfigBasic(lbName, targetGroupName string) string { return fmt.Sprintf(` resource "aws_lb_listener" "front_end" { @@ -479,3 +499,76 @@ data "aws_lb_listener" "from_lb_and_port" { } `, lbName, targetGroupName, acctest.RandInt(), certificate, key) } + +func testAccDataSourceAWSLBListenerConfigDefaultActionForward(rName string) string { + return composeConfig( + testAccAvailableAZsNoOptInConfig(), + fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = "tf-acc-test-load-balancer" + } +} + +resource "aws_subnet" "test" { + count = 2 + + availability_zone = data.aws_availability_zones.available.names[count.index] + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, count.index) + vpc_id = aws_vpc.test.id + + tags = { + Name = "tf-acc-test-load-balancer" + } +} + +resource "aws_lb" "test" { + internal = true + name = %[1]q + + subnet_mapping { + subnet_id = aws_subnet.test[0].id + } + + subnet_mapping { + subnet_id = aws_subnet.test[1].id + } +} + +resource "aws_lb_target_group" "test" { + count = 2 + + port = 80 + protocol = "HTTP" + vpc_id = aws_vpc.test.id +} + +resource "aws_lb_listener" "test" { + load_balancer_arn = aws_lb.test.id + port = 80 + protocol = "HTTP" + + default_action { + type = "forward" + + forward { + target_group { + arn = aws_lb_target_group.test[0].arn + weight = 1 + } + + target_group { + arn = aws_lb_target_group.test[1].arn + weight = 2 + } + } + } +} + +data "aws_lb_listener" "test" { + arn = aws_lb_listener.test.arn +} +`, rName)) } From ab85d08e90671a1abda57867482dbce1aa85b0d7 Mon Sep 17 00:00:00 2001 From: Graham Davison 
Date: Tue, 26 Jan 2021 11:37:35 -0800 Subject: [PATCH 0874/1212] Adds error message wrapping --- aws/resource_aws_elasticache_replication_group.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index 1074dcf6e71..795102fed8c 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -912,10 +912,10 @@ func resourceAwsElasticacheReplicationGroupSetPrimaryClusterID(conn *elasticache func resourceAwsElasticacheReplicationGroupModify(conn *elasticache.ElastiCache, timeout time.Duration, input *elasticache.ModifyReplicationGroupInput) error { _, err := conn.ModifyReplicationGroup(input) if err != nil { - return err + return fmt.Errorf("error requesting modification: %w", err) } _, err = waiter.ReplicationGroupAvailable(conn, aws.StringValue(input.ReplicationGroupId), timeout) - return err + return fmt.Errorf("error waiting for modification: %w", err) } func formatReplicationGroupClusterID(replicationGroupID string, clusterID int) string { From 373b958fe0ff6d767089c56829da64ddf238008d Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 26 Jan 2021 13:33:16 -0800 Subject: [PATCH 0875/1212] Missed conditional --- aws/resource_aws_elasticache_replication_group.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index 25d2d427376..3850c9ceb3a 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -920,8 +920,12 @@ func resourceAwsElasticacheReplicationGroupModify(conn *elasticache.ElastiCache, if err != nil { return fmt.Errorf("error requesting modification: %w", err) } + _, err = waiter.ReplicationGroupAvailable(conn, aws.StringValue(input.ReplicationGroupId), timeout) - return fmt.Errorf("error waiting for modification: %w", err) + if err != nil { + fmt.Errorf("error waiting for modification: %w", err) + } + return nil } func formatReplicationGroupClusterID(replicationGroupID string, clusterID int) string { From 5a2ec5897d7bca415cf6dcd0e964b4515b1b3914 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 26 Jan 2021 13:37:47 -0800 Subject: [PATCH 0876/1212] Missed return --- aws/resource_aws_elasticache_replication_group.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go index 3850c9ceb3a..39310844648 100644 --- a/aws/resource_aws_elasticache_replication_group.go +++ b/aws/resource_aws_elasticache_replication_group.go @@ -923,7 +923,7 @@ func resourceAwsElasticacheReplicationGroupModify(conn *elasticache.ElastiCache, _, err = waiter.ReplicationGroupAvailable(conn, aws.StringValue(input.ReplicationGroupId), timeout) if err != nil { - fmt.Errorf("error waiting for modification: %w", err) + return fmt.Errorf("error waiting for modification: %w", err) } return nil } From 1179ef390f4a070b6ca83d0c11cf37ec6353e58b Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 26 Jan 2021 21:12:05 -0800 Subject: [PATCH 0877/1212] Adds changelog entry --- .changelog/17301.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/17301.txt diff --git a/.changelog/17301.txt b/.changelog/17301.txt new file mode 100644 index 00000000000..044dde175d4 --- /dev/null +++ b/.changelog/17301.txt @@ -0,0 +1,3 
@@ +```release-note:enhancement +resource/aws_elasticache_replication_group: Allow changing `cluster_mode.replica_count` without re-creation +``` From 8ca3cb688f9a839ad7d7e601f1a1fe22b25f1e5f Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 27 Jan 2021 08:19:02 -0500 Subject: [PATCH 0878/1212] docs/contributing: Add short note about deep error wrapping (#17292) Reference: https://github.com/hashicorp/terraform-provider-aws/pull/17206#discussion_r564596327 --- docs/contributing/error-handling.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/contributing/error-handling.md b/docs/contributing/error-handling.md index 301fb1924e0..1ad7c8b9f8e 100644 --- a/docs/contributing/error-handling.md +++ b/docs/contributing/error-handling.md @@ -61,6 +61,8 @@ For most use cases in this codebase, this means if code is receiving an error an return fmt.Errorf("adding some additional message: %w", err) ``` +This type of error wrapping should be applied to all Terraform resource logic. It should also be applied to any nested function that contains two or more error conditions (e.g. a function that calls an update API and waits for the update to finish) so practitioners and code maintainers have a clear idea of which operation generated the error. When returning errors in those situations, it is important to include only the necessary additional context. Resource logic will typically include information such as the type of operation and the resource identifier (e.g. `error updating Service Thing (%s): %w`), so these messages can be more terse, such as `error waiting for completion: %w`. + ### AWS Go SDK Errors The [AWS Go SDK documentation](https://docs.aws.amazon.com/sdk-for-go/) includes a [section on handling errors](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/handling-errors.html), which is recommended reading.
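Putting the guidance together with the ElastiCache changes above, the two layers look roughly like the following sketch; the function and resource names are illustrative stand-ins, not actual provider code:

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-ins for an AWS API call and a status waiter.
func requestModification(id string) error { return nil }
func waitForCompletion(id string) error   { return errors.New("timeout while waiting") }

// modifyAndWait is a nested helper with two error conditions; its messages
// stay terse because the caller adds the operation and identifier context.
func modifyAndWait(id string) error {
	if err := requestModification(id); err != nil {
		return fmt.Errorf("error requesting modification: %w", err)
	}
	if err := waitForCompletion(id); err != nil {
		return fmt.Errorf("error waiting for completion: %w", err)
	}
	return nil
}

// Resource-level logic wraps once more with the operation type and resource
// identifier, producing a message such as:
//   error updating Service Thing (id-123): error waiting for completion: timeout while waiting
func resourceServiceThingUpdate(id string) error {
	if err := modifyAndWait(id); err != nil {
		return fmt.Errorf("error updating Service Thing (%s): %w", id, err)
	}
	return nil
}

func main() {
	fmt.Println(resourceServiceThingUpdate("id-123"))
}
```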
From 3aac166d2976aa4057cb47ad03889be51a5bdd3a Mon Sep 17 00:00:00 2001 From: Chris Trawick Date: Wed, 27 Jan 2021 08:20:14 -0500 Subject: [PATCH 0879/1212] Added AWS Config Conformance Pack resource --- aws/configservice.go | 56 +++ aws/provider.go | 1 + aws/resource_aws_config_conformance_pack.go | 256 +++++++++++++ ...source_aws_config_conformance_pack_test.go | 340 ++++++++++++++++++ aws/resource_aws_config_test.go | 7 + .../r/config_conformance_pack.html.markdown | 56 +++ 6 files changed, 716 insertions(+) create mode 100644 aws/resource_aws_config_conformance_pack.go create mode 100644 aws/resource_aws_config_conformance_pack_test.go create mode 100644 website/docs/r/config_conformance_pack.html.markdown diff --git a/aws/configservice.go b/aws/configservice.go index 53dbfc10559..d83d1e43bbf 100644 --- a/aws/configservice.go +++ b/aws/configservice.go @@ -177,3 +177,59 @@ func configWaitForOrganizationRuleStatusUpdateSuccessful(conn *configservice.Con return err } + +func configDescribeConformancePack(conn *configservice.ConfigService, name string) (*configservice.ConformancePackDetail, error) { + input := &configservice.DescribeConformancePacksInput{ + ConformancePackNames: []*string{aws.String(name)}, + } + + for { + output, err := conn.DescribeConformancePacks(input) + + if err != nil { + return nil, err + } + + for _, pack := range output.ConformancePackDetails { + if aws.StringValue(pack.ConformancePackName) == name { + return pack, nil + } + } + + if aws.StringValue(output.NextToken) == "" { + break + } + + input.NextToken = output.NextToken + } + + return nil, nil +} + +func configDescribeConformancePackStatus(conn *configservice.ConfigService, name string) (*configservice.ConformancePackStatusDetail, error) { + input := &configservice.DescribeConformancePackStatusInput{ + ConformancePackNames: []*string{aws.String(name)}, + } + + for { + output, err := conn.DescribeConformancePackStatus(input) + + if err != nil { + return nil, err + } + + for _, status := range output.ConformancePackStatusDetails { + if aws.StringValue(status.ConformancePackName) == name { + return status, nil + } + } + + if aws.StringValue(output.NextToken) == "" { + break + } + + input.NextToken = output.NextToken + } + + return nil, nil +} diff --git a/aws/provider.go b/aws/provider.go index 367c9c4bf61..a60026468de 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -503,6 +503,7 @@ func Provider() *schema.Provider { "aws_config_configuration_aggregator": resourceAwsConfigConfigurationAggregator(), "aws_config_configuration_recorder": resourceAwsConfigConfigurationRecorder(), "aws_config_configuration_recorder_status": resourceAwsConfigConfigurationRecorderStatus(), + "aws_config_conformance_pack": resourceAwsConfigConformancePack(), "aws_config_delivery_channel": resourceAwsConfigDeliveryChannel(), "aws_config_organization_custom_rule": resourceAwsConfigOrganizationCustomRule(), "aws_config_organization_managed_rule": resourceAwsConfigOrganizationManagedRule(), diff --git a/aws/resource_aws_config_conformance_pack.go b/aws/resource_aws_config_conformance_pack.go new file mode 100644 index 00000000000..4e9e156ba2b --- /dev/null +++ b/aws/resource_aws_config_conformance_pack.go @@ -0,0 +1,256 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/configservice" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func resourceAwsConfigConformancePack() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsConfigConformancePackPut, + Read: resourceAwsConfigConformancePackRead, + Update: resourceAwsConfigConformancePackPut, + Delete: resourceAwsConfigConformancePackDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 51200), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z][-a-zA-Z0-9]*$`), "must be a valid conformance pack name"), + ), + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "template_s3_uri": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 256), + }, + "template_body": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 51200), + validateStringIsJsonOrYaml), + StateFunc: func(v interface{}) string { + template, _ := normalizeJsonOrYamlString(v) + return template + }, + }, + "delivery_s3_bucket": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 63), + validation.StringMatch(regexp.MustCompile("awsconfigconforms.+"), "must start with 'awsconfigconforms'"), + ), + }, + "delivery_s3_key_prefix": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 1024), + }, + "input_parameters": { + Type: schema.TypeMap, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + }, + }, + } +} + +func resourceAwsConfigConformancePackPut(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + + name := d.Get("name").(string) + input := configservice.PutConformancePackInput{ + ConformancePackName: aws.String(name), + } + + if v, ok := d.GetOk("delivery_s3_bucket"); ok { + input.DeliveryS3Bucket = aws.String(v.(string)) + } + if v, ok := d.GetOk("delivery_s3_key_prefix"); ok { + input.DeliveryS3KeyPrefix = aws.String(v.(string)) + } + if v, ok := d.GetOk("input_parameters"); ok { + input.ConformancePackInputParameters = expandConfigConformancePackParameters(v.(map[string]interface{})) + } + if v, ok := d.GetOk("template_body"); ok { + input.TemplateBody = aws.String(v.(string)) + } + if v, ok := d.GetOk("template_s3_uri"); ok { + input.TemplateS3Uri = aws.String(v.(string)) + } + + _, err := conn.PutConformancePack(&input) + if err != nil { + return fmt.Errorf("failed to put AWSConfig conformance pack %q: %s", name, err) + } + + d.SetId(name) + conf := resource.StateChangeConf{ + Pending: []string{ + configservice.ConformancePackStateCreateInProgress, + }, + Target: []string{ + configservice.ConformancePackStateCreateComplete, + }, + Timeout: 30 * time.Minute, + Refresh: refreshConformancePackStatus(d, conn), + } + if _, err := conf.WaitForState(); err != nil { + return err + } + return resourceAwsConfigConformancePackRead(d, meta) +} + +func expandConfigConformancePackParameters(m map[string]interface{}) (params []*configservice.ConformancePackInputParameter) { + for k, v := range m { + params = append(params, &configservice.ConformancePackInputParameter{ + ParameterName: aws.String(k), + ParameterValue: aws.String(v.(string)), + }) + } + return +} + +func refreshConformancePackStatus(d 
*schema.ResourceData, conn *configservice.ConfigService) func() (interface{}, string, error) { + return func() (interface{}, string, error) { + out, err := conn.DescribeConformancePackStatus(&configservice.DescribeConformancePackStatusInput{ + ConformancePackNames: []*string{aws.String(d.Id())}, + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && isAWSErr(awsErr, configservice.ErrCodeNoSuchConformancePackException, "") { + return 42, "", nil + } + return 42, "", fmt.Errorf("failed to describe conformance pack %q: %s", d.Id(), err) + } + if len(out.ConformancePackStatusDetails) < 1 { + return 42, "", nil + } + status := out.ConformancePackStatusDetails[0] + return out, *status.ConformancePackState, nil + } +} + +func resourceAwsConfigConformancePackRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + + out, err := conn.DescribeConformancePacks(&configservice.DescribeConformancePacksInput{ + ConformancePackNames: []*string{aws.String(d.Id())}, + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && isAWSErr(err, configservice.ErrCodeNoSuchConformancePackException, "") { + log.Printf("[WARN] Conformance Pack %q is gone (%s)", d.Id(), awsErr.Code()) + d.SetId("") + return nil + } + return err + } + + numberOfPacks := len(out.ConformancePackDetails) + if numberOfPacks < 1 { + log.Printf("[WARN] Conformance Pack %q is gone (no packs found)", d.Id()) + d.SetId("") + return nil + } + + if numberOfPacks > 1 { + return fmt.Errorf("expected exactly 1 conformance pack, received %d: %#v", + numberOfPacks, out.ConformancePackDetails) + } + + log.Printf("[DEBUG] AWS Config conformance packs received: %s", out) + + pack := out.ConformancePackDetails[0] + if err = d.Set("arn", pack.ConformancePackArn); err != nil { + return err + } + if err = d.Set("name", pack.ConformancePackName); err != nil { + return err + } + if err = d.Set("delivery_s3_bucket", pack.DeliveryS3Bucket); err != nil { + return err + } + if err = d.Set("delivery_s3_key_prefix", pack.DeliveryS3KeyPrefix); err != nil { + return err + } + + if pack.ConformancePackInputParameters != nil { + if err = d.Set("input_parameters", flattenConformancePackInputParameters(pack.ConformancePackInputParameters)); err != nil { + return err + } + } + + return nil +} + +func flattenConformancePackInputParameters(parameters []*configservice.ConformancePackInputParameter) (m map[string]string) { + m = make(map[string]string) + for _, p := range parameters { + m[*p.ParameterName] = *p.ParameterValue + } + return +} + +func resourceAwsConfigConformancePackDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).configconn + + name := d.Get("name").(string) + + log.Printf("[DEBUG] Deleting AWS Config conformance pack %q", name) + input := &configservice.DeleteConformancePackInput{ + ConformancePackName: aws.String(name), + } + err := resource.Retry(30*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteConformancePack(input) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceInUseException" { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) + if isResourceTimeoutError(err) { + _, err = conn.DeleteConformancePack(input) + } + if err != nil { + return fmt.Errorf("deleting conformance pack failed: %s", err) + } + + conf := resource.StateChangeConf{ + Pending: []string{ + configservice.ConformancePackStateDeleteInProgress, + }, + Target: []string{""}, + Timeout: 
30 * time.Minute, + Refresh: refreshConformancePackStatus(d, conn), + } + _, err = conf.WaitForState() + if err != nil { + return err + } + + log.Printf("[DEBUG] AWS conformance pack %q deleted", name) + + return nil +} diff --git a/aws/resource_aws_config_conformance_pack_test.go b/aws/resource_aws_config_conformance_pack_test.go new file mode 100644 index 00000000000..fd1f2e99b1a --- /dev/null +++ b/aws/resource_aws_config_conformance_pack_test.go @@ -0,0 +1,340 @@ +package aws + +import ( + "fmt" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/service/configservice" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func testAccConfigConformancePack_basic(t *testing.T) { + var pack configservice.ConformancePackDetail + rName := acctest.RandomWithPrefix("tf-acc-test") + rId := "IAM_PASSWORD_POLICY" + resourceName := "aws_config_conformance_pack.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckConfigConformancePackDestroy, + Steps: []resource.TestStep{ + { + Config: testAccConfigConformancePackConfigRuleIdentifier(rName, rId), + Check: resource.ComposeTestCheckFunc( + testAccCheckConfigConformancePackExists(resourceName, &pack), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "config", regexp.MustCompile(fmt.Sprintf("conformance-pack/%s/.+", rName))), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "delivery_s3_bucket", ""), + resource.TestCheckResourceAttr(resourceName, "delivery_s3_key_prefix", ""), + resource.TestCheckNoResourceAttr(resourceName, "input_parameters"), + testAccCheckConfigConformancePackSuccessful(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"template_body"}, + }, + }, + }) +} + +func testAccConfigConformancePack_disappears(t *testing.T) { + var pack configservice.ConformancePackDetail + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_config_conformance_pack.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckConfigConformancePackDestroy, + Steps: []resource.TestStep{ + { + Config: testAccConfigConformancePackConfigRuleIdentifier(rName, "IAM_PASSWORD_POLICY"), + Check: resource.ComposeTestCheckFunc( + testAccCheckConfigConformancePackExists(resourceName, &pack), + testAccCheckResourceDisappears(testAccProvider, resourceAwsConfigConformancePack(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccConfigConformancePack_InputParameters(t *testing.T) { + var pack configservice.ConformancePackDetail + rName := acctest.RandomWithPrefix("tf-acc-test") + rId := "IAM_PASSWORD_POLICY" + pKey := "ParamKey" + pValue := "ParamValue" + resourceName := "aws_config_conformance_pack.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckConfigConformancePackDestroy, + Steps: []resource.TestStep{ + { + Config: testAccConfigConformancePackConfigRuleIdentifierParameter(rName, rId, pKey, pValue), + Check: resource.ComposeTestCheckFunc( + testAccCheckConfigConformancePackExists(resourceName, &pack), + 
testAccMatchResourceAttrRegionalARN(resourceName, "arn", "config", regexp.MustCompile(fmt.Sprintf("conformance-pack/%s/.+", rName))), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "delivery_s3_bucket", ""), + resource.TestCheckResourceAttr(resourceName, "delivery_s3_key_prefix", ""), + resource.TestCheckResourceAttr(resourceName, "input_parameters."+pKey, pValue), + testAccCheckConfigConformancePackSuccessful(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"template_body"}, + }, + }, + }) +} + +func testAccConfigConformancePack_S3Delivery(t *testing.T) { + var pack configservice.ConformancePackDetail + rName := acctest.RandomWithPrefix("tf-acc-test") + bName := "awsconfigconforms" + rName + rId := "IAM_PASSWORD_POLICY" + resourceName := "aws_config_conformance_pack.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckConfigConformancePackDestroy, + Steps: []resource.TestStep{ + { + Config: testAccConfigConformancePackConfigRuleIdentifierS3Delivery(rName, rId, bName), + Check: resource.ComposeTestCheckFunc( + testAccCheckConfigConformancePackExists(resourceName, &pack), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "config", regexp.MustCompile(fmt.Sprintf("conformance-pack/%s/.+", rName))), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "delivery_s3_bucket", bName), + resource.TestCheckResourceAttr(resourceName, "delivery_s3_key_prefix", rId), + resource.TestCheckNoResourceAttr(resourceName, "input_parameters"), + testAccCheckConfigConformancePackSuccessful(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"template_body"}, + }, + }, + }) +} + +func testAccConfigConformancePack_S3Template(t *testing.T) { + var pack configservice.ConformancePackDetail + rName := acctest.RandomWithPrefix("tf-acc-test") + bName := rName + kName := rName + ".yaml" + rId := "IAM_PASSWORD_POLICY" + resourceName := "aws_config_conformance_pack.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckConfigConformancePackDestroy, + Steps: []resource.TestStep{ + { + Config: testAccConfigConformancePackConfigRuleIdentifierS3Template(rName, rId, bName, kName), + Check: resource.ComposeTestCheckFunc( + testAccCheckConfigConformancePackExists(resourceName, &pack), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "config", regexp.MustCompile(fmt.Sprintf("conformance-pack/%s/.+", rName))), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "delivery_s3_bucket", ""), + resource.TestCheckResourceAttr(resourceName, "delivery_s3_key_prefix", ""), + resource.TestCheckNoResourceAttr(resourceName, "input_parameters"), + testAccCheckConfigConformancePackSuccessful(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"template_s3_uri"}, + }, + }, + }) +} + +func testAccCheckConfigConformancePackDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).configconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != 
"aws_config_conformance_pack" { + continue + } + + rule, err := configDescribeConformancePack(conn, rs.Primary.ID) + + if isAWSErr(err, configservice.ErrCodeNoSuchConformancePackException, "") { + continue + } + + if err != nil { + return fmt.Errorf("error describing Config Managed Rule (%s): %s", rs.Primary.ID, err) + } + + if rule != nil { + return fmt.Errorf("Config Managed Rule (%s) still exists", rs.Primary.ID) + } + } + + return nil +} + +func testAccCheckConfigConformancePackExists(resourceName string, ocr *configservice.ConformancePackDetail) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not Found: %s", resourceName) + } + + conn := testAccProvider.Meta().(*AWSClient).configconn + + pack, err := configDescribeConformancePack(conn, rs.Primary.ID) + + if err != nil { + return fmt.Errorf("error describing conformance pack (%s): %s", rs.Primary.ID, err) + } + + if pack == nil { + return fmt.Errorf(" conformance pack (%s) not found", rs.Primary.ID) + } + + *ocr = *pack + + return nil + } +} + +func testAccCheckConfigConformancePackSuccessful(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not Found: %s", resourceName) + } + + conn := testAccProvider.Meta().(*AWSClient).configconn + + packStatus, err := configDescribeConformancePackStatus(conn, rs.Primary.ID) + if err != nil { + return fmt.Errorf("error describing conformance pack status (%s): %s", rs.Primary.ID, err) + } + if packStatus == nil { + return fmt.Errorf("conformance pack status (%s) not found", rs.Primary.ID) + } + if *packStatus.ConformancePackState != configservice.ConformancePackStateCreateComplete { + return fmt.Errorf("conformance pack (%s) returned %s status: %s", rs.Primary.ID, *packStatus.ConformancePackState, *packStatus.ConformancePackStatusReason) + } + + return nil + } +} +func testAccConfigConformancePackConfigRuleIdentifier(rName, ruleIdentifier string) string { + return fmt.Sprintf(` +resource "aws_config_conformance_pack" "test" { + name = %[1]q + template_body = < **NOTE:** The account must have a Configuration Recorder with proper IAM permissions before the conformance pack will successfully create or update. See also the [`aws_config_configuration_recorder` resource](/docs/providers/aws/r/config_configuration_recorder.html). 
+ +## Example Usage + +```hcl +resource "aws_config_conformance_pack" "test" { + name = "example" + template_body = < Date: Wed, 27 Jan 2021 13:22:03 +0000 Subject: [PATCH 0880/1212] Update CHANGELOG.md for #17292 --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d4cc3cd3308..f2ff7c51a84 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,9 @@ ENHANCEMENTS: BUG FIXES: +* data-source/aws_elb_hosted_zone_id: Correct values for `cn-north-1` and `cn-northwest-1` regions (https://github.com/hashicorp/terraform-provider-aws/issues/17226) +* data-source/aws_lb_listener: Prevent error when retrieving a listener whose default action contains weighted target groups (https://github.com/hashicorp/terraform-provider-aws/issues/17238) +* resource/aws_ebs_volume: Allow both `size` and `snapshot_id` attributes to be specified (https://github.com/hashicorp/terraform-provider-aws/issues/17243) * resource/aws_elasticache_replication_group: Correctly update computed `member_clusters` values (https://github.com/hashicorp/terraform-provider-aws/issues/17201) ## 3.25.0 (January 22, 2021) From 43238a981b986c80badf1f5e9c5b970492d1bf89 Mon Sep 17 00:00:00 2001 From: Chris Trawick Date: Wed, 27 Jan 2021 08:26:07 -0500 Subject: [PATCH 0881/1212] added changelog --- .changelog/17313.txt | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 .changelog/17313.txt diff --git a/.changelog/17313.txt b/.changelog/17313.txt new file mode 100644 index 00000000000..e69de29bb2d From f24444988e86d8d4129a97945024a185db8e4961 Mon Sep 17 00:00:00 2001 From: Chris Trawick Date: Wed, 27 Jan 2021 08:38:19 -0500 Subject: [PATCH 0882/1212] fixed doc links --- .changelog/17313.txt | 3 +++ website/docs/r/config_conformance_pack.html.markdown | 11 ++++++++--- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.changelog/17313.txt b/.changelog/17313.txt index e69de29bb2d..b07daddea2c 100644 --- a/.changelog/17313.txt +++ b/.changelog/17313.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_config_conformance_pack +``` \ No newline at end of file diff --git a/website/docs/r/config_conformance_pack.html.markdown b/website/docs/r/config_conformance_pack.html.markdown index d47cfd22e84..892f68eb267 100644 --- a/website/docs/r/config_conformance_pack.html.markdown +++ b/website/docs/r/config_conformance_pack.html.markdown @@ -3,14 +3,19 @@ subcategory: "Config" layout: "aws" page_title: "AWS: aws_config_conformance_pack" description: |- -Manages a Config Conformance Pack + Manages a Config Conformance Pack --- # Resource: aws_config_conformance_pack -Manages a Config Conformance Pack. More information about these rules can be found in the [Managing Conformance Packs Across all Accounts in Your Organization](https://docs.aws.amazon.com/config/latest/developerguide/conformance-pack-organization-apis.html) and [AWS Config Managed Rules](https://docs.aws.amazon.com/config/latest/developerguide/evaluate-config_use-managed-rules.html) documentation. Example conformance pack templates may be found in the [AWS Config Rules Repository](https://github.com/awslabs/aws-config-rules/tree/master/aws-config-conformance-packs). +Manages a Config Conformance Pack. More information about these rules can be found in the +[Conformance Packs](https://docs.aws.amazon.com/config/latest/developerguide/conformance-packs.html) documentation. 
+Example conformance pack templates may be found in the +[AWS Config Rules Repository](https://github.com/awslabs/aws-config-rules/tree/master/aws-config-conformance-packs). -~> **NOTE:** The account must have a Configuration Recorder with proper IAM permissions before the conformance pack will successfully create or update. See also the [`aws_config_configuration_recorder` resource](/docs/providers/aws/r/config_configuration_recorder.html). +~> **NOTE:** The account must have a Configuration Recorder with proper IAM permissions before the conformance pack will +successfully create or update. See also the +[`aws_config_configuration_recorder` resource](/docs/providers/aws/r/config_configuration_recorder.html). ## Example Usage From ec744cce7f0afffcd7cb41fd4338cee0fc170c96 Mon Sep 17 00:00:00 2001 From: Chris Trawick Date: Wed, 27 Jan 2021 09:43:37 -0500 Subject: [PATCH 0883/1212] Serialized tests for consistency with other config tests --- ...source_aws_config_conformance_pack_test.go | 70 ++++++++++++++++--- 1 file changed, 61 insertions(+), 9 deletions(-) diff --git a/aws/resource_aws_config_conformance_pack_test.go b/aws/resource_aws_config_conformance_pack_test.go index fd1f2e99b1a..b8787a554f8 100644 --- a/aws/resource_aws_config_conformance_pack_test.go +++ b/aws/resource_aws_config_conformance_pack_test.go @@ -17,7 +17,7 @@ func testAccConfigConformancePack_basic(t *testing.T) { rId := "IAM_PASSWORD_POLICY" resourceName := "aws_config_conformance_pack.test" - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckConfigConformancePackDestroy, @@ -49,7 +49,7 @@ func testAccConfigConformancePack_disappears(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_config_conformance_pack.test" - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckConfigConformancePackDestroy, @@ -74,7 +74,7 @@ func testAccConfigConformancePack_InputParameters(t *testing.T) { pValue := "ParamValue" resourceName := "aws_config_conformance_pack.test" - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckConfigConformancePackDestroy, @@ -108,7 +108,7 @@ func testAccConfigConformancePack_S3Delivery(t *testing.T) { rId := "IAM_PASSWORD_POLICY" resourceName := "aws_config_conformance_pack.test" - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckConfigConformancePackDestroy, @@ -143,7 +143,7 @@ func testAccConfigConformancePack_S3Template(t *testing.T) { rId := "IAM_PASSWORD_POLICY" resourceName := "aws_config_conformance_pack.test" - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckConfigConformancePackDestroy, @@ -244,9 +244,52 @@ func testAccCheckConfigConformancePackSuccessful(resourceName string) resource.T return nil } } + +func testAccConfigConformancePackConfigBase(rName string) string { + return fmt.Sprintf(` +data "aws_partition" "current" { +} + +resource "aws_config_configuration_recorder" "test" { + depends_on = [aws_iam_role_policy_attachment.test] 
+ + name = %[1]q + role_arn = aws_iam_role.test.arn +} + +resource "aws_iam_role" "test" { + name = %[1]q + + assume_role_policy = < Date: Wed, 27 Jan 2021 11:34:09 -0500 Subject: [PATCH 0884/1212] added more whitespace to strings --- aws/resource_aws_config_conformance_pack_test.go | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/aws/resource_aws_config_conformance_pack_test.go b/aws/resource_aws_config_conformance_pack_test.go index b8787a554f8..02b97b8d632 100644 --- a/aws/resource_aws_config_conformance_pack_test.go +++ b/aws/resource_aws_config_conformance_pack_test.go @@ -252,9 +252,8 @@ data "aws_partition" "current" { resource "aws_config_configuration_recorder" "test" { depends_on = [aws_iam_role_policy_attachment.test] - - name = %[1]q - role_arn = aws_iam_role.test.arn + name = %[1]q + role_arn = aws_iam_role.test.arn } resource "aws_iam_role" "test" { @@ -289,7 +288,7 @@ func testAccConfigConformancePackConfigRuleIdentifier(rName, ruleIdentifier stri %[3]s resource "aws_config_conformance_pack" "test" { - depends_on = [aws_config_configuration_recorder.test] + depends_on = [aws_config_configuration_recorder.test] name = %[1]q template_body = < Date: Wed, 27 Jan 2021 08:36:52 -0800 Subject: [PATCH 0885/1212] update tooling to use a fork of go-changelog to test storage mode feature (#17297) --- scripts/generate-changelog.sh | 4 ++-- tools/go.mod | 3 +++ tools/go.sum | 30 ++++++++++++------------------ 3 files changed, 17 insertions(+), 20 deletions(-) diff --git a/scripts/generate-changelog.sh b/scripts/generate-changelog.sh index 183b2ef01e1..a72ac73c7d3 100755 --- a/scripts/generate-changelog.sh +++ b/scripts/generate-changelog.sh @@ -29,8 +29,8 @@ CHANGELOG=$($(go env GOPATH)/bin/changelog-build -this-release $TARGET_SHA \ -git-dir $__parent \ -entries-dir .changelog \ -changelog-template $__dir/changelog.tmpl \ - -note-template $__dir/release-note.tmpl) - + -note-template $__dir/release-note.tmpl \ + -storage-mode filesystem) if [ -z "$CHANGELOG" ] then echo "No changelog generated." 
diff --git a/tools/go.mod b/tools/go.mod index 69b4bf6a6cc..2e6c49e71a3 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -8,7 +8,10 @@ require ( github.com/golangci/golangci-lint v1.35.2 github.com/hashicorp/go-changelog v0.0.0-20201005170154-56335215ce3a github.com/katbyte/terrafmt v0.2.1-0.20200913185704-5ff4421407b4 + github.com/pavius/impi v0.0.3 // indirect github.com/terraform-linters/tflint v0.20.3 ) replace github.com/katbyte/terrafmt => github.com/gdavison/terrafmt v0.2.1-0.20201026181004-a896893cd6af + +replace github.com/hashicorp/go-changelog => github.com/breathingdust/go-changelog v0.0.0-20210127001721-f985d5709c15 diff --git a/tools/go.sum b/tools/go.sum index bb430a73fbf..e252b7ba572 100644 --- a/tools/go.sum +++ b/tools/go.sum @@ -136,6 +136,8 @@ github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx2 github.com/bombsimon/wsl/v3 v3.1.0 h1:E5SRssoBgtVFPcYWUOFJEcgaySgdtTNYzsSKDOY7ss8= github.com/bombsimon/wsl/v3 v3.1.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/breathingdust/go-changelog v0.0.0-20210127001721-f985d5709c15 h1:OUv8PSGE8S6CPWWKc+2T7tyLwcKKERcvWn19O4KiUu4= +github.com/breathingdust/go-changelog v0.0.0-20210127001721-f985d5709c15/go.mod h1:3cN0yNLxr97LobXDDmNQBh8tgBssK7ftuGC5y1sc17M= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= @@ -155,7 +157,6 @@ github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/daixiang0/gci v0.2.8 h1:1mrIGMBQsBu0P7j7m1M8Lb+ZeZxsZL+jyGX4YoMJJpg= github.com/daixiang0/gci v0.2.8/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc= @@ -200,10 +201,16 @@ github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-critic/go-critic v0.5.3 h1:xQEweNxzBNpSqI3wotXZAixRarETng3PTG4pkcrLCOA= github.com/go-critic/go-critic v0.5.3/go.mod h1:2Lrs1m4jtOnnG/EdezbSpAoL0F2pRW+9HWJUZ+QaktY= +github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= +github.com/go-git/go-billy/v5 v5.0.0 h1:7NQHvd9FVid8VL4qVUMm8XifBK+2xCoZ2lSk0agRrHM= github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= +github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12 h1:PbKy9zOy4aAKrJ5pibIRpVO2BXnK1Tlcg+caKI7Ox5M= +github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= github.com/go-git/go-git/v5 v5.1.0/go.mod 
h1:ZKfuPUoY1ZqIG4QG9BDBh3G4gLM5zvPuSJAozQrZuyM= +github.com/go-git/go-git/v5 v5.2.0 h1:YPBLG/3UK1we1ohRkncLjaXWLW+HKp5QNM/jTli2JgI= +github.com/go-git/go-git/v5 v5.2.0/go.mod h1:kh02eMX+wdqqxgNMEyq8YgwlIOsDOa9homkUq1PoTMs= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -328,9 +335,9 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= @@ -391,8 +398,6 @@ github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyN github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-azure-helpers v0.12.0/go.mod h1:Zc3v4DNeX6PDdy7NljlYpnrdac1++qNW0I4U+ofGwpg= -github.com/hashicorp/go-changelog v0.0.0-20201005170154-56335215ce3a h1:qjGV7j9NUKL4R6WM3vk/lJTHHWZX5U238dgzw4C5wIw= -github.com/hashicorp/go-changelog v0.0.0-20201005170154-56335215ce3a/go.mod h1:IBDEkaLHr020JRb+nj0hv1IFU7aGA7kQ2n/oJlffSXw= github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= @@ -475,6 +480,7 @@ github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKe github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -532,7 +538,6 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -667,8 +672,9 @@ github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pavius/impi v0.0.3 h1:DND6MzU+BLABhOZXbELR3FU8b+zDgcq4dOCNLhiTYuI= +github.com/pavius/impi v0.0.3/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.8.0 h1:Keo9qb7iRJs2voHvunFtuuYFsbWeOBh8/P9v/kVMFtw= @@ -787,15 +793,11 @@ github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4= -github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= github.com/ssgreg/nlreturn/v2 v2.1.0 h1:6/s4Rc49L6Uo6RLjhWZGBpWWjfzk2yrf1nIW8m4wgVA= github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -893,7 +895,6 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto 
v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1082,7 +1083,6 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1244,12 +1244,6 @@ gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg= -gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= -gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg= -gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= -gopkg.in/src-d/go-git.v4 v4.13.1 h1:SRtFyV8Kxc0UP7aCHcijOMQGPxHSmMOPrzulQWolkYE= -gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= From 1a18635b18f07dfe046fdc018c96ed16dc5ba344 Mon Sep 17 00:00:00 2001 From: Chris Trawick Date: Wed, 27 Jan 2021 11:41:15 -0500 Subject: [PATCH 0886/1212] removed whitespace from a string, wishing the linter would make up its mind --- aws/resource_aws_config_conformance_pack_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_config_conformance_pack_test.go b/aws/resource_aws_config_conformance_pack_test.go index 02b97b8d632..63cecfb863e 100644 --- a/aws/resource_aws_config_conformance_pack_test.go +++ b/aws/resource_aws_config_conformance_pack_test.go @@ -309,8 +309,8 @@ func testAccConfigConformancePackConfigRuleIdentifierParameter(rName, ruleIdenti %[5]s resource "aws_config_conformance_pack" "test" { - depends_on = [aws_config_configuration_recorder.test] - name = %[1]q + depends_on = [aws_config_configuration_recorder.test] + name = %[1]q input_parameters = { %[3]s = %[4]q } From 65fea776c29b932896ce145c416ac0663442cf44 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 27 Jan 2021 12:39:44 -0500 Subject: [PATCH 0887/1212] New Resource: aws_imagebuilder_image (#16710) * New Resource: aws_imagebuilder_image Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16375 Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16377 Output from 
acceptance testing in AWS Commercial: ``` --- PASS: TestAccAwsImageBuilderImage_basic (1596.87s) --- PASS: TestAccAwsImageBuilderImage_disappears (1593.54s) --- PASS: TestAccAwsImageBuilderImage_DistributionConfigurationArn (1844.49s) --- PASS: TestAccAwsImageBuilderImage_EnhancedImageMetadataEnabled (2636.68s) --- PASS: TestAccAwsImageBuilderImage_ImageTestsConfiguration_ImageTestsEnabled (2616.36s) --- PASS: TestAccAwsImageBuilderImage_ImageTestsConfiguration_TimeoutMinutes (3026.17s) --- PASS: TestAccAwsImageBuilderImage_Tags (1514.62s) --- PASS: TestAccAwsImageBuilderImageDataSource_Arn_Aws (12.69s) --- PASS: TestAccAwsImageBuilderImageDataSource_Arn_Self (1458.04s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- PASS: TestAccAwsImageBuilderImage_basic (1463.62s) --- PASS: TestAccAwsImageBuilderImage_disappears (1460.27s) --- PASS: TestAccAwsImageBuilderImage_DistributionConfigurationArn (1850.90s) --- PASS: TestAccAwsImageBuilderImage_EnhancedImageMetadataEnabled (2648.38s) --- PASS: TestAccAwsImageBuilderImage_ImageTestsConfiguration_ImageTestsEnabled (2648.21s) --- PASS: TestAccAwsImageBuilderImage_ImageTestsConfiguration_TimeoutMinutes (2914.17s) --- PASS: TestAccAwsImageBuilderImage_Tags (1643.87s) --- PASS: TestAccAwsImageBuilderImageDataSource_Arn_Aws (19.19s) --- PASS: TestAccAwsImageBuilderImageDataSource_Arn_Self (1462.29s) ``` * Update CHANGELOG for #16710 --- .changelog/16710.txt | 7 + aws/data_source_aws_imagebuilder_image.go | 186 ++++++ ...data_source_aws_imagebuilder_image_test.go | 204 +++++++ .../service/imagebuilder/waiter/status.go | 36 ++ .../service/imagebuilder/waiter/waiter.go | 33 ++ aws/provider.go | 2 + aws/resource_aws_imagebuilder_image.go | 351 +++++++++++ aws/resource_aws_imagebuilder_image_test.go | 549 ++++++++++++++++++ .../docs/d/imagebuilder_image.html.markdown | 51 ++ .../docs/r/imagebuilder_image.html.markdown | 73 +++ 10 files changed, 1492 insertions(+) create mode 100644 .changelog/16710.txt create mode 100644 aws/data_source_aws_imagebuilder_image.go create mode 100644 aws/data_source_aws_imagebuilder_image_test.go create mode 100644 aws/internal/service/imagebuilder/waiter/status.go create mode 100644 aws/internal/service/imagebuilder/waiter/waiter.go create mode 100644 aws/resource_aws_imagebuilder_image.go create mode 100644 aws/resource_aws_imagebuilder_image_test.go create mode 100644 website/docs/d/imagebuilder_image.html.markdown create mode 100644 website/docs/r/imagebuilder_image.html.markdown diff --git a/.changelog/16710.txt b/.changelog/16710.txt new file mode 100644 index 00000000000..46ed56618bc --- /dev/null +++ b/.changelog/16710.txt @@ -0,0 +1,7 @@ +```release-note:new-datasource +aws_imagebuilder_image +``` + +```release-note:new-resource +aws_imagebuilder_image +``` diff --git a/aws/data_source_aws_imagebuilder_image.go b/aws/data_source_aws_imagebuilder_image.go new file mode 100644 index 00000000000..69e2d35674e --- /dev/null +++ b/aws/data_source_aws_imagebuilder_image.go @@ -0,0 +1,186 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/imagebuilder" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" +) + +func dataSourceAwsImageBuilderImage() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsImageBuilderImageRead, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, + 
}, + "build_version_arn": { + Type: schema.TypeString, + Computed: true, + }, + "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "distribution_configuration_arn": { + Type: schema.TypeString, + Computed: true, + }, + "enhanced_image_metadata_enabled": { + Type: schema.TypeBool, + Computed: true, + }, + "image_recipe_arn": { + Type: schema.TypeString, + Computed: true, + }, + "image_tests_configuration": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "image_tests_enabled": { + Type: schema.TypeBool, + Computed: true, + }, + "timeout_minutes": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + "infrastructure_configuration_arn": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "os_version": { + Type: schema.TypeString, + Computed: true, + }, + "output_resources": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "amis": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "image": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "region": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "platform": { + Type: schema.TypeString, + Computed: true, + }, + "tags": tagsSchemaComputed(), + "version": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAwsImageBuilderImageRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).imagebuilderconn + + input := &imagebuilder.GetImageInput{} + + if v, ok := d.GetOk("arn"); ok { + input.ImageBuildVersionArn = aws.String(v.(string)) + } + + output, err := conn.GetImage(input) + + if err != nil { + return fmt.Errorf("error getting Image Builder Image: %w", err) + } + + if output == nil || output.Image == nil { + return fmt.Errorf("error getting Image Builder Image: empty response") + } + + image := output.Image + + d.SetId(aws.StringValue(image.Arn)) + + // To prevent Terraform errors, only reset arn if not configured. + // The configured ARN may contain x.x.x wildcards while the API returns + // the full build version #.#.#/# suffix. 
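+ // For example, an "arn" configured as ".../image/amazon-linux-2-x86/x.x.x"
+ // is returned by GetImage as ".../image/amazon-linux-2-x86/1.2.3/1".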
+ if _, ok := d.GetOk("arn"); !ok { + d.Set("arn", image.Arn) + } + + d.Set("build_version_arn", image.Arn) + d.Set("date_created", image.DateCreated) + + if image.DistributionConfiguration != nil { + d.Set("distribution_configuration_arn", image.DistributionConfiguration.Arn) + } + + d.Set("enhanced_image_metadata_enabled", image.EnhancedImageMetadataEnabled) + + if image.ImageRecipe != nil { + d.Set("image_recipe_arn", image.ImageRecipe.Arn) + } + + if image.ImageTestsConfiguration != nil { + d.Set("image_tests_configuration", []interface{}{flattenImageBuilderImageTestsConfiguration(image.ImageTestsConfiguration)}) + } else { + d.Set("image_tests_configuration", nil) + } + + if image.InfrastructureConfiguration != nil { + d.Set("infrastructure_configuration_arn", image.InfrastructureConfiguration.Arn) + } + + d.Set("name", image.Name) + d.Set("platform", image.Platform) + d.Set("os_version", image.OsVersion) + + if image.OutputResources != nil { + d.Set("output_resources", []interface{}{flattenImageBuilderOutputResources(image.OutputResources)}) + } else { + d.Set("output_resources", nil) + } + + d.Set("tags", keyvaluetags.ImagebuilderKeyValueTags(image.Tags).IgnoreAws().IgnoreConfig(meta.(*AWSClient).IgnoreTagsConfig).Map()) + d.Set("version", image.Version) + + return nil +} diff --git a/aws/data_source_aws_imagebuilder_image_test.go b/aws/data_source_aws_imagebuilder_image_test.go new file mode 100644 index 00000000000..cc4fbfd4981 --- /dev/null +++ b/aws/data_source_aws_imagebuilder_image_test.go @@ -0,0 +1,204 @@ +package aws + +import ( + "fmt" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/service/imagebuilder" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccAwsImageBuilderImageDataSource_Arn_Aws(t *testing.T) { + dataSourceName := "data.aws_imagebuilder_image.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProviderFactories: testAccProviderFactories, + CheckDestroy: testAccCheckAwsImageBuilderImageDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsImageBuilderImageDataSourceConfigArnAws(), + Check: resource.ComposeTestCheckFunc( + testAccMatchResourceAttrRegionalARNAccountID(dataSourceName, "arn", "imagebuilder", "aws", regexp.MustCompile(`image/amazon-linux-2-x86/x.x.x`)), + testAccMatchResourceAttrRegionalARNAccountID(dataSourceName, "build_version_arn", "imagebuilder", "aws", regexp.MustCompile(`image/amazon-linux-2-x86/\d+\.\d+\.\d+/\d+`)), + testAccCheckResourceAttrRfc3339(dataSourceName, "date_created"), + resource.TestCheckNoResourceAttr(dataSourceName, "distribution_configuration_arn"), + resource.TestCheckResourceAttr(dataSourceName, "enhanced_image_metadata_enabled", "true"), + resource.TestCheckNoResourceAttr(dataSourceName, "image_recipe_arn"), + resource.TestCheckResourceAttr(dataSourceName, "image_tests_configuration.#", "0"), + resource.TestCheckNoResourceAttr(dataSourceName, "infrastructure_configuration_arn"), + resource.TestCheckResourceAttr(dataSourceName, "name", "Amazon Linux 2 x86"), + resource.TestCheckResourceAttr(dataSourceName, "os_version", "Amazon Linux 2"), + resource.TestCheckResourceAttr(dataSourceName, "output_resources.#", "1"), + resource.TestCheckResourceAttr(dataSourceName, "platform", imagebuilder.PlatformLinux), + resource.TestCheckResourceAttr(dataSourceName, "tags.%", "0"), + resource.TestMatchResourceAttr(dataSourceName, "version", regexp.MustCompile(`\d+\.\d+\.\d+/\d+`)), 
+ ), + }, + }, + }) +} + +// Verify additional fields returned by Self owned Images +func TestAccAwsImageBuilderImageDataSource_Arn_Self(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + dataSourceName := "data.aws_imagebuilder_image.test" + resourceName := "aws_imagebuilder_image.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProviderFactories: testAccProviderFactories, + CheckDestroy: testAccCheckAwsImageBuilderImageDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsImageBuilderImageDataSourceConfigArnSelf(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "arn", resourceName, "arn"), + resource.TestCheckResourceAttrPair(dataSourceName, "build_version_arn", resourceName, "arn"), + resource.TestCheckResourceAttrPair(dataSourceName, "date_created", resourceName, "date_created"), + resource.TestCheckResourceAttrPair(dataSourceName, "distribution_configuration_arn", resourceName, "distribution_configuration_arn"), + resource.TestCheckResourceAttrPair(dataSourceName, "enhanced_image_metadata_enabled", resourceName, "enhanced_image_metadata_enabled"), + resource.TestCheckResourceAttrPair(dataSourceName, "image_recipe_arn", resourceName, "image_recipe_arn"), + resource.TestCheckResourceAttrPair(dataSourceName, "image_tests_configuration.#", resourceName, "image_tests_configuration.#"), + resource.TestCheckResourceAttrPair(dataSourceName, "infrastructure_configuration_arn", resourceName, "infrastructure_configuration_arn"), + resource.TestCheckResourceAttrPair(dataSourceName, "name", resourceName, "name"), + resource.TestCheckResourceAttrPair(dataSourceName, "os_version", resourceName, "os_version"), + resource.TestCheckResourceAttrPair(dataSourceName, "output_resources.#", resourceName, "output_resources.#"), + resource.TestCheckResourceAttrPair(dataSourceName, "platform", resourceName, "platform"), + resource.TestCheckResourceAttrPair(dataSourceName, "tags.%", resourceName, "tags.%"), + resource.TestCheckResourceAttrPair(dataSourceName, "version", resourceName, "version"), + ), + }, + }, + }) +} + +func testAccAwsImageBuilderImageDataSourceConfigArnAws() string { + return ` +data "aws_partition" "current" {} + +data "aws_region" "current" {} + +data "aws_imagebuilder_image" "test" { + arn = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:image/amazon-linux-2-x86/x.x.x" +} +` +} + +func testAccAwsImageBuilderImageDataSourceConfigArnSelf(rName string) string { + return fmt.Sprintf(` +data "aws_imagebuilder_component" "update-linux" { + arn = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:component/update-linux/1.0.0" +} + +data "aws_region" "current" {} + +data "aws_partition" "current" {} + +resource "aws_iam_instance_profile" "test" { + name = aws_iam_role.test.name + role = aws_iam_role.test.name + + depends_on = [ + aws_iam_role_policy_attachment.AmazonSSMManagedInstanceCore, + aws_iam_role_policy_attachment.EC2InstanceProfileForImageBuilder, + ] +} + +resource "aws_iam_role" "test" { + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "ec2.${data.aws_partition.current.dns_suffix}" + } + Sid = "" + }] + }) + name = %[1]q +} + +resource "aws_iam_role_policy_attachment" "AmazonSSMManagedInstanceCore" { + policy_arn = 
"arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonSSMManagedInstanceCore" + role = aws_iam_role.test.name +} + +resource "aws_iam_role_policy_attachment" "EC2InstanceProfileForImageBuilder" { + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/EC2InstanceProfileForImageBuilder" + role = aws_iam_role.test.name +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_default_route_table" "test" { + default_route_table_id = aws_vpc.test.default_route_table_id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id + } +} + +resource "aws_default_security_group" "test" { + vpc_id = aws_vpc.test.id + + egress { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + to_port = 0 + } + + ingress { + from_port = 0 + protocol = -1 + self = true + to_port = 0 + } +} + +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_subnet" "test" { + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + map_public_ip_on_launch = true + vpc_id = aws_vpc.test.id +} + +resource "aws_imagebuilder_image_recipe" "test" { + component { + component_arn = data.aws_imagebuilder_component.update-linux.arn + } + + name = %[1]q + parent_image = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:image/amazon-linux-2-x86/x.x.x" + version = "1.0.0" +} + +resource "aws_imagebuilder_infrastructure_configuration" "test" { + instance_profile_name = aws_iam_instance_profile.test.name + name = %[1]q + security_group_ids = [aws_default_security_group.test.id] + subnet_id = aws_subnet.test.id + + depends_on = [aws_default_route_table.test] +} + +resource "aws_imagebuilder_image" "test" { + image_recipe_arn = aws_imagebuilder_image_recipe.test.arn + infrastructure_configuration_arn = aws_imagebuilder_infrastructure_configuration.test.arn +} + +data "aws_imagebuilder_image" "test" { + arn = aws_imagebuilder_image.test.arn +} +`, rName) +} diff --git a/aws/internal/service/imagebuilder/waiter/status.go b/aws/internal/service/imagebuilder/waiter/status.go new file mode 100644 index 00000000000..d8f48ad3973 --- /dev/null +++ b/aws/internal/service/imagebuilder/waiter/status.go @@ -0,0 +1,36 @@ +package waiter + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/imagebuilder" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +// ImageStatus fetches the Image and its Status +func ImageStatus(conn *imagebuilder.Imagebuilder, imageBuildVersionArn string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + input := &imagebuilder.GetImageInput{ + ImageBuildVersionArn: aws.String(imageBuildVersionArn), + } + + output, err := conn.GetImage(input) + + if err != nil { + return nil, imagebuilder.ImageStatusPending, err + } + + if output == nil || output.Image == nil || output.Image.State == nil { + return nil, imagebuilder.ImageStatusPending, nil + } + + status := aws.StringValue(output.Image.State.Status) + + if status == imagebuilder.ImageStatusFailed { + return output.Image, status, fmt.Errorf("%s", aws.StringValue(output.Image.State.Reason)) + } + + return output.Image, status, nil + } +} diff --git a/aws/internal/service/imagebuilder/waiter/waiter.go b/aws/internal/service/imagebuilder/waiter/waiter.go new file mode 100644 index 00000000000..ae111980e12 --- /dev/null +++ b/aws/internal/service/imagebuilder/waiter/waiter.go @@ -0,0 +1,33 @@ +package waiter + +import ( + "time" + + 
"github.com/aws/aws-sdk-go/service/imagebuilder" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +// ImageStatusAvailable waits for an Image to return Available +func ImageStatusAvailable(conn *imagebuilder.Imagebuilder, imageBuildVersionArn string, timeout time.Duration) (*imagebuilder.Image, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + imagebuilder.ImageStatusBuilding, + imagebuilder.ImageStatusCreating, + imagebuilder.ImageStatusDistributing, + imagebuilder.ImageStatusIntegrating, + imagebuilder.ImageStatusPending, + imagebuilder.ImageStatusTesting, + }, + Target: []string{imagebuilder.ImageStatusAvailable}, + Refresh: ImageStatus(conn, imageBuildVersionArn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForState() + + if v, ok := outputRaw.(*imagebuilder.Image); ok { + return v, err + } + + return nil, err +} diff --git a/aws/provider.go b/aws/provider.go index ef76a2851e7..53a745319ca 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -272,6 +272,7 @@ func Provider() *schema.Provider { "aws_identitystore_user": dataSourceAwsIdentityStoreUser(), "aws_imagebuilder_component": dataSourceAwsImageBuilderComponent(), "aws_imagebuilder_distribution_configuration": datasourceAwsImageBuilderDistributionConfiguration(), + "aws_imagebuilder_image": dataSourceAwsImageBuilderImage(), "aws_imagebuilder_image_pipeline": dataSourceAwsImageBuilderImagePipeline(), "aws_imagebuilder_image_recipe": dataSourceAwsImageBuilderImageRecipe(), "aws_imagebuilder_infrastructure_configuration": datasourceAwsImageBuilderInfrastructureConfiguration(), @@ -729,6 +730,7 @@ func Provider() *schema.Provider { "aws_iam_user_login_profile": resourceAwsIamUserLoginProfile(), "aws_imagebuilder_component": resourceAwsImageBuilderComponent(), "aws_imagebuilder_distribution_configuration": resourceAwsImageBuilderDistributionConfiguration(), + "aws_imagebuilder_image": resourceAwsImageBuilderImage(), "aws_imagebuilder_image_pipeline": resourceAwsImageBuilderImagePipeline(), "aws_imagebuilder_image_recipe": resourceAwsImageBuilderImageRecipe(), "aws_imagebuilder_infrastructure_configuration": resourceAwsImageBuilderInfrastructureConfiguration(), diff --git a/aws/resource_aws_imagebuilder_image.go b/aws/resource_aws_imagebuilder_image.go new file mode 100644 index 00000000000..99c3f3128d7 --- /dev/null +++ b/aws/resource_aws_imagebuilder_image.go @@ -0,0 +1,351 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/imagebuilder" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/imagebuilder/waiter" +) + +func resourceAwsImageBuilderImage() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsImageBuilderImageCreate, + Read: resourceAwsImageBuilderImageRead, + Update: resourceAwsImageBuilderImageUpdate, + Delete: resourceAwsImageBuilderImageDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, 
+ "date_created": { + Type: schema.TypeString, + Computed: true, + }, + "distribution_configuration_arn": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringMatch(regexp.MustCompile(`^arn:aws[^:]*:imagebuilder:[^:]+:(?:\d{12}|aws):distribution-configuration/[a-z0-9-_]+$`), "valid distribution configuration ARN must be provided"), + }, + "enhanced_image_metadata_enabled": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: true, + }, + "image_recipe_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringMatch(regexp.MustCompile(`^arn:aws[^:]*:imagebuilder:[^:]+:(?:\d{12}|aws):image-recipe/[a-z0-9-_]+/\d+\.\d+\.\d+$`), "valid image recipe ARN must be provided"), + }, + "image_tests_configuration": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "image_tests_enabled": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: true, + }, + "timeout_minutes": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 720, + ValidateFunc: validation.IntBetween(60, 1440), + }, + }, + }, + }, + "infrastructure_configuration_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringMatch(regexp.MustCompile(`^arn:aws[^:]*:imagebuilder:[^:]+:(?:\d{12}|aws):infrastructure-configuration/[a-z0-9-_]+$`), "valid infrastructure configuration ARN must be provided"), + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "os_version": { + Type: schema.TypeString, + Computed: true, + }, + "output_resources": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "amis": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "account_id": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "image": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "region": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "platform": { + Type: schema.TypeString, + Computed: true, + }, + "tags": tagsSchema(), + "version": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsImageBuilderImageCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).imagebuilderconn + + input := &imagebuilder.CreateImageInput{ + ClientToken: aws.String(resource.UniqueId()), + EnhancedImageMetadataEnabled: aws.Bool(d.Get("enhanced_image_metadata_enabled").(bool)), + } + + if v, ok := d.GetOk("distribution_configuration_arn"); ok { + input.DistributionConfigurationArn = aws.String(v.(string)) + } + + if v, ok := d.GetOk("image_recipe_arn"); ok { + input.ImageRecipeArn = aws.String(v.(string)) + } + + if v, ok := d.GetOk("image_tests_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.ImageTestsConfiguration = expandImageBuilderImageTestConfiguration(v.([]interface{})[0].(map[string]interface{})) + } + + if v, ok := d.GetOk("infrastructure_configuration_arn"); ok { + input.InfrastructureConfigurationArn = aws.String(v.(string)) + } + + if v, ok := d.GetOk("tags"); ok && len(v.(map[string]interface{})) > 0 { + input.Tags = 
keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().ImagebuilderTags() + } + + output, err := conn.CreateImage(input) + + if err != nil { + return fmt.Errorf("error creating Image Builder Image: %w", err) + } + + if output == nil { + return fmt.Errorf("error creating Image Builder Image: empty response") + } + + d.SetId(aws.StringValue(output.ImageBuildVersionArn)) + + if _, err := waiter.ImageStatusAvailable(conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return fmt.Errorf("error waiting for Image Builder Image (%s) to become available: %w", d.Id(), err) + } + + return resourceAwsImageBuilderImageRead(d, meta) +} + +func resourceAwsImageBuilderImageRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).imagebuilderconn + + input := &imagebuilder.GetImageInput{ + ImageBuildVersionArn: aws.String(d.Id()), + } + + output, err := conn.GetImage(input) + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, imagebuilder.ErrCodeResourceNotFoundException) { + log.Printf("[WARN] Image Builder Image (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error getting Image Builder Image (%s): %w", d.Id(), err) + } + + if output == nil || output.Image == nil { + return fmt.Errorf("error getting Image Builder Image (%s): empty response", d.Id()) + } + + image := output.Image + + d.Set("arn", image.Arn) + d.Set("date_created", image.DateCreated) + + if image.DistributionConfiguration != nil { + d.Set("distribution_configuration_arn", image.DistributionConfiguration.Arn) + } + + d.Set("enhanced_image_metadata_enabled", image.EnhancedImageMetadataEnabled) + + if image.ImageRecipe != nil { + d.Set("image_recipe_arn", image.ImageRecipe.Arn) + } + + if image.ImageTestsConfiguration != nil { + d.Set("image_tests_configuration", []interface{}{flattenImageBuilderImageTestsConfiguration(image.ImageTestsConfiguration)}) + } else { + d.Set("image_tests_configuration", nil) + } + + if image.InfrastructureConfiguration != nil { + d.Set("infrastructure_configuration_arn", image.InfrastructureConfiguration.Arn) + } + + d.Set("name", image.Name) + d.Set("platform", image.Platform) + d.Set("os_version", image.OsVersion) + + if image.OutputResources != nil { + d.Set("output_resources", []interface{}{flattenImageBuilderOutputResources(image.OutputResources)}) + } else { + d.Set("output_resources", nil) + } + + d.Set("tags", keyvaluetags.ImagebuilderKeyValueTags(image.Tags).IgnoreAws().IgnoreConfig(meta.(*AWSClient).IgnoreTagsConfig).Map()) + d.Set("version", image.Version) + + return nil +} + +func resourceAwsImageBuilderImageUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).imagebuilderconn + + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.ImagebuilderUpdateTags(conn, d.Id(), o, n); err != nil { + return fmt.Errorf("error updating tags for Image Builder Image (%s): %w", d.Id(), err) + } + } + + return resourceAwsImageBuilderImageRead(d, meta) +} + +func resourceAwsImageBuilderImageDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).imagebuilderconn + + input := &imagebuilder.DeleteImageInput{ + ImageBuildVersionArn: aws.String(d.Id()), + } + + _, err := conn.DeleteImage(input) + + if tfawserr.ErrCodeEquals(err, imagebuilder.ErrCodeResourceNotFoundException) { + return nil + } + + if err != nil { + return fmt.Errorf("error deleting Image Builder Image (%s): %w", d.Id(), err) + } + + return nil +} + +func 
flattenImageBuilderOutputResources(apiObject *imagebuilder.OutputResources) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.Amis; v != nil { + tfMap["amis"] = flattenImageBuilderAmis(v) + } + + return tfMap +} + +func flattenImageBuilderAmi(apiObject *imagebuilder.Ami) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.AccountId; v != nil { + tfMap["account_id"] = aws.StringValue(v) + } + + if v := apiObject.Description; v != nil { + tfMap["description"] = aws.StringValue(v) + } + + if v := apiObject.Image; v != nil { + tfMap["image"] = aws.StringValue(v) + } + + if v := apiObject.Name; v != nil { + tfMap["name"] = aws.StringValue(v) + } + + if v := apiObject.Region; v != nil { + tfMap["region"] = aws.StringValue(v) + } + + return tfMap +} + +func flattenImageBuilderAmis(apiObjects []*imagebuilder.Ami) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var tfList []interface{} + + for _, apiObject := range apiObjects { + if apiObject == nil { + continue + } + + tfList = append(tfList, flattenImageBuilderAmi(apiObject)) + } + + return tfList +} diff --git a/aws/resource_aws_imagebuilder_image_test.go b/aws/resource_aws_imagebuilder_image_test.go new file mode 100644 index 00000000000..1bc883d76a7 --- /dev/null +++ b/aws/resource_aws_imagebuilder_image_test.go @@ -0,0 +1,549 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/imagebuilder" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func init() { + resource.AddTestSweepers("aws_imagebuilder_image", &resource.Sweeper{ + Name: "aws_imagebuilder_image", + F: testSweepImageBuilderImages, + }) +} + +func testSweepImageBuilderImages(region string) error { + client, err := sharedClientForRegion(region) + if err != nil { + return fmt.Errorf("error getting client: %w", err) + } + conn := client.(*AWSClient).imagebuilderconn + + var sweeperErrs *multierror.Error + + input := &imagebuilder.ListImagesInput{ + Owner: aws.String(imagebuilder.OwnershipSelf), + } + + err = conn.ListImagesPages(input, func(page *imagebuilder.ListImagesOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, imageVersion := range page.ImageVersionList { + if imageVersion == nil { + continue + } + + arn := aws.StringValue(imageVersion.Arn) + + r := resourceAwsImageBuilderImage() + d := r.Data(nil) + d.SetId(arn) + + err := r.Delete(d, client) + + if err != nil { + sweeperErr := fmt.Errorf("error deleting Image Builder Image (%s): %w", arn, err) + log.Printf("[ERROR] %s", sweeperErr) + sweeperErrs = multierror.Append(sweeperErrs, sweeperErr) + continue + } + } + + return !lastPage + }) + + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping Image Builder Image sweep for %s: %s", region, err) + return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors + } + + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing Image Builder Images: %w", err)) + } + + return sweeperErrs.ErrorOrNil() +} + +func TestAccAwsImageBuilderImage_basic(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") 
+ imageRecipeResourceName := "aws_imagebuilder_image_recipe.test" + infrastructureConfigurationResourceName := "aws_imagebuilder_infrastructure_configuration.test" + resourceName := "aws_imagebuilder_image.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsImageBuilderImageDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsImageBuilderImageConfigRequired(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsImageBuilderImageExists(resourceName), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "imagebuilder", regexp.MustCompile(fmt.Sprintf("image/%s/1.0.0/[1-9][0-9]*", rName))), + testAccCheckResourceAttrRfc3339(resourceName, "date_created"), + resource.TestCheckNoResourceAttr(resourceName, "distribution_configuration_arn"), + resource.TestCheckResourceAttr(resourceName, "enhanced_image_metadata_enabled", "true"), + resource.TestCheckResourceAttrPair(resourceName, "image_recipe_arn", imageRecipeResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "image_tests_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "image_tests_configuration.0.image_tests_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "image_tests_configuration.0.timeout_minutes", "720"), + resource.TestCheckResourceAttrPair(resourceName, "infrastructure_configuration_arn", infrastructureConfigurationResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "platform", imagebuilder.PlatformLinux), + resource.TestCheckResourceAttr(resourceName, "os_version", "Amazon Linux 2"), + resource.TestCheckResourceAttr(resourceName, "output_resources.#", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestMatchResourceAttr(resourceName, "version", regexp.MustCompile(`1.0.0/[1-9][0-9]*`)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAwsImageBuilderImage_disappears(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_imagebuilder_image.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsImageBuilderImageDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsImageBuilderImageConfigRequired(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsImageBuilderImageExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsImageBuilderImage(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccAwsImageBuilderImage_DistributionConfigurationArn(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + distributionConfigurationResourceName := "aws_imagebuilder_distribution_configuration.test" + resourceName := "aws_imagebuilder_image.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsImageBuilderImageDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsImageBuilderImageConfigDistributionConfigurationArn(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsImageBuilderImageExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "distribution_configuration_arn", distributionConfigurationResourceName, "arn"), + 
), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAwsImageBuilderImage_EnhancedImageMetadataEnabled(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_imagebuilder_image.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsImageBuilderImageDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsImageBuilderImageConfigEnhancedImageMetadataEnabled(rName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsImageBuilderImageExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "enhanced_image_metadata_enabled", "false"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAwsImageBuilderImage_ImageTestsConfiguration_ImageTestsEnabled(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_imagebuilder_image.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsImageBuilderImageDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsImageBuilderImageConfigImageTestsConfigurationImageTestsEnabled(rName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsImageBuilderImageExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "image_tests_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "image_tests_configuration.0.image_tests_enabled", "false"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAwsImageBuilderImage_ImageTestsConfiguration_TimeoutMinutes(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_imagebuilder_image.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsImageBuilderImageDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsImageBuilderImageConfigImageTestsConfigurationTimeoutMinutes(rName, 721), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsImageBuilderImageExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "image_tests_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "image_tests_configuration.0.timeout_minutes", "721"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAwsImageBuilderImage_Tags(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_imagebuilder_image.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsImageBuilderImageDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsImageBuilderImageConfigTags1(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsImageBuilderImageExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsImageBuilderImageConfigTags2(rName, "key1", "value1updated", "key2", "value2"), + 
Check: resource.ComposeTestCheckFunc( + testAccCheckAwsImageBuilderImageExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccAwsImageBuilderImageConfigTags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsImageBuilderImageExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + +func testAccCheckAwsImageBuilderImageDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).imagebuilderconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_imagebuilder_image" { + continue + } + + input := &imagebuilder.GetImageInput{ + ImageBuildVersionArn: aws.String(rs.Primary.ID), + } + + output, err := conn.GetImage(input) + + if tfawserr.ErrCodeEquals(err, imagebuilder.ErrCodeResourceNotFoundException) { + continue + } + + if err != nil { + return fmt.Errorf("error getting Image Builder Image (%s): %w", rs.Primary.ID, err) + } + + if output != nil { + return fmt.Errorf("Image Builder Image (%s) still exists", rs.Primary.ID) + } + } + + return nil +} + +func testAccCheckAwsImageBuilderImageExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("resource not found: %s", resourceName) + } + + conn := testAccProvider.Meta().(*AWSClient).imagebuilderconn + + input := &imagebuilder.GetImageInput{ + ImageBuildVersionArn: aws.String(rs.Primary.ID), + } + + _, err := conn.GetImage(input) + + if err != nil { + return fmt.Errorf("error getting Image Builder Image (%s): %w", rs.Primary.ID, err) + } + + return nil + } +} + +func testAccAwsImageBuilderImageConfigBase(rName string) string { + return fmt.Sprintf(` +data "aws_imagebuilder_component" "update-linux" { + arn = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:component/update-linux/1.0.0" +} + +data "aws_region" "current" {} + +data "aws_partition" "current" {} + +resource "aws_iam_instance_profile" "test" { + name = aws_iam_role.test.name + role = aws_iam_role.test.name + + depends_on = [ + aws_iam_role_policy_attachment.AmazonSSMManagedInstanceCore, + aws_iam_role_policy_attachment.EC2InstanceProfileForImageBuilder, + ] +} + +resource "aws_iam_role" "test" { + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = "ec2.${data.aws_partition.current.dns_suffix}" + } + Sid = "" + }] + }) + name = %[1]q +} + +resource "aws_iam_role_policy_attachment" "AmazonSSMManagedInstanceCore" { + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonSSMManagedInstanceCore" + role = aws_iam_role.test.name +} + +resource "aws_iam_role_policy_attachment" "EC2InstanceProfileForImageBuilder" { + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/EC2InstanceProfileForImageBuilder" + role = aws_iam_role.test.name +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_default_route_table" "test" { + default_route_table_id = aws_vpc.test.default_route_table_id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = 
aws_internet_gateway.test.id + } +} + +resource "aws_default_security_group" "test" { + vpc_id = aws_vpc.test.id + + egress { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + to_port = 0 + } + + ingress { + from_port = 0 + protocol = -1 + self = true + to_port = 0 + } +} + +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id +} + +resource "aws_subnet" "test" { + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + map_public_ip_on_launch = true + vpc_id = aws_vpc.test.id +} + +resource "aws_imagebuilder_image_recipe" "test" { + component { + component_arn = data.aws_imagebuilder_component.update-linux.arn + } + + name = %[1]q + parent_image = "arn:${data.aws_partition.current.partition}:imagebuilder:${data.aws_region.current.name}:aws:image/amazon-linux-2-x86/x.x.x" + version = "1.0.0" +} + +resource "aws_imagebuilder_infrastructure_configuration" "test" { + instance_profile_name = aws_iam_instance_profile.test.name + name = %[1]q + security_group_ids = [aws_default_security_group.test.id] + subnet_id = aws_subnet.test.id + + depends_on = [aws_default_route_table.test] +} +`, rName) +} + +func testAccAwsImageBuilderImageConfigDistributionConfigurationArn(rName string) string { + return composeConfig( + testAccAwsImageBuilderImageConfigBase(rName), + fmt.Sprintf(` +resource "aws_imagebuilder_distribution_configuration" "test" { + name = %[1]q + + distribution { + region = data.aws_region.current.name + } +} + +resource "aws_imagebuilder_image" "test" { + distribution_configuration_arn = aws_imagebuilder_distribution_configuration.test.arn + image_recipe_arn = aws_imagebuilder_image_recipe.test.arn + infrastructure_configuration_arn = aws_imagebuilder_infrastructure_configuration.test.arn +} +`, rName)) +} + +func testAccAwsImageBuilderImageConfigEnhancedImageMetadataEnabled(rName string, enhancedImageMetadataEnabled bool) string { + return composeConfig( + testAccAwsImageBuilderImageConfigBase(rName), + fmt.Sprintf(` +resource "aws_imagebuilder_image" "test" { + enhanced_image_metadata_enabled = %[2]t + image_recipe_arn = aws_imagebuilder_image_recipe.test.arn + infrastructure_configuration_arn = aws_imagebuilder_infrastructure_configuration.test.arn +} +`, rName, enhancedImageMetadataEnabled)) +} + +func testAccAwsImageBuilderImageConfigImageTestsConfigurationImageTestsEnabled(rName string, imageTestsEnabled bool) string { + return composeConfig( + testAccAwsImageBuilderImageConfigBase(rName), + fmt.Sprintf(` +resource "aws_imagebuilder_image" "test" { + image_recipe_arn = aws_imagebuilder_image_recipe.test.arn + infrastructure_configuration_arn = aws_imagebuilder_infrastructure_configuration.test.arn + + image_tests_configuration { + image_tests_enabled = %[2]t + } +} +`, rName, imageTestsEnabled)) +} + +func testAccAwsImageBuilderImageConfigImageTestsConfigurationTimeoutMinutes(rName string, timeoutMinutes int) string { + return composeConfig( + testAccAwsImageBuilderImageConfigBase(rName), + fmt.Sprintf(` +resource "aws_imagebuilder_image" "test" { + image_recipe_arn = aws_imagebuilder_image_recipe.test.arn + infrastructure_configuration_arn = aws_imagebuilder_infrastructure_configuration.test.arn + + image_tests_configuration { + timeout_minutes = %[2]d + } +} +`, rName, timeoutMinutes)) +} + +func testAccAwsImageBuilderImageConfigRequired(rName string) string { + return composeConfig( + testAccAwsImageBuilderImageConfigBase(rName), + ` +resource "aws_imagebuilder_image" "test" { + image_recipe_arn = aws_imagebuilder_image_recipe.test.arn + 
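# Minimal configuration: image_recipe_arn and infrastructure_configuration_arn are the only required arguments. +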
infrastructure_configuration_arn = aws_imagebuilder_infrastructure_configuration.test.arn +} +`) +} + +func testAccAwsImageBuilderImageConfigTags1(rName string, tagKey1 string, tagValue1 string) string { + return composeConfig( + testAccAwsImageBuilderImageConfigBase(rName), + fmt.Sprintf(` +resource "aws_imagebuilder_image" "test" { + image_recipe_arn = aws_imagebuilder_image_recipe.test.arn + infrastructure_configuration_arn = aws_imagebuilder_infrastructure_configuration.test.arn + + tags = { + %[1]q = %[2]q + } +} +`, tagKey1, tagValue1)) +} + +func testAccAwsImageBuilderImageConfigTags2(rName string, tagKey1 string, tagValue1 string, tagKey2 string, tagValue2 string) string { + return composeConfig( + testAccAwsImageBuilderImageConfigBase(rName), + fmt.Sprintf(` +resource "aws_imagebuilder_image" "test" { + image_recipe_arn = aws_imagebuilder_image_recipe.test.arn + infrastructure_configuration_arn = aws_imagebuilder_infrastructure_configuration.test.arn + + tags = { + %[1]q = %[2]q + %[3]q = %[4]q + } +} +`, tagKey1, tagValue1, tagKey2, tagValue2)) +} diff --git a/website/docs/d/imagebuilder_image.html.markdown b/website/docs/d/imagebuilder_image.html.markdown new file mode 100644 index 00000000000..81d871924d0 --- /dev/null +++ b/website/docs/d/imagebuilder_image.html.markdown @@ -0,0 +1,51 @@ +--- +subcategory: "Image Builder" +layout: "aws" +page_title: "AWS: aws_imagebuilder_image" +description: |- + Provides details about an Image Builder Image +--- + +# Data Source: aws_imagebuilder_image + +Provides details about an Image Builder Image. + +## Example Usage + +### Latest + +```hcl +data "aws_imagebuilder_image" "example" { + arn = "arn:aws:imagebuilder:us-west-2:aws:image/amazon-linux-2-x86/x.x.x" +} +``` + +## Argument Reference + +* `arn` - (Required) Amazon Resource Name (ARN) of the image. The suffix can either be specified with wildcards (`x.x.x`) to fetch the latest build version or as a full build version (e.g. `2020.11.26/1`) to fetch an exact version. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `build_version_arn` - Build version Amazon Resource Name (ARN) of the image. This will always have the `#.#.#/#` suffix. +* `date_created` - Date the image was created. +* `distribution_configuration_arn` - Amazon Resource Name (ARN) of the Image Builder Distribution Configuration. +* `enhanced_image_metadata_enabled` - Whether additional information about the image being created is collected. +* `image_recipe_arn` - Amazon Resource Name (ARN) of the Image Builder Image Recipe. +* `image_tests_configuration` - List of objects with image tests configuration. + * `image_tests_enabled` - Whether image tests are enabled. + * `timeout_minutes` - Number of minutes before image tests time out. +* `infrastructure_configuration_arn` - Amazon Resource Name (ARN) of the Image Builder Infrastructure Configuration. +* `name` - Name of the image. +* `platform` - Platform of the image. +* `os_version` - Operating System version of the image. +* `output_resources` - List of objects with resources created by the image. + * `amis` - Set of objects with each Amazon Machine Image (AMI) created. + * `account_id` - Account identifier of the AMI. + * `description` - Description of the AMI. + * `image` - Identifier of the AMI. + * `name` - Name of the AMI. + * `region` - Region of the AMI. +* `tags` - Key-value map of resource tags for the image. +* `version` - Version of the image. 
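+ +As an illustrative sketch (the `aws_instance` resource shown here is hypothetical and not part of this patch), the exported `output_resources` attribute can feed other configuration, for example launching an instance from the first AMI the image produced: + +```hcl +resource "aws_instance" "from_image" { + # output_resources is a list; amis is a set of objects whose "image" field is the AMI identifier. + ami           = tolist(data.aws_imagebuilder_image.example.output_resources[0].amis)[0].image + instance_type = "t3.micro" +} +``` 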
diff --git a/website/docs/r/imagebuilder_image.html.markdown b/website/docs/r/imagebuilder_image.html.markdown new file mode 100644 index 00000000000..ec753e624ca --- /dev/null +++ b/website/docs/r/imagebuilder_image.html.markdown @@ -0,0 +1,73 @@ +--- +subcategory: "Image Builder" +layout: "aws" +page_title: "AWS: aws_imagebuilder_image" +description: |- + Manages an Image Builder Image +--- + +# Resource: aws_imagebuilder_image + +Manages an Image Builder Image. + +## Example Usage + +```hcl +resource "aws_imagebuilder_image" "example" { + distribution_configuration_arn = aws_imagebuilder_distribution_configuration.example.arn + image_recipe_arn = aws_imagebuilder_image_recipe.example.arn + infrastructure_configuration_arn = aws_imagebuilder_infrastructure_configuration.example.arn +} +``` + +## Argument Reference + +The following arguments are required: + +* `image_recipe_arn` - (Required) Amazon Resource Name (ARN) of the Image Builder Image Recipe. +* `infrastructure_configuration_arn` - (Required) Amazon Resource Name (ARN) of the Image Builder Infrastructure Configuration. + +The following arguments are optional: + +* `distribution_configuration_arn` - (Optional) Amazon Resource Name (ARN) of the Image Builder Distribution Configuration. +* `enhanced_image_metadata_enabled` - (Optional) Whether additional information about the image being created is collected. Defaults to `true`. +* `image_tests_configuration` - (Optional) Configuration block with image tests configuration. Detailed below. +* `tags` - (Optional) Key-value map of resource tags for the Image Builder Image. + +### image_tests_configuration + +The following arguments are optional: + +* `image_tests_enabled` - (Optional) Whether image tests are enabled. Defaults to `true`. +* `timeout_minutes` - (Optional) Number of minutes before image tests time out. Valid values are between `60` and `1440`. Defaults to `720`. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `arn` - Amazon Resource Name (ARN) of the image. +* `date_created` - Date the image was created. +* `platform` - Platform of the image. +* `os_version` - Operating System version of the image. +* `output_resources` - List of objects with resources created by the image. + * `amis` - Set of objects with each Amazon Machine Image (AMI) created. + * `account_id` - Account identifier of the AMI. + * `description` - Description of the AMI. + * `image` - Identifier of the AMI. + * `name` - Name of the AMI. + * `region` - Region of the AMI. +* `version` - Version of the image. + +## Timeouts + +`aws_imagebuilder_image` provides the following [Timeouts](/docs/configuration/resources.html#timeouts) configuration options: + +* `create` - (Default `60m`) How long to wait for the image to be built, tested, and distributed. + +## Import + +`aws_imagebuilder_image` resources can be imported using the Amazon Resource Name (ARN), e.g. 
+ +``` +$ terraform import aws_imagebuilder_image.example arn:aws:imagebuilder:us-east-1:123456789012:image/example/1.0.0/1 +``` From dfc7d1341b72269756043f16d77f8a5d5613e398 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Wed, 27 Jan 2021 17:41:23 +0000 Subject: [PATCH 0888/1212] Update CHANGELOG.md for #16710 --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f2ff7c51a84..91db488b888 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## 3.26.0 (Unreleased) +FEATURES: + +* **New Resource:** `aws_imagebuilder_image` (https://github.com/hashicorp/terraform-provider-aws/issues/16710) + ENHANCEMENTS: * data-source/aws_vpc_peering_connection: Add `cidr_block_set` and `peer_cidr_block_set` attributes (https://github.com/hashicorp/terraform-provider-aws/issues/13420) From 9088ce1a8068df1127f54427aeeadb2a2e602dff Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 27 Jan 2021 12:45:01 -0500 Subject: [PATCH 0889/1212] Fix CHANGELOG for #16710 --- .changelog/16710.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changelog/16710.txt b/.changelog/16710.txt index 46ed56618bc..d5ca1adcbea 100644 --- a/.changelog/16710.txt +++ b/.changelog/16710.txt @@ -1,4 +1,4 @@ -```release-note:new-datasource +```release-note:new-data-source aws_imagebuilder_image ``` From 0939a749ca1b8b36cde5fe96dd9422aeb46d84b4 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 27 Jan 2021 12:53:41 -0500 Subject: [PATCH 0890/1212] data-source/aws_route53_zone: Perform NS record lookup for private Hosted Zones (#17002) * data-source/aws_route53_zone: Perform NS record lookup for private Hosted Zones Reference: https://github.com/hashicorp/terraform-provider-aws/issues/10391 Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16862 Changes: ``` NOTES * data-source/aws_route53_zone: The Route 53 `ListResourceRecordSets` API call has been implemented to support the `name_servers` attribute for private Hosted Zones similar to the resource implementation. Environments using restrictive IAM permissions may require updates. 
BUG FIXES * data-source/aws_route53_zone: Ensure `name_servers` is populated for private Hosted Zones ``` Previously after testing updates: ``` === CONT TestAccAWSRoute53ZoneDataSource_vpc data_source_aws_route53_zone_test.go:89: Step 1/1 error: Check failed: Check 3/4 error: aws_route53_zone.test: Attribute "name_servers.#" is "4", but "name_servers.#" is not set in data.aws_route53_zone.test --- FAIL: TestAccAWSRoute53ZoneDataSource_vpc (83.04s) ``` Output from acceptance testing: ``` --- PASS: TestAccAWSRoute53ZoneDataSource_id (46.24s) --- PASS: TestAccAWSRoute53ZoneDataSource_name (47.24s) --- PASS: TestAccAWSRoute53ZoneDataSource_tags (94.61s) --- PASS: TestAccAWSRoute53ZoneDataSource_vpc (94.63s) --- PASS: TestAccAWSRoute53ZoneDataSource_serviceDiscovery (110.28s) ``` * Update CHANGELOG for #17002 --- .changelog/17002.txt | 7 ++++ aws/data_source_aws_route53_zone.go | 44 +++++++++++++++--------- aws/data_source_aws_route53_zone_test.go | 8 ++--- 3 files changed, 39 insertions(+), 20 deletions(-) create mode 100644 .changelog/17002.txt diff --git a/.changelog/17002.txt b/.changelog/17002.txt new file mode 100644 index 00000000000..9bf54d9194d --- /dev/null +++ b/.changelog/17002.txt @@ -0,0 +1,7 @@ +```release-note:note +data-source/aws_route53_zone: The Route 53 `ListResourceRecordSets` API call has been implemented to support the `name_servers` attribute for private Hosted Zones similar to the resource implementation. Environments using restrictive IAM permissions may require updates. +``` + +```release-note:bug +data-source/aws_route53_zone: Ensure `name_servers` is populated for private Hosted Zones +``` diff --git a/aws/data_source_aws_route53_zone.go b/aws/data_source_aws_route53_zone.go index 9c424c0824c..c3f95b39967 100644 --- a/aws/data_source_aws_route53_zone.go +++ b/aws/data_source_aws_route53_zone.go @@ -170,10 +170,12 @@ func dataSourceAwsRoute53ZoneRead(d *schema.ResourceData, meta interface{}) erro d.Set("linked_service_description", hostedZoneFound.LinkedService.Description) } - nameServers, err := hostedZoneNameServers(idHostedZone, conn) + nameServers, err := hostedZoneNameServers(conn, idHostedZone, aws.StringValue(hostedZoneFound.Name)) + if err != nil { - return fmt.Errorf("Error finding Route 53 Hosted Zone: %v", err) + return fmt.Errorf("error getting Route 53 Hosted Zone (%s) name servers: %w", idHostedZone, err) } + if err := d.Set("name_servers", nameServers); err != nil { return fmt.Errorf("error setting name_servers: %w", err) } @@ -181,35 +183,45 @@ func dataSourceAwsRoute53ZoneRead(d *schema.ResourceData, meta interface{}) erro tags, err = keyvaluetags.Route53ListTags(conn, idHostedZone, route53.TagResourceTypeHostedzone) if err != nil { - return fmt.Errorf("Error finding Route 53 Hosted Zone: %v", err) + return fmt.Errorf("error listing Route 53 Hosted Zone (%s) tags: %w", idHostedZone, err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil } // used to retrieve name servers -func hostedZoneNameServers(id string, conn *route53.Route53) ([]string, error) { - req := &route53.GetHostedZoneInput{} - req.Id = aws.String(id) +func hostedZoneNameServers(conn *route53.Route53, id string, name string) ([]string, error) { + input := &route53.GetHostedZoneInput{ + Id: aws.String(id), + } + + output, err := conn.GetHostedZone(input) - resp, err := conn.GetHostedZone(req) if err != nil { - return []string{}, 
err + return nil, fmt.Errorf("error getting Route 53 Hosted Zone (%s): %w", id, err) + } + + if output == nil { + return nil, fmt.Errorf("error getting Route 53 Hosted Zone (%s): empty response", id) } - if resp.DelegationSet == nil { - return []string{}, nil + if output.DelegationSet != nil { + return aws.StringValueSlice(output.DelegationSet.NameServers), nil } - servers := []string{} - for _, server := range resp.DelegationSet.NameServers { - if server != nil { - servers = append(servers, aws.StringValue(server)) + if output.HostedZone != nil && output.HostedZone.Config != nil && aws.BoolValue(output.HostedZone.Config.PrivateZone) { + nameServers, err := getNameServers(id, name, conn) + + if err != nil { + return nil, fmt.Errorf("error listing Route 53 Hosted Zone (%s) NS records: %w", id, err) } + + return nameServers, nil } - return servers, nil + + return nil, nil } diff --git a/aws/data_source_aws_route53_zone_test.go b/aws/data_source_aws_route53_zone_test.go index 4183ed6648e..5e504e8418a 100644 --- a/aws/data_source_aws_route53_zone_test.go +++ b/aws/data_source_aws_route53_zone_test.go @@ -25,7 +25,7 @@ func TestAccAWSRoute53ZoneDataSource_id(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "id", dataSourceName, "id"), resource.TestCheckResourceAttrPair(resourceName, "name", dataSourceName, "name"), - resource.TestCheckResourceAttrPair(resourceName, "name_servers", dataSourceName, "name_servers"), + resource.TestCheckResourceAttrPair(resourceName, "name_servers.#", dataSourceName, "name_servers.#"), resource.TestCheckResourceAttrPair(resourceName, "tags", dataSourceName, "tags"), ), }, @@ -49,7 +49,7 @@ func TestAccAWSRoute53ZoneDataSource_name(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "id", dataSourceName, "id"), resource.TestCheckResourceAttrPair(resourceName, "name", dataSourceName, "name"), - resource.TestCheckResourceAttrPair(resourceName, "name_servers", dataSourceName, "name_servers"), + resource.TestCheckResourceAttrPair(resourceName, "name_servers.#", dataSourceName, "name_servers.#"), resource.TestCheckResourceAttrPair(resourceName, "tags", dataSourceName, "tags"), ), }, @@ -73,7 +73,7 @@ func TestAccAWSRoute53ZoneDataSource_tags(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "id", dataSourceName, "id"), resource.TestCheckResourceAttrPair(resourceName, "name", dataSourceName, "name"), - resource.TestCheckResourceAttrPair(resourceName, "name_servers", dataSourceName, "name_servers"), + resource.TestCheckResourceAttrPair(resourceName, "name_servers.#", dataSourceName, "name_servers.#"), resource.TestCheckResourceAttrPair(resourceName, "tags", dataSourceName, "tags"), ), }, @@ -97,7 +97,7 @@ func TestAccAWSRoute53ZoneDataSource_vpc(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "id", dataSourceName, "id"), resource.TestCheckResourceAttrPair(resourceName, "name", dataSourceName, "name"), - resource.TestCheckResourceAttrPair(resourceName, "name_servers", dataSourceName, "name_servers"), + resource.TestCheckResourceAttrPair(resourceName, "name_servers.#", dataSourceName, "name_servers.#"), resource.TestCheckResourceAttrPair(resourceName, "tags", dataSourceName, "tags"), ), }, From 0d3fd791cec0f7e9fb97ee64099a8ce168784c28 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Wed, 27 Jan 2021 17:55:20 +0000 Subject: [PATCH 0891/1212] Update CHANGELOG.md for 
#17002 --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 91db488b888..f7e774615df 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,12 @@ ## 3.26.0 (Unreleased) +NOTES: + +* data-source/aws_route53_zone: The Route 53 `ListResourceRecordSets` API call has been implemented to support the `name_servers` attribute for private Hosted Zones similar to the resource implementation. Environments using restrictive IAM permissions may require updates. (https://github.com/hashicorp/terraform-provider-aws/issues/17002) + FEATURES: +* **New Data Source:** `aws_imagebuilder_image` (https://github.com/hashicorp/terraform-provider-aws/issues/16710) * **New Resource:** `aws_imagebuilder_image` (https://github.com/hashicorp/terraform-provider-aws/issues/16710) ENHANCEMENTS: @@ -12,6 +17,7 @@ BUG FIXES: * data-source/aws_elb_hosted_zone_id: Correct values for `cn-north-1` and `cn-northwest-1` regions (https://github.com/hashicorp/terraform-provider-aws/issues/17226) * data-source/aws_lb_listener: Prevent error when retrieving a listener whose default action contains weighted target groups (https://github.com/hashicorp/terraform-provider-aws/issues/17238) +* data-source/aws_route53_zone: Ensure `name_servers` is populated for private Hosted Zones (https://github.com/hashicorp/terraform-provider-aws/issues/17002) * resource/aws_ebs_volume: Allow both `size` and `snapshot_id` attributes to be specified (https://github.com/hashicorp/terraform-provider-aws/issues/17243) * resource/aws_elasticache_replication_group: Correctly update computed `member_clusters` values (https://github.com/hashicorp/terraform-provider-aws/issues/17201) From 3eb203323d6d94dd2982c6e58a79e7b578cbed21 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 27 Jan 2021 17:01:36 -0500 Subject: [PATCH 0892/1212] tests/service/ec2: Add PreCheck for Managed Prefix Lists (#16806) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16804 Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccDataSourceAwsEc2ManagedPrefixList_matchesTooMany (3.65s) --- PASS: TestAccDataSourceAwsEc2ManagedPrefixList_basic (20.74s) --- PASS: TestAccDataSourceAwsEc2ManagedPrefixList_filter (20.86s) --- PASS: TestAccAwsEc2ManagedPrefixList_disappears (21.32s) --- PASS: TestAccAwsEc2ManagedPrefixList_basic (22.83s) --- PASS: TestAccAwsEc2ManagedPrefixList_AddressFamily_IPv6 (23.06s) --- PASS: TestAccAwsEc2ManagedPrefixList_Name (35.60s) --- PASS: TestAccAwsEc2ManagedPrefixList_Entry (40.87s) --- PASS: TestAccAwsEc2ManagedPrefixList_Tags (47.58s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- SKIP: TestAccDataSourceAwsEc2ManagedPrefixList_basic (1.74s) --- SKIP: TestAccDataSourceAwsEc2ManagedPrefixList_filter (1.72s) --- SKIP: TestAccDataSourceAwsEc2ManagedPrefixList_matchesTooMany (1.73s) --- SKIP: TestAccAwsEc2ManagedPrefixList_AddressFamily_IPv6 (1.75s) --- SKIP: TestAccAwsEc2ManagedPrefixList_basic (1.73s) --- SKIP: TestAccAwsEc2ManagedPrefixList_disappears (1.73s) --- SKIP: TestAccAwsEc2ManagedPrefixList_Entry (1.73s) --- SKIP: TestAccAwsEc2ManagedPrefixList_Name (1.75s) --- SKIP: TestAccAwsEc2ManagedPrefixList_Tags (1.74s) ``` --- ...source_aws_ec2_managed_prefix_list_test.go | 6 ++-- aws/provider_test.go | 3 ++ ...source_aws_ec2_managed_prefix_list_test.go | 29 +++++++++++++++---- 3 files changed, 29 insertions(+), 9 deletions(-) diff --git a/aws/data_source_aws_ec2_managed_prefix_list_test.go b/aws/data_source_aws_ec2_managed_prefix_list_test.go index 
332e1259c45..f313a68589f 100644 --- a/aws/data_source_aws_ec2_managed_prefix_list_test.go +++ b/aws/data_source_aws_ec2_managed_prefix_list_test.go @@ -44,7 +44,7 @@ func TestAccDataSourceAwsEc2ManagedPrefixList_basic(t *testing.T) { prefixListResourceName := "data.aws_prefix_list.s3_by_id" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckEc2ManagedPrefixList(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { @@ -98,7 +98,7 @@ func TestAccDataSourceAwsEc2ManagedPrefixList_filter(t *testing.T) { resourceById := "data.aws_ec2_managed_prefix_list.s3_by_id" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckEc2ManagedPrefixList(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { @@ -149,7 +149,7 @@ data "aws_ec2_managed_prefix_list" "s3_by_id" { func TestAccDataSourceAwsEc2ManagedPrefixList_matchesTooMany(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckEc2ManagedPrefixList(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { diff --git a/aws/provider_test.go b/aws/provider_test.go index 45991172454..cd15f855ff5 100644 --- a/aws/provider_test.go +++ b/aws/provider_test.go @@ -1039,6 +1039,9 @@ func testAccPreCheckSkipError(err error) bool { if isAWSErr(err, "InvalidInputException", "Unknown operation") { return true } + if isAWSErr(err, "InvalidAction", "is not valid") { + return true + } if isAWSErr(err, "InvalidAction", "Unavailable Operation") { return true } diff --git a/aws/resource_aws_ec2_managed_prefix_list_test.go b/aws/resource_aws_ec2_managed_prefix_list_test.go index 0116248f2c9..07216393049 100644 --- a/aws/resource_aws_ec2_managed_prefix_list_test.go +++ b/aws/resource_aws_ec2_managed_prefix_list_test.go @@ -5,6 +5,7 @@ import ( "regexp" "testing" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -18,7 +19,7 @@ func TestAccAwsEc2ManagedPrefixList_basic(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckEc2ManagedPrefixList(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, Steps: []resource.TestStep{ @@ -51,7 +52,7 @@ func TestAccAwsEc2ManagedPrefixList_disappears(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckEc2ManagedPrefixList(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, Steps: []resource.TestStep{ @@ -72,7 +73,7 @@ func TestAccAwsEc2ManagedPrefixList_AddressFamily_IPv6(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckEc2ManagedPrefixList(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, Steps: []resource.TestStep{ @@ -98,7 +99,7 @@ func TestAccAwsEc2ManagedPrefixList_Entry(t 
*testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckEc2ManagedPrefixList(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, Steps: []resource.TestStep{ @@ -156,7 +157,7 @@ func TestAccAwsEc2ManagedPrefixList_Name(t *testing.T) { rName2 := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckEc2ManagedPrefixList(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, Steps: []resource.TestStep{ @@ -192,7 +193,7 @@ func TestAccAwsEc2ManagedPrefixList_Tags(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckEc2ManagedPrefixList(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAwsEc2ManagedPrefixListDestroy, Steps: []resource.TestStep{ @@ -287,6 +288,22 @@ func testAccAwsEc2ManagedPrefixListExists(resourceName string) resource.TestChec } } +func testAccPreCheckEc2ManagedPrefixList(t *testing.T) { + conn := testAccProvider.Meta().(*AWSClient).ec2conn + + input := &ec2.DescribeManagedPrefixListsInput{} + + _, err := conn.DescribeManagedPrefixLists(input) + + if testAccPreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + func testAccAwsEc2ManagedPrefixListConfig_AddressFamily(rName string, addressFamily string) string { return fmt.Sprintf(` resource "aws_ec2_managed_prefix_list" "test" { From 5fa67ebff035e519f462775d0cf574081a6b1eae Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 27 Jan 2021 16:33:09 -0800 Subject: [PATCH 0893/1212] Adds CHANGELOG entry --- .changelog/17320.txt | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .changelog/17320.txt diff --git a/.changelog/17320.txt b/.changelog/17320.txt new file mode 100644 index 00000000000..defc80418ca --- /dev/null +++ b/.changelog/17320.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +data-source/aws_elasticache_replication_group: Add `multi_az_enabled` argument +``` + +```release-note:enhancement +resource/aws_elasticache_replication_group: Add `multi_az_enabled` argument +``` From d7cee0e4467c5b88825d18ff3e44087953ced5e0 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Wed, 27 Jan 2021 23:24:54 -0800 Subject: [PATCH 0894/1212] Add cloudfront_origin_request_policy resource --- ...udfront_origin_request_policy_structure.go | 182 +++++++++++++++++ aws/provider.go | 1 + ...ce_aws_cloudfront_origin_request_policy.go | 191 ++++++++++++++++++ ...s_cloudfront_origin_request_policy_test.go | 179 ++++++++++++++++ 4 files changed, 553 insertions(+) create mode 100644 aws/cloudfront_origin_request_policy_structure.go create mode 100644 aws/resource_aws_cloudfront_origin_request_policy.go create mode 100644 aws/resource_aws_cloudfront_origin_request_policy_test.go diff --git a/aws/cloudfront_origin_request_policy_structure.go b/aws/cloudfront_origin_request_policy_structure.go new file mode 100644 index 00000000000..0c647dcada3 --- /dev/null +++ b/aws/cloudfront_origin_request_policy_structure.go @@ -0,0 +1,182 @@ +package aws + +import ( + "github.com/aws/aws-sdk-go/aws" + 
"github.com/aws/aws-sdk-go/service/cloudfront" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func expandCloudFrontOriginRequestPolicyCookieNames(cookieNamesFlat map[string]interface{}) *cloudfront.CookieNames { + cookieNames := &cloudfront.CookieNames{} + + var newCookieItems []*string + for _, cookie := range cookieNamesFlat["items"].(*schema.Set).List() { + newCookieItems = append(newCookieItems, aws.String(cookie.(string))) + } + cookieNames.Items = newCookieItems + cookieNames.Quantity = aws.Int64(int64(len(newCookieItems))) + + return cookieNames +} + +func expandCloudFrontOriginRequestPolicyCookiesConfig(cookiesConfigFlat map[string]interface{}) *cloudfront.OriginRequestPolicyCookiesConfig { + cookies := &cloudfront.CookieNames{ + Quantity: aws.Int64(int64(0)), + } + + if cookiesFlat, ok := cookiesConfigFlat["cookies"].([]interface{}); ok && len(cookiesFlat) == 1 { + cookies = expandCloudFrontOriginRequestPolicyCookieNames(cookiesFlat[0].(map[string]interface{})) + } else { + cookies = nil + } + + cookiesConfig := &cloudfront.OriginRequestPolicyCookiesConfig{ + CookieBehavior: aws.String(cookiesConfigFlat["cookie_behavior"].(string)), + Cookies: cookies, + } + + return cookiesConfig +} + +func expandCloudFrontOriginRequestPolicyHeaders(headerNamesFlat map[string]interface{}) *cloudfront.Headers { + headers := &cloudfront.Headers{} + + var newHeaderItems []*string + for _, header := range headerNamesFlat["items"].(*schema.Set).List() { + newHeaderItems = append(newHeaderItems, aws.String(header.(string))) + } + headers.Items = newHeaderItems + headers.Quantity = aws.Int64(int64(len(newHeaderItems))) + + return headers +} + +func expandCloudFrontOriginRequestPolicyHeadersConfig(headersConfigFlat map[string]interface{}) *cloudfront.OriginRequestPolicyHeadersConfig { + headers := &cloudfront.Headers{} + + if headersFlat, ok := headersConfigFlat["headers"].([]interface{}); ok && len(headersFlat) == 1 && headersConfigFlat["header_behavior"] != "none" { + headers = expandCloudFrontOriginRequestPolicyHeaders(headersFlat[0].(map[string]interface{})) + } else { + headers = nil + } + + headersConfig := &cloudfront.OriginRequestPolicyHeadersConfig{ + HeaderBehavior: aws.String(headersConfigFlat["header_behavior"].(string)), + Headers: headers, + } + + return headersConfig +} + +func expandCloudFrontOriginRequestPolicyQueryStringNames(queryStringNamesFlat map[string]interface{}) *cloudfront.QueryStringNames { + queryStringNames := &cloudfront.QueryStringNames{} + + var newQueryStringItems []*string + for _, queryString := range queryStringNamesFlat["items"].(*schema.Set).List() { + newQueryStringItems = append(newQueryStringItems, aws.String(queryString.(string))) + } + queryStringNames.Items = newQueryStringItems + queryStringNames.Quantity = aws.Int64(int64(len(newQueryStringItems))) + + return queryStringNames +} + +func expandCloudFrontOriginRequestPolicyQueryStringsConfig(queryStringConfigFlat map[string]interface{}) *cloudfront.OriginRequestPolicyQueryStringsConfig { + queryStrings := &cloudfront.QueryStringNames{ + Quantity: aws.Int64(int64(0)), + } + + if queryStringFlat, ok := queryStringConfigFlat["query_strings"].([]interface{}); ok && len(queryStringFlat) == 1 { + queryStrings = expandCloudFrontOriginRequestPolicyQueryStringNames(queryStringFlat[0].(map[string]interface{})) + } else { + queryStrings = nil + } + + queryStringConfig := &cloudfront.OriginRequestPolicyQueryStringsConfig{ + QueryStringBehavior: 
aws.String(queryStringConfigFlat["query_string_behavior"].(string)), + QueryStrings: queryStrings, + } + + return queryStringConfig +} + +func expandCloudFrontOriginRequestPolicyConfig(d *schema.ResourceData) *cloudfront.OriginRequestPolicyConfig { + + originRequestPolicy := &cloudfront.OriginRequestPolicyConfig{ + Comment: aws.String(d.Get("comment").(string)), + Name: aws.String(d.Get("name").(string)), + CookiesConfig: expandCloudFrontOriginRequestPolicyCookiesConfig(d.Get("cookies_config").([]interface{})[0].(map[string]interface{})), + HeadersConfig: expandCloudFrontOriginRequestPolicyHeadersConfig(d.Get("headers_config").([]interface{})[0].(map[string]interface{})), + QueryStringsConfig: expandCloudFrontOriginRequestPolicyQueryStringsConfig(d.Get("query_strings_config").([]interface{})[0].(map[string]interface{})), + } + + return originRequestPolicy +} + +func flattenCloudFrontOriginRequestPolicyCookiesConfig(cookiesConfig *cloudfront.OriginRequestPolicyCookiesConfig) []map[string]interface{} { + cookiesConfigFlat := map[string]interface{}{} + + cookies := []map[string]interface{}{} + if cookiesConfig.Cookies != nil { + cookies = []map[string]interface{}{ + { + "items": cookiesConfig.Cookies.Items, + }, + } + } + + cookiesConfigFlat["cookie_behavior"] = aws.StringValue(cookiesConfig.CookieBehavior) + cookiesConfigFlat["cookies"] = cookies + + return []map[string]interface{}{ + cookiesConfigFlat, + } +} + +func flattenCloudFrontOriginRequestPolicyHeadersConfig(headersConfig *cloudfront.OriginRequestPolicyHeadersConfig) []map[string]interface{} { + headersConfigFlat := map[string]interface{}{} + + headers := []map[string]interface{}{} + if headersConfig.Headers != nil { + headers = []map[string]interface{}{ + { + "items": headersConfig.Headers.Items, + }, + } + } + + headersConfigFlat["header_behavior"] = aws.StringValue(headersConfig.HeaderBehavior) + headersConfigFlat["headers"] = headers + + return []map[string]interface{}{ + headersConfigFlat, + } +} + +func flattenCloudFrontOriginRequestPolicyQueryStringsConfig(queryStringsConfig *cloudfront.OriginRequestPolicyQueryStringsConfig) []map[string]interface{} { + queryStringsConfigFlat := map[string]interface{}{} + + queryStrings := []map[string]interface{}{} + if queryStringsConfig.QueryStrings != nil { + queryStrings = []map[string]interface{}{ + { + "items": queryStringsConfig.QueryStrings.Items, + }, + } + } + + queryStringsConfigFlat["query_string_behavior"] = aws.StringValue(queryStringsConfig.QueryStringBehavior) + queryStringsConfigFlat["query_strings"] = queryStrings + + return []map[string]interface{}{ + queryStringsConfigFlat, + } +} + +func flattenCloudFrontOriginRequestPolicy(d *schema.ResourceData, originRequestPolicy *cloudfront.OriginRequestPolicyConfig) { + d.Set("comment", aws.StringValue(originRequestPolicy.Comment)) + d.Set("name", aws.StringValue(originRequestPolicy.Name)) + d.Set("cookies_config", flattenCloudFrontOriginRequestPolicyCookiesConfig(originRequestPolicy.CookiesConfig)) + d.Set("headers_config", flattenCloudFrontOriginRequestPolicyHeadersConfig(originRequestPolicy.HeadersConfig)) + d.Set("query_strings_config", flattenCloudFrontOriginRequestPolicyQueryStringsConfig(originRequestPolicy.QueryStringsConfig)) +} diff --git a/aws/provider.go b/aws/provider.go index 53a745319ca..9aec8ccd060 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -486,6 +486,7 @@ func Provider() *schema.Provider { "aws_cloudformation_stack_set_instance": resourceAwsCloudFormationStackSetInstance(), 
"aws_cloudfront_distribution": resourceAwsCloudFrontDistribution(), "aws_cloudfront_origin_access_identity": resourceAwsCloudFrontOriginAccessIdentity(), + "aws_cloudfront_origin_request_policy": resourceAwsCloudFrontOriginRequestPolicy(), "aws_cloudfront_public_key": resourceAwsCloudFrontPublicKey(), "aws_cloudtrail": resourceAwsCloudTrail(), "aws_cloudwatch_event_bus": resourceAwsCloudWatchEventBus(), diff --git a/aws/resource_aws_cloudfront_origin_request_policy.go b/aws/resource_aws_cloudfront_origin_request_policy.go new file mode 100644 index 00000000000..b97ffd8cf63 --- /dev/null +++ b/aws/resource_aws_cloudfront_origin_request_policy.go @@ -0,0 +1,191 @@ +package aws + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudfront" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func resourceAwsCloudFrontOriginRequestPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCloudFrontOriginRequestPolicyCreate, + Read: resourceAwsCloudFrontOriginRequestPolicyRead, + Update: resourceAwsCloudFrontOriginRequestPolicyUpdate, + Delete: resourceAwsCloudFrontOriginRequestPolicyDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "comment": { + Type: schema.TypeString, + Optional: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "etag": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "cookies_config": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cookie_behavior": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"none", "whitelist", "all"}, false), + }, + "cookies": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "items": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + }, + }, + "headers_config": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "header_behavior": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"none", "whitelist", "allViewer", "allViewerAndWhitelistCloudFront"}, false), + }, + "headers": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "items": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + }, + }, + "query_strings_config": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "query_string_behavior": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"none", "whitelist", "allExcept", "all"}, false), + }, + "query_strings": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "items": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func resourceAwsCloudFrontOriginRequestPolicyCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudfrontconn + + request := 
&cloudfront.CreateOriginRequestPolicyInput{ + OriginRequestPolicyConfig: expandCloudFrontOriginRequestPolicyConfig(d), + } + + resp, err := conn.CreateOriginRequestPolicy(request) + + if err != nil { + return err + } + + d.SetId(aws.StringValue(resp.OriginRequestPolicy.Id)) + + return resourceAwsCloudFrontOriginRequestPolicyRead(d, meta) +} + +func resourceAwsCloudFrontOriginRequestPolicyRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudfrontconn + request := &cloudfront.GetOriginRequestPolicyInput{ + Id: aws.String(d.Id()), + } + + resp, err := conn.GetOriginRequestPolicy(request) + if err != nil { + return err + } + d.Set("etag", aws.StringValue(resp.ETag)) + + flattenCloudFrontOriginRequestPolicy(d, resp.OriginRequestPolicy.OriginRequestPolicyConfig) + + return nil +} + +func resourceAwsCloudFrontOriginRequestPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudfrontconn + + request := &cloudfront.UpdateOriginRequestPolicyInput{ + OriginRequestPolicyConfig: expandCloudFrontOriginRequestPolicyConfig(d), + Id: aws.String(d.Id()), + IfMatch: aws.String(d.Get("etag").(string)), + } + + _, err := conn.UpdateOriginRequestPolicy(request) + if err != nil { + return err + } + + return resourceAwsCloudFrontOriginRequestPolicyRead(d, meta) +} + +func resourceAwsCloudFrontOriginRequestPolicyDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudfrontconn + + request := &cloudfront.DeleteOriginRequestPolicyInput{ + Id: aws.String(d.Id()), + IfMatch: aws.String(d.Get("etag").(string)), + } + + _, err := conn.DeleteOriginRequestPolicy(request) + if err != nil { + if isAWSErr(err, cloudfront.ErrCodeNoSuchOriginRequestPolicy, "") { + return nil + } + return err + } + + return nil +} diff --git a/aws/resource_aws_cloudfront_origin_request_policy_test.go b/aws/resource_aws_cloudfront_origin_request_policy_test.go new file mode 100644 index 00000000000..1822f324413 --- /dev/null +++ b/aws/resource_aws_cloudfront_origin_request_policy_test.go @@ -0,0 +1,179 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/service/cloudfront" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccAWSCloudFrontOriginRequestPolicy_basic(t *testing.T) { + rInt := acctest.RandInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudFrontPublicKeyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCloudFrontOriginRequestPolicyConfig(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "comment", "test comment"), + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "cookies_config.0.cookie_behavior", "whitelist"), + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "cookies_config.0.cookies.0.items.0", "test"), + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "headers_config.0.header_behavior", "whitelist"), + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "headers_config.0.headers.0.items.0", "test"), + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "query_strings_config.0.query_string_behavior", 
"whitelist"), + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "query_strings_config.0.query_strings.0.items.0", "test"), + ), + }, + { + ResourceName: "aws_cloudfront_origin_request_policy.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{}, + }, + }, + }) +} + +func TestAccAWSCloudFrontOriginRequestPolicy_update(t *testing.T) { + rInt := acctest.RandInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudFrontPublicKeyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCloudFrontOriginRequestPolicyConfig(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "comment", "test comment"), + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "cookies_config.0.cookie_behavior", "whitelist"), + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "cookies_config.0.cookies.0.items.0", "test"), + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "headers_config.0.header_behavior", "whitelist"), + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "headers_config.0.headers.0.items.0", "test"), + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "query_strings_config.0.query_string_behavior", "whitelist"), + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "query_strings_config.0.query_strings.0.items.0", "test"), + ), + }, + { + Config: testAccAWSCloudFrontOriginRequestPolicyConfigUpdate(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "comment", "test comment updated"), + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "cookies_config.0.cookies.0.items.0", "test2"), + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "headers_config.0.header_behavior", "none"), + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "headers_config.0.headers.#", "0"), + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "query_strings_config.0.query_strings.0.items.0", "test2"), + ), + }, + { + ResourceName: "aws_cloudfront_origin_request_policy.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{}, + }, + }, + }) +} + +func TestAccAWSCloudFrontOriginRequestPolicy_noneBehavior(t *testing.T) { + rInt := acctest.RandInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudFrontPublicKeyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCloudFrontOriginRequestPolicyConfigNoneBehavior(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "comment", "test comment"), + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "cookies_config.0.cookie_behavior", "none"), + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "cookies_config.0.cookies.#", "0"), + 
resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "headers_config.0.header_behavior", "none"), + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "headers_config.0.headers.#", "0"), + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "query_strings_config.0.query_string_behavior", "none"), + resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "query_strings_config.0.query_strings.#", "0"), + ), + }, + { + ResourceName: "aws_cloudfront_origin_request_policy.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{}, + }, + }, + }) +} + +func testAccAWSCloudFrontOriginRequestPolicyConfig(rInt int) string { + return fmt.Sprintf(` +resource "aws_cloudfront_origin_request_policy" "example" { + name = "test-policy%[1]d" + comment = "test comment" + cookies_config { + cookie_behavior = "whitelist" + cookies { + items = ["test"] + } + } + headers_config { + header_behavior = "whitelist" + headers { + items = ["test"] + } + } + query_strings_config { + query_string_behavior = "whitelist" + query_strings { + items = ["test"] + } + } +} +`, rInt) +} + +func testAccAWSCloudFrontOriginRequestPolicyConfigUpdate(rInt int) string { + return fmt.Sprintf(` +resource "aws_cloudfront_origin_request_policy" "example" { + name = "test-policy-updated%[1]d" + comment = "test comment updated" + cookies_config { + cookie_behavior = "whitelist" + cookies { + items = ["test2"] + } + } + headers_config { + header_behavior = "none" + } + query_strings_config { + query_string_behavior = "whitelist" + query_strings { + items = ["test2"] + } + } +} +`, rInt) +} + +func testAccAWSCloudFrontOriginRequestPolicyConfigNoneBehavior(rInt int) string { + return fmt.Sprintf(` +resource "aws_cloudfront_origin_request_policy" "example" { + name = "test-policy-updated%[1]d" + comment = "test comment" + cookies_config { + cookie_behavior = "none" + } + headers_config { + header_behavior = "none" + } + query_strings_config { + query_string_behavior = "none" + } +} +`, rInt) +} From 22c43f9f9d09d1158b6fe5a5c94f9972b916cd16 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Thu, 28 Jan 2021 00:03:06 -0800 Subject: [PATCH 0895/1212] wip --- ...nt_distribution_configuration_structure.go | 3 + aws/resource_aws_cloudfront_distribution.go | 8 ++ ...source_aws_cloudfront_distribution_test.go | 97 +++++++++++++++++++ 3 files changed, 108 insertions(+) diff --git a/aws/cloudfront_distribution_configuration_structure.go b/aws/cloudfront_distribution_configuration_structure.go index 71ca5c39316..40643c3bdaa 100644 --- a/aws/cloudfront_distribution_configuration_structure.go +++ b/aws/cloudfront_distribution_configuration_structure.go @@ -230,6 +230,7 @@ func expandCacheBehavior(m map[string]interface{}) *cloudfront.CacheBehavior { ForwardedValues: expandForwardedValues(m["forwarded_values"].([]interface{})[0].(map[string]interface{})), MaxTTL: aws.Int64(int64(m["max_ttl"].(int))), MinTTL: aws.Int64(int64(m["min_ttl"].(int))), + OriginRequestPolicyId: aws.String(m["origin_request_policy_id"].(string)), TargetOriginId: aws.String(m["target_origin_id"].(string)), ViewerProtocolPolicy: aws.String(m["viewer_protocol_policy"].(string)), } @@ -266,6 +267,7 @@ func flattenCloudFrontDefaultCacheBehavior(dcb *cloudfront.DefaultCacheBehavior) "viewer_protocol_policy": aws.StringValue(dcb.ViewerProtocolPolicy), "target_origin_id": aws.StringValue(dcb.TargetOriginId), "min_ttl": aws.Int64Value(dcb.MinTTL), + 
"origin_request_policy_id": aws.StringValue(dcb.OriginRequestPolicyId), } if dcb.ForwardedValues != nil { @@ -304,6 +306,7 @@ func flattenCacheBehavior(cb *cloudfront.CacheBehavior) map[string]interface{} { m["viewer_protocol_policy"] = aws.StringValue(cb.ViewerProtocolPolicy) m["target_origin_id"] = aws.StringValue(cb.TargetOriginId) m["min_ttl"] = int(aws.Int64Value(cb.MinTTL)) + m["origin_request_policy_id"] = aws.StringValue(cb.OriginRequestPolicyId) if cb.ForwardedValues != nil { m["forwarded_values"] = []interface{}{flattenForwardedValues(cb.ForwardedValues)} diff --git a/aws/resource_aws_cloudfront_distribution.go b/aws/resource_aws_cloudfront_distribution.go index 68e900ef6d3..f63cfd2745e 100644 --- a/aws/resource_aws_cloudfront_distribution.go +++ b/aws/resource_aws_cloudfront_distribution.go @@ -151,6 +151,10 @@ func resourceAwsCloudFrontDistribution() *schema.Resource { Optional: true, Default: 0, }, + "origin_request_policy_id": { + Type: schema.TypeString, + Optional: true, + }, "path_pattern": { Type: schema.TypeString, Required: true, @@ -313,6 +317,10 @@ func resourceAwsCloudFrontDistribution() *schema.Resource { Optional: true, Default: 0, }, + "origin_request_policy_id": { + Type: schema.TypeString, + Optional: true, + }, "smooth_streaming": { Type: schema.TypeBool, Optional: true, diff --git a/aws/resource_aws_cloudfront_distribution_test.go b/aws/resource_aws_cloudfront_distribution_test.go index 0f1fa7964eb..e4c703d63f3 100644 --- a/aws/resource_aws_cloudfront_distribution_test.go +++ b/aws/resource_aws_cloudfront_distribution_test.go @@ -212,6 +212,31 @@ func TestAccAWSCloudFrontDistribution_customOrigin(t *testing.T) { }) } +func TestAccAWSCloudFrontDistribution_originPolicy(t *testing.T) { + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudFrontDistributionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCloudFrontDistributionOriginRequestPolicyConfig, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("aws_cloudfront_distribution.custom_distribution", "default_cache_behavior.0.origin_request_policy_id", regexp.MustCompile("[A-z0-9]+")), + ), + }, + { + ResourceName: "aws_cloudfront_distribution.custom_distribution", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "retain_on_delete", + "wait_for_deployment", + }, + }, + }, + }) +} + // TestAccAWSCloudFrontDistribution_multiOrigin runs an // aws_cloudfront_distribution acceptance test with multiple origins. 
// @@ -1363,6 +1388,78 @@ resource "aws_cloudfront_distribution" "custom_distribution" { } `, acctest.RandInt(), logBucket, testAccAWSCloudFrontDistributionRetainConfig()) +var testAccAWSCloudFrontDistributionOriginRequestPolicyConfig = fmt.Sprintf(` +variable rand_id { + default = %[1]d +} + +resource "aws_cloudfront_origin_request_policy" "test_policy" { + name = "test-policy%[1]d" + comment = "test comment" + cookies_config { + cookie_behavior = "whitelist" + cookies { + items = ["test"] + } + } + headers_config { + header_behavior = "whitelist" + headers { + items = ["test"] + } + } + query_strings_config { + query_string_behavior = "whitelist" + query_strings { + items = ["test"] + } + } +} + +resource "aws_cloudfront_distribution" "custom_distribution" { + enabled = true + comment = "Some comment" + default_root_object = "index.html" + + default_cache_behavior { + allowed_methods = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"] + cached_methods = ["GET", "HEAD"] + target_origin_id = "myCustomOrigin" + smooth_streaming = false + + origin_request_policy_id = aws_cloudfront_origin_request_policy.test_policy.id + + forwarded_values { + query_string = false + + cookies { + forward = "all" + } + } + + viewer_protocol_policy = "allow-all" + min_ttl = 0 + default_ttl = 3600 + max_ttl = 86400 + } + + price_class = "PriceClass_200" + + restrictions { + geo_restriction { + restriction_type = "whitelist" + locations = ["US", "CA", "GB", "DE"] + } + } + + viewer_certificate { + cloudfront_default_certificate = true + } + + %[2]s +} +`, acctest.RandInt(), testAccAWSCloudFrontDistributionRetainConfig()) + var testAccAWSCloudFrontDistributionMultiOriginConfig = fmt.Sprintf(` variable rand_id { default = %d From 2713da47eb420a108d818a6c7ba04ea001062f19 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 28 Jan 2021 15:28:36 +0200 Subject: [PATCH 0896/1212] add support for schema reference --- aws/internal/service/glue/finder/finder.go | 16 ++ aws/resource_aws_glue_catalog_table.go | 157 +++++++++++++++++++- aws/resource_aws_glue_catalog_table_test.go | 15 +- 3 files changed, 169 insertions(+), 19 deletions(-) diff --git a/aws/internal/service/glue/finder/finder.go b/aws/internal/service/glue/finder/finder.go index fdceee9166b..c537f066c16 100644 --- a/aws/internal/service/glue/finder/finder.go +++ b/aws/internal/service/glue/finder/finder.go @@ -6,6 +6,22 @@ import ( tfglue "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/glue" ) +// TableByName returns the Table corresponding to the specified name. +func TableByName(conn *glue.Glue, catalogID, dbName, name string) (*glue.GetTableOutput, error) { + input := &glue.GetTableInput{ + CatalogId: aws.String(catalogID), + DatabaseName: aws.String(dbName), + Name: aws.String(name), + } + + output, err := conn.GetTable(input) + if err != nil { + return nil, err + } + + return output, nil +} + // RegistryByID returns the Registry corresponding to the specified ID. 
func RegistryByID(conn *glue.Glue, id string) (*glue.GetRegistryOutput, error) { input := &glue.GetRegistryInput{ diff --git a/aws/resource_aws_glue_catalog_table.go b/aws/resource_aws_glue_catalog_table.go index 122581d4429..7428c6d071f 100644 --- a/aws/resource_aws_glue_catalog_table.go +++ b/aws/resource_aws_glue_catalog_table.go @@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go/service/glue" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/glue/finder" ) func resourceAwsGlueCatalogTable() *schema.Resource { @@ -181,6 +182,49 @@ func resourceAwsGlueCatalogTable() *schema.Resource { }, }, }, + "schema_reference": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "schema_id": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "schema_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, + ExactlyOneOf: []string{"storage_descriptor.0.schema_reference.0.schema_id.0.schema_arn", "storage_descriptor.0.schema_reference.0.schema_id.0.schema_name"}, + }, + "schema_name": { + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: []string{"storage_descriptor.0.schema_reference.0.schema_id.0.schema_arn", "storage_descriptor.0.schema_reference.0.schema_id.0.schema_name"}, + }, + "registry_name": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "schema_version_id": { + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: []string{"storage_descriptor.0.schema_reference.0.schema_version_id", "storage_descriptor.0.schema_reference.0.schema_id"}, + }, + "schema_version_number": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 100000), + }, + }, + }, + }, "skewed_info": { Type: schema.TypeList, Optional: true, @@ -316,13 +360,7 @@ func resourceAwsGlueCatalogTableRead(d *schema.ResourceData, meta interface{}) e return err } - input := &glue.GetTableInput{ - CatalogId: aws.String(catalogID), - DatabaseName: aws.String(dbName), - Name: aws.String(name), - } - - out, err := conn.GetTable(input) + out, err := finder.TableByName(conn, catalogID, dbName, name) if err != nil { if isAWSErr(err, glue.ErrCodeEntityNotFoundException, "") { @@ -548,6 +586,10 @@ func expandGlueStorageDescriptor(l []interface{}) *glue.StorageDescriptor { storageDescriptor.StoredAsSubDirectories = aws.Bool(v.(bool)) } + if v, ok := s["schema_reference"]; ok { + storageDescriptor.SchemaReference = expandGlueTableSchemaReference(v.([]interface{})) + } + return storageDescriptor } @@ -644,6 +686,52 @@ func expandGlueSkewedInfo(l []interface{}) *glue.SkewedInfo { return skewedInfo } +func expandGlueTableSchemaReference(l []interface{}) *glue.SchemaReference { + if len(l) == 0 || l[0] == nil { + return nil + } + + s := l[0].(map[string]interface{}) + schemaRef := &glue.SchemaReference{} + + if v, ok := s["schema_version_id"].(string); ok && v != "" { + schemaRef.SchemaVersionId = aws.String(v) + } + + if v, ok := s["schema_id"]; ok { + schemaRef.SchemaId = expandGlueTableSchemaReferenceSchemaID(v.([]interface{})) + } + + if v, ok := s["schema_version_number"].(int); ok { + schemaRef.SchemaVersionNumber = aws.Int64(int64(v)) + } + + return schemaRef +} + +func expandGlueTableSchemaReferenceSchemaID(l []interface{}) *glue.SchemaId { + if len(l) == 0 
|| l[0] == nil { + return nil + } + + s := l[0].(map[string]interface{}) + schemaID := &glue.SchemaId{} + + if v, ok := s["registry_name"].(string); ok && v != "" { + schemaID.RegistryName = aws.String(v) + } + + if v, ok := s["schema_name"].(string); ok && v != "" { + schemaID.SchemaName = aws.String(v) + } + + if v, ok := s["schema_arn"].(string); ok && v != "" { + schemaID.SchemaArn = aws.String(v) + } + + return schemaID +} + func flattenGlueStorageDescriptor(s *glue.StorageDescriptor) []map[string]interface{} { if s == nil { storageDescriptors := make([]map[string]interface{}, 0) @@ -665,6 +753,7 @@ func flattenGlueStorageDescriptor(s *glue.StorageDescriptor) []map[string]interf storageDescriptor["sort_columns"] = flattenGlueOrders(s.SortColumns) storageDescriptor["parameters"] = aws.StringValueMap(s.Parameters) storageDescriptor["skewed_info"] = flattenGlueSkewedInfo(s.SkewedInfo) + storageDescriptor["schema_reference"] = flattenGlueTableSchemaReference(s.SchemaReference) storageDescriptor["stored_as_sub_directories"] = aws.BoolValue(s.StoredAsSubDirectories) storageDescriptors[0] = storageDescriptor @@ -797,3 +886,57 @@ func flattenGlueSkewedInfo(s *glue.SkewedInfo) []map[string]interface{} { return skewedInfoSlice } + +func flattenGlueTableSchemaReference(s *glue.SchemaReference) []map[string]interface{} { + if s == nil { + schemaReferenceInfoSlice := make([]map[string]interface{}, 0) + return schemaReferenceInfoSlice + } + + schemaReferenceInfoSlice := make([]map[string]interface{}, 1) + + schemaReferenceInfo := make(map[string]interface{}) + + if s.SchemaVersionId != nil { + schemaReferenceInfo["schema_version_id"] = aws.StringValue(s.SchemaVersionId) + } + + if s.SchemaVersionNumber != nil { + schemaReferenceInfo["schema_version_number"] = aws.Int64Value(s.SchemaVersionNumber) + } + + if s.SchemaId != nil { + schemaReferenceInfo["schema_id"] = flattenGlueTableSchemaReferenceSchemaID(s.SchemaId) + } + + schemaReferenceInfoSlice[0] = schemaReferenceInfo + + return schemaReferenceInfoSlice +} + +func flattenGlueTableSchemaReferenceSchemaID(s *glue.SchemaId) []map[string]interface{} { + if s == nil { + schemaIDInfoSlice := make([]map[string]interface{}, 0) + return schemaIDInfoSlice + } + + schemaIDInfoSlice := make([]map[string]interface{}, 1) + + schemaIDInfo := make(map[string]interface{}) + + if s.RegistryName != nil { + schemaIDInfo["registry_name"] = aws.StringValue(s.RegistryName) + } + + if s.SchemaArn != nil { + schemaIDInfo["schema_arn"] = aws.StringValue(s.SchemaArn) + } + + if s.SchemaName != nil { + schemaIDInfo["schema_name"] = aws.StringValue(s.SchemaName) + } + + schemaIDInfoSlice[0] = schemaIDInfo + + return schemaIDInfoSlice +} diff --git a/aws/resource_aws_glue_catalog_table_test.go b/aws/resource_aws_glue_catalog_table_test.go index 8f575fe2b74..2a3a0ee39bb 100644 --- a/aws/resource_aws_glue_catalog_table_test.go +++ b/aws/resource_aws_glue_catalog_table_test.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/glue/finder" ) func TestAccAWSGlueCatalogTable_basic(t *testing.T) { @@ -895,12 +896,7 @@ func testAccCheckGlueTableDestroy(s *terraform.State) error { return err } - input := &glue.GetTableInput{ - DatabaseName: aws.String(dbName), - CatalogId: aws.String(catalogId), - Name: aws.String(resourceName), - } - if _, err := 
conn.GetTable(input); err != nil { + if _, err := finder.TableByName(conn, catalogId, dbName, resourceName); err != nil { //Verify the error is what we want if isAWSErr(err, glue.ErrCodeEntityNotFoundException, "") { continue @@ -930,12 +926,7 @@ func testAccCheckGlueCatalogTableExists(name string) resource.TestCheckFunc { } conn := testAccProvider.Meta().(*AWSClient).glueconn - out, err := conn.GetTable(&glue.GetTableInput{ - CatalogId: aws.String(catalogId), - DatabaseName: aws.String(dbName), - Name: aws.String(resourceName), - }) - + out, err := finder.TableByName(conn, catalogId, dbName, resourceName) if err != nil { return err } From 8ab77c243d0c4164a018dd1f7b1f02977fec2a68 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 28 Jan 2021 18:55:48 +0200 Subject: [PATCH 0897/1212] tests --- aws/resource_aws_glue_catalog_table.go | 8 +- aws/resource_aws_glue_catalog_table_test.go | 142 ++++++++++++++++++++ 2 files changed, 147 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_glue_catalog_table.go b/aws/resource_aws_glue_catalog_table.go index 7428c6d071f..c58242d5b13 100644 --- a/aws/resource_aws_glue_catalog_table.go +++ b/aws/resource_aws_glue_catalog_table.go @@ -108,6 +108,7 @@ func resourceAwsGlueCatalogTable() *schema.Resource { "columns": { Type: schema.TypeList, Optional: true, + Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "comment": { @@ -206,8 +207,9 @@ func resourceAwsGlueCatalogTable() *schema.Resource { ExactlyOneOf: []string{"storage_descriptor.0.schema_reference.0.schema_id.0.schema_arn", "storage_descriptor.0.schema_reference.0.schema_id.0.schema_name"}, }, "registry_name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"storage_descriptor.0.schema_reference.0.schema_id.0.schema_arn"}, }, }, }, @@ -219,7 +221,7 @@ func resourceAwsGlueCatalogTable() *schema.Resource { }, "schema_version_number": { Type: schema.TypeInt, - Optional: true, + Required: true, ValidateFunc: validation.IntBetween(1, 100000), }, }, diff --git a/aws/resource_aws_glue_catalog_table_test.go b/aws/resource_aws_glue_catalog_table_test.go index 2a3a0ee39bb..f8f46e171e0 100644 --- a/aws/resource_aws_glue_catalog_table_test.go +++ b/aws/resource_aws_glue_catalog_table_test.go @@ -424,6 +424,55 @@ func TestAccAWSGlueCatalogTable_StorageDescriptor_SkewedInfo_EmptyConfigurationB }) } +func TestAccAWSGlueCatalogTable_StorageDescriptor_schemaReference(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_glue_catalog_table.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckGlueTableDestroy, + Steps: []resource.TestStep{ + { + Config: testAccGlueCatalogTableConfigStorageDescriptorSchemaReference(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGlueCatalogTableExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "storage_descriptor.0.schema_reference.#", "1"), + resource.TestCheckResourceAttr(resourceName, "storage_descriptor.0.schema_reference.0.schema_version_number", "1"), + resource.TestCheckResourceAttr(resourceName, "storage_descriptor.0.schema_reference.0.schema_id.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "storage_descriptor.0.schema_reference.0.schema_id.0.schema_name", "aws_glue_schema.test", "schema_name"), + resource.TestCheckResourceAttrPair(resourceName, 
"storage_descriptor.0.schema_reference.0.schema_id.0.registry_name", "aws_glue_schema.test", "registry_name"), + resource.TestCheckResourceAttr(resourceName, "storage_descriptor.0.columns.#", "2"), + ), + }, + }, + }) +} + +func TestAccAWSGlueCatalogTable_StorageDescriptor_schemaReferenceArn(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_glue_catalog_table.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckGlueTableDestroy, + Steps: []resource.TestStep{ + { + Config: testAccGlueCatalogTableConfigStorageDescriptorSchemaReferenceArn(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGlueCatalogTableExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "storage_descriptor.0.schema_reference.#", "1"), + resource.TestCheckResourceAttr(resourceName, "storage_descriptor.0.schema_reference.0.schema_version_number", "1"), + resource.TestCheckResourceAttr(resourceName, "storage_descriptor.0.schema_reference.0.schema_id.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "storage_descriptor.0.schema_reference.0.schema_id.0.schema_arn", "aws_glue_schema.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "storage_descriptor.0.columns.#", "2"), + ), + }, + }, + }) +} + func TestAccAWSGlueCatalogTable_partitionIndexesSingle(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_glue_catalog_table.test" @@ -487,6 +536,28 @@ func TestAccAWSGlueCatalogTable_partitionIndexesMultiple(t *testing.T) { }) } +func TestAccAWSGlueCatalogTable_disappears_database(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_glue_catalog_table.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckGlueTableDestroy, + Steps: []resource.TestStep{ + { + Config: testAccGlueCatalogTable_basic(rName), + Destroy: false, + Check: resource.ComposeTestCheckFunc( + testAccCheckGlueCatalogTableExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsGlueCatalogDatabase(), "aws_glue_catalog_database.test"), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func TestAccAWSGlueCatalogTable_disappears(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_glue_catalog_table.test" @@ -802,6 +873,77 @@ resource "aws_glue_catalog_table" "test" { `, rName) } +func testAccGlueCatalogTableConfigStorageDescriptorSchemaReference(rName string) string { + return fmt.Sprintf(` +resource "aws_glue_catalog_database" "test" { + name = %[1]q +} + +resource "aws_glue_registry" "test" { + registry_name = %[1]q +} + +resource "aws_glue_schema" "test" { + schema_name = %[1]q + registry_arn = aws_glue_registry.test.arn + data_format = "AVRO" + compatibility = "NONE" + schema_definition = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" +} + +resource "aws_glue_catalog_table" "test" { + database_name = aws_glue_catalog_database.test.name + name = %[1]q + + storage_descriptor { + schema_reference { + schema_id { + schema_name = aws_glue_schema.test.schema_name + registry_name = aws_glue_schema.test.registry_name + } + + schema_version_number = aws_glue_schema.test.latest_schema_version + } + } +} +`, rName) +} + +func 
testAccGlueCatalogTableConfigStorageDescriptorSchemaReferenceArn(rName string) string { + return fmt.Sprintf(` +resource "aws_glue_catalog_database" "test" { + name = %[1]q +} + +resource "aws_glue_registry" "test" { + registry_name = %[1]q +} + +resource "aws_glue_schema" "test" { + schema_name = %[1]q + registry_arn = aws_glue_registry.test.arn + data_format = "AVRO" + compatibility = "NONE" + schema_definition = "{\"type\": \"record\", \"name\": \"r1\", \"fields\": [ {\"name\": \"f1\", \"type\": \"int\"}, {\"name\": \"f2\", \"type\": \"string\"} ]}" +} + +resource "aws_glue_catalog_table" "test" { + database_name = aws_glue_catalog_database.test.name + name = %[1]q + + storage_descriptor { + schema_reference { + schema_id { + schema_arn = aws_glue_schema.test.arn + } + + schema_version_number = aws_glue_schema.test.latest_schema_version + } + } +} +`, rName) +} + func testAccGlueCatalogTableColumnParameters(rName string) string { return fmt.Sprintf(` resource "aws_glue_catalog_database" "test" { From 488f056f7e61246e4b999861c95bc88f3ead16b2 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 28 Jan 2021 19:02:57 +0200 Subject: [PATCH 0898/1212] docs --- website/docs/r/glue_catalog_table.html.markdown | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/website/docs/r/glue_catalog_table.html.markdown b/website/docs/r/glue_catalog_table.html.markdown index 47c589ad5bf..992bfdf8f89 100644 --- a/website/docs/r/glue_catalog_table.html.markdown +++ b/website/docs/r/glue_catalog_table.html.markdown @@ -123,6 +123,7 @@ The following arguments are supported: * `parameters` - (Optional) User-supplied properties in key-value form. * `skewed_info` - (Optional) Information about values that appear very frequently in a column (skewed values). * `stored_as_sub_directories` - (Optional) True if the table data is stored in subdirectories, or False if not. +* `schema_reference` - (Optional) An object that references a schema stored in the AWS Glue Schema Registry. When creating a table, you can pass an empty list of columns for the schema, and instead use a schema reference. See [Schema Reference](#schema-reference) below. ##### Column @@ -148,6 +149,17 @@ The following arguments are supported: * `skewed_column_value_location_maps` - (Optional) A list of values that appear so frequently as to be considered skewed. * `skewed_column_values` - (Optional) A map of skewed values to the columns that contain them. +##### Schema Reference + +* `schema_id` - (Optional) A structure that contains schema identity fields. Either this or the `schema_version_id` has to be provided. See [Schema ID](#schema-id) below. +* `schema_version_id` - (Optional) The unique ID assigned to a version of the schema. Either this or the `schema_id` has to be provided. +* `schema_version_number` - (Required) The version number of the schema. + +###### Schema ID + +* `schema_arn` - (Optional) The Amazon Resource Name (ARN) of the schema. One of `schema_arn` or `schema_name` has to be provided. +* `schema_name` - (Optional) The name of the schema. One of `schema_arn` or `schema_name` has to be provided. +* `registry_name` - (Optional) The name of the schema registry that contains the schema. Must be provided when `schema_name` is specified and conflicts with `schema_arn`. 
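    +
    +As a brief illustration (a sketch modeled on the acceptance test configuration added in this change; the resource names are placeholders, not documented fixtures), a table that takes its schema from a registry schema by name might look like:
    +
    +```hcl
    +resource "aws_glue_catalog_table" "example" {
    +  database_name = aws_glue_catalog_database.example.name
    +  name          = "example"
    +
    +  storage_descriptor {
    +    schema_reference {
    +      schema_id {
    +        schema_name   = aws_glue_schema.example.schema_name
    +        registry_name = aws_glue_schema.example.registry_name
    +      }
    +
    +      schema_version_number = aws_glue_schema.example.latest_schema_version
    +    }
    +  }
    +}
    +```
    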
## Attributes Reference In addition to all arguments above, the following attributes are exported: From 4173f363571802de56ad4359dcb9620c07e9a0b1 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 28 Jan 2021 19:07:00 +0200 Subject: [PATCH 0899/1212] changelog --- .changelog/17335.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/17335.txt diff --git a/.changelog/17335.txt b/.changelog/17335.txt new file mode 100644 index 00000000000..d16b387e4a5 --- /dev/null +++ b/.changelog/17335.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_glue_catalog_table: Adds support for specifying schema from schema registry. +``` \ No newline at end of file From 1767257966b36e2082735db631eb29ff8d305690 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 28 Jan 2021 19:09:16 +0200 Subject: [PATCH 0900/1212] fmt --- aws/resource_aws_glue_catalog_table_test.go | 22 ++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/aws/resource_aws_glue_catalog_table_test.go b/aws/resource_aws_glue_catalog_table_test.go index f8f46e171e0..9a058053c39 100644 --- a/aws/resource_aws_glue_catalog_table_test.go +++ b/aws/resource_aws_glue_catalog_table_test.go @@ -897,13 +897,13 @@ resource "aws_glue_catalog_table" "test" { storage_descriptor { schema_reference { - schema_id { + schema_id { schema_name = aws_glue_schema.test.schema_name - registry_name = aws_glue_schema.test.registry_name - } - - schema_version_number = aws_glue_schema.test.latest_schema_version - } + registry_name = aws_glue_schema.test.registry_name + } + + schema_version_number = aws_glue_schema.test.latest_schema_version + } } } `, rName) @@ -933,12 +933,12 @@ resource "aws_glue_catalog_table" "test" { storage_descriptor { schema_reference { - schema_id { + schema_id { schema_arn = aws_glue_schema.test.arn - } - - schema_version_number = aws_glue_schema.test.latest_schema_version - } + } + + schema_version_number = aws_glue_schema.test.latest_schema_version + } } } `, rName) From dc7e5ed025f03668c9783a16de152ae348381b29 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 28 Jan 2021 19:09:47 +0200 Subject: [PATCH 0901/1212] docs fmt --- website/docs/r/glue_catalog_table.html.markdown | 1 + 1 file changed, 1 insertion(+) diff --git a/website/docs/r/glue_catalog_table.html.markdown b/website/docs/r/glue_catalog_table.html.markdown index 992bfdf8f89..42f0e11c846 100644 --- a/website/docs/r/glue_catalog_table.html.markdown +++ b/website/docs/r/glue_catalog_table.html.markdown @@ -160,6 +160,7 @@ The following arguments are supported: * `schema_arn` - (Optional) The Amazon Resource Name (ARN) of the schema. One of `schema_arn` or `schema_name` has to be provided. * `schema_name` - (Optional) The name of the schema. One of `schema_arn` or `schema_name` has to be provided. * `registry_name` - (Optional) The name of the schema registry that contains the schema. Must be provided when `schema_name` is specified and conflicts with `schema_arn`. 
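    +
    +For completeness, a hedged sketch of the ARN-based variant (mirroring the `schema_arn` acceptance test configuration in this series; names are illustrative):
    +
    +```hcl
    +resource "aws_glue_catalog_table" "example" {
    +  database_name = aws_glue_catalog_database.example.name
    +  name          = "example"
    +
    +  storage_descriptor {
    +    schema_reference {
    +      schema_id {
    +        schema_arn = aws_glue_schema.example.arn
    +      }
    +
    +      schema_version_number = aws_glue_schema.example.latest_schema_version
    +    }
    +  }
    +}
    +```
    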
    +
     ## Attributes Reference
     
     In addition to all arguments above, the following attributes are exported:

    From bc8ac4aa1346409747a4f43eeb63aeb3c42ec55d Mon Sep 17 00:00:00 2001
    From: drfaust92
    Date: Thu, 28 Jan 2021 19:13:59 +0200
    Subject: [PATCH 0902/1212] import test

    ---
     aws/resource_aws_glue_catalog_table_test.go | 10 ++++++++++
     1 file changed, 10 insertions(+)

    diff --git a/aws/resource_aws_glue_catalog_table_test.go b/aws/resource_aws_glue_catalog_table_test.go
    index 9a058053c39..20e01a96654 100644
    --- a/aws/resource_aws_glue_catalog_table_test.go
    +++ b/aws/resource_aws_glue_catalog_table_test.go
    @@ -445,6 +445,11 @@ func TestAccAWSGlueCatalogTable_StorageDescriptor_schemaReference(t *testing.T)
     					resource.TestCheckResourceAttr(resourceName, "storage_descriptor.0.columns.#", "2"),
     				),
     			},
    +			{
    +				ResourceName:      resourceName,
    +				ImportState:       true,
    +				ImportStateVerify: true,
    +			},
     		},
     	})
     }
    @@ -469,6 +474,11 @@ func TestAccAWSGlueCatalogTable_StorageDescriptor_schemaReferenceArn(t *testing.
     					resource.TestCheckResourceAttr(resourceName, "storage_descriptor.0.columns.#", "2"),
     				),
     			},
    +			{
    +				ResourceName:      resourceName,
    +				ImportState:       true,
    +				ImportStateVerify: true,
    +			},
     		},
     	})
     }

    From 1d88048af6c00b19d5ea69f4b358272c35dd8d00 Mon Sep 17 00:00:00 2001
    From: drfaust92
    Date: Thu, 28 Jan 2021 19:21:20 +0200
    Subject: [PATCH 0903/1212] regression?

    ---
     aws/resource_aws_glue_catalog_table_test.go | 2 --
     1 file changed, 2 deletions(-)

    diff --git a/aws/resource_aws_glue_catalog_table_test.go b/aws/resource_aws_glue_catalog_table_test.go
    index 20e01a96654..2abbb94bc1d 100644
    --- a/aws/resource_aws_glue_catalog_table_test.go
    +++ b/aws/resource_aws_glue_catalog_table_test.go
    @@ -334,8 +334,6 @@ func TestAccAWSGlueCatalogTable_StorageDescriptor_EmptyConfigurationBlock(t *tes
     				Check: resource.ComposeTestCheckFunc(
     					testAccCheckGlueCatalogTableExists(resourceName),
     				),
    -				// Expect non-empty instead of panic
    -				ExpectNonEmptyPlan: true,
     			},
     		},
     	})

    From 2258cd79fe0b317b62729469aae976e4603b4d54 Mon Sep 17 00:00:00 2001
    From: Raphael Randschau
    Date: Thu, 28 Jan 2021 09:46:21 -0800
    Subject: [PATCH 0904/1212] Add support for Amazon Managed Service for Prometheus (#16882)

    * add support for AWS Managed Prometheus (preview)

    this PR adds support for the new AWS Managed Prometheus service (preview).
    
* Apply suggestions from code review * Update aws/resource_aws_prometheus_workspace_test.go * Update aws/resource_aws_prometheus_workspace.go Co-authored-by: Brian Flad --- .hashibot.hcl | 8 ++ aws/config.go | 3 + .../prometheusservice/waiter/status.go | 62 ++++++++ .../prometheusservice/waiter/waiter.go | 50 +++++++ aws/provider.go | 1 + aws/resource_aws_prometheus_workspace.go | 133 +++++++++++++++++ aws/resource_aws_prometheus_workspace_test.go | 135 ++++++++++++++++++ infrastructure/repository/labels-service.tf | 1 + website/allowed-subcategories.txt | 1 + .../guides/custom-service-endpoints.html.md | 1 + .../docs/r/prometheus_workspace.html.markdown | 43 ++++++ 11 files changed, 438 insertions(+) create mode 100644 aws/internal/service/prometheusservice/waiter/status.go create mode 100644 aws/internal/service/prometheusservice/waiter/waiter.go create mode 100644 aws/resource_aws_prometheus_workspace.go create mode 100644 aws/resource_aws_prometheus_workspace_test.go create mode 100644 website/docs/r/prometheus_workspace.html.markdown diff --git a/.hashibot.hcl b/.hashibot.hcl index 06d6e092463..47e5cfe3a26 100644 --- a/.hashibot.hcl +++ b/.hashibot.hcl @@ -474,6 +474,9 @@ behavior "regexp_issue_labeler_v2" "service_labels" { "service/pricing" = [ "aws_pricing_", ], + "service/prometheusservice" = [ + "aws_prometheus_", + ], "service/qldb" = [ "aws_qldb_", ], @@ -1332,6 +1335,11 @@ behavior "pull_request_path_labeler" "service_labels" { "**/*_pricing_*", "**/pricing_*" ] + "service/prometheusservice" = [ + "aws/internal/service/prometheus/**/*", + "**/*_prometheus_*", + "**/prometheus_*", + ] "service/qldb" = [ "aws/internal/service/qldb/**/*", "**/*_qldb_*", diff --git a/aws/config.go b/aws/config.go index 9bc18b0aa60..3463639e39d 100644 --- a/aws/config.go +++ b/aws/config.go @@ -121,6 +121,7 @@ import ( "github.com/aws/aws-sdk-go/service/personalize" "github.com/aws/aws-sdk-go/service/pinpoint" "github.com/aws/aws-sdk-go/service/pricing" + "github.com/aws/aws-sdk-go/service/prometheusservice" "github.com/aws/aws-sdk-go/service/qldb" "github.com/aws/aws-sdk-go/service/quicksight" "github.com/aws/aws-sdk-go/service/ram" @@ -320,6 +321,7 @@ type AWSClient struct { outpostsconn *outposts.Outposts partition string personalizeconn *personalize.Personalize + prometheusserviceconn *prometheusservice.PrometheusService pinpointconn *pinpoint.Pinpoint pricingconn *pricing.Pricing qldbconn *qldb.QLDB @@ -559,6 +561,7 @@ func (c *Config) Client() (interface{}, error) { outpostsconn: outposts.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["outposts"])})), partition: partition, personalizeconn: personalize.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["personalize"])})), + prometheusserviceconn: prometheusservice.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["prometheusservice"])})), pinpointconn: pinpoint.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["pinpoint"])})), pricingconn: pricing.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["pricing"])})), qldbconn: qldb.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["qldb"])})), diff --git a/aws/internal/service/prometheusservice/waiter/status.go b/aws/internal/service/prometheusservice/waiter/status.go new file mode 100644 index 00000000000..cb3df2bdcdd --- /dev/null +++ b/aws/internal/service/prometheusservice/waiter/status.go @@ -0,0 +1,62 @@ +package waiter + +import ( + "context" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/prometheusservice" + 
"github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const ( + ResourceStatusFailed = "Failed" + ResourceStatusUnknown = "Unknown" + ResourceStatusDeleted = "Deleted" +) + +// WorkspaceCreatedStatus fetches the Workspace and its Status. +func WorkspaceCreatedStatus(ctx context.Context, conn *prometheusservice.PrometheusService, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + input := &prometheusservice.DescribeWorkspaceInput{ + WorkspaceId: aws.String(id), + } + + output, err := conn.DescribeWorkspaceWithContext(ctx, input) + + if err != nil { + return output, ResourceStatusFailed, err + } + + if output == nil || output.Workspace == nil { + return output, ResourceStatusUnknown, nil + } + + return output.Workspace, aws.StringValue(output.Workspace.Status.StatusCode), nil + } +} + +// WorkspaceDeletedStatus fetches the Workspace and its Status +func WorkspaceDeletedStatus(ctx context.Context, conn *prometheusservice.PrometheusService, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + input := &prometheusservice.DescribeWorkspaceInput{ + WorkspaceId: aws.String(id), + } + + output, err := conn.DescribeWorkspaceWithContext(ctx, input) + + if tfawserr.ErrCodeEquals(err, prometheusservice.ErrCodeResourceNotFoundException) { + return output, ResourceStatusDeleted, nil + } + + if err != nil { + return output, ResourceStatusUnknown, err + } + + if output == nil || output.Workspace == nil { + return output, ResourceStatusUnknown, nil + } + + return output.Workspace, aws.StringValue(output.Workspace.Status.StatusCode), nil + } +} diff --git a/aws/internal/service/prometheusservice/waiter/waiter.go b/aws/internal/service/prometheusservice/waiter/waiter.go new file mode 100644 index 00000000000..9fbc96350d5 --- /dev/null +++ b/aws/internal/service/prometheusservice/waiter/waiter.go @@ -0,0 +1,50 @@ +package waiter + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go/service/prometheusservice" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const ( + // Maximum amount of time to wait for a Workspace to be created, updated, or deleted + WorkspaceTimeout = 5 * time.Minute +) + +// WorkspaceCreated waits for a Workspace to return "Active" +func WorkspaceCreated(ctx context.Context, conn *prometheusservice.PrometheusService, id string) (*prometheusservice.WorkspaceSummary, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{prometheusservice.WorkspaceStatusCodeCreating}, + Target: []string{prometheusservice.WorkspaceStatusCodeActive}, + Refresh: WorkspaceCreatedStatus(ctx, conn, id), + Timeout: WorkspaceTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if v, ok := outputRaw.(*prometheusservice.WorkspaceSummary); ok { + return v, err + } + + return nil, err +} + +// WorkspaceDeleted waits for a Workspace to return "Deleted" +func WorkspaceDeleted(ctx context.Context, conn *prometheusservice.PrometheusService, arn string) (*prometheusservice.WorkspaceSummary, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{prometheusservice.WorkspaceStatusCodeDeleting}, + Target: []string{ResourceStatusDeleted}, + Refresh: WorkspaceDeletedStatus(ctx, conn, arn), + Timeout: WorkspaceTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if v, ok := outputRaw.(*prometheusservice.WorkspaceSummary); ok { + return v, err + } + + return nil, err +} diff --git a/aws/provider.go 
b/aws/provider.go index 53a745319ca..c393a116ac9 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -839,6 +839,7 @@ func Provider() *schema.Provider { "aws_organizations_policy_attachment": resourceAwsOrganizationsPolicyAttachment(), "aws_organizations_organizational_unit": resourceAwsOrganizationsOrganizationalUnit(), "aws_placement_group": resourceAwsPlacementGroup(), + "aws_prometheus_workspace": resourceAwsPrometheusWorkspace(), "aws_proxy_protocol_policy": resourceAwsProxyProtocolPolicy(), "aws_qldb_ledger": resourceAwsQLDBLedger(), "aws_quicksight_group": resourceAwsQuickSightGroup(), diff --git a/aws/resource_aws_prometheus_workspace.go b/aws/resource_aws_prometheus_workspace.go new file mode 100644 index 00000000000..232ac6964cf --- /dev/null +++ b/aws/resource_aws_prometheus_workspace.go @@ -0,0 +1,133 @@ +package aws + +import ( + "context" + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/prometheusservice" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/prometheusservice/waiter" +) + +func resourceAwsPrometheusWorkspace() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceAwsPrometheusWorkspaceCreate, + ReadContext: resourceAwsPrometheusWorkspaceRead, + UpdateContext: resourceAwsPrometheusWorkspaceUpdate, + DeleteContext: resourceAwsPrometheusWorkspaceDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "alias": { + Type: schema.TypeString, + Optional: true, + }, + "prometheus_endpoint": { + Type: schema.TypeString, + Computed: true, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsPrometheusWorkspaceRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + log.Printf("[INFO] Reading AMP workspace %s", d.Id()) + conn := meta.(*AWSClient).prometheusserviceconn + + details, err := conn.DescribeWorkspaceWithContext(ctx, &prometheusservice.DescribeWorkspaceInput{ + WorkspaceId: aws.String(d.Id()), + }) + if tfawserr.ErrCodeEquals(err, prometheusservice.ErrCodeResourceNotFoundException) { + log.Printf("[WARN] Prometheus Workspace (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return diag.FromErr(fmt.Errorf("error reading Prometheus Workspace (%s): %w", d.Id(), err)) + } + + if details == nil || details.Workspace == nil { + return diag.FromErr(fmt.Errorf("error reading Prometheus Workspace (%s): empty response", d.Id())) + } + + ws := details.Workspace + + d.Set("alias", ws.Alias) + d.Set("arn", ws.Arn) + d.Set("prometheus_endpoint", ws.PrometheusEndpoint) + + return nil +} + +func resourceAwsPrometheusWorkspaceUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + log.Printf("[INFO] Updating AMP workspace %s", d.Id()) + + req := &prometheusservice.UpdateWorkspaceAliasInput{ + WorkspaceId: aws.String(d.Id()), + } + if v, ok := d.GetOk("alias"); ok { + req.Alias = aws.String(v.(string)) + } + conn := meta.(*AWSClient).prometheusserviceconn + if _, err := conn.UpdateWorkspaceAliasWithContext(ctx, req); err != nil { + return diag.FromErr(fmt.Errorf("error updating Prometheus WorkSpace (%s): %w", d.Id(), err)) + } + + return resourceAwsPrometheusWorkspaceRead(ctx, d, meta) +} + +func 
resourceAwsPrometheusWorkspaceCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + log.Printf("[INFO] Creating AMP workspace %s", d.Id()) + conn := meta.(*AWSClient).prometheusserviceconn + + req := &prometheusservice.CreateWorkspaceInput{} + if v, ok := d.GetOk("alias"); ok { + req.Alias = aws.String(v.(string)) + } + + result, err := conn.CreateWorkspaceWithContext(ctx, req) + if err != nil { + return diag.FromErr(fmt.Errorf("error creating Prometheus WorkSpace (%s): %w", d.Id(), err)) + } + d.SetId(aws.StringValue(result.WorkspaceId)) + + if _, err := waiter.WorkspaceCreated(ctx, conn, d.Id()); err != nil { + return diag.FromErr(fmt.Errorf("error waiting for Workspace (%s) to be created: %w", d.Id(), err)) + } + + return resourceAwsPrometheusWorkspaceRead(ctx, d, meta) +} + +func resourceAwsPrometheusWorkspaceDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + log.Printf("[INFO] Deleting AMP workspace %s", d.Id()) + conn := meta.(*AWSClient).prometheusserviceconn + + _, err := conn.DeleteWorkspaceWithContext(ctx, &prometheusservice.DeleteWorkspaceInput{ + WorkspaceId: aws.String(d.Id()), + }) + + if tfawserr.ErrCodeEquals(err, prometheusservice.ErrCodeResourceNotFoundException) { + return nil + } + + if err != nil { + return diag.FromErr(fmt.Errorf("error deleting Prometheus Workspace (%s): %w", d.Id(), err)) + } + + if _, err := waiter.WorkspaceDeleted(ctx, conn, d.Id()); err != nil { + return diag.FromErr(fmt.Errorf("error waiting for Prometheus Workspace (%s) to be deleted: %w", d.Id(), err)) + } + + return nil +} diff --git a/aws/resource_aws_prometheus_workspace_test.go b/aws/resource_aws_prometheus_workspace_test.go new file mode 100644 index 00000000000..7087e927517 --- /dev/null +++ b/aws/resource_aws_prometheus_workspace_test.go @@ -0,0 +1,135 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/prometheusservice" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccAWSAMPWorkspace_basic(t *testing.T) { + workspaceAlias := acctest.RandomWithPrefix("tf_amp_workspace") + resourceName := "aws_prometheus_workspace.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAMPWorkspaceDestroy, + Steps: []resource.TestStep{ + { + Config: testAWSAMPWorkspaceConfigWithAlias(workspaceAlias), + Check: resource.ComposeTestCheckFunc( + testCheckAWSAMPWorkspaceExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "alias", workspaceAlias), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAWSAMPWorkspaceConfigWithoutAlias(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, "alias", ""), + ), + }, + { + Config: testAWSAMPWorkspaceConfigWithAlias(workspaceAlias), + Check: resource.ComposeTestCheckFunc( + testCheckAWSAMPWorkspaceExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "alias", workspaceAlias), + ), + }, + }, + }) +} + +func TestAccAWSAMPWorkspace_disappears(t *testing.T) { + resourceName := "aws_prometheus_workspace.test" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: 
testAccCheckAWSAMPWorkspaceDestroy, + Steps: []resource.TestStep{ + { + Config: testAWSAMPWorkspaceConfigWithoutAlias(), + Check: resource.ComposeTestCheckFunc( + testCheckAWSAMPWorkspaceExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsPrometheusWorkspace(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testCheckAWSAMPWorkspaceExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No AMP Workspace ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).prometheusserviceconn + + req := &prometheusservice.DescribeWorkspaceInput{ + WorkspaceId: aws.String(rs.Primary.ID), + } + describe, err := conn.DescribeWorkspace(req) + if err != nil { + return err + } + if describe == nil { + return fmt.Errorf("Got nil account ?!") + } + + return nil + } +} + +func testAccCheckAWSAMPWorkspaceDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).prometheusserviceconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_prometheus_workspace" { + continue + } + + _, err := conn.DescribeWorkspace(&prometheusservice.DescribeWorkspaceInput{ + WorkspaceId: aws.String(rs.Primary.ID), + }) + if isAWSErr(err, prometheusservice.ErrCodeResourceNotFoundException, "") { + continue + } + + if err != nil { + return fmt.Errorf("error reading Prometheus WorkSpace (%s): %w", rs.Primary.ID, err) + } + } + + return nil +} + +func testAWSAMPWorkspaceConfigWithAlias(randName string) string { + return fmt.Sprintf(` +resource "aws_prometheus_workspace" "test" { + alias = %q +} +`, randName) +} + +func testAWSAMPWorkspaceConfigWithoutAlias() string { + return ` +resource "aws_prometheus_workspace" "test" { +} +` +} diff --git a/infrastructure/repository/labels-service.tf b/infrastructure/repository/labels-service.tf index 57d9e43f2ad..1a5d2ca5957 100644 --- a/infrastructure/repository/labels-service.tf +++ b/infrastructure/repository/labels-service.tf @@ -152,6 +152,7 @@ variable "service_labels" { "pinpointsmsvoice", "polly", "pricing", + "prometheusservice", "qldb", "quicksight", "ram", diff --git a/website/allowed-subcategories.txt b/website/allowed-subcategories.txt index b45b2e9eadc..ee70be760e5 100644 --- a/website/allowed-subcategories.txt +++ b/website/allowed-subcategories.txt @@ -10,6 +10,7 @@ Application Autoscaling Athena Autoscaling Autoscaling Plans +Amazon Managed Service for Prometheus (AMP) Backup Batch Budgets diff --git a/website/docs/guides/custom-service-endpoints.html.md b/website/docs/guides/custom-service-endpoints.html.md index f91175ce951..b6fa2a42320 100644 --- a/website/docs/guides/custom-service-endpoints.html.md +++ b/website/docs/guides/custom-service-endpoints.html.md @@ -164,6 +164,7 @@ The Terraform AWS Provider allows the following endpoints to be customized:
    • personalize
    • pinpoint
    • pricing
        • +prometheusservice
    
    • qldb
    • quicksight
    • ram
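    As a hedged illustration of the new entry (the endpoint URL below is a placeholder, not something defined in this change), the `prometheusservice` endpoint can be overridden like any other service in this list:

    ```hcl
    provider "aws" {
      endpoints {
        prometheusservice = "https://aps.us-west-2.amazonaws.com"
      }
    }
    ```
    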
    • diff --git a/website/docs/r/prometheus_workspace.html.markdown b/website/docs/r/prometheus_workspace.html.markdown new file mode 100644 index 00000000000..79e16ddc98f --- /dev/null +++ b/website/docs/r/prometheus_workspace.html.markdown @@ -0,0 +1,43 @@ +--- +subcategory: "Amazon Managed Service for Prometheus (AMP)" +layout: "aws" +page_title: "AWS: aws_prometheus_workspace" +description: |- + Manages an Amazon Managed Service for Prometheus (AMP) Workspace +--- + +# Resource: aws_prometheus_workspace + +Manages an Amazon Managed Service for Prometheus (AMP) Workspace. + +~> **NOTE:** This AWS functionality is in Preview and may change before General Availability release. Backwards compatibility is not guaranteed between Terraform AWS Provider releases. + +## Example Usage + +```hcl +resource "aws_prometheus_workspace" "demo" { + alias = "prometheus-test" +} +``` + +## Argument Reference + +The following argument is supported: + +* `alias` - (Optional) The alias of the prometheus workspace. See more [in AWS Docs](https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-onboard-create-workspace.html). + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `arn` - Amazon Resource Name (ARN) of the workspace. +* `id` - Identifier of the workspace +* `prometheus_endpoint` - Prometheus endpoint available for this workspace. + +## Import + +AMP Workspaces can be imported using the identifier, e.g. + +``` +$ terraform import aws_prometheus_workspace.demo ws-C6DCB907-F2D7-4D96-957B-66691F865D8B +``` From 250ee7c0fb85f723ab9bf7fa225f5f8bd377aae5 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 28 Jan 2021 12:46:40 -0500 Subject: [PATCH 0905/1212] Update CHANGELOG for #16882 --- .changelog/16882.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/16882.txt diff --git a/.changelog/16882.txt b/.changelog/16882.txt new file mode 100644 index 00000000000..6a9da3fa299 --- /dev/null +++ b/.changelog/16882.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_prometheus_workspace +``` From 7fb8fa0113a2f7908db7ce1c9ed262c56a9f08f4 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 28 Jan 2021 10:20:00 -0800 Subject: [PATCH 0906/1212] Update .changelog/17301.txt Co-authored-by: Brian Flad --- .changelog/17301.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changelog/17301.txt b/.changelog/17301.txt index 044dde175d4..484efdaea3b 100644 --- a/.changelog/17301.txt +++ b/.changelog/17301.txt @@ -1,3 +1,3 @@ ```release-note:enhancement -resource/aws_elasticache_replication_group: Allow changing `cluster_mode.replica_count` without re-creatiion +resource/aws_elasticache_replication_group: Allow changing `cluster_mode.replica_count` without re-creation ``` From c52d3a44ca028d6916ae4cc3851d9ed71175e639 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 28 Jan 2021 18:21:24 +0000 Subject: [PATCH 0907/1212] Update CHANGELOG.md for #17320 --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f7e774615df..248404a40bd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,10 +8,13 @@ FEATURES: * **New Data Source:** `aws_imagebuilder_image` (https://github.com/hashicorp/terraform-provider-aws/issues/16710) * **New Resource:** `aws_imagebuilder_image` (https://github.com/hashicorp/terraform-provider-aws/issues/16710) +* **New Resource:** `aws_prometheus_workspace` (https://github.com/hashicorp/terraform-provider-aws/issues/16882) ENHANCEMENTS: +* 
data-source/aws_elasticache_replication_group: Add `multi_az_enabled` argument (https://github.com/hashicorp/terraform-provider-aws/issues/17320) * data-source/aws_vpc_peering_connection: Add `cidr_block_set` and `peer_cidr_block_set` attributes (https://github.com/hashicorp/terraform-provider-aws/issues/13420) +* resource/aws_elasticache_replication_group: Add `multi_az_enabled` argument (https://github.com/hashicorp/terraform-provider-aws/issues/17320) BUG FIXES: From a7b280e21dd73bd0bf6ce65796385685179b057f Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 28 Jan 2021 14:09:54 -0500 Subject: [PATCH 0908/1212] docs/contributing: Add Versioned Resources section to Provider Design page (#17285) * docs/contributing: Add Versioned Resources section to Provider Design page Reference: https://github.com/hashicorp/terraform-provider-aws/pull/17034#issuecomment-766193857 * Apply suggestions from code review Co-authored-by: Dirk Avery <31492422+YakDriver@users.noreply.github.com> Co-authored-by: Dirk Avery <31492422+YakDriver@users.noreply.github.com> --- docs/contributing/provider-design.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/docs/contributing/provider-design.md b/docs/contributing/provider-design.md index 2a183901aea..9585d265768 100644 --- a/docs/contributing/provider-design.md +++ b/docs/contributing/provider-design.md @@ -12,6 +12,7 @@ The Terraform AWS Provider follows the guidelines established in the [HashiCorp - [IAM Resource-Based Policy Resources](#iam-resource-based-policy-resources) - [Managing Resource Running State](#managing-resource-running-state) - [Task Execution and Waiter Resources](#task-execution-and-waiter-resources) + - [Versioned Resources](#versioned-resources) - [Other Considerations](#other-considerations) - [AWS Credential Exfiltration](#aws-credential-exfiltration) @@ -118,6 +119,22 @@ In this situation, provider developers should create a separate resource represe For a related consideration, see the [Managing Resource Running State section](#managing-resource-running-state). +### Versioned Resources + +AWS supports having multiple versions of some components. Examples of this include: + +* ECS Task Definitions +* Lambda Functions +* Secrets Manager Secrets + +In general, provider developers should create a separate resource to represent a single version. For example, the provider has both the `aws_secretsmanager_secret` and `aws_secretsmanager_secret_version` resources. However, in some cases, developers should handle versioning in the main resource. + +In deciding when to create a separate resource, follow these guidelines: + +* If AWS necessarily creates a version when you make a new AWS component, include version handling in the same Terraform resource. Creating an AWS component with one Terraform resource and later using a different resource for updates is confusing. +* If the AWS service API allows deleting versions and practitioners will want to delete versions, provider developers should implement a separate version resource. +* If the API only supports publishing new versions, either method is acceptable, however most current implementations are self-contained. Terraform's current configuration language does not natively support triggering resource updates or recreation across resources without a state value change. This can make the implementation more difficult for practitioners without special resource and configuration workarounds, such as a `triggers` attribute. 
If this changes in the future, then this guidance may be updated towards separate resources, following the [Task Execution and Waiter Resources](#task-execution-and-waiter-resources) guidance. + ## Other Considerations ### AWS Credential Exfiltration From 8468b1af12ee57817febfcc05435cd587b16fb52 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 28 Jan 2021 19:11:29 +0000 Subject: [PATCH 0909/1212] Update CHANGELOG.md for #17285 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 248404a40bd..4581fcaa40f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ ENHANCEMENTS: * data-source/aws_elasticache_replication_group: Add `multi_az_enabled` argument (https://github.com/hashicorp/terraform-provider-aws/issues/17320) * data-source/aws_vpc_peering_connection: Add `cidr_block_set` and `peer_cidr_block_set` attributes (https://github.com/hashicorp/terraform-provider-aws/issues/13420) +* resource/aws_codeartifact_domain: Make `encryption_key` optional (https://github.com/hashicorp/terraform-provider-aws/issues/17262) * resource/aws_elasticache_replication_group: Add `multi_az_enabled` argument (https://github.com/hashicorp/terraform-provider-aws/issues/17320) BUG FIXES: From dab088d0ba3180abe6c519c7a3033227100b9661 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Thu, 28 Jan 2021 11:12:53 -0800 Subject: [PATCH 0910/1212] Add cloudfront_origin_request_policy data source --- ...ce_aws_cloudfront_origin_request_policy.go | 154 ++++++++++++++++++ ...s_cloudfront_origin_request_policy_test.go | 64 ++++++++ aws/provider.go | 1 + ...dfront_origin_request_policy.html.markdown | 54 ++++++ ...dfront_origin_request_policy.html.markdown | 75 +++++++++ 5 files changed, 348 insertions(+) create mode 100644 aws/data_source_aws_cloudfront_origin_request_policy.go create mode 100644 aws/data_source_aws_cloudfront_origin_request_policy_test.go create mode 100644 website/docs/d/cloudfront_origin_request_policy.html.markdown create mode 100644 website/docs/r/cloudfront_origin_request_policy.html.markdown diff --git a/aws/data_source_aws_cloudfront_origin_request_policy.go b/aws/data_source_aws_cloudfront_origin_request_policy.go new file mode 100644 index 00000000000..0e2a4366076 --- /dev/null +++ b/aws/data_source_aws_cloudfront_origin_request_policy.go @@ -0,0 +1,154 @@ +package aws + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudfront" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func dataSourceAwsCloudFrontOriginRequestPolicy() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsCloudFrontOriginRequestPolicyRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + }, + "id": { + Type: schema.TypeString, + Optional: true, + }, + "comment": { + Type: schema.TypeString, + Computed: true, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + }, + "cookies_config": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cookie_behavior": { + Computed: true, + Type: schema.TypeString, + }, + "cookies": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "items": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + }, + }, + "headers_config": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ 
+ "header_behavior": { + Computed: true, + Type: schema.TypeString, + }, + "headers": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "items": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + }, + }, + "query_strings_config": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "query_string_behavior": { + Type: schema.TypeString, + Computed: true, + }, + "query_strings": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "items": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceAwsCloudFrontOriginRequestPolicyFindByName(d *schema.ResourceData, conn *cloudfront.CloudFront) error { + var originRequestPolicy *cloudfront.OriginRequestPolicy + request := &cloudfront.ListOriginRequestPoliciesInput{} + resp, err := conn.ListOriginRequestPolicies(request) + if err != nil { + return err + } + + for _, policySummary := range resp.OriginRequestPolicyList.Items { + if *policySummary.OriginRequestPolicy.OriginRequestPolicyConfig.Name == d.Get("name").(string) { + originRequestPolicy = policySummary.OriginRequestPolicy + break + } + } + + if originRequestPolicy != nil { + d.SetId(aws.StringValue(originRequestPolicy.Id)) + } + return nil +} + +func dataSourceAwsCloudFrontOriginRequestPolicyRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudfrontconn + + if d.Id() == "" { + if err := dataSourceAwsCloudFrontOriginRequestPolicyFindByName(d, conn); err != nil { + return err + } + } + + if d.Id() != "" { + request := &cloudfront.GetOriginRequestPolicyInput{ + Id: aws.String(d.Id()), + } + + resp, err := conn.GetOriginRequestPolicy(request) + if err != nil { + return err + } + d.Set("etag", aws.StringValue(resp.ETag)) + + flattenCloudFrontOriginRequestPolicy(d, resp.OriginRequestPolicy.OriginRequestPolicyConfig) + } + + return nil +} diff --git a/aws/data_source_aws_cloudfront_origin_request_policy_test.go b/aws/data_source_aws_cloudfront_origin_request_policy_test.go new file mode 100644 index 00000000000..1ca5a7cc9cc --- /dev/null +++ b/aws/data_source_aws_cloudfront_origin_request_policy_test.go @@ -0,0 +1,64 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/service/cloudfront" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccAWSCloudFrontDataSourceOriginRequestPolicy_basic(t *testing.T) { + rInt := acctest.RandInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudFrontPublicKeyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCloudFrontDataSourceOriginRequestPolicyConfig(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.aws_cloudfront_origin_request_policy.example", "comment", "test comment"), + resource.TestCheckResourceAttr("data.aws_cloudfront_origin_request_policy.example", "cookies_config.0.cookie_behavior", "whitelist"), + resource.TestCheckResourceAttr("data.aws_cloudfront_origin_request_policy.example", "cookies_config.0.cookies.0.items.0", "test"), + 
resource.TestCheckResourceAttr("data.aws_cloudfront_origin_request_policy.example", "headers_config.0.header_behavior", "whitelist"), + resource.TestCheckResourceAttr("data.aws_cloudfront_origin_request_policy.example", "headers_config.0.headers.0.items.0", "test"), + resource.TestCheckResourceAttr("data.aws_cloudfront_origin_request_policy.example", "query_strings_config.0.query_string_behavior", "whitelist"), + resource.TestCheckResourceAttr("data.aws_cloudfront_origin_request_policy.example", "query_strings_config.0.query_strings.0.items.0", "test"), + ), + }, + }, + }) +} +func testAccAWSCloudFrontDataSourceOriginRequestPolicyConfig(rInt int) string { + return fmt.Sprintf(` +data "aws_cloudfront_origin_request_policy" "example" { + name = aws_cloudfront_origin_request_policy.example.name +} + +resource "aws_cloudfront_origin_request_policy" "example" { + name = "test-policy%[1]d" + comment = "test comment" + cookies_config { + cookie_behavior = "whitelist" + cookies { + items = ["test"] + } + } + headers_config { + header_behavior = "whitelist" + headers { + items = ["test"] + } + } + query_strings_config { + query_string_behavior = "whitelist" + query_strings { + items = ["test"] + } + } +} +`, rInt) +} diff --git a/aws/provider.go b/aws/provider.go index 9aec8ccd060..b03e3f783a5 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -190,6 +190,7 @@ func Provider() *schema.Provider { "aws_cloudformation_export": dataSourceAwsCloudFormationExport(), "aws_cloudformation_stack": dataSourceAwsCloudFormationStack(), "aws_cloudfront_distribution": dataSourceAwsCloudFrontDistribution(), + "aws_cloudfront_origin_request_policy": dataSourceAwsCloudFrontOriginRequestPolicy(), "aws_cloudhsm_v2_cluster": dataSourceCloudHsmV2Cluster(), "aws_cloudtrail_service_account": dataSourceAwsCloudTrailServiceAccount(), "aws_cloudwatch_log_group": dataSourceAwsCloudwatchLogGroup(), diff --git a/website/docs/d/cloudfront_origin_request_policy.html.markdown b/website/docs/d/cloudfront_origin_request_policy.html.markdown new file mode 100644 index 00000000000..0a152a1ca9f --- /dev/null +++ b/website/docs/d/cloudfront_origin_request_policy.html.markdown @@ -0,0 +1,54 @@ +--- +subcategory: "CloudFront" +layout: "aws" +page_title: "AWS: aws_cloudfront_origin_request_policy" +description: |- + Determines the values that CloudFront includes in requests that it sends to the origin. +--- + +# Data Source: aws_cloudfront_origin_request_policy + +## Example Usage + +The following example below creates a CloudFront origin request policy. + +```hcl +data "aws_cloudfront_origin_request_policy" "example" { + name = "example-policy" +} + +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - A unique name to identify the origin request policy. +* `id` - The identifier for the origin request policy. + +## Attributes Reference + +* `comment` - A comment to describe the origin request policy. +* `cookies_config` - An object that determines whether any cookies in viewer requests (and if so, which cookies) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See [Cookies Config](#cookies-config) for more information. +* `etag` - The current version of the origin request policy. +* `headers_config` - An object that determines whether any HTTP headers (and if so, which headers) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. 
See [Headers Config](#headers-config) for more information.
+* `query_strings_config` - An object that determines whether any URL query strings in viewer requests (and if so, which query strings) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See [Query Strings Config](#query-strings-config) for more information.
+
+### Cookies Config
+
+* `cookie_behavior` - Determines whether any cookies in viewer requests are included in the origin request key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`, `all`.
+* `cookies` - An object that contains a list of cookie names. See [Items](#items) for more information.
+
+### Headers Config
+
+* `header_behavior` - Determines whether any HTTP headers are included in the origin request key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`, `allViewer`, `allViewerAndWhitelistCloudFront`.
+* `headers` - An object that contains a list of header names. See [Items](#items) for more information.
+
+### Query Strings Config
+
+* `query_string_behavior` - Determines whether any URL query strings in viewer requests are included in the origin request key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`, `all`.
+* `query_strings` - An object that contains a list of query string names. See [Items](#items) for more information.
+
+### Items
+
+* `items` - A list of item names (cookies, headers, or query strings).
diff --git a/website/docs/r/cloudfront_origin_request_policy.html.markdown b/website/docs/r/cloudfront_origin_request_policy.html.markdown
new file mode 100644
index 00000000000..bd7069cacbe
--- /dev/null
+++ b/website/docs/r/cloudfront_origin_request_policy.html.markdown
@@ -0,0 +1,75 @@
+---
+subcategory: "CloudFront"
+layout: "aws"
+page_title: "AWS: aws_cloudfront_origin_request_policy"
+description: |-
+  Provides a CloudFront origin request policy, which determines the values that CloudFront includes in requests that it sends to the origin.
+---
+
+# Resource: aws_cloudfront_origin_request_policy
+
+## Example Usage
+
+The following example creates a CloudFront origin request policy.
+
+```hcl
+resource "aws_cloudfront_origin_request_policy" "example" {
+  name    = "example-policy"
+  comment = "example comment"
+  cookies_config {
+    cookie_behavior = "whitelist"
+    cookies {
+      items = ["example"]
+    }
+  }
+  headers_config {
+    header_behavior = "whitelist"
+    headers {
+      items = ["example"]
+    }
+  }
+  query_strings_config {
+    query_string_behavior = "whitelist"
+    query_strings {
+      items = ["example"]
+    }
+  }
+}
+
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) A unique name to identify the origin request policy.
+* `comment` - (Optional) A comment to describe the origin request policy.
+* `cookies_config` - (Required) An object that determines whether any cookies in viewer requests (and if so, which cookies) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See [Cookies Config](#cookies-config) for more information.
+* `headers_config` - (Required) An object that determines whether any HTTP headers (and if so, which headers) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See [Headers Config](#headers-config) for more information.
+* `query_strings_config` - (Required) An object that determines whether any URL query strings in viewer requests (and if so, which query strings) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See [Query Strings Config](#query-strings-config) for more information.
+
+### Cookies Config
+
+* `cookie_behavior` - (Required) Determines whether any cookies in viewer requests are included in the origin request key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`, `all`.
+* `cookies` - (Optional) An object that contains a list of cookie names. See [Items](#items) for more information.
+
+### Headers Config
+
+* `header_behavior` - (Required) Determines whether any HTTP headers are included in the origin request key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`, `allViewer`, `allViewerAndWhitelistCloudFront`.
+* `headers` - (Optional) An object that contains a list of header names. See [Items](#items) for more information.
+
+### Query Strings Config
+
+* `query_string_behavior` - (Required) Determines whether any URL query strings in viewer requests are included in the origin request key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`, `all`.
+* `query_strings` - (Optional) An object that contains a list of query string names. See [Items](#items) for more information.
+
+### Items
+
+* `items` - (Required) A list of item names (cookies, headers, or query strings).
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `etag` - The current version of the origin request policy.
+* `id` - The identifier for the origin request policy. 
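A policy on its own has no effect until a distribution's cache behavior references it through `origin_request_policy_id`, which is how the acceptance tests later in this series exercise it. The following is a minimal sketch rather than part of the patch: it assumes the `aws_cloudfront_origin_request_policy.example` from the usage example above plus an illustrative `aws_cloudfront_cache_policy.example`, and the origin details are placeholders.

```hcl
# Sketch only: resource names and origin settings are illustrative.
resource "aws_cloudfront_distribution" "example" {
  enabled = true

  origin {
    domain_name = "www.example.com"
    origin_id   = "myCustomOrigin"

    custom_origin_config {
      http_port              = 80
      https_port             = 443
      origin_protocol_policy = "http-only"
      origin_ssl_protocols   = ["TLSv1.2"]
    }
  }

  default_cache_behavior {
    allowed_methods        = ["GET", "HEAD"]
    cached_methods         = ["GET", "HEAD"]
    target_origin_id       = "myCustomOrigin"
    viewer_protocol_policy = "allow-all"

    # The origin request policy controls what CloudFront forwards to the
    # origin; the cache policy, not inline TTLs, controls what is cached.
    origin_request_policy_id = aws_cloudfront_origin_request_policy.example.id
    cache_policy_id          = aws_cloudfront_cache_policy.example.id
  }

  restrictions {
    geo_restriction {
      restriction_type = "none"
    }
  }

  viewer_certificate {
    cloudfront_default_certificate = true
  }
}
```

Pairing `origin_request_policy_id` with a `cache_policy_id` instead of `forwarded_values` and inline TTL settings matches the test configuration fix that appears later in this series.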
From 07e78a40d7a1243ae4586af2e7ab37910f5633a0 Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Thu, 28 Jan 2021 11:20:40 -0800 Subject: [PATCH 0911/1212] correct issue link formatting --- scripts/release-note.tmpl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/release-note.tmpl b/scripts/release-note.tmpl index 87c3ed61de9..04a1c906810 100644 --- a/scripts/release-note.tmpl +++ b/scripts/release-note.tmpl @@ -1,11 +1,11 @@ {{- define "note" -}} {{- if eq "new-resource" .Type -}} -* **New Resource:** `{{.Body}}` (https://github.com/hashicorp/terraform-provider-aws/issues/{{- .Issue -}}) +* **New Resource:** `{{.Body}}` ([#{{- .Issue -}}](https://github.com/hashicorp/terraform-provider-aws/issues/{{- .Issue -}})) {{- else if eq "new-data-source" .Type -}} -* **New Data Source:** `{{.Body}}` (https://github.com/hashicorp/terraform-provider-aws/issues/{{- .Issue -}}) +* **New Data Source:** `{{.Body}}` ([#{{- .Issue -}}](https://github.com/hashicorp/terraform-provider-aws/issues/{{- .Issue -}})) {{- else if eq "new-guide" .Type -}} -* **New Guide:** `{{.Body}}` (https://github.com/hashicorp/terraform-provider-aws/issues/{{- .Issue -}}) +* **New Guide:** `{{.Body}}` ([#{{- .Issue -}}](https://github.com/hashicorp/terraform-provider-aws/issues/{{- .Issue -}})) {{- else -}} -* {{.Body}} (https://github.com/hashicorp/terraform-provider-aws/issues/{{- .Issue -}}) +* {{.Body}} ([#{{- .Issue -}}](https://github.com/hashicorp/terraform-provider-aws/issues/{{- .Issue -}})) {{- end -}} {{- end -}} From b0deda00fc407a5a6825cd7b2c23eb44c10e9062 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 28 Jan 2021 19:28:43 +0000 Subject: [PATCH 0912/1212] Update CHANGELOG.md for #17339 --- CHANGELOG.md | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4581fcaa40f..a42f4c45112 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,28 +2,28 @@ NOTES: -* data-source/aws_route53_zone: The Route 53 `ListResourceRecordSets` API call has been implemented to support the `name_servers` attribute for private Hosted Zones similar to the resource implementation. Environments using restrictive IAM permissions may require updates. (https://github.com/hashicorp/terraform-provider-aws/issues/17002) +* data-source/aws_route53_zone: The Route 53 `ListResourceRecordSets` API call has been implemented to support the `name_servers` attribute for private Hosted Zones similar to the resource implementation. Environments using restrictive IAM permissions may require updates. 
([#17002](https://github.com/hashicorp/terraform-provider-aws/issues/17002)) FEATURES: -* **New Data Source:** `aws_imagebuilder_image` (https://github.com/hashicorp/terraform-provider-aws/issues/16710) -* **New Resource:** `aws_imagebuilder_image` (https://github.com/hashicorp/terraform-provider-aws/issues/16710) -* **New Resource:** `aws_prometheus_workspace` (https://github.com/hashicorp/terraform-provider-aws/issues/16882) +* **New Data Source:** `aws_imagebuilder_image` ([#16710](https://github.com/hashicorp/terraform-provider-aws/issues/16710)) +* **New Resource:** `aws_imagebuilder_image` ([#16710](https://github.com/hashicorp/terraform-provider-aws/issues/16710)) +* **New Resource:** `aws_prometheus_workspace` ([#16882](https://github.com/hashicorp/terraform-provider-aws/issues/16882)) ENHANCEMENTS: -* data-source/aws_elasticache_replication_group: Add `multi_az_enabled` argument (https://github.com/hashicorp/terraform-provider-aws/issues/17320) -* data-source/aws_vpc_peering_connection: Add `cidr_block_set` and `peer_cidr_block_set` attributes (https://github.com/hashicorp/terraform-provider-aws/issues/13420) -* resource/aws_codeartifact_domain: Make `encryption_key` optional (https://github.com/hashicorp/terraform-provider-aws/issues/17262) -* resource/aws_elasticache_replication_group: Add `multi_az_enabled` argument (https://github.com/hashicorp/terraform-provider-aws/issues/17320) +* data-source/aws_elasticache_replication_group: Add `multi_az_enabled` argument ([#17320](https://github.com/hashicorp/terraform-provider-aws/issues/17320)) +* data-source/aws_vpc_peering_connection: Add `cidr_block_set` and `peer_cidr_block_set` attributes ([#13420](https://github.com/hashicorp/terraform-provider-aws/issues/13420)) +* resource/aws_codeartifact_domain: Make `encryption_key` optional ([#17262](https://github.com/hashicorp/terraform-provider-aws/issues/17262)) +* resource/aws_elasticache_replication_group: Add `multi_az_enabled` argument ([#17320](https://github.com/hashicorp/terraform-provider-aws/issues/17320)) BUG FIXES: -* data-source/aws_elb_hosted_zone_id: Correct values for `cn-north-1` and `cn-northwest-1` regions (https://github.com/hashicorp/terraform-provider-aws/issues/17226) -* data-source/aws_lb_listener: Prevent error when retrieving a listener whose default action contains weighted target groups (https://github.com/hashicorp/terraform-provider-aws/issues/17238) -* data-source/aws_route53_zone: Ensure `name_servers` is populated for private Hosted Zones (https://github.com/hashicorp/terraform-provider-aws/issues/17002) -* resource/aws_ebs_volume: Allow both `size` and `snapshot_id` attributes to be specified (https://github.com/hashicorp/terraform-provider-aws/issues/17243) -* resource/aws_elasticache_replication_group: Correctly update computed `member_clusters` values (https://github.com/hashicorp/terraform-provider-aws/issues/17201) +* data-source/aws_elb_hosted_zone_id: Correct values for `cn-north-1` and `cn-northwest-1` regions ([#17226](https://github.com/hashicorp/terraform-provider-aws/issues/17226)) +* data-source/aws_lb_listener: Prevent error when retrieving a listener whose default action contains weighted target groups ([#17238](https://github.com/hashicorp/terraform-provider-aws/issues/17238)) +* data-source/aws_route53_zone: Ensure `name_servers` is populated for private Hosted Zones ([#17002](https://github.com/hashicorp/terraform-provider-aws/issues/17002)) +* resource/aws_ebs_volume: Allow both `size` and `snapshot_id` attributes to be specified 
([#17243](https://github.com/hashicorp/terraform-provider-aws/issues/17243)) +* resource/aws_elasticache_replication_group: Correctly update computed `member_clusters` values ([#17201](https://github.com/hashicorp/terraform-provider-aws/issues/17201)) ## 3.25.0 (January 22, 2021) From 0e8e5c338d457507ef80e05fabfb4409d137ae56 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Thu, 28 Jan 2021 12:06:59 -0800 Subject: [PATCH 0913/1212] Fix cloudfront_distribution request policy tests --- ...nt_distribution_configuration_structure.go | 1 + ...source_aws_cloudfront_distribution_test.go | 59 +++++++++++++++++-- 2 files changed, 54 insertions(+), 6 deletions(-) diff --git a/aws/cloudfront_distribution_configuration_structure.go b/aws/cloudfront_distribution_configuration_structure.go index 40643c3bdaa..6473b5ccef7 100644 --- a/aws/cloudfront_distribution_configuration_structure.go +++ b/aws/cloudfront_distribution_configuration_structure.go @@ -195,6 +195,7 @@ func expandCloudFrontDefaultCacheBehavior(m map[string]interface{}) *cloudfront. ForwardedValues: expandForwardedValues(m["forwarded_values"].([]interface{})[0].(map[string]interface{})), MaxTTL: aws.Int64(int64(m["max_ttl"].(int))), MinTTL: aws.Int64(int64(m["min_ttl"].(int))), + OriginRequestPolicyId: aws.String(m["origin_request_policy_id"].(string)), TargetOriginId: aws.String(m["target_origin_id"].(string)), ViewerProtocolPolicy: aws.String(m["viewer_protocol_policy"].(string)), } diff --git a/aws/resource_aws_cloudfront_distribution_test.go b/aws/resource_aws_cloudfront_distribution_test.go index e4c703d63f3..a90692322cb 100644 --- a/aws/resource_aws_cloudfront_distribution_test.go +++ b/aws/resource_aws_cloudfront_distribution_test.go @@ -1393,6 +1393,37 @@ variable rand_id { default = %[1]d } +# log bucket +%[2]s + +resource "aws_cloudfront_cache_policy" "example" { + name = "test-policy%[1]d" + comment = "test comment" + default_ttl = 50 + max_ttl = 100 + min_ttl = 1 + parameters_in_cache_key_and_forwarded_to_origin { + cookies_config { + cookie_behavior = "whitelist" + cookies { + items = ["test"] + } + } + headers_config { + header_behavior = "whitelist" + headers { + items = ["test"] + } + } + query_strings_config { + query_string_behavior = "whitelist" + query_strings { + items = ["test"] + } + } + } +} + resource "aws_cloudfront_origin_request_policy" "test_policy" { name = "test-policy%[1]d" comment = "test comment" @@ -1417,10 +1448,30 @@ resource "aws_cloudfront_origin_request_policy" "test_policy" { } resource "aws_cloudfront_distribution" "custom_distribution" { + origin { + domain_name = "www.example.com" + origin_id = "myCustomOrigin" + + custom_origin_config { + http_port = 80 + https_port = 443 + origin_protocol_policy = "http-only" + origin_ssl_protocols = ["SSLv3", "TLSv1"] + origin_read_timeout = 30 + origin_keepalive_timeout = 5 + } + } + enabled = true comment = "Some comment" default_root_object = "index.html" + logging_config { + include_cookies = false + bucket = "${aws_s3_bucket.s3_bucket_logs.id}.s3.amazonaws.com" + prefix = "myprefix" + } + default_cache_behavior { allowed_methods = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"] cached_methods = ["GET", "HEAD"] @@ -1428,6 +1479,7 @@ resource "aws_cloudfront_distribution" "custom_distribution" { smooth_streaming = false origin_request_policy_id = aws_cloudfront_origin_request_policy.test_policy.id + cache_policy_id = aws_cloudfront_cache_policy.example.id forwarded_values { query_string = false @@ -1438,9 +1490,6 @@ resource 
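+# The aws_cloudfront_cache_policy added below now supplies the TTLs that were
+# previously set inline on the distribution's default cache behavior.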
"aws_cloudfront_distribution" "custom_distribution" { } viewer_protocol_policy = "allow-all" - min_ttl = 0 - default_ttl = 3600 - max_ttl = 86400 } price_class = "PriceClass_200" @@ -1455,10 +1504,8 @@ resource "aws_cloudfront_distribution" "custom_distribution" { viewer_certificate { cloudfront_default_certificate = true } - - %[2]s } -`, acctest.RandInt(), testAccAWSCloudFrontDistributionRetainConfig()) +`, acctest.RandInt(), logBucket, testAccAWSCloudFrontDistributionRetainConfig()) var testAccAWSCloudFrontDistributionMultiOriginConfig = fmt.Sprintf(` variable rand_id { From 2201670d8ef4e94622313009bcfbf8f9a667c3a9 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Thu, 28 Jan 2021 12:35:35 -0800 Subject: [PATCH 0914/1212] Fix formatting --- ...dfront_distribution_configuration_structure_test.go | 1 + aws/cloudfront_origin_request_policy_structure.go | 10 +++------- aws/resource_aws_cloudfront_distribution_test.go | 4 ++-- 3 files changed, 6 insertions(+), 9 deletions(-) diff --git a/aws/cloudfront_distribution_configuration_structure_test.go b/aws/cloudfront_distribution_configuration_structure_test.go index c3e38114616..61659ef7577 100644 --- a/aws/cloudfront_distribution_configuration_structure_test.go +++ b/aws/cloudfront_distribution_configuration_structure_test.go @@ -21,6 +21,7 @@ func defaultCacheBehaviorConf() map[string]interface{} { "smooth_streaming": false, "default_ttl": 86400, "allowed_methods": allowedMethodsConf(), + "origin_request_policy_id": "ABCD1234", "cached_methods": cachedMethodsConf(), "compress": true, "field_level_encryption_id": "", diff --git a/aws/cloudfront_origin_request_policy_structure.go b/aws/cloudfront_origin_request_policy_structure.go index 0c647dcada3..082656903b4 100644 --- a/aws/cloudfront_origin_request_policy_structure.go +++ b/aws/cloudfront_origin_request_policy_structure.go @@ -20,9 +20,7 @@ func expandCloudFrontOriginRequestPolicyCookieNames(cookieNamesFlat map[string]i } func expandCloudFrontOriginRequestPolicyCookiesConfig(cookiesConfigFlat map[string]interface{}) *cloudfront.OriginRequestPolicyCookiesConfig { - cookies := &cloudfront.CookieNames{ - Quantity: aws.Int64(int64(0)), - } + var cookies *cloudfront.CookieNames if cookiesFlat, ok := cookiesConfigFlat["cookies"].([]interface{}); ok && len(cookiesFlat) == 1 { cookies = expandCloudFrontOriginRequestPolicyCookieNames(cookiesFlat[0].(map[string]interface{})) @@ -52,7 +50,7 @@ func expandCloudFrontOriginRequestPolicyHeaders(headerNamesFlat map[string]inter } func expandCloudFrontOriginRequestPolicyHeadersConfig(headersConfigFlat map[string]interface{}) *cloudfront.OriginRequestPolicyHeadersConfig { - headers := &cloudfront.Headers{} + var headers *cloudfront.Headers if headersFlat, ok := headersConfigFlat["headers"].([]interface{}); ok && len(headersFlat) == 1 && headersConfigFlat["header_behavior"] != "none" { headers = expandCloudFrontOriginRequestPolicyHeaders(headersFlat[0].(map[string]interface{})) @@ -82,9 +80,7 @@ func expandCloudFrontOriginRequestPolicyQueryStringNames(queryStringNamesFlat ma } func expandCloudFrontOriginRequestPolicyQueryStringsConfig(queryStringConfigFlat map[string]interface{}) *cloudfront.OriginRequestPolicyQueryStringsConfig { - queryStrings := &cloudfront.QueryStringNames{ - Quantity: aws.Int64(int64(0)), - } + var queryStrings *cloudfront.QueryStringNames if queryStringFlat, ok := queryStringConfigFlat["query_strings"].([]interface{}); ok && len(queryStringFlat) == 1 { queryStrings = 
expandCloudFrontOriginRequestPolicyQueryStringNames(queryStringFlat[0].(map[string]interface{})) diff --git a/aws/resource_aws_cloudfront_distribution_test.go b/aws/resource_aws_cloudfront_distribution_test.go index a90692322cb..29badfe2ce8 100644 --- a/aws/resource_aws_cloudfront_distribution_test.go +++ b/aws/resource_aws_cloudfront_distribution_test.go @@ -1478,8 +1478,8 @@ resource "aws_cloudfront_distribution" "custom_distribution" { target_origin_id = "myCustomOrigin" smooth_streaming = false - origin_request_policy_id = aws_cloudfront_origin_request_policy.test_policy.id - cache_policy_id = aws_cloudfront_cache_policy.example.id + origin_request_policy_id = aws_cloudfront_origin_request_policy.test_policy.id + cache_policy_id = aws_cloudfront_cache_policy.example.id forwarded_values { query_string = false From 0b986b4301fd862d94393252c3d09221e347d3d6 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Thu, 28 Jan 2021 12:40:56 -0800 Subject: [PATCH 0915/1212] Remove trailing whitespace --- website/docs/d/cloudfront_origin_request_policy.html.markdown | 2 +- website/docs/r/cloudfront_origin_request_policy.html.markdown | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/d/cloudfront_origin_request_policy.html.markdown b/website/docs/d/cloudfront_origin_request_policy.html.markdown index 0a152a1ca9f..5b6cfe1fece 100644 --- a/website/docs/d/cloudfront_origin_request_policy.html.markdown +++ b/website/docs/d/cloudfront_origin_request_policy.html.markdown @@ -24,7 +24,7 @@ data "aws_cloudfront_origin_request_policy" "example" { The following arguments are supported: * `name` - A unique name to identify the origin request policy. -* `id` - The identifier for the origin request policy. +* `id` - The identifier for the origin request policy. ## Attributes Reference diff --git a/website/docs/r/cloudfront_origin_request_policy.html.markdown b/website/docs/r/cloudfront_origin_request_policy.html.markdown index bd7069cacbe..4e19f1ecbba 100644 --- a/website/docs/r/cloudfront_origin_request_policy.html.markdown +++ b/website/docs/r/cloudfront_origin_request_policy.html.markdown @@ -72,4 +72,4 @@ The following arguments are supported: In addition to all arguments above, the following attributes are exported: * `etag` - The current version of the origin request policy. -* `id` - The identifier for the origin request policy. +* `id` - The identifier for the origin request policy. From b43c6a5b35c67019f5e69451060c9648a6e0001f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 28 Jan 2021 15:49:22 -0500 Subject: [PATCH 0916/1212] build(deps): bump github.com/aws/aws-sdk-go from 1.36.28 to 1.37.0 (#17340) * build(deps): bump github.com/aws/aws-sdk-go from 1.36.28 to 1.37.0 Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.36.28 to 1.37.0. 
- [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.36.28...v1.37.0) Signed-off-by: dependabot[bot] * Update CHANGELOG for #17340 Reference: https://github.com/hashicorp/terraform-provider-aws/issues/10851 Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Brian Flad --- .changelog/17340.txt | 3 +++ go.mod | 2 +- go.sum | 4 ++-- 3 files changed, 6 insertions(+), 3 deletions(-) create mode 100644 .changelog/17340.txt diff --git a/.changelog/17340.txt b/.changelog/17340.txt new file mode 100644 index 00000000000..51e19380915 --- /dev/null +++ b/.changelog/17340.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +provider: Support AWS Single-Sign On (SSO) cached credentials +``` diff --git a/go.mod b/go.mod index 38649ac4693..ddea0c2809d 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/terraform-providers/terraform-provider-aws go 1.15 require ( - github.com/aws/aws-sdk-go v1.36.28 + github.com/aws/aws-sdk-go v1.37.0 github.com/beevik/etree v1.1.0 github.com/fatih/color v1.9.0 // indirect github.com/hashicorp/aws-sdk-go-base v0.7.0 diff --git a/go.sum b/go.sum index 13fafac0a13..3f6892724e4 100644 --- a/go.sum +++ b/go.sum @@ -65,8 +65,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.31.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.36.28 h1:JVRN7BZgwQ31SQCBwG5QM445+ynJU0ruKu+miFIijYY= -github.com/aws/aws-sdk-go v1.36.28/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.37.0 h1:GzFnhOIsrGyQ69s7VgqtrG2BG8v7X7vwB3Xpbd/DBBk= +github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs= github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= From 5f104c847d1af858bcd296f95c99a00fae8484c6 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 28 Jan 2021 20:51:03 +0000 Subject: [PATCH 0917/1212] Update CHANGELOG.md for #17340 --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a42f4c45112..0734f626cc1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,11 +9,13 @@ FEATURES: * **New Data Source:** `aws_imagebuilder_image` ([#16710](https://github.com/hashicorp/terraform-provider-aws/issues/16710)) * **New Resource:** `aws_imagebuilder_image` ([#16710](https://github.com/hashicorp/terraform-provider-aws/issues/16710)) * **New Resource:** `aws_prometheus_workspace` ([#16882](https://github.com/hashicorp/terraform-provider-aws/issues/16882)) +* **New Resource:** `aws_sagemaker_app_image_config` ([#17221](https://github.com/hashicorp/terraform-provider-aws/issues/17221)) ENHANCEMENTS: * data-source/aws_elasticache_replication_group: Add `multi_az_enabled` argument ([#17320](https://github.com/hashicorp/terraform-provider-aws/issues/17320)) * data-source/aws_vpc_peering_connection: Add `cidr_block_set` and `peer_cidr_block_set` attributes 
([#13420](https://github.com/hashicorp/terraform-provider-aws/issues/13420)) +* provider: Support AWS Single-Sign On (SSO) cached credentials ([#17340](https://github.com/hashicorp/terraform-provider-aws/issues/17340)) * resource/aws_codeartifact_domain: Make `encryption_key` optional ([#17262](https://github.com/hashicorp/terraform-provider-aws/issues/17262)) * resource/aws_elasticache_replication_group: Add `multi_az_enabled` argument ([#17320](https://github.com/hashicorp/terraform-provider-aws/issues/17320)) @@ -24,6 +26,7 @@ BUG FIXES: * data-source/aws_route53_zone: Ensure `name_servers` is populated for private Hosted Zones ([#17002](https://github.com/hashicorp/terraform-provider-aws/issues/17002)) * resource/aws_ebs_volume: Allow both `size` and `snapshot_id` attributes to be specified ([#17243](https://github.com/hashicorp/terraform-provider-aws/issues/17243)) * resource/aws_elasticache_replication_group: Correctly update computed `member_clusters` values ([#17201](https://github.com/hashicorp/terraform-provider-aws/issues/17201)) +* resource/aws_sagemaker_code_repository: fix doc name ([#17221](https://github.com/hashicorp/terraform-provider-aws/issues/17221)) ## 3.25.0 (January 22, 2021) From 92ce43ccd494b9692543c1b3d7f0d5390d245b6d Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 28 Jan 2021 21:34:31 +0000 Subject: [PATCH 0918/1212] Update CHANGELOG.md for #17301 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0734f626cc1..08fb30840dc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ ENHANCEMENTS: * provider: Support AWS Single-Sign On (SSO) cached credentials ([#17340](https://github.com/hashicorp/terraform-provider-aws/issues/17340)) * resource/aws_codeartifact_domain: Make `encryption_key` optional ([#17262](https://github.com/hashicorp/terraform-provider-aws/issues/17262)) * resource/aws_elasticache_replication_group: Add `multi_az_enabled` argument ([#17320](https://github.com/hashicorp/terraform-provider-aws/issues/17320)) +* resource/aws_elasticache_replication_group: Allow changing `cluster_mode.replica_count` without re-creation ([#17301](https://github.com/hashicorp/terraform-provider-aws/issues/17301)) BUG FIXES: From ac3363c699ef9a3a88c2de04dfae8af4afc7563a Mon Sep 17 00:00:00 2001 From: tf-release-bot Date: Thu, 28 Jan 2021 21:45:39 +0000 Subject: [PATCH 0919/1212] v3.26.0 --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 08fb30840dc..e075e2ec1d0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## 3.26.0 (Unreleased) +## 3.26.0 (January 28, 2021) NOTES: From 80871e5640e6c391938bdec5b3f4d4d441eed2cb Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Thu, 28 Jan 2021 14:11:19 -0800 Subject: [PATCH 0920/1212] Update CHANGELOG.md after release --- CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e075e2ec1d0..e85665ec59c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,6 @@ -## 3.26.0 (January 28, 2021) +# 3.27.0 (Unreleased) + +# 3.26.0 (January 28, 2021) NOTES: From 26a9df7ff0de50192962a9d6c627638a9e314a71 Mon Sep 17 00:00:00 2001 From: Chris Trawick Date: Thu, 28 Jan 2021 17:25:58 -0500 Subject: [PATCH 0921/1212] fixed a potential conflict --- aws/configservice.go | 18 ++++++++++++++++++ aws/resource_aws_config_conformance_pack.go | 18 ------------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/aws/configservice.go 
b/aws/configservice.go index d83d1e43bbf..21431d24907 100644 --- a/aws/configservice.go +++ b/aws/configservice.go @@ -233,3 +233,21 @@ func configDescribeConformancePackStatus(conn *configservice.ConfigService, name return nil, nil } + +func expandConfigConformancePackParameters(m map[string]interface{}) (params []*configservice.ConformancePackInputParameter) { + for k, v := range m { + params = append(params, &configservice.ConformancePackInputParameter{ + ParameterName: aws.String(k), + ParameterValue: aws.String(v.(string)), + }) + } + return +} + +func flattenConformancePackInputParameters(parameters []*configservice.ConformancePackInputParameter) (m map[string]string) { + m = make(map[string]string) + for _, p := range parameters { + m[*p.ParameterName] = *p.ParameterValue + } + return +} diff --git a/aws/resource_aws_config_conformance_pack.go b/aws/resource_aws_config_conformance_pack.go index 4e9e156ba2b..e51a013b00b 100644 --- a/aws/resource_aws_config_conformance_pack.go +++ b/aws/resource_aws_config_conformance_pack.go @@ -122,16 +122,6 @@ func resourceAwsConfigConformancePackPut(d *schema.ResourceData, meta interface{ return resourceAwsConfigConformancePackRead(d, meta) } -func expandConfigConformancePackParameters(m map[string]interface{}) (params []*configservice.ConformancePackInputParameter) { - for k, v := range m { - params = append(params, &configservice.ConformancePackInputParameter{ - ParameterName: aws.String(k), - ParameterValue: aws.String(v.(string)), - }) - } - return -} - func refreshConformancePackStatus(d *schema.ResourceData, conn *configservice.ConfigService) func() (interface{}, string, error) { return func() (interface{}, string, error) { out, err := conn.DescribeConformancePackStatus(&configservice.DescribeConformancePackStatusInput{ @@ -203,14 +193,6 @@ func resourceAwsConfigConformancePackRead(d *schema.ResourceData, meta interface return nil } -func flattenConformancePackInputParameters(parameters []*configservice.ConformancePackInputParameter) (m map[string]string) { - m = make(map[string]string) - for _, p := range parameters { - m[*p.ParameterName] = *p.ParameterValue - } - return -} - func resourceAwsConfigConformancePackDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).configconn From c5375a2c3e87235620f91822599c2e56fff4ca54 Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Fri, 29 Jan 2021 02:45:25 +0200 Subject: [PATCH 0922/1212] resource/aws_glacier_vault: Prevent GetVaultAccessPolicy crash, add plan-time validations, general refactoring (#12645) Output from acceptance testing: ``` --- PASS: TestAccAWSGlacierVault_disappears (12.48s) --- PASS: TestAccAWSGlacierVault_basic (17.66s) --- PASS: TestAccAWSGlacierVault_policy (41.22s) --- PASS: TestAccAWSGlacierVault_tags (43.39s) --- PASS: TestAccAWSGlacierVault_notification (46.12s) ``` --- aws/resource_aws_glacier_vault.go | 181 +++++++++-------- aws/resource_aws_glacier_vault_test.go | 267 +++++++++++++++++++++---- 2 files changed, 322 insertions(+), 126 deletions(-) diff --git a/aws/resource_aws_glacier_vault.go b/aws/resource_aws_glacier_vault.go index 8e1845164be..20574497843 100644 --- a/aws/resource_aws_glacier_vault.go +++ b/aws/resource_aws_glacier_vault.go @@ -7,7 +7,6 @@ import ( "regexp" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/glacier" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" @@ -31,18 +30,11 @@ func 
resourceAwsGlacierVault() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[.0-9A-Za-z-_]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only alphanumeric characters, hyphens, underscores, and periods are allowed in %q", k)) - } - if len(value) > 255 { - errors = append(errors, fmt.Errorf( - "%q cannot be longer than 255 characters", k)) - } - return - }, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 255), + validation.StringMatch(regexp.MustCompile(`^[.0-9A-Za-z-_]+$`), + "only alphanumeric characters, hyphens, underscores, and periods are allowed"), + ), }, "location": { @@ -56,29 +48,34 @@ func resourceAwsGlacierVault() *schema.Resource { }, "access_policy": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringIsJSON, - StateFunc: func(v interface{}) string { - json, _ := structure.NormalizeJsonString(v) - return json - }, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsJSON, + DiffSuppressFunc: suppressEquivalentAwsPolicyDiffs, }, "notification": { Type: schema.TypeList, Optional: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "events": { Type: schema.TypeSet, Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{ + "ArchiveRetrievalCompleted", + "InventoryRetrievalCompleted", + }, false), + }, + Set: schema.HashString, }, "sns_topic": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, }, }, }, @@ -90,42 +87,59 @@ func resourceAwsGlacierVault() *schema.Resource { } func resourceAwsGlacierVaultCreate(d *schema.ResourceData, meta interface{}) error { - glacierconn := meta.(*AWSClient).glacierconn + conn := meta.(*AWSClient).glacierconn input := &glacier.CreateVaultInput{ VaultName: aws.String(d.Get("name").(string)), } - out, err := glacierconn.CreateVault(input) + _, err := conn.CreateVault(input) if err != nil { - return fmt.Errorf("Error creating Glacier Vault: %s", err) + return fmt.Errorf("Error creating Glacier Vault: %w", err) } d.SetId(d.Get("name").(string)) - d.Set("location", out.Location) - return resourceAwsGlacierVaultUpdate(d, meta) + if v, ok := d.GetOk("tags"); ok { + if err := keyvaluetags.GlacierUpdateTags(conn, d.Id(), nil, v.(map[string]interface{})); err != nil { + return fmt.Errorf("error updating Glacier Vault (%s) tags: %w", d.Id(), err) + } + } + + if _, ok := d.GetOk("access_policy"); ok { + if err := resourceAwsGlacierVaultPolicyUpdate(conn, d); err != nil { + return fmt.Errorf("error updating Glacier Vault (%s) access policy: %w", d.Id(), err) + } + } + + if _, ok := d.GetOk("notification"); ok { + if err := resourceAwsGlacierVaultNotificationUpdate(conn, d); err != nil { + return fmt.Errorf("error updating Glacier Vault (%s) notification: %w", d.Id(), err) + } + } + + return resourceAwsGlacierVaultRead(d, meta) } func resourceAwsGlacierVaultUpdate(d *schema.ResourceData, meta interface{}) error { - glacierconn := meta.(*AWSClient).glacierconn + conn := meta.(*AWSClient).glacierconn if d.HasChange("tags") { o, n := d.GetChange("tags") - if err := keyvaluetags.GlacierUpdateTags(glacierconn, d.Id(), o, n); err != nil { + if err := keyvaluetags.GlacierUpdateTags(conn, d.Id(), o, 
n); err != nil { return fmt.Errorf("error updating Glacier Vault (%s) tags: %s", d.Id(), err) } } if d.HasChange("access_policy") { - if err := resourceAwsGlacierVaultPolicyUpdate(glacierconn, d); err != nil { - return err + if err := resourceAwsGlacierVaultPolicyUpdate(conn, d); err != nil { + return fmt.Errorf("error updating Glacier Vault (%s) access policy: %w", d.Id(), err) } } if d.HasChange("notification") { - if err := resourceAwsGlacierVaultNotificationUpdate(glacierconn, d); err != nil { - return err + if err := resourceAwsGlacierVaultNotificationUpdate(conn, d); err != nil { + return fmt.Errorf("error updating Glacier Vault (%s) notification: %w", d.Id(), err) } } @@ -133,16 +147,21 @@ func resourceAwsGlacierVaultUpdate(d *schema.ResourceData, meta interface{}) err } func resourceAwsGlacierVaultRead(d *schema.ResourceData, meta interface{}) error { - glacierconn := meta.(*AWSClient).glacierconn + conn := meta.(*AWSClient).glacierconn ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig input := &glacier.DescribeVaultInput{ VaultName: aws.String(d.Id()), } - out, err := glacierconn.DescribeVault(input) + out, err := conn.DescribeVault(input) + if isAWSErr(err, glacier.ErrCodeResourceNotFoundException, "") { + log.Printf("[WARN] Glaier Vault (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } if err != nil { - return fmt.Errorf("Error reading Glacier Vault: %s", err.Error()) + return fmt.Errorf("Error reading Glacier Vault: %w", err) } awsClient := meta.(*AWSClient) @@ -155,91 +174,83 @@ func resourceAwsGlacierVaultRead(d *schema.ResourceData, meta interface{}) error } d.Set("location", location) - tags, err := keyvaluetags.GlacierListTags(glacierconn, d.Id()) + tags, err := keyvaluetags.GlacierListTags(conn, d.Id()) if err != nil { - return fmt.Errorf("error listing tags for Glacier Vault (%s): %s", d.Id(), err) + return fmt.Errorf("error listing tags for Glacier Vault (%s): %w", d.Id(), err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } log.Printf("[DEBUG] Getting the access_policy for Vault %s", d.Id()) - pol, err := glacierconn.GetVaultAccessPolicy(&glacier.GetVaultAccessPolicyInput{ + pol, err := conn.GetVaultAccessPolicy(&glacier.GetVaultAccessPolicyInput{ VaultName: aws.String(d.Id()), }) - if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ResourceNotFoundException" { + if isAWSErr(err, glacier.ErrCodeResourceNotFoundException, "") { d.Set("access_policy", "") - } else if pol != nil { - policy, err := structure.NormalizeJsonString(*pol.Policy.Policy) + } else if err != nil { + return fmt.Errorf("error getting access policy for Glacier Vault (%s): %w", d.Id(), err) + } else if pol != nil && pol.Policy != nil { + policy, err := structure.NormalizeJsonString(aws.StringValue(pol.Policy.Policy)) if err != nil { - return fmt.Errorf("access policy contains an invalid JSON: %s", err) + return fmt.Errorf("access policy contains an invalid JSON: %w", err) } d.Set("access_policy", policy) - } else { - return err } - notifications, err := getGlacierVaultNotification(glacierconn, d.Id()) - if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ResourceNotFoundException" { - d.Set("notification", "") + notifications, err := getGlacierVaultNotification(conn, d.Id()) + if isAWSErr(err, glacier.ErrCodeResourceNotFoundException, "") { + d.Set("notification", []map[string]interface{}{}) } else if 
pol != nil { d.Set("notification", notifications) } else { - return err + return fmt.Errorf("error setting notification: %w", err) } return nil } func resourceAwsGlacierVaultDelete(d *schema.ResourceData, meta interface{}) error { - glacierconn := meta.(*AWSClient).glacierconn + conn := meta.(*AWSClient).glacierconn log.Printf("[DEBUG] Glacier Delete Vault: %s", d.Id()) - _, err := glacierconn.DeleteVault(&glacier.DeleteVaultInput{ + _, err := conn.DeleteVault(&glacier.DeleteVaultInput{ VaultName: aws.String(d.Id()), }) if err != nil { - return fmt.Errorf("Error deleting Glacier Vault: %s", err.Error()) + return fmt.Errorf("Error deleting Glacier Vault: %w", err) } return nil } -func resourceAwsGlacierVaultNotificationUpdate(glacierconn *glacier.Glacier, d *schema.ResourceData) error { +func resourceAwsGlacierVaultNotificationUpdate(conn *glacier.Glacier, d *schema.ResourceData) error { if v, ok := d.GetOk("notification"); ok { settings := v.([]interface{}) - if len(settings) > 1 { - return fmt.Errorf("Only a single Notification Block is allowed for Glacier Vault") - } else if len(settings) == 1 { - s := settings[0].(map[string]interface{}) - var events []*string - for _, id := range s["events"].(*schema.Set).List() { - events = append(events, aws.String(id.(string))) - } - - _, err := glacierconn.SetVaultNotifications(&glacier.SetVaultNotificationsInput{ - VaultName: aws.String(d.Id()), - VaultNotificationConfig: &glacier.VaultNotificationConfig{ - SNSTopic: aws.String(s["sns_topic"].(string)), - Events: events, - }, - }) + s := settings[0].(map[string]interface{}) + + _, err := conn.SetVaultNotifications(&glacier.SetVaultNotificationsInput{ + VaultName: aws.String(d.Id()), + VaultNotificationConfig: &glacier.VaultNotificationConfig{ + SNSTopic: aws.String(s["sns_topic"].(string)), + Events: expandStringSet(s["events"].(*schema.Set)), + }, + }) - if err != nil { - return fmt.Errorf("Error Updating Glacier Vault Notifications: %s", err.Error()) - } + if err != nil { + return fmt.Errorf("Error Updating Glacier Vault Notifications: %w", err) } } else { - _, err := glacierconn.DeleteVaultNotifications(&glacier.DeleteVaultNotificationsInput{ + _, err := conn.DeleteVaultNotifications(&glacier.DeleteVaultNotificationsInput{ VaultName: aws.String(d.Id()), }) if err != nil { - return fmt.Errorf("Error Removing Glacier Vault Notifications: %s", err.Error()) + return fmt.Errorf("Error Removing Glacier Vault Notifications: %w", err) } } @@ -247,7 +258,7 @@ func resourceAwsGlacierVaultNotificationUpdate(glacierconn *glacier.Glacier, d * return nil } -func resourceAwsGlacierVaultPolicyUpdate(glacierconn *glacier.Glacier, d *schema.ResourceData) error { +func resourceAwsGlacierVaultPolicyUpdate(conn *glacier.Glacier, d *schema.ResourceData) error { vaultName := d.Id() policyContents := d.Get("access_policy").(string) @@ -258,22 +269,22 @@ func resourceAwsGlacierVaultPolicyUpdate(glacierconn *glacier.Glacier, d *schema if policyContents != "" { log.Printf("[DEBUG] Glacier Vault: %s, put policy", vaultName) - _, err := glacierconn.SetVaultAccessPolicy(&glacier.SetVaultAccessPolicyInput{ + _, err := conn.SetVaultAccessPolicy(&glacier.SetVaultAccessPolicyInput{ VaultName: aws.String(d.Id()), Policy: policy, }) if err != nil { - return fmt.Errorf("Error putting Glacier Vault policy: %s", err.Error()) + return fmt.Errorf("Error putting Glacier Vault policy: %w", err) } } else { log.Printf("[DEBUG] Glacier Vault: %s, delete policy: %s", vaultName, policy) - _, err := 
glacierconn.DeleteVaultAccessPolicy(&glacier.DeleteVaultAccessPolicyInput{ + _, err := conn.DeleteVaultAccessPolicy(&glacier.DeleteVaultAccessPolicyInput{ VaultName: aws.String(d.Id()), }) if err != nil { - return fmt.Errorf("Error deleting Glacier Vault policy: %s", err.Error()) + return fmt.Errorf("Error deleting Glacier Vault policy: %w", err) } } @@ -287,14 +298,14 @@ func buildGlacierVaultLocation(accountId, vaultName string) (string, error) { return fmt.Sprintf("/" + accountId + "/vaults/" + vaultName), nil } -func getGlacierVaultNotification(glacierconn *glacier.Glacier, vaultName string) ([]map[string]interface{}, error) { +func getGlacierVaultNotification(conn *glacier.Glacier, vaultName string) ([]map[string]interface{}, error) { request := &glacier.GetVaultNotificationsInput{ VaultName: aws.String(vaultName), } - response, err := glacierconn.GetVaultNotifications(request) + response, err := conn.GetVaultNotifications(request) if err != nil { - return nil, fmt.Errorf("Error reading Glacier Vault Notifications: %s", err.Error()) + return nil, fmt.Errorf("Error reading Glacier Vault Notifications: %w", err) } notifications := make(map[string]interface{}) diff --git a/aws/resource_aws_glacier_vault_test.go b/aws/resource_aws_glacier_vault_test.go index 1ce209de0a1..16261e9e365 100644 --- a/aws/resource_aws_glacier_vault_test.go +++ b/aws/resource_aws_glacier_vault_test.go @@ -3,10 +3,10 @@ package aws import ( "fmt" "log" + "regexp" "testing" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/glacier" "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" @@ -75,7 +75,8 @@ func testSweepGlacierVaults(region string) error { } func TestAccAWSGlacierVault_basic(t *testing.T) { - rInt := acctest.RandInt() + var vault glacier.DescribeVaultOutput + rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_glacier_vault.test" resource.ParallelTest(t, resource.TestCase{ @@ -84,9 +85,14 @@ func TestAccAWSGlacierVault_basic(t *testing.T) { CheckDestroy: testAccCheckGlacierVaultDestroy, Steps: []resource.TestStep{ { - Config: testAccGlacierVault_basic(rInt), + Config: testAccGlacierVaultBasicConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckGlacierVaultExists(resourceName), + testAccCheckGlacierVaultExists(resourceName, &vault), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "glacier", regexp.MustCompile(`vaults/.+`)), + resource.TestCheckResourceAttr(resourceName, "notification.#", "0"), + resource.TestCheckResourceAttr(resourceName, "access_policy", ""), ), }, { @@ -98,9 +104,11 @@ func TestAccAWSGlacierVault_basic(t *testing.T) { }) } -func TestAccAWSGlacierVault_full(t *testing.T) { - rInt := acctest.RandInt() +func TestAccAWSGlacierVault_notification(t *testing.T) { + var vault glacier.DescribeVaultOutput + rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_glacier_vault.test" + snsResourceName := "aws_sns_topic.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -108,9 +116,12 @@ func TestAccAWSGlacierVault_full(t *testing.T) { CheckDestroy: testAccCheckGlacierVaultDestroy, Steps: []resource.TestStep{ { - Config: testAccGlacierVault_full(rInt), + Config: testAccGlacierVaultNotificationConfig(rName), Check: resource.ComposeTestCheckFunc( - 
testAccCheckGlacierVaultExists(resourceName), + testAccCheckGlacierVaultExists(resourceName, &vault), + resource.TestCheckResourceAttr(resourceName, "notification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "notification.0.events.#", "2"), + resource.TestCheckResourceAttrPair(resourceName, "notification.0.sns_topic", snsResourceName, "arn"), ), }, { @@ -118,12 +129,30 @@ func TestAccAWSGlacierVault_full(t *testing.T) { ImportState: true, ImportStateVerify: true, }, + { + Config: testAccGlacierVaultBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGlacierVaultExists(resourceName, &vault), + resource.TestCheckResourceAttr(resourceName, "notification.#", "0"), + testAccCheckVaultNotificationsMissing(resourceName), + ), + }, + { + Config: testAccGlacierVaultNotificationConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGlacierVaultExists(resourceName, &vault), + resource.TestCheckResourceAttr(resourceName, "notification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "notification.0.events.#", "2"), + resource.TestCheckResourceAttrPair(resourceName, "notification.0.sns_topic", snsResourceName, "arn"), + ), + }, }, }) } -func TestAccAWSGlacierVault_RemoveNotifications(t *testing.T) { - rInt := acctest.RandInt() +func TestAccAWSGlacierVault_policy(t *testing.T) { + var vault glacier.DescribeVaultOutput + rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_glacier_vault.test" resource.ParallelTest(t, resource.TestCase{ @@ -132,9 +161,12 @@ func TestAccAWSGlacierVault_RemoveNotifications(t *testing.T) { CheckDestroy: testAccCheckGlacierVaultDestroy, Steps: []resource.TestStep{ { - Config: testAccGlacierVault_full(rInt), + Config: testAccGlacierVaultPolicyConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckGlacierVaultExists(resourceName), + testAccCheckGlacierVaultExists(resourceName, &vault), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestMatchResourceAttr(resourceName, "access_policy", + regexp.MustCompile(`"Sid":"cross-account-upload".+`)), ), }, { @@ -143,17 +175,92 @@ func TestAccAWSGlacierVault_RemoveNotifications(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccGlacierVault_withoutNotification(rInt), + Config: testAccGlacierVaultPolicyConfigUpdated(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckGlacierVaultExists(resourceName), - testAccCheckVaultNotificationsMissing(resourceName), + testAccCheckGlacierVaultExists(resourceName, &vault), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestMatchResourceAttr(resourceName, "access_policy", + regexp.MustCompile(`"Sid":"cross-account-upload1".+`)), + ), + }, + { + Config: testAccGlacierVaultBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGlacierVaultExists(resourceName, &vault), + resource.TestCheckResourceAttr(resourceName, "access_policy", ""), + ), + }, + }, + }) +} + +func TestAccAWSGlacierVault_tags(t *testing.T) { + var vault glacier.DescribeVaultOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_glacier_vault.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckGlacierVaultDestroy, + Steps: []resource.TestStep{ + { + Config: testAccGlacierVaultConfigTags1(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckGlacierVaultExists(resourceName, &vault), + 
resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGlacierVaultConfigTags2(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckGlacierVaultExists(resourceName, &vault), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccGlacierVaultConfigTags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckGlacierVaultExists(resourceName, &vault), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), ), }, }, }) } -func testAccCheckGlacierVaultExists(name string) resource.TestCheckFunc { +func TestAccAWSGlacierVault_disappears(t *testing.T) { + var vault glacier.DescribeVaultOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_glacier_vault.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckGlacierVaultDestroy, + Steps: []resource.TestStep{ + { + Config: testAccGlacierVaultBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGlacierVaultExists(resourceName, &vault), + testAccCheckResourceDisappears(testAccProvider, resourceAwsGlacierVault(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckGlacierVaultExists(name string, vault *glacier.DescribeVaultOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[name] if !ok { @@ -164,8 +271,8 @@ func testAccCheckGlacierVaultExists(name string) resource.TestCheckFunc { return fmt.Errorf("No ID is set") } - glacierconn := testAccProvider.Meta().(*AWSClient).glacierconn - out, err := glacierconn.DescribeVault(&glacier.DescribeVaultInput{ + conn := testAccProvider.Meta().(*AWSClient).glacierconn + out, err := conn.DescribeVault(&glacier.DescribeVaultInput{ VaultName: aws.String(rs.Primary.ID), }) @@ -182,6 +289,8 @@ func testAccCheckGlacierVaultExists(name string) resource.TestCheckFunc { *out.VaultName, rs.Primary.ID) } + *vault = *out + return nil } } @@ -197,13 +306,13 @@ func testAccCheckVaultNotificationsMissing(name string) resource.TestCheckFunc { return fmt.Errorf("No ID is set") } - glacierconn := testAccProvider.Meta().(*AWSClient).glacierconn - out, err := glacierconn.GetVaultNotifications(&glacier.GetVaultNotificationsInput{ + conn := testAccProvider.Meta().(*AWSClient).glacierconn + out, err := conn.GetVaultNotifications(&glacier.GetVaultNotificationsInput{ VaultName: aws.String(rs.Primary.ID), }) - if awserr, ok := err.(awserr.Error); ok && awserr.Code() != "ResourceNotFoundException" { - return fmt.Errorf("Expected ResourceNotFoundException for Vault %s Notification Block but got %s", rs.Primary.ID, awserr.Code()) + if !isAWSErr(err, glacier.ErrCodeResourceNotFoundException, "") { + return fmt.Errorf("Expected ResourceNotFoundException for Vault %s Notification Block but got %s", rs.Primary.ID, err) } if out.VaultNotificationConfig != nil { @@ -228,7 +337,7 @@ func testAccCheckGlacierVaultDestroy(s *terraform.State) error { } if _, err := conn.DescribeVault(input); err 
!= nil { // Verify the error is what we want - if ae, ok := err.(awserr.Error); ok && ae.Code() == "ResourceNotFoundException" { + if isAWSErr(err, glacier.ErrCodeResourceNotFoundException, "") { continue } @@ -239,47 +348,123 @@ func testAccCheckGlacierVaultDestroy(s *terraform.State) error { return nil } -func testAccGlacierVault_basic(rInt int) string { +func testAccGlacierVaultBasicConfig(rName string) string { return fmt.Sprintf(` resource "aws_glacier_vault" "test" { - name = "my_test_vault_%d" + name = %[1]q } -`, rInt) +`, rName) } -func testAccGlacierVault_full(rInt int) string { +func testAccGlacierVaultNotificationConfig(rName string) string { return fmt.Sprintf(` -resource "aws_sns_topic" "aws_sns_topic" { - name = "glacier-sns-topic-%d" +resource "aws_sns_topic" "test" { + name = %[1]q } resource "aws_glacier_vault" "test" { - name = "my_test_vault_%d" + name = %[1]q notification { - sns_topic = aws_sns_topic.aws_sns_topic.arn + sns_topic = aws_sns_topic.test.arn events = ["ArchiveRetrievalCompleted", "InventoryRetrievalCompleted"] } +} +`, rName) +} - tags = { - Test = "Test1" - } +func testAccGlacierVaultPolicyConfig(rName string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + +data "aws_region" "current" {} + +data "aws_caller_identity" "current" {} + +resource "aws_glacier_vault" "test" { + name = %[1]q + + access_policy = < Date: Thu, 28 Jan 2021 19:45:55 -0500 Subject: [PATCH 0923/1212] Update CHANGELOG for #12645 --- .changelog/12645.txt | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 .changelog/12645.txt diff --git a/.changelog/12645.txt b/.changelog/12645.txt new file mode 100644 index 00000000000..a6375fb21da --- /dev/null +++ b/.changelog/12645.txt @@ -0,0 +1,11 @@ +```release-note:bug +resource/aws_glacier_vault: Prevent crash with `GetVaultAccessPolicy` API errors +``` + +```release-note:bug +resource/aws_glacier_vault: Properly remove from state when resource does not exist +``` + +```release-note:enhancement +resource/aws_glacier_vault: Add plan-time validation for `notification` configuration block `events` and `sns_topic_arn` arguments +``` From 9fc2537798712196dc241ddab4e011e5b33a060c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 29 Jan 2021 09:46:56 -0500 Subject: [PATCH 0924/1212] build(deps): Bump github.com/aws/aws-sdk-go in /awsproviderlint (#17343) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.36.28 to 1.37.0. 
- [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.36.28...v1.37.0) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- awsproviderlint/go.mod | 2 +- awsproviderlint/go.sum | 4 +- .../aws/credentials/ssocreds/doc.go | 60 + .../aws-sdk-go/aws/credentials/ssocreds/os.go | 9 + .../aws/credentials/ssocreds/os_windows.go | 7 + .../aws/credentials/ssocreds/provider.go | 180 +++ .../aws/aws-sdk-go/aws/endpoints/defaults.go | 87 +- .../aws/aws-sdk-go/aws/session/credentials.go | 19 + .../aws/aws-sdk-go/aws/session/session.go | 2 +- .../aws-sdk-go/aws/session/shared_config.go | 80 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../private/protocol/jsonrpc/jsonrpc.go | 88 ++ .../protocol/jsonrpc/unmarshal_error.go | 107 ++ .../private/protocol/restjson/restjson.go | 59 + .../protocol/restjson/unmarshal_error.go | 134 ++ .../aws/aws-sdk-go/service/sso/api.go | 1210 +++++++++++++++++ .../aws/aws-sdk-go/service/sso/doc.go | 44 + .../aws/aws-sdk-go/service/sso/errors.go | 44 + .../aws/aws-sdk-go/service/sso/service.go | 104 ++ .../service/sso/ssoiface/interface.go | 86 ++ awsproviderlint/vendor/modules.txt | 7 +- 21 files changed, 2315 insertions(+), 20 deletions(-) create mode 100644 awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go create mode 100644 awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go create mode 100644 awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go create mode 100644 awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go create mode 100644 awsproviderlint/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go create mode 100644 awsproviderlint/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go create mode 100644 awsproviderlint/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go create mode 100644 awsproviderlint/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go create mode 100644 awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/sso/api.go create mode 100644 awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go create mode 100644 awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go create mode 100644 awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/sso/service.go create mode 100644 awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go diff --git a/awsproviderlint/go.mod b/awsproviderlint/go.mod index d177937aafa..41ef31008cf 100644 --- a/awsproviderlint/go.mod +++ b/awsproviderlint/go.mod @@ -3,7 +3,7 @@ module github.com/terraform-providers/terraform-provider-aws/awsproviderlint go 1.15 require ( - github.com/aws/aws-sdk-go v1.36.28 + github.com/aws/aws-sdk-go v1.37.0 github.com/bflad/tfproviderlint v0.21.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.1 golang.org/x/tools v0.0.0-20200928201943-a0ef9b62deab diff --git a/awsproviderlint/go.sum b/awsproviderlint/go.sum index 377c1e6d855..4d4dcc5503e 100644 --- a/awsproviderlint/go.sum +++ b/awsproviderlint/go.sum @@ -56,8 +56,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= 
github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.36.28 h1:JVRN7BZgwQ31SQCBwG5QM445+ynJU0ruKu+miFIijYY=
-github.com/aws/aws-sdk-go v1.36.28/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/aws/aws-sdk-go v1.37.0 h1:GzFnhOIsrGyQ69s7VgqtrG2BG8v7X7vwB3Xpbd/DBBk=
+github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/bflad/gopaniccheck v0.1.0 h1:tJftp+bv42ouERmUMWLoUn/5bi/iQZjHPznM00cP/bU=
github.com/bflad/gopaniccheck v0.1.0/go.mod h1:ZCj2vSr7EqVeDaqVsWN4n2MwdROx1YL+LFo47TSWtsA=
github.com/bflad/tfproviderlint v0.21.0 h1:iSNU4khz+55oYA+5aXXMrz5Max4Mytb0JwPGhOwTIJo=
diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go
new file mode 100644
index 00000000000..18c940ab3c3
--- /dev/null
+++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go
@@ -0,0 +1,60 @@
+// Package ssocreds provides a credential provider for retrieving temporary AWS credentials using an SSO access token.
+//
+// IMPORTANT: The provider in this package does not initiate or perform the AWS SSO login flow. The SDK provider
+// expects that you have already performed the SSO login flow using the AWS CLI's "aws sso login" command, or by
+// some other mechanism. The provider must find a valid non-expired access token for the AWS SSO user portal URL in
+// ~/.aws/sso/cache. If a cached token is not found, it is expired, or the file is malformed, an error will be returned.
+//
+// Loading AWS SSO credentials with the AWS shared configuration file
+//
+// You can configure AWS SSO credentials from the AWS shared configuration file by
+// specifying the required keys in the profile:
+//
+//   sso_account_id
+//   sso_region
+//   sso_role_name
+//   sso_start_url
+//
+// For example, the following defines a profile "devsso" and specifies the AWS SSO parameters that define the target
+// account, role, sign-on portal, and the region where the user portal is located. Note: all SSO arguments must be
+// provided, or an error will be returned.
+//
+//   [profile devsso]
+//   sso_start_url = https://my-sso-portal.awsapps.com/start
+//   sso_role_name = SSOReadOnlyRole
+//   sso_region = us-east-1
+//   sso_account_id = 123456789012
+//
+// Using the session package, you can load the AWS SDK shared configuration, and specify that this profile be used to
+// retrieve credentials. For example:
+//
+//   sess, err := session.NewSessionWithOptions(session.Options{
+//       SharedConfigState: session.SharedConfigEnable,
+//       Profile:           "devsso",
+//   })
+//   if err != nil {
+//       return err
+//   }
+//
+// Programmatically loading AWS SSO credentials directly
+//
+// You can programmatically construct the AWS SSO Provider in your application, and provide the necessary information
+// to load and retrieve temporary credentials using an access token from ~/.aws/sso/cache.
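+//
+// The example below assumes an existing session value, sess; a minimal
+// sketch of constructing one:
+//
+//   sess := session.Must(session.NewSession())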
+// +// svc := sso.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), // Client Region must correspond to the AWS SSO user portal region +// }) +// +// provider := ssocreds.NewCredentialsWithClient(svc, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start") +// +// credentials, err := provider.Get() +// if err != nil { +// return err +// } +// +// Additional Resources +// +// Configuring the AWS CLI to use AWS Single Sign-On: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html +// +// AWS Single Sign-On User Guide: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html +package ssocreds diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go new file mode 100644 index 00000000000..ceca7dceecb --- /dev/null +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go @@ -0,0 +1,9 @@ +// +build !windows + +package ssocreds + +import "os" + +func getHomeDirectory() string { + return os.Getenv("HOME") +} diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go new file mode 100644 index 00000000000..eb48f61e5bc --- /dev/null +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go @@ -0,0 +1,7 @@ +package ssocreds + +import "os" + +func getHomeDirectory() string { + return os.Getenv("USERPROFILE") +} diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go new file mode 100644 index 00000000000..6eda2a5557f --- /dev/null +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go @@ -0,0 +1,180 @@ +package ssocreds + +import ( + "crypto/sha1" + "encoding/hex" + "encoding/json" + "fmt" + "io/ioutil" + "path/filepath" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/service/sso" + "github.com/aws/aws-sdk-go/service/sso/ssoiface" +) + +// ErrCodeSSOProviderInvalidToken is the code type that is returned if loaded token has expired or is otherwise invalid. +// To refresh the SSO session run aws sso login with the corresponding profile. +const ErrCodeSSOProviderInvalidToken = "SSOProviderInvalidToken" + +const invalidTokenMessage = "the SSO session has expired or is invalid" + +func init() { + nowTime = time.Now + defaultCacheLocation = defaultCacheLocationImpl +} + +var nowTime func() time.Time + +// ProviderName is the name of the provider used to specify the source of credentials. +const ProviderName = "SSOProvider" + +var defaultCacheLocation func() string + +func defaultCacheLocationImpl() string { + return filepath.Join(getHomeDirectory(), ".aws", "sso", "cache") +} + +// Provider is an AWS credential provider that retrieves temporary AWS credentials by exchanging an SSO login token. +type Provider struct { + credentials.Expiry + + // The Client which is configured for the AWS Region where the AWS SSO user portal is located. + Client ssoiface.SSOAPI + + // The AWS account that is assigned to the user. + AccountID string + + // The role name that is assigned to the user. 
+ RoleName string + + // The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal. + StartURL string +} + +// NewCredentials returns a new AWS Single Sign-On (AWS SSO) credential provider. The ConfigProvider is expected to be configured +// for the AWS Region where the AWS SSO user portal is located. +func NewCredentials(configProvider client.ConfigProvider, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials { + return NewCredentialsWithClient(sso.New(configProvider), accountID, roleName, startURL, optFns...) +} + +// NewCredentialsWithClient returns a new AWS Single Sign-On (AWS SSO) credential provider. The provided client is expected to be configured +// for the AWS Region where the AWS SSO user portal is located. +func NewCredentialsWithClient(client ssoiface.SSOAPI, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials { + p := &Provider{ + Client: client, + AccountID: accountID, + RoleName: roleName, + StartURL: startURL, + } + + for _, fn := range optFns { + fn(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal +// by exchanging the accessToken present in ~/.aws/sso/cache. +func (p *Provider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal +// by exchanging the accessToken present in ~/.aws/sso/cache. +func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + tokenFile, err := loadTokenFile(p.StartURL) + if err != nil { + return credentials.Value{}, err + } + + output, err := p.Client.GetRoleCredentialsWithContext(ctx, &sso.GetRoleCredentialsInput{ + AccessToken: &tokenFile.AccessToken, + AccountId: &p.AccountID, + RoleName: &p.RoleName, + }) + if err != nil { + return credentials.Value{}, err + } + + expireTime := time.Unix(0, aws.Int64Value(output.RoleCredentials.Expiration)*int64(time.Millisecond)).UTC() + p.SetExpiration(expireTime, 0) + + return credentials.Value{ + AccessKeyID: aws.StringValue(output.RoleCredentials.AccessKeyId), + SecretAccessKey: aws.StringValue(output.RoleCredentials.SecretAccessKey), + SessionToken: aws.StringValue(output.RoleCredentials.SessionToken), + ProviderName: ProviderName, + }, nil +} + +func getCacheFileName(url string) (string, error) { + hash := sha1.New() + _, err := hash.Write([]byte(url)) + if err != nil { + return "", err + } + return strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json", nil +} + +type rfc3339 time.Time + +func (r *rfc3339) UnmarshalJSON(bytes []byte) error { + var value string + + if err := json.Unmarshal(bytes, &value); err != nil { + return err + } + + parse, err := time.Parse(time.RFC3339, value) + if err != nil { + return fmt.Errorf("expected RFC3339 timestamp: %v", err) + } + + *r = rfc3339(parse) + + return nil +} + +type token struct { + AccessToken string `json:"accessToken"` + ExpiresAt rfc3339 `json:"expiresAt"` + Region string `json:"region,omitempty"` + StartURL string `json:"startUrl,omitempty"` +} + +func (t token) Expired() bool { + return nowTime().Round(0).After(time.Time(t.ExpiresAt)) +} + +func loadTokenFile(startURL string) (t token, err error) { + key, err := getCacheFileName(startURL) + if err != nil { + return token{}, 
awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) + } + + fileBytes, err := ioutil.ReadFile(filepath.Join(defaultCacheLocation(), key)) + if err != nil { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) + } + + if err := json.Unmarshal(fileBytes, &t); err != nil { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) + } + + if len(t.AccessToken) == 0 { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil) + } + + if t.Expired() { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil) + } + + return t, nil +} diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 013ccec4a5b..3cc48800d6d 100644 --- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -380,9 +380,33 @@ var awsPartition = partition{ "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "api.detective-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "api.detective-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "api.detective-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "api.detective-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "api.ecr": service{ @@ -746,6 +770,7 @@ var awsPartition = partition{ "appmesh": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1413,6 +1438,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3053,6 +3079,7 @@ var awsPartition = partition{ "fsx": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3062,6 +3089,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3095,11 +3123,12 @@ var awsPartition = partition{ Region: "us-west-2", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "gamelift": service{ @@ -3759,6 +3788,7 @@ var awsPartition = partition{ "lakeformation": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -4274,7 +4304,19 @@ var awsPartition = partition{ "eu-west-1": 
endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "models-fips.lex.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "models-fips.lex.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "monitoring": service{ @@ -5188,7 +5230,19 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "runtime-fips.lex.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "runtime-fips.lex.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "runtime.sagemaker": service{ @@ -8674,7 +8728,12 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, - "us-gov-east-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "greengrass.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{ Hostname: "greengrass.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ @@ -9909,6 +9968,12 @@ var awsisoPartition = partition{ }, }, }, + "ssm": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "states": service{ Endpoints: endpoints{ diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go index fe6dac1f476..b0cef7575d2 100644 --- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -9,6 +9,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/credentials/processcreds" + "github.com/aws/aws-sdk-go/aws/credentials/ssocreds" "github.com/aws/aws-sdk-go/aws/credentials/stscreds" "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/request" @@ -100,6 +101,9 @@ func resolveCredsFromProfile(cfg *aws.Config, sharedCfg.Creds, ) + case sharedCfg.hasSSOConfiguration(): + creds = resolveSSOCredentials(cfg, sharedCfg, handlers) + case len(sharedCfg.CredentialProcess) != 0: // Get credentials from CredentialProcess creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) @@ -151,6 +155,21 @@ func resolveCredsFromProfile(cfg *aws.Config, return creds, nil } +func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers request.Handlers) *credentials.Credentials { + cfgCopy := cfg.Copy() + cfgCopy.Region = &sharedCfg.SSORegion + + return ssocreds.NewCredentials( + &Session{ + Config: cfgCopy, + Handlers: handlers.Copy(), + }, + sharedCfg.SSOAccountID, + sharedCfg.SSORoleName, + sharedCfg.SSOStartURL, + ) +} + // valid credential source values const ( credSourceEc2Metadata = "Ec2InstanceMetadata" diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 08713cc3474..038ae222ffc 100644 --- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -36,7 +36,7 @@ const ( // 
ErrSharedConfigSourceCollision will be returned if a section contains both // source_profile and credential_source -var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil) +var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token, or sso", nil) // ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment // variables are empty and Environment was set as the credential source diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index be7daacf308..5ab05d56cc6 100644 --- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -2,6 +2,7 @@ package session import ( "fmt" + "strings" "time" "github.com/aws/aws-sdk-go/aws/awserr" @@ -25,6 +26,12 @@ const ( roleSessionNameKey = `role_session_name` // optional roleDurationSecondsKey = "duration_seconds" // optional + // AWS Single Sign-On (AWS SSO) group + ssoAccountIDKey = "sso_account_id" + ssoRegionKey = "sso_region" + ssoRoleNameKey = "sso_role_name" + ssoStartURL = "sso_start_url" + // CSM options csmEnabledKey = `csm_enabled` csmHostKey = `csm_host` @@ -78,6 +85,11 @@ type sharedConfig struct { CredentialProcess string WebIdentityTokenFile string + SSOAccountID string + SSORegion string + SSORoleName string + SSOStartURL string + RoleARN string RoleSessionName string ExternalID string @@ -217,9 +229,9 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s cfg.clearAssumeRoleOptions() } else { // First time a profile has been seen, It must either be a assume role - // or credentials. Assert if the credential type requires a role ARN, - // the ARN is also set. - if err := cfg.validateCredentialsRequireARN(profile); err != nil { + // credentials, or SSO. Assert if the credential type requires a role ARN, + // the ARN is also set, or validate that the SSO configuration is complete. 
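+		// As a sketch, a profile that passes the SSO validation below supplies
+		// all four sso_* keys (placeholder values taken from the ssocreds
+		// package documentation):
+		//
+		//   [profile devsso]
+		//   sso_account_id = 123456789012
+		//   sso_region     = us-east-1
+		//   sso_role_name  = SSOReadOnlyRole
+		//   sso_start_url  = https://my-sso-portal.awsapps.com/start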
+ if err := cfg.validateCredentialsConfig(profile); err != nil { return err } } @@ -312,6 +324,12 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e } cfg.S3UsEast1RegionalEndpoint = sre } + + // AWS Single Sign-On (AWS SSO) + updateString(&cfg.SSOAccountID, section, ssoAccountIDKey) + updateString(&cfg.SSORegion, section, ssoRegionKey) + updateString(&cfg.SSORoleName, section, ssoRoleNameKey) + updateString(&cfg.SSOStartURL, section, ssoStartURL) } updateString(&cfg.CredentialProcess, section, credentialProcessKey) @@ -342,6 +360,18 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e return nil } +func (cfg *sharedConfig) validateCredentialsConfig(profile string) error { + if err := cfg.validateCredentialsRequireARN(profile); err != nil { + return err + } + + if err := cfg.validateSSOConfiguration(profile); err != nil { + return err + } + + return nil +} + func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error { var credSource string @@ -371,6 +401,7 @@ func (cfg *sharedConfig) validateCredentialType() error { len(cfg.CredentialSource) != 0, len(cfg.CredentialProcess) != 0, len(cfg.WebIdentityTokenFile) != 0, + cfg.hasSSOConfiguration(), ) { return ErrSharedConfigSourceCollision } @@ -378,12 +409,43 @@ func (cfg *sharedConfig) validateCredentialType() error { return nil } +func (cfg *sharedConfig) validateSSOConfiguration(profile string) error { + if !cfg.hasSSOConfiguration() { + return nil + } + + var missing []string + if len(cfg.SSOAccountID) == 0 { + missing = append(missing, ssoAccountIDKey) + } + + if len(cfg.SSORegion) == 0 { + missing = append(missing, ssoRegionKey) + } + + if len(cfg.SSORoleName) == 0 { + missing = append(missing, ssoRoleNameKey) + } + + if len(cfg.SSOStartURL) == 0 { + missing = append(missing, ssoStartURL) + } + + if len(missing) > 0 { + return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", + profile, strings.Join(missing, ", ")) + } + + return nil +} + func (cfg *sharedConfig) hasCredentials() bool { switch { case len(cfg.SourceProfileName) != 0: case len(cfg.CredentialSource) != 0: case len(cfg.CredentialProcess) != 0: case len(cfg.WebIdentityTokenFile) != 0: + case cfg.hasSSOConfiguration(): case cfg.Creds.HasKeys(): default: return false @@ -407,6 +469,18 @@ func (cfg *sharedConfig) clearAssumeRoleOptions() { cfg.SourceProfileName = "" } +func (cfg *sharedConfig) hasSSOConfiguration() bool { + switch { + case len(cfg.SSOAccountID) != 0: + case len(cfg.SSORegion) != 0: + case len(cfg.SSORoleName) != 0: + case len(cfg.SSOStartURL) != 0: + default: + return false + } + return true +} + func oneOrNone(bs ...bool) bool { var count int diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go index 609aa89c084..cf26997eb29 100644 --- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.36.28" +const SDKVersion = "1.37.0" diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go new file mode 100644 index 00000000000..a029217e4c6 --- /dev/null +++ 
b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go
@@ -0,0 +1,88 @@
+// Package jsonrpc provides JSON RPC utilities for serialization of AWS
+// requests and responses.
+package jsonrpc
+
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/json.json build_test.go
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/json.json unmarshal_test.go
+
+import (
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
+	"github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+var emptyJSON = []byte("{}")
+
+// BuildHandler is a named request handler for building jsonrpc protocol
+// requests
+var BuildHandler = request.NamedHandler{
+	Name: "awssdk.jsonrpc.Build",
+	Fn:   Build,
+}
+
+// UnmarshalHandler is a named request handler for unmarshaling jsonrpc
+// protocol requests
+var UnmarshalHandler = request.NamedHandler{
+	Name: "awssdk.jsonrpc.Unmarshal",
+	Fn:   Unmarshal,
+}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc
+// protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{
+	Name: "awssdk.jsonrpc.UnmarshalMeta",
+	Fn:   UnmarshalMeta,
+}
+
+// Build builds a JSON payload for a JSON RPC request.
+func Build(req *request.Request) {
+	var buf []byte
+	var err error
+	if req.ParamsFilled() {
+		buf, err = jsonutil.BuildJSON(req.Params)
+		if err != nil {
+			req.Error = awserr.New(request.ErrCodeSerialization, "failed encoding JSON RPC request", err)
+			return
+		}
+	} else {
+		buf = emptyJSON
+	}
+
+	if req.ClientInfo.TargetPrefix != "" || string(buf) != "{}" {
+		req.SetBufferBody(buf)
+	}
+
+	if req.ClientInfo.TargetPrefix != "" {
+		target := req.ClientInfo.TargetPrefix + "." + req.Operation.Name
+		req.HTTPRequest.Header.Add("X-Amz-Target", target)
+	}
+
+	// Only set the content type if one is not already specified and a
+	// JSONVersion is specified.
+	if ct, v := req.HTTPRequest.Header.Get("Content-Type"), req.ClientInfo.JSONVersion; len(ct) == 0 && len(v) != 0 {
+		jsonVersion := req.ClientInfo.JSONVersion
+		req.HTTPRequest.Header.Set("Content-Type", "application/x-amz-json-"+jsonVersion)
+	}
+}
+
+// Unmarshal unmarshals a response for a JSON RPC service.
+func Unmarshal(req *request.Request) {
+	defer req.HTTPResponse.Body.Close()
+	if req.DataFilled() {
+		err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body)
+		if err != nil {
+			req.Error = awserr.NewRequestFailure(
+				awserr.New(request.ErrCodeSerialization, "failed decoding JSON RPC response", err),
+				req.HTTPResponse.StatusCode,
+				req.RequestID,
+			)
+		}
+	}
+	return
+}
+
+// UnmarshalMeta unmarshals headers from a response for a JSON RPC service.
+func UnmarshalMeta(req *request.Request) {
+	rest.UnmarshalMeta(req)
+}
diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
new file mode 100644
index 00000000000..c0c52e2db0f
--- /dev/null
+++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
@@ -0,0 +1,107 @@
+package jsonrpc
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
+)
+
+// UnmarshalTypedError provides unmarshaling of API response errors
+// for both typed and untyped errors.
+type UnmarshalTypedError struct {
+	exceptions map[string]func(protocol.ResponseMetadata) error
+}
+
+// NewUnmarshalTypedError returns an UnmarshalTypedError initialized with the
+// mapping of exception names to error unmarshalers
+func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError {
+	return &UnmarshalTypedError{
+		exceptions: exceptions,
+	}
+}
+
+// UnmarshalError attempts to unmarshal the HTTP response error as a known
+// error type. If unable to unmarshal the error type, the generic SDK error
+// type will be used.
+func (u *UnmarshalTypedError) UnmarshalError(
+	resp *http.Response,
+	respMeta protocol.ResponseMetadata,
+) (error, error) {
+
+	var buf bytes.Buffer
+	var jsonErr jsonErrorResponse
+	teeReader := io.TeeReader(resp.Body, &buf)
+	err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader)
+	if err != nil {
+		return nil, err
+	}
+	body := ioutil.NopCloser(&buf)
+
+	// The code may be separated by a hash (#), with the last element being the
+	// code used by the SDK.
+	codeParts := strings.SplitN(jsonErr.Code, "#", 2)
+	code := codeParts[len(codeParts)-1]
+	msg := jsonErr.Message
+
+	if fn, ok := u.exceptions[code]; ok {
+		// If the exception code is known, use the associated constructor to get
+		// a value for the exception that the JSON body can be unmarshaled into.
+		v := fn(respMeta)
+		err := jsonutil.UnmarshalJSONCaseInsensitive(v, body)
+		if err != nil {
+			return nil, err
+		}
+
+		return v, nil
+	}
+
+	// fallback to unmodeled generic exceptions
+	return awserr.NewRequestFailure(
+		awserr.New(code, msg, nil),
+		respMeta.StatusCode,
+		respMeta.RequestID,
+	), nil
+}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc
+// protocol request errors
+var UnmarshalErrorHandler = request.NamedHandler{
+	Name: "awssdk.jsonrpc.UnmarshalError",
+	Fn:   UnmarshalError,
+}
+
+// UnmarshalError unmarshals an error response for a JSON RPC service.
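+//
+// As a caller-side sketch (assuming err was returned by a Send on this
+// protocol), the unmarshaled failure can be inspected with a type assertion:
+//
+//	if reqErr, ok := err.(awserr.RequestFailure); ok {
+//		fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.RequestID())
+//	}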
+func UnmarshalError(req *request.Request) { + defer req.HTTPResponse.Body.Close() + + var jsonErr jsonErrorResponse + err := jsonutil.UnmarshalJSONError(&jsonErr, req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), + req.HTTPResponse.StatusCode, + req.RequestID, + ) + return + } + + codes := strings.SplitN(jsonErr.Code, "#", 2) + req.Error = awserr.NewRequestFailure( + awserr.New(codes[len(codes)-1], jsonErr.Message, nil), + req.HTTPResponse.StatusCode, + req.RequestID, + ) +} + +type jsonErrorResponse struct { + Code string `json:"__type"` + Message string `json:"message"` +} diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go new file mode 100644 index 00000000000..2e0e205af37 --- /dev/null +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go @@ -0,0 +1,59 @@ +// Package restjson provides RESTful JSON serialization of AWS +// requests and responses. +package restjson + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-json.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +// BuildHandler is a named request handler for building restjson protocol +// requests +var BuildHandler = request.NamedHandler{ + Name: "awssdk.restjson.Build", + Fn: Build, +} + +// UnmarshalHandler is a named request handler for unmarshaling restjson +// protocol requests +var UnmarshalHandler = request.NamedHandler{ + Name: "awssdk.restjson.Unmarshal", + Fn: Unmarshal, +} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restjson +// protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{ + Name: "awssdk.restjson.UnmarshalMeta", + Fn: UnmarshalMeta, +} + +// Build builds a request for the REST JSON protocol. +func Build(r *request.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + if v := r.HTTPRequest.Header.Get("Content-Type"); len(v) == 0 { + r.HTTPRequest.Header.Set("Content-Type", "application/json") + } + jsonrpc.Build(r) + } +} + +// Unmarshal unmarshals a response body for the REST JSON protocol. +func Unmarshal(r *request.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + jsonrpc.Unmarshal(r) + } else { + rest.Unmarshal(r) + } +} + +// UnmarshalMeta unmarshals response headers for the REST JSON protocol. 
+func UnmarshalMeta(r *request.Request) {
+	rest.UnmarshalMeta(r)
+}
diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go
new file mode 100644
index 00000000000..d756d8cc529
--- /dev/null
+++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go
@@ -0,0 +1,134 @@
+package restjson
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
+	"github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+const (
+	errorTypeHeader    = "X-Amzn-Errortype"
+	errorMessageHeader = "X-Amzn-Errormessage"
+)
+
+// UnmarshalTypedError provides unmarshaling of API response errors
+// for both typed and untyped errors.
+type UnmarshalTypedError struct {
+	exceptions map[string]func(protocol.ResponseMetadata) error
+}
+
+// NewUnmarshalTypedError returns an UnmarshalTypedError initialized with the
+// mapping of exception names to error unmarshalers
+func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError {
+	return &UnmarshalTypedError{
+		exceptions: exceptions,
+	}
+}
+
+// UnmarshalError attempts to unmarshal the HTTP response error as a known
+// error type. If unable to unmarshal the error type, the generic SDK error
+// type will be used.
+func (u *UnmarshalTypedError) UnmarshalError(
+	resp *http.Response,
+	respMeta protocol.ResponseMetadata,
+) (error, error) {
+
+	code := resp.Header.Get(errorTypeHeader)
+	msg := resp.Header.Get(errorMessageHeader)
+
+	body := resp.Body
+	if len(code) == 0 {
+		// If unable to get the code from the HTTP headers, we have to parse the
+		// JSON message to determine what kind of exception this is.
+		var buf bytes.Buffer
+		var jsonErr jsonErrorResponse
+		teeReader := io.TeeReader(resp.Body, &buf)
+		err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader)
+		if err != nil {
+			return nil, err
+		}
+
+		body = ioutil.NopCloser(&buf)
+		code = jsonErr.Code
+		msg = jsonErr.Message
+	}
+
+	// If the code has colon separators, remove them so it can be compared
+	// against modeled exception names.
+	code = strings.SplitN(code, ":", 2)[0]
+
+	if fn, ok := u.exceptions[code]; ok {
+		// If the exception code is known, use the associated constructor to get
+		// a value for the exception that the JSON body can be unmarshaled into.
+		v := fn(respMeta)
+		if err := jsonutil.UnmarshalJSONCaseInsensitive(v, body); err != nil {
+			return nil, err
+		}
+
+		if err := rest.UnmarshalResponse(resp, v, true); err != nil {
+			return nil, err
+		}
+
+		return v, nil
+	}
+
+	// fallback to unmodeled generic exceptions
+	return awserr.NewRequestFailure(
+		awserr.New(code, msg, nil),
+		respMeta.StatusCode,
+		respMeta.RequestID,
+	), nil
+}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling restjson
+// protocol request errors
+var UnmarshalErrorHandler = request.NamedHandler{
+	Name: "awssdk.restjson.UnmarshalError",
+	Fn:   UnmarshalError,
+}
+
+// UnmarshalError unmarshals a response error for the REST JSON protocol.
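+//
+// As an illustrative sketch: given a 404 response whose body is
+//
+//	{"code": "ResourceNotFoundException", "message": "vault not found"}
+//
+// the request error is set to an awserr.RequestFailure whose Code() is
+// "ResourceNotFoundException" and whose StatusCode() is 404.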
+func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + var jsonErr jsonErrorResponse + err := jsonutil.UnmarshalJSONError(&jsonErr, r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal response error", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + code := r.HTTPResponse.Header.Get(errorTypeHeader) + if code == "" { + code = jsonErr.Code + } + msg := r.HTTPResponse.Header.Get(errorMessageHeader) + if msg == "" { + msg = jsonErr.Message + } + + code = strings.SplitN(code, ":", 2)[0] + r.Error = awserr.NewRequestFailure( + awserr.New(code, jsonErr.Message, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) +} + +type jsonErrorResponse struct { + Code string `json:"code"` + Message string `json:"message"` +} diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/sso/api.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/sso/api.go new file mode 100644 index 00000000000..4498f285e47 --- /dev/null +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/sso/api.go @@ -0,0 +1,1210 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sso + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opGetRoleCredentials = "GetRoleCredentials" + +// GetRoleCredentialsRequest generates a "aws/request.Request" representing the +// client's request for the GetRoleCredentials operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetRoleCredentials for more information on using the GetRoleCredentials +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetRoleCredentialsRequest method. +// req, resp := client.GetRoleCredentialsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials +func (c *SSO) GetRoleCredentialsRequest(input *GetRoleCredentialsInput) (req *request.Request, output *GetRoleCredentialsOutput) { + op := &request.Operation{ + Name: opGetRoleCredentials, + HTTPMethod: "GET", + HTTPPath: "/federation/credentials", + } + + if input == nil { + input = &GetRoleCredentialsInput{} + } + + output = &GetRoleCredentialsOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// GetRoleCredentials API operation for AWS Single Sign-On. +// +// Returns the STS short-term credentials for a given role name that is assigned +// to the user. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation GetRoleCredentials for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// * UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// * TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// * ResourceNotFoundException +// The specified resource doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials +func (c *SSO) GetRoleCredentials(input *GetRoleCredentialsInput) (*GetRoleCredentialsOutput, error) { + req, out := c.GetRoleCredentialsRequest(input) + return out, req.Send() +} + +// GetRoleCredentialsWithContext is the same as GetRoleCredentials with the addition of +// the ability to pass a context and additional request options. +// +// See GetRoleCredentials for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) GetRoleCredentialsWithContext(ctx aws.Context, input *GetRoleCredentialsInput, opts ...request.Option) (*GetRoleCredentialsOutput, error) { + req, out := c.GetRoleCredentialsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListAccountRoles = "ListAccountRoles" + +// ListAccountRolesRequest generates a "aws/request.Request" representing the +// client's request for the ListAccountRoles operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAccountRoles for more information on using the ListAccountRoles +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAccountRolesRequest method. +// req, resp := client.ListAccountRolesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles +func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *request.Request, output *ListAccountRolesOutput) { + op := &request.Operation{ + Name: opListAccountRoles, + HTTPMethod: "GET", + HTTPPath: "/assignment/roles", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAccountRolesInput{} + } + + output = &ListAccountRolesOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// ListAccountRoles API operation for AWS Single Sign-On. 
+// +// Lists all roles that are assigned to the user for a given AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation ListAccountRoles for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// * UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// * TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// * ResourceNotFoundException +// The specified resource doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles +func (c *SSO) ListAccountRoles(input *ListAccountRolesInput) (*ListAccountRolesOutput, error) { + req, out := c.ListAccountRolesRequest(input) + return out, req.Send() +} + +// ListAccountRolesWithContext is the same as ListAccountRoles with the addition of +// the ability to pass a context and additional request options. +// +// See ListAccountRoles for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountRolesWithContext(ctx aws.Context, input *ListAccountRolesInput, opts ...request.Option) (*ListAccountRolesOutput, error) { + req, out := c.ListAccountRolesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAccountRolesPages iterates over the pages of a ListAccountRoles operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAccountRoles method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAccountRoles operation. +// pageNum := 0 +// err := client.ListAccountRolesPages(params, +// func(page *sso.ListAccountRolesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSO) ListAccountRolesPages(input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool) error { + return c.ListAccountRolesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAccountRolesPagesWithContext same as ListAccountRolesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSO) ListAccountRolesPagesWithContext(ctx aws.Context, input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAccountRolesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAccountRolesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAccountRolesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListAccounts = "ListAccounts" + +// ListAccountsRequest generates a "aws/request.Request" representing the +// client's request for the ListAccounts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAccounts for more information on using the ListAccounts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAccountsRequest method. +// req, resp := client.ListAccountsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts +func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Request, output *ListAccountsOutput) { + op := &request.Operation{ + Name: opListAccounts, + HTTPMethod: "GET", + HTTPPath: "/assignment/accounts", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAccountsInput{} + } + + output = &ListAccountsOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// ListAccounts API operation for AWS Single Sign-On. +// +// Lists all AWS accounts assigned to the user. These AWS accounts are assigned +// by the administrator of the account. For more information, see Assign User +// Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) +// in the AWS SSO User Guide. This operation returns a paginated response. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation ListAccounts for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// * UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// * TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// * ResourceNotFoundException +// The specified resource doesn't exist. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts +func (c *SSO) ListAccounts(input *ListAccountsInput) (*ListAccountsOutput, error) { + req, out := c.ListAccountsRequest(input) + return out, req.Send() +} + +// ListAccountsWithContext is the same as ListAccounts with the addition of +// the ability to pass a context and additional request options. +// +// See ListAccounts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountsWithContext(ctx aws.Context, input *ListAccountsInput, opts ...request.Option) (*ListAccountsOutput, error) { + req, out := c.ListAccountsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAccountsPages iterates over the pages of a ListAccounts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAccounts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAccounts operation. +// pageNum := 0 +// err := client.ListAccountsPages(params, +// func(page *sso.ListAccountsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSO) ListAccountsPages(input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool) error { + return c.ListAccountsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAccountsPagesWithContext same as ListAccountsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountsPagesWithContext(ctx aws.Context, input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAccountsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAccountsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAccountsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opLogout = "Logout" + +// LogoutRequest generates a "aws/request.Request" representing the +// client's request for the Logout operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See Logout for more information on using the Logout +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the LogoutRequest method. 
+// req, resp := client.LogoutRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout +func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *LogoutOutput) { + op := &request.Operation{ + Name: opLogout, + HTTPMethod: "POST", + HTTPPath: "/logout", + } + + if input == nil { + input = &LogoutInput{} + } + + output = &LogoutOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// Logout API operation for AWS Single Sign-On. +// +// Removes the client- and server-side session that is associated with the user. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation Logout for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// * UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// * TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout +func (c *SSO) Logout(input *LogoutInput) (*LogoutOutput, error) { + req, out := c.LogoutRequest(input) + return out, req.Send() +} + +// LogoutWithContext is the same as Logout with the addition of +// the ability to pass a context and additional request options. +// +// See Logout for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) LogoutWithContext(ctx aws.Context, input *LogoutInput, opts ...request.Option) (*LogoutOutput, error) { + req, out := c.LogoutRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Provides information about your AWS account. +type AccountInfo struct { + _ struct{} `type:"structure"` + + // The identifier of the AWS account that is assigned to the user. + AccountId *string `locationName:"accountId" type:"string"` + + // The display name of the AWS account that is assigned to the user. + AccountName *string `locationName:"accountName" type:"string"` + + // The email address of the AWS account that is assigned to the user. + EmailAddress *string `locationName:"emailAddress" min:"1" type:"string"` +} + +// String returns the string representation +func (s AccountInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountInfo) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *AccountInfo) SetAccountId(v string) *AccountInfo { + s.AccountId = &v + return s +} + +// SetAccountName sets the AccountName field's value. 
+func (s *AccountInfo) SetAccountName(v string) *AccountInfo { + s.AccountName = &v + return s +} + +// SetEmailAddress sets the EmailAddress field's value. +func (s *AccountInfo) SetEmailAddress(v string) *AccountInfo { + s.EmailAddress = &v + return s +} + +type GetRoleCredentialsInput struct { + _ struct{} `type:"structure"` + + // The token issued by the CreateToken API call. For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the AWS SSO OIDC API Reference Guide. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` + + // The identifier for the AWS account that is assigned to the user. + // + // AccountId is a required field + AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"` + + // The friendly name of the role that is assigned to the user. + // + // RoleName is a required field + RoleName *string `location:"querystring" locationName:"role_name" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRoleCredentialsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRoleCredentialsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRoleCredentialsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRoleCredentialsInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *GetRoleCredentialsInput) SetAccessToken(v string) *GetRoleCredentialsInput { + s.AccessToken = &v + return s +} + +// SetAccountId sets the AccountId field's value. +func (s *GetRoleCredentialsInput) SetAccountId(v string) *GetRoleCredentialsInput { + s.AccountId = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *GetRoleCredentialsInput) SetRoleName(v string) *GetRoleCredentialsInput { + s.RoleName = &v + return s +} + +type GetRoleCredentialsOutput struct { + _ struct{} `type:"structure"` + + // The credentials for the role that is assigned to the user. + RoleCredentials *RoleCredentials `locationName:"roleCredentials" type:"structure"` +} + +// String returns the string representation +func (s GetRoleCredentialsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRoleCredentialsOutput) GoString() string { + return s.String() +} + +// SetRoleCredentials sets the RoleCredentials field's value. +func (s *GetRoleCredentialsOutput) SetRoleCredentials(v *RoleCredentials) *GetRoleCredentialsOutput { + s.RoleCredentials = v + return s +} + +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. 
+type InvalidRequestException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidRequestException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidRequestException) GoString() string { + return s.String() +} + +func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { + return &InvalidRequestException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRequestException) Code() string { + return "InvalidRequestException" +} + +// Message returns the exception's message. +func (s *InvalidRequestException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRequestException) OrigErr() error { + return nil +} + +func (s *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +type ListAccountRolesInput struct { + _ struct{} `type:"structure"` + + // The token issued by the CreateToken API call. For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the AWS SSO OIDC API Reference Guide. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` + + // The identifier for the AWS account that is assigned to the user. + // + // AccountId is a required field + AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"` + + // The number of items that clients can request per page. + MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"` + + // The page token from the previous response output when you request subsequent + // pages. + NextToken *string `location:"querystring" locationName:"next_token" type:"string"` +} + +// String returns the string representation +func (s ListAccountRolesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccountRolesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAccountRolesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAccountRolesInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. 
+func (s *ListAccountRolesInput) SetAccessToken(v string) *ListAccountRolesInput { + s.AccessToken = &v + return s +} + +// SetAccountId sets the AccountId field's value. +func (s *ListAccountRolesInput) SetAccountId(v string) *ListAccountRolesInput { + s.AccountId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListAccountRolesInput) SetMaxResults(v int64) *ListAccountRolesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAccountRolesInput) SetNextToken(v string) *ListAccountRolesInput { + s.NextToken = &v + return s +} + +type ListAccountRolesOutput struct { + _ struct{} `type:"structure"` + + // The page token that the client uses to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // A paginated response with the list of roles and the next token if more results + // are available. + RoleList []*RoleInfo `locationName:"roleList" type:"list"` +} + +// String returns the string representation +func (s ListAccountRolesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccountRolesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAccountRolesOutput) SetNextToken(v string) *ListAccountRolesOutput { + s.NextToken = &v + return s +} + +// SetRoleList sets the RoleList field's value. +func (s *ListAccountRolesOutput) SetRoleList(v []*RoleInfo) *ListAccountRolesOutput { + s.RoleList = v + return s +} + +type ListAccountsInput struct { + _ struct{} `type:"structure"` + + // The token issued by the CreateToken API call. For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the AWS SSO OIDC API Reference Guide. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` + + // This is the number of items clients can request per page. + MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"` + + // (Optional) When requesting subsequent pages, this is the page token from + // the previous response output. + NextToken *string `location:"querystring" locationName:"next_token" type:"string"` +} + +// String returns the string representation +func (s ListAccountsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccountsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAccountsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAccountsInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *ListAccountsInput) SetAccessToken(v string) *ListAccountsInput { + s.AccessToken = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListAccountsInput) SetMaxResults(v int64) *ListAccountsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value.
+func (s *ListAccountsInput) SetNextToken(v string) *ListAccountsInput { + s.NextToken = &v + return s +} + +type ListAccountsOutput struct { + _ struct{} `type:"structure"` + + // A paginated response with the list of account information and the next token + // if more results are available. + AccountList []*AccountInfo `locationName:"accountList" type:"list"` + + // The page token that the client uses to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListAccountsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccountsOutput) GoString() string { + return s.String() +} + +// SetAccountList sets the AccountList field's value. +func (s *ListAccountsOutput) SetAccountList(v []*AccountInfo) *ListAccountsOutput { + s.AccountList = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAccountsOutput) SetNextToken(v string) *ListAccountsOutput { + s.NextToken = &v + return s +} + +type LogoutInput struct { + _ struct{} `type:"structure"` + + // The token issued by the CreateToken API call. For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the AWS SSO OIDC API Reference Guide. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation +func (s LogoutInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LogoutInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LogoutInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LogoutInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *LogoutInput) SetAccessToken(v string) *LogoutInput { + s.AccessToken = &v + return s +} + +type LogoutOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s LogoutOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LogoutOutput) GoString() string { + return s.String() +} + +// The specified resource doesn't exist. +type ResourceNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ResourceNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceNotFoundException) GoString() string { + return s.String() +} + +func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { + return &ResourceNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceNotFoundException) Code() string { + return "ResourceNotFoundException" +} + +// Message returns the exception's message.
+func (s *ResourceNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceNotFoundException) OrigErr() error { + return nil +} + +func (s *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Provides information about the role credentials that are assigned to the +// user. +type RoleCredentials struct { + _ struct{} `type:"structure"` + + // The identifier used for the temporary security credentials. For more information, + // see Using Temporary Security Credentials to Request Access to AWS Resources + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + AccessKeyId *string `locationName:"accessKeyId" type:"string"` + + // The date on which temporary security credentials expire. + Expiration *int64 `locationName:"expiration" type:"long"` + + // The key that is used to sign the request. For more information, see Using + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + SecretAccessKey *string `locationName:"secretAccessKey" type:"string" sensitive:"true"` + + // The token used for temporary credentials. For more information, see Using + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + SessionToken *string `locationName:"sessionToken" type:"string" sensitive:"true"` +} + +// String returns the string representation +func (s RoleCredentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RoleCredentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *RoleCredentials) SetAccessKeyId(v string) *RoleCredentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *RoleCredentials) SetExpiration(v int64) *RoleCredentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *RoleCredentials) SetSecretAccessKey(v string) *RoleCredentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *RoleCredentials) SetSessionToken(v string) *RoleCredentials { + s.SessionToken = &v + return s +} + +// Provides information about the role that is assigned to the user. +type RoleInfo struct { + _ struct{} `type:"structure"` + + // The identifier of the AWS account assigned to the user. + AccountId *string `locationName:"accountId" type:"string"` + + // The friendly name of the role that is assigned to the user. 
+ RoleName *string `locationName:"roleName" type:"string"` +} + +// String returns the string representation +func (s RoleInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RoleInfo) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *RoleInfo) SetAccountId(v string) *RoleInfo { + s.AccountId = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *RoleInfo) SetRoleName(v string) *RoleInfo { + s.RoleName = &v + return s +} + +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +type TooManyRequestsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s TooManyRequestsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TooManyRequestsException) GoString() string { + return s.String() +} + +func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { + return &TooManyRequestsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TooManyRequestsException) Code() string { + return "TooManyRequestsException" +} + +// Message returns the exception's message. +func (s *TooManyRequestsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TooManyRequestsException) OrigErr() error { + return nil +} + +func (s *TooManyRequestsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +type UnauthorizedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s UnauthorizedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnauthorizedException) GoString() string { + return s.String() +} + +func newErrorUnauthorizedException(v protocol.ResponseMetadata) error { + return &UnauthorizedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnauthorizedException) Code() string { + return "UnauthorizedException" +} + +// Message returns the exception's message. +func (s *UnauthorizedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnauthorizedException) OrigErr() error { + return nil +} + +func (s *UnauthorizedException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *UnauthorizedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *UnauthorizedException) RequestID() string { + return s.RespMetadata.RequestID +} diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go new file mode 100644 index 00000000000..92d82b2afb6 --- /dev/null +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go @@ -0,0 +1,44 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package sso provides the client and types for making API +// requests to AWS Single Sign-On. +// +// AWS Single Sign-On Portal is a web service that makes it easy for you to +// assign user access to AWS SSO resources such as the user portal. Users can +// get AWS account applications and roles assigned to them and get federated +// into the application. +// +// For general information about AWS SSO, see What is AWS Single Sign-On? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) +// in the AWS SSO User Guide. +// +// This API reference guide describes the AWS SSO Portal operations that you +// can call programmatically and includes detailed information on data types +// and errors. +// +// AWS provides SDKs that consist of libraries and sample code for various programming +// languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs +// provide a convenient way to create programmatic access to AWS SSO and other +// AWS services. For more information about the AWS SDKs, including how to download +// and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/). +// +// See https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10 for more information on this service. +// +// See sso package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/ +// +// Using the Client +// +// To contact AWS Single Sign-On with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS Single Sign-On client SSO for more +// information on creating a client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/#New +package sso diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go new file mode 100644 index 00000000000..77a6792e352 --- /dev/null +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go @@ -0,0 +1,44 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sso + +import ( + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + + // ErrCodeInvalidRequestException for service response error code + // "InvalidRequestException". + // + // Indicates that a problem occurred with the input to the request. For example, + // a required parameter might be missing or out of range.
+ ErrCodeInvalidRequestException = "InvalidRequestException" + + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". + // + // The specified resource doesn't exist. + ErrCodeResourceNotFoundException = "ResourceNotFoundException" + + // ErrCodeTooManyRequestsException for service response error code + // "TooManyRequestsException". + // + // Indicates that the request is being made too frequently and is more than + // what the server can handle. + ErrCodeTooManyRequestsException = "TooManyRequestsException" + + // ErrCodeUnauthorizedException for service response error code + // "UnauthorizedException". + // + // Indicates that the request is not authorized. This can happen due to an invalid + // access token in the request. + ErrCodeUnauthorizedException = "UnauthorizedException" +) + +var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ + "InvalidRequestException": newErrorInvalidRequestException, + "ResourceNotFoundException": newErrorResourceNotFoundException, + "TooManyRequestsException": newErrorTooManyRequestsException, + "UnauthorizedException": newErrorUnauthorizedException, +} diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/sso/service.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/sso/service.go new file mode 100644 index 00000000000..35175331fc7 --- /dev/null +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/sso/service.go @@ -0,0 +1,104 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sso + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +// SSO provides the API operation methods for making requests to +// AWS Single Sign-On. See this package's package overview docs +// for details on the service. +// +// SSO methods are safe to use concurrently. It is not safe to +// mutate any of the struct's properties though. +type SSO struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "SSO" // Name of service. + EndpointsID = "portal.sso" // ID to lookup a service endpoint with. + ServiceID = "SSO" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the SSO client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// mySession := session.Must(session.NewSession()) +// +// // Create a SSO client from just a session. +// svc := sso.New(mySession) +// +// // Create a SSO client with additional configuration +// svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSO { + c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "awsssoportal" + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SSO { + svc := &SSO{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2019-06-10", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed( + protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), + ) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SSO operation and runs any +// custom request initialization. +func (c *SSO) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go new file mode 100644 index 00000000000..4cac247c188 --- /dev/null +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go @@ -0,0 +1,86 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package ssoiface provides an interface to enable mocking the AWS Single Sign-On service client +// for testing your code. +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. +package ssoiface + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/sso" +) + +// SSOAPI provides an interface to enable mocking the +// sso.SSO service client's API operations, +// paginators, and waiters. This makes unit testing your code that calls out +// to the SDK's service client easier. +// +// The best way to use this interface is so the SDK's service client's calls +// can be stubbed out for unit testing your code with the SDK without needing +// to inject custom request handlers into the SDK's request pipeline. +// +// // myFunc uses an SDK service client to make a request to +// // AWS Single Sign-On. +// func myFunc(svc ssoiface.SSOAPI) bool { +// // Make svc.GetRoleCredentials request +// } +// +// func main() { +// sess := session.New() +// svc := sso.New(sess) +// +// myFunc(svc) +// } +// +// In your _test.go file: +// +// // Define a mock struct to be used in your unit tests of myFunc.
+// type mockSSOClient struct { +// ssoiface.SSOAPI +// } +// func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) { +// // mock response/functionality +// } +// +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockSSOClient{} +// +// myfunc(mockSvc) +// +// // Verify myFunc's functionality +// } +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. It's suggested to use the pattern above for testing, or to use +// tooling to generate mocks to satisfy the interfaces. +type SSOAPI interface { + GetRoleCredentials(*sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) + GetRoleCredentialsWithContext(aws.Context, *sso.GetRoleCredentialsInput, ...request.Option) (*sso.GetRoleCredentialsOutput, error) + GetRoleCredentialsRequest(*sso.GetRoleCredentialsInput) (*request.Request, *sso.GetRoleCredentialsOutput) + + ListAccountRoles(*sso.ListAccountRolesInput) (*sso.ListAccountRolesOutput, error) + ListAccountRolesWithContext(aws.Context, *sso.ListAccountRolesInput, ...request.Option) (*sso.ListAccountRolesOutput, error) + ListAccountRolesRequest(*sso.ListAccountRolesInput) (*request.Request, *sso.ListAccountRolesOutput) + + ListAccountRolesPages(*sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool) error + ListAccountRolesPagesWithContext(aws.Context, *sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool, ...request.Option) error + + ListAccounts(*sso.ListAccountsInput) (*sso.ListAccountsOutput, error) + ListAccountsWithContext(aws.Context, *sso.ListAccountsInput, ...request.Option) (*sso.ListAccountsOutput, error) + ListAccountsRequest(*sso.ListAccountsInput) (*request.Request, *sso.ListAccountsOutput) + + ListAccountsPages(*sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool) error + ListAccountsPagesWithContext(aws.Context, *sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool, ...request.Option) error + + Logout(*sso.LogoutInput) (*sso.LogoutOutput, error) + LogoutWithContext(aws.Context, *sso.LogoutInput, ...request.Option) (*sso.LogoutOutput, error) + LogoutRequest(*sso.LogoutInput) (*request.Request, *sso.LogoutOutput) +} + +var _ SSOAPI = (*sso.SSO)(nil) diff --git a/awsproviderlint/vendor/modules.txt b/awsproviderlint/vendor/modules.txt index a7795dbf411..be13611d063 100644 --- a/awsproviderlint/vendor/modules.txt +++ b/awsproviderlint/vendor/modules.txt @@ -12,7 +12,7 @@ cloud.google.com/go/storage github.com/agext/levenshtein # github.com/apparentlymart/go-textseg v1.0.0 github.com/apparentlymart/go-textseg/textseg -# github.com/aws/aws-sdk-go v1.36.28 +# github.com/aws/aws-sdk-go v1.37.0 ## explicit github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn @@ -25,6 +25,7 @@ github.com/aws/aws-sdk-go/aws/credentials github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds github.com/aws/aws-sdk-go/aws/credentials/endpointcreds github.com/aws/aws-sdk-go/aws/credentials/processcreds +github.com/aws/aws-sdk-go/aws/credentials/ssocreds github.com/aws/aws-sdk-go/aws/credentials/stscreds github.com/aws/aws-sdk-go/aws/csm github.com/aws/aws-sdk-go/aws/defaults @@ -50,12 +51,16 @@ github.com/aws/aws-sdk-go/private/protocol github.com/aws/aws-sdk-go/private/protocol/eventstream github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi github.com/aws/aws-sdk-go/private/protocol/json/jsonutil
+github.com/aws/aws-sdk-go/private/protocol/jsonrpc github.com/aws/aws-sdk-go/private/protocol/query github.com/aws/aws-sdk-go/private/protocol/query/queryutil github.com/aws/aws-sdk-go/private/protocol/rest +github.com/aws/aws-sdk-go/private/protocol/restjson github.com/aws/aws-sdk-go/private/protocol/restxml github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil github.com/aws/aws-sdk-go/service/s3 +github.com/aws/aws-sdk-go/service/sso +github.com/aws/aws-sdk-go/service/sso/ssoiface github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/bflad/gopaniccheck v0.1.0 From 0739656c591e50f2f6340b3b83298bdaa0c43e75 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Fri, 29 Jan 2021 10:03:30 -0500 Subject: [PATCH 0925/1212] docs/service/apigateway: aws_api_gateway_deployment usage overhaul to discourage stage_name and further encourage create_before_destroy (#17230) * docs/service/apigateway: aws_api_gateway_deployment usage overhaul to discourage stage_name and further encourage create_before_destroy Reference: https://github.com/hashicorp/terraform-provider-aws/issues/11344 Adds a new end-to-end example of an OpenAPI REST API and also encourages the usage of OpenAPI specifications for configuring the REST API. Support for the other API Gateway resources is not going anywhere, but the dependency management aspect of deployments can be more difficult in that model and it is much easier to discover the individual API Gateway resources than the OpenAPI support. In the future, it may be worth considering deprecating the `stage_name` and friends arguments since having a Terraform resource manage two remote resources is an anti-pattern and not well supported. Output from example: ```console $ terraform apply An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols: + create Terraform will perform the following actions: # aws_acm_certificate.example will be created + resource "aws_acm_certificate" "example" { + arn = (known after apply) + certificate_body = (known after apply) + domain_name = (known after apply) + domain_validation_options = (known after apply) + id = (known after apply) + private_key = (sensitive value) + status = (known after apply) + subject_alternative_names = (known after apply) + validation_emails = (known after apply) + validation_method = (known after apply) } # aws_api_gateway_base_path_mapping.example will be created + resource "aws_api_gateway_base_path_mapping" "example" { + api_id = (known after apply) + domain_name = (known after apply) + id = (known after apply) + stage_name = "example" } # aws_api_gateway_deployment.example will be created + resource "aws_api_gateway_deployment" "example" { + created_date = (known after apply) + execution_arn = (known after apply) + id = (known after apply) + invoke_url = (known after apply) + rest_api_id = (known after apply) + triggers = { + "redeployment" = "e042aae1faf8de8d7c7c98c063a986025f058c69" } } # aws_api_gateway_domain_name.example will be created + resource "aws_api_gateway_domain_name" "example" { + arn = (known after apply) + certificate_upload_date = (known after apply) + cloudfront_domain_name = (known after apply) + cloudfront_zone_id = (known after apply) + domain_name = (known after apply) + id = (known after apply) + regional_certificate_arn = (known after apply) + regional_domain_name = (known after apply) + regional_zone_id = (known after apply) + security_policy = (known after apply) + endpoint_configuration { + types = [ + "REGIONAL", ] } } # aws_api_gateway_method_settings.example will be created + resource "aws_api_gateway_method_settings" "example" { + id = (known after apply) + method_path = "*/*" + rest_api_id = (known after apply) + stage_name = "example" + settings { + cache_data_encrypted = (known after apply) + cache_ttl_in_seconds = (known after apply) + caching_enabled = (known after apply) + data_trace_enabled = (known after apply) + logging_level = (known after apply) + metrics_enabled = true + require_authorization_for_cache_control = (known after apply) + throttling_burst_limit = -1 + throttling_rate_limit = -1 + unauthorized_cache_control_header_strategy = (known after apply) } } # aws_api_gateway_rest_api.example will be created + resource "aws_api_gateway_rest_api" "example" { + api_key_source = (known after apply) + arn = (known after apply) + binary_media_types = (known after apply) + body = jsonencode( { + info = { + title = "api-gateway-rest-api-openapi-example" + version = "1.0" } + openapi = "3.0.1" + paths = { + /path1 = { + get = { + x-amazon-apigateway-integration = { + httpMethod = "GET" + payloadFormatVersion = "1.0" + type = "HTTP_PROXY" + uri = "https://ip-ranges.amazonaws.com/ip-ranges.json" } } } } } ) + created_date = (known after apply) + description = (known after apply) + disable_execute_api_endpoint = (known after apply) + execution_arn = (known after apply) + id = (known after apply) + minimum_compression_size = -1 + name = "api-gateway-rest-api-openapi-example" + policy = (known after apply) + root_resource_id = (known after apply) + endpoint_configuration { + types = [ + "REGIONAL", ] + vpc_endpoint_ids = (known after apply) } } # aws_api_gateway_stage.example will be created + resource "aws_api_gateway_stage" "example" { + arn = (known after apply) + 
deployment_id = (known after apply) + execution_arn = (known after apply) + id = (known after apply) + invoke_url = (known after apply) + rest_api_id = (known after apply) + stage_name = "example" } # tls_private_key.example will be created + resource "tls_private_key" "example" { + algorithm = "RSA" + ecdsa_curve = "P224" + id = (known after apply) + private_key_pem = (sensitive value) + public_key_fingerprint_md5 = (known after apply) + public_key_openssh = (known after apply) + public_key_pem = (known after apply) + rsa_bits = 2048 } # tls_self_signed_cert.example will be created + resource "tls_self_signed_cert" "example" { + allowed_uses = [ + "key_encipherment", + "digital_signature", + "server_auth", ] + cert_pem = (known after apply) + dns_names = [ + "example.com", ] + early_renewal_hours = 0 + id = (known after apply) + key_algorithm = "RSA" + private_key_pem = (sensitive value) + ready_for_renewal = true + validity_end_time = (known after apply) + validity_period_hours = 12 + validity_start_time = (known after apply) + subject { + common_name = "example.com" + organization = "ACME Examples, Inc" } } Plan: 9 to add, 0 to change, 0 to destroy. Changes to Outputs: + domain_url = (known after apply) + stage_invoke_url = (known after apply) Do you want to perform these actions? Terraform will perform the actions described above. Only 'yes' will be accepted to approve. Enter a value: yes tls_private_key.example: Creating... tls_private_key.example: Creation complete after 0s [id=c1129fc488709c4293493669e43d40b60144999d] tls_self_signed_cert.example: Creating... tls_self_signed_cert.example: Creation complete after 0s [id=199729227385231255426302845367097804347] aws_api_gateway_rest_api.example: Creating... aws_acm_certificate.example: Creating... aws_api_gateway_rest_api.example: Creation complete after 2s [id=halquax36h] aws_api_gateway_deployment.example: Creating... aws_acm_certificate.example: Creation complete after 3s [id=arn:aws:acm:us-west-2:123456789012:certificate/35cc4fc5-072f-4543-99d1-a1336ac05a41] aws_api_gateway_domain_name.example: Creating... aws_api_gateway_deployment.example: Creation complete after 1s [id=tj62g3] aws_api_gateway_stage.example: Creating... aws_api_gateway_stage.example: Creation complete after 1s [id=ags-halquax36h-example] aws_api_gateway_method_settings.example: Creating... aws_api_gateway_method_settings.example: Creation complete after 1s [id=halquax36h-example-*/*] aws_api_gateway_domain_name.example: Creation complete after 3s [id=example.com] aws_api_gateway_base_path_mapping.example: Creating... aws_api_gateway_base_path_mapping.example: Creation complete after 1s [id=example.com/] Apply complete! Resources: 9 added, 0 changed, 0 destroyed. Outputs: domain_url = "curl -H 'Host: example.com' https://d-orixhuv0o9.execute-api.us-west-2.amazonaws.com/path1 # may take a minute to become available on initial deploy" stage_invoke_url = "curl https://halquax36h.execute-api.us-west-2.amazonaws.com/example/path1" $ curl -s https://halquax36h.execute-api.us-west-2.amazonaws.com/example/path1 | jq '.createDate' "2021-01-21-00-44-18" $ curl -H 'Host: example.com' -s https://d-orixhuv0o9.execute-api.us-west-2.amazonaws.com/path1 | jq '.createDate' "2021-01-21-00-44-18" $ terraform apply -var 'rest_api_path=/path2' tls_private_key.example: Refreshing state... [id=c1129fc488709c4293493669e43d40b60144999d] tls_self_signed_cert.example: Refreshing state... [id=199729227385231255426302845367097804347] aws_api_gateway_rest_api.example: Refreshing state... 
[id=halquax36h] aws_acm_certificate.example: Refreshing state... [id=arn:aws:acm:us-west-2:123456789012:certificate/35cc4fc5-072f-4543-99d1-a1336ac05a41] aws_api_gateway_deployment.example: Refreshing state... [id=tj62g3] aws_api_gateway_domain_name.example: Refreshing state... [id=example.com] aws_api_gateway_stage.example: Refreshing state... [id=ags-halquax36h-example] aws_api_gateway_base_path_mapping.example: Refreshing state... [id=example.com/] aws_api_gateway_method_settings.example: Refreshing state... [id=halquax36h-example-*/*] An execution plan has been generated and is shown below. Resource actions are indicated with the following symbols: ~ update in-place +/- create replacement and then destroy Terraform will perform the following actions: # aws_api_gateway_deployment.example must be replaced +/- resource "aws_api_gateway_deployment" "example" { ~ created_date = "2021-01-22T02:59:46Z" -> (known after apply) ~ execution_arn = "arn:aws:execute-api:us-west-2:123456789012:halquax36h/" -> (known after apply) ~ id = "tj62g3" -> (known after apply) ~ invoke_url = "https://halquax36h.execute-api.us-west-2.amazonaws.com/" -> (known after apply) ~ triggers = { # forces replacement ~ "redeployment" = "e042aae1faf8de8d7c7c98c063a986025f058c69" -> "e6742b53b5eed7039e6fec056113bb049954d64b" } # (1 unchanged attribute hidden) } # aws_api_gateway_rest_api.example will be updated in-place ~ resource "aws_api_gateway_rest_api" "example" { ~ body = jsonencode( ~ { ~ paths = { - /path1 = { - get = { - x-amazon-apigateway-integration = { - httpMethod = "GET" - payloadFormatVersion = "1.0" - type = "HTTP_PROXY" - uri = "https://ip-ranges.amazonaws.com/ip-ranges.json" } } } -> null + /path2 = { + get = { + x-amazon-apigateway-integration = { + httpMethod = "GET" + payloadFormatVersion = "1.0" + type = "HTTP_PROXY" + uri = "https://ip-ranges.amazonaws.com/ip-ranges.json" } } } } # (2 unchanged elements hidden) } ) id = "halquax36h" name = "api-gateway-rest-api-openapi-example" tags = {} # (8 unchanged attributes hidden) # (1 unchanged block hidden) } # aws_api_gateway_stage.example will be updated in-place ~ resource "aws_api_gateway_stage" "example" { ~ deployment_id = "tj62g3" -> (known after apply) id = "ags-halquax36h-example" tags = {} # (8 unchanged attributes hidden) } Plan: 1 to add, 2 to change, 1 to destroy. Changes to Outputs: ~ domain_url = "curl -H 'Host: example.com' https://d-orixhuv0o9.execute-api.us-west-2.amazonaws.com/path1 # may take a minute to become available on initial deploy" -> "curl -H 'Host: example.com' https://d-orixhuv0o9.execute-api.us-west-2.amazonaws.com/path2 # may take a minute to become available on initial deploy" ~ stage_invoke_url = "curl https://halquax36h.execute-api.us-west-2.amazonaws.com/example/path1" -> "curl https://halquax36h.execute-api.us-west-2.amazonaws.com/example/path2" Do you want to perform these actions? Terraform will perform the actions described above. Only 'yes' will be accepted to approve. Enter a value: yes aws_api_gateway_rest_api.example: Modifying... [id=halquax36h] aws_api_gateway_rest_api.example: Modifications complete after 1s [id=halquax36h] aws_api_gateway_deployment.example: Creating... aws_api_gateway_deployment.example: Creation complete after 1s [id=9vc6zm] aws_api_gateway_stage.example: Modifying... [id=ags-halquax36h-example] aws_api_gateway_stage.example: Modifications complete after 1s [id=ags-halquax36h-example] aws_api_gateway_deployment.example: Destroying... 
[id=tj62g3] aws_api_gateway_deployment.example: Destruction complete after 0s Apply complete! Resources: 1 added, 2 changed, 1 destroyed. Outputs: domain_url = "curl -H 'Host: example.com' https://d-orixhuv0o9.execute-api.us-west-2.amazonaws.com/path2 # may take a minute to become available on initial deploy" stage_invoke_url = "curl https://halquax36h.execute-api.us-west-2.amazonaws.com/example/path2" $ curl -s https://halquax36h.execute-api.us-west-2.amazonaws.com/example/path2 | jq '.createDate' "2021-01-21-00-44-18" $ curl -H 'Host: example.com' -s https://d-orixhuv0o9.execute-api.us-west-2.amazonaws.com/path2 | jq '.createDate' "2021-01-21-00-44-18" ``` * docs/service/apigateway: Adjust for main branch rename * examples/api-gateway-rest-api-openapi: Add curl_ prefix to output names --- .gitignore | 2 + .../api-gateway-rest-api-openapi/README.md | 11 ++ .../api-gateway-rest-api-openapi/domain.tf | 18 +++ examples/api-gateway-rest-api-openapi/main.tf | 7 + .../api-gateway-rest-api-openapi/outputs.tf | 15 ++ .../api-gateway-rest-api-openapi/rest-api.tf | 39 +++++ .../api-gateway-rest-api-openapi/stage.tf | 19 +++ .../terraform.template.tfvars | 4 + examples/api-gateway-rest-api-openapi/tls.tf | 29 ++++ .../api-gateway-rest-api-openapi/variables.tf | 23 +++ ...pi_gateway_base_path_mapping.html.markdown | 16 +- .../r/api_gateway_deployment.html.markdown | 143 ++++++++++++------ .../r/api_gateway_domain_name.html.markdown | 2 + .../api_gateway_method_settings.html.markdown | 102 +++++++------ .../docs/r/api_gateway_rest_api.html.markdown | 108 +++++++++++-- .../docs/r/api_gateway_stage.html.markdown | 77 +++++----- .../r/api_gateway_usage_plan.html.markdown | 60 ++++++-- ...regional_web_acl_association.html.markdown | 78 +++++----- .../r/wafv2_web_acl_association.html.markdown | 53 ++++--- 19 files changed, 580 insertions(+), 226 deletions(-) create mode 100644 examples/api-gateway-rest-api-openapi/README.md create mode 100644 examples/api-gateway-rest-api-openapi/domain.tf create mode 100644 examples/api-gateway-rest-api-openapi/main.tf create mode 100644 examples/api-gateway-rest-api-openapi/outputs.tf create mode 100644 examples/api-gateway-rest-api-openapi/rest-api.tf create mode 100644 examples/api-gateway-rest-api-openapi/stage.tf create mode 100644 examples/api-gateway-rest-api-openapi/terraform.template.tfvars create mode 100644 examples/api-gateway-rest-api-openapi/tls.tf create mode 100644 examples/api-gateway-rest-api-openapi/variables.tf diff --git a/.gitignore b/.gitignore index 41faf76cc6e..1606a7811be 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,8 @@ *.exe .DS_Store example.tf +.terraform.lock.hcl +.terraform.tfstate.lock.info terraform.tfplan terraform.tfstate bin/ diff --git a/examples/api-gateway-rest-api-openapi/README.md b/examples/api-gateway-rest-api-openapi/README.md new file mode 100644 index 00000000000..51cbc8136b5 --- /dev/null +++ b/examples/api-gateway-rest-api-openapi/README.md @@ -0,0 +1,11 @@ +# API Gateway REST API OpenAPI Example + +This example demonstrates how to create an end-to-end AWS API Gateway REST API setup with an OpenAPI configuration that proxies the [AWS IP Address Ranges](https://docs.aws.amazon.com/general/latest/gr/aws-ip-ranges.html) JSON, enables CloudWatch metrics, and sets up a domain with a self-signed TLS certificate to mimic a real-world endpoint. The outputs will provide sample `curl` commands to verify the REST API deployment. 
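+ +For illustration only, the two `curl` output values take roughly the shape below; the REST API identifier and regional domain name are placeholders, and the real values are generated per deployment: + +```shell +# curl_stage_invoke_url output (stage "example", default path /path1) +curl https://<rest-api-id>.execute-api.us-west-2.amazonaws.com/example/path1 + +# curl_domain_url output; the Host header matches the self-signed certificate domain +curl -H 'Host: example.com' https://<regional-domain-name>/path1 +```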
+ +## Running this Example + +Terraform variables are available to modify this example; see the `variables.tf` file. They can be provided by running `cp terraform.template.tfvars terraform.tfvars`, modifying `terraform.tfvars` with your variables, and running `terraform apply`. Alternatively, the variables can be provided as flags by running: + +```shell +terraform apply -var="aws_region=us-west-2" +``` diff --git a/examples/api-gateway-rest-api-openapi/domain.tf b/examples/api-gateway-rest-api-openapi/domain.tf new file mode 100644 index 00000000000..ef56e11d8f6 --- /dev/null +++ b/examples/api-gateway-rest-api-openapi/domain.tf @@ -0,0 +1,18 @@ +# +# Domain Setup +# + +resource "aws_api_gateway_domain_name" "example" { + domain_name = aws_acm_certificate.example.domain_name + regional_certificate_arn = aws_acm_certificate.example.arn + + endpoint_configuration { + types = ["REGIONAL"] + } +} + +resource "aws_api_gateway_base_path_mapping" "example" { + api_id = aws_api_gateway_rest_api.example.id + domain_name = aws_api_gateway_domain_name.example.domain_name + stage_name = aws_api_gateway_stage.example.stage_name +} diff --git a/examples/api-gateway-rest-api-openapi/main.tf b/examples/api-gateway-rest-api-openapi/main.tf new file mode 100644 index 00000000000..1b8857c5dc8 --- /dev/null +++ b/examples/api-gateway-rest-api-openapi/main.tf @@ -0,0 +1,7 @@ +terraform { + required_version = ">= 0.12" +} + +provider "aws" { + region = var.aws_region +} diff --git a/examples/api-gateway-rest-api-openapi/outputs.tf b/examples/api-gateway-rest-api-openapi/outputs.tf new file mode 100644 index 00000000000..a144a5b4c11 --- /dev/null +++ b/examples/api-gateway-rest-api-openapi/outputs.tf @@ -0,0 +1,15 @@ +# +# Outputs +# + +output "curl_domain_url" { + depends_on = [aws_api_gateway_base_path_mapping.example] + + description = "API Gateway Domain URL (self-signed certificate)" + value = "curl -H 'Host: ${var.rest_api_domain_name}' https://${aws_api_gateway_domain_name.example.regional_domain_name}${var.rest_api_path} # may take a minute to become available on initial deploy" +} + +output "curl_stage_invoke_url" { + description = "API Gateway Stage Invoke URL" + value = "curl ${aws_api_gateway_stage.example.invoke_url}${var.rest_api_path}" +} diff --git a/examples/api-gateway-rest-api-openapi/rest-api.tf b/examples/api-gateway-rest-api-openapi/rest-api.tf new file mode 100644 index 00000000000..9d73af6e35a --- /dev/null +++ b/examples/api-gateway-rest-api-openapi/rest-api.tf @@ -0,0 +1,39 @@ +resource "aws_api_gateway_rest_api" "example" { + body = jsonencode({ + openapi = "3.0.1" + info = { + title = var.rest_api_name + version = "1.0" + } + paths = { + (var.rest_api_path) = { + get = { + x-amazon-apigateway-integration = { + httpMethod = "GET" + payloadFormatVersion = "1.0" + type = "HTTP_PROXY" + uri = "https://ip-ranges.amazonaws.com/ip-ranges.json" + } + } + } + } + }) + + name = var.rest_api_name + + endpoint_configuration { + types = ["REGIONAL"] + } +} + +resource "aws_api_gateway_deployment" "example" { + rest_api_id = aws_api_gateway_rest_api.example.id + + triggers = { + redeployment = sha1(jsonencode(aws_api_gateway_rest_api.example.body)) + } + + lifecycle { + create_before_destroy = true + } +} diff --git a/examples/api-gateway-rest-api-openapi/stage.tf b/examples/api-gateway-rest-api-openapi/stage.tf new file mode 100644 index 00000000000..aa5e5a83f57 --- /dev/null +++ b/examples/api-gateway-rest-api-openapi/stage.tf @@ -0,0 +1,19 @@ +# +# Stage and Stage Settings +# + +resource
"aws_api_gateway_stage" "example" { + deployment_id = aws_api_gateway_deployment.example.id + rest_api_id = aws_api_gateway_rest_api.example.id + stage_name = "example" +} + +resource "aws_api_gateway_method_settings" "example" { + rest_api_id = aws_api_gateway_rest_api.example.id + stage_name = aws_api_gateway_stage.example.stage_name + method_path = "*/*" + + settings { + metrics_enabled = true + } +} diff --git a/examples/api-gateway-rest-api-openapi/terraform.template.tfvars b/examples/api-gateway-rest-api-openapi/terraform.template.tfvars new file mode 100644 index 00000000000..897df7521d4 --- /dev/null +++ b/examples/api-gateway-rest-api-openapi/terraform.template.tfvars @@ -0,0 +1,4 @@ +aws_region = "us-west-2" +rest_api_domain_name = "example.com" +rest_api_name = "api-gateway-rest-api-openapi-example" +rest_api_path = "/path1" diff --git a/examples/api-gateway-rest-api-openapi/tls.tf b/examples/api-gateway-rest-api-openapi/tls.tf new file mode 100644 index 00000000000..d1d47c2ca53 --- /dev/null +++ b/examples/api-gateway-rest-api-openapi/tls.tf @@ -0,0 +1,29 @@ +# +# Self-Signed TLS Certificate for Testing +# + +resource "tls_private_key" "example" { + algorithm = "RSA" +} + +resource "tls_self_signed_cert" "example" { + allowed_uses = [ + "key_encipherment", + "digital_signature", + "server_auth", + ] + dns_names = [var.rest_api_domain_name] + key_algorithm = tls_private_key.example.algorithm + private_key_pem = tls_private_key.example.private_key_pem + validity_period_hours = 12 + + subject { + common_name = var.rest_api_domain_name + organization = "ACME Examples, Inc" + } +} + +resource "aws_acm_certificate" "example" { + certificate_body = tls_self_signed_cert.example.cert_pem + private_key = tls_private_key.example.private_key_pem +} diff --git a/examples/api-gateway-rest-api-openapi/variables.tf b/examples/api-gateway-rest-api-openapi/variables.tf new file mode 100644 index 00000000000..1a47ff7f712 --- /dev/null +++ b/examples/api-gateway-rest-api-openapi/variables.tf @@ -0,0 +1,23 @@ +variable "aws_region" { + default = "us-west-2" + description = "AWS Region to deploy example API Gateway REST API" + type = string +} + +variable "rest_api_domain_name" { + default = "example.com" + description = "Domain name of the API Gateway REST API for self-signed TLS certificate" + type = string +} + +variable "rest_api_name" { + default = "api-gateway-rest-api-openapi-example" + description = "Name of the API Gateway REST API (can be used to trigger redeployments)" + type = string +} + +variable "rest_api_path" { + default = "/path1" + description = "Path to create in the API Gateway REST API (can be used to trigger redeployments)" + type = string +} diff --git a/website/docs/r/api_gateway_base_path_mapping.html.markdown b/website/docs/r/api_gateway_base_path_mapping.html.markdown index da8e0721e6c..671be127304 100644 --- a/website/docs/r/api_gateway_base_path_mapping.html.markdown +++ b/website/docs/r/api_gateway_base_path_mapping.html.markdown @@ -14,11 +14,13 @@ custom domain name. ## Example Usage +An end-to-end example of a REST API configured with OpenAPI can be found in the [`/examples/api-gateway-rest-api-openapi` directory within the GitHub repository](https://github.com/hashicorp/terraform-provider-aws/tree/main/examples/api-gateway-rest-api-openapi). 
+ ```hcl -resource "aws_api_gateway_deployment" "example" { - # See aws_api_gateway_rest_api docs for how to create this - rest_api_id = aws_api_gateway_rest_api.MyDemoAPI.id - stage_name = "live" +resource "aws_api_gateway_stage" "example" { + deployment_id = aws_api_gateway_deployment.example.id + rest_api_id = aws_api_gateway_rest_api.example.id + stage_name = "example" } resource "aws_api_gateway_domain_name" "example" { @@ -30,9 +32,9 @@ resource "aws_api_gateway_domain_name" "example" { certificate_private_key = file("${path.module}/example.com/example.key") } -resource "aws_api_gateway_base_path_mapping" "test" { - api_id = aws_api_gateway_rest_api.MyDemoAPI.id - stage_name = aws_api_gateway_deployment.example.stage_name +resource "aws_api_gateway_base_path_mapping" "example" { + api_id = aws_api_gateway_rest_api.example.id + stage_name = aws_api_gateway_stage.example.stage_name domain_name = aws_api_gateway_domain_name.example.domain_name } ``` diff --git a/website/docs/r/api_gateway_deployment.html.markdown b/website/docs/r/api_gateway_deployment.html.markdown index 950c48ef755..731cf240013 100644 --- a/website/docs/r/api_gateway_deployment.html.markdown +++ b/website/docs/r/api_gateway_deployment.html.markdown @@ -3,92 +3,139 @@ subcategory: "API Gateway (REST APIs)" layout: "aws" page_title: "AWS: aws_api_gateway_deployment" description: |- - Provides an API Gateway REST Deployment. + Manages an API Gateway REST Deployment. --- # Resource: aws_api_gateway_deployment -Provides an API Gateway REST Deployment. +Manages an API Gateway REST Deployment. A deployment is a snapshot of the REST API configuration. The deployment can then be published to callable endpoints via the [`aws_api_gateway_stage` resource](api_gateway_stage.html) and optionally managed further with the [`aws_api_gateway_base_path_mapping` resource](api_gateway_base_path_mapping.html), [`aws_api_gateway_domain_name` resource](api_gateway_domain_name.html), and [`aws_api_method_settings` resource](api_gateway_method_settings.html). For more information, see the [API Gateway Developer Guide](https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-deploy-api.html). -~> **Note:** This resource depends on having at least one `aws_api_gateway_integration` created in the REST API, which itself has other dependencies. To avoid race conditions when all resources are being created together, you need to add implicit resource references via the `triggers` argument or explicit resource references using the [resource `depends_on` meta-argument](https://www.terraform.io/docs/configuration/meta-arguments/depends_on.html). +To properly capture all REST API configuration in a deployment, this resource must have dependencies on all prior Terraform resources that manage resources/paths, methods, integrations, etc. --> It is recommended to enable the [resource `lifecycle` configuration block `create_before_destroy` argument](https://www.terraform.io/docs/configuration/resources.html#create_before_destroy) in this resource configuration to properly order redeployments in Terraform. +* For REST APIs that are configured via OpenAPI specification ([`aws_api_gateway_rest_api` resource](api_gateway_rest_api.html) `body` argument), no special dependency setup is needed beyond referencing the `id` attribute of that resource unless additional Terraform resources have further customized the REST API. 
+* When the REST API configuration involves other Terraform resources ([`aws_api_gateway_integration` resource](api_gateway_integration.html), etc.), the dependency setup can be done with implicit resource references in the `triggers` argument or explicit resource references using the [resource `depends_on` meta-argument](https://www.terraform.io/docs/configuration/meta-arguments/depends_on.html). The `triggers` argument should be preferred over `depends_on`, since `depends_on` can only capture dependency ordering and will not cause the resource to recreate (redeploy the REST API) with upstream configuration changes.
 
-## Example Usage
+!> **WARNING:** It is recommended to use the [`aws_api_gateway_stage` resource](api_gateway_stage.html) instead of managing an API Gateway Stage via the `stage_name` argument of this resource. When this resource is recreated (REST API redeployment) with the `stage_name` configured, the stage is deleted and recreated. This will cause a temporary service interruption, increase Terraform plan differences, and can require a second Terraform apply to recreate any downstream stage configuration such as associated `aws_api_gateway_method_settings` resources.
 
-```hcl
-resource "aws_api_gateway_rest_api" "MyDemoAPI" {
-  name        = "MyDemoAPI"
-  description = "This is my API for demonstration purposes"
-}
+~> **NOTE:** It is recommended to enable the [resource `lifecycle` configuration block `create_before_destroy` argument](https://www.terraform.io/docs/configuration/resources.html#create_before_destroy) in this resource configuration to properly order redeployments in Terraform. Without enabling `create_before_destroy`, API Gateway can return errors such as `BadRequestException: Active stages pointing to this deployment must be moved or deleted` on recreation.
 
-resource "aws_api_gateway_resource" "MyDemoResource" {
-  rest_api_id = aws_api_gateway_rest_api.MyDemoAPI.id
-  parent_id   = aws_api_gateway_rest_api.MyDemoAPI.root_resource_id
-  path_part   = "test"
-}
+## Example Usage
 
-resource "aws_api_gateway_method" "MyDemoMethod" {
-  rest_api_id   = aws_api_gateway_rest_api.MyDemoAPI.id
-  resource_id   = aws_api_gateway_resource.MyDemoResource.id
-  http_method   = "GET"
-  authorization = "NONE"
-}
+### OpenAPI Specification
 
-resource "aws_api_gateway_integration" "MyDemoIntegration" {
-  rest_api_id = aws_api_gateway_rest_api.MyDemoAPI.id
-  resource_id = aws_api_gateway_resource.MyDemoResource.id
-  http_method = aws_api_gateway_method.MyDemoMethod.http_method
-  type        = "MOCK"
-}
+An end-to-end example of a REST API configured with OpenAPI can be found in the [`/examples/api-gateway-rest-api-openapi` directory within the GitHub repository](https://github.com/hashicorp/terraform-provider-aws/tree/main/examples/api-gateway-rest-api-openapi).
-resource "aws_api_gateway_deployment" "MyDemoDeployment" { - depends_on = [aws_api_gateway_integration.MyDemoIntegration] +```hcl +resource "aws_api_gateway_rest_api" "example" { + body = jsonencode({ + openapi = "3.0.1" + info = { + title = "example" + version = "1.0" + } + paths = { + "/path1" = { + get = { + x-amazon-apigateway-integration = { + httpMethod = "GET" + payloadFormatVersion = "1.0" + type = "HTTP_PROXY" + uri = "https://ip-ranges.amazonaws.com/ip-ranges.json" + } + } + } + } + }) + + name = "example" +} - rest_api_id = aws_api_gateway_rest_api.MyDemoAPI.id - stage_name = "test" +resource "aws_api_gateway_deployment" "example" { + rest_api_id = aws_api_gateway_rest_api.example.id - variables = { - "answer" = "42" + triggers = { + redeployment = sha1(jsonencode(aws_api_gateway_rest_api.example.body)) } lifecycle { create_before_destroy = true } } -``` -### Redeployment Triggers +resource "aws_api_gateway_stage" "example" { + deployment_id = aws_api_gateway_deployment.example.id + rest_api_id = aws_api_gateway_rest_api.example.id + stage_name = "example" +} +``` -~> **NOTE:** This is an optional and Terraform 0.12 (or later) advanced configuration that shows calculating a hash of the API's Terraform resources to determine changes that should trigger a new deployment. This value will change after the first Terraform apply of new resources, triggering an immediate redeployment, however it will stabilize afterwards except for resource changes. The `triggers` map can also be configured in other, more complex ways to fit the environment, avoiding the immediate redeployment issue. +### Terraform Resources ```hcl -resource "aws_api_gateway_deployment" "MyDemoDeployment" { - rest_api_id = aws_api_gateway_rest_api.MyDemoAPI.id - stage_name = "test" +resource "aws_api_gateway_rest_api" "example" { + name = "example" +} + +resource "aws_api_gateway_resource" "example" { + parent_id = aws_api_gateway_rest_api.example.root_resource_id + path_part = "example" + rest_api_id = aws_api_gateway_rest_api.example.id +} + +resource "aws_api_gateway_method" "example" { + authorization = "NONE" + http_method = "GET" + resource_id = aws_api_gateway_resource.example.id + rest_api_id = aws_api_gateway_rest_api.example.id +} + +resource "aws_api_gateway_integration" "example" { + http_method = aws_api_gateway_method.example.http_method + resource_id = aws_api_gateway_resource.example.id + rest_api_id = aws_api_gateway_rest_api.example.id + type = "MOCK" +} + +resource "aws_api_gateway_deployment" "example" { + rest_api_id = aws_api_gateway_rest_api.example.id triggers = { - redeployment = sha1(join(",", list( - jsonencode(aws_api_gateway_integration.example), - ))) + # NOTE: The configuration below will satisfy ordering considerations, + # but not pick up all future REST API changes. More advanced patterns + # are possible, such as using the filesha1() function against the + # Terraform configuration file(s) or removing the .id references to + # calculate a hash against whole resources. Be aware that using whole + # resources will show a difference after the initial implementation. + # It will stabilize to only change when resources change afterwards. 
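+      # For instance, a filesha1() variant of this trigger might look like
+      # the following (a sketch only; "rest-api.tf" is a placeholder for
+      # your own configuration file name):
+      #   redeployment = filesha1("rest-api.tf")
+      # The example below instead hashes the individual resource identifiers: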
+ redeployment = sha1(jsonencode([ + aws_api_gateway_resource.example.id, + aws_api_gateway_method.example.id, + aws_api_gateway_integration.example.id, + ])) } lifecycle { create_before_destroy = true } } + +resource "aws_api_gateway_stage" "example" { + deployment_id = aws_api_gateway_deployment.example.id + rest_api_id = aws_api_gateway_rest_api.example.id + stage_name = "example" +} ``` ## Argument Reference The following arguments are supported: -* `rest_api_id` - (Required) The ID of the associated REST API -* `stage_name` - (Optional) The name of the stage. If the specified stage already exists, it will be updated to point to the new deployment. If the stage does not exist, a new one will be created and point to this deployment. -* `description` - (Optional) The description of the deployment -* `stage_description` - (Optional) The description of the stage -* `triggers` - (Optional) A map of arbitrary keys and values that, when changed, will trigger a redeployment. To force a redeployment without changing these keys/values, use the [`terraform taint` command](https://www.terraform.io/docs/commands/taint.html). -* `variables` - (Optional) A map that defines variables for the stage +* `rest_api_id` - (Required) REST API identifier. +* `description` - (Optional) Description of the deployment +* `stage_name` - (Optional) Name of the stage to create with this deployment. If the specified stage already exists, it will be updated to point to the new deployment. It is recommended to use the [`aws_api_gateway_stage` resource](api_gateway_stage.html) instead to manage stages. +* `stage_description` - (Optional) Description to set on the stage managed by the `stage_name` argument. +* `triggers` - (Optional) Map of arbitrary keys and values that, when changed, will trigger a redeployment. To force a redeployment without changing these keys/values, use the [`terraform taint` command](https://www.terraform.io/docs/commands/taint.html). +* `variables` - (Optional) Map to set on the stage managed by the `stage_name` argument. ## Attributes Reference diff --git a/website/docs/r/api_gateway_domain_name.html.markdown b/website/docs/r/api_gateway_domain_name.html.markdown index 004a4b9eb3e..3eefe3b2bdc 100644 --- a/website/docs/r/api_gateway_domain_name.html.markdown +++ b/website/docs/r/api_gateway_domain_name.html.markdown @@ -40,6 +40,8 @@ from the validation resource where it will be available after the resource creat ## Example Usage +An end-to-end example of a REST API configured with OpenAPI can be found in the [`/examples/api-gateway-rest-api-openapi` directory within the GitHub repository](https://github.com/hashicorp/terraform-provider-aws/tree/main/examples/api-gateway-rest-api-openapi). + ### Edge Optimized (ACM Certificate) ```hcl diff --git a/website/docs/r/api_gateway_method_settings.html.markdown b/website/docs/r/api_gateway_method_settings.html.markdown index b5e131bdfda..d7f7a9d8f0f 100644 --- a/website/docs/r/api_gateway_method_settings.html.markdown +++ b/website/docs/r/api_gateway_method_settings.html.markdown @@ -3,69 +3,81 @@ subcategory: "API Gateway (REST APIs)" layout: "aws" page_title: "AWS: aws_api_gateway_method_settings" description: |- - Provides an API Gateway Method Settings, e.g. logging or monitoring. + Manages API Gateway Stage Method Settings --- # Resource: aws_api_gateway_method_settings -Provides an API Gateway Method Settings, e.g. logging or monitoring. +Manages API Gateway Stage Method Settings. For example, CloudWatch logging and metrics. 
+ +~> **NOTE:** It is recommended to use this resource in conjunction with the [`aws_api_gateway_stage` resource](api_gateway_stage.html) instead of a stage managed by the [`aws_api_gateway_deployment` resource](api_gateway_deployment.html) optional `stage_name` argument. Stages managed by the `aws_api_gateway_deployment` resource are recreated on redeployment and this resource will require a second apply to recreate the method settings. ## Example Usage -```hcl -resource "aws_api_gateway_method_settings" "s" { - rest_api_id = aws_api_gateway_rest_api.test.id - stage_name = aws_api_gateway_stage.test.stage_name - method_path = "${trimprefix(aws_api_gateway_resource.test.path, "/")}/${aws_api_gateway_method.test.http_method}" +An end-to-end example of a REST API configured with OpenAPI can be found in the [`/examples/api-gateway-rest-api-openapi` directory within the GitHub repository](https://github.com/hashicorp/terraform-provider-aws/tree/main/examples/api-gateway-rest-api-openapi). - settings { - metrics_enabled = true - logging_level = "INFO" - } +```hcl +resource "aws_api_gateway_rest_api" "example" { + body = jsonencode({ + openapi = "3.0.1" + info = { + title = "example" + version = "1.0" + } + paths = { + "/path1" = { + get = { + x-amazon-apigateway-integration = { + httpMethod = "GET" + payloadFormatVersion = "1.0" + type = "HTTP_PROXY" + uri = "https://ip-ranges.amazonaws.com/ip-ranges.json" + } + } + } + } + }) + + name = "example" } -resource "aws_api_gateway_rest_api" "test" { - name = "MyDemoAPI" - description = "This is my API for demonstration purposes" -} +resource "aws_api_gateway_deployment" "example" { + rest_api_id = aws_api_gateway_rest_api.example.id -resource "aws_api_gateway_deployment" "test" { - depends_on = [aws_api_gateway_integration.test] - rest_api_id = aws_api_gateway_rest_api.test.id - stage_name = "dev" -} + triggers = { + redeployment = sha1(jsonencode(aws_api_gateway_rest_api.example.body)) + } -resource "aws_api_gateway_stage" "test" { - stage_name = "prod" - rest_api_id = aws_api_gateway_rest_api.test.id - deployment_id = aws_api_gateway_deployment.test.id + lifecycle { + create_before_destroy = true + } } -resource "aws_api_gateway_resource" "test" { - rest_api_id = aws_api_gateway_rest_api.test.id - parent_id = aws_api_gateway_rest_api.test.root_resource_id - path_part = "mytestresource" +resource "aws_api_gateway_stage" "example" { + deployment_id = aws_api_gateway_deployment.example.id + rest_api_id = aws_api_gateway_rest_api.example.id + stage_name = "example" } -resource "aws_api_gateway_method" "test" { - rest_api_id = aws_api_gateway_rest_api.test.id - resource_id = aws_api_gateway_resource.test.id - http_method = "GET" - authorization = "NONE" +resource "aws_api_gateway_method_settings" "all" { + rest_api_id = aws_api_gateway_rest_api.example.id + stage_name = aws_api_gateway_stage.example.stage_name + method_path = "*/*" + + settings { + metrics_enabled = true + logging_level = "ERROR" + } } -resource "aws_api_gateway_integration" "test" { - rest_api_id = aws_api_gateway_rest_api.test.id - resource_id = aws_api_gateway_resource.test.id - http_method = aws_api_gateway_method.test.http_method - type = "MOCK" +resource "aws_api_gateway_method_settings" "path_specific" { + rest_api_id = aws_api_gateway_rest_api.example.id + stage_name = aws_api_gateway_stage.example.stage_name + method_path = "path1/GET" - request_templates = { - "application/xml" = < **Note:** Amazon API Gateway Version 1 resources are used for creating and deploying REST APIs. 
To create and deploy WebSocket and HTTP APIs, use Amazon API Gateway Version 2 [resources](/docs/providers/aws/r/apigatewayv2_api.html). ## Example Usage -### Basic +### OpenAPI Specification + +An end-to-end example of a REST API configured with OpenAPI can be found in the [`/examples/api-gateway-rest-api-openapi` directory within the GitHub repository](https://github.com/hashicorp/terraform-provider-aws/tree/main/examples/api-gateway-rest-api-openapi). ```hcl -resource "aws_api_gateway_rest_api" "MyDemoAPI" { - name = "MyDemoAPI" - description = "This is my API for demonstration purposes" +resource "aws_api_gateway_rest_api" "example" { + body = jsonencode({ + openapi = "3.0.1" + info = { + title = "example" + version = "1.0" + } + paths = { + "/path1" = { + get = { + x-amazon-apigateway-integration = { + httpMethod = "GET" + payloadFormatVersion = "1.0" + type = "HTTP_PROXY" + uri = "https://ip-ranges.amazonaws.com/ip-ranges.json" + } + } + } + } + }) + + name = "example" + + endpoint_configuration { + types = ["REGIONAL"] + } +} + +resource "aws_api_gateway_deployment" "example" { + rest_api_id = aws_api_gateway_rest_api.example.id + + triggers = { + redeployment = sha1(jsonencode(aws_api_gateway_rest_api.example.body)) + } + + lifecycle { + create_before_destroy = true + } +} + +resource "aws_api_gateway_stage" "example" { + deployment_id = aws_api_gateway_deployment.example.id + rest_api_id = aws_api_gateway_rest_api.example.id + stage_name = "example" } ``` -### Regional Endpoint Type +### Terraform Resources ```hcl resource "aws_api_gateway_rest_api" "example" { - name = "regional-example" + name = "example" +} - endpoint_configuration { - types = ["REGIONAL"] +resource "aws_api_gateway_resource" "example" { + parent_id = aws_api_gateway_rest_api.example.root_resource_id + path_part = "example" + rest_api_id = aws_api_gateway_rest_api.example.id +} + +resource "aws_api_gateway_method" "example" { + authorization = "NONE" + http_method = "GET" + resource_id = aws_api_gateway_resource.example.id + rest_api_id = aws_api_gateway_rest_api.example.id +} + +resource "aws_api_gateway_integration" "example" { + http_method = aws_api_gateway_method.example.http_method + resource_id = aws_api_gateway_resource.example.id + rest_api_id = aws_api_gateway_rest_api.example.id + type = "MOCK" +} + +resource "aws_api_gateway_deployment" "example" { + rest_api_id = aws_api_gateway_rest_api.example.id + + triggers = { + # NOTE: The configuration below will satisfy ordering considerations, + # but not pick up all future REST API changes. More advanced patterns + # are possible, such as using the filesha1() function against the + # Terraform configuration file(s) or removing the .id references to + # calculate a hash against whole resources. Be aware that using whole + # resources will show a difference after the initial implementation. + # It will stabilize to only change when resources change afterwards. 
+      redeployment = sha1(jsonencode([
+        aws_api_gateway_resource.example.id,
+        aws_api_gateway_method.example.id,
+        aws_api_gateway_integration.example.id,
+      ]))
+  }
+
+  lifecycle {
+    create_before_destroy = true
   }
 }
+
+resource "aws_api_gateway_stage" "example" {
+  deployment_id = aws_api_gateway_deployment.example.id
+  rest_api_id   = aws_api_gateway_rest_api.example.id
+  stage_name    = "example"
+}
 ```
 
 ## Argument Reference
 
diff --git a/website/docs/r/api_gateway_stage.html.markdown b/website/docs/r/api_gateway_stage.html.markdown
index c06c2bfe59d..de7286e1e2d 100644
--- a/website/docs/r/api_gateway_stage.html.markdown
+++ b/website/docs/r/api_gateway_stage.html.markdown
@@ -3,63 +3,70 @@ subcategory: "API Gateway (REST APIs)"
 layout: "aws"
 page_title: "AWS: aws_api_gateway_stage"
 description: |-
-  Provides an API Gateway Stage.
+  Manages an API Gateway Stage.
 ---
 
 # Resource: aws_api_gateway_stage
 
-Provides an API Gateway Stage.
+Manages an API Gateway Stage. A stage is a named reference to a deployment, which can be created via the [`aws_api_gateway_deployment` resource](api_gateway_deployment.html). Stages can be optionally managed further with the [`aws_api_gateway_base_path_mapping` resource](api_gateway_base_path_mapping.html), [`aws_api_gateway_domain_name` resource](api_gateway_domain_name.html), and [`aws_api_gateway_method_settings` resource](api_gateway_method_settings.html). For more information, see the [API Gateway Developer Guide](https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-stages.html).
 
 ## Example Usage
 
+An end-to-end example of a REST API configured with OpenAPI can be found in the [`/examples/api-gateway-rest-api-openapi` directory within the GitHub repository](https://github.com/hashicorp/terraform-provider-aws/tree/main/examples/api-gateway-rest-api-openapi).
+ ```hcl -resource "aws_api_gateway_stage" "test" { - stage_name = "prod" - rest_api_id = aws_api_gateway_rest_api.test.id - deployment_id = aws_api_gateway_deployment.test.id +resource "aws_api_gateway_rest_api" "example" { + body = jsonencode({ + openapi = "3.0.1" + info = { + title = "example" + version = "1.0" + } + paths = { + "/path1" = { + get = { + x-amazon-apigateway-integration = { + httpMethod = "GET" + payloadFormatVersion = "1.0" + type = "HTTP_PROXY" + uri = "https://ip-ranges.amazonaws.com/ip-ranges.json" + } + } + } + } + }) + + name = "example" } -resource "aws_api_gateway_rest_api" "test" { - name = "MyDemoAPI" - description = "This is my API for demonstration purposes" -} +resource "aws_api_gateway_deployment" "example" { + rest_api_id = aws_api_gateway_rest_api.example.id -resource "aws_api_gateway_deployment" "test" { - depends_on = [aws_api_gateway_integration.test] - rest_api_id = aws_api_gateway_rest_api.test.id - stage_name = "dev" -} + triggers = { + redeployment = sha1(jsonencode(aws_api_gateway_rest_api.example.body)) + } -resource "aws_api_gateway_resource" "test" { - rest_api_id = aws_api_gateway_rest_api.test.id - parent_id = aws_api_gateway_rest_api.test.root_resource_id - path_part = "mytestresource" + lifecycle { + create_before_destroy = true + } } -resource "aws_api_gateway_method" "test" { - rest_api_id = aws_api_gateway_rest_api.test.id - resource_id = aws_api_gateway_resource.test.id - http_method = "GET" - authorization = "NONE" +resource "aws_api_gateway_stage" "example" { + deployment_id = aws_api_gateway_deployment.example.id + rest_api_id = aws_api_gateway_rest_api.example.id + stage_name = "example" } -resource "aws_api_gateway_method_settings" "s" { - rest_api_id = aws_api_gateway_rest_api.test.id - stage_name = aws_api_gateway_stage.test.stage_name - method_path = "${aws_api_gateway_resource.test.path_part}/${aws_api_gateway_method.test.http_method}" +resource "aws_api_gateway_method_settings" "example" { + rest_api_id = aws_api_gateway_rest_api.example.id + stage_name = aws_api_gateway_stage.example.stage_name + method_path = "*/*" settings { metrics_enabled = true logging_level = "INFO" } } - -resource "aws_api_gateway_integration" "test" { - rest_api_id = aws_api_gateway_rest_api.test.id - resource_id = aws_api_gateway_resource.test.id - http_method = aws_api_gateway_method.test.http_method - type = "MOCK" -} ``` ### Managing the API Logging CloudWatch Log Group diff --git a/website/docs/r/api_gateway_usage_plan.html.markdown b/website/docs/r/api_gateway_usage_plan.html.markdown index dd59e5430a1..877ec197133 100644 --- a/website/docs/r/api_gateway_usage_plan.html.markdown +++ b/website/docs/r/api_gateway_usage_plan.html.markdown @@ -13,35 +13,67 @@ Provides an API Gateway Usage Plan. ## Example Usage ```hcl -resource "aws_api_gateway_rest_api" "myapi" { - name = "MyDemoAPI" +resource "aws_api_gateway_rest_api" "example" { + body = jsonencode({ + openapi = "3.0.1" + info = { + title = "example" + version = "1.0" + } + paths = { + "/path1" = { + get = { + x-amazon-apigateway-integration = { + httpMethod = "GET" + payloadFormatVersion = "1.0" + type = "HTTP_PROXY" + uri = "https://ip-ranges.amazonaws.com/ip-ranges.json" + } + } + } + } + }) + + name = "example" } -# ... 
+resource "aws_api_gateway_deployment" "example" { + rest_api_id = aws_api_gateway_rest_api.example.id -resource "aws_api_gateway_deployment" "dev" { - rest_api_id = aws_api_gateway_rest_api.myapi.id - stage_name = "dev" + triggers = { + redeployment = sha1(jsonencode(aws_api_gateway_rest_api.example.body)) + } + + lifecycle { + create_before_destroy = true + } +} + +resource "aws_api_gateway_stage" "development" { + deployment_id = aws_api_gateway_deployment.example.id + rest_api_id = aws_api_gateway_rest_api.example.id + stage_name = "development" } -resource "aws_api_gateway_deployment" "prod" { - rest_api_id = aws_api_gateway_rest_api.myapi.id - stage_name = "prod" +resource "aws_api_gateway_stage" "production" { + deployment_id = aws_api_gateway_deployment.example.id + rest_api_id = aws_api_gateway_rest_api.example.id + stage_name = "production" } -resource "aws_api_gateway_usage_plan" "MyUsagePlan" { +resource "aws_api_gateway_usage_plan" "example" { name = "my-usage-plan" description = "my description" product_code = "MYCODE" api_stages { - api_id = aws_api_gateway_rest_api.myapi.id - stage = aws_api_gateway_deployment.dev.stage_name + api_id = aws_api_gateway_rest_api.example.id + stage = aws_api_gateway_stage.development.stage_name } api_stages { - api_id = aws_api_gateway_rest_api.myapi.id - stage = aws_api_gateway_deployment.prod.stage_name + api_id = aws_api_gateway_rest_api.example.id + stage = aws_api_gateway_stage.production.stage_name } quota_settings { diff --git a/website/docs/r/wafregional_web_acl_association.html.markdown b/website/docs/r/wafregional_web_acl_association.html.markdown index 6c1792ed235..383ae77c435 100644 --- a/website/docs/r/wafregional_web_acl_association.html.markdown +++ b/website/docs/r/wafregional_web_acl_association.html.markdown @@ -123,60 +123,50 @@ resource "aws_wafregional_web_acl" "foo" { } } -resource "aws_api_gateway_rest_api" "test" { - name = "foo" -} - -resource "aws_api_gateway_resource" "test" { - parent_id = aws_api_gateway_rest_api.test.root_resource_id - path_part = "test" - rest_api_id = aws_api_gateway_rest_api.test.id -} - -resource "aws_api_gateway_method" "test" { - authorization = "NONE" - http_method = "GET" - resource_id = aws_api_gateway_resource.test.id - rest_api_id = aws_api_gateway_rest_api.test.id -} - -resource "aws_api_gateway_method_response" "test" { - http_method = aws_api_gateway_method.test.http_method - resource_id = aws_api_gateway_resource.test.id - rest_api_id = aws_api_gateway_rest_api.test.id - status_code = "400" -} +resource "aws_api_gateway_rest_api" "example" { + body = jsonencode({ + openapi = "3.0.1" + info = { + title = "example" + version = "1.0" + } + paths = { + "/path1" = { + get = { + x-amazon-apigateway-integration = { + httpMethod = "GET" + payloadFormatVersion = "1.0" + type = "HTTP_PROXY" + uri = "https://ip-ranges.amazonaws.com/ip-ranges.json" + } + } + } + } + }) -resource "aws_api_gateway_integration" "test" { - http_method = aws_api_gateway_method.test.http_method - integration_http_method = "GET" - resource_id = aws_api_gateway_resource.test.id - rest_api_id = aws_api_gateway_rest_api.test.id - type = "HTTP" - uri = "http://www.example.com" + name = "example" } -resource "aws_api_gateway_integration_response" "test" { - rest_api_id = aws_api_gateway_rest_api.test.id - resource_id = aws_api_gateway_resource.test.id - http_method = aws_api_gateway_integration.test.http_method - status_code = aws_api_gateway_method_response.test.status_code -} +resource "aws_api_gateway_deployment" 
"example" { + rest_api_id = aws_api_gateway_rest_api.example.id -resource "aws_api_gateway_deployment" "test" { - depends_on = [aws_api_gateway_integration_response.test] + triggers = { + redeployment = sha1(jsonencode(aws_api_gateway_rest_api.example.body)) + } - rest_api_id = aws_api_gateway_rest_api.test.id + lifecycle { + create_before_destroy = true + } } -resource "aws_api_gateway_stage" "test" { - deployment_id = aws_api_gateway_deployment.test.id - rest_api_id = aws_api_gateway_rest_api.test.id - stage_name = "test" +resource "aws_api_gateway_stage" "example" { + deployment_id = aws_api_gateway_deployment.example.id + rest_api_id = aws_api_gateway_rest_api.example.id + stage_name = "example" } resource "aws_wafregional_web_acl_association" "association" { - resource_arn = aws_api_gateway_stage.test.arn + resource_arn = aws_api_gateway_stage.example.arn web_acl_id = aws_wafregional_web_acl.foo.id } ``` diff --git a/website/docs/r/wafv2_web_acl_association.html.markdown b/website/docs/r/wafv2_web_acl_association.html.markdown index 3042aa12d47..30be8046b3c 100644 --- a/website/docs/r/wafv2_web_acl_association.html.markdown +++ b/website/docs/r/wafv2_web_acl_association.html.markdown @@ -18,39 +18,46 @@ Creates a WAFv2 Web ACL Association. ## Example Usage ```hcl -resource "aws_api_gateway_stage" "example" { - stage_name = "test" - rest_api_id = aws_api_gateway_rest_api.example.id - deployment_id = aws_api_gateway_deployment.example.id -} - resource "aws_api_gateway_rest_api" "example" { - name = "web-acl-association-example" + body = jsonencode({ + openapi = "3.0.1" + info = { + title = "example" + version = "1.0" + } + paths = { + "/path1" = { + get = { + x-amazon-apigateway-integration = { + httpMethod = "GET" + payloadFormatVersion = "1.0" + type = "HTTP_PROXY" + uri = "https://ip-ranges.amazonaws.com/ip-ranges.json" + } + } + } + } + }) + + name = "example" } resource "aws_api_gateway_deployment" "example" { rest_api_id = aws_api_gateway_rest_api.example.id - depends_on = [aws_api_gateway_integration.example] -} -resource "aws_api_gateway_integration" "example" { - rest_api_id = aws_api_gateway_rest_api.example.id - resource_id = aws_api_gateway_resource.example.id - http_method = aws_api_gateway_method.example.http_method - type = "MOCK" -} + triggers = { + redeployment = sha1(jsonencode(aws_api_gateway_rest_api.example.body)) + } -resource "aws_api_gateway_resource" "example" { - rest_api_id = aws_api_gateway_rest_api.example.id - parent_id = aws_api_gateway_rest_api.example.root_resource_id - path_part = "mytestresource" + lifecycle { + create_before_destroy = true + } } -resource "aws_api_gateway_method" "example" { +resource "aws_api_gateway_stage" "example" { + deployment_id = aws_api_gateway_deployment.example.id rest_api_id = aws_api_gateway_rest_api.example.id - resource_id = aws_api_gateway_resource.example.id - http_method = "GET" - authorization = "NONE" + stage_name = "example" } resource "aws_wafv2_web_acl" "example" { From afa5e874097197fb244c44cc82ce2a8a09af39bb Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Fri, 29 Jan 2021 10:13:34 -0500 Subject: [PATCH 0926/1212] service/ec2: Add COIP support to aws_subnet resource and data source (#16676) * service/ec2: Add COIP support to aws_subnet resource and data source Reference: https://github.com/hashicorp/terraform-provider-aws/issues/13170 Reference: https://github.com/hashicorp/terraform-provider-aws/issues/13171 Output from acceptance testing in OTL account: ``` --- PASS: TestAccDataSourceAwsSubnet_basic (25.67s) 
--- PASS: TestAccDataSourceAwsSubnet_ipv6ByIpv6CidrBlock (46.37s) --- PASS: TestAccDataSourceAwsSubnet_ipv6ByIpv6Filter (51.83s) --- PASS: TestAccAWSSubnet_availabilityZoneId (29.12s) --- PASS: TestAccAWSSubnet_basic (31.04s) --- PASS: TestAccAWSSubnet_CustomerOwnedIpv4Pool (63.80s) --- PASS: TestAccAWSSubnet_disappears (21.79s) --- PASS: TestAccAWSSubnet_enableIpv6 (79.03s) --- PASS: TestAccAWSSubnet_ignoreTags (45.48s) --- PASS: TestAccAWSSubnet_ipv6 (90.26s) --- PASS: TestAccAWSSubnet_MapCustomerOwnedIpOnLaunch (49.07s) --- PASS: TestAccAWSSubnet_outpost (47.45s) --- PASS: TestAccAWSSubnet_tags (68.74s) ``` * Update CHANGELOG for #16676 * Fix typo in CHANGELOG --- .changelog/16676.txt | 7 + aws/data_source_aws_subnet.go | 12 ++ aws/data_source_aws_subnet_test.go | 12 ++ aws/internal/service/ec2/errors.go | 4 + aws/internal/service/ec2/finder/finder.go | 19 +++ aws/internal/service/ec2/waiter/status.go | 22 +++ aws/internal/service/ec2/waiter/waiter.go | 23 ++++ aws/resource_aws_subnet.go | 61 ++++++++- aws/resource_aws_subnet_test.go | 159 ++++++++++++++++++++++ website/docs/d/subnet.html.markdown | 3 + website/docs/r/subnet.html.markdown | 2 + 11 files changed, 323 insertions(+), 1 deletion(-) create mode 100644 .changelog/16676.txt diff --git a/.changelog/16676.txt b/.changelog/16676.txt new file mode 100644 index 00000000000..1de03fa3ee6 --- /dev/null +++ b/.changelog/16676.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +data-source/aws_subnet: Add `customer_owned_ipv4_pool` and `map_customer_owned_ip_on_launch` attributes +``` + +```release-note:enhancement +resource/aws_subnet: Add `customer_owned_ipv4_pool` and `map_customer_owned_ip_on_launch` attributes +``` diff --git a/aws/data_source_aws_subnet.go b/aws/data_source_aws_subnet.go index 3a4ce67c3aa..40c59e9079e 100644 --- a/aws/data_source_aws_subnet.go +++ b/aws/data_source_aws_subnet.go @@ -77,6 +77,16 @@ func dataSourceAwsSubnet() *schema.Resource { Computed: true, }, + "customer_owned_ipv4_pool": { + Type: schema.TypeString, + Computed: true, + }, + + "map_customer_owned_ip_on_launch": { + Type: schema.TypeBool, + Computed: true, + }, + "map_public_ip_on_launch": { Type: schema.TypeBool, Computed: true, @@ -180,6 +190,8 @@ func dataSourceAwsSubnetRead(d *schema.ResourceData, meta interface{}) error { } d.Set("assign_ipv6_address_on_creation", subnet.AssignIpv6AddressOnCreation) + d.Set("customer_owned_ipv4_pool", subnet.CustomerOwnedIpv4Pool) + d.Set("map_customer_owned_ip_on_launch", subnet.MapCustomerOwnedIpOnLaunch) d.Set("map_public_ip_on_launch", subnet.MapPublicIpOnLaunch) for _, a := range subnet.Ipv6CidrBlockAssociationSet { diff --git a/aws/data_source_aws_subnet_test.go b/aws/data_source_aws_subnet_test.go index becd33aaabe..053309b8503 100644 --- a/aws/data_source_aws_subnet_test.go +++ b/aws/data_source_aws_subnet_test.go @@ -38,6 +38,8 @@ func TestAccDataSourceAwsSubnet_basic(t *testing.T) { resource.TestCheckResourceAttr(ds1ResourceName, "cidr_block", cidr), resource.TestCheckResourceAttr(ds1ResourceName, "tags.Name", tag), resource.TestCheckResourceAttrPair(ds1ResourceName, "arn", snResourceName, "arn"), + resource.TestCheckResourceAttrPair(ds1ResourceName, "customer_owned_ipv4_pool", snResourceName, "customer_owned_ipv4_pool"), + resource.TestCheckResourceAttrPair(ds1ResourceName, "map_customer_owned_ip_on_launch", snResourceName, "map_customer_owned_ip_on_launch"), resource.TestCheckResourceAttrPair(ds1ResourceName, "outpost_arn", snResourceName, "outpost_arn"), 
resource.TestCheckResourceAttrPair(ds2ResourceName, "id", snResourceName, "id"), @@ -48,6 +50,8 @@ func TestAccDataSourceAwsSubnet_basic(t *testing.T) { resource.TestCheckResourceAttr(ds2ResourceName, "cidr_block", cidr), resource.TestCheckResourceAttr(ds2ResourceName, "tags.Name", tag), resource.TestCheckResourceAttrPair(ds2ResourceName, "arn", snResourceName, "arn"), + resource.TestCheckResourceAttrPair(ds2ResourceName, "customer_owned_ipv4_pool", snResourceName, "customer_owned_ipv4_pool"), + resource.TestCheckResourceAttrPair(ds2ResourceName, "map_customer_owned_ip_on_launch", snResourceName, "map_customer_owned_ip_on_launch"), resource.TestCheckResourceAttrPair(ds2ResourceName, "outpost_arn", snResourceName, "outpost_arn"), resource.TestCheckResourceAttrPair(ds3ResourceName, "id", snResourceName, "id"), @@ -58,6 +62,8 @@ func TestAccDataSourceAwsSubnet_basic(t *testing.T) { resource.TestCheckResourceAttr(ds3ResourceName, "cidr_block", cidr), resource.TestCheckResourceAttr(ds3ResourceName, "tags.Name", tag), resource.TestCheckResourceAttrPair(ds3ResourceName, "arn", snResourceName, "arn"), + resource.TestCheckResourceAttrPair(ds3ResourceName, "customer_owned_ipv4_pool", snResourceName, "customer_owned_ipv4_pool"), + resource.TestCheckResourceAttrPair(ds3ResourceName, "map_customer_owned_ip_on_launch", snResourceName, "map_customer_owned_ip_on_launch"), resource.TestCheckResourceAttrPair(ds3ResourceName, "outpost_arn", snResourceName, "outpost_arn"), resource.TestCheckResourceAttrPair(ds4ResourceName, "id", snResourceName, "id"), @@ -68,6 +74,8 @@ func TestAccDataSourceAwsSubnet_basic(t *testing.T) { resource.TestCheckResourceAttr(ds4ResourceName, "cidr_block", cidr), resource.TestCheckResourceAttr(ds4ResourceName, "tags.Name", tag), resource.TestCheckResourceAttrPair(ds4ResourceName, "arn", snResourceName, "arn"), + resource.TestCheckResourceAttrPair(ds4ResourceName, "customer_owned_ipv4_pool", snResourceName, "customer_owned_ipv4_pool"), + resource.TestCheckResourceAttrPair(ds4ResourceName, "map_customer_owned_ip_on_launch", snResourceName, "map_customer_owned_ip_on_launch"), resource.TestCheckResourceAttrPair(ds4ResourceName, "outpost_arn", snResourceName, "outpost_arn"), resource.TestCheckResourceAttrPair(ds5ResourceName, "id", snResourceName, "id"), @@ -78,6 +86,8 @@ func TestAccDataSourceAwsSubnet_basic(t *testing.T) { resource.TestCheckResourceAttr(ds5ResourceName, "cidr_block", cidr), resource.TestCheckResourceAttr(ds5ResourceName, "tags.Name", tag), resource.TestCheckResourceAttrPair(ds5ResourceName, "arn", snResourceName, "arn"), + resource.TestCheckResourceAttrPair(ds5ResourceName, "customer_owned_ipv4_pool", snResourceName, "customer_owned_ipv4_pool"), + resource.TestCheckResourceAttrPair(ds5ResourceName, "map_customer_owned_ip_on_launch", snResourceName, "map_customer_owned_ip_on_launch"), resource.TestCheckResourceAttrPair(ds5ResourceName, "outpost_arn", snResourceName, "outpost_arn"), resource.TestCheckResourceAttrPair(ds6ResourceName, "id", snResourceName, "id"), @@ -88,6 +98,8 @@ func TestAccDataSourceAwsSubnet_basic(t *testing.T) { resource.TestCheckResourceAttr(ds6ResourceName, "cidr_block", cidr), resource.TestCheckResourceAttr(ds6ResourceName, "tags.Name", tag), resource.TestCheckResourceAttrPair(ds6ResourceName, "arn", snResourceName, "arn"), + resource.TestCheckResourceAttrPair(ds6ResourceName, "customer_owned_ipv4_pool", snResourceName, "customer_owned_ipv4_pool"), + resource.TestCheckResourceAttrPair(ds6ResourceName, "map_customer_owned_ip_on_launch", 
snResourceName, "map_customer_owned_ip_on_launch"), resource.TestCheckResourceAttrPair(ds6ResourceName, "outpost_arn", snResourceName, "outpost_arn"), ), }, diff --git a/aws/internal/service/ec2/errors.go b/aws/internal/service/ec2/errors.go index b14b13b5ff4..9162be33aed 100644 --- a/aws/internal/service/ec2/errors.go +++ b/aws/internal/service/ec2/errors.go @@ -24,6 +24,10 @@ const ( InvalidGroupNotFound = "InvalidGroup.NotFound" ) +const ( + ErrCodeInvalidSubnetIDNotFound = "InvalidSubnetID.NotFound" +) + const ( ErrCodeInvalidVpcPeeringConnectionIDNotFound = "InvalidVpcPeeringConnectionID.NotFound" ) diff --git a/aws/internal/service/ec2/finder/finder.go b/aws/internal/service/ec2/finder/finder.go index c3c24d7d06a..10e42d1faca 100644 --- a/aws/internal/service/ec2/finder/finder.go +++ b/aws/internal/service/ec2/finder/finder.go @@ -91,6 +91,25 @@ func SecurityGroupByID(conn *ec2.EC2, id string) (*ec2.SecurityGroup, error) { return result.SecurityGroups[0], nil } +// SubnetByID looks up a Subnet by ID. When not found, returns nil and potentially an API error. +func SubnetByID(conn *ec2.EC2, id string) (*ec2.Subnet, error) { + input := &ec2.DescribeSubnetsInput{ + SubnetIds: aws.StringSlice([]string{id}), + } + + output, err := conn.DescribeSubnets(input) + + if err != nil { + return nil, err + } + + if output == nil || len(output.Subnets) == 0 || output.Subnets[0] == nil { + return nil, nil + } + + return output.Subnets[0], nil +} + // VpcPeeringConnectionByID returns the VPC peering connection corresponding to the specified identifier. // Returns nil and potentially an error if no VPC peering connection is found. func VpcPeeringConnectionByID(conn *ec2.EC2, id string) (*ec2.VpcPeeringConnection, error) { diff --git a/aws/internal/service/ec2/waiter/status.go b/aws/internal/service/ec2/waiter/status.go index f9be6692bf6..47a78775504 100644 --- a/aws/internal/service/ec2/waiter/status.go +++ b/aws/internal/service/ec2/waiter/status.go @@ -3,6 +3,7 @@ package waiter import ( "fmt" "log" + "strconv" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" @@ -237,6 +238,27 @@ func SecurityGroupStatus(conn *ec2.EC2, id string) resource.StateRefreshFunc { } } +// SubnetMapCustomerOwnedIpOnLaunch fetches the Subnet and its MapCustomerOwnedIpOnLaunch +func SubnetMapCustomerOwnedIpOnLaunch(conn *ec2.EC2, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + subnet, err := finder.SubnetByID(conn, id) + + if tfawserr.ErrCodeEquals(err, tfec2.ErrCodeInvalidSubnetIDNotFound) { + return nil, "false", nil + } + + if err != nil { + return nil, "false", err + } + + if subnet == nil { + return nil, "false", nil + } + + return subnet, strconv.FormatBool(aws.BoolValue(subnet.MapCustomerOwnedIpOnLaunch)), nil + } +} + const ( vpcPeeringConnectionStatusNotFound = "NotFound" vpcPeeringConnectionStatusUnknown = "Unknown" diff --git a/aws/internal/service/ec2/waiter/waiter.go b/aws/internal/service/ec2/waiter/waiter.go index e912e908b3d..c8c516294ef 100644 --- a/aws/internal/service/ec2/waiter/waiter.go +++ b/aws/internal/service/ec2/waiter/waiter.go @@ -1,6 +1,7 @@ package waiter import ( + "strconv" "time" "github.com/aws/aws-sdk-go/service/ec2" @@ -249,6 +250,28 @@ func SecurityGroupCreated(conn *ec2.EC2, id string, timeout time.Duration) (*ec2 return nil, err } +const ( + SubnetAttributePropagationTimeout = 5 * time.Minute +) + +func SubnetMapCustomerOwnedIpOnLaunchUpdated(conn *ec2.EC2, subnetID string, expectedValue bool) (*ec2.Subnet, error) { + 
stateConf := &resource.StateChangeConf{ + Target: []string{strconv.FormatBool(expectedValue)}, + Refresh: SubnetMapCustomerOwnedIpOnLaunch(conn, subnetID), + Timeout: SubnetAttributePropagationTimeout, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*ec2.Subnet); ok { + return output, err + } + + return nil, err +} + const ( VpnGatewayVpcAttachmentAttachedTimeout = 15 * time.Minute diff --git a/aws/resource_aws_subnet.go b/aws/resource_aws_subnet.go index 5d476d33f21..d05943849cf 100644 --- a/aws/resource_aws_subnet.go +++ b/aws/resource_aws_subnet.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2/waiter" ) func resourceAwsSubnet() *schema.Resource { @@ -67,6 +68,18 @@ func resourceAwsSubnet() *schema.Resource { ConflictsWith: []string{"availability_zone"}, }, + "customer_owned_ipv4_pool": { + Type: schema.TypeString, + Optional: true, + RequiredWith: []string{"map_customer_owned_ip_on_launch", "outpost_arn"}, + }, + + "map_customer_owned_ip_on_launch": { + Type: schema.TypeBool, + Optional: true, + RequiredWith: []string{"customer_owned_ipv4_pool", "outpost_arn"}, + }, + "map_public_ip_on_launch": { Type: schema.TypeBool, Optional: true, @@ -153,7 +166,8 @@ func resourceAwsSubnetCreate(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("error waiting for subnet (%s) to become ready: %w", d.Id(), err) } - // You cannot modify multiple subnet attributes in the same request. + // You cannot modify multiple subnet attributes in the same request, + // except CustomerOwnedIpv4Pool and MapCustomerOwnedIpOnLaunch. 
// Reference: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifySubnetAttribute.html if d.Get("assign_ipv6_address_on_creation").(bool) { @@ -169,6 +183,24 @@ func resourceAwsSubnetCreate(d *schema.ResourceData, meta interface{}) error { } } + if v, ok := d.GetOk("customer_owned_ipv4_pool"); ok { + input := &ec2.ModifySubnetAttributeInput{ + CustomerOwnedIpv4Pool: aws.String(v.(string)), + MapCustomerOwnedIpOnLaunch: &ec2.AttributeBooleanValue{ + Value: aws.Bool(d.Get("map_customer_owned_ip_on_launch").(bool)), + }, + SubnetId: aws.String(d.Id()), + } + + if _, err := conn.ModifySubnetAttribute(input); err != nil { + return fmt.Errorf("error setting EC2 Subnet (%s) customer owned IPv4 pool and map customer owned IP on launch: %w", d.Id(), err) + } + + if _, err := waiter.SubnetMapCustomerOwnedIpOnLaunchUpdated(conn, d.Id(), d.Get("map_customer_owned_ip_on_launch").(bool)); err != nil { + return fmt.Errorf("error waiting for EC2 Subnet (%s) map customer owned IP on launch update: %w", d.Id(), err) + } + } + if d.Get("map_public_ip_on_launch").(bool) { input := &ec2.ModifySubnetAttributeInput{ MapPublicIpOnLaunch: &ec2.AttributeBooleanValue{ @@ -211,6 +243,8 @@ func resourceAwsSubnetRead(d *schema.ResourceData, meta interface{}) error { d.Set("availability_zone", subnet.AvailabilityZone) d.Set("availability_zone_id", subnet.AvailabilityZoneId) d.Set("cidr_block", subnet.CidrBlock) + d.Set("customer_owned_ipv4_pool", subnet.CustomerOwnedIpv4Pool) + d.Set("map_customer_owned_ip_on_launch", subnet.MapCustomerOwnedIpOnLaunch) d.Set("map_public_ip_on_launch", subnet.MapPublicIpOnLaunch) d.Set("assign_ipv6_address_on_creation", subnet.AssignIpv6AddressOnCreation) d.Set("outpost_arn", subnet.OutpostArn) @@ -249,6 +283,31 @@ func resourceAwsSubnetUpdate(d *schema.ResourceData, meta interface{}) error { } } + // You cannot modify multiple subnet attributes in the same request, + // except CustomerOwnedIpv4Pool and MapCustomerOwnedIpOnLaunch. 
+ // Reference: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifySubnetAttribute.html + + if d.HasChanges("customer_owned_ipv4_pool", "map_customer_owned_ip_on_launch") { + input := &ec2.ModifySubnetAttributeInput{ + MapCustomerOwnedIpOnLaunch: &ec2.AttributeBooleanValue{ + Value: aws.Bool(d.Get("map_customer_owned_ip_on_launch").(bool)), + }, + SubnetId: aws.String(d.Id()), + } + + if v, ok := d.GetOk("customer_owned_ipv4_pool"); ok { + input.CustomerOwnedIpv4Pool = aws.String(v.(string)) + } + + if _, err := conn.ModifySubnetAttribute(input); err != nil { + return fmt.Errorf("error updating EC2 Subnet (%s) customer owned IPv4 pool and map customer owned IP on launch: %w", d.Id(), err) + } + + if _, err := waiter.SubnetMapCustomerOwnedIpOnLaunchUpdated(conn, d.Id(), d.Get("map_customer_owned_ip_on_launch").(bool)); err != nil { + return fmt.Errorf("error waiting for EC2 Subnet (%s) map customer owned IP on launch update: %w", d.Id(), err) + } + } + if d.HasChange("map_public_ip_on_launch") { modifyOpts := &ec2.ModifySubnetAttributeInput{ SubnetId: aws.String(d.Id()), diff --git a/aws/resource_aws_subnet_test.go b/aws/resource_aws_subnet_test.go index 81fd3b116ed..7da2ee09b6a 100644 --- a/aws/resource_aws_subnet_test.go +++ b/aws/resource_aws_subnet_test.go @@ -156,6 +156,8 @@ func TestAccAWSSubnet_basic(t *testing.T) { testAccCheckResourceAttrAccountID(resourceName, "owner_id"), resource.TestCheckResourceAttrSet(resourceName, "availability_zone"), resource.TestCheckResourceAttrSet(resourceName, "availability_zone_id"), + resource.TestCheckResourceAttr(resourceName, "customer_owned_ipv4_pool", ""), + resource.TestCheckResourceAttr(resourceName, "map_customer_owned_ip_on_launch", "false"), resource.TestCheckResourceAttr(resourceName, "outpost_arn", ""), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), @@ -374,6 +376,57 @@ func TestAccAWSSubnet_disappears(t *testing.T) { }) } +func TestAccAWSSubnet_CustomerOwnedIpv4Pool(t *testing.T) { + var subnet ec2.Subnet + coipDataSourceName := "data.aws_ec2_coip_pool.test" + resourceName := "aws_subnet.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSOutpostsOutposts(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSubnetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSubnetConfigCustomerOwnedIpv4Pool(), + Check: resource.ComposeTestCheckFunc( + testAccCheckSubnetExists(resourceName, &subnet), + resource.TestCheckResourceAttrPair(resourceName, "customer_owned_ipv4_pool", coipDataSourceName, "pool_id"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSubnet_MapCustomerOwnedIpOnLaunch(t *testing.T) { + var subnet ec2.Subnet + resourceName := "aws_subnet.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSOutpostsOutposts(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSubnetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSubnetConfigMapCustomerOwnedIpOnLaunch(true), + Check: resource.ComposeTestCheckFunc( + testAccCheckSubnetExists(resourceName, &subnet), + resource.TestCheckResourceAttr(resourceName, "map_customer_owned_ip_on_launch", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAWSSubnet_outpost(t *testing.T) { var v ec2.Subnet outpostDataSourceName := 
"data.aws_outposts_outpost.test" @@ -675,6 +728,112 @@ resource "aws_subnet" "test" { `) } +func testAccSubnetConfigCustomerOwnedIpv4Pool() string { + return ` +data "aws_outposts_outposts" "test" {} + +data "aws_outposts_outpost" "test" { + id = tolist(data.aws_outposts_outposts.test.ids)[0] +} + +data "aws_ec2_local_gateway_route_tables" "test" { + filter { + name = "outpost-arn" + values = [data.aws_outposts_outpost.test.arn] + } +} + +data "aws_ec2_coip_pools" "test" { + # Filtering by Local Gateway Route Table ID is documented but not working in EC2 API. + # If there are multiple Outposts in the test account, this lookup can + # be misaligned and cause downstream resource errors. + # + # filter { + # name = "coip-pool.local-gateway-route-table-id" + # values = [tolist(data.aws_ec2_local_gateway_route_tables.test.ids)[0]] + # } +} + +data "aws_ec2_coip_pool" "test" { + pool_id = tolist(data.aws_ec2_coip_pools.test.pool_ids)[0] +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = "terraform-testacc-subnet-outpost" + } +} + +resource "aws_subnet" "test" { + availability_zone = data.aws_outposts_outpost.test.availability_zone + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + customer_owned_ipv4_pool = data.aws_ec2_coip_pool.test.pool_id + map_customer_owned_ip_on_launch = true + outpost_arn = data.aws_outposts_outpost.test.arn + vpc_id = aws_vpc.test.id + + tags = { + Name = "tf-acc-subnet-outpost" + } +} +` +} + +func testAccSubnetConfigMapCustomerOwnedIpOnLaunch(mapCustomerOwnedIpOnLaunch bool) string { + return fmt.Sprintf(` +data "aws_outposts_outposts" "test" {} + +data "aws_outposts_outpost" "test" { + id = tolist(data.aws_outposts_outposts.test.ids)[0] +} + +data "aws_ec2_local_gateway_route_tables" "test" { + filter { + name = "outpost-arn" + values = [data.aws_outposts_outpost.test.arn] + } +} + +data "aws_ec2_coip_pools" "test" { + # Filtering by Local Gateway Route Table ID is documented but not working in EC2 API. + # If there are multiple Outposts in the test account, this lookup can + # be misaligned and cause downstream resource errors. + # + # filter { + # name = "coip-pool.local-gateway-route-table-id" + # values = [tolist(data.aws_ec2_local_gateway_route_tables.test.ids)[0]] + # } +} + +data "aws_ec2_coip_pool" "test" { + pool_id = tolist(data.aws_ec2_coip_pools.test.pool_ids)[0] +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = "terraform-testacc-subnet-outpost" + } +} + +resource "aws_subnet" "test" { + availability_zone = data.aws_outposts_outpost.test.availability_zone + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + customer_owned_ipv4_pool = data.aws_ec2_coip_pool.test.pool_id + map_customer_owned_ip_on_launch = %[1]t + outpost_arn = data.aws_outposts_outpost.test.arn + vpc_id = aws_vpc.test.id + + tags = { + Name = "tf-acc-subnet-outpost" + } +} +`, mapCustomerOwnedIpOnLaunch) +} + func testAccSubnetConfigOutpost() string { return ` data "aws_outposts_outposts" "test" {} diff --git a/website/docs/d/subnet.html.markdown b/website/docs/d/subnet.html.markdown index fec6331ba3f..db1d9a4d6e5 100644 --- a/website/docs/d/subnet.html.markdown +++ b/website/docs/d/subnet.html.markdown @@ -97,5 +97,8 @@ the selected subnet. In addition the following attributes are exported: * `arn` - The ARN of the subnet. +* `customer_owned_ipv4_pool` - Identifier of customer owned IPv4 address pool. 
+* `map_customer_owned_ip_on_launch` - Whether customer owned IP addresses are assigned on network interface creation. +* `map_public_ip_on_launch` - Whether public IP addresses are assigned on instance launch. * `owner_id` - The ID of the AWS account that owns the subnet. * `outpost_arn` - The Amazon Resource Name (ARN) of the Outpost. diff --git a/website/docs/r/subnet.html.markdown b/website/docs/r/subnet.html.markdown index e3184e6b520..3c1d0e9b213 100644 --- a/website/docs/r/subnet.html.markdown +++ b/website/docs/r/subnet.html.markdown @@ -51,8 +51,10 @@ The following arguments are supported: * `availability_zone` - (Optional) The AZ for the subnet. * `availability_zone_id` - (Optional) The AZ ID of the subnet. * `cidr_block` - (Required) The CIDR block for the subnet. +* `customer_owned_ipv4_pool` - (Optional) The customer owned IPv4 address pool. Typically used with the `map_customer_owned_ip_on_launch` argument. The `outpost_arn` argument must be specified when configured. * `ipv6_cidr_block` - (Optional) The IPv6 network range for the subnet, in CIDR notation. The subnet size must use a /64 prefix length. +* `map_customer_owned_ip_on_launch` - (Optional) Specify `true` to indicate that network interfaces created in the subnet should be assigned a customer owned IP address. The `customer_owned_ipv4_pool` and `outpost_arn` arguments must be specified when set to `true`. Default is `false`. * `map_public_ip_on_launch` - (Optional) Specify true to indicate that instances launched into the subnet should be assigned a public IP address. Default is `false`. From 8604ba828b5c22908c630ddc50e52f70084b0034 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 29 Jan 2021 15:18:54 +0000 Subject: [PATCH 0927/1212] build(deps): bump github.com/hashicorp/terraform-plugin-sdk/v2 (#17326) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ddea0c2809d..375c3664226 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/go-multierror v1.1.0 github.com/hashicorp/go-version v1.2.1 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.1 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.2 github.com/jen20/awspolicyequivalence v1.1.0 github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba github.com/mattn/go-colorable v0.1.7 // indirect diff --git a/go.sum b/go.sum index 3f6892724e4..78a3559f029 100644 --- a/go.sum +++ b/go.sum @@ -211,8 +211,8 @@ github.com/hashicorp/terraform-json v0.8.0 h1:XObQ3PgqU52YLQKEaJ08QtUshAfN3yu4u8 github.com/hashicorp/terraform-json v0.8.0/go.mod h1:3defM4kkMfttwiE7VakJDwCd4R+umhSQnvJwORXbprE= github.com/hashicorp/terraform-plugin-go v0.2.1 h1:EW/R8bB2Zbkjmugzsy1d27yS8/0454b3MtYHkzOknqA= github.com/hashicorp/terraform-plugin-go v0.2.1/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.1 h1:k2rpom9wG2cdi5iLRH80EdQB7UX/E6UzYzUfzgsNLuU= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.1/go.mod h1:jgCWyjKf1BRqzuA3IPJb6PJ2YY86ePJurX9xfJtuYNU= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.2 h1:8oo4eMtv3nEZGqe8W0UzMxKnKWuwS/Tb2YyIFJkL59g= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.2/go.mod h1:jgCWyjKf1BRqzuA3IPJb6PJ2YY86ePJurX9xfJtuYNU= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux 
v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= From 311202aa7f4b84de78233f2eae6d72ccdd9afce6 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Fri, 29 Jan 2021 10:46:44 -0500 Subject: [PATCH 0928/1212] scripts: Allow H1 headers in CHANGELOG.md and generate current CHANGELOG (#17358) --- CHANGELOG.md | 11 +++++++++++ scripts/generate-changelog.sh | 7 ++++--- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e85665ec59c..acffe23d6fb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,16 @@ # 3.27.0 (Unreleased) +ENHANCEMENTS: + +* data-source/aws_subnet: Add `customer_owned_ipv4_pool` and `map_customer_owned_ip_on_launch` attributes ([#16676](https://github.com/hashicorp/terraform-provider-aws/issues/16676)) +* resource/aws_glacier_vault: Add plan-time validation for `notification` configuration block `events` and `sns_topic_arn` arguments ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645)) +* resource/aws_subnet: Add `customer_owned_ipv4_pool` and `map_customer_owned_ip_on_launch` attributes ([#16676](https://github.com/hashicorp/terraform-provider-aws/issues/16676)) + +BUG FIXES: + +* resource/aws_glacier_vault: Prevent crash with `GetVaultAccessPolicy` API errors ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645)) +* resource/aws_glacier_vault: Properly remove from state when resource does not exist ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645)) + # 3.26.0 (January 28, 2021) NOTES: diff --git a/scripts/generate-changelog.sh b/scripts/generate-changelog.sh index a72ac73c7d3..a5a09b9a178 100755 --- a/scripts/generate-changelog.sh +++ b/scripts/generate-changelog.sh @@ -9,14 +9,15 @@ __parent="$(dirname "$__dir")" CHANGELOG_FILE_NAME="CHANGELOG.md" CHANGELOG_TMP_FILE_NAME="CHANGELOG.tmp" TARGET_SHA=$(git rev-parse HEAD) -PREVIOUS_RELEASE_SHA=$(git rev-list -n 1 $(git describe --abbrev=0 --match='v*.*.*' --tags)) +PREVIOUS_RELEASE_TAG=$(git describe --abbrev=0 --match='v*.*.*' --tags) +PREVIOUS_RELEASE_SHA=$(git rev-list -n 1 $PREVIOUS_RELEASE_TAG) if [ $TARGET_SHA == $PREVIOUS_RELEASE_SHA ]; then echo "Nothing to do" exit 0 fi -PREVIOUS_CHANGELOG=$(sed -n -e "/## $(git describe --abbrev=0 --match='v*.*.*' --tags | tr -d v)/,\$p" $__parent/$CHANGELOG_FILE_NAME) +PREVIOUS_CHANGELOG=$(sed -n -e "/# ${PREVIOUS_RELEASE_TAG#v}/,\$p" $__parent/$CHANGELOG_FILE_NAME) if [ -z "$PREVIOUS_CHANGELOG" ] then @@ -39,7 +40,7 @@ fi rm -f $CHANGELOG_TMP_FILE_NAME -sed -n -e "1{/## /p;}" $__parent/$CHANGELOG_FILE_NAME > $CHANGELOG_TMP_FILE_NAME +sed -n -e "1{/# /p;}" $__parent/$CHANGELOG_FILE_NAME > $CHANGELOG_TMP_FILE_NAME echo "$CHANGELOG" >> $CHANGELOG_TMP_FILE_NAME echo >> $CHANGELOG_TMP_FILE_NAME echo "$PREVIOUS_CHANGELOG" >> $CHANGELOG_TMP_FILE_NAME From d7f96173f26e30210949adaab6747dfe4e5fa09c Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 28 Jan 2021 23:15:41 +0200 Subject: [PATCH 0929/1212] fix partition ordering --- aws/internal/service/glue/finder/finder.go | 27 ++++++++++++++ aws/internal/service/glue/id.go | 7 ++-- aws/resource_aws_glue_partition.go | 31 +++++----------- aws/resource_aws_glue_partition_test.go | 42 ++++------------------ 4 files changed, 45 insertions(+), 62 deletions(-) diff --git a/aws/internal/service/glue/finder/finder.go b/aws/internal/service/glue/finder/finder.go index 
fdceee9166b..e402963f1c1 100644
--- a/aws/internal/service/glue/finder/finder.go
+++ b/aws/internal/service/glue/finder/finder.go
@@ -50,3 +50,31 @@ func SchemaVersionByID(conn *glue.Glue, id string) (*glue.GetSchemaVersionOutput
 
 	return output, nil
 }
+
+// PartitionByValues returns the Partition corresponding to the specified Partition Values.
+func PartitionByValues(conn *glue.Glue, id string) (*glue.Partition, error) {
+
+	catalogID, dbName, tableName, values, err := tfglue.ReadAwsGluePartitionID(id)
+	if err != nil {
+		return nil, err
+	}
+
+	input := &glue.GetPartitionInput{
+		CatalogId:       aws.String(catalogID),
+		DatabaseName:    aws.String(dbName),
+		TableName:       aws.String(tableName),
+		PartitionValues: aws.StringSlice(values),
+	}
+
+	output, err := conn.GetPartition(input)
+	if err != nil {
+		return nil, err
+	}
+
+	// Guard against a nil response or missing partition before dereferencing.
+	if output == nil || output.Partition == nil {
+		return nil, nil
+	}
+
+	return output.Partition, nil
+}
diff --git a/aws/internal/service/glue/id.go b/aws/internal/service/glue/id.go
index 2bc05fff411..77a22050498 100644
--- a/aws/internal/service/glue/id.go
+++ b/aws/internal/service/glue/id.go
@@ -7,7 +7,6 @@ import (
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/glue"
-	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 func ReadAwsGluePartitionID(id string) (catalogID string, dbName string, tableName string, values []string, error error) {
@@ -19,13 +18,13 @@ func ReadAwsGluePartitionID(id string) (catalogID string, dbName string, tableNa
 	return idParts[0], idParts[1], idParts[2], vals, nil
 }
 
-func CreateAwsGluePartitionID(catalogID, dbName, tableName string, values *schema.Set) string {
+func CreateAwsGluePartitionID(catalogID, dbName, tableName string, values []interface{}) string {
 	return fmt.Sprintf("%s:%s:%s:%s", catalogID, dbName, tableName, stringifyAwsGluePartition(values))
 }
 
-func stringifyAwsGluePartition(partValues *schema.Set) string {
+func stringifyAwsGluePartition(partValues []interface{}) string {
 	var b bytes.Buffer
-	for _, val := range partValues.List() {
+	for _, val := range partValues {
 		b.WriteString(fmt.Sprintf("%s#", val.(string)))
 	}
 	vals := strings.Trim(b.String(), "#")
diff --git a/aws/resource_aws_glue_partition.go b/aws/resource_aws_glue_partition.go
index 7bc957b4a8d..245df802500 100644
--- a/aws/resource_aws_glue_partition.go
+++ b/aws/resource_aws_glue_partition.go
@@ -10,6 +10,7 @@ import (
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
 	tfglue "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/glue"
+	"github.com/terraform-providers/terraform-provider-aws/aws/internal/service/glue/finder"
 )
 
 func resourceAwsGluePartition() *schema.Resource {
@@ -42,7 +43,7 @@ func resourceAwsGluePartition() *schema.Resource {
 				ValidateFunc: validation.StringLenBetween(1, 255),
 			},
 			"partition_values": {
-				Type:     schema.TypeSet,
+				Type:     schema.TypeList,
 				Required: true,
 				ForceNew: true,
 				Elem:     &schema.Schema{Type: schema.TypeString},
@@ -198,7 +199,7 @@ func resourceAwsGluePartitionCreate(d *schema.ResourceData, meta interface{}) er
 	catalogID := createAwsGlueCatalogID(d, meta.(*AWSClient).accountid)
 	dbName := d.Get("database_name").(string)
 	tableName := d.Get("table_name").(string)
-	values := d.Get("partition_values").(*schema.Set)
+	values := d.Get("partition_values").([]interface{})
 
 	input := &glue.CreatePartitionInput{
 		CatalogId:    aws.String(catalogID),
@@ -221,37 +222,21 @@ func resourceAwsGluePartitionCreate(d
diff --git a/aws/resource_aws_glue_partition.go b/aws/resource_aws_glue_partition.go index 7bc957b4a8d..245df802500 100644 --- a/aws/resource_aws_glue_partition.go +++ b/aws/resource_aws_glue_partition.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" tfglue "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/glue" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/glue/finder" ) func resourceAwsGluePartition() *schema.Resource { @@ -42,7 +43,7 @@ func resourceAwsGluePartition() *schema.Resource { ValidateFunc: validation.StringLenBetween(1, 255), }, "partition_values": { - Type: schema.TypeSet, + Type: schema.TypeList, Required: true, ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, @@ -198,7 +199,7 @@ func resourceAwsGluePartitionCreate(d *schema.ResourceData, meta interface{}) er catalogID := createAwsGlueCatalogID(d, meta.(*AWSClient).accountid) dbName := d.Get("database_name").(string) tableName := d.Get("table_name").(string) - values := d.Get("partition_values").(*schema.Set) + values := d.Get("partition_values").([]interface{}) input := &glue.CreatePartitionInput{ CatalogId: aws.String(catalogID), @@ -221,37 +222,21 @@ func resourceAwsGluePartitionCreate(d *schema.ResourceData, meta interface{}) er func resourceAwsGluePartitionRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).glueconn - catalogID, dbName, tableName, values, err := tfglue.ReadAwsGluePartitionID(d.Id()) - if err != nil { - return err - } - log.Printf("[DEBUG] Reading Glue Partition: %s", d.Id()) - input := &glue.GetPartitionInput{ - CatalogId: aws.String(catalogID), - DatabaseName: aws.String(dbName), - TableName: aws.String(tableName), - PartitionValues: aws.StringSlice(values), - } - - out, err := conn.GetPartition(input) + partition, err := finder.PartitionByValues(conn, d.Id()) if err != nil { - if isAWSErr(err, glue.ErrCodeEntityNotFoundException, "") { log.Printf("[WARN] Glue Partition (%s) not found, removing from state", d.Id()) d.SetId("") return nil } - return fmt.Errorf("error reading Glue Partition: %w", err) } - partition := out.Partition - d.Set("table_name", partition.TableName) - d.Set("catalog_id", catalogID) + d.Set("catalog_id", partition.CatalogId) d.Set("database_name", partition.DatabaseName) - d.Set("partition_values", flattenStringSet(partition.Values)) + d.Set("partition_values", flattenStringList(partition.Values)) if partition.LastAccessTime != nil { d.Set("last_accessed_time", partition.LastAccessTime.Format(time.RFC3339)) @@ -332,8 +317,8 @@ func expandGluePartitionInput(d *schema.ResourceData) *glue.PartitionInput { tableInput.Parameters = stringMapToPointers(v.(map[string]interface{})) } - if v, ok := d.GetOk("partition_values"); ok && v.(*schema.Set).Len() > 0 { - tableInput.Values = expandStringSet(v.(*schema.Set)) + if v, ok := d.GetOk("partition_values"); ok && len(v.([]interface{})) > 0 { + tableInput.Values = expandStringList(v.([]interface{})) } return tableInput diff --git a/aws/resource_aws_glue_partition_test.go b/aws/resource_aws_glue_partition_test.go index 5001ec0d8ae..4fbf56617ab 100644 --- a/aws/resource_aws_glue_partition_test.go +++ b/aws/resource_aws_glue_partition_test.go @@ -2,15 +2,13 @@ package aws import ( "fmt" - "reflect" "testing" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/glue" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - tfglue "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/glue" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/glue/finder" ) func TestAccAWSGluePartition_basic(t *testing.T) { @@ -30,7 +28,7 @@ func TestAccAWSGluePartition_basic(t *testing.T) { testAccCheckResourceAttrAccountID(resourceName, "catalog_id"), resource.TestCheckResourceAttr(resourceName, "database_name", rName), resource.TestCheckResourceAttr(resourceName, "partition_values.#", "1"), - resource.TestCheckTypeSetElemAttr(resourceName, "partition_values.*", parValue), + resource.TestCheckResourceAttr(resourceName, "partition_values.0", parValue), resource.TestCheckResourceAttr(resourceName, "parameters.%", "0"), resource.TestCheckResourceAttrSet(resourceName, "creation_time"), ), @@ -60,8 +58,8 @@ func TestAccAWSGluePartition_multipleValues(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckGluePartitionExists(resourceName), resource.TestCheckResourceAttr(resourceName, "partition_values.#", "2"), - resource.TestCheckTypeSetElemAttr(resourceName, "partition_values.*", parValue), - resource.TestCheckTypeSetElemAttr(resourceName, "partition_values.*", parValue2), +
resource.TestCheckResourceAttr(resourceName, "partition_values.0", parValue), + resource.TestCheckResourceAttr(resourceName, "partition_values.1", parValue2), ), }, { @@ -147,18 +145,7 @@ func testAccCheckGluePartitionDestroy(s *terraform.State) error { continue } - catalogID, dbName, tableName, values, err := tfglue.ReadAwsGluePartitionID(rs.Primary.ID) - if err != nil { - return err - } - - input := &glue.GetPartitionInput{ - DatabaseName: aws.String(dbName), - CatalogId: aws.String(catalogID), - TableName: aws.String(tableName), - PartitionValues: aws.StringSlice(values), - } - if _, err := conn.GetPartition(input); err != nil { + if _, err := finder.PartitionByValues(conn, rs.Primary.ID); err != nil { if isAWSErr(err, glue.ErrCodeEntityNotFoundException, "") { continue } @@ -181,31 +168,16 @@ func testAccCheckGluePartitionExists(name string) resource.TestCheckFunc { return fmt.Errorf("No ID is set") } - catalogID, dbName, tableName, values, err := tfglue.ReadAwsGluePartitionID(rs.Primary.ID) - if err != nil { - return err - } - conn := testAccProvider.Meta().(*AWSClient).glueconn - out, err := conn.GetPartition(&glue.GetPartitionInput{ - DatabaseName: aws.String(dbName), - CatalogId: aws.String(catalogID), - TableName: aws.String(tableName), - PartitionValues: aws.StringSlice(values), - }) - + out, err := finder.PartitionByValues(conn, rs.Primary.ID) if err != nil { return err } - if out.Partition == nil { + if out == nil { return fmt.Errorf("No Glue Partition Found") } - if !reflect.DeepEqual(aws.StringValueSlice(out.Partition.Values), values) { - return fmt.Errorf("Glue Partition Mismatch") - } - return nil } } From ad8293cc2ec8556783e4896eed33ae97e56f5919 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 29 Jan 2021 15:58:17 +0000 Subject: [PATCH 0930/1212] build(deps): bump github.com/hashicorp/terraform-plugin-sdk/v2 (#17324) --- awsproviderlint/go.mod | 2 +- awsproviderlint/go.sum | 4 +- .../v2/helper/schema/schema.go | 40 +++++++++++++++---- .../terraform-plugin-sdk/v2/meta/meta.go | 2 +- awsproviderlint/vendor/modules.txt | 2 +- 5 files changed, 38 insertions(+), 12 deletions(-) diff --git a/awsproviderlint/go.mod b/awsproviderlint/go.mod index 41ef31008cf..b826dab31c5 100644 --- a/awsproviderlint/go.mod +++ b/awsproviderlint/go.mod @@ -5,6 +5,6 @@ go 1.15 require ( github.com/aws/aws-sdk-go v1.37.0 github.com/bflad/tfproviderlint v0.21.0 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.1 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.2 golang.org/x/tools v0.0.0-20200928201943-a0ef9b62deab ) diff --git a/awsproviderlint/go.sum b/awsproviderlint/go.sum index 4d4dcc5503e..659221bcc8d 100644 --- a/awsproviderlint/go.sum +++ b/awsproviderlint/go.sum @@ -217,8 +217,8 @@ github.com/hashicorp/terraform-plugin-sdk v1.9.0 h1:WBHHIX/RgF6/lbfMCzx0qKl96BbQ github.com/hashicorp/terraform-plugin-sdk v1.9.0/go.mod h1:C/AXwmDHqbc3h6URiHpIsVKrwV4PS0Sh0+VTaeEkShw= github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.0 h1:jPPqctLDg75CilV3IpypAz6on3MSMOiUMzXNz+Xex6E= github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.0/go.mod h1:xOf85UtHJ0/9/EF3eKgZFlJ6feN8sDtjQRWRHhimCUw= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.1 h1:k2rpom9wG2cdi5iLRH80EdQB7UX/E6UzYzUfzgsNLuU= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.1/go.mod h1:jgCWyjKf1BRqzuA3IPJb6PJ2YY86ePJurX9xfJtuYNU= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.2 h1:8oo4eMtv3nEZGqe8W0UzMxKnKWuwS/Tb2YyIFJkL59g= +github.com/hashicorp/terraform-plugin-sdk/v2 
v2.4.2/go.mod h1:jgCWyjKf1BRqzuA3IPJb6PJ2YY86ePJurX9xfJtuYNU= github.com/hashicorp/terraform-plugin-test v1.2.0 h1:AWFdqyfnOj04sxTdaAF57QqvW7XXrT8PseUHkbKsE8I= github.com/hashicorp/terraform-plugin-test v1.2.0/go.mod h1:QIJHYz8j+xJtdtLrFTlzQVC0ocr3rf/OjIpgZLK56Hs= github.com/hashicorp/terraform-plugin-test/v2 v2.0.0-20200724200815-faa9931ac59e h1:Q8lNGrk3SVdXEbLuUJD03jghIjykJT9pu1aReKgb858= diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go index f3df1bb4eea..7146bef766b 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go @@ -408,6 +408,7 @@ func (s *Schema) finalizeDiff(d *terraform.ResourceAttrDiff, customized bool) *t if d.Old != "" && d.New == "" { // This is a computed value with an old value set already, // just let it go. + log.Println("[DEBUG] A computed value with the empty string as the new value and a non-empty old value was found. Interpreting the empty string as \"unset\" to align with legacy behavior.") return nil } } @@ -1061,13 +1062,18 @@ func (m schemaMap) diffList( oldStr = "" } - diff.Attributes[k+".#"] = countSchema.finalizeDiff( + finalizedAttr := countSchema.finalizeDiff( &terraform.ResourceAttrDiff{ Old: oldStr, New: newStr, }, customized, ) + if finalizedAttr != nil { + diff.Attributes[k+".#"] = finalizedAttr + } else { + delete(diff.Attributes, k+".#") + } } // Figure out the maximum @@ -1167,13 +1173,18 @@ func (m schemaMap) diffMap( oldStr = "" } - diff.Attributes[k+".%"] = countSchema.finalizeDiff( + finalizedAttr := countSchema.finalizeDiff( &terraform.ResourceAttrDiff{ Old: oldStr, New: newStr, }, customized, ) + if finalizedAttr != nil { + diff.Attributes[k+".%"] = finalizedAttr + } else { + delete(diff.Attributes, k+".%") + } } // If the new map is nil and we're computed, then ignore it. @@ -1190,22 +1201,28 @@ func (m schemaMap) diffMap( continue } - diff.Attributes[prefix+k] = schema.finalizeDiff( + finalizedAttr := schema.finalizeDiff( &terraform.ResourceAttrDiff{ Old: old, New: v, }, customized, ) + if finalizedAttr != nil { + diff.Attributes[prefix+k] = finalizedAttr + } } for k, v := range stateMap { - diff.Attributes[prefix+k] = schema.finalizeDiff( + finalizedAttr := schema.finalizeDiff( &terraform.ResourceAttrDiff{ Old: v, NewRemoved: true, }, customized, ) + if finalizedAttr != nil { + diff.Attributes[prefix+k] = finalizedAttr + } } return nil @@ -1277,26 +1294,32 @@ func (m schemaMap) diffSet( countStr = "" } - diff.Attributes[k+".#"] = countSchema.finalizeDiff( + finalizedAttr := countSchema.finalizeDiff( &terraform.ResourceAttrDiff{ Old: countStr, NewComputed: true, }, customized, ) + if finalizedAttr != nil { + diff.Attributes[k+".#"] = finalizedAttr + } return nil } // If the counts are not the same, then record that diff changed := oldLen != newLen if changed || all { - diff.Attributes[k+".#"] = countSchema.finalizeDiff( + finalizedAttr := countSchema.finalizeDiff( &terraform.ResourceAttrDiff{ Old: oldStr, New: newStr, }, customized, ) + if finalizedAttr != nil { + diff.Attributes[k+".#"] = finalizedAttr + } } // Build the list of codes that will make up our set. 
This is the @@ -1383,7 +1406,7 @@ func (m schemaMap) diffString( return nil } - diff.Attributes[k] = schema.finalizeDiff( + finalizedAttr := schema.finalizeDiff( &terraform.ResourceAttrDiff{ Old: os, New: ns, @@ -1393,6 +1416,9 @@ func (m schemaMap) diffString( }, customized, ) + if finalizedAttr != nil { + diff.Attributes[k] = finalizedAttr + } return nil } diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go index c6084c41d3f..a36cabe9049 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go @@ -11,7 +11,7 @@ import ( ) // The main version number that is being run at the moment. -var SDKVersion = "2.4.1" +var SDKVersion = "2.4.2" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release diff --git a/awsproviderlint/vendor/modules.txt b/awsproviderlint/vendor/modules.txt index be13611d063..7e6068d0087 100644 --- a/awsproviderlint/vendor/modules.txt +++ b/awsproviderlint/vendor/modules.txt @@ -275,7 +275,7 @@ github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5 github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto github.com/hashicorp/terraform-plugin-go/tfprotov5/server github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes -# github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.1 +# github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.2 ## explicit github.com/hashicorp/terraform-plugin-sdk/v2/diag github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging From 701abad850a8d958e8816afd6d3cfec6801b48f3 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 29 Jan 2021 18:07:07 +0200 Subject: [PATCH 0931/1212] changelog --- .changelog/17344.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/17344.txt diff --git a/.changelog/17344.txt b/.changelog/17344.txt new file mode 100644 index 00000000000..5500b9e7494 --- /dev/null +++ b/.changelog/17344.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_glue_partition: Fix `partition_values` to preserve order.
+``` From ddd0d84c43358ba45621f53b44327a15f228eccf Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Fri, 29 Jan 2021 08:20:29 -0800 Subject: [PATCH 0932/1212] Update CHANGELOG.md to correct headings --- CHANGELOG.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index acffe23d6fb..c98d1d64b51 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -# 3.27.0 (Unreleased) +## 3.27.0 (Unreleased) ENHANCEMENTS: @@ -11,7 +11,7 @@ BUG FIXES: * resource/aws_glacier_vault: Prevent crash with `GetVaultAccessPolicy` API errors ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645)) * resource/aws_glacier_vault: Properly remove from state when resource does not exist ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645)) -# 3.26.0 (January 28, 2021) +## 3.26.0 (January 28, 2021) NOTES: From 56ea894739d213634d1cdf1252e7a5ab37917632 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Fri, 29 Jan 2021 13:06:28 -0500 Subject: [PATCH 0933/1212] resource/aws_kinesis_firehose_delivery_stream: Use IAM timeout constant for retries, add LakeFormation permissions retries and configuration to tests (#17254) * resource/aws_kinesis_firehose_delivery_stream: Use IAM timeout constant for retries, add LakeFormation permissions retries and configuration to tests Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16752 Previously: ``` === CONT TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_HiveJsonSerDe_Empty resource_aws_kinesis_firehose_delivery_stream_test.go:638: Step 1/2 error: Error running apply: Error: error creating Kinesis Firehose Delivery Stream: InvalidArgumentException: Access was denied when calling Glue. Please ensure that the role specified in the data format conversion configuration has the necessary permissions. Insufficient Lake Formation permission(s) on tf-acc-test-4731441258578020859 (Service: AWSGlue; Status Code: 400; Error Code: AccessDeniedException; Request ID: 67116cf3-6102-4d1e-9229-a8c0e63cf9f7; Proxy: null) --- FAIL: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_HiveJsonSerDe_Empty (21.32s) === CONT TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_Deserializer_Update resource_aws_kinesis_firehose_delivery_stream_test.go:596: Step 1/3 error: Error running apply: Error: error creating Kinesis Firehose Delivery Stream: InvalidArgumentException: Access was denied when calling Glue. Please ensure that the role specified in the data format conversion configuration has the necessary permissions. Insufficient Lake Formation permission(s) on tf-acc-test-1453880257072042205 (Service: AWSGlue; Status Code: 400; Error Code: AccessDeniedException; Request ID: a5a8ef8d-e7c8-419b-a5a3-b762145c6783; Proxy: null) --- FAIL: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_Deserializer_Update (30.13s) === CONT TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_OpenXJsonSerDe_Empty resource_aws_kinesis_firehose_delivery_stream_test.go:669: Step 1/2 error: Error running apply: Error: error creating Kinesis Firehose Delivery Stream: InvalidArgumentException: Access was denied when calling Glue. Please ensure that the role specified in the data format conversion configuration has the necessary permissions. 
Insufficient Lake Formation permission(s) on tf-acc-test-4296742326842474514 (Service: AWSGlue; Status Code: 400; Error Code: AccessDeniedException; Request ID: 67b204a4-290f-4b8b-bba7-ec850759a4fe; Proxy: null) --- FAIL: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_OpenXJsonSerDe_Empty (18.58s) === CONT TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_OrcSerDe_Empty resource_aws_kinesis_firehose_delivery_stream_test.go:700: Step 1/2 error: Error running apply: Error: error creating Kinesis Firehose Delivery Stream: InvalidArgumentException: Access was denied when calling Glue. Please ensure that the role specified in the data format conversion configuration has the necessary permissions. Insufficient Lake Formation permission(s) on tf-acc-test-4205955522949248362 (Service: AWSGlue; Status Code: 400; Error Code: AccessDeniedException; Request ID: 9cd73bb5-9a58-4c35-b2da-4e3f12e17415; Proxy: null) --- FAIL: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_OrcSerDe_Empty (21.11s) === CONT TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_ParquetSerDe_Empty resource_aws_kinesis_firehose_delivery_stream_test.go:731: Step 1/2 error: Error running apply: Error: error creating Kinesis Firehose Delivery Stream: InvalidArgumentException: Access was denied when calling Glue. Please ensure that the role specified in the data format conversion configuration has the necessary permissions. Insufficient Lake Formation permission(s) on tf-acc-test-2371862365551213044 (Service: AWSGlue; Status Code: 400; Error Code: AccessDeniedException; Request ID: 2e0188ba-98ba-496b-99f1-804376dc5862; Proxy: null) --- FAIL: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_ParquetSerDe_Empty (25.47s) === CONT TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_Serializer_Update resource_aws_kinesis_firehose_delivery_stream_test.go:762: Step 1/3 error: Error running apply: Error: error creating Kinesis Firehose Delivery Stream: InvalidArgumentException: Access was denied when calling Glue. Please ensure that the role specified in the data format conversion configuration has the necessary permissions. Insufficient Lake Formation permission(s) on tf-acc-test-2168117662921768660 (Service: AWSGlue; Status Code: 400; Error Code: AccessDeniedException; Request ID: aa73610e-cac0-44a6-8e0a-fded3e5c6bd9; Proxy: null) --- FAIL: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_Serializer_Update (25.85s) === CONT TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_Enabled resource_aws_kinesis_firehose_delivery_stream_test.go:490: Step 3/4 error: Error running apply: Error: Error Updating Kinesis Firehose Delivery Stream: "tf-acc-test-8695271398619453258" InvalidArgumentException: Access was denied when calling Glue. Please ensure that the role specified in the data format conversion configuration has the necessary permissions. 
Insufficient Lake Formation permission(s) on tf-acc-test-8695271398619453258 (Service: AWSGlue; Status Code: 400; Error Code: AccessDeniedException; Request ID: c6b9cf64-3918-4140-b85b-fe53c0a4406b; Proxy: null) --- FAIL: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_Enabled (111.38s) ``` Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_basic (131.86s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_disappears (90.21s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ElasticsearchConfigEndpointUpdates (678.89s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ElasticsearchConfigUpdates (975.34s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ElasticsearchWithVpcConfigUpdates (1432.78s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_Deserializer_Update (160.49s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_Enabled (176.11s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_HiveJsonSerDe_Empty (135.95s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_OpenXJsonSerDe_Empty (131.68s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_OrcSerDe_Empty (120.16s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_ParquetSerDe_Empty (136.73s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_Serializer_Update (120.12s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_ErrorOutputPrefix (124.47s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_ExternalUpdate (162.47s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_KinesisStreamSource (95.95s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_ProcessingConfiguration_Empty (126.45s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3basic (136.44s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3KmsKeyArn (124.11s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3Updates (176.36s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_HttpEndpointConfiguration (135.22s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_HttpEndpointConfiguration_RetryDuration (126.68s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_missingProcessingConfiguration (126.21s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_RedshiftConfigUpdates (437.94s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_s3basic (104.37s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_s3basicWithSSE (295.11s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_s3basicWithSSEAndKeyArn (260.98s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_s3basicWithSSEAndKeyType (248.31s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_s3basicWithTags (140.47s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_s3ConfigUpdates (197.64s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_s3KinesisStreamSource (94.39s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_s3WithCloudwatchLogging (81.40s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_SplunkConfigUpdates (156.62s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- FAIL: TestAccAWSKinesisFirehoseDeliveryStream_RedshiftConfigUpdates (18.19s) # unrelated; did not succeed while acquiring capacity --- PASS: 
TestAccAWSKinesisFirehoseDeliveryStream_basic (112.04s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_disappears (82.08s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ElasticsearchConfigEndpointUpdates (727.48s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ElasticsearchConfigUpdates (640.24s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ElasticsearchWithVpcConfigUpdates (1538.54s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_Deserializer_Update (134.15s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_Enabled (162.17s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_HiveJsonSerDe_Empty (96.38s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_OpenXJsonSerDe_Empty (101.65s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_OrcSerDe_Empty (101.56s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_ParquetSerDe_Empty (110.19s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_DataFormatConversionConfiguration_Serializer_Update (95.24s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_ErrorOutputPrefix (127.33s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_ExternalUpdate (122.80s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_KinesisStreamSource (103.54s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3_ProcessingConfiguration_Empty (91.54s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3basic (120.64s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3KmsKeyArn (107.85s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3Updates (158.79s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_HttpEndpointConfiguration (108.81s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_HttpEndpointConfiguration_RetryDuration (113.39s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_missingProcessingConfiguration (101.69s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_s3basic (66.63s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_s3basicWithSSE (212.50s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_s3basicWithSSEAndKeyArn (191.54s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_s3basicWithSSEAndKeyType (218.06s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_s3basicWithTags (126.11s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_s3ConfigUpdates (169.25s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_s3KinesisStreamSource (105.90s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_s3WithCloudwatchLogging (94.65s) --- PASS: TestAccAWSKinesisFirehoseDeliveryStream_SplunkConfigUpdates (148.60s) ``` * Update CHANGELOG for #17254 --- .changelog/17254.txt | 3 + ...ce_aws_kinesis_firehose_delivery_stream.go | 52 +++++++------- ...s_kinesis_firehose_delivery_stream_test.go | 70 +++++++++++++++++-- 3 files changed, 96 insertions(+), 29 deletions(-) create mode 100644 .changelog/17254.txt diff --git a/.changelog/17254.txt b/.changelog/17254.txt new file mode 100644 index 00000000000..2687ec3fde7 --- /dev/null +++ b/.changelog/17254.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_kinesis_firehose_delivery_stream: Use standard retry timeout for IAM eventual consistency and retry on LakeFormation access errors +``` diff --git a/aws/resource_aws_kinesis_firehose_delivery_stream.go 
b/aws/resource_aws_kinesis_firehose_delivery_stream.go index c3998de9548..60265f3267a 100644 --- a/aws/resource_aws_kinesis_firehose_delivery_stream.go +++ b/aws/resource_aws_kinesis_firehose_delivery_stream.go @@ -10,10 +10,12 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/firehose" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + iamwaiter "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/iam/waiter" ) const ( @@ -2515,30 +2517,31 @@ func resourceAwsKinesisFirehoseDeliveryStreamCreate(d *schema.ResourceData, meta createInput.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().FirehoseTags() } - err := resource.Retry(1*time.Minute, func() *resource.RetryError { + err := resource.Retry(iamwaiter.PropagationTimeout, func() *resource.RetryError { _, err := conn.CreateDeliveryStream(createInput) if err != nil { - log.Printf("[DEBUG] Error creating Firehose Delivery Stream: %s", err) + // Access was denied when calling Glue. Please ensure that the role specified in the data format conversion configuration has the necessary permissions. + if tfawserr.ErrMessageContains(err, firehose.ErrCodeInvalidArgumentException, "Access was denied") { + return resource.RetryableError(err) + } - // Retry for IAM eventual consistency - if isAWSErr(err, firehose.ErrCodeInvalidArgumentException, "is not authorized to") { + if tfawserr.ErrMessageContains(err, firehose.ErrCodeInvalidArgumentException, "is not authorized to") { return resource.RetryableError(err) } - // Retry for IAM eventual consistency - if isAWSErr(err, firehose.ErrCodeInvalidArgumentException, "Please make sure the role specified in VpcConfiguration has permissions") { + + if tfawserr.ErrMessageContains(err, firehose.ErrCodeInvalidArgumentException, "Please make sure the role specified in VpcConfiguration has permissions") { return resource.RetryableError(err) } + // InvalidArgumentException: Verify that the IAM role has access to the ElasticSearch domain. 
- if isAWSErr(err, firehose.ErrCodeInvalidArgumentException, "Verify that the IAM role has access") { + if tfawserr.ErrMessageContains(err, firehose.ErrCodeInvalidArgumentException, "Verify that the IAM role has access") { return resource.RetryableError(err) } - // IAM roles can take ~10 seconds to propagate in AWS: - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console - if isAWSErr(err, firehose.ErrCodeInvalidArgumentException, "Firehose is unable to assume role") { - log.Printf("[DEBUG] Firehose could not assume role referenced, retrying...") + + if tfawserr.ErrMessageContains(err, firehose.ErrCodeInvalidArgumentException, "Firehose is unable to assume role") { return resource.RetryableError(err) } - // Not retryable + return resource.NonRetryableError(err) } @@ -2660,30 +2663,31 @@ func resourceAwsKinesisFirehoseDeliveryStreamUpdate(d *schema.ResourceData, meta } } - err := resource.Retry(1*time.Minute, func() *resource.RetryError { + err := resource.Retry(iamwaiter.PropagationTimeout, func() *resource.RetryError { _, err := conn.UpdateDestination(updateInput) if err != nil { - log.Printf("[DEBUG] Error updating Firehose Delivery Stream: %s", err) + // Access was denied when calling Glue. Please ensure that the role specified in the data format conversion configuration has the necessary permissions. + if tfawserr.ErrMessageContains(err, firehose.ErrCodeInvalidArgumentException, "Access was denied") { + return resource.RetryableError(err) + } - // Retry for IAM eventual consistency - if isAWSErr(err, firehose.ErrCodeInvalidArgumentException, "is not authorized to") { + if tfawserr.ErrMessageContains(err, firehose.ErrCodeInvalidArgumentException, "is not authorized to") { return resource.RetryableError(err) } - // Retry for IAM eventual consistency - if isAWSErr(err, firehose.ErrCodeInvalidArgumentException, "Please make sure the role specified in VpcConfiguration has permissions") { + + if tfawserr.ErrMessageContains(err, firehose.ErrCodeInvalidArgumentException, "Please make sure the role specified in VpcConfiguration has permissions") { return resource.RetryableError(err) } + // InvalidArgumentException: Verify that the IAM role has access to the ElasticSearch domain. 
- if isAWSErr(err, firehose.ErrCodeInvalidArgumentException, "Verify that the IAM role has access") { + if tfawserr.ErrMessageContains(err, firehose.ErrCodeInvalidArgumentException, "Verify that the IAM role has access") { return resource.RetryableError(err) } - // IAM roles can take ~10 seconds to propagate in AWS: - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console - if isAWSErr(err, firehose.ErrCodeInvalidArgumentException, "Firehose is unable to assume role") { - log.Printf("[DEBUG] Firehose could not assume role referenced, retrying...") + + if tfawserr.ErrMessageContains(err, firehose.ErrCodeInvalidArgumentException, "Firehose is unable to assume role") { return resource.RetryableError(err) } - // Not retryable + return resource.NonRetryableError(err) } diff --git a/aws/resource_aws_kinesis_firehose_delivery_stream_test.go b/aws/resource_aws_kinesis_firehose_delivery_stream_test.go index 2b5b611a684..0eeb7ec0565 100644 --- a/aws/resource_aws_kinesis_firehose_delivery_stream_test.go +++ b/aws/resource_aws_kinesis_firehose_delivery_stream_test.go @@ -1833,11 +1833,21 @@ resource "aws_iam_role_policy" "firehose" { "Sid": "GlueAccess", "Effect": "Allow", "Action": [ + "glue:GetTable", + "glue:GetTableVersion", "glue:GetTableVersions" ], "Resource": [ "*" ] + }, + { + "Sid": "LakeFormationDataAccess", + "Effect": "Allow", + "Action": [ + "lakeformation:GetDataAccess" + ], + "Resource": "*" } ] } @@ -2226,6 +2236,16 @@ resource "aws_glue_catalog_table" "test" { } } +resource "aws_lakeformation_permissions" "test" { + permissions = ["ALL"] + principal = aws_iam_role.firehose.arn + + table { + database_name = aws_glue_catalog_database.test.name + name = aws_glue_catalog_table.test.name + } +} + resource "aws_kinesis_firehose_delivery_stream" "test" { destination = "extended_s3" name = %[1]q @@ -2259,7 +2279,7 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { } } - depends_on = [aws_iam_role_policy.firehose] + depends_on = [aws_iam_role_policy.firehose, aws_lakeformation_permissions.test] } `, rName, enabled) } @@ -2282,6 +2302,16 @@ resource "aws_glue_catalog_table" "test" { } } +resource "aws_lakeformation_permissions" "test" { + permissions = ["ALL"] + principal = aws_iam_role.firehose.arn + + table { + database_name = aws_glue_catalog_database.test.name + name = aws_glue_catalog_table.test.name + } +} + resource "aws_kinesis_firehose_delivery_stream" "test" { destination = "extended_s3" name = %[1]q @@ -2313,7 +2343,7 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { } } - depends_on = [aws_iam_role_policy.firehose] + depends_on = [aws_iam_role_policy.firehose, aws_lakeformation_permissions.test] } `, rName) } @@ -2350,6 +2380,16 @@ resource "aws_glue_catalog_table" "test" { } } +resource "aws_lakeformation_permissions" "test" { + permissions = ["ALL"] + principal = aws_iam_role.firehose.arn + + table { + database_name = aws_glue_catalog_database.test.name + name = aws_glue_catalog_table.test.name + } +} + resource "aws_kinesis_firehose_delivery_stream" "test" { destination = "extended_s3" name = %[1]q @@ -2381,7 +2421,7 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { } } - depends_on = [aws_iam_role_policy.firehose] + depends_on = [aws_iam_role_policy.firehose, aws_lakeformation_permissions.test] } `, rName) } @@ -2404,6 +2444,16 @@ resource "aws_glue_catalog_table" "test" { } } +resource "aws_lakeformation_permissions" "test" { + permissions = ["ALL"] + principal = 
aws_iam_role.firehose.arn + + table { + database_name = aws_glue_catalog_database.test.name + name = aws_glue_catalog_table.test.name + } +} + resource "aws_kinesis_firehose_delivery_stream" "test" { destination = "extended_s3" name = %[1]q @@ -2435,7 +2485,7 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { } } - depends_on = [aws_iam_role_policy.firehose] + depends_on = [aws_iam_role_policy.firehose, aws_lakeformation_permissions.test] } `, rName) } @@ -2458,6 +2508,16 @@ resource "aws_glue_catalog_table" "test" { } } +resource "aws_lakeformation_permissions" "test" { + permissions = ["ALL"] + principal = aws_iam_role.firehose.arn + + table { + database_name = aws_glue_catalog_database.test.name + name = aws_glue_catalog_table.test.name + } +} + resource "aws_kinesis_firehose_delivery_stream" "test" { destination = "extended_s3" name = %[1]q @@ -2489,7 +2549,7 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { } } - depends_on = [aws_iam_role_policy.firehose] + depends_on = [aws_iam_role_policy.firehose, aws_lakeformation_permissions.test] } `, rName) } From 0b13448a71cff4e92b6e0cff59c121baa974d10b Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Fri, 29 Jan 2021 13:07:53 -0500 Subject: [PATCH 0934/1212] tests/resource/aws_lakeformation_permissions: Add test for table wildcard permissions (#17316) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/17300 Output from acceptance testing: ``` --- PASS: TestAccAWSLakeFormation_serial/Permissions/tableWildcard (25.69s) ``` --- ...urce_aws_lakeformation_permissions_test.go | 77 ++++++++++++++++++- aws/resource_aws_lakeformation_test.go | 3 +- 2 files changed, 76 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_lakeformation_permissions_test.go b/aws/resource_aws_lakeformation_permissions_test.go index 25db4a1ab59..023beac6d70 100644 --- a/aws/resource_aws_lakeformation_permissions_test.go +++ b/aws/resource_aws_lakeformation_permissions_test.go @@ -95,7 +95,7 @@ func testAccAWSLakeFormationPermissions_database(t *testing.T) { }) } -func testAccAWSLakeFormationPermissions_table(t *testing.T) { +func testAccAWSLakeFormationPermissions_table_name(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_lakeformation_permissions.test" roleName := "aws_iam_role.test" @@ -107,7 +107,7 @@ func testAccAWSLakeFormationPermissions_table(t *testing.T) { CheckDestroy: testAccCheckAWSLakeFormationPermissionsDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSLakeFormationPermissionsConfig_table(rName), + Config: testAccAWSLakeFormationPermissionsConfig_table_name(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSLakeFormationPermissionsExists(resourceName), resource.TestCheckResourceAttrPair(roleName, "arn", resourceName, "principal"), @@ -124,6 +124,29 @@ func testAccAWSLakeFormationPermissions_table(t *testing.T) { }) } +func testAccAWSLakeFormationPermissions_table_wildcard(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_lakeformation_permissions.test" + databaseResourceName := "aws_glue_catalog_database.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(lakeformation.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLakeFormationPermissionsDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLakeFormationPermissionsConfig_table_wildcard(rName), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckAWSLakeFormationPermissionsExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "table.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "table.0.database_name", databaseResourceName, "name"), + resource.TestCheckResourceAttr(resourceName, "table.0.wildcard", "true"), + ), + }, + }, + }) +} + func testAccAWSLakeFormationPermissions_tableWithColumns(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_lakeformation_permissions.test" @@ -474,7 +497,7 @@ resource "aws_lakeformation_permissions" "test" { `, rName) } -func testAccAWSLakeFormationPermissionsConfig_table(rName string) string { +func testAccAWSLakeFormationPermissionsConfig_table_name(rName string) string { return fmt.Sprintf(` data "aws_partition" "current" {} @@ -526,6 +549,54 @@ resource "aws_lakeformation_permissions" "test" { `, rName) } +func testAccAWSLakeFormationPermissionsConfig_table_wildcard(rName string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_iam_role" "test" { + name = %[1]q + path = "/" + + assume_role_policy = < Date: Fri, 29 Jan 2021 18:09:21 +0000 Subject: [PATCH 0935/1212] Update CHANGELOG.md for #17316 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c98d1d64b51..d376fcc2ad9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ BUG FIXES: * resource/aws_glacier_vault: Prevent crash with `GetVaultAccessPolicy` API errors ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645)) * resource/aws_glacier_vault: Properly remove from state when resource does not exist ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645)) +* resource/aws_kinesis_firehose_delivery_stream: Use standard retry timeout for IAM eventual consistency and retry on LakeFormation access errors ([#17254](https://github.com/hashicorp/terraform-provider-aws/issues/17254)) ## 3.26.0 (January 28, 2021) From bd4b5924b6684d8ca59b936aac5d2fcc275b1c81 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Fri, 29 Jan 2021 13:13:12 -0500 Subject: [PATCH 0936/1212] tests/data-source/aws_pricing_product: Adjust redshift query precision (#17249) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16720 Previously: ``` === CONT TestAccDataSourceAwsPricingProduct_redshift TestAccDataSourceAwsPricingProduct_redshift: data_source_aws_pricing_product_test.go:29: Step 1/1 error: Error running pre-apply refresh: Error: Pricing product query not precise enough. Returned more than one element: .... 
``` Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccDataSourceAwsPricingProduct_redshift (8.55s) ``` Output from acceptance testing in AWS GovCloud (US): ``` === CONT TestAccDataSourceAwsPricingProduct_redshift provider_test.go:697: skipping tests; partition aws-us-gov does not support api.pricing service --- SKIP: TestAccDataSourceAwsPricingProduct_redshift (1.38s) ``` --- aws/data_source_aws_pricing_product_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/aws/data_source_aws_pricing_product_test.go b/aws/data_source_aws_pricing_product_test.go index 3c4f0b5ee50..46ea0cded56 100644 --- a/aws/data_source_aws_pricing_product_test.go +++ b/aws/data_source_aws_pricing_product_test.go @@ -114,6 +114,11 @@ data "aws_pricing_product" "test" { field = "location" value = data.aws_region.current.description } + + filters { + field = "productFamily" + value = "Compute Instance" + } } `) } From edb9d29326b8327bdc7fb0b2964774e983837f53 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Fri, 22 Jan 2021 16:59:14 -0800 Subject: [PATCH 0937/1212] Add protocol_version Add `protocol_version` to `lb_target_group` resource and data source. --- aws/data_source_aws_lb_target_group.go | 5 ++ aws/data_source_aws_lb_target_group_test.go | 1 + aws/resource_aws_lb_target_group.go | 24 +++++- aws/resource_aws_lb_target_group_test.go | 89 +++++++++++++++++++++ 4 files changed, 118 insertions(+), 1 deletion(-) diff --git a/aws/data_source_aws_lb_target_group.go b/aws/data_source_aws_lb_target_group.go index 0b2b94a3211..b22e4bc98db 100644 --- a/aws/data_source_aws_lb_target_group.go +++ b/aws/data_source_aws_lb_target_group.go @@ -40,6 +40,11 @@ func dataSourceAwsLbTargetGroup() *schema.Resource { Computed: true, }, + "protocol_version": { + Type: schema.TypeString, + Computed: true, + }, + "vpc_id": { Type: schema.TypeString, Computed: true, diff --git a/aws/data_source_aws_lb_target_group_test.go b/aws/data_source_aws_lb_target_group_test.go index 561c52666bf..56f00d7b7c6 100644 --- a/aws/data_source_aws_lb_target_group_test.go +++ b/aws/data_source_aws_lb_target_group_test.go @@ -26,6 +26,7 @@ func TestAccDataSourceAWSALBTargetGroup_basic(t *testing.T) { resource.TestCheckResourceAttrSet(resourceNameArn, "arn_suffix"), resource.TestCheckResourceAttr(resourceNameArn, "port", "8080"), resource.TestCheckResourceAttr(resourceNameArn, "protocol", "HTTP"), + resource.TestCheckResourceAttr(resourceNameArn, "protocol_version", "HTTP1"), resource.TestCheckResourceAttrSet(resourceNameArn, "vpc_id"), resource.TestCheckResourceAttrSet(resourceNameArn, "load_balancing_algorithm_type"), resource.TestCheckResourceAttr(resourceNameArn, "deregistration_delay", "300"), diff --git a/aws/resource_aws_lb_target_group.go b/aws/resource_aws_lb_target_group.go index 2103b85b3a0..cfaf1bade91 100644 --- a/aws/resource_aws_lb_target_group.go +++ b/aws/resource_aws_lb_target_group.go @@ -72,6 +72,20 @@ func resourceAwsLbTargetGroup() *schema.Resource { ValidateFunc: validation.StringInSlice(elbv2.ProtocolEnum_Values(), true), }, + "protocol_version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + StateFunc: func(v interface{}) string { + return strings.ToUpper(v.(string)) + }, + ValidateFunc: validation.StringInSlice([]string{ + "HTTP1", + "HTTP2", + "GRPC", + }, true), + }, + "vpc_id": { Type: schema.TypeString, Optional: true, @@ -294,6 +308,10 @@ func resourceAwsLbTargetGroupCreate(d *schema.ResourceData, meta interface{}) er } params.Port = aws.Int64(int64(d.Get("port").(int))) 
params.Protocol = aws.String(d.Get("protocol").(string)) + switch d.Get("protocol").(string) { + case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: + params.ProtocolVersion = aws.String(d.Get("protocol_version").(string)) + } params.VpcId = aws.String(d.Get("vpc_id").(string)) } @@ -632,6 +650,10 @@ func flattenAwsLbTargetGroupResource(d *schema.ResourceData, meta interface{}, t d.Set("port", targetGroup.Port) d.Set("protocol", targetGroup.Protocol) } + switch d.Get("protocol").(string) { + case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: + d.Set("protocol_version", targetGroup.ProtocolVersion) + } if err := d.Set("health_check", []interface{}{healthCheck}); err != nil { return fmt.Errorf("error setting health_check: %s", err) @@ -727,7 +749,7 @@ func flattenAwsLbTargetGroupStickiness(d *schema.ResourceData, attributes []*elb func resourceAwsLbTargetGroupCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { protocol := diff.Get("protocol").(string) - // Network Load Balancers have many special qwirks to them. + // Network Load Balancers have many special quirks to them. // See http://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_CreateTargetGroup.html if healthChecks := diff.Get("health_check").([]interface{}); len(healthChecks) == 1 { healthCheck := healthChecks[0].(map[string]interface{}) diff --git a/aws/resource_aws_lb_target_group_test.go b/aws/resource_aws_lb_target_group_test.go index 34e4f3dfc61..171b5897b25 100644 --- a/aws/resource_aws_lb_target_group_test.go +++ b/aws/resource_aws_lb_target_group_test.go @@ -110,6 +110,7 @@ func TestAccAWSLBTargetGroup_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "name", targetGroupName), resource.TestCheckResourceAttr(resourceName, "port", "443"), resource.TestCheckResourceAttr(resourceName, "protocol", "HTTPS"), + resource.TestCheckResourceAttr(resourceName, "protocol_version", "HTTP1"), resource.TestCheckResourceAttrSet(resourceName, "vpc_id"), resource.TestCheckResourceAttr(resourceName, "deregistration_delay", "200"), resource.TestCheckResourceAttr(resourceName, "slow_start", "0"), @@ -166,6 +167,51 @@ func TestAccAWSLBTargetGroup_basicUdp(t *testing.T) { }) } +func TestAccAWSLBTargetGroup_ProtocolVersion(t *testing.T) { + var conf elbv2.TargetGroup + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) + resourceName := "aws_lb_target_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: resourceName, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLBTargetGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLBTargetGroupConfig_ProtocolVersion(targetGroupName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSLBTargetGroupExists(resourceName, &conf), + resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "name", targetGroupName), + resource.TestCheckResourceAttr(resourceName, "port", "443"), + resource.TestCheckResourceAttr(resourceName, "protocol", "HTTPS"), + resource.TestCheckResourceAttr(resourceName, "protocol_version", "HTTP2"), + resource.TestCheckResourceAttrSet(resourceName, "vpc_id"), + resource.TestCheckResourceAttr(resourceName, "deregistration_delay", "200"), + resource.TestCheckResourceAttr(resourceName, "slow_start", "0"), + resource.TestCheckResourceAttr(resourceName, "stickiness.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.enabled", 
"true"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.type", "lb_cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_duration", "10000"), + resource.TestCheckResourceAttr(resourceName, "health_check.#", "1"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.path", "/health"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.interval", "60"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.port", "8081"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.protocol", "HTTP"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.timeout", "3"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.healthy_threshold", "3"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.unhealthy_threshold", "3"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.matcher", "200-299"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", "TestAccAWSLBTargetGroup_basic"), + ), + }, + }, + }) +} + func TestAccAWSLBTargetGroup_withoutHealthcheck(t *testing.T) { var conf elbv2.TargetGroup targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandString(10)) @@ -1466,6 +1512,49 @@ resource "aws_vpc" "test" { `, targetGroupName) } +func testAccAWSLBTargetGroupConfig_ProtocolVersion(targetGroupName string) string { + return fmt.Sprintf(` +resource "aws_lb_target_group" "test" { + name = "%s" + port = 443 + protocol = "HTTPS" + protocol_version = "HTTP2" + vpc_id = aws_vpc.test.id + + deregistration_delay = 200 + slow_start = 0 + + stickiness { + type = "lb_cookie" + cookie_duration = 10000 + } + + health_check { + path = "/health" + interval = 60 + port = 8081 + protocol = "HTTP" + timeout = 3 + healthy_threshold = 3 + unhealthy_threshold = 3 + matcher = "200-299" + } + + tags = { + Name = "TestAccAWSLBTargetGroup_basic" + } +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = "terraform-testacc-lb-target-group-basic" + } +} +`, targetGroupName) +} + func testAccAWSLBTargetGroupConfigProtocolGeneve(rName string) string { return fmt.Sprintf(` resource "aws_vpc" "test" { From 29ba172a2d84dcb951f289444d5d851e25e7485e Mon Sep 17 00:00:00 2001 From: bill-rich Date: Fri, 22 Jan 2021 17:08:31 -0800 Subject: [PATCH 0938/1212] protocol_version documentation --- website/docs/r/lb_target_group.html.markdown | 1 + 1 file changed, 1 insertion(+) diff --git a/website/docs/r/lb_target_group.html.markdown b/website/docs/r/lb_target_group.html.markdown index e41d4f980c9..8e9a37e23f2 100644 --- a/website/docs/r/lb_target_group.html.markdown +++ b/website/docs/r/lb_target_group.html.markdown @@ -63,6 +63,7 @@ The following arguments are supported: * `port` - (Optional, Forces new resource) The port on which targets receive traffic, unless overridden when registering a specific target. Required when `target_type` is `instance` or `ip`. Does not apply when `target_type` is `lambda`. * `protocol` - (Optional, Forces new resource) The protocol to use for routing traffic to the targets. Should be one of `GENEVE`, `HTTP`, `HTTPS`, `TCP`, `TCP_UDP`, `TLS`, or `UDP`. Required when `target_type` is `instance` or `ip`. Does not apply when `target_type` is `lambda`. +* `protocol_version` - (Optional, Forces new resource) Only applicable when `protocol` is `HTTP` or `HTTPS`. The protocol version. 
Specify GRPC to send requests to targets using gRPC. Specify HTTP2 to send requests to targets using HTTP/2. The default is HTTP1, which sends requests to targets using HTTP/1.1 * `vpc_id` - (Optional, Forces new resource) The identifier of the VPC in which to create the target group. Required when `target_type` is `instance` or `ip`. Does not apply when `target_type` is `lambda`. * `deregistration_delay` - (Optional) The amount time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. The range is 0-3600 seconds. The default value is 300 seconds. * `slow_start` - (Optional) The amount time for targets to warm up before the load balancer sends them a full share of requests. The range is 30-900 seconds or 0 to disable. The default value is 0 seconds. From cf1dea4b2b940014b8ec5463947e6bdc66025959 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Fri, 29 Jan 2021 13:23:47 -0500 Subject: [PATCH 0939/1212] resource/aws_iam_access_key: Add `create_date` attribute (#17318) * resource/aws_iam_access_key: Add `create_date` attribute Reference: https://github.com/hashicorp/terraform-provider-aws/issues/17294 Output from acceptance testing: ``` --- PASS: TestAccAWSAccessKey_basic (9.80s) --- PASS: TestAccAWSAccessKey_encrypted (10.33s) --- PASS: TestAccAWSAccessKey_inactive (16.93s) ``` * Update CHANGELOG for #17318 --- .changelog/17318.txt | 3 +++ aws/resource_aws_iam_access_key.go | 21 +++++++++++++++------ aws/resource_aws_iam_access_key_test.go | 1 + website/docs/r/iam_access_key.html.markdown | 1 + 4 files changed, 20 insertions(+), 6 deletions(-) create mode 100644 .changelog/17318.txt diff --git a/.changelog/17318.txt b/.changelog/17318.txt new file mode 100644 index 00000000000..224a622a45d --- /dev/null +++ b/.changelog/17318.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_iam_access_key: Add `create_date` attribute +``` diff --git a/aws/resource_aws_iam_access_key.go b/aws/resource_aws_iam_access_key.go index e402f1834a6..71a7e38ecc0 100644 --- a/aws/resource_aws_iam_access_key.go +++ b/aws/resource_aws_iam_access_key.go @@ -5,6 +5,7 @@ import ( "crypto/sha256" "encoding/base64" "fmt" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -51,7 +52,7 @@ func resourceAwsIamAccessKey() *schema.Resource { ForceNew: true, Optional: true, }, - "key_fingerprint": { + "create_date": { Type: schema.TypeString, Computed: true, }, @@ -59,6 +60,10 @@ func resourceAwsIamAccessKey() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "key_fingerprint": { + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -148,12 +153,16 @@ func resourceAwsIamAccessKeyRead(d *schema.ResourceData, meta interface{}) error func resourceAwsIamAccessKeyReadResult(d *schema.ResourceData, key *iam.AccessKeyMetadata) error { d.SetId(aws.StringValue(key.AccessKeyId)) - if err := d.Set("user", key.UserName); err != nil { - return err - } - if err := d.Set("status", key.Status); err != nil { - return err + + if key.CreateDate != nil { + d.Set("create_date", aws.TimeValue(key.CreateDate).Format(time.RFC3339)) + } else { + d.Set("create_date", nil) } + + d.Set("status", key.Status) + d.Set("user", key.UserName) + return nil } diff --git a/aws/resource_aws_iam_access_key_test.go b/aws/resource_aws_iam_access_key_test.go index 8edcabbe003..87f12fa0d64 100644 --- a/aws/resource_aws_iam_access_key_test.go +++ b/aws/resource_aws_iam_access_key_test.go @@ -30,6 +30,7 @@ func TestAccAWSAccessKey_basic(t *testing.T) { 
Check: resource.ComposeTestCheckFunc( testAccCheckAWSAccessKeyExists("aws_iam_access_key.a_key", &conf), testAccCheckAWSAccessKeyAttributes(&conf, "Active"), + testAccCheckResourceAttrRfc3339("aws_iam_access_key.a_key", "create_date"), resource.TestCheckResourceAttrSet("aws_iam_access_key.a_key", "secret"), ), }, diff --git a/website/docs/r/iam_access_key.html.markdown b/website/docs/r/iam_access_key.html.markdown index 117b59135c2..b1b813b92d6 100644 --- a/website/docs/r/iam_access_key.html.markdown +++ b/website/docs/r/iam_access_key.html.markdown @@ -78,6 +78,7 @@ Valid values are `Active` and `Inactive`. In addition to all arguments above, the following attributes are exported: +* `create_date` - Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the access key was created. * `id` - The access key ID. * `user` - The IAM user associated with this access key. * `key_fingerprint` - The fingerprint of the PGP key used to encrypt From 57fcdbe805c32a30d1c68d257b07921cafbc8601 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Fri, 29 Jan 2021 18:25:26 +0000 Subject: [PATCH 0940/1212] Update CHANGELOG.md for #17318 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d376fcc2ad9..5d7ae9a8a17 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ ENHANCEMENTS: * data-source/aws_subnet: Add `customer_owned_ipv4_pool` and `map_customer_owned_ip_on_launch` attributes ([#16676](https://github.com/hashicorp/terraform-provider-aws/issues/16676)) * resource/aws_glacier_vault: Add plan-time validation for `notification` configuration block `events` and `sns_topic_arn` arguments ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645)) +* resource/aws_iam_access_key: Add `create_date` attribute ([#17318](https://github.com/hashicorp/terraform-provider-aws/issues/17318)) * resource/aws_subnet: Add `customer_owned_ipv4_pool` and `map_customer_owned_ip_on_launch` attributes ([#16676](https://github.com/hashicorp/terraform-provider-aws/issues/16676)) BUG FIXES: From d976d45331b407cd129e548afdbfb2e33962c196 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Fri, 29 Jan 2021 13:26:37 -0500 Subject: [PATCH 0941/1212] resource/aws_iam_access_key: Support resource import (#17321) * resource/aws_iam_access_key: Support resource import Reference: https://github.com/hashicorp/terraform-provider-aws/issues/17288 Output from acceptance testing: ``` --- PASS: TestAccAWSAccessKey_encrypted (12.92s) --- PASS: TestAccAWSAccessKey_basic (12.92s) --- PASS: TestAccAWSAccessKey_inactive (19.64s) ``` * Update CHANGELOG for #17321 --- .changelog/17321.txt | 3 +++ aws/resource_aws_iam_access_key.go | 27 ++++++++++++++++++++ aws/resource_aws_iam_access_key_test.go | 18 +++++++++++++ website/docs/r/iam_access_key.html.markdown | 28 ++++++++++----------- 4 files changed, 62 insertions(+), 14 deletions(-) create mode 100644 .changelog/17321.txt diff --git a/.changelog/17321.txt b/.changelog/17321.txt new file mode 100644 index 00000000000..6cb1e24b277 --- /dev/null +++ b/.changelog/17321.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_iam_access_key: Support resource import +``` diff --git a/aws/resource_aws_iam_access_key.go b/aws/resource_aws_iam_access_key.go index 71a7e38ecc0..2a77f6c1270 100644 --- a/aws/resource_aws_iam_access_key.go +++ b/aws/resource_aws_iam_access_key.go @@ -22,6 +22,33 @@ func resourceAwsIamAccessKey() *schema.Resource { Update: resourceAwsIamAccessKeyUpdate, Delete: 
resourceAwsIamAccessKeyDelete, + Importer: &schema.ResourceImporter{ + // ListAccessKeys requires UserName field in certain scenarios: + // ValidationError: Must specify userName when calling with non-User credentials + // To prevent import from requiring this extra information, use GetAccessKeyLastUsed. + State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + conn := meta.(*AWSClient).iamconn + + input := &iam.GetAccessKeyLastUsedInput{ + AccessKeyId: aws.String(d.Id()), + } + + output, err := conn.GetAccessKeyLastUsed(input) + + if err != nil { + return nil, fmt.Errorf("error fetching IAM Access Key (%s) username via GetAccessKeyLastUsed: %w", d.Id(), err) + } + + if output == nil || output.UserName == nil { + return nil, fmt.Errorf("error fetching IAM Access Key (%s) username via GetAccessKeyLastUsed: empty response", d.Id()) + } + + d.Set("user", output.UserName) + + return []*schema.ResourceData{d}, nil + }, + }, + Schema: map[string]*schema.Schema{ "user": { Type: schema.TypeString, diff --git a/aws/resource_aws_iam_access_key_test.go b/aws/resource_aws_iam_access_key_test.go index 87f12fa0d64..18308e25da8 100644 --- a/aws/resource_aws_iam_access_key_test.go +++ b/aws/resource_aws_iam_access_key_test.go @@ -34,6 +34,12 @@ func TestAccAWSAccessKey_basic(t *testing.T) { resource.TestCheckResourceAttrSet("aws_iam_access_key.a_key", "secret"), ), }, + { + ResourceName: "aws_iam_access_key.a_key", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"encrypted_secret", "key_fingerprint", "pgp_key", "secret", "ses_smtp_password_v4"}, + }, }, }) } @@ -61,6 +67,12 @@ func TestAccAWSAccessKey_encrypted(t *testing.T) { "aws_iam_access_key.a_key", "key_fingerprint"), ), }, + { + ResourceName: "aws_iam_access_key.a_key", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"encrypted_secret", "key_fingerprint", "pgp_key", "secret", "ses_smtp_password_v4"}, + }, }, }) } @@ -82,6 +94,12 @@ func TestAccAWSAccessKey_inactive(t *testing.T) { resource.TestCheckResourceAttrSet("aws_iam_access_key.a_key", "secret"), ), }, + { + ResourceName: "aws_iam_access_key.a_key", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"encrypted_secret", "key_fingerprint", "pgp_key", "secret", "ses_smtp_password_v4"}, + }, { Config: testAccAWSAccessKeyConfig_inactive(rName), Check: resource.ComposeTestCheckFunc( diff --git a/website/docs/r/iam_access_key.html.markdown b/website/docs/r/iam_access_key.html.markdown index b1b813b92d6..32f83bf5009 100644 --- a/website/docs/r/iam_access_key.html.markdown +++ b/website/docs/r/iam_access_key.html.markdown @@ -81,17 +81,17 @@ In addition to all arguments above, the following attributes are exported: * `create_date` - Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the access key was created. * `id` - The access key ID. * `user` - The IAM user associated with this access key. -* `key_fingerprint` - The fingerprint of the PGP key used to encrypt - the secret -* `secret` - The secret access key. Note that this will be written -to the state file. If you use this, please protect your backend state file -judiciously. Alternatively, you may supply a `pgp_key` instead, which will -prevent the secret from being stored in plaintext, at the cost of preventing -the use of the secret key in automation. -* `encrypted_secret` - The encrypted secret, base64 encoded, if `pgp_key` was specified. 
-~> **NOTE:** The encrypted secret may be decrypted using the command line, - for example: `terraform output encrypted_secret | base64 --decode | keybase pgp decrypt`. -* `ses_smtp_password_v4` - The secret access key converted into an SES SMTP - password by applying [AWS's documented Sigv4 conversion - algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert). - As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region) +* `key_fingerprint` - The fingerprint of the PGP key used to encrypt the secret. This attribute is not available for imported resources. +* `secret` - The secret access key. This attribute is not available for imported resources. Note that this will be written to the state file. If you use this, please protect your backend state file judiciously. Alternatively, you may supply a `pgp_key` instead, which will prevent the secret from being stored in plaintext, at the cost of preventing the use of the secret key in automation. +* `encrypted_secret` - The encrypted secret, base64 encoded, if `pgp_key` was specified. This attribute is not available for imported resources. The encrypted secret may be decrypted using the command line, for example: `terraform output -raw encrypted_secret | base64 --decode | keybase pgp decrypt`. +* `ses_smtp_password_v4` - The secret access key converted into an SES SMTP password by applying [AWS's documented Sigv4 conversion algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert). This attribute is not available for imported resources. As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region). + +## Import + +IAM Access Keys can be imported using the identifier, e.g. + +``` +$ terraform import aws_iam_access_key.example AKIA1234567890 +``` + +Resource attributes such as `encrypted_secret`, `key_fingerprint`, `pgp_key`, `secret`, and `ses_smtp_password_v4` are not available for imported resources as this information cannot be read from the IAM API. 
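For readers following the `pgp_key` and import discussion above, a minimal configuration exercising that workflow might look like the following sketch. The Keybase username is illustrative only; any base64-encoded PGP public key can be supplied instead.

```hcl
resource "aws_iam_user" "example" {
  name = "example"
}

resource "aws_iam_access_key" "example" {
  user    = aws_iam_user.example.name
  pgp_key = "keybase:some_person_that_exists" # illustrative recipient; a raw base64 PGP public key also works
}

# Only the encrypted form of the secret is kept in state when pgp_key is set.
output "encrypted_secret" {
  value = aws_iam_access_key.example.encrypted_secret
}
```

The encrypted value can then be recovered with the `terraform output -raw encrypted_secret | base64 --decode | keybase pgp decrypt` pipeline shown in the attribute documentation above.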
From f8257400944b4ecb097f78348c0bc7cfaa075d29 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Fri, 29 Jan 2021 18:28:20 +0000 Subject: [PATCH 0942/1212] Update CHANGELOG.md for #17321 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5d7ae9a8a17..bff31b8e13d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ ENHANCEMENTS: * data-source/aws_subnet: Add `customer_owned_ipv4_pool` and `map_customer_owned_ip_on_launch` attributes ([#16676](https://github.com/hashicorp/terraform-provider-aws/issues/16676)) * resource/aws_glacier_vault: Add plan-time validation for `notification` configuration block `events` and `sns_topic_arn` arguments ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645)) * resource/aws_iam_access_key: Add `create_date` attribute ([#17318](https://github.com/hashicorp/terraform-provider-aws/issues/17318)) +* resource/aws_iam_access_key: Support resource import ([#17321](https://github.com/hashicorp/terraform-provider-aws/issues/17321)) * resource/aws_subnet: Add `customer_owned_ipv4_pool` and `map_customer_owned_ip_on_launch` attributes ([#16676](https://github.com/hashicorp/terraform-provider-aws/issues/16676)) BUG FIXES: From 11525f3c10326147d538203dd97903d05a0e8c07 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Fri, 29 Jan 2021 13:42:59 -0500 Subject: [PATCH 0943/1212] resource/aws_iam_access_key: Ensure Inactive status is properly configured during resource creation (#17322) * resource/aws_iam_access_key: Ensure Inactive status is properly configured during resource creation Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16818 Previously before code updates: ``` === CONT TestAccAWSAccessKey_Status resource_aws_iam_access_key_test.go:71: Step 1/3 error: Check failed: Check 2/2 error: aws_iam_access_key.a_key: Attribute 'status' expected "Inactive", got "Active" --- FAIL: TestAccAWSAccessKey_Status (5.97s) ``` Output from acceptance testing: ``` --- PASS: TestAccAWSAccessKey_basic (8.96s) --- PASS: TestAccAWSAccessKey_encrypted (9.13s) --- PASS: TestAccAWSAccessKey_Status (27.74s) ``` * Update CHANGELOG for #17322 --- .changelog/17322.txt | 3 +++ aws/resource_aws_iam_access_key.go | 16 +++++++++++++++ aws/resource_aws_iam_access_key_test.go | 27 +++++++++++++++---------- 3 files changed, 35 insertions(+), 11 deletions(-) create mode 100644 .changelog/17322.txt diff --git a/.changelog/17322.txt b/.changelog/17322.txt new file mode 100644 index 00000000000..a6ff7c2cda1 --- /dev/null +++ b/.changelog/17322.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_iam_access_key: Ensure `Inactive` `status` is properly configured during resource creation +``` diff --git a/aws/resource_aws_iam_access_key.go b/aws/resource_aws_iam_access_key.go index 2a77f6c1270..d8bcd7d5054 100644 --- a/aws/resource_aws_iam_access_key.go +++ b/aws/resource_aws_iam_access_key.go @@ -142,6 +142,22 @@ func resourceAwsIamAccessKeyCreate(d *schema.ResourceData, meta interface{}) err } d.Set("ses_smtp_password_v4", sesSMTPPasswordV4) + if v, ok := d.GetOk("status"); ok && v.(string) == iam.StatusTypeInactive { + input := &iam.UpdateAccessKeyInput{ + AccessKeyId: aws.String(d.Id()), + Status: aws.String(iam.StatusTypeInactive), + UserName: aws.String(d.Get("user").(string)), + } + + _, err := iamconn.UpdateAccessKey(input) + + if err != nil { + return fmt.Errorf("error deactivating IAM Access Key (%s): %w", d.Id(), err) + } + + createResp.AccessKey.Status = aws.String(iam.StatusTypeInactive) + } + 
return resourceAwsIamAccessKeyReadResult(d, &iam.AccessKeyMetadata{ AccessKeyId: createResp.AccessKey.AccessKeyId, CreateDate: createResp.AccessKey.CreateDate, diff --git a/aws/resource_aws_iam_access_key_test.go b/aws/resource_aws_iam_access_key_test.go index 18308e25da8..3881ab8ba4b 100644 --- a/aws/resource_aws_iam_access_key_test.go +++ b/aws/resource_aws_iam_access_key_test.go @@ -77,7 +77,7 @@ func TestAccAWSAccessKey_encrypted(t *testing.T) { }) } -func TestAccAWSAccessKey_inactive(t *testing.T) { +func TestAccAWSAccessKey_Status(t *testing.T) { var conf iam.AccessKeyMetadata rName := fmt.Sprintf("test-user-%d", acctest.RandInt()) @@ -87,11 +87,10 @@ func TestAccAWSAccessKey_inactive(t *testing.T) { CheckDestroy: testAccCheckAWSAccessKeyDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSAccessKeyConfig(rName), + Config: testAccAWSAccessKeyConfig_Status(rName, iam.StatusTypeInactive), Check: resource.ComposeTestCheckFunc( testAccCheckAWSAccessKeyExists("aws_iam_access_key.a_key", &conf), - testAccCheckAWSAccessKeyAttributes(&conf, "Active"), - resource.TestCheckResourceAttrSet("aws_iam_access_key.a_key", "secret"), + resource.TestCheckResourceAttr("aws_iam_access_key.a_key", "status", iam.StatusTypeInactive), ), }, { @@ -101,11 +100,17 @@ func TestAccAWSAccessKey_inactive(t *testing.T) { ImportStateVerifyIgnore: []string{"encrypted_secret", "key_fingerprint", "pgp_key", "secret", "ses_smtp_password_v4"}, }, { - Config: testAccAWSAccessKeyConfig_inactive(rName), + Config: testAccAWSAccessKeyConfig_Status(rName, iam.StatusTypeActive), Check: resource.ComposeTestCheckFunc( testAccCheckAWSAccessKeyExists("aws_iam_access_key.a_key", &conf), - testAccCheckAWSAccessKeyAttributes(&conf, "Inactive"), - resource.TestCheckResourceAttrSet("aws_iam_access_key.a_key", "secret"), + resource.TestCheckResourceAttr("aws_iam_access_key.a_key", "status", iam.StatusTypeActive), + ), + }, + { + Config: testAccAWSAccessKeyConfig_Status(rName, iam.StatusTypeInactive), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAccessKeyExists("aws_iam_access_key.a_key", &conf), + resource.TestCheckResourceAttr("aws_iam_access_key.a_key", "status", iam.StatusTypeInactive), ), }, }, @@ -241,17 +246,17 @@ EOF `, rName, key) } -func testAccAWSAccessKeyConfig_inactive(rName string) string { +func testAccAWSAccessKeyConfig_Status(rName string, status string) string { return fmt.Sprintf(` resource "aws_iam_user" "a_user" { - name = "%s" + name = %[1]q } resource "aws_iam_access_key" "a_key" { user = aws_iam_user.a_user.name - status = "Inactive" + status = %[2]q } -`, rName) +`, rName, status) } func TestSesSmtpPasswordFromSecretKeySigV4(t *testing.T) { From 191d252bf2438a3f9ee722ce77e6236a7c083715 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Fri, 29 Jan 2021 18:44:27 +0000 Subject: [PATCH 0944/1212] Update CHANGELOG.md for #17322 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bff31b8e13d..d65406a3ffc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ BUG FIXES: * resource/aws_glacier_vault: Prevent crash with `GetVaultAccessPolicy` API errors ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645)) * resource/aws_glacier_vault: Properly remove from state when resource does not exist ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645)) +* resource/aws_iam_access_key: Ensure `Inactive` `status` is properly configured during resource creation 
([#17322](https://github.com/hashicorp/terraform-provider-aws/issues/17322))
 * resource/aws_kinesis_firehose_delivery_stream: Use standard retry timeout for IAM eventual consistency and retry on LakeFormation access errors ([#17254](https://github.com/hashicorp/terraform-provider-aws/issues/17254))

 ## 3.26.0 (January 28, 2021)

From 806fd0ab4fea812aca46a1c79bf4049da5a77005 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Fri, 29 Jan 2021 15:16:23 -0500
Subject: [PATCH 0945/1212] r/aws_apigatewayv2_integration: Better
 documentation for 'integration_type'.

---
 website/docs/r/apigatewayv2_integration.html.markdown | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/website/docs/r/apigatewayv2_integration.html.markdown b/website/docs/r/apigatewayv2_integration.html.markdown
index 5cd30429334..52eb4fed94d 100644
--- a/website/docs/r/apigatewayv2_integration.html.markdown
+++ b/website/docs/r/apigatewayv2_integration.html.markdown
@@ -69,8 +69,8 @@ The following arguments are supported:

 * `api_id` - (Required) The API identifier.
 * `integration_type` - (Required) The integration type of an integration.
-Valid values: `AWS`, `AWS_PROXY`, `HTTP`, `HTTP_PROXY`, `MOCK`.
-* `connection_id` - (Optional) The ID of the VPC link for a private integration. Supported only for HTTP APIs. Must be between 1 and 1024 characters in length.
+Valid values: `AWS` (supported only for WebSocket APIs), `AWS_PROXY`, `HTTP` (supported only for WebSocket APIs), `HTTP_PROXY`, `MOCK` (supported only for WebSocket APIs).
+* `connection_id` - (Optional) The ID of the [VPC link](apigatewayv2_vpc_link.html) for a private integration. Supported only for HTTP APIs. Must be between 1 and 1024 characters in length.
 * `connection_type` - (Optional) The type of the network connection to the integration endpoint. Valid values: `INTERNET`, `VPC_LINK`. Default is `INTERNET`.
 * `content_handling_strategy` - (Optional) How to handle response payload content type conversions. Valid values: `CONVERT_TO_BINARY`, `CONVERT_TO_TEXT`. Supported only for WebSocket APIs.
 * `credentials_arn` - (Optional) The credentials required for the integration, if any.

From faf6cd8215c2093385018efe8ed121ce8dcfa344 Mon Sep 17 00:00:00 2001
From: John Patton
Date: Fri, 29 Jan 2021 17:39:10 -0500
Subject: [PATCH 0946/1212] resource/aws_vpc_endpoint_route_table_association:
 Update example import statement on website with more accurate route table id
 pattern

---
 .../docs/r/vpc_endpoint_route_table_association.html.markdown | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/docs/r/vpc_endpoint_route_table_association.html.markdown b/website/docs/r/vpc_endpoint_route_table_association.html.markdown
index 610769bf288..5e7a45695fd 100644
--- a/website/docs/r/vpc_endpoint_route_table_association.html.markdown
+++ b/website/docs/r/vpc_endpoint_route_table_association.html.markdown
@@ -39,5 +39,5 @@ VPC Endpoint Route Table Associations can be imported using `vpc_endpoint_id` together with `route_table_id`, e.g. 
``` -$ terraform import aws_vpc_endpoint_route_table_association.example vpce-aaaaaaaa/rt-bbbbbbbb +$ terraform import aws_vpc_endpoint_route_table_association.example vpce-aaaaaaaa/rtb-bbbbbbbb ``` From d4cd5e6325dbb48c022ab26895f8efee9116f65a Mon Sep 17 00:00:00 2001 From: Pradeep Bhadani Date: Sat, 30 Jan 2021 00:07:38 +0000 Subject: [PATCH 0947/1212] fix linked text --- website/docs/r/s3_bucket.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index 51c09d1d95b..35f53e078cd 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -10,7 +10,7 @@ description: |- Provides a S3 bucket resource. --> This functionality is for managing S3 in an AWS Partition. To manage [S3 on Outposts](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html), see the [`aws_s3control_bucket` resource](/docs/providers/aws/r/s3control_bucket.html). +-> This functionality is for managing S3 in an AWS Partition. To manage [S3 on Outposts](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html), see the [`aws_s3control_bucket`](/docs/providers/aws/r/s3control_bucket.html) resource. ## Example Usage From 24d70154beb51689401ef9b173cc2f81ee86f86d Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 30 Jan 2021 13:39:02 +0200 Subject: [PATCH 0948/1212] add new resource --- .../service/sagemaker/finder/finder.go | 19 ++ .../service/sagemaker/waiter/status.go | 44 ++- .../service/sagemaker/waiter/waiter.go | 69 ++++- aws/provider.go | 1 + ...ource_aws_sagemaker_model_package_group.go | 151 ++++++++++ ..._aws_sagemaker_model_package_group_test.go | 264 ++++++++++++++++++ 6 files changed, 526 insertions(+), 22 deletions(-) create mode 100644 aws/resource_aws_sagemaker_model_package_group.go create mode 100644 aws/resource_aws_sagemaker_model_package_group_test.go diff --git a/aws/internal/service/sagemaker/finder/finder.go b/aws/internal/service/sagemaker/finder/finder.go index f7d201d2bb6..3236aa4544d 100644 --- a/aws/internal/service/sagemaker/finder/finder.go +++ b/aws/internal/service/sagemaker/finder/finder.go @@ -24,6 +24,25 @@ func CodeRepositoryByName(conn *sagemaker.SageMaker, name string) (*sagemaker.De return output, nil } +// ModelPackageGroupByName returns the code repository corresponding to the specified name. +// Returns nil if no code repository is found. +func ModelPackageGroupByName(conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeModelPackageGroupOutput, error) { + input := &sagemaker.DescribeModelPackageGroupInput{ + ModelPackageGroupName: aws.String(name), + } + + output, err := conn.DescribeModelPackageGroup(input) + if err != nil { + return nil, err + } + + if output == nil { + return nil, nil + } + + return output, nil +} + // ImageByName returns the Image corresponding to the specified name. // Returns nil if no Image is found. 
func ImageByName(conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeImageOutput, error) { diff --git a/aws/internal/service/sagemaker/waiter/status.go b/aws/internal/service/sagemaker/waiter/status.go index 5a2b5cfb363..57b7eb68f3f 100644 --- a/aws/internal/service/sagemaker/waiter/status.go +++ b/aws/internal/service/sagemaker/waiter/status.go @@ -11,15 +11,16 @@ import ( ) const ( - SagemakerNotebookInstanceStatusNotFound = "NotFound" - SagemakerImageStatusNotFound = "NotFound" - SagemakerImageStatusFailed = "Failed" - SagemakerImageVersionStatusNotFound = "NotFound" - SagemakerImageVersionStatusFailed = "Failed" - SagemakerDomainStatusNotFound = "NotFound" - SagemakerFeatureGroupStatusNotFound = "NotFound" - SagemakerFeatureGroupStatusUnknown = "Unknown" - SagemakerUserProfileStatusNotFound = "NotFound" + SagemakerNotebookInstanceStatusNotFound = "NotFound" + SagemakerImageStatusNotFound = "NotFound" + SagemakerImageStatusFailed = "Failed" + SagemakerImageVersionStatusNotFound = "NotFound" + SagemakerImageVersionStatusFailed = "Failed" + SagemakerDomainStatusNotFound = "NotFound" + SagemakerFeatureGroupStatusNotFound = "NotFound" + SagemakerFeatureGroupStatusUnknown = "Unknown" + SagemakerUserProfileStatusNotFound = "NotFound" + SagemakerModelPackageGroupStatusNotFound = "NotFound" ) // NotebookInstanceStatus fetches the NotebookInstance and its Status @@ -47,6 +48,31 @@ func NotebookInstanceStatus(conn *sagemaker.SageMaker, notebookName string) reso } } +// ModelPackageGroupStatus fetches the ModelPackageGroup and its Status +func ModelPackageGroupStatus(conn *sagemaker.SageMaker, name string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + input := &sagemaker.DescribeModelPackageGroupInput{ + ModelPackageGroupName: aws.String(name), + } + + output, err := conn.DescribeModelPackageGroup(input) + + if tfawserr.ErrMessageContains(err, "ValidationException", "does not exist") { + return nil, SagemakerModelPackageGroupStatusNotFound, nil + } + + if err != nil { + return nil, sagemaker.ModelPackageGroupStatusFailed, err + } + + if output == nil { + return nil, SagemakerModelPackageGroupStatusNotFound, nil + } + + return output, aws.StringValue(output.ModelPackageGroupStatus), nil + } +} + // ImageStatus fetches the Image and its Status func ImageStatus(conn *sagemaker.SageMaker, name string) resource.StateRefreshFunc { return func() (interface{}, string, error) { diff --git a/aws/internal/service/sagemaker/waiter/waiter.go b/aws/internal/service/sagemaker/waiter/waiter.go index e1e03df43b4..c3618d16d08 100644 --- a/aws/internal/service/sagemaker/waiter/waiter.go +++ b/aws/internal/service/sagemaker/waiter/waiter.go @@ -8,19 +8,21 @@ import ( ) const ( - NotebookInstanceInServiceTimeout = 10 * time.Minute - NotebookInstanceStoppedTimeout = 10 * time.Minute - NotebookInstanceDeletedTimeout = 10 * time.Minute - ImageCreatedTimeout = 10 * time.Minute - ImageDeletedTimeout = 10 * time.Minute - ImageVersionCreatedTimeout = 10 * time.Minute - ImageVersionDeletedTimeout = 10 * time.Minute - DomainInServiceTimeout = 10 * time.Minute - DomainDeletedTimeout = 10 * time.Minute - FeatureGroupCreatedTimeout = 10 * time.Minute - FeatureGroupDeletedTimeout = 10 * time.Minute - UserProfileInServiceTimeout = 10 * time.Minute - UserProfileDeletedTimeout = 10 * time.Minute + NotebookInstanceInServiceTimeout = 10 * time.Minute + NotebookInstanceStoppedTimeout = 10 * time.Minute + NotebookInstanceDeletedTimeout = 10 * time.Minute + ModelPackageGroupCompletedTimeout = 10 * 
time.Minute
+	ModelPackageGroupDeletedTimeout   = 10 * time.Minute
+	ImageCreatedTimeout               = 10 * time.Minute
+	ImageDeletedTimeout               = 10 * time.Minute
+	ImageVersionCreatedTimeout        = 10 * time.Minute
+	ImageVersionDeletedTimeout        = 10 * time.Minute
+	DomainInServiceTimeout            = 10 * time.Minute
+	DomainDeletedTimeout              = 10 * time.Minute
+	FeatureGroupCreatedTimeout        = 10 * time.Minute
+	FeatureGroupDeletedTimeout        = 10 * time.Minute
+	UserProfileInServiceTimeout       = 10 * time.Minute
+	UserProfileDeletedTimeout         = 10 * time.Minute
 )

 // NotebookInstanceInService waits for a NotebookInstance to return InService
@@ -87,6 +89,47 @@ func NotebookInstanceDeleted(conn *sagemaker.SageMaker, notebookName string) (*s
 	return nil, err
 }

+// ModelPackageGroupCompleted waits for a ModelPackageGroup to return Completed
+func ModelPackageGroupCompleted(conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeModelPackageGroupOutput, error) {
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{
+			sagemaker.ModelPackageGroupStatusPending,
+			sagemaker.ModelPackageGroupStatusInProgress,
+		},
+		Target:  []string{sagemaker.ModelPackageGroupStatusCompleted},
+		Refresh: ModelPackageGroupStatus(conn, name),
+		Timeout: ModelPackageGroupCompletedTimeout,
+	}
+
+	outputRaw, err := stateConf.WaitForState()
+
+	if output, ok := outputRaw.(*sagemaker.DescribeModelPackageGroupOutput); ok {
+		return output, err
+	}
+
+	return nil, err
+}
+
+// ModelPackageGroupDeleted waits for a ModelPackageGroup to be deleted
+func ModelPackageGroupDeleted(conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeModelPackageGroupOutput, error) {
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{
+			sagemaker.ModelPackageGroupStatusDeleting,
+		},
+		Target:  []string{},
+		Refresh: ModelPackageGroupStatus(conn, name),
+		Timeout: ModelPackageGroupDeletedTimeout,
+	}
+
+	outputRaw, err := stateConf.WaitForState()
+
+	if output, ok := outputRaw.(*sagemaker.DescribeModelPackageGroupOutput); ok {
+		return output, err
+	}
+
+	return nil, err
+}
+
 // ImageCreated waits for a Image to return Created
 func ImageCreated(conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeImageOutput, error) {
 	stateConf := &resource.StateChangeConf{

diff --git a/aws/provider.go b/aws/provider.go
index 66a97c15473..1ba37addf07 100644
--- a/aws/provider.go
+++ b/aws/provider.go
@@ -888,6 +888,7 @@ func Provider() *schema.Provider {
 			"aws_sagemaker_image":         resourceAwsSagemakerImage(),
 			"aws_sagemaker_image_version": resourceAwsSagemakerImageVersion(),
 			"aws_sagemaker_model":         resourceAwsSagemakerModel(),
+			"aws_sagemaker_model_package_group": resourceAwsSagemakerModelPackageGroup(),
 			"aws_sagemaker_notebook_instance_lifecycle_configuration": resourceAwsSagemakerNotebookInstanceLifeCycleConfiguration(),
 			"aws_sagemaker_notebook_instance": resourceAwsSagemakerNotebookInstance(),
 			"aws_sagemaker_user_profile": resourceAwsSagemakerUserProfile(),

diff --git a/aws/resource_aws_sagemaker_model_package_group.go b/aws/resource_aws_sagemaker_model_package_group.go
new file mode 100644
index 00000000000..1345a36ac39
--- /dev/null
+++ b/aws/resource_aws_sagemaker_model_package_group.go
@@ -0,0 +1,151 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+	"regexp"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/sagemaker"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+	"github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags"
+	
"github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/finder" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/waiter" +) + +func resourceAwsSagemakerModelPackageGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSagemakerModelPackageGroupCreate, + Read: resourceAwsSagemakerModelPackageGroupRead, + Update: resourceAwsSagemakerModelPackageGroupUpdate, + Delete: resourceAwsSagemakerModelPackageGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "model_package_group_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 63), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$`), + "Valid characters are a-z, A-Z, 0-9, and - (hyphen)."), + ), + }, + "model_package_group_description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 1024), + }, + "tags": tagsSchema(), + }, + } +} + +func resourceAwsSagemakerModelPackageGroupCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sagemakerconn + + name := d.Get("model_package_group_name").(string) + input := &sagemaker.CreateModelPackageGroupInput{ + ModelPackageGroupName: aws.String(name), + } + + if v, ok := d.GetOk("model_package_group_description"); ok { + input.ModelPackageGroupDescription = aws.String(v.(string)) + } + + if v, ok := d.GetOk("tags"); ok { + input.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().SagemakerTags() + } + + _, err := conn.CreateModelPackageGroup(input) + if err != nil { + return fmt.Errorf("error creating Sagemaker Model Package Group %s: %w", name, err) + } + + d.SetId(name) + + if _, err := waiter.ModelPackageGroupCompleted(conn, d.Id()); err != nil { + return fmt.Errorf("error waiting for Sagemaker Model Package Group (%s) to be created: %w", d.Id(), err) + } + + return resourceAwsSagemakerModelPackageGroupRead(d, meta) +} + +func resourceAwsSagemakerModelPackageGroupRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sagemakerconn + ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig + + mpg, err := finder.ModelPackageGroupByName(conn, d.Id()) + if err != nil { + if isAWSErr(err, "ValidationException", "does not exist") { + d.SetId("") + log.Printf("[WARN] Unable to find Sagemaker Model Package Group (%s); removing from state", d.Id()) + return nil + } + return fmt.Errorf("error reading Sagemaker Model Package Group (%s): %w", d.Id(), err) + + } + + arn := aws.StringValue(mpg.ModelPackageGroupArn) + d.Set("model_package_group_name", mpg.ModelPackageGroupName) + d.Set("arn", arn) + d.Set("model_package_group_description", mpg.ModelPackageGroupDescription) + + tags, err := keyvaluetags.SagemakerListTags(conn, arn) + + if err != nil { + return fmt.Errorf("error listing tags for Sagemaker Model Package Group (%s): %w", d.Id(), err) + } + + if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + return fmt.Errorf("error setting tags: %w", err) + } + + return nil +} + +func resourceAwsSagemakerModelPackageGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sagemakerconn + + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := 
keyvaluetags.SagemakerUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating Sagemaker Model Package Group (%s) tags: %s", d.Id(), err) + } + } + + return resourceAwsSagemakerModelPackageGroupRead(d, meta) +} + +func resourceAwsSagemakerModelPackageGroupDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sagemakerconn + + input := &sagemaker.DeleteModelPackageGroupInput{ + ModelPackageGroupName: aws.String(d.Id()), + } + + if _, err := conn.DeleteModelPackageGroup(input); err != nil { + if isAWSErr(err, "ValidationException", "does not exist") { + return nil + } + return fmt.Errorf("error deleting Sagemaker Model Package Group (%s): %w", d.Id(), err) + } + + if _, err := waiter.ModelPackageGroupDeleted(conn, d.Id()); err != nil { + if isAWSErr(err, "ValidationException", "does not exist") { + return nil + } + return fmt.Errorf("error waiting for Sagemaker Model Package Group (%s) to delete: %w", d.Id(), err) + } + + return nil +} diff --git a/aws/resource_aws_sagemaker_model_package_group_test.go b/aws/resource_aws_sagemaker_model_package_group_test.go new file mode 100644 index 00000000000..8e482f52320 --- /dev/null +++ b/aws/resource_aws_sagemaker_model_package_group_test.go @@ -0,0 +1,264 @@ +package aws + +import ( + "fmt" + "log" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/sagemaker" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/sagemaker/finder" +) + +func init() { + resource.AddTestSweepers("aws_sagemaker_model_package_group", &resource.Sweeper{ + Name: "aws_sagemaker_model_package_group", + F: testSweepSagemakerModelPackageGroups, + }) +} + +func testSweepSagemakerModelPackageGroups(region string) error { + client, err := sharedClientForRegion(region) + if err != nil { + return fmt.Errorf("error getting client: %s", err) + } + conn := client.(*AWSClient).sagemakerconn + + err = conn.ListModelPackageGroupsPages(&sagemaker.ListModelPackageGroupsInput{}, func(page *sagemaker.ListModelPackageGroupsOutput, lastPage bool) bool { + for _, ModelPackageGroup := range page.ModelPackageGroupSummaryList { + name := aws.StringValue(ModelPackageGroup.ModelPackageGroupName) + + input := &sagemaker.DeleteModelPackageGroupInput{ + ModelPackageGroupName: ModelPackageGroup.ModelPackageGroupName, + } + + log.Printf("[INFO] Deleting SageMaker Model Package Group: %s", name) + if _, err := conn.DeleteModelPackageGroup(input); err != nil { + log.Printf("[ERROR] Error deleting SageMaker Model Package Group (%s): %s", name, err) + continue + } + } + + return !lastPage + }) + + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping SageMaker Model Package Group sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("Error retrieving SageMaker Model Package Groups: %w", err) + } + + return nil +} + +func TestAccAWSSagemakerModelPackageGroup_basic(t *testing.T) { + var mpg sagemaker.DescribeModelPackageGroupOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_model_package_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerModelPackageGroupDestroy, + Steps: []resource.TestStep{ + 
{ + Config: testAccAWSSagemakerModelPackageGroupBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerModelPackageGroupExists(resourceName, &mpg), + resource.TestCheckResourceAttr(resourceName, "model_package_group_name", rName), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "sagemaker", fmt.Sprintf("model-package-group/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSagemakerModelPackageGroup_description(t *testing.T) { + var mpg sagemaker.DescribeModelPackageGroupOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_model_package_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerModelPackageGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerModelPackageGroupDescription(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerModelPackageGroupExists(resourceName, &mpg), + resource.TestCheckResourceAttr(resourceName, "model_package_group_description", rName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSagemakerModelPackageGroup_tags(t *testing.T) { + var mpg sagemaker.DescribeModelPackageGroupOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_model_package_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerModelPackageGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSagemakerModelPackageGroupConfigTags1(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerModelPackageGroupExists(resourceName, &mpg), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSSagemakerModelPackageGroupConfigTags2(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerModelPackageGroupExists(resourceName, &mpg), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccAWSSagemakerModelPackageGroupConfigTags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSagemakerModelPackageGroupExists(resourceName, &mpg), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + +func TestAccAWSSagemakerModelPackageGroup_disappears(t *testing.T) { + var mpg sagemaker.DescribeModelPackageGroupOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_sagemaker_model_package_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSagemakerModelPackageGroupDestroy, + Steps: []resource.TestStep{ + { + Config: 
testAccAWSSagemakerModelPackageGroupBasicConfig(rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSSagemakerModelPackageGroupExists(resourceName, &mpg),
+					testAccCheckResourceDisappears(testAccProvider, resourceAwsSagemakerModelPackageGroup(), resourceName),
+				),
+				ExpectNonEmptyPlan: true,
+			},
+		},
+	})
+}
+
+func testAccCheckAWSSagemakerModelPackageGroupDestroy(s *terraform.State) error {
+	conn := testAccProvider.Meta().(*AWSClient).sagemakerconn
+
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "aws_sagemaker_model_package_group" {
+			continue
+		}
+
+		ModelPackageGroup, err := finder.ModelPackageGroupByName(conn, rs.Primary.ID)
+		if err != nil {
+			return nil
+		}
+
+		if aws.StringValue(ModelPackageGroup.ModelPackageGroupName) == rs.Primary.ID {
+			return fmt.Errorf("sagemaker Model Package Group %q still exists", rs.Primary.ID)
+		}
+	}
+
+	return nil
+}
+
+func testAccCheckAWSSagemakerModelPackageGroupExists(n string, mpg *sagemaker.DescribeModelPackageGroupOutput) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[n]
+		if !ok {
+			return fmt.Errorf("Not found: %s", n)
+		}
+
+		if rs.Primary.ID == "" {
+			return fmt.Errorf("No sagemaker Model Package Group ID is set")
+		}
+
+		conn := testAccProvider.Meta().(*AWSClient).sagemakerconn
+		resp, err := finder.ModelPackageGroupByName(conn, rs.Primary.ID)
+		if err != nil {
+			return err
+		}
+
+		*mpg = *resp
+
+		return nil
+	}
+}
+
+func testAccAWSSagemakerModelPackageGroupBasicConfig(rName string) string {
+	return fmt.Sprintf(`
+resource "aws_sagemaker_model_package_group" "test" {
+  model_package_group_name = %[1]q
+}
+`, rName)
+}
+
+func testAccAWSSagemakerModelPackageGroupDescription(rName string) string {
+	return fmt.Sprintf(`
+resource "aws_sagemaker_model_package_group" "test" {
+  model_package_group_name        = %[1]q
+  model_package_group_description = %[1]q
+}
+`, rName)
+}
+
+func testAccAWSSagemakerModelPackageGroupConfigTags1(rName, tagKey1, tagValue1 string) string {
+	return fmt.Sprintf(`
+resource "aws_sagemaker_model_package_group" "test" {
+  model_package_group_name = %[1]q
+
+  tags = {
+    %[2]q = %[3]q
+  }
+}
+`, rName, tagKey1, tagValue1)
+}
+
+func testAccAWSSagemakerModelPackageGroupConfigTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string {
+	return fmt.Sprintf(`
+resource "aws_sagemaker_model_package_group" "test" {
+  model_package_group_name = %[1]q
+
+  tags = {
+    %[2]q = %[3]q
+    %[4]q = %[5]q
+  }
+}
+`, rName, tagKey1, tagValue1, tagKey2, tagValue2)
+}

From 0892a7089b71cbcaf6c04823807a8064e7100cc3 Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Sat, 30 Jan 2021 13:39:27 +0200
Subject: [PATCH 0949/1212] add force new to desc

---
 aws/resource_aws_sagemaker_model_package_group.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/aws/resource_aws_sagemaker_model_package_group.go b/aws/resource_aws_sagemaker_model_package_group.go
index 1345a36ac39..19d36cd6045 100644
--- a/aws/resource_aws_sagemaker_model_package_group.go
+++ b/aws/resource_aws_sagemaker_model_package_group.go
@@ -42,6 +42,7 @@ func resourceAwsSagemakerModelPackageGroup() *schema.Resource {
 			"model_package_group_description": {
 				Type:         schema.TypeString,
 				Optional:     true,
+				ForceNew:     true,
 				ValidateFunc: validation.StringLenBetween(1, 1024),
 			},
 			"tags": tagsSchema(),

From 476e9c811e7c855c1e334f50d39b481854c0f923 Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Sat, 30 Jan 2021 13:42:08 +0200
Subject: [PATCH 0950/1212] docs

---
 ...agemaker_model_package_group.html.markdown | 44 
+++++++
 1 file changed, 44 insertions(+)
 create mode 100644 website/docs/r/sagemaker_model_package_group.html.markdown

diff --git a/website/docs/r/sagemaker_model_package_group.html.markdown b/website/docs/r/sagemaker_model_package_group.html.markdown
new file mode 100644
index 00000000000..d5346652e3e
--- /dev/null
+++ b/website/docs/r/sagemaker_model_package_group.html.markdown
@@ -0,0 +1,44 @@
+---
+subcategory: "Sagemaker"
+layout: "aws"
+page_title: "AWS: aws_sagemaker_model_package_group"
+description: |-
+  Provides a Sagemaker Model Package Group resource.
+---
+
+# Resource: aws_sagemaker_model_package_group
+
+Provides a Sagemaker Model Package Group resource.
+
+## Example Usage
+
+### Basic usage
+
+```hcl
+resource "aws_sagemaker_model_package_group" "example" {
+  model_package_group_name = "example"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `model_package_group_name` - (Required) The name of the model group.
+* `model_package_group_description` - (Optional) A description for the model group.
+* `tags` - (Optional) A map of tags to assign to the resource.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The name of the Model Package Group.
+* `arn` - The Amazon Resource Name (ARN) assigned by AWS to this Model Package Group.
+
+## Import
+
+Sagemaker Model Package Groups can be imported using the `name`, e.g.
+
+```
+$ terraform import aws_sagemaker_model_package_group.test_model_package_group example
+```

From 64d14ed23c232c6d2bdb2fb8da0e744ee969c8a3 Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Sat, 30 Jan 2021 13:45:37 +0200
Subject: [PATCH 0951/1212] changelog

---
 .changelog/17366.txt | 3 +++
 1 file changed, 3 insertions(+)
 create mode 100644 .changelog/17366.txt

diff --git a/.changelog/17366.txt b/.changelog/17366.txt
new file mode 100644
index 00000000000..340b0a5cc4a
--- /dev/null
+++ b/.changelog/17366.txt
@@ -0,0 +1,3 @@
+```release-note:new-resource
+aws_sagemaker_model_package_group
+```
\ No newline at end of file

From 1d2abb955c5647b8f709b47e55621c5145982986 Mon Sep 17 00:00:00 2001
From: Ilia Lazebnik
Date: Sun, 31 Jan 2021 10:25:40 +0200
Subject: [PATCH 0952/1212] copy-pasta

---
 aws/internal/service/sagemaker/finder/finder.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/aws/internal/service/sagemaker/finder/finder.go b/aws/internal/service/sagemaker/finder/finder.go
index 3236aa4544d..280f46778b4 100644
--- a/aws/internal/service/sagemaker/finder/finder.go
+++ b/aws/internal/service/sagemaker/finder/finder.go
@@ -24,8 +24,8 @@ func CodeRepositoryByName(conn *sagemaker.SageMaker, name string) (*sagemaker.De
 	return output, nil
 }

-// ModelPackageGroupByName returns the code repository corresponding to the specified name.
-// Returns nil if no code repository is found.
+// ModelPackageGroupByName returns the Model Package Group corresponding to the specified name.
+// Returns nil if no Model Package Group is found.
 func ModelPackageGroupByName(conn *sagemaker.SageMaker, name string) (*sagemaker.DescribeModelPackageGroupOutput, error) {
 	input := &sagemaker.DescribeModelPackageGroupInput{
 		ModelPackageGroupName: aws.String(name),

From 5cdd86c30245dbc58fc8b0fe8013f38fc37b2e67 Mon Sep 17 00:00:00 2001
From: Simon Davis
Date: Mon, 1 Feb 2021 08:11:27 -0800
Subject: [PATCH 0953/1212] add-historical-rm

---
 docs/roadmaps/2020_November_to_January.md | 101 ++++++++++++++++++++++
 1 file changed, 101 insertions(+)
 create mode 100644 docs/roadmaps/2020_November_to_January.md

diff --git a/docs/roadmaps/2020_November_to_January.md b/docs/roadmaps/2020_November_to_January.md
new file mode 100644
index 00000000000..c2295cf5a9a
--- /dev/null
+++ b/docs/roadmaps/2020_November_to_January.md
@@ -0,0 +1,101 @@
+# Roadmap: November 2020 - January 2021
+
+Every few months, the team will highlight areas of focus for our work and upcoming research.
+
+We select items for inclusion in the roadmap from the Top 10 Community Issues, [Core Services](docs/CORE_SERVICES.md), and internal priorities. Where community-sourced contributions exist, we will work with the authors to review and merge their work. Where these do not exist, or the original contributors are not available, we will create the resources and implementation ourselves.
+
+Each weekly release will include necessary tasks that lead to the completion of the stated goals as well as community pull requests, enhancements, and features that are not highlighted in the roadmap. To view all the items we've prioritized for this quarter, please see the [Roadmap milestone](https://github.com/hashicorp/terraform-provider-aws/milestone/138).
+
+This roadmap does not describe all the work that will be included within this timeframe, but it does describe our focus. We will include other work as events occur.
+
+From [August through October](docs/roadmaps/2020_August_to_October.md), we committed to adding support for EventBridge, ImageBuilder, LakeFormation and Serverless Application Repository as new service offerings. We were able to deliver EventBridge within that time frame. Unfortunately, for a number of reasons, we weren’t able to release ImageBuilder, LakeFormation and Serverless Application Repository. That said, they are in progress and on track for release in early November.
+
+From October-January ‘21, we will be prioritizing the following areas of work:
+
+## New Services
+
+### AWS SSO Permission Sets
+Issue: [#15108](https://github.com/hashicorp/terraform-provider-aws/issues/15108)
+
+_[AWS SSO](https://docs.aws.amazon.com/singlesignon/latest/APIReference/welcome.html) account assignment APIs enable you to build automation to create and update permissions that align with your company's common job functions. You can then assign the permissions to users and groups to entitle them for access in their required accounts. For example, you can give your developers broad control over resources in developer accounts, and limit that control to authorized operations personnel in production accounts. The new AWS CloudFormation support enables you to automate account assignments as you build new accounts. 
You can also use the APIs to decode user and group names from the unique identifiers that appear in AWS CloudTrail logs._ + +Support for AWS SSO Permission Sets will include: + +New Resource(s): +- aws_sso_permission_set +- aws_sso_permission_set_policy +- aws_sso_permission_set_policy_attachment +- aws_sso_account_assignment + +## Issues & Enhancements + +### Core Service Reliability +Core Services are areas of high usage or strategic importance for our users. We strive to offer rock solid reliability in these areas. This quarter we will have a focus on RDS and Elasticache (which we are also promoting to Core Service status) to address some common pain points in their usage and ensure they continue to meet our standards. + +#### RDS + +- [#15177](https://github.com/hashicorp/terraform-provider-aws/issues/15177): Subsequent plan/apply forces global cluster recreation when source cluster's storage_encrypted=true +- [#15583](https://github.com/hashicorp/terraform-provider-aws/issues/15583): aws db parameter group ... converts keys and values to lowercase and fails 'apply' due to aws_db_parameter_group changes +- [#1198](https://github.com/hashicorp/terraform-provider-aws/issues/1198): Unable to ignore changes to RDS minor engine version +- [#9401](https://github.com/hashicorp/terraform-provider-aws/issues/9401): Destroy/recreate DB instance on minor version update rather than updating +- [#2635](https://github.com/hashicorp/terraform-provider-aws/issues/2635): RDS - storage_encrypted = true does not work +- [#467](https://github.com/hashicorp/terraform-provider-aws/issues/467): With aws_db_instance when you remove the snapshot_identifier it wants to force a new resource +- [#10197](https://github.com/hashicorp/terraform-provider-aws/issues/10197): AWS aurora unexpected state 'configuring-iam-database-auth' when modifying the `iam_database_authentication_enabled` flag +- [#13891](https://github.com/hashicorp/terraform-provider-aws/issues/13891): RDS Cluster is not reattached to Global Cluster after failing deletion + +#### Elasticache +The Elasticache work will begin with a research spike to ensure that the we can solve the following issues without introducing breaking changes into the provider: + +- [#14959](https://github.com/hashicorp/terraform-provider-aws/issues/14959): Research Spike: Elasticache Service Fixes and Improvements +- [#12708](https://github.com/hashicorp/terraform-provider-aws/issues/12708): resource/aws_elasticache_replication_group: Add MultiAZ support +- ~[#13517](https://github.com/hashicorp/terraform-provider-aws/issues/13517): Feature Request: `aws_elasticache_cluster` allow auto-minor-version-upgrade to be set~ This parameter is not enabled in the AWS API. 
+
+#### Elasticache
+The Elasticache work will begin with a research spike to ensure that we can solve the following issues without introducing breaking changes into the provider:
+
+- [#14959](https://github.com/hashicorp/terraform-provider-aws/issues/14959): Research Spike: Elasticache Service Fixes and Improvements
+- [#12708](https://github.com/hashicorp/terraform-provider-aws/issues/12708): resource/aws_elasticache_replication_group: Add MultiAZ support
+- ~[#13517](https://github.com/hashicorp/terraform-provider-aws/issues/13517): Feature Request: `aws_elasticache_cluster` allow auto-minor-version-upgrade to be set~ This parameter is not enabled in the AWS API.
Once the RFC has been approved, we will update the community with our plans for Default Tags. + +#### API Calls/IAM Actions Per Terraform Resource (Minimum IAM) +Issue: [#9154](https://github.com/hashicorp/terraform-provider-aws/issues/9154) + +To address security concerns and best practices we are considering how Terraform could surface minimally viable IAM policies for taking actions on resources or executing a TF plan. This is in the early stages of research and we are particularly interested in whether or not this would be useful and the resources or services areas for which it is most valuable. + +#### Lifecycle: Retain [Add 'retain' attribute to the Terraform lifecycle meta-parameter] +Issue: [#902](https://github.com/hashicorp/terraform-provider-aws/issues/902) + +Some resources (e.g. log groups) are intended to be created but never destroyed. Terraform currently does not have a lifecycle attribute for retaining such resources. We are curious as to whether or not retaining resources is a workflow that meets the needs of our community and if so, how and where we might make use of that in the AWS Provider. + +### Disclosures + +The product-development initiatives in this document reflect HashiCorp's current plans and are subject to change and/or cancellation in HashiCorp's sole discretion. From bc7e70dff9e684e44eb82cbea28ca083a6ac4cd4 Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Mon, 1 Feb 2021 08:32:02 -0800 Subject: [PATCH 0954/1212] Q1 2021 roadmap --- ROADMAP.md | 104 +++++++++++++++++++++++++++-------------------------- 1 file changed, 53 insertions(+), 51 deletions(-) diff --git a/ROADMAP.md b/ROADMAP.md index c2295cf5a9a..ad235487ee5 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -1,4 +1,4 @@ -# Roadmap: November 2020 - January 2021 +# Roadmap: February 2021 - April 2021 Every few months, the team will highlight areas of focus for our work and upcoming research. @@ -8,31 +8,64 @@ Each weekly release will include necessary tasks that lead to the completion of This roadmap does not describe all the work that will be included within this timeframe, but it does describe our focus. We will include other work as events occur . -From [August through October](docs/roadmaps/2020_August_to_October.md), we committed to adding support for EventBridge, ImageBuilder , LakeFormation and Serverless Application Repository as new service offerings. We were able to deliver EventBridge within that time frame. Unfortunately for a number of reasons we weren’t able to release ImageBuilder, LakeFormation and Serverless Application Repository. That said, they are in progress and on track for release in early November. 
+From [November through January](docs/roadmaps/2020_November_to_January.md), we added support for (among other things): -From October-January ‘21, we will be prioritizing the following areas of work: +- SSO Permission Sets +- EC2 Managed Prefix Lists +- Firewall Manager Policies +- SASL/SCRAM Authentication for MSK +- ImageBuilder +- LakeFormation +- Serverless Application Repository +- Cloudwatch Composite Alarms + +As well as partnering with AWS to provide launch day support for: + +- Network Firewall +- Code Signing for Lambda +- Container Images for Lambda +- Gateway Load Balancer +- Spot Launch for EKS Managed Node Groups + +From February-April ‘21, we will be prioritizing the following areas of work: + +## Provider Functionality: Default Tags + +Issue: [#7926](https://github.com/hashicorp/terraform-provider-aws/issues/7926) + +Default Tags builds on the workflows in Ignore Tags to provide additional control over the ways Terraform manages tagging capabilities. Users will be able to specify lists of tags to apply to all resources in a configuration at the provider level. Our goal in offering this use case is to assist in tidying up configuration files, decreasing development efforts, and streamlining cost allocation and resource attribution within organizations of all sizes. ## New Services -### AWS SSO Permission Sets -Issue: [#15108](https://github.com/hashicorp/terraform-provider-aws/issues/15108) +### CloudWatch Synthetics +Issue: [#11145](https://github.com/hashicorp/terraform-provider-aws/issues/11145) -_[AWS SSO](https://docs.aws.amazon.com/singlesignon/latest/APIReference/welcome.html) account assignment APIs enable you to build automation to create and update permissions that align with your company's common job functions. You can then assign the permissions to users and groups to entitle them for access in their required accounts. For example, you can give your developers broad control over resources in developer accounts, and limit that control to authorized operations personnel in production accounts. The new AWS CloudFormation support enables you to automate account assignments as you build new accounts. You can also use the APIs to decode user and group names from the unique identifiers that appear in AWS CloudTrail logs._ +_[CloudWatch Synthetics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries.html) You can use Amazon CloudWatch Synthetics to create canaries, configurable scripts that run on a schedule, to monitor your endpoints and APIs. Canaries follow the same routes and perform the same actions as a customer, which makes it possible for you to continually verify your customer experience even when you don't have any customer traffic on your applications. 
By using canaries, you can discover issues before your customers do._ Support for AWS SSO Permission Sets will include: New Resource(s): -- aws_sso_permission_set -- aws_sso_permission_set_policy -- aws_sso_permission_set_policy_attachment -- aws_sso_account_assignment +- aws_synthetics_canary + +New Datasource(s): +- aws_synthetics_canary_runs + +### Managed Workflows for Apache Airflow + +Issue: [#16432](https://github.com/hashicorp/terraform-provider-aws/issues/16432) + +_[Managed Workflows for Apache Airflow](https://aws.amazon.com/blogs/aws/introducing-amazon-managed-workflows-for-apache-airflow-mwaa/) Amazon Managed Workflows for Apache Airflow (MWAA) is a managed orchestration service for Apache Airflow1 that makes it easier to set up and operate end-to-end data pipelines in the cloud at scale. Apache Airflow is an open-source tool used to programmatically author, schedule, and monitor sequences of processes and tasks referred to as “workflows.” With Managed Workflows, you can use Airflow and Python to create workflows without having to manage the underlying infrastructure for scalability, availability, and security. Managed Workflows automatically scales its workflow execution capacity to meet your needs, and is integrated with AWS security services to help provide you with fast and secure access to data._ -## Issues & Enhancements +Support for Amazon Managed Workflows for Apache Airflow will include: -### Core Service Reliability +New Resource(s): + +- aws_mwaa_environment + +## Core Service Reliability Core Services are areas of high usage or strategic importance for our users. We strive to offer rock solid reliability in these areas. This quarter we will have a focus on RDS and Elasticache (which we are also promoting to Core Service status) to address some common pain points in their usage and ensure they continue to meet our standards. -#### RDS +### RDS - [#15177](https://github.com/hashicorp/terraform-provider-aws/issues/15177): Subsequent plan/apply forces global cluster recreation when source cluster's storage_encrypted=true - [#15583](https://github.com/hashicorp/terraform-provider-aws/issues/15583): aws db parameter group ... converts keys and values to lowercase and fails 'apply' due to aws_db_parameter_group changes @@ -43,59 +76,28 @@ Core Services are areas of high usage or strategic importance for our users. We - [#10197](https://github.com/hashicorp/terraform-provider-aws/issues/10197): AWS aurora unexpected state 'configuring-iam-database-auth' when modifying the `iam_database_authentication_enabled` flag - [#13891](https://github.com/hashicorp/terraform-provider-aws/issues/13891): RDS Cluster is not reattached to Global Cluster after failing deletion -#### Elasticache -The Elasticache work will begin with a research spike to ensure that the we can solve the following issues without introducing breaking changes into the provider: - -- [#14959](https://github.com/hashicorp/terraform-provider-aws/issues/14959): Research Spike: Elasticache Service Fixes and Improvements -- [#12708](https://github.com/hashicorp/terraform-provider-aws/issues/12708): resource/aws_elasticache_replication_group: Add MultiAZ support -- ~[#13517](https://github.com/hashicorp/terraform-provider-aws/issues/13517): Feature Request: `aws_elasticache_cluster` allow auto-minor-version-upgrade to be set~ This parameter is not enabled in the AWS API. 
-- [#5118](https://github.com/hashicorp/terraform-provider-aws/issues/5118): support setting primary/replica AZ attributes inside NodeGroupConfiguration for RedisClusterModelEnabled - -### Workflow Improvements +## Technical Debt Theme -We’ll also be tackling some of the top reported issues in the provider that are causing disruptions to high priority workflows: +Last quarter we continued to improve the stability of our Acceptance Test suite. Following on from that work we will begin to integrate our Pull Request workflow with our Acceptance testing suite with a goal of being able to determine which tests to run, trigger, and view results of Acceptance Test runs on GitHub. This will improve our time to merge incoming PR's and further protect against regressions. -- [#14373](https://github.com/hashicorp/terraform-provider-aws/issues/14373): cloudfront: support for cache and origin request policies -- [#11584](https://github.com/hashicorp/terraform-provider-aws/issues/11584): Add ability to manage VPN tunnel options -- [#13986](https://github.com/hashicorp/terraform-provider-aws/issues/13986): Feature request: Managed prefix lists -- [#8009](https://github.com/hashicorp/terraform-provider-aws/issues/8009): S3 settings on aws_dms_endpoint conflict with "extra_connection_attributes" -- [#11220](https://github.com/hashicorp/terraform-provider-aws/issues/11220): Set account recovery preference -- [#12272](https://github.com/hashicorp/terraform-provider-aws/issues/12272): CloudWatch composite alarms -- [#4058](https://github.com/hashicorp/terraform-provider-aws/issues/4058): Support Firewall Manager Policies -- [#10931](https://github.com/hashicorp/terraform-provider-aws/issues/10931): Resource aws_sns_topic_subscription, new argument redrive_policy -- [#11098](https://github.com/hashicorp/terraform-provider-aws/issues/11098): Support for AWS Config Conformance Packs -- [#6674](https://github.com/hashicorp/terraform-provider-aws/issues/6674): Feature Request: Security Hub -- [#3891](https://github.com/hashicorp/terraform-provider-aws/issues/3891): Adding custom cognito user pool attribute forces new resource -- [#2245](https://github.com/hashicorp/terraform-provider-aws/issues/2245): AWS security groups not being destroyed -- [#8114](https://github.com/hashicorp/terraform-provider-aws/issues/8114): Cognito User Pool UI Customization -- [#11348](https://github.com/hashicorp/terraform-provider-aws/issues/11348): Add Type to AWS SFN State Machine -- [#11586](https://github.com/hashicorp/terraform-provider-aws/issues/11586): Faulty Read of Client VPN Network associations break state +We also spent time last quarter improving our documentation to give contributors more explicit guidance on best practice patterns for [data conversion](https://github.com/hashicorp/terraform-provider-aws/blob/main/docs/contributing/data-handling-and-conversion.md) and [error handling](https://github.com/hashicorp/terraform-provider-aws/blob/main/docs/contributing/error-handling.md). -### Technical Debt Theme - -Last quarter we made considerable progress in improving the stability of our Acceptance Test suite. We were able to reduce our consistent test failures by 50% in Commercial, and fixed hundreds of tests in GovCloud. We believe that keeping our focus in this area in the next quarter is the way forward that provides the most value. With another quarter of focus we are looking to have a test suite free of problematic tests, along with optimizations which should improve the speeds of the suite. 
- -### Research Topics +## Research Topics Research topics include features, architectural changes, and ideas that we are pursuing in the longer term that may significantly impact the core user experience of the AWS provider. Research topics are discovery only and are not guaranteed to be included in a future release. We are interested in your thoughts and feedback about the proposals below and encourage you to comment on the linked issues or schedule time with @maryelizbeth via the link on her GitHub profile to discuss. -#### Default Tags Implementation Design -Issue: [#7926](https://github.com/hashicorp/terraform-provider-aws/issues/7926) - -After completing user research and an internal review of our research conclusions, we will begin conducting engineering research and publish an RFC to address the implementation of this feature. Once the RFC has been approved, we will update the community with our plans for Default Tags. - -#### API Calls/IAM Actions Per Terraform Resource (Minimum IAM) +### API Calls/IAM Actions Per Terraform Resource (Minimum IAM) Issue: [#9154](https://github.com/hashicorp/terraform-provider-aws/issues/9154) To address security concerns and best practices we are considering how Terraform could surface minimally viable IAM policies for taking actions on resources or executing a TF plan. This is in the early stages of research and we are particularly interested in whether or not this would be useful and the resources or services areas for which it is most valuable. -#### Lifecycle: Retain [Add 'retain' attribute to the Terraform lifecycle meta-parameter] +### Lifecycle: Retain [Add 'retain' attribute to the Terraform lifecycle meta-parameter] Issue: [#902](https://github.com/hashicorp/terraform-provider-aws/issues/902) Some resources (e.g. log groups) are intended to be created but never destroyed. Terraform currently does not have a lifecycle attribute for retaining such resources. We are curious as to whether or not retaining resources is a workflow that meets the needs of our community and if so, how and where we might make use of that in the AWS Provider. -### Disclosures +## Disclosures The product-development initiatives in this document reflect HashiCorp's current plans and are subject to change and/or cancellation in HashiCorp's sole discretion. 
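To make the Default Tags item above concrete: the roadmap describes tags declared once at the provider level and applied to every resource in a configuration. A rough sketch of what such a configuration could look like follows — hypothetical syntax only, since the feature was still at the proposal/RFC stage when this roadmap was written:

```
# Hypothetical sketch of provider-level default tags (proposed feature;
# attribute names and structure were not finalized at this point).
provider "aws" {
  region = "us-west-2"

  default_tags {
    tags = {
      Environment = "production"
      Owner       = "platform-team"
    }
  }
}

# Resources would then inherit the tags above without repeating them.
resource "aws_vpc" "example" {
  cidr_block = "10.0.0.0/16"
}
```
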
From d1097aa9b723127166bd90056064017cbc97bc1f Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Mon, 1 Feb 2021 13:49:41 -0500 Subject: [PATCH 0955/1212] provider: Fix reverse DNS prefix handling and save to AWSClient (#17142) * provider: Fix reverse DNS prefix handling and save to AWSClient Reference: https://github.com/hashicorp/terraform-provider-aws/issues/17140 Output from acceptance testing: ``` --- PASS: TestAccAWSPartition_basic (8.02s) --- PASS: TestAccAWSProvider_AssumeRole_Empty (18.41s) --- PASS: TestAccAWSProvider_Endpoints (16.22s) --- PASS: TestAccAWSProvider_IgnoreTags_EmptyConfigurationBlock (15.68s) --- PASS: TestAccAWSProvider_IgnoreTags_KeyPrefixes_Multiple (15.57s) --- PASS: TestAccAWSProvider_IgnoreTags_KeyPrefixes_None (15.56s) --- PASS: TestAccAWSProvider_IgnoreTags_KeyPrefixes_One (15.84s) --- PASS: TestAccAWSProvider_IgnoreTags_Keys_Multiple (15.92s) --- PASS: TestAccAWSProvider_IgnoreTags_Keys_None (15.99s) --- PASS: TestAccAWSProvider_IgnoreTags_Keys_One (16.10s) --- PASS: TestAccAWSProvider_Region_AwsC2S (13.71s) --- PASS: TestAccAWSProvider_Region_AwsChina (13.27s) --- PASS: TestAccAWSProvider_Region_AwsCommercial (12.85s) --- PASS: TestAccAWSProvider_Region_AwsGovCloudUs (12.59s) --- PASS: TestAccAWSProvider_Region_AwsSC2S (14.52s) ``` * Update CHANGELOG for #17142 --- .changelog/17142.txt | 3 + aws/config.go | 2 + aws/data_source_aws_partition.go | 9 +-- aws/provider.go | 12 ++++ aws/provider_test.go | 115 +++++++++++++++++++++++++++++-- 5 files changed, 130 insertions(+), 11 deletions(-) create mode 100644 .changelog/17142.txt diff --git a/.changelog/17142.txt b/.changelog/17142.txt new file mode 100644 index 00000000000..f5ee2f8a2ab --- /dev/null +++ b/.changelog/17142.txt @@ -0,0 +1,3 @@ +```release-note:bug +data-source/aws_partition: Correct `reverse_dns_prefix` value in AWS China, C2S, and SC2S +``` diff --git a/aws/config.go b/aws/config.go index 3463639e39d..eb4030d828d 100644 --- a/aws/config.go +++ b/aws/config.go @@ -333,6 +333,7 @@ type AWSClient struct { region string resourcegroupsconn *resourcegroups.ResourceGroups resourcegroupstaggingapiconn *resourcegroupstaggingapi.ResourceGroupsTaggingAPI + reverseDnsPrefix string route53domainsconn *route53domains.Route53Domains route53resolverconn *route53resolver.Route53Resolver s3conn *s3.S3 @@ -572,6 +573,7 @@ func (c *Config) Client() (interface{}, error) { region: c.Region, resourcegroupsconn: resourcegroups.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["resourcegroups"])})), resourcegroupstaggingapiconn: resourcegroupstaggingapi.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["resourcegroupstaggingapi"])})), + reverseDnsPrefix: ReverseDns(dnsSuffix), route53domainsconn: route53domains.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["route53domains"])})), route53resolverconn: route53resolver.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["route53resolver"])})), s3controlconn: s3control.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.Endpoints["s3control"])})), diff --git a/aws/data_source_aws_partition.go b/aws/data_source_aws_partition.go index 9e2c40e502a..adf7c5c85de 100644 --- a/aws/data_source_aws_partition.go +++ b/aws/data_source_aws_partition.go @@ -2,8 +2,6 @@ package aws import ( "log" - "sort" - "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -43,11 +41,8 @@ func dataSourceAwsPartitionRead(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] Setting AWS URL Suffix to %s.", client.dnsSuffix) 
d.Set("dns_suffix", meta.(*AWSClient).dnsSuffix) - dnsParts := strings.Split(meta.(*AWSClient).dnsSuffix, ".") - sort.Sort(sort.Reverse(sort.StringSlice(dnsParts))) - servicePrefix := strings.Join(dnsParts, ".") - d.Set("reverse_dns_prefix", servicePrefix) - log.Printf("[DEBUG] Setting service prefix to %s.", servicePrefix) + d.Set("reverse_dns_prefix", meta.(*AWSClient).reverseDnsPrefix) + log.Printf("[DEBUG] Setting service prefix to %s.", meta.(*AWSClient).reverseDnsPrefix) return nil } diff --git a/aws/provider.go b/aws/provider.go index 66a97c15473..97bb7538de3 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -2,6 +2,7 @@ package aws import ( "log" + "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" @@ -1512,3 +1513,14 @@ func expandProviderIgnoreTags(l []interface{}) *keyvaluetags.IgnoreConfig { return ignoreConfig } + +// ReverseDns switches a DNS hostname to reverse DNS and vice-versa. +func ReverseDns(hostname string) string { + parts := strings.Split(hostname, ".") + + for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 { + parts[i], parts[j] = parts[j], parts[i] + } + + return strings.Join(parts, ".") +} diff --git a/aws/provider_test.go b/aws/provider_test.go index cd15f855ff5..327575ff4ea 100644 --- a/aws/provider_test.go +++ b/aws/provider_test.go @@ -7,7 +7,6 @@ import ( "os" "reflect" "regexp" - "sort" "strings" "sync" "testing" @@ -179,6 +178,49 @@ func TestProvider_impl(t *testing.T) { var _ *schema.Provider = Provider() } +func TestReverseDns(t *testing.T) { + testCases := []struct { + name string + input string + expected string + }{ + { + name: "empty", + input: "", + expected: "", + }, + { + name: "amazonaws.com", + input: "amazonaws.com", + expected: "com.amazonaws", + }, + { + name: "amazonaws.com.cn", + input: "amazonaws.com.cn", + expected: "cn.com.amazonaws", + }, + { + name: "sc2s.sgov.gov", + input: "sc2s.sgov.gov", + expected: "gov.sgov.sc2s", + }, + { + name: "c2s.ic.gov", + input: "c2s.ic.gov", + expected: "gov.ic.c2s", + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + + if got, want := ReverseDns(testCase.input), testCase.expected; got != want { + t.Errorf("got: %s, expected: %s", got, want) + } + }) + } +} + // testAccPreCheck verifies and sets required provider testing configuration // // This PreCheck function should be present in every acceptance test. 
It allows @@ -649,9 +691,7 @@ func testAccGetPartitionDNSSuffix() string { func testAccGetPartitionReverseDNSPrefix() string { if partition, ok := endpoints.PartitionForRegion(endpoints.DefaultPartitions(), testAccGetRegion()); ok { - dnsParts := strings.Split(partition.DNSSuffix(), ".") - sort.Sort(sort.Reverse(sort.StringSlice(dnsParts))) - return strings.Join(dnsParts, ".") + return ReverseDns(partition.DNSSuffix()) } return "com.amazonaws" @@ -1249,6 +1289,27 @@ func TestAccAWSProvider_IgnoreTags_Keys_Multiple(t *testing.T) { }) } +func TestAccAWSProvider_Region_AwsC2S(t *testing.T) { + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProviderFactories: testAccProviderFactoriesInternal(&providers), + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: testAccAWSProviderConfigRegion("us-iso-east-1"), // lintignore:AWSAT003 + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSProviderDnsSuffix(&providers, "c2s.ic.gov"), + testAccCheckAWSProviderPartition(&providers, "aws-iso"), + testAccCheckAWSProviderReverseDnsPrefix(&providers, "gov.ic.c2s"), + ), + PlanOnly: true, + }, + }, + }) +} + func TestAccAWSProvider_Region_AwsChina(t *testing.T) { var providers []*schema.Provider @@ -1262,6 +1323,7 @@ func TestAccAWSProvider_Region_AwsChina(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAWSProviderDnsSuffix(&providers, "amazonaws.com.cn"), testAccCheckAWSProviderPartition(&providers, "aws-cn"), + testAccCheckAWSProviderReverseDnsPrefix(&providers, "cn.com.amazonaws"), ), PlanOnly: true, }, @@ -1282,6 +1344,7 @@ func TestAccAWSProvider_Region_AwsCommercial(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAWSProviderDnsSuffix(&providers, "amazonaws.com"), testAccCheckAWSProviderPartition(&providers, "aws"), + testAccCheckAWSProviderReverseDnsPrefix(&providers, "com.amazonaws"), ), PlanOnly: true, }, @@ -1302,6 +1365,28 @@ func TestAccAWSProvider_Region_AwsGovCloudUs(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAWSProviderDnsSuffix(&providers, "amazonaws.com"), testAccCheckAWSProviderPartition(&providers, "aws-us-gov"), + testAccCheckAWSProviderReverseDnsPrefix(&providers, "com.amazonaws"), + ), + PlanOnly: true, + }, + }, + }) +} + +func TestAccAWSProvider_Region_AwsSC2S(t *testing.T) { + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProviderFactories: testAccProviderFactoriesInternal(&providers), + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: testAccAWSProviderConfigRegion("us-isob-east-1"), // lintignore:AWSAT003 + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSProviderDnsSuffix(&providers, "sc2s.sgov.gov"), + testAccCheckAWSProviderPartition(&providers, "aws-iso-b"), + testAccCheckAWSProviderReverseDnsPrefix(&providers, "gov.sgov.sc2s"), ), PlanOnly: true, }, @@ -1570,6 +1655,28 @@ func testAccCheckAWSProviderPartition(providers *[]*schema.Provider, expectedPar } } +func testAccCheckAWSProviderReverseDnsPrefix(providers *[]*schema.Provider, expectedReverseDnsPrefix string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if providers == nil { + return fmt.Errorf("no providers initialized") + } + + for _, provider := range *providers { + if provider == nil || provider.Meta() == nil || provider.Meta().(*AWSClient) == nil { + continue + } + + providerReverseDnsPrefix := provider.Meta().(*AWSClient).reverseDnsPrefix + + if 
providerReverseDnsPrefix != expectedReverseDnsPrefix {
+				return fmt.Errorf("expected Reverse DNS Prefix (%s), got: %s", expectedReverseDnsPrefix, providerReverseDnsPrefix)
+			}
+		}
+
+		return nil
+	}
+}
+
 // testAccPreCheckEc2ClassicOrHasDefaultVpcWithDefaultSubnets checks that the test region has either
 // - The EC2-Classic platform available, or
 // - A default VPC with default subnets.

From 2322a740db3c24628a468c4b95d0582a217dcb0e Mon Sep 17 00:00:00 2001
From: changelogbot
Date: Mon, 1 Feb 2021 18:51:19 +0000
Subject: [PATCH 0956/1212] Update CHANGELOG.md for #17142

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d65406a3ffc..9286049fa74 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,7 @@ ENHANCEMENTS:

 BUG FIXES:

+* data-source/aws_partition: Correct `reverse_dns_prefix` value in AWS China, C2S, and SC2S ([#17142](https://github.com/hashicorp/terraform-provider-aws/issues/17142))
 * resource/aws_glacier_vault: Prevent crash with `GetVaultAccessPolicy` API errors ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645))
 * resource/aws_glacier_vault: Properly remove from state when resource does not exist ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645))
 * resource/aws_iam_access_key: Ensure `Inactive` `status` is properly configured during resource creation ([#17322](https://github.com/hashicorp/terraform-provider-aws/issues/17322))

From e38daff556075099c69cf984da87650cbd11ea7b Mon Sep 17 00:00:00 2001
From: Mary Elizabeth
Date: Mon, 1 Feb 2021 17:25:59 -0600
Subject: [PATCH 0957/1212] Update docs/roadmaps/2020_November_to_January.md

Co-authored-by: Brian Flad
---
 docs/roadmaps/2020_November_to_January.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/roadmaps/2020_November_to_January.md b/docs/roadmaps/2020_November_to_January.md
index c2295cf5a9a..851a227f39e 100644
--- a/docs/roadmaps/2020_November_to_January.md
+++ b/docs/roadmaps/2020_November_to_January.md
@@ -8,7 +8,7 @@ Each weekly release will include necessary tasks that lead to the completion of

 This roadmap does not describe all the work that will be included within this timeframe, but it does describe our focus. We will include other work as events occur .

-From [August through October](docs/roadmaps/2020_August_to_October.md), we committed to adding support for EventBridge, ImageBuilder , LakeFormation and Serverless Application Repository as new service offerings. We were able to deliver EventBridge within that time frame. Unfortunately for a number of reasons we weren’t able to release ImageBuilder, LakeFormation and Serverless Application Repository. That said, they are in progress and on track for release in early November.
+From [August through October](2020_August_to_October.md), we committed to adding support for EventBridge, ImageBuilder , LakeFormation and Serverless Application Repository as new service offerings. We were able to deliver EventBridge within that time frame. Unfortunately for a number of reasons we weren’t able to release ImageBuilder, LakeFormation and Serverless Application Repository. That said, they are in progress and on track for release in early November.
From October-January ‘21, we will be prioritizing the following areas of work: From d5060908f5f316097c5f90e9c4a887fcdb9089c8 Mon Sep 17 00:00:00 2001 From: Mary Elizabeth Date: Mon, 1 Feb 2021 17:26:06 -0600 Subject: [PATCH 0958/1212] Update docs/roadmaps/2020_November_to_January.md Co-authored-by: Brian Flad --- docs/roadmaps/2020_November_to_January.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/roadmaps/2020_November_to_January.md b/docs/roadmaps/2020_November_to_January.md index 851a227f39e..5acd5773814 100644 --- a/docs/roadmaps/2020_November_to_January.md +++ b/docs/roadmaps/2020_November_to_January.md @@ -2,7 +2,7 @@ Every few months, the team will highlight areas of focus for our work and upcoming research. -We select items for inclusion in the roadmap from the Top 10 Community Issues, [Core Services](docs/CORE_SERVICES.md), and internal priorities. Where community sourced contributions exist we will work with the authors to review and merge their work. Where this does not exist or the original contributors, are not available we will create the resources and implementation ourselves. +We select items for inclusion in the roadmap from the Top 10 Community Issues, [Core Services](../CORE_SERVICES.md), and internal priorities. Where community sourced contributions exist we will work with the authors to review and merge their work. Where this does not exist or the original contributors, are not available we will create the resources and implementation ourselves. Each weekly release will include necessary tasks that lead to the completion of the stated goals as well as community pull requests, enhancements, and features that are not highlighted in the roadmap. To view all the items we've prioritized for this quarter, please see the [Roadmap milestone](https://github.com/hashicorp/terraform-provider-aws/milestone/138). From b409c5426de82bd371e35fa4b5aa2185b071fc70 Mon Sep 17 00:00:00 2001 From: Mary Elizabeth Date: Mon, 1 Feb 2021 17:31:50 -0600 Subject: [PATCH 0959/1212] Update ROADMAP.md --- ROADMAP.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ROADMAP.md b/ROADMAP.md index ad235487ee5..57402a07c29 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -42,7 +42,7 @@ Issue: [#11145](https://github.com/hashicorp/terraform-provider-aws/issues/11145 _[CloudWatch Synthetics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries.html) You can use Amazon CloudWatch Synthetics to create canaries, configurable scripts that run on a schedule, to monitor your endpoints and APIs. Canaries follow the same routes and perform the same actions as a customer, which makes it possible for you to continually verify your customer experience even when you don't have any customer traffic on your applications. 
By using canaries, you can discover issues before your customers do._ -Support for AWS SSO Permission Sets will include: +Support for CloudWatch Synthetics will include: New Resource(s): - aws_synthetics_canary From d491427772f04f0ee101dbb846cda63ce7c7bd43 Mon Sep 17 00:00:00 2001 From: Mary Elizabeth Date: Mon, 1 Feb 2021 17:34:37 -0600 Subject: [PATCH 0960/1212] Update docs/roadmaps/2020_November_to_January.md --- docs/roadmaps/2020_November_to_January.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/roadmaps/2020_November_to_January.md b/docs/roadmaps/2020_November_to_January.md index 5acd5773814..42defcaa2cc 100644 --- a/docs/roadmaps/2020_November_to_January.md +++ b/docs/roadmaps/2020_November_to_January.md @@ -22,6 +22,7 @@ _[AWS SSO](https://docs.aws.amazon.com/singlesignon/latest/APIReference/welcome. Support for AWS SSO Permission Sets will include: New Resource(s): + - aws_sso_permission_set - aws_sso_permission_set_policy - aws_sso_permission_set_policy_attachment From 20fa0b22e0fdebb033ff7f6266a0672aaeeea4cc Mon Sep 17 00:00:00 2001 From: Mary Elizabeth Date: Mon, 1 Feb 2021 17:37:21 -0600 Subject: [PATCH 0961/1212] remove trailing whitespace & update previous roadmap --- docs/roadmaps/2020_November_to_January.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/roadmaps/2020_November_to_January.md b/docs/roadmaps/2020_November_to_January.md index 42defcaa2cc..4d5c9d632c8 100644 --- a/docs/roadmaps/2020_November_to_January.md +++ b/docs/roadmaps/2020_November_to_January.md @@ -85,7 +85,7 @@ We are interested in your thoughts and feedback about the proposals below and en #### Default Tags Implementation Design Issue: [#7926](https://github.com/hashicorp/terraform-provider-aws/issues/7926) -After completing user research and an internal review of our research conclusions, we will begin conducting engineering research and publish an RFC to address the implementation of this feature. Once the RFC has been approved, we will update the community with our plans for Default Tags. +After completing user research and an internal review of our research conclusions, we will begin conducting engineering research and publish an RFC to address the implementation of this feature. Once the RFC has been approved, we will update the community with our plans for Default Tags. 
#### API Calls/IAM Actions Per Terraform Resource (Minimum IAM) Issue: [#9154](https://github.com/hashicorp/terraform-provider-aws/issues/9154) From 8f3e7da145fb6a196fb89291dd8bba75edf89f3a Mon Sep 17 00:00:00 2001 From: Mary Elizabeth Date: Mon, 1 Feb 2021 17:37:31 -0600 Subject: [PATCH 0962/1212] Update docs/roadmaps/2020_November_to_January.md --- docs/roadmaps/2020_November_to_January.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/roadmaps/2020_November_to_January.md b/docs/roadmaps/2020_November_to_January.md index 4d5c9d632c8..0e11c19e3e8 100644 --- a/docs/roadmaps/2020_November_to_January.md +++ b/docs/roadmaps/2020_November_to_January.md @@ -54,7 +54,7 @@ The Elasticache work will begin with a research spike to ensure that the we can ### Workflow Improvements -We’ll also be tackling some of the top reported issues in the provider that are causing disruptions to high priority workflows: +We’ll also be tackling some of the top reported issues in the provider that are causing disruptions to high priority workflows: - [#14373](https://github.com/hashicorp/terraform-provider-aws/issues/14373): cloudfront: support for cache and origin request policies - [#11584](https://github.com/hashicorp/terraform-provider-aws/issues/11584): Add ability to manage VPN tunnel options From 35cdc7e8a5d6662a52eee0a3093e9cb9ff4d7094 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 1 Feb 2021 17:25:00 -0800 Subject: [PATCH 0963/1212] Fixes acceptance test Terraform linting --- .github/workflows/acctest-terraform-lint.yml | 2 +- aws/resource_aws_lakeformation_permissions_test.go | 2 +- scripts/validate-terraform.sh | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/acctest-terraform-lint.yml b/.github/workflows/acctest-terraform-lint.yml index 7a24e1ae113..0aaacd59adc 100644 --- a/.github/workflows/acctest-terraform-lint.yml +++ b/.github/workflows/acctest-terraform-lint.yml @@ -54,7 +54,7 @@ jobs: - run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV - uses: actions/setup-go@v2 with: - go-version: ${{ steps.go-version.outputs.content }} + go-version: ${{ env.GO_VERSION }} - uses: actions/cache@v2 continue-on-error: true timeout-minutes: 2 diff --git a/aws/resource_aws_lakeformation_permissions_test.go b/aws/resource_aws_lakeformation_permissions_test.go index 023beac6d70..c33019ffcdb 100644 --- a/aws/resource_aws_lakeformation_permissions_test.go +++ b/aws/resource_aws_lakeformation_permissions_test.go @@ -719,7 +719,7 @@ resource "aws_glue_catalog_table" "test" { } resource "aws_lakeformation_data_lake_settings" "test" { - // this will result in multiple permissions for iam role + # this will result in multiple permissions for iam role admins = [aws_iam_role.test.arn, data.aws_caller_identity.current.arn] } diff --git a/scripts/validate-terraform.sh b/scripts/validate-terraform.sh index bebe5fcee79..41bcb859347 100755 --- a/scripts/validate-terraform.sh +++ b/scripts/validate-terraform.sh @@ -29,7 +29,6 @@ rules=( "--only=aws_route_specified_multiple_targets" ) while read -r filename ; do - echo "$filename" block_number=0 while IFS= read -r block ; do From 890aa38d28714f211e9ecab57ae795465edb48f5 Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Tue, 2 Feb 2021 08:58:26 +0200 Subject: [PATCH 0964/1212] Apply suggestions from code review Co-authored-by: Kit Ewbank --- aws/resource_aws_sagemaker_model_package_group.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git 
a/aws/resource_aws_sagemaker_model_package_group.go b/aws/resource_aws_sagemaker_model_package_group.go
index 19d36cd6045..06a655df984 100644
--- a/aws/resource_aws_sagemaker_model_package_group.go
+++ b/aws/resource_aws_sagemaker_model_package_group.go
@@ -68,7 +68,7 @@ func resourceAwsSagemakerModelPackageGroupCreate(d *schema.ResourceData, meta in
 	_, err := conn.CreateModelPackageGroup(input)
 	if err != nil {
-		return fmt.Errorf("error creating Sagemaker Model Package Group %s: %w", name, err)
+		return fmt.Errorf("error creating SageMaker Model Package Group %s: %w", name, err)
 	}

 	d.SetId(name)
@@ -91,7 +91,7 @@ func resourceAwsSagemakerModelPackageGroupRead(d *schema.ResourceData, meta inte
 			log.Printf("[WARN] Unable to find Sagemaker Model Package Group (%s); removing from state", d.Id())
 			return nil
 		}
-		return fmt.Errorf("error reading Sagemaker Model Package Group (%s): %w", d.Id(), err)
+		return fmt.Errorf("error reading SageMaker Model Package Group (%s): %w", d.Id(), err)
 	}

@@ -103,7 +103,7 @@ func resourceAwsSagemakerModelPackageGroupRead(d *schema.ResourceData, meta inte
 	tags, err := keyvaluetags.SagemakerListTags(conn, arn)

 	if err != nil {
-		return fmt.Errorf("error listing tags for Sagemaker Model Package Group (%s): %w", d.Id(), err)
+		return fmt.Errorf("error listing tags for SageMaker Model Package Group (%s): %w", d.Id(), err)
 	}

 	if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil {
@@ -120,7 +120,7 @@ func resourceAwsSagemakerModelPackageGroupUpdate(d *schema.ResourceData, meta in
 		o, n := d.GetChange("tags")

 		if err := keyvaluetags.SagemakerUpdateTags(conn, d.Get("arn").(string), o, n); err != nil {
-			return fmt.Errorf("error updating Sagemaker Model Package Group (%s) tags: %s", d.Id(), err)
+			return fmt.Errorf("error updating SageMaker Model Package Group (%s) tags: %s", d.Id(), err)
 		}
 	}

@@ -138,14 +138,14 @@ func resourceAwsSagemakerModelPackageGroupDelete(d *schema.ResourceData, meta in
 		if isAWSErr(err, "ValidationException", "does not exist") {
 			return nil
 		}
-		return fmt.Errorf("error deleting Sagemaker Model Package Group (%s): %w", d.Id(), err)
+		return fmt.Errorf("error deleting SageMaker Model Package Group (%s): %w", d.Id(), err)
 	}

 	if _, err := waiter.ModelPackageGroupDeleted(conn, d.Id()); err != nil {
 		if isAWSErr(err, "ValidationException", "does not exist") {
 			return nil
 		}
-		return fmt.Errorf("error waiting for Sagemaker Model Package Group (%s) to delete: %w", d.Id(), err)
+		return fmt.Errorf("error waiting for SageMaker Model Package Group (%s) to delete: %w", d.Id(), err)
 	}

 	return nil

From bc170b9b040232bf33a410befa628de2ad55f736 Mon Sep 17 00:00:00 2001
From: Matthias Rampke
Date: Mon, 1 Feb 2021 17:02:05 +0000
Subject: [PATCH 0965/1212] aws_instance: acceptance test for gp3 with iops and
 throughput

I suspect a bug when both iops and throughput are specified on an inline
EBS block device. Test the expected behavior.
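For reference, the shape of configuration the new test case exercises looks
roughly like this — a minimal, hypothetical example rather than the exact
test fixture (the real test resolves the AMI via a data source and declares
several other block devices):

```
resource "aws_instance" "example" {
  ami           = "ami-12345678" # placeholder AMI
  instance_type = "t2.medium"

  # gp3 volume with both iops and throughput set explicitly;
  # both values should survive apply.
  ebs_block_device {
    device_name = "/dev/sdg"
    volume_size = 10
    volume_type = "gp3"
    throughput  = 300
    iops        = 4000
  }
}
```
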
Signed-off-by: Matthias Rampke --- aws/resource_aws_instance_test.go | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/aws/resource_aws_instance_test.go b/aws/resource_aws_instance_test.go index 4fc3687923c..020f546b2ef 100644 --- a/aws/resource_aws_instance_test.go +++ b/aws/resource_aws_instance_test.go @@ -513,6 +513,10 @@ func TestAccAWSInstance_blockDevices(t *testing.T) { return fmt.Errorf("block device doesn't exist: /dev/sdf") } + if _, ok := blockDevices["/dev/sdg"]; !ok { + return fmt.Errorf("block device doesn't exist: /dev/sdg") + } + return nil } } @@ -534,7 +538,7 @@ func TestAccAWSInstance_blockDevices(t *testing.T) { resource.TestMatchResourceAttr(resourceName, "root_block_device.0.volume_id", regexp.MustCompile("vol-[a-z0-9]+")), resource.TestCheckResourceAttr(resourceName, "root_block_device.0.volume_size", rootVolumeSize), resource.TestCheckResourceAttr(resourceName, "root_block_device.0.volume_type", "gp2"), - resource.TestCheckResourceAttr(resourceName, "ebs_block_device.#", "4"), + resource.TestCheckResourceAttr(resourceName, "ebs_block_device.#", "5"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "ebs_block_device.*", map[string]string{ "device_name": "/dev/sdb", "volume_size": "9", @@ -555,6 +559,13 @@ func TestAccAWSInstance_blockDevices(t *testing.T) { "volume_type": "gp3", "throughput": "300", }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "ebs_block_device.*", map[string]string{ + "device_name": "/dev/sdg", + "volume_size": "10", + "volume_type": "gp3", + "throughput": "300", + "iops": "4000", + }), resource.TestMatchTypeSetElemNestedAttrs(resourceName, "ebs_block_device.*", map[string]*regexp.Regexp{ "volume_id": regexp.MustCompile("vol-[a-z0-9]+"), }), @@ -3871,6 +3882,14 @@ resource "aws_instance" "test" { throughput = 300 } + ebs_block_device { + device_name = "/dev/sdg" + volume_size = 10 + volume_type = "gp3" + throughput = 300 + iops = 4000 + } + } `, size, delete)) } From 74b2e96b8c3cfe98978d0faa87e0b44a05abfe56 Mon Sep 17 00:00:00 2001 From: Matthias Rampke Date: Tue, 2 Feb 2021 15:07:04 +0000 Subject: [PATCH 0966/1212] aws_instance: acceptance test for gp3 root w/ iops and throughput as requested by @ewbankkit: https://github.com/hashicorp/terraform-provider-aws/pull/17380#issuecomment-771696906 Signed-off-by: Matthias Rampke --- aws/resource_aws_instance_test.go | 65 +++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/aws/resource_aws_instance_test.go b/aws/resource_aws_instance_test.go index 020f546b2ef..6a7f04d24b1 100644 --- a/aws/resource_aws_instance_test.go +++ b/aws/resource_aws_instance_test.go @@ -3807,6 +3807,71 @@ resource "aws_instance" "test" { `, size, delete, volumeType, throughput)) } +func TestAccAWSInstance_GP3RootBlockDevice(t *testing.T) { + var v ec2.Instance + resourceName := "aws_instance.test" + + testCheck := func() resource.TestCheckFunc { + return func(*terraform.State) error { + // Map out the block devices by name, which should be unique. + blockDevices := make(map[string]*ec2.InstanceBlockDeviceMapping) + for _, blockDevice := range v.BlockDeviceMappings { + blockDevices[*blockDevice.DeviceName] = blockDevice + } + + // Check if the root block device exists. 
+ if _, ok := blockDevices["/dev/xvda"]; !ok { + return fmt.Errorf("block device doesn't exist: /dev/xvda") + } + + return nil + } + } + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: resourceName, + IDRefreshIgnore: []string{"ephemeral_block_device", "user_data"}, + Providers: testAccProviders, + CheckDestroy: testAccCheckInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccInstanceConfigGP3RootBlockDevice(), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "root_block_device.#", "1"), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.volume_size", "10"), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.volume_type", "gp3"), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.iops", "4000"), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.throughput", "300"), + testCheck(), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccInstanceConfigGP3RootBlockDevice() string { + return composeConfig(testAccLatestAmazonLinuxHvmEbsAmiConfig(), ` +resource "aws_instance" "test" { + ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id + instance_type = "t2.medium" + + root_block_device { + volume_size = 10 + volume_type = "gp3" + throughput = 300 + iops = 4000 + } +} +`) +} + const testAccAwsEc2InstanceAmiWithEbsRootVolume = ` data "aws_ami" "ami" { owners = ["amazon"] From 9321d5d7a31b424bc6f7e50a6d51a14931c2fd33 Mon Sep 17 00:00:00 2001 From: Matthias Rampke Date: Mon, 1 Feb 2021 17:08:56 +0000 Subject: [PATCH 0967/1212] aws_instance: fix for setting iops+throughput at the same time When both `iops` and `throughput` are specified on an inline block device (`root_block_device` or `ebs_block_device`), both attributes should be honored. With the `else if`, specifying `iops` would make the `throughput` attribute a NOOP during instance creation. The desired throughput shows up correctly in the plan, but after apply, it is actually the default (125) in the state and in EC2. 
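A minimal sketch of the affected configuration (illustrative values and
placeholder AMI only):

```
resource "aws_instance" "example" {
  ami           = "ami-12345678" # placeholder AMI
  instance_type = "t2.medium"

  root_block_device {
    volume_type = "gp3"
    iops        = 4000 # was honored
    throughput  = 300  # was silently dropped to the gp3 default of 125
  }
}
```

With the `else if` replaced by an independent `if`, both attributes are
forwarded at creation.
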
Signed-off-by: Matthias Rampke --- aws/resource_aws_instance.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_instance.go b/aws/resource_aws_instance.go index 7e69c85f6c3..1a649184a94 100644 --- a/aws/resource_aws_instance.go +++ b/aws/resource_aws_instance.go @@ -1987,7 +1987,8 @@ func readBlockDeviceMappingsFromConfig(d *schema.ResourceData, conn *ec2.EC2) ([ // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/12667 return nil, fmt.Errorf("error creating resource: iops attribute not supported for ebs_block_device with volume_type %s", v) } - } else if throughput, ok := bd["throughput"].(int); ok && throughput > 0 { + } + if throughput, ok := bd["throughput"].(int); ok && throughput > 0 { // `throughput` is only valid for gp3 if ec2.VolumeTypeGp3 == strings.ToLower(v) { ebs.Throughput = aws.Int64(int64(throughput)) @@ -2061,7 +2062,8 @@ func readBlockDeviceMappingsFromConfig(d *schema.ResourceData, conn *ec2.EC2) ([ // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/12667 return nil, fmt.Errorf("error creating resource: iops attribute not supported for root_block_device with volume_type %s", v) } - } else if throughput, ok := bd["throughput"].(int); ok && throughput > 0 { + } + if throughput, ok := bd["throughput"].(int); ok && throughput > 0 { // throughput is only valid for gp3 if ec2.VolumeTypeGp3 == strings.ToLower(v) { ebs.Throughput = aws.Int64(int64(throughput)) From 9b9f23a90a9ca2e18acf43690bfe4e246c900e60 Mon Sep 17 00:00:00 2001 From: Matthias Rampke Date: Tue, 2 Feb 2021 07:39:48 +0000 Subject: [PATCH 0968/1212] Add changelog for #17380 Signed-off-by: Matthias Rampke --- .changelog/17380.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/17380.txt diff --git a/.changelog/17380.txt b/.changelog/17380.txt new file mode 100644 index 00000000000..dd744dfeb3c --- /dev/null +++ b/.changelog/17380.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_instance: Fix use of `throughput` and `iops` for `gp3` volumes at the same time +``` From 51c2305420b832bcb1d30e4cc1e10e31f43fc042 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 2 Feb 2021 15:21:25 -0500 Subject: [PATCH 0969/1212] internal/naming: Do not discard terraform- prefixed name prefixes (#17030) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/17017 Changes: ``` * resource/aws_cloudwatch_event_rule: Prevent perpetual differences with `name_prefix` argument values beginning with `terraform-` * resource/aws_security_group: Prevent perpetual differences with `name_prefix` argument values beginning with `terraform-` ``` Output from acceptance testing: ``` --- PASS: TestAccAWSCloudWatchEventRule_basic (70.77s) --- PASS: TestAccAWSCloudWatchEventRule_description (52.06s) --- PASS: TestAccAWSCloudWatchEventRule_EventBusName (75.08s) --- PASS: TestAccAWSCloudWatchEventRule_IsEnabled (68.71s) --- PASS: TestAccAWSCloudWatchEventRule_Name_Generated (33.92s) --- PASS: TestAccAWSCloudWatchEventRule_NamePrefix (33.93s) --- PASS: TestAccAWSCloudWatchEventRule_pattern (50.64s) --- PASS: TestAccAWSCloudWatchEventRule_role (48.35s) --- PASS: TestAccAWSCloudWatchEventRule_ScheduleAndPattern (33.93s) --- PASS: TestAccAWSCloudWatchEventRule_tags (83.41s) --- PASS: TestAccAWSSecurityGroup_allowAll (39.02s) --- PASS: TestAccAWSSecurityGroup_basic (38.37s) --- PASS: TestAccAWSSecurityGroup_change (82.06s) --- PASS: TestAccAWSSecurityGroup_CIDRandGroups (55.51s) --- PASS: TestAccAWSSecurityGroup_defaultEgressClassic 
(13.81s) --- PASS: TestAccAWSSecurityGroup_defaultEgressVPC (49.44s) --- PASS: TestAccAWSSecurityGroup_drift (19.07s) --- PASS: TestAccAWSSecurityGroup_driftComplex (49.44s) --- PASS: TestAccAWSSecurityGroup_egressConfigMode (74.91s) --- PASS: TestAccAWSSecurityGroup_egressWithPrefixList (53.22s) --- PASS: TestAccAWSSecurityGroup_failWithDiffMismatch (39.04s) --- PASS: TestAccAWSSecurityGroup_forceRevokeRulesFalse (682.76s) --- PASS: TestAccAWSSecurityGroup_forceRevokeRulesTrue (737.58s) --- PASS: TestAccAWSSecurityGroup_ingressConfigMode (73.28s) --- PASS: TestAccAWSSecurityGroup_ingressWithCidrAndSGsClassic (14.36s) --- PASS: TestAccAWSSecurityGroup_ingressWithCidrAndSGsVPC (39.61s) --- PASS: TestAccAWSSecurityGroup_ingressWithPrefixList (56.36s) --- PASS: TestAccAWSSecurityGroup_invalidCIDRBlock (6.52s) --- PASS: TestAccAWSSecurityGroup_IPRangeAndSecurityGroupWithSameRules (48.10s) --- PASS: TestAccAWSSecurityGroup_IPRangesWithSameRules (35.83s) --- PASS: TestAccAWSSecurityGroup_ipv4andipv6Egress (35.09s) --- PASS: TestAccAWSSecurityGroup_ipv6 (46.26s) --- PASS: TestAccAWSSecurityGroup_multiIngress (45.44s) --- PASS: TestAccAWSSecurityGroup_Name_Generated (32.72s) --- PASS: TestAccAWSSecurityGroup_Name_TerraformPrefix (32.03s) --- PASS: TestAccAWSSecurityGroup_NamePrefix (34.45s) --- PASS: TestAccAWSSecurityGroup_NamePrefix_TerraformPrefix (32.44s) --- PASS: TestAccAWSSecurityGroup_ruleDescription (112.40s) --- PASS: TestAccAWSSecurityGroup_ruleGathering (57.85s) --- PASS: TestAccAWSSecurityGroup_ruleLimitCidrBlockExceededAppend (71.38s) --- PASS: TestAccAWSSecurityGroup_ruleLimitExceededAllNew (88.92s) --- PASS: TestAccAWSSecurityGroup_ruleLimitExceededAppend (81.66s) --- PASS: TestAccAWSSecurityGroup_ruleLimitExceededPrepend (72.60s) --- PASS: TestAccAWSSecurityGroup_rulesDropOnError (72.96s) --- PASS: TestAccAWSSecurityGroup_self (39.48s) --- PASS: TestAccAWSSecurityGroup_sourceSecurityGroup (31.15s) --- PASS: TestAccAWSSecurityGroup_tags (76.38s) --- PASS: TestAccAWSSecurityGroup_vpc (39.96s) --- PASS: TestAccAWSSecurityGroup_vpcNegOneIngress (45.88s) --- PASS: TestAccAWSSecurityGroup_vpcProtoNumIngress (37.04s) ``` --- .changelog/17030.txt | 7 + aws/internal/naming/naming.go | 23 +-- aws/internal/naming/naming_test.go | 61 ++------ aws/resource_aws_cloudwatch_event_rule.go | 3 +- ...resource_aws_cloudwatch_event_rule_test.go | 2 +- aws/resource_aws_security_group.go | 3 +- aws/resource_aws_security_group_test.go | 133 ++++++++++++++---- docs/contributing/contribution-checklists.md | 5 +- 8 files changed, 134 insertions(+), 103 deletions(-) create mode 100644 .changelog/17030.txt diff --git a/.changelog/17030.txt b/.changelog/17030.txt new file mode 100644 index 00000000000..3689040af2b --- /dev/null +++ b/.changelog/17030.txt @@ -0,0 +1,7 @@ +```release-note:bug +resource/aws_cloudwatch_event_rule: Prevent perpetual differences with `name_prefix` argument values beginning with `terraform-` +``` + +```release-note:bug +resource/aws_security_group: Prevent perpetual differences with `name_prefix` argument values beginning with `terraform-` +``` diff --git a/aws/internal/naming/naming.go b/aws/internal/naming/naming.go index fb7a2ee457b..68f414c2f65 100644 --- a/aws/internal/naming/naming.go +++ b/aws/internal/naming/naming.go @@ -3,7 +3,6 @@ package naming import ( "fmt" "regexp" - "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" @@ -28,11 +27,6 @@ func Generate(name string, namePrefix string) string { 
return resource.UniqueId() } -// HasResourceUniqueIdPrefix returns true if the string has the built-in unique ID prefix -func HasResourceUniqueIdPrefix(s string) bool { - return strings.HasPrefix(s, resource.UniqueIdPrefix) -} - // HasResourceUniqueIdSuffix returns true if the string has the built-in unique ID suffix func HasResourceUniqueIdSuffix(s string) bool { return resourceUniqueIDSuffixRegexp.MatchString(s) @@ -41,24 +35,17 @@ func HasResourceUniqueIdSuffix(s string) bool { // NamePrefixFromName returns a name prefix if the string matches prefix criteria // // The input to this function must be strictly the "name" and not any -// additional information such as a full Amazon Resource Name (ARN). The output -// is suitable for custom resource Importer State functions after nil checking -// to ensure differences are not reported with ImportStateVerify testing, e.g. +// additional information such as a full Amazon Resource Name (ARN). +// +// An expected usage might be: +// +// d.Set("name_prefix", naming.NamePrefixFromName(d.Id())) // -// if namePrefix := naming.NamePrefixFromName(d.Id()); namePrefix != nil { -// d.Set("name_prefix", namePrefix) -// } func NamePrefixFromName(name string) *string { if !HasResourceUniqueIdSuffix(name) { return nil } - // If the name begins with terraform-, then the name may have been fully - // generated (e.g. omitting both name and name_prefix arguments) - if HasResourceUniqueIdPrefix(name) { - return nil - } - namePrefixIndex := len(name) - resource.UniqueIDSuffixLength if namePrefixIndex <= 0 { diff --git a/aws/internal/naming/naming_test.go b/aws/internal/naming/naming_test.go index 2d0241e1097..a4cc0b6e0d0 100644 --- a/aws/internal/naming/naming_test.go +++ b/aws/internal/naming/naming_test.go @@ -53,40 +53,6 @@ func TestGenerate(t *testing.T) { } } -func TestHasResourceUniqueIdPrefix(t *testing.T) { - testCases := []struct { - TestName string - Input string - Expected bool - }{ - { - TestName: "empty", - Input: "", - Expected: false, - }, - { - TestName: "incorrect prefix", - Input: "test-20060102150405000000000001", - Expected: false, - }, - { - TestName: "correct prefix", - Input: "terraform-20060102150405000000000001", - Expected: true, - }, - } - - for _, testCase := range testCases { - t.Run(testCase.TestName, func(t *testing.T) { - got := HasResourceUniqueIdPrefix(testCase.Input) - - if got != testCase.Expected { - t.Errorf("got %t, expected %t", got, testCase.Expected) - } - }) - } -} - func TestHasResourceUniqueIdSuffix(t *testing.T) { testCases := []struct { TestName string @@ -104,25 +70,15 @@ func TestHasResourceUniqueIdSuffix(t *testing.T) { Expected: false, }, { - TestName: "correct suffix, incorrect prefix", + TestName: "correct suffix with numbers", Input: "test-20060102150405000000000001", Expected: true, }, { - TestName: "correct suffix with hex, incorrect prefix", + TestName: "correct suffix with hex", Input: "test-200601021504050000000000a1", Expected: true, }, - { - TestName: "correct suffix, correct prefix", - Input: "terraform-20060102150405000000000001", - Expected: true, - }, - { - TestName: "correct suffix with hex, correct prefix", - Input: "terraform-2006010215040500000000000a", - Expected: true, - }, } for _, testCase := range testCases { @@ -148,29 +104,30 @@ func TestNamePrefixFromName(t *testing.T) { Expected: nil, }, { - TestName: "correct prefix, incorrect suffix", + TestName: "incorrect suffix", Input: "test-123", Expected: nil, }, { - TestName: "correct prefix without hyphen, correct suffix", + TestName: "prefix 
without hyphen, correct suffix", Input: "test20060102150405000000000001", Expected: strPtr("test"), }, { - TestName: "correct prefix with hyphen, correct suffix", + TestName: "prefix with hyphen, correct suffix", Input: "test-20060102150405000000000001", Expected: strPtr("test-"), }, { - TestName: "correct prefix with hyphen, correct suffix with hex", + TestName: "prefix with hyphen, correct suffix with hex", Input: "test-200601021504050000000000f1", Expected: strPtr("test-"), }, + // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/17017 { - TestName: "incorrect prefix, correct suffix", + TestName: "terraform prefix, correct suffix", Input: "terraform-20060102150405000000000001", - Expected: nil, + Expected: strPtr("terraform-"), }, } diff --git a/aws/resource_aws_cloudwatch_event_rule.go b/aws/resource_aws_cloudwatch_event_rule.go index 50ee65e2f59..26ce329a814 100644 --- a/aws/resource_aws_cloudwatch_event_rule.go +++ b/aws/resource_aws_cloudwatch_event_rule.go @@ -45,6 +45,7 @@ func resourceAwsCloudWatchEventRule() *schema.Resource { "name_prefix": { Type: schema.TypeString, Optional: true, + Computed: true, ForceNew: true, ConflictsWith: []string{"name"}, ValidateFunc: validateCloudWatchEventRuleName, @@ -170,7 +171,7 @@ func resourceAwsCloudWatchEventRuleRead(d *schema.ResourceData, meta interface{} d.Set("event_pattern", pattern) } d.Set("name", out.Name) - d.Set("name_prefix", aws.StringValue(naming.NamePrefixFromName(aws.StringValue(out.Name)))) + d.Set("name_prefix", naming.NamePrefixFromName(aws.StringValue(out.Name))) d.Set("role_arn", out.RoleArn) d.Set("schedule_expression", out.ScheduleExpression) d.Set("event_bus_name", out.EventBusName) diff --git a/aws/resource_aws_cloudwatch_event_rule_test.go b/aws/resource_aws_cloudwatch_event_rule_test.go index 7e8a7fc6343..ca0768b38e8 100644 --- a/aws/resource_aws_cloudwatch_event_rule_test.go +++ b/aws/resource_aws_cloudwatch_event_rule_test.go @@ -363,7 +363,7 @@ func TestAccAWSCloudWatchEventRule_Name_Generated(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckCloudWatchEventRuleExists(resourceName, &v), naming.TestCheckResourceAttrNameGenerated(resourceName, "name"), - resource.TestCheckResourceAttr(resourceName, "name_prefix", ""), + resource.TestCheckResourceAttr(resourceName, "name_prefix", "terraform-"), ), }, { diff --git a/aws/resource_aws_security_group.go b/aws/resource_aws_security_group.go index ca3325b3579..eafb6c3d7ca 100644 --- a/aws/resource_aws_security_group.go +++ b/aws/resource_aws_security_group.go @@ -53,6 +53,7 @@ func resourceAwsSecurityGroup() *schema.Resource { "name_prefix": { Type: schema.TypeString, Optional: true, + Computed: true, ForceNew: true, ConflictsWith: []string{"name"}, ValidateFunc: validation.StringLenBetween(0, 100), @@ -381,7 +382,7 @@ func resourceAwsSecurityGroupRead(d *schema.ResourceData, meta interface{}) erro d.Set("arn", sgArn.String()) d.Set("description", sg.Description) d.Set("name", sg.GroupName) - d.Set("name_prefix", aws.StringValue(naming.NamePrefixFromName(aws.StringValue(sg.GroupName)))) + d.Set("name_prefix", naming.NamePrefixFromName(aws.StringValue(sg.GroupName))) d.Set("owner_id", sg.OwnerId) d.Set("vpc_id", sg.VpcId) diff --git a/aws/resource_aws_security_group_test.go b/aws/resource_aws_security_group_test.go index 474c12ebd49..de08b82f655 100644 --- a/aws/resource_aws_security_group_test.go +++ b/aws/resource_aws_security_group_test.go @@ -1094,7 +1094,7 @@ func TestAccAWSSecurityGroup_ipv6(t *testing.T) { }) } -func 
TestAccAWSSecurityGroup_namePrefix(t *testing.T) { +func TestAccAWSSecurityGroup_Name_Generated(t *testing.T) { var group ec2.SecurityGroup resourceName := "aws_security_group.test" @@ -1104,10 +1104,94 @@ func TestAccAWSSecurityGroup_namePrefix(t *testing.T) { CheckDestroy: testAccCheckAWSSecurityGroupDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSSecurityGroupPrefixNameConfig("tf-acc-test-prefix-"), + Config: testAccAWSSecurityGroupConfig_generatedName, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSecurityGroupExists(resourceName, &group), + naming.TestCheckResourceAttrNameGenerated(resourceName, "name"), + resource.TestCheckResourceAttr(resourceName, "name_prefix", "terraform-"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"revoke_rules_on_delete"}, + }, + }, + }) +} + +// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/17017 +func TestAccAWSSecurityGroup_Name_TerraformPrefix(t *testing.T) { + var group ec2.SecurityGroup + resourceName := "aws_security_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSecurityGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSecurityGroupConfigName("terraform-test"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSecurityGroupExists(resourceName, &group), + resource.TestCheckResourceAttr(resourceName, "name", "terraform-test"), + resource.TestCheckResourceAttr(resourceName, "name_prefix", ""), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"revoke_rules_on_delete"}, + }, + }, + }) +} + +func TestAccAWSSecurityGroup_NamePrefix(t *testing.T) { + var group ec2.SecurityGroup + resourceName := "aws_security_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSecurityGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSecurityGroupConfigNamePrefix("tf-acc-test-prefix-"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSecurityGroupExists(resourceName, &group), naming.TestCheckResourceAttrNameFromPrefix(resourceName, "name", "tf-acc-test-prefix-"), + resource.TestCheckResourceAttr(resourceName, "name_prefix", "tf-acc-test-prefix-"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"revoke_rules_on_delete"}, + }, + }, + }) +} + +// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/17017 +func TestAccAWSSecurityGroup_NamePrefix_TerraformPrefix(t *testing.T) { + var group ec2.SecurityGroup + resourceName := "aws_security_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSecurityGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSecurityGroupConfigNamePrefix("terraform-test"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSecurityGroupExists(resourceName, &group), + naming.TestCheckResourceAttrNameFromPrefix(resourceName, "name", "terraform-test"), + resource.TestCheckResourceAttr(resourceName, "name_prefix", "terraform-test"), ), }, { @@ -1455,32 +1539,6 @@ func TestAccAWSSecurityGroup_ruleDescription(t *testing.T) { }) } -func 
TestAccAWSSecurityGroup_generatedName(t *testing.T) { - var group ec2.SecurityGroup - resourceName := "aws_security_group.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSecurityGroupConfig_generatedName, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSecurityGroupExists(resourceName, &group), - naming.TestCheckResourceAttrNameGenerated(resourceName, "name"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"revoke_rules_on_delete"}, - }, - }, - }) -} - func TestAccAWSSecurityGroup_defaultEgressVPC(t *testing.T) { resourceName := "aws_security_group.test" @@ -3380,7 +3438,24 @@ resource "aws_security_group" "test" { `, rName)) } -func testAccAWSSecurityGroupPrefixNameConfig(namePrefix string) string { +func testAccAWSSecurityGroupConfigName(name string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = "tf-acc-test-security-group-name" + } +} + +resource "aws_security_group" "test" { + name = %[1]q + vpc_id = aws_vpc.test.id +} +`, name) +} + +func testAccAWSSecurityGroupConfigNamePrefix(namePrefix string) string { return fmt.Sprintf(` resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" diff --git a/docs/contributing/contribution-checklists.md b/docs/contributing/contribution-checklists.md index 75e6167dfdb..15195154c19 100644 --- a/docs/contributing/contribution-checklists.md +++ b/docs/contributing/contribution-checklists.md @@ -103,6 +103,7 @@ Implementing name generation support for Terraform AWS Provider resources requir "name_prefix": { Type: schema.TypeString, Optional: true, + Computed: true, ForceNew: true, ConflictsWith: []string{"name"}, }, @@ -120,7 +121,7 @@ name := naming.Generate(d.Get("name").(string), d.Get("name_prefix").(string)) ```go d.Set("name", resp.Name) -d.Set("name_prefix", aws.StringValue(naming.NamePrefixFromName(aws.StringValue(resp.Name)))) +d.Set("name_prefix", naming.NamePrefixFromName(aws.StringValue(resp.Name))) ``` ### Resource Name Generation Testing Implementation @@ -143,6 +144,7 @@ func TestAccAWSServiceThing_Name_Generated(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAWSServiceThingExists(resourceName, &thing), naming.TestCheckResourceAttrNameGenerated(resourceName, "name"), + resource.TestCheckResourceAttr(resourceName, "name_prefix", "terraform-"), ), }, // If the resource supports import: @@ -169,6 +171,7 @@ func TestAccAWSServiceThing_NamePrefix(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAWSServiceThingExists(resourceName, &thing), naming.TestCheckResourceAttrNameFromPrefix(resourceName, "name", "tf-acc-test-prefix-"), + resource.TestCheckResourceAttr(resourceName, "name_prefix", "tf-acc-test-prefix-"), ), }, // If the resource supports import: From 798ac2f8fad69fe661373d8b4ce1d3117e78cd01 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Tue, 2 Feb 2021 20:23:04 +0000 Subject: [PATCH 0970/1212] Update CHANGELOG.md for #17030 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9286049fa74..31e453a2749 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,10 +11,12 @@ ENHANCEMENTS: BUG FIXES: * data-source/aws_partition: Correct `reverse_dns_prefix` value in AWS China, C2S, and SC2S 
([#17142](https://github.com/hashicorp/terraform-provider-aws/issues/17142)) +* resource/aws_cloudwatch_event_rule: Prevent perpetual differences with `name_prefix` argument values beginning with `terraform-` ([#17030](https://github.com/hashicorp/terraform-provider-aws/issues/17030)) * resource/aws_glacier_vault: Prevent crash with `GetVaultAccessPolicy` API errors ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645)) * resource/aws_glacier_vault: Properly remove from state when resource does not exist ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645)) * resource/aws_iam_access_key: Ensure `Inactive` `status` is properly configured during resource creation ([#17322](https://github.com/hashicorp/terraform-provider-aws/issues/17322)) * resource/aws_kinesis_firehose_delivery_stream: Use standard retry timeout for IAM eventual consistency and retry on LakeFormation access errors ([#17254](https://github.com/hashicorp/terraform-provider-aws/issues/17254)) +* resource/aws_security_group: Prevent perpetual differences with `name_prefix` argument values beginning with `terraform-` ([#17030](https://github.com/hashicorp/terraform-provider-aws/issues/17030)) ## 3.26.0 (January 28, 2021) From e6e140437f165d1cf2e1efe52d2bcbc643038515 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Tue, 2 Feb 2021 16:49:47 -0800 Subject: [PATCH 0971/1212] Remove redundant code and modify test --- aws/resource_aws_sfn_state_machine.go | 23 +------- aws/resource_aws_sfn_state_machine_test.go | 66 +++++++++++----------- 2 files changed, 36 insertions(+), 53 deletions(-) diff --git a/aws/resource_aws_sfn_state_machine.go b/aws/resource_aws_sfn_state_machine.go index f10d32a2a47..0abf61bd088 100644 --- a/aws/resource_aws_sfn_state_machine.go +++ b/aws/resource_aws_sfn_state_machine.go @@ -45,12 +45,10 @@ func resourceAwsSfnStateMachine() *schema.Resource { "include_execution_data": { Type: schema.TypeBool, Optional: true, - // Default: false, }, "level": { Type: schema.TypeString, Optional: true, - // Default: sfn.LogLevelOff, ValidateFunc: validation.StringInSlice([]string{ sfn.LogLevelAll, sfn.LogLevelError, @@ -179,11 +177,8 @@ func resourceAwsSfnStateMachineRead(d *schema.ResourceData, meta interface{}) er loggingConfiguration := flattenAwsSfnLoggingConfiguration(sm.LoggingConfiguration) - if loggingConfiguration != nil { - err := d.Set("logging_configuration", loggingConfiguration) - if err != nil { - log.Printf("[DEBUG] Error setting logging_configuration %s \n", err) - } + if err := d.Set("logging_configuration", loggingConfiguration); err != nil { + log.Printf("[DEBUG] Error setting logging_configuration %s", err) } if err := d.Set("creation_date", sm.CreationDate.Format(time.RFC3339)); err != nil { @@ -206,18 +201,6 @@ func resourceAwsSfnStateMachineRead(d *schema.ResourceData, meta interface{}) er func resourceAwsSfnStateMachineUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).sfnconn - params := &sfn.UpdateStateMachineInput{ - StateMachineArn: aws.String(d.Id()), - Definition: aws.String(d.Get("definition").(string)), - RoleArn: aws.String(d.Get("role_arn").(string)), - } - - log.Printf("[DEBUG] Updating Step Function State Machine: %#v", params) - - if d.HasChange("logging_configuration") { - params.LoggingConfiguration = expandAwsSfnLoggingConfiguration(d.Get("logging_configuration").([]interface{})) - } - if d.HasChanges("definition", "role_arn", "logging_configuration") { params := &sfn.UpdateStateMachineInput{ StateMachineArn: 
aws.String(d.Id()), @@ -248,7 +231,7 @@ func resourceAwsSfnStateMachineUpdate(d *schema.ResourceData, meta interface{}) } } - return nil + return resourceAwsSfnStateMachineRead(d, meta) } func resourceAwsSfnStateMachineDelete(d *schema.ResourceData, meta interface{}) error { diff --git a/aws/resource_aws_sfn_state_machine_test.go b/aws/resource_aws_sfn_state_machine_test.go index 4701b0c9c7b..b215f26fa39 100644 --- a/aws/resource_aws_sfn_state_machine_test.go +++ b/aws/resource_aws_sfn_state_machine_test.go @@ -57,7 +57,7 @@ func TestAccAWSSfnStateMachine_createUpdate(t *testing.T) { }) } -func TestAccAWSSfnStateMachine_express_createUpdate(t *testing.T) { +func TestAccAWSSfnStateMachine_expressUpdate(t *testing.T) { var sm sfn.DescribeStateMachineOutput name := acctest.RandString(10) @@ -76,8 +76,7 @@ func TestAccAWSSfnStateMachine_express_createUpdate(t *testing.T) { resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "definition"), resource.TestMatchResourceAttr("aws_sfn_state_machine.foo", "definition", regexp.MustCompile(`.*\"MaxAttempts\": 5.*`)), resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "role_arn"), - resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "logging_configuration.#", "1"), - resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "logging_configuration.0.level", sfn.LogLevelOff), + resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "type", "EXPRESS"), ), }, { @@ -89,15 +88,14 @@ func TestAccAWSSfnStateMachine_express_createUpdate(t *testing.T) { resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "creation_date"), resource.TestMatchResourceAttr("aws_sfn_state_machine.foo", "definition", regexp.MustCompile(`.*\"MaxAttempts\": 10.*`)), resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "role_arn"), - resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "logging_configuration.#", "1"), - resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "logging_configuration.0.level", sfn.LogLevelOff), + resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "type", "EXPRESS"), ), }, }, }) } -func TestAccAWSSfnStateMachine_standard_createUpdate(t *testing.T) { +func TestAccAWSSfnStateMachine_standardUpdate(t *testing.T) { var sm sfn.DescribeStateMachineOutput name := acctest.RandString(10) @@ -116,6 +114,7 @@ func TestAccAWSSfnStateMachine_standard_createUpdate(t *testing.T) { resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "definition"), resource.TestMatchResourceAttr("aws_sfn_state_machine.foo", "definition", regexp.MustCompile(`.*\"MaxAttempts\": 5.*`)), resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "role_arn"), + resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "type", "STANDARD"), ), }, { @@ -127,6 +126,7 @@ func TestAccAWSSfnStateMachine_standard_createUpdate(t *testing.T) { resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "creation_date"), resource.TestMatchResourceAttr("aws_sfn_state_machine.foo", "definition", regexp.MustCompile(`.*\"MaxAttempts\": 10.*`)), resource.TestCheckResourceAttrSet("aws_sfn_state_machine.foo", "role_arn"), + resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "type", "STANDARD"), ), }, }, @@ -199,7 +199,7 @@ func TestAccAWSSfnStateMachine_disappears(t *testing.T) { }) } -func TestAccAWSSfnStateMachine_express_LoggingConfiguration(t *testing.T) { +func TestAccAWSSfnStateMachine_expressLoggingConfiguration(t *testing.T) { var sm sfn.DescribeStateMachineOutput name := acctest.RandString(10) @@ 
-209,7 +209,7 @@ func TestAccAWSSfnStateMachine_express_LoggingConfiguration(t *testing.T) { CheckDestroy: testAccCheckAWSSfnStateMachineDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSSfnStateMachineExpressConfigLogConfiguration1(sfn.StateMachineTypeExpress, name, sfn.LogLevelError), + Config: testAccAWSSfnStateMachineExpressConfigLogConfiguration(sfn.StateMachineTypeExpress, name, sfn.LogLevelError), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSfnExists("aws_sfn_state_machine.foo", &sm), resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "status", sfn.StateMachineStatusActive), @@ -223,7 +223,7 @@ func TestAccAWSSfnStateMachine_express_LoggingConfiguration(t *testing.T) { ), }, { - Config: testAccAWSSfnStateMachineExpressConfigLogConfiguration1(sfn.StateMachineTypeExpress, name, sfn.LogLevelAll), + Config: testAccAWSSfnStateMachineExpressConfigLogConfiguration(sfn.StateMachineTypeExpress, name, sfn.LogLevelAll), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSfnExists("aws_sfn_state_machine.foo", &sm), resource.TestCheckResourceAttr("aws_sfn_state_machine.foo", "status", sfn.StateMachineStatusActive), @@ -475,7 +475,7 @@ data "aws_partition" "current" {} data "aws_region" "current" {} resource "aws_iam_role_policy" "iam_policy_for_lambda" { - name = "iam_policy_for_lambda_%s" + name = "iam_policy_for_lambda_%[1]s" role = "${aws_iam_role.iam_for_lambda.id}" policy = < Date: Wed, 3 Feb 2021 01:05:25 -0500 Subject: [PATCH 0972/1212] include relay_state in the UpdatePermissionSet request if available --- .changelog/17423.txt | 3 + aws/resource_aws_ssoadmin_permission_set.go | 7 +- ...source_aws_ssoadmin_permission_set_test.go | 69 +++++++++++++++++++ 3 files changed, 77 insertions(+), 2 deletions(-) create mode 100644 .changelog/17423.txt diff --git a/.changelog/17423.txt b/.changelog/17423.txt new file mode 100644 index 00000000000..96033f45acf --- /dev/null +++ b/.changelog/17423.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_ssoadmin_permission_set: Properly update resource with `relay_state` argument +``` diff --git a/aws/resource_aws_ssoadmin_permission_set.go b/aws/resource_aws_ssoadmin_permission_set.go index 5d2b7542f14..0ca51ce9ae2 100644 --- a/aws/resource_aws_ssoadmin_permission_set.go +++ b/aws/resource_aws_ssoadmin_permission_set.go @@ -194,8 +194,11 @@ func resourceAwsSsoAdminPermissionSetUpdate(d *schema.ResourceData, meta interfa input.Description = aws.String(d.Get("description").(string)) } - if d.HasChange("relay_state") { - input.RelayState = aws.String(d.Get("relay_state").(string)) + // The AWS SSO API requires we send the RelayState value regardless if it's unchanged + // else the existing Permission Set's RelayState value will be cleared + // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/17411 + if v, ok := d.GetOk("relay_state"); ok { + input.RelayState = aws.String(v.(string)) } if d.HasChange("session_duration") { diff --git a/aws/resource_aws_ssoadmin_permission_set_test.go b/aws/resource_aws_ssoadmin_permission_set_test.go index 315af037111..5ed652c5907 100644 --- a/aws/resource_aws_ssoadmin_permission_set_test.go +++ b/aws/resource_aws_ssoadmin_permission_set_test.go @@ -272,6 +272,47 @@ func TestAccAWSSSOAdminPermissionSet_updateSessionDuration(t *testing.T) { }) } +// TestAccAWSSSOAdminPermissionSet_relayState_updateSessionDuration validates +// the resource's unchanged values (primarily relay_state) after updating the session_duration argument +// Reference: 
https://github.com/hashicorp/terraform-provider-aws/issues/17411 +func TestAccAWSSSOAdminPermissionSet_relayState_updateSessionDuration(t *testing.T) { + resourceName := "aws_ssoadmin_permission_set.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSSOAdminInstances(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSOAdminPermissionSetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSSOAdminPermissionSetRelayStateConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSOAdminPermissionSetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "description", rName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "relay_state", "https://example.com"), + resource.TestCheckResourceAttr(resourceName, "session_duration", "PT1H"), + ), + }, + { + Config: testAccAWSSSOAdminPermissionSetRelayStateConfig_updateSessionDuration(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSOAdminPermissionSetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "description", rName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "relay_state", "https://example.com"), + resource.TestCheckResourceAttr(resourceName, "session_duration", "PT2H"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccAWSSSOAdminPermissionSet_mixedPolicyAttachments(t *testing.T) { resourceName := "aws_ssoadmin_permission_set.test" rName := acctest.RandomWithPrefix("tf-acc-test") @@ -416,6 +457,34 @@ resource "aws_ssoadmin_permission_set" "test" { `, rName) } +func testAccAWSSSOAdminPermissionSetRelayStateConfig(rName string) string { + return fmt.Sprintf(` +data "aws_ssoadmin_instances" "test" {} + +resource "aws_ssoadmin_permission_set" "test" { + description = %[1]q + name = %[1]q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + relay_state = "https://example.com" + session_duration = "PT1H" +} +`, rName) +} + +func testAccAWSSSOAdminPermissionSetRelayStateConfig_updateSessionDuration(rName string) string { + return fmt.Sprintf(` +data "aws_ssoadmin_instances" "test" {} + +resource "aws_ssoadmin_permission_set" "test" { + description = %[1]q + name = %[1]q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + relay_state = "https://example.com" + session_duration = "PT2H" +} +`, rName) +} + func testAccAWSSSOAdminPermissionSetConfigTagsSingle(rName, tagKey1, tagValue1 string) string { return fmt.Sprintf(` data "aws_ssoadmin_instances" "test" {} From 012aa799ff85b40ab09a676adb13e467c5730a36 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Tue, 2 Feb 2021 22:06:53 -0800 Subject: [PATCH 0973/1212] Fix formatting --- website/docs/r/sfn_state_machine.html.markdown | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/website/docs/r/sfn_state_machine.html.markdown b/website/docs/r/sfn_state_machine.html.markdown index 0cb2df13867..92f03321730 100644 --- a/website/docs/r/sfn_state_machine.html.markdown +++ b/website/docs/r/sfn_state_machine.html.markdown @@ -18,7 +18,7 @@ Provides a Step Function State Machine resource resource "aws_sfn_state_machine" "sfn_state_machine" { name = "my-state-machine" - role_arn = "${aws_iam_role.iam_for_sfn.arn}" + role_arn = aws_iam_role.iam_for_sfn.arn 
definition = < Date: Wed, 3 Feb 2021 09:38:20 -0500 Subject: [PATCH 0974/1212] tests/resource/aws_iot_policy: Remove TestAccAWSIoTPolicy_invalidJson test (#17408) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/17397 During Terraform 0.12 syntax upgrades in the test configurations, the JSON policy was accidentally fixed so the API no longer threw the error the test was expecting: ``` === CONT TestAccAWSIoTPolicy_invalidJson resource_aws_iot_policy_test.go:70: Step 1/1, expected an error but got none --- FAIL: TestAccAWSIoTPolicy_invalidJson (16.73s) ``` Testing API errors is not a goal of our acceptance testing and this test should be removed rather than "broken" again. Output from acceptance testing: ``` --- PASS: TestAccAWSIoTPolicy_disappears (12.15s) --- PASS: TestAccAWSIoTPolicy_basic (16.11s) ``` --- aws/resource_aws_iot_policy_test.go | 43 ----------------------------- 1 file changed, 43 deletions(-) diff --git a/aws/resource_aws_iot_policy_test.go b/aws/resource_aws_iot_policy_test.go index 2c61833dae9..094211d1550 100644 --- a/aws/resource_aws_iot_policy_test.go +++ b/aws/resource_aws_iot_policy_test.go @@ -2,7 +2,6 @@ package aws import ( "fmt" - "regexp" "testing" "github.com/aws/aws-sdk-go/aws" @@ -64,22 +63,6 @@ func TestAccAWSIoTPolicy_disappears(t *testing.T) { }) } -func TestAccAWSIoTPolicy_invalidJson(t *testing.T) { - rName := acctest.RandomWithPrefix("tf-acc-test") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSIoTPolicyDestroy_basic, - Steps: []resource.TestStep{ - { - Config: testAccAWSIoTPolicyInvalidJsonConfig(rName), - ExpectError: regexp.MustCompile("MalformedPolicyException.*"), - }, - }, - }) -} - func testAccCheckAWSIoTPolicyDestroy_basic(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).iotconn @@ -164,29 +147,3 @@ EOF } `, rName) } - -func testAccAWSIoTPolicyInvalidJsonConfig(rName string) string { - return fmt.Sprintf(` -resource "aws_iot_policy" "test" { - name = "%s" - - policy = < Date: Wed, 3 Feb 2021 09:53:20 -0500 Subject: [PATCH 0975/1212] resource/aws_api_gateway_method_settings: Prevent unexpected errors during creation and deletion (#17234) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/13985 Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16796 Fixing this as part of API Gateway service spike work. As mentioned in the issue this will not help mitigate the cause of the problem, but will at least give a better error message. 
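In short, deletion now treats "already gone" responses from the API as success instead of surfacing them. A condensed sketch of the pattern, excerpted from the `resourceAwsApiGatewayMethodSettingsDelete` hunk later in this patch (error handling only; `input` construction omitted):

```go
// Attempt to remove the method settings via a stage update.
_, err := conn.UpdateStage(input)

// The stage no longer exists, so the settings are already gone.
if tfawserr.ErrCodeEquals(err, apigateway.ErrCodeNotFoundException) {
	return nil
}

// BadRequestException: Cannot remove method setting */* because there is no
// method setting for this method -- the settings were never created or were
// already removed.
if tfawserr.ErrMessageContains(err, apigateway.ErrCodeBadRequestException, "no method setting for this method") {
	return nil
}

// Any other error is wrapped with resource context for a clearer message.
if err != nil {
	return fmt.Errorf("error deleting API Gateway Stage Method Settings (%s): %w", d.Id(), err)
}
```
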
Output from acceptance testing: ``` --- PASS: TestAccAWSAPIGatewayMethodSettings_basic (25.35s) --- PASS: TestAccAWSAPIGatewayMethodSettings_disappears (247.81s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_CacheDataEncrypted (147.06s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_CacheTtlInSeconds (340.17s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_CachingEnabled (448.46s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_DataTraceEnabled (216.61s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_LoggingLevel (55.96s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_MetricsEnabled (397.43s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_Multiple (692.67s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_RequireAuthorizationForCacheControl (178.96s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_ThrottlingBurstLimit (118.97s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_ThrottlingBurstLimitDisabledByDefault (86.39s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_ThrottlingRateLimit (515.72s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_ThrottlingRateLimitDisabledByDefault (484.35s) --- PASS: TestAccAWSAPIGatewayMethodSettings_Settings_UnauthorizedCacheControlHeaderStrategy (312.16s) ``` --- .changelog/17234.txt | 7 +++ ...esource_aws_api_gateway_method_settings.go | 48 ++++++++++++------- 2 files changed, 38 insertions(+), 17 deletions(-) create mode 100644 .changelog/17234.txt diff --git a/.changelog/17234.txt b/.changelog/17234.txt new file mode 100644 index 00000000000..31d9fd07d7c --- /dev/null +++ b/.changelog/17234.txt @@ -0,0 +1,7 @@ +```release-note:bug +resource/aws_api_gateway_method_settings: Prevent confusing Terraform error on resource disappearance during creation +``` + +```release-note:bug +resource/aws_api_gateway_method_settings: Ignore non-existent resource errors during deletion +``` diff --git a/aws/resource_aws_api_gateway_method_settings.go b/aws/resource_aws_api_gateway_method_settings.go index 176ecb0bf14..204a01c5618 100644 --- a/aws/resource_aws_api_gateway_method_settings.go +++ b/aws/resource_aws_api_gateway_method_settings.go @@ -7,6 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) @@ -112,6 +113,10 @@ func resourceAwsApiGatewayMethodSettings() *schema.Resource { } func flattenAwsApiGatewayMethodSettings(settings *apigateway.MethodSetting) []interface{} { + if settings == nil { + return nil + } + return []interface{}{ map[string]interface{}{ "metrics_enabled": settings.MetricsEnabled, @@ -131,26 +136,28 @@ func flattenAwsApiGatewayMethodSettings(settings *apigateway.MethodSetting) []in func resourceAwsApiGatewayMethodSettingsRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).apigatewayconn - log.Printf("[DEBUG] Reading API Gateway Method Settings %s", d.Id()) - input := apigateway.GetStageInput{ + input := &apigateway.GetStageInput{ RestApiId: aws.String(d.Get("rest_api_id").(string)), StageName: aws.String(d.Get("stage_name").(string)), } - stage, err := conn.GetStage(&input) + + stage, err := conn.GetStage(input) + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, apigateway.ErrCodeNotFoundException) { + log.Printf("[WARN] API Gateway Stage Method Settings (%s) not found, removing from state", d.Id()) + d.SetId("") + 
return nil + } + if err != nil { - if isAWSErr(err, apigateway.ErrCodeNotFoundException, "") { - log.Printf("[WARN] API Gateway Stage (%s) not found, removing method settings", d.Id()) - d.SetId("") - return nil - } - return err + return fmt.Errorf("error getting API Gateway Stage Method Settings (%s): %w", d.Id(), err) } - log.Printf("[DEBUG] Received API Gateway Stage: %s", stage) methodPath := d.Get("method_path").(string) settings, ok := stage.MethodSettings[methodPath] - if !ok { - log.Printf("[WARN] API Gateway Method Settings for %q not found, removing", methodPath) + + if !d.IsNewResource() && !ok { + log.Printf("[WARN] API Gateway Stage Method Settings (%s) not found, removing from state", d.Id()) d.SetId("") return nil } @@ -265,9 +272,8 @@ func resourceAwsApiGatewayMethodSettingsUpdate(d *schema.ResourceData, meta inte func resourceAwsApiGatewayMethodSettingsDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).apigatewayconn - log.Printf("[DEBUG] Deleting API Gateway Method Settings: %s", d.Id()) - input := apigateway.UpdateStageInput{ + input := &apigateway.UpdateStageInput{ RestApiId: aws.String(d.Get("rest_api_id").(string)), StageName: aws.String(d.Get("stage_name").(string)), PatchOperations: []*apigateway.PatchOperation{ @@ -277,12 +283,20 @@ func resourceAwsApiGatewayMethodSettingsDelete(d *schema.ResourceData, meta inte }, }, } - log.Printf("[DEBUG] Updating API Gateway Stage: %s", input) - _, err := conn.UpdateStage(&input) + _, err := conn.UpdateStage(input) + + if tfawserr.ErrCodeEquals(err, apigateway.ErrCodeNotFoundException) { + return nil + } + + // BadRequestException: Cannot remove method setting */* because there is no method setting for this method + if tfawserr.ErrMessageContains(err, apigateway.ErrCodeBadRequestException, "no method setting for this method") { + return nil + } if err != nil { - return fmt.Errorf("updating API Gateway Stage failed: %w", err) + return fmt.Errorf("error deleting API Gateway Stage Method Settings (%s): %w", d.Id(), err) } return nil From ebf5dcda7ccfd656d605c34068b63eaa0b8019b1 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 3 Feb 2021 09:55:02 -0500 Subject: [PATCH 0976/1212] tests/provider: Refactor out Terraform 0.15 removed list/map functions (#17248) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/17133 Output from acceptance testing: ``` --- PASS: TestAccAWSAPIGatewayDeployment_Triggers (61.68s) --- PASS: TestAccAWSAPIGatewayV2Deployment_Triggers (76.45s) --- PASS: TestAccAWSCodeBuildProject_Environment_RegistryCredential (40.80s) ``` --- aws/resource_aws_api_gateway_deployment_test.go | 4 +--- aws/resource_aws_apigatewayv2_deployment_test.go | 8 ++++---- aws/resource_aws_codebuild_project_test.go | 4 ++-- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/aws/resource_aws_api_gateway_deployment_test.go b/aws/resource_aws_api_gateway_deployment_test.go index fba017bc09f..885cdfa2891 100644 --- a/aws/resource_aws_api_gateway_deployment_test.go +++ b/aws/resource_aws_api_gateway_deployment_test.go @@ -403,9 +403,7 @@ resource "aws_api_gateway_deployment" "test" { stage_name = "tf-acc-test" triggers = { - redeployment = sha1(join(",", list( - jsonencode(aws_api_gateway_integration.test), - ))) + redeployment = sha1(jsonencode(aws_api_gateway_integration.test)) } lifecycle { diff --git a/aws/resource_aws_apigatewayv2_deployment_test.go b/aws/resource_aws_apigatewayv2_deployment_test.go index 5ceff222ead..39a9d2af1e7 100644 --- 
a/aws/resource_aws_apigatewayv2_deployment_test.go +++ b/aws/resource_aws_apigatewayv2_deployment_test.go @@ -256,10 +256,10 @@ resource "aws_apigatewayv2_deployment" "test" { api_id = aws_apigatewayv2_api.test.id triggers = { - redeployment = sha1(join(",", list( - jsonencode(aws_apigatewayv2_integration.test), - jsonencode(aws_apigatewayv2_route.test), - ))) + redeployment = sha1(jsonencode([ + aws_apigatewayv2_integration.test, + aws_apigatewayv2_route.test, + ])) } lifecycle { diff --git a/aws/resource_aws_codebuild_project_test.go b/aws/resource_aws_codebuild_project_test.go index fa27484da2f..873c60516b9 100644 --- a/aws/resource_aws_codebuild_project_test.go +++ b/aws/resource_aws_codebuild_project_test.go @@ -2723,7 +2723,7 @@ resource "aws_secretsmanager_secret" "test" { resource "aws_secretsmanager_secret_version" "test" { secret_id = aws_secretsmanager_secret.test.id - secret_string = jsonencode(map("username", "user", "password", "pass")) + secret_string = jsonencode({ username : "user", password : "pass" }) } `, rName)) } @@ -2763,7 +2763,7 @@ resource "aws_secretsmanager_secret" "test" { resource "aws_secretsmanager_secret_version" "test" { secret_id = aws_secretsmanager_secret.test.id - secret_string = jsonencode(map("username", "user", "password", "pass")) + secret_string = jsonencode({ username : "user", password : "pass" }) } `, rName)) } From 3155c4b0693085594ffb557571ca2d720db1b816 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 3 Feb 2021 09:55:43 -0500 Subject: [PATCH 0977/1212] tests/resource/aws_emr_instance_group: Add status attribute to ImportStateVerifyIgnore (#17255) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16698 The `status` attribute for EMR Instance Groups can be eventually consistent or otherwise affected by background EMR operations after creation and attempting to re-read the information back for import testing: ``` === CONT TestAccAWSEMRInstanceGroup_ConfigurationsJson TestAccAWSEMRInstanceGroup_ConfigurationsJson: resource_aws_emr_instance_group_test.go:99: ImportStateVerify attributes not equivalent. Difference is shown below. Top is actual, bottom is expected. (map[string]string) (len=1) { (string) (len=6) "status": (string) (len=13) "RECONFIGURING" } (map[string]string) (len=1) { (string) (len=6) "status": (string) (len=7) "RUNNING" } --- FAIL: TestAccAWSEMRInstanceGroup_ConfigurationsJson (851.18s) ``` This value change is a false positive and can be ignored for all `aws_emr_instance_group` acceptance tests. 
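The change itself is mechanical: each import step in the affected tests gains an `ImportStateVerifyIgnore` entry. A sketch of the resulting `resource.TestStep` shape (identifiers as in the existing tests):

```go
{
	ResourceName:      resourceName,
	ImportState:       true,
	ImportStateIdFunc: testAccAWSEMRInstanceGroupResourceImportStateIdFunc(resourceName),
	ImportStateVerify: true,
	// status may legitimately read back as RECONFIGURING rather than RUNNING
	// while background EMR operations settle, so skip it in the comparison.
	ImportStateVerifyIgnore: []string{"status"},
},
```
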
Output from acceptance testing: ``` --- PASS: TestAccAWSEMRInstanceGroup_AutoScalingPolicy (871.02s) --- PASS: TestAccAWSEMRInstanceGroup_basic (825.33s) --- PASS: TestAccAWSEMRInstanceGroup_BidPrice (1399.89s) --- PASS: TestAccAWSEMRInstanceGroup_ConfigurationsJson (955.92s) --- PASS: TestAccAWSEMRInstanceGroup_disappears_EmrCluster (636.55s) --- PASS: TestAccAWSEMRInstanceGroup_EbsConfig_EbsOptimized (1078.71s) --- PASS: TestAccAWSEMRInstanceGroup_InstanceCount (825.82s) ``` --- aws/resource_aws_emr_instance_group_test.go | 81 ++++++++++++--------- 1 file changed, 45 insertions(+), 36 deletions(-) diff --git a/aws/resource_aws_emr_instance_group_test.go b/aws/resource_aws_emr_instance_group_test.go index ab797cf7347..821f7196d49 100644 --- a/aws/resource_aws_emr_instance_group_test.go +++ b/aws/resource_aws_emr_instance_group_test.go @@ -33,10 +33,11 @@ func TestAccAWSEMRInstanceGroup_basic(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateIdFunc: testAccAWSEMRInstanceGroupResourceImportStateIdFunc(resourceName), - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccAWSEMRInstanceGroupResourceImportStateIdFunc(resourceName), + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, }, }, }) @@ -60,10 +61,11 @@ func TestAccAWSEMRInstanceGroup_BidPrice(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateIdFunc: testAccAWSEMRInstanceGroupResourceImportStateIdFunc(resourceName), - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccAWSEMRInstanceGroupResourceImportStateIdFunc(resourceName), + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, }, { Config: testAccAWSEmrInstanceGroupConfig_BidPrice(rInt), @@ -74,10 +76,11 @@ func TestAccAWSEMRInstanceGroup_BidPrice(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateIdFunc: testAccAWSEMRInstanceGroupResourceImportStateIdFunc(resourceName), - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccAWSEMRInstanceGroupResourceImportStateIdFunc(resourceName), + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, }, { Config: testAccAWSEmrInstanceGroupConfig_basic(rInt), @@ -109,10 +112,11 @@ func TestAccAWSEMRInstanceGroup_ConfigurationsJson(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateIdFunc: testAccAWSEMRInstanceGroupResourceImportStateIdFunc(resourceName), - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccAWSEMRInstanceGroupResourceImportStateIdFunc(resourceName), + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, }, { Config: testAccAWSEmrInstanceGroupConfig_ConfigurationsJson(rInt, "partitionName2"), @@ -122,10 +126,11 @@ func TestAccAWSEMRInstanceGroup_ConfigurationsJson(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateIdFunc: testAccAWSEMRInstanceGroupResourceImportStateIdFunc(resourceName), - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccAWSEMRInstanceGroupResourceImportStateIdFunc(resourceName), + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, }, }, }) @@ -149,10 +154,11 @@ func TestAccAWSEMRInstanceGroup_AutoScalingPolicy(t *testing.T) { ), }, { - ResourceName: resourceName, - 
ImportState: true, - ImportStateIdFunc: testAccAWSEMRInstanceGroupResourceImportStateIdFunc(resourceName), - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccAWSEMRInstanceGroupResourceImportStateIdFunc(resourceName), + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, }, { Config: testAccAWSEmrInstanceGroupConfig_AutoScalingPolicy(rInt, 2, 3), @@ -162,10 +168,11 @@ func TestAccAWSEMRInstanceGroup_AutoScalingPolicy(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateIdFunc: testAccAWSEMRInstanceGroupResourceImportStateIdFunc(resourceName), - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccAWSEMRInstanceGroupResourceImportStateIdFunc(resourceName), + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, }, }, }) @@ -188,10 +195,11 @@ func TestAccAWSEMRInstanceGroup_InstanceCount(t *testing.T) { Check: testAccCheckAWSEmrInstanceGroupExists(resourceName, &ig), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateIdFunc: testAccAWSEMRInstanceGroupResourceImportStateIdFunc(resourceName), - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccAWSEMRInstanceGroupResourceImportStateIdFunc(resourceName), + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, }, { Config: testAccAWSEmrInstanceGroupConfig_zeroCount(rInt), @@ -246,10 +254,11 @@ func TestAccAWSEMRInstanceGroup_EbsConfig_EbsOptimized(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateIdFunc: testAccAWSEMRInstanceGroupResourceImportStateIdFunc(resourceName), - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccAWSEMRInstanceGroupResourceImportStateIdFunc(resourceName), + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, }, { Config: testAccAWSEmrInstanceGroupConfig_ebsConfig(rInt, false), From 2b1ff33bdfd78147ac24ed36821f77fadc0fdb94 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 3 Feb 2021 09:57:08 -0500 Subject: [PATCH 0978/1212] tests/service/imagebuilder: Fix distribution configuration InvalidParameterValueException (#17409) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/17390 The Image Builder service in AWS Commercial is now performing additional validation on Distribution Configuration creation, presumably due to the launch of container distribution support: ``` === CONT TestAccAwsImageBuilderDistributionConfigurationDataSource_Arn data_source_aws_imagebuilder_distribution_configuration_test.go:16: Step 1/1 error: Error running apply: Error: error creating Image Builder Distribution Configuration: InvalidParameterValueException: The value supplied for parameter 'distributions[0]' is not valid. One or more outputs should be provided for each region in a distribution configuration. --- FAIL: TestAccAwsImageBuilderDistributionConfigurationDataSource_Arn (8.02s) === CONT TestAccAwsImageBuilderDistributionConfiguration_basic resource_aws_imagebuilder_distribution_configuration_test.go:80: Step 1/2 error: Error running apply: Error: error creating Image Builder Distribution Configuration: InvalidParameterValueException: The value supplied for parameter 'distributions[0]' is not valid. One or more outputs should be provided for each region in a distribution configuration. 
--- FAIL: TestAccAwsImageBuilderDistributionConfiguration_basic (29.55s) === CONT TestAccAwsImageBuilderDistributionConfiguration_disappears resource_aws_imagebuilder_distribution_configuration_test.go:110: Step 1/1 error: Error running apply: Error: error creating Image Builder Distribution Configuration: InvalidParameterValueException: The value supplied for parameter 'distributions[0]' is not valid. One or more outputs should be provided for each region in a distribution configuration. --- FAIL: TestAccAwsImageBuilderDistributionConfiguration_disappears (32.72s) === CONT TestAccAwsImageBuilderDistributionConfiguration_Description resource_aws_imagebuilder_distribution_configuration_test.go:131: Step 1/3 error: Error running apply: Error: error creating Image Builder Distribution Configuration: InvalidParameterValueException: The value supplied for parameter 'distributions[0]' is not valid. One or more outputs should be provided for each region in a distribution configuration. --- FAIL: TestAccAwsImageBuilderDistributionConfiguration_Description (33.09s) === CONT TestAccAwsImageBuilderDistributionConfiguration_Distribution resource_aws_imagebuilder_distribution_configuration_test.go:164: Step 1/2 error: Error running apply: Error: error creating Image Builder Distribution Configuration: InvalidParameterValueException: The value supplied for parameter 'distributions[0]' is not valid. One or more outputs should be provided for each region in a distribution configuration. --- FAIL: TestAccAwsImageBuilderDistributionConfiguration_Distribution (44.85s) === CONT TestAccAwsImageBuilderDistributionConfiguration_Tags resource_aws_imagebuilder_distribution_configuration_test.go:487: Step 1/4 error: Error running apply: Error: error creating Image Builder Distribution Configuration: InvalidParameterValueException: The value supplied for parameter 'distributions[0]' is not valid. One or more outputs should be provided for each region in a distribution configuration. --- FAIL: TestAccAwsImageBuilderDistributionConfiguration_Tags (33.09s) === CONT TestAccAwsImageBuilderImagePipeline_DistributionConfigurationArn resource_aws_imagebuilder_image_pipeline_test.go:178: Step 1/3 error: Error running apply: Error: error creating Image Builder Distribution Configuration: InvalidParameterValueException: The value supplied for parameter 'distributions[0]' is not valid. One or more outputs should be provided for each region in a distribution configuration. --- FAIL: TestAccAwsImageBuilderImagePipeline_DistributionConfigurationArn (48.35s) === CONT TestAccAwsImageBuilderImage_DistributionConfigurationArn resource_aws_imagebuilder_image_test.go:146: Step 1/2 error: Error running apply: Error: error creating Image Builder Distribution Configuration: InvalidParameterValueException: The value supplied for parameter 'distributions[0]' is not valid. One or more outputs should be provided for each region in a distribution configuration. --- FAIL: TestAccAwsImageBuilderImage_DistributionConfigurationArn (54.05s) ``` The test configurations are updated to include limited AMI distribution configuration so testing can pass without adding container distribution support. 
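Concretely, every `distribution` block in the affected configurations gains the same minimal AMI output. A sketch using a hypothetical helper name (the distribution body mirrors the amended configurations below; assumes the usual test-file imports such as `fmt`):

```go
// testAccExampleDistributionConfiguration is a hypothetical helper name; the
// distribution body mirrors the amended test configurations in this patch.
func testAccExampleDistributionConfiguration(rName string) string {
	return fmt.Sprintf(`
data "aws_region" "current" {}

resource "aws_imagebuilder_distribution_configuration" "test" {
  name = %[1]q

  distribution {
    # Minimal AMI output so the new "distributions[0]" validation passes.
    ami_distribution_configuration {
      name = "{{ imagebuilder:buildDate }}"
    }

    region = data.aws_region.current.name
  }
}
`, rName)
}
```
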
Output from acceptance testing in AWS Commercial: ``` --- FAIL: TestAccAwsImageBuilderImagePipeline_Schedule_PipelineExecutionStartCondition (37.70s) # https://github.com/hashicorp/terraform-provider-aws/issues/17396 --- FAIL: TestAccAwsImageBuilderImagePipeline_Schedule_ScheduleExpression (34.62s) # https://github.com/hashicorp/terraform-provider-aws/issues/17396 --- PASS: TestAccAwsImageBuilderDistributionConfiguration_basic (30.84s) --- PASS: TestAccAwsImageBuilderDistributionConfiguration_Description (51.76s) --- PASS: TestAccAwsImageBuilderDistributionConfiguration_disappears (27.33s) --- PASS: TestAccAwsImageBuilderDistributionConfiguration_Distribution (30.73s) --- PASS: TestAccAwsImageBuilderDistributionConfiguration_Distribution_AmiDistributionConfiguration_AmiTags (60.25s) --- PASS: TestAccAwsImageBuilderDistributionConfiguration_Distribution_AmiDistributionConfiguration_Description (47.60s) --- PASS: TestAccAwsImageBuilderDistributionConfiguration_Distribution_AmiDistributionConfiguration_KmsKeyId (66.08s) --- PASS: TestAccAwsImageBuilderDistributionConfiguration_Distribution_AmiDistributionConfiguration_LaunchPermission_UserGroups (48.36s) --- PASS: TestAccAwsImageBuilderDistributionConfiguration_Distribution_AmiDistributionConfiguration_LaunchPermission_UserIds (63.06s) --- PASS: TestAccAwsImageBuilderDistributionConfiguration_Distribution_AmiDistributionConfiguration_Name (72.42s) --- PASS: TestAccAwsImageBuilderDistributionConfiguration_Distribution_AmiDistributionConfiguration_TargetAccountIds (73.19s) --- PASS: TestAccAwsImageBuilderDistributionConfiguration_Distribution_LicenseConfigurationArns (69.33s) --- PASS: TestAccAwsImageBuilderDistributionConfiguration_Tags (92.68s) --- PASS: TestAccAwsImageBuilderDistributionConfigurationDataSource_Arn (35.66s) --- PASS: TestAccAwsImageBuilderImage_basic (1449.31s) --- PASS: TestAccAwsImageBuilderImage_disappears (1449.08s) --- PASS: TestAccAwsImageBuilderImage_DistributionConfigurationArn (1829.34s) --- PASS: TestAccAwsImageBuilderImage_EnhancedImageMetadataEnabled (1207.33s) --- PASS: TestAccAwsImageBuilderImage_ImageTestsConfiguration_ImageTestsEnabled (1095.28s) --- PASS: TestAccAwsImageBuilderImage_ImageTestsConfiguration_TimeoutMinutes (1222.96s) --- PASS: TestAccAwsImageBuilderImage_Tags (1661.80s) --- PASS: TestAccAwsImageBuilderImagePipeline_basic (65.25s) --- PASS: TestAccAwsImageBuilderImagePipeline_Description (96.68s) --- PASS: TestAccAwsImageBuilderImagePipeline_disappears (60.46s) --- PASS: TestAccAwsImageBuilderImagePipeline_DistributionConfigurationArn (112.52s) --- PASS: TestAccAwsImageBuilderImagePipeline_EnhancedImageMetadataEnabled (95.24s) --- PASS: TestAccAwsImageBuilderImagePipeline_ImageRecipeArn (96.45s) --- PASS: TestAccAwsImageBuilderImagePipeline_ImageTestsConfiguration_ImageTestsEnabled (88.54s) --- PASS: TestAccAwsImageBuilderImagePipeline_ImageTestsConfiguration_TimeoutMinutes (107.57s) --- PASS: TestAccAwsImageBuilderImagePipeline_InfrastructureConfigurationArn (100.25s) --- PASS: TestAccAwsImageBuilderImagePipeline_Status (96.04s) --- PASS: TestAccAwsImageBuilderImagePipeline_Tags (121.74s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- PASS: TestAccAwsImageBuilderDistributionConfiguration_basic (30.27s) --- PASS: TestAccAwsImageBuilderDistributionConfiguration_Description (72.88s) --- PASS: TestAccAwsImageBuilderDistributionConfiguration_disappears (37.27s) --- PASS: TestAccAwsImageBuilderDistributionConfiguration_Distribution (51.44s) --- PASS: 
TestAccAwsImageBuilderDistributionConfiguration_Distribution_AmiDistributionConfiguration_AmiTags (50.52s) --- PASS: TestAccAwsImageBuilderDistributionConfiguration_Distribution_AmiDistributionConfiguration_Description (57.29s) --- PASS: TestAccAwsImageBuilderDistributionConfiguration_Distribution_AmiDistributionConfiguration_KmsKeyId (72.36s) --- PASS: TestAccAwsImageBuilderDistributionConfiguration_Distribution_AmiDistributionConfiguration_LaunchPermission_UserGroups (43.11s) --- PASS: TestAccAwsImageBuilderDistributionConfiguration_Distribution_AmiDistributionConfiguration_LaunchPermission_UserIds (76.37s) --- PASS: TestAccAwsImageBuilderDistributionConfiguration_Distribution_AmiDistributionConfiguration_Name (48.78s) --- PASS: TestAccAwsImageBuilderDistributionConfiguration_Distribution_AmiDistributionConfiguration_TargetAccountIds (54.17s) --- PASS: TestAccAwsImageBuilderDistributionConfiguration_Distribution_LicenseConfigurationArns (81.88s) --- PASS: TestAccAwsImageBuilderDistributionConfiguration_Tags (82.35s) --- PASS: TestAccAwsImageBuilderDistributionConfigurationDataSource_Arn (41.36s) --- PASS: TestAccAwsImageBuilderImage_basic (1481.43s) --- PASS: TestAccAwsImageBuilderImage_disappears (1354.00s) --- PASS: TestAccAwsImageBuilderImage_DistributionConfigurationArn (1731.97s) --- PASS: TestAccAwsImageBuilderImage_EnhancedImageMetadataEnabled (976.74s) --- PASS: TestAccAwsImageBuilderImage_ImageTestsConfiguration_ImageTestsEnabled (976.18s) --- PASS: TestAccAwsImageBuilderImage_ImageTestsConfiguration_TimeoutMinutes (1472.80s) --- PASS: TestAccAwsImageBuilderImage_Tags (1287.54s) --- PASS: TestAccAwsImageBuilderImagePipeline_basic (63.98s) --- PASS: TestAccAwsImageBuilderImagePipeline_Description (107.93s) --- PASS: TestAccAwsImageBuilderImagePipeline_disappears (61.53s) --- PASS: TestAccAwsImageBuilderImagePipeline_DistributionConfigurationArn (108.80s) --- PASS: TestAccAwsImageBuilderImagePipeline_EnhancedImageMetadataEnabled (106.06s) --- PASS: TestAccAwsImageBuilderImagePipeline_ImageRecipeArn (65.08s) --- PASS: TestAccAwsImageBuilderImagePipeline_ImageTestsConfiguration_ImageTestsEnabled (65.10s) --- PASS: TestAccAwsImageBuilderImagePipeline_ImageTestsConfiguration_TimeoutMinutes (64.33s) --- PASS: TestAccAwsImageBuilderImagePipeline_InfrastructureConfigurationArn (107.98s) --- PASS: TestAccAwsImageBuilderImagePipeline_Schedule_PipelineExecutionStartCondition (63.33s) --- PASS: TestAccAwsImageBuilderImagePipeline_Schedule_ScheduleExpression (105.97s) --- PASS: TestAccAwsImageBuilderImagePipeline_Status (100.15s) --- PASS: TestAccAwsImageBuilderImagePipeline_Tags (129.05s) ``` --- ...builder_distribution_configuration_test.go | 4 ++++ ...builder_distribution_configuration_test.go | 24 +++++++++++++++++++ ...ce_aws_imagebuilder_image_pipeline_test.go | 8 +++++++ aws/resource_aws_imagebuilder_image_test.go | 4 ++++ 4 files changed, 40 insertions(+) diff --git a/aws/data_source_aws_imagebuilder_distribution_configuration_test.go b/aws/data_source_aws_imagebuilder_distribution_configuration_test.go index d1cc2a64253..5b25a6c352a 100644 --- a/aws/data_source_aws_imagebuilder_distribution_configuration_test.go +++ b/aws/data_source_aws_imagebuilder_distribution_configuration_test.go @@ -42,6 +42,10 @@ resource "aws_imagebuilder_distribution_configuration" "test" { name = %[1]q distribution { + ami_distribution_configuration { + name = "{{ imagebuilder:buildDate }}" + } + region = data.aws_region.current.name } } diff --git 
a/aws/resource_aws_imagebuilder_distribution_configuration_test.go b/aws/resource_aws_imagebuilder_distribution_configuration_test.go index 1e079a70058..7f44e6e445e 100644 --- a/aws/resource_aws_imagebuilder_distribution_configuration_test.go +++ b/aws/resource_aws_imagebuilder_distribution_configuration_test.go @@ -585,6 +585,10 @@ resource "aws_imagebuilder_distribution_configuration" "test" { name = %[1]q distribution { + ami_distribution_configuration { + name = "{{ imagebuilder:buildDate }}" + } + region = data.aws_region.current.name } } @@ -605,10 +609,18 @@ resource "aws_imagebuilder_distribution_configuration" "test" { name = %[1]q distribution { + ami_distribution_configuration { + name = "{{ imagebuilder:buildDate }}" + } + region = data.aws_region.current.name } distribution { + ami_distribution_configuration { + name = "{{ imagebuilder:buildDate }}" + } + region = data.aws_region.alternate.name } } @@ -821,6 +833,10 @@ resource "aws_imagebuilder_distribution_configuration" "test" { name = %[1]q distribution { + ami_distribution_configuration { + name = "{{ imagebuilder:buildDate }}" + } + region = data.aws_region.current.name } } @@ -835,6 +851,10 @@ resource "aws_imagebuilder_distribution_configuration" "test" { name = %[1]q distribution { + ami_distribution_configuration { + name = "{{ imagebuilder:buildDate }}" + } + region = data.aws_region.current.name } @@ -853,6 +873,10 @@ resource "aws_imagebuilder_distribution_configuration" "test" { name = %[1]q distribution { + ami_distribution_configuration { + name = "{{ imagebuilder:buildDate }}" + } + region = data.aws_region.current.name } diff --git a/aws/resource_aws_imagebuilder_image_pipeline_test.go b/aws/resource_aws_imagebuilder_image_pipeline_test.go index 9935fb1f640..ac39b3b7c24 100644 --- a/aws/resource_aws_imagebuilder_image_pipeline_test.go +++ b/aws/resource_aws_imagebuilder_image_pipeline_test.go @@ -651,6 +651,10 @@ resource "aws_imagebuilder_distribution_configuration" "test" { name = "%[1]s-1" distribution { + ami_distribution_configuration { + name = "{{ imagebuilder:buildDate }}" + } + region = data.aws_region.current.name } @@ -676,6 +680,10 @@ resource "aws_imagebuilder_distribution_configuration" "test" { name = "%[1]s-2" distribution { + ami_distribution_configuration { + name = "{{ imagebuilder:buildDate }}" + } + region = data.aws_region.current.name } diff --git a/aws/resource_aws_imagebuilder_image_test.go b/aws/resource_aws_imagebuilder_image_test.go index 1bc883d76a7..159da5627bd 100644 --- a/aws/resource_aws_imagebuilder_image_test.go +++ b/aws/resource_aws_imagebuilder_image_test.go @@ -452,6 +452,10 @@ resource "aws_imagebuilder_distribution_configuration" "test" { name = %[1]q distribution { + ami_distribution_configuration { + name = "{{ imagebuilder:buildDate }}" + } + region = data.aws_region.current.name } } From b3299dc0dc71d3f83e6f039ac374a9fc3375453b Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 3 Feb 2021 09:58:32 -0500 Subject: [PATCH 0979/1212] resource/aws_subnet: Apply attribute waiter logic to map_public_ip_on_launch attribute and tidy up attribute testing (#17410) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16696 This resource attribute has long been the cause of flakey acceptance testing across the codebase, such as: ``` === CONT TestAccAWSLB_applicationLoadBalancer_updateHttp2 TestAccAWSLB_applicationLoadBalancer_updateHttp2: resource_aws_lb_test.go:522: Step 1/3 error: After applying this test step, the plan was not empty. 
stdout: An execution plan has been generated and is shown below. Resource actions are indicated with the following symbols: ~ update in-place Terraform will perform the following actions: # aws_subnet.alb_test[0] will be updated in-place ~ resource "aws_subnet" "alb_test" { id = "subnet-088715d2b9827af18" ~ map_public_ip_on_launch = false -> true tags = { "Name" = "tf-acc-lb-basic-0" } # (7 unchanged attributes hidden) } Plan: 0 to add, 1 to change, 0 to destroy. ``` Adding logic, similar to `SubnetMapCustomerOwnedIpOnLaunchUpdated` in https://github.com/hashicorp/terraform-provider-aws/pull/16676 can be used to ensure the attribute value has flipped correctly after calling the `ModifySubnetAttribute` API. This attribute waiter setup will be added to a forthcoming Retries and Waiters section in the Contribution Guide. Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAWSLB_applicationLoadBalancer_updateHttp2 (302.23s) --- PASS: TestAccAWSSubnet_availabilityZoneId (37.76s) --- PASS: TestAccAWSSubnet_basic (35.50s) --- PASS: TestAccAWSSubnet_disappears (27.55s) --- PASS: TestAccAWSSubnet_enableIpv6 (88.46s) --- PASS: TestAccAWSSubnet_ignoreTags (57.96s) --- PASS: TestAccAWSSubnet_ipv6 (99.54s) --- PASS: TestAccAWSSubnet_MapPublicIpOnLaunch (99.92s) --- PASS: TestAccAWSSubnet_tags (84.05s) --- SKIP: TestAccAWSSubnet_CustomerOwnedIpv4Pool (7.36s) --- SKIP: TestAccAWSSubnet_MapCustomerOwnedIpOnLaunch (1.82s) --- SKIP: TestAccAWSSubnet_outpost (1.62s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- PASS: TestAccAWSLB_applicationLoadBalancer_updateHttp2 (310.96s) --- PASS: TestAccAWSSubnet_availabilityZoneId (42.49s) --- PASS: TestAccAWSSubnet_basic (39.27s) --- PASS: TestAccAWSSubnet_disappears (31.78s) --- PASS: TestAccAWSSubnet_enableIpv6 (100.77s) --- PASS: TestAccAWSSubnet_ignoreTags (67.49s) --- PASS: TestAccAWSSubnet_ipv6 (108.38s) --- PASS: TestAccAWSSubnet_MapPublicIpOnLaunch (109.87s) --- PASS: TestAccAWSSubnet_tags (95.24s) --- SKIP: TestAccAWSSubnet_CustomerOwnedIpv4Pool (7.11s) --- SKIP: TestAccAWSSubnet_MapCustomerOwnedIpOnLaunch (2.17s) --- SKIP: TestAccAWSSubnet_outpost (10.06s) ``` --- aws/internal/service/ec2/waiter/status.go | 21 +++++ aws/internal/service/ec2/waiter/waiter.go | 18 +++++ aws/resource_aws_subnet.go | 12 ++- aws/resource_aws_subnet_test.go | 94 +++++++++++++++++------ 4 files changed, 117 insertions(+), 28 deletions(-) diff --git a/aws/internal/service/ec2/waiter/status.go b/aws/internal/service/ec2/waiter/status.go index 47a78775504..74e0c9308f5 100644 --- a/aws/internal/service/ec2/waiter/status.go +++ b/aws/internal/service/ec2/waiter/status.go @@ -259,6 +259,27 @@ func SubnetMapCustomerOwnedIpOnLaunch(conn *ec2.EC2, id string) resource.StateRe } } +// SubnetMapPublicIpOnLaunch fetches the Subnet and its MapPublicIpOnLaunch +func SubnetMapPublicIpOnLaunch(conn *ec2.EC2, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + subnet, err := finder.SubnetByID(conn, id) + + if tfawserr.ErrCodeEquals(err, tfec2.ErrCodeInvalidSubnetIDNotFound) { + return nil, "false", nil + } + + if err != nil { + return nil, "false", err + } + + if subnet == nil { + return nil, "false", nil + } + + return subnet, strconv.FormatBool(aws.BoolValue(subnet.MapPublicIpOnLaunch)), nil + } +} + const ( vpcPeeringConnectionStatusNotFound = "NotFound" vpcPeeringConnectionStatusUnknown = "Unknown" diff --git a/aws/internal/service/ec2/waiter/waiter.go b/aws/internal/service/ec2/waiter/waiter.go index 
c8c516294ef..0b76dd4f980 100644 --- a/aws/internal/service/ec2/waiter/waiter.go +++ b/aws/internal/service/ec2/waiter/waiter.go @@ -272,6 +272,24 @@ func SubnetMapCustomerOwnedIpOnLaunchUpdated(conn *ec2.EC2, subnetID string, exp return nil, err } +func SubnetMapPublicIpOnLaunchUpdated(conn *ec2.EC2, subnetID string, expectedValue bool) (*ec2.Subnet, error) { + stateConf := &resource.StateChangeConf{ + Target: []string{strconv.FormatBool(expectedValue)}, + Refresh: SubnetMapPublicIpOnLaunch(conn, subnetID), + Timeout: SubnetAttributePropagationTimeout, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*ec2.Subnet); ok { + return output, err + } + + return nil, err +} + const ( VpnGatewayVpcAttachmentAttachedTimeout = 15 * time.Minute diff --git a/aws/resource_aws_subnet.go b/aws/resource_aws_subnet.go index d05943849cf..d93e6ee1257 100644 --- a/aws/resource_aws_subnet.go +++ b/aws/resource_aws_subnet.go @@ -212,6 +212,10 @@ func resourceAwsSubnetCreate(d *schema.ResourceData, meta interface{}) error { if _, err := conn.ModifySubnetAttribute(input); err != nil { return fmt.Errorf("error enabling EC2 Subnet (%s) map public IP on launch: %w", d.Id(), err) } + + if _, err := waiter.SubnetMapPublicIpOnLaunchUpdated(conn, d.Id(), d.Get("map_public_ip_on_launch").(bool)); err != nil { + return fmt.Errorf("error waiting for EC2 Subnet (%s) map public IP on launch update: %w", d.Id(), err) + } } return resourceAwsSubnetRead(d, meta) @@ -316,12 +320,14 @@ func resourceAwsSubnetUpdate(d *schema.ResourceData, meta interface{}) error { }, } - log.Printf("[DEBUG] Subnet modify attributes: %#v", modifyOpts) - _, err := conn.ModifySubnetAttribute(modifyOpts) if err != nil { - return err + return fmt.Errorf("error updating EC2 Subnet (%s) map public IP on launch: %w", d.Id(), err) + } + + if _, err := waiter.SubnetMapPublicIpOnLaunchUpdated(conn, d.Id(), d.Get("map_public_ip_on_launch").(bool)); err != nil { + return fmt.Errorf("error waiting for EC2 Subnet (%s) map public IP on launch update: %w", d.Id(), err) } } diff --git a/aws/resource_aws_subnet_test.go b/aws/resource_aws_subnet_test.go index 7da2ee09b6a..fed7b085741 100644 --- a/aws/resource_aws_subnet_test.go +++ b/aws/resource_aws_subnet_test.go @@ -127,18 +127,6 @@ func TestAccAWSSubnet_basic(t *testing.T) { var v ec2.Subnet resourceName := "aws_subnet.test" - testCheck := func(*terraform.State) error { - if aws.StringValue(v.CidrBlock) != "10.1.1.0/24" { - return fmt.Errorf("bad cidr: %s", aws.StringValue(v.CidrBlock)) - } - - if !aws.BoolValue(v.MapPublicIpOnLaunch) { - return fmt.Errorf("bad MapPublicIpOnLaunch: %t", aws.BoolValue(v.MapPublicIpOnLaunch)) - } - - return nil - } - resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, IDRefreshName: resourceName, @@ -149,16 +137,16 @@ func TestAccAWSSubnet_basic(t *testing.T) { Config: testAccSubnetConfig, Check: resource.ComposeTestCheckFunc( testAccCheckSubnetExists(resourceName, &v), - testCheck, - // ipv6 should be empty if disabled so we can still use the property in conditionals - resource.TestCheckResourceAttr(resourceName, "ipv6_cidr_block", ""), testAccMatchResourceAttrRegionalARN(resourceName, "arn", "ec2", regexp.MustCompile(`subnet/subnet-.+`)), - testAccCheckResourceAttrAccountID(resourceName, "owner_id"), resource.TestCheckResourceAttrSet(resourceName, "availability_zone"), resource.TestCheckResourceAttrSet(resourceName, "availability_zone_id"), + 
resource.TestCheckResourceAttr(resourceName, "cidr_block", "10.1.1.0/24"), resource.TestCheckResourceAttr(resourceName, "customer_owned_ipv4_pool", ""), + resource.TestCheckResourceAttr(resourceName, "ipv6_cidr_block", ""), resource.TestCheckResourceAttr(resourceName, "map_customer_owned_ip_on_launch", "false"), + resource.TestCheckResourceAttr(resourceName, "map_public_ip_on_launch", "false"), resource.TestCheckResourceAttr(resourceName, "outpost_arn", ""), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, @@ -427,6 +415,45 @@ func TestAccAWSSubnet_MapCustomerOwnedIpOnLaunch(t *testing.T) { }) } +func TestAccAWSSubnet_MapPublicIpOnLaunch(t *testing.T) { + var subnet ec2.Subnet + resourceName := "aws_subnet.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSubnetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSubnetConfigMapPublicIpOnLaunch(true), + Check: resource.ComposeTestCheckFunc( + testAccCheckSubnetExists(resourceName, &subnet), + resource.TestCheckResourceAttr(resourceName, "map_public_ip_on_launch", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccSubnetConfigMapPublicIpOnLaunch(false), + Check: resource.ComposeTestCheckFunc( + testAccCheckSubnetExists(resourceName, &subnet), + resource.TestCheckResourceAttr(resourceName, "map_public_ip_on_launch", "false"), + ), + }, + { + Config: testAccSubnetConfigMapPublicIpOnLaunch(true), + Check: resource.ComposeTestCheckFunc( + testAccCheckSubnetExists(resourceName, &subnet), + resource.TestCheckResourceAttr(resourceName, "map_public_ip_on_launch", "true"), + ), + }, + }, + }) +} + func TestAccAWSSubnet_outpost(t *testing.T) { var v ec2.Subnet outpostDataSourceName := "data.aws_outposts_outpost.test" @@ -567,9 +594,8 @@ resource "aws_vpc" "test" { } resource "aws_subnet" "test" { - cidr_block = "10.1.1.0/24" - vpc_id = aws_vpc.test.id - map_public_ip_on_launch = true + cidr_block = "10.1.1.0/24" + vpc_id = aws_vpc.test.id } ` @@ -627,9 +653,8 @@ resource "aws_vpc" "test" { } resource "aws_subnet" "test" { - cidr_block = "10.10.1.0/24" - vpc_id = aws_vpc.test.id - map_public_ip_on_launch = true + cidr_block = "10.10.1.0/24" + vpc_id = aws_vpc.test.id tags = { Name = "tf-acc-subnet-ipv6" @@ -651,7 +676,6 @@ resource "aws_subnet" "test" { cidr_block = "10.10.1.0/24" vpc_id = aws_vpc.test.id ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, 1) - map_public_ip_on_launch = true assign_ipv6_address_on_creation = true tags = { @@ -674,7 +698,6 @@ resource "aws_subnet" "test" { cidr_block = "10.10.1.0/24" vpc_id = aws_vpc.test.id ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, 1) - map_public_ip_on_launch = true assign_ipv6_address_on_creation = false tags = { @@ -697,7 +720,6 @@ resource "aws_subnet" "test" { cidr_block = "10.10.1.0/24" vpc_id = aws_vpc.test.id ipv6_cidr_block = cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, 3) - map_public_ip_on_launch = true assign_ipv6_address_on_creation = false tags = { @@ -834,6 +856,28 @@ resource "aws_subnet" "test" { `, mapCustomerOwnedIpOnLaunch) } +func testAccSubnetConfigMapPublicIpOnLaunch(mapPublicIpOnLaunch bool) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = "tf-acc-test-subnet-map-public-ip-on-launch" + } +} + +resource "aws_subnet" "test" 
{ + cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) + map_public_ip_on_launch = %[1]t + vpc_id = aws_vpc.test.id + + tags = { + Name = "tf-acc-test-subnet-map-public-ip-on-launch" + } +} +`, mapPublicIpOnLaunch) +} + func testAccSubnetConfigOutpost() string { return ` data "aws_outposts_outposts" "test" {} From 0cff9268a7ace416616e6df792cfd7f0bc47fc93 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Wed, 3 Feb 2021 15:02:47 +0000 Subject: [PATCH 0980/1212] Update CHANGELOG.md for #17410 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 31e453a2749..ae2990d3a2a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,8 @@ ENHANCEMENTS: BUG FIXES: * data-source/aws_partition: Correct `reverse_dns_prefix` value in AWS China, C2S, and SC2S ([#17142](https://github.com/hashicorp/terraform-provider-aws/issues/17142)) +* resource/aws_api_gateway_method_settings: Ignore non-existent resource errors during deletion ([#17234](https://github.com/hashicorp/terraform-provider-aws/issues/17234)) +* resource/aws_api_gateway_method_settings: Prevent confusing Terraform error on resource disappearance during creation ([#17234](https://github.com/hashicorp/terraform-provider-aws/issues/17234)) * resource/aws_cloudwatch_event_rule: Prevent perpetual differences with `name_prefix` argument values beginning with `terraform-` ([#17030](https://github.com/hashicorp/terraform-provider-aws/issues/17030)) * resource/aws_glacier_vault: Prevent crash with `GetVaultAccessPolicy` API errors ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645)) * resource/aws_glacier_vault: Properly remove from state when resource does not exist ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645)) From 9f92117aab9f7478df5b0015d4e3684fdfa40395 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 3 Feb 2021 10:16:07 -0500 Subject: [PATCH 0981/1212] service/ec2: Additional error handling for VPC Endpoint and VPC Endpoint Service deletion, sweeper fixes for Route Tables, VPC Endpoints, and VPC Endpoint Services (#16656) * service/ec2: Additional error handling for VPC Endpoint and VPC Endpoint Service deletion, sweeper fixes for Route Tables, VPC Endpoints, and VPC Endpoint Services The `DeleteVpcEndpoints` and `DeleteVpcEndpointServiceConfigurations` APIs will sometimes return failures in an `Unsuccessful` array in the response, instead of a normal error. Previously the resource and sweeper did not account for this type of error response and would timeout on deletion after never reporting underlying issue: ``` 2020/12/08 18:43:52 Sweeper Tests ran unsuccessfully: ... - aws_vpc_endpoint_service: error waiting for VPC Endpoint Service (vpce-svc-0c300eaebde5aec19) to delete: timeout while waiting for state to become 'Deleted' (last state: 'Available', timeout: 10m0s) ... 
- aws_vpc_endpoint: error waiting for VPC Endpoint (vpce-0395ac1f6cc86b11a) to delete: timeout while waiting for state to become 'deleted' (last state: 'available', timeout: 10m0s) ``` Now the resource will handle this response type, the VPC Endpoint sweepers have been refactored to use the resource deletion function, and the VPC Endpoint sweepers will correctly show the unsuccessful deletions while immediately continuing on to the next item: ``` 2020/12/08 20:46:59 Sweeper Tests ran unsuccessfully: - aws_vpc_endpoint_service: 1 error occurred: * error deleting EC2 VPC Endpoint Service (vpce-svc-0c300eaebde5aec19): error deleting EC2 VPC Endpoint Service (vpce-svc-0c300eaebde5aec19): 1 error occurred: * vpce-svc-0c300eaebde5aec19: ExistingVpcEndpointConnections: Service has existing active VPC Endpoint connections! ... - aws_vpc_endpoint: 1 error occurred: * error deleting EC2 VPC Endpoint (vpce-0395ac1f6cc86b11a): error deleting EC2 VPC Endpoint (vpce-0395ac1f6cc86b11a): 1 error occurred: * vpce-0395ac1f6cc86b11a: InvalidParameter: Endpoint must be removed from route table before deletion ``` To fix the underlying cause of these errors, the Route Table sweeper needed to be added as a VPC Endpoint dependency and the Route Table sweeper needed to delete non-local/non-public-IGW routes if the Route Table was the main route table for the VPC (as main Route Tables cannot be deleted): ``` 2020/12/08 21:12:50 [DEBUG] Running Sweepers for region (us-west-2): 2020/12/08 21:12:50 [DEBUG] Running Sweeper (aws_route_table) in region (us-west-2) 2020/12/08 21:12:50 [INFO] AWS Auth provider used: "SharedCredentialsProvider" 2020/12/08 21:12:50 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2020/12/08 21:12:50 [DEBUG] Trying to get account information via sts:GetCallerIdentity 2020/12/08 21:12:52 [DEBUG] Deleting EC2 Route Table (rtb-09af9318dcc5ccaf9) Route 2020/12/08 21:12:52 [DEBUG] Sweeper (aws_vpc_endpoint_service) has dependency (aws_vpc_endpoint), running.. 2020/12/08 21:12:52 [DEBUG] Sweeper (aws_vpc_endpoint) has dependency (aws_route_table), running.. 2020/12/08 21:12:52 [DEBUG] Sweeper (aws_route_table) already ran in region (us-west-2) 2020/12/08 21:12:52 [DEBUG] Running Sweeper (aws_vpc_endpoint) in region (us-west-2) 2020/12/08 21:12:53 [INFO] Deleting EC2 VPC Endpoint: vpce-0395ac1f6cc86b11a 2020/12/08 21:12:53 [DEBUG] Waiting for state to become: [deleted] 2020/12/08 21:12:58 [DEBUG] Reading VPC Endpoint: vpce-0395ac1f6cc86b11a 2020/12/08 21:12:59 [TRACE] Waiting 5s before next try 2020/12/08 21:13:04 [DEBUG] Reading VPC Endpoint: vpce-0395ac1f6cc86b11a 2020/12/08 21:13:04 [TRACE] Waiting 10s before next try 2020/12/08 21:13:14 [DEBUG] Reading VPC Endpoint: vpce-0395ac1f6cc86b11a 2020/12/08 21:13:15 [DEBUG] Running Sweeper (aws_vpc_endpoint_service) in region (us-west-2) 2020/12/08 21:13:15 [INFO] Deleting EC2 VPC Endpoint Service: vpce-svc-0c300eaebde5aec19 2020/12/08 21:13:16 [DEBUG] Waiting for state to become: [Deleted] 2020/12/08 21:13:21 [DEBUG] Reading VPC Endpoint Service Configuration: vpce-svc-0c300eaebde5aec19 2020/12/08 21:13:21 [DEBUG] Sweeper (aws_vpc_endpoint) has dependency (aws_route_table), running.. 
2020/12/08 21:13:21 [DEBUG] Sweeper (aws_route_table) already ran in region (us-west-2) 2020/12/08 21:13:21 [DEBUG] Sweeper (aws_vpc_endpoint) already ran in region (us-west-2) 2020/12/08 21:13:21 Sweeper Tests ran successfully: - aws_vpc_endpoint_service - aws_route_table - aws_vpc_endpoint ok github.com/terraform-providers/terraform-provider-aws/aws 33.689s ``` Output from acceptance testing: ``` --- PASS: TestAccAWSVpcEndpoint_disappears (37.60s) --- PASS: TestAccAWSVpcEndpoint_gatewayBasic (38.82s) --- PASS: TestAccAWSVpcEndpoint_gatewayPolicy (72.11s) --- PASS: TestAccAWSVpcEndpoint_gatewayWithRouteTableAndPolicy (87.14s) --- PASS: TestAccAWSVpcEndpoint_interfaceBasic (78.17s) --- PASS: TestAccAWSVpcEndpoint_interfaceNonAWSServiceAcceptOnCreate (276.65s) --- PASS: TestAccAWSVpcEndpoint_interfaceNonAWSServiceAcceptOnUpdate (333.65s) --- PASS: TestAccAWSVpcEndpoint_interfaceWithSubnetAndSecurityGroup (448.87s) --- PASS: TestAccAWSVpcEndpoint_tags (89.87s) --- PASS: TestAccAWSVpcEndpoint_VpcEndpointType_GatewayLoadBalancer (274.15s) --- PASS: TestAccAWSVpcEndpointService_AllowedPrincipals (280.60s) --- PASS: TestAccAWSVpcEndpointService_basic (252.94s) --- PASS: TestAccAWSVpcEndpointService_disappears (258.46s) --- PASS: TestAccAWSVpcEndpointService_GatewayLoadBalancerArns (208.91s) --- PASS: TestAccAWSVpcEndpointService_tags (288.75s) ``` Note: When working with assume role credentials, some of these test configurations can error due to the STS `GetCallerIdentity` ARN: ``` === CONT TestAccAWSVpcEndpoint_VpcEndpointType_GatewayLoadBalancer resource_aws_vpc_endpoint_test.go:519: Step 1/2 error: Error running apply: Error: error adding VPC Endpoint Service permissions: InvalidPrincipal: Invalid Principal: 'arn:aws:sts::--OMITTED--:assumed-role/terraform_team1_dev-admin/--OMITTED--' status code: 400, request id: 375c4645-3761-49b1-9758-3c9b5a51c115 === CONT TestAccAWSVpcEndpointService_AllowedPrincipals resource_aws_vpc_endpoint_service_test.go:125: Step 1/3 error: Error running apply: Error: error adding VPC Endpoint Service permissions: InvalidPrincipal: Invalid Principal: 'arn:aws:sts::--OMITTED--:assumed-role/terraform_team1_dev-admin/--OMITTED--' status code: 400, request id: f3e9a77f-3c7d-4acc-9127-f931c4ffbb37 ``` Will create followup issue for that problem. 
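For clarity, the core of the `Unsuccessful` handling described above is a small pattern: the delete call can return a successful response whose `Unsuccessful` array carries per-item failures, so the caller must inspect that array explicitly instead of moving straight to the deletion waiter. A minimal, self-contained sketch of the idea follows (assuming the AWS SDK for Go v1 `ec2` package and `hashicorp/go-multierror`; the helper name and exact error wording are illustrative, not the provider's actual implementation):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/hashicorp/go-multierror"
)

// deleteVpcEndpoint shows the failure mode being fixed: DeleteVpcEndpoints
// can succeed at the API level while still reporting per-item failures in
// output.Unsuccessful, which previously went unchecked and only surfaced
// later as a deletion-wait timeout.
func deleteVpcEndpoint(conn *ec2.EC2, id string) error {
	output, err := conn.DeleteVpcEndpoints(&ec2.DeleteVpcEndpointsInput{
		VpcEndpointIds: aws.StringSlice([]string{id}),
	})

	if err != nil {
		return fmt.Errorf("error deleting EC2 VPC Endpoint (%s): %w", id, err)
	}

	// Surface unsuccessful items immediately as errors rather than
	// waiting on a deletion that will never complete.
	var errs *multierror.Error

	for _, item := range output.Unsuccessful {
		if item == nil || item.Error == nil {
			continue
		}

		errs = multierror.Append(errs, fmt.Errorf("%s: %s: %s",
			aws.StringValue(item.ResourceId),
			aws.StringValue(item.Error.Code),
			aws.StringValue(item.Error.Message)))
	}

	return errs.ErrorOrNil()
}
```

The same check applies to `DeleteVpcEndpointServiceConfigurations`, whose output exposes an identically shaped `Unsuccessful` field.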
* Update CHANGELOG for #16656 * internal/service/ec2: gofmt after rebase --- .changelog/16656.txt | 7 ++ aws/internal/service/ec2/errors.go | 39 +++++++++ aws/resource_aws_route_table_test.go | 79 ++++++++++++++++--- aws/resource_aws_vpc_endpoint.go | 28 +++++-- aws/resource_aws_vpc_endpoint_service.go | 28 +++++-- aws/resource_aws_vpc_endpoint_service_test.go | 61 +++++++------- aws/resource_aws_vpc_endpoint_test.go | 66 +++++++++------- 7 files changed, 225 insertions(+), 83 deletions(-) create mode 100644 .changelog/16656.txt diff --git a/.changelog/16656.txt b/.changelog/16656.txt new file mode 100644 index 00000000000..c9e3bd76202 --- /dev/null +++ b/.changelog/16656.txt @@ -0,0 +1,7 @@ +```release-note:bug +resource/aws_vpc_endpoint: Return unsuccessful deletion information immediately as an error instead of timing out while waiting for deletion +``` + +```release-note:bug +resource/aws_vpc_endpoint_service: Return unsuccessful deletion information immediately as an error instead of timing out while waiting for deletion +``` diff --git a/aws/internal/service/ec2/errors.go b/aws/internal/service/ec2/errors.go index 9162be33aed..c63b775edc9 100644 --- a/aws/internal/service/ec2/errors.go +++ b/aws/internal/service/ec2/errors.go @@ -1,5 +1,13 @@ package ec2 +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/go-multierror" +) + const ( ErrCodeInvalidParameterValue = "InvalidParameterValue" ) @@ -28,6 +36,11 @@ const ( ErrCodeInvalidSubnetIDNotFound = "InvalidSubnetID.NotFound" ) +const ( + ErrCodeInvalidVpcEndpointIdNotFound = "InvalidVpcEndpointId.NotFound" + ErrCodeInvalidVpcEndpointServiceIdNotFound = "InvalidVpcEndpointServiceId.NotFound" +) + const ( ErrCodeInvalidVpcPeeringConnectionIDNotFound = "InvalidVpcPeeringConnectionID.NotFound" ) @@ -36,3 +49,29 @@ const ( InvalidVpnGatewayAttachmentNotFound = "InvalidVpnGatewayAttachment.NotFound" InvalidVpnGatewayIDNotFound = "InvalidVpnGatewayID.NotFound" ) + +func UnsuccessfulItemError(apiObject *ec2.UnsuccessfulItemError) error { + if apiObject == nil { + return nil + } + + return fmt.Errorf("%s: %s", aws.StringValue(apiObject.Code), aws.StringValue(apiObject.Message)) +} + +func UnsuccessfulItemsError(apiObjects []*ec2.UnsuccessfulItem) error { + var errors *multierror.Error + + for _, apiObject := range apiObjects { + if apiObject == nil { + continue + } + + err := UnsuccessfulItemError(apiObject.Error) + + if err != nil { + errors = multierror.Append(errors, fmt.Errorf("%s: %w", aws.StringValue(apiObject.ResourceId), err)) + } + } + + return errors.ErrorOrNil() +} diff --git a/aws/resource_aws_route_table_test.go b/aws/resource_aws_route_table_test.go index 9424da8b107..837a438df52 100644 --- a/aws/resource_aws_route_table_test.go +++ b/aws/resource_aws_route_table_test.go @@ -4,11 +4,13 @@ import ( "fmt" "log" "regexp" + "strings" "testing" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" @@ -23,35 +25,90 @@ func init() { func testSweepRouteTables(region string) error { client, err := sharedClientForRegion(region) + if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("error getting client: %w", err) } + conn := client.(*AWSClient).ec2conn + var sweeperErrs *multierror.Error + input 
:= &ec2.DescribeRouteTablesInput{} + err = conn.DescribeRouteTablesPages(input, func(page *ec2.DescribeRouteTablesOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + for _, routeTable := range page.RouteTables { + if routeTable == nil { + continue + } + + id := aws.StringValue(routeTable.RouteTableId) isMainRouteTableAssociation := false for _, routeTableAssociation := range routeTable.Associations { + if routeTableAssociation == nil { + continue + } + if aws.BoolValue(routeTableAssociation.Main) { isMainRouteTableAssociation = true break } + associationID := aws.StringValue(routeTableAssociation.RouteTableAssociationId) + input := &ec2.DisassociateRouteTableInput{ AssociationId: routeTableAssociation.RouteTableAssociationId, } - log.Printf("[DEBUG] Deleting Route Table Association: %s", input) + log.Printf("[DEBUG] Deleting EC2 Route Table Association: %s", associationID) _, err := conn.DisassociateRouteTable(input) + if err != nil { - log.Printf("[ERROR] Error deleting Route Table Association (%s): %s", aws.StringValue(routeTableAssociation.RouteTableAssociationId), err) + sweeperErr := fmt.Errorf("error deleting EC2 Route Table (%s) Association (%s): %w", id, associationID, err) + log.Printf("[ERROR] %s", sweeperErr) + sweeperErrs = multierror.Append(sweeperErrs, sweeperErr) + continue } } if isMainRouteTableAssociation { - log.Printf("[DEBUG] Skipping Main Route Table: %s", aws.StringValue(routeTable.RouteTableId)) + for _, route := range routeTable.Routes { + if route == nil { + continue + } + + if aws.StringValue(route.GatewayId) == "local" { + continue + } + + // Prevent deleting default VPC route for Internet Gateway + // which some testing is still reliant on operating correctly + if strings.HasPrefix(aws.StringValue(route.GatewayId), "igw-") && aws.StringValue(route.DestinationCidrBlock) == "0.0.0.0/0" { + continue + } + + input := &ec2.DeleteRouteInput{ + DestinationCidrBlock: route.DestinationCidrBlock, + DestinationIpv6CidrBlock: route.DestinationIpv6CidrBlock, + RouteTableId: routeTable.RouteTableId, + } + + log.Printf("[DEBUG] Deleting EC2 Route Table (%s) Route", id) + _, err := conn.DeleteRoute(input) + + if err != nil { + sweeperErr := fmt.Errorf("error deleting EC2 Route Table (%s) Route: %w", id, err) + log.Printf("[ERROR] %s", sweeperErr) + sweeperErrs = multierror.Append(sweeperErrs, sweeperErr) + continue + } + } + continue } @@ -59,10 +116,14 @@ func testSweepRouteTables(region string) error { RouteTableId: routeTable.RouteTableId, } - log.Printf("[DEBUG] Deleting Route Table: %s", input) + log.Printf("[DEBUG] Deleting EC2 Route Table: %s", id) _, err := conn.DeleteRouteTable(input) + if err != nil { - log.Printf("[ERROR] Error deleting Route Table (%s): %s", aws.StringValue(routeTable.RouteTableId), err) + sweeperErr := fmt.Errorf("error deleting EC2 Route Table (%s): %w", id, err) + log.Printf("[ERROR] %s", sweeperErr) + sweeperErrs = multierror.Append(sweeperErrs, sweeperErr) + continue } } @@ -71,14 +132,14 @@ func testSweepRouteTables(region string) error { if testSweepSkipSweepError(err) { log.Printf("[WARN] Skipping EC2 Route Table sweep for %s: %s", region, err) - return nil + return sweeperErrs.ErrorOrNil() } if err != nil { - return fmt.Errorf("Error describing Route Tables: %s", err) + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EC2 Route Tables: %w", err)) } - return nil + return sweeperErrs.ErrorOrNil() } func TestAccAWSRouteTable_basic(t *testing.T) { diff --git a/aws/resource_aws_vpc_endpoint.go 
b/aws/resource_aws_vpc_endpoint.go index 0632a5fbee0..fc10d016a0f 100644 --- a/aws/resource_aws_vpc_endpoint.go +++ b/aws/resource_aws_vpc_endpoint.go @@ -9,11 +9,13 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + tfec2 "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2" ) const ( @@ -358,20 +360,30 @@ func resourceAwsVpcEndpointUpdate(d *schema.ResourceData, meta interface{}) erro func resourceAwsVpcEndpointDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn - log.Printf("[DEBUG] Deleting VPC Endpoint: %s", d.Id()) - _, err := conn.DeleteVpcEndpoints(&ec2.DeleteVpcEndpointsInput{ + input := &ec2.DeleteVpcEndpointsInput{ VpcEndpointIds: aws.StringSlice([]string{d.Id()}), - }) + } + + output, err := conn.DeleteVpcEndpoints(input) + + if tfawserr.ErrCodeEquals(err, tfec2.ErrCodeInvalidVpcEndpointIdNotFound) { + return nil + } + if err != nil { - if isAWSErr(err, "InvalidVpcEndpointId.NotFound", "") { - log.Printf("[DEBUG] VPC Endpoint %s is already gone", d.Id()) - } else { - return fmt.Errorf("Error deleting VPC Endpoint: %s", err) + return fmt.Errorf("error deleting EC2 VPC Endpoint (%s): %w", d.Id(), err) + } + + if output != nil && len(output.Unsuccessful) > 0 { + err := tfec2.UnsuccessfulItemsError(output.Unsuccessful) + + if err != nil { + return fmt.Errorf("error deleting EC2 VPC Endpoint (%s): %w", d.Id(), err) } } if err := vpcEndpointWaitUntilDeleted(conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - return fmt.Errorf("error waiting for VPC Endpoint (%s) to delete: %s", d.Id(), err) + return fmt.Errorf("error waiting for EC2 VPC Endpoint (%s) to delete: %w", d.Id(), err) } return nil diff --git a/aws/resource_aws_vpc_endpoint_service.go b/aws/resource_aws_vpc_endpoint_service.go index 0dbea69e416..97d4ef4569c 100644 --- a/aws/resource_aws_vpc_endpoint_service.go +++ b/aws/resource_aws_vpc_endpoint_service.go @@ -9,9 +9,11 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + tfec2 "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2" ) func resourceAwsVpcEndpointService() *schema.Resource { @@ -340,20 +342,30 @@ func resourceAwsVpcEndpointServiceUpdate(d *schema.ResourceData, meta interface{ func resourceAwsVpcEndpointServiceDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn - log.Printf("[DEBUG] Deleting VPC Endpoint Service: %s", d.Id()) - _, err := conn.DeleteVpcEndpointServiceConfigurations(&ec2.DeleteVpcEndpointServiceConfigurationsInput{ + input := &ec2.DeleteVpcEndpointServiceConfigurationsInput{ ServiceIds: aws.StringSlice([]string{d.Id()}), - }) + } + + output, err := conn.DeleteVpcEndpointServiceConfigurations(input) + + if tfawserr.ErrCodeEquals(err, 
tfec2.ErrCodeInvalidVpcEndpointServiceIdNotFound) { + return nil + } + if err != nil { - if isAWSErr(err, "InvalidVpcEndpointServiceId.NotFound", "") { - log.Printf("[DEBUG] VPC Endpoint Service %s is already gone", d.Id()) - } else { - return fmt.Errorf("Error deleting VPC Endpoint Service: %s", err.Error()) + return fmt.Errorf("error deleting EC2 VPC Endpoint Service (%s): %w", d.Id(), err) + } + + if output != nil && len(output.Unsuccessful) > 0 { + err := tfec2.UnsuccessfulItemsError(output.Unsuccessful) + + if err != nil { + return fmt.Errorf("error deleting EC2 VPC Endpoint Service (%s): %w", d.Id(), err) } } if err := waitForVpcEndpointServiceDeletion(conn, d.Id()); err != nil { - return fmt.Errorf("Error waiting for VPC Endpoint Service %s to delete: %s", d.Id(), err.Error()) + return fmt.Errorf("error waiting for EC2 VPC Endpoint Service (%s) to delete: %w", d.Id(), err) } return nil diff --git a/aws/resource_aws_vpc_endpoint_service_test.go b/aws/resource_aws_vpc_endpoint_service_test.go index 9ef445892d9..56fce2961cf 100644 --- a/aws/resource_aws_vpc_endpoint_service_test.go +++ b/aws/resource_aws_vpc_endpoint_service_test.go @@ -8,6 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" @@ -25,58 +26,62 @@ func init() { func testSweepEc2VpcEndpointServices(region string) error { client, err := sharedClientForRegion(region) + if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("error getting client: %w", err) } + conn := client.(*AWSClient).ec2conn - input := &ec2.DescribeVpcEndpointServiceConfigurationsInput{} - for { - output, err := conn.DescribeVpcEndpointServiceConfigurations(input) + var sweeperErrs *multierror.Error - if testSweepSkipSweepError(err) { - log.Printf("[WARN] Skipping EC2 VPC Endpoint Service sweep for %s: %s", region, err) - return nil - } + input := &ec2.DescribeVpcEndpointServiceConfigurationsInput{} - if err != nil { - return fmt.Errorf("error retrieving EC2 VPC Endpoint Services: %s", err) + err = conn.DescribeVpcEndpointServiceConfigurationsPages(input, func(page *ec2.DescribeVpcEndpointServiceConfigurationsOutput, lastPage bool) bool { + if page == nil { + return !lastPage } - for _, serviceConfiguration := range output.ServiceConfigurations { + for _, serviceConfiguration := range page.ServiceConfigurations { + if serviceConfiguration == nil { + continue + } + if aws.StringValue(serviceConfiguration.ServiceState) == ec2.ServiceStateDeleted { continue } id := aws.StringValue(serviceConfiguration.ServiceId) - input := &ec2.DeleteVpcEndpointServiceConfigurationsInput{ - ServiceIds: []*string{serviceConfiguration.ServiceId}, - } log.Printf("[INFO] Deleting EC2 VPC Endpoint Service: %s", id) - _, err := conn.DeleteVpcEndpointServiceConfigurations(input) - if isAWSErr(err, "InvalidVpcEndpointServiceId.NotFound", "") { - continue - } + r := resourceAwsVpcEndpointService() + d := r.Data(nil) + d.SetId(id) - if err != nil { - return fmt.Errorf("error deleting EC2 VPC Endpoint Service (%s): %s", id, err) - } + err := r.Delete(d, client) - if err := waitForVpcEndpointServiceDeletion(conn, id); err != nil { - return fmt.Errorf("error waiting for VPC Endpoint Service (%s) to delete: %s", id, err) + if err != nil { + sweeperErr := fmt.Errorf("error deleting EC2 VPC Endpoint 
Service (%s): %w", id, err) + log.Printf("[ERROR] %s", sweeperErr) + sweeperErrs = multierror.Append(sweeperErrs, sweeperErr) + continue } } - if aws.StringValue(output.NextToken) == "" { - break - } + return !lastPage + }) - input.NextToken = output.NextToken + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping EC2 VPC Endpoint Service sweep for %s: %s", region, err) + return sweeperErrs.ErrorOrNil() } - return nil + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EC2 VPC Endpoint Services: %w", err)) + } + + return sweeperErrs.ErrorOrNil() } func TestAccAWSVpcEndpointService_basic(t *testing.T) { diff --git a/aws/resource_aws_vpc_endpoint_test.go b/aws/resource_aws_vpc_endpoint_test.go index 74d7125fe58..413f33afb39 100644 --- a/aws/resource_aws_vpc_endpoint_test.go +++ b/aws/resource_aws_vpc_endpoint_test.go @@ -7,11 +7,11 @@ import ( "strconv" "strings" "testing" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" @@ -21,64 +21,70 @@ func init() { resource.AddTestSweepers("aws_vpc_endpoint", &resource.Sweeper{ Name: "aws_vpc_endpoint", F: testSweepEc2VpcEndpoints, + Dependencies: []string{ + "aws_route_table", + }, }) } func testSweepEc2VpcEndpoints(region string) error { client, err := sharedClientForRegion(region) + if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("error getting client: %w", err) } + conn := client.(*AWSClient).ec2conn - input := &ec2.DescribeVpcEndpointsInput{} - for { - output, err := conn.DescribeVpcEndpoints(input) + var sweeperErrs *multierror.Error - if testSweepSkipSweepError(err) { - log.Printf("[WARN] Skipping EC2 VPC Endpoint sweep for %s: %s", region, err) - return nil - } + input := &ec2.DescribeVpcEndpointsInput{} - if err != nil { - return fmt.Errorf("error retrieving EC2 VPC Endpoints: %s", err) + err = conn.DescribeVpcEndpointsPages(input, func(page *ec2.DescribeVpcEndpointsOutput, lastPage bool) bool { + if page == nil { + return !lastPage } - for _, vpcEndpoint := range output.VpcEndpoints { + for _, vpcEndpoint := range page.VpcEndpoints { + if vpcEndpoint == nil { + continue + } + if aws.StringValue(vpcEndpoint.State) != "available" { continue } id := aws.StringValue(vpcEndpoint.VpcEndpointId) - input := &ec2.DeleteVpcEndpointsInput{ - VpcEndpointIds: []*string{aws.String(id)}, - } - log.Printf("[INFO] Deleting EC2 VPC Endpoint: %s", id) - _, err := conn.DeleteVpcEndpoints(input) - if isAWSErr(err, "InvalidVpcEndpointId.NotFound", "") { - continue - } + r := resourceAwsVpcEndpoint() + d := r.Data(nil) + d.SetId(id) - if err != nil { - return fmt.Errorf("error deleting EC2 VPC Endpoint (%s): %s", id, err) - } + err := r.Delete(d, client) - if err := vpcEndpointWaitUntilDeleted(conn, id, 10*time.Minute); err != nil { - return fmt.Errorf("error waiting for VPC Endpoint (%s) to delete: %s", id, err) + if err != nil { + sweeperErr := fmt.Errorf("error deleting EC2 VPC Endpoint (%s): %w", id, err) + log.Printf("[ERROR] %s", sweeperErr) + sweeperErrs = multierror.Append(sweeperErrs, sweeperErr) + continue } } - if aws.StringValue(output.NextToken) == "" { - break - } + return !lastPage + }) - input.NextToken = output.NextToken + if testSweepSkipSweepError(err) { + 
log.Printf("[WARN] Skipping EC2 VPC Endpoint sweep for %s: %s", region, err) + return sweeperErrs.ErrorOrNil() } - return nil + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EC2 VPC Endpoints: %w", err)) + } + + return sweeperErrs.ErrorOrNil() } func TestAccAWSVpcEndpoint_gatewayBasic(t *testing.T) { From 561546558036dd5357ff4f577576542f585093ef Mon Sep 17 00:00:00 2001 From: changelogbot Date: Wed, 3 Feb 2021 15:18:40 +0000 Subject: [PATCH 0982/1212] Update CHANGELOG.md for #16656 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ae2990d3a2a..44ca9bcd7c4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,8 @@ BUG FIXES: * resource/aws_iam_access_key: Ensure `Inactive` `status` is properly configured during resource creation ([#17322](https://github.com/hashicorp/terraform-provider-aws/issues/17322)) * resource/aws_kinesis_firehose_delivery_stream: Use standard retry timeout for IAM eventual consistency and retry on LakeFormation access errors ([#17254](https://github.com/hashicorp/terraform-provider-aws/issues/17254)) * resource/aws_security_group: Prevent perpetual differences with `name_prefix` argument values beginning with `terraform-` ([#17030](https://github.com/hashicorp/terraform-provider-aws/issues/17030)) +* resource/aws_vpc_endpoint: Return unsuccessful deletion information immediately as an error instead of timing out while waiting for deletion ([#16656](https://github.com/hashicorp/terraform-provider-aws/issues/16656)) +* resource/aws_vpc_endpoint_service: Return unsuccessful deletion information immediately as an error instead of timing out while waiting for deletion ([#16656](https://github.com/hashicorp/terraform-provider-aws/issues/16656)) ## 3.26.0 (January 28, 2021) From 81857fc34c3100a2683078d6bde2ac227c8c452b Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 3 Feb 2021 11:12:23 -0500 Subject: [PATCH 0983/1212] resource/aws_glue_crawler: Use IAM timeout constant for retries, add LakeFormation permissions retries and configuration to tests (#17256) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16752 Previously: ``` === CONT TestAccAWSGlueCrawler_CatalogTarget resource_aws_glue_crawler_test.go:719: Step 1/3 error: Error running apply: Error: error creating Glue crawler: InvalidInputException: Insufficient Lake Formation permission(s) on tf-acc-test-1833852258513360098_table_0 (Service: AmazonDataCatalog; Status Code: 400; Error Code: AccessDeniedException; Request ID: 34c98eb1-4821-4ff5-897b-a9ef0acb7567; Proxy: null) --- FAIL: TestAccAWSGlueCrawler_CatalogTarget (20.93s) === CONT TestAccAWSGlueCrawler_CatalogTarget_Multiple resource_aws_glue_crawler_test.go:791: Step 1/4 error: Error running apply: Error: error creating Glue crawler: InvalidInputException: Insufficient Lake Formation permission(s) on tf-acc-test-3733486415820405861_table_0 (Service: AmazonDataCatalog; Status Code: 400; Error Code: AccessDeniedException; Request ID: 914fed0e-b5dd-4243-859d-f38341bb1ead; Proxy: null) --- FAIL: TestAccAWSGlueCrawler_CatalogTarget_Multiple (22.48s) ``` Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAWSGlueCrawler_CatalogTarget (73.93s) --- PASS: TestAccAWSGlueCrawler_CatalogTarget_Multiple (116.15s) --- PASS: TestAccAWSGlueCrawler_Classifiers (114.19s) --- PASS: TestAccAWSGlueCrawler_Configuration (94.89s) --- PASS: TestAccAWSGlueCrawler_Description (88.94s) --- PASS: TestAccAWSGlueCrawler_disappears (55.16s) --- PASS: 
TestAccAWSGlueCrawler_DynamodbTarget (96.79s) --- PASS: TestAccAWSGlueCrawler_DynamodbTarget_scanAll (81.31s) --- PASS: TestAccAWSGlueCrawler_DynamodbTarget_scanRate (114.15s) --- PASS: TestAccAWSGlueCrawler_JdbcTarget (65.58s) --- PASS: TestAccAWSGlueCrawler_JdbcTarget_Exclusions (95.61s) --- PASS: TestAccAWSGlueCrawler_JdbcTarget_Multiple (62.19s) --- PASS: TestAccAWSGlueCrawler_lineageConfig (119.41s) --- PASS: TestAccAWSGlueCrawler_mongoDBTarget (54.79s) --- PASS: TestAccAWSGlueCrawler_mongoDBTarget_multiple (119.08s) --- PASS: TestAccAWSGlueCrawler_mongoDBTarget_scan_all (115.39s) --- PASS: TestAccAWSGlueCrawler_recrawlPolicy (115.84s) --- PASS: TestAccAWSGlueCrawler_RemoveTablePrefix (89.98s) --- PASS: TestAccAWSGlueCrawler_Role_ARN_NoPath (60.10s) --- PASS: TestAccAWSGlueCrawler_Role_ARN_Path (64.04s) --- PASS: TestAccAWSGlueCrawler_Role_Name_Path (58.68s) --- PASS: TestAccAWSGlueCrawler_S3Target (49.10s) --- PASS: TestAccAWSGlueCrawler_S3Target_ConnectionName (44.01s) --- PASS: TestAccAWSGlueCrawler_S3Target_Exclusions (47.69s) --- PASS: TestAccAWSGlueCrawler_S3Target_Multiple (76.37s) --- PASS: TestAccAWSGlueCrawler_Schedule (119.23s) --- PASS: TestAccAWSGlueCrawler_SchemaChangePolicy (89.88s) --- PASS: TestAccAWSGlueCrawler_SecurityConfiguration (96.75s) --- PASS: TestAccAWSGlueCrawler_TablePrefix (101.47s) --- PASS: TestAccAWSGlueCrawler_Tags (114.07s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- PASS: TestAccAWSGlueCrawler_CatalogTarget (106.03s) --- PASS: TestAccAWSGlueCrawler_CatalogTarget_Multiple (150.49s) --- PASS: TestAccAWSGlueCrawler_Classifiers (112.27s) --- PASS: TestAccAWSGlueCrawler_Configuration (80.77s) --- PASS: TestAccAWSGlueCrawler_Description (83.16s) --- PASS: TestAccAWSGlueCrawler_disappears (50.28s) --- PASS: TestAccAWSGlueCrawler_DynamodbTarget (78.75s) --- PASS: TestAccAWSGlueCrawler_DynamodbTarget_scanAll (105.33s) --- PASS: TestAccAWSGlueCrawler_DynamodbTarget_scanRate (111.31s) --- PASS: TestAccAWSGlueCrawler_JdbcTarget (82.87s) --- PASS: TestAccAWSGlueCrawler_JdbcTarget_Exclusions (74.76s) --- PASS: TestAccAWSGlueCrawler_JdbcTarget_Multiple (101.25s) --- PASS: TestAccAWSGlueCrawler_lineageConfig (117.90s) --- PASS: TestAccAWSGlueCrawler_mongoDBTarget (82.21s) --- PASS: TestAccAWSGlueCrawler_mongoDBTarget_multiple (113.25s) --- PASS: TestAccAWSGlueCrawler_mongoDBTarget_scan_all (103.75s) --- PASS: TestAccAWSGlueCrawler_recrawlPolicy (112.81s) --- PASS: TestAccAWSGlueCrawler_RemoveTablePrefix (79.86s) --- PASS: TestAccAWSGlueCrawler_Role_ARN_NoPath (47.48s) --- PASS: TestAccAWSGlueCrawler_Role_ARN_Path (39.29s) --- PASS: TestAccAWSGlueCrawler_Role_Name_Path (40.08s) --- PASS: TestAccAWSGlueCrawler_S3Target (78.72s) --- PASS: TestAccAWSGlueCrawler_S3Target_ConnectionName (49.21s) --- PASS: TestAccAWSGlueCrawler_S3Target_Exclusions (82.72s) --- PASS: TestAccAWSGlueCrawler_S3Target_Multiple (101.74s) --- PASS: TestAccAWSGlueCrawler_Schedule (103.16s) --- PASS: TestAccAWSGlueCrawler_SchemaChangePolicy (71.90s) --- PASS: TestAccAWSGlueCrawler_SecurityConfiguration (77.77s) --- PASS: TestAccAWSGlueCrawler_TablePrefix (73.91s) --- PASS: TestAccAWSGlueCrawler_Tags (100.25s) ``` --- .changelog/17256.txt | 3 ++ aws/resource_aws_glue_crawler.go | 29 +++++++++++++----- aws/resource_aws_glue_crawler_test.go | 44 +++++++++++++++++++++++++++ 3 files changed, 69 insertions(+), 7 deletions(-) create mode 100644 .changelog/17256.txt diff --git a/.changelog/17256.txt b/.changelog/17256.txt new file mode 100644 index 00000000000..a24c6e4c844 --- 
/dev/null +++ b/.changelog/17256.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_glue_crawler: Use standard retry timeout for IAM eventual consistency and retry on LakeFormation permissions errors +``` \ No newline at end of file diff --git a/aws/resource_aws_glue_crawler.go b/aws/resource_aws_glue_crawler.go index 93f13266e1a..c2e0dd20e2c 100644 --- a/aws/resource_aws_glue_crawler.go +++ b/aws/resource_aws_glue_crawler.go @@ -5,16 +5,17 @@ import ( "log" "regexp" "strings" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/glue" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + iamwaiter "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/iam/waiter" "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" ) @@ -290,16 +291,23 @@ func resourceAwsGlueCrawlerCreate(d *schema.ResourceData, meta interface{}) erro } // Retry for IAM eventual consistency - err = resource.Retry(1*time.Minute, func() *resource.RetryError { + err = resource.Retry(iamwaiter.PropagationTimeout, func() *resource.RetryError { _, err = glueConn.CreateCrawler(crawlerInput) if err != nil { - if isAWSErr(err, glue.ErrCodeInvalidInputException, "Service is unable to assume role") { + // InvalidInputException: Insufficient Lake Formation permission(s) on xxx + if tfawserr.ErrMessageContains(err, glue.ErrCodeInvalidInputException, "Insufficient Lake Formation permission") { return resource.RetryableError(err) } + + if tfawserr.ErrMessageContains(err, glue.ErrCodeInvalidInputException, "Service is unable to assume role") { + return resource.RetryableError(err) + } + // InvalidInputException: Unable to retrieve connection tf-acc-test-8656357591012534997: User: arn:aws:sts::*******:assumed-role/tf-acc-test-8656357591012534997/AWS-Crawler is not authorized to perform: glue:GetConnection on resource: * (Service: AmazonDataCatalog; Status Code: 400; Error Code: AccessDeniedException; Request ID: 4d72b66f-9c75-11e8-9faf-5b526c7be968) - if isAWSErr(err, glue.ErrCodeInvalidInputException, "is not authorized") { + if tfawserr.ErrMessageContains(err, glue.ErrCodeInvalidInputException, "is not authorized") { return resource.RetryableError(err) } + return resource.NonRetryableError(err) } return nil @@ -599,16 +607,23 @@ func resourceAwsGlueCrawlerUpdate(d *schema.ResourceData, meta interface{}) erro } // Retry for IAM eventual consistency - err = resource.Retry(1*time.Minute, func() *resource.RetryError { + err = resource.Retry(iamwaiter.PropagationTimeout, func() *resource.RetryError { _, err := glueConn.UpdateCrawler(updateCrawlerInput) if err != nil { - if isAWSErr(err, glue.ErrCodeInvalidInputException, "Service is unable to assume role") { + // InvalidInputException: Insufficient Lake Formation permission(s) on xxx + if tfawserr.ErrMessageContains(err, glue.ErrCodeInvalidInputException, "Insufficient Lake Formation permission") { return resource.RetryableError(err) } + + if tfawserr.ErrMessageContains(err, glue.ErrCodeInvalidInputException, "Service is unable to assume role") { + return resource.RetryableError(err) + } + // InvalidInputException: Unable to retrieve connection 
tf-acc-test-8656357591012534997: User: arn:aws:sts::*******:assumed-role/tf-acc-test-8656357591012534997/AWS-Crawler is not authorized to perform: glue:GetConnection on resource: * (Service: AmazonDataCatalog; Status Code: 400; Error Code: AccessDeniedException; Request ID: 4d72b66f-9c75-11e8-9faf-5b526c7be968) - if isAWSErr(err, glue.ErrCodeInvalidInputException, "is not authorized") { + if tfawserr.ErrMessageContains(err, glue.ErrCodeInvalidInputException, "is not authorized") { return resource.RetryableError(err) } + return resource.NonRetryableError(err) } return nil diff --git a/aws/resource_aws_glue_crawler_test.go b/aws/resource_aws_glue_crawler_test.go index b4666abd946..37f9e9fd335 100644 --- a/aws/resource_aws_glue_crawler_test.go +++ b/aws/resource_aws_glue_crawler_test.go @@ -1462,6 +1462,26 @@ resource "aws_iam_role_policy_attachment" "test-AWSGlueServiceRole" { policy_arn = data.aws_iam_policy.AWSGlueServiceRole.arn role = aws_iam_role.test.name } + +resource "aws_iam_role_policy" "LakeFormationDataAccess" { + role = aws_iam_role.test.name + + policy = < Date: Wed, 3 Feb 2021 16:14:10 +0000 Subject: [PATCH 0984/1212] Update CHANGELOG.md for #17256 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 44ca9bcd7c4..cfab560b3ca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ BUG FIXES: * resource/aws_cloudwatch_event_rule: Prevent perpetual differences with `name_prefix` argument values beginning with `terraform-` ([#17030](https://github.com/hashicorp/terraform-provider-aws/issues/17030)) * resource/aws_glacier_vault: Prevent crash with `GetVaultAccessPolicy` API errors ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645)) * resource/aws_glacier_vault: Properly remove from state when resource does not exist ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645)) +* resource/aws_glue_crawler: Use standard retry timeout for IAM eventual consistency and retry on LakeFormation permissions errors ([#17256](https://github.com/hashicorp/terraform-provider-aws/issues/17256)) * resource/aws_iam_access_key: Ensure `Inactive` `status` is properly configured during resource creation ([#17322](https://github.com/hashicorp/terraform-provider-aws/issues/17322)) * resource/aws_kinesis_firehose_delivery_stream: Use standard retry timeout for IAM eventual consistency and retry on LakeFormation access errors ([#17254](https://github.com/hashicorp/terraform-provider-aws/issues/17254)) * resource/aws_security_group: Prevent perpetual differences with `name_prefix` argument values beginning with `terraform-` ([#17030](https://github.com/hashicorp/terraform-provider-aws/issues/17030)) From 60cbd9a6cf55d90d8a46f6d60767a4904585fe14 Mon Sep 17 00:00:00 2001 From: Nicolas Singh Oteiza Date: Wed, 3 Feb 2021 15:54:29 -0300 Subject: [PATCH 0985/1212] Update instance_class argument options Update instance_class argument options for docdb_cluster_instance resource --- website/docs/r/docdb_cluster_instance.html.markdown | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/website/docs/r/docdb_cluster_instance.html.markdown b/website/docs/r/docdb_cluster_instance.html.markdown index 7c4ceddb09f..f3a43ada558 100644 --- a/website/docs/r/docdb_cluster_instance.html.markdown +++ b/website/docs/r/docdb_cluster_instance.html.markdown @@ -52,12 +52,19 @@ The following arguments are supported: * `identifier_prefix` - (Optional, Forces new resource) Creates a unique identifier beginning with the specified 
prefix. Conflicts with `identifier`. * `instance_class` - (Required) The instance class to use. For details on CPU and memory, see [Scaling for DocDB Instances][2]. DocDB currently supports the below instance classes. Please see [AWS Documentation][4] for complete details. + - db.r5.large + - db.r5.xlarge + - db.r5.2xlarge + - db.r5.4xlarge + - db.r5.12xlarge + - db.r5.24xlarge - db.r4.large - db.r4.xlarge - db.r4.2xlarge - db.r4.4xlarge - db.r4.8xlarge - db.r4.16xlarge + - db.t3.medium * `preferred_maintenance_window` - (Optional) The window to perform maintenance in. Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00". * `promotion_tier` - (Optional) Default 0. Failover Priority setting on instance level. The reader who has lower tier has higher priority to get promoter to writer. From 767e5ef764b19efce4591fb56e2e8ecca732fe22 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Wed, 3 Feb 2021 15:32:12 -0800 Subject: [PATCH 0986/1212] Address issues from origin_policy code review --- ...udfront_origin_request_policy_structure.go | 135 ++++++++++-------- ...ce_aws_cloudfront_origin_request_policy.go | 73 +++++----- ...ce_aws_cloudfront_origin_request_policy.go | 25 ++-- ...dfront_origin_request_policy.html.markdown | 18 +-- ...dfront_origin_request_policy.html.markdown | 18 +-- 5 files changed, 145 insertions(+), 124 deletions(-) diff --git a/aws/cloudfront_origin_request_policy_structure.go b/aws/cloudfront_origin_request_policy_structure.go index 082656903b4..7d6342aa704 100644 --- a/aws/cloudfront_origin_request_policy_structure.go +++ b/aws/cloudfront_origin_request_policy_structure.go @@ -6,99 +6,118 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -func expandCloudFrontOriginRequestPolicyCookieNames(cookieNamesFlat map[string]interface{}) *cloudfront.CookieNames { - cookieNames := &cloudfront.CookieNames{} +func expandCloudFrontOriginRequestPolicyCookieNames(tfMap map[string]interface{}) *cloudfront.CookieNames { + if tfMap == nil { + return nil + } + + apiObject := &cloudfront.CookieNames{} - var newCookieItems []*string - for _, cookie := range cookieNamesFlat["items"].(*schema.Set).List() { - newCookieItems = append(newCookieItems, aws.String(cookie.(string))) + var items []*string + for _, item := range tfMap["items"].(*schema.Set).List() { + items = append(items, aws.String(item.(string))) } - cookieNames.Items = newCookieItems - cookieNames.Quantity = aws.Int64(int64(len(newCookieItems))) + apiObject.Items = items + apiObject.Quantity = aws.Int64(int64(len(items))) - return cookieNames + return apiObject } -func expandCloudFrontOriginRequestPolicyCookiesConfig(cookiesConfigFlat map[string]interface{}) *cloudfront.OriginRequestPolicyCookiesConfig { - var cookies *cloudfront.CookieNames +func expandCloudFrontOriginRequestPolicyCookiesConfig(tfMap map[string]interface{}) *cloudfront.OriginRequestPolicyCookiesConfig { + if tfMap == nil { + return nil + } + + var itemsAPIObject *cloudfront.CookieNames - if cookiesFlat, ok := cookiesConfigFlat["cookies"].([]interface{}); ok && len(cookiesFlat) == 1 { - cookies = expandCloudFrontOriginRequestPolicyCookieNames(cookiesFlat[0].(map[string]interface{})) + if itemsFlat, ok := tfMap["cookies"].([]interface{}); ok && len(itemsFlat) == 1 { + itemsAPIObject = expandCloudFrontOriginRequestPolicyCookieNames(itemsFlat[0].(map[string]interface{})) } else { - cookies = nil + itemsAPIObject = nil } - cookiesConfig := &cloudfront.OriginRequestPolicyCookiesConfig{ - CookieBehavior: 
aws.String(cookiesConfigFlat["cookie_behavior"].(string)), - Cookies: cookies, + apiObject := &cloudfront.OriginRequestPolicyCookiesConfig{ + CookieBehavior: aws.String(tfMap["cookie_behavior"].(string)), + Cookies: itemsAPIObject, } - return cookiesConfig + return apiObject } -func expandCloudFrontOriginRequestPolicyHeaders(headerNamesFlat map[string]interface{}) *cloudfront.Headers { - headers := &cloudfront.Headers{} +func expandCloudFrontOriginRequestPolicyHeaders(tfMap map[string]interface{}) *cloudfront.Headers { + if tfMap == nil { + return nil + } + apiObject := &cloudfront.Headers{} - var newHeaderItems []*string - for _, header := range headerNamesFlat["items"].(*schema.Set).List() { - newHeaderItems = append(newHeaderItems, aws.String(header.(string))) + var items []*string + for _, item := range tfMap["items"].(*schema.Set).List() { + items = append(items, aws.String(item.(string))) } - headers.Items = newHeaderItems - headers.Quantity = aws.Int64(int64(len(newHeaderItems))) + apiObject.Items = items + apiObject.Quantity = aws.Int64(int64(len(items))) - return headers + return apiObject } -func expandCloudFrontOriginRequestPolicyHeadersConfig(headersConfigFlat map[string]interface{}) *cloudfront.OriginRequestPolicyHeadersConfig { - var headers *cloudfront.Headers +func expandCloudFrontOriginRequestPolicyHeadersConfig(tfMap map[string]interface{}) *cloudfront.OriginRequestPolicyHeadersConfig { + if tfMap == nil { + return nil + } + var itemsAPIObject *cloudfront.Headers - if headersFlat, ok := headersConfigFlat["headers"].([]interface{}); ok && len(headersFlat) == 1 && headersConfigFlat["header_behavior"] != "none" { - headers = expandCloudFrontOriginRequestPolicyHeaders(headersFlat[0].(map[string]interface{})) + if itemsFlat, ok := tfMap["headers"].([]interface{}); ok && len(itemsFlat) == 1 && tfMap["header_behavior"] != "none" { + itemsAPIObject = expandCloudFrontOriginRequestPolicyHeaders(itemsFlat[0].(map[string]interface{})) } else { - headers = nil + itemsAPIObject = nil } - headersConfig := &cloudfront.OriginRequestPolicyHeadersConfig{ - HeaderBehavior: aws.String(headersConfigFlat["header_behavior"].(string)), - Headers: headers, + apiObject := &cloudfront.OriginRequestPolicyHeadersConfig{ + HeaderBehavior: aws.String(tfMap["header_behavior"].(string)), + Headers: itemsAPIObject, } - return headersConfig + return apiObject } -func expandCloudFrontOriginRequestPolicyQueryStringNames(queryStringNamesFlat map[string]interface{}) *cloudfront.QueryStringNames { - queryStringNames := &cloudfront.QueryStringNames{} +func expandCloudFrontOriginRequestPolicyQueryStringNames(tfMap map[string]interface{}) *cloudfront.QueryStringNames { + if tfMap == nil { + return nil + } + apiObject := &cloudfront.QueryStringNames{} - var newQueryStringItems []*string - for _, queryString := range queryStringNamesFlat["items"].(*schema.Set).List() { - newQueryStringItems = append(newQueryStringItems, aws.String(queryString.(string))) + var items []*string + for _, item := range tfMap["items"].(*schema.Set).List() { + items = append(items, aws.String(item.(string))) } - queryStringNames.Items = newQueryStringItems - queryStringNames.Quantity = aws.Int64(int64(len(newQueryStringItems))) + apiObject.Items = items + apiObject.Quantity = aws.Int64(int64(len(items))) - return queryStringNames + return apiObject } -func expandCloudFrontOriginRequestPolicyQueryStringsConfig(queryStringConfigFlat map[string]interface{}) *cloudfront.OriginRequestPolicyQueryStringsConfig { - var queryStrings 
*cloudfront.QueryStringNames +func expandCloudFrontOriginRequestPolicyQueryStringsConfig(tfMap map[string]interface{}) *cloudfront.OriginRequestPolicyQueryStringsConfig { + if tfMap == nil { + return nil + } + var itemsAPIObject *cloudfront.QueryStringNames - if queryStringFlat, ok := queryStringConfigFlat["query_strings"].([]interface{}); ok && len(queryStringFlat) == 1 { - queryStrings = expandCloudFrontOriginRequestPolicyQueryStringNames(queryStringFlat[0].(map[string]interface{})) + if itemsFlat, ok := tfMap["query_strings"].([]interface{}); ok && len(itemsFlat) == 1 { + itemsAPIObject = expandCloudFrontOriginRequestPolicyQueryStringNames(itemsFlat[0].(map[string]interface{})) } else { - queryStrings = nil + itemsAPIObject = nil } - queryStringConfig := &cloudfront.OriginRequestPolicyQueryStringsConfig{ - QueryStringBehavior: aws.String(queryStringConfigFlat["query_string_behavior"].(string)), - QueryStrings: queryStrings, + apiObject := &cloudfront.OriginRequestPolicyQueryStringsConfig{ + QueryStringBehavior: aws.String(tfMap["query_string_behavior"].(string)), + QueryStrings: itemsAPIObject, } - return queryStringConfig + return apiObject } func expandCloudFrontOriginRequestPolicyConfig(d *schema.ResourceData) *cloudfront.OriginRequestPolicyConfig { - - originRequestPolicy := &cloudfront.OriginRequestPolicyConfig{ + apiObject := &cloudfront.OriginRequestPolicyConfig{ Comment: aws.String(d.Get("comment").(string)), Name: aws.String(d.Get("name").(string)), CookiesConfig: expandCloudFrontOriginRequestPolicyCookiesConfig(d.Get("cookies_config").([]interface{})[0].(map[string]interface{})), @@ -106,7 +125,7 @@ func expandCloudFrontOriginRequestPolicyConfig(d *schema.ResourceData) *cloudfro QueryStringsConfig: expandCloudFrontOriginRequestPolicyQueryStringsConfig(d.Get("query_strings_config").([]interface{})[0].(map[string]interface{})), } - return originRequestPolicy + return apiObject } func flattenCloudFrontOriginRequestPolicyCookiesConfig(cookiesConfig *cloudfront.OriginRequestPolicyCookiesConfig) []map[string]interface{} { @@ -168,11 +187,3 @@ func flattenCloudFrontOriginRequestPolicyQueryStringsConfig(queryStringsConfig * queryStringsConfigFlat, } } - -func flattenCloudFrontOriginRequestPolicy(d *schema.ResourceData, originRequestPolicy *cloudfront.OriginRequestPolicyConfig) { - d.Set("comment", aws.StringValue(originRequestPolicy.Comment)) - d.Set("name", aws.StringValue(originRequestPolicy.Name)) - d.Set("cookies_config", flattenCloudFrontOriginRequestPolicyCookiesConfig(originRequestPolicy.CookiesConfig)) - d.Set("headers_config", flattenCloudFrontOriginRequestPolicyHeadersConfig(originRequestPolicy.HeadersConfig)) - d.Set("query_strings_config", flattenCloudFrontOriginRequestPolicyQueryStringsConfig(originRequestPolicy.QueryStringsConfig)) -} diff --git a/aws/data_source_aws_cloudfront_origin_request_policy.go b/aws/data_source_aws_cloudfront_origin_request_policy.go index 0e2a4366076..ce05f2a6183 100644 --- a/aws/data_source_aws_cloudfront_origin_request_policy.go +++ b/aws/data_source_aws_cloudfront_origin_request_policy.go @@ -11,22 +11,10 @@ func dataSourceAwsCloudFrontOriginRequestPolicy() *schema.Resource { Read: dataSourceAwsCloudFrontOriginRequestPolicyRead, Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - }, - "id": { - Type: schema.TypeString, - Optional: true, - }, "comment": { Type: schema.TypeString, Computed: true, }, - "etag": { - Type: schema.TypeString, - Computed: true, - }, "cookies_config": { Type: schema.TypeList, 
Computed: true, @@ -52,6 +40,10 @@ func dataSourceAwsCloudFrontOriginRequestPolicy() *schema.Resource { }, }, }, + "etag": { + Type: schema.TypeString, + Computed: true, + }, "headers_config": { Type: schema.TypeList, Computed: true, @@ -77,6 +69,14 @@ func dataSourceAwsCloudFrontOriginRequestPolicy() *schema.Resource { }, }, }, + "id": { + Type: schema.TypeString, + Optional: true, + }, + "name": { + Type: schema.TypeString, + Optional: true, + }, "query_strings_config": { Type: schema.TypeList, Computed: true, @@ -106,27 +106,6 @@ func dataSourceAwsCloudFrontOriginRequestPolicy() *schema.Resource { } } -func dataSourceAwsCloudFrontOriginRequestPolicyFindByName(d *schema.ResourceData, conn *cloudfront.CloudFront) error { - var originRequestPolicy *cloudfront.OriginRequestPolicy - request := &cloudfront.ListOriginRequestPoliciesInput{} - resp, err := conn.ListOriginRequestPolicies(request) - if err != nil { - return err - } - - for _, policySummary := range resp.OriginRequestPolicyList.Items { - if *policySummary.OriginRequestPolicy.OriginRequestPolicyConfig.Name == d.Get("name").(string) { - originRequestPolicy = policySummary.OriginRequestPolicy - break - } - } - - if originRequestPolicy != nil { - d.SetId(aws.StringValue(originRequestPolicy.Id)) - } - return nil -} - func dataSourceAwsCloudFrontOriginRequestPolicyRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).cloudfrontconn @@ -147,8 +126,34 @@ func dataSourceAwsCloudFrontOriginRequestPolicyRead(d *schema.ResourceData, meta } d.Set("etag", aws.StringValue(resp.ETag)) - flattenCloudFrontOriginRequestPolicy(d, resp.OriginRequestPolicy.OriginRequestPolicyConfig) + originRequestPolicy := *resp.OriginRequestPolicy.OriginRequestPolicyConfig + d.Set("comment", aws.StringValue(originRequestPolicy.Comment)) + d.Set("name", aws.StringValue(originRequestPolicy.Name)) + d.Set("cookies_config", flattenCloudFrontOriginRequestPolicyCookiesConfig(originRequestPolicy.CookiesConfig)) + d.Set("headers_config", flattenCloudFrontOriginRequestPolicyHeadersConfig(originRequestPolicy.HeadersConfig)) + d.Set("query_strings_config", flattenCloudFrontOriginRequestPolicyQueryStringsConfig(originRequestPolicy.QueryStringsConfig)) } return nil } + +func dataSourceAwsCloudFrontOriginRequestPolicyFindByName(d *schema.ResourceData, conn *cloudfront.CloudFront) error { + var originRequestPolicy *cloudfront.OriginRequestPolicy + request := &cloudfront.ListOriginRequestPoliciesInput{} + resp, err := conn.ListOriginRequestPolicies(request) + if err != nil { + return err + } + + for _, policySummary := range resp.OriginRequestPolicyList.Items { + if *policySummary.OriginRequestPolicy.OriginRequestPolicyConfig.Name == d.Get("name").(string) { + originRequestPolicy = policySummary.OriginRequestPolicy + break + } + } + + if originRequestPolicy != nil { + d.SetId(aws.StringValue(originRequestPolicy.Id)) + } + return nil +} diff --git a/aws/resource_aws_cloudfront_origin_request_policy.go b/aws/resource_aws_cloudfront_origin_request_policy.go index b97ffd8cf63..a9563b25eb9 100644 --- a/aws/resource_aws_cloudfront_origin_request_policy.go +++ b/aws/resource_aws_cloudfront_origin_request_policy.go @@ -22,15 +22,6 @@ func resourceAwsCloudFrontOriginRequestPolicy() *schema.Resource { Type: schema.TypeString, Optional: true, }, - "name": { - Type: schema.TypeString, - Required: true, - }, - "etag": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, "cookies_config": { Type: schema.TypeList, MaxItems: 1, @@ -59,6 +50,11 @@ func 
resourceAwsCloudFrontOriginRequestPolicy() *schema.Resource { }, }, }, + "etag": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, "headers_config": { Type: schema.TypeList, MaxItems: 1, @@ -87,6 +83,10 @@ func resourceAwsCloudFrontOriginRequestPolicy() *schema.Resource { }, }, }, + "name": { + Type: schema.TypeString, + Required: true, + }, "query_strings_config": { Type: schema.TypeList, MaxItems: 1, @@ -149,7 +149,12 @@ func resourceAwsCloudFrontOriginRequestPolicyRead(d *schema.ResourceData, meta i } d.Set("etag", aws.StringValue(resp.ETag)) - flattenCloudFrontOriginRequestPolicy(d, resp.OriginRequestPolicy.OriginRequestPolicyConfig) + originRequestPolicy := *resp.OriginRequestPolicy.OriginRequestPolicyConfig + d.Set("comment", aws.StringValue(originRequestPolicy.Comment)) + d.Set("name", aws.StringValue(originRequestPolicy.Name)) + d.Set("cookies_config", flattenCloudFrontOriginRequestPolicyCookiesConfig(originRequestPolicy.CookiesConfig)) + d.Set("headers_config", flattenCloudFrontOriginRequestPolicyHeadersConfig(originRequestPolicy.HeadersConfig)) + d.Set("query_strings_config", flattenCloudFrontOriginRequestPolicyQueryStringsConfig(originRequestPolicy.QueryStringsConfig)) return nil } diff --git a/website/docs/d/cloudfront_origin_request_policy.html.markdown b/website/docs/d/cloudfront_origin_request_policy.html.markdown index 5b6cfe1fece..4e25a4ea5a1 100644 --- a/website/docs/d/cloudfront_origin_request_policy.html.markdown +++ b/website/docs/d/cloudfront_origin_request_policy.html.markdown @@ -23,32 +23,32 @@ data "aws_cloudfront_origin_request_policy" "example" { The following arguments are supported: -* `name` - A unique name to identify the origin request policy. +* `name` - Unique name to identify the origin request policy. * `id` - The identifier for the origin request policy. ## Attributes Reference -* `comment` - A comment to describe the origin request policy. -* `cookies_config` - An object that determines whether any cookies in viewer requests (and if so, which cookies) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See [Cookies Config](#cookies-config) for more information. +* `comment` - Comment to describe the origin request policy. +* `cookies_config` - Object that determines whether any cookies in viewer requests (and if so, which cookies) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See [Cookies Config](#cookies-config) for more information. * `etag` - The current version of the origin request policy. -* `headers_config` - An object that determines whether any HTTP headers (and if so, which headers) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See [Headers Config](#headers-config) for more information. -* `query_strings_config` - An object that determines whether any URL query strings in viewer requests (and if so, which query strings) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See [Query Strings Config](#query-strings-config) for more information. +* `headers_config` - Object that determines whether any HTTP headers (and if so, which headers) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See [Headers Config](#headers-config) for more information. 
+* `query_strings_config` - Object that determines whether any URL query strings in viewer requests (and if so, which query strings) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See [Query Strings Config](#query-strings-config) for more information. ### Cookies Config `cookie_behavior` - Determines whether any cookies in viewer requests are included in the origin request key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`, `all`. -`cookies` - An object that contains a list of cookie names. See [Items](#items) for more information. +`cookies` - Object that contains a list of cookie names. See [Items](#items) for more information. ### Headers Config `header_behavior` - Determines whether any HTTP headers are included in the origin request key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`, `allViewer`, `allViewerAndWhitelistCloudFront`. -`headers` - An object that contains a list of header names. See [Items](#items) for more information. +`headers` - Object that contains a list of header names. See [Items](#items) for more information. ### Query Strings Config `query_string_behavior` - Determines whether any URL query strings in viewer requests are included in the origin request key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`, `all`. -`query_strings` - An object that contains a list of query string names. See [Items](#items) for more information. +`query_strings` - Object that contains a list of query string names. See [Items](#items) for more information. ### Items -`items` - A list of item names (cookies, headers, or query strings). +`items` - List of item names (cookies, headers, or query strings). diff --git a/website/docs/r/cloudfront_origin_request_policy.html.markdown b/website/docs/r/cloudfront_origin_request_policy.html.markdown index 4e19f1ecbba..e92e370dcd2 100644 --- a/website/docs/r/cloudfront_origin_request_policy.html.markdown +++ b/website/docs/r/cloudfront_origin_request_policy.html.markdown @@ -42,30 +42,30 @@ resource "aws_cloudfront_origin_request_policy" "example" { The following arguments are supported: -* `name` - (Required) A unique name to identify the origin request policy. -* `comment` - (Optional) A comment to describe the origin request policy. -* `cookies_config` - (Required) An object that determines whether any cookies in viewer requests (and if so, which cookies) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See [Cookies Config](#cookies-config) for more information. -* `headers_config` - (Required) An object that determines whether any HTTP headers (and if so, which headers) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See [Headers Config](#headers-config) for more information. -* `query_strings_config` - (Required) An object that determines whether any URL query strings in viewer requests (and if so, which query strings) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See [Query Strings Config](#query-strings-config) for more information. +* `name` - (Required) Unique name to identify the origin request policy. +* `comment` - (Optional) Comment to describe the origin request policy.
+* `cookies_config` - (Required) Object that determines whether any cookies in viewer requests (and if so, which cookies) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See [Cookies Config](#cookies-config) for more information. +* `headers_config` - (Required) Object that determines whether any HTTP headers (and if so, which headers) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See [Headers Config](#headers-config) for more information. +* `query_strings_config` - (Required) Object that determines whether any URL query strings in viewer requests (and if so, which query strings) are included in the origin request key and automatically included in requests that CloudFront sends to the origin. See [Query Strings Config](#query-strings-config) for more information. ### Cookies Config `cookie_behavior` - (Required) Determines whether any cookies in viewer requests are included in the origin request key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`, `all`. -`cookies` - (Optional) An object that contains a list of cookie names. See [Items](#items) for more information. +`cookies` - (Optional) Object that contains a list of cookie names. See [Items](#items) for more information. ### Headers Config `header_behavior` - (Required) Determines whether any HTTP headers are included in the origin request key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`, `allViewer`, `allViewerAndWhitelistCloudFront`. -`headers` - (Optional) An object that contains a list of header names. See [Items](#items) for more information. +`headers` - (Optional) Object that contains a list of header names. See [Items](#items) for more information. ### Query Strings Config `query_string_behavior` - (Required) Determines whether any URL query strings in viewer requests are included in the origin request key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`, `all`. -`query_strings` - (Optional) An object that contains a list of query string names. See [Items](#items) for more information. +`query_strings` - (Optional) Object that contains a list of query string names. See [Items](#items) for more information. ### Items -`items` - (Required) A list of item names (cookies, headers, or query strings). +`items` - (Required) List of item names (cookies, headers, or query strings).
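For readers tracing how these blocks reach the CloudFront API: each `*_config` block expands to a behavior string plus an optional items container carrying an explicit `Quantity`. A minimal, self-contained Go sketch of that mapping (the helper name `buildHeadersConfig` is illustrative only and not part of the provider; the `cloudfront` types are the AWS SDK for Go v1 types used in the commits above):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

// buildHeadersConfig mirrors the shape of the provider's expand functions:
// the block's behavior string becomes HeaderBehavior, and the item names
// become a Headers object carrying both the list and its length.
func buildHeadersConfig(behavior string, items []string) *cloudfront.OriginRequestPolicyHeadersConfig {
	apiObject := &cloudfront.OriginRequestPolicyHeadersConfig{
		HeaderBehavior: aws.String(behavior),
	}

	// Items are only attached for list-based behaviors; with "none" the
	// API expects no Headers object at all.
	if behavior != "none" && len(items) > 0 {
		apiObject.Headers = &cloudfront.Headers{
			Items:    aws.StringSlice(items),
			Quantity: aws.Int64(int64(len(items))),
		}
	}

	return apiObject
}

func main() {
	fmt.Println(buildHeadersConfig("whitelist", []string{"x-forwarded-for"}))
}
```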
## Attributes Reference From e11003e7058fe4fbb4dad74f4f3d602d67e3b3bc Mon Sep 17 00:00:00 2001 From: bill-rich Date: Wed, 3 Feb 2021 16:33:15 -0800 Subject: [PATCH 0987/1212] Address feedback from cache policy review --- ...udfront_origin_request_policy_structure.go | 53 ++++++++---------- ...ce_aws_cloudfront_origin_request_policy.go | 6 +- ...s_cloudfront_origin_request_policy_test.go | 15 ++--- ...s_cloudfront_origin_request_policy_test.go | 55 ++++++++++--------- 4 files changed, 63 insertions(+), 66 deletions(-) diff --git a/aws/cloudfront_origin_request_policy_structure.go b/aws/cloudfront_origin_request_policy_structure.go index 7d6342aa704..f6faf8176a3 100644 --- a/aws/cloudfront_origin_request_policy_structure.go +++ b/aws/cloudfront_origin_request_policy_structure.go @@ -28,17 +28,12 @@ func expandCloudFrontOriginRequestPolicyCookiesConfig(tfMap map[string]interface return nil } - var itemsAPIObject *cloudfront.CookieNames - - if itemsFlat, ok := tfMap["cookies"].([]interface{}); ok && len(itemsFlat) == 1 { - itemsAPIObject = expandCloudFrontOriginRequestPolicyCookieNames(itemsFlat[0].(map[string]interface{})) - } else { - itemsAPIObject = nil - } - apiObject := &cloudfront.OriginRequestPolicyCookiesConfig{ CookieBehavior: aws.String(tfMap["cookie_behavior"].(string)), - Cookies: itemsAPIObject, + } + + if items, ok := tfMap["cookies"].([]interface{}); ok && len(items) == 1 { + apiObject.Cookies = expandCloudFrontOriginRequestPolicyCookieNames(items[0].(map[string]interface{})) } return apiObject @@ -48,14 +43,16 @@ func expandCloudFrontOriginRequestPolicyHeaders(tfMap map[string]interface{}) *c if tfMap == nil { return nil } - apiObject := &cloudfront.Headers{} var items []*string for _, item := range tfMap["items"].(*schema.Set).List() { items = append(items, aws.String(item.(string))) } - apiObject.Items = items - apiObject.Quantity = aws.Int64(int64(len(items))) + + apiObject := &cloudfront.Headers{ + Items: items, + Quantity: aws.Int64(int64(len(items))), + } return apiObject } @@ -64,17 +61,13 @@ func expandCloudFrontOriginRequestPolicyHeadersConfig(tfMap map[string]interface if tfMap == nil { return nil } - var itemsAPIObject *cloudfront.Headers - - if itemsFlat, ok := tfMap["headers"].([]interface{}); ok && len(itemsFlat) == 1 && tfMap["header_behavior"] != "none" { - itemsAPIObject = expandCloudFrontOriginRequestPolicyHeaders(itemsFlat[0].(map[string]interface{})) - } else { - itemsAPIObject = nil - } apiObject := &cloudfront.OriginRequestPolicyHeadersConfig{ HeaderBehavior: aws.String(tfMap["header_behavior"].(string)), - Headers: itemsAPIObject, + } + + if items, ok := tfMap["headers"].([]interface{}); ok && len(items) == 1 && tfMap["header_behavior"] != "none" { + apiObject.Headers = expandCloudFrontOriginRequestPolicyHeaders(items[0].(map[string]interface{})) } return apiObject @@ -84,14 +77,16 @@ func expandCloudFrontOriginRequestPolicyQueryStringNames(tfMap map[string]interf if tfMap == nil { return nil } - apiObject := &cloudfront.QueryStringNames{} var items []*string for _, item := range tfMap["items"].(*schema.Set).List() { items = append(items, aws.String(item.(string))) } - apiObject.Items = items - apiObject.Quantity = aws.Int64(int64(len(items))) + + apiObject := &cloudfront.QueryStringNames{ + Items: items, + Quantity: aws.Int64(int64(len(items))), + } return apiObject } @@ -100,17 +95,13 @@ func expandCloudFrontOriginRequestPolicyQueryStringsConfig(tfMap map[string]inte if tfMap == nil { return nil } - var itemsAPIObject *cloudfront.QueryStringNames - - 
if itemsFlat, ok := tfMap["query_strings"].([]interface{}); ok && len(itemsFlat) == 1 { - itemsAPIObject = expandCloudFrontOriginRequestPolicyQueryStringNames(itemsFlat[0].(map[string]interface{})) - } else { - itemsAPIObject = nil - } apiObject := &cloudfront.OriginRequestPolicyQueryStringsConfig{ QueryStringBehavior: aws.String(tfMap["query_string_behavior"].(string)), - QueryStrings: itemsAPIObject, + } + + if items, ok := tfMap["query_strings"].([]interface{}); ok && len(items) == 1 { + apiObject.QueryStrings = expandCloudFrontOriginRequestPolicyQueryStringNames(items[0].(map[string]interface{})) } return apiObject diff --git a/aws/data_source_aws_cloudfront_origin_request_policy.go b/aws/data_source_aws_cloudfront_origin_request_policy.go index ce05f2a6183..042bfa37755 100644 --- a/aws/data_source_aws_cloudfront_origin_request_policy.go +++ b/aws/data_source_aws_cloudfront_origin_request_policy.go @@ -1,6 +1,8 @@ package aws import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudfront" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -111,7 +113,7 @@ func dataSourceAwsCloudFrontOriginRequestPolicyRead(d *schema.ResourceData, meta if d.Id() == "" { if err := dataSourceAwsCloudFrontOriginRequestPolicyFindByName(d, conn); err != nil { - return err + return fmt.Errorf("Unable to find origin request policy by name: %s", err.Error()) } } @@ -122,7 +124,7 @@ func dataSourceAwsCloudFrontOriginRequestPolicyRead(d *schema.ResourceData, meta resp, err := conn.GetOriginRequestPolicy(request) if err != nil { - return err + return fmt.Errorf("Unable to retrieve origin request policy with ID %s: %s", d.Id(), err.Error()) } d.Set("etag", aws.StringValue(resp.ETag)) diff --git a/aws/data_source_aws_cloudfront_origin_request_policy_test.go b/aws/data_source_aws_cloudfront_origin_request_policy_test.go index 1ca5a7cc9cc..10b55921ba3 100644 --- a/aws/data_source_aws_cloudfront_origin_request_policy_test.go +++ b/aws/data_source_aws_cloudfront_origin_request_policy_test.go @@ -11,6 +11,7 @@ import ( func TestAccAWSCloudFrontDataSourceOriginRequestPolicy_basic(t *testing.T) { rInt := acctest.RandInt() + dataSourceName := "data.aws_cloudfront_origin_request_policy.example" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, @@ -20,13 +21,13 @@ func TestAccAWSCloudFrontDataSourceOriginRequestPolicy_basic(t *testing.T) { { Config: testAccAWSCloudFrontDataSourceOriginRequestPolicyConfig(rInt), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.aws_cloudfront_origin_request_policy.example", "comment", "test comment"), - resource.TestCheckResourceAttr("data.aws_cloudfront_origin_request_policy.example", "cookies_config.0.cookie_behavior", "whitelist"), - resource.TestCheckResourceAttr("data.aws_cloudfront_origin_request_policy.example", "cookies_config.0.cookies.0.items.0", "test"), - resource.TestCheckResourceAttr("data.aws_cloudfront_origin_request_policy.example", "headers_config.0.header_behavior", "whitelist"), - resource.TestCheckResourceAttr("data.aws_cloudfront_origin_request_policy.example", "headers_config.0.headers.0.items.0", "test"), - resource.TestCheckResourceAttr("data.aws_cloudfront_origin_request_policy.example", "query_strings_config.0.query_string_behavior", "whitelist"), - resource.TestCheckResourceAttr("data.aws_cloudfront_origin_request_policy.example", "query_strings_config.0.query_strings.0.items.0", "test"), + 
resource.TestCheckResourceAttr(dataSourceName, "comment", "test comment"), + resource.TestCheckResourceAttr(dataSourceName, "cookies_config.0.cookie_behavior", "whitelist"), + resource.TestCheckResourceAttr(dataSourceName, "cookies_config.0.cookies.0.items.0", "test"), + resource.TestCheckResourceAttr(dataSourceName, "headers_config.0.header_behavior", "whitelist"), + resource.TestCheckResourceAttr(dataSourceName, "headers_config.0.headers.0.items.0", "test"), + resource.TestCheckResourceAttr(dataSourceName, "query_strings_config.0.query_string_behavior", "whitelist"), + resource.TestCheckResourceAttr(dataSourceName, "query_strings_config.0.query_strings.0.items.0", "test"), ), }, }, diff --git a/aws/resource_aws_cloudfront_origin_request_policy_test.go b/aws/resource_aws_cloudfront_origin_request_policy_test.go index 1822f324413..e9bf7d1619f 100644 --- a/aws/resource_aws_cloudfront_origin_request_policy_test.go +++ b/aws/resource_aws_cloudfront_origin_request_policy_test.go @@ -11,6 +11,7 @@ import ( func TestAccAWSCloudFrontOriginRequestPolicy_basic(t *testing.T) { rInt := acctest.RandInt() + resourceName := "aws_cloudfront_origin_request_policy.example" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, @@ -20,13 +21,13 @@ func TestAccAWSCloudFrontOriginRequestPolicy_basic(t *testing.T) { { Config: testAccAWSCloudFrontOriginRequestPolicyConfig(rInt), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "comment", "test comment"), - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "cookies_config.0.cookie_behavior", "whitelist"), - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "cookies_config.0.cookies.0.items.0", "test"), - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "headers_config.0.header_behavior", "whitelist"), - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "headers_config.0.headers.0.items.0", "test"), - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "query_strings_config.0.query_string_behavior", "whitelist"), - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "query_strings_config.0.query_strings.0.items.0", "test"), + resource.TestCheckResourceAttr(resourceName, "comment", "test comment"), + resource.TestCheckResourceAttr(resourceName, "cookies_config.0.cookie_behavior", "whitelist"), + resource.TestCheckResourceAttr(resourceName, "cookies_config.0.cookies.0.items.0", "test"), + resource.TestCheckResourceAttr(resourceName, "headers_config.0.header_behavior", "whitelist"), + resource.TestCheckResourceAttr(resourceName, "headers_config.0.headers.0.items.0", "test"), + resource.TestCheckResourceAttr(resourceName, "query_strings_config.0.query_string_behavior", "whitelist"), + resource.TestCheckResourceAttr(resourceName, "query_strings_config.0.query_strings.0.items.0", "test"), ), }, { @@ -41,6 +42,7 @@ func TestAccAWSCloudFrontOriginRequestPolicy_basic(t *testing.T) { func TestAccAWSCloudFrontOriginRequestPolicy_update(t *testing.T) { rInt := acctest.RandInt() + resourceName := "aws_cloudfront_origin_request_policy.example" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, @@ -50,23 +52,23 @@ func 
TestAccAWSCloudFrontOriginRequestPolicy_update(t *testing.T) { { Config: testAccAWSCloudFrontOriginRequestPolicyConfig(rInt), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "comment", "test comment"), - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "cookies_config.0.cookie_behavior", "whitelist"), - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "cookies_config.0.cookies.0.items.0", "test"), - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "headers_config.0.header_behavior", "whitelist"), - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "headers_config.0.headers.0.items.0", "test"), - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "query_strings_config.0.query_string_behavior", "whitelist"), - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "query_strings_config.0.query_strings.0.items.0", "test"), + resource.TestCheckResourceAttr(resourceName, "comment", "test comment"), + resource.TestCheckResourceAttr(resourceName, "cookies_config.0.cookie_behavior", "whitelist"), + resource.TestCheckResourceAttr(resourceName, "cookies_config.0.cookies.0.items.0", "test"), + resource.TestCheckResourceAttr(resourceName, "headers_config.0.header_behavior", "whitelist"), + resource.TestCheckResourceAttr(resourceName, "headers_config.0.headers.0.items.0", "test"), + resource.TestCheckResourceAttr(resourceName, "query_strings_config.0.query_string_behavior", "whitelist"), + resource.TestCheckResourceAttr(resourceName, "query_strings_config.0.query_strings.0.items.0", "test"), ), }, { Config: testAccAWSCloudFrontOriginRequestPolicyConfigUpdate(rInt), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "comment", "test comment updated"), - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "cookies_config.0.cookies.0.items.0", "test2"), - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "headers_config.0.header_behavior", "none"), - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "headers_config.0.headers.#", "0"), - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "query_strings_config.0.query_strings.0.items.0", "test2"), + resource.TestCheckResourceAttr(resourceName, "comment", "test comment updated"), + resource.TestCheckResourceAttr(resourceName, "cookies_config.0.cookies.0.items.0", "test2"), + resource.TestCheckResourceAttr(resourceName, "headers_config.0.header_behavior", "none"), + resource.TestCheckResourceAttr(resourceName, "headers_config.0.headers.#", "0"), + resource.TestCheckResourceAttr(resourceName, "query_strings_config.0.query_strings.0.items.0", "test2"), ), }, { @@ -81,6 +83,7 @@ func TestAccAWSCloudFrontOriginRequestPolicy_update(t *testing.T) { func TestAccAWSCloudFrontOriginRequestPolicy_noneBehavior(t *testing.T) { rInt := acctest.RandInt() + resourceName := "aws_cloudfront_origin_request_policy.example" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, @@ -90,13 +93,13 @@ func TestAccAWSCloudFrontOriginRequestPolicy_noneBehavior(t *testing.T) { { Config: testAccAWSCloudFrontOriginRequestPolicyConfigNoneBehavior(rInt), Check: 
resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "comment", "test comment"), - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "cookies_config.0.cookie_behavior", "none"), - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "cookies_config.0.cookies.#", "0"), - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "headers_config.0.header_behavior", "none"), - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "headers_config.0.headers.#", "0"), - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "query_strings_config.0.query_string_behavior", "none"), - resource.TestCheckResourceAttr("aws_cloudfront_origin_request_policy.example", "query_strings_config.0.query_strings.#", "0"), + resource.TestCheckResourceAttr(resourceName, "comment", "test comment"), + resource.TestCheckResourceAttr(resourceName, "cookies_config.0.cookie_behavior", "none"), + resource.TestCheckResourceAttr(resourceName, "cookies_config.0.cookies.#", "0"), + resource.TestCheckResourceAttr(resourceName, "headers_config.0.header_behavior", "none"), + resource.TestCheckResourceAttr(resourceName, "headers_config.0.headers.#", "0"), + resource.TestCheckResourceAttr(resourceName, "query_strings_config.0.query_string_behavior", "none"), + resource.TestCheckResourceAttr(resourceName, "query_strings_config.0.query_strings.#", "0"), ), }, { From 8ee07d91759946efc462bb88dff89658f0a38de5 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Wed, 3 Feb 2021 16:50:02 -0800 Subject: [PATCH 0988/1212] Address feedback from review --- aws/resource_aws_lb_test.go | 6 +++++- website/docs/r/lb.html.markdown | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_lb_test.go b/aws/resource_aws_lb_test.go index f2521e5cd1b..3459edb6e53 100644 --- a/aws/resource_aws_lb_test.go +++ b/aws/resource_aws_lb_test.go @@ -208,7 +208,9 @@ func TestAccAWSLB_IPv6SubnetMapping(t *testing.T) { Config: testAccAWSLBConfig_IPv6(rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckAWSLBExists(resourceName, &conf), - resource.TestMatchResourceAttr(resourceName, "subnet_mapping.0.ipv6_address", regexp.MustCompile("[a-f0-6]+:[a-f0-6:]+")), + resource.TestMatchTypeSetElemNestedAttrs(resourceName, "subnet_mapping.*", map[string]*regexp.Regexp{ + "ipv6_address": regexp.MustCompile("[a-f0-9]+:[a-f0-9:]+"), + }), ), }, { @@ -2032,6 +2034,8 @@ resource "aws_lb" "test" { tags = { Name = "TestAccAWSALB_ipv6address" } + + depends_on = [aws_internet_gateway.gw] } `, rName)) } diff --git a/website/docs/r/lb.html.markdown b/website/docs/r/lb.html.markdown index 168cf5e824b..0a0573ff792 100644 --- a/website/docs/r/lb.html.markdown +++ b/website/docs/r/lb.html.markdown @@ -135,7 +135,7 @@ Subnet Mapping (`subnet_mapping`) blocks support the following: * `subnet_id` - (Required) The ID of the subnet to attach to the load balancer. You can specify only one subnet per Availability Zone. * `allocation_id` - (Optional) The allocation ID of the Elastic IP address. * `private_ipv4_address` - (Optional) A private ipv4 address within the subnet to assign to the internal-facing load balancer. -* `ipv6_address` - (Optional) An ipv6 address within the subnet to assign to the internal-facing load balancer. +* `ipv6_address` - (Optional) An ipv6 address within the subnet to assign to the internet-facing load balancer.
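Two details in the test change above are worth pinning down. First, `subnet_mapping` is a set-typed attribute, so its state keys are hash-based rather than positional; `subnet_mapping.0.ipv6_address` is not a stable address, which is why the check moves to `resource.TestMatchTypeSetElemNestedAttrs`. Second, the hex character class in the removed line, `[a-f0-6]`, appears to be a typo for `[a-f0-9]`, since IPv6 groups use the full hexadecimal digit range. A standalone sketch verifying the corrected pattern (the sample address is illustrative only):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// IPv6 groups use hex digits 0-9 and a-f; [a-f0-6] would silently
	// fail to match any group containing a 7, 8, or 9.
	ipv6Pattern := regexp.MustCompile(`[a-f0-9]+:[a-f0-9:]+`)

	// Illustrative value; in the acceptance test the address comes from
	// the VPC's assigned IPv6 CIDR block.
	fmt.Println(ipv6Pattern.MatchString("2600:1f18:4ab:9700::8")) // true
}
```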
## Attributes Reference From 72bd194f39fb0cbc7074aee2ebf0dfadf4d36d19 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Wed, 3 Feb 2021 16:54:16 -0800 Subject: [PATCH 0989/1212] Fix formatting issue --- aws/resource_aws_lb_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_lb_test.go b/aws/resource_aws_lb_test.go index 3459edb6e53..248b098aede 100644 --- a/aws/resource_aws_lb_test.go +++ b/aws/resource_aws_lb_test.go @@ -2035,7 +2035,7 @@ resource "aws_lb" "test" { Name = "TestAccAWSALB_ipv6address" } - depends_on = [aws_internet_gateway.gw] + depends_on = [aws_internet_gateway.gw] } `, rName)) } From c387d87ae71b4abd6d557851a1485b451137be98 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Wed, 3 Feb 2021 17:17:31 -0800 Subject: [PATCH 0990/1212] Fix data source name lookup behavior --- aws/data_source_aws_cloudfront_origin_request_policy.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/data_source_aws_cloudfront_origin_request_policy.go b/aws/data_source_aws_cloudfront_origin_request_policy.go index 042bfa37755..2656c832b0d 100644 --- a/aws/data_source_aws_cloudfront_origin_request_policy.go +++ b/aws/data_source_aws_cloudfront_origin_request_policy.go @@ -111,7 +111,7 @@ func dataSourceAwsCloudFrontOriginRequestPolicy() *schema.Resource { func dataSourceAwsCloudFrontOriginRequestPolicyRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).cloudfrontconn - if d.Id() == "" { + if d.Get("id").(string) == "" { if err := dataSourceAwsCloudFrontOriginRequestPolicyFindByName(d, conn); err != nil { return fmt.Errorf("Unable to find origin request policy by name: %s", err.Error()) } From 1c4ef70915ecb0daf0704f60b57d298f74a84c9c Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 3 Feb 2021 22:02:39 -0800 Subject: [PATCH 0991/1212] Updates terrafmt to 0.3.0 + function parameter fix --- tools/go.mod | 4 ++-- tools/go.sum | 13 +++++++++---- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/tools/go.mod b/tools/go.mod index 2e6c49e71a3..1deb7134b80 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -7,11 +7,11 @@ require ( github.com/client9/misspell v0.3.4 github.com/golangci/golangci-lint v1.35.2 github.com/hashicorp/go-changelog v0.0.0-20201005170154-56335215ce3a - github.com/katbyte/terrafmt v0.2.1-0.20200913185704-5ff4421407b4 + github.com/katbyte/terrafmt v0.3.0 github.com/pavius/impi v0.0.3 // indirect github.com/terraform-linters/tflint v0.20.3 ) -replace github.com/katbyte/terrafmt => github.com/gdavison/terrafmt v0.2.1-0.20201026181004-a896893cd6af +replace github.com/katbyte/terrafmt => github.com/gdavison/terrafmt v0.3.1-0.20210204054728-84242796be99 replace github.com/hashicorp/go-changelog => github.com/breathingdust/go-changelog v0.0.0-20210127001721-f985d5709c15 diff --git a/tools/go.sum b/tools/go.sum index e252b7ba572..f8f1cc224a8 100644 --- a/tools/go.sum +++ b/tools/go.sum @@ -82,6 +82,7 @@ github.com/aliyun/aliyun-tablestore-go-sdk v4.1.2+incompatible/go.mod h1:LDQHRZy github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/andybalholm/crlf v0.0.0-20171020200849-670099aa064f/go.mod h1:k8feO4+kXDxro6ErPXBRTJ/ro2mf0SsFG8s7doP9kJE= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 
h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= @@ -193,8 +194,8 @@ github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/gdavison/terrafmt v0.2.1-0.20201026181004-a896893cd6af h1:hW6361l2aa2oIkHvcY6EZOvdyh6UL6hx744QfEifVX8= -github.com/gdavison/terrafmt v0.2.1-0.20201026181004-a896893cd6af/go.mod h1:dV20mUxak+6H6va7sittpWdtEzs11ff4l7BMtY46MMk= +github.com/gdavison/terrafmt v0.3.1-0.20210204054728-84242796be99 h1:6Plt7xsU0Z1l7hu37EshJXFFi0VxGsmRG+i3lr6X/NA= +github.com/gdavison/terrafmt v0.3.1-0.20210204054728-84242796be99/go.mod h1:B3IJj041fZWEirIxp4l2YGNYXH1PdptEu8we8YzQm/8= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= @@ -398,6 +399,7 @@ github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyN github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-azure-helpers v0.12.0/go.mod h1:Zc3v4DNeX6PDdy7NljlYpnrdac1++qNW0I4U+ofGwpg= +github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= @@ -467,8 +469,12 @@ github.com/hashicorp/terraform v0.13.4/go.mod h1:1H1qcnppNc/bBGc7poOfnmmBeQMlF0s github.com/hashicorp/terraform-config-inspect v0.0.0-20191212124732-c6ae6269b9d7 h1:Pc5TCv9mbxFN6UVX0LH6CpQrdTM5YjbVI2w15237Pjk= github.com/hashicorp/terraform-config-inspect v0.0.0-20191212124732-c6ae6269b9d7/go.mod h1:p+ivJws3dpqbp1iP84+npOyAmTTOLMgCzrXd3GSdn/A= github.com/hashicorp/terraform-exec v0.10.0/go.mod h1:tOT8j1J8rP05bZBGWXfMyU3HkLi1LWyqL3Bzsc3CJjo= +github.com/hashicorp/terraform-exec v0.12.0 h1:Tb1VC2gqArl9EJziJjoazep2MyxMk00tnNKV/rgMba0= +github.com/hashicorp/terraform-exec v0.12.0/go.mod h1:SGhto91bVRlgXQWcJ5znSz+29UZIa8kpBbkGwQ+g9E8= github.com/hashicorp/terraform-json v0.5.0 h1:7TV3/F3y7QVSuN4r9BEXqnWqrAyeOtON8f0wvREtyzs= github.com/hashicorp/terraform-json v0.5.0/go.mod h1:eAbqb4w0pSlRmdvl8fOyHAi/+8jnkVYN28gJkSJrLhU= +github.com/hashicorp/terraform-json v0.8.0 h1:XObQ3PgqU52YLQKEaJ08QtUshAfN3yu4u8ebSW0vztc= +github.com/hashicorp/terraform-json v0.8.0/go.mod h1:3defM4kkMfttwiE7VakJDwCd4R+umhSQnvJwORXbprE= github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.4/go.mod h1:GP0lmw4Y+XV1OfTmi/hK75t5KWGGzoOzEgUBPGZ6Wq4= github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0= @@ 
-767,7 +773,6 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.3.2/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.4.1 h1:asw9sl74539yqavKaglDM5hFpdJVK0Y5Dr/JOgQ89nQ= github.com/spf13/afero v1.4.1/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= @@ -873,6 +878,7 @@ github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q github.com/zclconf/go-cty v1.5.1/go.mod h1:nHzOclRkoj++EU9ZjSrZvRG0BXIWt8c7loYc0qXAFGQ= github.com/zclconf/go-cty v1.6.1 h1:wHtZ+LSSQVwUSb+XIJ5E9hgAQxyWATZsAWT+ESJ9dQ0= github.com/zclconf/go-cty v1.6.1/go.mod h1:VDR4+I79ubFBGm1uJac1226K5yANQFHeauxPBoP54+o= +github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= github.com/zclconf/go-cty-yaml v1.0.2 h1:dNyg4QLTrv2IfJpm7Wtxi55ed5gLGOlPrZ6kMd51hY0= github.com/zclconf/go-cty-yaml v1.0.2/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -1129,7 +1135,6 @@ golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0 h1:SQvH+DjrwqD1hyyQU+K7JegHz1KEZgEwt17p9d6R2eg= golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200817023811-d00afeaade8f h1:33yHANSyO/TeglgY9rBhUpX43wtonTXoFOsMRtNB6qE= golang.org/x/tools v0.0.0-20200817023811-d00afeaade8f/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= From 54d75b4060b8653c792c07aa9ca82b59ee067a2d Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 4 Feb 2021 11:10:25 -0500 Subject: [PATCH 0992/1212] New Resource: aws_route53_key_signing_key (#16840) * New Resource: aws_route53_key_signing_key Reference: https://github.com/hashicorp/terraform-provider-aws/pull/16834 Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16836 Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAwsRoute53KeySigningKey_disappears (134.60s) --- PASS: TestAccAwsRoute53KeySigningKey_basic (135.34s) --- PASS: TestAccAwsRoute53KeySigningKey_Status (180.85s) ``` Output from acceptance testing in AWS GovCloud (US): ``` === CONT TestAccAwsRoute53KeySigningKey_basic route53_key_signing_key_test.go:40: Route 53 Key Signing Key not available in this AWS Partition --- SKIP: TestAccAwsRoute53KeySigningKey_basic (2.52s) === CONT TestAccAwsRoute53KeySigningKey_Status route53_key_signing_key_test.go:40: Route 53 Key Signing Key not available in this AWS Partition === CONT TestAccAwsRoute53KeySigningKey_disappears route53_key_signing_key_test.go:40: Route 53 Key Signing Key not available in this AWS Partition --- SKIP: TestAccAwsRoute53KeySigningKey_Status (2.52s) --- SKIP: TestAccAwsRoute53KeySigningKey_disappears 
(2.52s) ``` --- .changelog/16840.txt | 3 + aws/internal/service/route53/enum.go | 9 + aws/internal/service/route53/finder/finder.go | 50 +++ aws/internal/service/route53/id.go | 25 ++ aws/internal/service/route53/waiter/status.go | 44 +++ aws/internal/service/route53/waiter/waiter.go | 67 ++++ aws/provider.go | 1 + aws/resource_aws_route53_key_signing_key.go | 300 ++++++++++++++++++ ...source_aws_route53_key_signing_key_test.go | 243 ++++++++++++++ aws/route53_key_signing_key_test.go | 87 +++++ .../r/route53_key_signing_key.html.markdown | 97 ++++++ 11 files changed, 926 insertions(+) create mode 100644 .changelog/16840.txt create mode 100644 aws/internal/service/route53/enum.go create mode 100644 aws/internal/service/route53/finder/finder.go create mode 100644 aws/internal/service/route53/id.go create mode 100644 aws/internal/service/route53/waiter/status.go create mode 100644 aws/internal/service/route53/waiter/waiter.go create mode 100644 aws/resource_aws_route53_key_signing_key.go create mode 100644 aws/resource_aws_route53_key_signing_key_test.go create mode 100644 aws/route53_key_signing_key_test.go create mode 100644 website/docs/r/route53_key_signing_key.html.markdown diff --git a/.changelog/16840.txt b/.changelog/16840.txt new file mode 100644 index 00000000000..cbcde6925b2 --- /dev/null +++ b/.changelog/16840.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_route53_key_signing_key +``` diff --git a/aws/internal/service/route53/enum.go b/aws/internal/service/route53/enum.go new file mode 100644 index 00000000000..d10f69e9189 --- /dev/null +++ b/aws/internal/service/route53/enum.go @@ -0,0 +1,9 @@ +package route53 + +const ( + KeySigningKeyStatusActionNeeded = "ACTION_NEEDED" + KeySigningKeyStatusActive = "ACTIVE" + KeySigningKeyStatusDeleting = "DELETING" + KeySigningKeyStatusInactive = "INACTIVE" + KeySigningKeyStatusInternalFailure = "INTERNAL_FAILURE" +) diff --git a/aws/internal/service/route53/finder/finder.go b/aws/internal/service/route53/finder/finder.go new file mode 100644 index 00000000000..de40116d4e8 --- /dev/null +++ b/aws/internal/service/route53/finder/finder.go @@ -0,0 +1,50 @@ +package finder + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/route53" + tfroute53 "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/route53" +) + +func KeySigningKey(conn *route53.Route53, hostedZoneID string, name string) (*route53.KeySigningKey, error) { + input := &route53.GetDNSSECInput{ + HostedZoneId: aws.String(hostedZoneID), + } + + var result *route53.KeySigningKey + + output, err := conn.GetDNSSEC(input) + + if err != nil { + return nil, err + } + + if output == nil { + return nil, nil + } + + for _, keySigningKey := range output.KeySigningKeys { + if keySigningKey == nil { + continue + } + + if aws.StringValue(keySigningKey.Name) == name { + result = keySigningKey + break + } + } + + return result, err +} + +func KeySigningKeyByResourceID(conn *route53.Route53, resourceID string) (*route53.KeySigningKey, error) { + hostedZoneID, name, err := tfroute53.KeySigningKeyParseResourceID(resourceID) + + if err != nil { + return nil, fmt.Errorf("error parsing Route 53 Key Signing Key (%s) identifier: %w", resourceID, err) + } + + return KeySigningKey(conn, hostedZoneID, name) +} diff --git a/aws/internal/service/route53/id.go b/aws/internal/service/route53/id.go new file mode 100644 index 00000000000..a8748fa4eef --- /dev/null +++ b/aws/internal/service/route53/id.go @@ -0,0 +1,25 @@ +package route53 + +import 
( + "fmt" + "strings" +) + +const KeySigningKeyResourceIDSeparator = "," + +func KeySigningKeyCreateResourceID(hostedZoneID string, name string) string { + parts := []string{hostedZoneID, name} + id := strings.Join(parts, KeySigningKeyResourceIDSeparator) + + return id +} + +func KeySigningKeyParseResourceID(id string) (string, string, error) { + parts := strings.Split(id, KeySigningKeyResourceIDSeparator) + + if len(parts) == 2 && parts[0] != "" && parts[1] != "" { + return parts[0], parts[1], nil + } + + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected hosted-zone-id%[2]sname", id, KeySigningKeyResourceIDSeparator) +} diff --git a/aws/internal/service/route53/waiter/status.go b/aws/internal/service/route53/waiter/status.go new file mode 100644 index 00000000000..eb520ef2d1e --- /dev/null +++ b/aws/internal/service/route53/waiter/status.go @@ -0,0 +1,44 @@ +package waiter + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/route53" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/route53/finder" +) + +func ChangeInfoStatus(conn *route53.Route53, changeID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + input := &route53.GetChangeInput{ + Id: aws.String(changeID), + } + + output, err := conn.GetChange(input) + + if err != nil { + return nil, "", err + } + + if output == nil || output.ChangeInfo == nil { + return nil, "", nil + } + + return output.ChangeInfo, aws.StringValue(output.ChangeInfo.Status), nil + } +} + +func KeySigningKeyStatus(conn *route53.Route53, hostedZoneID string, name string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + keySigningKey, err := finder.KeySigningKey(conn, hostedZoneID, name) + + if err != nil { + return nil, "", err + } + + if keySigningKey == nil { + return nil, "", nil + } + + return keySigningKey, aws.StringValue(keySigningKey.Status), nil + } +} diff --git a/aws/internal/service/route53/waiter/waiter.go b/aws/internal/service/route53/waiter/waiter.go new file mode 100644 index 00000000000..0c9647f9ca3 --- /dev/null +++ b/aws/internal/service/route53/waiter/waiter.go @@ -0,0 +1,67 @@ +package waiter + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/route53" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const ( + ChangeTimeout = 30 * time.Minute + + KeySigningKeyStatusTimeout = 5 * time.Minute +) + +func ChangeInfoStatusInsync(conn *route53.Route53, changeID string) (*route53.ChangeInfo, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{route53.ChangeStatusPending}, + Target: []string{route53.ChangeStatusInsync}, + Refresh: ChangeInfoStatus(conn, changeID), + Delay: 30 * time.Second, + MinTimeout: 5 * time.Second, + Timeout: ChangeTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*route53.ChangeInfo); ok { + return output, err + } + + return nil, err +} + +func KeySigningKeyStatusUpdated(conn *route53.Route53, hostedZoneID string, name string, status string) (*route53.KeySigningKey, error) { + stateConf := &resource.StateChangeConf{ + Target: []string{status}, + Refresh: KeySigningKeyStatus(conn, hostedZoneID, name), + MinTimeout: 5 * time.Second, + Timeout: KeySigningKeyStatusTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok :=
outputRaw.(*route53.KeySigningKey); ok { + if err != nil && output != nil && output.Status != nil && output.StatusMessage != nil { + newErr := fmt.Errorf("%s: %s", aws.StringValue(output.Status), aws.StringValue(output.StatusMessage)) + + switch e := err.(type) { + case *resource.TimeoutError: + if e.LastError == nil { + e.LastError = newErr + } + case *resource.UnexpectedStateError: + if e.LastError == nil { + e.LastError = newErr + } + } + } + + return output, err + } + + return nil, err +} diff --git a/aws/provider.go b/aws/provider.go index 97bb7538de3..d968689b008 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -864,6 +864,7 @@ func Provider() *schema.Provider { "aws_redshift_event_subscription": resourceAwsRedshiftEventSubscription(), "aws_resourcegroups_group": resourceAwsResourceGroupsGroup(), "aws_route53_delegation_set": resourceAwsRoute53DelegationSet(), + "aws_route53_key_signing_key": resourceAwsRoute53KeySigningKey(), "aws_route53_query_log": resourceAwsRoute53QueryLog(), "aws_route53_record": resourceAwsRoute53Record(), "aws_route53_zone_association": resourceAwsRoute53ZoneAssociation(), diff --git a/aws/resource_aws_route53_key_signing_key.go b/aws/resource_aws_route53_key_signing_key.go new file mode 100644 index 00000000000..46f93d6fc07 --- /dev/null +++ b/aws/resource_aws_route53_key_signing_key.go @@ -0,0 +1,300 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/route53" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + tfroute53 "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/route53" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/route53/finder" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/route53/waiter" +) + +func resourceAwsRoute53KeySigningKey() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsRoute53KeySigningKeyCreate, + Read: resourceAwsRoute53KeySigningKeyRead, + Update: resourceAwsRoute53KeySigningKeyUpdate, + Delete: resourceAwsRoute53KeySigningKeyDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "digest_algorithm_mnemonic": { + Type: schema.TypeString, + Computed: true, + }, + "digest_algorithm_type": { + Type: schema.TypeInt, + Computed: true, + }, + "digest_value": { + Type: schema.TypeString, + Computed: true, + }, + "dnskey_record": { + Type: schema.TypeString, + Computed: true, + }, + "ds_record": { + Type: schema.TypeString, + Computed: true, + }, + "flag": { + Type: schema.TypeInt, + Computed: true, + }, + "hosted_zone_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "key_management_service_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, + }, + "key_tag": { + Type: schema.TypeInt, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 128), + validation.StringMatch(regexp.MustCompile("^[a-zA-Z0-9._-]+$"), "must contain only alphanumeric characters, periods, underscores, or hyphens"), + ), + }, + "public_key": { + Type: schema.TypeString, + Computed: true, + }, + "signing_algorithm_mnemonic": { + Type: schema.TypeString,
+ Computed: true, + }, + "signing_algorithm_type": { + Type: schema.TypeInt, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Optional: true, + Default: tfroute53.KeySigningKeyStatusActive, + ValidateFunc: validation.StringInSlice([]string{ + tfroute53.KeySigningKeyStatusActive, + tfroute53.KeySigningKeyStatusInactive, + }, false), + }, + }, + } +} + +func resourceAwsRoute53KeySigningKeyCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).r53conn + + hostedZoneID := d.Get("hosted_zone_id").(string) + name := d.Get("name").(string) + status := d.Get("status").(string) + + input := &route53.CreateKeySigningKeyInput{ + CallerReference: aws.String(resource.UniqueId()), + HostedZoneId: aws.String(hostedZoneID), + Name: aws.String(name), + Status: aws.String(status), + } + + if v, ok := d.GetOk("key_management_service_arn"); ok { + input.KeyManagementServiceArn = aws.String(v.(string)) + } + + output, err := conn.CreateKeySigningKey(input) + + if err != nil { + return fmt.Errorf("error creating Route 53 Key Signing Key: %w", err) + } + + d.SetId(tfroute53.KeySigningKeyCreateResourceID(hostedZoneID, name)) + + if output != nil && output.ChangeInfo != nil { + if _, err := waiter.ChangeInfoStatusInsync(conn, aws.StringValue(output.ChangeInfo.Id)); err != nil { + return fmt.Errorf("error waiting for Route 53 Key Signing Key (%s) creation: %w", d.Id(), err) + } + } + + if _, err := waiter.KeySigningKeyStatusUpdated(conn, hostedZoneID, name, status); err != nil { + return fmt.Errorf("error waiting for Route 53 Key Signing Key (%s) status (%s): %w", d.Id(), status, err) + } + + return resourceAwsRoute53KeySigningKeyRead(d, meta) +} + +func resourceAwsRoute53KeySigningKeyRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).r53conn + + hostedZoneID, name, err := tfroute53.KeySigningKeyParseResourceID(d.Id()) + + if err != nil { + return fmt.Errorf("error parsing Route 53 Key Signing Key (%s) identifier: %w", d.Id(), err) + } + + keySigningKey, err := finder.KeySigningKey(conn, hostedZoneID, name) + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, route53.ErrCodeNoSuchHostedZone) { + log.Printf("[WARN] Route 53 Key Signing Key (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, route53.ErrCodeNoSuchKeySigningKey) { + log.Printf("[WARN] Route 53 Key Signing Key (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error reading Route 53 Key Signing Key (%s): %w", d.Id(), err) + } + + if keySigningKey == nil { + if d.IsNewResource() { + return fmt.Errorf("error reading Route 53 Key Signing Key (%s): not found", d.Id()) + } + + log.Printf("[WARN] Route 53 Key Signing Key (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + d.Set("digest_algorithm_mnemonic", keySigningKey.DigestAlgorithmMnemonic) + d.Set("digest_algorithm_type", keySigningKey.DigestAlgorithmType) + d.Set("digest_value", keySigningKey.DigestValue) + d.Set("dnskey_record", keySigningKey.DNSKEYRecord) + d.Set("ds_record", keySigningKey.DSRecord) + d.Set("flag", keySigningKey.Flag) + d.Set("hosted_zone_id", hostedZoneID) + d.Set("key_management_service_arn", keySigningKey.KmsArn) + d.Set("key_tag", keySigningKey.KeyTag) + d.Set("name", keySigningKey.Name) + d.Set("public_key", keySigningKey.PublicKey) + d.Set("signing_algorithm_mnemonic", keySigningKey.SigningAlgorithmMnemonic) + 
d.Set("signing_algorithm_type", keySigningKey.SigningAlgorithmType) + d.Set("status", keySigningKey.Status) + + return nil +} + +func resourceAwsRoute53KeySigningKeyUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).r53conn + + if d.HasChange("status") { + status := d.Get("status").(string) + + switch status { + default: + return fmt.Errorf("error updating Route 53 Key Signing Key (%s) status: unknown status (%s)", d.Id(), status) + case tfroute53.KeySigningKeyStatusActive: + input := &route53.ActivateKeySigningKeyInput{ + HostedZoneId: aws.String(d.Get("hosted_zone_id").(string)), + Name: aws.String(d.Get("name").(string)), + } + + output, err := conn.ActivateKeySigningKey(input) + + if err != nil { + return fmt.Errorf("error updating Route 53 Key Signing Key (%s) status (%s): %w", d.Id(), status, err) + } + + if output != nil && output.ChangeInfo != nil { + if _, err := waiter.ChangeInfoStatusInsync(conn, aws.StringValue(output.ChangeInfo.Id)); err != nil { + return fmt.Errorf("error waiting for Route 53 Key Signing Key (%s) status (%s) update: %w", d.Id(), status, err) + } + } + case tfroute53.KeySigningKeyStatusInactive: + input := &route53.DeactivateKeySigningKeyInput{ + HostedZoneId: aws.String(d.Get("hosted_zone_id").(string)), + Name: aws.String(d.Get("name").(string)), + } + + output, err := conn.DeactivateKeySigningKey(input) + + if err != nil { + return fmt.Errorf("error updating Route 53 Key Signing Key (%s) status (%s): %w", d.Id(), status, err) + } + + if output != nil && output.ChangeInfo != nil { + if _, err := waiter.ChangeInfoStatusInsync(conn, aws.StringValue(output.ChangeInfo.Id)); err != nil { + return fmt.Errorf("error waiting for Route 53 Key Signing Key (%s) status (%s) update: %w", d.Id(), status, err) + } + } + } + + if _, err := waiter.KeySigningKeyStatusUpdated(conn, d.Get("hosted_zone_id").(string), d.Get("name").(string), status); err != nil { + return fmt.Errorf("error waiting for Route 53 Key Signing Key (%s) status (%s): %w", d.Id(), status, err) + } + } + + return resourceAwsRoute53KeySigningKeyRead(d, meta) +} + +func resourceAwsRoute53KeySigningKeyDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).r53conn + + status := d.Get("status").(string) + + if status == tfroute53.KeySigningKeyStatusActive { + input := &route53.DeactivateKeySigningKeyInput{ + HostedZoneId: aws.String(d.Get("hosted_zone_id").(string)), + Name: aws.String(d.Get("name").(string)), + } + + output, err := conn.DeactivateKeySigningKey(input) + + if err != nil { + return fmt.Errorf("error updating Route 53 Key Signing Key (%s) status (%s): %w", d.Id(), status, err) + } + + if output != nil && output.ChangeInfo != nil { + if _, err := waiter.ChangeInfoStatusInsync(conn, aws.StringValue(output.ChangeInfo.Id)); err != nil { + return fmt.Errorf("error waiting for Route 53 Key Signing Key (%s) status (%s) update: %w", d.Id(), status, err) + } + } + } + + input := &route53.DeleteKeySigningKeyInput{ + HostedZoneId: aws.String(d.Get("hosted_zone_id").(string)), + Name: aws.String(d.Get("name").(string)), + } + + output, err := conn.DeleteKeySigningKey(input) + + if tfawserr.ErrCodeEquals(err, route53.ErrCodeNoSuchHostedZone) { + return nil + } + + if tfawserr.ErrCodeEquals(err, route53.ErrCodeNoSuchKeySigningKey) { + return nil + } + + if err != nil { + return fmt.Errorf("error deleting Route 53 Key Signing Key (%s): %w", d.Id(), err) + } + + if output != nil && output.ChangeInfo != nil { + if _, err := 
waiter.ChangeInfoStatusInsync(conn, aws.StringValue(output.ChangeInfo.Id)); err != nil { + return fmt.Errorf("error waiting for Route 53 Key Signing Key (%s) deletion: %w", d.Id(), err) + } + } + + return nil +} diff --git a/aws/resource_aws_route53_key_signing_key_test.go b/aws/resource_aws_route53_key_signing_key_test.go new file mode 100644 index 00000000000..fdfc60b2ab1 --- /dev/null +++ b/aws/resource_aws_route53_key_signing_key_test.go @@ -0,0 +1,243 @@ +package aws + +import ( + "fmt" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/service/route53" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + tfroute53 "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/route53" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/route53/finder" +) + +func TestAccAwsRoute53KeySigningKey_basic(t *testing.T) { + kmsKeyResourceName := "aws_kms_key.test" + route53ZoneResourceName := "aws_route53_zone.test" + resourceName := "aws_route53_key_signing_key.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckRoute53KeySigningKey(t) }, + ErrorCheck: testAccErrorCheckSkipRoute53(t), + ProviderFactories: testAccProviderFactories, + CheckDestroy: testAccCheckAwsRoute53KeySigningKeyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsRoute53KeySigningKeyConfig_Name(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsRoute53KeySigningKeyExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "digest_algorithm_mnemonic", "SHA-256"), + resource.TestCheckResourceAttr(resourceName, "digest_algorithm_type", "2"), + resource.TestMatchResourceAttr(resourceName, "digest_value", regexp.MustCompile(`^[0-9A-F]+$`)), + resource.TestMatchResourceAttr(resourceName, "dnskey_record", regexp.MustCompile(`^257 [0-9]+ [0-9]+ [a-zA-Z0-9+/]+={0,3}$`)), + resource.TestMatchResourceAttr(resourceName, "ds_record", regexp.MustCompile(`^[0-9]+ [0-9]+ [0-9]+ [0-9A-F]+$`)), + resource.TestCheckResourceAttr(resourceName, "flag", "257"), + resource.TestCheckResourceAttrPair(resourceName, "hosted_zone_id", route53ZoneResourceName, "id"), + resource.TestCheckResourceAttrPair(resourceName, "key_management_service_arn", kmsKeyResourceName, "arn"), + resource.TestMatchResourceAttr(resourceName, "key_tag", regexp.MustCompile(`^[0-9]+$`)), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestMatchResourceAttr(resourceName, "public_key", regexp.MustCompile(`^[a-zA-Z0-9+/]+={0,3}$`)), + resource.TestCheckResourceAttr(resourceName, "signing_algorithm_mnemonic", "ECDSAP256SHA256"), + resource.TestCheckResourceAttr(resourceName, "signing_algorithm_type", "13"), + resource.TestCheckResourceAttr(resourceName, "status", tfroute53.KeySigningKeyStatusActive), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAwsRoute53KeySigningKey_disappears(t *testing.T) { + resourceName := "aws_route53_key_signing_key.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckRoute53KeySigningKey(t) }, + ErrorCheck: testAccErrorCheckSkipRoute53(t), + ProviderFactories: 
testAccProviderFactories, + CheckDestroy: testAccCheckAwsRoute53KeySigningKeyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsRoute53KeySigningKeyConfig_Name(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsRoute53KeySigningKeyExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsRoute53KeySigningKey(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccAwsRoute53KeySigningKey_Status(t *testing.T) { + resourceName := "aws_route53_key_signing_key.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckRoute53KeySigningKey(t) }, + ErrorCheck: testAccErrorCheckSkipRoute53(t), + ProviderFactories: testAccProviderFactories, + CheckDestroy: testAccCheckAwsRoute53KeySigningKeyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsRoute53KeySigningKeyConfig_Status(rName, tfroute53.KeySigningKeyStatusInactive), + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsRoute53KeySigningKeyExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "status", tfroute53.KeySigningKeyStatusInactive), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsRoute53KeySigningKeyConfig_Status(rName, tfroute53.KeySigningKeyStatusActive), + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsRoute53KeySigningKeyExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "status", tfroute53.KeySigningKeyStatusActive), + ), + }, + { + Config: testAccAwsRoute53KeySigningKeyConfig_Status(rName, tfroute53.KeySigningKeyStatusInactive), + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsRoute53KeySigningKeyExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "status", tfroute53.KeySigningKeyStatusInactive), + ), + }, + }, + }) +} + +func testAccCheckAwsRoute53KeySigningKeyDestroy(s *terraform.State) error { + conn := testAccProviderRoute53KeySigningKey.Meta().(*AWSClient).r53conn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_route53_key_signing_key" { + continue + } + + keySigningKey, err := finder.KeySigningKeyByResourceID(conn, rs.Primary.ID) + + if tfawserr.ErrCodeEquals(err, route53.ErrCodeNoSuchHostedZone) { + continue + } + + if tfawserr.ErrCodeEquals(err, route53.ErrCodeNoSuchKeySigningKey) { + continue + } + + if err != nil { + return fmt.Errorf("error reading Route 53 Key Signing Key (%s): %w", rs.Primary.ID, err) + } + + if keySigningKey != nil { + return fmt.Errorf("Route 53 Key Signing Key (%s) still exists", rs.Primary.ID) + } + } + + return nil +} + +func testAccAwsRoute53KeySigningKeyExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + + if !ok { + return fmt.Errorf("resource %s not found", resourceName) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("resource %s has not set its id", resourceName) + } + + conn := testAccProviderRoute53KeySigningKey.Meta().(*AWSClient).r53conn + + keySigningKey, err := finder.KeySigningKeyByResourceID(conn, rs.Primary.ID) + + if err != nil { + return fmt.Errorf("error reading Route 53 Key Signing Key (%s): %w", rs.Primary.ID, err) + } + + if keySigningKey == nil { + return fmt.Errorf("Route 53 Key Signing Key (%s) not found", rs.Primary.ID) + } + + return nil + } +} + +func testAccAwsRoute53KeySigningKeyConfig_Base(rName 
string) string {
+	return composeConfig(
+		testAccRoute53KeySigningKeyRegionProviderConfig(),
+		fmt.Sprintf(`
+resource "aws_kms_key" "test" {
+  customer_master_key_spec = "ECC_NIST_P256"
+  deletion_window_in_days  = 7
+  key_usage                = "SIGN_VERIFY"
+  policy = jsonencode({
+    Statement = [
+      {
+        Action = [
+          "kms:DescribeKey",
+          "kms:GetPublicKey",
+          "kms:Sign",
+        ],
+        Effect = "Allow"
+        Principal = {
+          Service = "api-service.dnssec.route53.aws.internal"
+        }
+        Sid = "Allow Route 53 DNSSEC Service"
+      },
+      {
+        Action = "kms:*"
+        Effect = "Allow"
+        Principal = {
+          AWS = "*"
+        }
+        Resource = "*"
+        Sid = "Enable IAM User Permissions"
+      },
+    ]
+    Version = "2012-10-17"
+  })
+}
+
+resource "aws_route53_zone" "test" {
+  name = "%[1]s.terraformtest.com"
+}
+`, rName))
+}
+
+func testAccAwsRoute53KeySigningKeyConfig_Name(rName string) string {
+	return composeConfig(
+		testAccAwsRoute53KeySigningKeyConfig_Base(rName),
+		fmt.Sprintf(`
+resource "aws_route53_key_signing_key" "test" {
+  hosted_zone_id             = aws_route53_zone.test.id
+  key_management_service_arn = aws_kms_key.test.arn
+  name                       = %[1]q
+}
+`, rName))
+}
+
+func testAccAwsRoute53KeySigningKeyConfig_Status(rName string, status string) string {
+	return composeConfig(
+		testAccAwsRoute53KeySigningKeyConfig_Base(rName),
+		fmt.Sprintf(`
+resource "aws_route53_key_signing_key" "test" {
+  hosted_zone_id             = aws_route53_zone.test.id
+  key_management_service_arn = aws_kms_key.test.arn
+  name                       = %[1]q
+  status                     = %[2]q
+}
+`, rName, status))
+}
diff --git a/aws/route53_key_signing_key_test.go b/aws/route53_key_signing_key_test.go
new file mode 100644
index 00000000000..ad2a87f7f00
--- /dev/null
+++ b/aws/route53_key_signing_key_test.go
@@ -0,0 +1,87 @@
+package aws
+
+import (
+	"context"
+	"sync"
+	"testing"
+
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+	"github.com/aws/aws-sdk-go/service/route53"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
+)
+
+// Route 53 Key Signing Key can only be enabled with KMS Keys in specific regions.
+
+// testAccRoute53KeySigningKeyRegion is the chosen Route 53 Key Signing Key testing region
+//
+// Cached to prevent issues should multiple regions become available.
+var testAccRoute53KeySigningKeyRegion string
+
+// testAccProviderRoute53KeySigningKey is the Route 53 Key Signing Key provider instance
+//
+// This Provider can be used in testing code for API calls without requiring
+// the use of saving and referencing specific ProviderFactories instances.
+//
+// testAccPreCheckRoute53KeySigningKey(t) must be called before using this provider instance.
+var testAccProviderRoute53KeySigningKey *schema.Provider
+
+// testAccProviderRoute53KeySigningKeyConfigure ensures the provider is only configured once
+var testAccProviderRoute53KeySigningKeyConfigure sync.Once
+
+// testAccPreCheckRoute53KeySigningKey verifies AWS credentials and that Route 53 Key Signing Key is supported
+func testAccPreCheckRoute53KeySigningKey(t *testing.T) {
+	testAccPartitionHasServicePreCheck(route53.EndpointsID, t)
+
+	region := testAccGetRoute53KeySigningKeyRegion()
+
+	if region == "" {
+		t.Skip("Route 53 Key Signing Key not available in this AWS Partition")
+	}
+
+	// Since we are outside the scope of the Terraform configuration we must
+	// call Configure() to properly initialize the provider configuration.
+	testAccProviderRoute53KeySigningKeyConfigure.Do(func() {
+		testAccProviderRoute53KeySigningKey = Provider()
+
+		config := map[string]interface{}{
+			"region": region,
+		}
+
+		diags := testAccProviderRoute53KeySigningKey.Configure(context.Background(), terraform.NewResourceConfigRaw(config))
+
+		if diags != nil && diags.HasError() {
+			for _, d := range diags {
+				if d.Severity == diag.Error {
+					t.Fatalf("error configuring Route 53 Key Signing Key provider: %s", d.Summary)
+				}
+			}
+		}
+	})
+}
+
+// testAccRoute53KeySigningKeyRegionProviderConfig is the Terraform provider configuration for Route 53 Key Signing Key region testing
+//
+// Testing Route 53 Key Signing Key assumes no other provider configurations
+// are necessary and overwrites the "aws" provider configuration.
+func testAccRoute53KeySigningKeyRegionProviderConfig() string {
+	return testAccRegionalProviderConfig(testAccGetRoute53KeySigningKeyRegion())
+}
+
+// testAccGetRoute53KeySigningKeyRegion returns the Route 53 Key Signing Key region for testing
+func testAccGetRoute53KeySigningKeyRegion() string {
+	if testAccRoute53KeySigningKeyRegion != "" {
+		return testAccRoute53KeySigningKeyRegion
+	}
+
+	// AWS Commercial: https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-configuring-dnssec-cmk-requirements.html
+	// AWS GovCloud (US) - not available yet: https://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-r53.html
+	// AWS China - not available yet: https://docs.amazonaws.cn/en_us/aws/latest/userguide/route53.html
+	switch testAccGetPartition() {
+	case endpoints.AwsPartitionID:
+		testAccRoute53KeySigningKeyRegion = endpoints.UsEast1RegionID
+	}
+
+	return testAccRoute53KeySigningKeyRegion
+}
diff --git a/website/docs/r/route53_key_signing_key.html.markdown b/website/docs/r/route53_key_signing_key.html.markdown
new file mode 100644
index 00000000000..c0aa9d65356
--- /dev/null
+++ b/website/docs/r/route53_key_signing_key.html.markdown
@@ -0,0 +1,97 @@
+---
+subcategory: "Route53"
+layout: "aws"
+page_title: "AWS: aws_route53_key_signing_key"
+description: |-
+  Manages a Route 53 Key Signing Key
+---
+
+# Resource: aws_route53_key_signing_key
+
+Manages a Route 53 Key Signing Key. For more information about managing Domain Name System Security Extensions (DNSSEC) in Route 53, see the [Route 53 Developer Guide](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-configuring-dnssec.html).
+
+## Example Usage
+
+```hcl
+provider "aws" {
+  region = "us-east-1"
+}
+
+resource "aws_kms_key" "example" {
+  customer_master_key_spec = "ECC_NIST_P256"
+  deletion_window_in_days  = 7
+  key_usage                = "SIGN_VERIFY"
+  policy = jsonencode({
+    Statement = [
+      {
+        Action = [
+          "kms:DescribeKey",
+          "kms:GetPublicKey",
+          "kms:Sign",
+        ],
+        Effect = "Allow"
+        Principal = {
+          Service = "api-service.dnssec.route53.aws.internal"
+        }
+        Sid = "Route 53 DNSSEC Permissions"
+      },
+      {
+        Action = "kms:*"
+        Effect = "Allow"
+        Principal = {
+          AWS = "*"
+        }
+        Resource = "*"
+        Sid = "IAM User Permissions"
+      },
+    ]
+    Version = "2012-10-17"
+  })
+}
+
+resource "aws_route53_zone" "example" {
+  name = "example.com"
+}
+
+resource "aws_route53_key_signing_key" "example" {
+  hosted_zone_id             = aws_route53_zone.example.id
+  key_management_service_arn = aws_kms_key.example.arn
+  name                       = "example"
+}
+```
+
+## Argument Reference
+
+The following arguments are required:
+
+* `hosted_zone_id` - (Required) Identifier of the Route 53 Hosted Zone.
+* `key_management_service_arn` - (Required) Amazon Resource Name (ARN) of the Key Management Service (KMS) Key. This must be unique for each key-signing key (KSK) in a single hosted zone. This key must be in the `us-east-1` Region and meet certain requirements, which are described in the [Route 53 Developer Guide](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-configuring-dnssec-cmk-requirements.html) and [Route 53 API Reference](https://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateKeySigningKey.html).
+* `name` - (Required) Name of the key-signing key (KSK). Must be unique for each key-signing key in the same hosted zone.
+
+The following arguments are optional:
+
+* `status` - (Optional) Status of the key-signing key (KSK). Valid values: `ACTIVE`, `INACTIVE`. Defaults to `ACTIVE`.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `digest_algorithm_mnemonic` - A string used to represent the delegation signer digest algorithm. This value must follow the guidelines provided by [RFC-8624 Section 3.3](https://tools.ietf.org/html/rfc8624#section-3.3).
+* `digest_algorithm_type` - An integer used to represent the delegation signer digest algorithm. This value must follow the guidelines provided by [RFC-8624 Section 3.3](https://tools.ietf.org/html/rfc8624#section-3.3).
+* `digest_value` - A cryptographic digest of a DNSKEY resource record (RR). DNSKEY records are used to publish the public key that resolvers can use to verify DNSSEC signatures that are used to secure certain kinds of information provided by the DNS system.
+* `dnskey_record` - A string that represents a DNSKEY record.
+* `ds_record` - A string that represents a delegation signer (DS) record.
+* `flag` - An integer that specifies how the key is used. For a key-signing key (KSK), this value is always 257.
+* `id` - Route 53 Hosted Zone identifier and key-signing key (KSK) name, separated by a comma (`,`).
+* `key_tag` - An integer used to identify the DNSSEC record for the domain name. The process used to calculate the value is described in [RFC-4034 Appendix B](https://tools.ietf.org/rfc/rfc4034.txt).
+* `public_key` - The public key, represented as a Base64 encoding, as required by [RFC-4034 Page 5](https://tools.ietf.org/rfc/rfc4034.txt).
+* `signing_algorithm_mnemonic` - A string used to represent the signing algorithm. This value must follow the guidelines provided by [RFC-8624 Section 3.1](https://tools.ietf.org/html/rfc8624#section-3.1).
+* `signing_algorithm_type` - An integer used to represent the signing algorithm. This value must follow the guidelines provided by [RFC-8624 Section 3.1](https://tools.ietf.org/html/rfc8624#section-3.1).
+
+## Import
+
+`aws_route53_key_signing_key` resources can be imported by using the Route 53 Hosted Zone identifier and key-signing key (KSK) name, separated by a comma (`,`), e.g.
+ +``` +$ terraform import aws_route53_key_signing_key.example Z1D633PJN98FT9,example +``` From 3ea6308a8b9b4727dd743658288208864bf7a3f0 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 4 Feb 2021 16:12:16 +0000 Subject: [PATCH 0993/1212] Update CHANGELOG.md for #16840 --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cfab560b3ca..2d245b1153d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,14 @@ ## 3.27.0 (Unreleased) +FEATURES: + +* **New Resource:** `aws_route53_key_signing_key` ([#16840](https://github.com/hashicorp/terraform-provider-aws/issues/16840)) + ENHANCEMENTS: * data-source/aws_subnet: Add `customer_owned_ipv4_pool` and `map_customer_owned_ip_on_launch` attributes ([#16676](https://github.com/hashicorp/terraform-provider-aws/issues/16676)) * resource/aws_glacier_vault: Add plan-time validation for `notification` configuration block `events` and `sns_topic_arn` arguments ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645)) +* resource/aws_glue_catalog_table: Adds support for specifying schema from schema registry. ([#17335](https://github.com/hashicorp/terraform-provider-aws/issues/17335)) * resource/aws_iam_access_key: Add `create_date` attribute ([#17318](https://github.com/hashicorp/terraform-provider-aws/issues/17318)) * resource/aws_iam_access_key: Support resource import ([#17321](https://github.com/hashicorp/terraform-provider-aws/issues/17321)) * resource/aws_subnet: Add `customer_owned_ipv4_pool` and `map_customer_owned_ip_on_launch` attributes ([#16676](https://github.com/hashicorp/terraform-provider-aws/issues/16676)) @@ -17,6 +22,7 @@ BUG FIXES: * resource/aws_glacier_vault: Prevent crash with `GetVaultAccessPolicy` API errors ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645)) * resource/aws_glacier_vault: Properly remove from state when resource does not exist ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645)) * resource/aws_glue_crawler: Use standard retry timeout for IAM eventual consistency and retry on LakeFormation permissions errors ([#17256](https://github.com/hashicorp/terraform-provider-aws/issues/17256)) +* resource/aws_glue_partition - Fix `partition_values` to perserve order. ([#17344](https://github.com/hashicorp/terraform-provider-aws/issues/17344)) * resource/aws_iam_access_key: Ensure `Inactive` `status` is properly configured during resource creation ([#17322](https://github.com/hashicorp/terraform-provider-aws/issues/17322)) * resource/aws_kinesis_firehose_delivery_stream: Use standard retry timeout for IAM eventual consistency and retry on LakeFormation access errors ([#17254](https://github.com/hashicorp/terraform-provider-aws/issues/17254)) * resource/aws_security_group: Prevent perpetual differences with `name_prefix` argument values beginning with `terraform-` ([#17030](https://github.com/hashicorp/terraform-provider-aws/issues/17030)) From c80147312d4d35bd69a4a757e970c571d81c0fe2 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 4 Feb 2021 11:28:42 -0500 Subject: [PATCH 0994/1212] Fix #17344 CHANGELOG entry typo and formatting --- .changelog/17344.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changelog/17344.txt b/.changelog/17344.txt index 5500b9e7494..c3f95195eed 100644 --- a/.changelog/17344.txt +++ b/.changelog/17344.txt @@ -1,3 +1,3 @@ ```release-note:bug -resource/aws_glue_partition - Fix `partition_values` to perserve order. 
+resource/aws_glue_partition: Fix `partition_values` to preserve order. ``` From b51b0f43c1d12558d90b585edbb698149aa9f40e Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 4 Feb 2021 11:29:45 -0500 Subject: [PATCH 0995/1212] Regenerate CHANGELOG --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2d245b1153d..c6189a9671a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,7 +22,7 @@ BUG FIXES: * resource/aws_glacier_vault: Prevent crash with `GetVaultAccessPolicy` API errors ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645)) * resource/aws_glacier_vault: Properly remove from state when resource does not exist ([#12645](https://github.com/hashicorp/terraform-provider-aws/issues/12645)) * resource/aws_glue_crawler: Use standard retry timeout for IAM eventual consistency and retry on LakeFormation permissions errors ([#17256](https://github.com/hashicorp/terraform-provider-aws/issues/17256)) -* resource/aws_glue_partition - Fix `partition_values` to perserve order. ([#17344](https://github.com/hashicorp/terraform-provider-aws/issues/17344)) +* resource/aws_glue_partition: Fix `partition_values` to preserve order. ([#17344](https://github.com/hashicorp/terraform-provider-aws/issues/17344)) * resource/aws_iam_access_key: Ensure `Inactive` `status` is properly configured during resource creation ([#17322](https://github.com/hashicorp/terraform-provider-aws/issues/17322)) * resource/aws_kinesis_firehose_delivery_stream: Use standard retry timeout for IAM eventual consistency and retry on LakeFormation access errors ([#17254](https://github.com/hashicorp/terraform-provider-aws/issues/17254)) * resource/aws_security_group: Prevent perpetual differences with `name_prefix` argument values beginning with `terraform-` ([#17030](https://github.com/hashicorp/terraform-provider-aws/issues/17030)) From c90a3f3ccffab568b700a69bf4e795a2e573efee Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 4 Feb 2021 11:37:48 -0500 Subject: [PATCH 0996/1212] tests/resource/aws_emr_managed_scaling_policy: CheckDestroy update for API changes (#17443) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/17400 Due to an EMR API change, there are test failures with the following acceptance tests: ``` === CONT TestAccAwsEmrManagedScalingPolicy_basic testing_new.go:63: Error running post-test destroy, there may be dangling resources: Error: EMR Managed Scaling Policy still exists --- FAIL: TestAccAwsEmrManagedScalingPolicy_basic (499.57s) === CONT TestAccAwsEmrManagedScalingPolicy_ComputeLimits_MaximumCoreCapacityUnits testing_new.go:63: Error running post-test destroy, there may be dangling resources: Error: EMR Managed Scaling Policy still exists --- FAIL: TestAccAwsEmrManagedScalingPolicy_ComputeLimits_MaximumCoreCapacityUnits (426.45s) === CONT TestAccAwsEmrManagedScalingPolicy_ComputeLimits_MaximumOndemandCapacityUnits testing_new.go:63: Error running post-test destroy, there may be dangling resources: Error: EMR Managed Scaling Policy still exists --- FAIL: TestAccAwsEmrManagedScalingPolicy_ComputeLimits_MaximumOndemandCapacityUnits (476.03s) ``` The `CheckDestroy` function needs to account for the API response with a successful response, but no object. 
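To illustrate the pattern, here is a minimal sketch of the corrected destroy check. The helper name below is hypothetical and the client setup is assumed; the real change is applied inside `testAccCheckAWSEmrManagedScalingPolicyDestroy` in the diff that follows.

```
package aws

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/emr"
)

// checkManagedScalingPolicyGone treats a successful GetManagedScalingPolicy
// response that carries no policy object as "destroyed" rather than as a
// dangling resource, matching the changed EMR API behavior.
func checkManagedScalingPolicyGone(conn *emr.EMR, clusterID string) error {
	resp, err := conn.GetManagedScalingPolicy(&emr.GetManagedScalingPolicyInput{
		ClusterId: aws.String(clusterID),
	})

	if err != nil {
		return fmt.Errorf("error reading EMR Managed Scaling Policy (%s): %w", clusterID, err)
	}

	// After the API change, deletion yields a successful response whose
	// ManagedScalingPolicy field is nil, so both conditions must hold.
	if resp != nil && resp.ManagedScalingPolicy != nil {
		return fmt.Errorf("EMR Managed Scaling Policy (%s) still exists", clusterID)
	}

	return nil
}
```
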
Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAwsEmrManagedScalingPolicy_ComputeLimits_MaximumCoreCapacityUnits (464.08s) --- PASS: TestAccAwsEmrManagedScalingPolicy_basic (468.20s) --- PASS: TestAccAwsEmrManagedScalingPolicy_ComputeLimits_MaximumOndemandCapacityUnits (472.76s) --- PASS: TestAccAwsEmrManagedScalingPolicy_disappears (476.09s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- FAIL: TestAccAwsEmrManagedScalingPolicy_ComputeLimits_MaximumOndemandCapacityUnits (421.03s) # https://github.com/hashicorp/terraform-provider-aws/issues/17442 --- FAIL: TestAccAwsEmrManagedScalingPolicy_disappears (445.83s) # https://github.com/hashicorp/terraform-provider-aws/issues/17442 --- FAIL: TestAccAwsEmrManagedScalingPolicy_ComputeLimits_MaximumCoreCapacityUnits (462.71s) # https://github.com/hashicorp/terraform-provider-aws/issues/17442 --- FAIL: TestAccAwsEmrManagedScalingPolicy_basic (466.76s) # https://github.com/hashicorp/terraform-provider-aws/issues/17442 ``` --- aws/resource_aws_emr_managed_scaling_policy_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_emr_managed_scaling_policy_test.go b/aws/resource_aws_emr_managed_scaling_policy_test.go index 059afc271b0..c886e9c8e60 100644 --- a/aws/resource_aws_emr_managed_scaling_policy_test.go +++ b/aws/resource_aws_emr_managed_scaling_policy_test.go @@ -191,11 +191,11 @@ func testAccCheckAWSEmrManagedScalingPolicyDestroy(s *terraform.State) error { } if err != nil { - return err + return fmt.Errorf("error reading EMR Managed Scaling Policy (%s): %w", rs.Primary.ID, err) } - if resp != nil { - return fmt.Errorf("Error: EMR Managed Scaling Policy still exists") + if resp != nil && resp.ManagedScalingPolicy != nil { + return fmt.Errorf("EMR Managed Scaling Policy (%s) still exists", rs.Primary.ID) } } From 68778348d5fa2d6046e1bce5e39c109798c834a6 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 4 Feb 2021 11:42:04 -0500 Subject: [PATCH 0997/1212] New Resource: aws_ec2_transit_gateway_prefix_list_reference (#16823) * New Resource: aws_ec2_transit_gateway_prefix_list_reference Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16572 Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAwsEc2TransitGatewayPrefixListReference_disappears (214.31s) --- PASS: TestAccAwsEc2TransitGatewayPrefixListReference_basic (217.12s) --- PASS: TestAccAwsEc2TransitGatewayPrefixListReference_disappears_TransitGateway (219.77s) --- PASS: TestAccAwsEc2TransitGatewayPrefixListReference_TransitGatewayAttachmentId (397.05s) ``` * tests/resource/aws_ec2_transit_gateway_prefix_list_reference: Use variable to fix terrafmt issue Output from acceptance testing: ``` --- PASS: TestAccAwsEc2TransitGatewayPrefixListReference_TransitGatewayAttachmentId (403.66s) ``` * Update CHANGELOG for #16823 * resource/ec2_transit_gateway_prefix_list_reference: Add precheck Co-authored-by: Dirk Avery --- .changelog/16823.txt | 3 + aws/internal/service/ec2/errors.go | 4 + aws/internal/service/ec2/finder/finder.go | 46 ++++ aws/internal/service/ec2/id.go | 19 ++ aws/internal/service/ec2/waiter/status.go | 16 ++ aws/internal/service/ec2/waiter/waiter.go | 60 +++++ aws/provider.go | 1 + ...2_transit_gateway_prefix_list_reference.go | 206 ++++++++++++++ ...nsit_gateway_prefix_list_reference_test.go | 254 ++++++++++++++++++ ...ateway_prefix_list_reference.html.markdown | 59 ++++ 10 files changed, 668 insertions(+) create mode 100644 .changelog/16823.txt create mode 100644 
aws/resource_aws_ec2_transit_gateway_prefix_list_reference.go create mode 100644 aws/resource_aws_ec2_transit_gateway_prefix_list_reference_test.go create mode 100644 website/docs/r/ec2_transit_gateway_prefix_list_reference.html.markdown diff --git a/.changelog/16823.txt b/.changelog/16823.txt new file mode 100644 index 00000000000..d8c9dc8e9d4 --- /dev/null +++ b/.changelog/16823.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_ec2_transit_gateway_prefix_list_reference +``` diff --git a/aws/internal/service/ec2/errors.go b/aws/internal/service/ec2/errors.go index c63b775edc9..5a95520c06b 100644 --- a/aws/internal/service/ec2/errors.go +++ b/aws/internal/service/ec2/errors.go @@ -20,6 +20,10 @@ const ( ErrCodeInvalidPrefixListIDNotFound = "InvalidPrefixListID.NotFound" ) +const ( + ErrCodeInvalidRouteTableIDNotFound = "InvalidRouteTableID.NotFound" +) + const ( ErrCodeClientVpnEndpointIdNotFound = "InvalidClientVpnEndpointId.NotFound" ErrCodeClientVpnAuthorizationRuleNotFound = "InvalidClientVpnEndpointAuthorizationRuleNotFound" diff --git a/aws/internal/service/ec2/finder/finder.go b/aws/internal/service/ec2/finder/finder.go index 10e42d1faca..121a51a5def 100644 --- a/aws/internal/service/ec2/finder/finder.go +++ b/aws/internal/service/ec2/finder/finder.go @@ -1,6 +1,8 @@ package finder import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" tfec2 "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2" @@ -110,6 +112,50 @@ func SubnetByID(conn *ec2.EC2, id string) (*ec2.Subnet, error) { return output.Subnets[0], nil } +func TransitGatewayPrefixListReference(conn *ec2.EC2, transitGatewayRouteTableID string, prefixListID string) (*ec2.TransitGatewayPrefixListReference, error) { + filters := map[string]string{ + "prefix-list-id": prefixListID, + } + + input := &ec2.GetTransitGatewayPrefixListReferencesInput{ + TransitGatewayRouteTableId: aws.String(transitGatewayRouteTableID), + Filters: tfec2.BuildAttributeFilterList(filters), + } + + var result *ec2.TransitGatewayPrefixListReference + + err := conn.GetTransitGatewayPrefixListReferencesPages(input, func(page *ec2.GetTransitGatewayPrefixListReferencesOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, transitGatewayPrefixListReference := range page.TransitGatewayPrefixListReferences { + if transitGatewayPrefixListReference == nil { + continue + } + + if aws.StringValue(transitGatewayPrefixListReference.PrefixListId) == prefixListID { + result = transitGatewayPrefixListReference + return false + } + } + + return !lastPage + }) + + return result, err +} + +func TransitGatewayPrefixListReferenceByID(conn *ec2.EC2, resourceID string) (*ec2.TransitGatewayPrefixListReference, error) { + transitGatewayRouteTableID, prefixListID, err := tfec2.TransitGatewayPrefixListReferenceParseID(resourceID) + + if err != nil { + return nil, fmt.Errorf("error parsing EC2 Transit Gateway Prefix List Reference (%s) identifier: %w", resourceID, err) + } + + return TransitGatewayPrefixListReference(conn, transitGatewayRouteTableID, prefixListID) +} + // VpcPeeringConnectionByID returns the VPC peering connection corresponding to the specified identifier. // Returns nil and potentially an error if no VPC peering connection is found. 
func VpcPeeringConnectionByID(conn *ec2.EC2, id string) (*ec2.VpcPeeringConnection, error) { diff --git a/aws/internal/service/ec2/id.go b/aws/internal/service/ec2/id.go index c3797b4a2ed..3eb2a65c3e1 100644 --- a/aws/internal/service/ec2/id.go +++ b/aws/internal/service/ec2/id.go @@ -71,6 +71,25 @@ func ClientVpnRouteParseID(id string) (string, string, string, error) { "target-subnet-id"+clientVpnRouteIDSeparator+"destination-cidr-block", id) } +const transitGatewayPrefixListReferenceSeparator = "_" + +func TransitGatewayPrefixListReferenceCreateID(transitGatewayRouteTableID string, prefixListID string) string { + parts := []string{transitGatewayRouteTableID, prefixListID} + id := strings.Join(parts, transitGatewayPrefixListReferenceSeparator) + + return id +} + +func TransitGatewayPrefixListReferenceParseID(id string) (string, string, error) { + parts := strings.Split(id, transitGatewayPrefixListReferenceSeparator) + + if len(parts) == 2 && parts[0] != "" && parts[1] != "" { + return parts[0], parts[1], nil + } + + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected transit-gateway-route-table-id%[2]sprefix-list-id", id, transitGatewayPrefixListReferenceSeparator) +} + func VpnGatewayVpcAttachmentCreateID(vpnGatewayID, vpcID string) string { return fmt.Sprintf("vpn-attachment-%x", hashcode.String(fmt.Sprintf("%s-%s", vpcID, vpnGatewayID))) } diff --git a/aws/internal/service/ec2/waiter/status.go b/aws/internal/service/ec2/waiter/status.go index 74e0c9308f5..9f00de1e99a 100644 --- a/aws/internal/service/ec2/waiter/status.go +++ b/aws/internal/service/ec2/waiter/status.go @@ -280,6 +280,22 @@ func SubnetMapPublicIpOnLaunch(conn *ec2.EC2, id string) resource.StateRefreshFu } } +func TransitGatewayPrefixListReferenceState(conn *ec2.EC2, transitGatewayRouteTableID string, prefixListID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + transitGatewayPrefixListReference, err := finder.TransitGatewayPrefixListReference(conn, transitGatewayRouteTableID, prefixListID) + + if err != nil { + return nil, "", err + } + + if transitGatewayPrefixListReference == nil { + return nil, "", nil + } + + return transitGatewayPrefixListReference, aws.StringValue(transitGatewayPrefixListReference.State), nil + } +} + const ( vpcPeeringConnectionStatusNotFound = "NotFound" vpcPeeringConnectionStatusUnknown = "Unknown" diff --git a/aws/internal/service/ec2/waiter/waiter.go b/aws/internal/service/ec2/waiter/waiter.go index 0b76dd4f980..44a202f01cd 100644 --- a/aws/internal/service/ec2/waiter/waiter.go +++ b/aws/internal/service/ec2/waiter/waiter.go @@ -7,6 +7,7 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + tfec2 "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2" ) const ( @@ -290,6 +291,65 @@ func SubnetMapPublicIpOnLaunchUpdated(conn *ec2.EC2, subnetID string, expectedVa return nil, err } +const ( + TransitGatewayPrefixListReferenceTimeout = 5 * time.Minute +) + +func TransitGatewayPrefixListReferenceStateCreated(conn *ec2.EC2, transitGatewayRouteTableID string, prefixListID string) (*ec2.TransitGatewayPrefixListReference, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ec2.TransitGatewayPrefixListReferenceStatePending}, + Target: []string{ec2.TransitGatewayPrefixListReferenceStateAvailable}, + Timeout: TransitGatewayPrefixListReferenceTimeout, + Refresh: 
TransitGatewayPrefixListReferenceState(conn, transitGatewayRouteTableID, prefixListID), + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*ec2.TransitGatewayPrefixListReference); ok { + return output, err + } + + return nil, err +} + +func TransitGatewayPrefixListReferenceStateDeleted(conn *ec2.EC2, transitGatewayRouteTableID string, prefixListID string) (*ec2.TransitGatewayPrefixListReference, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ec2.TransitGatewayPrefixListReferenceStateDeleting}, + Target: []string{}, + Timeout: TransitGatewayPrefixListReferenceTimeout, + Refresh: TransitGatewayPrefixListReferenceState(conn, transitGatewayRouteTableID, prefixListID), + } + + outputRaw, err := stateConf.WaitForState() + + if tfawserr.ErrCodeEquals(err, tfec2.ErrCodeInvalidRouteTableIDNotFound) { + return nil, nil + } + + if output, ok := outputRaw.(*ec2.TransitGatewayPrefixListReference); ok { + return output, err + } + + return nil, err +} + +func TransitGatewayPrefixListReferenceStateUpdated(conn *ec2.EC2, transitGatewayRouteTableID string, prefixListID string) (*ec2.TransitGatewayPrefixListReference, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ec2.TransitGatewayPrefixListReferenceStateModifying}, + Target: []string{ec2.TransitGatewayPrefixListReferenceStateAvailable}, + Timeout: TransitGatewayPrefixListReferenceTimeout, + Refresh: TransitGatewayPrefixListReferenceState(conn, transitGatewayRouteTableID, prefixListID), + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*ec2.TransitGatewayPrefixListReference); ok { + return output, err + } + + return nil, err +} + const ( VpnGatewayVpcAttachmentAttachedTimeout = 15 * time.Minute diff --git a/aws/provider.go b/aws/provider.go index d968689b008..14740ee83be 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -623,6 +623,7 @@ func Provider() *schema.Provider { "aws_ec2_transit_gateway": resourceAwsEc2TransitGateway(), "aws_ec2_transit_gateway_peering_attachment": resourceAwsEc2TransitGatewayPeeringAttachment(), "aws_ec2_transit_gateway_peering_attachment_accepter": resourceAwsEc2TransitGatewayPeeringAttachmentAccepter(), + "aws_ec2_transit_gateway_prefix_list_reference": resourceAwsEc2TransitGatewayPrefixListReference(), "aws_ec2_transit_gateway_route": resourceAwsEc2TransitGatewayRoute(), "aws_ec2_transit_gateway_route_table": resourceAwsEc2TransitGatewayRouteTable(), "aws_ec2_transit_gateway_route_table_association": resourceAwsEc2TransitGatewayRouteTableAssociation(), diff --git a/aws/resource_aws_ec2_transit_gateway_prefix_list_reference.go b/aws/resource_aws_ec2_transit_gateway_prefix_list_reference.go new file mode 100644 index 00000000000..c53416bea29 --- /dev/null +++ b/aws/resource_aws_ec2_transit_gateway_prefix_list_reference.go @@ -0,0 +1,206 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + tfec2 "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2/finder" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2/waiter" +) + +func resourceAwsEc2TransitGatewayPrefixListReference() *schema.Resource { + return &schema.Resource{ + Create: 
resourceAwsEc2TransitGatewayPrefixListReferenceCreate, + Read: resourceAwsEc2TransitGatewayPrefixListReferenceRead, + Update: resourceAwsEc2TransitGatewayPrefixListReferenceUpdate, + Delete: resourceAwsEc2TransitGatewayPrefixListReferenceDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "blackhole": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "prefix_list_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "prefix_list_owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "transit_gateway_attachment_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.NoZeroValues, + }, + "transit_gateway_route_table_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.NoZeroValues, + }, + }, + } +} + +func resourceAwsEc2TransitGatewayPrefixListReferenceCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + input := &ec2.CreateTransitGatewayPrefixListReferenceInput{} + + if v, ok := d.GetOk("blackhole"); ok { + input.Blackhole = aws.Bool(v.(bool)) + } + + if v, ok := d.GetOk("prefix_list_id"); ok { + input.PrefixListId = aws.String(v.(string)) + } + + if v, ok := d.GetOk("transit_gateway_attachment_id"); ok { + input.TransitGatewayAttachmentId = aws.String(v.(string)) + } + + if v, ok := d.GetOk("transit_gateway_route_table_id"); ok { + input.TransitGatewayRouteTableId = aws.String(v.(string)) + } + + output, err := conn.CreateTransitGatewayPrefixListReference(input) + + if err != nil { + return fmt.Errorf("error creating EC2 Transit Gateway Prefix List Reference: %w", err) + } + + if output == nil || output.TransitGatewayPrefixListReference == nil { + return fmt.Errorf("error creating EC2 Transit Gateway Prefix List Reference: empty response") + } + + d.SetId(tfec2.TransitGatewayPrefixListReferenceCreateID(aws.StringValue(output.TransitGatewayPrefixListReference.TransitGatewayRouteTableId), aws.StringValue(output.TransitGatewayPrefixListReference.PrefixListId))) + + if _, err := waiter.TransitGatewayPrefixListReferenceStateCreated(conn, aws.StringValue(output.TransitGatewayPrefixListReference.TransitGatewayRouteTableId), aws.StringValue(output.TransitGatewayPrefixListReference.PrefixListId)); err != nil { + return fmt.Errorf("error waiting for EC2 Transit Gateway Prefix List Reference (%s) creation: %w", d.Id(), err) + } + + return resourceAwsEc2TransitGatewayPrefixListReferenceRead(d, meta) +} + +func resourceAwsEc2TransitGatewayPrefixListReferenceRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + transitGatewayPrefixListReference, err := finder.TransitGatewayPrefixListReferenceByID(conn, d.Id()) + + if tfawserr.ErrCodeEquals(err, tfec2.ErrCodeInvalidRouteTableIDNotFound) { + log.Printf("[WARN] EC2 Transit Gateway Prefix List Reference (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error reading EC2 Transit Gateway Prefix List Reference (%s): %w", d.Id(), err) + } + + if transitGatewayPrefixListReference == nil { + log.Printf("[WARN] EC2 Transit Gateway Prefix List Reference (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if aws.StringValue(transitGatewayPrefixListReference.State) == ec2.TransitGatewayPrefixListReferenceStateDeleting { + log.Printf("[WARN] EC2 Transit Gateway Prefix List Reference (%s) 
deleting, removing from state", d.Id())
+		d.SetId("")
+		return nil
+	}
+
+	d.Set("blackhole", transitGatewayPrefixListReference.Blackhole)
+	d.Set("prefix_list_id", transitGatewayPrefixListReference.PrefixListId)
+	d.Set("prefix_list_owner_id", transitGatewayPrefixListReference.PrefixListOwnerId)
+
+	if transitGatewayPrefixListReference.TransitGatewayAttachment == nil {
+		d.Set("transit_gateway_attachment_id", nil)
+	} else {
+		d.Set("transit_gateway_attachment_id", transitGatewayPrefixListReference.TransitGatewayAttachment.TransitGatewayAttachmentId)
+	}
+
+	d.Set("transit_gateway_route_table_id", transitGatewayPrefixListReference.TransitGatewayRouteTableId)
+
+	return nil
+}
+
+func resourceAwsEc2TransitGatewayPrefixListReferenceUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	input := &ec2.ModifyTransitGatewayPrefixListReferenceInput{}
+
+	if v, ok := d.GetOk("blackhole"); ok {
+		input.Blackhole = aws.Bool(v.(bool))
+	}
+
+	if v, ok := d.GetOk("prefix_list_id"); ok {
+		input.PrefixListId = aws.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("transit_gateway_attachment_id"); ok {
+		input.TransitGatewayAttachmentId = aws.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("transit_gateway_route_table_id"); ok {
+		input.TransitGatewayRouteTableId = aws.String(v.(string))
+	}
+
+	output, err := conn.ModifyTransitGatewayPrefixListReference(input)
+
+	if err != nil {
+		return fmt.Errorf("error updating EC2 Transit Gateway Prefix List Reference (%s): %w", d.Id(), err)
+	}
+
+	if output == nil || output.TransitGatewayPrefixListReference == nil {
+		return fmt.Errorf("error updating EC2 Transit Gateway Prefix List Reference (%s): empty response", d.Id())
+	}
+
+	if _, err := waiter.TransitGatewayPrefixListReferenceStateUpdated(conn, aws.StringValue(output.TransitGatewayPrefixListReference.TransitGatewayRouteTableId), aws.StringValue(output.TransitGatewayPrefixListReference.PrefixListId)); err != nil {
+		return fmt.Errorf("error waiting for EC2 Transit Gateway Prefix List Reference (%s) update: %w", d.Id(), err)
+	}
+
+	return resourceAwsEc2TransitGatewayPrefixListReferenceRead(d, meta)
+}
+
+func resourceAwsEc2TransitGatewayPrefixListReferenceDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	transitGatewayRouteTableID, prefixListID, err := tfec2.TransitGatewayPrefixListReferenceParseID(d.Id())
+
+	if err != nil {
+		return fmt.Errorf("error parsing EC2 Transit Gateway Prefix List Reference (%s) identifier: %w", d.Id(), err)
+	}
+
+	input := &ec2.DeleteTransitGatewayPrefixListReferenceInput{
+		PrefixListId:               aws.String(prefixListID),
+		TransitGatewayRouteTableId: aws.String(transitGatewayRouteTableID),
+	}
+
+	_, err = conn.DeleteTransitGatewayPrefixListReference(input)
+
+	if tfawserr.ErrCodeEquals(err, tfec2.ErrCodeInvalidRouteTableIDNotFound) {
+		return nil
+	}
+
+	if err != nil {
+		return fmt.Errorf("error deleting EC2 Transit Gateway Prefix List Reference (%s): %w", d.Id(), err)
+	}
+
+	if _, err := waiter.TransitGatewayPrefixListReferenceStateDeleted(conn, transitGatewayRouteTableID, prefixListID); err != nil {
+		return fmt.Errorf("error waiting for EC2 Transit Gateway Prefix List Reference (%s) deletion: %w", d.Id(), err)
+	}
+
+	return nil
+}
diff --git a/aws/resource_aws_ec2_transit_gateway_prefix_list_reference_test.go b/aws/resource_aws_ec2_transit_gateway_prefix_list_reference_test.go
new file mode 100644
index 00000000000..21cdfadbfd6
--- /dev/null
+++
b/aws/resource_aws_ec2_transit_gateway_prefix_list_reference_test.go @@ -0,0 +1,254 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + tfec2 "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/ec2/finder" +) + +func TestAccAwsEc2TransitGatewayPrefixListReference_basic(t *testing.T) { + managedPrefixListResourceName := "aws_ec2_managed_prefix_list.test" + resourceName := "aws_ec2_transit_gateway_prefix_list_reference.test" + transitGatewayResourceName := "aws_ec2_transit_gateway.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckAWSEc2TransitGateway(t) + testAccPreCheckEc2ManagedPrefixList(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsEc2TransitGatewayPrefixListReferenceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEc2TransitGatewayPrefixListReferenceConfig_Blackhole(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2TransitGatewayPrefixListReferenceExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "blackhole", "true"), + resource.TestCheckResourceAttrPair(resourceName, "prefix_list_id", managedPrefixListResourceName, "id"), + testAccCheckResourceAttrAccountID(resourceName, "prefix_list_owner_id"), + resource.TestCheckResourceAttr(resourceName, "transit_gateway_attachment_id", ""), + resource.TestCheckResourceAttrPair(resourceName, "transit_gateway_route_table_id", transitGatewayResourceName, "association_default_route_table_id"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAwsEc2TransitGatewayPrefixListReference_disappears(t *testing.T) { + resourceName := "aws_ec2_transit_gateway_prefix_list_reference.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckAWSEc2TransitGateway(t) + testAccPreCheckEc2ManagedPrefixList(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsEc2TransitGatewayPrefixListReferenceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEc2TransitGatewayPrefixListReferenceConfig_Blackhole(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2TransitGatewayPrefixListReferenceExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsEc2TransitGatewayPrefixListReference(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccAwsEc2TransitGatewayPrefixListReference_disappears_TransitGateway(t *testing.T) { + resourceName := "aws_ec2_transit_gateway_prefix_list_reference.test" + transitGatewayResourceName := "aws_ec2_transit_gateway.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckAWSEc2TransitGateway(t) + testAccPreCheckEc2ManagedPrefixList(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsEc2TransitGatewayPrefixListReferenceDestroy, + Steps: []resource.TestStep{ + { + Config: 
testAccAwsEc2TransitGatewayPrefixListReferenceConfig_Blackhole(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2TransitGatewayPrefixListReferenceExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsEc2TransitGateway(), transitGatewayResourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccAwsEc2TransitGatewayPrefixListReference_TransitGatewayAttachmentId(t *testing.T) { + resourceName := "aws_ec2_transit_gateway_prefix_list_reference.test" + transitGatewayVpcAttachmentResourceName1 := "aws_ec2_transit_gateway_vpc_attachment.test.0" + transitGatewayVpcAttachmentResourceName2 := "aws_ec2_transit_gateway_vpc_attachment.test.1" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckAWSEc2TransitGateway(t) + testAccPreCheckEc2ManagedPrefixList(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsEc2TransitGatewayPrefixListReferenceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsEc2TransitGatewayPrefixListReferenceConfig_TransitGatewayAttachmentId(rName, 0), + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2TransitGatewayPrefixListReferenceExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "blackhole", "false"), + resource.TestCheckResourceAttrPair(resourceName, "transit_gateway_attachment_id", transitGatewayVpcAttachmentResourceName1, "id"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAwsEc2TransitGatewayPrefixListReferenceConfig_TransitGatewayAttachmentId(rName, 1), + Check: resource.ComposeAggregateTestCheckFunc( + testAccAwsEc2TransitGatewayPrefixListReferenceExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "blackhole", "false"), + resource.TestCheckResourceAttrPair(resourceName, "transit_gateway_attachment_id", transitGatewayVpcAttachmentResourceName2, "id"), + ), + }, + }, + }) +} + +func testAccCheckAwsEc2TransitGatewayPrefixListReferenceDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).ec2conn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_ec2_transit_gateway_prefix_list_reference" { + continue + } + + transitGatewayPrefixListReference, err := finder.TransitGatewayPrefixListReferenceByID(conn, rs.Primary.ID) + + if tfawserr.ErrCodeEquals(err, tfec2.ErrCodeInvalidRouteTableIDNotFound) { + continue + } + + if err != nil { + return fmt.Errorf("error reading EC2 Transit Gateway Prefix List Reference (%s): %w", rs.Primary.ID, err) + } + + if transitGatewayPrefixListReference != nil { + return fmt.Errorf("EC2 Transit Gateway Prefix List Reference (%s) still exists", rs.Primary.ID) + } + } + + return nil +} + +func testAccAwsEc2TransitGatewayPrefixListReferenceExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + + if !ok { + return fmt.Errorf("resource %s not found", resourceName) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("resource %s has not set its id", resourceName) + } + + conn := testAccProvider.Meta().(*AWSClient).ec2conn + + transitGatewayPrefixListReference, err := finder.TransitGatewayPrefixListReferenceByID(conn, rs.Primary.ID) + + if err != nil { + return fmt.Errorf("error reading EC2 Transit Gateway Prefix List Reference (%s): %w", rs.Primary.ID, err) + } + + if 
transitGatewayPrefixListReference == nil { + return fmt.Errorf("EC2 Transit Gateway Prefix List Reference (%s) not found", rs.Primary.ID) + } + + return nil + } +} + +func testAccAwsEc2TransitGatewayPrefixListReferenceConfig_Blackhole(rName string) string { + return fmt.Sprintf(` +resource "aws_ec2_managed_prefix_list" "test" { + address_family = "IPv4" + max_entries = 1 + name = %[1]q +} + +resource "aws_ec2_transit_gateway" "test" {} + +resource "aws_ec2_transit_gateway_prefix_list_reference" "test" { + blackhole = true + prefix_list_id = aws_ec2_managed_prefix_list.test.id + transit_gateway_route_table_id = aws_ec2_transit_gateway.test.association_default_route_table_id +} +`, rName) +} + +func testAccAwsEc2TransitGatewayPrefixListReferenceConfig_TransitGatewayAttachmentId(rName string, index int) string { + return fmt.Sprintf(` +variable "index" { + default = %[2]d +} + +resource "aws_ec2_managed_prefix_list" "test" { + address_family = "IPv4" + max_entries = 1 + name = %[1]q +} + +resource "aws_vpc" "test" { + count = 2 + + cidr_block = "10.${count.index}.0.0/16" +} + +resource "aws_subnet" "test" { + count = 2 + + cidr_block = cidrsubnet(aws_vpc.test[count.index].cidr_block, 8, 0) + vpc_id = aws_vpc.test[count.index].id +} + +resource "aws_ec2_transit_gateway" "test" {} + +resource "aws_ec2_transit_gateway_vpc_attachment" "test" { + count = 2 + + subnet_ids = [aws_subnet.test[count.index].id] + transit_gateway_id = aws_ec2_transit_gateway.test.id + vpc_id = aws_vpc.test[count.index].id +} + +resource "aws_ec2_transit_gateway_prefix_list_reference" "test" { + prefix_list_id = aws_ec2_managed_prefix_list.test.id + transit_gateway_attachment_id = aws_ec2_transit_gateway_vpc_attachment.test[var.index].id + transit_gateway_route_table_id = aws_ec2_transit_gateway.test.association_default_route_table_id +} +`, rName, index) +} diff --git a/website/docs/r/ec2_transit_gateway_prefix_list_reference.html.markdown b/website/docs/r/ec2_transit_gateway_prefix_list_reference.html.markdown new file mode 100644 index 00000000000..f6d884e8837 --- /dev/null +++ b/website/docs/r/ec2_transit_gateway_prefix_list_reference.html.markdown @@ -0,0 +1,59 @@ +--- +subcategory: "EC2" +layout: "aws" +page_title: "AWS: aws_ec2_transit_gateway_prefix_list_reference" +description: |- + Manages an EC2 Transit Gateway Prefix List Reference +--- + +# Resource: aws_ec2_transit_gateway_prefix_list_reference + +Manages an EC2 Transit Gateway Prefix List Reference. + +## Example Usage + +### Attachment Routing + +```hcl +resource "aws_ec2_transit_gateway_prefix_list_reference" "example" { + prefix_list_id = aws_ec2_managed_prefix_list.example.id + transit_gateway_attachment_id = aws_ec2_transit_gateway_vpc_attachment.example.id + transit_gateway_route_table_id = aws_ec2_transit_gateway.example.association_default_route_table_id +} +``` + +### Blackhole Routing + +```hcl +resource "aws_ec2_transit_gateway_prefix_list_reference" "example" { + blackhole = true + prefix_list_id = aws_ec2_managed_prefix_list.example.id + transit_gateway_route_table_id = aws_ec2_transit_gateway.example.association_default_route_table_id +} +``` + +## Argument Reference + +The following arguments are required: + +* `prefix_list_id` - (Required) Identifier of EC2 Prefix List. +* `transit_gateway_route_table_id` - (Required) Identifier of EC2 Transit Gateway Route Table. + +The following arguments are optional: + +* `blackhole` - (Optional) Indicates whether to drop traffic that matches the Prefix List. Defaults to `false`. 
+* `transit_gateway_attachment_id` - (Optional) Identifier of EC2 Transit Gateway Attachment. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - EC2 Transit Gateway Route Table identifier and EC2 Prefix List identifier, separated by an underscore (`_`) + +## Import + +`aws_ec2_transit_gateway_prefix_list_reference` can be imported by using the EC2 Transit Gateway Route Table identifier and EC2 Prefix List identifier, separated by an underscore (`_`), e.g. + +```console +$ terraform import aws_ec2_transit_gateway_prefix_list_reference.example tgw-rtb-12345678_pl-12345678 +``` From a638958e5e7341be823d17551f450bba31edf653 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 4 Feb 2021 16:43:38 +0000 Subject: [PATCH 0998/1212] Update CHANGELOG.md for #16823 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c6189a9671a..8ac5813aa23 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ FEATURES: +* **New Resource:** `aws_ec2_transit_gateway_prefix_list_reference` ([#16823](https://github.com/hashicorp/terraform-provider-aws/issues/16823)) * **New Resource:** `aws_route53_key_signing_key` ([#16840](https://github.com/hashicorp/terraform-provider-aws/issues/16840)) ENHANCEMENTS: From 11cd8e2ff49cbb55c17490c57fed3112b53e676c Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 4 Feb 2021 12:08:54 -0500 Subject: [PATCH 0999/1212] tests/provider: Migrate environment variable functionality into shared package with constants and testing (#17197) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/17083 Other environment variable handling in the code and testing can be migrated in the future for consistency. Please note that if attempting to use the returned value in a test configuration function, it must be done outside the test PreCheck function due to execution during compilation versus run. Without GITHUB_TOKEN set: ``` === RUN TestAccAWSCodePipeline_WithGitHubv1SourceAction resource_aws_codepipeline_test.go:470: skipping test; environment variable GITHUB_TOKEN must be set. Usage: token with GitHub permissions to repository for CodePipeline source configuration --- SKIP: TestAccAWSCodePipeline_WithGitHubv1SourceAction (0.00s) === RUN TestAccAWSCodePipelineWebhook_basic resource_aws_codepipeline_webhook_test.go:18: skipping test; environment variable GITHUB_TOKEN must be set. Usage: token with GitHub permissions to repository for CodePipeline webhook creation --- SKIP: TestAccAWSCodePipelineWebhook_basic (0.00s) === RUN TestAccAWSCodePipelineWebhook_ipAuth resource_aws_codepipeline_webhook_test.go:52: skipping test; environment variable GITHUB_TOKEN must be set. Usage: token with GitHub permissions to repository for CodePipeline webhook creation --- SKIP: TestAccAWSCodePipelineWebhook_ipAuth (0.00s) === RUN TestAccAWSCodePipelineWebhook_unauthenticated resource_aws_codepipeline_webhook_test.go:86: skipping test; environment variable GITHUB_TOKEN must be set. Usage: token with GitHub permissions to repository for CodePipeline webhook creation --- SKIP: TestAccAWSCodePipelineWebhook_unauthenticated (0.00s) === RUN TestAccAWSCodePipelineWebhook_tags resource_aws_codepipeline_webhook_test.go:118: skipping test; environment variable GITHUB_TOKEN must be set. 
Usage: token with GitHub permissions to repository for CodePipeline webhook creation --- SKIP: TestAccAWSCodePipelineWebhook_tags (0.00s) === RUN TestAccAWSCodePipelineWebhook_UpdateAuthenticationConfiguration_SecretToken resource_aws_codepipeline_webhook_test.go:181: skipping test; environment variable GITHUB_TOKEN must be set. Usage: token with GitHub permissions to repository for CodePipeline webhook creation --- SKIP: TestAccAWSCodePipelineWebhook_UpdateAuthenticationConfiguration_SecretToken (0.00s) ``` Output from acceptance testing: ``` # Verify GITHUB_TOKEN working --- PASS: TestAccAWSCodePipeline_WithGitHubv1SourceAction (55.64s) --- PASS: TestAccAWSCodePipelineWebhook_basic (32.74s) --- PASS: TestAccAWSCodePipelineWebhook_ipAuth (33.24s) --- PASS: TestAccAWSCodePipelineWebhook_tags (71.80s) --- PASS: TestAccAWSCodePipelineWebhook_unauthenticated (32.65s) --- PASS: TestAccAWSCodePipelineWebhook_UpdateAuthenticationConfiguration_SecretToken (54.08s) # Verify cross-region and cross-account --- PASS: TestAccAWSDynamoDbTable_Replica_Multiple (1646.42s) --- PASS: TestAccAWSRoute53ZoneAssociation_CrossAccount (160.38s) --- PASS: TestAccAWSProvider_AssumeRole_Empty (15.68s) --- PASS: TestAccAWSProvider_Endpoints (13.10s) --- PASS: TestAccAWSProvider_IgnoreTags_EmptyConfigurationBlock (12.44s) --- PASS: TestAccAWSProvider_IgnoreTags_KeyPrefixes_Multiple (12.87s) --- PASS: TestAccAWSProvider_IgnoreTags_KeyPrefixes_None (12.49s) --- PASS: TestAccAWSProvider_IgnoreTags_KeyPrefixes_One (12.71s) --- PASS: TestAccAWSProvider_IgnoreTags_Keys_Multiple (12.57s) --- PASS: TestAccAWSProvider_IgnoreTags_Keys_None (12.23s) --- PASS: TestAccAWSProvider_IgnoreTags_Keys_One (12.71s) --- PASS: TestAccAWSProvider_Region_AwsChina (9.82s) --- PASS: TestAccAWSProvider_Region_AwsCommercial (10.42s) --- PASS: TestAccAWSProvider_Region_AwsGovCloudUs (11.37s) ``` --- aws/internal/envvar/consts.go | 50 ++++++ aws/internal/envvar/doc.go | 2 + aws/internal/envvar/funcs.go | 16 ++ aws/internal/envvar/funcs_test.go | 50 ++++++ aws/internal/envvar/testing_funcs.go | 56 ++++++ aws/internal/envvar/testing_funcs_test.go | 170 ++++++++++++++++++ aws/provider_test.go | 68 +++---- aws/resource_aws_codepipeline_test.go | 6 +- aws/resource_aws_codepipeline_webhook_test.go | 19 +- docs/MAINTAINING.md | 2 +- 10 files changed, 378 insertions(+), 61 deletions(-) create mode 100644 aws/internal/envvar/consts.go create mode 100644 aws/internal/envvar/doc.go create mode 100644 aws/internal/envvar/funcs.go create mode 100644 aws/internal/envvar/funcs_test.go create mode 100644 aws/internal/envvar/testing_funcs.go create mode 100644 aws/internal/envvar/testing_funcs_test.go diff --git a/aws/internal/envvar/consts.go b/aws/internal/envvar/consts.go new file mode 100644 index 00000000000..9cbf802ef6c --- /dev/null +++ b/aws/internal/envvar/consts.go @@ -0,0 +1,50 @@ +package envvar + +// Standard AWS environment variables used in the Terraform AWS Provider testing. +// These are not provided as constants in the AWS Go SDK currently. 
+const (
+	// Default static credential identifier for tests (AWS Go SDK does not provide this as constant)
+	// See also AWS_SECRET_ACCESS_KEY and AWS_PROFILE
+	AwsAccessKeyId = "AWS_ACCESS_KEY_ID"
+
+	// Container credentials endpoint
+	// See also AWS_ACCESS_KEY_ID and AWS_PROFILE
+	AwsContainerCredentialsFullUri = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+
+	// Default AWS region for tests (AWS Go SDK does not provide this as constant)
+	AwsDefaultRegion = "AWS_DEFAULT_REGION"
+
+	// Default AWS shared configuration profile for tests (AWS Go SDK does not provide this as constant)
+	AwsProfile = "AWS_PROFILE"
+
+	// Default static credential value for tests (AWS Go SDK does not provide this as constant)
+	// See also AWS_ACCESS_KEY_ID and AWS_PROFILE
+	AwsSecretAccessKey = "AWS_SECRET_ACCESS_KEY"
+)
+
+// Custom environment variables used in the Terraform AWS Provider testing.
+// Additions should also be documented in the Environment Variable Dictionary
+// of the Maintainers Guide: docs/MAINTAINING.md
+const (
+	// For tests using an alternate AWS account, the equivalent of AWS_ACCESS_KEY_ID for that account
+	AwsAlternateAccessKeyId = "AWS_ALTERNATE_ACCESS_KEY_ID"
+
+	// For tests using an alternate AWS account, the equivalent of AWS_PROFILE for that account
+	AwsAlternateProfile = "AWS_ALTERNATE_PROFILE"
+
+	// For tests using an alternate AWS region, the equivalent of AWS_DEFAULT_REGION for that region
+	AwsAlternateRegion = "AWS_ALTERNATE_REGION"
+
+	// For tests using an alternate AWS account, the equivalent of AWS_SECRET_ACCESS_KEY for that account
+	AwsAlternateSecretAccessKey = "AWS_ALTERNATE_SECRET_ACCESS_KEY"
+
+	// For tests using a third AWS region, the equivalent of AWS_DEFAULT_REGION for that region
+	AwsThirdRegion = "AWS_THIRD_REGION"
+
+	// For tests requiring GitHub permissions
+	GithubToken = "GITHUB_TOKEN"
+
+	// For tests requiring restricted IAM permissions, an existing IAM Role to assume
+	// An inline assume role policy is then used to deny actions for the test
+	TfAccAssumeRoleArn = "TF_ACC_ASSUME_ROLE_ARN"
+)
diff --git a/aws/internal/envvar/doc.go b/aws/internal/envvar/doc.go
new file mode 100644
index 00000000000..b3c64d3875a
--- /dev/null
+++ b/aws/internal/envvar/doc.go
@@ -0,0 +1,2 @@
+// Package envvar contains constants and helpers for environment variable usage in testing.
+package envvar
diff --git a/aws/internal/envvar/funcs.go b/aws/internal/envvar/funcs.go
new file mode 100644
index 00000000000..de23c2e08a7
--- /dev/null
+++ b/aws/internal/envvar/funcs.go
@@ -0,0 +1,16 @@
+package envvar
+
+import (
+	"os"
+)
+
+// GetWithDefault gets an environment variable value if non-empty or returns the default.
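+//
+// Hypothetical usage sketch: GetWithDefault("AWS_DEFAULT_REGION", "us-west-2")
+// returns "us-west-2" when the variable is unset or empty, and the variable's
+// value otherwise.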
+func GetWithDefault(variable string, defaultValue string) string {
+	value := os.Getenv(variable)
+
+	if value == "" {
+		return defaultValue
+	}
+
+	return value
+}
diff --git a/aws/internal/envvar/funcs_test.go b/aws/internal/envvar/funcs_test.go
new file mode 100644
index 00000000000..1a232627283
--- /dev/null
+++ b/aws/internal/envvar/funcs_test.go
@@ -0,0 +1,50 @@
+package envvar_test
+
+import (
+	"os"
+	"testing"
+
+	"github.com/terraform-providers/terraform-provider-aws/aws/internal/envvar"
+)
+
+func TestGetWithDefault(t *testing.T) {
+	envVar := "TESTENVVAR_GETWITHDEFAULT"
+
+	t.Run("missing", func(t *testing.T) {
+		want := "default"
+
+		os.Unsetenv(envVar)
+
+		got := envvar.GetWithDefault(envVar, want)
+
+		if got != want {
+			t.Fatalf("expected %s, got %s", want, got)
+		}
+	})
+
+	t.Run("empty", func(t *testing.T) {
+		want := "default"
+
+		os.Setenv(envVar, "")
+		defer os.Unsetenv(envVar)
+
+		got := envvar.GetWithDefault(envVar, want)
+
+		if got != want {
+			t.Fatalf("expected %s, got %s", want, got)
+		}
+	})
+
+	t.Run("not empty", func(t *testing.T) {
+		want := "notempty"
+
+		os.Setenv(envVar, want)
+		defer os.Unsetenv(envVar)
+
+		got := envvar.GetWithDefault(envVar, "default")
+
+		if got != want {
+			t.Fatalf("expected %s, got %s", want, got)
+		}
+	})
+}
diff --git a/aws/internal/envvar/testing_funcs.go b/aws/internal/envvar/testing_funcs.go
new file mode 100644
index 00000000000..ff01b950a02
--- /dev/null
+++ b/aws/internal/envvar/testing_funcs.go
@@ -0,0 +1,56 @@
+package envvar
+
+import (
+	"os"
+
+	"github.com/mitchellh/go-testing-interface"
+)
+
+// TestFailIfAllEmpty verifies that at least one environment variable is non-empty or fails the test.
+//
+// If at least one environment variable is non-empty, returns the first name and value.
+func TestFailIfAllEmpty(t testing.T, names []string, usageMessage string) (string, string) {
+	t.Helper()
+
+	for _, variable := range names {
+		value := os.Getenv(variable)
+
+		if value != "" {
+			return variable, value
+		}
+	}
+
+	t.Fatalf("at least one environment variable of %v must be set. Usage: %s", names, usageMessage)
+
+	return "", ""
+}
+
+// TestFailIfEmpty verifies that an environment variable is non-empty or fails the test.
+//
+// For acceptance tests, this function must be used outside PreCheck functions to set values for configurations.
+func TestFailIfEmpty(t testing.T, name string, usageMessage string) string {
+	t.Helper()
+
+	value := os.Getenv(name)
+
+	if value == "" {
+		t.Fatalf("environment variable %s must be set. Usage: %s", name, usageMessage)
+	}
+
+	return value
+}
+
+// TestSkipIfEmpty verifies that an environment variable is non-empty or skips the test.
+//
+// For acceptance tests, this function must be used outside PreCheck functions to set values for configurations.
+func TestSkipIfEmpty(t testing.T, name string, usageMessage string) string {
+	t.Helper()
+
+	value := os.Getenv(name)
+
+	if value == "" {
+		t.Skipf("skipping test; environment variable %s must be set.
Usage: %s", name, usageMessage) + } + + return value +} diff --git a/aws/internal/envvar/testing_funcs_test.go b/aws/internal/envvar/testing_funcs_test.go new file mode 100644 index 00000000000..0d9459c0f02 --- /dev/null +++ b/aws/internal/envvar/testing_funcs_test.go @@ -0,0 +1,170 @@ +package envvar_test + +import ( + "os" + "testing" + + testingiface "github.com/mitchellh/go-testing-interface" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/envvar" +) + +func TestTestFailIfAllEmpty(t *testing.T) { + envVar1 := "TESTENVVAR_FAILIFALLEMPTY1" + envVar2 := "TESTENVVAR_FAILIFALLEMPTY2" + envVars := []string{envVar1, envVar2} + + t.Run("missing", func(t *testing.T) { + defer testingifaceRecover() + + for _, envVar := range envVars { + os.Unsetenv(envVar) + } + + envvar.TestFailIfAllEmpty(&testingiface.RuntimeT{}, envVars, "usage") + + t.Fatal("expected to fail previously") + }) + + t.Run("all empty", func(t *testing.T) { + defer testingifaceRecover() + + os.Setenv(envVar1, "") + os.Setenv(envVar2, "") + defer unsetEnvVars(envVars) + + envvar.TestFailIfAllEmpty(&testingiface.RuntimeT{}, envVars, "usage") + + t.Fatal("expected to fail previously") + }) + + t.Run("some empty", func(t *testing.T) { + wantValue := "pickme" + + os.Setenv(envVar1, "") + os.Setenv(envVar2, wantValue) + defer unsetEnvVars(envVars) + + gotName, gotValue := envvar.TestFailIfAllEmpty(&testingiface.RuntimeT{}, envVars, "usage") + + if gotName != envVar2 { + t.Fatalf("expected name: %s, got: %s", envVar2, gotName) + } + + if gotValue != wantValue { + t.Fatalf("expected value: %s, got: %s", wantValue, gotValue) + } + }) + + t.Run("all not empty", func(t *testing.T) { + wantValue := "pickme" + + os.Setenv(envVar1, wantValue) + os.Setenv(envVar2, "other") + defer unsetEnvVars(envVars) + + gotName, gotValue := envvar.TestFailIfAllEmpty(&testingiface.RuntimeT{}, envVars, "usage") + + if gotName != envVar1 { + t.Fatalf("expected name: %s, got: %s", envVar1, gotName) + } + + if gotValue != wantValue { + t.Fatalf("expected value: %s, got: %s", wantValue, gotValue) + } + }) +} + +func TestTestFailIfEmpty(t *testing.T) { + envVar := "TESTENVVAR_FAILIFEMPTY" + + t.Run("missing", func(t *testing.T) { + defer testingifaceRecover() + + os.Unsetenv(envVar) + + envvar.TestFailIfEmpty(&testingiface.RuntimeT{}, envVar, "usage") + + t.Fatal("expected to fail previously") + }) + + t.Run("empty", func(t *testing.T) { + defer testingifaceRecover() + + os.Setenv(envVar, "") + defer os.Unsetenv(envVar) + + envvar.TestFailIfEmpty(&testingiface.RuntimeT{}, envVar, "usage") + + t.Fatal("expected to fail previously") + }) + + t.Run("not empty", func(t *testing.T) { + want := "notempty" + + os.Setenv(envVar, want) + defer os.Unsetenv(envVar) + + got := envvar.TestFailIfEmpty(&testingiface.RuntimeT{}, envVar, "usage") + + if got != want { + t.Fatalf("expected value: %s, got: %s", want, got) + } + }) +} + +func TestTestSkipIfEmpty(t *testing.T) { + envVar := "TESTENVVAR_SKIPIFEMPTY" + + t.Run("missing", func(t *testing.T) { + mockT := &testingiface.RuntimeT{} + + os.Unsetenv(envVar) + + envvar.TestSkipIfEmpty(mockT, envVar, "usage") + + if !mockT.Skipped() { + t.Fatal("expected to skip previously") + } + }) + + t.Run("empty", func(t *testing.T) { + mockT := &testingiface.RuntimeT{} + + os.Setenv(envVar, "") + defer os.Unsetenv(envVar) + + envvar.TestSkipIfEmpty(mockT, envVar, "usage") + + if !mockT.Skipped() { + t.Fatal("expected to skip previously") + } + }) + + t.Run("not empty", func(t *testing.T) { + want := "notempty" + + 
os.Setenv(envVar, want) + defer os.Unsetenv(envVar) + + got := envvar.TestSkipIfEmpty(&testingiface.RuntimeT{}, envVar, "usage") + + if got != want { + t.Fatalf("expected value: %s, got: %s", want, got) + } + }) +} + +func testingifaceRecover() { + r := recover() + + // this string is hardcoded in github.com/mitchellh/go-testing-interface + if s, ok := r.(string); !ok || s != "testing.T failed, see logs for output (if any)" { + panic(r) + } +} + +func unsetEnvVars(envVars []string) { + for _, envVar := range envVars { + os.Unsetenv(envVar) + } +} diff --git a/aws/provider_test.go b/aws/provider_test.go index 327575ff4ea..77627e2155a 100644 --- a/aws/provider_test.go +++ b/aws/provider_test.go @@ -23,6 +23,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/envvar" ) const ( @@ -233,12 +234,10 @@ func testAccPreCheck(t *testing.T) { // Since we are outside the scope of the Terraform configuration we must // call Configure() to properly initialize the provider configuration. testAccProviderConfigure.Do(func() { - if os.Getenv("AWS_PROFILE") == "" && os.Getenv("AWS_ACCESS_KEY_ID") == "" && os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI") == "" { - t.Fatal("AWS_ACCESS_KEY_ID, AWS_PROFILE, or AWS_CONTAINER_CREDENTIALS_FULL_URI must be set for acceptance tests") - } + envvar.TestFailIfAllEmpty(t, []string{envvar.AwsProfile, envvar.AwsAccessKeyId, envvar.AwsContainerCredentialsFullUri}, "credentials for running acceptance testing") - if os.Getenv("AWS_ACCESS_KEY_ID") != "" && os.Getenv("AWS_SECRET_ACCESS_KEY") == "" { - t.Fatal("AWS_SECRET_ACCESS_KEY must be set for acceptance tests") + if os.Getenv(envvar.AwsAccessKeyId) != "" { + envvar.TestFailIfEmpty(t, envvar.AwsSecretAccessKey, "static credentials value when using "+envvar.AwsAccessKeyId) } // Setting the AWS_DEFAULT_REGION environment variable here allows all tests to omit @@ -249,7 +248,7 @@ func testAccPreCheck(t *testing.T) { // * AWS_DEFAULT_REGION is required and checked above (should mention us-west-2 default) // * Region is automatically handled via shared AWS configuration file and still verified region := testAccGetRegion() - os.Setenv("AWS_DEFAULT_REGION", region) + os.Setenv(envvar.AwsDefaultRegion, region) err := testAccProvider.Configure(context.Background(), terraform.NewResourceConfigRaw(nil)) if err != nil { @@ -652,27 +651,15 @@ func testAccGetAccountID() string { } func testAccGetRegion() string { - v := os.Getenv("AWS_DEFAULT_REGION") - if v == "" { - return "us-west-2" // lintignore:AWSAT003 - } - return v + return envvar.GetWithDefault(envvar.AwsDefaultRegion, endpoints.UsWest2RegionID) } func testAccGetAlternateRegion() string { - v := os.Getenv("AWS_ALTERNATE_REGION") - if v == "" { - return "us-east-1" // lintignore:AWSAT003 - } - return v + return envvar.GetWithDefault(envvar.AwsAlternateRegion, endpoints.UsEast1RegionID) } func testAccGetThirdRegion() string { - v := os.Getenv("AWS_THIRD_REGION") - if v == "" { - return "us-east-2" // lintignore:AWSAT003 - } - return v + return envvar.GetWithDefault(envvar.AwsThirdRegion, endpoints.UsEast2RegionID) } func testAccGetPartition() string { @@ -712,12 +699,10 @@ func testAccGetThirdRegionPartition() string { } func testAccAlternateAccountPreCheck(t *testing.T) { - if os.Getenv("AWS_ALTERNATE_PROFILE") == "" && os.Getenv("AWS_ALTERNATE_ACCESS_KEY_ID") == "" { - 
t.Fatal("AWS_ALTERNATE_ACCESS_KEY_ID or AWS_ALTERNATE_PROFILE must be set for acceptance tests") - } + envvar.TestFailIfAllEmpty(t, []string{envvar.AwsAlternateProfile, envvar.AwsAlternateAccessKeyId}, "credentials for running acceptance testing in alternate AWS account") - if os.Getenv("AWS_ALTERNATE_ACCESS_KEY_ID") != "" && os.Getenv("AWS_ALTERNATE_SECRET_ACCESS_KEY") == "" { - t.Fatal("AWS_ALTERNATE_SECRET_ACCESS_KEY must be set for acceptance tests") + if os.Getenv(envvar.AwsAlternateAccessKeyId) != "" { + envvar.TestFailIfEmpty(t, envvar.AwsAlternateSecretAccessKey, "static credentials value when using "+envvar.AwsAlternateAccessKeyId) } } @@ -741,24 +726,24 @@ func testAccPartitionHasServicePreCheck(serviceId string, t *testing.T) { func testAccMultipleRegionPreCheck(t *testing.T, regions int) { if testAccGetRegion() == testAccGetAlternateRegion() { - t.Fatal("AWS_DEFAULT_REGION and AWS_ALTERNATE_REGION must be set to different values for acceptance tests") + t.Fatalf("%s and %s must be set to different values for acceptance tests", envvar.AwsDefaultRegion, envvar.AwsAlternateRegion) } if testAccGetPartition() != testAccGetAlternateRegionPartition() { - t.Fatalf("AWS_ALTERNATE_REGION partition (%s) does not match AWS_DEFAULT_REGION partition (%s)", testAccGetAlternateRegionPartition(), testAccGetPartition()) + t.Fatalf("%s partition (%s) does not match %s partition (%s)", envvar.AwsAlternateRegion, testAccGetAlternateRegionPartition(), envvar.AwsDefaultRegion, testAccGetPartition()) } if regions >= 3 { if testAccGetRegion() == testAccGetThirdRegion() { - t.Fatal("AWS_DEFAULT_REGION and AWS_THIRD_REGION must be set to different values for acceptance tests") + t.Fatalf("%s and %s must be set to different values for acceptance tests", envvar.AwsDefaultRegion, envvar.AwsThirdRegion) } if testAccGetAlternateRegion() == testAccGetThirdRegion() { - t.Fatal("AWS_ALTERNATE_REGION and AWS_THIRD_REGION must be set to different values for acceptance tests") + t.Fatalf("%s and %s must be set to different values for acceptance tests", envvar.AwsAlternateRegion, envvar.AwsThirdRegion) } if testAccGetPartition() != testAccGetThirdRegionPartition() { - t.Fatalf("AWS_THIRD_REGION partition (%s) does not match AWS_DEFAULT_REGION partition (%s)", testAccGetThirdRegionPartition(), testAccGetPartition()) + t.Fatalf("%s partition (%s) does not match %s partition (%s)", envvar.AwsThirdRegion, testAccGetThirdRegionPartition(), envvar.AwsDefaultRegion, testAccGetPartition()) } } @@ -772,7 +757,7 @@ func testAccMultipleRegionPreCheck(t *testing.T, regions int) { // testAccRegionPreCheck checks that the test region is the specified region. 
func testAccRegionPreCheck(t *testing.T, region string) { if testAccGetRegion() != region { - t.Skipf("skipping tests; AWS_DEFAULT_REGION (%s) does not equal %s", testAccGetRegion(), region) + t.Skipf("skipping tests; %s (%s) does not equal %s", envvar.AwsDefaultRegion, testAccGetRegion(), region) } } @@ -838,12 +823,6 @@ func testAccPreCheckIamServiceLinkedRole(t *testing.T, pathPrefix string) { } } -func testAccEnvironmentVariableSetPreCheck(variable string, t *testing.T) { - if os.Getenv(variable) == "" { - t.Skipf("skipping tests; environment variable %s must be set", variable) - } -} - func testAccAlternateAccountProviderConfig() string { //lintignore:AT004 return fmt.Sprintf(` @@ -852,7 +831,7 @@ provider "awsalternate" { profile = %[2]q secret_key = %[3]q } -`, os.Getenv("AWS_ALTERNATE_ACCESS_KEY_ID"), os.Getenv("AWS_ALTERNATE_PROFILE"), os.Getenv("AWS_ALTERNATE_SECRET_ACCESS_KEY")) +`, os.Getenv(envvar.AwsAlternateAccessKeyId), os.Getenv(envvar.AwsAlternateProfile), os.Getenv(envvar.AwsAlternateSecretAccessKey)) } func testAccAlternateAccountAlternateRegionProviderConfig() string { @@ -864,7 +843,7 @@ provider "awsalternate" { region = %[3]q secret_key = %[4]q } -`, os.Getenv("AWS_ALTERNATE_ACCESS_KEY_ID"), os.Getenv("AWS_ALTERNATE_PROFILE"), testAccGetAlternateRegion(), os.Getenv("AWS_ALTERNATE_SECRET_ACCESS_KEY")) +`, os.Getenv(envvar.AwsAlternateAccessKeyId), os.Getenv(envvar.AwsAlternateProfile), testAccGetAlternateRegion(), os.Getenv(envvar.AwsAlternateSecretAccessKey)) } // When testing needs to distinguish a second region and second account in the same region @@ -888,7 +867,7 @@ provider "awsalternateaccountsameregion" { provider "awssameaccountalternateregion" { region = %[3]q } -`, os.Getenv("AWS_ALTERNATE_ACCESS_KEY_ID"), os.Getenv("AWS_ALTERNATE_PROFILE"), testAccGetAlternateRegion(), os.Getenv("AWS_ALTERNATE_SECRET_ACCESS_KEY")) +`, os.Getenv(envvar.AwsAlternateAccessKeyId), os.Getenv(envvar.AwsAlternateProfile), testAccGetAlternateRegion(), os.Getenv(envvar.AwsAlternateSecretAccessKey)) } // Deprecated: Use testAccMultipleRegionProviderConfig instead @@ -1924,10 +1903,7 @@ data "aws_arn" "test" { } func testAccAssumeRoleARNPreCheck(t *testing.T) { - v := os.Getenv("TF_ACC_ASSUME_ROLE_ARN") - if v == "" { - t.Skip("skipping tests; TF_ACC_ASSUME_ROLE_ARN must be set") - } + envvar.TestSkipIfEmpty(t, envvar.TfAccAssumeRoleArn, "Amazon Resource Name (ARN) of existing IAM Role to assume for testing restricted permissions") } func testAccProviderConfigAssumeRolePolicy(policy string) string { @@ -1939,7 +1915,7 @@ provider "aws" { policy = %q } } -`, os.Getenv("TF_ACC_ASSUME_ROLE_ARN"), policy) +`, os.Getenv(envvar.TfAccAssumeRoleArn), policy) } const testAccCheckAWSProviderConfigAssumeRoleEmpty = ` diff --git a/aws/resource_aws_codepipeline_test.go b/aws/resource_aws_codepipeline_test.go index b485e8effe5..70f61c09c13 100644 --- a/aws/resource_aws_codepipeline_test.go +++ b/aws/resource_aws_codepipeline_test.go @@ -2,7 +2,6 @@ package aws import ( "fmt" - "os" "regexp" "testing" @@ -13,6 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/envvar" ) func TestAccAWSCodePipeline_basic(t *testing.T) { @@ -467,15 +467,15 @@ func TestAccAWSCodePipeline_WithNamespace(t *testing.T) { } func TestAccAWSCodePipeline_WithGitHubv1SourceAction(t *testing.T) { + githubToken := 
envvar.TestSkipIfEmpty(t, envvar.GithubToken, "token with GitHub permissions to repository for CodePipeline source configuration") + var v codepipeline.PipelineDeclaration name := acctest.RandString(10) resourceName := "aws_codepipeline.test" - githubToken := os.Getenv("GITHUB_TOKEN") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) - testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) testAccPreCheckAWSCodePipelineSupported(t) }, Providers: testAccProviders, diff --git a/aws/resource_aws_codepipeline_webhook_test.go b/aws/resource_aws_codepipeline_webhook_test.go index cf13fdc8900..07de47ac9a0 100644 --- a/aws/resource_aws_codepipeline_webhook_test.go +++ b/aws/resource_aws_codepipeline_webhook_test.go @@ -2,7 +2,6 @@ package aws import ( "fmt" - "os" "testing" "github.com/aws/aws-sdk-go/aws" @@ -10,10 +9,13 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/envvar" ) +const envVarGithubTokenUsageCodePipelineWebhook = "token with GitHub permissions to repository for CodePipeline webhook creation" + func TestAccAWSCodePipelineWebhook_basic(t *testing.T) { - githubToken := os.Getenv("GITHUB_TOKEN") + githubToken := envvar.TestSkipIfEmpty(t, envvar.GithubToken, envVarGithubTokenUsageCodePipelineWebhook) var v codepipeline.ListWebhookItem rName := acctest.RandomWithPrefix("tf-acc-test") @@ -22,7 +24,6 @@ func TestAccAWSCodePipelineWebhook_basic(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) - testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) testAccPreCheckAWSCodePipelineSupported(t) }, Providers: testAccProviders, @@ -48,7 +49,7 @@ func TestAccAWSCodePipelineWebhook_basic(t *testing.T) { } func TestAccAWSCodePipelineWebhook_ipAuth(t *testing.T) { - githubToken := os.Getenv("GITHUB_TOKEN") + githubToken := envvar.TestSkipIfEmpty(t, envvar.GithubToken, envVarGithubTokenUsageCodePipelineWebhook) var v codepipeline.ListWebhookItem rName := acctest.RandomWithPrefix("tf-acc-test") @@ -57,7 +58,6 @@ func TestAccAWSCodePipelineWebhook_ipAuth(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) - testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) testAccPreCheckAWSCodePipelineSupported(t) }, Providers: testAccProviders, @@ -83,7 +83,7 @@ func TestAccAWSCodePipelineWebhook_ipAuth(t *testing.T) { } func TestAccAWSCodePipelineWebhook_unauthenticated(t *testing.T) { - githubToken := os.Getenv("GITHUB_TOKEN") + githubToken := envvar.TestSkipIfEmpty(t, envvar.GithubToken, envVarGithubTokenUsageCodePipelineWebhook) var v codepipeline.ListWebhookItem rName := acctest.RandomWithPrefix("tf-acc-test") @@ -92,7 +92,6 @@ func TestAccAWSCodePipelineWebhook_unauthenticated(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) - testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) testAccPreCheckAWSCodePipelineSupported(t) }, Providers: testAccProviders, @@ -116,7 +115,7 @@ func TestAccAWSCodePipelineWebhook_unauthenticated(t *testing.T) { } func TestAccAWSCodePipelineWebhook_tags(t *testing.T) { - githubToken := os.Getenv("GITHUB_TOKEN") + githubToken := envvar.TestSkipIfEmpty(t, envvar.GithubToken, envVarGithubTokenUsageCodePipelineWebhook) var v1, v2, v3 codepipeline.ListWebhookItem rName := acctest.RandomWithPrefix("tf-acc-test") @@ 
-125,7 +124,6 @@ func TestAccAWSCodePipelineWebhook_tags(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) - testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) testAccPreCheckAWSCodePipelineSupported(t) }, Providers: testAccProviders, @@ -180,7 +178,7 @@ func TestAccAWSCodePipelineWebhook_tags(t *testing.T) { } func TestAccAWSCodePipelineWebhook_UpdateAuthenticationConfiguration_SecretToken(t *testing.T) { - githubToken := os.Getenv("GITHUB_TOKEN") + githubToken := envvar.TestSkipIfEmpty(t, envvar.GithubToken, envVarGithubTokenUsageCodePipelineWebhook) var v1, v2 codepipeline.ListWebhookItem rName := acctest.RandomWithPrefix("tf-acc-test") @@ -189,7 +187,6 @@ func TestAccAWSCodePipelineWebhook_UpdateAuthenticationConfiguration_SecretToken resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) - testAccEnvironmentVariableSetPreCheck("GITHUB_TOKEN", t) testAccPreCheckAWSCodePipelineSupported(t) }, Providers: testAccProviders, diff --git a/docs/MAINTAINING.md b/docs/MAINTAINING.md index bcb27a651a9..783189a5394 100644 --- a/docs/MAINTAINING.md +++ b/docs/MAINTAINING.md @@ -332,7 +332,7 @@ Additional branch naming recommendations can be found in the [Pull Request Submi ## Environment Variable Dictionary -Environment variables (beyond standard AWS Go SDK ones) used by acceptance testing. +Environment variables (beyond standard AWS Go SDK ones) used by acceptance testing. See also the `aws/internal/envvar` package. | Variable | Description | |----------|-------------| From 30d728c52af9d4e7a2ef5a39b6d3e216516dabd7 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 4 Feb 2021 12:32:30 -0500 Subject: [PATCH 1000/1212] add .changelog dir to PR path and add it to misspell run command --- .github/workflows/changelog.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml index 2086b017faf..cf1a3809ea7 100644 --- a/.github/workflows/changelog.yml +++ b/.github/workflows/changelog.yml @@ -6,6 +6,7 @@ on: - 'release/**' pull_request: paths: + - .changelog/ - .go-version - CHANGELOG.md pull_request_target: @@ -70,4 +71,4 @@ jobs: path: ~/go/pkg/mod key: ${{ runner.os }}-go-pkg-mod-${{ hashFiles('go.sum') }} - run: cd tools && go install github.com/client9/misspell/cmd/misspell - - run: misspell -error -source text CHANGELOG.md + - run: misspell -error -source text CHANGELOG.md .changelog From 39828e17a2a8f2a0e5054abbae34713472b91a32 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 4 Feb 2021 12:37:23 -0500 Subject: [PATCH 1001/1212] add .changelog dir to docscheck --- GNUmakefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GNUmakefile b/GNUmakefile index cc657a9d3de..c0ff0752ddb 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -81,7 +81,7 @@ docscheck: -allowed-resource-subcategories-file website/allowed-subcategories.txt \ -ignore-side-navigation-data-sources aws_alb,aws_alb_listener,aws_alb_target_group,aws_kms_secret \ -require-resource-subcategory - @misspell -error -source text CHANGELOG.md + @misspell -error -source text CHANGELOG.md .changelog lint: golangci-lint awsproviderlint importlint From 72bab54528e7f7af9c882747d462e57bed5aa951 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Thu, 4 Feb 2021 10:05:24 -0800 Subject: [PATCH 1002/1212] Add IsNewResource check on read --- aws/resource_aws_cloudfront_origin_request_policy.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git 
a/aws/resource_aws_cloudfront_origin_request_policy.go b/aws/resource_aws_cloudfront_origin_request_policy.go index a9563b25eb9..0b5b295302a 100644 --- a/aws/resource_aws_cloudfront_origin_request_policy.go +++ b/aws/resource_aws_cloudfront_origin_request_policy.go @@ -1,8 +1,11 @@ package aws import ( + "log" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudfront" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) @@ -144,6 +147,12 @@ func resourceAwsCloudFrontOriginRequestPolicyRead(d *schema.ResourceData, meta i } resp, err := conn.GetOriginRequestPolicy(request) + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, "ResourceNotFoundException") { + log.Printf("[WARN] CloudFront Origin Request Policy (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if err != nil { return err } From 5f4d249596ddbb4e36a253f17c9bfcdf1787e788 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 4 Feb 2021 10:10:33 -0800 Subject: [PATCH 1003/1212] Updates tflint to v0.24.1 --- .tflint.hcl | 3 ++ tools/go.mod | 2 +- tools/go.sum | 106 +++++++++++++++++++++++++++++++++++---------------- 3 files changed, 77 insertions(+), 34 deletions(-) create mode 100644 .tflint.hcl diff --git a/.tflint.hcl b/.tflint.hcl new file mode 100644 index 00000000000..a1361a3ff55 --- /dev/null +++ b/.tflint.hcl @@ -0,0 +1,3 @@ +plugin "aws" { + enabled = true +} diff --git a/tools/go.mod b/tools/go.mod index 2e6c49e71a3..a999cb2328b 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -9,7 +9,7 @@ require ( github.com/hashicorp/go-changelog v0.0.0-20201005170154-56335215ce3a github.com/katbyte/terrafmt v0.2.1-0.20200913185704-5ff4421407b4 github.com/pavius/impi v0.0.3 // indirect - github.com/terraform-linters/tflint v0.20.3 + github.com/terraform-linters/tflint v0.24.1 ) replace github.com/katbyte/terrafmt => github.com/gdavison/terrafmt v0.2.1-0.20201026181004-a896893cd6af diff --git a/tools/go.sum b/tools/go.sum index e252b7ba572..e4758e741cc 100644 --- a/tools/go.sum +++ b/tools/go.sum @@ -15,6 +15,9 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.61.0 h1:NLQf5e1OMspfNT1RAHOB3ublr1TW3YTXO8OiWwVjK2U= cloud.google.com/go v0.61.0/go.mod h1:XukKJg4Y7QsUu0Hxg3qQKUWR4VuWivmyMK2+rUyxAqw= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -82,6 +85,7 @@ github.com/aliyun/aliyun-tablestore-go-sdk v4.1.2+incompatible/go.mod h1:LDQHRZy github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/andybalholm/crlf v0.0.0-20171020200849-670099aa064f/go.mod h1:k8feO4+kXDxro6ErPXBRTJ/ro2mf0SsFG8s7doP9kJE= 
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= @@ -97,8 +101,8 @@ github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/ github.com/apparentlymart/go-textseg/v12 v12.0.0 h1:bNEQyAGak9tojivJNkoqWErVCQbjdL7GzRt3F8NvfJ0= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/apparentlymart/go-userdirs v0.0.0-20200915174352-b0c018a67c13/go.mod h1:7kfpUbyCdGJ9fDRCp3fopPQi5+cKNHgTE4ZuNrO71Cw= -github.com/apparentlymart/go-versions v1.0.0 h1:4A4CekGuwDUQqc+uTXCrdb9Y98JZsML2sdfNTeVjsK4= -github.com/apparentlymart/go-versions v1.0.0/go.mod h1:YF5j7IQtrOAOnsGkniupEA5bfCjzd7i14yu0shZavyM= +github.com/apparentlymart/go-versions v1.0.1 h1:ECIpSn0adcYNsBfSRwdDdz9fWlL+S/6EUd9+irwkBgU= +github.com/apparentlymart/go-versions v1.0.1/go.mod h1:YF5j7IQtrOAOnsGkniupEA5bfCjzd7i14yu0shZavyM= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= @@ -115,9 +119,9 @@ github.com/ashanbrown/makezero v0.0.0-20201205152432-7b7cdbb3025a/go.mod h1:oG9D github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.31.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.35.2/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= -github.com/aws/aws-sdk-go v1.35.7 h1:FHMhVhyc/9jljgFAcGkQDYjpC9btM0B8VfkLBfctdNE= -github.com/aws/aws-sdk-go v1.35.7/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= +github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.37.1 h1:BTHmuN+gzhxkvU9sac2tZvaY0gV9ihbHw+KxZOecYvY= +github.com/aws/aws-sdk-go v1.37.1/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -263,7 +267,6 @@ github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09Vjb github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -293,7 +296,6 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= @@ -404,16 +406,18 @@ github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVo github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= -github.com/hashicorp/go-getter v1.4.2-0.20200106182914-9813cbd4eb02 h1:l1KB3bHVdvegcIf5upQ5mjcHjs2qsWnKh4Yr9xgIuu8= -github.com/hashicorp/go-getter v1.4.2-0.20200106182914-9813cbd4eb02/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= github.com/hashicorp/go-getter v1.5.0 h1:ciWJaeZWSMbc5OiLMpKp40MKFPqO44i0h3uyfXPBkkk= github.com/hashicorp/go-getter v1.5.0/go.mod h1:a7z7NPPfNQpJWcn4rSWFtdrSldqLdLPEF3d8nFMsSLM= +github.com/hashicorp/go-getter v1.5.1 h1:LZ49OxqBBtdKJymlpX7oTyqGBQRg4xxQDyPW4hzoZqM= +github.com/hashicorp/go-getter v1.5.1/go.mod h1:a7z7NPPfNQpJWcn4rSWFtdrSldqLdLPEF3d8nFMsSLM= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.10.0 h1:b86HUuA126IcSHyC55WjPo7KtCOVeTCKIjr+3lBhPxI= github.com/hashicorp/go-hclog v0.10.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.15.0 h1:qMuK0wxsoW4D0ddCCYwPSTm4KQv1X1ke3WmPWZ0Mvsk= +github.com/hashicorp/go-hclog v0.15.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa/go.mod h1:6ij3Z20p+OhOkCSrA0gImAWoHYQRGbnlcuk6XYTiaRw= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= @@ -424,6 +428,8 @@ github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-plugin v1.3.0 h1:4d/wJojzvHV1I4i/rrjVaeuyxWrLzDE1mDCyDy8fXS8= github.com/hashicorp/go-plugin v1.3.0/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0= +github.com/hashicorp/go-plugin v1.4.0 h1:b0O7rs5uiJ99Iu9HugEzsM67afboErkHUWddUSpUO3A= +github.com/hashicorp/go-plugin v1.4.0/go.mod 
h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= github.com/hashicorp/go-retryablehttp v0.5.2 h1:AoISa4P4IsW0/m4T6St8Yw38gTl5GtBAgfkhYh1xAz4= github.com/hashicorp/go-retryablehttp v0.5.2/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= @@ -453,8 +459,8 @@ github.com/hashicorp/hcl/v2 v2.3.0 h1:iRly8YaMwTBAKhn1Ybk7VSdzbnopghktCD031P8ggU github.com/hashicorp/hcl/v2 v2.3.0/go.mod h1:d+FwDBbOLvpAM3Z6J7gPj/VoAGkNe/gm352ZhjJ/Zv8= github.com/hashicorp/hcl/v2 v2.6.0 h1:3krZOfGY6SziUXa6H9PJU6TyohHn7I+ARYnhbeNBz+o= github.com/hashicorp/hcl/v2 v2.6.0/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY= -github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590 h1:2yzhWGdgQUWZUCNK+AoO35V+HTsgEmcM4J9IkArh7PI= -github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE= +github.com/hashicorp/hcl/v2 v2.8.2 h1:wmFle3D1vu0okesm8BTLVDyJ6/OL9DCLUwn0b2OptiY= +github.com/hashicorp/hcl/v2 v2.8.2/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= @@ -462,18 +468,20 @@ github.com/hashicorp/memberlist v0.1.0/go.mod h1:ncdBp14cuox2iFOq3kDiquKU6fqsTBc github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.0.0-20160124182025-e4ec8cc423bb/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/terraform v0.13.4 h1:xjyfIScorWvCYqssp0DDvWX1oZ36Csk7zk64/tfdJUI= -github.com/hashicorp/terraform v0.13.4/go.mod h1:1H1qcnppNc/bBGc7poOfnmmBeQMlF0stEN3haY3emCU= +github.com/hashicorp/terraform v0.14.5 h1:q6g6pn8VWDWwwS4KYIfaKH1U1xhs4LwfaT48nzeR83Y= +github.com/hashicorp/terraform v0.14.5/go.mod h1:DfimDncvShwtT6by5OzM5GZQObjHHk5XxKqqMDQ4uKg= github.com/hashicorp/terraform-config-inspect v0.0.0-20191212124732-c6ae6269b9d7 h1:Pc5TCv9mbxFN6UVX0LH6CpQrdTM5YjbVI2w15237Pjk= github.com/hashicorp/terraform-config-inspect v0.0.0-20191212124732-c6ae6269b9d7/go.mod h1:p+ivJws3dpqbp1iP84+npOyAmTTOLMgCzrXd3GSdn/A= -github.com/hashicorp/terraform-exec v0.10.0/go.mod h1:tOT8j1J8rP05bZBGWXfMyU3HkLi1LWyqL3Bzsc3CJjo= +github.com/hashicorp/terraform-exec v0.12.0/go.mod h1:SGhto91bVRlgXQWcJ5znSz+29UZIa8kpBbkGwQ+g9E8= github.com/hashicorp/terraform-json v0.5.0 h1:7TV3/F3y7QVSuN4r9BEXqnWqrAyeOtON8f0wvREtyzs= github.com/hashicorp/terraform-json v0.5.0/go.mod h1:eAbqb4w0pSlRmdvl8fOyHAi/+8jnkVYN28gJkSJrLhU= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.4/go.mod h1:GP0lmw4Y+XV1OfTmi/hK75t5KWGGzoOzEgUBPGZ6Wq4= -github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= +github.com/hashicorp/terraform-json v0.8.0 h1:XObQ3PgqU52YLQKEaJ08QtUshAfN3yu4u8ebSW0vztc= +github.com/hashicorp/terraform-json v0.8.0/go.mod h1:3defM4kkMfttwiE7VakJDwCd4R+umhSQnvJwORXbprE= +github.com/hashicorp/terraform-plugin-go v0.2.1/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.1/go.mod h1:jgCWyjKf1BRqzuA3IPJb6PJ2YY86ePJurX9xfJtuYNU= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.2/go.mod h1:jgCWyjKf1BRqzuA3IPJb6PJ2YY86ePJurX9xfJtuYNU= 
github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= -github.com/hashicorp/vault v0.10.4/go.mod h1:KfSyffbKxoVyspOdlaGVjIuwLobi07qD1bAbosPMpP0= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= @@ -551,6 +559,7 @@ github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+ github.com/kyoh86/exportloopref v0.1.8 h1:5Ry/at+eFdkX9Vsdw3qU4YkvGtzuVfzT4X7S77LoN/M= github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/likexian/gokit v0.0.0-20190309162924-0a377eecf7aa/go.mod h1:QdfYv6y6qPA9pbBA2qXtoT8BMKha6UyNbxWGWl/9Jfk= github.com/likexian/gokit v0.0.0-20190418170008-ace88ad0983b/go.mod h1:KKqSnk/VVSW8kEyO2vVCXoanzEutKdlBAPohmGXkxCk= github.com/likexian/gokit v0.0.0-20190501133040-e77ea8b19cdc/go.mod h1:3kvONayqCaj+UgrRZGpgfXzHdMYCAO0KAt4/8n0L57Y= @@ -597,6 +606,7 @@ github.com/miekg/dns v1.0.8/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nr github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/cli v1.1.1 h1:J64v/xD7Clql+JVKSvkYojLOXu1ibnY9ZjGLwSt/89w= github.com/mitchellh/cli v1.1.1/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= @@ -611,6 +621,8 @@ github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go. 
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.0.4 h1:ZU1VNC02qyufSZsjjs7+khruk2fKvbQ3TwRV/IBCeFA= github.com/mitchellh/go-testing-interface v1.0.4/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= @@ -650,6 +662,7 @@ github.com/nishanths/exhaustive v0.1.0 h1:kVlMw8h2LHPMGUVqUj6230oQjjTMFjwcZrnkhX github.com/nishanths/exhaustive v0.1.0/go.mod h1:S1j9110vxV1ECdCudXRkeMnFQ/DQk9ajLT0Uf2MYZQQ= github.com/nishanths/predeclared v0.2.1 h1:1TXtjmy4f3YCFjTxRd8zcFHOmoUir+gp0ESzjFzG2sw= github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= +github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce/go.mod h1:uFMI8w+ref4v2r9jz+c9i1IfIttS/OkmLfrk1jne5hs= github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= @@ -662,6 +675,7 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -670,10 +684,10 @@ github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.4 h1:NiTx7EEvBzu9sFOD1zORteLSt3o8gnlvZZwSE9TnY9U= +github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ= github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pavius/impi v0.0.3 h1:DND6MzU+BLABhOZXbELR3FU8b+zDgcq4dOCNLhiTYuI= -github.com/pavius/impi v0.0.3/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= @@ -696,7 +710,7 @@ 
github.com/polyfloyd/go-errorlint v0.0.0-20201127212506-19bd8db6546f/go.mod h1:w github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.1 h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI= github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E= -github.com/pquerna/otp v1.2.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= +github.com/pquerna/otp v1.3.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= @@ -730,7 +744,7 @@ github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdh github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/securego/gosec/v2 v2.5.0 h1:kjfXLeKdk98gBe2+eYRFMpC4+mxmQQtbidpiiOQ69Qc= github.com/securego/gosec/v2 v2.5.0/go.mod h1:L/CDXVntIff5ypVHIkqPXbtRpJiNCh6c6Amn68jXDjo= -github.com/serenize/snaker v0.0.0-20171204205717-a683aaf2d516/go.mod h1:Yow6lPLSAXx2ifx470yD/nUe22Dv5vBvxK/UK9UUTVs= +github.com/serenize/snaker v0.0.0-20201027110005-a7ad2135616e/go.mod h1:Yow6lPLSAXx2ifx470yD/nUe22Dv5vBvxK/UK9UUTVs= github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= @@ -767,9 +781,8 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.3.2 h1:GDarE4TJQI52kYSbSAmLiId1Elfj+xgSDqrUZxFhxlU= github.com/spf13/afero v1.3.2/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.4.1 h1:asw9sl74539yqavKaglDM5hFpdJVK0Y5Dr/JOgQ89nQ= -github.com/spf13/afero v1.4.1/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= @@ -812,12 +825,14 @@ github.com/tdakkota/asciicheck v0.0.0-20200416190851-d7f85be797a2 h1:Xr9gkxfOP0K github.com/tdakkota/asciicheck v0.0.0-20200416190851-d7f85be797a2/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= github.com/tencentcloud/tencentcloud-sdk-go v3.0.82+incompatible/go.mod h1:0PfYow01SHPMhKY31xa+EFz2RStxIqj6JFAJS+IkCi4= github.com/tencentyun/cos-go-sdk-v5 v0.0.0-20190808065407-f07404cefc8c/go.mod h1:wk2XFUg6egk4tSDNZtXeKfe2G6690UVyt163PuUxBZk= -github.com/terraform-linters/tflint v0.20.3 h1:h+HGQ+q6h3qrswZPwZnQzKbeRV+640YARbSSTivPigM= -github.com/terraform-linters/tflint v0.20.3/go.mod h1:vwCB1D7ZfyjgwC+C9D77t4DmjF7z0LV90khz1Lvx78M= -github.com/terraform-linters/tflint-plugin-sdk v0.5.0 h1:wnVl1oaGoKWhwJCkok82DpiKpO19TuuBljMALTWZXoA= -github.com/terraform-linters/tflint-plugin-sdk v0.5.0/go.mod h1:xbvHhlyCO/04nM+PBTERWP6VOIYGG5QLZNIgvjxi3xc= 
-github.com/terraform-providers/terraform-provider-aws v1.60.1-0.20201015205411-546f68d4a935 h1:PbobnAeVvdzE1/qqTYxaB9h/YIpHCZXbCRBaXNIi0qA= -github.com/terraform-providers/terraform-provider-aws v1.60.1-0.20201015205411-546f68d4a935/go.mod h1:DdjydHaAmjsZl+uZ4QLwfx9iP+trTBMjEqLeAV9/OFE= +github.com/terraform-linters/tflint v0.24.1 h1:j3H53bueAlYZ3Lyw25ac61m+L53AejLZWcwTPmrNxVk= +github.com/terraform-linters/tflint v0.24.1/go.mod h1:QI7BW9c84h9tlXyCYTZLBx340W0hRJMauUovDBJ0eIw= +github.com/terraform-linters/tflint-plugin-sdk v0.8.1 h1:KklKztWgRzvZLSi77GFU2y/jaA/e+OUWEV3bdouzPWw= +github.com/terraform-linters/tflint-plugin-sdk v0.8.1/go.mod h1:A/6/RIqmPGmLWnI1JZef2Tyzw7/MFTl6t6G0BH9qALA= +github.com/terraform-linters/tflint-ruleset-aws v0.2.1 h1:fT9oGCkqKh66gHXdoKKNMprzC+SzUIuZIhdTD0seSRQ= +github.com/terraform-linters/tflint-ruleset-aws v0.2.1/go.mod h1:9WyZWmZoTC7ckUEOzoc32KuJCMi4rVI5ongnBmGk2k8= +github.com/terraform-providers/terraform-provider-aws v1.60.1-0.20210128214539-ac3363c699ef h1:+UFdPdf7zyrxH6XVRs7FJpAc2ajrDx1OKWY7BBrQRds= +github.com/terraform-providers/terraform-provider-aws v1.60.1-0.20210128214539-ac3363c699ef/go.mod h1:2FJRHL/0yjp+iYXWoW2v4U80Zkz+mGHsHOB/Jdbw7AI= github.com/tetafro/godot v1.3.2 h1:HzWC3XjadkyeuBZxkfAFNY20UVvle0YD51I6zf6RKlU= github.com/tetafro/godot v1.3.2/go.mod h1:ah7jjYmOMnIjS9ku2krapvGQrFNtTLo9Z/qB3dGU1eU= github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e h1:RumXZ56IrCj4CL+g1b9OL/oH0QnsF976bC8xQFYUD5Q= @@ -845,8 +860,8 @@ github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmihailenco/msgpack v4.0.1+incompatible h1:RMF1enSPeKTlXrXdOcqjFUElywVZjjC6pqse21bKbEU= -github.com/vmihailenco/msgpack v4.0.1+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= +github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= @@ -871,8 +886,9 @@ github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q github.com/zclconf/go-cty v1.2.1 h1:vGMsygfmeCl4Xb6OA5U5XVAaQZ69FvoG7X2jUtQujb8= github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= github.com/zclconf/go-cty v1.5.1/go.mod h1:nHzOclRkoj++EU9ZjSrZvRG0BXIWt8c7loYc0qXAFGQ= -github.com/zclconf/go-cty v1.6.1 h1:wHtZ+LSSQVwUSb+XIJ5E9hgAQxyWATZsAWT+ESJ9dQ0= -github.com/zclconf/go-cty v1.6.1/go.mod h1:VDR4+I79ubFBGm1uJac1226K5yANQFHeauxPBoP54+o= +github.com/zclconf/go-cty v1.7.1 h1:AvsC01GMhMLFL8CgEYdHGM+yLnnDOwhPAYcgTkeF0Gw= +github.com/zclconf/go-cty v1.7.1/go.mod h1:VDR4+I79ubFBGm1uJac1226K5yANQFHeauxPBoP54+o= +github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= github.com/zclconf/go-cty-yaml v1.0.2 h1:dNyg4QLTrv2IfJpm7Wtxi55ed5gLGOlPrZ6kMd51hY0= github.com/zclconf/go-cty-yaml v1.0.2/go.mod 
h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -981,12 +997,17 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 h1:ld7aEMNHoBnnDAX15v1T6z31v8HwR2A9FYOuAhWqkwc= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1046,6 +1067,8 @@ golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 h1:OjiUf46hAmXblsZdnoSXsEUSK golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666 h1:gVCS+QOncANNPlmlO1AhlU3oxs4V9z+gTtPwIk3p2N8= golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634 h1:bNEHhJCnrwMKNMmOx3yAynp5vs5/gRy+XWFtZFu7NBM= golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1127,16 +1150,21 @@ golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed h1:+qzWo37K31KxduIYaBeMqJ8MUOyTayOQKpH9aDPLMSY= golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools 
v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0 h1:SQvH+DjrwqD1hyyQU+K7JegHz1KEZgEwt17p9d6R2eg= golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200817023811-d00afeaade8f h1:33yHANSyO/TeglgY9rBhUpX43wtonTXoFOsMRtNB6qE= golang.org/x/tools v0.0.0-20200817023811-d00afeaade8f/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201007032633-0806396f153e/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201011145850-ed2f50202694/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201028111035-eafbe7b904eb/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201030010431-2feb2bb1ff51/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1166,6 +1194,9 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0 h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.34.0 h1:k40adF3uR+6x/+hO5Dh4ZFUqFp67vxvbpafFiJxl10A= +google.golang.org/api v0.34.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1202,6 +1233,11 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200711021454-869866162049 h1:YFTFpQhgvrLrmxtiIncJxFXeCyq84ixuKWVCaCAi9Oc= google.golang.org/genproto v0.0.0-20200711021454-869866162049/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto 
v0.0.0-20200904004341-0bd0a958aa1d h1:92D1fum1bJLKSdr11OJ+54YeCMCGYIygTA7R/YZxH5M= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1216,6 +1252,10 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0 h1:M5a8xTlYTxwMn5ZFkwhRabsygDY5G8TYLyQDBxJNAxE= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.32.0 h1:zWTV+LMdc3kaiJMSTOFz2UgSBgx8RNQoTGiZu3fR9S0= +google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 4441763d1c7877f498435525bff99e4649190be1 Mon Sep 17 00:00:00 2001 From: angie pinilla Date: Thu, 4 Feb 2021 13:21:43 -0500 Subject: [PATCH 1004/1212] Update .github/workflows/changelog.yml Co-authored-by: Brian Flad --- .github/workflows/changelog.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml index cf1a3809ea7..7f193f4fe07 100644 --- a/.github/workflows/changelog.yml +++ b/.github/workflows/changelog.yml @@ -6,7 +6,7 @@ on: - 'release/**' pull_request: paths: - - .changelog/ + - .changelog/* - .go-version - CHANGELOG.md pull_request_target: From 14bf872c4122d1c63f03b2ae0c2e5f0fb8d998bd Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 4 Feb 2021 14:09:42 -0500 Subject: [PATCH 1005/1212] tests/resource/aws_api_gateway_domain_name: Create and use EDGE endpoint CheckExists and CheckDestroy (#17426) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/17420 Currently the `TestAccAWSAPIGatewayDomainName_CertificateArn` acceptance test fails in TeamCity: ``` === CONT TestAccAWSAPIGatewayDomainName_CertificateArn resource_aws_api_gateway_domain_name_test.go:24: Step 1/2 error: Check failed: Check 1/6 error: NotFoundException: Invalid domain name identifier specified --- FAIL: TestAccAWSAPIGatewayDomainName_CertificateArn (964.31s) ``` EDGE endpoint type resources can only exist in us-east-1 region. While the test function and configuration are set up to switch the region, the test is using `testAccCheckAWSAPIGatewayDomainNameDestroy` and `testAccCheckAWSAPIGatewayDomainNameExists` which use `testAccProvider` instead of `testAccProviderApigatewayEdgeDomainName`. 
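The shape of the fix, condensed below into a minimal sketch (every identifier comes from this patch; the surrounding acceptance-test plumbing is assumed): resolve the service connection from the region-pinned provider instance rather than the default `testAccProvider`.

```go
// Minimal sketch of the corrected existence check, condensed from the diff
// below. The connection comes from testAccProviderApigatewayEdgeDomainName,
// which is pinned to the us-east-1 region required by EDGE endpoint types,
// rather than from the default testAccProvider.
func testAccCheckAWSAPIGatewayEdgeDomainNameExists(resourceName string, domainName *apigateway.DomainName) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[resourceName]
		if !ok {
			return fmt.Errorf("not found: %s", resourceName)
		}

		// Region-pinned provider instance, not the default test provider.
		conn := testAccProviderApigatewayEdgeDomainName.Meta().(*AWSClient).apigatewayconn

		output, err := conn.GetDomainName(&apigateway.GetDomainNameInput{
			DomainName: aws.String(rs.Primary.ID),
		})
		if err != nil {
			return fmt.Errorf("error reading API Gateway Domain Name (%s): %w", rs.Primary.ID, err)
		}

		*domainName = *output

		return nil
	}
}
```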
Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAWSAPIGatewayDomainName_CertificateArn (972.57s) --- PASS: TestAccAWSAPIGatewayDomainName_disappears (25.73s) --- PASS: TestAccAWSAPIGatewayDomainName_MutualTlsAuthentication (198.61s) --- PASS: TestAccAWSAPIGatewayDomainName_RegionalCertificateArn (249.05s) --- PASS: TestAccAWSAPIGatewayDomainName_SecurityPolicy (129.72s) --- PASS: TestAccAWSAPIGatewayDomainName_Tags (223.17s) --- SKIP: TestAccAWSAPIGatewayDomainName_CertificateName (0.00s) --- SKIP: TestAccAWSAPIGatewayDomainName_RegionalCertificateName (0.00s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- PASS: TestAccAWSAPIGatewayDomainName_disappears (42.47s) --- PASS: TestAccAWSAPIGatewayDomainName_RegionalCertificateArn (80.03s) --- PASS: TestAccAWSAPIGatewayDomainName_SecurityPolicy (171.64s) --- PASS: TestAccAWSAPIGatewayDomainName_Tags (112.22s) --- SKIP: TestAccAWSAPIGatewayDomainName_CertificateArn (0.00s) --- SKIP: TestAccAWSAPIGatewayDomainName_CertificateName (0.00s) --- SKIP: TestAccAWSAPIGatewayDomainName_MutualTlsAuthentication (0.00s) --- SKIP: TestAccAWSAPIGatewayDomainName_RegionalCertificateName (0.00s) ``` --- ...source_aws_api_gateway_domain_name_test.go | 65 ++++++++++++++++++- 1 file changed, 63 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_api_gateway_domain_name_test.go b/aws/resource_aws_api_gateway_domain_name_test.go index 4dc3bb755ce..1a2b4740767 100644 --- a/aws/resource_aws_api_gateway_domain_name_test.go +++ b/aws/resource_aws_api_gateway_domain_name_test.go @@ -8,6 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" @@ -24,12 +25,12 @@ func TestAccAWSAPIGatewayDomainName_CertificateArn(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckApigatewayEdgeDomainName(t) }, ProviderFactories: testAccProviderFactories, - CheckDestroy: testAccCheckAWSAPIGatewayDomainNameDestroy, + CheckDestroy: testAccCheckAWSAPIGatewayEdgeDomainNameDestroy, Steps: []resource.TestStep{ { Config: testAccAWSAPIGatewayDomainNameConfig_CertificateArn(rootDomain, domain), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayDomainNameExists(resourceName, &domainName), + testAccCheckAWSAPIGatewayEdgeDomainNameExists(resourceName, &domainName), testAccCheckResourceAttrRegionalARNApigatewayEdgeDomainName(resourceName, "arn", "apigateway", domain), resource.TestCheckResourceAttrPair(resourceName, "certificate_arn", acmCertificateResourceName, "arn"), resource.TestMatchResourceAttr(resourceName, "cloudfront_domain_name", regexp.MustCompile(`[a-z0-9]+.cloudfront.net`)), @@ -385,6 +386,66 @@ func testAccCheckAWSAPIGatewayDomainNameDestroy(s *terraform.State) error { return nil } +func testAccCheckAWSAPIGatewayEdgeDomainNameExists(resourceName string, domainName *apigateway.DomainName) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + + if !ok { + return fmt.Errorf("not found: %s", resourceName) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("resource ID not set") + } + + conn := testAccProviderApigatewayEdgeDomainName.Meta().(*AWSClient).apigatewayconn + + input := &apigateway.GetDomainNameInput{ + DomainName: 
aws.String(rs.Primary.ID), + } + + output, err := conn.GetDomainName(input) + + if err != nil { + return fmt.Errorf("error reading API Gateway Domain Name (%s): %w", rs.Primary.ID, err) + } + + *domainName = *output + + return nil + } +} + +func testAccCheckAWSAPIGatewayEdgeDomainNameDestroy(s *terraform.State) error { + conn := testAccProviderApigatewayEdgeDomainName.Meta().(*AWSClient).apigatewayconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_api_gateway_domain_name" { + continue + } + + input := &apigateway.GetDomainNameInput{ + DomainName: aws.String(rs.Primary.ID), + } + + output, err := conn.GetDomainName(input) + + if tfawserr.ErrCodeEquals(err, apigateway.ErrCodeNotFoundException) { + continue + } + + if err != nil { + return fmt.Errorf("error reading API Gateway Domain Name (%s): %w", rs.Primary.ID, err) + } + + if output != nil && aws.StringValue(output.DomainName) == rs.Primary.ID { + return fmt.Errorf("API Gateway Domain Name (%s) still exists", rs.Primary.ID) + } + } + + return nil +} + func testAccAWSAPIGatewayDomainNameConfigPublicCert(rootDomain, domain string) string { return fmt.Sprintf(` data "aws_route53_zone" "test" { From 9df549faf3ef0d21557296d0695bdb6a1a53d401 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 4 Feb 2021 15:38:54 -0500 Subject: [PATCH 1006/1212] build(deps): bump github.com/aws/aws-sdk-go from 1.37.0 to 1.37.4 (#17469) * build(deps): bump github.com/aws/aws-sdk-go from 1.37.0 to 1.37.4 Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.37.0 to 1.37.4. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.37.0...v1.37.4) Signed-off-by: dependabot[bot] * Update CHANGELOG for #17469 Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Brian Flad --- .changelog/17469.txt | 3 +++ go.mod | 2 +- go.sum | 4 ++-- 3 files changed, 6 insertions(+), 3 deletions(-) create mode 100644 .changelog/17469.txt diff --git a/.changelog/17469.txt b/.changelog/17469.txt new file mode 100644 index 00000000000..ac9d51110bb --- /dev/null +++ b/.changelog/17469.txt @@ -0,0 +1,3 @@ +```release-note:bug +provider: Only validate AWS shared configuration profile SSO configuration when attempting to use SSO cached credentials +``` diff --git a/go.mod b/go.mod index 375c3664226..a3e079bc71f 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/terraform-providers/terraform-provider-aws go 1.15 require ( - github.com/aws/aws-sdk-go v1.37.0 + github.com/aws/aws-sdk-go v1.37.4 github.com/beevik/etree v1.1.0 github.com/fatih/color v1.9.0 // indirect github.com/hashicorp/aws-sdk-go-base v0.7.0 diff --git a/go.sum b/go.sum index 78a3559f029..b3b36197d8c 100644 --- a/go.sum +++ b/go.sum @@ -65,8 +65,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.31.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.37.0 h1:GzFnhOIsrGyQ69s7VgqtrG2BG8v7X7vwB3Xpbd/DBBk= -github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.37.4 
h1:tWxrpMK/oRSXVnjUzhGeCWLR00fW0WF4V4sycYPPrJ8= +github.com/aws/aws-sdk-go v1.37.4/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs= github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= From e6556f6d0536ed199c66921f8c2ac8176802fbd1 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 4 Feb 2021 15:40:41 -0500 Subject: [PATCH 1007/1212] use d.GetOk to retrieve input values on update --- aws/resource_aws_ssoadmin_permission_set.go | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/aws/resource_aws_ssoadmin_permission_set.go b/aws/resource_aws_ssoadmin_permission_set.go index 0ca51ce9ae2..15da5c76fc1 100644 --- a/aws/resource_aws_ssoadmin_permission_set.go +++ b/aws/resource_aws_ssoadmin_permission_set.go @@ -190,19 +190,21 @@ func resourceAwsSsoAdminPermissionSetUpdate(d *schema.ResourceData, meta interfa PermissionSetArn: aws.String(arn), } - if d.HasChange("description") { - input.Description = aws.String(d.Get("description").(string)) - } - // The AWS SSO API requires we send the RelayState value regardless if it's unchanged - // else the existing Permission Set's RelayState value will be cleared + // else the existing Permission Set's RelayState value will be cleared; + // for consistency, we'll check for the "presence of" instead of "if changed" for all input fields // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/17411 + + if v, ok := d.GetOk("description"); ok { + input.Description = aws.String(v.(string)) + } + if v, ok := d.GetOk("relay_state"); ok { input.RelayState = aws.String(v.(string)) } - if d.HasChange("session_duration") { - input.SessionDuration = aws.String(d.Get("session_duration").(string)) + if v, ok := d.GetOk("session_duration"); ok { + input.SessionDuration = aws.String(v.(string)) } _, err := conn.UpdatePermissionSet(input) From e1a41b9515e4f4eb3eff42133d00a710e6b98c72 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 4 Feb 2021 20:40:43 +0000 Subject: [PATCH 1008/1212] Update CHANGELOG.md for #17469 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8ac5813aa23..f3ec9f94cad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ ENHANCEMENTS: BUG FIXES: * data-source/aws_partition: Correct `reverse_dns_prefix` value in AWS China, C2S, and SC2S ([#17142](https://github.com/hashicorp/terraform-provider-aws/issues/17142)) +* provider: Only validate AWS shared configuration profile SSO configuration when attempting to use SSO cached credentials ([#17469](https://github.com/hashicorp/terraform-provider-aws/issues/17469)) * resource/aws_api_gateway_method_settings: Ignore non-existent resource errors during deletion ([#17234](https://github.com/hashicorp/terraform-provider-aws/issues/17234)) * resource/aws_api_gateway_method_settings: Prevent confusing Terraform error on resource disappearance during creation ([#17234](https://github.com/hashicorp/terraform-provider-aws/issues/17234)) * resource/aws_cloudwatch_event_rule: Prevent perpetual differences with `name_prefix` argument values beginning with `terraform-` ([#17030](https://github.com/hashicorp/terraform-provider-aws/issues/17030)) From 5a8dc1c8d5dd6cf9c5fb6ac8e5d8a599116995d6 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Thu, 4 Feb 2021 12:49:10 -0800 Subject: [PATCH 
1009/1212] Remove unneeded precheck

---
 aws/resource_aws_lb_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/aws/resource_aws_lb_test.go b/aws/resource_aws_lb_test.go
index 248b098aede..fad97f412ef 100644
--- a/aws/resource_aws_lb_test.go
+++ b/aws/resource_aws_lb_test.go
@@ -200,7 +200,7 @@ func TestAccAWSLB_IPv6SubnetMapping(t *testing.T) {
 	resourceName := "aws_lb.test"
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:          func() { testAccPreCheck(t); testAccPreCheckElbv2GatewayLoadBalancer(t) },
+		PreCheck:          func() { testAccPreCheck(t) },
 		ProviderFactories: testAccProviderFactories,
 		CheckDestroy:      testAccCheckAWSLBDestroy,
 		Steps: []resource.TestStep{

From 5efab4bb779610a8d8e62341c2b081d1fb877c2d Mon Sep 17 00:00:00 2001
From: Angie Pinilla
Date: Thu, 4 Feb 2021 15:58:02 -0500
Subject: [PATCH 1010/1212] update deprecated config syntax

---
 aws/resource_aws_sfn_state_machine_test.go | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/aws/resource_aws_sfn_state_machine_test.go b/aws/resource_aws_sfn_state_machine_test.go
index 93afea27576..6aeb6a3beda 100644
--- a/aws/resource_aws_sfn_state_machine_test.go
+++ b/aws/resource_aws_sfn_state_machine_test.go
@@ -484,7 +484,7 @@ data "aws_region" "current" {}
 
 resource "aws_iam_role_policy" "iam_policy_for_lambda" {
   name = "iam_policy_for_lambda_%[1]s"
-  role = "${aws_iam_role.iam_for_lambda.id}"
+  role = aws_iam_role.iam_for_lambda.id
   policy = <
Date: Thu, 4 Feb 2021 21:24:53 +0000
Subject: [PATCH 1011/1212] Update CHANGELOG.md for #17423

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f3ec9f94cad..2c76b312cc9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -28,6 +28,7 @@ BUG FIXES:
 * resource/aws_iam_access_key: Ensure `Inactive` `status` is properly configured during resource creation ([#17322](https://github.com/hashicorp/terraform-provider-aws/issues/17322))
 * resource/aws_kinesis_firehose_delivery_stream: Use standard retry timeout for IAM eventual consistency and retry on LakeFormation access errors ([#17254](https://github.com/hashicorp/terraform-provider-aws/issues/17254))
 * resource/aws_security_group: Prevent perpetual differences with `name_prefix` argument values beginning with `terraform-` ([#17030](https://github.com/hashicorp/terraform-provider-aws/issues/17030))
+* resource/aws_ssoadmin_permission_set: Properly update resource with `relay_state` argument ([#17423](https://github.com/hashicorp/terraform-provider-aws/issues/17423))
 * resource/aws_vpc_endpoint: Return unsuccessful deletion information immediately as an error instead of timing out while waiting for deletion ([#16656](https://github.com/hashicorp/terraform-provider-aws/issues/16656))
 * resource/aws_vpc_endpoint_service: Return unsuccessful deletion information immediately as an error instead of timing out while waiting for deletion ([#16656](https://github.com/hashicorp/terraform-provider-aws/issues/16656))

From 53f4c46e1502e441bec8e0a536dbdd0e2b8364bb Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 4 Feb 2021 21:25:14 +0000
Subject: [PATCH 1012/1212] build(deps): bump github.com/aws/aws-sdk-go in /awsproviderlint (#17470)

---
 awsproviderlint/go.mod | 2 +-
 awsproviderlint/go.sum | 4 +-
 .../aws/aws-sdk-go/aws/endpoints/defaults.go | 24 +++++++++++
 .../aws/aws-sdk-go/aws/session/credentials.go | 10 +++--
 .../aws-sdk-go/aws/session/shared_config.go | 12 +++---
.../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/service/s3/endpoint.go | 13 ++---- .../aws-sdk-go/service/s3/endpoint_builder.go | 42 ++++++++++++------- .../aws/aws-sdk-go/service/s3/service.go | 3 ++ awsproviderlint/vendor/modules.txt | 2 +- 10 files changed, 74 insertions(+), 40 deletions(-) diff --git a/awsproviderlint/go.mod b/awsproviderlint/go.mod index b826dab31c5..9b820c021a0 100644 --- a/awsproviderlint/go.mod +++ b/awsproviderlint/go.mod @@ -3,7 +3,7 @@ module github.com/terraform-providers/terraform-provider-aws/awsproviderlint go 1.15 require ( - github.com/aws/aws-sdk-go v1.37.0 + github.com/aws/aws-sdk-go v1.37.4 github.com/bflad/tfproviderlint v0.21.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.2 golang.org/x/tools v0.0.0-20200928201943-a0ef9b62deab diff --git a/awsproviderlint/go.sum b/awsproviderlint/go.sum index 659221bcc8d..eff5dc75ac7 100644 --- a/awsproviderlint/go.sum +++ b/awsproviderlint/go.sum @@ -56,8 +56,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.37.0 h1:GzFnhOIsrGyQ69s7VgqtrG2BG8v7X7vwB3Xpbd/DBBk= -github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.37.4 h1:tWxrpMK/oRSXVnjUzhGeCWLR00fW0WF4V4sycYPPrJ8= +github.com/aws/aws-sdk-go v1.37.4/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/bflad/gopaniccheck v0.1.0 h1:tJftp+bv42ouERmUMWLoUn/5bi/iQZjHPznM00cP/bU= github.com/bflad/gopaniccheck v0.1.0/go.mod h1:ZCj2vSr7EqVeDaqVsWN4n2MwdROx1YL+LFo47TSWtsA= github.com/bflad/tfproviderlint v0.21.0 h1:iSNU4khz+55oYA+5aXXMrz5Max4Mytb0JwPGhOwTIJo= diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 3cc48800d6d..6ee1adc08ae 100644 --- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -9173,6 +9173,12 @@ var awsusgovPartition = partition{ Endpoints: endpoints{ "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "s3": service{ @@ -9892,12 +9898,30 @@ var awsisoPartition = partition{ "us-iso-east-1": endpoint{}, }, }, + "medialive": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "mediapackage": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "monitoring": service{ Endpoints: endpoints{ "us-iso-east-1": endpoint{}, }, }, + "outposts": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "rds": service{ Endpoints: endpoints{ diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go index b0cef7575d2..3ddd4e51282 100644 --- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -102,7 +102,7 @@ func resolveCredsFromProfile(cfg *aws.Config, ) 
case sharedCfg.hasSSOConfiguration(): - creds = resolveSSOCredentials(cfg, sharedCfg, handlers) + creds, err = resolveSSOCredentials(cfg, sharedCfg, handlers) case len(sharedCfg.CredentialProcess) != 0: // Get credentials from CredentialProcess @@ -155,7 +155,11 @@ func resolveCredsFromProfile(cfg *aws.Config, return creds, nil } -func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers request.Handlers) *credentials.Credentials { +func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers request.Handlers) (*credentials.Credentials, error) { + if err := sharedCfg.validateSSOConfiguration(); err != nil { + return nil, err + } + cfgCopy := cfg.Copy() cfgCopy.Region = &sharedCfg.SSORegion @@ -167,7 +171,7 @@ func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers req sharedCfg.SSOAccountID, sharedCfg.SSORoleName, sharedCfg.SSOStartURL, - ) + ), nil } // valid credential source values diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index 5ab05d56cc6..c3f38b6ec07 100644 --- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -70,6 +70,8 @@ const ( // sharedConfig represents the configuration fields of the SDK config files. type sharedConfig struct { + Profile string + // Credentials values from the config file. Both aws_access_key_id and // aws_secret_access_key must be provided together in the same file to be // considered valid. The values will be ignored if not a complete group. @@ -201,6 +203,8 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { } func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error { + cfg.Profile = profile + // Trim files from the list that don't exist. 
var skippedFiles int var profileNotFoundErr error @@ -365,10 +369,6 @@ func (cfg *sharedConfig) validateCredentialsConfig(profile string) error { return err } - if err := cfg.validateSSOConfiguration(profile); err != nil { - return err - } - return nil } @@ -409,7 +409,7 @@ func (cfg *sharedConfig) validateCredentialType() error { return nil } -func (cfg *sharedConfig) validateSSOConfiguration(profile string) error { +func (cfg *sharedConfig) validateSSOConfiguration() error { if !cfg.hasSSOConfiguration() { return nil } @@ -433,7 +433,7 @@ func (cfg *sharedConfig) validateSSOConfiguration(profile string) error { if len(missing) > 0 { return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", - profile, strings.Join(missing, ", ")) + cfg.Profile, strings.Join(missing, ", ")) } return nil diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go index cf26997eb29..781c2ca1086 100644 --- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.37.0" +const SDKVersion = "1.37.4" diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go index 403aebb688c..6346b927960 100644 --- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go @@ -98,7 +98,7 @@ func endpointHandler(req *request.Request) { Request: req, } - if resReq.IsCrossPartition() { + if len(resReq.Request.ClientInfo.PartitionID) != 0 && resReq.IsCrossPartition() { req.Error = s3shared.NewClientPartitionMismatchError(resource, req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) return @@ -110,11 +110,6 @@ func endpointHandler(req *request.Request) { return } - if resReq.HasCustomEndpoint() { - req.Error = s3shared.NewInvalidARNWithCustomEndpointError(resource, nil) - return - } - switch tv := resource.(type) { case arn.AccessPointARN: err = updateRequestAccessPointEndpoint(req, tv) @@ -155,8 +150,7 @@ func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.Acce req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) } - // Ignore the disable host prefix for access points since custom endpoints - // are not supported. + // Ignore the disable host prefix for access points req.Config.DisableEndpointHostPrefix = aws.Bool(false) if err := accessPointEndpointBuilder(accessPoint).build(req); err != nil { @@ -181,8 +175,7 @@ func updateRequestOutpostAccessPointEndpoint(req *request.Request, accessPoint a req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) } - // Ignore the disable host prefix for access points since custom endpoints - // are not supported. 
+ // Ignore the disable host prefix for access points req.Config.DisableEndpointHostPrefix = aws.Bool(false) if err := outpostAccessPointEndpointBuilder(accessPoint).build(req); err != nil { diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go index c1c77da9adb..eb77d981ef6 100644 --- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go @@ -22,6 +22,11 @@ const ( outpostAccessPointPrefixTemplate = accessPointPrefixTemplate + "{" + outpostPrefixLabel + "}." ) +// hasCustomEndpoint returns true if endpoint is a custom endpoint +func hasCustomEndpoint(r *request.Request) bool { + return len(aws.StringValue(r.Config.Endpoint)) > 0 +} + // accessPointEndpointBuilder represents the endpoint builder for access point arn type accessPointEndpointBuilder arn.AccessPointARN @@ -55,16 +60,19 @@ func (a accessPointEndpointBuilder) build(req *request.Request) error { req.ClientInfo.PartitionID, cfgRegion, err) } - if err = updateRequestEndpoint(req, endpoint.URL); err != nil { - return err - } + endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL)) - const serviceEndpointLabel = "s3-accesspoint" + if !hasCustomEndpoint(req) { + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + const serviceEndpointLabel = "s3-accesspoint" - // dual stack provided by endpoint resolver - cfgHost := req.HTTPRequest.URL.Host - if strings.HasPrefix(cfgHost, "s3") { - req.HTTPRequest.URL.Host = serviceEndpointLabel + cfgHost[2:] + // dual stack provided by endpoint resolver + cfgHost := req.HTTPRequest.URL.Host + if strings.HasPrefix(cfgHost, "s3") { + req.HTTPRequest.URL.Host = serviceEndpointLabel + cfgHost[2:] + } } protocol.HostPrefixBuilder{ @@ -116,14 +124,17 @@ func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error { req.ClientInfo.PartitionID, resolveRegion, err) } - if err = updateRequestEndpoint(req, endpoint.URL); err != nil { - return err - } + endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL)) - // add url host as s3-outposts - cfgHost := req.HTTPRequest.URL.Host - if strings.HasPrefix(cfgHost, endpointsID) { - req.HTTPRequest.URL.Host = resolveService + cfgHost[len(endpointsID):] + if !hasCustomEndpoint(req) { + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + // add url host as s3-outposts + cfgHost := req.HTTPRequest.URL.Host + if strings.HasPrefix(cfgHost, endpointsID) { + req.HTTPRequest.URL.Host = resolveService + cfgHost[len(endpointsID):] + } } protocol.HostPrefixBuilder{ @@ -159,7 +170,6 @@ func resolveRegionalEndpoint(r *request.Request, region string, endpointsID stri } func updateRequestEndpoint(r *request.Request, endpoint string) (err error) { - endpoint = endpoints.AddScheme(endpoint, aws.BoolValue(r.Config.DisableSSL)) r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath) if err != nil { diff --git a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/s3/service.go index b4c07b4d47e..1b78b5d45e1 100644 --- a/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/s3/service.go +++ b/awsproviderlint/vendor/github.com/aws/aws-sdk-go/service/s3/service.go @@ -48,6 +48,9 @@ const ( // svc := s3.New(mySession, 
aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 { c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "s3" + } return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } diff --git a/awsproviderlint/vendor/modules.txt b/awsproviderlint/vendor/modules.txt index 7e6068d0087..ebe9107044f 100644 --- a/awsproviderlint/vendor/modules.txt +++ b/awsproviderlint/vendor/modules.txt @@ -12,7 +12,7 @@ cloud.google.com/go/storage github.com/agext/levenshtein # github.com/apparentlymart/go-textseg v1.0.0 github.com/apparentlymart/go-textseg/textseg -# github.com/aws/aws-sdk-go v1.37.0 +# github.com/aws/aws-sdk-go v1.37.4 ## explicit github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn From 5cce35923debf1f6175f6b892d4a4974387df017 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 4 Feb 2021 17:32:03 -0500 Subject: [PATCH 1013/1212] add ErrorCheck to tests w/known unsupported feature errors --- aws/resource_aws_emr_managed_scaling_policy_test.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/aws/resource_aws_emr_managed_scaling_policy_test.go b/aws/resource_aws_emr_managed_scaling_policy_test.go index 059afc271b0..92d72b3fc77 100644 --- a/aws/resource_aws_emr_managed_scaling_policy_test.go +++ b/aws/resource_aws_emr_managed_scaling_policy_test.go @@ -17,6 +17,7 @@ func TestAccAwsEmrManagedScalingPolicy_basic(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, + ErrorCheck: testAccErrorCheckSkipEmrManagedScalingPolicy(t), Providers: testAccProviders, CheckDestroy: testAccCheckAWSEmrManagedScalingPolicyDestroy, @@ -41,6 +42,7 @@ func TestAccAwsEmrManagedScalingPolicy_ComputeLimits_MaximumCoreCapacityUnits(t rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, + ErrorCheck: testAccErrorCheckSkipEmrManagedScalingPolicy(t), Providers: testAccProviders, CheckDestroy: testAccCheckAWSEmrManagedScalingPolicyDestroy, @@ -65,6 +67,7 @@ func TestAccAwsEmrManagedScalingPolicy_ComputeLimits_MaximumOndemandCapacityUnit rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, + ErrorCheck: testAccErrorCheckSkipEmrManagedScalingPolicy(t), Providers: testAccProviders, CheckDestroy: testAccCheckAWSEmrManagedScalingPolicyDestroy, @@ -89,6 +92,7 @@ func TestAccAwsEmrManagedScalingPolicy_disappears(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, + ErrorCheck: testAccErrorCheckSkipEmrManagedScalingPolicy(t), Providers: testAccProviders, CheckDestroy: testAccCheckAWSEmrManagedScalingPolicyDestroy, Steps: []resource.TestStep{ @@ -104,6 +108,13 @@ func TestAccAwsEmrManagedScalingPolicy_disappears(t *testing.T) { }) } +// testAccErrorCheckSkipEmrManagedScalingPolicy skips tests that have error messages indicating unsupported features +func testAccErrorCheckSkipEmrManagedScalingPolicy(t *testing.T) resource.ErrorCheckFunc { + return testAccErrorCheckSkipMessagesContaining(t, + "Managed scaling is not available", + ) +} + func testAccAWSEmrManagedScalingPolicy_basic(r string) string { return fmt.Sprintf(testAccAWSEmrManagedScalingPolicyBase+` resource "aws_emr_managed_scaling_policy" "testpolicy" 
{ From 480870c54c4838123bd27b07aa8730d2888bfe6b Mon Sep 17 00:00:00 2001 From: tf-release-bot Date: Fri, 5 Feb 2021 00:04:38 +0000 Subject: [PATCH 1014/1212] v3.27.0 --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2c76b312cc9..4b0d4766f2c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## 3.27.0 (Unreleased) +## 3.27.0 (February 05, 2021) FEATURES: From 88d07d4078c46c0a8715f0f62eeda8755ab61f9f Mon Sep 17 00:00:00 2001 From: Simon Davis Date: Thu, 4 Feb 2021 16:35:18 -0800 Subject: [PATCH 1015/1212] Update CHANGELOG.md after release --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b0d4766f2c..1f59be45136 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,5 @@ +## 3.28.0 (Unreleased) + ## 3.27.0 (February 05, 2021) FEATURES: From f0c7b29a339e927c2106d28f5211ee5aaa71ca75 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 4 Feb 2021 19:43:27 -0500 Subject: [PATCH 1016/1212] tests/resource/aws_db_security_group: Fix CheckDestroy and CheckExists Provider (#17438) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/17392 Currently the `TestAccAWSDBSecurityGroup_basic` acceptance test fails in TeamCity: ``` === CONT TestAccAWSDBSecurityGroup_basic resource_aws_db_security_group_test.go:20: Step 1/2 error: Check failed: Check 1/8 error: DBSecurityGroupNotFound: DBSecurityGroup tf-acc-fwy4o not found. status code: 404, request id: fd813712-f78f-435a-99f5-cee60d22d51d --- FAIL: TestAccAWSDBSecurityGroup_basic (15.08s) ``` The resource can only exist in an EC2-Classic enabled region. While the test function and configuration are set up to switch the region, the `testAccCheckAWSDBSecurityGroupDestroy` and `testAccCheckAWSDBSecurityGroupExists` use `testAccProvider` instead of `testAccProviderEc2Classic`. 
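The fix follows the same region-pinned provider pattern as the API Gateway EDGE change above. A minimal sketch of the corrected `CheckDestroy` (the not-found handling is an assumption keyed off the `DBSecurityGroupNotFound` error code visible in the failure output; the actual two-line diff follows):

```go
// Minimal sketch of the corrected CheckDestroy. The RDS connection comes from
// testAccProviderEc2Classic, which targets an EC2-Classic enabled region.
func testAccCheckAWSDBSecurityGroupDestroy(s *terraform.State) error {
	conn := testAccProviderEc2Classic.Meta().(*AWSClient).rdsconn

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "aws_db_security_group" {
			continue
		}

		_, err := conn.DescribeDBSecurityGroups(&rds.DescribeDBSecurityGroupsInput{
			DBSecurityGroupName: aws.String(rs.Primary.ID),
		})

		// Assumption: "DBSecurityGroupNotFound" is the not-found error code,
		// as seen in the TeamCity failure output above.
		if tfawserr.ErrCodeEquals(err, "DBSecurityGroupNotFound") {
			continue
		}

		if err != nil {
			return err
		}

		return fmt.Errorf("DB Security Group (%s) still exists", rs.Primary.ID)
	}

	return nil
}
```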
Output from acceptance testing in AWS Commercial:

```
--- PASS: TestAccAWSDBSecurityGroup_basic (14.52s)
```

Output from acceptance testing in AWS GovCloud (US):

```
--- SKIP: TestAccAWSDBSecurityGroup_basic (2.76s)
```
---
 aws/resource_aws_db_security_group_test.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/aws/resource_aws_db_security_group_test.go b/aws/resource_aws_db_security_group_test.go
index 739b6923791..a23192e6a36 100644
--- a/aws/resource_aws_db_security_group_test.go
+++ b/aws/resource_aws_db_security_group_test.go
@@ -47,7 +47,7 @@ func TestAccAWSDBSecurityGroup_basic(t *testing.T) {
 }
 
 func testAccCheckAWSDBSecurityGroupDestroy(s *terraform.State) error {
-	conn := testAccProvider.Meta().(*AWSClient).rdsconn
+	conn := testAccProviderEc2Classic.Meta().(*AWSClient).rdsconn
 
 	for _, rs := range s.RootModule().Resources {
 		if rs.Type != "aws_db_security_group" {
@@ -114,7 +114,7 @@ func testAccCheckAWSDBSecurityGroupExists(n string, v *rds.DBSecurityGroup) reso
 			return fmt.Errorf("No DB Security Group ID is set")
 		}
 
-		conn := testAccProvider.Meta().(*AWSClient).rdsconn
+		conn := testAccProviderEc2Classic.Meta().(*AWSClient).rdsconn
 
 		opts := rds.DescribeDBSecurityGroupsInput{
 			DBSecurityGroupName: aws.String(rs.Primary.ID),

From 0e98817ff861c752f244da66b79c9acf9aa15af0 Mon Sep 17 00:00:00 2001
From: Brian Flad
Date: Thu, 4 Feb 2021 19:44:01 -0500
Subject: [PATCH 1017/1212] tests/data-source/aws_workspaces_workspace: Fix
 TestAccDataSourceAwsWorkspacesWorkspace_byDirectoryID_userName for any
 Terraform CLI version (#17445)

Reference: https://github.com/hashicorp/terraform-provider-aws/issues/17401

This acceptance test is currently failing on Terraform 0.14:

```
=== CONT  TestAccDataSourceAwsWorkspacesWorkspace_byDirectoryID_userName
    data_source_aws_workspaces_workspace_test.go:48: Step 1/1 error: Expected a non-empty plan, but got an empty plan
--- FAIL: TestAccDataSourceAwsWorkspacesWorkspace_byDirectoryID_userName (1701.01s)
```

Using `depends_on` with a data source after Terraform 0.12 (or 0.13, I forget off the top of my head) will not show a perpetual plan difference. Instead of using `depends_on`, we should be able to pass the graph ordering through via `directory_id = aws_workspaces_workspace.test.directory_id` and remove the `ExpectNonEmptyPlan` hack for all Terraform versions.
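For reference, a minimal sketch of the implicit-dependency pattern this relies on; the wrapper function name is hypothetical, but the configuration body mirrors the diff below:

```go
// Hypothetical config helper: because the data source reads attributes of the
// aws_workspaces_workspace resource (defined elsewhere in the rendered
// configuration), Terraform orders the read after the apply implicitly, so
// neither depends_on nor ExpectNonEmptyPlan is needed.
func testAccWorkspacesWorkspaceDataSourceConfigSketch() string {
	return `
data "aws_workspaces_workspace" "test" {
  # Attribute references carry graph ordering; no depends_on required.
  directory_id = aws_workspaces_workspace.test.directory_id
  user_name    = aws_workspaces_workspace.test.user_name
}
`
}
```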
Output from acceptance testing in AWS Commercial:

```
--- PASS: TestAccDataSourceAwsWorkspacesWorkspace_byDirectoryID_userName (1736.18s)
```

Output from acceptance testing in AWS GovCloud (US):

```
--- FAIL: TestAccDataSourceAwsWorkspacesWorkspace_byDirectoryID_userName (21.31s) # https://github.com/hashicorp/terraform-provider-aws/issues/17401
```
---
 aws/data_source_aws_workspaces_workspace_test.go | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/aws/data_source_aws_workspaces_workspace_test.go b/aws/data_source_aws_workspaces_workspace_test.go
index 2e8a910fe6b..f60a85e8b18 100644
--- a/aws/data_source_aws_workspaces_workspace_test.go
+++ b/aws/data_source_aws_workspaces_workspace_test.go
@@ -67,7 +67,6 @@ func TestAccDataSourceAwsWorkspacesWorkspace_byDirectoryID_userName(t *testing.T
 					resource.TestCheckResourceAttrPair(dataSourceName, "workspace_properties.0.user_volume_size_gib", resourceName, "workspace_properties.0.user_volume_size_gib"),
 					resource.TestCheckResourceAttrPair(dataSourceName, "tags.%", resourceName, "tags.%"),
 				),
-				ExpectNonEmptyPlan: true, // Hack to overcome data source with depends_on refresh
 			},
 		},
 	})
@@ -137,10 +136,8 @@ resource "aws_workspaces_workspace" "test" {
 }
 
 data "aws_workspaces_workspace" "test" {
-  directory_id = aws_workspaces_directory.test.id
-  user_name    = "Administrator"
-
-  depends_on = [aws_workspaces_workspace.test]
+  directory_id = aws_workspaces_workspace.test.directory_id
+  user_name    = aws_workspaces_workspace.test.user_name
 }
 `)
 }

From c699ff9513184f386520452f0bd495442ba34a1f Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 4 Feb 2021 21:43:22 -0500
Subject: [PATCH 1018/1212] build(deps): bump github.com/hashicorp/go-cleanhttp from 0.5.1 to 0.5.2 (#17454)

Bumps [github.com/hashicorp/go-cleanhttp](https://github.com/hashicorp/go-cleanhttp) from 0.5.1 to 0.5.2.
- [Release notes](https://github.com/hashicorp/go-cleanhttp/releases) - [Commits](https://github.com/hashicorp/go-cleanhttp/compare/v0.5.1...v0.5.2) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index a3e079bc71f..dc933bb76ed 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/beevik/etree v1.1.0 github.com/fatih/color v1.9.0 // indirect github.com/hashicorp/aws-sdk-go-base v0.7.0 - github.com/hashicorp/go-cleanhttp v0.5.1 + github.com/hashicorp/go-cleanhttp v0.5.2 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/go-multierror v1.1.0 github.com/hashicorp/go-version v1.2.1 diff --git a/go.sum b/go.sum index b3b36197d8c..03b9dbf4d66 100644 --- a/go.sum +++ b/go.sum @@ -175,6 +175,8 @@ github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuD github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= From ce0ef46a651df80ffcdd9d89c5b40cfbf8a5a970 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 5 Feb 2021 20:37:49 +0200 Subject: [PATCH 1019/1212] Add sweeper + use catalog_id when deleting --- aws/resource_aws_glue_catalog_database.go | 27 +++++----- ...resource_aws_glue_catalog_database_test.go | 50 +++++++++++++++++++ 2 files changed, 62 insertions(+), 15 deletions(-) diff --git a/aws/resource_aws_glue_catalog_database.go b/aws/resource_aws_glue_catalog_database.go index 17394f0abe1..21e6be86204 100644 --- a/aws/resource_aws_glue_catalog_database.go +++ b/aws/resource_aws_glue_catalog_database.go @@ -162,37 +162,34 @@ func resourceAwsGlueCatalogDatabaseRead(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error reading Glue Catalog Database: %s", err.Error()) } + database := out.Database databaseArn := arn.ARN{ Partition: meta.(*AWSClient).partition, Service: "glue", Region: meta.(*AWSClient).region, AccountID: meta.(*AWSClient).accountid, - Resource: fmt.Sprintf("database/%s", aws.StringValue(out.Database.Name)), + Resource: fmt.Sprintf("database/%s", aws.StringValue(database.Name)), }.String() d.Set("arn", databaseArn) - - d.Set("name", out.Database.Name) - d.Set("catalog_id", catalogID) - d.Set("description", out.Database.Description) - d.Set("location_uri", out.Database.LocationUri) - d.Set("parameters", aws.StringValueMap(out.Database.Parameters)) + d.Set("name", database.Name) + d.Set("catalog_id", database.CatalogId) + d.Set("description", database.Description) + d.Set("location_uri", database.LocationUri) + d.Set("parameters", aws.StringValueMap(database.Parameters)) return nil } func resourceAwsGlueCatalogDatabaseDelete(d *schema.ResourceData, meta interface{}) error { conn := 
meta.(*AWSClient).glueconn - catalogID, name, err := readAwsGlueCatalogID(d.Id()) - if err != nil { - return err - } - log.Printf("[DEBUG] Glue Catalog Database: %s:%s", catalogID, name) - _, err = conn.DeleteDatabase(&glue.DeleteDatabaseInput{ - Name: aws.String(name), + log.Printf("[DEBUG] Glue Catalog Database: %s", d.Id()) + _, err := conn.DeleteDatabase(&glue.DeleteDatabaseInput{ + Name: aws.String(d.Get("name").(string)), + CatalogId: aws.String(d.Get("catalog_id").(string)), }) if err != nil { - return fmt.Errorf("Error deleting Glue Catalog Database: %s", err.Error()) + return fmt.Errorf("Error deleting Glue Catalog Database: %w", err) } return nil } diff --git a/aws/resource_aws_glue_catalog_database_test.go b/aws/resource_aws_glue_catalog_database_test.go index 70f2bc4ae41..46388f318b9 100644 --- a/aws/resource_aws_glue_catalog_database_test.go +++ b/aws/resource_aws_glue_catalog_database_test.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "log" "testing" "github.com/aws/aws-sdk-go/aws" @@ -11,6 +12,55 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) +func init() { + resource.AddTestSweepers("aws_glue_catalog_database", &resource.Sweeper{ + Name: "aws_glue_catalog_database", + F: testSweepGlueCatalogDatabases, + }) +} + +func testSweepGlueCatalogDatabases(region string) error { + client, err := sharedClientForRegion(region) + if err != nil { + return fmt.Errorf("error getting client: %s", err) + } + conn := client.(*AWSClient).glueconn + + input := &glue.GetDatabasesInput{} + err = conn.GetDatabasesPages(input, func(page *glue.GetDatabasesOutput, lastPage bool) bool { + if len(page.DatabaseList) == 0 { + log.Printf("[INFO] No Glue Catalog Databases to sweep") + return false + } + for _, database := range page.DatabaseList { + name := aws.StringValue(database.Name) + + log.Printf("[INFO] Deleting Glue Catalog Database: %s", name) + + r := resourceAwsGlueCatalogDatabase() + d := r.Data(nil) + d.SetId("???") + d.Set("name", name) + d.Set("catalog_id", database.CatalogId) + + err := r.Delete(d, client) + if err != nil { + log.Printf("[ERROR] Failed to delete Glue Catalog Database %s: %s", name, err) + } + } + return !lastPage + }) + if err != nil { + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping Glue Catalog Database sweep for %s: %s", region, err) + return nil + } + return fmt.Errorf("Error retrieving Glue Catalog Databases: %s", err) + } + + return nil +} + func TestAccAWSGlueCatalogDatabase_full(t *testing.T) { resourceName := "aws_glue_catalog_database.test" rName := acctest.RandomWithPrefix("tf-acc-test") From a9a74fbff63471053cf8300a25ceab937f0782fe Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 5 Feb 2021 20:40:42 +0200 Subject: [PATCH 1020/1212] changelog --- .changelog/17489.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/17489.txt diff --git a/.changelog/17489.txt b/.changelog/17489.txt new file mode 100644 index 00000000000..53accd63185 --- /dev/null +++ b/.changelog/17489.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_glue_catalog_database: Use Catalog Id when deleting Databases. 
+``` From 5ef13da2b5d964ab3bab1e0192817386b76fb500 Mon Sep 17 00:00:00 2001 From: Bill Rich Date: Fri, 5 Feb 2021 14:37:41 -0800 Subject: [PATCH 1021/1212] Update CHANGELOG.md --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f59be45136..7caded3ed3d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,8 @@ FEATURES: * **New Resource:** `aws_ec2_transit_gateway_prefix_list_reference` ([#16823](https://github.com/hashicorp/terraform-provider-aws/issues/16823)) * **New Resource:** `aws_route53_key_signing_key` ([#16840](https://github.com/hashicorp/terraform-provider-aws/issues/16840)) +* **New Resource:** `aws_cloudfront_origin_request_policy` ([#17342](https://github.com/hashicorp/terraform-provider-aws/issues/17342)) +* **New Data Source:** `aws_cloudfront_origin_request_policy` ([#17342](https://github.com/hashicorp/terraform-provider-aws/issues/17342)) ENHANCEMENTS: @@ -15,6 +17,10 @@ ENHANCEMENTS: * resource/aws_iam_access_key: Add `create_date` attribute ([#17318](https://github.com/hashicorp/terraform-provider-aws/issues/17318)) * resource/aws_iam_access_key: Support resource import ([#17321](https://github.com/hashicorp/terraform-provider-aws/issues/17321)) * resource/aws_subnet: Add `customer_owned_ipv4_pool` and `map_customer_owned_ip_on_launch` attributes ([#16676](https://github.com/hashicorp/terraform-provider-aws/issues/16676)) +* resource/aws_lb: Add `ipv6_address` attribute ([#17229](https://github.com/hashicorp/terraform-provider-aws/issues/17229)) +* resource/aws_sfn_state_machine: Add support for `EXPRESS` state machine `type` ([#12249](https://github.com/hashicorp/terraform-provider-aws/issues/12249)) +* resource/aws_lb_target_group: Add `protocol_version` attribute ([#17260](https://github.com/hashicorp/terraform-provider-aws/issues/17260)) +* resource/aws_cloudfront_distribution: Add `cloudfront_origin_request_policy_id` attribute ([#17342](https://github.com/hashicorp/terraform-provider-aws/issues/17342)) BUG FIXES: From 942c78eca8c34427c7ec7f5c219fe475ca55c42e Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Fri, 5 Feb 2021 15:06:44 -0800 Subject: [PATCH 1022/1212] Updates version of go-getter to avoid checksum error --- tools/go.sum | 92 ++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 89 insertions(+), 3 deletions(-) diff --git a/tools/go.sum b/tools/go.sum index 127b26c7909..678e9fe5719 100644 --- a/tools/go.sum +++ b/tools/go.sum @@ -17,6 +17,11 @@ cloud.google.com/go v0.61.0/go.mod h1:XukKJg4Y7QsUu0Hxg3qQKUWR4VuWivmyMK2+rUyxAq cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.76.0 h1:Ckw+E/QYZgd/5bpI4wz4h6f+jmpvh9S9uSrKNnbicJI= +cloud.google.com/go v0.76.0/go.mod h1:r9EvIAvLrunusnetGdQ50M/gKui1x3zdGW/VELGkdpw= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -36,6 +41,8 @@ 
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.13.0 h1:amPvhCOI+Hltp6rPu+62YdwhIrjf+34PKVAL4HwgYwk= +cloud.google.com/go/storage v1.13.0/go.mod h1:pqFyBUK3zZqMIIU5+8NaZq6/Ma3ClgUg9Hv5jfuJnvo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v45.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= @@ -120,6 +127,8 @@ github.com/aws/aws-sdk-go v1.31.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.37.1 h1:BTHmuN+gzhxkvU9sac2tZvaY0gV9ihbHw+KxZOecYvY= github.com/aws/aws-sdk-go v1.37.1/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.37.6 h1:SWYjRvyZw6DJc3pkZfRWVRD/5wiTDuwOkyb89AAkEBY= +github.com/aws/aws-sdk-go v1.37.6/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -149,6 +158,8 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/coreos/bbolt v1.3.0/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -181,6 +192,8 @@ github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch 
v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -290,6 +303,8 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= @@ -329,6 +344,7 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= @@ -338,8 +354,9 @@ github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0 h1:pMen7vLs8nvgEYhywH3KDWJIJTeEr2ULsVWHWYHQyBs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -347,10 +364,16 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof 
v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= @@ -392,11 +415,13 @@ github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuD github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= github.com/hashicorp/go-getter v1.5.0/go.mod h1:a7z7NPPfNQpJWcn4rSWFtdrSldqLdLPEF3d8nFMsSLM= -github.com/hashicorp/go-getter v1.5.1 h1:LZ49OxqBBtdKJymlpX7oTyqGBQRg4xxQDyPW4hzoZqM= -github.com/hashicorp/go-getter v1.5.1/go.mod h1:a7z7NPPfNQpJWcn4rSWFtdrSldqLdLPEF3d8nFMsSLM= +github.com/hashicorp/go-getter v1.5.2 h1:XDo8LiAcDisiqZdv0TKgz+HtX3WN7zA2JD1R1tjsabE= +github.com/hashicorp/go-getter v1.5.2/go.mod h1:orNH3BTYLu/fIxGIdLjLoAJHWMDQ/UKQr5O4m3iBuoo= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.10.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= @@ -468,6 +493,7 @@ github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1 github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= @@ -517,6 +543,10 @@ github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.0/go.mod 
h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.2 h1:MiK62aErc3gIiVEtyzKfeOHgW7atJb5g/KNX5m3c2nQ= +github.com/klauspost/compress v1.11.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg= +github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= @@ -813,6 +843,8 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= +github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA= github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= github.com/ultraware/whitespace v0.0.4 h1:If7Va4cM03mpgrNH9k49/VOicWpGoG70XPBFFODYDsg= @@ -861,6 +893,9 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.22.6 h1:BdkrbWrzDlV9dnbzoP7sfN+dHheJ4J9JOaYxcUDL+ok= +go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= @@ -903,6 +938,8 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -913,6 +950,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0 h1:8pl+sMODzuvGJkmj2W4kZihvVb5mKm8pB/X44PIQHv8= golang.org/x/mod v0.4.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -956,9 +995,14 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777 h1:003p0dJM77cxMSyCPFphvZf/Y5/NXf5fzg6ufd1/Oew= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -966,6 +1010,11 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 h1:ld7aEMNHoBnnDAX15v1T6z31v8HwR2A9FYOuAhWqkwc= golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210113205817-d3ed898aa8a3/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210201163806-010130855d6c h1:HiAZXo96zOhVhtFHchj/ojzoxCFiPrp9/j0GtS38V3g= +golang.org/x/oauth2 v0.0.0-20210201163806-010130855d6c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -975,6 +1024,7 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync 
v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1027,6 +1077,13 @@ golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634 h1:bNEHhJCnrwMKNMmOx3yAynp5vs5/gRy+XWFtZFu7NBM= golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1036,6 +1093,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1117,12 +1176,19 @@ golang.org/x/tools v0.0.0-20201007032633-0806396f153e/go.mod h1:z6u4i615ZeAfBE4X golang.org/x/tools v0.0.0-20201011145850-ed2f50202694/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201028111035-eafbe7b904eb/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201030010431-2feb2bb1ff51/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105210202-9ed45478a130 h1:8qSBr5nyKsEgkP918Pu5FFDZpTtLIjXSo6mrtdVOFfk= golang.org/x/tools v0.0.0-20210105210202-9ed45478a130/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1146,6 +1212,11 @@ google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSr google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/api v0.34.0 h1:k40adF3uR+6x/+hO5Dh4ZFUqFp67vxvbpafFiJxl10A= google.golang.org/api v0.34.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.38.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.39.0 h1:zHCTXf0NeDdKTgcSQpT+ZflWAqHsEp1GmdpxW09f3YM= +google.golang.org/api v0.39.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1153,6 +1224,8 @@ google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1186,6 +1259,15 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d h1:92D1fum1bJLKSdr11OJ+54YeCMCGYIygTA7R/YZxH5M= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210202153253-cf70463f6119/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210203152818-3206188e46ba/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210204154452-deb828366460 h1:pvsg2TgyP8bWrYqyL10tbNHu5KypD5DWJPrCjaTkwZA= +google.golang.org/genproto v0.0.0-20210204154452-deb828366460/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1203,6 +1285,10 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.32.0 h1:zWTV+LMdc3kaiJMSTOFz2UgSBgx8RNQoTGiZu3fR9S0= google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0 h1:TwIQcH3es+MojMVojxxfQ3l3OF2KzlRxML2xZq0kRo8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 234e6a9cb09f5dae237689a5b6f81d9f49e522cc Mon Sep 17 00:00:00 2001 From: Ricool06 <14017085+Ricool06@users.noreply.github.com> Date: Mon, 27 Jan 2020 13:57:33 +0000 Subject: [PATCH 1023/1212] resource/aws_sns_topic_subscription: Add redrive_policy argument --- aws/resource_aws_sns_topic_subscription.go | 21 ++++ ...esource_aws_sns_topic_subscription_test.go | 108 ++++++++++++++++++ .../r/sns_topic_subscription.html.markdown | 1 + 3 files changed, 130 insertions(+) diff --git a/aws/resource_aws_sns_topic_subscription.go b/aws/resource_aws_sns_topic_subscription.go index 353c20d9d9a..ec60c7042c2 100644 --- a/aws/resource_aws_sns_topic_subscription.go +++ 
b/aws/resource_aws_sns_topic_subscription.go @@ -73,6 +73,11 @@ func resourceAwsSnsTopicSubscription() *schema.Resource { ValidateFunc: validation.StringIsJSON, DiffSuppressFunc: suppressEquivalentSnsTopicSubscriptionDeliveryPolicy, }, + "redrive_policy": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.ValidateJsonString, + }, "raw_message_delivery": { Type: schema.TypeBool, Optional: true, @@ -147,6 +152,12 @@ func resourceAwsSnsTopicSubscriptionUpdate(d *schema.ResourceData, meta interfac } } + if d.HasChange("redrive_policy") { + if err := snsSubscriptionAttributeUpdate(snsconn, d.Id(), "RedrivePolicy", d.Get("redrive_policy").(string)); err != nil { + return err + } + } + return resourceAwsSnsTopicSubscriptionRead(d, meta) } @@ -175,6 +186,7 @@ func resourceAwsSnsTopicSubscriptionRead(d *schema.ResourceData, meta interface{ d.Set("arn", attributeOutput.Attributes["SubscriptionArn"]) d.Set("delivery_policy", attributeOutput.Attributes["DeliveryPolicy"]) + d.Set("redrive_policy", attributeOutput.Attributes["RedrivePolicy"]) d.Set("endpoint", attributeOutput.Attributes["Endpoint"]) d.Set("filter_policy", attributeOutput.Attributes["FilterPolicy"]) d.Set("protocol", attributeOutput.Attributes["Protocol"]) @@ -373,6 +385,11 @@ func snsSubscriptionAttributeUpdate(snsconn *sns.SNS, subscriptionArn, attribute AttributeName: aws.String(attributeName), AttributeValue: aws.String(attributeValue), } + + if attributeName == "RedrivePolicy" && attributeValue == "" { + req.AttributeValue = nil + } + _, err := snsconn.SetSubscriptionAttributes(req) if err != nil { @@ -381,6 +398,10 @@ func snsSubscriptionAttributeUpdate(snsconn *sns.SNS, subscriptionArn, attribute return nil } +type snsTopicSubscriptionRedrivePolicy struct { + DeadLetterTargetArn string `json:"deadLetterTargetArn,omitempty"` +} + type snsTopicSubscriptionDeliveryPolicy struct { Guaranteed bool `json:"guaranteed,omitempty"` HealthyRetryPolicy *snsTopicSubscriptionDeliveryPolicyHealthyRetryPolicy `json:"healthyRetryPolicy,omitempty"` diff --git a/aws/resource_aws_sns_topic_subscription_test.go b/aws/resource_aws_sns_topic_subscription_test.go index 083f647965b..57cf1174833 100644 --- a/aws/resource_aws_sns_topic_subscription_test.go +++ b/aws/resource_aws_sns_topic_subscription_test.go @@ -199,6 +199,60 @@ func TestAccAWSSNSTopicSubscription_deliveryPolicy(t *testing.T) { }) } +func TestAccAWSSNSTopicSubscription_redrivePolicy(t *testing.T) { + attributes := make(map[string]string) + resourceName := "aws_sns_topic_subscription.test_subscription" + ri := acctest.RandInt() + dlqQueueName := fmt.Sprintf("queue-dlq-%d", ri) + updatedDlqQueueName := fmt.Sprintf("updated-queue-dlq-%d", ri) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSNSTopicSubscriptionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSNSTopicSubscriptionConfig_redrivePolicy( + ri, + dlqQueueName, + ), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSNSTopicSubscriptionExists(resourceName, attributes), + testAccCheckAWSSNSTopicSubscriptionRedrivePolicyAttribute(attributes, dlqQueueName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "confirmation_timeout_in_minutes", + "endpoint_auto_confirms", + }, + }, + // Test attribute update + { + Config: testAccAWSSNSTopicSubscriptionConfig_redrivePolicy( + ri, + updatedDlqQueueName, + ), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckAWSSNSTopicSubscriptionExists(resourceName, attributes), + testAccCheckAWSSNSTopicSubscriptionRedrivePolicyAttribute(attributes, updatedDlqQueueName), + ), + }, + // Test attribute removal + { + Config: testAccAWSSNSTopicSubscriptionConfig(ri), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSNSTopicSubscriptionExists(resourceName, attributes), + resource.TestCheckResourceAttr(resourceName, "redrive_policy", ""), + ), + }, + }, + }) +} + func TestAccAWSSNSTopicSubscription_rawMessageDelivery(t *testing.T) { attributes := make(map[string]string) resourceName := "aws_sns_topic_subscription.test_subscription" @@ -379,6 +433,37 @@ func testAccCheckAWSSNSTopicSubscriptionDeliveryPolicyAttribute(attributes map[s } } +func testAccCheckAWSSNSTopicSubscriptionRedrivePolicyAttribute(attributes map[string]string, expectedDlqName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + apiRedrivePolicyJSONString, ok := attributes["RedrivePolicy"] + + if !ok { + return fmt.Errorf("RedrivePolicy attribute not found in attributes: %s", attributes) + } + + var apiRedrivePolicy snsTopicSubscriptionRedrivePolicy + if err := json.Unmarshal([]byte(apiRedrivePolicyJSONString), &apiRedrivePolicy); err != nil { + return fmt.Errorf("unable to unmarshal SNS Topic Subscription redrive policy JSON (%s): %s", apiRedrivePolicyJSONString, err) + } + + accountID := testAccProvider.Meta().(*AWSClient).accountid + expectedDlqArn := fmt.Sprintf( + "arn:aws:sqs:us-west-2:%s:%s", + accountID, + expectedDlqName, + ) + expectedRedrivePolicy := &snsTopicSubscriptionRedrivePolicy{ + DeadLetterTargetArn: expectedDlqArn, + } + + if reflect.DeepEqual(apiRedrivePolicy, *expectedRedrivePolicy) { + return nil + } + + return fmt.Errorf("SNS Topic Subscription redrive policy did not match:\n\nReceived\n\n%s\n\nExpected\n\n%s\n\n", apiRedrivePolicy, *expectedRedrivePolicy) + } +} + func TestObfuscateEndpointPassword(t *testing.T) { checks := map[string]string{ "https://example.com/myroute": "https://example.com/myroute", @@ -450,6 +535,29 @@ resource "aws_sns_topic_subscription" "test_subscription" { `, i, i, policy) } +func testAccAWSSNSTopicSubscriptionConfig_redrivePolicy(i int, dlqName string) string { + return fmt.Sprintf(` +resource "aws_sns_topic" "test_topic" { + name = "terraform-test-topic-%d" +} + +resource "aws_sqs_queue" "test_queue" { + name = "terraform-subscription-test-queue-%d" +} + +resource "aws_sqs_queue" "test_queue_dlq" { + name = "%s" +} + +resource "aws_sns_topic_subscription" "test_subscription" { + redrive_policy = %s + endpoint = "${aws_sqs_queue.test_queue.arn}" + protocol = "sqs" + topic_arn = "${aws_sns_topic.test_topic.arn}" +} +`, i, i, dlqName, strconv.Quote(`{"deadLetterTargetArn": "${aws_sqs_queue.test_queue_dlq.arn}"}`)) +} + func testAccAWSSNSTopicSubscriptionConfig_rawMessageDelivery(i int, rawMessageDelivery bool) string { return fmt.Sprintf(` resource "aws_sns_topic" "test_topic" { diff --git a/website/docs/r/sns_topic_subscription.html.markdown b/website/docs/r/sns_topic_subscription.html.markdown index 02e132769ea..d53f6b8186c 100644 --- a/website/docs/r/sns_topic_subscription.html.markdown +++ b/website/docs/r/sns_topic_subscription.html.markdown @@ -239,6 +239,7 @@ The following arguments are supported: * `raw_message_delivery` - (Optional) Boolean indicating whether or not to enable raw message delivery (the original message is directly passed, not wrapped in JSON with the original message in the message property) 
(default is false). * `filter_policy` - (Optional) JSON String with the filter policy that will be used in the subscription to filter messages seen by the target resource. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/message-filtering.html) for more details. * `delivery_policy` - (Optional) JSON String with the delivery policy (retries, backoff, etc.) that will be used in the subscription - this only applies to HTTP/S subscriptions. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/DeliveryPolicies.html) for more details. +* `redrive_policy` - (Optional) JSON String with the redrive policy that will be used in the subscription. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/sns-dead-letter-queues.html#how-messages-moved-into-dead-letter-queue) for more details. ### Protocols supported From 4ac036224780df3dbfb234c22abef073f802a25b Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Sat, 6 Feb 2021 09:57:55 -0500 Subject: [PATCH 1024/1212] CR updates; pass arg on create, generalize test check --- .changelog/11770.txt | 3 + aws/resource_aws_sns_topic_subscription.go | 24 ++++--- ...esource_aws_sns_topic_subscription_test.go | 64 ++++++++++--------- 3 files changed, 53 insertions(+), 38 deletions(-) create mode 100644 .changelog/11770.txt diff --git a/.changelog/11770.txt b/.changelog/11770.txt new file mode 100644 index 00000000000..c51abe16fc6 --- /dev/null +++ b/.changelog/11770.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_sns_topic_subscription: Add `redrive_policy` argument +``` \ No newline at end of file diff --git a/aws/resource_aws_sns_topic_subscription.go b/aws/resource_aws_sns_topic_subscription.go index ec60c7042c2..db3dd5ec76b 100644 --- a/aws/resource_aws_sns_topic_subscription.go +++ b/aws/resource_aws_sns_topic_subscription.go @@ -74,9 +74,10 @@ func resourceAwsSnsTopicSubscription() *schema.Resource { DiffSuppressFunc: suppressEquivalentSnsTopicSubscriptionDeliveryPolicy, }, "redrive_policy": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.ValidateJsonString, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsJSON, + DiffSuppressFunc: suppressEquivalentJsonDiffs, }, "raw_message_delivery": { Type: schema.TypeBool, @@ -186,7 +187,6 @@ func resourceAwsSnsTopicSubscriptionRead(d *schema.ResourceData, meta interface{ d.Set("arn", attributeOutput.Attributes["SubscriptionArn"]) d.Set("delivery_policy", attributeOutput.Attributes["DeliveryPolicy"]) - d.Set("redrive_policy", attributeOutput.Attributes["RedrivePolicy"]) d.Set("endpoint", attributeOutput.Attributes["Endpoint"]) d.Set("filter_policy", attributeOutput.Attributes["FilterPolicy"]) d.Set("protocol", attributeOutput.Attributes["Protocol"]) @@ -196,6 +196,7 @@ func resourceAwsSnsTopicSubscriptionRead(d *schema.ResourceData, meta interface{ d.Set("raw_message_delivery", true) } + d.Set("redrive_policy", attributeOutput.Attributes["RedrivePolicy"]) d.Set("topic_arn", attributeOutput.Attributes["TopicArn"]) return nil @@ -217,6 +218,7 @@ func getResourceAttributes(d *schema.ResourceData) (output map[string]*string) { delivery_policy := d.Get("delivery_policy").(string) filter_policy := d.Get("filter_policy").(string) raw_message_delivery := d.Get("raw_message_delivery").(bool) + redrive_policy := d.Get("redrive_policy").(string) // Collect attributes if available attributes := map[string]*string{} @@ -233,6 +235,10 @@ func getResourceAttributes(d *schema.ResourceData) (output map[string]*string) { 
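// SNS subscription attribute values are strings, so the boolean is serialized as "true"/"false" here.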
attributes["RawMessageDelivery"] = aws.String(fmt.Sprintf("%t", raw_message_delivery)) } + if redrive_policy != "" { + attributes["RedrivePolicy"] = aws.String(redrive_policy) + } + return attributes } @@ -386,6 +392,8 @@ func snsSubscriptionAttributeUpdate(snsconn *sns.SNS, subscriptionArn, attribute AttributeValue: aws.String(attributeValue), } + // The AWS API requires a non-empty string value or nil for the RedrivePolicy attribute, + // else throws an InvalidParameter error if attributeName == "RedrivePolicy" && attributeValue == "" { req.AttributeValue = nil } @@ -398,10 +406,6 @@ func snsSubscriptionAttributeUpdate(snsconn *sns.SNS, subscriptionArn, attribute return nil } -type snsTopicSubscriptionRedrivePolicy struct { - DeadLetterTargetArn string `json:"deadLetterTargetArn,omitempty"` -} - type snsTopicSubscriptionDeliveryPolicy struct { Guaranteed bool `json:"guaranteed,omitempty"` HealthyRetryPolicy *snsTopicSubscriptionDeliveryPolicyHealthyRetryPolicy `json:"healthyRetryPolicy,omitempty"` @@ -465,6 +469,10 @@ func (s snsTopicSubscriptionDeliveryPolicyThrottlePolicy) GoString() string { return s.String() } +type snsTopicSubscriptionRedrivePolicy struct { + DeadLetterTargetArn string `json:"deadLetterTargetArn,omitempty"` +} + func suppressEquivalentSnsTopicSubscriptionDeliveryPolicy(k, old, new string, d *schema.ResourceData) bool { var deliveryPolicy snsTopicSubscriptionDeliveryPolicy diff --git a/aws/resource_aws_sns_topic_subscription_test.go b/aws/resource_aws_sns_topic_subscription_test.go index 57cf1174833..3ddc936a27c 100644 --- a/aws/resource_aws_sns_topic_subscription_test.go +++ b/aws/resource_aws_sns_topic_subscription_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/sns" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" @@ -203,8 +204,8 @@ func TestAccAWSSNSTopicSubscription_redrivePolicy(t *testing.T) { attributes := make(map[string]string) resourceName := "aws_sns_topic_subscription.test_subscription" ri := acctest.RandInt() - dlqQueueName := fmt.Sprintf("queue-dlq-%d", ri) - updatedDlqQueueName := fmt.Sprintf("updated-queue-dlq-%d", ri) + dlqName := fmt.Sprintf("tf-acc-test-queue-dlq-%d", ri) + updatedDlqName := fmt.Sprintf("tf-acc-test-queue-dlq-update-%d", ri) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -212,13 +213,10 @@ func TestAccAWSSNSTopicSubscription_redrivePolicy(t *testing.T) { CheckDestroy: testAccCheckAWSSNSTopicSubscriptionDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSSNSTopicSubscriptionConfig_redrivePolicy( - ri, - dlqQueueName, - ), + Config: testAccAWSSNSTopicSubscriptionConfig_redrivePolicy(ri, dlqName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSNSTopicSubscriptionExists(resourceName, attributes), - testAccCheckAWSSNSTopicSubscriptionRedrivePolicyAttribute(attributes, dlqQueueName), + testAccCheckAWSSNSTopicSubscriptionRedrivePolicyAttribute(attributes, dlqName), ), }, { @@ -232,15 +230,21 @@ func TestAccAWSSNSTopicSubscription_redrivePolicy(t *testing.T) { }, // Test attribute update { - Config: testAccAWSSNSTopicSubscriptionConfig_redrivePolicy( - ri, - updatedDlqQueueName, - ), + Config: testAccAWSSNSTopicSubscriptionConfig_redrivePolicy(ri, updatedDlqName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSNSTopicSubscriptionExists(resourceName, attributes), - 
testAccCheckAWSSNSTopicSubscriptionRedrivePolicyAttribute(attributes, updatedDlqQueueName), + testAccCheckAWSSNSTopicSubscriptionRedrivePolicyAttribute(attributes, updatedDlqName), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "confirmation_timeout_in_minutes", + "endpoint_auto_confirms", + }, + }, // Test attribute removal { Config: testAccAWSSNSTopicSubscriptionConfig(ri), @@ -433,7 +437,7 @@ func testAccCheckAWSSNSTopicSubscriptionDeliveryPolicyAttribute(attributes map[s } } -func testAccCheckAWSSNSTopicSubscriptionRedrivePolicyAttribute(attributes map[string]string, expectedDlqName string) resource.TestCheckFunc { +func testAccCheckAWSSNSTopicSubscriptionRedrivePolicyAttribute(attributes map[string]string, expectedRedrivePolicyResource string) resource.TestCheckFunc { return func(s *terraform.State) error { apiRedrivePolicyJSONString, ok := attributes["RedrivePolicy"] @@ -446,21 +450,21 @@ func testAccCheckAWSSNSTopicSubscriptionRedrivePolicyAttribute(attributes map[st return fmt.Errorf("unable to unmarshal SNS Topic Subscription redrive policy JSON (%s): %s", apiRedrivePolicyJSONString, err) } - accountID := testAccProvider.Meta().(*AWSClient).accountid - expectedDlqArn := fmt.Sprintf( - "arn:aws:sqs:us-west-2:%s:%s", - accountID, - expectedDlqName, - ) - expectedRedrivePolicy := &snsTopicSubscriptionRedrivePolicy{ - DeadLetterTargetArn: expectedDlqArn, + expectedRedrivePolicy := snsTopicSubscriptionRedrivePolicy{ + DeadLetterTargetArn: arn.ARN{ + AccountID: testAccGetAccountID(), + Partition: testAccGetPartition(), + Region: testAccGetRegion(), + Resource: expectedRedrivePolicyResource, + Service: "sqs", + }.String(), } - if reflect.DeepEqual(apiRedrivePolicy, *expectedRedrivePolicy) { + if reflect.DeepEqual(apiRedrivePolicy, expectedRedrivePolicy) { return nil } - return fmt.Errorf("SNS Topic Subscription redrive policy did not match:\n\nReceived\n\n%s\n\nExpected\n\n%s\n\n", apiRedrivePolicy, *expectedRedrivePolicy) + return fmt.Errorf("SNS Topic Subscription redrive policy did not match:\n\nReceived\n\n%s\n\nExpected\n\n%s\n\n", apiRedrivePolicy, expectedRedrivePolicy) } } @@ -538,11 +542,11 @@ resource "aws_sns_topic_subscription" "test_subscription" { func testAccAWSSNSTopicSubscriptionConfig_redrivePolicy(i int, dlqName string) string { return fmt.Sprintf(` resource "aws_sns_topic" "test_topic" { - name = "terraform-test-topic-%d" + name = "terraform-test-topic-%[1]d" } resource "aws_sqs_queue" "test_queue" { - name = "terraform-subscription-test-queue-%d" + name = "terraform-subscription-test-queue-%[1]d" } resource "aws_sqs_queue" "test_queue_dlq" { @@ -550,12 +554,12 @@ resource "aws_sqs_queue" "test_queue_dlq" { } resource "aws_sns_topic_subscription" "test_subscription" { - redrive_policy = %s - endpoint = "${aws_sqs_queue.test_queue.arn}" - protocol = "sqs" - topic_arn = "${aws_sns_topic.test_topic.arn}" + redrive_policy = jsonencode({ deadLetterTargetArn : aws_sqs_queue.test_queue_dlq.arn }) + endpoint = aws_sqs_queue.test_queue.arn + protocol = "sqs" + topic_arn = aws_sns_topic.test_topic.arn } -`, i, i, dlqName, strconv.Quote(`{"deadLetterTargetArn": "${aws_sqs_queue.test_queue_dlq.arn}"}`)) +`, i, dlqName) } func testAccAWSSNSTopicSubscriptionConfig_rawMessageDelivery(i int, rawMessageDelivery bool) string { From 7e60f5e64bc7452fc3952ce521bc7f150292c7da Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 2 Sep 2020 09:03:08 -0400 Subject: [PATCH 1025/1212] 
r/aws_cloudfront_realtime_log_config: New resource. Acceptance testing output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSCloudFrontRealtimeLogConfig_' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccAWSCloudFrontRealtimeLogConfig_ -timeout 120m === RUN TestAccAWSCloudFrontRealtimeLogConfig_basic === PAUSE TestAccAWSCloudFrontRealtimeLogConfig_basic === RUN TestAccAWSCloudFrontRealtimeLogConfig_disappears === PAUSE TestAccAWSCloudFrontRealtimeLogConfig_disappears === RUN TestAccAWSCloudFrontRealtimeLogConfig_updates === PAUSE TestAccAWSCloudFrontRealtimeLogConfig_updates === CONT TestAccAWSCloudFrontRealtimeLogConfig_basic === CONT TestAccAWSCloudFrontRealtimeLogConfig_updates === CONT TestAccAWSCloudFrontRealtimeLogConfig_disappears resource_aws_cloudfront_realtime_log_config_test.go:119: [INFO] Got non-empty plan, as expected --- PASS: TestAccAWSCloudFrontRealtimeLogConfig_disappears (62.40s) --- PASS: TestAccAWSCloudFrontRealtimeLogConfig_basic (65.18s) --- PASS: TestAccAWSCloudFrontRealtimeLogConfig_updates (111.77s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 111.823s --- .../service/cloudfront/finder/finder.go | 25 ++ aws/provider.go | 1 + ...urce_aws_cloudfront_realtime_log_config.go | 244 +++++++++++++ ...aws_cloudfront_realtime_log_config_test.go | 341 ++++++++++++++++++ ...oudfront_realtime_log_config.html.markdown | 109 ++++++ 5 files changed, 720 insertions(+) create mode 100644 aws/internal/service/cloudfront/finder/finder.go create mode 100644 aws/resource_aws_cloudfront_realtime_log_config.go create mode 100644 aws/resource_aws_cloudfront_realtime_log_config_test.go create mode 100644 website/docs/r/cloudfront_realtime_log_config.html.markdown diff --git a/aws/internal/service/cloudfront/finder/finder.go b/aws/internal/service/cloudfront/finder/finder.go new file mode 100644 index 00000000000..baabd19f2fd --- /dev/null +++ b/aws/internal/service/cloudfront/finder/finder.go @@ -0,0 +1,25 @@ +package finder + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudfront" +) + +// RealtimeLogConfigByARN returns the real-time log configuration corresponding to the specified ARN. +// Returns nil if no configuration is found. 
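+// A lookup that reaches the API but matches no configuration surfaces as the
+// cloudfront.ErrCodeNoSuchRealtimeLogConfig error, which callers check for separately.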
+func RealtimeLogConfigByARN(conn *cloudfront.CloudFront, arn string) (*cloudfront.RealtimeLogConfig, error) { + input := &cloudfront.GetRealtimeLogConfigInput{ + ARN: aws.String(arn), + } + + output, err := conn.GetRealtimeLogConfig(input) + if err != nil { + return nil, err + } + + if output == nil { + return nil, nil + } + + return output.RealtimeLogConfig, nil +} diff --git a/aws/provider.go b/aws/provider.go index be2f297a3ad..ab3250a783c 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -490,6 +490,7 @@ func Provider() *schema.Provider { "aws_cloudfront_origin_access_identity": resourceAwsCloudFrontOriginAccessIdentity(), "aws_cloudfront_origin_request_policy": resourceAwsCloudFrontOriginRequestPolicy(), "aws_cloudfront_public_key": resourceAwsCloudFrontPublicKey(), + "aws_cloudfront_realtime_log_config": resourceAwsCloudFrontRealtimeLogConfig(), "aws_cloudtrail": resourceAwsCloudTrail(), "aws_cloudwatch_event_bus": resourceAwsCloudWatchEventBus(), "aws_cloudwatch_event_permission": resourceAwsCloudWatchEventPermission(), diff --git a/aws/resource_aws_cloudfront_realtime_log_config.go b/aws/resource_aws_cloudfront_realtime_log_config.go new file mode 100644 index 00000000000..f2efaeec876 --- /dev/null +++ b/aws/resource_aws_cloudfront_realtime_log_config.go @@ -0,0 +1,244 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudfront" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/cloudfront/finder" +) + +func resourceAwsCloudFrontRealtimeLogConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCloudFrontRealtimeLogConfigCreate, + Read: resourceAwsCloudFrontRealtimeLogConfigRead, + Update: resourceAwsCloudFrontRealtimeLogConfigUpdate, + Delete: resourceAwsCloudFrontRealtimeLogConfigDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "endpoint": { + Type: schema.TypeSet, + Required: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "stream_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"Kinesis"}, false), + }, + + "kinesis_stream_config": { + Type: schema.TypeList, + Required: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "role_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, + }, + + "stream_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, + }, + }, + }, + }, + }, + }, + }, + + "fields": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "sampling_rate": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 100), + }, + }, + } +} + +func resourceAwsCloudFrontRealtimeLogConfigCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudfrontconn + + input := &cloudfront.CreateRealtimeLogConfigInput{ + EndPoints: expandCloudFrontEndPoints(d.Get("endpoint").(*schema.Set).List()), + Fields: expandStringSet(d.Get("fields").(*schema.Set)), + Name: aws.String(d.Get("name").(string)), + 
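+		// sampling_rate is schema-validated to the 1-100 range and sent to the API as an *int64 percentage of requests.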
SamplingRate: aws.Int64(int64(d.Get("sampling_rate").(int))), + } + + log.Printf("[DEBUG] Creating CloudFront Real-time Log Config: %s", input) + output, err := conn.CreateRealtimeLogConfig(input) + + if err != nil { + return fmt.Errorf("error creating CloudFront Real-time Log Config: %w", err) + } + + d.SetId(aws.StringValue(output.RealtimeLogConfig.ARN)) + + return resourceAwsCloudFrontRealtimeLogConfigRead(d, meta) +} + +func resourceAwsCloudFrontRealtimeLogConfigRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudfrontconn + + logConfig, err := finder.RealtimeLogConfigByARN(conn, d.Id()) + + if isAWSErr(err, cloudfront.ErrCodeNoSuchRealtimeLogConfig, "") { + log.Printf("[WARN] CloudFront Real-time Log Config (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error reading CloudFront Real-time Log Config (%s): %w", d.Id(), err) + } + + if logConfig == nil { + log.Printf("[WARN] CloudFront Real-time Log Config (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + d.Set("arn", logConfig.ARN) + if err := d.Set("endpoint", flattenCloudFrontEndPoints(logConfig.EndPoints)); err != nil { + return fmt.Errorf("error setting endpoint: %w", err) + } + if err := d.Set("fields", flattenStringSet(logConfig.Fields)); err != nil { + return fmt.Errorf("error setting fields: %w", err) + } + d.Set("sampling_rate", int(aws.Int64Value(logConfig.SamplingRate))) + + return nil +} + +func resourceAwsCloudFrontRealtimeLogConfigUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudfrontconn + + input := &cloudfront.UpdateRealtimeLogConfigInput{ + ARN: aws.String(d.Id()), + EndPoints: expandCloudFrontEndPoints(d.Get("endpoint").(*schema.Set).List()), + Fields: expandStringSet(d.Get("fields").((*schema.Set))), + SamplingRate: aws.Int64(int64(d.Get("sampling_rate").(int))), + } + + log.Printf("[DEBUG] Updating CloudFront Real-time Log Config: %s", input) + _, err := conn.UpdateRealtimeLogConfig(input) + + if err != nil { + return fmt.Errorf("error updating CloudFront Real-time Log Config (%s): %s", d.Id(), err) + } + + return resourceAwsCloudFrontRealtimeLogConfigRead(d, meta) +} + +func resourceAwsCloudFrontRealtimeLogConfigDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudfrontconn + + log.Printf("[DEBUG] Deleting CloudFront Real-time Log Config (%s)", d.Id()) + _, err := conn.DeleteRealtimeLogConfig(&cloudfront.DeleteRealtimeLogConfigInput{ + ARN: aws.String(d.Id()), + }) + + if isAWSErr(err, cloudfront.ErrCodeNoSuchRealtimeLogConfig, "") { + return nil + } + + if err != nil { + return fmt.Errorf("error deleting CloudFront Real-time Log Config (%s): %w", d.Id(), err) + } + + return nil +} + +func expandCloudFrontEndPoints(vEndpoints []interface{}) []*cloudfront.EndPoint { + if len(vEndpoints) == 0 || vEndpoints[0] == nil { + return nil + } + + endpoints := []*cloudfront.EndPoint{} + + for _, vEndpoint := range vEndpoints { + endpoint := &cloudfront.EndPoint{} + + mEndpoint := vEndpoint.(map[string]interface{}) + + if vStreamType, ok := mEndpoint["stream_type"].(string); ok && vStreamType != "" { + endpoint.StreamType = aws.String(vStreamType) + } + if vKinesisStreamConfig, ok := mEndpoint["kinesis_stream_config"].([]interface{}); ok && len(vKinesisStreamConfig) > 0 && vKinesisStreamConfig[0] != nil { + kinesisStreamConfig := &cloudfront.KinesisStreamConfig{} + + mKinesisStreamConfig := 
vKinesisStreamConfig[0].(map[string]interface{}) + + if vRoleArn, ok := mKinesisStreamConfig["role_arn"].(string); ok && vRoleArn != "" { + kinesisStreamConfig.RoleARN = aws.String(vRoleArn) + } + if vStreamArn, ok := mKinesisStreamConfig["stream_arn"].(string); ok && vStreamArn != "" { + kinesisStreamConfig.StreamARN = aws.String(vStreamArn) + } + + endpoint.KinesisStreamConfig = kinesisStreamConfig + } + + endpoints = append(endpoints, endpoint) + } + + return endpoints +} + +func flattenCloudFrontEndPoints(endpoints []*cloudfront.EndPoint) []interface{} { + if endpoints == nil { + return []interface{}{} + } + + vEndpoints := []interface{}{} + + for _, endpoint := range endpoints { + mEndpoint := map[string]interface{}{ + "stream_type": aws.StringValue(endpoint.StreamType), + } + + if kinesisStreamConfig := endpoint.KinesisStreamConfig; kinesisStreamConfig != nil { + mKinesisStreamConfig := map[string]interface{}{ + "role_arn": aws.StringValue(kinesisStreamConfig.RoleARN), + "stream_arn": aws.StringValue(kinesisStreamConfig.StreamARN), + } + + mEndpoint["kinesis_stream_config"] = []interface{}{mKinesisStreamConfig} + } + + vEndpoints = append(vEndpoints, mEndpoint) + } + + return vEndpoints +} diff --git a/aws/resource_aws_cloudfront_realtime_log_config_test.go b/aws/resource_aws_cloudfront_realtime_log_config_test.go new file mode 100644 index 00000000000..4ed3e62e448 --- /dev/null +++ b/aws/resource_aws_cloudfront_realtime_log_config_test.go @@ -0,0 +1,341 @@ +package aws + +import ( + "fmt" + "log" + "strconv" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudfront" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/cloudfront/finder" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfawsresource" +) + +func init() { + resource.AddTestSweepers("aws_cloudfront_realtime_log_config", &resource.Sweeper{ + Name: "aws_cloudfront_realtime_log_config", + F: testSweepCloudFrontRealtimeLogConfigs, + }) +} + +func testSweepCloudFrontRealtimeLogConfigs(region string) error { + client, err := sharedClientForRegion(region) + if err != nil { + return fmt.Errorf("error getting client: %s", err) + } + conn := client.(*AWSClient).cloudfrontconn + input := &cloudfront.ListRealtimeLogConfigsInput{} + var sweeperErrs *multierror.Error + + for { + output, err := conn.ListRealtimeLogConfigs(input) + + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping CloudFront Real-time Log Configs sweep for %s: %s", region, err) + return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors + } + + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error retrieving CloudFront Real-time Log Configs: %w", err)) + return sweeperErrs + } + + for _, config := range output.RealtimeLogConfigs.Items { + id := aws.StringValue(config.ARN) + + log.Printf("[INFO] Deleting CloudFront Real-time Log Config: %s", id) + r := resourceAwsCloudFrontRealtimeLogConfig() + d := r.Data(nil) + d.SetId(id) + err := r.Delete(d, client) + + if err != nil { + log.Printf("[ERROR] %s", err) + sweeperErrs = multierror.Append(sweeperErrs, err) + continue + } + } + + if aws.StringValue(output.RealtimeLogConfigs.NextMarker) == "" { + break + } + input.Marker = 
output.RealtimeLogConfigs.NextMarker + } + + return sweeperErrs.ErrorOrNil() +} + +func TestAccAWSCloudFrontRealtimeLogConfig_basic(t *testing.T) { + var v cloudfront.RealtimeLogConfig + rName := acctest.RandomWithPrefix("tf-acc-test") + samplingRate := acctest.RandIntRange(1, 100) + resourceName := "aws_cloudfront_realtime_log_config.test" + roleResourceName := "aws_iam_role.test.0" + streamResourceName := "aws_kinesis_stream.test.0" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudFrontRealtimeLogConfigDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCloudFrontRealtimeLogConfigConfig(rName, samplingRate), + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudFrontRealtimeLogConfigExists(resourceName, &v), + testAccCheckResourceAttrGlobalARN(resourceName, "arn", "cloudfront", fmt.Sprintf("realtime-log-config/%s", rName)), + tfawsresource.TestCheckTypeSetElemNestedAttrs(resourceName, "endpoint.*", map[string]string{ + "stream_type": "Kinesis", + "kinesis_stream_config.#": "1", + }), + tfawsresource.TestCheckTypeSetElemAttrPair(resourceName, "endpoint.*.kinesis_stream_config.0.role_arn", roleResourceName, "arn"), + tfawsresource.TestCheckTypeSetElemAttrPair(resourceName, "endpoint.*.kinesis_stream_config.0.stream_arn", streamResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "fields.#", "2"), + tfawsresource.TestCheckTypeSetElemAttr(resourceName, "fields.*", "timestamp"), + tfawsresource.TestCheckTypeSetElemAttr(resourceName, "fields.*", "c-ip"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "sampling_rate", strconv.Itoa(samplingRate)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSCloudFrontRealtimeLogConfig_disappears(t *testing.T) { + var v cloudfront.RealtimeLogConfig + rName := acctest.RandomWithPrefix("tf-acc-test") + samplingRate := acctest.RandIntRange(1, 100) + resourceName := "aws_cloudfront_realtime_log_config.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudFrontRealtimeLogConfigDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCloudFrontRealtimeLogConfigConfig(rName, samplingRate), + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudFrontRealtimeLogConfigExists(resourceName, &v), + testAccCheckResourceDisappears(testAccProvider, resourceAwsCloudFrontRealtimeLogConfig(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccAWSCloudFrontRealtimeLogConfig_updates(t *testing.T) { + var v cloudfront.RealtimeLogConfig + rName := acctest.RandomWithPrefix("tf-acc-test") + samplingRate1 := acctest.RandIntRange(1, 100) + samplingRate2 := acctest.RandIntRange(1, 100) + resourceName := "aws_cloudfront_realtime_log_config.test" + role1ResourceName := "aws_iam_role.test.0" + stream1ResourceName := "aws_kinesis_stream.test.0" + role2ResourceName := "aws_iam_role.test.1" + stream2ResourceName := "aws_kinesis_stream.test.1" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudFrontRealtimeLogConfigDestroy, + Steps: []resource.TestStep{ + { + Config: 
testAccAWSCloudFrontRealtimeLogConfigConfig(rName, samplingRate1), + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudFrontRealtimeLogConfigExists(resourceName, &v), + testAccCheckResourceAttrGlobalARN(resourceName, "arn", "cloudfront", fmt.Sprintf("realtime-log-config/%s", rName)), + tfawsresource.TestCheckTypeSetElemNestedAttrs(resourceName, "endpoint.*", map[string]string{ + "stream_type": "Kinesis", + "kinesis_stream_config.#": "1", + }), + tfawsresource.TestCheckTypeSetElemAttrPair(resourceName, "endpoint.*.kinesis_stream_config.0.role_arn", role1ResourceName, "arn"), + tfawsresource.TestCheckTypeSetElemAttrPair(resourceName, "endpoint.*.kinesis_stream_config.0.stream_arn", stream1ResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "fields.#", "2"), + tfawsresource.TestCheckTypeSetElemAttr(resourceName, "fields.*", "timestamp"), + tfawsresource.TestCheckTypeSetElemAttr(resourceName, "fields.*", "c-ip"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "sampling_rate", strconv.Itoa(samplingRate1)), + ), + }, + { + Config: testAccAWSCloudFrontRealtimeLogConfigConfigUpdated(rName, samplingRate2), + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudFrontRealtimeLogConfigExists(resourceName, &v), + testAccCheckResourceAttrGlobalARN(resourceName, "arn", "cloudfront", fmt.Sprintf("realtime-log-config/%s", rName)), + tfawsresource.TestCheckTypeSetElemNestedAttrs(resourceName, "endpoint.*", map[string]string{ + "stream_type": "Kinesis", + "kinesis_stream_config.#": "1", + }), + tfawsresource.TestCheckTypeSetElemAttrPair(resourceName, "endpoint.*.kinesis_stream_config.0.role_arn", role2ResourceName, "arn"), + tfawsresource.TestCheckTypeSetElemAttrPair(resourceName, "endpoint.*.kinesis_stream_config.0.stream_arn", stream2ResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "fields.#", "3"), + tfawsresource.TestCheckTypeSetElemAttr(resourceName, "fields.*", "c-ip"), + tfawsresource.TestCheckTypeSetElemAttr(resourceName, "fields.*", "cs-host"), + tfawsresource.TestCheckTypeSetElemAttr(resourceName, "fields.*", "sc-status"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "sampling_rate", strconv.Itoa(samplingRate2)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckCloudFrontRealtimeLogConfigDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).cloudfrontconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_cloudfront_realtime_log_config" { + continue + } + + // Try to find the resource + _, err := finder.RealtimeLogConfigByARN(conn, rs.Primary.ID) + // Verify the error is what we want + if isAWSErr(err, cloudfront.ErrCodeNoSuchRealtimeLogConfig, "") { + continue + } + if err != nil { + return err + } + return fmt.Errorf("CloudFront Real-time Log Config still exists: %s", rs.Primary.ID) + } + + return nil +} + +func testAccCheckCloudFrontRealtimeLogConfigExists(n string, v *cloudfront.RealtimeLogConfig) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No CloudFront Real-time Log Config ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).cloudfrontconn + out, err := finder.RealtimeLogConfigByARN(conn, rs.Primary.ID) + if err != 
nil { + return err + } + + *v = *out + + return nil + } +} + +func testAccAWSCloudFrontRealtimeLogConfigConfigBase(rName string, count int) string { + return fmt.Sprintf(` +resource "aws_kinesis_stream" "test" { + count = %[2]d + + name = format("%%s-%%d", %[1]q, count.index) + shard_count = 2 +} + +resource "aws_iam_role" "test" { + count = %[2]d + + name = format("%%s-%%d", %[1]q, count.index) + + assume_role_policy = < Date: Thu, 3 Sep 2020 15:58:40 -0400 Subject: [PATCH 1026/1212] r/aws_cloudfront_distribution: Add 'realtime_log_config_arn' attributes. --- ...nt_distribution_configuration_structure.go | 9 + ...stribution_configuration_structure_test.go | 1 + aws/resource_aws_cloudfront_distribution.go | 10 + ...source_aws_cloudfront_distribution_test.go | 248 ++++++++++++++++++ ...aws_cloudfront_realtime_log_config_test.go | 39 ++- .../r/cloudfront_distribution.html.markdown | 5 +- 6 files changed, 291 insertions(+), 21 deletions(-) diff --git a/aws/cloudfront_distribution_configuration_structure.go b/aws/cloudfront_distribution_configuration_structure.go index 6473b5ccef7..3f298fcab8a 100644 --- a/aws/cloudfront_distribution_configuration_structure.go +++ b/aws/cloudfront_distribution_configuration_structure.go @@ -219,6 +219,9 @@ func expandCloudFrontDefaultCacheBehavior(m map[string]interface{}) *cloudfront. if v, ok := m["cached_methods"]; ok { dcb.AllowedMethods.CachedMethods = expandCachedMethods(v.(*schema.Set)) } + if v, ok := m["realtime_log_config_arn"]; ok && v.(string) != "" { + dcb.RealtimeLogConfigArn = aws.String(v.(string)) + } return dcb } @@ -258,6 +261,10 @@ func expandCacheBehavior(m map[string]interface{}) *cloudfront.CacheBehavior { if v, ok := m["path_pattern"]; ok { cb.PathPattern = aws.String(v.(string)) } + if v, ok := m["realtime_log_config_arn"]; ok && v.(string) != "" { + cb.RealtimeLogConfigArn = aws.String(v.(string)) + } + return cb } @@ -269,6 +276,7 @@ func flattenCloudFrontDefaultCacheBehavior(dcb *cloudfront.DefaultCacheBehavior) "target_origin_id": aws.StringValue(dcb.TargetOriginId), "min_ttl": aws.Int64Value(dcb.MinTTL), "origin_request_policy_id": aws.StringValue(dcb.OriginRequestPolicyId), + "realtime_log_config_arn": aws.StringValue(dcb.RealtimeLogConfigArn), } if dcb.ForwardedValues != nil { @@ -308,6 +316,7 @@ func flattenCacheBehavior(cb *cloudfront.CacheBehavior) map[string]interface{} { m["target_origin_id"] = aws.StringValue(cb.TargetOriginId) m["min_ttl"] = int(aws.Int64Value(cb.MinTTL)) m["origin_request_policy_id"] = aws.StringValue(cb.OriginRequestPolicyId) + m["realtime_log_config_arn"] = aws.StringValue(cb.RealtimeLogConfigArn) if cb.ForwardedValues != nil { m["forwarded_values"] = []interface{}{flattenForwardedValues(cb.ForwardedValues)} diff --git a/aws/cloudfront_distribution_configuration_structure_test.go b/aws/cloudfront_distribution_configuration_structure_test.go index 61659ef7577..a1d4fa2aaf6 100644 --- a/aws/cloudfront_distribution_configuration_structure_test.go +++ b/aws/cloudfront_distribution_configuration_structure_test.go @@ -25,6 +25,7 @@ func defaultCacheBehaviorConf() map[string]interface{} { "cached_methods": cachedMethodsConf(), "compress": true, "field_level_encryption_id": "", + "realtime_log_config_arn": "", } } diff --git a/aws/resource_aws_cloudfront_distribution.go b/aws/resource_aws_cloudfront_distribution.go index f63cfd2745e..f58f818d287 100644 --- a/aws/resource_aws_cloudfront_distribution.go +++ b/aws/resource_aws_cloudfront_distribution.go @@ -159,6 +159,11 @@ func resourceAwsCloudFrontDistribution() 
*schema.Resource { Type: schema.TypeString, Required: true, }, + "realtime_log_config_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, + }, "smooth_streaming": { Type: schema.TypeBool, Optional: true, @@ -321,6 +326,11 @@ func resourceAwsCloudFrontDistribution() *schema.Resource { Type: schema.TypeString, Optional: true, }, + "realtime_log_config_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, + }, "smooth_streaming": { Type: schema.TypeBool, Optional: true, diff --git a/aws/resource_aws_cloudfront_distribution_test.go b/aws/resource_aws_cloudfront_distribution_test.go index 29badfe2ce8..81021940170 100644 --- a/aws/resource_aws_cloudfront_distribution_test.go +++ b/aws/resource_aws_cloudfront_distribution_test.go @@ -624,6 +624,72 @@ func TestAccAWSCloudFrontDistribution_DefaultCacheBehavior_TrustedSigners(t *tes }) } +func TestAccAWSCloudFrontDistribution_DefaultCacheBehavior_RealtimeLogConfigArn(t *testing.T) { + var distribution cloudfront.Distribution + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_cloudfront_distribution.test" + realtimeLogConfigResourceName := "aws_cloudfront_realtime_log_config.test" + retainOnDelete := testAccAWSCloudFrontDistributionRetainOnDeleteFromEnv() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudFrontDistributionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCloudFrontDistributionConfigDefaultCacheBehaviorRealtimeLogConfigArn(rName, retainOnDelete), + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudFrontDistributionExists(resourceName, &distribution), + resource.TestCheckResourceAttr(resourceName, "default_cache_behavior.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "default_cache_behavior.0.realtime_log_config_arn", realtimeLogConfigResourceName, "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "retain_on_delete", + "wait_for_deployment", + }, + }, + }, + }) +} + +func TestAccAWSCloudFrontDistribution_OrderedCacheBehavior_RealtimeLogConfigArn(t *testing.T) { + var distribution cloudfront.Distribution + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_cloudfront_distribution.test" + realtimeLogConfigResourceName := "aws_cloudfront_realtime_log_config.test" + retainOnDelete := testAccAWSCloudFrontDistributionRetainOnDeleteFromEnv() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudFrontDistributionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCloudFrontDistributionConfigOrderedCacheBehaviorRealtimeLogConfigArn(rName, retainOnDelete), + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudFrontDistributionExists(resourceName, &distribution), + resource.TestCheckResourceAttr(resourceName, "ordered_cache_behavior.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "ordered_cache_behavior.0.realtime_log_config_arn", realtimeLogConfigResourceName, "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "retain_on_delete", + "wait_for_deployment", + }, + }, + }, + }) +} + func 
TestAccAWSCloudFrontDistribution_Enabled(t *testing.T) { var distribution cloudfront.Distribution resourceName := "aws_cloudfront_distribution.test" @@ -2826,3 +2892,185 @@ resource "aws_cloudfront_distribution" "test" { } `, enabled, waitForDeployment) } + +func testAccAWSCloudFrontDistributionConfigCacheBehaviorRealtimeLogConfigBase(rName string) string { + return fmt.Sprintf(` +resource "aws_kinesis_stream" "test" { + name = %[1]q + shard_count = 2 +} + +resource "aws_iam_role" "test" { + name = %[1]q + + assume_role_policy = < Date: Fri, 4 Sep 2020 09:15:37 -0400 Subject: [PATCH 1027/1212] r/aws_cloudfront_realtime_log_config: Change 'endpoint' from TypeSet to TypeList as it has only 1 element. Fixes awsproviderlint S018. --- ...urce_aws_cloudfront_realtime_log_config.go | 6 ++-- ...aws_cloudfront_realtime_log_config_test.go | 33 +++++++++---------- 2 files changed, 18 insertions(+), 21 deletions(-) diff --git a/aws/resource_aws_cloudfront_realtime_log_config.go b/aws/resource_aws_cloudfront_realtime_log_config.go index f2efaeec876..3bd3ba74c02 100644 --- a/aws/resource_aws_cloudfront_realtime_log_config.go +++ b/aws/resource_aws_cloudfront_realtime_log_config.go @@ -28,7 +28,7 @@ func resourceAwsCloudFrontRealtimeLogConfig() *schema.Resource { }, "endpoint": { - Type: schema.TypeSet, + Type: schema.TypeList, Required: true, MinItems: 1, MaxItems: 1, @@ -90,7 +90,7 @@ func resourceAwsCloudFrontRealtimeLogConfigCreate(d *schema.ResourceData, meta i conn := meta.(*AWSClient).cloudfrontconn input := &cloudfront.CreateRealtimeLogConfigInput{ - EndPoints: expandCloudFrontEndPoints(d.Get("endpoint").(*schema.Set).List()), + EndPoints: expandCloudFrontEndPoints(d.Get("endpoint").([]interface{})), Fields: expandStringSet(d.Get("fields").(*schema.Set)), Name: aws.String(d.Get("name").(string)), SamplingRate: aws.Int64(int64(d.Get("sampling_rate").(int))), @@ -146,7 +146,7 @@ func resourceAwsCloudFrontRealtimeLogConfigUpdate(d *schema.ResourceData, meta i input := &cloudfront.UpdateRealtimeLogConfigInput{ ARN: aws.String(d.Id()), - EndPoints: expandCloudFrontEndPoints(d.Get("endpoint").(*schema.Set).List()), + EndPoints: expandCloudFrontEndPoints(d.Get("endpoint").([]interface{})), Fields: expandStringSet(d.Get("fields").((*schema.Set))), SamplingRate: aws.Int64(int64(d.Get("sampling_rate").(int))), } diff --git a/aws/resource_aws_cloudfront_realtime_log_config_test.go b/aws/resource_aws_cloudfront_realtime_log_config_test.go index f7fbe67758d..2bcb92202b2 100644 --- a/aws/resource_aws_cloudfront_realtime_log_config_test.go +++ b/aws/resource_aws_cloudfront_realtime_log_config_test.go @@ -87,12 +87,11 @@ func TestAccAWSCloudFrontRealtimeLogConfig_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckCloudFrontRealtimeLogConfigExists(resourceName, &v), testAccCheckResourceAttrGlobalARN(resourceName, "arn", "cloudfront", fmt.Sprintf("realtime-log-config/%s", rName)), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "endpoint.*", map[string]string{ - "stream_type": "Kinesis", - "kinesis_stream_config.#": "1", - }), - resource.TestCheckTypeSetElemAttrPair(resourceName, "endpoint.*.kinesis_stream_config.0.role_arn", roleResourceName, "arn"), - resource.TestCheckTypeSetElemAttrPair(resourceName, "endpoint.*.kinesis_stream_config.0.stream_arn", streamResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "endpoint.#", "1"), + resource.TestCheckResourceAttr(resourceName, "endpoint.0.stream_type", "Kinesis"), + resource.TestCheckResourceAttr(resourceName, 
"endpoint.0.kinesis_stream_config.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "endpoint.0.kinesis_stream_config.0.role_arn", roleResourceName, "arn"), + resource.TestCheckResourceAttrPair(resourceName, "endpoint.0.kinesis_stream_config.0.stream_arn", streamResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "fields.#", "2"), resource.TestCheckTypeSetElemAttr(resourceName, "fields.*", "timestamp"), resource.TestCheckTypeSetElemAttr(resourceName, "fields.*", "c-ip"), @@ -153,12 +152,11 @@ func TestAccAWSCloudFrontRealtimeLogConfig_updates(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckCloudFrontRealtimeLogConfigExists(resourceName, &v), testAccCheckResourceAttrGlobalARN(resourceName, "arn", "cloudfront", fmt.Sprintf("realtime-log-config/%s", rName)), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "endpoint.*", map[string]string{ - "stream_type": "Kinesis", - "kinesis_stream_config.#": "1", - }), - resource.TestCheckTypeSetElemAttrPair(resourceName, "endpoint.*.kinesis_stream_config.0.role_arn", role1ResourceName, "arn"), - resource.TestCheckTypeSetElemAttrPair(resourceName, "endpoint.*.kinesis_stream_config.0.stream_arn", stream1ResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "endpoint.#", "1"), + resource.TestCheckResourceAttr(resourceName, "endpoint.0.stream_type", "Kinesis"), + resource.TestCheckResourceAttr(resourceName, "endpoint.0.kinesis_stream_config.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "endpoint.0.kinesis_stream_config.0.role_arn", role1ResourceName, "arn"), + resource.TestCheckResourceAttrPair(resourceName, "endpoint.0.kinesis_stream_config.0.stream_arn", stream1ResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "fields.#", "2"), resource.TestCheckTypeSetElemAttr(resourceName, "fields.*", "timestamp"), resource.TestCheckTypeSetElemAttr(resourceName, "fields.*", "c-ip"), @@ -171,12 +169,11 @@ func TestAccAWSCloudFrontRealtimeLogConfig_updates(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckCloudFrontRealtimeLogConfigExists(resourceName, &v), testAccCheckResourceAttrGlobalARN(resourceName, "arn", "cloudfront", fmt.Sprintf("realtime-log-config/%s", rName)), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "endpoint.*", map[string]string{ - "stream_type": "Kinesis", - "kinesis_stream_config.#": "1", - }), - resource.TestCheckTypeSetElemAttrPair(resourceName, "endpoint.*.kinesis_stream_config.0.role_arn", role2ResourceName, "arn"), - resource.TestCheckTypeSetElemAttrPair(resourceName, "endpoint.*.kinesis_stream_config.0.stream_arn", stream2ResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "endpoint.#", "1"), + resource.TestCheckResourceAttr(resourceName, "endpoint.0.stream_type", "Kinesis"), + resource.TestCheckResourceAttr(resourceName, "endpoint.0.kinesis_stream_config.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "endpoint.0.kinesis_stream_config.0.role_arn", role2ResourceName, "arn"), + resource.TestCheckResourceAttrPair(resourceName, "endpoint.0.kinesis_stream_config.0.stream_arn", stream2ResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "fields.#", "3"), resource.TestCheckTypeSetElemAttr(resourceName, "fields.*", "c-ip"), resource.TestCheckTypeSetElemAttr(resourceName, "fields.*", "cs-host"), From d56952a395bbb07078ff9eb7c0ba029803f9647d Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sat, 6 Feb 2021 17:38:59 -0500 Subject: [PATCH 1028/1212] Add CHANGELOG entries. 
--- .changelog/14974.txt | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .changelog/14974.txt diff --git a/.changelog/14974.txt b/.changelog/14974.txt new file mode 100644 index 00000000000..a0f239bf6bc --- /dev/null +++ b/.changelog/14974.txt @@ -0,0 +1,7 @@ +```release-note:new-resource +aws_cloudfront_realtime_log_config +``` + +```release-note:enhancement +resource/aws_cloudfront_distribution: Add `realtime_log_config_arn` attribute to `default_cache_behavior` and `ordered_cache_behavior` configuration blocks +``` From c3940be838542a1279118422ababbd53520ae3fc Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sat, 6 Feb 2021 18:08:56 -0500 Subject: [PATCH 1029/1212] r/aws_cloudfront_realtime_log_config: Ensure that 'name' is set on import. --- aws/resource_aws_cloudfront_realtime_log_config.go | 1 + 1 file changed, 1 insertion(+) diff --git a/aws/resource_aws_cloudfront_realtime_log_config.go b/aws/resource_aws_cloudfront_realtime_log_config.go index 3bd3ba74c02..4159d293e93 100644 --- a/aws/resource_aws_cloudfront_realtime_log_config.go +++ b/aws/resource_aws_cloudfront_realtime_log_config.go @@ -136,6 +136,7 @@ func resourceAwsCloudFrontRealtimeLogConfigRead(d *schema.ResourceData, meta int if err := d.Set("fields", flattenStringSet(logConfig.Fields)); err != nil { return fmt.Errorf("error setting fields: %w", err) } + d.Set("name", logConfig.Name) d.Set("sampling_rate", int(aws.Int64Value(logConfig.SamplingRate))) return nil From 1f37e9e8eaf7ff9a21a293a05e9198656e6d5200 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sun, 7 Feb 2021 17:24:03 -0500 Subject: [PATCH 1030/1212] r/aws_apigatewayv2_route: Better documentation for 'authorization_type' and 'target' attributes. --- .../docs/r/apigatewayv2_route.html.markdown | 46 +++++++++++++++---- 1 file changed, 38 insertions(+), 8 deletions(-) diff --git a/website/docs/r/apigatewayv2_route.html.markdown b/website/docs/r/apigatewayv2_route.html.markdown index 2f0087ae5a2..e45a982ef5d 100644 --- a/website/docs/r/apigatewayv2_route.html.markdown +++ b/website/docs/r/apigatewayv2_route.html.markdown @@ -9,37 +9,67 @@ description: |- # Resource: aws_apigatewayv2_route Manages an Amazon API Gateway Version 2 route. -More information can be found in the [Amazon API Gateway Developer Guide](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-websocket-api.html). +More information can be found in the [Amazon API Gateway Developer Guide](https://docs.aws.amazon.com/apigateway/latest/developerguide/welcome.html) for [WebSocket](https://docs.aws.amazon.com/apigateway/latest/developerguide/websocket-api-develop-routes.html) and [HTTP](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-develop-routes.html) APIs. 
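The sketch below shows how a route's `authorization_type` and `authorizer_id` arguments fit together with an authorizer on a WebSocket API. It is illustrative only, not one of the resource's official examples: the `aws_lambda_function.example` resource is assumed to exist, and the sketch builds on the `aws_apigatewayv2_api.example` WebSocket API defined in the Basic example below.

```hcl
resource "aws_apigatewayv2_authorizer" "example" {
  api_id           = aws_apigatewayv2_api.example.id
  authorizer_type  = "REQUEST"
  authorizer_uri   = aws_lambda_function.example.invoke_arn
  identity_sources = ["route.request.header.Auth"]
  name             = "example-authorizer"
}

resource "aws_apigatewayv2_route" "secured" {
  api_id             = aws_apigatewayv2_api.example.id
  route_key          = "sendmessage"
  authorization_type = "CUSTOM"
  authorizer_id      = aws_apigatewayv2_authorizer.example.id
}
```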
## Example Usage ### Basic ```hcl +resource "aws_apigatewayv2_api" "example" { + name = "example-websocket-api" + protocol_type = "WEBSOCKET" + route_selection_expression = "$request.body.action" +} + resource "aws_apigatewayv2_route" "example" { api_id = aws_apigatewayv2_api.example.id route_key = "$default" } ``` +### HTTP Proxy Integration + +```hcl +resource "aws_apigatewayv2_api" "example" { + name = "example-http-api" + protocol_type = "HTTP" +} + +resource "aws_apigatewayv2_integration" "example" { + api_id = aws_apigatewayv2_api.example.id + integration_type = "HTTP_PROXY" + + integration_method = "ANY" + integration_uri = "https://example.com/{proxy}" +} + +resource "aws_apigatewayv2_route" "example" { + api_id = aws_apigatewayv2_api.example.id + route_key = "ANY /example/{proxy+}" + + target = "integrations/${aws_apigatewayv2_integration.example.id}" +} +``` + ## Argument Reference The following arguments are supported: * `api_id` - (Required) The API identifier. * `route_key` - (Required) The route key for the route. For HTTP APIs, the route key can be either `$default`, or a combination of an HTTP method and resource path, for example, `GET /pets`. -* `api_key_required` - (Optional) Boolean whether an API key is required for the route. Defaults to `false`. +* `api_key_required` - (Optional) Boolean whether an API key is required for the route. Defaults to `false`. Supported only for WebSocket APIs. * `authorization_scopes` - (Optional) The authorization scopes supported by this route. The scopes are used with a JWT authorizer to authorize the method invocation. * `authorization_type` - (Optional) The authorization type for the route. For WebSocket APIs, valid values are `NONE` for open access, `AWS_IAM` for using AWS IAM permissions, and `CUSTOM` for using a Lambda authorizer. -For HTTP APIs, valid values are `NONE` for open access, or `JWT` for using JSON Web Tokens. +For HTTP APIs, valid values are `NONE` for open access, `JWT` for using JSON Web Tokens, `AWS_IAM` for using AWS IAM permissions, and `CUSTOM` for using a Lambda authorizer. Defaults to `NONE`. -* `authorizer_id` - (Optional) The identifier of the [`aws_apigatewayv2_authorizer`](/docs/providers/aws/r/apigatewayv2_authorizer.html) resource to be associated with this route, if the authorizationType is `CUSTOM`. -* `model_selection_expression` - (Optional) The [model selection expression](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-websocket-api-selection-expressions.html#apigateway-websocket-api-model-selection-expressions) for the route. +* `authorizer_id` - (Optional) The identifier of the [`aws_apigatewayv2_authorizer`](apigatewayv2_authorizer.html) resource to be associated with this route. +* `model_selection_expression` - (Optional) The [model selection expression](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-websocket-api-selection-expressions.html#apigateway-websocket-api-model-selection-expressions) for the route. Supported only for WebSocket APIs. * `operation_name` - (Optional) The operation name for the route. Must be between 1 and 64 characters in length. -* `request_models` - (Optional) The request models for the route. -* `route_response_selection_expression` - (Optional) The [route response selection expression](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-websocket-api-selection-expressions.html#apigateway-websocket-api-route-response-selection-expressions) for the route. 
-* `target` - (Optional) The target for the route. Must be between 1 and 128 characters in length. +* `request_models` - (Optional) The request models for the route. Supported only for WebSocket APIs. +* `route_response_selection_expression` - (Optional) The [route response selection expression](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-websocket-api-selection-expressions.html#apigateway-websocket-api-route-response-selection-expressions) for the route. Supported only for WebSocket APIs. +* `target` - (Optional) The target for the route, of the form `integrations/`*`IntegrationID`*, where *`IntegrationID`* is the identifier of an [`aws_apigatewayv2_integration`](apigatewayv2_integration.html) resource. ## Attributes Reference From 9c4cfa4792421f0ac9ebec411e6dca1bbf8bddf8 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Sun, 7 Feb 2021 21:58:30 -0500 Subject: [PATCH 1031/1212] new resource: securityhub_organization_admin_account --- .../service/securityhub/finder/finder.go | 32 +++++ .../service/securityhub/waiter/status.go | 33 +++++ .../service/securityhub/waiter/waiter.go | 52 +++++++ aws/provider.go | 1 + aws/resource_aws_securityhub_account.go | 28 +++- ...urce_aws_securityhub_action_target_test.go | 6 + ..._securityhub_organization_admin_account.go | 112 +++++++++++++++ ...rityhub_organization_admin_account_test.go | 136 ++++++++++++++++++ aws/resource_aws_securityhub_test.go | 4 + ...b_organization_admin_account.html.markdown | 48 +++++++ 10 files changed, 450 insertions(+), 2 deletions(-) create mode 100644 aws/internal/service/securityhub/finder/finder.go create mode 100644 aws/internal/service/securityhub/waiter/status.go create mode 100644 aws/internal/service/securityhub/waiter/waiter.go create mode 100644 aws/resource_aws_securityhub_organization_admin_account.go create mode 100644 aws/resource_aws_securityhub_organization_admin_account_test.go create mode 100644 website/docs/r/securityhub_organization_admin_account.html.markdown diff --git a/aws/internal/service/securityhub/finder/finder.go b/aws/internal/service/securityhub/finder/finder.go new file mode 100644 index 00000000000..e13cd6a0708 --- /dev/null +++ b/aws/internal/service/securityhub/finder/finder.go @@ -0,0 +1,32 @@ +package finder + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/securityhub" +) + +func AdminAccount(conn *securityhub.SecurityHub, adminAccountID string) (*securityhub.AdminAccount, error) { + input := &securityhub.ListOrganizationAdminAccountsInput{} + var result *securityhub.AdminAccount + + err := conn.ListOrganizationAdminAccountsPages(input, func(page *securityhub.ListOrganizationAdminAccountsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, adminAccount := range page.AdminAccounts { + if adminAccount == nil { + continue + } + + if aws.StringValue(adminAccount.AccountId) == adminAccountID { + result = adminAccount + return false + } + } + + return !lastPage + }) + + return result, err +} diff --git a/aws/internal/service/securityhub/waiter/status.go b/aws/internal/service/securityhub/waiter/status.go new file mode 100644 index 00000000000..6ab9c336004 --- /dev/null +++ b/aws/internal/service/securityhub/waiter/status.go @@ -0,0 +1,33 @@ +package waiter + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/securityhub" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + 
"github.com/terraform-providers/terraform-provider-aws/aws/internal/service/securityhub/finder" +) + +const ( + // AdminStatus NotFound + AdminStatusNotFound = "NotFound" + + // AdminStatus Unknown + AdminStatusUnknown = "Unknown" +) + +// AdminAccountAdminStatus fetches the AdminAccount and its AdminStatus +func AdminAccountAdminStatus(conn *securityhub.SecurityHub, adminAccountID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + adminAccount, err := finder.AdminAccount(conn, adminAccountID) + + if err != nil { + return nil, AdminStatusUnknown, err + } + + if adminAccount == nil { + return adminAccount, AdminStatusNotFound, nil + } + + return adminAccount, aws.StringValue(adminAccount.Status), nil + } +} diff --git a/aws/internal/service/securityhub/waiter/waiter.go b/aws/internal/service/securityhub/waiter/waiter.go new file mode 100644 index 00000000000..deac42a9091 --- /dev/null +++ b/aws/internal/service/securityhub/waiter/waiter.go @@ -0,0 +1,52 @@ +package waiter + +import ( + "time" + + "github.com/aws/aws-sdk-go/service/securityhub" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const ( + // Maximum amount of time to wait for an AdminAccount to return Enabled + AdminAccountEnabledTimeout = 5 * time.Minute + + // Maximum amount of time to wait for an AdminAccount to return NotFound + AdminAccountNotFoundTimeout = 5 * time.Minute +) + +// AdminAccountEnabled waits for an AdminAccount to return Enabled +func AdminAccountEnabled(conn *securityhub.SecurityHub, adminAccountID string) (*securityhub.AdminAccount, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{AdminStatusNotFound}, + Target: []string{securityhub.AdminStatusEnabled}, + Refresh: AdminAccountAdminStatus(conn, adminAccountID), + Timeout: AdminAccountEnabledTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*securityhub.AdminAccount); ok { + return output, err + } + + return nil, err +} + +// AdminAccountNotFound waits for an AdminAccount to return NotFound +func AdminAccountNotFound(conn *securityhub.SecurityHub, adminAccountID string) (*securityhub.AdminAccount, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{securityhub.AdminStatusDisableInProgress}, + Target: []string{AdminStatusNotFound}, + Refresh: AdminAccountAdminStatus(conn, adminAccountID), + Timeout: AdminAccountNotFoundTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*securityhub.AdminAccount); ok { + return output, err + } + + return nil, err +} diff --git a/aws/provider.go b/aws/provider.go index be2f297a3ad..c350c16fdd4 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -936,6 +936,7 @@ func Provider() *schema.Provider { "aws_securityhub_account": resourceAwsSecurityHubAccount(), "aws_securityhub_action_target": resourceAwsSecurityHubActionTarget(), "aws_securityhub_member": resourceAwsSecurityHubMember(), + "aws_securityhub_organization_admin_account": resourceAwsSecurityHubOrganizationAdminAccount(), "aws_securityhub_product_subscription": resourceAwsSecurityHubProductSubscription(), "aws_securityhub_standards_subscription": resourceAwsSecurityHubStandardsSubscription(), "aws_servicecatalog_portfolio": resourceAwsServiceCatalogPortfolio(), diff --git a/aws/resource_aws_securityhub_account.go b/aws/resource_aws_securityhub_account.go index 49bb9fc4a4c..356eb361273 100644 --- a/aws/resource_aws_securityhub_account.go +++ b/aws/resource_aws_securityhub_account.go @@ -5,7 
+5,11 @@ import ( "log" "github.com/aws/aws-sdk-go/service/securityhub" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/securityhub/waiter" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" ) func resourceAwsSecurityHubAccount() *schema.Resource { @@ -58,10 +62,30 @@ func resourceAwsSecurityHubAccountDelete(d *schema.ResourceData, meta interface{ conn := meta.(*AWSClient).securityhubconn log.Print("[DEBUG] Disabling Security Hub for account") - _, err := conn.DisableSecurityHub(&securityhub.DisableSecurityHubInput{}) + err := resource.Retry(waiter.AdminAccountNotFoundTimeout, func() *resource.RetryError { + _, err := conn.DisableSecurityHub(&securityhub.DisableSecurityHubInput{}) + + if tfawserr.ErrMessageContains(err, securityhub.ErrCodeInvalidInputException, "Cannot disable Security Hub on the Security Hub administrator") { + return resource.RetryableError(err) + } + + if err != nil { + return resource.NonRetryableError(err) + } + + return nil + }) + + if tfresource.TimedOut(err) { + _, err = conn.DisableSecurityHub(&securityhub.DisableSecurityHubInput{}) + } + + if tfawserr.ErrCodeEquals(err, securityhub.ErrCodeResourceNotFoundException) { + return nil + } if err != nil { - return fmt.Errorf("Error disabling Security Hub for account: %s", err) + return fmt.Errorf("Error disabling Security Hub for account: %w", err) } return nil diff --git a/aws/resource_aws_securityhub_action_target_test.go b/aws/resource_aws_securityhub_action_target_test.go index 7648ab476e7..bf6b21c0cb9 100644 --- a/aws/resource_aws_securityhub_action_target_test.go +++ b/aws/resource_aws_securityhub_action_target_test.go @@ -4,6 +4,8 @@ import ( "fmt" "testing" + "github.com/aws/aws-sdk-go/service/securityhub" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) @@ -154,6 +156,10 @@ func testAccCheckAwsSecurityHubActionTargetDestroy(s *terraform.State) error { action, err := resourceAwsSecurityHubActionTargetCheckExists(conn, rs.Primary.ID) + if tfawserr.ErrMessageContains(err, securityhub.ErrCodeInvalidAccessException, "not subscribed to AWS Security Hub") { + continue + } + if err != nil { return err } diff --git a/aws/resource_aws_securityhub_organization_admin_account.go b/aws/resource_aws_securityhub_organization_admin_account.go new file mode 100644 index 00000000000..0f1846c37f5 --- /dev/null +++ b/aws/resource_aws_securityhub_organization_admin_account.go @@ -0,0 +1,112 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/securityhub" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/securityhub/finder" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/securityhub/waiter" +) + +func resourceAwsSecurityHubOrganizationAdminAccount() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSecurityHubOrganizationAdminAccountCreate, + Read: resourceAwsSecurityHubOrganizationAdminAccountRead, + Delete: resourceAwsSecurityHubOrganizationAdminAccountDelete, + + Importer: &schema.ResourceImporter{ + State: 
schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "admin_account_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateAwsAccountId, + }, + }, + } +} + +func resourceAwsSecurityHubOrganizationAdminAccountCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).securityhubconn + + adminAccountID := d.Get("admin_account_id").(string) + + input := &securityhub.EnableOrganizationAdminAccountInput{ + AdminAccountId: aws.String(adminAccountID), + } + + _, err := conn.EnableOrganizationAdminAccount(input) + + if err != nil { + return fmt.Errorf("error enabling Security Hub Organization Admin Account (%s): %w", adminAccountID, err) + } + + d.SetId(adminAccountID) + + if _, err := waiter.AdminAccountEnabled(conn, d.Id()); err != nil { + return fmt.Errorf("error waiting for Security Hub Organization Admin Account (%s) to enable: %w", d.Id(), err) + } + + return resourceAwsSecurityHubOrganizationAdminAccountRead(d, meta) +} + +func resourceAwsSecurityHubOrganizationAdminAccountRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).securityhubconn + + adminAccount, err := finder.AdminAccount(conn, d.Id()) + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, securityhub.ErrCodeResourceNotFoundException) { + log.Printf("[WARN] Security Hub Organization Admin Account (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error reading Security Hub Organization Admin Account (%s): %w", d.Id(), err) + } + + if adminAccount == nil { + if d.IsNewResource() { + return fmt.Errorf("error reading Security Hub Organization Admin Account (%s): %w", d.Id(), err) + } + + log.Printf("[WARN] Security Hub Organization Admin Account (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + d.Set("admin_account_id", adminAccount.AccountId) + + return nil +} + +func resourceAwsSecurityHubOrganizationAdminAccountDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).securityhubconn + + input := &securityhub.DisableOrganizationAdminAccountInput{ + AdminAccountId: aws.String(d.Id()), + } + + _, err := conn.DisableOrganizationAdminAccount(input) + + if tfawserr.ErrCodeEquals(err, securityhub.ErrCodeResourceNotFoundException) { + return nil + } + + if err != nil { + return fmt.Errorf("error disabling Security Hub Organization Admin Account (%s): %w", d.Id(), err) + } + + if _, err := waiter.AdminAccountNotFound(conn, d.Id()); err != nil { + return fmt.Errorf("error waiting for Security Hub Organization Admin Account (%s) to disable: %w", d.Id(), err) + } + + return nil +} diff --git a/aws/resource_aws_securityhub_organization_admin_account_test.go b/aws/resource_aws_securityhub_organization_admin_account_test.go new file mode 100644 index 00000000000..165d0e0cc14 --- /dev/null +++ b/aws/resource_aws_securityhub_organization_admin_account_test.go @@ -0,0 +1,136 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/service/securityhub" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/securityhub/finder" +) + +func testAccAwsSecurityHubOrganizationAdminAccount_basic(t *testing.T) { + resourceName := "aws_securityhub_organization_admin_account.test" + + 
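	// NOTE: this function deliberately uses the lowercase testAcc prefix and
	// resource.Test rather than resource.ParallelTest: it is registered in the
	// TestAccAWSSecurityHub_serial map (see resource_aws_securityhub_test.go
	// later in this patch), since these tests mutate account-level Security Hub
	// state and cannot safely run concurrently.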
resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccOrganizationsAccountPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsSecurityHubOrganizationAdminAccountDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSecurityHubOrganizationAdminAccountConfigSelf(), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsSecurityHubOrganizationAdminAccountExists(resourceName), + testAccCheckResourceAttrAccountID(resourceName, "admin_account_id"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccAwsSecurityHubOrganizationAdminAccount_disappears(t *testing.T) { + resourceName := "aws_securityhub_organization_admin_account.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccOrganizationsAccountPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsSecurityHubOrganizationAdminAccountDestroy, + Steps: []resource.TestStep{ + { + Config: testAccSecurityHubOrganizationAdminAccountConfigSelf(), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsSecurityHubOrganizationAdminAccountExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsSecurityHubOrganizationAdminAccount(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckAwsSecurityHubOrganizationAdminAccountDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).securityhubconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_securityhub_organization_admin_account" { + continue + } + + adminAccount, err := finder.AdminAccount(conn, rs.Primary.ID) + + // Because of this resource's dependency, the Organizations organization + // will be deleted first, resulting in the following valid error + if tfawserr.ErrMessageContains(err, securityhub.ErrCodeAccessDeniedException, "account is not a member of an organization") { + continue + } + + if err != nil { + return err + } + + if adminAccount == nil { + continue + } + + return fmt.Errorf("expected Security Hub Organization Admin Account (%s) to be removed", rs.Primary.ID) + } + + return nil +} + +func testAccCheckAwsSecurityHubOrganizationAdminAccountExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + conn := testAccProvider.Meta().(*AWSClient).securityhubconn + + adminAccount, err := finder.AdminAccount(conn, rs.Primary.ID) + + if err != nil { + return err + } + + if adminAccount == nil { + return fmt.Errorf("Security Hub Organization Admin Account (%s) not found", rs.Primary.ID) + } + + return nil + } +} + +func testAccSecurityHubOrganizationAdminAccountConfigSelf() string { + return ` +data "aws_caller_identity" "current" {} + +data "aws_partition" "current" {} + +resource "aws_organizations_organization" "test" { + aws_service_access_principals = ["securityhub.${data.aws_partition.current.dns_suffix}"] + feature_set = "ALL" +} + +resource "aws_securityhub_account" "test" {} + +resource "aws_securityhub_organization_admin_account" "test" { + depends_on = [aws_organizations_organization.test] + + admin_account_id = data.aws_caller_identity.current.account_id +} +` +} diff --git a/aws/resource_aws_securityhub_test.go b/aws/resource_aws_securityhub_test.go index cdc4edeabbf..d08c0b1e647 100644 --- 
a/aws/resource_aws_securityhub_test.go +++ b/aws/resource_aws_securityhub_test.go @@ -19,6 +19,10 @@ func TestAccAWSSecurityHub_serial(t *testing.T) { "Description": testAccAwsSecurityHubActionTarget_Description, "Name": testAccAwsSecurityHubActionTarget_Name, }, + "OrganizationAdminAccount": { + "basic": testAccAwsSecurityHubOrganizationAdminAccount_basic, + "disappears": testAccAwsSecurityHubOrganizationAdminAccount_disappears, + }, "ProductSubscription": { "basic": testAccAWSSecurityHubProductSubscription_basic, }, diff --git a/website/docs/r/securityhub_organization_admin_account.html.markdown b/website/docs/r/securityhub_organization_admin_account.html.markdown new file mode 100644 index 00000000000..54760557bf3 --- /dev/null +++ b/website/docs/r/securityhub_organization_admin_account.html.markdown @@ -0,0 +1,48 @@ +--- +subcategory: "Security Hub" +layout: "aws" +page_title: "AWS: aws_securityhub_organization_admin_account" +description: |- + Manages a Security Hub administrator account for an organization. +--- + +# Resource: aws_securityhub_organization_admin_account + +Manages a Security Hub administrator account for an organization. The AWS account utilizing this resource must be an Organizations primary account. More information about Organizations support in Security Hub can be found in the [Security Hub User Guide](https://docs.aws.amazon.com/securityhub/latest/userguide/designate-orgs-admin-account.html). + +## Example Usage + +```hcl +resource "aws_organizations_organization" "example" { + aws_service_access_principals = ["securityhub.amazonaws.com"] + feature_set = "ALL" +} + +resource "aws_securityhub_account" "example" {} + +resource "aws_securityhub_organization_admin_account" "example" { + depends_on = [aws_organizations_organization.example] + + admin_account_id = "123456789012" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `admin_account_id` - (Required) The AWS account identifier of the account to designate as the Security Hub administrator account. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - AWS account identifier. + +## Import + +Security Hub Organization Admin Accounts can be imported using the AWS account ID, e.g. 
+ +``` +$ terraform import aws_securityhub_organization_admin_account.example 123456789012 +``` From 49da3cc1525075b733fd1c002b90f95872b2254b Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Mon, 8 Feb 2021 00:56:27 -0500 Subject: [PATCH 1032/1212] Update CHANGELOG for #17501 --- .changelog/17501.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/17501.txt diff --git a/.changelog/17501.txt b/.changelog/17501.txt new file mode 100644 index 00000000000..a6b764a31b2 --- /dev/null +++ b/.changelog/17501.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_securityhub_organization_admin_account +``` \ No newline at end of file From 509d38b9d6bb204870a9a431332a1073c03f3858 Mon Sep 17 00:00:00 2001 From: angie pinilla Date: Mon, 8 Feb 2021 13:22:25 -0500 Subject: [PATCH 1033/1212] Update CHANGELOG for #11770 --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7caded3ed3d..959f391089b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## 3.28.0 (Unreleased) +ENHANCEMENTS: + +* resource/aws_sns_topic_subscription: Add `redrive_policy` argument [GH-11770] + ## 3.27.0 (February 05, 2021) FEATURES: From 7a0bb65e526f2a91aca6afb5082dd5208e7f6fee Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Mon, 8 Feb 2021 15:22:40 -0500 Subject: [PATCH 1034/1212] provider: Add terraform-provider-aws/VERSION to User-Agent header (#17486) * provider: Add terraform-provider-aws/VERSION to User-Agent header Reference: https://github.com/hashicorp/terraform-provider-aws/issues/17483 Filled in during the TeamCity release process and future-proofed with `.goreleaser.yml` update. * Update CHANGELOG for #17486 --- .changelog/17486.txt | 3 +++ .goreleaser.yml | 2 +- aws/config.go | 5 +++-- version/version.go | 4 ++++ 4 files changed, 11 insertions(+), 3 deletions(-) create mode 100644 .changelog/17486.txt create mode 100644 version/version.go diff --git a/.changelog/17486.txt b/.changelog/17486.txt new file mode 100644 index 00000000000..4c1c97d637d --- /dev/null +++ b/.changelog/17486.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +provider: Add terraform-provider-aws version to HTTP User-Agent header +``` diff --git a/.goreleaser.yml b/.goreleaser.yml index 8d1a78155dd..fe1eb97fe8c 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -24,7 +24,7 @@ builds: - goarch: '386' goos: darwin ldflags: - - -s -w -X aws/version.ProviderVersion={{.Version}} + - -s -w -X version.ProviderVersion={{.Version}} mod_timestamp: '{{ .CommitTimestamp }}' changelog: skip: true diff --git a/aws/config.go b/aws/config.go index eb4030d828d..d8138aa4f11 100644 --- a/aws/config.go +++ b/aws/config.go @@ -168,6 +168,7 @@ import ( "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/version" ) type Config struct { @@ -425,8 +426,8 @@ func (c *Config) Client() (interface{}, error) { UserAgentProducts: []*awsbase.UserAgentProduct{ {Name: "APN", Version: "1.0"}, {Name: "HashiCorp", Version: "1.0"}, - {Name: "Terraform", Version: c.terraformVersion, - Extra: []string{"+https://www.terraform.io"}}, + {Name: "Terraform", Version: c.terraformVersion, Extra: []string{"+https://www.terraform.io"}}, + {Name: "terraform-provider-aws", Version: version.ProviderVersion, Extra: []string{"+https://registry.terraform.io/providers/hashicorp/aws"}}, }, } diff --git a/version/version.go 
b/version/version.go new file mode 100644 index 00000000000..081f1948ffe --- /dev/null +++ b/version/version.go @@ -0,0 +1,4 @@ +package version + +// ProviderVersion is set during the release process to the release version of the binary +var ProviderVersion = "dev" From 3408713a4d636e4c9f074f8d5185b19daa65c5f3 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Mon, 8 Feb 2021 20:24:45 +0000 Subject: [PATCH 1035/1212] Update CHANGELOG.md for #17486 --- CHANGELOG.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 959f391089b..f07198be927 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,9 @@ ## 3.28.0 (Unreleased) -ENHANCEMENTS: +ENHANCEMENTS: -* resource/aws_sns_topic_subscription: Add `redrive_policy` argument [GH-11770] +* provider: Add terraform-provider-aws version to HTTP User-Agent header ([#17486](https://github.com/hashicorp/terraform-provider-aws/issues/17486)) +* resource/aws_sns_topic_subscription: Add `redrive_policy` argument ([#11770](https://github.com/hashicorp/terraform-provider-aws/issues/11770)) ## 3.27.0 (February 05, 2021) From 6095c79597690975997f292a13e8a43267090b68 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 8 Feb 2021 17:48:46 -0800 Subject: [PATCH 1036/1212] Fixes errorlint reports for internal packages and data sources A-E --- aws/awserr.go | 21 ++++---------- aws/config.go | 7 +---- aws/configservice.go | 2 +- aws/data_source_aws_acm_certificate.go | 8 +++--- ...source_aws_acmpca_certificate_authority.go | 15 +++++----- aws/data_source_aws_ami.go | 2 +- aws/data_source_aws_api_gateway_api_key.go | 2 +- ...data_source_aws_api_gateway_domain_name.go | 4 +-- aws/data_source_aws_api_gateway_resource.go | 2 +- aws/data_source_aws_api_gateway_rest_api.go | 6 ++-- aws/data_source_aws_api_gateway_vpc_link.go | 4 +-- aws/data_source_aws_arn.go | 2 +- aws/data_source_aws_autoscaling_groups.go | 6 ++-- aws/data_source_aws_availability_zones.go | 8 +++--- aws/data_source_aws_backup_plan.go | 6 ++-- aws/data_source_aws_backup_selection.go | 4 +-- aws/data_source_aws_backup_vault.go | 6 ++-- aws/data_source_aws_batch_job_queue.go | 4 +-- aws/data_source_aws_caller_identity.go | 2 +- aws/data_source_aws_cloudformation_export.go | 2 +- aws/data_source_aws_cloudformation_stack.go | 6 ++-- ...data_source_aws_cloudfront_distribution.go | 2 +- ...ce_aws_cloudfront_origin_request_policy.go | 4 +-- aws/data_source_aws_cloudhsm2_cluster.go | 6 ++-- aws/data_source_aws_cloudwatch_log_group.go | 4 +-- aws/data_source_aws_codecommit_repository.go | 2 +- aws/data_source_aws_cognito_user_pools.go | 2 +- aws/data_source_aws_customer_gateway.go | 6 ++-- aws/data_source_aws_db_cluster_snapshot.go | 6 ++-- aws/data_source_aws_db_event_categories.go | 2 +- aws/data_source_aws_db_instance.go | 14 +++++----- aws/data_source_aws_dx_gateway.go | 2 +- aws/data_source_aws_dynamodb_table.go | 10 +++---- aws/data_source_aws_ebs_default_kms_key.go | 2 +- ...ta_source_aws_ebs_encryption_by_default.go | 2 +- aws/data_source_aws_ebs_snapshot.go | 2 +- aws/data_source_aws_ebs_volume.go | 2 +- aws/data_source_aws_ec2_coip_pool.go | 4 +-- ..._source_aws_ec2_instance_type_offerings.go | 2 +- aws/data_source_aws_ec2_local_gateway.go | 2 +- ...ource_aws_ec2_local_gateway_route_table.go | 2 +- aws/data_source_aws_ec2_transit_gateway.go | 4 +-- ...2_transit_gateway_dx_gateway_attachment.go | 4 +-- ..._ec2_transit_gateway_peering_attachment.go | 4 +-- ...rce_aws_ec2_transit_gateway_route_table.go | 4 +-- 
..._aws_ec2_transit_gateway_vpc_attachment.go | 6 ++-- ..._aws_ec2_transit_gateway_vpn_attachment.go | 4 +-- ...data_source_aws_ecr_authorization_token.go | 4 +-- aws/data_source_aws_ecr_image.go | 12 ++++---- aws/data_source_aws_ecs_cluster.go | 2 +- aws/data_source_aws_ecs_task_definition.go | 2 +- aws/data_source_aws_efs_access_point.go | 8 +++--- aws/data_source_aws_efs_file_system.go | 2 +- aws/data_source_aws_efs_mount_target.go | 2 +- aws/data_source_aws_eip.go | 4 +-- aws/data_source_aws_eks_cluster.go | 12 ++++---- aws/data_source_aws_eks_cluster_auth.go | 4 +-- ...ource_aws_elastic_beanstalk_application.go | 2 +- aws/data_source_aws_elasticsearch_domain.go | 28 +++++++++---------- aws/data_source_aws_elb.go | 2 +- aws/internal/naming/naming.go | 2 +- .../service/cloudformation/waiter/status.go | 2 +- .../service/datasync/waiter/waiter.go | 16 +++++------ aws/internal/service/eks/token/arn.go | 3 +- aws/internal/service/eks/token/token.go | 3 +- aws/internal/service/eks/token/token_test.go | 3 +- aws/internal/service/route53/waiter/waiter.go | 16 +++++------ aws/internal/tfresource/errors.go | 3 +- aws/opsworks_layers.go | 2 +- docs/contributing/contribution-checklists.md | 4 +-- 70 files changed, 174 insertions(+), 187 deletions(-) diff --git a/aws/awserr.go b/aws/awserr.go index d0454afecfb..cc1c6ba1d0a 100644 --- a/aws/awserr.go +++ b/aws/awserr.go @@ -2,10 +2,10 @@ package aws import ( "errors" - "strings" "time" "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" ) @@ -15,11 +15,7 @@ import ( // * Error.Code() matches code // * Error.Message() contains message func isAWSErr(err error, code string, message string) bool { - var awsErr awserr.Error - if errors.As(err, &awsErr) { - return awsErr.Code() == code && strings.Contains(awsErr.Message(), message) - } - return false + return tfawserr.ErrMessageContains(err, code, message) } // Returns true if the error matches all these conditions: @@ -28,11 +24,7 @@ func isAWSErr(err error, code string, message string) bool { // It is always preferable to use isAWSErr() except in older APIs (e.g. S3) // that sometimes only respond with status codes. 
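// As with isAWSErr above, this now simply delegates to the
// hashicorp/aws-sdk-go-base tfawserr helper, which matches via errors.As and
// therefore also handles errors wrapped with %w.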
func isAWSErrRequestFailureStatusCode(err error, statusCode int) bool { - var awsErr awserr.RequestFailure - if errors.As(err, &awsErr) { - return awsErr.StatusCode() == statusCode - } - return false + return tfawserr.ErrStatusCodeEquals(err, statusCode) } func retryOnAwsCode(code string, f func() (interface{}, error)) (interface{}, error) { @@ -41,8 +33,7 @@ func retryOnAwsCode(code string, f func() (interface{}, error)) (interface{}, er var err error resp, err = f() if err != nil { - awsErr, ok := err.(awserr.Error) - if ok && awsErr.Code() == code { + if tfawserr.ErrCodeEquals(err, code) { return resource.RetryableError(err) } return resource.NonRetryableError(err) @@ -65,8 +56,8 @@ func RetryOnAwsCodes(codes []string, f func() (interface{}, error)) (interface{} var err error resp, err = f() if err != nil { - awsErr, ok := err.(awserr.Error) - if ok { + var awsErr awserr.Error + if errors.As(err, &awsErr) { for _, code := range codes { if awsErr.Code() == code { return resource.RetryableError(err) diff --git a/aws/config.go b/aws/config.go index d8138aa4f11..0493b8d71a0 100644 --- a/aws/config.go +++ b/aws/config.go @@ -6,7 +6,6 @@ import ( "strings" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/service/accessanalyzer" @@ -667,11 +666,7 @@ func (c *Config) Client() (interface{}, error) { if !strings.HasPrefix(r.Operation.Name, "Describe") && !strings.HasPrefix(r.Operation.Name, "List") { return } - err, ok := r.Error.(awserr.Error) - if !ok || err == nil { - return - } - if err.Code() == applicationautoscaling.ErrCodeFailedResourceAccessException { + if tfawserr.ErrCodeEquals(r.Error, applicationautoscaling.ErrCodeFailedResourceAccessException) { r.Retryable = aws.Bool(true) } }) diff --git a/aws/configservice.go b/aws/configservice.go index 53dbfc10559..c46ee4a3091 100644 --- a/aws/configservice.go +++ b/aws/configservice.go @@ -116,7 +116,7 @@ func configRefreshOrganizationConfigRuleStatus(conn *configservice.ConfigService memberAccountStatuses, err := configGetOrganizationConfigRuleDetailedStatus(conn, name, aws.StringValue(status.OrganizationRuleStatus)) if err != nil { - return status, aws.StringValue(status.OrganizationRuleStatus), fmt.Errorf("unable to get Organization Config Rule detailed status for showing member account errors: %s", err) + return status, aws.StringValue(status.OrganizationRuleStatus), fmt.Errorf("unable to get Organization Config Rule detailed status for showing member account errors: %w", err) } var errBuilder strings.Builder diff --git a/aws/data_source_aws_acm_certificate.go b/aws/data_source_aws_acm_certificate.go index b08aea0c391..47de5653366 100644 --- a/aws/data_source_aws_acm_certificate.go +++ b/aws/data_source_aws_acm_certificate.go @@ -91,7 +91,7 @@ func dataSourceAwsAcmCertificateRead(d *schema.ResourceData, meta interface{}) e return true }) if err != nil { - return fmt.Errorf("Error listing certificates: %q", err) + return fmt.Errorf("Error listing certificates: %w", err) } if len(arns) == 0 { @@ -119,7 +119,7 @@ func dataSourceAwsAcmCertificateRead(d *schema.ResourceData, meta interface{}) e log.Printf("[DEBUG] Describing ACM Certificate: %s", input) output, err := conn.DescribeCertificate(input) if err != nil { - return fmt.Errorf("Error describing ACM certificate: %q", err) + return fmt.Errorf("Error describing ACM certificate: %w", err) } certificate := output.Certificate @@ -174,11 +174,11 @@ func 
dataSourceAwsAcmCertificateRead(d *schema.ResourceData, meta interface{}) e tags, err := keyvaluetags.AcmListTags(conn, aws.StringValue(matchedCertificate.CertificateArn)) if err != nil { - return fmt.Errorf("error listing tags for ACM Certificate (%s): %s", d.Id(), err) + return fmt.Errorf("error listing tags for ACM Certificate (%s): %w", d.Id(), err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_acmpca_certificate_authority.go b/aws/data_source_aws_acmpca_certificate_authority.go index 84dcff34c1a..1047cd971ef 100644 --- a/aws/data_source_aws_acmpca_certificate_authority.go +++ b/aws/data_source_aws_acmpca_certificate_authority.go @@ -7,6 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/acmpca" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) @@ -106,7 +107,7 @@ func dataSourceAwsAcmpcaCertificateAuthorityRead(d *schema.ResourceData, meta in describeCertificateAuthorityOutput, err := conn.DescribeCertificateAuthority(describeCertificateAuthorityInput) if err != nil { - return fmt.Errorf("error reading ACMPCA Certificate Authority: %s", err) + return fmt.Errorf("error reading ACMPCA Certificate Authority: %w", err) } if describeCertificateAuthorityOutput.CertificateAuthority == nil { @@ -119,7 +120,7 @@ func dataSourceAwsAcmpcaCertificateAuthorityRead(d *schema.ResourceData, meta in d.Set("not_before", aws.TimeValue(certificateAuthority.NotBefore).Format(time.RFC3339)) if err := d.Set("revocation_configuration", flattenAcmpcaRevocationConfiguration(certificateAuthority.RevocationConfiguration)); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } d.Set("serial", certificateAuthority.Serial) @@ -136,8 +137,8 @@ func dataSourceAwsAcmpcaCertificateAuthorityRead(d *schema.ResourceData, meta in if err != nil { // Returned when in PENDING_CERTIFICATE status // InvalidStateException: The certificate authority XXXXX is not in the correct state to have a certificate signing request. 
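		// (tfawserr.ErrCodeEquals below unwraps with errors.As, so this check
		// keeps matching even when callers wrap the error with %w.)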
- if !isAWSErr(err, acmpca.ErrCodeInvalidStateException, "") { - return fmt.Errorf("error reading ACMPCA Certificate Authority Certificate: %s", err) + if !tfawserr.ErrCodeEquals(err, acmpca.ErrCodeInvalidStateException) { + return fmt.Errorf("error reading ACMPCA Certificate Authority Certificate: %w", err) } } @@ -156,7 +157,7 @@ func dataSourceAwsAcmpcaCertificateAuthorityRead(d *schema.ResourceData, meta in getCertificateAuthorityCsrOutput, err := conn.GetCertificateAuthorityCsr(getCertificateAuthorityCsrInput) if err != nil { - return fmt.Errorf("error reading ACMPCA Certificate Authority Certificate Signing Request: %s", err) + return fmt.Errorf("error reading ACMPCA Certificate Authority Certificate Signing Request: %w", err) } d.Set("certificate_signing_request", "") @@ -167,11 +168,11 @@ func dataSourceAwsAcmpcaCertificateAuthorityRead(d *schema.ResourceData, meta in tags, err := keyvaluetags.AcmpcaListTags(conn, certificateAuthorityArn) if err != nil { - return fmt.Errorf("error listing tags for ACMPCA Certificate Authority (%s): %s", certificateAuthorityArn, err) + return fmt.Errorf("error listing tags for ACMPCA Certificate Authority (%s): %w", certificateAuthorityArn, err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } d.SetId(certificateAuthorityArn) diff --git a/aws/data_source_aws_ami.go b/aws/data_source_aws_ami.go index 7f53f605fc5..c0a5035d645 100644 --- a/aws/data_source_aws_ami.go +++ b/aws/data_source_aws_ami.go @@ -298,7 +298,7 @@ func amiDescriptionAttributes(d *schema.ResourceData, image *ec2.Image, meta int return err } if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(image.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } imageArn := arn.ARN{ diff --git a/aws/data_source_aws_api_gateway_api_key.go b/aws/data_source_aws_api_gateway_api_key.go index a76f673ea40..a54afb6e6bf 100644 --- a/aws/data_source_aws_api_gateway_api_key.go +++ b/aws/data_source_aws_api_gateway_api_key.go @@ -70,7 +70,7 @@ func dataSourceAwsApiGatewayApiKeyRead(d *schema.ResourceData, meta interface{}) d.Set("last_updated_date", aws.TimeValue(apiKey.LastUpdatedDate).Format(time.RFC3339)) if err := d.Set("tags", keyvaluetags.ApigatewayKeyValueTags(apiKey.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil } diff --git a/aws/data_source_aws_api_gateway_domain_name.go b/aws/data_source_aws_api_gateway_domain_name.go index 34cfd5111c7..18b7cd9a56f 100644 --- a/aws/data_source_aws_api_gateway_domain_name.go +++ b/aws/data_source_aws_api_gateway_domain_name.go @@ -118,7 +118,7 @@ func dataSourceAwsApiGatewayDomainNameRead(d *schema.ResourceData, meta interfac d.Set("domain_name", domainName.DomainName) if err := d.Set("endpoint_configuration", flattenApiGatewayEndpointConfiguration(domainName.EndpointConfiguration)); err != nil { - return fmt.Errorf("error setting endpoint_configuration: %s", err) + return fmt.Errorf("error setting endpoint_configuration: %w", err) } d.Set("regional_certificate_arn", domainName.RegionalCertificateArn) @@ -128,7 +128,7 @@ func dataSourceAwsApiGatewayDomainNameRead(d *schema.ResourceData, meta interfac d.Set("security_policy", domainName.SecurityPolicy) if err := 
d.Set("tags", keyvaluetags.ApigatewayKeyValueTags(domainName.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_api_gateway_resource.go b/aws/data_source_aws_api_gateway_resource.go index 3f971a714b4..ec09979b76e 100644 --- a/aws/data_source_aws_api_gateway_resource.go +++ b/aws/data_source_aws_api_gateway_resource.go @@ -52,7 +52,7 @@ func dataSourceAwsApiGatewayResourceRead(d *schema.ResourceData, meta interface{ return !lastPage }) if err != nil { - return fmt.Errorf("error describing API Gateway Resources: %s", err) + return fmt.Errorf("error describing API Gateway Resources: %w", err) } if match == nil { diff --git a/aws/data_source_aws_api_gateway_rest_api.go b/aws/data_source_aws_api_gateway_rest_api.go index 047337dc8de..ef32a8f0b25 100644 --- a/aws/data_source_aws_api_gateway_rest_api.go +++ b/aws/data_source_aws_api_gateway_rest_api.go @@ -93,7 +93,7 @@ func dataSourceAwsApiGatewayRestApiRead(d *schema.ResourceData, meta interface{} return !lastPage }) if err != nil { - return fmt.Errorf("error describing API Gateway REST APIs: %s", err) + return fmt.Errorf("error describing API Gateway REST APIs: %w", err) } if len(matchedApis) == 0 { @@ -126,11 +126,11 @@ func dataSourceAwsApiGatewayRestApiRead(d *schema.ResourceData, meta interface{} } if err := d.Set("endpoint_configuration", flattenApiGatewayEndpointConfiguration(match.EndpointConfiguration)); err != nil { - return fmt.Errorf("error setting endpoint_configuration: %s", err) + return fmt.Errorf("error setting endpoint_configuration: %w", err) } if err := d.Set("tags", keyvaluetags.ApigatewayKeyValueTags(match.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } executionArn := arn.ARN{ diff --git a/aws/data_source_aws_api_gateway_vpc_link.go b/aws/data_source_aws_api_gateway_vpc_link.go index 46d029c4f13..0c453cb7631 100644 --- a/aws/data_source_aws_api_gateway_vpc_link.go +++ b/aws/data_source_aws_api_gateway_vpc_link.go @@ -64,7 +64,7 @@ func dataSourceAwsApiGatewayVpcLinkRead(d *schema.ResourceData, meta interface{} return !lastPage }) if err != nil { - return fmt.Errorf("error describing API Gateway VPC links: %s", err) + return fmt.Errorf("error describing API Gateway VPC links: %w", err) } if len(matchedVpcLinks) == 0 { @@ -84,7 +84,7 @@ func dataSourceAwsApiGatewayVpcLinkRead(d *schema.ResourceData, meta interface{} d.Set("target_arns", flattenStringList(match.TargetArns)) if err := d.Set("tags", keyvaluetags.ApigatewayKeyValueTags(match.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_arn.go b/aws/data_source_aws_arn.go index f0a7fa5d418..d7fff00e3d1 100644 --- a/aws/data_source_aws_arn.go +++ b/aws/data_source_aws_arn.go @@ -45,7 +45,7 @@ func dataSourceAwsArnRead(d *schema.ResourceData, meta interface{}) error { v := d.Get("arn").(string) arn, err := arn.Parse(v) if err != nil { - return fmt.Errorf("Error parsing '%s': %s", v, err.Error()) + return fmt.Errorf("Error parsing '%s': %w", v, err) } d.SetId(arn.String()) diff --git a/aws/data_source_aws_autoscaling_groups.go b/aws/data_source_aws_autoscaling_groups.go index fe1d95885fd..f3a584be48d 100644 --- 
a/aws/data_source_aws_autoscaling_groups.go +++ b/aws/data_source_aws_autoscaling_groups.go @@ -98,7 +98,7 @@ func dataSourceAwsAutoscalingGroupsRead(d *schema.ResourceData, meta interface{} }) } if err != nil { - return fmt.Errorf("Error fetching Autoscaling Groups: %s", err) + return fmt.Errorf("Error fetching Autoscaling Groups: %w", err) } d.SetId(meta.(*AWSClient).region) @@ -107,11 +107,11 @@ func dataSourceAwsAutoscalingGroupsRead(d *schema.ResourceData, meta interface{} sort.Strings(rawArn) if err := d.Set("names", rawName); err != nil { - return fmt.Errorf("[WARN] Error setting Autoscaling Group Names: %s", err) + return fmt.Errorf("[WARN] Error setting Autoscaling Group Names: %w", err) } if err := d.Set("arns", rawArn); err != nil { - return fmt.Errorf("[WARN] Error setting Autoscaling Group Arns: %s", err) + return fmt.Errorf("[WARN] Error setting Autoscaling Group ARNs: %w", err) } return nil diff --git a/aws/data_source_aws_availability_zones.go b/aws/data_source_aws_availability_zones.go index 86c51b3c0ba..9d48df9b8a8 100644 --- a/aws/data_source_aws_availability_zones.go +++ b/aws/data_source_aws_availability_zones.go @@ -94,7 +94,7 @@ func dataSourceAwsAvailabilityZonesRead(d *schema.ResourceData, meta interface{} log.Printf("[DEBUG] Reading Availability Zones: %s", request) resp, err := conn.DescribeAvailabilityZones(request) if err != nil { - return fmt.Errorf("Error fetching Availability Zones: %s", err) + return fmt.Errorf("Error fetching Availability Zones: %w", err) } sort.Slice(resp.AvailabilityZones, func(i, j int) bool { @@ -131,13 +131,13 @@ func dataSourceAwsAvailabilityZonesRead(d *schema.ResourceData, meta interface{} d.SetId(meta.(*AWSClient).region) if err := d.Set("group_names", groupNames); err != nil { - return fmt.Errorf("error setting group_names: %s", err) + return fmt.Errorf("error setting group_names: %w", err) } if err := d.Set("names", names); err != nil { - return fmt.Errorf("Error setting Availability Zone names: %s", err) + return fmt.Errorf("Error setting Availability Zone names: %w", err) } if err := d.Set("zone_ids", zoneIds); err != nil { - return fmt.Errorf("Error setting Availability Zone IDs: %s", err) + return fmt.Errorf("Error setting Availability Zone IDs: %w", err) } return nil diff --git a/aws/data_source_aws_backup_plan.go b/aws/data_source_aws_backup_plan.go index 2685f78b80d..a732a259ca9 100644 --- a/aws/data_source_aws_backup_plan.go +++ b/aws/data_source_aws_backup_plan.go @@ -45,7 +45,7 @@ func dataSourceAwsBackupPlanRead(d *schema.ResourceData, meta interface{}) error BackupPlanId: aws.String(id), }) if err != nil { - return fmt.Errorf("Error getting Backup Plan: %v", err) + return fmt.Errorf("Error getting Backup Plan: %w", err) } d.SetId(aws.StringValue(resp.BackupPlanId)) @@ -55,10 +55,10 @@ func dataSourceAwsBackupPlanRead(d *schema.ResourceData, meta interface{}) error tags, err := keyvaluetags.BackupListTags(conn, aws.StringValue(resp.BackupPlanArn)) if err != nil { - return fmt.Errorf("error listing tags for Backup Plan (%s): %s", id, err) + return fmt.Errorf("error listing tags for Backup Plan (%s): %w", id, err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_backup_selection.go b/aws/data_source_aws_backup_selection.go index 30f1d64038b..945caabcca4 100644 --- a/aws/data_source_aws_backup_selection.go +++ 
b/aws/data_source_aws_backup_selection.go @@ -48,7 +48,7 @@ func dataSourceAwsBackupSelectionRead(d *schema.ResourceData, meta interface{}) resp, err := conn.GetBackupSelection(input) if err != nil { - return fmt.Errorf("Error getting Backup Selection: %s", err) + return fmt.Errorf("Error getting Backup Selection: %w", err) } d.SetId(aws.StringValue(resp.SelectionId)) @@ -57,7 +57,7 @@ func dataSourceAwsBackupSelectionRead(d *schema.ResourceData, meta interface{}) if resp.BackupSelection.Resources != nil { if err := d.Set("resources", aws.StringValueSlice(resp.BackupSelection.Resources)); err != nil { - return fmt.Errorf("error setting resources: %s", err) + return fmt.Errorf("error setting resources: %w", err) } } diff --git a/aws/data_source_aws_backup_vault.go b/aws/data_source_aws_backup_vault.go index 1007159d002..11cb7ac0347 100644 --- a/aws/data_source_aws_backup_vault.go +++ b/aws/data_source_aws_backup_vault.go @@ -46,7 +46,7 @@ func dataSourceAwsBackupVaultRead(d *schema.ResourceData, meta interface{}) erro resp, err := conn.DescribeBackupVault(input) if err != nil { - return fmt.Errorf("Error getting Backup Vault: %v", err) + return fmt.Errorf("Error getting Backup Vault: %w", err) } d.SetId(aws.StringValue(resp.BackupVaultName)) @@ -57,10 +57,10 @@ func dataSourceAwsBackupVaultRead(d *schema.ResourceData, meta interface{}) erro tags, err := keyvaluetags.BackupListTags(conn, aws.StringValue(resp.BackupVaultArn)) if err != nil { - return fmt.Errorf("error listing tags for Backup Vault (%s): %s", name, err) + return fmt.Errorf("error listing tags for Backup Vault (%s): %w", name, err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_batch_job_queue.go b/aws/data_source_aws_batch_job_queue.go index 5575b5534c4..3a3c7b3a431 100644 --- a/aws/data_source_aws_batch_job_queue.go +++ b/aws/data_source_aws_batch_job_queue.go @@ -106,11 +106,11 @@ func dataSourceAwsBatchJobQueueRead(d *schema.ResourceData, meta interface{}) er ceos = append(ceos, ceo) } if err := d.Set("compute_environment_order", ceos); err != nil { - return fmt.Errorf("error setting compute_environment_order: %s", err) + return fmt.Errorf("error setting compute_environment_order: %w", err) } if err := d.Set("tags", keyvaluetags.BatchKeyValueTags(jobQueue.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_caller_identity.go b/aws/data_source_aws_caller_identity.go index 8c4f9a3847e..04b0b8c52d3 100644 --- a/aws/data_source_aws_caller_identity.go +++ b/aws/data_source_aws_caller_identity.go @@ -39,7 +39,7 @@ func dataSourceAwsCallerIdentityRead(d *schema.ResourceData, meta interface{}) e res, err := client.GetCallerIdentity(&sts.GetCallerIdentityInput{}) if err != nil { - return fmt.Errorf("Error getting Caller Identity: %v", err) + return fmt.Errorf("Error getting Caller Identity: %w", err) } log.Printf("[DEBUG] Received Caller Identity: %s", res) diff --git a/aws/data_source_aws_cloudformation_export.go b/aws/data_source_aws_cloudformation_export.go index 9eb19141649..991b18b560c 100644 --- a/aws/data_source_aws_cloudformation_export.go +++ b/aws/data_source_aws_cloudformation_export.go @@ -49,7 +49,7 @@ func dataSourceAwsCloudFormationExportRead(d 
*schema.ResourceData, meta interfac return !lastPage }) if err != nil { - return fmt.Errorf("Failed listing CloudFormation exports: %s", err) + return fmt.Errorf("Failed listing CloudFormation exports: %w", err) } if value == "" { return fmt.Errorf("%s was not found in CloudFormation Exports for region %s", name, region) diff --git a/aws/data_source_aws_cloudformation_stack.go b/aws/data_source_aws_cloudformation_stack.go index 3678534742f..48a369ce242 100644 --- a/aws/data_source_aws_cloudformation_stack.go +++ b/aws/data_source_aws_cloudformation_stack.go @@ -78,7 +78,7 @@ func dataSourceAwsCloudFormationStackRead(d *schema.ResourceData, meta interface log.Printf("[DEBUG] Reading CloudFormation Stack: %s", input) out, err := conn.DescribeStacks(input) if err != nil { - return fmt.Errorf("Failed describing CloudFormation stack (%s): %s", name, err) + return fmt.Errorf("Failed describing CloudFormation stack (%s): %w", name, err) } if l := len(out.Stacks); l != 1 { return fmt.Errorf("Expected 1 CloudFormation stack (%s), found %d", name, l) @@ -97,7 +97,7 @@ func dataSourceAwsCloudFormationStackRead(d *schema.ResourceData, meta interface d.Set("parameters", flattenAllCloudFormationParameters(stack.Parameters)) if err := d.Set("tags", keyvaluetags.CloudformationKeyValueTags(stack.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } d.Set("outputs", flattenCloudFormationOutputs(stack.Outputs)) @@ -115,7 +115,7 @@ func dataSourceAwsCloudFormationStackRead(d *schema.ResourceData, meta interface template, err := normalizeJsonOrYamlString(*tOut.TemplateBody) if err != nil { - return fmt.Errorf("template body contains an invalid JSON or YAML: %s", err) + return fmt.Errorf("template body contains an invalid JSON or YAML: %w", err) } d.Set("template_body", template) diff --git a/aws/data_source_aws_cloudfront_distribution.go b/aws/data_source_aws_cloudfront_distribution.go index aea4300f1ba..7e4a4d6035b 100644 --- a/aws/data_source_aws_cloudfront_distribution.go +++ b/aws/data_source_aws_cloudfront_distribution.go @@ -88,7 +88,7 @@ func dataSourceAwsCloudFrontDistributionRead(d *schema.ResourceData, meta interf return fmt.Errorf("error listing tags for CloudFront Distribution (%s): %w", d.Id(), err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } d.Set("hosted_zone_id", cloudFrontRoute53ZoneID) diff --git a/aws/data_source_aws_cloudfront_origin_request_policy.go b/aws/data_source_aws_cloudfront_origin_request_policy.go index 2656c832b0d..c5ac2a9babb 100644 --- a/aws/data_source_aws_cloudfront_origin_request_policy.go +++ b/aws/data_source_aws_cloudfront_origin_request_policy.go @@ -113,7 +113,7 @@ func dataSourceAwsCloudFrontOriginRequestPolicyRead(d *schema.ResourceData, meta if d.Get("id").(string) == "" { if err := dataSourceAwsCloudFrontOriginRequestPolicyFindByName(d, conn); err != nil { - return fmt.Errorf("Unable to find origin request policy by name: %s", err.Error()) + return fmt.Errorf("Unable to find origin request policy by name: %w", err) } } @@ -124,7 +124,7 @@ func dataSourceAwsCloudFrontOriginRequestPolicyRead(d *schema.ResourceData, meta resp, err := conn.GetOriginRequestPolicy(request) if err != nil { - return fmt.Errorf("Unable to retrieve origin request policy with ID %s: %s", d.Id(), err.Error()) + return 
fmt.Errorf("Unable to retrieve origin request policy with ID %s: %w", d.Id(), err) } d.Set("etag", aws.StringValue(resp.ETag)) diff --git a/aws/data_source_aws_cloudhsm2_cluster.go b/aws/data_source_aws_cloudhsm2_cluster.go index 21690b2ceaa..1d2fd420d2f 100644 --- a/aws/data_source_aws_cloudhsm2_cluster.go +++ b/aws/data_source_aws_cloudhsm2_cluster.go @@ -94,7 +94,7 @@ func dataSourceCloudHsmV2ClusterRead(d *schema.ResourceData, meta interface{}) e out, err := conn.DescribeClusters(input) if err != nil { - return fmt.Errorf("error describing CloudHSM v2 Cluster: %s", err) + return fmt.Errorf("error describing CloudHSM v2 Cluster: %w", err) } var cluster *cloudhsmv2.Cluster @@ -114,7 +114,7 @@ func dataSourceCloudHsmV2ClusterRead(d *schema.ResourceData, meta interface{}) e d.Set("security_group_id", cluster.SecurityGroup) d.Set("cluster_state", cluster.State) if err := d.Set("cluster_certificates", readCloudHsmV2ClusterCertificates(cluster)); err != nil { - return fmt.Errorf("error setting cluster_certificates: %s", err) + return fmt.Errorf("error setting cluster_certificates: %w", err) } var subnets []string @@ -123,7 +123,7 @@ func dataSourceCloudHsmV2ClusterRead(d *schema.ResourceData, meta interface{}) e } if err := d.Set("subnet_ids", subnets); err != nil { - return fmt.Errorf("[DEBUG] Error saving Subnet IDs to state for CloudHSM v2 Cluster (%s): %s", d.Id(), err) + return fmt.Errorf("[DEBUG] Error saving Subnet IDs to state for CloudHSM v2 Cluster (%s): %w", d.Id(), err) } return nil diff --git a/aws/data_source_aws_cloudwatch_log_group.go b/aws/data_source_aws_cloudwatch_log_group.go index 349451aac74..58121e60fb9 100644 --- a/aws/data_source_aws_cloudwatch_log_group.go +++ b/aws/data_source_aws_cloudwatch_log_group.go @@ -59,11 +59,11 @@ func dataSourceAwsCloudwatchLogGroupRead(d *schema.ResourceData, meta interface{ tags, err := keyvaluetags.CloudwatchlogsListTags(conn, name) if err != nil { - return fmt.Errorf("error listing tags for CloudWatch Logs Group (%s): %s", name, err) + return fmt.Errorf("error listing tags for CloudWatch Logs Group (%s): %w", name, err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_codecommit_repository.go b/aws/data_source_aws_codecommit_repository.go index f842de3aac1..eab1b845491 100644 --- a/aws/data_source_aws_codecommit_repository.go +++ b/aws/data_source_aws_codecommit_repository.go @@ -59,7 +59,7 @@ func dataSourceAwsCodeCommitRepositoryRead(d *schema.ResourceData, meta interfac d.SetId("") return fmt.Errorf("Resource codecommit repository not found for %s", repositoryName) } else { - return fmt.Errorf("Error reading CodeCommit Repository: %s", err.Error()) + return fmt.Errorf("Error reading CodeCommit Repository: %w", err) } } diff --git a/aws/data_source_aws_cognito_user_pools.go b/aws/data_source_aws_cognito_user_pools.go index 954c4ceeace..664016403c6 100644 --- a/aws/data_source_aws_cognito_user_pools.go +++ b/aws/data_source_aws_cognito_user_pools.go @@ -39,7 +39,7 @@ func dataSourceAwsCognitoUserPoolsRead(d *schema.ResourceData, meta interface{}) pools, err := getAllCognitoUserPools(conn) if err != nil { - return fmt.Errorf("Error listing cognito user pools: %s", err) + return fmt.Errorf("Error listing cognito user pools: %w", err) } for _, pool := range pools { if name == aws.StringValue(pool.Name) { diff --git 
a/aws/data_source_aws_customer_gateway.go b/aws/data_source_aws_customer_gateway.go index 325e0c4b06b..3caa53207ba 100644 --- a/aws/data_source_aws_customer_gateway.go +++ b/aws/data_source_aws_customer_gateway.go @@ -63,7 +63,7 @@ func dataSourceAwsCustomerGatewayRead(d *schema.ResourceData, meta interface{}) output, err := conn.DescribeCustomerGateways(&input) if err != nil { - return fmt.Errorf("error reading EC2 Customer Gateways: %s", err) + return fmt.Errorf("error reading EC2 Customer Gateways: %w", err) } if output == nil || len(output.CustomerGateways) == 0 { @@ -86,14 +86,14 @@ func dataSourceAwsCustomerGatewayRead(d *schema.ResourceData, meta interface{}) if v := aws.StringValue(cg.BgpAsn); v != "" { asn, err := strconv.ParseInt(v, 0, 0) if err != nil { - return fmt.Errorf("error parsing BGP ASN %q: %s", v, err) + return fmt.Errorf("error parsing BGP ASN %q: %w", v, err) } d.Set("bgp_asn", int(asn)) } if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(cg.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags for EC2 Customer Gateway %q: %s", aws.StringValue(cg.CustomerGatewayId), err) + return fmt.Errorf("error setting tags for EC2 Customer Gateway %q: %w", aws.StringValue(cg.CustomerGatewayId), err) } arn := arn.ARN{ diff --git a/aws/data_source_aws_db_cluster_snapshot.go b/aws/data_source_aws_db_cluster_snapshot.go index 0d75c89164f..39b92a91082 100644 --- a/aws/data_source_aws_db_cluster_snapshot.go +++ b/aws/data_source_aws_db_cluster_snapshot.go @@ -161,7 +161,7 @@ func dataSourceAwsDbClusterSnapshotRead(d *schema.ResourceData, meta interface{} d.SetId(aws.StringValue(snapshot.DBClusterSnapshotIdentifier)) d.Set("allocated_storage", snapshot.AllocatedStorage) if err := d.Set("availability_zones", flattenStringList(snapshot.AvailabilityZones)); err != nil { - return fmt.Errorf("error setting availability_zones: %s", err) + return fmt.Errorf("error setting availability_zones: %w", err) } d.Set("db_cluster_identifier", snapshot.DBClusterIdentifier) d.Set("db_cluster_snapshot_arn", snapshot.DBClusterSnapshotArn) @@ -183,11 +183,11 @@ func dataSourceAwsDbClusterSnapshotRead(d *schema.ResourceData, meta interface{} tags, err := keyvaluetags.RdsListTags(conn, d.Get("db_cluster_snapshot_arn").(string)) if err != nil { - return fmt.Errorf("error listing tags for RDS DB Cluster Snapshot (%s): %s", d.Get("db_cluster_snapshot_arn").(string), err) + return fmt.Errorf("error listing tags for RDS DB Cluster Snapshot (%s): %w", d.Get("db_cluster_snapshot_arn").(string), err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_db_event_categories.go b/aws/data_source_aws_db_event_categories.go index 38337a8f2b8..1d719fec00c 100644 --- a/aws/data_source_aws_db_event_categories.go +++ b/aws/data_source_aws_db_event_categories.go @@ -57,7 +57,7 @@ func dataSourceAwsDbEventCategoriesRead(d *schema.ResourceData, meta interface{} d.SetId(meta.(*AWSClient).region) if err := d.Set("event_categories", eventCategories); err != nil { - return fmt.Errorf("Error setting Event Categories: %s", err) + return fmt.Errorf("Error setting Event Categories: %w", err) } return nil diff --git a/aws/data_source_aws_db_instance.go b/aws/data_source_aws_db_instance.go index 47a8746b434..9c856bc7dd3 100644 --- a/aws/data_source_aws_db_instance.go +++ 
b/aws/data_source_aws_db_instance.go @@ -256,7 +256,7 @@ func dataSourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error parameterGroups = append(parameterGroups, *v.DBParameterGroupName) } if err := d.Set("db_parameter_groups", parameterGroups); err != nil { - return fmt.Errorf("Error setting db_parameter_groups attribute: %#v, error: %#v", parameterGroups, err) + return fmt.Errorf("Error setting db_parameter_groups attribute: %#v, error: %w", parameterGroups, err) } var dbSecurityGroups []string @@ -264,7 +264,7 @@ func dataSourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error dbSecurityGroups = append(dbSecurityGroups, *v.DBSecurityGroupName) } if err := d.Set("db_security_groups", dbSecurityGroups); err != nil { - return fmt.Errorf("Error setting db_security_groups attribute: %#v, error: %#v", dbSecurityGroups, err) + return fmt.Errorf("Error setting db_security_groups attribute: %#v, error: %w", dbSecurityGroups, err) } if dbInstance.DBSubnetGroup != nil { @@ -289,7 +289,7 @@ func dataSourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error d.Set("endpoint", fmt.Sprintf("%s:%d", *dbInstance.Endpoint.Address, *dbInstance.Endpoint.Port)) if err := d.Set("enabled_cloudwatch_logs_exports", aws.StringValueSlice(dbInstance.EnabledCloudwatchLogsExports)); err != nil { - return fmt.Errorf("error setting enabled_cloudwatch_logs_exports: %#v", err) + return fmt.Errorf("error setting enabled_cloudwatch_logs_exports: %w", err) } var optionGroups []string @@ -297,7 +297,7 @@ func dataSourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error optionGroups = append(optionGroups, *v.OptionGroupName) } if err := d.Set("option_group_memberships", optionGroups); err != nil { - return fmt.Errorf("Error setting option_group_memberships attribute: %#v, error: %#v", optionGroups, err) + return fmt.Errorf("Error setting option_group_memberships attribute: %#v, error: %w", optionGroups, err) } d.Set("preferred_backup_window", dbInstance.PreferredBackupWindow) @@ -314,17 +314,17 @@ func dataSourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error vpcSecurityGroups = append(vpcSecurityGroups, *v.VpcSecurityGroupId) } if err := d.Set("vpc_security_groups", vpcSecurityGroups); err != nil { - return fmt.Errorf("Error setting vpc_security_groups attribute: %#v, error: %#v", vpcSecurityGroups, err) + return fmt.Errorf("Error setting vpc_security_groups attribute: %#v, error: %w", vpcSecurityGroups, err) } tags, err := keyvaluetags.RdsListTags(conn, d.Get("db_instance_arn").(string)) if err != nil { - return fmt.Errorf("error listing tags for RDS DB Instance (%s): %s", d.Get("db_instance_arn").(string), err) + return fmt.Errorf("error listing tags for RDS DB Instance (%s): %w", d.Get("db_instance_arn").(string), err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_dx_gateway.go b/aws/data_source_aws_dx_gateway.go index 60808677d55..c05d87d99dd 100644 --- a/aws/data_source_aws_dx_gateway.go +++ b/aws/data_source_aws_dx_gateway.go @@ -40,7 +40,7 @@ func dataSourceAwsDxGatewayRead(d *schema.ResourceData, meta interface{}) error for { output, err := conn.DescribeDirectConnectGateways(input) if err != nil { - return fmt.Errorf("error reading Direct Connect Gateway: %s", err) + return fmt.Errorf("error reading Direct Connect Gateway: %w", err) } for _, 
gateway := range output.DirectConnectGateways { if aws.StringValue(gateway.DirectConnectGatewayName) == name { diff --git a/aws/data_source_aws_dynamodb_table.go b/aws/data_source_aws_dynamodb_table.go index 94418453b33..60dae0aaac6 100644 --- a/aws/data_source_aws_dynamodb_table.go +++ b/aws/data_source_aws_dynamodb_table.go @@ -221,7 +221,7 @@ func dataSourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) er }) if err != nil { - return fmt.Errorf("Error retrieving DynamoDB table: %s", err) + return fmt.Errorf("Error retrieving DynamoDB table: %w", err) } d.SetId(aws.StringValue(result.Table.TableName)) @@ -235,20 +235,20 @@ func dataSourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) er TableName: aws.String(d.Id()), }) if err != nil { - return fmt.Errorf("error describing DynamoDB Table (%s) Time to Live: %s", d.Id(), err) + return fmt.Errorf("error describing DynamoDB Table (%s) Time to Live: %w", d.Id(), err) } if err := d.Set("ttl", flattenDynamoDbTtl(ttlOut)); err != nil { - return fmt.Errorf("error setting ttl: %s", err) + return fmt.Errorf("error setting ttl: %w", err) } tags, err := keyvaluetags.DynamodbListTags(conn, d.Get("arn").(string)) if err != nil && !isAWSErr(err, "UnknownOperationException", "Tagging is not currently supported in DynamoDB Local.") { - return fmt.Errorf("error listing tags for DynamoDB Table (%s): %s", d.Get("arn").(string), err) + return fmt.Errorf("error listing tags for DynamoDB Table (%s): %w", d.Get("arn").(string), err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } pitrOut, err := conn.DescribeContinuousBackups(&dynamodb.DescribeContinuousBackupsInput{ diff --git a/aws/data_source_aws_ebs_default_kms_key.go b/aws/data_source_aws_ebs_default_kms_key.go index 00b34a8c538..f21e0aea3e9 100644 --- a/aws/data_source_aws_ebs_default_kms_key.go +++ b/aws/data_source_aws_ebs_default_kms_key.go @@ -24,7 +24,7 @@ func dataSourceAwsEbsDefaultKmsKeyRead(d *schema.ResourceData, meta interface{}) res, err := conn.GetEbsDefaultKmsKeyId(&ec2.GetEbsDefaultKmsKeyIdInput{}) if err != nil { - return fmt.Errorf("Error reading EBS default KMS key: %q", err) + return fmt.Errorf("Error reading EBS default KMS key: %w", err) } d.SetId(meta.(*AWSClient).region) diff --git a/aws/data_source_aws_ebs_encryption_by_default.go b/aws/data_source_aws_ebs_encryption_by_default.go index cae99edb34f..dea83480edc 100644 --- a/aws/data_source_aws_ebs_encryption_by_default.go +++ b/aws/data_source_aws_ebs_encryption_by_default.go @@ -24,7 +24,7 @@ func dataSourceAwsEbsEncryptionByDefaultRead(d *schema.ResourceData, meta interf res, err := conn.GetEbsEncryptionByDefault(&ec2.GetEbsEncryptionByDefaultInput{}) if err != nil { - return fmt.Errorf("Error reading default EBS encryption toggle: %q", err) + return fmt.Errorf("Error reading default EBS encryption toggle: %w", err) } d.SetId(meta.(*AWSClient).region) diff --git a/aws/data_source_aws_ebs_snapshot.go b/aws/data_source_aws_ebs_snapshot.go index 211275e9818..971864c3e86 100644 --- a/aws/data_source_aws_ebs_snapshot.go +++ b/aws/data_source_aws_ebs_snapshot.go @@ -155,7 +155,7 @@ func snapshotDescriptionAttributes(d *schema.ResourceData, snapshot *ec2.Snapsho d.Set("owner_alias", snapshot.OwnerAlias) if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(snapshot.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return 
fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } snapshotArn := arn.ARN{ diff --git a/aws/data_source_aws_ebs_volume.go b/aws/data_source_aws_ebs_volume.go index 6279bd9de6a..8e1988f0ee3 100644 --- a/aws/data_source_aws_ebs_volume.go +++ b/aws/data_source_aws_ebs_volume.go @@ -158,7 +158,7 @@ func volumeDescriptionAttributes(d *schema.ResourceData, client *AWSClient, volu d.Set("throughput", volume.Throughput) if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(volume.Tags).IgnoreAws().IgnoreConfig(client.IgnoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_ec2_coip_pool.go b/aws/data_source_aws_ec2_coip_pool.go index 703fe13b73f..f6d21f9bab4 100644 --- a/aws/data_source_aws_ec2_coip_pool.go +++ b/aws/data_source_aws_ec2_coip_pool.go @@ -92,13 +92,13 @@ func dataSourceAwsEc2CoipPoolRead(d *schema.ResourceData, meta interface{}) erro d.Set("local_gateway_route_table_id", coip.LocalGatewayRouteTableId) if err := d.Set("pool_cidrs", aws.StringValueSlice(coip.PoolCidrs)); err != nil { - return fmt.Errorf("error setting pool_cidrs: %s", err) + return fmt.Errorf("error setting pool_cidrs: %w", err) } d.Set("pool_id", coip.PoolId) if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(coip.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_ec2_instance_type_offerings.go b/aws/data_source_aws_ec2_instance_type_offerings.go index 754618bce1f..19143031bb7 100644 --- a/aws/data_source_aws_ec2_instance_type_offerings.go +++ b/aws/data_source_aws_ec2_instance_type_offerings.go @@ -75,7 +75,7 @@ func dataSourceAwsEc2InstanceTypeOfferingsRead(d *schema.ResourceData, meta inte } if err := d.Set("instance_types", instanceTypes); err != nil { - return fmt.Errorf("error setting instance_types: %s", err) + return fmt.Errorf("error setting instance_types: %w", err) } d.SetId(meta.(*AWSClient).region) diff --git a/aws/data_source_aws_ec2_local_gateway.go b/aws/data_source_aws_ec2_local_gateway.go index 31e0d85e9a9..1295c481e5a 100644 --- a/aws/data_source_aws_ec2_local_gateway.go +++ b/aws/data_source_aws_ec2_local_gateway.go @@ -94,7 +94,7 @@ func dataSourceAwsEc2LocalGatewayRead(d *schema.ResourceData, meta interface{}) d.Set("state", localGateway.State) if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(localGateway.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_ec2_local_gateway_route_table.go b/aws/data_source_aws_ec2_local_gateway_route_table.go index 64435605c81..ed6307b5baf 100644 --- a/aws/data_source_aws_ec2_local_gateway_route_table.go +++ b/aws/data_source_aws_ec2_local_gateway_route_table.go @@ -97,7 +97,7 @@ func dataSourceAwsEc2LocalGatewayRouteTableRead(d *schema.ResourceData, meta int d.Set("state", localgatewayroutetable.State) if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(localgatewayroutetable.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_ec2_transit_gateway.go b/aws/data_source_aws_ec2_transit_gateway.go 
index 499934d3739..3a327cd8785 100644 --- a/aws/data_source_aws_ec2_transit_gateway.go +++ b/aws/data_source_aws_ec2_transit_gateway.go @@ -89,7 +89,7 @@ func dataSourceAwsEc2TransitGatewayRead(d *schema.ResourceData, meta interface{} output, err := conn.DescribeTransitGateways(input) if err != nil { - return fmt.Errorf("error reading EC2 Transit Gateway: %s", err) + return fmt.Errorf("error reading EC2 Transit Gateway: %w", err) } if output == nil || len(output.TransitGateways) == 0 { @@ -122,7 +122,7 @@ func dataSourceAwsEc2TransitGatewayRead(d *schema.ResourceData, meta interface{} d.Set("propagation_default_route_table_id", transitGateway.Options.PropagationDefaultRouteTableId) if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(transitGateway.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } d.Set("vpn_ecmp_support", transitGateway.Options.VpnEcmpSupport) diff --git a/aws/data_source_aws_ec2_transit_gateway_dx_gateway_attachment.go b/aws/data_source_aws_ec2_transit_gateway_dx_gateway_attachment.go index 2df51a7cf66..30fa4bd4f91 100644 --- a/aws/data_source_aws_ec2_transit_gateway_dx_gateway_attachment.go +++ b/aws/data_source_aws_ec2_transit_gateway_dx_gateway_attachment.go @@ -72,7 +72,7 @@ func dataSourceAwsEc2TransitGatewayDxGatewayAttachmentRead(d *schema.ResourceDat output, err := conn.DescribeTransitGatewayAttachments(input) if err != nil { - return fmt.Errorf("error reading EC2 Transit Gateway Direct Connect Gateway Attachment: %s", err) + return fmt.Errorf("error reading EC2 Transit Gateway Direct Connect Gateway Attachment: %w", err) } if output == nil || len(output.TransitGatewayAttachments) == 0 || output.TransitGatewayAttachments[0] == nil { @@ -86,7 +86,7 @@ func dataSourceAwsEc2TransitGatewayDxGatewayAttachmentRead(d *schema.ResourceDat transitGatewayAttachment := output.TransitGatewayAttachments[0] if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(transitGatewayAttachment.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } d.Set("transit_gateway_id", aws.StringValue(transitGatewayAttachment.TransitGatewayId)) diff --git a/aws/data_source_aws_ec2_transit_gateway_peering_attachment.go b/aws/data_source_aws_ec2_transit_gateway_peering_attachment.go index 2ddbda6803c..068e40a773c 100644 --- a/aws/data_source_aws_ec2_transit_gateway_peering_attachment.go +++ b/aws/data_source_aws_ec2_transit_gateway_peering_attachment.go @@ -66,7 +66,7 @@ func dataSourceAwsEc2TransitGatewayPeeringAttachmentRead(d *schema.ResourceData, output, err := conn.DescribeTransitGatewayPeeringAttachments(input) if err != nil { - return fmt.Errorf("error reading EC2 Transit Gateway Peering Attachments: %s", err) + return fmt.Errorf("error reading EC2 Transit Gateway Peering Attachments: %w", err) } if output == nil || len(output.TransitGatewayPeeringAttachments) == 0 { @@ -97,7 +97,7 @@ func dataSourceAwsEc2TransitGatewayPeeringAttachmentRead(d *schema.ResourceData, d.Set("transit_gateway_id", local.TransitGatewayId) if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(transitGatewayPeeringAttachment.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } d.SetId(aws.StringValue(transitGatewayPeeringAttachment.TransitGatewayAttachmentId))
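(The hunks above and below are one mechanical change: fmt.Errorf's %s verb becomes %w, which keeps the underlying error in the wrap chain instead of flattening it to text, so callers can recover it with errors.Is and errors.As from Go 1.13 onward. A minimal, self-contained sketch of the difference; the message and the os.ErrNotExist sentinel are illustrative only, not taken from the provider:)
```
package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	// Pretend a lower-level call failed with a well-known sentinel error.
	base := os.ErrNotExist

	// %w wraps: the original error stays reachable through Unwrap.
	wrapped := fmt.Errorf("error reading EC2 Transit Gateway: %w", base)
	// %s only formats: the text survives but the error chain is broken.
	printed := fmt.Errorf("error reading EC2 Transit Gateway: %s", base)

	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true
	fmt.Println(errors.Is(printed, os.ErrNotExist)) // false
}
```
(The same reasoning drives the errors.As rewrites in the waiter packages further down: a plain type switch on err misses wrapped errors, while errors.As walks the whole chain.)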
diff --git a/aws/data_source_aws_ec2_transit_gateway_route_table.go b/aws/data_source_aws_ec2_transit_gateway_route_table.go index 6caa944d781..68776d1da5f 100644 --- a/aws/data_source_aws_ec2_transit_gateway_route_table.go +++ b/aws/data_source_aws_ec2_transit_gateway_route_table.go @@ -62,7 +62,7 @@ func dataSourceAwsEc2TransitGatewayRouteTableRead(d *schema.ResourceData, meta i output, err := conn.DescribeTransitGatewayRouteTables(input) if err != nil { - return fmt.Errorf("error reading EC2 Transit Gateway Route Table: %s", err) + return fmt.Errorf("error reading EC2 Transit Gateway Route Table: %w", err) } if output == nil || len(output.TransitGatewayRouteTables) == 0 { @@ -83,7 +83,7 @@ func dataSourceAwsEc2TransitGatewayRouteTableRead(d *schema.ResourceData, meta i d.Set("default_propagation_route_table", aws.BoolValue(transitGatewayRouteTable.DefaultPropagationRouteTable)) if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(transitGatewayRouteTable.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } d.Set("transit_gateway_id", aws.StringValue(transitGatewayRouteTable.TransitGatewayId)) diff --git a/aws/data_source_aws_ec2_transit_gateway_vpc_attachment.go b/aws/data_source_aws_ec2_transit_gateway_vpc_attachment.go index a1aea8c18c7..d743558d9bb 100644 --- a/aws/data_source_aws_ec2_transit_gateway_vpc_attachment.go +++ b/aws/data_source_aws_ec2_transit_gateway_vpc_attachment.go @@ -74,7 +74,7 @@ func dataSourceAwsEc2TransitGatewayVpcAttachmentRead(d *schema.ResourceData, met output, err := conn.DescribeTransitGatewayVpcAttachments(input) if err != nil { - return fmt.Errorf("error reading EC2 Transit Gateway Route Table: %s", err) + return fmt.Errorf("error reading EC2 Transit Gateway VPC Attachment: %w", err) } if output == nil || len(output.TransitGatewayVpcAttachments) == 0 { @@ -100,11 +100,11 @@ func dataSourceAwsEc2TransitGatewayVpcAttachmentRead(d *schema.ResourceData, met d.Set("ipv6_support", transitGatewayVpcAttachment.Options.Ipv6Support) if err := d.Set("subnet_ids", aws.StringValueSlice(transitGatewayVpcAttachment.SubnetIds)); err != nil { - return fmt.Errorf("error setting subnet_ids: %s", err) + return fmt.Errorf("error setting subnet_ids: %w", err) } if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(transitGatewayVpcAttachment.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } d.Set("transit_gateway_id", aws.StringValue(transitGatewayVpcAttachment.TransitGatewayId)) diff --git a/aws/data_source_aws_ec2_transit_gateway_vpn_attachment.go b/aws/data_source_aws_ec2_transit_gateway_vpn_attachment.go index 8705566877a..d044185a7d0 100644 --- a/aws/data_source_aws_ec2_transit_gateway_vpn_attachment.go +++ b/aws/data_source_aws_ec2_transit_gateway_vpn_attachment.go @@ -72,7 +72,7 @@ func dataSourceAwsEc2TransitGatewayVpnAttachmentRead(d *schema.ResourceData, met output, err := conn.DescribeTransitGatewayAttachments(input) if err != nil { - return fmt.Errorf("error reading EC2 Transit Gateway VPN Attachment: %s", err) + return fmt.Errorf("error reading EC2 Transit Gateway VPN Attachment: %w", err) } if output == nil || len(output.TransitGatewayAttachments) == 0 || output.TransitGatewayAttachments[0] == nil { @@ -86,7 +86,7 @@ func dataSourceAwsEc2TransitGatewayVpnAttachmentRead(d *schema.ResourceData, met transitGatewayAttachment := 
output.TransitGatewayAttachments[0] if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(transitGatewayAttachment.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } d.Set("transit_gateway_id", aws.StringValue(transitGatewayAttachment.TransitGatewayId)) diff --git a/aws/data_source_aws_ecr_authorization_token.go b/aws/data_source_aws_ecr_authorization_token.go index 52e43945b49..55f03219460 100644 --- a/aws/data_source_aws_ecr_authorization_token.go +++ b/aws/data_source_aws_ecr_authorization_token.go @@ -56,7 +56,7 @@ func dataSourceAwsEcrAuthorizationTokenRead(d *schema.ResourceData, meta interfa log.Printf("[DEBUG] Getting ECR authorization token") out, err := conn.GetAuthorizationToken(params) if err != nil { - return fmt.Errorf("error getting ECR authorization token: %s", err) + return fmt.Errorf("error getting ECR authorization token: %w", err) } log.Printf("[DEBUG] Received ECR AuthorizationData %v", out.AuthorizationData) authorizationData := out.AuthorizationData[0] @@ -66,7 +66,7 @@ func dataSourceAwsEcrAuthorizationTokenRead(d *schema.ResourceData, meta interfa authBytes, err := base64.URLEncoding.DecodeString(authorizationToken) if err != nil { d.SetId("") - return fmt.Errorf("error decoding ECR authorization token: %s", err) + return fmt.Errorf("error decoding ECR authorization token: %w", err) } basicAuthorization := strings.Split(string(authBytes), ":") if len(basicAuthorization) != 2 { diff --git a/aws/data_source_aws_ecr_image.go b/aws/data_source_aws_ecr_image.go index 190cab37d4a..7b7ddcb552f 100644 --- a/aws/data_source_aws_ecr_image.go +++ b/aws/data_source_aws_ecr_image.go @@ -85,7 +85,7 @@ func dataSourceAwsEcrImageRead(d *schema.ResourceData, meta interface{}) error { return true }) if err != nil { - return fmt.Errorf("Error describing ECR images: %q", err) + return fmt.Errorf("Error describing ECR images: %w", err) } if len(imageDetails) == 0 { @@ -99,19 +99,19 @@ func dataSourceAwsEcrImageRead(d *schema.ResourceData, meta interface{}) error { d.SetId(aws.StringValue(image.ImageDigest)) if err = d.Set("registry_id", aws.StringValue(image.RegistryId)); err != nil { - return fmt.Errorf("failed to set registry_id: %s", err) + return fmt.Errorf("failed to set registry_id: %w", err) } if err = d.Set("image_digest", aws.StringValue(image.ImageDigest)); err != nil { - return fmt.Errorf("failed to set image_digest: %s", err) + return fmt.Errorf("failed to set image_digest: %w", err) } if err = d.Set("image_pushed_at", image.ImagePushedAt.Unix()); err != nil { - return fmt.Errorf("failed to set image_pushed_at: %s", err) + return fmt.Errorf("failed to set image_pushed_at: %w", err) } if err = d.Set("image_size_in_bytes", aws.Int64Value(image.ImageSizeInBytes)); err != nil { - return fmt.Errorf("failed to set image_size_in_bytes: %s", err) + return fmt.Errorf("failed to set image_size_in_bytes: %w", err) } if err := d.Set("image_tags", aws.StringValueSlice(image.ImageTags)); err != nil { - return fmt.Errorf("failed to set image_tags: %s", err) + return fmt.Errorf("failed to set image_tags: %w", err) } return nil diff --git a/aws/data_source_aws_ecs_cluster.go b/aws/data_source_aws_ecs_cluster.go index b4efe05a179..2b3e52aaf22 100644 --- a/aws/data_source_aws_ecs_cluster.go +++ b/aws/data_source_aws_ecs_cluster.go @@ -94,7 +94,7 @@ func dataSourceAwsEcsClusterRead(d *schema.ResourceData, meta interface{}) error d.Set("registered_container_instances_count", 
cluster.RegisteredContainerInstancesCount) if err := d.Set("setting", flattenEcsSettings(cluster.Settings)); err != nil { - return fmt.Errorf("error setting setting: %s", err) + return fmt.Errorf("error setting setting: %w", err) } return nil diff --git a/aws/data_source_aws_ecs_task_definition.go b/aws/data_source_aws_ecs_task_definition.go index 691a6dd9f27..27ce0ff4cdb 100644 --- a/aws/data_source_aws_ecs_task_definition.go +++ b/aws/data_source_aws_ecs_task_definition.go @@ -53,7 +53,7 @@ func dataSourceAwsEcsTaskDefinitionRead(d *schema.ResourceData, meta interface{} desc, err := conn.DescribeTaskDefinition(params) if err != nil { - return fmt.Errorf("Failed getting task definition %s %q", err, d.Get("task_definition").(string)) + return fmt.Errorf("Failed getting task definition %q: %w", d.Get("task_definition").(string), err) } taskDefinition := *desc.TaskDefinition diff --git a/aws/data_source_aws_efs_access_point.go b/aws/data_source_aws_efs_access_point.go index b74127d598b..48e25b75cd9 100644 --- a/aws/data_source_aws_efs_access_point.go +++ b/aws/data_source_aws_efs_access_point.go @@ -103,7 +103,7 @@ func dataSourceAwsEfsAccessPointRead(d *schema.ResourceData, meta interface{}) e AccessPointId: aws.String(d.Get("access_point_id").(string)), }) if err != nil { - return fmt.Errorf("Error reading EFS access point %s: %s", d.Id(), err) + return fmt.Errorf("Error reading EFS access point %s: %w", d.Id(), err) } if len(resp.AccessPoints) != 1 { return fmt.Errorf("Search returned %d results, please revise so only one is returned", len(resp.AccessPoints)) @@ -129,15 +129,15 @@ func dataSourceAwsEfsAccessPointRead(d *schema.ResourceData, meta interface{}) e d.Set("owner_id", ap.OwnerId) if err := d.Set("posix_user", flattenEfsAccessPointPosixUser(ap.PosixUser)); err != nil { - return fmt.Errorf("error setting posix user: %s", err) + return fmt.Errorf("error setting posix user: %w", err) } if err := d.Set("root_directory", flattenEfsAccessPointRootDirectory(ap.RootDirectory)); err != nil { - return fmt.Errorf("error setting root directory: %s", err) + return fmt.Errorf("error setting root directory: %w", err) } if err := d.Set("tags", keyvaluetags.EfsKeyValueTags(ap.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_efs_file_system.go b/aws/data_source_aws_efs_file_system.go index 953c72874ca..b544adac1fd 100644 --- a/aws/data_source_aws_efs_file_system.go +++ b/aws/data_source_aws_efs_file_system.go @@ -131,7 +131,7 @@ func dataSourceAwsEfsFileSystemRead(d *schema.ResourceData, meta interface{}) er } if err := d.Set("tags", keyvaluetags.EfsKeyValueTags(fs.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } res, err := efsconn.DescribeLifecycleConfiguration(&efs.DescribeLifecycleConfigurationInput{ diff --git a/aws/data_source_aws_efs_mount_target.go b/aws/data_source_aws_efs_mount_target.go index d24b02474e9..b69c054a176 100644 --- a/aws/data_source_aws_efs_mount_target.go +++ b/aws/data_source_aws_efs_mount_target.go @@ -78,7 +78,7 @@ func dataSourceAwsEfsMountTargetRead(d *schema.ResourceData, meta interface{}) e log.Printf("[DEBUG] Reading EFS Mount Target: %s", describeEfsOpts) resp, err := conn.DescribeMountTargets(describeEfsOpts) if err != nil { - return fmt.Errorf("Error retrieving EFS Mount 
Target: %s", err) + return fmt.Errorf("Error retrieving EFS Mount Target: %w", err) } if len(resp.MountTargets) != 1 { return fmt.Errorf("Search returned %d results, please revise so only one is returned", len(resp.MountTargets)) diff --git a/aws/data_source_aws_eip.go b/aws/data_source_aws_eip.go index 977f2bfdca9..03dad246c8e 100644 --- a/aws/data_source_aws_eip.go +++ b/aws/data_source_aws_eip.go @@ -112,7 +112,7 @@ func dataSourceAwsEipRead(d *schema.ResourceData, meta interface{}) error { resp, err := conn.DescribeAddresses(req) if err != nil { - return fmt.Errorf("error describing EC2 Address: %s", err) + return fmt.Errorf("error describing EC2 Address: %w", err) } if resp == nil || len(resp.Addresses) == 0 { return fmt.Errorf("no matching Elastic IP found") @@ -153,7 +153,7 @@ func dataSourceAwsEipRead(d *schema.ResourceData, meta interface{}) error { d.Set("customer_owned_ip", eip.CustomerOwnedIp) if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(eip.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_eks_cluster.go b/aws/data_source_aws_eks_cluster.go index 8931d21b9a8..db0a5b3e483 100644 --- a/aws/data_source_aws_eks_cluster.go +++ b/aws/data_source_aws_eks_cluster.go @@ -156,7 +156,7 @@ func dataSourceAwsEksClusterRead(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] Reading EKS Cluster: %s", input) output, err := conn.DescribeCluster(input) if err != nil { - return fmt.Errorf("error reading EKS Cluster (%s): %s", name, err) + return fmt.Errorf("error reading EKS Cluster (%s): %w", name, err) } cluster := output.Cluster @@ -168,17 +168,17 @@ func dataSourceAwsEksClusterRead(d *schema.ResourceData, meta interface{}) error d.Set("arn", cluster.Arn) if err := d.Set("certificate_authority", flattenEksCertificate(cluster.CertificateAuthority)); err != nil { - return fmt.Errorf("error setting certificate_authority: %s", err) + return fmt.Errorf("error setting certificate_authority: %w", err) } d.Set("created_at", aws.TimeValue(cluster.CreatedAt).String()) if err := d.Set("enabled_cluster_log_types", flattenEksEnabledLogTypes(cluster.Logging)); err != nil { - return fmt.Errorf("error setting enabled_cluster_log_types: %s", err) + return fmt.Errorf("error setting enabled_cluster_log_types: %w", err) } d.Set("endpoint", cluster.Endpoint) if err := d.Set("identity", flattenEksIdentity(cluster.Identity)); err != nil { - return fmt.Errorf("error setting identity: %s", err) + return fmt.Errorf("error setting identity: %w", err) } d.Set("name", cluster.Name) @@ -187,13 +187,13 @@ func dataSourceAwsEksClusterRead(d *schema.ResourceData, meta interface{}) error d.Set("status", cluster.Status) if err := d.Set("tags", keyvaluetags.EksKeyValueTags(cluster.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } d.Set("version", cluster.Version) if err := d.Set("vpc_config", flattenEksVpcConfigResponse(cluster.ResourcesVpcConfig)); err != nil { - return fmt.Errorf("error setting vpc_config: %s", err) + return fmt.Errorf("error setting vpc_config: %w", err) } if err := d.Set("kubernetes_network_config", flattenEksNetworkConfig(cluster.KubernetesNetworkConfig)); err != nil { diff --git a/aws/data_source_aws_eks_cluster_auth.go b/aws/data_source_aws_eks_cluster_auth.go index 984cd023beb..d2665fa0a5a 
100644 --- a/aws/data_source_aws_eks_cluster_auth.go +++ b/aws/data_source_aws_eks_cluster_auth.go @@ -33,11 +33,11 @@ func dataSourceAwsEksClusterAuthRead(d *schema.ResourceData, meta interface{}) e name := d.Get("name").(string) generator, err := token.NewGenerator(false, false) if err != nil { - return fmt.Errorf("error getting token generator: %v", err) + return fmt.Errorf("error getting token generator: %w", err) } token, err := generator.GetWithSTS(name, conn) if err != nil { - return fmt.Errorf("error getting token: %v", err) + return fmt.Errorf("error getting token: %w", err) } d.SetId(name) diff --git a/aws/data_source_aws_elastic_beanstalk_application.go b/aws/data_source_aws_elastic_beanstalk_application.go index d852a48901b..a715d4e8be0 100644 --- a/aws/data_source_aws_elastic_beanstalk_application.go +++ b/aws/data_source_aws_elastic_beanstalk_application.go @@ -63,7 +63,7 @@ func dataSourceAwsElasticBeanstalkApplicationRead(d *schema.ResourceData, meta i ApplicationNames: []*string{aws.String(name)}, }) if err != nil { - return fmt.Errorf("Error describing Applications (%s): %s", name, err) + return fmt.Errorf("Error describing Applications (%s): %w", name, err) } if len(resp.Applications) > 1 || len(resp.Applications) < 1 { diff --git a/aws/data_source_aws_elasticsearch_domain.go b/aws/data_source_aws_elasticsearch_domain.go index 0d16b2516f5..578ff335234 100644 --- a/aws/data_source_aws_elasticsearch_domain.go +++ b/aws/data_source_aws_elasticsearch_domain.go @@ -285,7 +285,7 @@ func dataSourceAwsElasticSearchDomainRead(d *schema.ResourceData, meta interface resp, err := esconn.DescribeElasticsearchDomain(req) if err != nil { - return fmt.Errorf("error querying elasticsearch_domain: %s", err) + return fmt.Errorf("error querying elasticsearch_domain: %w", err) } if resp.DomainStatus == nil { @@ -296,16 +296,16 @@ func dataSourceAwsElasticSearchDomainRead(d *schema.ResourceData, meta interface d.SetId(aws.StringValue(ds.ARN)) - if ds.AccessPolicies != nil && *ds.AccessPolicies != "" { + if ds.AccessPolicies != nil && aws.StringValue(ds.AccessPolicies) != "" { policies, err := structure.NormalizeJsonString(*ds.AccessPolicies) if err != nil { - return fmt.Errorf("access policies contain an invalid JSON: %s", err) + return fmt.Errorf("access policies contain an invalid JSON: %w", err) } d.Set("access_policies", policies) } if err := d.Set("advanced_options", pointersMapToStringList(ds.AdvancedOptions)); err != nil { - return fmt.Errorf("error setting advanced_options: %s", err) + return fmt.Errorf("error setting advanced_options: %w", err) } d.Set("arn", ds.ARN) @@ -318,33 +318,33 @@ func dataSourceAwsElasticSearchDomainRead(d *schema.ResourceData, meta interface } if err := d.Set("ebs_options", flattenESEBSOptions(ds.EBSOptions)); err != nil { - return fmt.Errorf("error setting ebs_options: %s", err) + return fmt.Errorf("error setting ebs_options: %w", err) } if err := d.Set("encryption_at_rest", flattenESEncryptAtRestOptions(ds.EncryptionAtRestOptions)); err != nil { - return fmt.Errorf("error setting encryption_at_rest: %s", err) + return fmt.Errorf("error setting encryption_at_rest: %w", err) } if err := d.Set("node_to_node_encryption", flattenESNodeToNodeEncryptionOptions(ds.NodeToNodeEncryptionOptions)); err != nil { - return fmt.Errorf("error setting node_to_node_encryption: %s", err) + return fmt.Errorf("error setting node_to_node_encryption: %w", err) } if err := d.Set("cluster_config", flattenESClusterConfig(ds.ElasticsearchClusterConfig)); err != nil { - return 
fmt.Errorf("error setting cluster_config: %s", err) + return fmt.Errorf("error setting cluster_config: %w", err) } if err := d.Set("snapshot_options", flattenESSnapshotOptions(ds.SnapshotOptions)); err != nil { - return fmt.Errorf("error setting snapshot_options: %s", err) + return fmt.Errorf("error setting snapshot_options: %w", err) } if ds.VPCOptions != nil { if err := d.Set("vpc_options", flattenESVPCDerivedInfo(ds.VPCOptions)); err != nil { - return fmt.Errorf("error setting vpc_options: %s", err) + return fmt.Errorf("error setting vpc_options: %w", err) } endpoints := pointersMapToStringList(ds.Endpoints) if err := d.Set("endpoint", endpoints["vpc"]); err != nil { - return fmt.Errorf("error setting endpoint: %s", err) + return fmt.Errorf("error setting endpoint: %w", err) } d.Set("kibana_endpoint", getKibanaEndpoint(d)) if ds.Endpoint != nil { @@ -377,7 +377,7 @@ func dataSourceAwsElasticSearchDomainRead(d *schema.ResourceData, meta interface d.Set("elasticsearch_version", ds.ElasticsearchVersion) if err := d.Set("cognito_options", flattenESCognitoOptions(ds.CognitoOptions)); err != nil { - return fmt.Errorf("error setting cognito_options: %s", err) + return fmt.Errorf("error setting cognito_options: %w", err) } d.Set("created", ds.Created) @@ -388,11 +388,11 @@ func dataSourceAwsElasticSearchDomainRead(d *schema.ResourceData, meta interface tags, err := keyvaluetags.ElasticsearchserviceListTags(esconn, d.Id()) if err != nil { - return fmt.Errorf("error listing tags for Elasticsearch Cluster (%s): %s", d.Id(), err) + return fmt.Errorf("error listing tags for Elasticsearch Cluster (%s): %w", d.Id(), err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_elb.go b/aws/data_source_aws_elb.go index 73d41056dfa..03b0b78281b 100644 --- a/aws/data_source_aws_elb.go +++ b/aws/data_source_aws_elb.go @@ -207,7 +207,7 @@ func dataSourceAwsElbRead(d *schema.ResourceData, meta interface{}) error { log.Printf("[DEBUG] Reading ELB: %s", input) resp, err := elbconn.DescribeLoadBalancers(input) if err != nil { - return fmt.Errorf("Error retrieving LB: %s", err) + return fmt.Errorf("Error retrieving LB: %w", err) } if len(resp.LoadBalancerDescriptions) != 1 { return fmt.Errorf("Search returned %d results, please revise so only one is returned", len(resp.LoadBalancerDescriptions)) diff --git a/aws/internal/naming/naming.go b/aws/internal/naming/naming.go index 68f414c2f65..38425a507de 100644 --- a/aws/internal/naming/naming.go +++ b/aws/internal/naming/naming.go @@ -64,7 +64,7 @@ func TestCheckResourceAttrNameFromPrefix(resourceName string, attributeName stri attributeMatch, err := regexp.Compile(nameRegexpPattern) if err != nil { - return fmt.Errorf("Unable to compile name regexp (%s): %s", nameRegexpPattern, err) + return fmt.Errorf("Unable to compile name regexp (%s): %w", nameRegexpPattern, err) } return resource.TestMatchResourceAttr(resourceName, attributeName, attributeMatch)(s) diff --git a/aws/internal/service/cloudformation/waiter/status.go b/aws/internal/service/cloudformation/waiter/status.go index cdfa0fd0769..25dcfc2e5e4 100644 --- a/aws/internal/service/cloudformation/waiter/status.go +++ b/aws/internal/service/cloudformation/waiter/status.go @@ -66,7 +66,7 @@ func StackSetOperationStatus(conn *cloudformation.CloudFormation, stackSetName, listOperationResultsOutput, err := 
conn.ListStackSetOperationResults(listOperationResultsInput) if err != nil { - return output.StackSetOperation, cloudformation.StackSetOperationStatusFailed, fmt.Errorf("error listing Operation (%s) errors: %s", operationID, err) + return output.StackSetOperation, cloudformation.StackSetOperationStatusFailed, fmt.Errorf("error listing Operation (%s) errors: %w", operationID, err) } if listOperationResultsOutput == nil { diff --git a/aws/internal/service/datasync/waiter/waiter.go b/aws/internal/service/datasync/waiter/waiter.go index 311a4a49872..031f3d6f64d 100644 --- a/aws/internal/service/datasync/waiter/waiter.go +++ b/aws/internal/service/datasync/waiter/waiter.go @@ -1,6 +1,7 @@ package waiter import ( + "errors" "fmt" "time" @@ -30,15 +31,12 @@ func TaskStatusAvailable(conn *datasync.DataSync, arn string, timeout time.Durat if err != nil && output != nil && output.ErrorCode != nil && output.ErrorDetail != nil { newErr := fmt.Errorf("%s: %s", aws.StringValue(output.ErrorCode), aws.StringValue(output.ErrorDetail)) - switch e := err.(type) { - case *resource.TimeoutError: - if e.LastError == nil { - e.LastError = newErr - } - case *resource.UnexpectedStateError: - if e.LastError == nil { - e.LastError = newErr - } + var te *resource.TimeoutError + var use *resource.UnexpectedStateError + if ok := errors.As(err, &te); ok && te.LastError == nil { + te.LastError = newErr + } else if ok := errors.As(err, &use); ok && use.LastError == nil { + use.LastError = newErr } } diff --git a/aws/internal/service/eks/token/arn.go b/aws/internal/service/eks/token/arn.go index 1ba2f2787b7..c95d192e25a 100644 --- a/aws/internal/service/eks/token/arn.go +++ b/aws/internal/service/eks/token/arn.go @@ -4,6 +4,7 @@ https://github.com/kubernetes-sigs/aws-iam-authenticator/blob/7547c74e660f8d34d9 With the following modifications: - Rename package from arn to token for simplication + - Ignore errorlint reports */ package token @@ -28,7 +29,7 @@ import ( func Canonicalize(arn string) (string, error) { parsed, err := awsarn.Parse(arn) if err != nil { - return "", fmt.Errorf("arn '%s' is invalid: '%v'", arn, err) + return "", fmt.Errorf("arn '%s' is invalid: '%v'", arn, err) // nolint:errorlint } if err := checkPartition(parsed.Partition); err != nil { diff --git a/aws/internal/service/eks/token/token.go b/aws/internal/service/eks/token/token.go index ab37d14c00e..2ab7e0cb5a9 100644 --- a/aws/internal/service/eks/token/token.go +++ b/aws/internal/service/eks/token/token.go @@ -9,6 +9,7 @@ With the following modifications: - Use *sts.STS instead of stsiface.STSAPI in Generator interface and GetWithSTS implementation - Hard copy and use local Canonicalize implementation instead of "sigs.k8s.io/aws-iam-authenticator/pkg/arn" - Fix staticcheck reports + - Ignore errorlint reports */ /* @@ -308,7 +309,7 @@ func (v tokenVerifier) Verify(token string) (*Identity, error) { response, err := v.client.Do(req) if err != nil { // special case to avoid printing the full URL if possible - if urlErr, ok := err.(*url.Error); ok { + if urlErr, ok := err.(*url.Error); ok { // nolint:errorlint return nil, NewSTSError(fmt.Sprintf("error during GET: %v", urlErr.Err)) } return nil, NewSTSError(fmt.Sprintf("error during GET: %v", err)) diff --git a/aws/internal/service/eks/token/token_test.go b/aws/internal/service/eks/token/token_test.go index 0595611423e..114db9f156d 100644 --- a/aws/internal/service/eks/token/token_test.go +++ b/aws/internal/service/eks/token/token_test.go @@ -5,6 +5,7 @@ 
https://github.com/kubernetes-sigs/aws-iam-authenticator/blob/7547c74e660f8d34d9 With the following modifications: - Fix staticcheck reports + - Ignore errorlint reports */ package token @@ -49,7 +50,7 @@ func errorContains(t *testing.T, err error, expectedErr string) { func assertSTSError(t *testing.T, err error) { t.Helper() - if _, ok := err.(STSError); !ok { + if _, ok := err.(STSError); !ok { // nolint:errorlint t.Errorf("Expected err %v to be an STSError but was not", err) } } diff --git a/aws/internal/service/route53/waiter/waiter.go b/aws/internal/service/route53/waiter/waiter.go index 0c9647f9ca3..c6c8dfd4a8d 100644 --- a/aws/internal/service/route53/waiter/waiter.go +++ b/aws/internal/service/route53/waiter/waiter.go @@ -1,6 +1,7 @@ package waiter import ( + "errors" "fmt" "time" @@ -48,15 +49,12 @@ func KeySigningKeyStatusUpdated(conn *route53.Route53, hostedZoneID string, name if err != nil && output != nil && output.Status != nil && output.StatusMessage != nil { newErr := fmt.Errorf("%s: %s", aws.StringValue(output.Status), aws.StringValue(output.StatusMessage)) - switch e := err.(type) { - case *resource.TimeoutError: - if e.LastError == nil { - e.LastError = newErr - } - case *resource.UnexpectedStateError: - if e.LastError == nil { - e.LastError = newErr - } + var te *resource.TimeoutError + var use *resource.UnexpectedStateError + if ok := errors.As(err, &te); ok && te.LastError == nil { + te.LastError = newErr + } else if ok := errors.As(err, &use); ok && use.LastError == nil { + use.LastError = newErr } } diff --git a/aws/internal/tfresource/errors.go b/aws/internal/tfresource/errors.go index ae1d006ff84..47dd6137b3f 100644 --- a/aws/internal/tfresource/errors.go +++ b/aws/internal/tfresource/errors.go @@ -19,6 +19,7 @@ func NotFound(err error) bool { // * err is of type resource.TimeoutError // * TimeoutError.LastError is nil func TimedOut(err error) bool { - timeoutErr, ok := err.(*resource.TimeoutError) + // This explicitly does *not* match wrapped TimeoutErrors + timeoutErr, ok := err.(*resource.TimeoutError) // nolint:errorlint return ok && timeoutErr.LastError == nil } diff --git a/aws/opsworks_layers.go b/aws/opsworks_layers.go index b41ef254ce0..cd9afd2e9b1 100644 --- a/aws/opsworks_layers.go +++ b/aws/opsworks_layers.go @@ -357,7 +357,7 @@ func (lt *opsworksLayerType) Read(d *schema.ResourceData, client *opsworks.OpsWo } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/docs/contributing/contribution-checklists.md b/docs/contributing/contribution-checklists.md index 15195154c19..b45bcabe38c 100644 --- a/docs/contributing/contribution-checklists.md +++ b/docs/contributing/contribution-checklists.md @@ -308,7 +308,7 @@ More details about this code generation, including fixes for potential error mes ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig if err := d.Set("tags", keyvaluetags.EksKeyValueTags(cluster.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } ``` @@ -325,7 +325,7 @@ More details about this code generation, including fixes for potential error mes } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } ``` From 
From ff2e30e72071d195ec06b7e2b2e39345f1cf7f95 Mon Sep 17 00:00:00 2001
From: Graham Davison
Date: Mon, 8 Feb 2021 21:55:38 -0800
Subject: [PATCH 1037/1212] Fixes errorlint reports for data sources T-Z

---
 aws/data_source_aws_transfer_server.go        |  2 +-
 aws/data_source_aws_vpc.go                    |  4 ++--
 aws/data_source_aws_vpc_dhcp_options.go       | 10 +++++-----
 aws/data_source_aws_vpc_endpoint.go           | 20 +++++++++----------
 aws/data_source_aws_vpc_endpoint_service.go   |  8 ++++----
 aws/data_source_aws_vpc_peering_connection.go |  2 +-
 aws/data_source_aws_vpcs.go                   |  2 +-
 aws/data_source_aws_vpn_gateway.go            |  2 +-
 aws/data_source_aws_waf_ipset.go              |  2 +-
 aws/data_source_aws_waf_rate_based_rule.go    |  2 +-
 aws/data_source_aws_waf_rule.go               |  2 +-
 aws/data_source_aws_waf_web_acl.go            |  2 +-
 aws/data_source_aws_wafregional_ipset.go      |  2 +-
 ..._source_aws_wafregional_rate_based_rule.go |  2 +-
 aws/data_source_aws_wafregional_rule.go       |  2 +-
 aws/data_source_aws_wafregional_web_acl.go    |  2 +-
 aws/data_source_aws_wafv2_ip_set.go           |  6 +++---
 ...data_source_aws_wafv2_regex_pattern_set.go |  6 +++---
 aws/data_source_aws_wafv2_rule_group.go       |  2 +-
 aws/data_source_aws_wafv2_web_acl.go          |  2 +-
 aws/data_source_aws_workspaces_bundle.go      |  6 +++---
 aws/data_source_aws_workspaces_directory.go   | 16 +++++++--------
 aws/data_source_aws_workspaces_workspace.go   |  6 +++---
 23 files changed, 55 insertions(+), 55 deletions(-)

diff --git a/aws/data_source_aws_transfer_server.go b/aws/data_source_aws_transfer_server.go
index bede4ab87da..0ac5364113c 100644
--- a/aws/data_source_aws_transfer_server.go
+++ b/aws/data_source_aws_transfer_server.go
@@ -57,7 +57,7 @@ func dataSourceAwsTransferServerRead(d *schema.ResourceData, meta interface{}) e
 	resp, err := conn.DescribeServer(input)

 	if err != nil {
-		return fmt.Errorf("error describing Transfer Server (%s): %s", serverID, err)
+		return fmt.Errorf("error describing Transfer Server (%s): %w", serverID, err)
 	}

 	endpoint := meta.(*AWSClient).RegionalHostname(fmt.Sprintf("%s.server.transfer", serverID))
diff --git a/aws/data_source_aws_vpc.go b/aws/data_source_aws_vpc.go
index f5ac28cf15b..713ca7b35d0 100644
--- a/aws/data_source_aws_vpc.go
+++ b/aws/data_source_aws_vpc.go
@@ -184,7 +184,7 @@ func dataSourceAwsVpcRead(d *schema.ResourceData, meta interface{}) error {
 	d.Set("state", vpc.State)

 	if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(vpc.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil {
-		return fmt.Errorf("error setting tags: %s", err)
+		return fmt.Errorf("error setting tags: %w", err)
 	}

 	d.Set("owner_id", vpc.OwnerId)
@@ -208,7 +208,7 @@ func dataSourceAwsVpcRead(d *schema.ResourceData, meta interface{}) error {
 		cidrAssociations = append(cidrAssociations, association)
 	}
 	if err := d.Set("cidr_block_associations", cidrAssociations); err != nil {
-		return fmt.Errorf("error setting cidr_block_associations: %s", err)
+		return fmt.Errorf("error setting cidr_block_associations: %w", err)
 	}

 	if vpc.Ipv6CidrBlockAssociationSet != nil {
diff --git a/aws/data_source_aws_vpc_dhcp_options.go b/aws/data_source_aws_vpc_dhcp_options.go
index e7f6f068071..2ba6914a170 100644
--- a/aws/data_source_aws_vpc_dhcp_options.go
+++ b/aws/data_source_aws_vpc_dhcp_options.go
@@ -84,7 +84,7 @@ func dataSourceAwsVpcDhcpOptionsRead(d *schema.ResourceData, meta interface{}) e
 		if isNoSuchDhcpOptionIDErr(err) {
 			return errors.New("No matching EC2 DHCP Options found")
 		}
-		return fmt.Errorf("error reading EC2 DHCP Options: %s", err)
+		return fmt.Errorf("error reading EC2 DHCP Options: %w", err)
 	}

 	if len(output.DhcpOptions) == 0 {
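Many of these messages interpolate both an identifier and the underlying error, as in "error describing Transfer Server (%s): %w" above. Only the error argument moves to `%w`; plain identifiers stay on `%s`. At the time of these patches `fmt.Errorf` honored at most one `%w` per format string (Go 1.20 later allowed several). A small sketch with a hypothetical server ID and cause:

```go
package main

import (
	"errors"
	"fmt"
)

// errAccessDenied is a hypothetical cause, standing in for an SDK error.
var errAccessDenied = errors.New("AccessDeniedException")

func describe(id string) error {
	// %s formats the plain string; %w wraps the error being propagated.
	return fmt.Errorf("error describing Transfer Server (%s): %w", id, errAccessDenied)
}

func main() {
	err := describe("s-1234567890abcdef0")
	fmt.Println(err)
	// error describing Transfer Server (s-1234567890abcdef0): AccessDeniedException
	fmt.Println(errors.Is(err, errAccessDenied)) // true: the cause survives formatting
}
```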
@@ -114,23 +114,23 @@ func dataSourceAwsVpcDhcpOptionsRead(d *schema.ResourceData, meta interface{}) e d.Set(tfKey, aws.StringValue(dhcpConfiguration.Values[0].Value)) case "domain-name-servers": if err := d.Set(tfKey, flattenEc2AttributeValues(dhcpConfiguration.Values)); err != nil { - return fmt.Errorf("error setting %s: %s", tfKey, err) + return fmt.Errorf("error setting %s: %w", tfKey, err) } case "netbios-name-servers": if err := d.Set(tfKey, flattenEc2AttributeValues(dhcpConfiguration.Values)); err != nil { - return fmt.Errorf("error setting %s: %s", tfKey, err) + return fmt.Errorf("error setting %s: %w", tfKey, err) } case "netbios-node-type": d.Set(tfKey, aws.StringValue(dhcpConfiguration.Values[0].Value)) case "ntp-servers": if err := d.Set(tfKey, flattenEc2AttributeValues(dhcpConfiguration.Values)); err != nil { - return fmt.Errorf("error setting %s: %s", tfKey, err) + return fmt.Errorf("error setting %s: %w", tfKey, err) } } } if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(output.DhcpOptions[0].Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } d.Set("owner_id", output.DhcpOptions[0].OwnerId) diff --git a/aws/data_source_aws_vpc_endpoint.go b/aws/data_source_aws_vpc_endpoint.go index 1fccf8e9d52..59ddd1b1f23 100644 --- a/aws/data_source_aws_vpc_endpoint.go +++ b/aws/data_source_aws_vpc_endpoint.go @@ -147,7 +147,7 @@ func dataSourceAwsVpcEndpointRead(d *schema.ResourceData, meta interface{}) erro log.Printf("[DEBUG] Reading VPC Endpoint: %s", req) respVpce, err := conn.DescribeVpcEndpoints(req) if err != nil { - return fmt.Errorf("error reading VPC Endpoint: %s", err) + return fmt.Errorf("error reading VPC Endpoint: %w", err) } if respVpce == nil || len(respVpce.VpcEndpoints) == 0 { return fmt.Errorf("no matching VPC Endpoint found") @@ -179,7 +179,7 @@ func dataSourceAwsVpcEndpointRead(d *schema.ResourceData, meta interface{}) erro }), }) if err != nil { - return fmt.Errorf("error reading Prefix List (%s): %s", serviceName, err) + return fmt.Errorf("error reading Prefix List (%s): %w", serviceName, err) } if respPl == nil || len(respPl.PrefixLists) == 0 { d.Set("cidr_blocks", []interface{}{}) @@ -191,41 +191,41 @@ func dataSourceAwsVpcEndpointRead(d *schema.ResourceData, meta interface{}) erro d.Set("prefix_list_id", pl.PrefixListId) err = d.Set("cidr_blocks", flattenStringList(pl.Cidrs)) if err != nil { - return fmt.Errorf("error setting cidr_blocks: %s", err) + return fmt.Errorf("error setting cidr_blocks: %w", err) } } err = d.Set("dns_entry", flattenVpcEndpointDnsEntries(vpce.DnsEntries)) if err != nil { - return fmt.Errorf("error setting dns_entry: %s", err) + return fmt.Errorf("error setting dns_entry: %w", err) } err = d.Set("network_interface_ids", flattenStringSet(vpce.NetworkInterfaceIds)) if err != nil { - return fmt.Errorf("error setting network_interface_ids: %s", err) + return fmt.Errorf("error setting network_interface_ids: %w", err) } d.Set("owner_id", vpce.OwnerId) policy, err := structure.NormalizeJsonString(aws.StringValue(vpce.PolicyDocument)) if err != nil { - return fmt.Errorf("policy contains an invalid JSON: %s", err) + return fmt.Errorf("policy contains an invalid JSON: %w", err) } d.Set("policy", policy) d.Set("private_dns_enabled", vpce.PrivateDnsEnabled) err = d.Set("route_table_ids", flattenStringSet(vpce.RouteTableIds)) if err != nil { - return fmt.Errorf("error setting route_table_ids: %s", err) + return 
fmt.Errorf("error setting route_table_ids: %w", err) } d.Set("requester_managed", vpce.RequesterManaged) err = d.Set("security_group_ids", flattenVpcEndpointSecurityGroupIds(vpce.Groups)) if err != nil { - return fmt.Errorf("error setting security_group_ids: %s", err) + return fmt.Errorf("error setting security_group_ids: %w", err) } err = d.Set("subnet_ids", flattenStringSet(vpce.SubnetIds)) if err != nil { - return fmt.Errorf("error setting subnet_ids: %s", err) + return fmt.Errorf("error setting subnet_ids: %w", err) } err = d.Set("tags", keyvaluetags.Ec2KeyValueTags(vpce.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()) if err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } // VPC endpoints don't have types in GovCloud, so set type to default if empty if vpceType := aws.StringValue(vpce.VpcEndpointType); vpceType == "" { diff --git a/aws/data_source_aws_vpc_endpoint_service.go b/aws/data_source_aws_vpc_endpoint_service.go index e03fc0aafc1..337534077ca 100644 --- a/aws/data_source_aws_vpc_endpoint_service.go +++ b/aws/data_source_aws_vpc_endpoint_service.go @@ -111,7 +111,7 @@ func dataSourceAwsVpcEndpointServiceRead(d *schema.ResourceData, meta interface{ log.Printf("[DEBUG] Reading VPC Endpoint Service: %s", req) resp, err := conn.DescribeVpcEndpointServices(req) if err != nil { - return fmt.Errorf("error reading VPC Endpoint Service (%s): %s", serviceName, err) + return fmt.Errorf("error reading VPC Endpoint Service (%s): %w", serviceName, err) } if resp == nil || (len(resp.ServiceNames) == 0 && len(resp.ServiceDetails) == 0) { @@ -174,11 +174,11 @@ func dataSourceAwsVpcEndpointServiceRead(d *schema.ResourceData, meta interface{ d.Set("acceptance_required", sd.AcceptanceRequired) err = d.Set("availability_zones", flattenStringSet(sd.AvailabilityZones)) if err != nil { - return fmt.Errorf("error setting availability_zones: %s", err) + return fmt.Errorf("error setting availability_zones: %w", err) } err = d.Set("base_endpoint_dns_names", flattenStringSet(sd.BaseEndpointDnsNames)) if err != nil { - return fmt.Errorf("error setting base_endpoint_dns_names: %s", err) + return fmt.Errorf("error setting base_endpoint_dns_names: %w", err) } d.Set("manages_vpc_endpoints", sd.ManagesVpcEndpoints) d.Set("owner", sd.Owner) @@ -188,7 +188,7 @@ func dataSourceAwsVpcEndpointServiceRead(d *schema.ResourceData, meta interface{ d.Set("service_type", sd.ServiceType[0].ServiceType) err = d.Set("tags", keyvaluetags.Ec2KeyValueTags(sd.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()) if err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } d.Set("vpc_endpoint_policy_supported", sd.VpcEndpointPolicySupported) diff --git a/aws/data_source_aws_vpc_peering_connection.go b/aws/data_source_aws_vpc_peering_connection.go index a77747bd07a..186ff7a43fa 100644 --- a/aws/data_source_aws_vpc_peering_connection.go +++ b/aws/data_source_aws_vpc_peering_connection.go @@ -188,7 +188,7 @@ func dataSourceAwsVpcPeeringConnectionRead(d *schema.ResourceData, meta interfac } d.Set("peer_region", pcx.AccepterVpcInfo.Region) if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(pcx.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } if pcx.AccepterVpcInfo.PeeringOptions != nil { diff --git a/aws/data_source_aws_vpcs.go b/aws/data_source_aws_vpcs.go index 
b3e50a988b4..9f1057f7fe1 100644 --- a/aws/data_source_aws_vpcs.go +++ b/aws/data_source_aws_vpcs.go @@ -68,7 +68,7 @@ func dataSourceAwsVpcsRead(d *schema.ResourceData, meta interface{}) error { d.SetId(meta.(*AWSClient).region) if err := d.Set("ids", vpcs); err != nil { - return fmt.Errorf("Error setting vpc ids: %s", err) + return fmt.Errorf("error setting vpc ids: %w", err) } return nil diff --git a/aws/data_source_aws_vpn_gateway.go b/aws/data_source_aws_vpn_gateway.go index de4bd84163b..f7f13016454 100644 --- a/aws/data_source_aws_vpn_gateway.go +++ b/aws/data_source_aws_vpn_gateway.go @@ -114,7 +114,7 @@ func dataSourceAwsVpnGatewayRead(d *schema.ResourceData, meta interface{}) error d.Set("amazon_side_asn", strconv.FormatInt(aws.Int64Value(vgw.AmazonSideAsn), 10)) if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(vgw.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } for _, attachment := range vgw.VpcAttachments { diff --git a/aws/data_source_aws_waf_ipset.go b/aws/data_source_aws_waf_ipset.go index b26a7e26c9f..f09a12132fe 100644 --- a/aws/data_source_aws_waf_ipset.go +++ b/aws/data_source_aws_waf_ipset.go @@ -31,7 +31,7 @@ func dataSourceAWSWafIpSetRead(d *schema.ResourceData, meta interface{}) error { for { output, err := conn.ListIPSets(input) if err != nil { - return fmt.Errorf("Error reading WAF IP sets: %s", err) + return fmt.Errorf("Error reading WAF IP sets: %w", err) } for _, ipset := range output.IPSets { if aws.StringValue(ipset.Name) == name { diff --git a/aws/data_source_aws_waf_rate_based_rule.go b/aws/data_source_aws_waf_rate_based_rule.go index 1e59b0c02b9..0752cd9cad7 100644 --- a/aws/data_source_aws_waf_rate_based_rule.go +++ b/aws/data_source_aws_waf_rate_based_rule.go @@ -31,7 +31,7 @@ func dataSourceAwsWafRateBasedRuleRead(d *schema.ResourceData, meta interface{}) for { output, err := conn.ListRateBasedRules(input) if err != nil { - return fmt.Errorf("error reading WAF Rate Based Rules: %s", err) + return fmt.Errorf("error reading WAF Rate Based Rules: %w", err) } for _, rule := range output.Rules { if aws.StringValue(rule.Name) == name { diff --git a/aws/data_source_aws_waf_rule.go b/aws/data_source_aws_waf_rule.go index 37a77aa47fe..cd6d50f84e6 100644 --- a/aws/data_source_aws_waf_rule.go +++ b/aws/data_source_aws_waf_rule.go @@ -31,7 +31,7 @@ func dataSourceAwsWafRuleRead(d *schema.ResourceData, meta interface{}) error { for { output, err := conn.ListRules(input) if err != nil { - return fmt.Errorf("error reading WAF Rules: %s", err) + return fmt.Errorf("error reading WAF Rules: %w", err) } for _, rule := range output.Rules { if aws.StringValue(rule.Name) == name { diff --git a/aws/data_source_aws_waf_web_acl.go b/aws/data_source_aws_waf_web_acl.go index 413e8b71cd2..cc95c47a198 100644 --- a/aws/data_source_aws_waf_web_acl.go +++ b/aws/data_source_aws_waf_web_acl.go @@ -31,7 +31,7 @@ func dataSourceAwsWafWebAclRead(d *schema.ResourceData, meta interface{}) error for { output, err := conn.ListWebACLs(input) if err != nil { - return fmt.Errorf("error reading web ACLs: %s", err) + return fmt.Errorf("error reading web ACLs: %w", err) } for _, acl := range output.WebACLs { if aws.StringValue(acl.Name) == name { diff --git a/aws/data_source_aws_wafregional_ipset.go b/aws/data_source_aws_wafregional_ipset.go index 498709c1ee3..dcd2d9aff4d 100644 --- a/aws/data_source_aws_wafregional_ipset.go +++ b/aws/data_source_aws_wafregional_ipset.go @@ 
-31,7 +31,7 @@ func dataSourceAWSWafRegionalIpSetRead(d *schema.ResourceData, meta interface{}) for { output, err := conn.ListIPSets(input) if err != nil { - return fmt.Errorf("Error reading WAF Regional IP sets: %s", err) + return fmt.Errorf("Error reading WAF Regional IP sets: %w", err) } for _, ipset := range output.IPSets { if aws.StringValue(ipset.Name) == name { diff --git a/aws/data_source_aws_wafregional_rate_based_rule.go b/aws/data_source_aws_wafregional_rate_based_rule.go index fb779d313dc..eebfb851012 100644 --- a/aws/data_source_aws_wafregional_rate_based_rule.go +++ b/aws/data_source_aws_wafregional_rate_based_rule.go @@ -31,7 +31,7 @@ func dataSourceAwsWafRegionalRateBasedRuleRead(d *schema.ResourceData, meta inte for { output, err := conn.ListRateBasedRules(input) if err != nil { - return fmt.Errorf("error reading WAF Rate Based Rules: %s", err) + return fmt.Errorf("error reading WAF Rate Based Rules: %w", err) } for _, rule := range output.Rules { if aws.StringValue(rule.Name) == name { diff --git a/aws/data_source_aws_wafregional_rule.go b/aws/data_source_aws_wafregional_rule.go index bdb39a57e2b..39cad09bc7b 100644 --- a/aws/data_source_aws_wafregional_rule.go +++ b/aws/data_source_aws_wafregional_rule.go @@ -31,7 +31,7 @@ func dataSourceAwsWafRegionalRuleRead(d *schema.ResourceData, meta interface{}) for { output, err := conn.ListRules(input) if err != nil { - return fmt.Errorf("error reading WAF Rule: %s", err) + return fmt.Errorf("error reading WAF Rule: %w", err) } for _, rule := range output.Rules { if aws.StringValue(rule.Name) == name { diff --git a/aws/data_source_aws_wafregional_web_acl.go b/aws/data_source_aws_wafregional_web_acl.go index 620434a8784..ea08645f0a1 100644 --- a/aws/data_source_aws_wafregional_web_acl.go +++ b/aws/data_source_aws_wafregional_web_acl.go @@ -31,7 +31,7 @@ func dataSourceAwsWafRegionalWebAclRead(d *schema.ResourceData, meta interface{} for { output, err := conn.ListWebACLs(input) if err != nil { - return fmt.Errorf("error reading web ACLs: %s", err) + return fmt.Errorf("error reading web ACLs: %w", err) } for _, acl := range output.WebACLs { if aws.StringValue(acl.Name) == name { diff --git a/aws/data_source_aws_wafv2_ip_set.go b/aws/data_source_aws_wafv2_ip_set.go index c6c56ebef8b..4f65b23e33a 100644 --- a/aws/data_source_aws_wafv2_ip_set.go +++ b/aws/data_source_aws_wafv2_ip_set.go @@ -60,7 +60,7 @@ func dataSourceAwsWafv2IPSetRead(d *schema.ResourceData, meta interface{}) error for { resp, err := conn.ListIPSets(input) if err != nil { - return fmt.Errorf("Error reading WAFv2 IPSets: %s", err) + return fmt.Errorf("Error reading WAFv2 IPSets: %w", err) } if resp == nil || resp.IPSets == nil { @@ -91,7 +91,7 @@ func dataSourceAwsWafv2IPSetRead(d *schema.ResourceData, meta interface{}) error }) if err != nil { - return fmt.Errorf("Error reading WAFv2 IPSet: %s", err) + return fmt.Errorf("Error reading WAFv2 IPSet: %w", err) } if resp == nil || resp.IPSet == nil { @@ -104,7 +104,7 @@ func dataSourceAwsWafv2IPSetRead(d *schema.ResourceData, meta interface{}) error d.Set("ip_address_version", aws.StringValue(resp.IPSet.IPAddressVersion)) if err := d.Set("addresses", flattenStringList(resp.IPSet.Addresses)); err != nil { - return fmt.Errorf("Error setting addresses: %s", err) + return fmt.Errorf("error setting addresses: %w", err) } return nil diff --git a/aws/data_source_aws_wafv2_regex_pattern_set.go b/aws/data_source_aws_wafv2_regex_pattern_set.go index 1285c67f789..d72020f1571 100644 --- 
a/aws/data_source_aws_wafv2_regex_pattern_set.go +++ b/aws/data_source_aws_wafv2_regex_pattern_set.go @@ -63,7 +63,7 @@ func dataSourceAwsWafv2RegexPatternSetRead(d *schema.ResourceData, meta interfac for { resp, err := conn.ListRegexPatternSets(input) if err != nil { - return fmt.Errorf("Error reading WAFv2 RegexPatternSets: %s", err) + return fmt.Errorf("Error reading WAFv2 RegexPatternSets: %w", err) } if resp == nil || resp.RegexPatternSets == nil { @@ -94,7 +94,7 @@ func dataSourceAwsWafv2RegexPatternSetRead(d *schema.ResourceData, meta interfac }) if err != nil { - return fmt.Errorf("Error reading WAFv2 RegexPatternSet: %s", err) + return fmt.Errorf("Error reading WAFv2 RegexPatternSet: %w", err) } if resp == nil || resp.RegexPatternSet == nil { @@ -106,7 +106,7 @@ func dataSourceAwsWafv2RegexPatternSetRead(d *schema.ResourceData, meta interfac d.Set("description", aws.StringValue(resp.RegexPatternSet.Description)) if err := d.Set("regular_expression", flattenWafv2RegexPatternSet(resp.RegexPatternSet.RegularExpressionList)); err != nil { - return fmt.Errorf("Error setting regular_expression: %s", err) + return fmt.Errorf("Error setting regular_expression: %w", err) } return nil diff --git a/aws/data_source_aws_wafv2_rule_group.go b/aws/data_source_aws_wafv2_rule_group.go index 57eef3151b5..8f8f96adf32 100644 --- a/aws/data_source_aws_wafv2_rule_group.go +++ b/aws/data_source_aws_wafv2_rule_group.go @@ -51,7 +51,7 @@ func dataSourceAwsWafv2RuleGroupRead(d *schema.ResourceData, meta interface{}) e for { resp, err := conn.ListRuleGroups(input) if err != nil { - return fmt.Errorf("Error reading WAFv2 RuleGroups: %s", err) + return fmt.Errorf("Error reading WAFv2 RuleGroups: %w", err) } if resp == nil || resp.RuleGroups == nil { diff --git a/aws/data_source_aws_wafv2_web_acl.go b/aws/data_source_aws_wafv2_web_acl.go index 3770781ab27..858eb6f9dae 100644 --- a/aws/data_source_aws_wafv2_web_acl.go +++ b/aws/data_source_aws_wafv2_web_acl.go @@ -51,7 +51,7 @@ func dataSourceAwsWafv2WebACLRead(d *schema.ResourceData, meta interface{}) erro for { resp, err := conn.ListWebACLs(input) if err != nil { - return fmt.Errorf("Error reading WAFv2 WebACLs: %s", err) + return fmt.Errorf("Error reading WAFv2 WebACLs: %w", err) } if resp == nil || resp.WebACLs == nil { diff --git a/aws/data_source_aws_workspaces_bundle.go b/aws/data_source_aws_workspaces_bundle.go index 800363a9bd3..75fffe8fcd1 100644 --- a/aws/data_source_aws_workspaces_bundle.go +++ b/aws/data_source_aws_workspaces_bundle.go @@ -136,7 +136,7 @@ func dataSourceAwsWorkspaceBundleRead(d *schema.ResourceData, meta interface{}) } } if err := d.Set("compute_type", computeType); err != nil { - return fmt.Errorf("error setting compute_type: %s", err) + return fmt.Errorf("error setting compute_type: %w", err) } rootStorage := make([]map[string]interface{}, 1) @@ -146,7 +146,7 @@ func dataSourceAwsWorkspaceBundleRead(d *schema.ResourceData, meta interface{}) } } if err := d.Set("root_storage", rootStorage); err != nil { - return fmt.Errorf("error setting root_storage: %s", err) + return fmt.Errorf("error setting root_storage: %w", err) } userStorage := make([]map[string]interface{}, 1) @@ -156,7 +156,7 @@ func dataSourceAwsWorkspaceBundleRead(d *schema.ResourceData, meta interface{}) } } if err := d.Set("user_storage", userStorage); err != nil { - return fmt.Errorf("error setting user_storage: %s", err) + return fmt.Errorf("error setting user_storage: %w", err) } return nil diff --git a/aws/data_source_aws_workspaces_directory.go 
b/aws/data_source_aws_workspaces_directory.go index e3f45dabe51..548235d38c1 100644 --- a/aws/data_source_aws_workspaces_directory.go +++ b/aws/data_source_aws_workspaces_directory.go @@ -166,7 +166,7 @@ func dataSourceAwsWorkspacesDirectoryRead(d *schema.ResourceData, meta interface rawOutput, state, err := waiter.DirectoryState(conn, directoryID)() if err != nil { - return fmt.Errorf("error getting WorkSpaces Directory (%s): %s", directoryID, err) + return fmt.Errorf("error getting WorkSpaces Directory (%s): %w", directoryID, err) } if state == workspaces.WorkspaceDirectoryStateDeregistered { return fmt.Errorf("WorkSpaces directory %s was not found", directoryID) @@ -184,11 +184,11 @@ func dataSourceAwsWorkspacesDirectoryRead(d *schema.ResourceData, meta interface d.Set("alias", directory.Alias) if err := d.Set("subnet_ids", flattenStringSet(directory.SubnetIds)); err != nil { - return fmt.Errorf("error setting subnet_ids: %s", err) + return fmt.Errorf("error setting subnet_ids: %w", err) } if err := d.Set("self_service_permissions", flattenSelfServicePermissions(directory.SelfservicePermissions)); err != nil { - return fmt.Errorf("error setting self_service_permissions: %s", err) + return fmt.Errorf("error setting self_service_permissions: %w", err) } if err := d.Set("workspace_access_properties", flattenWorkspaceAccessProperties(directory.WorkspaceAccessProperties)); err != nil { @@ -196,23 +196,23 @@ func dataSourceAwsWorkspacesDirectoryRead(d *schema.ResourceData, meta interface } if err := d.Set("workspace_creation_properties", flattenWorkspaceCreationProperties(directory.WorkspaceCreationProperties)); err != nil { - return fmt.Errorf("error setting workspace_creation_properties: %s", err) + return fmt.Errorf("error setting workspace_creation_properties: %w", err) } if err := d.Set("ip_group_ids", flattenStringSet(directory.IpGroupIds)); err != nil { - return fmt.Errorf("error setting ip_group_ids: %s", err) + return fmt.Errorf("error setting ip_group_ids: %w", err) } if err := d.Set("dns_ip_addresses", flattenStringSet(directory.DnsIpAddresses)); err != nil { - return fmt.Errorf("error setting dns_ip_addresses: %s", err) + return fmt.Errorf("error setting dns_ip_addresses: %w", err) } tags, err := keyvaluetags.WorkspacesListTags(conn, d.Id()) if err != nil { - return fmt.Errorf("error listing tags: %s", err) + return fmt.Errorf("error listing tags: %w", err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_workspaces_workspace.go b/aws/data_source_aws_workspaces_workspace.go index f01c8d13dbc..34c52a4f0d4 100644 --- a/aws/data_source_aws_workspaces_workspace.go +++ b/aws/data_source_aws_workspaces_workspace.go @@ -152,16 +152,16 @@ func dataSourceAwsWorkspacesWorkspaceRead(d *schema.ResourceData, meta interface d.Set("user_volume_encryption_enabled", aws.BoolValue(workspace.UserVolumeEncryptionEnabled)) d.Set("volume_encryption_key", aws.StringValue(workspace.VolumeEncryptionKey)) if err := d.Set("workspace_properties", flattenWorkspaceProperties(workspace.WorkspaceProperties)); err != nil { - return fmt.Errorf("error setting workspace properties: %s", err) + return fmt.Errorf("error setting workspace properties: %w", err) } tags, err := keyvaluetags.WorkspacesListTags(conn, d.Id()) if err != nil { - return fmt.Errorf("error listing tags: %s", err) + return fmt.Errorf("error listing tags: 
%w", err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil From 1f9fb8719c285470f2a65ba35416357010e147f3 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 8 Feb 2021 21:54:04 -0800 Subject: [PATCH 1038/1212] Fixes errorlint reports for data sources F-S --- aws/data_source_aws_glue_script.go | 2 +- aws/data_source_aws_guardduty_detector.go | 2 +- aws/data_source_aws_iam_group.go | 4 +-- aws/data_source_aws_iam_instance_profile.go | 2 +- aws/data_source_aws_iam_policy_document.go | 14 ++++---- aws/data_source_aws_iam_role.go | 8 ++--- aws/data_source_aws_iam_server_certificate.go | 2 +- aws/data_source_aws_iam_user.go | 2 +- ...ata_source_aws_inspector_rules_packages.go | 2 +- aws/data_source_aws_instance.go | 10 +++--- aws/data_source_aws_internet_gateway.go | 2 +- aws/data_source_aws_iot_endpoint.go | 4 +-- aws/data_source_aws_ip_ranges.go | 16 +++++----- aws/data_source_aws_kinesis_stream.go | 4 +-- aws/data_source_aws_kms_alias.go | 4 +-- aws/data_source_aws_kms_key.go | 2 +- aws/data_source_aws_kms_secrets.go | 6 ++-- aws/data_source_aws_lambda_alias.go | 2 +- ...a_source_aws_lambda_code_signing_config.go | 12 +++---- aws/data_source_aws_lambda_function.go | 18 +++++------ aws/data_source_aws_lambda_layer_version.go | 26 +++++++-------- aws/data_source_aws_launch_configuration.go | 8 ++--- aws/data_source_aws_lb.go | 2 +- aws/data_source_aws_lb_target_group.go | 2 +- aws/data_source_aws_msk_cluster.go | 6 ++-- aws/data_source_aws_msk_configuration.go | 6 ++-- aws/data_source_aws_nat_gateway.go | 2 +- aws/data_source_aws_network_acls.go | 2 +- aws/data_source_aws_network_interface.go | 2 +- aws/data_source_aws_network_interfaces.go | 2 +- ...a_source_aws_organizations_organization.go | 18 +++++------ ..._aws_organizations_organizational_units.go | 4 +-- aws/data_source_aws_pricing_product.go | 4 +-- aws/data_source_aws_qldb_ledger.go | 2 +- aws/data_source_aws_ram_resource_share.go | 4 +-- aws/data_source_aws_rds_cluster.go | 16 +++++----- aws/data_source_aws_redshift_cluster.go | 10 +++--- aws/data_source_aws_regions.go | 4 +-- aws/data_source_aws_route53_delegation_set.go | 4 +-- ...ta_source_aws_route53_resolver_endpoint.go | 2 +- aws/data_source_aws_route53_resolver_rule.go | 8 ++--- aws/data_source_aws_route53_resolver_rules.go | 4 +-- aws/data_source_aws_route53_zone.go | 4 +-- aws/data_source_aws_route_table.go | 2 +- aws/data_source_aws_route_tables.go | 2 +- aws/data_source_aws_s3_bucket.go | 4 +-- aws/data_source_aws_s3_bucket_object.go | 17 ++++------ aws/data_source_aws_s3_bucket_objects.go | 8 ++--- aws/data_source_aws_secretsmanager_secret.go | 10 +++--- ...urce_aws_secretsmanager_secret_rotation.go | 4 +-- ...ource_aws_secretsmanager_secret_version.go | 4 +-- aws/data_source_aws_security_group.go | 2 +- aws/data_source_aws_security_groups.go | 2 +- aws/data_source_aws_servicequotas_service.go | 2 +- ..._source_aws_servicequotas_service_quota.go | 6 ++-- aws/data_source_aws_sfn_activity.go | 4 +-- aws/data_source_aws_sfn_state_machine.go | 2 +- aws/data_source_aws_signer_signing_job.go | 32 +++++++++---------- aws/data_source_aws_signer_signing_profile.go | 20 ++++++------ aws/data_source_aws_sns.go | 2 +- aws/data_source_aws_sqs_queue.go | 8 ++--- aws/data_source_aws_ssm_document.go | 2 +- aws/data_source_aws_ssm_patch_baseline.go | 2 +- ...ta_source_aws_storagegateway_local_disk.go | 2 +- 
From 1f9fb8719c285470f2a65ba35416357010e147f3 Mon Sep 17 00:00:00 2001
From: Graham Davison
Date: Mon, 8 Feb 2021 21:54:04 -0800
Subject: [PATCH 1038/1212] Fixes errorlint reports for data sources F-S

---
 aws/data_source_aws_glue_script.go            |  2 +-
 aws/data_source_aws_guardduty_detector.go     |  2 +-
 aws/data_source_aws_iam_group.go              |  4 +--
 aws/data_source_aws_iam_instance_profile.go   |  2 +-
 aws/data_source_aws_iam_policy_document.go    | 14 ++++----
 aws/data_source_aws_iam_role.go               |  8 ++---
 aws/data_source_aws_iam_server_certificate.go |  2 +-
 aws/data_source_aws_iam_user.go               |  2 +-
 ...ata_source_aws_inspector_rules_packages.go |  2 +-
 aws/data_source_aws_instance.go               | 10 +++---
 aws/data_source_aws_internet_gateway.go       |  2 +-
 aws/data_source_aws_iot_endpoint.go           |  4 +--
 aws/data_source_aws_ip_ranges.go              | 16 +++++-----
 aws/data_source_aws_kinesis_stream.go         |  4 +--
 aws/data_source_aws_kms_alias.go              |  4 +--
 aws/data_source_aws_kms_key.go                |  2 +-
 aws/data_source_aws_kms_secrets.go            |  6 ++--
 aws/data_source_aws_lambda_alias.go           |  2 +-
 ...a_source_aws_lambda_code_signing_config.go | 12 +++----
 aws/data_source_aws_lambda_function.go        | 18 +++++------
 aws/data_source_aws_lambda_layer_version.go   | 26 +++++++--------
 aws/data_source_aws_launch_configuration.go   |  8 ++---
 aws/data_source_aws_lb.go                     |  2 +-
 aws/data_source_aws_lb_target_group.go        |  2 +-
 aws/data_source_aws_msk_cluster.go            |  6 ++--
 aws/data_source_aws_msk_configuration.go      |  6 ++--
 aws/data_source_aws_nat_gateway.go            |  2 +-
 aws/data_source_aws_network_acls.go           |  2 +-
 aws/data_source_aws_network_interface.go      |  2 +-
 aws/data_source_aws_network_interfaces.go     |  2 +-
 ...a_source_aws_organizations_organization.go | 18 +++++------
 ..._aws_organizations_organizational_units.go |  4 +--
 aws/data_source_aws_pricing_product.go        |  4 +--
 aws/data_source_aws_qldb_ledger.go            |  2 +-
 aws/data_source_aws_ram_resource_share.go     |  4 +--
 aws/data_source_aws_rds_cluster.go            | 16 +++++-----
 aws/data_source_aws_redshift_cluster.go       | 10 +++---
 aws/data_source_aws_regions.go                |  4 +--
 aws/data_source_aws_route53_delegation_set.go |  4 +--
 ...ta_source_aws_route53_resolver_endpoint.go |  2 +-
 aws/data_source_aws_route53_resolver_rule.go  |  8 ++---
 aws/data_source_aws_route53_resolver_rules.go |  4 +--
 aws/data_source_aws_route53_zone.go           |  4 +--
 aws/data_source_aws_route_table.go            |  2 +-
 aws/data_source_aws_route_tables.go           |  2 +-
 aws/data_source_aws_s3_bucket.go              |  4 +--
 aws/data_source_aws_s3_bucket_object.go       | 17 ++++------
 aws/data_source_aws_s3_bucket_objects.go      |  8 ++---
 aws/data_source_aws_secretsmanager_secret.go  | 10 +++---
 ...urce_aws_secretsmanager_secret_rotation.go |  4 +--
 ...ource_aws_secretsmanager_secret_version.go |  4 +--
 aws/data_source_aws_security_group.go         |  2 +-
 aws/data_source_aws_security_groups.go        |  2 +-
 aws/data_source_aws_servicequotas_service.go  |  2 +-
 ..._source_aws_servicequotas_service_quota.go |  6 ++--
 aws/data_source_aws_sfn_activity.go           |  4 +--
 aws/data_source_aws_sfn_state_machine.go      |  2 +-
 aws/data_source_aws_signer_signing_job.go     | 32 +++++++++----------
 aws/data_source_aws_signer_signing_profile.go | 20 ++++++------
 aws/data_source_aws_sns.go                    |  2 +-
 aws/data_source_aws_sqs_queue.go              |  8 ++---
 aws/data_source_aws_ssm_document.go           |  2 +-
 aws/data_source_aws_ssm_patch_baseline.go     |  2 +-
 ...ta_source_aws_storagegateway_local_disk.go |  2 +-
 aws/data_source_aws_subnet.go                 |  2 +-
 65 files changed, 198 insertions(+), 201 deletions(-)

diff --git a/aws/data_source_aws_glue_script.go b/aws/data_source_aws_glue_script.go
index 4143847f305..8ed391b609c 100644
--- a/aws/data_source_aws_glue_script.go
+++ b/aws/data_source_aws_glue_script.go
@@ -115,7 +115,7 @@ func dataSourceAwsGlueScriptRead(d *schema.ResourceData, meta interface{}) error
 	log.Printf("[DEBUG] Creating Glue Script: %s", input)
 	output, err := conn.CreateScript(input)
 	if err != nil {
-		return fmt.Errorf("error creating Glue script: %s", err)
+		return fmt.Errorf("error creating Glue script: %w", err)
 	}

 	if output == nil {
diff --git a/aws/data_source_aws_guardduty_detector.go b/aws/data_source_aws_guardduty_detector.go
index 9f7666fcb55..1a6e8e73d0c 100644
--- a/aws/data_source_aws_guardduty_detector.go
+++ b/aws/data_source_aws_guardduty_detector.go
@@ -44,7 +44,7 @@ func dataSourceAwsGuarddutyDetectorRead(d *schema.ResourceData, meta interface{}
 	resp, err := conn.ListDetectors(input)

 	if err != nil {
-		return fmt.Errorf("error listing GuardDuty Detectors: %s ,", err)
+		return fmt.Errorf("error listing GuardDuty Detectors: %w", err)
 	}

 	if resp == nil || len(resp.DetectorIds) == 0 {
diff --git a/aws/data_source_aws_iam_group.go b/aws/data_source_aws_iam_group.go
index 6fdcefad045..615ecaae6cb 100644
--- a/aws/data_source_aws_iam_group.go
+++ b/aws/data_source_aws_iam_group.go
@@ -79,7 +79,7 @@ func dataSourceAwsIAMGroupRead(d *schema.ResourceData, meta interface{}) error {
 		return !lastPage
 	})
 	if err != nil {
-		return fmt.Errorf("Error getting group: %s", err)
+		return fmt.Errorf("Error getting group: %w", err)
 	}
 	if group == nil {
 		return fmt.Errorf("no IAM group found")
@@ -90,7 +90,7 @@ func dataSourceAwsIAMGroupRead(d *schema.ResourceData, meta interface{}) error {
 	d.Set("path", group.Path)
 	d.Set("group_id", group.GroupId)
 	if err := d.Set("users", dataSourceUsersRead(users)); err != nil {
-		return fmt.Errorf("error setting users: %s", err)
+		return fmt.Errorf("error setting users: %w", err)
 	}

 	return nil
diff --git a/aws/data_source_aws_iam_instance_profile.go b/aws/data_source_aws_iam_instance_profile.go
index 8ef77865860..5655c483d4d 100644
--- a/aws/data_source_aws_iam_instance_profile.go
+++ b/aws/data_source_aws_iam_instance_profile.go
@@ -58,7 +58,7 @@ func dataSourceAwsIAMInstanceProfileRead(d *schema.ResourceData, meta interface{
 	log.Printf("[DEBUG] Reading IAM Instance Profile: %s", req)
 	resp, err := iamconn.GetInstanceProfile(req)
 	if err != nil {
-		return fmt.Errorf("Error getting instance profiles: %s", err)
+		return fmt.Errorf("Error getting instance profiles: %w", err)
 	}
 	if resp == nil {
 		return fmt.Errorf("no IAM instance profile found")
diff --git a/aws/data_source_aws_iam_policy_document.go b/aws/data_source_aws_iam_policy_document.go
index 582c7c0d2f1..285bb365609 100644
--- a/aws/data_source_aws_iam_policy_document.go
+++ b/aws/data_source_aws_iam_policy_document.go
@@ -156,7 +156,7 @@ func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{}
 				iamPolicyDecodeConfigStringList(resources), doc.Version,
 			)
 			if err != nil {
-				return fmt.Errorf("error reading resources: %s", err)
+				return fmt.Errorf("error reading resources: %w", err)
 			}
 		}
 		if notResources := cfgStmt["not_resources"].(*schema.Set).List(); len(notResources) > 0 {
@@ -165,7 +165,7 @@ func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{}
 				iamPolicyDecodeConfigStringList(notResources), doc.Version,
 			)
 			if err != nil {
-				return fmt.Errorf("error reading
not_resources: %s", err) + return fmt.Errorf("error reading not_resources: %w", err) } } @@ -173,7 +173,7 @@ func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{} var err error stmt.Principals, err = dataSourceAwsIamPolicyDocumentMakePrincipals(principals, doc.Version) if err != nil { - return fmt.Errorf("error reading principals: %s", err) + return fmt.Errorf("error reading principals: %w", err) } } @@ -181,7 +181,7 @@ func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{} var err error stmt.NotPrincipals, err = dataSourceAwsIamPolicyDocumentMakePrincipals(notPrincipals, doc.Version) if err != nil { - return fmt.Errorf("error reading not_principals: %s", err) + return fmt.Errorf("error reading not_principals: %w", err) } } @@ -189,7 +189,7 @@ func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{} var err error stmt.Conditions, err = dataSourceAwsIamPolicyDocumentMakeConditions(conditions, doc.Version) if err != nil { - return fmt.Errorf("error reading condition: %s", err) + return fmt.Errorf("error reading condition: %w", err) } } @@ -262,7 +262,7 @@ func dataSourceAwsIamPolicyDocumentMakeConditions(in []interface{}, version stri ), version, ) if err != nil { - return nil, fmt.Errorf("error reading values: %s", err) + return nil, fmt.Errorf("error reading values: %w", err) } } return IAMPolicyStatementConditionSet(out), nil @@ -282,7 +282,7 @@ func dataSourceAwsIamPolicyDocumentMakePrincipals(in []interface{}, version stri ), version, ) if err != nil { - return nil, fmt.Errorf("error reading identifiers: %s", err) + return nil, fmt.Errorf("error reading identifiers: %w", err) } } return IAMPolicyStatementPrincipalSet(out), nil diff --git a/aws/data_source_aws_iam_role.go b/aws/data_source_aws_iam_role.go index 221e598b971..33bf0c93a79 100644 --- a/aws/data_source_aws_iam_role.go +++ b/aws/data_source_aws_iam_role.go @@ -69,12 +69,12 @@ func dataSourceAwsIAMRoleRead(d *schema.ResourceData, meta interface{}) error { output, err := iamconn.GetRole(input) if err != nil { - return fmt.Errorf("error reading IAM Role (%s): %s", name, err) + return fmt.Errorf("error reading IAM Role (%s): %w", name, err) } d.Set("arn", output.Role.Arn) if err := d.Set("create_date", output.Role.CreateDate.Format(time.RFC3339)); err != nil { - return fmt.Errorf("error setting create_date: %s", err) + return fmt.Errorf("error setting create_date: %w", err) } d.Set("description", output.Role.Description) d.Set("max_session_duration", output.Role.MaxSessionDuration) @@ -89,10 +89,10 @@ func dataSourceAwsIAMRoleRead(d *schema.ResourceData, meta interface{}) error { assumRolePolicy, err := url.QueryUnescape(aws.StringValue(output.Role.AssumeRolePolicyDocument)) if err != nil { - return fmt.Errorf("error parsing assume role policy document: %s", err) + return fmt.Errorf("error parsing assume role policy document: %w", err) } if err := d.Set("assume_role_policy", assumRolePolicy); err != nil { - return fmt.Errorf("error setting assume_role_policy: %s", err) + return fmt.Errorf("error setting assume_role_policy: %w", err) } d.SetId(name) diff --git a/aws/data_source_aws_iam_server_certificate.go b/aws/data_source_aws_iam_server_certificate.go index 5f312e47c86..de71ad96903 100644 --- a/aws/data_source_aws_iam_server_certificate.go +++ b/aws/data_source_aws_iam_server_certificate.go @@ -119,7 +119,7 @@ func dataSourceAwsIAMServerCertificateRead(d *schema.ResourceData, meta interfac return true }) if err != nil { - return fmt.Errorf("Error 
describing certificates: %s", err) + return fmt.Errorf("Error describing certificates: %w", err) } if len(metadatas) == 0 { diff --git a/aws/data_source_aws_iam_user.go b/aws/data_source_aws_iam_user.go index a8aa35e2dae..aadad2838ca 100644 --- a/aws/data_source_aws_iam_user.go +++ b/aws/data_source_aws_iam_user.go @@ -52,7 +52,7 @@ func dataSourceAwsIAMUserRead(d *schema.ResourceData, meta interface{}) error { log.Printf("[DEBUG] Reading IAM User: %s", req) resp, err := iamconn.GetUser(req) if err != nil { - return fmt.Errorf("error getting user: %s", err) + return fmt.Errorf("error getting user: %w", err) } user := resp.User diff --git a/aws/data_source_aws_inspector_rules_packages.go b/aws/data_source_aws_inspector_rules_packages.go index ef2b94702da..cf17489a34e 100644 --- a/aws/data_source_aws_inspector_rules_packages.go +++ b/aws/data_source_aws_inspector_rules_packages.go @@ -40,7 +40,7 @@ func dataSourceAwsInspectorRulesPackagesRead(d *schema.ResourceData, meta interf return !lastPage }) if err != nil { - return fmt.Errorf("Error fetching Rules Packages: %s", err) + return fmt.Errorf("Error fetching Rules Packages: %w", err) } if len(arns) == 0 { diff --git a/aws/data_source_aws_instance.go b/aws/data_source_aws_instance.go index 3eb17e5c359..419c711b61c 100644 --- a/aws/data_source_aws_instance.go +++ b/aws/data_source_aws_instance.go @@ -495,7 +495,7 @@ func instanceDescriptionAttributes(d *schema.ResourceData, instance *ec2.Instanc } if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(instance.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } // Security Groups @@ -549,20 +549,20 @@ func instanceDescriptionAttributes(d *schema.ResourceData, instance *ec2.Instanc // Ignore UnsupportedOperation errors for AWS China and GovCloud (US) // Reference: https://github.com/hashicorp/terraform-provider-aws/pull/4362 if err != nil && !isAWSErr(err, "UnsupportedOperation", "") { - return fmt.Errorf("error getting EC2 Instance (%s) Credit Specifications: %s", d.Id(), err) + return fmt.Errorf("error getting EC2 Instance (%s) Credit Specifications: %w", d.Id(), err) } } if err := d.Set("credit_specification", creditSpecifications); err != nil { - return fmt.Errorf("error setting credit_specification: %s", err) + return fmt.Errorf("error setting credit_specification: %w", err) } if err := d.Set("metadata_options", flattenEc2InstanceMetadataOptions(instance.MetadataOptions)); err != nil { - return fmt.Errorf("error setting metadata_options: %s", err) + return fmt.Errorf("error setting metadata_options: %w", err) } if err := d.Set("enclave_options", flattenEc2EnclaveOptions(instance.EnclaveOptions)); err != nil { - return fmt.Errorf("error setting enclave_options: %s", err) + return fmt.Errorf("error setting enclave_options: %w", err) } return nil diff --git a/aws/data_source_aws_internet_gateway.go b/aws/data_source_aws_internet_gateway.go index 1f8ac76ffe7..3f0f5b3fc7e 100644 --- a/aws/data_source_aws_internet_gateway.go +++ b/aws/data_source_aws_internet_gateway.go @@ -90,7 +90,7 @@ func dataSourceAwsInternetGatewayRead(d *schema.ResourceData, meta interface{}) d.SetId(aws.StringValue(igw.InternetGatewayId)) if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(igw.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } d.Set("owner_id", igw.OwnerId) diff 
--git a/aws/data_source_aws_iot_endpoint.go b/aws/data_source_aws_iot_endpoint.go index dc026623b28..a1ad5cbfeb0 100644 --- a/aws/data_source_aws_iot_endpoint.go +++ b/aws/data_source_aws_iot_endpoint.go @@ -41,12 +41,12 @@ func dataSourceAwsIotEndpointRead(d *schema.ResourceData, meta interface{}) erro output, err := conn.DescribeEndpoint(input) if err != nil { - return fmt.Errorf("error while describing iot endpoint: %s", err) + return fmt.Errorf("error while describing iot endpoint: %w", err) } endpointAddress := aws.StringValue(output.EndpointAddress) d.SetId(endpointAddress) if err := d.Set("endpoint_address", endpointAddress); err != nil { - return fmt.Errorf("error setting endpoint_address: %s", err) + return fmt.Errorf("error setting endpoint_address: %w", err) } return nil } diff --git a/aws/data_source_aws_ip_ranges.go b/aws/data_source_aws_ip_ranges.go index 420892e0b00..07ef06083c7 100644 --- a/aws/data_source_aws_ip_ranges.go +++ b/aws/data_source_aws_ip_ranges.go @@ -84,7 +84,7 @@ func dataSourceAwsIPRangesRead(d *schema.ResourceData, meta interface{}) error { res, err := conn.Get(url) if err != nil { - return fmt.Errorf("Error listing IP ranges from (%s): %s", url, err) + return fmt.Errorf("Error listing IP ranges from (%s): %w", url, err) } defer res.Body.Close() @@ -92,29 +92,29 @@ func dataSourceAwsIPRangesRead(d *schema.ResourceData, meta interface{}) error { data, err := ioutil.ReadAll(res.Body) if err != nil { - return fmt.Errorf("Error reading response body from (%s): %s", url, err) + return fmt.Errorf("Error reading response body from (%s): %w", url, err) } result := new(dataSourceAwsIPRangesResult) if err := json.Unmarshal(data, result); err != nil { - return fmt.Errorf("Error parsing result from (%s): %s", url, err) + return fmt.Errorf("Error parsing result from (%s): %w", url, err) } if err := d.Set("create_date", result.CreateDate); err != nil { - return fmt.Errorf("Error setting create date: %s", err) + return fmt.Errorf("Error setting create date: %w", err) } syncToken, err := strconv.Atoi(result.SyncToken) if err != nil { - return fmt.Errorf("Error while converting sync token: %s", err) + return fmt.Errorf("Error while converting sync token: %w", err) } d.SetId(result.SyncToken) if err := d.Set("sync_token", syncToken); err != nil { - return fmt.Errorf("Error setting sync token: %s", err) + return fmt.Errorf("Error setting sync token: %w", err) } get := func(key string) *schema.Set { @@ -167,13 +167,13 @@ func dataSourceAwsIPRangesRead(d *schema.ResourceData, meta interface{}) error { sort.Strings(ipPrefixes) if err := d.Set("cidr_blocks", ipPrefixes); err != nil { - return fmt.Errorf("Error setting cidr_blocks: %s", err) + return fmt.Errorf("Error setting cidr_blocks: %w", err) } sort.Strings(ipv6Prefixes) if err := d.Set("ipv6_cidr_blocks", ipv6Prefixes); err != nil { - return fmt.Errorf("Error setting ipv6_cidr_blocks: %s", err) + return fmt.Errorf("Error setting ipv6_cidr_blocks: %w", err) } return nil diff --git a/aws/data_source_aws_kinesis_stream.go b/aws/data_source_aws_kinesis_stream.go index 3b4bd52a46c..ed4a96932dc 100644 --- a/aws/data_source_aws_kinesis_stream.go +++ b/aws/data_source_aws_kinesis_stream.go @@ -86,11 +86,11 @@ func dataSourceAwsKinesisStreamRead(d *schema.ResourceData, meta interface{}) er tags, err := keyvaluetags.KinesisListTags(conn, sn) if err != nil { - return fmt.Errorf("error listing tags for Kinesis Stream (%s): %s", sn, err) + return fmt.Errorf("error listing tags for Kinesis Stream (%s): %w", sn, err) } if err := 
d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_kms_alias.go b/aws/data_source_aws_kms_alias.go index b763ad83205..1c5c84e5687 100644 --- a/aws/data_source_aws_kms_alias.go +++ b/aws/data_source_aws_kms_alias.go @@ -52,7 +52,7 @@ func dataSourceAwsKmsAliasRead(d *schema.ResourceData, meta interface{}) error { return true }) if err != nil { - return fmt.Errorf("Error fetch KMS alias list: %s", err) + return fmt.Errorf("Error fetch KMS alias list: %w", err) } if alias == nil { @@ -78,7 +78,7 @@ func dataSourceAwsKmsAliasRead(d *schema.ResourceData, meta interface{}) error { } resp, err := conn.DescribeKey(req) if err != nil { - return fmt.Errorf("Error calling KMS DescribeKey: %s", err) + return fmt.Errorf("Error calling KMS DescribeKey: %w", err) } d.Set("target_key_arn", resp.KeyMetadata.Arn) diff --git a/aws/data_source_aws_kms_key.go b/aws/data_source_aws_kms_key.go index 248f09aec7f..6c1c02193f2 100644 --- a/aws/data_source_aws_kms_key.go +++ b/aws/data_source_aws_kms_key.go @@ -92,7 +92,7 @@ func dataSourceAwsKmsKeyRead(d *schema.ResourceData, meta interface{}) error { } output, err := conn.DescribeKey(input) if err != nil { - return fmt.Errorf("error while describing key [%s]: %s", keyId, err) + return fmt.Errorf("error while describing key [%s]: %w", keyId, err) } d.SetId(aws.StringValue(output.KeyMetadata.KeyId)) d.Set("arn", output.KeyMetadata.Arn) diff --git a/aws/data_source_aws_kms_secrets.go b/aws/data_source_aws_kms_secrets.go index 7b3b22a6f40..196e7d39883 100644 --- a/aws/data_source_aws_kms_secrets.go +++ b/aws/data_source_aws_kms_secrets.go @@ -63,7 +63,7 @@ func dataSourceAwsKmsSecretsRead(d *schema.ResourceData, meta interface{}) error // base64 decode the payload payload, err := base64.StdEncoding.DecodeString(secret["payload"].(string)) if err != nil { - return fmt.Errorf("Invalid base64 value for secret '%s': %v", secret["name"].(string), err) + return fmt.Errorf("Invalid base64 value for secret '%s': %w", secret["name"].(string), err) } // build the kms decrypt params @@ -86,7 +86,7 @@ func dataSourceAwsKmsSecretsRead(d *schema.ResourceData, meta interface{}) error // decrypt resp, err := conn.Decrypt(params) if err != nil { - return fmt.Errorf("Failed to decrypt '%s': %s", secret["name"].(string), err) + return fmt.Errorf("Failed to decrypt '%s': %w", secret["name"].(string), err) } // Set the secret via the name @@ -95,7 +95,7 @@ func dataSourceAwsKmsSecretsRead(d *schema.ResourceData, meta interface{}) error } if err := d.Set("plaintext", plaintext); err != nil { - return fmt.Errorf("error setting plaintext: %s", err) + return fmt.Errorf("error setting plaintext: %w", err) } d.SetId(meta.(*AWSClient).region) diff --git a/aws/data_source_aws_lambda_alias.go b/aws/data_source_aws_lambda_alias.go index 917ff8fb0be..096a376b1ba 100644 --- a/aws/data_source_aws_lambda_alias.go +++ b/aws/data_source_aws_lambda_alias.go @@ -59,7 +59,7 @@ func dataSourceAwsLambdaAliasRead(d *schema.ResourceData, meta interface{}) erro aliasConfiguration, err := conn.GetAlias(params) if err != nil { - return fmt.Errorf("Error getting Lambda alias: %s", err) + return fmt.Errorf("Error getting Lambda alias: %w", err) } d.SetId(aws.StringValue(aliasConfiguration.AliasArn)) diff --git a/aws/data_source_aws_lambda_code_signing_config.go b/aws/data_source_aws_lambda_code_signing_config.go index 
92a49a40098..85f0095a0a4 100644 --- a/aws/data_source_aws_lambda_code_signing_config.go +++ b/aws/data_source_aws_lambda_code_signing_config.go @@ -72,7 +72,7 @@ func dataSourceAwsLambdaCodeSigningConfigRead(d *schema.ResourceData, meta inter }) if err != nil { - return fmt.Errorf("error getting Lambda code signing config (%s): %s", arn, err) + return fmt.Errorf("error getting Lambda code signing config (%s): %w", arn, err) } if configOutput == nil { @@ -85,19 +85,19 @@ func dataSourceAwsLambdaCodeSigningConfigRead(d *schema.ResourceData, meta inter } if err := d.Set("config_id", codeSigningConfig.CodeSigningConfigId); err != nil { - return fmt.Errorf("error setting lambda code signing config id: %s", err) + return fmt.Errorf("error setting lambda code signing config id: %w", err) } if err := d.Set("description", codeSigningConfig.Description); err != nil { - return fmt.Errorf("error setting lambda code signing config description: %s", err) + return fmt.Errorf("error setting lambda code signing config description: %w", err) } if err := d.Set("last_modified", codeSigningConfig.LastModified); err != nil { - return fmt.Errorf("error setting lambda code signing config last modified: %s", err) + return fmt.Errorf("error setting lambda code signing config last modified: %w", err) } if err := d.Set("allowed_publishers", flattenLambdaCodeSigningConfigAllowedPublishers(codeSigningConfig.AllowedPublishers)); err != nil { - return fmt.Errorf("error setting lambda code signing config allowed publishers: %s", err) + return fmt.Errorf("error setting lambda code signing config allowed publishers: %w", err) } if err := d.Set("policies", []interface{}{ @@ -105,7 +105,7 @@ func dataSourceAwsLambdaCodeSigningConfigRead(d *schema.ResourceData, meta inter "untrusted_artifact_on_deployment": codeSigningConfig.CodeSigningPolicies.UntrustedArtifactOnDeployment, }, }); err != nil { - return fmt.Errorf("error setting lambda code signing config code signing policies: %s", err) + return fmt.Errorf("error setting lambda code signing config code signing policies: %w", err) } d.SetId(aws.StringValue(codeSigningConfig.CodeSigningConfigArn)) diff --git a/aws/data_source_aws_lambda_function.go b/aws/data_source_aws_lambda_function.go index 2079a23b30c..bda952f988d 100644 --- a/aws/data_source_aws_lambda_function.go +++ b/aws/data_source_aws_lambda_function.go @@ -204,7 +204,7 @@ func dataSourceAwsLambdaFunctionRead(d *schema.ResourceData, meta interface{}) e output, err := conn.GetFunction(input) if err != nil { - return fmt.Errorf("error getting Lambda Function (%s): %s", functionName, err) + return fmt.Errorf("error getting Lambda Function (%s): %w", functionName, err) } if output == nil { @@ -235,13 +235,13 @@ func dataSourceAwsLambdaFunctionRead(d *schema.ResourceData, meta interface{}) e } } if err := d.Set("dead_letter_config", deadLetterConfig); err != nil { - return fmt.Errorf("error setting dead_letter_config: %s", err) + return fmt.Errorf("error setting dead_letter_config: %w", err) } d.Set("description", function.Description) if err := d.Set("environment", flattenLambdaEnvironment(function.Environment)); err != nil { - return fmt.Errorf("error setting environment: %s", err) + return fmt.Errorf("error setting environment: %w", err) } d.Set("handler", function.Handler) @@ -250,7 +250,7 @@ func dataSourceAwsLambdaFunctionRead(d *schema.ResourceData, meta interface{}) e d.Set("last_modified", function.LastModified) if err := d.Set("layers", flattenLambdaLayers(function.Layers)); err != nil { - return 
fmt.Errorf("Error setting layers for Lambda Function (%s): %s", d.Id(), err) + return fmt.Errorf("Error setting layers for Lambda Function (%s): %w", d.Id(), err) } d.Set("memory_size", function.MemorySize) @@ -258,12 +258,12 @@ func dataSourceAwsLambdaFunctionRead(d *schema.ResourceData, meta interface{}) e // Add Signing Profile Version ARN if err := d.Set("signing_profile_version_arn", function.SigningProfileVersionArn); err != nil { - return fmt.Errorf("Error setting signing profile version arn for Lambda Function: %s", err) + return fmt.Errorf("Error setting signing profile version arn for Lambda Function: %w", err) } // Add Signing Job ARN if err := d.Set("signing_job_arn", function.SigningJobArn); err != nil { - return fmt.Errorf("Error setting signing job arn for Lambda Function: %s", err) + return fmt.Errorf("Error setting signing job arn for Lambda Function: %w", err) } reservedConcurrentExecutions := int64(-1) @@ -278,7 +278,7 @@ func dataSourceAwsLambdaFunctionRead(d *schema.ResourceData, meta interface{}) e d.Set("source_code_size", function.CodeSize) if err := d.Set("tags", keyvaluetags.LambdaKeyValueTags(output.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } tracingConfig := []map[string]interface{}{ @@ -297,11 +297,11 @@ func dataSourceAwsLambdaFunctionRead(d *schema.ResourceData, meta interface{}) e d.Set("version", function.Version) if err := d.Set("vpc_config", flattenLambdaVpcConfigResponse(function.VpcConfig)); err != nil { - return fmt.Errorf("error setting vpc_config: %s", err) + return fmt.Errorf("error setting vpc_config: %w", err) } if err := d.Set("file_system_config", flattenLambdaFileSystemConfigs(function.FileSystemConfigs)); err != nil { - return fmt.Errorf("error setting file_system_config: %s", err) + return fmt.Errorf("error setting file_system_config: %w", err) } // Currently, this functionality is only enabled in AWS Commercial partition diff --git a/aws/data_source_aws_lambda_layer_version.go b/aws/data_source_aws_lambda_layer_version.go index 9e162d441d6..f60ce7f0101 100644 --- a/aws/data_source_aws_lambda_layer_version.go +++ b/aws/data_source_aws_lambda_layer_version.go @@ -97,7 +97,7 @@ func dataSourceAwsLambdaLayerVersionRead(d *schema.ResourceData, meta interface{ log.Printf("[DEBUG] Looking up latest version for lambda layer %s", layerName) listOutput, err := conn.ListLayerVersions(listInput) if err != nil { - return fmt.Errorf("error listing Lambda Layer Versions (%s): %s", layerName, err) + return fmt.Errorf("error listing Lambda Layer Versions (%s): %w", layerName, err) } if len(listOutput.LayerVersions) == 0 { @@ -116,7 +116,7 @@ func dataSourceAwsLambdaLayerVersionRead(d *schema.ResourceData, meta interface{ output, err := conn.GetLayerVersion(input) if err != nil { - return fmt.Errorf("error getting Lambda Layer Version (%s, version %d): %s", layerName, version, err) + return fmt.Errorf("error getting Lambda Layer Version (%s, version %d): %w", layerName, version, err) } if output == nil { @@ -124,37 +124,37 @@ func dataSourceAwsLambdaLayerVersionRead(d *schema.ResourceData, meta interface{ } if err := d.Set("version", int(aws.Int64Value(output.Version))); err != nil { - return fmt.Errorf("error setting lambda layer version: %s", err) + return fmt.Errorf("error setting lambda layer version: %w", err) } if err := d.Set("compatible_runtimes", flattenStringList(output.CompatibleRuntimes)); err != nil { - return 
fmt.Errorf("error setting lambda layer compatible runtimes: %s", err) + return fmt.Errorf("error setting lambda layer compatible runtimes: %w", err) } if err := d.Set("description", output.Description); err != nil { - return fmt.Errorf("error setting lambda layer description: %s", err) + return fmt.Errorf("error setting lambda layer description: %w", err) } if err := d.Set("license_info", output.LicenseInfo); err != nil { - return fmt.Errorf("error setting lambda layer license info: %s", err) + return fmt.Errorf("error setting lambda layer license info: %w", err) } if err := d.Set("arn", output.LayerVersionArn); err != nil { - return fmt.Errorf("error setting lambda layer version arn: %s", err) + return fmt.Errorf("error setting lambda layer version arn: %w", err) } if err := d.Set("layer_arn", output.LayerArn); err != nil { - return fmt.Errorf("error setting lambda layer arn: %s", err) + return fmt.Errorf("error setting lambda layer arn: %w", err) } if err := d.Set("created_date", output.CreatedDate); err != nil { - return fmt.Errorf("error setting lambda layer created date: %s", err) + return fmt.Errorf("error setting lambda layer created date: %w", err) } if err := d.Set("source_code_hash", output.Content.CodeSha256); err != nil { - return fmt.Errorf("error setting lambda layer source code hash: %s", err) + return fmt.Errorf("error setting lambda layer source code hash: %w", err) } if err := d.Set("source_code_size", output.Content.CodeSize); err != nil { - return fmt.Errorf("error setting lambda layer source code size: %s", err) + return fmt.Errorf("error setting lambda layer source code size: %w", err) } if err := d.Set("signing_profile_version_arn", output.Content.SigningProfileVersionArn); err != nil { - return fmt.Errorf("Error setting lambda layer signing profile arn: %s", err) + return fmt.Errorf("Error setting lambda layer signing profile arn: %w", err) } if err := d.Set("signing_job_arn", output.Content.SigningJobArn); err != nil { - return fmt.Errorf("Error setting lambda layer signing job arn: %s", err) + return fmt.Errorf("Error setting lambda layer signing job arn: %w", err) } d.SetId(aws.StringValue(output.LayerVersionArn)) diff --git a/aws/data_source_aws_launch_configuration.go b/aws/data_source_aws_launch_configuration.go index b6b0c290085..76b673b4eb5 100644 --- a/aws/data_source_aws_launch_configuration.go +++ b/aws/data_source_aws_launch_configuration.go @@ -228,7 +228,7 @@ func dataSourceAwsLaunchConfigurationRead(d *schema.ResourceData, meta interface log.Printf("[DEBUG] launch configuration describe configuration: %s", describeOpts) describConfs, err := autoscalingconn.DescribeLaunchConfigurations(&describeOpts) if err != nil { - return fmt.Errorf("Error retrieving launch configuration: %s", err) + return fmt.Errorf("Error retrieving launch configuration: %w", err) } if describConfs == nil || len(describConfs.LaunchConfigurations) == 0 { @@ -263,11 +263,11 @@ func dataSourceAwsLaunchConfigurationRead(d *schema.ResourceData, meta interface vpcSGs = append(vpcSGs, *sg) } if err := d.Set("security_groups", vpcSGs); err != nil { - return fmt.Errorf("error setting security_groups: %s", err) + return fmt.Errorf("error setting security_groups: %w", err) } if err := d.Set("metadata_options", flattenLaunchConfigInstanceMetadataOptions(lc.MetadataOptions)); err != nil { - return fmt.Errorf("error setting metadata_options: %s", err) + return fmt.Errorf("error setting metadata_options: %w", err) } classicSGs := make([]string, 0, len(lc.ClassicLinkVPCSecurityGroups)) @@ -275,7 
+275,7 @@ func dataSourceAwsLaunchConfigurationRead(d *schema.ResourceData, meta interface classicSGs = append(classicSGs, *sg) } if err := d.Set("vpc_classic_link_security_groups", classicSGs); err != nil { - return fmt.Errorf("error setting vpc_classic_link_security_groups: %s", err) + return fmt.Errorf("error setting vpc_classic_link_security_groups: %w", err) } if err := readLCBlockDevices(d, lc, ec2conn); err != nil { diff --git a/aws/data_source_aws_lb.go b/aws/data_source_aws_lb.go index ed9e42ab3a6..d65cc952940 100644 --- a/aws/data_source_aws_lb.go +++ b/aws/data_source_aws_lb.go @@ -169,7 +169,7 @@ func dataSourceAwsLbRead(d *schema.ResourceData, meta interface{}) error { log.Printf("[DEBUG] Reading Load Balancer: %s", describeLbOpts) describeResp, err := conn.DescribeLoadBalancers(describeLbOpts) if err != nil { - return fmt.Errorf("Error retrieving LB: %s", err) + return fmt.Errorf("Error retrieving LB: %w", err) } if len(describeResp.LoadBalancers) != 1 { return fmt.Errorf("Search returned %d results, please revise so only one is returned", len(describeResp.LoadBalancers)) diff --git a/aws/data_source_aws_lb_target_group.go b/aws/data_source_aws_lb_target_group.go index b22e4bc98db..ca765be1525 100644 --- a/aws/data_source_aws_lb_target_group.go +++ b/aws/data_source_aws_lb_target_group.go @@ -175,7 +175,7 @@ func dataSourceAwsLbTargetGroupRead(d *schema.ResourceData, meta interface{}) er log.Printf("[DEBUG] Reading Load Balancer Target Group: %s", describeTgOpts) describeResp, err := elbconn.DescribeTargetGroups(describeTgOpts) if err != nil { - return fmt.Errorf("Error retrieving LB Target Group: %s", err) + return fmt.Errorf("Error retrieving LB Target Group: %w", err) } if len(describeResp.TargetGroups) != 1 { return fmt.Errorf("Search returned %d results, please revise so only one is returned", len(describeResp.TargetGroups)) diff --git a/aws/data_source_aws_msk_cluster.go b/aws/data_source_aws_msk_cluster.go index dca1ce7ded1..ac8a20a8c26 100644 --- a/aws/data_source_aws_msk_cluster.go +++ b/aws/data_source_aws_msk_cluster.go @@ -66,7 +66,7 @@ func dataSourceAwsMskClusterRead(d *schema.ResourceData, meta interface{}) error listClustersOutput, err := conn.ListClusters(listClustersInput) if err != nil { - return fmt.Errorf("error listing MSK Clusters: %s", err) + return fmt.Errorf("error listing MSK Clusters: %w", err) } if listClustersOutput == nil { @@ -99,7 +99,7 @@ func dataSourceAwsMskClusterRead(d *schema.ResourceData, meta interface{}) error bootstrapBrokersoOutput, err := conn.GetBootstrapBrokers(bootstrapBrokersInput) if err != nil { - return fmt.Errorf("error reading MSK Cluster (%s) bootstrap brokers: %s", aws.StringValue(cluster.ClusterArn), err) + return fmt.Errorf("error reading MSK Cluster (%s) bootstrap brokers: %w", aws.StringValue(cluster.ClusterArn), err) } d.Set("arn", aws.StringValue(cluster.ClusterArn)) @@ -111,7 +111,7 @@ func dataSourceAwsMskClusterRead(d *schema.ResourceData, meta interface{}) error d.Set("number_of_broker_nodes", aws.Int64Value(cluster.NumberOfBrokerNodes)) if err := d.Set("tags", keyvaluetags.KafkaKeyValueTags(cluster.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } d.Set("zookeeper_connect_string", aws.StringValue(cluster.ZookeeperConnectString)) diff --git a/aws/data_source_aws_msk_configuration.go b/aws/data_source_aws_msk_configuration.go index fbcae200968..59cb35b3ce6 100644 --- 
a/aws/data_source_aws_msk_configuration.go +++ b/aws/data_source_aws_msk_configuration.go @@ -64,7 +64,7 @@ func dataSourceAwsMskConfigurationRead(d *schema.ResourceData, meta interface{}) }) if err != nil { - return fmt.Errorf("error listing MSK Configurations: %s", err) + return fmt.Errorf("error listing MSK Configurations: %w", err) } if configuration == nil { @@ -84,7 +84,7 @@ func dataSourceAwsMskConfigurationRead(d *schema.ResourceData, meta interface{}) revisionOutput, err := conn.DescribeConfigurationRevision(revisionInput) if err != nil { - return fmt.Errorf("error describing MSK Configuration (%s) Revision (%d): %s", d.Id(), aws.Int64Value(revision), err) + return fmt.Errorf("error describing MSK Configuration (%s) Revision (%d): %w", d.Id(), aws.Int64Value(revision), err) } if revisionOutput == nil { @@ -95,7 +95,7 @@ func dataSourceAwsMskConfigurationRead(d *schema.ResourceData, meta interface{}) d.Set("description", aws.StringValue(configuration.Description)) if err := d.Set("kafka_versions", aws.StringValueSlice(configuration.KafkaVersions)); err != nil { - return fmt.Errorf("error setting kafka_versions: %s", err) + return fmt.Errorf("error setting kafka_versions: %w", err) } d.Set("latest_revision", aws.Int64Value(revision)) diff --git a/aws/data_source_aws_nat_gateway.go b/aws/data_source_aws_nat_gateway.go index f2431ea4d20..30f743b7fd3 100644 --- a/aws/data_source_aws_nat_gateway.go +++ b/aws/data_source_aws_nat_gateway.go @@ -126,7 +126,7 @@ func dataSourceAwsNatGatewayRead(d *schema.ResourceData, meta interface{}) error d.Set("vpc_id", ngw.VpcId) if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(ngw.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } for _, address := range ngw.NatGatewayAddresses { diff --git a/aws/data_source_aws_network_acls.go b/aws/data_source_aws_network_acls.go index 12a0d4fc6ef..4513dfbcc42 100644 --- a/aws/data_source_aws_network_acls.go +++ b/aws/data_source_aws_network_acls.go @@ -86,7 +86,7 @@ func dataSourceAwsNetworkAclsRead(d *schema.ResourceData, meta interface{}) erro d.SetId(meta.(*AWSClient).region) if err := d.Set("ids", networkAcls); err != nil { - return fmt.Errorf("Error setting network ACL ids: %s", err) + return fmt.Errorf("Error setting network ACL ids: %w", err) } return nil diff --git a/aws/data_source_aws_network_interface.go b/aws/data_source_aws_network_interface.go index e836bf310de..f6bcf9aa5fe 100644 --- a/aws/data_source_aws_network_interface.go +++ b/aws/data_source_aws_network_interface.go @@ -197,7 +197,7 @@ func dataSourceAwsNetworkInterfaceRead(d *schema.ResourceData, meta interface{}) d.Set("vpc_id", eni.VpcId) if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(eni.TagSet).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_network_interfaces.go b/aws/data_source_aws_network_interfaces.go index 859d4c12b03..96cbcc1f329 100644 --- a/aws/data_source_aws_network_interfaces.go +++ b/aws/data_source_aws_network_interfaces.go @@ -73,7 +73,7 @@ func dataSourceAwsNetworkInterfacesRead(d *schema.ResourceData, meta interface{} d.SetId(meta.(*AWSClient).region) if err := d.Set("ids", networkInterfaces); err != nil { - return fmt.Errorf("Error setting network interfaces ids: %s", err) + return fmt.Errorf("Error setting network interfaces ids: 
%w", err) } return nil diff --git a/aws/data_source_aws_organizations_organization.go b/aws/data_source_aws_organizations_organization.go index 7d32e078681..efd990e0ddc 100644 --- a/aws/data_source_aws_organizations_organization.go +++ b/aws/data_source_aws_organizations_organization.go @@ -146,7 +146,7 @@ func dataSourceAwsOrganizationsOrganizationRead(d *schema.ResourceData, meta int org, err := conn.DescribeOrganization(&organizations.DescribeOrganizationInput{}) if err != nil { - return fmt.Errorf("Error describing organization: %s", err) + return fmt.Errorf("Error describing organization: %w", err) } d.SetId(aws.StringValue(org.Organization.Id)) @@ -171,7 +171,7 @@ func dataSourceAwsOrganizationsOrganizationRead(d *schema.ResourceData, meta int return !lastPage }) if err != nil { - return fmt.Errorf("error listing AWS Organization (%s) accounts: %s", d.Id(), err) + return fmt.Errorf("error listing AWS Organization (%s) accounts: %w", d.Id(), err) } var roots []*organizations.Root @@ -180,7 +180,7 @@ func dataSourceAwsOrganizationsOrganizationRead(d *schema.ResourceData, meta int return !lastPage }) if err != nil { - return fmt.Errorf("error listing AWS Organization (%s) roots: %s", d.Id(), err) + return fmt.Errorf("error listing AWS Organization (%s) roots: %w", d.Id(), err) } awsServiceAccessPrincipals := make([]string, 0) @@ -194,7 +194,7 @@ func dataSourceAwsOrganizationsOrganizationRead(d *schema.ResourceData, meta int }) if err != nil { - return fmt.Errorf("error listing AWS Service Access for Organization (%s): %s", d.Id(), err) + return fmt.Errorf("error listing AWS Service Access for Organization (%s): %w", d.Id(), err) } } @@ -206,23 +206,23 @@ func dataSourceAwsOrganizationsOrganizationRead(d *schema.ResourceData, meta int } if err := d.Set("accounts", flattenOrganizationsAccounts(accounts)); err != nil { - return fmt.Errorf("error setting accounts: %s", err) + return fmt.Errorf("error setting accounts: %w", err) } if err := d.Set("aws_service_access_principals", awsServiceAccessPrincipals); err != nil { - return fmt.Errorf("error setting aws_service_access_principals: %s", err) + return fmt.Errorf("error setting aws_service_access_principals: %w", err) } if err := d.Set("enabled_policy_types", enabledPolicyTypes); err != nil { - return fmt.Errorf("error setting enabled_policy_types: %s", err) + return fmt.Errorf("error setting enabled_policy_types: %w", err) } if err := d.Set("non_master_accounts", flattenOrganizationsAccounts(nonMasterAccounts)); err != nil { - return fmt.Errorf("error setting non_master_accounts: %s", err) + return fmt.Errorf("error setting non_master_accounts: %w", err) } if err := d.Set("roots", flattenOrganizationsRoots(roots)); err != nil { - return fmt.Errorf("error setting roots: %s", err) + return fmt.Errorf("error setting roots: %w", err) } } diff --git a/aws/data_source_aws_organizations_organizational_units.go b/aws/data_source_aws_organizations_organizational_units.go index d56705db6b5..2406fe7ad72 100644 --- a/aws/data_source_aws_organizations_organizational_units.go +++ b/aws/data_source_aws_organizations_organizational_units.go @@ -60,13 +60,13 @@ func dataSourceAwsOrganizationsOrganizationalUnitsRead(d *schema.ResourceData, m }) if err != nil { - return fmt.Errorf("error listing Organizations Organization Units for parent (%s): %s", parent_id, err) + return fmt.Errorf("error listing Organizations Organization Units for parent (%s): %w", parent_id, err) } d.SetId(parent_id) if err := d.Set("children", 
flattenOrganizationsOrganizationalUnits(children)); err != nil { - return fmt.Errorf("Error setting children: %s", err) + return fmt.Errorf("error setting children: %w", err) } return nil diff --git a/aws/data_source_aws_pricing_product.go b/aws/data_source_aws_pricing_product.go index 6f04ef55a2a..2d983e9b9c3 100644 --- a/aws/data_source_aws_pricing_product.go +++ b/aws/data_source_aws_pricing_product.go @@ -65,7 +65,7 @@ func dataSourceAwsPricingProductRead(d *schema.ResourceData, meta interface{}) e log.Printf("[DEBUG] Reading pricing of products: %s", params) resp, err := conn.GetProducts(params) if err != nil { - return fmt.Errorf("Error reading pricing of products: %s", err) + return fmt.Errorf("Error reading pricing of products: %w", err) } numberOfElements := len(resp.PriceList) @@ -82,7 +82,7 @@ func dataSourceAwsPricingProductRead(d *schema.ResourceData, meta interface{}) e pricingResult, err := json.Marshal(resp.PriceList[0]) if err != nil { - return fmt.Errorf("Invalid JSON value returned by AWS: %s", err) + return fmt.Errorf("Invalid JSON value returned by AWS: %w", err) } d.SetId(fmt.Sprintf("%d", hashcode.String(params.String()))) diff --git a/aws/data_source_aws_qldb_ledger.go b/aws/data_source_aws_qldb_ledger.go index 598f7427266..1dbe9e834be 100644 --- a/aws/data_source_aws_qldb_ledger.go +++ b/aws/data_source_aws_qldb_ledger.go @@ -50,7 +50,7 @@ func dataSourceAwsQLDBLedgerRead(d *schema.ResourceData, meta interface{}) error resp, err := conn.DescribeLedger(req) if err != nil { - return fmt.Errorf("Error describing ledger: %s", err) + return fmt.Errorf("Error describing ledger: %w", err) } d.SetId(aws.StringValue(resp.Name)) diff --git a/aws/data_source_aws_ram_resource_share.go b/aws/data_source_aws_ram_resource_share.go index a637b0c4a8e..383d8f65c20 100644 --- a/aws/data_source_aws_ram_resource_share.go +++ b/aws/data_source_aws_ram_resource_share.go @@ -97,7 +97,7 @@ func dataSourceAwsRamResourceShareRead(d *schema.ResourceData, meta interface{}) } if resp == nil || len(resp.ResourceShares) == 0 { - return fmt.Errorf("No matching resource found: %s", err) + return fmt.Errorf("No matching resource found: %w", err) } for _, r := range resp.ResourceShares { @@ -108,7 +108,7 @@ func dataSourceAwsRamResourceShareRead(d *schema.ResourceData, meta interface{}) d.Set("status", aws.StringValue(r.Status)) if err := d.Set("tags", keyvaluetags.RamKeyValueTags(r.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } break diff --git a/aws/data_source_aws_rds_cluster.go b/aws/data_source_aws_rds_cluster.go index 308701cbf60..0616e3aff8a 100644 --- a/aws/data_source_aws_rds_cluster.go +++ b/aws/data_source_aws_rds_cluster.go @@ -176,7 +176,7 @@ func dataSourceAwsRdsClusterRead(d *schema.ResourceData, meta interface{}) error resp, err := conn.DescribeDBClusters(params) if err != nil { - return fmt.Errorf("Error retrieving RDS cluster: %s", err) + return fmt.Errorf("Error retrieving RDS cluster: %w", err) } if resp == nil { @@ -198,7 +198,7 @@ func dataSourceAwsRdsClusterRead(d *schema.ResourceData, meta interface{}) error d.SetId(aws.StringValue(dbc.DBClusterIdentifier)) if err := d.Set("availability_zones", aws.StringValueSlice(dbc.AvailabilityZones)); err != nil { - return fmt.Errorf("error setting availability_zones: %s", err) + return fmt.Errorf("error setting availability_zones: %w", err) } arn := dbc.DBClusterArn @@ -212,7 +212,7 @@ func 
dataSourceAwsRdsClusterRead(d *schema.ResourceData, meta interface{}) error cm = append(cm, aws.StringValue(m.DBInstanceIdentifier)) } if err := d.Set("cluster_members", cm); err != nil { - return fmt.Errorf("error setting cluster_members: %s", err) + return fmt.Errorf("error setting cluster_members: %w", err) } d.Set("cluster_resource_id", dbc.DbClusterResourceId) @@ -229,7 +229,7 @@ func dataSourceAwsRdsClusterRead(d *schema.ResourceData, meta interface{}) error d.Set("db_subnet_group_name", dbc.DBSubnetGroup) if err := d.Set("enabled_cloudwatch_logs_exports", aws.StringValueSlice(dbc.EnabledCloudwatchLogsExports)); err != nil { - return fmt.Errorf("error setting enabled_cloudwatch_logs_exports: %s", err) + return fmt.Errorf("error setting enabled_cloudwatch_logs_exports: %w", err) } d.Set("endpoint", dbc.Endpoint) @@ -243,7 +243,7 @@ func dataSourceAwsRdsClusterRead(d *schema.ResourceData, meta interface{}) error roles = append(roles, aws.StringValue(r.RoleArn)) } if err := d.Set("iam_roles", roles); err != nil { - return fmt.Errorf("error setting iam_roles: %s", err) + return fmt.Errorf("error setting iam_roles: %w", err) } d.Set("kms_key_id", dbc.KmsKeyId) @@ -261,17 +261,17 @@ func dataSourceAwsRdsClusterRead(d *schema.ResourceData, meta interface{}) error vpcg = append(vpcg, aws.StringValue(g.VpcSecurityGroupId)) } if err := d.Set("vpc_security_group_ids", vpcg); err != nil { - return fmt.Errorf("error setting vpc_security_group_ids: %s", err) + return fmt.Errorf("error setting vpc_security_group_ids: %w", err) } tags, err := keyvaluetags.RdsListTags(conn, *arn) if err != nil { - return fmt.Errorf("error listing tags for RDS Cluster (%s): %s", *arn, err) + return fmt.Errorf("error listing tags for RDS Cluster (%s): %w", *arn, err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_redshift_cluster.go b/aws/data_source_aws_redshift_cluster.go index 48f240bed13..a6b3222d3a6 100644 --- a/aws/data_source_aws_redshift_cluster.go +++ b/aws/data_source_aws_redshift_cluster.go @@ -182,7 +182,7 @@ func dataSourceAwsRedshiftClusterRead(d *schema.ResourceData, meta interface{}) }) if err != nil { - return fmt.Errorf("Error describing Redshift Cluster: %s, error: %s", cluster, err) + return fmt.Errorf("Error describing Redshift Cluster: %s, error: %w", cluster, err) } if resp.Clusters == nil || len(resp.Clusters) == 0 { @@ -209,7 +209,7 @@ func dataSourceAwsRedshiftClusterRead(d *schema.ResourceData, meta interface{}) csg = append(csg, *g.ClusterSecurityGroupName) } if err := d.Set("cluster_security_groups", csg); err != nil { - return fmt.Errorf("Error saving Cluster Security Group Names to state for Redshift Cluster (%s): %s", cluster, err) + return fmt.Errorf("Error saving Cluster Security Group Names to state for Redshift Cluster (%s): %w", cluster, err) } d.Set("cluster_subnet_group_name", rsc.ClusterSubnetGroupName) @@ -240,7 +240,7 @@ func dataSourceAwsRedshiftClusterRead(d *schema.ResourceData, meta interface{}) iamRoles = append(iamRoles, *i.IamRoleArn) } if err := d.Set("iam_roles", iamRoles); err != nil { - return fmt.Errorf("Error saving IAM Roles to state for Redshift Cluster (%s): %s", cluster, err) + return fmt.Errorf("Error saving IAM Roles to state for Redshift Cluster (%s): %w", cluster, err) } d.Set("kms_key_id", rsc.KmsKeyId) @@ -252,7 +252,7 @@ func dataSourceAwsRedshiftClusterRead(d 
*schema.ResourceData, meta interface{}) d.Set("publicly_accessible", rsc.PubliclyAccessible) if err := d.Set("tags", keyvaluetags.RedshiftKeyValueTags(rsc.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } d.Set("vpc_id", rsc.VpcId) @@ -262,7 +262,7 @@ func dataSourceAwsRedshiftClusterRead(d *schema.ResourceData, meta interface{}) vpcg = append(vpcg, *g.VpcSecurityGroupId) } if err := d.Set("vpc_security_group_ids", vpcg); err != nil { - return fmt.Errorf("Error saving VPC Security Group IDs to state for Redshift Cluster (%s): %s", cluster, err) + return fmt.Errorf("Error saving VPC Security Group IDs to state for Redshift Cluster (%s): %w", cluster, err) } log.Printf("[INFO] Reading Redshift Cluster Logging Status: %s", cluster) diff --git a/aws/data_source_aws_regions.go b/aws/data_source_aws_regions.go index bb438ff3a00..61886c586c0 100644 --- a/aws/data_source_aws_regions.go +++ b/aws/data_source_aws_regions.go @@ -44,7 +44,7 @@ func dataSourceAwsRegionsRead(d *schema.ResourceData, meta interface{}) error { log.Printf("[DEBUG] Reading regions for request: %s", request) response, err := connection.DescribeRegions(request) if err != nil { - return fmt.Errorf("Error fetching Regions: %s", err) + return fmt.Errorf("Error fetching Regions: %w", err) } names := []string{} @@ -54,7 +54,7 @@ func dataSourceAwsRegionsRead(d *schema.ResourceData, meta interface{}) error { d.SetId(meta.(*AWSClient).partition) if err := d.Set("names", names); err != nil { - return fmt.Errorf("error setting names: %s", err) + return fmt.Errorf("error setting names: %w", err) } return nil diff --git a/aws/data_source_aws_route53_delegation_set.go b/aws/data_source_aws_route53_delegation_set.go index fc5a73acf66..723a17c7b2d 100644 --- a/aws/data_source_aws_route53_delegation_set.go +++ b/aws/data_source_aws_route53_delegation_set.go @@ -44,14 +44,14 @@ func dataSourceAwsDelegationSetRead(d *schema.ResourceData, meta interface{}) er resp, err := conn.GetReusableDelegationSet(input) if err != nil { - return fmt.Errorf("Failed getting Route53 delegation set: %s Set: %q", err, dSetID) + return fmt.Errorf("Failed getting Route53 delegation set (%s): %w", dSetID, err) } d.SetId(dSetID) d.Set("caller_reference", resp.DelegationSet.CallerReference) if err := d.Set("name_servers", expandNameServers(resp.DelegationSet.NameServers)); err != nil { - return fmt.Errorf("error setting name_servers: %s", err) + return fmt.Errorf("error setting name_servers: %w", err) } return nil diff --git a/aws/data_source_aws_route53_resolver_endpoint.go b/aws/data_source_aws_route53_resolver_endpoint.go index d5ded2a1b49..0e2acf9be6d 100644 --- a/aws/data_source_aws_route53_resolver_endpoint.go +++ b/aws/data_source_aws_route53_resolver_endpoint.go @@ -140,7 +140,7 @@ func dataSourceAwsRoute53ResolverEndpointRead(d *schema.ResourceData, meta inter ip, err := conn.ListResolverEndpointIpAddresses(params) if err != nil { - return fmt.Errorf("error getting Route53 Resolver endpoint (%s) IP Addresses: %s", d.Id(), err) + return fmt.Errorf("error getting Route53 Resolver endpoint (%s) IP Addresses: %w", d.Id(), err) } for _, vIPAddresses := range ip.IpAddresses { diff --git a/aws/data_source_aws_route53_resolver_rule.go b/aws/data_source_aws_route53_resolver_rule.go index ec5bda8b26f..da80eed78d0 100644 --- a/aws/data_source_aws_route53_resolver_rule.go +++ b/aws/data_source_aws_route53_resolver_rule.go @@ -86,7 +86,7 @@ func 
dataSourceAwsRoute53ResolverRuleRead(d *schema.ResourceData, meta interface if v, ok := d.GetOk("resolver_rule_id"); ok { ruleRaw, state, err := route53ResolverRuleRefresh(conn, v.(string))() if err != nil { - return fmt.Errorf("error getting Route53 Resolver rule (%s): %s", v, err) + return fmt.Errorf("error getting Route53 Resolver rule (%s): %w", v, err) } if state == route53ResolverRuleStatusDeleted { @@ -107,7 +107,7 @@ func dataSourceAwsRoute53ResolverRuleRead(d *schema.ResourceData, meta interface log.Printf("[DEBUG] Listing Route53 Resolver rules: %s", req) resp, err := conn.ListResolverRules(req) if err != nil { - return fmt.Errorf("error getting Route53 Resolver rules: %s", err) + return fmt.Errorf("error getting Route53 Resolver rules: %w", err) } if n := len(resp.ResolverRules); n == 0 { @@ -137,11 +137,11 @@ func dataSourceAwsRoute53ResolverRuleRead(d *schema.ResourceData, meta interface tags, err := keyvaluetags.Route53resolverListTags(conn, arn) if err != nil { - return fmt.Errorf("error listing tags for Route 53 Resolver rule (%s): %s", arn, err) + return fmt.Errorf("error listing tags for Route 53 Resolver rule (%s): %w", arn, err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } } diff --git a/aws/data_source_aws_route53_resolver_rules.go b/aws/data_source_aws_route53_resolver_rules.go index e4d45645798..9d5f7ca222a 100644 --- a/aws/data_source_aws_route53_resolver_rules.go +++ b/aws/data_source_aws_route53_resolver_rules.go @@ -87,14 +87,14 @@ func dataSourceAwsRoute53ResolverRulesRead(d *schema.ResourceData, meta interfac return !isLast }) if err != nil { - return fmt.Errorf("error getting Route53 Resolver rules: %s", err) + return fmt.Errorf("error getting Route53 Resolver rules: %w", err) } d.SetId(meta.(*AWSClient).region) err = d.Set("resolver_rule_ids", flattenStringSet(resolverRuleIds)) if err != nil { - return fmt.Errorf("error setting resolver_rule_ids: %s", err) + return fmt.Errorf("error setting resolver_rule_ids: %w", err) } return nil diff --git a/aws/data_source_aws_route53_zone.go b/aws/data_source_aws_route53_zone.go index c3f95b39967..025ee88125d 100644 --- a/aws/data_source_aws_route53_zone.go +++ b/aws/data_source_aws_route53_zone.go @@ -97,7 +97,7 @@ func dataSourceAwsRoute53ZoneRead(d *schema.ResourceData, meta interface{}) erro resp, err := conn.ListHostedZones(req) if err != nil { - return fmt.Errorf("Error finding Route 53 Hosted Zone: %v", err) + return fmt.Errorf("Error finding Route 53 Hosted Zone: %w", err) } for _, hostedZone := range resp.HostedZones { hostedZoneId := cleanZoneID(aws.StringValue(hostedZone.Id)) @@ -113,7 +113,7 @@ func dataSourceAwsRoute53ZoneRead(d *schema.ResourceData, meta interface{}) erro respHostedZone, errHostedZone := conn.GetHostedZone(reqHostedZone) if errHostedZone != nil { - return fmt.Errorf("Error finding Route 53 Hosted Zone: %v", errHostedZone) + return fmt.Errorf("Error finding Route 53 Hosted Zone: %w", errHostedZone) } // we go through all VPCs for _, vpc := range respHostedZone.VPCs { diff --git a/aws/data_source_aws_route_table.go b/aws/data_source_aws_route_table.go index 2bcd3d3fb65..02475139140 100644 --- a/aws/data_source_aws_route_table.go +++ b/aws/data_source_aws_route_table.go @@ -189,7 +189,7 @@ func dataSourceAwsRouteTableRead(d *schema.ResourceData, meta interface{}) error d.Set("vpc_id", rt.VpcId) if err := d.Set("tags", 
keyvaluetags.Ec2KeyValueTags(rt.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } d.Set("owner_id", rt.OwnerId) diff --git a/aws/data_source_aws_route_tables.go b/aws/data_source_aws_route_tables.go index e970016a0c6..8da3cf51ce8 100644 --- a/aws/data_source_aws_route_tables.go +++ b/aws/data_source_aws_route_tables.go @@ -74,7 +74,7 @@ func dataSourceAwsRouteTablesRead(d *schema.ResourceData, meta interface{}) erro d.SetId(meta.(*AWSClient).region) if err = d.Set("ids", routeTables); err != nil { - return fmt.Errorf("error setting ids: %s", err) + return fmt.Errorf("error setting ids: %w", err) } return nil diff --git a/aws/data_source_aws_s3_bucket.go b/aws/data_source_aws_s3_bucket.go index d9f8e88bafd..2212447accd 100644 --- a/aws/data_source_aws_s3_bucket.go +++ b/aws/data_source_aws_s3_bucket.go @@ -67,7 +67,7 @@ func dataSourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { _, err := conn.HeadBucket(input) if err != nil { - return fmt.Errorf("Failed getting S3 bucket: %s Bucket: %q", err, bucket) + return fmt.Errorf("Failed getting S3 bucket (%s): %w", bucket, err) } d.SetId(bucket) @@ -81,7 +81,7 @@ func dataSourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { err = bucketLocation(meta.(*AWSClient), d, bucket) if err != nil { - return fmt.Errorf("error getting S3 Bucket location: %s", err) + return fmt.Errorf("error getting S3 Bucket location: %w", err) } regionalDomainName, err := BucketRegionalDomainName(bucket, d.Get("region").(string)) diff --git a/aws/data_source_aws_s3_bucket_object.go b/aws/data_source_aws_s3_bucket_object.go index e25a5be4ad1..63742936245 100644 --- a/aws/data_source_aws_s3_bucket_object.go +++ b/aws/data_source_aws_s3_bucket_object.go @@ -147,11 +147,10 @@ func dataSourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) e log.Printf("[DEBUG] Reading S3 Bucket Object: %s", input) out, err := conn.HeadObject(&input) if err != nil { - return fmt.Errorf("Failed getting S3 object: %s Bucket: %q Object: %q", err, bucket, key) + return fmt.Errorf("failed getting S3 Bucket (%s) Object (%s): %w", bucket, key, err) } if aws.BoolValue(out.DeleteMarker) { - return fmt.Errorf("Requested S3 object %q%s has been deleted", - bucket+key, versionText) + return fmt.Errorf("Requested S3 object %q%s has been deleted", bucket+key, versionText) } log.Printf("[DEBUG] Received S3 object: %s", out) @@ -202,14 +201,13 @@ func dataSourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) e } out, err := conn.GetObject(&input) if err != nil { - return fmt.Errorf("Failed getting S3 object: %s", err) + return fmt.Errorf("Failed getting S3 object: %w", err) } buf := new(bytes.Buffer) bytesRead, err := buf.ReadFrom(out.Body) if err != nil { - return fmt.Errorf("Failed reading content of S3 object (%s): %s", - uniqueId, err) + return fmt.Errorf("Failed reading content of S3 object (%s): %w", uniqueId, err) } log.Printf("[INFO] Saving %d bytes from S3 object %s", bytesRead, uniqueId) d.Set("body", buf.String()) @@ -221,18 +219,17 @@ func dataSourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) e contentType = *out.ContentType } - log.Printf("[INFO] Ignoring body of S3 object %s with Content-Type %q", - uniqueId, contentType) + log.Printf("[INFO] Ignoring body of S3 object %s with Content-Type %q", uniqueId, contentType) } tags, err := keyvaluetags.S3ObjectListTags(conn, bucket, key) if err 
!= nil { - return fmt.Errorf("error listing tags for S3 Bucket (%s) Object (%s): %s", bucket, key, err) + return fmt.Errorf("error listing tags for S3 Bucket (%s) Object (%s): %w", bucket, key, err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_s3_bucket_objects.go b/aws/data_source_aws_s3_bucket_objects.go index 87dafb4fa44..da5a1894fa1 100644 --- a/aws/data_source_aws_s3_bucket_objects.go +++ b/aws/data_source_aws_s3_bucket_objects.go @@ -128,21 +128,21 @@ func dataSourceAwsS3BucketObjectsRead(d *schema.ResourceData, meta interface{}) }) if err != nil { - return fmt.Errorf("error listing S3 Bucket (%s) Objects: %s", bucket, err) + return fmt.Errorf("error listing S3 Bucket (%s) Objects: %w", bucket, err) } d.SetId(bucket) if err := d.Set("common_prefixes", commonPrefixes); err != nil { - return fmt.Errorf("error setting common_prefixes: %s", err) + return fmt.Errorf("error setting common_prefixes: %w", err) } if err := d.Set("keys", keys); err != nil { - return fmt.Errorf("error setting keys: %s", err) + return fmt.Errorf("error setting keys: %w", err) } if err := d.Set("owners", owners); err != nil { - return fmt.Errorf("error setting owners: %s", err) + return fmt.Errorf("error setting owners: %w", err) } return nil diff --git a/aws/data_source_aws_secretsmanager_secret.go b/aws/data_source_aws_secretsmanager_secret.go index 8b0837d98e2..a06f33a81fe 100644 --- a/aws/data_source_aws_secretsmanager_secret.go +++ b/aws/data_source_aws_secretsmanager_secret.go @@ -101,7 +101,7 @@ func dataSourceAwsSecretsManagerSecretRead(d *schema.ResourceData, meta interfac if isAWSErr(err, secretsmanager.ErrCodeResourceNotFoundException, "") { return fmt.Errorf("Secrets Manager Secret %q not found", secretID) } - return fmt.Errorf("error reading Secrets Manager Secret: %s", err) + return fmt.Errorf("error reading Secrets Manager Secret: %w", err) } if output.ARN == nil { @@ -123,23 +123,23 @@ func dataSourceAwsSecretsManagerSecretRead(d *schema.ResourceData, meta interfac log.Printf("[DEBUG] Reading Secrets Manager Secret policy: %s", pIn) pOut, err := conn.GetResourcePolicy(pIn) if err != nil { - return fmt.Errorf("error reading Secrets Manager Secret policy: %s", err) + return fmt.Errorf("error reading Secrets Manager Secret policy: %w", err) } if pOut != nil && pOut.ResourcePolicy != nil { policy, err := structure.NormalizeJsonString(aws.StringValue(pOut.ResourcePolicy)) if err != nil { - return fmt.Errorf("policy contains an invalid JSON: %s", err) + return fmt.Errorf("policy contains an invalid JSON: %w", err) } d.Set("policy", policy) } if err := d.Set("rotation_rules", flattenSecretsManagerRotationRules(output.RotationRules)); err != nil { - return fmt.Errorf("error setting rotation_rules: %s", err) + return fmt.Errorf("error setting rotation_rules: %w", err) } if err := d.Set("tags", keyvaluetags.SecretsmanagerKeyValueTags(output.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_secretsmanager_secret_rotation.go b/aws/data_source_aws_secretsmanager_secret_rotation.go index 1849759ba55..9d475d6726a 100644 --- a/aws/data_source_aws_secretsmanager_secret_rotation.go +++ b/aws/data_source_aws_secretsmanager_secret_rotation.go @@ -55,7 +55,7 
@@ func dataSourceAwsSecretsManagerSecretRotationRead(d *schema.ResourceData, meta log.Printf("[DEBUG] Reading Secrets Manager Secret: %s", input) output, err := conn.DescribeSecret(input) if err != nil { - return fmt.Errorf("error reading Secrets Manager Secret: %s", err) + return fmt.Errorf("error reading Secrets Manager Secret: %w", err) } if output.ARN == nil { @@ -67,7 +67,7 @@ func dataSourceAwsSecretsManagerSecretRotationRead(d *schema.ResourceData, meta d.Set("rotation_lambda_arn", output.RotationLambdaARN) if err := d.Set("rotation_rules", flattenSecretsManagerRotationRules(output.RotationRules)); err != nil { - return fmt.Errorf("error setting rotation_rules: %s", err) + return fmt.Errorf("error setting rotation_rules: %w", err) } return nil diff --git a/aws/data_source_aws_secretsmanager_secret_version.go b/aws/data_source_aws_secretsmanager_secret_version.go index b2b60419377..1fbfc762027 100644 --- a/aws/data_source_aws_secretsmanager_secret_version.go +++ b/aws/data_source_aws_secretsmanager_secret_version.go @@ -79,7 +79,7 @@ func dataSourceAwsSecretsManagerSecretVersionRead(d *schema.ResourceData, meta i if isAWSErr(err, secretsmanager.ErrCodeInvalidRequestException, "You can’t perform this operation on the secret because it was deleted") { return fmt.Errorf("Secrets Manager Secret %q Version %q not found", secretID, version) } - return fmt.Errorf("error reading Secrets Manager Secret Version: %s", err) + return fmt.Errorf("error reading Secrets Manager Secret Version: %w", err) } d.SetId(fmt.Sprintf("%s|%s", secretID, version)) @@ -90,7 +90,7 @@ func dataSourceAwsSecretsManagerSecretVersionRead(d *schema.ResourceData, meta i d.Set("arn", output.ARN) if err := d.Set("version_stages", flattenStringList(output.VersionStages)); err != nil { - return fmt.Errorf("error setting version_stages: %s", err) + return fmt.Errorf("error setting version_stages: %w", err) } return nil diff --git a/aws/data_source_aws_security_group.go b/aws/data_source_aws_security_group.go index fb1b63e8c54..f3555b7e1a4 100644 --- a/aws/data_source_aws_security_group.go +++ b/aws/data_source_aws_security_group.go @@ -96,7 +96,7 @@ func dataSourceAwsSecurityGroupRead(d *schema.ResourceData, meta interface{}) er d.Set("vpc_id", sg.VpcId) if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(sg.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } arn := arn.ARN{ diff --git a/aws/data_source_aws_security_groups.go b/aws/data_source_aws_security_groups.go index e2a7b5a8641..46fc8434d2e 100644 --- a/aws/data_source_aws_security_groups.go +++ b/aws/data_source_aws_security_groups.go @@ -59,7 +59,7 @@ func dataSourceAwsSecurityGroupsRead(d *schema.ResourceData, meta interface{}) e for { resp, err := conn.DescribeSecurityGroups(req) if err != nil { - return fmt.Errorf("error reading security groups: %s", err) + return fmt.Errorf("error reading security groups: %w", err) } for _, sg := range resp.SecurityGroups { diff --git a/aws/data_source_aws_servicequotas_service.go b/aws/data_source_aws_servicequotas_service.go index 9ed865c5fb4..a61e421cb05 100644 --- a/aws/data_source_aws_servicequotas_service.go +++ b/aws/data_source_aws_servicequotas_service.go @@ -45,7 +45,7 @@ func dataSourceAwsServiceQuotasServiceRead(d *schema.ResourceData, meta interfac }) if err != nil { - return fmt.Errorf("error listing Services: %s", err) + return fmt.Errorf("error listing Services: %w", err) } if service == nil 
{ diff --git a/aws/data_source_aws_servicequotas_service_quota.go b/aws/data_source_aws_servicequotas_service_quota.go index 164a3cc5cd4..eb621f04b31 100644 --- a/aws/data_source_aws_servicequotas_service_quota.go +++ b/aws/data_source_aws_servicequotas_service_quota.go @@ -87,7 +87,7 @@ func dataSourceAwsServiceQuotasServiceQuotaRead(d *schema.ResourceData, meta int }) if err != nil { - return fmt.Errorf("error listing Service (%s) Quotas: %s", serviceCode, err) + return fmt.Errorf("error listing Service (%s) Quotas: %w", serviceCode, err) } if serviceQuota == nil { @@ -102,7 +102,7 @@ func dataSourceAwsServiceQuotasServiceQuotaRead(d *schema.ResourceData, meta int output, err := conn.GetServiceQuota(input) if err != nil { - return fmt.Errorf("error getting Service (%s) Quota (%s): %s", serviceCode, quotaCode, err) + return fmt.Errorf("error getting Service (%s) Quota (%s): %w", serviceCode, quotaCode, err) } if output == nil { @@ -120,7 +120,7 @@ func dataSourceAwsServiceQuotasServiceQuotaRead(d *schema.ResourceData, meta int output, err := conn.GetAWSDefaultServiceQuota(input) if err != nil { - return fmt.Errorf("error getting Service (%s) Default Quota (%s): %s", serviceCode, aws.StringValue(serviceQuota.QuotaCode), err) + return fmt.Errorf("error getting Service (%s) Default Quota (%s): %w", serviceCode, aws.StringValue(serviceQuota.QuotaCode), err) } if output == nil { diff --git a/aws/data_source_aws_sfn_activity.go b/aws/data_source_aws_sfn_activity.go index e08ddd11107..714855ef007 100644 --- a/aws/data_source_aws_sfn_activity.go +++ b/aws/data_source_aws_sfn_activity.go @@ -60,7 +60,7 @@ func dataSourceAwsSfnActivityRead(d *schema.ResourceData, meta interface{}) erro }) if err != nil { - return fmt.Errorf("Error listing activities: %s", err) + return fmt.Errorf("Error listing activities: %w", err) } if len(acts) == 0 { @@ -89,7 +89,7 @@ func dataSourceAwsSfnActivityRead(d *schema.ResourceData, meta interface{}) erro act, err := conn.DescribeActivity(params) if err != nil { - return fmt.Errorf("Error describing activities: %s", err) + return fmt.Errorf("Error describing activities: %w", err) } if act == nil { diff --git a/aws/data_source_aws_sfn_state_machine.go b/aws/data_source_aws_sfn_state_machine.go index 411fa99a9b6..e869e8075b2 100644 --- a/aws/data_source_aws_sfn_state_machine.go +++ b/aws/data_source_aws_sfn_state_machine.go @@ -60,7 +60,7 @@ func dataSourceAwsSfnStateMachineRead(d *schema.ResourceData, meta interface{}) }) if err != nil { - return fmt.Errorf("Error listing state machines: %s", err) + return fmt.Errorf("Error listing state machines: %w", err) } if len(arns) == 0 { diff --git a/aws/data_source_aws_signer_signing_job.go b/aws/data_source_aws_signer_signing_job.go index f916237c143..faa96d2d623 100644 --- a/aws/data_source_aws_signer_signing_job.go +++ b/aws/data_source_aws_signer_signing_job.go @@ -151,47 +151,47 @@ func dataSourceAwsSignerSigningJobRead(d *schema.ResourceData, meta interface{}) }) if err != nil { - return fmt.Errorf("error reading Signer signing job (%s): %s", d.Id(), err) + return fmt.Errorf("error reading Signer signing job (%s): %w", d.Id(), err) } if err := d.Set("completed_at", aws.TimeValue(describeSigningJobOutput.CompletedAt).Format(time.RFC3339)); err != nil { - return fmt.Errorf("error setting signer signing job completed at: %s", err) + return fmt.Errorf("error setting signer signing job completed at: %w", err) } if err := d.Set("created_at", aws.TimeValue(describeSigningJobOutput.CreatedAt).Format(time.RFC3339)); err != nil { - 
return fmt.Errorf("error setting signer signing job created at: %s", err) + return fmt.Errorf("error setting signer signing job created at: %w", err) } if err := d.Set("job_invoker", describeSigningJobOutput.JobInvoker); err != nil { - return fmt.Errorf("error setting signer signing job invoker: %s", err) + return fmt.Errorf("error setting signer signing job invoker: %w", err) } if err := d.Set("job_owner", describeSigningJobOutput.JobOwner); err != nil { - return fmt.Errorf("error setting signer signing job owner: %s", err) + return fmt.Errorf("error setting signer signing job owner: %w", err) } if err := d.Set("platform_display_name", describeSigningJobOutput.PlatformDisplayName); err != nil { - return fmt.Errorf("error setting signer signing job platform display name: %s", err) + return fmt.Errorf("error setting signer signing job platform display name: %w", err) } if err := d.Set("platform_id", describeSigningJobOutput.PlatformId); err != nil { - return fmt.Errorf("error setting signer signing job platform id: %s", err) + return fmt.Errorf("error setting signer signing job platform id: %w", err) } if err := d.Set("profile_name", describeSigningJobOutput.ProfileName); err != nil { - return fmt.Errorf("error setting signer signing job profile name: %s", err) + return fmt.Errorf("error setting signer signing job profile name: %w", err) } if err := d.Set("profile_version", describeSigningJobOutput.ProfileVersion); err != nil { - return fmt.Errorf("error setting signer signing job profile version: %s", err) + return fmt.Errorf("error setting signer signing job profile version: %w", err) } if err := d.Set("requested_by", describeSigningJobOutput.RequestedBy); err != nil { - return fmt.Errorf("error setting signer signing job requested by: %s", err) + return fmt.Errorf("error setting signer signing job requested by: %w", err) } if err := d.Set("revocation_record", flattenSignerSigningJobRevocationRecord(describeSigningJobOutput.RevocationRecord)); err != nil { - return fmt.Errorf("error setting signer signing job revocation record: %s", err) + return fmt.Errorf("error setting signer signing job revocation record: %w", err) } signatureExpiresAt := "" @@ -199,23 +199,23 @@ func dataSourceAwsSignerSigningJobRead(d *schema.ResourceData, meta interface{}) signatureExpiresAt = aws.TimeValue(describeSigningJobOutput.SignatureExpiresAt).Format(time.RFC3339) } if err := d.Set("signature_expires_at", signatureExpiresAt); err != nil { - return fmt.Errorf("error setting signer signing job requested by: %s", err) + return fmt.Errorf("error setting signer signing job requested by: %w", err) } if err := d.Set("signed_object", flattenSignerSigningJobSignedObject(describeSigningJobOutput.SignedObject)); err != nil { - return fmt.Errorf("error setting signer signing job signed object: %s", err) + return fmt.Errorf("error setting signer signing job signed object: %w", err) } if err := d.Set("source", flattenSignerSigningJobSource(describeSigningJobOutput.Source)); err != nil { - return fmt.Errorf("error setting signer signing job source: %s", err) + return fmt.Errorf("error setting signer signing job source: %w", err) } if err := d.Set("status", describeSigningJobOutput.Status); err != nil { - return fmt.Errorf("error setting signer signing job status: %s", err) + return fmt.Errorf("error setting signer signing job status: %w", err) } if err := d.Set("status_reason", describeSigningJobOutput.StatusReason); err != nil { - return fmt.Errorf("error setting signer signing job status reason: %s", err) + return 
fmt.Errorf("error setting signer signing job status reason: %w", err) } d.SetId(aws.StringValue(describeSigningJobOutput.JobId)) diff --git a/aws/data_source_aws_signer_signing_profile.go b/aws/data_source_aws_signer_signing_profile.go index ffacc281088..149aacaf8dc 100644 --- a/aws/data_source_aws_signer_signing_profile.go +++ b/aws/data_source_aws_signer_signing_profile.go @@ -93,11 +93,11 @@ func dataSourceAwsSignerSigningProfileRead(d *schema.ResourceData, meta interfac }) if err != nil { - return fmt.Errorf("error reading Signer signing profile (%s): %s", d.Id(), err) + return fmt.Errorf("error reading Signer signing profile (%s): %w", d.Id(), err) } if err := d.Set("platform_id", signingProfileOutput.PlatformId); err != nil { - return fmt.Errorf("error setting signer signing profile platform id: %s", err) + return fmt.Errorf("error setting signer signing profile platform id: %w", err) } if err := d.Set("signature_validity_period", []interface{}{ @@ -106,35 +106,35 @@ func dataSourceAwsSignerSigningProfileRead(d *schema.ResourceData, meta interfac "type": signingProfileOutput.SignatureValidityPeriod.Type, }, }); err != nil { - return fmt.Errorf("error setting signer signing profile signature validity period: %s", err) + return fmt.Errorf("error setting signer signing profile signature validity period: %w", err) } if err := d.Set("platform_display_name", signingProfileOutput.PlatformDisplayName); err != nil { - return fmt.Errorf("error setting signer signing profile platform display name: %s", err) + return fmt.Errorf("error setting signer signing profile platform display name: %w", err) } if err := d.Set("arn", signingProfileOutput.Arn); err != nil { - return fmt.Errorf("error setting signer signing profile arn: %s", err) + return fmt.Errorf("error setting signer signing profile arn: %w", err) } if err := d.Set("version", signingProfileOutput.ProfileVersion); err != nil { - return fmt.Errorf("error setting signer signing profile version: %s", err) + return fmt.Errorf("error setting signer signing profile version: %w", err) } if err := d.Set("version_arn", signingProfileOutput.ProfileVersionArn); err != nil { - return fmt.Errorf("error setting signer signing profile version arn: %s", err) + return fmt.Errorf("error setting signer signing profile version arn: %w", err) } if err := d.Set("status", signingProfileOutput.Status); err != nil { - return fmt.Errorf("error setting signer signing profile status: %s", err) + return fmt.Errorf("error setting signer signing profile status: %w", err) } if err := d.Set("tags", keyvaluetags.SignerKeyValueTags(signingProfileOutput.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting signer signing profile tags: %s", err) + return fmt.Errorf("error setting signer signing profile tags: %w", err) } if err := d.Set("revocation_record", flattenSignerSigningProfileRevocationRecord(signingProfileOutput.RevocationRecord)); err != nil { - return fmt.Errorf("error setting signer signing profile revocation record: %s", err) + return fmt.Errorf("error setting signer signing profile revocation record: %w", err) } d.SetId(aws.StringValue(signingProfileOutput.ProfileName)) diff --git a/aws/data_source_aws_sns.go b/aws/data_source_aws_sns.go index ce84ba1159e..2f4a6c433e7 100644 --- a/aws/data_source_aws_sns.go +++ b/aws/data_source_aws_sns.go @@ -54,7 +54,7 @@ func dataSourceAwsSnsTopicsRead(d *schema.ResourceData, meta interface{}) error return true }) if err != nil { - return fmt.Errorf("Error describing topics: 
%s", err) + return fmt.Errorf("Error describing topics: %w", err) } if len(arns) == 0 { diff --git a/aws/data_source_aws_sqs_queue.go b/aws/data_source_aws_sqs_queue.go index 221fb29e831..bb9aca92d9e 100644 --- a/aws/data_source_aws_sqs_queue.go +++ b/aws/data_source_aws_sqs_queue.go @@ -40,7 +40,7 @@ func dataSourceAwsSqsQueueRead(d *schema.ResourceData, meta interface{}) error { QueueName: aws.String(name), }) if err != nil || urlOutput.QueueUrl == nil { - return fmt.Errorf("Error getting queue URL: %s", err) + return fmt.Errorf("Error getting queue URL: %w", err) } queueURL := aws.StringValue(urlOutput.QueueUrl) @@ -50,7 +50,7 @@ func dataSourceAwsSqsQueueRead(d *schema.ResourceData, meta interface{}) error { AttributeNames: []*string{aws.String(sqs.QueueAttributeNameQueueArn)}, }) if err != nil { - return fmt.Errorf("Error getting queue attributes: %s", err) + return fmt.Errorf("Error getting queue attributes: %w", err) } d.Set("arn", aws.StringValue(attributesOutput.Attributes[sqs.QueueAttributeNameQueueArn])) @@ -60,11 +60,11 @@ func dataSourceAwsSqsQueueRead(d *schema.ResourceData, meta interface{}) error { tags, err := keyvaluetags.SqsListTags(conn, queueURL) if err != nil { - return fmt.Errorf("error listing tags for SQS Queue (%s): %s", queueURL, err) + return fmt.Errorf("error listing tags for SQS Queue (%s): %w", queueURL, err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil diff --git a/aws/data_source_aws_ssm_document.go b/aws/data_source_aws_ssm_document.go index 20e726b4199..44c853b4bde 100644 --- a/aws/data_source_aws_ssm_document.go +++ b/aws/data_source_aws_ssm_document.go @@ -66,7 +66,7 @@ func dataAwsSsmDocumentRead(d *schema.ResourceData, meta interface{}) error { resp, err := ssmconn.GetDocument(docInput) if err != nil { - return fmt.Errorf("Error reading SSM Document: %s", err) + return fmt.Errorf("Error reading SSM Document: %w", err) } d.SetId(aws.StringValue(resp.Name)) diff --git a/aws/data_source_aws_ssm_patch_baseline.go b/aws/data_source_aws_ssm_patch_baseline.go index d2f5eccacdb..61ed4a5f3c2 100644 --- a/aws/data_source_aws_ssm_patch_baseline.go +++ b/aws/data_source_aws_ssm_patch_baseline.go @@ -76,7 +76,7 @@ func dataAwsSsmPatchBaselineRead(d *schema.ResourceData, meta interface{}) error resp, err := ssmconn.DescribePatchBaselines(params) if err != nil { - return fmt.Errorf("Error describing SSM PatchBaselines: %s", err) + return fmt.Errorf("Error describing SSM PatchBaselines: %w", err) } var filteredBaselines []*ssm.PatchBaselineIdentity diff --git a/aws/data_source_aws_storagegateway_local_disk.go b/aws/data_source_aws_storagegateway_local_disk.go index c74ec587afa..61ea759a453 100644 --- a/aws/data_source_aws_storagegateway_local_disk.go +++ b/aws/data_source_aws_storagegateway_local_disk.go @@ -46,7 +46,7 @@ func dataSourceAwsStorageGatewayLocalDiskRead(d *schema.ResourceData, meta inter log.Printf("[DEBUG] Reading Storage Gateway Local Disk: %s", input) output, err := conn.ListLocalDisks(input) if err != nil { - return fmt.Errorf("error reading Storage Gateway Local Disk: %s", err) + return fmt.Errorf("error reading Storage Gateway Local Disk: %w", err) } if output == nil || len(output.Disks) == 0 { diff --git a/aws/data_source_aws_subnet.go b/aws/data_source_aws_subnet.go index 40c59e9079e..cf0ee0f056e 100644 --- a/aws/data_source_aws_subnet.go +++ b/aws/data_source_aws_subnet.go @@ -186,7 
+186,7 @@ func dataSourceAwsSubnetRead(d *schema.ResourceData, meta interface{}) error { d.Set("outpost_arn", subnet.OutpostArn) if err := d.Set("tags", keyvaluetags.Ec2KeyValueTags(subnet.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } d.Set("assign_ipv6_address_on_creation", subnet.AssignIpv6AddressOnCreation) From 6b9d4a843395c22eb23af2a81250f0d3814d64ae Mon Sep 17 00:00:00 2001 From: Pradeep Bhadani  Date: Tue, 9 Feb 2021 12:10:46 +0000 Subject: [PATCH 1039/1212] Update invalid S3 bucket name and add note for bucket policy size. --- website/docs/r/s3_bucket_policy.html.markdown | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/website/docs/r/s3_bucket_policy.html.markdown b/website/docs/r/s3_bucket_policy.html.markdown index b094b006514..1eee6249438 100644 --- a/website/docs/r/s3_bucket_policy.html.markdown +++ b/website/docs/r/s3_bucket_policy.html.markdown @@ -16,7 +16,7 @@ Attaches a policy to an S3 bucket resource. ```hcl resource "aws_s3_bucket" "b" { - bucket = "my_tf_test_bucket" + bucket = "my-tf-test-bucket" } resource "aws_s3_bucket_policy" "b" { @@ -31,8 +31,8 @@ resource "aws_s3_bucket_policy" "b" { "Sid": "IPAllow", "Effect": "Deny", "Principal": "*", - "Action": "s3:*", - "Resource": "arn:aws:s3:::my_tf_test_bucket/*", + "Action": "s3:*", + "Resource": "arn:aws:s3:::my-tf-test-bucket/*", "Condition": { "IpAddress": {"aws:SourceIp": "8.8.8.8/32"} } @@ -48,7 +48,8 @@ POLICY The following arguments are supported: * `bucket` - (Required) The name of the bucket to which to apply the policy. -* `policy` - (Required) The text of the policy. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). +* `policy` - (Required) The text of the policy. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). Note: Bucket policies are limited to 20 KB in size. 
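The 20 KB note above describes a hard service-side limit that the resource itself does not check. Purely as an illustrative sketch (not part of this patch, and `policySchema` is a hypothetical helper name), the plugin SDK's validation helpers could surface an oversized or malformed policy at plan time instead of waiting for the S3 API to reject it:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

// policySchema is a hypothetical sketch, not the resource's actual schema:
// it rejects non-JSON policies and anything over the documented 20 KB limit.
func policySchema() *schema.Schema {
	return &schema.Schema{
		Type:     schema.TypeString,
		Required: true,
		ValidateFunc: validation.All(
			validation.StringIsJSON,               // malformed documents fail at plan time
			validation.StringLenBetween(1, 20480), // bucket policies are limited to 20 KB
		),
	}
}

func main() {
	fmt.Println(policySchema().Type)
}
```

Either way the service stays authoritative; a client-side check like this only fails fast on obviously invalid input.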
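Stepping back to the long run of `%s` to `%w` conversions in the data-source diffs earlier in this series: both verbs render the same message text, but only `%w` wraps the underlying error so that `errors.Is`/`errors.As` (and AWS error helpers built on them, such as `tfawserr.ErrCodeEquals`) can still inspect it. A minimal self-contained illustration, not code from the patch:

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("resource not found")

func main() {
	wrapped := fmt.Errorf("error setting tags: %w", errNotFound)
	flattened := fmt.Errorf("error setting tags: %s", errNotFound)

	fmt.Println(wrapped.Error() == flattened.Error()) // true: the rendered messages are identical
	fmt.Println(errors.Is(wrapped, errNotFound))      // true: %w preserves the error chain
	fmt.Println(errors.Is(flattened, errNotFound))    // false: %s flattens the cause to plain text
}
```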
+ ## Import From 626df9f590e8266965fe6caa3b10765a3072cba3 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 9 Feb 2021 10:30:39 -0500 Subject: [PATCH 1040/1212] Add opsworkscm service label --- infrastructure/repository/labels-service.tf | 1 + 1 file changed, 1 insertion(+) diff --git a/infrastructure/repository/labels-service.tf b/infrastructure/repository/labels-service.tf index 1a5d2ca5957..1b7750b4788 100644 --- a/infrastructure/repository/labels-service.tf +++ b/infrastructure/repository/labels-service.tf @@ -143,6 +143,7 @@ variable "service_labels" { "networkfirewall", "networkmanager", "opsworks", + "opsworkscm", "organizations", "outposts", "personalize", From 8003b268384eccb482f0be0857dd3f29ae16566a Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Tue, 9 Feb 2021 23:41:13 -0500 Subject: [PATCH 1041/1212] re-use resource delete func in sweeper;update status func --- .../service/route53resolver/waiter/status.go | 4 + .../service/route53resolver/waiter/waiter.go | 4 +- ...urce_aws_route53_resolver_dnssec_config.go | 75 +++++++++++-- ...aws_route53_resolver_dnssec_config_test.go | 104 +++++++----------- 4 files changed, 113 insertions(+), 74 deletions(-) diff --git a/aws/internal/service/route53resolver/waiter/status.go b/aws/internal/service/route53resolver/waiter/status.go index 2d902d562a7..b0086ebc332 100644 --- a/aws/internal/service/route53resolver/waiter/status.go +++ b/aws/internal/service/route53resolver/waiter/status.go @@ -66,6 +66,10 @@ func DnssecConfigStatus(conn *route53resolver.Route53Resolver, dnssecConfigID st return func() (interface{}, string, error) { dnssecConfig, err := finder.ResolverDnssecConfigByID(conn, dnssecConfigID) + if tfawserr.ErrCodeEquals(err, route53resolver.ErrCodeResourceNotFoundException) { + return nil, resolverDnssecConfigStatusNotFound, nil + } + if err != nil { return nil, resolverDnssecConfigStatusUnknown, err } diff --git a/aws/internal/service/route53resolver/waiter/waiter.go b/aws/internal/service/route53resolver/waiter/waiter.go index 8bcfea75b9b..a414e334b03 100644 --- a/aws/internal/service/route53resolver/waiter/waiter.go +++ b/aws/internal/service/route53resolver/waiter/waiter.go @@ -117,8 +117,8 @@ func DnssecConfigCreated(conn *route53resolver.Route53Resolver, dnssecConfigID s return nil, err } -// DnssecConfigCreated waits for a DnssecConfig to return DELETED -func DnssecConfigDeleted(conn *route53resolver.Route53Resolver, dnssecConfigID string) (*route53resolver.ResolverDnssecConfig, error) { +// DnssecConfigDisabled waits for a DnssecConfig to return DISABLED +func DnssecConfigDisabled(conn *route53resolver.Route53Resolver, dnssecConfigID string) (*route53resolver.ResolverDnssecConfig, error) { stateConf := &resource.StateChangeConf{ Pending: []string{route53resolver.ResolverDNSSECValidationStatusDisabling}, Target: []string{route53resolver.ResolverDNSSECValidationStatusDisabled}, diff --git a/aws/resource_aws_route53_resolver_dnssec_config.go b/aws/resource_aws_route53_resolver_dnssec_config.go index 370f5d969a5..07ada395cb9 100644 --- a/aws/resource_aws_route53_resolver_dnssec_config.go +++ b/aws/resource_aws_route53_resolver_dnssec_config.go @@ -7,6 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/route53resolver" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/route53resolver/finder" 
"github.com/terraform-providers/terraform-provider-aws/aws/internal/service/route53resolver/waiter" @@ -110,22 +111,80 @@ func resourceAwsRoute53ResolverDnssecConfigRead(d *schema.ResourceData, meta int func resourceAwsRoute53ResolverDnssecConfigDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).route53resolverconn - log.Printf("[DEBUG] Deleting Route53 Resolver DNSSEC config: %s", d.Id()) - _, err := conn.UpdateResolverDnssecConfig(&route53resolver.UpdateResolverDnssecConfigInput{ - ResourceId: aws.String(d.Get("resource_id").(string)), - Validation: aws.String(route53resolver.ValidationDisable), - }) - if isAWSErr(err, route53resolver.ErrCodeResourceNotFoundException, "") { + // To delete a Route53 ResolverDnssecConfig, it must be: + // (1) updated to a "DISABLED" state + // (2) updated again to be permanently removed + // + // To determine how many Updates are required, + // we first find the config by ID and proceed as follows: + + config, err := finder.ResolverDnssecConfigByID(conn, d.Id()) + + if tfawserr.ErrCodeEquals(err, route53resolver.ErrCodeResourceNotFoundException) { return nil } + if err != nil { return fmt.Errorf("error deleting Route53 Resolver DNSSEC config (%s): %w", d.Id(), err) } - _, err = waiter.DnssecConfigDeleted(conn, d.Id()) + if config == nil { + return nil + } + + // (1) Update Route53 ResolverDnssecConfig to "DISABLED" state, if necessary + if aws.StringValue(config.ValidationStatus) == route53resolver.ResolverDNSSECValidationStatusEnabled { + config, err = updateResolverDnsSecConfig(conn, config.ResourceId) + if err != nil { + return fmt.Errorf("error deleting Route53 Resolver DNSSEC config (%s): %w", d.Id(), err) + } + if config == nil { + return nil + } + } + + // (1.a) Wait for Route53 ResolverDnssecConfig to reach "DISABLED" state, if necessary + if aws.StringValue(config.ValidationStatus) != route53resolver.ResolverDNSSECValidationStatusDisabled { + if _, err = waiter.DnssecConfigDisabled(conn, d.Id()); err != nil { + if tfawserr.ErrCodeEquals(err, route53resolver.ErrCodeResourceNotFoundException) { + return nil + } + + return fmt.Errorf("error waiting for Route53 Resolver DNSSEC config (%s) to be disabled: %w", d.Id(), err) + } + } + + // (2) Update Route53 ResolverDnssecConfig again, effectively deleting the resource + _, err = updateResolverDnsSecConfig(conn, config.ResourceId) + + if tfawserr.ErrCodeEquals(err, route53resolver.ErrCodeResourceNotFoundException) { + return nil + } + if err != nil { - return err + return fmt.Errorf("error deleting Route53 Resolver DNSSEC config (%s): %w", d.Id(), err) } return nil } + +func updateResolverDnsSecConfig(conn *route53resolver.Route53Resolver, resourceId *string) (*route53resolver.ResolverDnssecConfig, error) { + output, err := conn.UpdateResolverDnssecConfig(&route53resolver.UpdateResolverDnssecConfigInput{ + ResourceId: resourceId, + Validation: aws.String(route53resolver.ValidationDisable), + }) + + if tfawserr.ErrCodeEquals(err, route53resolver.ErrCodeResourceNotFoundException) { + return nil, nil + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, nil + } + + return output.ResolverDNSSECConfig, nil +} diff --git a/aws/resource_aws_route53_resolver_dnssec_config_test.go b/aws/resource_aws_route53_resolver_dnssec_config_test.go index ad864458467..7be983a453c 100644 --- a/aws/resource_aws_route53_resolver_dnssec_config_test.go +++ b/aws/resource_aws_route53_resolver_dnssec_config_test.go @@ -5,14 +5,15 @@ import ( "log" "regexp" "testing" - 
"time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/route53resolver" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/route53resolver/finder" ) func init() { @@ -29,52 +30,53 @@ func testSweepRoute53ResolverDnssecConfig(region string) error { } conn := client.(*AWSClient).route53resolverconn - var errors error + var sweeperErrs *multierror.Error err = conn.ListResolverDnssecConfigsPages(&route53resolver.ListResolverDnssecConfigsInput{}, func(page *route53resolver.ListResolverDnssecConfigsOutput, isLast bool) bool { if page == nil { return !isLast } for _, resolverDnssecConfig := range page.ResolverDnssecConfigs { - id := aws.StringValue(resolverDnssecConfig.ResourceId) - - log.Printf("[INFO] Deleting Route53 Resolver Dnssec config: %s", id) - _, err := conn.UpdateResolverDnssecConfig(&route53resolver.UpdateResolverDnssecConfigInput{ - ResourceId: aws.String(id), - Validation: aws.String(route53resolver.ResolverDNSSECValidationStatusDisabled), - }) - if isAWSErr(err, route53resolver.ErrCodeResourceNotFoundException, "") { - continue - } - if err != nil { - errors = multierror.Append(errors, fmt.Errorf("error deleting Route53 Resolver Resolver Dnssec config (%s): %w", id, err)) + if resolverDnssecConfig == nil { continue } - err = route53ResolverEndpointWaitUntilTargetState(conn, id, 10*time.Minute, - []string{route53resolver.ResolverDNSSECValidationStatusDisabling}, - []string{route53resolver.ResolverDNSSECValidationStatusDisabled}) + id := aws.StringValue(resolverDnssecConfig.Id) + resourceId := aws.StringValue(resolverDnssecConfig.ResourceId) + + log.Printf("[INFO] Deleting Route53 Resolver Dnssec config: %s", id) + + r := resourceAwsRoute53ResolverDnssecConfig() + d := r.Data(nil) + d.SetId(aws.StringValue(resolverDnssecConfig.Id)) + d.Set("resource_id", resourceId) + + err := r.Delete(d, client) + if err != nil { - errors = multierror.Append(errors, err) + sweeperErr := fmt.Errorf("error deleting Route53 Resolver Resolver Dnssec config (%s): %w", id, err) + log.Printf("[ERROR] %s", sweeperErr) + sweeperErrs = multierror.Append(sweeperErrs, sweeperErr) continue } } return !isLast }) + + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping Route53 Resolver Resolver Dnssec config sweep for %s: %s", region, err) + return sweeperErrs.ErrorOrNil() + } + if err != nil { - if testSweepSkipSweepError(err) { - log.Printf("[WARN] Skipping Route53 Resolver Resolver Dnssec config sweep for %s: %s", region, err) - return nil - } - errors = multierror.Append(errors, fmt.Errorf("error retrieving Route53 Resolver Resolver Dnssec config: %w", err)) + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error retrieving Route53 Resolver Resolver Dnssec config: %w", err)) } - return errors + return sweeperErrs.ErrorOrNil() } func TestAccAWSRoute53ResolverDnssecConfig_basic(t *testing.T) { - var config route53resolver.ResolverDnssecConfig resourceName := "aws_route53_resolver_dnssec_config.test" rName := acctest.RandomWithPrefix("tf-acc-test") @@ -87,7 +89,7 @@ func TestAccAWSRoute53ResolverDnssecConfig_basic(t *testing.T) { { Config: testAccRoute53ResolverDnssecConfigConfigBasic(rName), Check: resource.ComposeTestCheckFunc( - 
testAccCheckRoute53ResolverDnssecConfigExists(resourceName, &config), + testAccCheckRoute53ResolverDnssecConfigExists(resourceName), testAccMatchResourceAttrRegionalARN(resourceName, "arn", "route53resolver", regexp.MustCompile(`resolver-dnssec-config/.+$`)), resource.TestCheckResourceAttrSet(resourceName, "id"), resource.TestCheckResourceAttrSet(resourceName, "owner_id"), @@ -105,7 +107,6 @@ func TestAccAWSRoute53ResolverDnssecConfig_basic(t *testing.T) { } func TestAccAWSRoute53ResolverDnssecConfig_disappear(t *testing.T) { - var config route53resolver.ResolverDnssecConfig resourceName := "aws_route53_resolver_dnssec_config.test" rName := acctest.RandomWithPrefix("tf-acc-test") @@ -118,7 +119,7 @@ func TestAccAWSRoute53ResolverDnssecConfig_disappear(t *testing.T) { { Config: testAccRoute53ResolverDnssecConfigConfigBasic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53ResolverDnssecConfigExists(resourceName, &config), + testAccCheckRoute53ResolverDnssecConfigExists(resourceName), testAccCheckResourceDisappears(testAccProvider, resourceAwsRoute53ResolverDnssecConfig(), resourceName), ), ExpectNonEmptyPlan: true, @@ -135,30 +136,18 @@ func testAccCheckRoute53ResolverDnssecConfigDestroy(s *terraform.State) error { continue } - input := &route53resolver.ListResolverDnssecConfigsInput{} - - var config *route53resolver.ResolverDnssecConfig - err := conn.ListResolverDnssecConfigsPages(input, func(page *route53resolver.ListResolverDnssecConfigsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + config, err := finder.ResolverDnssecConfigByID(conn, rs.Primary.ID) - for _, c := range page.ResolverDnssecConfigs { - if aws.StringValue(c.Id) == rs.Primary.ID { - config = c - return false - } - } - - return !lastPage - }) + if tfawserr.ErrCodeEquals(err, route53resolver.ErrCodeResourceNotFoundException) { + continue + } if err != nil { return err } - if config == nil || aws.StringValue(config.ValidationStatus) == route53resolver.ResolverDNSSECValidationStatusDisabled { - return nil + if config == nil { + continue } return fmt.Errorf("Route 53 Resolver Dnssec config still exists: %s", rs.Primary.ID) @@ -167,7 +156,7 @@ func testAccCheckRoute53ResolverDnssecConfigDestroy(s *terraform.State) error { return nil } -func testAccCheckRoute53ResolverDnssecConfigExists(n string, config *route53resolver.ResolverDnssecConfig) resource.TestCheckFunc { +func testAccCheckRoute53ResolverDnssecConfigExists(n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -178,34 +167,21 @@ func testAccCheckRoute53ResolverDnssecConfigExists(n string, config *route53reso return fmt.Errorf("No Route 53 Resolver Dnssec config ID is set") } + id := rs.Primary.ID conn := testAccProvider.Meta().(*AWSClient).route53resolverconn - input := &route53resolver.ListResolverDnssecConfigsInput{} - - err := conn.ListResolverDnssecConfigsPages(input, func(page *route53resolver.ListResolverDnssecConfigsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, c := range page.ResolverDnssecConfigs { - if aws.StringValue(c.Id) == rs.Primary.ID { - config = c - return false - } - } - return !lastPage - }) + config, err := finder.ResolverDnssecConfigByID(conn, id) if err != nil { return err } if config == nil { - return fmt.Errorf("No Route 53 Resolver Dnssec config found") + return fmt.Errorf("Route53 Resolver Dnssec config (%s) not found", id) } if aws.StringValue(config.ValidationStatus) != 
route53resolver.ResolverDNSSECValidationStatusEnabled { - return fmt.Errorf("Route 53 Resolver Dnssec config (%s) is not enabled", aws.StringValue(config.Id)) + return fmt.Errorf("Route53 Resolver Dnssec config (%s) is not enabled", aws.StringValue(config.Id)) } return nil From 6806bd081f7daf21e8272f06882178ae08e6e77d Mon Sep 17 00:00:00 2001 From: Pradeep Bhadani Date: Wed, 10 Feb 2021 10:43:26 +0000 Subject: [PATCH 1042/1212] docs/contributing: expired_object_delete_marker cannot be used with days or date in lifecycle expiration policy --- website/docs/r/s3_bucket.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index 51c09d1d95b..cec41dc9293 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -403,7 +403,7 @@ The `expiration` object supports the following * `date` (Optional) Specifies the date after which you want the corresponding action to take effect. * `days` (Optional) Specifies the number of days after object creation when the specific rule action takes effect. -* `expired_object_delete_marker` (Optional) On a versioned bucket (versioning-enabled or versioning-suspended bucket), you can add this element in the lifecycle configuration to direct Amazon S3 to delete expired object delete markers. +* `expired_object_delete_marker` (Optional) On a versioned bucket (versioning-enabled or versioning-suspended bucket), you can add this element in the lifecycle configuration to direct Amazon S3 to delete expired object delete markers. This cannot be specified with Days or Date in a Lifecycle Expiration Policy. The `transition` object supports the following From e89e193e2d0e7f99f5d12f5e4ae2b22d0fda15ac Mon Sep 17 00:00:00 2001 From: Carlovo <54660773+Carlovo@users.noreply.github.com> Date: Wed, 10 Feb 2021 14:46:06 +0100 Subject: [PATCH 1043/1212] Increase WAFv2 association timeout --- aws/resource_aws_wafv2_web_acl_association.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_wafv2_web_acl_association.go b/aws/resource_aws_wafv2_web_acl_association.go index d143f69c029..0f1c6da20a7 100644 --- a/aws/resource_aws_wafv2_web_acl_association.go +++ b/aws/resource_aws_wafv2_web_acl_association.go @@ -13,7 +13,7 @@ import ( ) const ( - Wafv2WebACLAssociationCreateTimeout = 2 * time.Minute + Wafv2WebACLAssociationCreateTimeout = 5 * time.Minute ) func resourceAwsWafv2WebACLAssociation() *schema.Resource { From 3add0fa431382937efe9a41c4e87c30a6c12bdb8 Mon Sep 17 00:00:00 2001 From: Carlovo <54660773+Carlovo@users.noreply.github.com> Date: Wed, 10 Feb 2021 15:05:39 +0100 Subject: [PATCH 1044/1212] Increase WAFv2 association timeout update changelog --- .changelog/17545.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/17545.txt diff --git a/.changelog/17545.txt b/.changelog/17545.txt new file mode 100644 index 00000000000..291a21eb58e --- /dev/null +++ b/.changelog/17545.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_wafv2_web_acl_association: Increase creation timeout value from 2 to 5 minutes to prevent WAFUnavailableEntityException +``` From 51c7793e73af03e3521b1341dce2d9271ac03830 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 10 Feb 2021 10:26:59 -0500 Subject: [PATCH 1045/1212] docs/contributing: Initial Retries and Waiters documentation (#17508) Reference: https://github.com/hashicorp/terraform-provider-aws/issues/12844 Reference: 
https://github.com/hashicorp/terraform-provider-aws/issues/15792 Reference: https://github.com/hashicorp/terraform-provider-aws/issues/16796 This page is meant to serve as a reference for all the related retry and waiting logic present in the provider. Further enhancements could discuss resource timeouts in general, however there is some future uncertainty with that functionality so it is currently omitted. --- docs/CONTRIBUTING.md | 1 + docs/contributing/error-handling.md | 6 +- docs/contributing/retries-and-waiters.md | 487 +++++++++++++++++++++++ 3 files changed, 492 insertions(+), 2 deletions(-) create mode 100644 docs/contributing/retries-and-waiters.md diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index 9d96f08339c..472ccfbdbd1 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -22,3 +22,4 @@ This documentation also contains reference material specific to certain function - [Running and Writing Acceptance Tests](contributing/running-and-writing-acceptance-tests.md) - [Data Handling and Conversion](contributing/data-handling-and-conversion.md) - [Error Handling](contributing/error-handling.md) +- [Retries and Waiters](contributing/retries-and-waiters.md) diff --git a/docs/contributing/error-handling.md b/docs/contributing/error-handling.md index 1ad7c8b9f8e..636cc07fe1f 100644 --- a/docs/contributing/error-handling.md +++ b/docs/contributing/error-handling.md @@ -4,6 +4,8 @@ _Please Note: This documentation is intended for Terraform AWS Provider code dev The Terraform AWS Provider codebase bridges the implementation of a [Terraform Plugin](https://www.terraform.io/docs/extend/how-terraform-works.html) and an AWS API client to support AWS operations and data types as Terraform Resources. An important aspect of performing resource and remote actions is properly handling those operations, but those operations are not guaranteed to succeed every time. Some common examples include where network connections are unreliable, necessary permissions are not properly setup, incorrect Terraform configurations, or the remote system responds unexpectedly. All these situations lead to an unexpected workflow action that must be surfaced to the Terraform user interface for operators to troubleshoot. This guide is intended to explain and show various Terraform AWS Provider code implementations that are considered best practice for surfacing these issues properly to operators and code maintainers. +For further details about how the AWS Go SDK and the Terraform AWS Provider resource logic handle retryable errors, see the [Retries and Waiters documentation](retries-and-waiters.md). + - [General Guidelines and Helpers](#general-guidelines-and-helpers) - [Naming and Check Style](#naming-and-check-style) - [Wrap Errors](#wrap-errors) @@ -200,7 +202,7 @@ func resourceServiceThingRead(d *schema.ResourceData, meta interface{}) error { } ``` -Future documentation will show how to properly retry the remote operation for a short period of time until it is successful to remove the error completely. +If the remote system is not strongly read-after-write consistent, see the [Retries and Waiters documentation on Resource Lifecycle Retries](retries-and-waiters.md#resource-lifecycle-retries) for how to prevent consistency-type errors. 
#### Creation Error Message Context @@ -276,7 +278,7 @@ if err != nil { } ``` -Code that also uses waiters or other operations that return errors should follow a similar pattern: +Code that also uses [waiters](retries-and-waiters.md) or other operations that return errors should follow a similar pattern: ```go if _, err := waiter.VpcDeleted(conn, d.Id()); err != nil { diff --git a/docs/contributing/retries-and-waiters.md b/docs/contributing/retries-and-waiters.md new file mode 100644 index 00000000000..7ced24fd694 --- /dev/null +++ b/docs/contributing/retries-and-waiters.md @@ -0,0 +1,487 @@ +# Retries and Waiters + +_Please Note: This documentation is intended for Terraform AWS Provider code developers. Typical operators writing and applying Terraform configurations do not need to read or understand this material._ + +Terraform plugins may run into situations where calling the remote system after an operation may be necessary. These typically fall under three classes where: + +- The request never reaches the remote system. +- The request reaches the remote system and responds that it cannot handle the request temporarily. +- The implementation of the remote system requires additional requests to ensure success. + +This guide describes the behavior of the Terraform AWS Provider and provides code implementations that help ensure success in each of these situations. + +- [Terraform Plugin SDK Functionality](#terraform-plugin-sdk-functionality) + - [State Change Configuration and Functions](#state-change-configuration-and-functions) + - [Retry Functions](#retry-functions) +- [AWS Request Handling](#aws-request-handling) + - [Default AWS Go SDK Retries](#default-aws-go-sdk-retries) + - [Lower Network Error Retries](#lower-network-error-retries) + - [Terraform AWS Provider Service Retries](#terraform-aws-provider-service-retries) +- [Eventual Consistency](#eventual-consistency) + - [Operation Specific Error Retries](#operation-specific-error-retries) + - [IAM Error Retries](#iam-error-retries) + - [Resource Lifecycle Retries](#resource-lifecycle-retries) + - [Resource Attribute Value Waiters](#resource-attribute-value-waiters) +- [Asynchronous Operations](#asynchronous-operations) + - [AWS Go SDK Waiters](#aws-go-sdk-waiters) + - [Resource Lifecycle Waiters](#resource-lifecycle-waiters) + +## Terraform Plugin SDK Functionality + +The [Terraform Plugin SDK](https://github.com/hashicorp/terraform-plugin-sdk/), which the AWS Provider uses, provides vital tools for handling consistency: the `resource.StateChangeConf{}` struct, and the retry functions, `resource.Retry()` and `resource.RetryContext()`. We will discuss these throughout the rest of this guide. Since they help keep the AWS Provider code consistent, we heavily prefer them over custom implementations. + +This guide goes beyond the [Extending Terraform documentation](https://www.terraform.io/docs/extend/resources/retries-and-customizable-timeouts.html) by providing additional context and emergent implementations specific to the Terraform AWS Provider. 
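To make these two primitives concrete before the detailed sections below, here is a minimal sketch of both in use. The `example` service client, `thingStatus` refresh function, status values, and the retryable error code are hypothetical placeholders, not real provider code.

```go
import (
    "time"

    "github.com/hashicorp/aws-sdk-go-base/tfawserr"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

// waitForThing is a hypothetical sketch combining both primitives;
// the "example" package, conn, input, and thingStatus are placeholders.
func waitForThing(conn *example.Example, input *example.OperationInput, id string) error {
    // resource.Retry: simple error-based retrying for a fixed duration.
    err := resource.Retry(2*time.Minute, func() *resource.RetryError {
        _, err := conn.Operation(input)

        // Assumed retryable error code, for illustration only.
        if tfawserr.ErrCodeEquals(err, "DependencyViolation") {
            return resource.RetryableError(err)
        }

        if err != nil {
            return resource.NonRetryableError(err)
        }

        return nil
    })

    if err != nil {
        return err
    }

    // resource.StateChangeConf: poll until the target value(s) are returned.
    stateConf := &resource.StateChangeConf{
        Pending: []string{"PENDING"},
        Target:  []string{"READY"},
        Refresh: thingStatus(conn, id), // a resource.StateRefreshFunc
        Timeout: 10 * time.Minute,
    }

    _, err = stateConf.WaitForState()

    return err
}
```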
+
+### State Change Configuration and Functions
+
+The [`resource.StateChangeConf` type](https://pkg.go.dev/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource#StateChangeConf) along with its receiver methods [`WaitForState()`](https://pkg.go.dev/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource#StateChangeConf.WaitForState) and [`WaitForStateContext()`](https://pkg.go.dev/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource#StateChangeConf.WaitForStateContext) is a generic primitive for repeating operations in Terraform resource logic until the desired value(s) are received. The "state change" in this case is generic to any value and not specific to the Terraform State. Among other functionality, it supports the following desirable optional properties:
+
+- Expecting specific value(s) while waiting for the target value(s) to be reached. Unexpected values are returned as an error which can be augmented with additional details.
+- Expecting the target value(s) to be returned multiple times in succession.
+- Allowing various polling configurations such as delaying the initial request and setting the time between polls.
+
+### Retry Functions
+
+The [`resource.Retry()`](https://pkg.go.dev/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource#Retry) and [`resource.RetryContext()`](https://pkg.go.dev/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource#RetryContext) functions provide a simplified retry implementation around `resource.StateChangeConf`. Their most common use is for simple error-based retries.
+
+## AWS Request Handling
+
+The Terraform AWS Provider's requests to AWS service APIs happen on top of Hypertext Transfer Protocol (HTTP). The following is a simplified description of the layers and handling that requests pass through:
+
+- A Terraform resource calls an AWS Go SDK function.
+- The AWS Go SDK generates an AWS-compatible HTTP request using the [Go standard library `net/http` package](https://pkg.go.dev/net/http/). This includes the following:
+    - Adding HTTP headers for authentication and signing of requests to ensure authenticity.
+    - Converting operation inputs into required HTTP URI parameters and/or request body type (XML or JSON).
+    - If debug logging is enabled, logging of the HTTP request.
+- The AWS Go SDK transmits the `net/http` request using Go's standard handling of the Operating System (OS) and Domain Name System (DNS) configuration.
+- The AWS service potentially receives the request and responds, typically adding a request identifier HTTP header which can be used for AWS Support cases.
+- The OS and Go `net/http` receive the response and pass it to the AWS Go SDK.
+- The AWS Go SDK attempts to handle the response. This may include:
+    - Parsing output.
+    - Converting errors into operation errors (Go `error` type of wrapped [`awserr.Error` type](https://docs.aws.amazon.com/sdk-for-go/api/aws/awserr/#Error)).
+    - Converting response elements into operation outputs (AWS Go SDK operation-specific types).
+    - Triggering automatic request retries based on default and custom logic.
+- The Terraform resource receives the response, including any output and errors, from the AWS Go SDK.
+
+The Terraform AWS Provider specific configuration for AWS Go SDK operation handling can be found in `aws/config.go` in this codebase and the [`hashicorp/aws-sdk-go-base` codebase](https://github.com/hashicorp/aws-sdk-go-base).
+
+_NOTE: The section describes the current handling with version 1 of the AWS Go SDK.
In the future, this codebase will be migrated to version 2 of the AWS Go SDK. The newer version implements a very similar request flow but uses a simpler credential and request handling configuration. As such, the `aws-sdk-go-base` dependency will likely not receive further updates and will be removed after that migration._ + +### Default AWS Go SDK Retries + +In some situations, while handling a response, the AWS Go SDK automatically retries a request before returning the output and error. The retry mechanism implements an exponential backoff algorithm. The default conditions triggering automatic retries (implemented through [`client.DefaultRetryer`](https://docs.aws.amazon.com/sdk-for-go/api/aws/client/#DefaultRetryer)) include: + +- Certain network errors. A common exception to this is connection reset errors. +- HTTP status codes 429 and 5xx. +- Certain API error codes, which are common across various AWS services (e.g. `ThrottledException`). However, not all AWS services implement these error codes consistently. A common exception to this is certain expired credentials errors. + +By default, the Terraform AWS Provider sets the maximum number of AWS Go SDK retries based on the [`max_retries` provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#max_retries). The provider configuration defaults to 25 and the exponential backoff roughly equates to one hour of retries. This very high default value was present before the Terraform AWS Provider codebase was split from Terraform CLI in version 0.10. + +_NOTE: The section describes the current handling with version 1 of the AWS Go SDK. In the future, this codebase will be migrated to version 2 of the AWS Go SDK. The newer version implements additional retry conditions by default, such as consistently retrying all common network errors._ + +_NOTE: The section describes the current handling with Terraform Plugin SDK resource signatures without `context.Context`. In the future, this codebase will be migrated to the context-aware resource signatures which currently enforce a 20-minute default timeout that conflicts with the timeout with the default `max_retries` value. The Terraform Plugin SDK may be updated to support removing this default 20-minute timeout or the default retry mechanism described here will be updated to prevent context cancellation errors where possible._ + +### Lower Network Error Retries + +Given the very high default number of AWS Go SDK retries configured in the Terraform AWS Provider and the excessive wait that practitioners would face, the [`hashicorp/aws-sdk-go-base` codebase](https://github.com/hashicorp/aws-sdk-go-base/blob/57529b4c2d2f8f3b5299d66a829b01259fa800d7/session.go#L108-L130) lowers retries to 10 for certain network errors that typically cannot be remediated via retries. This roughly equates to 30 seconds of retries. + +### Terraform AWS Provider Service Retries + +The AWS Go SDK provides hooks for injecting custom logic into the service client handlers. We prefer this handling in situations where contributors would need to apply the retry behavior to many resources. For example, in cases where the AWS service API does not mark an error code as automatically retriable. The AWS Provider includes other retry-changing behaviors using this method. You can find them in the `aws/config.go` file. 
For example: + +```go +client.kafkaconn.Handlers.Retry.PushBack(func(r *request.Request) { + if tfawserr.ErrMessageContains(r.Error, kafka.ErrCodeTooManyRequestsException, "Too Many Requests") { + r.Retryable = aws.Bool(true) + } +}) +``` + +## Eventual Consistency + +Eventual consistency is a temporary condition where the remote system can return outdated information or errors due to not being strongly read-after-write consistent. This is a pattern found in remote systems that must be highly scaled for broad usage. + +Terraform expects any planned resource lifecycle change (create, update, destroy of the resource itself) and planned resource attribute value change to match after being applied. Conversely, operators typically expect that Terraform resources also implement the concept of drift detection for resources and their attributes, which requires reading information back from the remote system after an operation. A common implementation is refreshing the Terraform State information (`d.Set()`) during the `Read` function of a resource after `Create` and `Update`. + +These two concepts conflict with each other and require additional handling in Terraform resource logic as shown in the following sections. These issues are _not_ reliably reproducible, especially in the case of writing acceptance testing, so they can be elusive with false positives to verify fixes. + +### Operation Specific Error Retries + +Even given a properly ordered Terraform configuration, eventual consistency can unexpectedly prevent downstream operations from succeeding. A simple retry after a few seconds resolves many of these issues. To reduce frustrating behavior for operators, wrap AWS Go SDK operations with the `resource.Retry()` or `resource.RetryContext()` functions. These retries should have a reasonably low timeout (typically two minutes but up to five minutes). Save them in a constant for reusability. These functions are preferably in line with the associated resource logic to remove any indirection with the code. + +Do not use this type of logic to overcome improperly ordered Terraform configurations. The approach may not work in larger environments. + +```go +// aws/internal/service/example/waiter/waiter.go (created if does not exist) + +const ( + // Maximum amount of time to wait for Thing operation eventual consistency + ThingOperationTimeout = 2 * time.Minute +) +``` + +```go +// aws/resource_example_thing.go + +import ( + // ... other imports ... + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/example/waiter" +) + +// ... Create, Read, Update, or Delete function ... + err := resource.Retry(waiter.ThingOperationTimeout, func() *resource.RetryError { + _, err := conn./* ... AWS Go SDK operation with eventual consistency errors ... */ + + // Retryable conditions which can be checked. + // These must be updated to match the AWS service API error code and message. + if tfawserr.ErrMessageContains(err, /* error code */, /* error message */) { + return resource.RetryableError(err) + } + + if err != nil { + return resource.NonRetryableError(err) + } + + return nil + }) + + // This check is important - it handles when the AWS Go SDK operation retries without returning. + // e.g. any automatic retries due to network or throttling errors. + if tfresource.TimedOut(err) { + // The use of equals assignment (over colon equals) is also important here. + // This overwrites the error variable to simplify logic. + _, err = conn./* ... 
AWS Go SDK operation with eventual consistency errors ... */
+    }
+
+    if err != nil {
+        return fmt.Errorf("... error message context ... : %w", err)
+    }
+```
+
+_NOTE: The section describes the current handling with version 1 of the AWS Go SDK. In the future, this codebase will be migrated to version 2 of the AWS Go SDK. The newer version natively supports operation-specific retries in a more friendly manner, which may replace this type of implementation._
+
+#### IAM Error Retries
+
+A common eventual consistency issue is an error returned due to IAM permissions. The IAM service itself is eventually consistent along with the propagation of its components and permissions to other AWS services. For example, if the following operations occur in quick succession:
+
+- Create an IAM Role
+- Attach an IAM Policy to the IAM Role
+- Reference the new IAM Role in another AWS service, such as creating a Lambda Function
+
+The last operation can receive varied API errors ranging from:
+
+- IAM Role being reported as not existing
+- IAM Role being reported as not having permissions for the other service to use it (assume role permissions)
+- IAM Role being reported as not having sufficient permissions (inline or attached role permissions)
+
+Each AWS service API (and sometimes even operations within the same API) varies in the implementation of these errors. To handle them, it is recommended to use the [Operation Specific Error Retries](#operation-specific-error-retries) pattern. The Terraform AWS Provider implements a standard timeout constant of two minutes in the `aws/internal/service/iam/waiter` package which should be used for all retry timeouts associated with IAM errors. This timeout was derived from years of Terraform operational experience with all AWS APIs.
+
+```go
+// aws/resource_example_thing.go
+
+import (
+    // ... other imports ...
+    // By convention, cross-service waiter imports are aliased as {SERVICE}waiter
+    iamwaiter "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/iam/waiter"
+)
+
+// ... Create and typically Update function ...
+    err := resource.Retry(iamwaiter.PropagationTimeout, func() *resource.RetryError {
+        _, err := conn./* ... AWS Go SDK operation with IAM eventual consistency errors ... */
+
+        // Example retryable condition.
+        // This must be updated to match the AWS service API error code and message.
+        if tfawserr.ErrMessageContains(err, /* error code */, /* error message */) {
+            return resource.RetryableError(err)
+        }
+
+        if err != nil {
+            return resource.NonRetryableError(err)
+        }
+
+        return nil
+    })
+
+    if tfresource.TimedOut(err) {
+        _, err = conn./* ... AWS Go SDK operation with IAM eventual consistency errors ... */
+    }
+
+    if err != nil {
+        return fmt.Errorf("... error message context ... : %w", err)
+    }
+```
+
+### Resource Lifecycle Retries
+
+Resource lifecycle eventual consistency is a type of consistency issue that relates to the existence or state of an AWS infrastructure component. For example, if you create a resource and immediately try to get information about it, some AWS services and operations will return a "not found" error. Depending on the service and general AWS load, these errors can be frequent or rare.
+
+In order to avoid this issue, identify operations that make changes. Then, when calling any other operations that rely on the changes, account for the possibility that the AWS service has not yet fully realized them.
+
+A typical example is creating an AWS component.
After creation, when attempting to read the component's information, provide logic to retry the read if the AWS service returns a "not found" error.
+
+The pattern that most resources should follow is to have the `Create` function end by returning the result of calling the `Read` function. This fills in computed attributes and ensures that the AWS service applied the configuration correctly. Add retry logic to the `Read` function to overcome the temporary condition on resource creation.
+
+Note that for eventually consistent resources, "not found" errors can still occur in the `Read` function even after implementing [Resource Lifecycle Waiters](#resource-lifecycle-waiters) for the `Create` function.
+
+```go
+// aws/internal/service/example/waiter/waiter.go (created if does not exist)
+
+const (
+    // Maximum amount of time to wait for Thing eventual consistency on creation
+    ThingCreationTimeout = 2 * time.Minute
+)
+```
+
+```go
+// aws/resource_example_thing.go
+
+func ExampleThingCreate(d *schema.ResourceData, meta interface{}) error {
+    // ...
+    return ExampleThingRead(d, meta)
+}
+
+func ExampleThingRead(d *schema.ResourceData, meta interface{}) error {
+    conn := meta.(*AWSClient).exampleconn
+
+    input := &example.OperationInput{/* ... */}
+
+    var output *example.OperationOutput
+    err := resource.Retry(waiter.ThingCreationTimeout, func() *resource.RetryError {
+        var err error
+        output, err = conn.Operation(input)
+
+        // Retry on any API "not found" errors, but only on new resources.
+        if d.IsNewResource() && tfawserr.ErrCodeEquals(err, example.ErrCodeResourceNotFoundException) {
+            return resource.RetryableError(err)
+        }
+
+        if err != nil {
+            return resource.NonRetryableError(err)
+        }
+
+        return nil
+    })
+
+    // Retry AWS Go SDK operation if no response from automatic retries.
+    if tfresource.TimedOut(err) {
+        output, err = conn.Operation(input)
+    }
+
+    // Prevent confusing Terraform error messaging to operators by
+    // only ignoring API "not found" errors if not a new resource.
+    if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, example.ErrCodeNoSuchEntityException) {
+        log.Printf("[WARN] Example Thing (%s) not found, removing from state", d.Id())
+        d.SetId("")
+        return nil
+    }
+
+    if err != nil {
+        return fmt.Errorf("error reading Example Thing (%s): %w", d.Id(), err)
+    }
+
+    // Prevent panics.
+    if output == nil {
+        return fmt.Errorf("error reading Example Thing (%s): empty response", d.Id())
+    }
+
+    // ... refresh Terraform state as normal ...
+    d.Set("arn", output.Arn)
+
+    return nil
+}
+```
+
+Some other general guidelines are:
+
+- If the `Create` function uses `resource.StateChangeConf`, the underlying `resource.RefreshStateFunc` should `return nil, "", nil` instead of the API "not found" error. This way the `StateChangeConf` logic will automatically retry.
+- If the `Create` function uses `resource.Retry()`, the API "not found" error should be caught and `return resource.RetryableError(err)` to automatically retry.
+
+In rare cases, it may be easier to duplicate all `Read` function logic in the `Create` function to handle all retries in one place.
+
+### Resource Attribute Value Waiters
+
+An emergent solution for handling eventual consistency with attribute values on updates is to introduce custom `resource.StateChangeConf` and `resource.RefreshStateFunc` handlers.
For example:
+
+```go
+// aws/internal/service/example/waiter/status.go (created if does not exist)
+
+// ThingAttribute fetches the Thing and its Attribute
+func ThingAttribute(conn *example.Example, id string) resource.StateRefreshFunc {
+    return func() (interface{}, string, error) {
+        output, err := /* ... AWS Go SDK operation to fetch resource/value ... */
+
+        if tfawserr.ErrCodeEquals(err, example.ErrCodeResourceNotFoundException) {
+            return nil, "", nil
+        }
+
+        if err != nil {
+            return nil, "", err
+        }
+
+        if output == nil {
+            return nil, "", nil
+        }
+
+        return output, aws.StringValue(output.Attribute), nil
+    }
+}
+```
+
+```go
+// aws/internal/service/example/waiter/waiter.go (created if does not exist)
+
+const (
+    ThingAttributePropagationTimeout = 2 * time.Minute
+)
+
+// ThingAttributeUpdated is an attribute waiter for ThingAttribute
+func ThingAttributeUpdated(conn *example.Example, id string, expectedValue string) (*example.Thing, error) {
+    stateConf := &resource.StateChangeConf{
+        Target:  []string{expectedValue},
+        Refresh: ThingAttribute(conn, id),
+        Timeout: ThingAttributePropagationTimeout,
+    }
+
+    outputRaw, err := stateConf.WaitForState()
+
+    if output, ok := outputRaw.(*example.Thing); ok {
+        return output, err
+    }
+
+    return nil, err
+}
+```
+
+```go
+// aws/resource_example_thing.go
+
+func ExampleThingUpdate(d *schema.ResourceData, meta interface{}) error {
+    // ...
+
+    if d.HasChange("attribute") {
+        // ... AWS Go SDK logic to update attribute ...
+
+        if _, err := waiter.ThingAttributeUpdated(conn, d.Id(), d.Get("attribute").(string)); err != nil {
+            return fmt.Errorf("error waiting for Example Thing (%s) attribute update: %w", d.Id(), err)
+        }
+    }
+
+    // ...
+}
+```
+
+## Asynchronous Operations
+
+When you initiate a long-running operation, an AWS service may return a successful response immediately and continue working on the request asynchronously. A resource can track the status with a component-level field (e.g. `CREATING`, `UPDATING`, etc.) or an explicit tracking identifier.
+
+Terraform resources should wait for these background operations to complete. Failing to do so can introduce incomplete state information and downstream errors in other resources. In rare scenarios involving very long-running operations, operators may request a flag to skip the waiting. However, such flags should only be implemented case-by-case to prevent the confusing issues mentioned previously.
+
+### AWS Go SDK Waiters
+
+In limited cases, the AWS service API model includes the information to automatically generate a waiter function in the AWS Go SDK for an operation. These are typically named with the prefix `WaitUntil...`. If available, these functions can be used for an initial resource implementation. For example:
+
+```go
+if err := conn.WaitUntilEndpointInService(input); err != nil {
+    return fmt.Errorf("error waiting for Example Thing (%s) ...: %w", d.Id(), err)
+}
+```
+
+If it is necessary to customize the timeouts and polling, we generally prefer using [Resource Lifecycle Waiters](#resource-lifecycle-waiters) instead since they are more commonly used throughout the codebase.
+
+### Resource Lifecycle Waiters
+
+Most of the codebase uses `resource.StateChangeConf` and `resource.RefreshStateFunc` handlers for tracking either component-level status fields or explicit tracking identifiers. These should be placed in the `aws/internal/service/{SERVICE}/waiter` package and split into separate functions.
For example:
+
+```go
+// aws/internal/service/example/waiter/status.go (created if does not exist)
+
+// ThingStatus fetches the Thing and its Status
+func ThingStatus(conn *example.Example, id string) resource.StateRefreshFunc {
+    return func() (interface{}, string, error) {
+        output, err := /* ... AWS Go SDK operation to fetch resource/status ... */
+
+        if tfawserr.ErrCodeEquals(err, example.ErrCodeResourceNotFoundException) {
+            return nil, "", nil
+        }
+
+        if err != nil {
+            return nil, "", err
+        }
+
+        if output == nil {
+            return nil, "", nil
+        }
+
+        return output, aws.StringValue(output.Status), nil
+    }
+}
+```
+
+```go
+// aws/internal/service/example/waiter/waiter.go (created if does not exist)
+
+const (
+    ThingCreationTimeout = 2 * time.Minute
+    ThingDeletionTimeout = 5 * time.Minute
+)
+
+// ThingCreated is a resource waiter for Thing creation
+func ThingCreated(conn *example.Example, id string) (*example.Thing, error) {
+    stateConf := &resource.StateChangeConf{
+        Pending: []string{example.StatusCreating},
+        Target:  []string{example.StatusCreated},
+        Refresh: ThingStatus(conn, id),
+        Timeout: ThingCreationTimeout,
+    }
+
+    outputRaw, err := stateConf.WaitForState()
+
+    if output, ok := outputRaw.(*example.Thing); ok {
+        return output, err
+    }
+
+    return nil, err
+}
+
+// ThingDeleted is a resource waiter for Thing deletion
+func ThingDeleted(conn *example.Example, id string) (*example.Thing, error) {
+    stateConf := &resource.StateChangeConf{
+        Pending: []string{example.StatusDeleting},
+        Target:  []string{}, // Use an empty list if the resource disappears and does not have a "deleted" status
+        Refresh: ThingStatus(conn, id),
+        Timeout: ThingDeletionTimeout,
+    }
+
+    outputRaw, err := stateConf.WaitForState()
+
+    if output, ok := outputRaw.(*example.Thing); ok {
+        return output, err
+    }
+
+    return nil, err
+}
+```
+
+```go
+// aws/resource_example_thing.go
+
+func ExampleThingCreate(d *schema.ResourceData, meta interface{}) error {
+    // ... AWS Go SDK logic to create resource ...
+
+    if _, err := waiter.ThingCreated(conn, d.Id()); err != nil {
+        return fmt.Errorf("error waiting for Example Thing (%s) creation: %w", d.Id(), err)
+    }
+
+    return ExampleThingRead(d, meta)
+}
+
+func ExampleThingDelete(d *schema.ResourceData, meta interface{}) error {
+    // ... AWS Go SDK logic to delete resource ...
+
+    if _, err := waiter.ThingDeleted(conn, d.Id()); err != nil {
+        return fmt.Errorf("error waiting for Example Thing (%s) deletion: %w", d.Id(), err)
+    }
+
+    return nil
+}
+```
+
+Typically, the AWS Go SDK should include constants for various status field values (e.g. `StatusCreating` for `CREATING`). If not, create them in a file named `aws/internal/service/{SERVICE}/consts.go`.
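As an illustration of that last point, a minimal sketch of such a constants file might look like the following; the `example` package and the specific status values are assumptions for a hypothetical service, not real provider code.

```go
// aws/internal/service/example/consts.go (hypothetical sketch)

package example

// Status values returned by the hypothetical Thing API that are not
// already modeled as constants in the AWS Go SDK.
const (
    StatusCreating = "CREATING"
    StatusCreated  = "CREATED"
    StatusDeleting = "DELETING"
)
```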
From c3c9e8811c6da1f721b9c8a1707e83ad55bddc4f Mon Sep 17 00:00:00 2001 From: Alessandro Borrello Date: Mon, 10 Feb 2020 22:21:59 +1100 Subject: [PATCH 1046/1212] Add source_json_list attribute to aws_iam_policy_document datasource --- aws/data_source_aws_iam_policy_document.go | 37 ++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/aws/data_source_aws_iam_policy_document.go b/aws/data_source_aws_iam_policy_document.go index 582c7c0d2f1..4bfef8816d3 100644 --- a/aws/data_source_aws_iam_policy_document.go +++ b/aws/data_source_aws_iam_policy_document.go @@ -39,6 +39,10 @@ func dataSourceAwsIamPolicyDocument() *schema.Resource { Type: schema.TypeString, Optional: true, }, + "source_json_list": { + Type: schema.TypeList, + Optional: true, + }, "statement": { Type: schema.TypeList, Optional: true, @@ -113,6 +117,39 @@ func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{} } } + // populate mergedDoc with any provided source_json_list + if sourceJSONList, hasSourceJSONList := d.GetOk("source_json_list"); hasSourceJSONList { + + // generate sid map to assure there are no duplicates in source jsons + sidMap := make(map[string]struct{}) + for _, stmt := range mergedDoc.Statements { + if stmt.Sid != "" { + sidMap[stmt.Sid] = struct{}{} + } + } + + // merge sourceDocs in order specified + for sourceJSONIndex, sourceJSON := range sourceJSONList.([]string) { + sourceDoc := &IAMPolicyDoc{} + if err := json.Unmarshal([]byte(sourceJSON), sourceDoc); err != nil { + return err + } + + // assure all statements in sourceDoc are unique before merging + for stmtIndex, stmt := range sourceDoc.Statements { + if stmt.Sid != "" { + if _, sidExists := sidMap[stmt.Sid]; sidExists { + return fmt.Errorf("Found duplicate sid (%s) in source_json_list (item %d; statement %d). 
Either remove the sid or ensure the sid is unique across all statements.", stmt.Sid, sourceJSONIndex, stmtIndex) + } + sidMap[stmt.Sid] = struct{}{} + } + } + + mergedDoc.Merge(sourceDoc) + } + + } + // process the current document doc := &IAMPolicyDoc{ Version: d.Get("version").(string), From 0cf0ce703cd22f9e9444d58e180fd70541734ee4 Mon Sep 17 00:00:00 2001 From: Alessandro Borrello Date: Mon, 10 Feb 2020 22:23:40 +1100 Subject: [PATCH 1047/1212] Add override_json_list attribute to aws_iam_policy_document datasource --- aws/data_source_aws_iam_policy_document.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/aws/data_source_aws_iam_policy_document.go b/aws/data_source_aws_iam_policy_document.go index 4bfef8816d3..d766cd6c96e 100644 --- a/aws/data_source_aws_iam_policy_document.go +++ b/aws/data_source_aws_iam_policy_document.go @@ -31,6 +31,10 @@ func dataSourceAwsIamPolicyDocument() *schema.Resource { Type: schema.TypeString, Optional: true, }, + "override_json_list": { + Type: schema.TypeList, + Optional: true, + }, "policy_id": { Type: schema.TypeString, Optional: true, @@ -240,6 +244,19 @@ func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{} // merge our current document into mergedDoc mergedDoc.Merge(doc) + // merge override_json_list policies into mergedDoc in order specified + if overrideJSONList, hasOverideJSONList := d.GetOk("overide_json_list"); hasOverideJSONList { + for _, overrideJSON := range overrideJSONList.([]string) { + overrideDoc := &IAMPolicyDoc{} + if err := json.Unmarshal([]byte(overrideJSON), overrideDoc); err != nil { + return err + } + + mergedDoc.Merge(overrideDoc) + } + + } + // merge in override_json if overrideJSON, hasOverrideJSON := d.GetOk("override_json"); hasOverrideJSON { overrideDoc := &IAMPolicyDoc{} From 512ca94ee69260f5f93fb144d3606ebc1c03b00c Mon Sep 17 00:00:00 2001 From: Alessandro Borrello Date: Tue, 11 Feb 2020 22:10:24 +1100 Subject: [PATCH 1048/1212] Add tests for enhancements --- ...ata_source_aws_iam_policy_document_test.go | 206 ++++++++++++++++++ 1 file changed, 206 insertions(+) diff --git a/aws/data_source_aws_iam_policy_document_test.go b/aws/data_source_aws_iam_policy_document_test.go index 31007ca6882..a6e5fd9f8d5 100644 --- a/aws/data_source_aws_iam_policy_document_test.go +++ b/aws/data_source_aws_iam_policy_document_test.go @@ -57,6 +57,23 @@ func TestAccAWSDataSourceIAMPolicyDocument_source(t *testing.T) { }) } +func TestAccAWSDataSourceIAMPolicyDocument_sourceList(t *testing.T) { + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccAWSIAMPolicyDocumentSourceListConfig, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.aws_iam_policy_document.test_source_list", "json", + testAccAWSIAMPolicyDocumentSourceListExpectedJSON, + ), + ), + }, + }, + }) +} + func TestAccAWSDataSourceIAMPolicyDocument_sourceConflicting(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -74,6 +91,19 @@ func TestAccAWSDataSourceIAMPolicyDocument_sourceConflicting(t *testing.T) { }) } +func TestAccAWSDataSourceIAMPolicyDocument_sourceListConflicting(t *testing.T) { + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccAWSIAMPolicyDocumentSourceListConflictingConfig, + ExpectError: 
regexp.MustCompile(`Found duplicate sid (.*?) in source_json_list`), + }, + }, + }) +} + func TestAccAWSDataSourceIAMPolicyDocument_override(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -91,6 +121,23 @@ func TestAccAWSDataSourceIAMPolicyDocument_override(t *testing.T) { }) } +func TestAccAWSDataSourceIAMPolicyDocument_overrideList(t *testing.T) { + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccAWSIAMPolicyDocumentOverrideListConfig, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.aws_iam_policy_document.test_override_list", "json", + testAccAWSIAMPolicyDocumentOverrideListExpectedJSON, + ), + ), + }, + }, + }) +} + func TestAccAWSDataSourceIAMPolicyDocument_noStatementMerge(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -564,6 +611,69 @@ func testAccAWSIAMPolicyDocumentSourceExpectedJSON() string { }`, testAccGetPartition()) } +var testAccAWSIAMPolicyDocumentSourceListConfig = ` +data "aws_iam_policy_document" "policy_a" { + statement { + sid = "" + effect = "Allow" + actions = [ "foo:ActionOne" ] + } + statement { + sid = "validSidOne" + effect = "Allow" + actions = [ "bar:ActionOne" ] + } +} +data "aws_iam_policy_document" "policy_b" { + statement { + sid = "validSidTwo" + effect = "Deny" + actions = [ "foo:ActionTwo" ] + } +} +data "aws_iam_policy_document" "policy_c" { + statement { + sid = "" + effect = "Allow" + actions = [ "bar:ActionTwo" ] + } +} +data "aws_iam_policy_document" "test_source_list" { + version = "2012-10-17" + source_json_list = [ + data.aws_iam_policy_document.policy_a.json, + data.aws_iam_policy_document.policy_b.json, + data.aws_iam_policy_document.policy_c.json + ] +} +` +var testAccAWSIAMPolicyDocumentSourceListExpectedJSON = `{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Action": "foo:ActionOne" + }, + { + "Sid": "validSidOne", + "Effect": "Allow", + "Action": "bar:ActionOne" + }, + { + "Sid": "validSidTwo", + "Effect": "Deny", + "Action": "foo:ActionTwo" + }, + { + "Sid": "", + "Effect": "Allow", + "Action": "bar:ActionTwo" + } + ] +} +` + var testAccAWSIAMPolicyDocumentSourceBlankConfig = ` data "aws_iam_policy_document" "test_source_blank" { source_json = "" @@ -620,6 +730,43 @@ var testAccAWSIAMPolicyDocumentSourceConflictingExpectedJSON = `{ ] }` +var testAccAWSIAMPolicyDocumentSourceListConflictingConfig = ` +data "aws_iam_policy_document" "policy_a" { + statement { + sid = "" + effect = "Allow" + actions = [ "foo:ActionOne" ] + } + statement { + sid = "conflictSid" + effect = "Allow" + actions = [ "bar:ActionOne" ] + } +} +data "aws_iam_policy_document" "policy_b" { + statement { + sid = "validSid" + effect = "Deny" + actions = [ "foo:ActionTwo" ] + } +} +data "aws_iam_policy_document" "policy_c" { + statement { + sid = "conflictSid" + effect = "Allow" + actions = [ "bar:ActionTwo" ] + } +} +data "aws_iam_policy_document" "test_source_list_conflicting" { + version = "2012-10-17" + source_json_list = [ + data.aws_iam_policy_document.policy_a.json, + data.aws_iam_policy_document.policy_b.json, + data.aws_iam_policy_document.policy_c.json + ] +} +` + var testAccAWSIAMPolicyDocumentOverrideConfig = ` data "aws_partition" "current" {} @@ -671,6 +818,65 @@ var testAccAWSIAMPolicyDocumentOverrideExpectedJSON = `{ ] }` +var 
testAccAWSIAMPolicyDocumentOverrideListConfig = ` +data "aws_iam_policy_document" "policy_a" { + statement { + sid = "" + effect = "Allow" + actions = [ "foo:ActionOne" ] + } + statement { + sid = "overrideSid" + effect = "Allow" + actions = [ "bar:ActionOne" ] + } +} +data "aws_iam_policy_document" "policy_b" { + statement { + sid = "validSid" + effect = "Deny" + actions = [ "foo:ActionTwo" ] + } +} +data "aws_iam_policy_document" "policy_c" { + statement { + sid = "overrideSid" + effect = "Deny" + actions = [ "bar:ActionOne" ] + } +} +data "aws_iam_policy_document" "test_override_list" { + version = "2012-10-17" + override_json_list = [ + data.aws_iam_policy_document.policy_a.json, + data.aws_iam_policy_document.policy_b.json, + data.aws_iam_policy_document.policy_c.json + ] +} +` + +var testAccAWSIAMPolicyDocumentOverrideListExpectedJSON = `{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Action": "foo:ActionOne" + }, + { + "Sid": "overrideSid", + "Effect": "Deny", + "Action": "bar:ActionOne" + }, + { + "Sid": "validSid", + "Effect": "Deny", + "Action": "foo:ActionTwo" + } + ] +} +` + var testAccAWSIAMPolicyDocumentNoStatementMergeConfig = ` data "aws_iam_policy_document" "source" { statement { From e19c6611150cf384199837f767af9aa8c5e87256 Mon Sep 17 00:00:00 2001 From: Alessandro Borrello Date: Tue, 11 Feb 2020 22:15:11 +1100 Subject: [PATCH 1049/1212] Fix sdk schema errors --- aws/data_source_aws_iam_policy_document.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/aws/data_source_aws_iam_policy_document.go b/aws/data_source_aws_iam_policy_document.go index d766cd6c96e..59830480852 100644 --- a/aws/data_source_aws_iam_policy_document.go +++ b/aws/data_source_aws_iam_policy_document.go @@ -34,6 +34,7 @@ func dataSourceAwsIamPolicyDocument() *schema.Resource { "override_json_list": { Type: schema.TypeList, Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, }, "policy_id": { Type: schema.TypeString, @@ -46,6 +47,7 @@ func dataSourceAwsIamPolicyDocument() *schema.Resource { "source_json_list": { Type: schema.TypeList, Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, }, "statement": { Type: schema.TypeList, @@ -133,9 +135,9 @@ func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{} } // merge sourceDocs in order specified - for sourceJSONIndex, sourceJSON := range sourceJSONList.([]string) { + for sourceJSONIndex, sourceJSON := range sourceJSONList.([]interface{}) { sourceDoc := &IAMPolicyDoc{} - if err := json.Unmarshal([]byte(sourceJSON), sourceDoc); err != nil { + if err := json.Unmarshal([]byte(sourceJSON.(string)), sourceDoc); err != nil { return err } @@ -246,9 +248,9 @@ func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{} // merge override_json_list policies into mergedDoc in order specified if overrideJSONList, hasOverideJSONList := d.GetOk("overide_json_list"); hasOverideJSONList { - for _, overrideJSON := range overrideJSONList.([]string) { + for _, overrideJSON := range overrideJSONList.([]interface{}) { overrideDoc := &IAMPolicyDoc{} - if err := json.Unmarshal([]byte(overrideJSON), overrideDoc); err != nil { + if err := json.Unmarshal([]byte(overrideJSON.(string)), overrideDoc); err != nil { return err } From 8f1fdbda380b351616a89b366722c0768d3e5517 Mon Sep 17 00:00:00 2001 From: Alessandro Borrello Date: Tue, 11 Feb 2020 22:23:02 +1100 Subject: [PATCH 1050/1212] Correct mis-spelling of override --- 
aws/data_source_aws_iam_policy_document.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/data_source_aws_iam_policy_document.go b/aws/data_source_aws_iam_policy_document.go index 59830480852..460ca5d5453 100644 --- a/aws/data_source_aws_iam_policy_document.go +++ b/aws/data_source_aws_iam_policy_document.go @@ -247,7 +247,7 @@ func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{} mergedDoc.Merge(doc) // merge override_json_list policies into mergedDoc in order specified - if overrideJSONList, hasOverideJSONList := d.GetOk("overide_json_list"); hasOverideJSONList { + if overrideJSONList, hasOverrideJSONList := d.GetOk("override_json_list"); hasOverrideJSONList { for _, overrideJSON := range overrideJSONList.([]interface{}) { overrideDoc := &IAMPolicyDoc{} if err := json.Unmarshal([]byte(overrideJSON.(string)), overrideDoc); err != nil { From c3c6b94bed2a448af8ed4d853a10aaa8da7d1708 Mon Sep 17 00:00:00 2001 From: Alessandro Borrello Date: Tue, 11 Feb 2020 22:23:45 +1100 Subject: [PATCH 1051/1212] Remove newlines after expected test policy outputs --- aws/data_source_aws_iam_policy_document_test.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/aws/data_source_aws_iam_policy_document_test.go b/aws/data_source_aws_iam_policy_document_test.go index a6e5fd9f8d5..26e4cc2690a 100644 --- a/aws/data_source_aws_iam_policy_document_test.go +++ b/aws/data_source_aws_iam_policy_document_test.go @@ -671,8 +671,7 @@ var testAccAWSIAMPolicyDocumentSourceListExpectedJSON = `{ "Action": "bar:ActionTwo" } ] -} -` +}` var testAccAWSIAMPolicyDocumentSourceBlankConfig = ` data "aws_iam_policy_document" "test_source_blank" { @@ -874,8 +873,7 @@ var testAccAWSIAMPolicyDocumentOverrideListExpectedJSON = `{ "Action": "foo:ActionTwo" } ] -} -` +}` var testAccAWSIAMPolicyDocumentNoStatementMergeConfig = ` data "aws_iam_policy_document" "source" { From 5309682b6cf60189bab488579f932a1d2c0b879a Mon Sep 17 00:00:00 2001 From: Alessandro Borrello Date: Fri, 14 Feb 2020 21:18:24 +1100 Subject: [PATCH 1052/1212] Add documentation for new attributes --- .../docs/d/iam_policy_document.html.markdown | 177 +++++++++++++++++- 1 file changed, 176 insertions(+), 1 deletion(-) diff --git a/website/docs/d/iam_policy_document.html.markdown b/website/docs/d/iam_policy_document.html.markdown index c8f69e62a6a..33c87ba97d6 100644 --- a/website/docs/d/iam_policy_document.html.markdown +++ b/website/docs/d/iam_policy_document.html.markdown @@ -84,10 +84,21 @@ The following arguments are supported: current policy document. Statements with non-blank `sid`s in the current policy document will overwrite statements with the same `sid` in the source json. Statements without an `sid` cannot be overwritten. +* `source_json_list` (Optional) - A list of IAM policy documents to import as + a base for the current policy document. Statements accross all policy documents + defined in this list or in the `source_json` attribute must be unique. Statements + with non-blank `sid`s in the current policy document will overwrite statements + with the same `sid` from any of the source json documents. * `override_json` (Optional) - An IAM policy document to import and override the current policy document. Statements with non-blank `sid`s in the override - document will overwrite statements with the same `sid` in the current document. + document will overwrite statements with the same `sid` in the current document, + including any defined by the `override_json_list` attribute. 
Statements without an `sid` cannot be overwritten.
+* `override_json_list` (Optional) - A list of IAM policy documents to import and
+  override the current policy document. Documents in this list are merged
+  iteratively, overwriting previously defined statements with non-blank matching
+  `sid`s. Statements with non-blank `sid`s will overwrite statements with the same
+  `sid` in the current document. Statements without an `sid` cannot be overwritten.
 * `statement` (Optional) - A nested configuration block (described below)
   configuring one *statement* to be included in the policy document.
 * `version` (Optional) - IAM policy document version. Valid values: `2008-10-17`, `2012-10-17`. Defaults to `2012-10-17`. For more information, see the [AWS IAM User Guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_version.html).
@@ -350,3 +361,167 @@ data "aws_iam_policy_document" "politik" {
   ]
 }
 ```
+
+## Combining multiple documents
+
+Multiple documents can be combined using the `source_json_list` or
+`override_json_list` attributes. `source_json_list` requires all documents
+to have unique `sid`s, while `override_json_list` will iteratively overwrite
+matching `sid`s.
+
+Example of `source_json_list`:
+
+```hcl
+data "aws_iam_policy_document" "source_one" {
+  statement {
+    actions   = ["ec2:*"]
+    resources = ["*"]
+  }
+
+  statement {
+    sid = "UniqueSidOne"
+
+    actions   = ["s3:*"]
+    resources = ["*"]
+  }
+}
+
+data "aws_iam_policy_document" "source_two" {
+  statement {
+    sid = "UniqueSidTwo"
+
+    actions   = ["iam:*"]
+    resources = ["*"]
+  }
+
+  statement {
+    actions   = ["lambda:*"]
+    resources = ["*"]
+  }
+}
+
+data "aws_iam_policy_document" "combined" {
+  source_json_list = [
+    data.aws_iam_policy_document.source_one.json,
+    data.aws_iam_policy_document.source_two.json
+  ]
+}
+```
+
+`data.aws_iam_policy_document.combined.json` will evaluate to:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "",
+      "Effect": "Allow",
+      "Action": "ec2:*",
+      "Resource": "*"
+    },
+    {
+      "Sid": "UniqueSidOne",
+      "Effect": "Allow",
+      "Action": "s3:*",
+      "Resource": "*"
+    },
+    {
+      "Sid": "UniqueSidTwo",
+      "Effect": "Allow",
+      "Action": "iam:*",
+      "Resource": "*"
+    },
+    {
+      "Sid": "",
+      "Effect": "Allow",
+      "Action": "lambda:*",
+      "Resource": "*"
+    }
+  ]
+}
+```
+
+Example of `override_json_list`:
+
+```hcl
+data "aws_iam_policy_document" "policy_one" {
+  statement {
+    sid    = "OverridePlaceHolderOne"
+    effect = "Allow"
+
+    actions   = ["s3:*"]
+    resources = ["*"]
+  }
+}
+
+data "aws_iam_policy_document" "policy_two" {
+  statement {
+    effect    = "Allow"
+    actions   = ["ec2:*"]
+    resources = ["*"]
+  }
+
+  statement {
+    sid    = "OverridePlaceHolderTwo"
+    effect = "Allow"
+
+    actions   = ["iam:*"]
+    resources = ["*"]
+  }
+}
+
+data "aws_iam_policy_document" "policy_three" {
+  statement {
+    sid    = "OverridePlaceHolderOne"
+    effect = "Deny"
+
+    actions   = ["logs:*"]
+    resources = ["*"]
+  }
+}
+
+data "aws_iam_policy_document" "combined" {
+  override_json_list = [
+    data.aws_iam_policy_document.policy_one.json,
+    data.aws_iam_policy_document.policy_two.json,
+    data.aws_iam_policy_document.policy_three.json
+  ]
+
+  statement {
+    sid    = "OverridePlaceHolderTwo"
+    effect = "Deny"
+
+    actions   = ["*"]
+    resources = ["*"]
+  }
+}
+```
+
+`data.aws_iam_policy_document.combined.json` will evaluate to:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "OverridePlaceHolderTwo",
+      "Effect": "Allow",
+      "Action": "iam:*",
+      "Resource": "*"
+    },
+    {
+      "Sid": "OverridePlaceHolderOne",
"Effect": "Deny", + "Action": "logs:*", + "Resource": "*" + }, + { + "Sid": "", + "Effect": "Allow", + "Action": "ec2:*", + "Resource": "*" + }, + ] +} +``` From 37c0e361810a309737eec69f3089a0cc4db464d0 Mon Sep 17 00:00:00 2001 From: Alessandro Borrello Date: Sun, 16 Feb 2020 19:15:58 +1100 Subject: [PATCH 1053/1212] Fix spelling mistake in docs --- website/docs/d/iam_policy_document.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/d/iam_policy_document.html.markdown b/website/docs/d/iam_policy_document.html.markdown index 33c87ba97d6..53817cc8dd2 100644 --- a/website/docs/d/iam_policy_document.html.markdown +++ b/website/docs/d/iam_policy_document.html.markdown @@ -85,7 +85,7 @@ The following arguments are supported: policy document will overwrite statements with the same `sid` in the source json. Statements without an `sid` cannot be overwritten. * `source_json_list` (Optional) - A list of IAM policy documents to import as - a base for the current policy document. Statements accross all policy documents + a base for the current policy document. Statements across all policy documents defined in this list or in the `source_json` attribute must be unique. Statements with non-blank `sid`s in the current policy document will overwrite statements with the same `sid` from any of the source json documents. From 6249e5c4c39c4b72cf356ac0d061a8a3f46c9998 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 10 Feb 2021 10:55:12 -0500 Subject: [PATCH 1054/1212] data-source/iam_policy_document: Fix lint issues --- ...ata_source_aws_iam_policy_document_test.go | 171 ++++++++++-------- 1 file changed, 93 insertions(+), 78 deletions(-) diff --git a/aws/data_source_aws_iam_policy_document_test.go b/aws/data_source_aws_iam_policy_document_test.go index 26e4cc2690a..5a4bf1d98de 100644 --- a/aws/data_source_aws_iam_policy_document_test.go +++ b/aws/data_source_aws_iam_policy_document_test.go @@ -613,38 +613,43 @@ func testAccAWSIAMPolicyDocumentSourceExpectedJSON() string { var testAccAWSIAMPolicyDocumentSourceListConfig = ` data "aws_iam_policy_document" "policy_a" { - statement { - sid = "" - effect = "Allow" - actions = [ "foo:ActionOne" ] - } - statement { - sid = "validSidOne" - effect = "Allow" - actions = [ "bar:ActionOne" ] - } + statement { + sid = "" + effect = "Allow" + actions = ["foo:ActionOne"] + } + + statement { + sid = "validSidOne" + effect = "Allow" + actions = ["bar:ActionOne"] + } } + data "aws_iam_policy_document" "policy_b" { - statement { - sid = "validSidTwo" - effect = "Deny" - actions = [ "foo:ActionTwo" ] - } + statement { + sid = "validSidTwo" + effect = "Deny" + actions = ["foo:ActionTwo"] + } } + data "aws_iam_policy_document" "policy_c" { - statement { - sid = "" - effect = "Allow" - actions = [ "bar:ActionTwo" ] - } + statement { + sid = "" + effect = "Allow" + actions = ["bar:ActionTwo"] + } } + data "aws_iam_policy_document" "test_source_list" { - version = "2012-10-17" - source_json_list = [ - data.aws_iam_policy_document.policy_a.json, - data.aws_iam_policy_document.policy_b.json, - data.aws_iam_policy_document.policy_c.json - ] + version = "2012-10-17" + + source_json_list = [ + data.aws_iam_policy_document.policy_a.json, + data.aws_iam_policy_document.policy_b.json, + data.aws_iam_policy_document.policy_c.json + ] } ` var testAccAWSIAMPolicyDocumentSourceListExpectedJSON = `{ @@ -731,38 +736,43 @@ var testAccAWSIAMPolicyDocumentSourceConflictingExpectedJSON = `{ var testAccAWSIAMPolicyDocumentSourceListConflictingConfig = ` 
data "aws_iam_policy_document" "policy_a" { - statement { - sid = "" - effect = "Allow" - actions = [ "foo:ActionOne" ] - } - statement { - sid = "conflictSid" - effect = "Allow" - actions = [ "bar:ActionOne" ] - } + statement { + sid = "" + effect = "Allow" + actions = ["foo:ActionOne"] + } + + statement { + sid = "conflictSid" + effect = "Allow" + actions = ["bar:ActionOne"] + } } + data "aws_iam_policy_document" "policy_b" { - statement { - sid = "validSid" - effect = "Deny" - actions = [ "foo:ActionTwo" ] - } + statement { + sid = "validSid" + effect = "Deny" + actions = ["foo:ActionTwo"] + } } + data "aws_iam_policy_document" "policy_c" { - statement { - sid = "conflictSid" - effect = "Allow" - actions = [ "bar:ActionTwo" ] - } + statement { + sid = "conflictSid" + effect = "Allow" + actions = ["bar:ActionTwo"] + } } + data "aws_iam_policy_document" "test_source_list_conflicting" { - version = "2012-10-17" - source_json_list = [ - data.aws_iam_policy_document.policy_a.json, - data.aws_iam_policy_document.policy_b.json, - data.aws_iam_policy_document.policy_c.json - ] + version = "2012-10-17" + + source_json_list = [ + data.aws_iam_policy_document.policy_a.json, + data.aws_iam_policy_document.policy_b.json, + data.aws_iam_policy_document.policy_c.json + ] } ` @@ -819,38 +829,43 @@ var testAccAWSIAMPolicyDocumentOverrideExpectedJSON = `{ var testAccAWSIAMPolicyDocumentOverrideListConfig = ` data "aws_iam_policy_document" "policy_a" { - statement { - sid = "" - effect = "Allow" - actions = [ "foo:ActionOne" ] - } - statement { - sid = "overrideSid" - effect = "Allow" - actions = [ "bar:ActionOne" ] - } + statement { + sid = "" + effect = "Allow" + actions = ["foo:ActionOne"] + } + + statement { + sid = "overrideSid" + effect = "Allow" + actions = ["bar:ActionOne"] + } } + data "aws_iam_policy_document" "policy_b" { - statement { - sid = "validSid" - effect = "Deny" - actions = [ "foo:ActionTwo" ] - } + statement { + sid = "validSid" + effect = "Deny" + actions = ["foo:ActionTwo"] + } } + data "aws_iam_policy_document" "policy_c" { - statement { - sid = "overrideSid" - effect = "Deny" - actions = [ "bar:ActionOne" ] - } + statement { + sid = "overrideSid" + effect = "Deny" + actions = ["bar:ActionOne"] + } } + data "aws_iam_policy_document" "test_override_list" { - version = "2012-10-17" - override_json_list = [ - data.aws_iam_policy_document.policy_a.json, - data.aws_iam_policy_document.policy_b.json, - data.aws_iam_policy_document.policy_c.json - ] + version = "2012-10-17" + + override_json_list = [ + data.aws_iam_policy_document.policy_a.json, + data.aws_iam_policy_document.policy_b.json, + data.aws_iam_policy_document.policy_c.json + ] } ` From e426a9b4f349d2b859ff1c44720006d6929611e0 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 10 Feb 2021 11:31:45 -0500 Subject: [PATCH 1055/1212] data-source/iam_policy_document: Make provider consistent --- aws/data_source_aws_iam_policy_document.go | 23 ++++++++++------------ 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/aws/data_source_aws_iam_policy_document.go b/aws/data_source_aws_iam_policy_document.go index 460ca5d5453..674d45822cb 100644 --- a/aws/data_source_aws_iam_policy_document.go +++ b/aws/data_source_aws_iam_policy_document.go @@ -116,16 +116,13 @@ func dataSourceAwsIamPolicyDocument() *schema.Resource { func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{}) error { mergedDoc := &IAMPolicyDoc{} - // populate mergedDoc directly with any source_json - if sourceJSON, hasSourceJSON := 
d.GetOk("source_json"); hasSourceJSON { - if err := json.Unmarshal([]byte(sourceJSON.(string)), mergedDoc); err != nil { + if v, ok := d.GetOk("source_json"); ok { + if err := json.Unmarshal([]byte(v.(string)), mergedDoc); err != nil { return err } } - // populate mergedDoc with any provided source_json_list - if sourceJSONList, hasSourceJSONList := d.GetOk("source_json_list"); hasSourceJSONList { - + if v, ok := d.GetOk("source_json_list"); ok && len(v.([]interface{})) > 0 { // generate sid map to assure there are no duplicates in source jsons sidMap := make(map[string]struct{}) for _, stmt := range mergedDoc.Statements { @@ -135,7 +132,7 @@ func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{} } // merge sourceDocs in order specified - for sourceJSONIndex, sourceJSON := range sourceJSONList.([]interface{}) { + for sourceJSONIndex, sourceJSON := range v.([]interface{}) { sourceDoc := &IAMPolicyDoc{} if err := json.Unmarshal([]byte(sourceJSON.(string)), sourceDoc); err != nil { return err @@ -145,7 +142,7 @@ func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{} for stmtIndex, stmt := range sourceDoc.Statements { if stmt.Sid != "" { if _, sidExists := sidMap[stmt.Sid]; sidExists { - return fmt.Errorf("Found duplicate sid (%s) in source_json_list (item %d; statement %d). Either remove the sid or ensure the sid is unique across all statements.", stmt.Sid, sourceJSONIndex, stmtIndex) + return fmt.Errorf("duplicate SID (%s) in source_json_list (item %d; statement %d). Remove the SID or ensure SIDs are unique.", stmt.Sid, sourceJSONIndex, stmtIndex) } sidMap[stmt.Sid] = struct{}{} } @@ -178,7 +175,7 @@ func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{} if sid, ok := cfgStmt["sid"]; ok { if _, ok := sidMap[sid.(string)]; ok { - return fmt.Errorf("Found duplicate sid (%s). Either remove the sid or ensure the sid is unique across all statements.", sid.(string)) + return fmt.Errorf("duplicate SID (%s). 
Remove the SID or ensure the SID is unique.", sid.(string))
 			}
 			stmt.Sid = sid.(string)
 			if len(stmt.Sid) > 0 {
@@ -247,8 +244,8 @@ func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{}
 	mergedDoc.Merge(doc)
 
 	// merge override_json_list policies into mergedDoc in order specified
-	if overrideJSONList, hasOverrideJSONList := d.GetOk("override_json_list"); hasOverrideJSONList {
-		for _, overrideJSON := range overrideJSONList.([]interface{}) {
+	if v, ok := d.GetOk("override_json_list"); ok && len(v.([]interface{})) > 0 {
+		for _, overrideJSON := range v.([]interface{}) {
 			overrideDoc := &IAMPolicyDoc{}
 			if err := json.Unmarshal([]byte(overrideJSON.(string)), overrideDoc); err != nil {
 				return err
@@ -260,9 +257,9 @@ func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{}
 	}
 
 	// merge in override_json
-	if overrideJSON, hasOverrideJSON := d.GetOk("override_json"); hasOverrideJSON {
+	if v, ok := d.GetOk("override_json"); ok {
 		overrideDoc := &IAMPolicyDoc{}
-		if err := json.Unmarshal([]byte(overrideJSON.(string)), overrideDoc); err != nil {
+		if err := json.Unmarshal([]byte(v.(string)), overrideDoc); err != nil {
 			return err
 		}

From 59395dbda2641315f0115931e65f6bf15c0a3808 Mon Sep 17 00:00:00 2001
From: Luciano Mammino
Date: Wed, 10 Feb 2021 16:44:51 +0000
Subject: [PATCH 1056/1212] Provides correct values for max and min capacity
 with PostgreSQL

---
 website/docs/r/rds_cluster.html.markdown | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/website/docs/r/rds_cluster.html.markdown b/website/docs/r/rds_cluster.html.markdown
index 0fb0c9c303b..80b267c70aa 100644
--- a/website/docs/r/rds_cluster.html.markdown
+++ b/website/docs/r/rds_cluster.html.markdown
@@ -209,8 +209,8 @@ resource "aws_rds_cluster" "example" {
 ```
 
 * `auto_pause` - (Optional) Whether to enable automatic pause. A DB cluster can be paused only when it's idle (it has no connections). If a DB cluster is paused for more than seven days, the DB cluster might be backed up with a snapshot. In this case, the DB cluster is restored when there is a request to connect to it. Defaults to `true`.
-* `max_capacity` - (Optional) The maximum capacity. The maximum capacity must be greater than or equal to the minimum capacity. Valid capacity values are `1`, `2`, `4`, `8`, `16`, `32`, `64`, `128`, and `256`. Defaults to `16`.
-* `min_capacity` - (Optional) The minimum capacity. The minimum capacity must be lesser than or equal to the maximum capacity. Valid capacity values are `1`, `2`, `4`, `8`, `16`, `32`, `64`, `128`, and `256`. Defaults to `1`.
+* `max_capacity` - (Optional) The maximum capacity. The maximum capacity must be greater than or equal to the minimum capacity. Valid capacity values are `1`, `2`, `4`, `8`, `16`, `32`, `64`, `128`, and `256` (or `1`, `2`, `4`, `8`, `16`, `32`, `64`, `192`, and `384` for PostgreSQL). Defaults to `16`.
+* `min_capacity` - (Optional) The minimum capacity. The minimum capacity must be less than or equal to the maximum capacity. Valid capacity values are `1`, `2`, `4`, `8`, `16`, `32`, `64`, `128`, and `256` (or `1`, `2`, `4`, `8`, `16`, `32`, `64`, `192`, and `384` for PostgreSQL). Defaults to `1`.
 * `seconds_until_auto_pause` - (Optional) The time, in seconds, before an Aurora DB cluster in serverless mode is paused. Valid values are `300` through `86400`. Defaults to `300`.
 * `timeout_action` - (Optional) The action to take when the timeout is reached. Valid values: `ForceApplyCapacityChange`, `RollbackCapacityChange`.
Defaults to `RollbackCapacityChange`. See [documentation](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.how-it-works.html#aurora-serverless.how-it-works.timeout-action). From 0c68aa1cd8712a7b610b6f696ff92892f3c69dbc Mon Sep 17 00:00:00 2001 From: Pradeep Bhadani Date: Wed, 10 Feb 2021 16:45:41 +0000 Subject: [PATCH 1057/1212] remove unwanted char --- website/docs/r/s3_bucket_policy.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/s3_bucket_policy.html.markdown b/website/docs/r/s3_bucket_policy.html.markdown index 1eee6249438..f51242b76be 100644 --- a/website/docs/r/s3_bucket_policy.html.markdown +++ b/website/docs/r/s3_bucket_policy.html.markdown @@ -31,7 +31,7 @@ resource "aws_s3_bucket_policy" "b" { "Sid": "IPAllow", "Effect": "Deny", "Principal": "*", - "Action": "s3:*",˜ + "Action": "s3:*", "Resource": "arn:aws:s3:::my-tf-test-bucket/*", "Condition": { "IpAddress": {"aws:SourceIp": "8.8.8.8/32"} From 0c2a0f5b27345808b0ba04bb815184c1fbf8e30e Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 9 Dec 2020 13:26:29 -0500 Subject: [PATCH 1058/1212] r/aws_s3_bucket: Test replication to multiple destinations. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSS3Bucket_Replication_MultipleDestinations_' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccAWSS3Bucket_Replication_MultipleDestinations_ -timeout 120m === RUN TestAccAWSS3Bucket_Replication_MultipleDestinations_EmptyFilter === PAUSE TestAccAWSS3Bucket_Replication_MultipleDestinations_EmptyFilter === RUN TestAccAWSS3Bucket_Replication_MultipleDestinations_NonEmptyFilter === PAUSE TestAccAWSS3Bucket_Replication_MultipleDestinations_NonEmptyFilter === CONT TestAccAWSS3Bucket_Replication_MultipleDestinations_EmptyFilter === CONT TestAccAWSS3Bucket_Replication_MultipleDestinations_NonEmptyFilter --- PASS: TestAccAWSS3Bucket_Replication_MultipleDestinations_EmptyFilter (39.33s) --- PASS: TestAccAWSS3Bucket_Replication_MultipleDestinations_NonEmptyFilter (40.35s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 40.541s --- aws/resource_aws_s3_bucket_test.go | 299 +++++++++++++++++++++++++++++ 1 file changed, 299 insertions(+) diff --git a/aws/resource_aws_s3_bucket_test.go b/aws/resource_aws_s3_bucket_test.go index d8c9eae4b82..81e49aa8669 100644 --- a/aws/resource_aws_s3_bucket_test.go +++ b/aws/resource_aws_s3_bucket_test.go @@ -1548,6 +1548,141 @@ func TestAccAWSS3Bucket_Replication(t *testing.T) { }) } +func TestAccAWSS3Bucket_Replication_MultipleDestinations_EmptyFilter(t *testing.T) { + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketConfigReplicationWithMultipleDestinationsEmptyFilter(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + 
testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "3"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule1", + "priority": "1", + "status": "Enabled", + "filter.#": "1", + "filter.0.prefix": "", + "destination.#": "1", + "destination.0.storage_class": "STANDARD", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule2", + "priority": "2", + "status": "Enabled", + "filter.#": "1", + "filter.0.prefix": "", + "destination.#": "1", + "destination.0.storage_class": "STANDARD_IA", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule3", + "priority": "3", + "status": "Disabled", + "filter.#": "1", + "filter.0.prefix": "", + "destination.#": "1", + "destination.0.storage_class": "ONEZONE_IA", + }), + ), + }, + { + Config: testAccAWSS3BucketConfigReplicationWithMultipleDestinationsEmptyFilter(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func TestAccAWSS3Bucket_Replication_MultipleDestinations_NonEmptyFilter(t *testing.T) { + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketConfigReplicationWithMultipleDestinationsNonEmptyFilter(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination3", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "3"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule1", + "priority": "1", + "status": "Enabled", + "filter.#": "1", + "filter.0.prefix": "prefix1", + "destination.#": "1", + "destination.0.storage_class": "STANDARD", + }), + 
resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule2", + "priority": "2", + "status": "Enabled", + "filter.#": "1", + "filter.0.tags.%": "1", + "filter.0.tags.Key2": "Value2", + "destination.#": "1", + "destination.0.storage_class": "STANDARD_IA", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule3", + "priority": "3", + "status": "Disabled", + "filter.#": "1", + "filter.0.prefix": "prefix3", + "filter.0.tags.%": "1", + "filter.0.tags.Key3": "Value3", + "destination.#": "1", + "destination.0.storage_class": "ONEZONE_IA", + }), + ), + }, + { + Config: testAccAWSS3BucketConfigReplicationWithMultipleDestinationsNonEmptyFilter(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + func TestAccAWSS3Bucket_ReplicationConfiguration_Rule_Destination_AccessControlTranslation(t *testing.T) { rInt := acctest.RandInt() region := testAccGetRegion() @@ -3765,6 +3900,170 @@ resource "aws_s3_bucket" "bucket" { `, randInt, storageClass) } +func testAccAWSS3BucketConfigReplicationWithMultipleDestinationsEmptyFilter(randInt int) string { + return composeConfig( + testAccAWSS3BucketConfigReplicationBasic(randInt), + fmt.Sprintf(` +resource "aws_s3_bucket" "destination2" { + provider = "awsalternate" + bucket = "tf-test-bucket-destination2-%[1]d" + + versioning { + enabled = true + } +} + +resource "aws_s3_bucket" "destination3" { + provider = "awsalternate" + bucket = "tf-test-bucket-destination3-%[1]d" + + versioning { + enabled = true + } +} + +resource "aws_s3_bucket" "bucket" { + bucket = "tf-test-bucket-%[1]d" + acl = "private" + + versioning { + enabled = true + } + + replication_configuration { + role = aws_iam_role.role.arn + + rules { + id = "rule1" + priority = 1 + status = "Enabled" + + filter {} + + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" + } + } + + rules { + id = "rule2" + priority = 2 + status = "Enabled" + + filter {} + + destination { + bucket = aws_s3_bucket.destination2.arn + storage_class = "STANDARD_IA" + } + } + + rules { + id = "rule3" + priority = 3 + status = "Disabled" + + filter {} + + destination { + bucket = aws_s3_bucket.destination3.arn + storage_class = "ONEZONE_IA" + } + } + } +} +`, randInt)) +} + +func testAccAWSS3BucketConfigReplicationWithMultipleDestinationsNonEmptyFilter(randInt int) string { + return composeConfig( + testAccAWSS3BucketConfigReplicationBasic(randInt), + fmt.Sprintf(` +resource "aws_s3_bucket" "destination2" { + provider = "awsalternate" + bucket = "tf-test-bucket-destination2-%[1]d" + + versioning { + enabled = true + } +} + +resource "aws_s3_bucket" "destination3" { + provider = "awsalternate" + bucket = "tf-test-bucket-destination3-%[1]d" + + versioning { + enabled = true + } +} + +resource "aws_s3_bucket" "bucket" { + bucket = "tf-test-bucket-%[1]d" + acl = "private" + + versioning { + enabled = true + } + + replication_configuration { + role = aws_iam_role.role.arn + + rules { + id = "rule1" + priority = 1 + status = "Enabled" + + filter { + prefix = "prefix1" + } + + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" + } + } + + rules { + id = "rule2" + priority = 2 + status = "Enabled" + + filter { + tags = { + Key2 = "Value2" + } + } + + destination { + bucket = aws_s3_bucket.destination2.arn + 
storage_class = "STANDARD_IA" + } + } + + rules { + id = "rule3" + priority = 3 + status = "Disabled" + + filter { + prefix = "prefix3" + + tags = { + Key3 = "Value3" + } + } + + destination { + bucket = aws_s3_bucket.destination3.arn + storage_class = "ONEZONE_IA" + } + } + } +} +`, randInt)) +} + func testAccAWSS3BucketConfigReplicationWithSseKmsEncryptedObjects(randInt int) string { return testAccAWSS3BucketConfigReplicationBasic(randInt) + fmt.Sprintf(` resource "aws_kms_key" "replica" { From cb8cccf2ac97d5ea1778b77c9ff58ab0484960e8 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 14 Dec 2020 09:17:33 -0500 Subject: [PATCH 1059/1212] Document requirement for 'filter' block when using replication to multiple destination buckets. --- website/docs/r/s3_bucket.html.markdown | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index 51c09d1d95b..bb336f64d0d 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -443,6 +443,8 @@ Replication configuration V1 supports filtering based on only the `prefix` attri * If any rule has `filter` specified then they all must * `priority` is optional (with a default value of `0`) but must be unique between multiple rules +~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. + The `destination` object supports the following: * `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule. @@ -482,7 +484,7 @@ The `apply_server_side_encryption_by_default` object supports the following: The `grant` object supports the following: -* `id` - (optional) Canonical user id to grant for. Used only when `type` is `CanonicalUser`. +* `id` - (optional) Canonical user id to grant for. Used only when `type` is `CanonicalUser`. * `type` - (required) - Type of grantee to apply for. Valid values are `CanonicalUser` and `Group`. `AmazonCustomerByEmail` is not supported. * `permissions` - (required) List of permissions to apply for grantee. Valid values are `READ`, `WRITE`, `READ_ACP`, `WRITE_ACP`, `FULL_CONTROL`. * `uri` - (optional) Uri address to grant for. Used only when `type` is `Group`. From 31e59ab085d449d1d4d480ea8d50179bdbe98531 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 10 Feb 2021 13:19:39 -0500 Subject: [PATCH 1060/1212] r/aws_globalaccelerator_accelerator: Better documentation around flow log attributes. --- .../docs/r/globalaccelerator_accelerator.markdown | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/website/docs/r/globalaccelerator_accelerator.markdown b/website/docs/r/globalaccelerator_accelerator.markdown index 5249b087f96..3e5b4a85113 100644 --- a/website/docs/r/globalaccelerator_accelerator.markdown +++ b/website/docs/r/globalaccelerator_accelerator.markdown @@ -31,16 +31,16 @@ resource "aws_globalaccelerator_accelerator" "example" { The following arguments are supported: * `name` - (Required) The name of the accelerator. -* `ip_address_type` - (Optional) The value for the address type must be `IPV4`. -* `enabled` - (Optional) Indicates whether the accelerator is enabled. The value is true or false. The default value is true. +* `ip_address_type` - (Optional) The value for the address type. Defaults to `IPV4`. Valid values: `IPV4`. 
+* `enabled` - (Optional) Indicates whether the accelerator is enabled. Defaults to `true`. Valid values: `true`, `false`. * `attributes` - (Optional) The attributes of the accelerator. Fields documented below. * `tags` - (Optional) A map of tags to assign to the resource. **attributes** supports the following attributes: -* `flow_logs_enabled` - (Optional) Indicates whether flow logs are enabled. -* `flow_logs_s3_bucket` - (Optional) The name of the Amazon S3 bucket for the flow logs. -* `flow_logs_s3_prefix` - (Optional) The prefix for the location in the Amazon S3 bucket for the flow logs. +* `flow_logs_enabled` - (Optional) Indicates whether flow logs are enabled. Defaults to `false`. Valid values: `true`, `false`. +* `flow_logs_s3_bucket` - (Optional) The name of the Amazon S3 bucket for the flow logs. Required if `flow_logs_enabled` is `true`. +* `flow_logs_s3_prefix` - (Optional) The prefix for the location in the Amazon S3 bucket for the flow logs. Required if `flow_logs_enabled` is `true`. ## Attributes Reference @@ -56,7 +56,7 @@ In addition to all arguments above, the following attributes are exported: **ip_sets** exports the following attributes: * `ip_addresses` - A list of IP addresses in the IP address set. -* `ip_family` - The types of IP addresses included in this IP set. +* `ip_family` - The type of IP addresses included in this IP set. [1]: https://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html From 7275b3ef44f0cb555f39f51f18880e872219dd35 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 10 Feb 2021 14:57:29 -0500 Subject: [PATCH 1061/1212] data-source/iam_policy_document: Clean up arguments --- aws/data_source_aws_iam_policy_document.go | 62 +++++++++---------- ...ata_source_aws_iam_policy_document_test.go | 10 +-- 2 files changed, 36 insertions(+), 36 deletions(-) diff --git a/aws/data_source_aws_iam_policy_document.go b/aws/data_source_aws_iam_policy_document.go index 674d45822cb..a248d66539c 100644 --- a/aws/data_source_aws_iam_policy_document.go +++ b/aws/data_source_aws_iam_policy_document.go @@ -27,11 +27,15 @@ func dataSourceAwsIamPolicyDocument() *schema.Resource { Read: dataSourceAwsIamPolicyDocumentRead, Schema: map[string]*schema.Schema{ + "json": { + Type: schema.TypeString, + Computed: true, + }, "override_json": { Type: schema.TypeString, Optional: true, }, - "override_json_list": { + "override_policy_documents": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, @@ -44,7 +48,7 @@ func dataSourceAwsIamPolicyDocument() *schema.Resource { Type: schema.TypeString, Optional: true, }, - "source_json_list": { + "source_policy_documents": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, @@ -54,22 +58,7 @@ func dataSourceAwsIamPolicyDocument() *schema.Resource { Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "sid": { - Type: schema.TypeString, - Optional: true, - }, - "effect": { - Type: schema.TypeString, - Optional: true, - Default: "Allow", - ValidateFunc: validation.StringInSlice([]string{"Allow", "Deny"}, false), - }, - "actions": setOfString, - "not_actions": setOfString, - "resources": setOfString, - "not_resources": setOfString, - "principals": dataSourceAwsIamPolicyPrincipalSchema(), - "not_principals": dataSourceAwsIamPolicyPrincipalSchema(), + "actions": setOfString, "condition": { Type: schema.TypeSet, Optional: true, @@ -79,10 +68,6 @@ func dataSourceAwsIamPolicyDocument() *schema.Resource { Type: schema.TypeString, 
Required: true, }, - "variable": { - Type: schema.TypeString, - Required: true, - }, "values": { Type: schema.TypeSet, Required: true, @@ -90,9 +75,28 @@ func dataSourceAwsIamPolicyDocument() *schema.Resource { Type: schema.TypeString, }, }, + "variable": { + Type: schema.TypeString, + Required: true, + }, }, }, }, + "effect": { + Type: schema.TypeString, + Optional: true, + Default: "Allow", + ValidateFunc: validation.StringInSlice([]string{"Allow", "Deny"}, false), + }, + "not_actions": setOfString, + "not_principals": dataSourceAwsIamPolicyPrincipalSchema(), + "not_resources": setOfString, + "principals": dataSourceAwsIamPolicyPrincipalSchema(), + "resources": setOfString, + "sid": { + Type: schema.TypeString, + Optional: true, + }, }, }, }, @@ -105,10 +109,6 @@ func dataSourceAwsIamPolicyDocument() *schema.Resource { "2012-10-17", }, false), }, - "json": { - Type: schema.TypeString, - Computed: true, - }, }, } } @@ -122,7 +122,7 @@ func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{} } } - if v, ok := d.GetOk("source_json_list"); ok && len(v.([]interface{})) > 0 { + if v, ok := d.GetOk("source_policy_documents"); ok && len(v.([]interface{})) > 0 { // generate sid map to assure there are no duplicates in source jsons sidMap := make(map[string]struct{}) for _, stmt := range mergedDoc.Statements { @@ -142,7 +142,7 @@ func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{} for stmtIndex, stmt := range sourceDoc.Statements { if stmt.Sid != "" { if _, sidExists := sidMap[stmt.Sid]; sidExists { - return fmt.Errorf("duplicate SID (%s) in source_json_list (item %d; statement %d). Remove the SID or ensure SIDs are unique.", stmt.Sid, sourceJSONIndex, stmtIndex) + return fmt.Errorf("duplicate Sid (%s) in source_policy_documents (item %d; statement %d). Remove the Sid or ensure Sids are unique.", stmt.Sid, sourceJSONIndex, stmtIndex) } sidMap[stmt.Sid] = struct{}{} } @@ -175,7 +175,7 @@ func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{} if sid, ok := cfgStmt["sid"]; ok { if _, ok := sidMap[sid.(string)]; ok { - return fmt.Errorf("duplicate SID (%s). Remove the SID or ensure the SID is unique.", sid.(string)) + return fmt.Errorf("duplicate Sid (%s). Remove the Sid or ensure the Sid is unique.", sid.(string)) } stmt.Sid = sid.(string) if len(stmt.Sid) > 0 { @@ -243,8 +243,8 @@ func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{} // merge our current document into mergedDoc mergedDoc.Merge(doc) - // merge override_json_list policies into mergedDoc in order specified - if v, ok := d.GetOk("override_json_list"); ok && len(v.([]interface{})) > 0 { + // merge override_policy_documents policies into mergedDoc in order specified + if v, ok := d.GetOk("override_policy_documents"); ok && len(v.([]interface{})) > 0 { for _, overrideJSON := range v.([]interface{}) { overrideDoc := &IAMPolicyDoc{} if err := json.Unmarshal([]byte(overrideJSON.(string)), overrideDoc); err != nil { diff --git a/aws/data_source_aws_iam_policy_document_test.go b/aws/data_source_aws_iam_policy_document_test.go index 5a4bf1d98de..ade95a96adb 100644 --- a/aws/data_source_aws_iam_policy_document_test.go +++ b/aws/data_source_aws_iam_policy_document_test.go @@ -98,7 +98,7 @@ func TestAccAWSDataSourceIAMPolicyDocument_sourceListConflicting(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccAWSIAMPolicyDocumentSourceListConflictingConfig, - ExpectError: regexp.MustCompile(`Found duplicate sid (.*?) 
in source_json_list`), + ExpectError: regexp.MustCompile(`duplicate Sid (.*?)`), }, }, }) @@ -179,7 +179,7 @@ func TestAccAWSDataSourceIAMPolicyDocument_duplicateSid(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccAWSIAMPolicyDocumentDuplicateSidConfig, - ExpectError: regexp.MustCompile(`Found duplicate sid`), + ExpectError: regexp.MustCompile(`duplicate Sid`), }, { Config: testAccAWSIAMPolicyDocumentDuplicateBlankSidConfig, @@ -645,7 +645,7 @@ data "aws_iam_policy_document" "policy_c" { data "aws_iam_policy_document" "test_source_list" { version = "2012-10-17" - source_json_list = [ + source_policy_documents = [ data.aws_iam_policy_document.policy_a.json, data.aws_iam_policy_document.policy_b.json, data.aws_iam_policy_document.policy_c.json @@ -768,7 +768,7 @@ data "aws_iam_policy_document" "policy_c" { data "aws_iam_policy_document" "test_source_list_conflicting" { version = "2012-10-17" - source_json_list = [ + source_policy_documents = [ data.aws_iam_policy_document.policy_a.json, data.aws_iam_policy_document.policy_b.json, data.aws_iam_policy_document.policy_c.json @@ -861,7 +861,7 @@ data "aws_iam_policy_document" "policy_c" { data "aws_iam_policy_document" "test_override_list" { version = "2012-10-17" - override_json_list = [ + override_policy_documents = [ data.aws_iam_policy_document.policy_a.json, data.aws_iam_policy_document.policy_b.json, data.aws_iam_policy_document.policy_c.json From 373e8898b92eebbe27c4be74c5214107e9e0bc8f Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 10 Feb 2021 14:58:05 -0500 Subject: [PATCH 1062/1212] docs/iam_policy_document: Clean up documentation --- .../docs/d/iam_policy_document.html.markdown | 268 +++++++----------- 1 file changed, 107 insertions(+), 161 deletions(-) diff --git a/website/docs/d/iam_policy_document.html.markdown b/website/docs/d/iam_policy_document.html.markdown index 53817cc8dd2..9fc9130e2d8 100644 --- a/website/docs/d/iam_policy_document.html.markdown +++ b/website/docs/d/iam_policy_document.html.markdown @@ -8,14 +8,18 @@ description: |- # Data Source: aws_iam_policy_document -Generates an IAM policy document in JSON format. +Generates an IAM policy document in JSON format for use with resources that expect policy documents such as [`aws_iam_policy`](/docs/providers/aws/r/iam_policy.html). -This is a data source which can be used to construct a JSON representation of -an IAM policy document, for use with resources which expect policy documents, -such as the `aws_iam_policy` resource. +Using this data source to generate policy documents is *optional*. It is also valid to use literal JSON strings in your configuration or to use the `file` interpolation function to read a raw JSON policy document from a file. + +~> **NOTE:** AWS's IAM policy document syntax allows for replacement of [policy variables](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html) within a statement using `${...}`-style notation, which conflicts with Terraform's interpolation syntax. In order to use AWS policy variables with this data source, use `&{...}` notation for interpolations that should be processed by AWS rather than by Terraform. -> For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). 
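+
+A minimal sketch of the `&{...}` notation (the bucket name and prefix layout here are illustrative assumptions, not part of this change): Terraform leaves `&{aws:username}` for AWS to substitute at request time, rendering it as `${aws:username}` in the generated JSON.
+
+```hcl
+data "aws_iam_policy_document" "user_prefix_sketch" {
+  statement {
+    actions   = ["s3:ListBucket"]
+    resources = ["arn:aws:s3:::example-bucket"]
+
+    condition {
+      test     = "StringLike"
+      variable = "s3:prefix"
+      # "&{aws:username}" is emitted as "${aws:username}" so that AWS,
+      # not Terraform, resolves the variable.
+      values   = ["home/&{aws:username}/*"]
+    }
+  }
+}
+```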
+## Example Usage + +### Basic Example + ```hcl data "aws_iam_policy_document" "example" { statement { @@ -71,117 +75,9 @@ resource "aws_iam_policy" "example" { } ``` -Using this data source to generate policy documents is *optional*. It is also -valid to use literal JSON strings within your configuration, or to use the -`file` interpolation function to read a raw JSON policy document from a file. - -## Argument Reference - -The following arguments are supported: - -* `policy_id` (Optional) - An ID for the policy document. -* `source_json` (Optional) - An IAM policy document to import as a base for the - current policy document. Statements with non-blank `sid`s in the current - policy document will overwrite statements with the same `sid` in the source - json. Statements without an `sid` cannot be overwritten. -* `source_json_list` (Optional) - A list of IAM policy documents to import as - a base for the current policy document. Statements across all policy documents - defined in this list or in the `source_json` attribute must be unique. Statements - with non-blank `sid`s in the current policy document will overwrite statements - with the same `sid` from any of the source json documents. -* `override_json` (Optional) - An IAM policy document to import and override the - current policy document. Statements with non-blank `sid`s in the override - document will overwrite statements with the same `sid` in the current document, - including any defined by the `override_json_list` attribute. - Statements without an `sid` cannot be overwritten. -* `override_json_list` (Optional) - A list of IAM policy documents to import and - override the current policy document. Documents in this list are merged - iteratively, overwriting previously defined statements with non-blank matching - `sid`s. Statements with non-blank `sid`s will overwrite statements with the same - `sid` in the current document. Statements without an `sid` cannot be overwritten. -* `statement` (Optional) - A nested configuration block (described below) - configuring one *statement* to be included in the policy document. -* `version` (Optional) - IAM policy document version. Valid values: `2008-10-17`, `2012-10-17`. Defaults to `2012-10-17`. For more information, see the [AWS IAM User Guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_version.html). - -Each document configuration may have one or more `statement` blocks, which -each accept the following arguments: - -* `sid` (Optional) - An ID for the policy statement. -* `effect` (Optional) - Either "Allow" or "Deny", to specify whether this - statement allows or denies the given actions. The default is "Allow". -* `actions` (Optional) - A list of actions that this statement either allows - or denies. For example, ``["ec2:RunInstances", "s3:*"]``. -* `not_actions` (Optional) - A list of actions that this statement does *not* - apply to. Used to apply a policy statement to all actions *except* those - listed. -* `resources` (Optional) - A list of resource ARNs that this statement applies - to. This is required by AWS if used for an IAM policy. -* `not_resources` (Optional) - A list of resource ARNs that this statement - does *not* apply to. Used to apply a policy statement to all resources - *except* those listed. -* `principals` (Optional) - A nested configuration block (described below) - specifying a principal (or principal pattern) to which this statement applies. 
-* `not_principals` (Optional) - Like `principals` except gives principals that - the statement does *not* apply to. -* `condition` (Optional) - A nested configuration block (described below) - that defines a further, possibly-service-specific condition that constrains - whether this statement applies. - -Each policy may have either zero or more `principals` blocks or zero or more -`not_principals` blocks, both of which each accept the following arguments: - -* `type` (Required) The type of principal. For AWS ARNs this is "AWS". For AWS services (e.g. Lambda), this is "Service". For Federated access the type is "Federated". -* `identifiers` (Required) List of identifiers for principals. When `type` - is "AWS", these are IAM user or role ARNs. When `type` is "Service", these are AWS Service roles e.g. `lambda.amazonaws.com`. When `type` is "Federated", these are web identity users or SAML provider ARNs. - -For further examples or information about AWS principals then please refer to the [documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html). - -Each policy statement may have zero or more `condition` blocks, which each -accept the following arguments: - -* `test` (Required) The name of the - [IAM condition operator](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_condition_operators.html) - to evaluate. -* `variable` (Required) The name of a - [Context Variable](http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#AvailableKeys) - to apply the condition to. Context variables may either be standard AWS - variables starting with `aws:`, or service-specific variables prefixed with - the service name. -* `values` (Required) The values to evaluate the condition against. If multiple - values are provided, the condition matches if at least one of them applies. - (That is, the tests are combined with the "OR" boolean operation.) - -When multiple `condition` blocks are provided, they must *all* evaluate to true -for the policy statement to apply. (In other words, the conditions are combined -with the "AND" boolean operation.) - -## Context Variable Interpolation - -The IAM policy document format allows context variables to be interpolated -into various strings within a statement. The native IAM policy document format -uses `${...}`-style syntax that is in conflict with Terraform's interpolation -syntax, so this data source instead uses `&{...}` syntax for interpolations that -should be processed by AWS rather than by Terraform. - -## Wildcard Principal - -In order to define wildcard principal (a.k.a. anonymous user) use `type = "*"` and -`identifiers = ["*"]`. In that case the rendered json will contain `"Principal": "*"`. -Note, that even though the [IAM Documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html) -states that `"Principal": "*"` and `"Principal": {"AWS": "*"}` are equivalent, -those principals have different behavior for IAM Role Trust Policy. Therefore -Terraform will normalize the principal field only in above-mentioned case and principals -like `type = "AWS"` and `identifiers = ["*"]` will be rendered as `"Principal": {"AWS": "*"}`. - -## Attributes Reference - -The following attribute is exported: - -* `json` - The above arguments serialized as a standard JSON policy document. 
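+
+A minimal sketch of the wildcard-principal distinction described above (the statement bodies are illustrative assumptions):
+
+```hcl
+data "aws_iam_policy_document" "anonymous_sketch" {
+  statement {
+    actions   = ["s3:GetObject"]
+    resources = ["*"]
+
+    # Renders as "Principal": "*" in the generated JSON.
+    principals {
+      type        = "*"
+      identifiers = ["*"]
+    }
+  }
+}
+
+data "aws_iam_policy_document" "any_aws_principal_sketch" {
+  statement {
+    actions   = ["s3:GetObject"]
+    resources = ["*"]
+
+    # Renders as "Principal": {"AWS": "*"}, which IAM role trust
+    # policies treat differently from the plain "*" form above.
+    principals {
+      type        = "AWS"
+      identifiers = ["*"]
+    }
+  }
+}
+```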
- -## Example with Multiple Principals +### Example Assume-Role Policy with Multiple Principals -Showing how you can use this as an assume role policy as well as showing how you can specify multiple principal blocks with different types. +You can specify multiple principal blocks with different types. You can also use this data source to generate an assume-role policy. ```hcl data "aws_iam_policy_document" "event_stream_bucket_role_assume_role_policy" { @@ -206,9 +102,7 @@ data "aws_iam_policy_document" "event_stream_bucket_role_assume_role_policy" { } ``` -## Example with Source and Override - -Showing how you can use `source_json` and `override_json` +### Example Using A Source Document ```hcl data "aws_iam_policy_document" "source" { @@ -218,7 +112,7 @@ data "aws_iam_policy_document" "source" { } statement { - sid = "SidToOverwrite" + sid = "SidToOverride" actions = ["s3:*"] resources = ["*"] @@ -229,7 +123,7 @@ data "aws_iam_policy_document" "source_json_example" { source_json = data.aws_iam_policy_document.source.json statement { - sid = "SidToOverwrite" + sid = "SidToOverride" actions = ["s3:*"] @@ -239,10 +133,39 @@ data "aws_iam_policy_document" "source_json_example" { ] } } +``` + +`data.aws_iam_policy_document.source_json_example.json` will evaluate to: +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Action": "ec2:*", + "Resource": "*" + }, + { + "Sid": "SidToOverride", + "Effect": "Allow", + "Action": "s3:*", + "Resource": [ + "arn:aws:s3:::somebucket/*", + "arn:aws:s3:::somebucket" + ] + } + ] +} +``` + +### Example Using An Override Document + +```hcl data "aws_iam_policy_document" "override" { statement { - sid = "SidToOverwrite" + sid = "SidToOverride" actions = ["s3:*"] resources = ["*"] @@ -258,7 +181,7 @@ data "aws_iam_policy_document" "override_json_example" { } statement { - sid = "SidToOverwrite" + sid = "SidToOverride" actions = ["s3:*"] @@ -270,31 +193,6 @@ data "aws_iam_policy_document" "override_json_example" { } ``` -`data.aws_iam_policy_document.source_json_example.json` will evaluate to: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "", - "Effect": "Allow", - "Action": "ec2:*", - "Resource": "*" - }, - { - "Sid": "SidToOverwrite", - "Effect": "Allow", - "Action": "s3:*", - "Resource": [ - "arn:aws:s3:::somebucket/*", - "arn:aws:s3:::somebucket" - ] - } - ] -} -``` - `data.aws_iam_policy_document.override_json_example.json` will evaluate to: ```json @@ -308,7 +206,7 @@ data "aws_iam_policy_document" "override_json_example" { "Resource": "*" }, { - "Sid": "SidToOverwrite", + "Sid": "SidToOverride", "Effect": "Allow", "Action": "s3:*", "Resource": "*" @@ -317,11 +215,9 @@ data "aws_iam_policy_document" "override_json_example" { } ``` -You can also combine `source_json` and `override_json` in the same document. - -## Example without Statement +### Example with Both Source and Override Documents -Use without a `statement`: +You can also combine `source_json` and `override_json` in the same document. ```hcl data "aws_iam_policy_document" "source" { @@ -362,14 +258,9 @@ data "aws_iam_policy_document" "politik" { } ``` -## Combining multiple documents - -Multiple documents can be combined using the `source_json_list` or -`override_json_list` attributes. `source_json_list` requires all documents -to have unique `sid`s, while `override_json_list` will iteratively overwrite -matching `sid`s. 
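+
+Whichever merge attribute is used, the rendered result is consumed like any other generated document; a minimal sketch (the policy name is an illustrative assumption) attaching the `combined` document defined in the merging examples below:
+
+```hcl
+resource "aws_iam_policy" "combined_sketch" {
+  name   = "combined-policy-sketch"
+  policy = data.aws_iam_policy_document.combined.json
+}
+```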
+### Example of Merging Source Documents -Examble of `source_json_list`: +Multiple documents can be combined using the `source_policy_documents` or `override_policy_documents` attributes. `source_policy_documents` requires that all documents have unique Sids, while `override_policy_documents` will iteratively override matching Sids. ```hcl data "aws_iam_policy_document" "source_one" { @@ -401,7 +292,7 @@ data "aws_iam_policy_document" "source_two" { } data "aws_iam_policy_document" "combined" { - source_json_list = [ + source_policy_documents = [ data.aws_iam_policy_document.source_one.json, data.aws_iam_policy_document.source_two.json ] @@ -442,7 +333,7 @@ data "aws_iam_policy_document" "combined" { } ``` -Examble of `override_json_list`: +### Example of Merging Override Documents ```hcl data "aws_iam_policy_document" "policy_one" { @@ -482,7 +373,7 @@ data "aws_iam_policy_document" "policy_three" { } data "aws_iam_policy_document" "combined" { - override_json_list = [ + override_policy_documents = [ data.aws_iam_policy_document.policy_one.json, data.aws_iam_policy_document.policy_two.json, data.aws_iam_policy_document.policy_three.json @@ -525,3 +416,58 @@ data "aws_iam_policy_document" "combined" { ] } ``` + +## Argument Reference + +The following arguments are optional: + +* `override_json` (Optional) - IAM policy document whose statements with non-blank `sid`s will override statements with the same `sid` in the exported document including any defined by the `override_policy_documents` argument. Statements without a `sid` cannot be overridden. +* `override_policy_documents` (Optional) - List of IAM policy documents that are merged together into the exported document, potentially overriding previously defined statements with the same `sid`s. +* `policy_id` (Optional) - ID for the policy document. +* `source_json` (Optional) - IAM policy document used as a base for the exported policy document. +* `source_policy_documents` (Optional) - List of IAM policy documents that are merged together into the exported document. Statements defined in `source_policy_documents` or `source_json` must have unique `sid`s. Override statements with the same `sid` will override source statements. Statements without a `sid` cannot be overridden. +* `statement` (Optional) - Configuration block for a policy statement. Detailed below. +* `version` (Optional) - IAM policy document version. Valid values are `2008-10-17` and `2012-10-17`. Defaults to `2012-10-17`. For more information, see the [AWS IAM User Guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_version.html). + +### `statement` + +The following arguments are optional: + +* `actions` (Optional) - List of actions that this statement either allows or denies. For example, `["ec2:RunInstances", "s3:*"]`. +* `condition` (Optional) - Configuration block for a condition. Detailed below. +* `effect` (Optional) - Whether this statement allows or denies the given actions. Valid values are `Allow` and `Deny`. Defaults to `Allow`. +* `not_actions` (Optional) - List of actions that this statement does *not* apply to. Use to apply a policy statement to all actions *except* those listed. +* `not_principals` (Optional) - Like `principals` except these are principals that the statement does *not* apply to. +* `not_resources` (Optional) - List of resource ARNs that this statement does *not* apply to. Use to apply a policy statement to all resources *except* those listed. +* `principals` (Optional) - Configuration block for principals. 
Detailed below. +* `resources` (Optional) - List of resource ARNs that this statement applies to. This is required by AWS if used for an IAM policy. +* `sid` (Optional) - Sid (statement ID) is an identifier for a policy statement. + +### `condition` + +A `condition` constrains whether a statement applies in a particular situation. Conditions can be specific to an AWS service. When using multiple `condition` blocks, they must *all* evaluate to true for the policy statement to apply. In other words, AWS evaluates the conditions as though with an "AND" boolean operation. + +The following arguments are required: + +* `test` (Required) Name of the [IAM condition operator](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_condition_operators.html) to evaluate. +* `values` (Required) Values to evaluate the condition against. If multiple values are provided, the condition matches if at least one of them applies. That is, AWS evaluates multiple values as though using an "OR" boolean operation. +* `variable` (Required) Name of a [Context Variable](http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#AvailableKeys) to apply the condition to. Context variables may either be standard AWS variables starting with `aws:` or service-specific variables prefixed with the service name. + +### `principals` and `not_principals` + +The `principals` and `not_principals` arguments define to whom a statement applies or does not apply, respectively. + +~> **NOTE**: Even though the [IAM Documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html) states that `"Principal": "*"` and `"Principal": {"AWS": "*"}` are equivalent, those principal elements have different behavior in some situations, e.g. IAM Role Trust Policy. To have Terraform render JSON containing `"Principal": "*"`, use `type = "*"` and `identifiers = ["*"]`. To have Terraform render JSON containing `"Principal": {"AWS": "*"}`, use `type = "AWS"` and `identifiers = ["*"]`. + +-> For more information about AWS principals, refer to the [AWS Identity and Access Management User Guide: AWS JSON policy elements: Principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html). + +The following arguments are required: + +* `identifiers` (Required) List of identifiers for principals. When `type` is `AWS`, these are IAM principal ARNs, e.g. `arn:aws:iam::12345678901:role/yak-role`. When `type` is `Service`, these are AWS Service roles, e.g. `lambda.amazonaws.com`. When `type` is `Federated`, these are web identity users or SAML provider ARNs, e.g. `accounts.google.com` or `arn:aws:iam::12345678901:saml-provider/yak-saml-provider`. When `type` is `CanonicalUser`, these are [canonical user IDs](https://docs.aws.amazon.com/general/latest/gr/acct-identifiers.html#FindingCanonicalId), e.g. `79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be`. +* `type` (Required) Type of principal. Valid values include `AWS`, `Service`, `Federated`, and `CanonicalUser`. + +## Attributes Reference + +The following attribute is exported: + +* `json` - Standard JSON policy document rendered based on the arguments above. 
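+
+As a concrete illustration of the `condition` semantics above (the values here are illustrative assumptions): the two blocks below must both hold (logical AND), while the two CIDRs inside the second block are alternatives (logical OR).
+
+```hcl
+data "aws_iam_policy_document" "condition_sketch" {
+  statement {
+    actions   = ["s3:GetObject"]
+    resources = ["arn:aws:s3:::example-bucket/*"]
+
+    # Both condition blocks must be satisfied for the statement to apply.
+    condition {
+      test     = "StringEquals"
+      variable = "aws:PrincipalOrgID"
+      values   = ["o-exampleorgid"]
+    }
+
+    # Multiple values within one block match if any one of them applies.
+    condition {
+      test     = "IpAddress"
+      variable = "aws:SourceIp"
+      values   = ["10.0.0.0/16", "192.168.1.0/24"]
+    }
+  }
+}
+```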
From 1e07711e79d3e94a4248cd982bad044401dbe3d8 Mon Sep 17 00:00:00 2001 From: Bill Rich Date: Mon, 25 Jan 2021 16:13:07 -0800 Subject: [PATCH 1063/1212] Add cloudfront_cache_policy resource --- aws/cloudfront_cache_policy_structure.go | 230 ++++++++++++++++++ ...nt_distribution_configuration_structure.go | 25 +- aws/provider.go | 1 + aws/resource_aws_cloudfront_cache_policy.go | 223 +++++++++++++++++ ...source_aws_cloudfront_cache_policy_test.go | 208 ++++++++++++++++ aws/resource_aws_cloudfront_distribution.go | 21 +- ...source_aws_cloudfront_distribution_test.go | 113 +++++++++ .../r/cloudfront_cache_policy.html.markdown | 93 +++++++ .../r/cloudfront_distribution.html.markdown | 3 + 9 files changed, 910 insertions(+), 7 deletions(-) create mode 100644 aws/cloudfront_cache_policy_structure.go create mode 100644 aws/resource_aws_cloudfront_cache_policy.go create mode 100644 aws/resource_aws_cloudfront_cache_policy_test.go create mode 100644 website/docs/r/cloudfront_cache_policy.html.markdown diff --git a/aws/cloudfront_cache_policy_structure.go b/aws/cloudfront_cache_policy_structure.go new file mode 100644 index 00000000000..67e2c0cac78 --- /dev/null +++ b/aws/cloudfront_cache_policy_structure.go @@ -0,0 +1,230 @@ +package aws + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudfront" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func expandCloudFrontCachePolicyCookieNames(cookieNamesFlat map[string]interface{}) *cloudfront.CookieNames { + cookieNames := &cloudfront.CookieNames{} + + var newCookieItems []*string + for _, cookie := range cookieNamesFlat["items"].(*schema.Set).List() { + newCookieItems = append(newCookieItems, aws.String(cookie.(string))) + } + cookieNames.Items = newCookieItems + cookieNames.Quantity = aws.Int64(int64(len(newCookieItems))) + + return cookieNames +} + +func expandCloudFrontCachePolicyCookiesConfig(cookiesConfigFlat map[string]interface{}) *cloudfront.CachePolicyCookiesConfig { + cookies := &cloudfront.CookieNames{ + Quantity: aws.Int64(int64(0)), + } + + if cookiesFlat, ok := cookiesConfigFlat["cookies"].([]interface{}); ok && len(cookiesFlat) == 1 { + cookies = expandCloudFrontCachePolicyCookieNames(cookiesFlat[0].(map[string]interface{})) + } else { + cookies = nil + } + + cookiesConfig := &cloudfront.CachePolicyCookiesConfig{ + CookieBehavior: aws.String(cookiesConfigFlat["cookie_behavior"].(string)), + Cookies: cookies, + } + + return cookiesConfig +} + +func expandCloudFrontCachePolicyHeaders(headerNamesFlat map[string]interface{}) *cloudfront.Headers { + headers := &cloudfront.Headers{} + + var newHeaderItems []*string + for _, header := range headerNamesFlat["items"].(*schema.Set).List() { + newHeaderItems = append(newHeaderItems, aws.String(header.(string))) + } + headers.Items = newHeaderItems + headers.Quantity = aws.Int64(int64(len(newHeaderItems))) + + return headers +} + +func expandCloudFrontCachePolicyHeadersConfig(headersConfigFlat map[string]interface{}) *cloudfront.CachePolicyHeadersConfig { + headers := &cloudfront.Headers{} + + if headersFlat, ok := headersConfigFlat["headers"].([]interface{}); ok && len(headersFlat) == 1 && headersConfigFlat["header_behavior"] != "none" { + headers = expandCloudFrontCachePolicyHeaders(headersFlat[0].(map[string]interface{})) + } else { + headers = nil + } + + headersConfig := &cloudfront.CachePolicyHeadersConfig{ + HeaderBehavior: aws.String(headersConfigFlat["header_behavior"].(string)), + Headers: headers, + } + + return headersConfig +} + 
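+// Note: the cookie and header expanders above intentionally leave the inner
+// names block nil (rather than an empty list) when nothing is configured --
+// or, for headers, when the behavior is "none" -- so the field is omitted
+// from the API request entirely instead of being sent as an empty Items array.
+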
+func expandCloudFrontCachePolicyQueryStringNames(queryStringNamesFlat map[string]interface{}) *cloudfront.QueryStringNames { + queryStringNames := &cloudfront.QueryStringNames{} + + var newQueryStringItems []*string + for _, queryString := range queryStringNamesFlat["items"].(*schema.Set).List() { + newQueryStringItems = append(newQueryStringItems, aws.String(queryString.(string))) + } + queryStringNames.Items = newQueryStringItems + queryStringNames.Quantity = aws.Int64(int64(len(newQueryStringItems))) + + return queryStringNames +} + +func expandCloudFrontCachePolicyQueryStringConfig(queryStringConfigFlat map[string]interface{}) *cloudfront.CachePolicyQueryStringsConfig { + queryStrings := &cloudfront.QueryStringNames{ + Quantity: aws.Int64(int64(0)), + } + + if queryStringFlat, ok := queryStringConfigFlat["query_strings"].([]interface{}); ok && len(queryStringFlat) == 1 { + queryStrings = expandCloudFrontCachePolicyQueryStringNames(queryStringFlat[0].(map[string]interface{})) + } else { + queryStrings = nil + } + + queryStringConfig := &cloudfront.CachePolicyQueryStringsConfig{ + QueryStringBehavior: aws.String(queryStringConfigFlat["query_string_behavior"].(string)), + QueryStrings: queryStrings, + } + + return queryStringConfig +} + +func expandCloudFrontCachePolicyParametersConfig(parameters map[string]interface{}) *cloudfront.ParametersInCacheKeyAndForwardedToOrigin { + var cookiesConfig *cloudfront.CachePolicyCookiesConfig + var headersConfig *cloudfront.CachePolicyHeadersConfig + var queryStringsConfig *cloudfront.CachePolicyQueryStringsConfig + + if cookiesFlat, ok := parameters["cookies_config"].([]interface{}); ok && len(cookiesFlat) == 1 { + cookiesConfig = expandCloudFrontCachePolicyCookiesConfig(cookiesFlat[0].(map[string]interface{})) + } + + if headersFlat, ok := parameters["headers_config"].([]interface{}); ok && len(headersFlat) == 1 { + headersConfig = expandCloudFrontCachePolicyHeadersConfig(headersFlat[0].(map[string]interface{})) + } + + if queryStringsFlat, ok := parameters["query_strings_config"].([]interface{}); ok && len(queryStringsFlat) == 1 { + queryStringsConfig = expandCloudFrontCachePolicyQueryStringConfig(queryStringsFlat[0].(map[string]interface{})) + } + + parametersConfig := &cloudfront.ParametersInCacheKeyAndForwardedToOrigin{ + CookiesConfig: cookiesConfig, + EnableAcceptEncodingBrotli: aws.Bool(parameters["enable_accept_encoding_brotli"].(bool)), + EnableAcceptEncodingGzip: aws.Bool(parameters["enable_accept_encoding_gzip"].(bool)), + HeadersConfig: headersConfig, + QueryStringsConfig: queryStringsConfig, + } + + return parametersConfig +} + +func expandCloudFrontCachePolicyConfig(d *schema.ResourceData) *cloudfront.CachePolicyConfig { + parametersConfig := &cloudfront.ParametersInCacheKeyAndForwardedToOrigin{} + + if parametersFlat, ok := d.GetOk("parameters_in_cache_key_and_forwarded_to_origin"); ok { + parametersConfig = expandCloudFrontCachePolicyParametersConfig(parametersFlat.([]interface{})[0].(map[string]interface{})) + } + cachePolicy := &cloudfront.CachePolicyConfig{ + Comment: aws.String(d.Get("comment").(string)), + DefaultTTL: aws.Int64(int64(d.Get("default_ttl").(int))), + MaxTTL: aws.Int64(int64(d.Get("max_ttl").(int))), + MinTTL: aws.Int64(int64(d.Get("min_ttl").(int))), + Name: aws.String(d.Get("name").(string)), + ParametersInCacheKeyAndForwardedToOrigin: parametersConfig, + } + + return cachePolicy +} + +func flattenCloudFrontCachePolicyCookiesConfig(cookiesConfig *cloudfront.CachePolicyCookiesConfig) []map[string]interface{} { + 
cookiesConfigFlat := map[string]interface{}{} + + cookies := []map[string]interface{}{} + if cookiesConfig.Cookies != nil { + cookies = []map[string]interface{}{ + { + "items": cookiesConfig.Cookies.Items, + }, + } + } + + cookiesConfigFlat["cookie_behavior"] = aws.StringValue(cookiesConfig.CookieBehavior) + cookiesConfigFlat["cookies"] = cookies + + return []map[string]interface{}{ + cookiesConfigFlat, + } +} + +func flattenCloudFrontCachePolicyHeadersConfig(headersConfig *cloudfront.CachePolicyHeadersConfig) []map[string]interface{} { + headersConfigFlat := map[string]interface{}{} + + headers := []map[string]interface{}{} + if headersConfig.Headers != nil { + headers = []map[string]interface{}{ + { + "items": headersConfig.Headers.Items, + }, + } + } + + headersConfigFlat["header_behavior"] = aws.StringValue(headersConfig.HeaderBehavior) + headersConfigFlat["headers"] = headers + + return []map[string]interface{}{ + headersConfigFlat, + } +} + +func flattenCloudFrontCachePolicyQueryStringsConfig(queryStringsConfig *cloudfront.CachePolicyQueryStringsConfig) []map[string]interface{} { + queryStringsConfigFlat := map[string]interface{}{} + + queryStrings := []map[string]interface{}{} + if queryStringsConfig.QueryStrings != nil { + queryStrings = []map[string]interface{}{ + { + "items": queryStringsConfig.QueryStrings.Items, + }, + } + } + + queryStringsConfigFlat["query_string_behavior"] = aws.StringValue(queryStringsConfig.QueryStringBehavior) + queryStringsConfigFlat["query_strings"] = queryStrings + + return []map[string]interface{}{ + queryStringsConfigFlat, + } +} + +func flattenParametersConfig(parametersConfig *cloudfront.ParametersInCacheKeyAndForwardedToOrigin) []map[string]interface{} { + parametersConfigFlat := map[string]interface{}{ + "enable_accept_encoding_brotli": aws.BoolValue(parametersConfig.EnableAcceptEncodingBrotli), + "enable_accept_encoding_gzip": aws.BoolValue(parametersConfig.EnableAcceptEncodingGzip), + "cookies_config": flattenCloudFrontCachePolicyCookiesConfig(parametersConfig.CookiesConfig), + "headers_config": flattenCloudFrontCachePolicyHeadersConfig(parametersConfig.HeadersConfig), + "query_strings_config": flattenCloudFrontCachePolicyQueryStringsConfig(parametersConfig.QueryStringsConfig), + } + + return []map[string]interface{}{ + parametersConfigFlat, + } +} + +func flattenCloudFrontCachePolicy(d *schema.ResourceData, cachePolicy *cloudfront.CachePolicyConfig) { + d.Set("comment", aws.StringValue(cachePolicy.Comment)) + d.Set("default_ttl", aws.Int64Value(cachePolicy.DefaultTTL)) + d.Set("max_ttl", aws.Int64Value(cachePolicy.MaxTTL)) + d.Set("min_ttl", aws.Int64Value(cachePolicy.MinTTL)) + d.Set("name", aws.StringValue(cachePolicy.Name)) + d.Set("parameters_in_cache_key_and_forwarded_to_origin", flattenParametersConfig(cachePolicy.ParametersInCacheKeyAndForwardedToOrigin)) +} diff --git a/aws/cloudfront_distribution_configuration_structure.go b/aws/cloudfront_distribution_configuration_structure.go index 6473b5ccef7..31c09f628b4 100644 --- a/aws/cloudfront_distribution_configuration_structure.go +++ b/aws/cloudfront_distribution_configuration_structure.go @@ -224,13 +224,28 @@ func expandCloudFrontDefaultCacheBehavior(m map[string]interface{}) *cloudfront. 
} func expandCacheBehavior(m map[string]interface{}) *cloudfront.CacheBehavior { + var forwardedValues *cloudfront.ForwardedValues + if forwardedValuesFlat, ok := m["forwarded_values"].([]interface{}); ok && len(forwardedValuesFlat) == 1 { + forwardedValues = expandForwardedValues(m["forwarded_values"].([]interface{})[0].(map[string]interface{})) + } + + minTTL := aws.Int64(int64(m["min_ttl"].(int))) + maxTTL := aws.Int64(int64(m["max_ttl"].(int))) + defaultTTL := aws.Int64(int64(m["default_ttl"].(int))) + if m["cache_policy_id"].(string) != "" { + minTTL = nil + maxTTL = nil + defaultTTL = nil + } + cb := &cloudfront.CacheBehavior{ + CachePolicyId: aws.String(m["cache_policy_id"].(string)), Compress: aws.Bool(m["compress"].(bool)), - DefaultTTL: aws.Int64(int64(m["default_ttl"].(int))), + DefaultTTL: defaultTTL, FieldLevelEncryptionId: aws.String(m["field_level_encryption_id"].(string)), - ForwardedValues: expandForwardedValues(m["forwarded_values"].([]interface{})[0].(map[string]interface{})), - MaxTTL: aws.Int64(int64(m["max_ttl"].(int))), - MinTTL: aws.Int64(int64(m["min_ttl"].(int))), + ForwardedValues: forwardedValues, + MaxTTL: maxTTL, + MinTTL: minTTL, OriginRequestPolicyId: aws.String(m["origin_request_policy_id"].(string)), TargetOriginId: aws.String(m["target_origin_id"].(string)), ViewerProtocolPolicy: aws.String(m["viewer_protocol_policy"].(string)), @@ -263,6 +278,7 @@ func expandCacheBehavior(m map[string]interface{}) *cloudfront.CacheBehavior { func flattenCloudFrontDefaultCacheBehavior(dcb *cloudfront.DefaultCacheBehavior) map[string]interface{} { m := map[string]interface{}{ + "cache_policy_id": aws.StringValue(dcb.CachePolicyId), "compress": aws.BoolValue(dcb.Compress), "field_level_encryption_id": aws.StringValue(dcb.FieldLevelEncryptionId), "viewer_protocol_policy": aws.StringValue(dcb.ViewerProtocolPolicy), @@ -302,6 +318,7 @@ func flattenCloudFrontDefaultCacheBehavior(dcb *cloudfront.DefaultCacheBehavior) func flattenCacheBehavior(cb *cloudfront.CacheBehavior) map[string]interface{} { m := make(map[string]interface{}) + m["cache_policy_id"] = aws.StringValue(cb.CachePolicyId) m["compress"] = aws.BoolValue(cb.Compress) m["field_level_encryption_id"] = aws.StringValue(cb.FieldLevelEncryptionId) m["viewer_protocol_policy"] = aws.StringValue(cb.ViewerProtocolPolicy) diff --git a/aws/provider.go b/aws/provider.go index 4b6f7ecf5b1..4b1c38dbc11 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -486,6 +486,7 @@ func Provider() *schema.Provider { "aws_cloudformation_stack": resourceAwsCloudFormationStack(), "aws_cloudformation_stack_set": resourceAwsCloudFormationStackSet(), "aws_cloudformation_stack_set_instance": resourceAwsCloudFormationStackSetInstance(), + "aws_cloudfront_cache_policy": resourceAwsCloudFrontCachePolicy(), "aws_cloudfront_distribution": resourceAwsCloudFrontDistribution(), "aws_cloudfront_origin_access_identity": resourceAwsCloudFrontOriginAccessIdentity(), "aws_cloudfront_origin_request_policy": resourceAwsCloudFrontOriginRequestPolicy(), diff --git a/aws/resource_aws_cloudfront_cache_policy.go b/aws/resource_aws_cloudfront_cache_policy.go new file mode 100644 index 00000000000..8ea69da67a1 --- /dev/null +++ b/aws/resource_aws_cloudfront_cache_policy.go @@ -0,0 +1,223 @@ +package aws + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudfront" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func resourceAwsCloudFrontCachePolicy() 
*schema.Resource { + return &schema.Resource{ + Create: resourceAwsCloudFrontCachePolicyCreate, + Read: resourceAwsCloudFrontCachePolicyRead, + Update: resourceAwsCloudFrontCachePolicyUpdate, + Delete: resourceAwsCloudFrontCachePolicyDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "comment": { + Type: schema.TypeString, + Optional: true, + }, + "default_ttl": { + Type: schema.TypeInt, + Optional: true, + Default: 86400, + }, + "max_ttl": { + Type: schema.TypeInt, + Optional: true, + Default: 31536000, + }, + "min_ttl": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "etag": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "parameters_in_cache_key_and_forwarded_to_origin": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cookies_config": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cookie_behavior": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"none", "whitelist", "allExcept", "all"}, false), + }, + "cookies": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "items": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + }, + }, + "enable_accept_encoding_brotli": { + Type: schema.TypeBool, + Optional: true, + }, + "enable_accept_encoding_gzip": { + Type: schema.TypeBool, + Optional: true, + }, + "headers_config": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "header_behavior": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"none", "whitelist"}, false), + }, + "headers": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "items": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + }, + }, + "query_strings_config": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "query_string_behavior": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"none", "whitelist", "allExcept", "all"}, false), + }, + "query_strings": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "items": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func resourceAwsCloudFrontCachePolicyCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudfrontconn + + request := &cloudfront.CreateCachePolicyInput{ + CachePolicyConfig: expandCloudFrontCachePolicyConfig(d), + } + + resp, err := conn.CreateCachePolicy(request) + + if err != nil { + return err + } + + d.SetId(aws.StringValue(resp.CachePolicy.Id)) + + return resourceAwsCloudFrontCachePolicyRead(d, meta) +} + +func resourceAwsCloudFrontCachePolicyRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudfrontconn + request 
:= &cloudfront.GetCachePolicyInput{ + Id: aws.String(d.Id()), + } + + resp, err := conn.GetCachePolicy(request) + if err != nil { + return err + } + d.Set("etag", aws.StringValue(resp.ETag)) + + flattenCloudFrontCachePolicy(d, resp.CachePolicy.CachePolicyConfig) + + return nil +} + +func resourceAwsCloudFrontCachePolicyUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudfrontconn + + request := &cloudfront.UpdateCachePolicyInput{ + CachePolicyConfig: expandCloudFrontCachePolicyConfig(d), + Id: aws.String(d.Id()), + IfMatch: aws.String(d.Get("etag").(string)), + } + + _, err := conn.UpdateCachePolicy(request) + if err != nil { + return err + } + + return resourceAwsCloudFrontCachePolicyRead(d, meta) +} + +func resourceAwsCloudFrontCachePolicyDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudfrontconn + + request := &cloudfront.DeleteCachePolicyInput{ + Id: aws.String(d.Id()), + IfMatch: aws.String(d.Get("etag").(string)), + } + + _, err := conn.DeleteCachePolicy(request) + if err != nil { + if isAWSErr(err, cloudfront.ErrCodeNoSuchCachePolicy, "") { + return nil + } + return err + } + + return nil +} diff --git a/aws/resource_aws_cloudfront_cache_policy_test.go b/aws/resource_aws_cloudfront_cache_policy_test.go new file mode 100644 index 00000000000..86f41e68ee1 --- /dev/null +++ b/aws/resource_aws_cloudfront_cache_policy_test.go @@ -0,0 +1,208 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/service/cloudfront" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccAWSCloudFrontCachePolicy_basic(t *testing.T) { + rInt := acctest.RandInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudFrontPublicKeyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCloudFrontCachePolicyConfig(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "comment", "test comment"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "default_ttl", "50"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "min_ttl", "1"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "max_ttl", "100"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookie_behavior", "whitelist"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookies.0.items.0", "test"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.header_behavior", "whitelist"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.headers.0.items.0", "test"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_string_behavior", "whitelist"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_strings.0.items.0", 
"test"), + ), + }, + { + ResourceName: "aws_cloudfront_cache_policy.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{}, + }, + }, + }) +} + +func TestAccAWSCloudFrontCachePolicy_update(t *testing.T) { + rInt := acctest.RandInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudFrontPublicKeyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCloudFrontCachePolicyConfig(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "comment", "test comment"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "default_ttl", "50"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "min_ttl", "1"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "max_ttl", "100"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookie_behavior", "whitelist"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookies.0.items.0", "test"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.header_behavior", "whitelist"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.headers.0.items.0", "test"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_string_behavior", "whitelist"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_strings.0.items.0", "test"), + ), + }, + { + Config: testAccAWSCloudFrontCachePolicyConfigUpdate(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "comment", "test comment updated"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "default_ttl", "51"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "min_ttl", "2"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "max_ttl", "101"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookie_behavior", "allExcept"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookies.0.items.0", "test2"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.header_behavior", "none"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.headers.#", "0"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_string_behavior", "allExcept"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", 
"parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_strings.0.items.0", "test2"), + ), + }, + { + ResourceName: "aws_cloudfront_cache_policy.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{}, + }, + }, + }) +} + +func TestAccAWSCloudFrontCachePolicy_noneBehavior(t *testing.T) { + rInt := acctest.RandInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudFrontPublicKeyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCloudFrontCachePolicyConfigNoneBehavior(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "comment", "test comment"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "default_ttl", "50"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "min_ttl", "1"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "max_ttl", "100"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookie_behavior", "none"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookies.#", "0"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.header_behavior", "none"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.headers.#", "0"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_string_behavior", "none"), + resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_strings.#", "0"), + ), + }, + { + ResourceName: "aws_cloudfront_cache_policy.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{}, + }, + }, + }) +} + +func testAccAWSCloudFrontCachePolicyConfig(rInt int) string { + return fmt.Sprintf(` +resource "aws_cloudfront_cache_policy" "example" { + name = "test-policy%[1]d" + comment = "test comment" + default_ttl = 50 + max_ttl = 100 + min_ttl = 1 + parameters_in_cache_key_and_forwarded_to_origin { + cookies_config { + cookie_behavior = "whitelist" + cookies { + items = ["test"] + } + } + headers_config { + header_behavior = "whitelist" + headers { + items = ["test"] + } + } + query_strings_config { + query_string_behavior = "whitelist" + query_strings { + items = ["test"] + } + } + } +} +`, rInt) +} + +func testAccAWSCloudFrontCachePolicyConfigUpdate(rInt int) string { + return fmt.Sprintf(` +resource "aws_cloudfront_cache_policy" "example" { + name = "test-policy-updated%[1]d" + comment = "test comment updated" + default_ttl = 51 + max_ttl = 101 + min_ttl = 2 + parameters_in_cache_key_and_forwarded_to_origin { + cookies_config { + cookie_behavior = "allExcept" + cookies { + items = ["test2"] + } + } + headers_config { + header_behavior = "none" + } + query_strings_config { + query_string_behavior = "allExcept" + query_strings { + items = ["test2"] + } + } + } +} +`, rInt) +} + +func 
testAccAWSCloudFrontCachePolicyConfigNoneBehavior(rInt int) string { + return fmt.Sprintf(` +resource "aws_cloudfront_cache_policy" "example" { + name = "test-policy-updated%[1]d" + comment = "test comment" + default_ttl = 50 + max_ttl = 100 + min_ttl = 1 + parameters_in_cache_key_and_forwarded_to_origin { + cookies_config { + cookie_behavior = "none" + } + headers_config { + header_behavior = "none" + } + query_strings_config { + query_string_behavior = "none" + } + } +} +`, rInt) +} diff --git a/aws/resource_aws_cloudfront_distribution.go b/aws/resource_aws_cloudfront_distribution.go index f63cfd2745e..2ab3c96aa17 100644 --- a/aws/resource_aws_cloudfront_distribution.go +++ b/aws/resource_aws_cloudfront_distribution.go @@ -58,6 +58,10 @@ func resourceAwsCloudFrontDistribution() *schema.Resource { Required: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + "cache_policy_id": { + Type: schema.TypeString, + Optional: true, + }, "compress": { Type: schema.TypeBool, Optional: true, @@ -66,7 +70,7 @@ func resourceAwsCloudFrontDistribution() *schema.Resource { "default_ttl": { Type: schema.TypeInt, Optional: true, - Default: 86400, + Computed: true, }, "field_level_encryption_id": { Type: schema.TypeString, @@ -74,7 +78,7 @@ func resourceAwsCloudFrontDistribution() *schema.Resource { }, "forwarded_values": { Type: schema.TypeList, - Required: true, + Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -104,6 +108,7 @@ func resourceAwsCloudFrontDistribution() *schema.Resource { "headers": { Type: schema.TypeSet, Optional: true, + Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, "query_string": { @@ -113,6 +118,7 @@ func resourceAwsCloudFrontDistribution() *schema.Resource { "query_string_cache_keys": { Type: schema.TypeList, Optional: true, + Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, }, @@ -144,7 +150,7 @@ func resourceAwsCloudFrontDistribution() *schema.Resource { "max_ttl": { Type: schema.TypeInt, Optional: true, - Default: 31536000, + Computed: true, }, "min_ttl": { Type: schema.TypeInt, @@ -224,6 +230,10 @@ func resourceAwsCloudFrontDistribution() *schema.Resource { Required: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + "cache_policy_id": { + Type: schema.TypeString, + Optional: true, + }, "compress": { Type: schema.TypeBool, Optional: true, @@ -262,6 +272,7 @@ func resourceAwsCloudFrontDistribution() *schema.Resource { "whitelisted_names": { Type: schema.TypeSet, Optional: true, + Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, }, @@ -270,6 +281,7 @@ func resourceAwsCloudFrontDistribution() *schema.Resource { "headers": { Type: schema.TypeSet, Optional: true, + Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, "query_string": { @@ -279,6 +291,7 @@ func resourceAwsCloudFrontDistribution() *schema.Resource { "query_string_cache_keys": { Type: schema.TypeList, Optional: true, + Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, }, @@ -332,6 +345,7 @@ func resourceAwsCloudFrontDistribution() *schema.Resource { "trusted_signers": { Type: schema.TypeList, Optional: true, + Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, "viewer_protocol_policy": { @@ -529,6 +543,7 @@ func resourceAwsCloudFrontDistribution() *schema.Resource { "locations": { Type: schema.TypeSet, Optional: true, + Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, "restriction_type": { diff --git a/aws/resource_aws_cloudfront_distribution_test.go 
b/aws/resource_aws_cloudfront_distribution_test.go index 29badfe2ce8..f412c40ee9d 100644 --- a/aws/resource_aws_cloudfront_distribution_test.go +++ b/aws/resource_aws_cloudfront_distribution_test.go @@ -310,6 +310,35 @@ func TestAccAWSCloudFrontDistribution_orderedCacheBehavior(t *testing.T) { }) } +func TestAccAWSCloudFrontDistribution_orderedCacheBehaviorCachePolicy(t *testing.T) { + var distribution cloudfront.Distribution + resourceName := "aws_cloudfront_distribution.main" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudFrontDistributionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCloudFrontDistributionOrderedCacheBehaviorCachePolicy, + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudFrontDistributionExists(resourceName, &distribution), + resource.TestCheckResourceAttr(resourceName, "ordered_cache_behavior.0.path_pattern", "images2/*.jpg"), + resource.TestMatchResourceAttr(resourceName, "ordered_cache_behavior.0.cache_policy_id", regexp.MustCompile(`^[a-z0-9]+`)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "retain_on_delete", + "wait_for_deployment", + }, + }, + }, + }) +} + func TestAccAWSCloudFrontDistribution_Origin_EmptyDomainName(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, @@ -2053,6 +2082,90 @@ resource "aws_cloudfront_distribution" "main" { } `, acctest.RandInt(), testAccAWSCloudFrontDistributionRetainConfig()) +var testAccAWSCloudFrontDistributionOrderedCacheBehaviorCachePolicy = fmt.Sprintf(` +variable rand_id { + default = %d +} + +resource "aws_cloudfront_distribution" "main" { + origin { + domain_name = "www.hashicorp.com" + origin_id = "myCustomOrigin" + + custom_origin_config { + http_port = 80 + https_port = 443 + origin_protocol_policy = "http-only" + origin_ssl_protocols = ["SSLv3", "TLSv1"] + } + } + + enabled = true + comment = "Some comment" + + default_cache_behavior { + allowed_methods = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"] + cached_methods = ["GET", "HEAD"] + target_origin_id = "myCustomOrigin" + smooth_streaming = true + + forwarded_values { + query_string = false + + cookies { + forward = "all" + } + } + + viewer_protocol_policy = "allow-all" + } + + ordered_cache_behavior { + allowed_methods = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"] + cached_methods = ["GET", "HEAD"] + target_origin_id = "myCustomOrigin" + + cache_policy_id = aws_cloudfront_cache_policy.cache_policy.id + + viewer_protocol_policy = "allow-all" + path_pattern = "images2/*.jpg" + } + + price_class = "PriceClass_All" + + restrictions { + geo_restriction { + restriction_type = "none" + } + } + + viewer_certificate { + cloudfront_default_certificate = true + } + + %s +} + +resource "aws_cloudfront_cache_policy" "cache_policy" { + name = "test-policy%[1]d" + comment = "test comment" + default_ttl = 50 + max_ttl = 100 + parameters_in_cache_key_and_forwarded_to_origin { + cookies_config { + cookie_behavior = "none" + } + headers_config { + header_behavior = "none" + } + query_strings_config { + query_string_behavior = "none" + } + } +} + +`, acctest.RandInt(), testAccAWSCloudFrontDistributionRetainConfig()) + var 
testAccAWSCloudFrontDistributionOriginGroupsConfig = `
variable rand_id {
  default = %d
diff --git a/website/docs/r/cloudfront_cache_policy.html.markdown b/website/docs/r/cloudfront_cache_policy.html.markdown
new file mode 100644
index 00000000000..2a8e0be984a
--- /dev/null
+++ b/website/docs/r/cloudfront_cache_policy.html.markdown
@@ -0,0 +1,93 @@
+---
+subcategory: "CloudFront"
+layout: "aws"
+page_title: "AWS: aws_cloudfront_cache_policy"
+description: |-
+  Provides a cache policy for a CloudFront distribution. When it’s attached to a cache behavior,
+  the cache policy determines the values that CloudFront includes in the cache key. These
+  values can include HTTP headers, cookies, and URL query strings. CloudFront uses the cache
+  key to find an object in its cache that it can return to the viewer. It also determines the
+  default, minimum, and maximum time to live (TTL) values that you want objects to stay in the
+  CloudFront cache.
+---
+
+# Resource: aws_cloudfront_cache_policy
+
+## Example Usage
+
+The following example below creates a CloudFront public key.
+
+```hcl
+resource "aws_cloudfront_cache_policy" "example" {
+  name = "example-policy"
+  comment = "test comment"
+  default_ttl = 50
+  max_ttl = 100
+  min_ttl = 1
+  parameters_in_cache_key_and_forwarded_to_origin {
+    cookies_config {
+      cookie_behavior = "whitelist"
+      cookies {
+        items = [ "example" ]
+      }
+    }
+    headers_config {
+      header_behavior = "whitelist"
+      headers {
+        items = [ "example" ]
+      }
+    }
+    query_strings_config {
+      query_string_behavior = "whitelist"
+      query_strings {
+        items = [ "example" ]
+      }
+    }
+  }
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) A unique name to identify the cache policy.
+* `min_ttl` - (Required) The minimum amount of time, in seconds, that you want objects to stay in the CloudFront cache before CloudFront sends another request to the origin to see if the object has been updated.
+* `max_ttl` - (Optional) The maximum amount of time, in seconds, that objects stay in the CloudFront cache before CloudFront sends another request to the origin to see if the object has been updated.
+* `default_ttl` - (Optional) The default amount of time, in seconds, that you want objects to stay in the CloudFront cache before CloudFront sends another request to the origin to see if the object has been updated.
+* `comment` - (Optional) A comment to describe the cache policy.
+* `parameters_in_cache_key_and_forwarded_to_origin` - (Optional) The HTTP headers, cookies, and URL query strings to include in the cache key. See [Parameters In Cache Key And Forwarded To Origin](#parameters-in-cache-key-and-forwarded-to-origin) for more information.
+
+### Parameters In Cache Key And Forwarded To Origin
+
+* `cookies_config` - (Required) An object that determines whether any cookies in viewer requests (and if so, which cookies) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Cookies Config](#cookies-config) for more information.
+* `headers_config` - (Required) An object that determines whether any HTTP headers (and if so, which headers) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Headers Config](#headers-config) for more information.
+* `query_strings_config` - (Required) An object that determines whether any URL query strings in viewer requests (and if so, which query strings) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Query Strings Config](#query-strings-config) for more information.
+* `enable_accept_encoding_brotli` - (Optional) A flag that can affect whether the Accept-Encoding HTTP header is included in the cache key and included in requests that CloudFront sends to the origin.
+* `enable_accept_encoding_gzip` - (Optional) A flag that can affect whether the Accept-Encoding HTTP header is included in the cache key and included in requests that CloudFront sends to the origin.
+
+### Cookies Config
+
+* `cookie_behavior` - (Required) Determines whether any cookies in viewer requests are included in the cache key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`, `allExcept`, `all`.
+* `cookies` - (Optional) An object that contains a list of cookie names. See [Items](#items) for more information.
+
+### Headers Config
+
+* `header_behavior` - (Required) Determines whether any HTTP headers are included in the cache key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`.
+* `headers` - (Optional) An object that contains a list of header names. See [Items](#items) for more information.
+
+### Query Strings Config
+
+* `query_string_behavior` - (Required) Determines whether any URL query strings in viewer requests are included in the cache key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`, `allExcept`, `all`.
+* `query_strings` - (Optional) An object that contains a list of query string names. See [Items](#items) for more information.
+
+### Items
+
+* `items` - (Required) A list of item names (cookies, headers, or query strings).
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `etag` - The current version of the cache policy.
+* `id` - The identifier for the cache policy.
diff --git a/website/docs/r/cloudfront_distribution.html.markdown b/website/docs/r/cloudfront_distribution.html.markdown
index 2c1b3b5eed3..1fcf48d7d19 100644
--- a/website/docs/r/cloudfront_distribution.html.markdown
+++ b/website/docs/r/cloudfront_distribution.html.markdown
@@ -271,6 +271,9 @@ of several sub-resources - these resources are laid out below.
 * `cached_methods` (Required) - Controls whether CloudFront caches the
   response to requests using the specified HTTP methods.
 
+* `cache_policy_id` (Optional) - The unique identifier of the cache policy that
+  is attached to the cache behavior.
+
 * `compress` (Optional) - Whether you want CloudFront to automatically
   compress content for web requests that include `Accept-Encoding: gzip` in
   the request header (default: `false`).
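The `cache_policy_id` argument introduced above replaces the legacy `forwarded_values` block and the per-behavior TTL arguments. The following is a minimal sketch of how the two resources fit together, not part of the patch itself: all names are placeholders, and the rest of the distribution (origin, `enabled`, `default_cache_behavior`, `restrictions`, `viewer_certificate`) is elided.

```hcl
resource "aws_cloudfront_cache_policy" "example" {
  name = "example-policy"

  parameters_in_cache_key_and_forwarded_to_origin {
    cookies_config {
      cookie_behavior = "none"
    }
    headers_config {
      header_behavior = "none"
    }
    query_strings_config {
      query_string_behavior = "none"
    }
  }
}

resource "aws_cloudfront_distribution" "example" {
  # origin, enabled, default_cache_behavior, restrictions, and
  # viewer_certificate omitted for brevity.

  ordered_cache_behavior {
    path_pattern           = "images/*"
    allowed_methods        = ["GET", "HEAD"]
    cached_methods         = ["GET", "HEAD"]
    target_origin_id       = "myOrigin"
    viewer_protocol_policy = "allow-all"

    # With cache_policy_id set, forwarded_values, min_ttl, max_ttl, and
    # default_ttl are omitted; the cache policy supplies those settings.
    cache_policy_id = aws_cloudfront_cache_policy.example.id
  }
}
```

This mirrors the acceptance test configuration in the patch, where only the `ordered_cache_behavior` references the policy while the `default_cache_behavior` still uses `forwarded_values`.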
From 921ffcb99319eb9dace2a69a47cacbb8817961af Mon Sep 17 00:00:00 2001 From: bill-rich Date: Wed, 27 Jan 2021 23:36:22 -0800 Subject: [PATCH 1064/1212] Fix copy and paste error --- website/docs/r/cloudfront_cache_policy.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/cloudfront_cache_policy.html.markdown b/website/docs/r/cloudfront_cache_policy.html.markdown index 2a8e0be984a..7612576e8f9 100644 --- a/website/docs/r/cloudfront_cache_policy.html.markdown +++ b/website/docs/r/cloudfront_cache_policy.html.markdown @@ -15,7 +15,7 @@ description: |- ## Example Usage -The following example below creates a CloudFront public key. +The following example below creates a CloudFront cache policy. ```hcl resource "aws_cloudfront_cache_policy" "example" { From 272c6b97988094b06342a7c36b485152ddd40ef2 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Thu, 28 Jan 2021 07:48:02 -0800 Subject: [PATCH 1065/1212] Fix formatting --- aws/cloudfront_cache_policy_structure.go | 10 +--- ...source_aws_cloudfront_distribution_test.go | 30 +++++----- .../r/cloudfront_cache_policy.html.markdown | 56 +++++++++---------- 3 files changed, 46 insertions(+), 50 deletions(-) diff --git a/aws/cloudfront_cache_policy_structure.go b/aws/cloudfront_cache_policy_structure.go index 67e2c0cac78..38f5d775d6d 100644 --- a/aws/cloudfront_cache_policy_structure.go +++ b/aws/cloudfront_cache_policy_structure.go @@ -20,9 +20,7 @@ func expandCloudFrontCachePolicyCookieNames(cookieNamesFlat map[string]interface } func expandCloudFrontCachePolicyCookiesConfig(cookiesConfigFlat map[string]interface{}) *cloudfront.CachePolicyCookiesConfig { - cookies := &cloudfront.CookieNames{ - Quantity: aws.Int64(int64(0)), - } + var cookies *cloudfront.CookieNames if cookiesFlat, ok := cookiesConfigFlat["cookies"].([]interface{}); ok && len(cookiesFlat) == 1 { cookies = expandCloudFrontCachePolicyCookieNames(cookiesFlat[0].(map[string]interface{})) @@ -52,7 +50,7 @@ func expandCloudFrontCachePolicyHeaders(headerNamesFlat map[string]interface{}) } func expandCloudFrontCachePolicyHeadersConfig(headersConfigFlat map[string]interface{}) *cloudfront.CachePolicyHeadersConfig { - headers := &cloudfront.Headers{} + var headers *cloudfront.Headers if headersFlat, ok := headersConfigFlat["headers"].([]interface{}); ok && len(headersFlat) == 1 && headersConfigFlat["header_behavior"] != "none" { headers = expandCloudFrontCachePolicyHeaders(headersFlat[0].(map[string]interface{})) @@ -82,9 +80,7 @@ func expandCloudFrontCachePolicyQueryStringNames(queryStringNamesFlat map[string } func expandCloudFrontCachePolicyQueryStringConfig(queryStringConfigFlat map[string]interface{}) *cloudfront.CachePolicyQueryStringsConfig { - queryStrings := &cloudfront.QueryStringNames{ - Quantity: aws.Int64(int64(0)), - } + var queryStrings *cloudfront.QueryStringNames if queryStringFlat, ok := queryStringConfigFlat["query_strings"].([]interface{}); ok && len(queryStringFlat) == 1 { queryStrings = expandCloudFrontCachePolicyQueryStringNames(queryStringFlat[0].(map[string]interface{})) diff --git a/aws/resource_aws_cloudfront_distribution_test.go b/aws/resource_aws_cloudfront_distribution_test.go index f412c40ee9d..8d210f69073 100644 --- a/aws/resource_aws_cloudfront_distribution_test.go +++ b/aws/resource_aws_cloudfront_distribution_test.go @@ -2147,21 +2147,21 @@ resource "aws_cloudfront_distribution" "main" { } resource "aws_cloudfront_cache_policy" "cache_policy" { - name = "test-policy%[1]d" - comment = "test comment" - default_ttl = 50 - 
max_ttl = 100 - parameters_in_cache_key_and_forwarded_to_origin { - cookies_config { - cookie_behavior = "none" - } - headers_config { - header_behavior = "none" - } - query_strings_config { - query_string_behavior = "none" - } - } + name = "test-policy%[1]d" + comment = "test comment" + default_ttl = 50 + max_ttl = 100 + parameters_in_cache_key_and_forwarded_to_origin { + cookies_config { + cookie_behavior = "none" + } + headers_config { + header_behavior = "none" + } + query_strings_config { + query_string_behavior = "none" + } + } } `, acctest.RandInt(), testAccAWSCloudFrontDistributionRetainConfig()) diff --git a/website/docs/r/cloudfront_cache_policy.html.markdown b/website/docs/r/cloudfront_cache_policy.html.markdown index 7612576e8f9..811aa26d4ce 100644 --- a/website/docs/r/cloudfront_cache_policy.html.markdown +++ b/website/docs/r/cloudfront_cache_policy.html.markdown @@ -18,33 +18,33 @@ description: |- The following example below creates a CloudFront cache policy. ```hcl -resource "aws_cloudfront_cache_policy" "example" { - name = "example-policy" - comment = "test comment" - default_ttl = 50 - max_ttl = 100 - min_ttl = 1 - parameters_in_cache_key_and_forwarded_to_origin { - cookies_config { - cookie_behavior = "whitelist" - cookies { - items = [ "example" ] - } - } - headers_config { - header_behavior = "whitelist" - headers { - items = [ "example" ] - } - } - query_strings_config { - query_string_behavior = "whitelist" - query_strings { - items = [ "example" ] - } - } - } -} +resource "aws_cloudfront_cache_policy" "example" { + name = "example-policy" + comment = "test comment" + default_ttl = 50 + max_ttl = 100 + min_ttl = 1 + parameters_in_cache_key_and_forwarded_to_origin { + cookies_config { + cookie_behavior = "whitelist" + cookies { + items = ["example"] + } + } + headers_config { + header_behavior = "whitelist" + headers { + items = ["example"] + } + } + query_strings_config { + query_string_behavior = "whitelist" + query_strings { + items = ["example"] + } + } + } +} ``` ## Argument Reference @@ -90,4 +90,4 @@ The following arguments are supported: In addition to all arguments above, the following attributes are exported: * `etag` - The current version of the cache policy. -* `id` - The identifier for the cache policy. +* `id` - The identifier for the cache policy. 
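Because the resource registers `schema.ImportStatePassthrough` as its importer, an existing cache policy can also be adopted into state by its ID. A hypothetical invocation follows; the policy ID shown is a placeholder, not a value from these patches.

```
$ terraform import aws_cloudfront_cache_policy.example b2884449-e4de-46a7-ac36-70bc7f1ddd6d
```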
From b3cf9d5508fffde52abb90415ac1eb09cef47386 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Thu, 28 Jan 2021 09:41:44 -0800 Subject: [PATCH 1066/1212] Add cloudfront_cache_policy data source --- ...data_source_aws_cloudfront_cache_policy.go | 185 ++++++++++++++++++ ...source_aws_cloudfront_cache_policy_test.go | 79 ++++++++ aws/provider.go | 1 + .../d/cloudfront_cache_policy.html.markdown | 65 ++++++ 4 files changed, 330 insertions(+) create mode 100644 aws/data_source_aws_cloudfront_cache_policy.go create mode 100644 aws/data_source_aws_cloudfront_cache_policy_test.go create mode 100644 website/docs/d/cloudfront_cache_policy.html.markdown diff --git a/aws/data_source_aws_cloudfront_cache_policy.go b/aws/data_source_aws_cloudfront_cache_policy.go new file mode 100644 index 00000000000..47cc025f3ef --- /dev/null +++ b/aws/data_source_aws_cloudfront_cache_policy.go @@ -0,0 +1,185 @@ +package aws + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudfront" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func dataSourceAwsCloudFrontCachePolicy() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsCloudFrontCachePolicyRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + ConflictsWith: []string{"id"}, + Optional: true, + }, + "id": { + Type: schema.TypeString, + ConflictsWith: []string{"name"}, + Optional: true, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + }, + "comment": { + Type: schema.TypeString, + Computed: true, + }, + "default_ttl": { + Type: schema.TypeInt, + Computed: true, + }, + "max_ttl": { + Type: schema.TypeInt, + Computed: true, + }, + "min_ttl": { + Type: schema.TypeInt, + Computed: true, + }, + "parameters_in_cache_key_and_forwarded_to_origin": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cookies_config": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cookie_behavior": { + Type: schema.TypeString, + Computed: true, + }, + "cookies": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "items": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + }, + }, + "enable_accept_encoding_brotli": { + Type: schema.TypeBool, + Computed: true, + }, + "enable_accept_encoding_gzip": { + Type: schema.TypeBool, + Computed: true, + }, + "headers_config": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "header_behavior": { + Type: schema.TypeString, + Computed: true, + }, + "headers": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "items": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + }, + }, + "query_strings_config": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "query_string_behavior": { + Type: schema.TypeString, + Computed: true, + }, + "query_strings": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "items": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func 
dataSourceAwsCloudFrontCachePolicyFindByName(d *schema.ResourceData, conn *cloudfront.CloudFront) error { + var cachePolicy *cloudfront.CachePolicy + request := &cloudfront.ListCachePoliciesInput{} + resp, err := conn.ListCachePolicies(request) + if err != nil { + return err + } + + for _, policySummary := range resp.CachePolicyList.Items { + if *policySummary.CachePolicy.CachePolicyConfig.Name == d.Get("name").(string) { + cachePolicy = policySummary.CachePolicy + break + } + } + + if cachePolicy != nil { + d.SetId(aws.StringValue(cachePolicy.Id)) + } + return nil +} + +func dataSourceAwsCloudFrontCachePolicyRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cloudfrontconn + + if d.Id() == "" { + if err := dataSourceAwsCloudFrontCachePolicyFindByName(d, conn); err != nil { + return err + } + } + + if d.Id() != "" { + d.Set("id", d.Id()) + request := &cloudfront.GetCachePolicyInput{ + Id: aws.String(d.Id()), + } + + resp, err := conn.GetCachePolicy(request) + if err != nil { + return err + } + d.Set("etag", aws.StringValue(resp.ETag)) + + flattenCloudFrontCachePolicy(d, resp.CachePolicy.CachePolicyConfig) + } + + return nil +} diff --git a/aws/data_source_aws_cloudfront_cache_policy_test.go b/aws/data_source_aws_cloudfront_cache_policy_test.go new file mode 100644 index 00000000000..b32a77853f0 --- /dev/null +++ b/aws/data_source_aws_cloudfront_cache_policy_test.go @@ -0,0 +1,79 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/service/cloudfront" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccAWSCloudFrontDataSourceCachePolicy_basic(t *testing.T) { + rInt := acctest.RandInt() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudFrontPublicKeyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCloudFrontCachePolicyDataSourceNameConfig(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.aws_cloudfront_cache_policy.example", "comment", "test comment"), + resource.TestCheckResourceAttr("data.aws_cloudfront_cache_policy.example", "default_ttl", "50"), + resource.TestCheckResourceAttr("data.aws_cloudfront_cache_policy.example", "min_ttl", "1"), + resource.TestCheckResourceAttr("data.aws_cloudfront_cache_policy.example", "max_ttl", "100"), + resource.TestCheckResourceAttr("data.aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookie_behavior", "whitelist"), + resource.TestCheckResourceAttr("data.aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookies.0.items.0", "test"), + resource.TestCheckResourceAttr("data.aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.header_behavior", "whitelist"), + resource.TestCheckResourceAttr("data.aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.headers.0.items.0", "test"), + resource.TestCheckResourceAttr("data.aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_string_behavior", "whitelist"), + resource.TestCheckResourceAttr("data.aws_cloudfront_cache_policy.example", 
"parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_strings.0.items.0", "test"), + ), + }, + { + ResourceName: "aws_cloudfront_cache_policy.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{}, + }, + }, + }) +} + +func testAccAWSCloudFrontCachePolicyDataSourceNameConfig(rInt int) string { + return fmt.Sprintf(` +data "aws_cloudfront_cache_policy" "example" { + name = aws_cloudfront_cache_policy.example.name +} + +resource "aws_cloudfront_cache_policy" "example" { + name = "test-policy%[1]d" + comment = "test comment" + default_ttl = 50 + max_ttl = 100 + min_ttl = 1 + parameters_in_cache_key_and_forwarded_to_origin { + cookies_config { + cookie_behavior = "whitelist" + cookies { + items = ["test"] + } + } + headers_config { + header_behavior = "whitelist" + headers { + items = ["test"] + } + } + query_strings_config { + query_string_behavior = "whitelist" + query_strings { + items = ["test"] + } + } + } +} +`, rInt) +} diff --git a/aws/provider.go b/aws/provider.go index 4b1c38dbc11..f4ee772900e 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -190,6 +190,7 @@ func Provider() *schema.Provider { "aws_canonical_user_id": dataSourceAwsCanonicalUserId(), "aws_cloudformation_export": dataSourceAwsCloudFormationExport(), "aws_cloudformation_stack": dataSourceAwsCloudFormationStack(), + "aws_cloudfront_cache_policy": dataSourceAwsCloudFrontCachePolicy(), "aws_cloudfront_distribution": dataSourceAwsCloudFrontDistribution(), "aws_cloudfront_origin_request_policy": dataSourceAwsCloudFrontOriginRequestPolicy(), "aws_cloudhsm_v2_cluster": dataSourceCloudHsmV2Cluster(), diff --git a/website/docs/d/cloudfront_cache_policy.html.markdown b/website/docs/d/cloudfront_cache_policy.html.markdown new file mode 100644 index 00000000000..a39554ef6e4 --- /dev/null +++ b/website/docs/d/cloudfront_cache_policy.html.markdown @@ -0,0 +1,65 @@ +--- +subcategory: "CloudFront" +layout: "aws" +page_title: "AWS: aws_cloudfront_cache_policy" +description: |- + Use this data source to retrieve information about a CloudFront cache policy. +--- + +# Resource: aws_cloudfront_cache_policy + +## Example Usage + +The following example below creates a CloudFront cache policy. + +```hcl +data "aws_cloudfront_cache_policy" "example" { + name = "example-policy" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Optional) A unique name to identify the cache policy. +* `id` - (Optional) The identifier for the cache policy. + +## Attributes Reference + +* `etag` - The current version of the cache policy. +* `min_ttl` - The minimum amount of time, in seconds, that you want objects to stay in the CloudFront cache before CloudFront sends another request to the origin to see if the object has been updated. +* `max_ttl` - The maximum amount of time, in seconds, that objects stay in the CloudFront cache before CloudFront sends another request to the origin to see if the object has been updated. +* `default_ttl` - The default amount of time, in seconds, that you want objects to stay in the CloudFront cache before CloudFront sends another request to the origin to see if the object has been updated. +* `comment` - A comment to describe the cache policy. +* `parameters_in_cache_key_and_forwarded_to_origin` - The HTTP headers, cookies, and URL query strings to include in the cache key. See [Parameters In Cache Key And Forwarded To Origin](#parameters-in-cache-key-and-forwarded-to-origin) for more information. 
+
+### Parameters In Cache Key And Forwarded To Origin
+
+* `cookies_config` - An object that determines whether any cookies in viewer requests (and if so, which cookies) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Cookies Config](#cookies-config) for more information.
+* `headers_config` - An object that determines whether any HTTP headers (and if so, which headers) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Headers Config](#headers-config) for more information.
+* `query_strings_config` - An object that determines whether any URL query strings in viewer requests (and if so, which query strings) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Query Strings Config](#query-strings-config) for more information.
+* `enable_accept_encoding_brotli` - A flag that can affect whether the Accept-Encoding HTTP header is included in the cache key and included in requests that CloudFront sends to the origin.
+* `enable_accept_encoding_gzip` - A flag that can affect whether the Accept-Encoding HTTP header is included in the cache key and included in requests that CloudFront sends to the origin.
+
+### Cookies Config
+
+* `cookie_behavior` - Determines whether any cookies in viewer requests are included in the cache key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`, `allExcept`, `all`.
+* `cookies` - An object that contains a list of cookie names. See [Items](#items) for more information.
+
+### Headers Config
+
+* `header_behavior` - Determines whether any HTTP headers are included in the cache key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`.
+* `headers` - An object that contains a list of header names. See [Items](#items) for more information.
+
+### Query Strings Config
+
+* `query_string_behavior` - Determines whether any URL query strings in viewer requests are included in the cache key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`, `allExcept`, `all`.
+* `query_strings` - An object that contains a list of query string names. See [Items](#items) for more information.
+
+### Items
+
+* `items` - A list of item names (cookies, headers, or query strings).
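A common use of this data source is wiring an existing policy into a distribution's cache behavior. The following is an illustrative sketch, not part of the patch: the policy name (here one of the AWS managed cache policies) and origin ID are placeholders, and the rest of the distribution configuration is assumed to exist.

```hcl
data "aws_cloudfront_cache_policy" "example" {
  # Managed-CachingOptimized is one of the AWS managed cache policies.
  name = "Managed-CachingOptimized"
}

resource "aws_cloudfront_distribution" "example" {
  # origin, enabled, restrictions, and viewer_certificate omitted for brevity.

  default_cache_behavior {
    allowed_methods        = ["GET", "HEAD"]
    cached_methods         = ["GET", "HEAD"]
    target_origin_id       = "myOrigin"
    viewer_protocol_policy = "allow-all"

    # The data source exposes the looked-up policy's ID via its id attribute.
    cache_policy_id = data.aws_cloudfront_cache_policy.example.id
  }
}
```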
+ + + From bf950e97c4e605650c047c949635d1b3b9df69e5 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 10 Feb 2021 15:09:36 -0500 Subject: [PATCH 1067/1212] tests/iam_policy_document: Clean up test names --- aws/data_source_aws_iam_policy_document_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/aws/data_source_aws_iam_policy_document_test.go b/aws/data_source_aws_iam_policy_document_test.go index ade95a96adb..33fcf5ddbc0 100644 --- a/aws/data_source_aws_iam_policy_document_test.go +++ b/aws/data_source_aws_iam_policy_document_test.go @@ -194,7 +194,7 @@ func TestAccAWSDataSourceIAMPolicyDocument_duplicateSid(t *testing.T) { } // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/10777 -func TestAccAWSDataSourceIAMPolicyDocument_Statement_Principal_Identifiers_StringAndSlice(t *testing.T) { +func TestAccAWSDataSourceIAMPolicyDocument_statementPrincipalIdentifiers_stringAndSlice(t *testing.T) { dataSourceName := "data.aws_iam_policy_document.test" resource.ParallelTest(t, resource.TestCase{ @@ -212,7 +212,7 @@ func TestAccAWSDataSourceIAMPolicyDocument_Statement_Principal_Identifiers_Strin } // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/10777 -func TestAccAWSDataSourceIAMPolicyDocument_Statement_Principal_Identifiers_MultiplePrincipals(t *testing.T) { +func TestAccAWSDataSourceIAMPolicyDocument_statementPrincipalIdentifiers_multiplePrincipals(t *testing.T) { dataSourceName := "data.aws_iam_policy_document.test" resource.ParallelTest(t, resource.TestCase{ @@ -229,7 +229,7 @@ func TestAccAWSDataSourceIAMPolicyDocument_Statement_Principal_Identifiers_Multi }) } -func TestAccAWSDataSourceIAMPolicyDocument_Statement_Principal_Identifiers_MultiplePrincipalsGov(t *testing.T) { +func TestAccAWSDataSourceIAMPolicyDocument_statementPrincipalIdentifiers_multiplePrincipalsGov(t *testing.T) { dataSourceName := "data.aws_iam_policy_document.test" resource.ParallelTest(t, resource.TestCase{ @@ -246,7 +246,7 @@ func TestAccAWSDataSourceIAMPolicyDocument_Statement_Principal_Identifiers_Multi }) } -func TestAccAWSDataSourceIAMPolicyDocument_Version_20081017(t *testing.T) { +func TestAccAWSDataSourceIAMPolicyDocument_version20081017(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, From 1b32de5663437914faec1eb43ee2776cbca887f4 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Wed, 3 Feb 2021 15:56:49 -0800 Subject: [PATCH 1068/1212] Address feedback from cache_policy code review --- aws/cloudfront_cache_policy_structure.go | 45 ++++------ ...nt_distribution_configuration_structure.go | 28 +++---- ...data_source_aws_cloudfront_cache_policy.go | 8 +- ...source_aws_cloudfront_cache_policy_test.go | 21 ++--- aws/resource_aws_cloudfront_cache_policy.go | 2 +- ...source_aws_cloudfront_cache_policy_test.go | 83 ++++++++++--------- .../d/cloudfront_cache_policy.html.markdown | 4 +- .../r/cloudfront_cache_policy.html.markdown | 4 +- 8 files changed, 90 insertions(+), 105 deletions(-) diff --git a/aws/cloudfront_cache_policy_structure.go b/aws/cloudfront_cache_policy_structure.go index 38f5d775d6d..40d9c85d8c5 100644 --- a/aws/cloudfront_cache_policy_structure.go +++ b/aws/cloudfront_cache_policy_structure.go @@ -20,17 +20,12 @@ func expandCloudFrontCachePolicyCookieNames(cookieNamesFlat map[string]interface } func expandCloudFrontCachePolicyCookiesConfig(cookiesConfigFlat map[string]interface{}) *cloudfront.CachePolicyCookiesConfig { - var cookies 
*cloudfront.CookieNames - - if cookiesFlat, ok := cookiesConfigFlat["cookies"].([]interface{}); ok && len(cookiesFlat) == 1 { - cookies = expandCloudFrontCachePolicyCookieNames(cookiesFlat[0].(map[string]interface{})) - } else { - cookies = nil - } - cookiesConfig := &cloudfront.CachePolicyCookiesConfig{ CookieBehavior: aws.String(cookiesConfigFlat["cookie_behavior"].(string)), - Cookies: cookies, + } + + if cookiesFlat, ok := cookiesConfigFlat["cookies"].([]interface{}); ok && len(cookiesFlat) == 1 { + cookiesConfig.Cookies = expandCloudFrontCachePolicyCookieNames(cookiesFlat[0].(map[string]interface{})) } return cookiesConfig @@ -50,17 +45,12 @@ func expandCloudFrontCachePolicyHeaders(headerNamesFlat map[string]interface{}) } func expandCloudFrontCachePolicyHeadersConfig(headersConfigFlat map[string]interface{}) *cloudfront.CachePolicyHeadersConfig { - var headers *cloudfront.Headers - - if headersFlat, ok := headersConfigFlat["headers"].([]interface{}); ok && len(headersFlat) == 1 && headersConfigFlat["header_behavior"] != "none" { - headers = expandCloudFrontCachePolicyHeaders(headersFlat[0].(map[string]interface{})) - } else { - headers = nil - } - headersConfig := &cloudfront.CachePolicyHeadersConfig{ HeaderBehavior: aws.String(headersConfigFlat["header_behavior"].(string)), - Headers: headers, + } + + if headersFlat, ok := headersConfigFlat["headers"].([]interface{}); ok && len(headersFlat) == 1 && headersConfigFlat["header_behavior"] != "none" { + headersConfig.Headers = expandCloudFrontCachePolicyHeaders(headersFlat[0].(map[string]interface{})) } return headersConfig @@ -80,17 +70,12 @@ func expandCloudFrontCachePolicyQueryStringNames(queryStringNamesFlat map[string } func expandCloudFrontCachePolicyQueryStringConfig(queryStringConfigFlat map[string]interface{}) *cloudfront.CachePolicyQueryStringsConfig { - var queryStrings *cloudfront.QueryStringNames - - if queryStringFlat, ok := queryStringConfigFlat["query_strings"].([]interface{}); ok && len(queryStringFlat) == 1 { - queryStrings = expandCloudFrontCachePolicyQueryStringNames(queryStringFlat[0].(map[string]interface{})) - } else { - queryStrings = nil - } - queryStringConfig := &cloudfront.CachePolicyQueryStringsConfig{ QueryStringBehavior: aws.String(queryStringConfigFlat["query_string_behavior"].(string)), - QueryStrings: queryStrings, + } + + if queryStringFlat, ok := queryStringConfigFlat["query_strings"].([]interface{}); ok && len(queryStringFlat) == 1 { + queryStringConfig.QueryStrings = expandCloudFrontCachePolicyQueryStringNames(queryStringFlat[0].(map[string]interface{})) } return queryStringConfig @@ -202,7 +187,7 @@ func flattenCloudFrontCachePolicyQueryStringsConfig(queryStringsConfig *cloudfro } } -func flattenParametersConfig(parametersConfig *cloudfront.ParametersInCacheKeyAndForwardedToOrigin) []map[string]interface{} { +func setParametersConfig(parametersConfig *cloudfront.ParametersInCacheKeyAndForwardedToOrigin) []map[string]interface{} { parametersConfigFlat := map[string]interface{}{ "enable_accept_encoding_brotli": aws.BoolValue(parametersConfig.EnableAcceptEncodingBrotli), "enable_accept_encoding_gzip": aws.BoolValue(parametersConfig.EnableAcceptEncodingGzip), @@ -216,11 +201,11 @@ func flattenParametersConfig(parametersConfig *cloudfront.ParametersInCacheKeyAn } } -func flattenCloudFrontCachePolicy(d *schema.ResourceData, cachePolicy *cloudfront.CachePolicyConfig) { +func setCloudFrontCachePolicy(d *schema.ResourceData, cachePolicy *cloudfront.CachePolicyConfig) { d.Set("comment", 
aws.StringValue(cachePolicy.Comment)) d.Set("default_ttl", aws.Int64Value(cachePolicy.DefaultTTL)) d.Set("max_ttl", aws.Int64Value(cachePolicy.MaxTTL)) d.Set("min_ttl", aws.Int64Value(cachePolicy.MinTTL)) d.Set("name", aws.StringValue(cachePolicy.Name)) - d.Set("parameters_in_cache_key_and_forwarded_to_origin", flattenParametersConfig(cachePolicy.ParametersInCacheKeyAndForwardedToOrigin)) + d.Set("parameters_in_cache_key_and_forwarded_to_origin", setParametersConfig(cachePolicy.ParametersInCacheKeyAndForwardedToOrigin)) } diff --git a/aws/cloudfront_distribution_configuration_structure.go b/aws/cloudfront_distribution_configuration_structure.go index 31c09f628b4..acd31d9e627 100644 --- a/aws/cloudfront_distribution_configuration_structure.go +++ b/aws/cloudfront_distribution_configuration_structure.go @@ -224,33 +224,23 @@ func expandCloudFrontDefaultCacheBehavior(m map[string]interface{}) *cloudfront. } func expandCacheBehavior(m map[string]interface{}) *cloudfront.CacheBehavior { - var forwardedValues *cloudfront.ForwardedValues - if forwardedValuesFlat, ok := m["forwarded_values"].([]interface{}); ok && len(forwardedValuesFlat) == 1 { - forwardedValues = expandForwardedValues(m["forwarded_values"].([]interface{})[0].(map[string]interface{})) - } - - minTTL := aws.Int64(int64(m["min_ttl"].(int))) - maxTTL := aws.Int64(int64(m["max_ttl"].(int))) - defaultTTL := aws.Int64(int64(m["default_ttl"].(int))) - if m["cache_policy_id"].(string) != "" { - minTTL = nil - maxTTL = nil - defaultTTL = nil - } - cb := &cloudfront.CacheBehavior{ CachePolicyId: aws.String(m["cache_policy_id"].(string)), Compress: aws.Bool(m["compress"].(bool)), DefaultTTL: defaultTTL, FieldLevelEncryptionId: aws.String(m["field_level_encryption_id"].(string)), - ForwardedValues: forwardedValues, - MaxTTL: maxTTL, - MinTTL: minTTL, + ForwardedValues: expandForwardedValues(m["forwarded_values"].([]interface{})[0].(map[string]interface{})), OriginRequestPolicyId: aws.String(m["origin_request_policy_id"].(string)), TargetOriginId: aws.String(m["target_origin_id"].(string)), ViewerProtocolPolicy: aws.String(m["viewer_protocol_policy"].(string)), } + if m["cache_policy_id"].(string) != "" { + cb.MinTTL = aws.Int64(int64(m["min_ttl"].(int))) + cb.MaxTTL = aws.Int64(int64(m["max_ttl"].(int))) + cb.DefaultTTL = aws.Int64(int64(m["default_ttl"].(int))) + } + if v, ok := m["trusted_signers"]; ok { cb.TrustedSigners = expandTrustedSigners(v.([]interface{})) } else { @@ -435,6 +425,10 @@ func flattenLambdaFunctionAssociation(lfa *cloudfront.LambdaFunctionAssociation) } func expandForwardedValues(m map[string]interface{}) *cloudfront.ForwardedValues { + if len(m) < 1 { + return nil + } + fv := &cloudfront.ForwardedValues{ QueryString: aws.Bool(m["query_string"].(bool)), } diff --git a/aws/data_source_aws_cloudfront_cache_policy.go b/aws/data_source_aws_cloudfront_cache_policy.go index 47cc025f3ef..f78c5225c29 100644 --- a/aws/data_source_aws_cloudfront_cache_policy.go +++ b/aws/data_source_aws_cloudfront_cache_policy.go @@ -1,6 +1,8 @@ package aws import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudfront" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -162,7 +164,7 @@ func dataSourceAwsCloudFrontCachePolicyRead(d *schema.ResourceData, meta interfa if d.Id() == "" { if err := dataSourceAwsCloudFrontCachePolicyFindByName(d, conn); err != nil { - return err + return fmt.Errorf("Unable to locate cache policy by name: %s", err.Error()) } } @@ -174,11 +176,11 @@ func 
dataSourceAwsCloudFrontCachePolicyRead(d *schema.ResourceData, meta interfa resp, err := conn.GetCachePolicy(request) if err != nil { - return err + return fmt.Errorf("Unable to retrieve cache policy with ID %s: %s", d.Id(), err.Error()) } d.Set("etag", aws.StringValue(resp.ETag)) - flattenCloudFrontCachePolicy(d, resp.CachePolicy.CachePolicyConfig) + setCloudFrontCachePolicy(d, resp.CachePolicy.CachePolicyConfig) } return nil diff --git a/aws/data_source_aws_cloudfront_cache_policy_test.go b/aws/data_source_aws_cloudfront_cache_policy_test.go index b32a77853f0..4b8f20f9300 100644 --- a/aws/data_source_aws_cloudfront_cache_policy_test.go +++ b/aws/data_source_aws_cloudfront_cache_policy_test.go @@ -11,6 +11,7 @@ import ( func TestAccAWSCloudFrontDataSourceCachePolicy_basic(t *testing.T) { rInt := acctest.RandInt() + dataSourceName := "data.aws_cloudfront_cache_policy.example" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, @@ -20,16 +21,16 @@ func TestAccAWSCloudFrontDataSourceCachePolicy_basic(t *testing.T) { { Config: testAccAWSCloudFrontCachePolicyDataSourceNameConfig(rInt), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.aws_cloudfront_cache_policy.example", "comment", "test comment"), - resource.TestCheckResourceAttr("data.aws_cloudfront_cache_policy.example", "default_ttl", "50"), - resource.TestCheckResourceAttr("data.aws_cloudfront_cache_policy.example", "min_ttl", "1"), - resource.TestCheckResourceAttr("data.aws_cloudfront_cache_policy.example", "max_ttl", "100"), - resource.TestCheckResourceAttr("data.aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookie_behavior", "whitelist"), - resource.TestCheckResourceAttr("data.aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookies.0.items.0", "test"), - resource.TestCheckResourceAttr("data.aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.header_behavior", "whitelist"), - resource.TestCheckResourceAttr("data.aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.headers.0.items.0", "test"), - resource.TestCheckResourceAttr("data.aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_string_behavior", "whitelist"), - resource.TestCheckResourceAttr("data.aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_strings.0.items.0", "test"), + resource.TestCheckResourceAttr(dataSourceName, "comment", "test comment"), + resource.TestCheckResourceAttr(dataSourceName, "default_ttl", "50"), + resource.TestCheckResourceAttr(dataSourceName, "min_ttl", "1"), + resource.TestCheckResourceAttr(dataSourceName, "max_ttl", "100"), + resource.TestCheckResourceAttr(dataSourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookie_behavior", "whitelist"), + resource.TestCheckResourceAttr(dataSourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookies.0.items.0", "test"), + resource.TestCheckResourceAttr(dataSourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.header_behavior", "whitelist"), + resource.TestCheckResourceAttr(dataSourceName, 
"parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.headers.0.items.0", "test"), + resource.TestCheckResourceAttr(dataSourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_string_behavior", "whitelist"), + resource.TestCheckResourceAttr(dataSourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_strings.0.items.0", "test"), ), }, { diff --git a/aws/resource_aws_cloudfront_cache_policy.go b/aws/resource_aws_cloudfront_cache_policy.go index 8ea69da67a1..2e9da7785bd 100644 --- a/aws/resource_aws_cloudfront_cache_policy.go +++ b/aws/resource_aws_cloudfront_cache_policy.go @@ -181,7 +181,7 @@ func resourceAwsCloudFrontCachePolicyRead(d *schema.ResourceData, meta interface } d.Set("etag", aws.StringValue(resp.ETag)) - flattenCloudFrontCachePolicy(d, resp.CachePolicy.CachePolicyConfig) + setCloudFrontCachePolicy(d, resp.CachePolicy.CachePolicyConfig) return nil } diff --git a/aws/resource_aws_cloudfront_cache_policy_test.go b/aws/resource_aws_cloudfront_cache_policy_test.go index 86f41e68ee1..6c3649ca202 100644 --- a/aws/resource_aws_cloudfront_cache_policy_test.go +++ b/aws/resource_aws_cloudfront_cache_policy_test.go @@ -11,6 +11,7 @@ import ( func TestAccAWSCloudFrontCachePolicy_basic(t *testing.T) { rInt := acctest.RandInt() + resourceName := "aws_cloudfront_cache_policy.example" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, @@ -20,16 +21,16 @@ func TestAccAWSCloudFrontCachePolicy_basic(t *testing.T) { { Config: testAccAWSCloudFrontCachePolicyConfig(rInt), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "comment", "test comment"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "default_ttl", "50"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "min_ttl", "1"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "max_ttl", "100"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookie_behavior", "whitelist"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookies.0.items.0", "test"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.header_behavior", "whitelist"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.headers.0.items.0", "test"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_string_behavior", "whitelist"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_strings.0.items.0", "test"), + resource.TestCheckResourceAttr(resourceName, "comment", "test comment"), + resource.TestCheckResourceAttr(resourceName, "default_ttl", "50"), + resource.TestCheckResourceAttr(resourceName, "min_ttl", "1"), + resource.TestCheckResourceAttr(resourceName, "max_ttl", "100"), + resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookie_behavior", "whitelist"), + 
resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookies.0.items.0", "test"), + resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.header_behavior", "whitelist"), + resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.headers.0.items.0", "test"), + resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_string_behavior", "whitelist"), + resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_strings.0.items.0", "test"), ), }, { @@ -44,6 +45,7 @@ func TestAccAWSCloudFrontCachePolicy_basic(t *testing.T) { func TestAccAWSCloudFrontCachePolicy_update(t *testing.T) { rInt := acctest.RandInt() + resourceName := "aws_cloudfront_cache_policy.example" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, @@ -53,31 +55,31 @@ func TestAccAWSCloudFrontCachePolicy_update(t *testing.T) { { Config: testAccAWSCloudFrontCachePolicyConfig(rInt), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "comment", "test comment"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "default_ttl", "50"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "min_ttl", "1"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "max_ttl", "100"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookie_behavior", "whitelist"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookies.0.items.0", "test"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.header_behavior", "whitelist"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.headers.0.items.0", "test"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_string_behavior", "whitelist"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_strings.0.items.0", "test"), + resource.TestCheckResourceAttr(resourceName, "comment", "test comment"), + resource.TestCheckResourceAttr(resourceName, "default_ttl", "50"), + resource.TestCheckResourceAttr(resourceName, "min_ttl", "1"), + resource.TestCheckResourceAttr(resourceName, "max_ttl", "100"), + resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookie_behavior", "whitelist"), + resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookies.0.items.0", "test"), + resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.header_behavior", "whitelist"), + resource.TestCheckResourceAttr(resourceName, 
"parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.headers.0.items.0", "test"), + resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_string_behavior", "whitelist"), + resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_strings.0.items.0", "test"), ), }, { Config: testAccAWSCloudFrontCachePolicyConfigUpdate(rInt), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "comment", "test comment updated"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "default_ttl", "51"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "min_ttl", "2"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "max_ttl", "101"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookie_behavior", "allExcept"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookies.0.items.0", "test2"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.header_behavior", "none"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.headers.#", "0"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_string_behavior", "allExcept"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_strings.0.items.0", "test2"), + resource.TestCheckResourceAttr(resourceName, "comment", "test comment updated"), + resource.TestCheckResourceAttr(resourceName, "default_ttl", "51"), + resource.TestCheckResourceAttr(resourceName, "min_ttl", "2"), + resource.TestCheckResourceAttr(resourceName, "max_ttl", "101"), + resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookie_behavior", "allExcept"), + resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookies.0.items.0", "test2"), + resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.header_behavior", "none"), + resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.headers.#", "0"), + resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_string_behavior", "allExcept"), + resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_strings.0.items.0", "test2"), ), }, { @@ -92,6 +94,7 @@ func TestAccAWSCloudFrontCachePolicy_update(t *testing.T) { func TestAccAWSCloudFrontCachePolicy_noneBehavior(t *testing.T) { rInt := acctest.RandInt() + resourceName := "aws_cloudfront_cache_policy.example" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, @@ -101,16 +104,16 @@ func TestAccAWSCloudFrontCachePolicy_noneBehavior(t 
*testing.T) { { Config: testAccAWSCloudFrontCachePolicyConfigNoneBehavior(rInt), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "comment", "test comment"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "default_ttl", "50"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "min_ttl", "1"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "max_ttl", "100"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookie_behavior", "none"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookies.#", "0"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.header_behavior", "none"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.headers.#", "0"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_string_behavior", "none"), - resource.TestCheckResourceAttr("aws_cloudfront_cache_policy.example", "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_strings.#", "0"), + resource.TestCheckResourceAttr(resourceName, "comment", "test comment"), + resource.TestCheckResourceAttr(resourceName, "default_ttl", "50"), + resource.TestCheckResourceAttr(resourceName, "min_ttl", "1"), + resource.TestCheckResourceAttr(resourceName, "max_ttl", "100"), + resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookie_behavior", "none"), + resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.cookies_config.0.cookies.#", "0"), + resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.header_behavior", "none"), + resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.headers_config.0.headers.#", "0"), + resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_string_behavior", "none"), + resource.TestCheckResourceAttr(resourceName, "parameters_in_cache_key_and_forwarded_to_origin.0.query_strings_config.0.query_strings.#", "0"), ), }, { diff --git a/website/docs/d/cloudfront_cache_policy.html.markdown b/website/docs/d/cloudfront_cache_policy.html.markdown index a39554ef6e4..71e100472dc 100644 --- a/website/docs/d/cloudfront_cache_policy.html.markdown +++ b/website/docs/d/cloudfront_cache_policy.html.markdown @@ -37,8 +37,8 @@ The following arguments are supported: ### Parameters In Cache Key And Forwarded To Origin * `cookies_config` - An object that determines whether any cookies in viewer requests (and if so, which cookies) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Cookies Config](#cookies-config) for more information. -* `cookies_config` - An object that determines whether any HTTP headers (and if so, which headers) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Headers Config](#headers-config) for more information. 
-* `cookies_config` - An object that determines whether any URL query strings in viewer requests (and if so, which query strings) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Query Strings Config](#query-strings-config) for more information. +* `headers_config` - An object that determines whether any HTTP headers (and if so, which headers) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Headers Config](#headers-config) for more information. +* `query_strings_config` - An object that determines whether any URL query strings in viewer requests (and if so, which query strings) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Query Strings Config](#query-strings-config) for more information. * `enable_accept_encoding_brotli` - A flag that can affect whether the Accept-Encoding HTTP header is included in the cache key and included in requests that CloudFront sends to the origin. * `enable_accept_encoding_gzip` - A flag that can affect whether the Accept-Encoding HTTP header is included in the cache key and included in requests that CloudFront sends to the origin. diff --git a/website/docs/r/cloudfront_cache_policy.html.markdown b/website/docs/r/cloudfront_cache_policy.html.markdown index 811aa26d4ce..ea485f3d054 100644 --- a/website/docs/r/cloudfront_cache_policy.html.markdown +++ b/website/docs/r/cloudfront_cache_policy.html.markdown @@ -61,8 +61,8 @@ The following arguments are supported: ### Parameters In Cache Key And Forwarded To Origin * `cookies_config` - (Required) An object that determines whether any cookies in viewer requests (and if so, which cookies) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Cookies Config](#cookies-config) for more information. -* `cookies_config` - (Required) An object that determines whether any HTTP headers (and if so, which headers) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Headers Config](#headers-config) for more information. -* `cookies_config` - (Required) An object that determines whether any URL query strings in viewer requests (and if so, which query strings) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Query Strings Config](#query-strings-config) for more information. +* `headers_config` - (Required) An object that determines whether any HTTP headers (and if so, which headers) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Headers Config](#headers-config) for more information. +* `query_strings_config` - (Required) An object that determines whether any URL query strings in viewer requests (and if so, which query strings) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Query Strings Config](#query-strings-config) for more information. * `enable_accept_encoding_brotli` - (Optional) A flag that can affect whether the Accept-Encoding HTTP header is included in the cache key and included in requests that CloudFront sends to the origin. * `enable_accept_encoding_gzip` - (Optional) A flag that can affect whether the Accept-Encoding HTTP header is included in the cache key and included in requests that CloudFront sends to the origin. 
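A note on the helper pattern adopted above: each cache policy expander now builds the outer API object first and attaches the nested names block only when exactly one is configured, and `expandForwardedValues` guards against an empty input map instead of letting a nil intermediate leak through. A minimal, self-contained Go sketch of that combined pattern follows; the `cookieNames` and `cookiesConfig` structs, the pointer helpers, and the plain `[]interface{}` item list are illustrative stand-ins, since the provider's real code works with the `cloudfront` SDK types and `*schema.Set`.

```go
package main

import "fmt"

// Illustrative stand-ins for *cloudfront.CookieNames and
// *cloudfront.CachePolicyCookiesConfig.
type cookieNames struct {
	Items    []*string
	Quantity *int64
}

type cookiesConfig struct {
	CookieBehavior *string
	Cookies        *cookieNames
}

func strPtr(s string) *string { return &s }
func i64Ptr(i int64) *int64   { return &i }

// expandCookieNames guards against a nil input map, then builds the
// names object and derives Quantity from the item count.
func expandCookieNames(tfMap map[string]interface{}) *cookieNames {
	if tfMap == nil {
		return nil
	}

	var items []*string
	for _, v := range tfMap["items"].([]interface{}) {
		items = append(items, strPtr(v.(string)))
	}

	return &cookieNames{
		Items:    items,
		Quantity: i64Ptr(int64(len(items))),
	}
}

// expandCookiesConfig builds the outer config first and sets the
// nested Cookies field only when exactly one block is present, so no
// possibly-nil intermediate variable is carried around.
func expandCookiesConfig(tfMap map[string]interface{}) *cookiesConfig {
	if tfMap == nil {
		return nil
	}

	apiObject := &cookiesConfig{
		CookieBehavior: strPtr(tfMap["cookie_behavior"].(string)),
	}

	if v, ok := tfMap["cookies"].([]interface{}); ok && len(v) == 1 {
		apiObject.Cookies = expandCookieNames(v[0].(map[string]interface{}))
	}

	return apiObject
}

func main() {
	flat := map[string]interface{}{
		"cookie_behavior": "whitelist",
		"cookies": []interface{}{
			map[string]interface{}{"items": []interface{}{"session-id"}},
		},
	}

	cfg := expandCookiesConfig(flat)
	fmt.Println(*cfg.CookieBehavior, *cfg.Cookies.Quantity) // whitelist 1
}
```

Building the struct and conditionally setting its nested field, rather than assigning through a separately declared nil-able variable, is the same simplification applied to the cookies, headers, and query string helpers in the diff above.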
From 66e407fd8bcff7b10333639c62b701cdf1b5b226 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Wed, 3 Feb 2021 16:19:12 -0800 Subject: [PATCH 1069/1212] Include feedback from origin_request_policy --- aws/cloudfront_cache_policy_structure.go | 127 +++++++++++------- ...data_source_aws_cloudfront_cache_policy.go | 71 +++++----- aws/resource_aws_cloudfront_cache_policy.go | 10 +- .../d/cloudfront_cache_policy.html.markdown | 12 +- .../r/cloudfront_cache_policy.html.markdown | 12 +- 5 files changed, 131 insertions(+), 101 deletions(-) diff --git a/aws/cloudfront_cache_policy_structure.go b/aws/cloudfront_cache_policy_structure.go index 40d9c85d8c5..dc80b3ea090 100644 --- a/aws/cloudfront_cache_policy_structure.go +++ b/aws/cloudfront_cache_policy_structure.go @@ -6,102 +6,133 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -func expandCloudFrontCachePolicyCookieNames(cookieNamesFlat map[string]interface{}) *cloudfront.CookieNames { - cookieNames := &cloudfront.CookieNames{} +func expandCloudFrontCachePolicyCookieNames(tfMap map[string]interface{}) *cloudfront.CookieNames { + if tfMap == nil { + return nil + } + + var items []*string + for _, item := range tfMap["items"].(*schema.Set).List() { + items = append(items, aws.String(item.(string))) + } - var newCookieItems []*string - for _, cookie := range cookieNamesFlat["items"].(*schema.Set).List() { - newCookieItems = append(newCookieItems, aws.String(cookie.(string))) + apiObject := &cloudfront.CookieNames{ + Items: items, + Quantity: aws.Int64(int64(len(items))), } - cookieNames.Items = newCookieItems - cookieNames.Quantity = aws.Int64(int64(len(newCookieItems))) - return cookieNames + return apiObject } -func expandCloudFrontCachePolicyCookiesConfig(cookiesConfigFlat map[string]interface{}) *cloudfront.CachePolicyCookiesConfig { - cookiesConfig := &cloudfront.CachePolicyCookiesConfig{ - CookieBehavior: aws.String(cookiesConfigFlat["cookie_behavior"].(string)), +func expandCloudFrontCachePolicyCookiesConfig(tfMap map[string]interface{}) *cloudfront.CachePolicyCookiesConfig { + if tfMap == nil { + return nil + } + + apiObject := &cloudfront.CachePolicyCookiesConfig{ + CookieBehavior: aws.String(tfMap["cookie_behavior"].(string)), } - if cookiesFlat, ok := cookiesConfigFlat["cookies"].([]interface{}); ok && len(cookiesFlat) == 1 { - cookiesConfig.Cookies = expandCloudFrontCachePolicyCookieNames(cookiesFlat[0].(map[string]interface{})) + if items, ok := tfMap["cookies"].([]interface{}); ok && len(items) == 1 { + apiObject.Cookies = expandCloudFrontCachePolicyCookieNames(items[0].(map[string]interface{})) } - return cookiesConfig + return apiObject } -func expandCloudFrontCachePolicyHeaders(headerNamesFlat map[string]interface{}) *cloudfront.Headers { - headers := &cloudfront.Headers{} +func expandCloudFrontCachePolicyHeaders(tfMap map[string]interface{}) *cloudfront.Headers { + if tfMap == nil { + return nil + } + + var items []*string + for _, item := range tfMap["items"].(*schema.Set).List() { + items = append(items, aws.String(item.(string))) + } - var newHeaderItems []*string - for _, header := range headerNamesFlat["items"].(*schema.Set).List() { - newHeaderItems = append(newHeaderItems, aws.String(header.(string))) + apiObject := &cloudfront.Headers{ + Items: items, + Quantity: aws.Int64(int64(len(items))), } - headers.Items = newHeaderItems - headers.Quantity = aws.Int64(int64(len(newHeaderItems))) - return headers + return apiObject } -func expandCloudFrontCachePolicyHeadersConfig(headersConfigFlat 
map[string]interface{}) *cloudfront.CachePolicyHeadersConfig { - headersConfig := &cloudfront.CachePolicyHeadersConfig{ - HeaderBehavior: aws.String(headersConfigFlat["header_behavior"].(string)), +func expandCloudFrontCachePolicyHeadersConfig(tfMap map[string]interface{}) *cloudfront.CachePolicyHeadersConfig { + if tfMap == nil { + return nil } - if headersFlat, ok := headersConfigFlat["headers"].([]interface{}); ok && len(headersFlat) == 1 && headersConfigFlat["header_behavior"] != "none" { - headersConfig.Headers = expandCloudFrontCachePolicyHeaders(headersFlat[0].(map[string]interface{})) + apiObject := &cloudfront.CachePolicyHeadersConfig{ + HeaderBehavior: aws.String(tfMap["header_behavior"].(string)), } - return headersConfig + if items, ok := tfMap["headers"].([]interface{}); ok && len(items) == 1 && tfMap["header_behavior"] != "none" { + apiObject.Headers = expandCloudFrontCachePolicyHeaders(items[0].(map[string]interface{})) + } + + return apiObject } -func expandCloudFrontCachePolicyQueryStringNames(queryStringNamesFlat map[string]interface{}) *cloudfront.QueryStringNames { - queryStringNames := &cloudfront.QueryStringNames{} +func expandCloudFrontCachePolicyQueryStringNames(tfMap map[string]interface{}) *cloudfront.QueryStringNames { + if tfMap == nil { + return nil + } + + var items []*string + for _, queryStringitesm := range tfMap["items"].(*schema.Set).List() { + items = append(items, aws.String(queryStringitesm.(string))) + } - var newQueryStringItems []*string - for _, queryString := range queryStringNamesFlat["items"].(*schema.Set).List() { - newQueryStringItems = append(newQueryStringItems, aws.String(queryString.(string))) + apiObject := &cloudfront.QueryStringNames{ + Items: items, + Quantity: aws.Int64(int64(len(items))), } - queryStringNames.Items = newQueryStringItems - queryStringNames.Quantity = aws.Int64(int64(len(newQueryStringItems))) - return queryStringNames + return apiObject } -func expandCloudFrontCachePolicyQueryStringConfig(queryStringConfigFlat map[string]interface{}) *cloudfront.CachePolicyQueryStringsConfig { - queryStringConfig := &cloudfront.CachePolicyQueryStringsConfig{ - QueryStringBehavior: aws.String(queryStringConfigFlat["query_string_behavior"].(string)), +func expandCloudFrontCachePolicyQueryStringConfig(tfMap map[string]interface{}) *cloudfront.CachePolicyQueryStringsConfig { + if tfMap == nil { + return nil } - if queryStringFlat, ok := queryStringConfigFlat["query_strings"].([]interface{}); ok && len(queryStringFlat) == 1 { - queryStringConfig.QueryStrings = expandCloudFrontCachePolicyQueryStringNames(queryStringFlat[0].(map[string]interface{})) + apiObject := &cloudfront.CachePolicyQueryStringsConfig{ + QueryStringBehavior: aws.String(tfMap["query_string_behavior"].(string)), } - return queryStringConfig + if items, ok := tfMap["query_strings"].([]interface{}); ok && len(items) == 1 { + apiObject.QueryStrings = expandCloudFrontCachePolicyQueryStringNames(items[0].(map[string]interface{})) + } + + return apiObject } -func expandCloudFrontCachePolicyParametersConfig(parameters map[string]interface{}) *cloudfront.ParametersInCacheKeyAndForwardedToOrigin { +func expandCloudFrontCachePolicyParametersConfig(tfMap map[string]interface{}) *cloudfront.ParametersInCacheKeyAndForwardedToOrigin { + if tfMap == nil { + return nil + } + var cookiesConfig *cloudfront.CachePolicyCookiesConfig var headersConfig *cloudfront.CachePolicyHeadersConfig var queryStringsConfig *cloudfront.CachePolicyQueryStringsConfig - if cookiesFlat, ok := 
parameters["cookies_config"].([]interface{}); ok && len(cookiesFlat) == 1 { + if cookiesFlat, ok := tfMap["cookies_config"].([]interface{}); ok && len(cookiesFlat) == 1 { cookiesConfig = expandCloudFrontCachePolicyCookiesConfig(cookiesFlat[0].(map[string]interface{})) } - if headersFlat, ok := parameters["headers_config"].([]interface{}); ok && len(headersFlat) == 1 { + if headersFlat, ok := tfMap["headers_config"].([]interface{}); ok && len(headersFlat) == 1 { headersConfig = expandCloudFrontCachePolicyHeadersConfig(headersFlat[0].(map[string]interface{})) } - if queryStringsFlat, ok := parameters["query_strings_config"].([]interface{}); ok && len(queryStringsFlat) == 1 { + if queryStringsFlat, ok := tfMap["query_strings_config"].([]interface{}); ok && len(queryStringsFlat) == 1 { queryStringsConfig = expandCloudFrontCachePolicyQueryStringConfig(queryStringsFlat[0].(map[string]interface{})) } parametersConfig := &cloudfront.ParametersInCacheKeyAndForwardedToOrigin{ CookiesConfig: cookiesConfig, - EnableAcceptEncodingBrotli: aws.Bool(parameters["enable_accept_encoding_brotli"].(bool)), - EnableAcceptEncodingGzip: aws.Bool(parameters["enable_accept_encoding_gzip"].(bool)), + EnableAcceptEncodingBrotli: aws.Bool(tfMap["enable_accept_encoding_brotli"].(bool)), + EnableAcceptEncodingGzip: aws.Bool(tfMap["enable_accept_encoding_gzip"].(bool)), HeadersConfig: headersConfig, QueryStringsConfig: queryStringsConfig, } diff --git a/aws/data_source_aws_cloudfront_cache_policy.go b/aws/data_source_aws_cloudfront_cache_policy.go index f78c5225c29..da876f93472 100644 --- a/aws/data_source_aws_cloudfront_cache_policy.go +++ b/aws/data_source_aws_cloudfront_cache_policy.go @@ -13,20 +13,6 @@ func dataSourceAwsCloudFrontCachePolicy() *schema.Resource { Read: dataSourceAwsCloudFrontCachePolicyRead, Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - ConflictsWith: []string{"id"}, - Optional: true, - }, - "id": { - Type: schema.TypeString, - ConflictsWith: []string{"name"}, - Optional: true, - }, - "etag": { - Type: schema.TypeString, - Computed: true, - }, "comment": { Type: schema.TypeString, Computed: true, @@ -35,6 +21,15 @@ func dataSourceAwsCloudFrontCachePolicy() *schema.Resource { Type: schema.TypeInt, Computed: true, }, + "etag": { + Type: schema.TypeString, + Computed: true, + }, + "id": { + Type: schema.TypeString, + ConflictsWith: []string{"name"}, + Optional: true, + }, "max_ttl": { Type: schema.TypeInt, Computed: true, @@ -43,6 +38,11 @@ func dataSourceAwsCloudFrontCachePolicy() *schema.Resource { Type: schema.TypeInt, Computed: true, }, + "name": { + Type: schema.TypeString, + ConflictsWith: []string{"id"}, + Optional: true, + }, "parameters_in_cache_key_and_forwarded_to_origin": { Type: schema.TypeList, Computed: true, @@ -137,28 +137,6 @@ func dataSourceAwsCloudFrontCachePolicy() *schema.Resource { }, } } - -func dataSourceAwsCloudFrontCachePolicyFindByName(d *schema.ResourceData, conn *cloudfront.CloudFront) error { - var cachePolicy *cloudfront.CachePolicy - request := &cloudfront.ListCachePoliciesInput{} - resp, err := conn.ListCachePolicies(request) - if err != nil { - return err - } - - for _, policySummary := range resp.CachePolicyList.Items { - if *policySummary.CachePolicy.CachePolicyConfig.Name == d.Get("name").(string) { - cachePolicy = policySummary.CachePolicy - break - } - } - - if cachePolicy != nil { - d.SetId(aws.StringValue(cachePolicy.Id)) - } - return nil -} - func dataSourceAwsCloudFrontCachePolicyRead(d *schema.ResourceData, meta interface{}) 
error { conn := meta.(*AWSClient).cloudfrontconn @@ -185,3 +163,24 @@ func dataSourceAwsCloudFrontCachePolicyRead(d *schema.ResourceData, meta interfa return nil } + +func dataSourceAwsCloudFrontCachePolicyFindByName(d *schema.ResourceData, conn *cloudfront.CloudFront) error { + var cachePolicy *cloudfront.CachePolicy + request := &cloudfront.ListCachePoliciesInput{} + resp, err := conn.ListCachePolicies(request) + if err != nil { + return err + } + + for _, policySummary := range resp.CachePolicyList.Items { + if *policySummary.CachePolicy.CachePolicyConfig.Name == d.Get("name").(string) { + cachePolicy = policySummary.CachePolicy + break + } + } + + if cachePolicy != nil { + d.SetId(aws.StringValue(cachePolicy.Id)) + } + return nil +} diff --git a/aws/resource_aws_cloudfront_cache_policy.go b/aws/resource_aws_cloudfront_cache_policy.go index 2e9da7785bd..6e090c8e35c 100644 --- a/aws/resource_aws_cloudfront_cache_policy.go +++ b/aws/resource_aws_cloudfront_cache_policy.go @@ -27,6 +27,11 @@ func resourceAwsCloudFrontCachePolicy() *schema.Resource { Optional: true, Default: 86400, }, + "etag": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, "max_ttl": { Type: schema.TypeInt, Optional: true, @@ -41,11 +46,6 @@ func resourceAwsCloudFrontCachePolicy() *schema.Resource { Type: schema.TypeString, Required: true, }, - "etag": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, "parameters_in_cache_key_and_forwarded_to_origin": { Type: schema.TypeList, MaxItems: 1, diff --git a/website/docs/d/cloudfront_cache_policy.html.markdown b/website/docs/d/cloudfront_cache_policy.html.markdown index 71e100472dc..007bf54878f 100644 --- a/website/docs/d/cloudfront_cache_policy.html.markdown +++ b/website/docs/d/cloudfront_cache_policy.html.markdown @@ -36,26 +36,26 @@ The following arguments are supported: ### Parameters In Cache Key And Forwarded To Origin -* `cookies_config` - An object that determines whether any cookies in viewer requests (and if so, which cookies) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Cookies Config](#cookies-config) for more information. -* `headers_config` - An object that determines whether any HTTP headers (and if so, which headers) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Headers Config](#headers-config) for more information. -* `query_strings_config` - An object that determines whether any URL query strings in viewer requests (and if so, which query strings) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Query Strings Config](#query-strings-config) for more information. +* `cookies_config` - Object that determines whether any cookies in viewer requests (and if so, which cookies) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Cookies Config](#cookies-config) for more information. +* `headers_config` - Object that determines whether any HTTP headers (and if so, which headers) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Headers Config](#headers-config) for more information. +* `query_strings_config` - Object that determines whether any URL query strings in viewer requests (and if so, which query strings) are included in the cache key and automatically included in requests that CloudFront sends to the origin. 
See [Query Strings Config](#query-strings-config) for more information. * `enable_accept_encoding_brotli` - A flag that can affect whether the Accept-Encoding HTTP header is included in the cache key and included in requests that CloudFront sends to the origin. * `enable_accept_encoding_gzip` - A flag that can affect whether the Accept-Encoding HTTP header is included in the cache key and included in requests that CloudFront sends to the origin. ### Cookies Config `cookie_behavior` - Determines whether any cookies in viewer requests are included in the cache key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`, `allExcept`, `all`. -`cookies` - An object that contains a list of cookie names. See [Items](#items) for more information. +`cookies` - Object that contains a list of cookie names. See [Items](#items) for more information. ### Headers Config `header_behavior` - Determines whether any HTTP headers are included in the cache key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`. -`headers` - An object that contains a list of header names. See [Items](#items) for more information. +`headers` - Object that contains a list of header names. See [Items](#items) for more information. ### Query String Config `query_string_behavior` - Determines whether any URL query strings in viewer requests are included in the cache key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`, `allExcept`, `all`. -`query_strings` - An object that contains a list of query string names. See [Items](#items) for more information. +`query_strings` - Object that contains a list of query string names. See [Items](#items) for more information. ### Items diff --git a/website/docs/r/cloudfront_cache_policy.html.markdown b/website/docs/r/cloudfront_cache_policy.html.markdown index ea485f3d054..df94af0e340 100644 --- a/website/docs/r/cloudfront_cache_policy.html.markdown +++ b/website/docs/r/cloudfront_cache_policy.html.markdown @@ -60,26 +60,26 @@ The following arguments are supported: ### Parameters In Cache Key And Forwarded To Origin -* `cookies_config` - (Required) An object that determines whether any cookies in viewer requests (and if so, which cookies) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Cookies Config](#cookies-config) for more information. -* `headers_config` - (Required) An object that determines whether any HTTP headers (and if so, which headers) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Headers Config](#headers-config) for more information. -* `query_strings_config` - (Required) An object that determines whether any URL query strings in viewer requests (and if so, which query strings) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Query Strings Config](#query-strings-config) for more information. +* `cookies_config` - (Required) Object that determines whether any cookies in viewer requests (and if so, which cookies) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Cookies Config](#cookies-config) for more information. 
+* `headers_config` - (Required) Object that determines whether any HTTP headers (and if so, which headers) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Headers Config](#headers-config) for more information. +* `query_strings_config` - (Required) Object that determines whether any URL query strings in viewer requests (and if so, which query strings) are included in the cache key and automatically included in requests that CloudFront sends to the origin. See [Query Strings Config](#query-strings-config) for more information. * `enable_accept_encoding_brotli` - (Optional) A flag that can affect whether the Accept-Encoding HTTP header is included in the cache key and included in requests that CloudFront sends to the origin. * `enable_accept_encoding_gzip` - (Optional) A flag that can affect whether the Accept-Encoding HTTP header is included in the cache key and included in requests that CloudFront sends to the origin. ### Cookies Config `cookie_behavior` - (Required) Determines whether any cookies in viewer requests are included in the cache key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`, `allExcept`, `all`. -`cookies` - (Optional) An object that contains a list of cookie names. See [Items](#items) for more information. +`cookies` - (Optional) Object that contains a list of cookie names. See [Items](#items) for more information. ### Headers Config `header_behavior` - (Required) Determines whether any HTTP headers are included in the cache key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`. -`headers` - (Optional) An object that contains a list of header names. See [Items](#items) for more information. +`headers` - (Optional) Object that contains a list of header names. See [Items](#items) for more information. ### Query String Config `query_string_behavior` - (Required) Determines whether any URL query strings in viewer requests are included in the cache key and automatically included in requests that CloudFront sends to the origin. Valid values are `none`, `whitelist`, `allExcept`, `all`. -`query_strings` - (Optional) An object that contains a list of query string names. See [Items](#items) for more information. +`query_strings` - (Optional) Object that contains a list of query string names. See [Items](#items) for more information. ### Items From 79133b3233f8ea86358dc54337d5ecded870ccce Mon Sep 17 00:00:00 2001 From: bill-rich Date: Thu, 4 Feb 2021 09:53:14 -0800 Subject: [PATCH 1070/1212] Revert part of CF distribution changes --- aws/cloudfront_distribution_configuration_structure.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/aws/cloudfront_distribution_configuration_structure.go b/aws/cloudfront_distribution_configuration_structure.go index acd31d9e627..91f2a85f67e 100644 --- a/aws/cloudfront_distribution_configuration_structure.go +++ b/aws/cloudfront_distribution_configuration_structure.go @@ -224,18 +224,22 @@ func expandCloudFrontDefaultCacheBehavior(m map[string]interface{}) *cloudfront. 
} func expandCacheBehavior(m map[string]interface{}) *cloudfront.CacheBehavior { + var forwardedValues *cloudfront.ForwardedValues + if forwardedValuesFlat, ok := m["forwarded_values"].([]interface{}); ok && len(forwardedValuesFlat) == 1 { + forwardedValues = expandForwardedValues(m["forwarded_values"].([]interface{})[0].(map[string]interface{})) + } + cb := &cloudfront.CacheBehavior{ CachePolicyId: aws.String(m["cache_policy_id"].(string)), Compress: aws.Bool(m["compress"].(bool)), - DefaultTTL: defaultTTL, FieldLevelEncryptionId: aws.String(m["field_level_encryption_id"].(string)), - ForwardedValues: expandForwardedValues(m["forwarded_values"].([]interface{})[0].(map[string]interface{})), + ForwardedValues: forwardedValues, OriginRequestPolicyId: aws.String(m["origin_request_policy_id"].(string)), TargetOriginId: aws.String(m["target_origin_id"].(string)), ViewerProtocolPolicy: aws.String(m["viewer_protocol_policy"].(string)), } - if m["cache_policy_id"].(string) != "" { + if m["cache_policy_id"].(string) == "" { cb.MinTTL = aws.Int64(int64(m["min_ttl"].(int))) cb.MaxTTL = aws.Int64(int64(m["max_ttl"].(int))) cb.DefaultTTL = aws.Int64(int64(m["default_ttl"].(int))) From ffad98ce3c72c04aa89bd4a067a7a7efad39d454 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Thu, 4 Feb 2021 10:00:06 -0800 Subject: [PATCH 1071/1212] Fix error message capitalization --- aws/data_source_aws_cloudfront_cache_policy.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/data_source_aws_cloudfront_cache_policy.go b/aws/data_source_aws_cloudfront_cache_policy.go index da876f93472..a60434125f2 100644 --- a/aws/data_source_aws_cloudfront_cache_policy.go +++ b/aws/data_source_aws_cloudfront_cache_policy.go @@ -142,7 +142,7 @@ func dataSourceAwsCloudFrontCachePolicyRead(d *schema.ResourceData, meta interfa if d.Id() == "" { if err := dataSourceAwsCloudFrontCachePolicyFindByName(d, conn); err != nil { - return fmt.Errorf("Unable to locate cache policy by name: %s", err.Error()) + return fmt.Errorf("unable to locate cache policy by name: %s", err.Error()) } } @@ -154,7 +154,7 @@ func dataSourceAwsCloudFrontCachePolicyRead(d *schema.ResourceData, meta interfa resp, err := conn.GetCachePolicy(request) if err != nil { - return fmt.Errorf("Unable to retrieve cache policy with ID %s: %s", d.Id(), err.Error()) + return fmt.Errorf("unable to retrieve cache policy with ID %s: %s", d.Id(), err.Error()) } d.Set("etag", aws.StringValue(resp.ETag)) From 1b2174edbdf29035aa032f0236d0a7ee8e7c5480 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Thu, 4 Feb 2021 10:08:34 -0800 Subject: [PATCH 1072/1212] Add IsNewResource check --- aws/resource_aws_cloudfront_cache_policy.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/aws/resource_aws_cloudfront_cache_policy.go b/aws/resource_aws_cloudfront_cache_policy.go index 6e090c8e35c..d8a215f457d 100644 --- a/aws/resource_aws_cloudfront_cache_policy.go +++ b/aws/resource_aws_cloudfront_cache_policy.go @@ -176,6 +176,13 @@ func resourceAwsCloudFrontCachePolicyRead(d *schema.ResourceData, meta interface } resp, err := conn.GetCachePolicy(request) + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, "ResourceNotFoundException") { + log.Printf("[WARN] CloudFront Cache Policy (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if err != nil { return err } From b6d0d33d3acd286e0e2453873fb27cbd23c02c79 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Thu, 4 Feb 2021 12:11:50 -0800 Subject: [PATCH 1073/1212] Fix imports --- 
aws/resource_aws_cloudfront_cache_policy.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/aws/resource_aws_cloudfront_cache_policy.go b/aws/resource_aws_cloudfront_cache_policy.go index d8a215f457d..334271f12ec 100644 --- a/aws/resource_aws_cloudfront_cache_policy.go +++ b/aws/resource_aws_cloudfront_cache_policy.go @@ -1,8 +1,11 @@ package aws import ( + "log" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudfront" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) From 672902f61026b2d3c9167b70f237fee9f0df17f4 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Wed, 10 Feb 2021 12:04:25 -0800 Subject: [PATCH 1074/1212] Use expandStringSet --- aws/cloudfront_cache_policy_structure.go | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/aws/cloudfront_cache_policy_structure.go b/aws/cloudfront_cache_policy_structure.go index dc80b3ea090..7412bb16cd9 100644 --- a/aws/cloudfront_cache_policy_structure.go +++ b/aws/cloudfront_cache_policy_structure.go @@ -11,10 +11,7 @@ func expandCloudFrontCachePolicyCookieNames(tfMap map[string]interface{}) *cloud return nil } - var items []*string - for _, item := range tfMap["items"].(*schema.Set).List() { - items = append(items, aws.String(item.(string))) - } + items := expandStringSet(tfMap["items"].(*schema.Set)) apiObject := &cloudfront.CookieNames{ Items: items, @@ -45,10 +42,7 @@ func expandCloudFrontCachePolicyHeaders(tfMap map[string]interface{}) *cloudfron return nil } - var items []*string - for _, item := range tfMap["items"].(*schema.Set).List() { - items = append(items, aws.String(item.(string))) - } + items := expandStringSet(tfMap["items"].(*schema.Set)) apiObject := &cloudfront.Headers{ Items: items, @@ -79,10 +73,7 @@ func expandCloudFrontCachePolicyQueryStringNames(tfMap map[string]interface{}) * return nil } - var items []*string - for _, queryStringitesm := range tfMap["items"].(*schema.Set).List() { - items = append(items, aws.String(queryStringitesm.(string))) - } + items := expandStringSet(tfMap["items"].(*schema.Set)) apiObject := &cloudfront.QueryStringNames{ Items: items, From 0935bb568171dffefa23c5f1f25c00791e226772 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 10 Feb 2021 15:21:52 -0500 Subject: [PATCH 1075/1212] data-source/iam_policy_document: Add changelog entry --- .changelog/12055.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/12055.txt diff --git a/.changelog/12055.txt b/.changelog/12055.txt new file mode 100644 index 00000000000..bce1ace8c06 --- /dev/null +++ b/.changelog/12055.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +data-source/aws_iam_policy_document: Support merging policy documents by adding `source_policy_documents` and `override_policy_documents` arguments +``` \ No newline at end of file From cfe7de3671a3a6df5d57f78e374c7139e7e08309 Mon Sep 17 00:00:00 2001 From: Dan Lidral-Porter Date: Thu, 4 Feb 2021 11:54:25 -0800 Subject: [PATCH 1076/1212] Clarify that overriding policy document statements can be added to current document if `sid` does not match In the description of `iam_policy_document` data source's `override_json` argument, state specifically that any statements in the override document that have `sid`s that do _not_ match a statement in the current document will be added to the current document, instead of ignored. 
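To make the documented rule concrete, the merge semantics can be sketched as follows. This is a hypothetical illustration of the behavior as described, not the data source's actual implementation; the `statement` type and `mergeBySid` helper exist only for this example.

```go
package main

import "fmt"

// statement is a stand-in for an IAM policy statement, reduced to the
// fields needed to illustrate the Sid-matching rule.
type statement struct {
	Sid    string
	Effect string
}

// mergeBySid applies the documented override rule: an override
// statement whose Sid matches a statement in the current document
// replaces it, while an override statement whose Sid matches nothing
// is added to the result rather than ignored.
func mergeBySid(current, override []statement) []statement {
	merged := make([]statement, len(current))
	copy(merged, current)

	for _, o := range override {
		replaced := false
		if o.Sid != "" {
			for i, c := range merged {
				if c.Sid == o.Sid {
					merged[i] = o // matching Sid: the override wins
					replaced = true
					break
				}
			}
		}
		if !replaced {
			merged = append(merged, o) // non-matching Sid: appended
		}
	}

	return merged
}

func main() {
	current := []statement{{Sid: "SidToOverride", Effect: "Allow"}}
	override := []statement{
		{Sid: "SidToOverride", Effect: "Deny"},
		{Sid: "NonMatchingSid", Effect: "Allow"},
	}

	fmt.Println(mergeBySid(current, override))
	// Output: [{SidToOverride Deny} {NonMatchingSid Allow}]
}
```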
Before this change, there was no description of how non-matching statements in the `override_json` were used, so the behavior was unclear: the opposite and incorrect conclusion that these statements were ignored was equally consistent with the documentation. This behavior is covered by an acceptance test, so from the coverage I'm inferring that this behavior is both intended and supported. --- .../docs/d/iam_policy_document.html.markdown | 98 +++++++++++++++++++ 1 file changed, 98 insertions(+) diff --git a/website/docs/d/iam_policy_document.html.markdown b/website/docs/d/iam_policy_document.html.markdown index 9fc9130e2d8..d1131c39ddb 100644 --- a/website/docs/d/iam_policy_document.html.markdown +++ b/website/docs/d/iam_policy_document.html.markdown @@ -78,6 +78,104 @@ resource "aws_iam_policy" "example" { ### Example Assume-Role Policy with Multiple Principals You can specify multiple principal blocks with different types. You can also use this data source to generate an assume-role policy. +## Argument Reference + +The following arguments are supported: + +* `policy_id` (Optional) - An ID for the policy document. +* `source_json` (Optional) - An IAM policy document to import as a base for the + current policy document. Statements with non-blank `sid`s in the current + policy document will overwrite statements with the same `sid` in the source + json. Statements without an `sid` cannot be overwritten. +* `override_json` (Optional) - An IAM policy document to import and override the + current policy document. Statements with non-blank `sid`s in the override + document will overwrite statements with the same `sid` in the current document. + Statements with non-blank `sid`s in the override document that do not match an + `sid` in the current document will be added to the current document. Statements + without an `sid` cannot be overwritten. +* `statement` (Optional) - A nested configuration block (described below) + configuring one *statement* to be included in the policy document. +* `version` (Optional) - IAM policy document version. Valid values: `2008-10-17`, `2012-10-17`. Defaults to `2012-10-17`. For more information, see the [AWS IAM User Guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_version.html). + +Each document configuration may have one or more `statement` blocks, which +each accept the following arguments: + +* `sid` (Optional) - An ID for the policy statement. +* `effect` (Optional) - Either "Allow" or "Deny", to specify whether this + statement allows or denies the given actions. The default is "Allow". +* `actions` (Optional) - A list of actions that this statement either allows + or denies. For example, ``["ec2:RunInstances", "s3:*"]``. +* `not_actions` (Optional) - A list of actions that this statement does *not* + apply to. Used to apply a policy statement to all actions *except* those + listed. +* `resources` (Optional) - A list of resource ARNs that this statement applies + to. This is required by AWS if used for an IAM policy. +* `not_resources` (Optional) - A list of resource ARNs that this statement + does *not* apply to. Used to apply a policy statement to all resources + *except* those listed. +* `principals` (Optional) - A nested configuration block (described below) + specifying a principal (or principal pattern) to which this statement applies. +* `not_principals` (Optional) - Like `principals` except gives principals that + the statement does *not* apply to. 
+* `condition` (Optional) - A nested configuration block (described below) + that defines a further, possibly-service-specific condition that constrains + whether this statement applies. + +Each policy may have either zero or more `principals` blocks or zero or more +`not_principals` blocks, both of which each accept the following arguments: + +* `type` (Required) The type of principal. For AWS ARNs this is "AWS". For AWS services (e.g. Lambda), this is "Service". For Federated access the type is "Federated". +* `identifiers` (Required) List of identifiers for principals. When `type` + is "AWS", these are IAM user or role ARNs. When `type` is "Service", these are AWS Service roles e.g. `lambda.amazonaws.com`. When `type` is "Federated", these are web identity users or SAML provider ARNs. + +For further examples or information about AWS principals then please refer to the [documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html). + +Each policy statement may have zero or more `condition` blocks, which each +accept the following arguments: + +* `test` (Required) The name of the + [IAM condition operator](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_condition_operators.html) + to evaluate. +* `variable` (Required) The name of a + [Context Variable](http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#AvailableKeys) + to apply the condition to. Context variables may either be standard AWS + variables starting with `aws:`, or service-specific variables prefixed with + the service name. +* `values` (Required) The values to evaluate the condition against. If multiple + values are provided, the condition matches if at least one of them applies. + (That is, the tests are combined with the "OR" boolean operation.) + +When multiple `condition` blocks are provided, they must *all* evaluate to true +for the policy statement to apply. (In other words, the conditions are combined +with the "AND" boolean operation.) + +## Context Variable Interpolation + +The IAM policy document format allows context variables to be interpolated +into various strings within a statement. The native IAM policy document format +uses `${...}`-style syntax that is in conflict with Terraform's interpolation +syntax, so this data source instead uses `&{...}` syntax for interpolations that +should be processed by AWS rather than by Terraform. + +## Wildcard Principal + +In order to define wildcard principal (a.k.a. anonymous user) use `type = "*"` and +`identifiers = ["*"]`. In that case the rendered json will contain `"Principal": "*"`. +Note, that even though the [IAM Documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html) +states that `"Principal": "*"` and `"Principal": {"AWS": "*"}` are equivalent, +those principals have different behavior for IAM Role Trust Policy. Therefore +Terraform will normalize the principal field only in above-mentioned case and principals +like `type = "AWS"` and `identifiers = ["*"]` will be rendered as `"Principal": {"AWS": "*"}`. + +## Attributes Reference + +The following attribute is exported: + +* `json` - The above arguments serialized as a standard JSON policy document. + +## Example with Multiple Principals + +Showing how you can use this as an assume role policy as well as showing how you can specify multiple principal blocks with different types. 
```hcl data "aws_iam_policy_document" "event_stream_bucket_role_assume_role_policy" { From 755ac6faeeac3766ea751deb2e2c859a9bfaf7b1 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 10 Feb 2021 17:10:37 -0500 Subject: [PATCH 1077/1212] docs/iam_policy_document: Clarify overriding --- .../docs/d/iam_policy_document.html.markdown | 109 ++---------------- 1 file changed, 7 insertions(+), 102 deletions(-) diff --git a/website/docs/d/iam_policy_document.html.markdown b/website/docs/d/iam_policy_document.html.markdown index d1131c39ddb..dc815f5b799 100644 --- a/website/docs/d/iam_policy_document.html.markdown +++ b/website/docs/d/iam_policy_document.html.markdown @@ -78,104 +78,6 @@ resource "aws_iam_policy" "example" { ### Example Assume-Role Policy with Multiple Principals You can specify multiple principal blocks with different types. You can also use this data source to generate an assume-role policy. -## Argument Reference - -The following arguments are supported: - -* `policy_id` (Optional) - An ID for the policy document. -* `source_json` (Optional) - An IAM policy document to import as a base for the - current policy document. Statements with non-blank `sid`s in the current - policy document will overwrite statements with the same `sid` in the source - json. Statements without an `sid` cannot be overwritten. -* `override_json` (Optional) - An IAM policy document to import and override the - current policy document. Statements with non-blank `sid`s in the override - document will overwrite statements with the same `sid` in the current document. - Statements with non-blank `sid`s in the override document that do not match an - `sid` in the current document will be added to the current document. Statements - without an `sid` cannot be overwritten. -* `statement` (Optional) - A nested configuration block (described below) - configuring one *statement* to be included in the policy document. -* `version` (Optional) - IAM policy document version. Valid values: `2008-10-17`, `2012-10-17`. Defaults to `2012-10-17`. For more information, see the [AWS IAM User Guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_version.html). - -Each document configuration may have one or more `statement` blocks, which -each accept the following arguments: - -* `sid` (Optional) - An ID for the policy statement. -* `effect` (Optional) - Either "Allow" or "Deny", to specify whether this - statement allows or denies the given actions. The default is "Allow". -* `actions` (Optional) - A list of actions that this statement either allows - or denies. For example, ``["ec2:RunInstances", "s3:*"]``. -* `not_actions` (Optional) - A list of actions that this statement does *not* - apply to. Used to apply a policy statement to all actions *except* those - listed. -* `resources` (Optional) - A list of resource ARNs that this statement applies - to. This is required by AWS if used for an IAM policy. -* `not_resources` (Optional) - A list of resource ARNs that this statement - does *not* apply to. Used to apply a policy statement to all resources - *except* those listed. -* `principals` (Optional) - A nested configuration block (described below) - specifying a principal (or principal pattern) to which this statement applies. -* `not_principals` (Optional) - Like `principals` except gives principals that - the statement does *not* apply to. 
-* `condition` (Optional) - A nested configuration block (described below) - that defines a further, possibly-service-specific condition that constrains - whether this statement applies. - -Each policy statement may have zero or more `principals` blocks and zero or more -`not_principals` blocks, each of which accepts the following arguments: - -* `type` (Required) The type of principal. For AWS ARNs this is "AWS". For AWS services (e.g. Lambda), this is "Service". For Federated access the type is "Federated". -* `identifiers` (Required) List of identifiers for principals. When `type` - is "AWS", these are IAM user or role ARNs. When `type` is "Service", these are AWS Service roles e.g. `lambda.amazonaws.com`. When `type` is "Federated", these are web identity users or SAML provider ARNs. - -For further examples or information about AWS principals, refer to the [documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html). - -Each policy statement may have zero or more `condition` blocks, which each -accept the following arguments: - -* `test` (Required) The name of the - [IAM condition operator](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_condition_operators.html) - to evaluate. -* `variable` (Required) The name of a - [Context Variable](http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#AvailableKeys) - to apply the condition to. Context variables may either be standard AWS - variables starting with `aws:`, or service-specific variables prefixed with - the service name. -* `values` (Required) The values to evaluate the condition against. If multiple - values are provided, the condition matches if at least one of them applies. - (That is, the tests are combined with the "OR" boolean operation.) - -When multiple `condition` blocks are provided, they must *all* evaluate to true -for the policy statement to apply. (In other words, the conditions are combined -with the "AND" boolean operation.) - -## Context Variable Interpolation - -The IAM policy document format allows context variables to be interpolated -into various strings within a statement. The native IAM policy document format -uses `${...}`-style syntax that is in conflict with Terraform's interpolation -syntax, so this data source instead uses `&{...}` syntax for interpolations that -should be processed by AWS rather than by Terraform. - -## Wildcard Principal - -In order to define a wildcard principal (a.k.a. anonymous user), use `type = "*"` and -`identifiers = ["*"]`. In that case the rendered JSON will contain `"Principal": "*"`. -Note that even though the [IAM Documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html) -states that `"Principal": "*"` and `"Principal": {"AWS": "*"}` are equivalent, -those principals behave differently in IAM role trust policies. Therefore -Terraform will normalize the principal field only in the above-mentioned case, and principals -like `type = "AWS"` and `identifiers = ["*"]` will be rendered as `"Principal": {"AWS": "*"}`. - -## Attributes Reference - -The following attribute is exported: - -* `json` - The above arguments serialized as a standard JSON policy document. - -## Example with Multiple Principals - -This example shows how you can use this data source as an assume-role policy, as well as how to specify multiple principal blocks with different types.
```hcl data "aws_iam_policy_document" "event_stream_bucket_role_assume_role_policy" { @@ -519,11 +421,14 @@ data "aws_iam_policy_document" "combined" { The following arguments are optional: -* `override_json` (Optional) - IAM policy document whose statements with non-blank `sid`s will override statements with the same `sid` in the exported document including any defined by the `override_policy_documents` argument. Statements without a `sid` cannot be overridden. -* `override_policy_documents` (Optional) - List of IAM policy documents that are merged together into the exported document, potentially overriding previously defined statements with the same `sid`s. +* `override_json` (Optional) - IAM policy document whose statements with non-blank `sid`s will override statements with the same `sid` from documents assigned to the `source_json`, `source_policy_documents`, and `override_policy_documents` arguments. Non-overriding statements will be added to the exported document. + +~> **NOTE:** Statements without a `sid` cannot be overridden. In other words, a statement without a `sid` from documents assigned to the `source_json` or `source_policy_documents` arguments cannot be overridden by statements from documents assigned to the `override_json` or `override_policy_documents` arguments. + +* `override_policy_documents` (Optional) - List of IAM policy documents that are merged together into the exported document. In merging, statements with non-blank `sid`s will override statements with the same `sid` from earlier documents in the list. Statements with non-blank `sid`s will also override statements with the same `sid` from documents provided in the `source_json` and `source_policy_documents` arguments. Non-overriding statements will be added to the exported document. * `policy_id` (Optional) - ID for the policy document. -* `source_json` (Optional) - IAM policy document used as a base for the exported policy document. -* `source_policy_documents` (Optional) - List of IAM policy documents that are merged together into the exported document. Statements defined in `source_policy_documents` or `source_json` must have unique `sid`s. Override statements with the same `sid` will override source statements. Statements without a `sid` cannot be overridden. +* `source_json` (Optional) - IAM policy document used as a base for the exported policy document. Statements with the same `sid` from documents assigned to the `override_json` and `override_policy_documents` arguments will override source statements. +* `source_policy_documents` (Optional) - List of IAM policy documents that are merged together into the exported document. Statements defined in `source_policy_documents` or `source_json` must have unique `sid`s. Statements with the same `sid` from documents assigned to the `override_json` and `override_policy_documents` arguments will override source statements. * `statement` (Optional) - Configuration block for a policy statement. Detailed below. * `version` (Optional) - IAM policy document version. Valid values are `2008-10-17` and `2012-10-17`. Defaults to `2012-10-17`. For more information, see the [AWS IAM User Guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_version.html). 
From e9a98cf0dad9bad4c17aa7deeb657f7e4066aa6d Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 10 Feb 2021 18:55:21 -0500 Subject: [PATCH 1078/1212] resource/iam_policy_document: Preserve order of condition values --- aws/data_source_aws_iam_policy_document.go | 8 ++++---- aws/data_source_aws_iam_policy_document_test.go | 8 ++++---- aws/iam_policy_model.go | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/aws/data_source_aws_iam_policy_document.go b/aws/data_source_aws_iam_policy_document.go index a248d66539c..16639969d0b 100644 --- a/aws/data_source_aws_iam_policy_document.go +++ b/aws/data_source_aws_iam_policy_document.go @@ -7,6 +7,7 @@ import ( "strconv" "strings" + "github.com/aws/aws-sdk-go/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/hashcode" @@ -69,7 +70,7 @@ func dataSourceAwsIamPolicyDocument() *schema.Resource { Required: true, }, "values": { - Type: schema.TypeSet, + Type: schema.TypeList, Required: true, Elem: &schema.Schema{ Type: schema.TypeString, @@ -310,9 +311,8 @@ func dataSourceAwsIamPolicyDocumentMakeConditions(in []interface{}, version stri Variable: item["variable"].(string), } out[i].Values, err = dataSourceAwsIamPolicyDocumentReplaceVarsInList( - iamPolicyDecodeConfigStringList( - item["values"].(*schema.Set).List(), - ), version, + aws.StringValueSlice(expandStringList(item["values"].([]interface{}))), + version, ) if err != nil { return nil, fmt.Errorf("error reading values: %s", err) diff --git a/aws/data_source_aws_iam_policy_document_test.go b/aws/data_source_aws_iam_policy_document_test.go index 33fcf5ddbc0..8ac7a72c49d 100644 --- a/aws/data_source_aws_iam_policy_document_test.go +++ b/aws/data_source_aws_iam_policy_document_test.go @@ -393,8 +393,8 @@ func testAccAWSIAMPolicyDocumentExpectedJSON() string { "Condition": { "StringLike": { "s3:prefix": [ - "home/${aws:username}/", - "home/" + "home/", + "home/${aws:username}/" ] } } @@ -560,8 +560,8 @@ func testAccAWSIAMPolicyDocumentSourceExpectedJSON() string { "Condition": { "StringLike": { "s3:prefix": [ - "home/${aws:username}/", - "home/" + "home/", + "home/${aws:username}/" ] } } diff --git a/aws/iam_policy_model.go b/aws/iam_policy_model.go index f2240079e95..22c7fb332ee 100644 --- a/aws/iam_policy_model.go +++ b/aws/iam_policy_model.go @@ -170,7 +170,7 @@ func (cs IAMPolicyStatementConditionSet) MarshalJSON() ([]byte, error) { if _, ok := raw[c.Test][c.Variable]; !ok { raw[c.Test][c.Variable] = make([]string, 0, len(i)) } - sort.Sort(sort.Reverse(sort.StringSlice(i))) + // order matters with values so not sorting here raw[c.Test][c.Variable] = append(raw[c.Test][c.Variable].([]string), i...) 
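+ // Because this patch declares condition values as schema.TypeList rather
+ // than schema.TypeSet, the slice arrives here in the order the user wrote
+ // it, and that order is emitted verbatim in the rendered policy JSON.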
case string: raw[c.Test][c.Variable] = i From cc460944520af86daab54041e7c4e968f9f3cd8d Mon Sep 17 00:00:00 2001 From: bill-rich Date: Wed, 10 Feb 2021 15:14:50 -0800 Subject: [PATCH 1079/1212] Add default and ordered policy tests --- ...nt_distribution_configuration_structure.go | 15 +- ...stribution_configuration_structure_test.go | 1 + aws/resource_aws_cloudfront_distribution.go | 8 +- ...source_aws_cloudfront_distribution_test.go | 155 +++++++++++++++++- .../r/cloudfront_distribution.html.markdown | 5 +- 5 files changed, 166 insertions(+), 18 deletions(-) diff --git a/aws/cloudfront_distribution_configuration_structure.go b/aws/cloudfront_distribution_configuration_structure.go index 91f2a85f67e..59203dc1697 100644 --- a/aws/cloudfront_distribution_configuration_structure.go +++ b/aws/cloudfront_distribution_configuration_structure.go @@ -189,17 +189,24 @@ func flattenCacheBehaviors(cbs *cloudfront.CacheBehaviors) []interface{} { func expandCloudFrontDefaultCacheBehavior(m map[string]interface{}) *cloudfront.DefaultCacheBehavior { dcb := &cloudfront.DefaultCacheBehavior{ + CachePolicyId: aws.String(m["cache_policy_id"].(string)), Compress: aws.Bool(m["compress"].(bool)), - DefaultTTL: aws.Int64(int64(m["default_ttl"].(int))), FieldLevelEncryptionId: aws.String(m["field_level_encryption_id"].(string)), - ForwardedValues: expandForwardedValues(m["forwarded_values"].([]interface{})[0].(map[string]interface{})), - MaxTTL: aws.Int64(int64(m["max_ttl"].(int))), - MinTTL: aws.Int64(int64(m["min_ttl"].(int))), OriginRequestPolicyId: aws.String(m["origin_request_policy_id"].(string)), TargetOriginId: aws.String(m["target_origin_id"].(string)), ViewerProtocolPolicy: aws.String(m["viewer_protocol_policy"].(string)), } + if forwardedValuesFlat, ok := m["forwarded_values"].([]interface{}); ok && len(forwardedValuesFlat) == 1 { + dcb.ForwardedValues = expandForwardedValues(m["forwarded_values"].([]interface{})[0].(map[string]interface{})) + } + + if m["cache_policy_id"].(string) == "" { + dcb.MinTTL = aws.Int64(int64(m["min_ttl"].(int))) + dcb.MaxTTL = aws.Int64(int64(m["max_ttl"].(int))) + dcb.DefaultTTL = aws.Int64(int64(m["default_ttl"].(int))) + } + if v, ok := m["trusted_signers"]; ok { dcb.TrustedSigners = expandTrustedSigners(v.([]interface{})) } else { diff --git a/aws/cloudfront_distribution_configuration_structure_test.go b/aws/cloudfront_distribution_configuration_structure_test.go index 61659ef7577..481a7a7e14e 100644 --- a/aws/cloudfront_distribution_configuration_structure_test.go +++ b/aws/cloudfront_distribution_configuration_structure_test.go @@ -12,6 +12,7 @@ import ( func defaultCacheBehaviorConf() map[string]interface{} { return map[string]interface{}{ "viewer_protocol_policy": "allow-all", + "cache_policy_id": "", "target_origin_id": "myS3Origin", "forwarded_values": []interface{}{forwardedValuesConf()}, "min_ttl": 0, diff --git a/aws/resource_aws_cloudfront_distribution.go b/aws/resource_aws_cloudfront_distribution.go index 2ab3c96aa17..42ea527548c 100644 --- a/aws/resource_aws_cloudfront_distribution.go +++ b/aws/resource_aws_cloudfront_distribution.go @@ -80,6 +80,7 @@ func resourceAwsCloudFrontDistribution() *schema.Resource { Type: schema.TypeList, Optional: true, MaxItems: 1, + Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "cookies": { @@ -242,7 +243,7 @@ func resourceAwsCloudFrontDistribution() *schema.Resource { "default_ttl": { Type: schema.TypeInt, Optional: true, - Default: 86400, + Computed: true, }, "field_level_encryption_id": { Type: 
schema.TypeString, @@ -250,7 +251,8 @@ func resourceAwsCloudFrontDistribution() *schema.Resource { }, "forwarded_values": { Type: schema.TypeList, - Required: true, + Optional: true, + Computed: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -323,7 +325,7 @@ func resourceAwsCloudFrontDistribution() *schema.Resource { "max_ttl": { Type: schema.TypeInt, Optional: true, - Default: 31536000, + Computed: true, }, "min_ttl": { Type: schema.TypeInt, diff --git a/aws/resource_aws_cloudfront_distribution_test.go b/aws/resource_aws_cloudfront_distribution_test.go index 8d210f69073..e53a97c15ef 100644 --- a/aws/resource_aws_cloudfront_distribution_test.go +++ b/aws/resource_aws_cloudfront_distribution_test.go @@ -212,14 +212,14 @@ func TestAccAWSCloudFrontDistribution_customOrigin(t *testing.T) { }) } -func TestAccAWSCloudFrontDistribution_originPolicy(t *testing.T) { +func TestAccAWSCloudFrontDistribution_originPolicyDefault(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontDistributionDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSCloudFrontDistributionOriginRequestPolicyConfig, + Config: testAccAWSCloudFrontDistributionOriginRequestPolicyConfigDefault, Check: resource.ComposeTestCheckFunc( resource.TestMatchResourceAttr("aws_cloudfront_distribution.custom_distribution", "default_cache_behavior.0.origin_request_policy_id", regexp.MustCompile("[A-z0-9]+")), ), @@ -237,6 +237,31 @@ func TestAccAWSCloudFrontDistribution_originPolicy(t *testing.T) { }) } +func TestAccAWSCloudFrontDistribution_originPolicyOrdered(t *testing.T) { + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(cloudfront.EndpointsID, t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudFrontDistributionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCloudFrontDistributionOriginRequestPolicyConfigOrdered, + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("aws_cloudfront_distribution.custom_distribution", "ordered_cache_behavior.0.origin_request_policy_id", regexp.MustCompile("[A-z0-9]+")), + ), + }, + { + ResourceName: "aws_cloudfront_distribution.custom_distribution", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "retain_on_delete", + "wait_for_deployment", + }, + }, + }, + }) +} + // TestAccAWSCloudFrontDistribution_multiOrigin runs an // aws_cloudfront_distribution acceptance test with multiple origins. 
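The schema changes in this patch make `forwarded_values` optional and the TTL arguments computed, so a cache behavior that attaches a cache policy can omit them entirely. Here is a minimal sketch of that shape; the resource names, origin domain, and policy settings are illustrative and not taken from the acceptance tests:

```hcl
resource "aws_cloudfront_cache_policy" "example" {
  name        = "example-cache-policy"
  default_ttl = 50
  max_ttl     = 100
  min_ttl     = 1

  parameters_in_cache_key_and_forwarded_to_origin {
    cookies_config {
      cookie_behavior = "none"
    }
    headers_config {
      header_behavior = "none"
    }
    query_strings_config {
      query_string_behavior = "none"
    }
  }
}

resource "aws_cloudfront_distribution" "example" {
  enabled = true

  origin {
    domain_name = "www.example.com"
    origin_id   = "myCustomOrigin"

    custom_origin_config {
      http_port              = 80
      https_port             = 443
      origin_protocol_policy = "http-only"
      origin_ssl_protocols   = ["TLSv1.2"]
    }
  }

  default_cache_behavior {
    allowed_methods        = ["GET", "HEAD"]
    cached_methods         = ["GET", "HEAD"]
    target_origin_id       = "myCustomOrigin"
    viewer_protocol_policy = "allow-all"

    # With a cache policy attached, forwarded_values and the
    # default_ttl/max_ttl/min_ttl arguments are omitted; the policy
    # supplies the TTLs and the cache key settings.
    cache_policy_id = aws_cloudfront_cache_policy.example.id
  }

  restrictions {
    geo_restriction {
      restriction_type = "none"
    }
  }

  viewer_certificate {
    cloudfront_default_certificate = true
  }
}
```

This matches the expansion logic above: when `cache_policy_id` is set, the provider leaves `MinTTL`, `MaxTTL`, and `DefaultTTL` unset and sends no `ForwardedValues` in the API request.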
// @@ -391,7 +416,6 @@ func TestAccAWSCloudFrontDistribution_noOptionalItemsConfig(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "default_cache_behavior.0.allowed_methods.#", "7"), resource.TestCheckResourceAttr(resourceName, "default_cache_behavior.0.cached_methods.#", "2"), resource.TestCheckResourceAttr(resourceName, "default_cache_behavior.0.compress", "false"), - resource.TestCheckResourceAttr(resourceName, "default_cache_behavior.0.default_ttl", "86400"), resource.TestCheckResourceAttr(resourceName, "default_cache_behavior.0.forwarded_values.#", "1"), resource.TestCheckResourceAttr(resourceName, "default_cache_behavior.0.forwarded_values.0.cookies.#", "1"), resource.TestCheckResourceAttr(resourceName, "default_cache_behavior.0.forwarded_values.0.cookies.0.forward", "all"), @@ -400,7 +424,6 @@ func TestAccAWSCloudFrontDistribution_noOptionalItemsConfig(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "default_cache_behavior.0.forwarded_values.0.query_string", "false"), resource.TestCheckResourceAttr(resourceName, "default_cache_behavior.0.forwarded_values.0.query_string_cache_keys.#", "0"), resource.TestCheckResourceAttr(resourceName, "default_cache_behavior.0.lambda_function_association.#", "0"), - resource.TestCheckResourceAttr(resourceName, "default_cache_behavior.0.max_ttl", "31536000"), resource.TestCheckResourceAttr(resourceName, "default_cache_behavior.0.min_ttl", "0"), resource.TestCheckResourceAttr(resourceName, "default_cache_behavior.0.smooth_streaming", "false"), resource.TestCheckResourceAttr(resourceName, "default_cache_behavior.0.target_origin_id", "myCustomOrigin"), @@ -1417,7 +1440,7 @@ resource "aws_cloudfront_distribution" "custom_distribution" { } `, acctest.RandInt(), logBucket, testAccAWSCloudFrontDistributionRetainConfig()) -var testAccAWSCloudFrontDistributionOriginRequestPolicyConfig = fmt.Sprintf(` +var testAccAWSCloudFrontDistributionOriginRequestPolicyConfigDefault = fmt.Sprintf(` variable rand_id { default = %[1]d } @@ -1510,13 +1533,129 @@ resource "aws_cloudfront_distribution" "custom_distribution" { origin_request_policy_id = aws_cloudfront_origin_request_policy.test_policy.id cache_policy_id = aws_cloudfront_cache_policy.example.id - forwarded_values { - query_string = false + viewer_protocol_policy = "allow-all" + } + + price_class = "PriceClass_200" + restrictions { + geo_restriction { + restriction_type = "whitelist" + locations = ["US", "CA", "GB", "DE"] + } + } + + viewer_certificate { + cloudfront_default_certificate = true + } +} +`, acctest.RandInt(), logBucket, testAccAWSCloudFrontDistributionRetainConfig()) + +var testAccAWSCloudFrontDistributionOriginRequestPolicyConfigOrdered = fmt.Sprintf(` +variable rand_id { + default = %[1]d +} + +# log bucket +%[2]s + +resource "aws_cloudfront_cache_policy" "example" { + name = "test-policy%[1]d" + comment = "test comment" + default_ttl = 50 + max_ttl = 100 + min_ttl = 1 + parameters_in_cache_key_and_forwarded_to_origin { + cookies_config { + cookie_behavior = "whitelist" cookies { - forward = "all" + items = ["test"] + } + } + headers_config { + header_behavior = "whitelist" + headers { + items = ["test"] + } + } + query_strings_config { + query_string_behavior = "whitelist" + query_strings { + items = ["test"] } } + } +} + +resource "aws_cloudfront_origin_request_policy" "test_policy" { + name = "test-policy%[1]d" + comment = "test comment" + cookies_config { + cookie_behavior = "whitelist" + cookies { + items = ["test"] + } + } + headers_config { + header_behavior = 
"whitelist" + headers { + items = ["test"] + } + } + query_strings_config { + query_string_behavior = "whitelist" + query_strings { + items = ["test"] + } + } +} + +resource "aws_cloudfront_distribution" "custom_distribution" { + origin { + domain_name = "www.example.com" + origin_id = "myCustomOrigin" + + custom_origin_config { + http_port = 80 + https_port = 443 + origin_protocol_policy = "http-only" + origin_ssl_protocols = ["SSLv3", "TLSv1"] + origin_read_timeout = 30 + origin_keepalive_timeout = 5 + } + } + + enabled = true + comment = "Some comment" + default_root_object = "index.html" + + logging_config { + include_cookies = false + bucket = "${aws_s3_bucket.s3_bucket_logs.id}.s3.amazonaws.com" + prefix = "myprefix" + } + + default_cache_behavior { + allowed_methods = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"] + cached_methods = ["GET", "HEAD"] + target_origin_id = "myCustomOrigin" + smooth_streaming = false + + origin_request_policy_id = aws_cloudfront_origin_request_policy.test_policy.id + cache_policy_id = aws_cloudfront_cache_policy.example.id + + viewer_protocol_policy = "allow-all" + } + + ordered_cache_behavior { + allowed_methods = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"] + cached_methods = ["GET", "HEAD"] + target_origin_id = "myCustomOrigin" + smooth_streaming = false + path_pattern = "/*" + + origin_request_policy_id = aws_cloudfront_origin_request_policy.test_policy.id + cache_policy_id = aws_cloudfront_cache_policy.example.id viewer_protocol_policy = "allow-all" } diff --git a/website/docs/r/cloudfront_distribution.html.markdown b/website/docs/r/cloudfront_distribution.html.markdown index 1fcf48d7d19..084362a2221 100644 --- a/website/docs/r/cloudfront_distribution.html.markdown +++ b/website/docs/r/cloudfront_distribution.html.markdown @@ -280,8 +280,7 @@ of several sub-resources - these resources are laid out below. * `default_ttl` (Optional) - The default amount of time (in seconds) that an object is in a CloudFront cache before CloudFront forwards another request - in the absence of an `Cache-Control max-age` or `Expires` header. Defaults to - 1 day. + in the absence of an `Cache-Control max-age` or `Expires` header. * `field_level_encryption_id` (Optional) - Field level encryption configuration ID @@ -295,7 +294,7 @@ of several sub-resources - these resources are laid out below. object is in a CloudFront cache before CloudFront forwards another request to your origin to determine whether the object has been updated. Only effective in the presence of `Cache-Control max-age`, `Cache-Control - s-maxage`, and `Expires` headers. Defaults to 365 days. + s-maxage`, and `Expires` headers. 
* `min_ttl` (Optional) - The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront queries your origin to see From 76758ea2853d03fbe1f1f5346ef246a9a24f2ba8 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 11 Feb 2021 02:12:38 -0500 Subject: [PATCH 1080/1212] CR updates including documentation, acctest coverage, and new error handling --- aws/configservice.go | 198 ++++--- aws/resource_aws_config_conformance_pack.go | 262 +++++---- ...source_aws_config_conformance_pack_test.go | 502 ++++++++++++++---- aws/resource_aws_config_test.go | 16 +- .../r/config_conformance_pack.html.markdown | 82 ++- 5 files changed, 755 insertions(+), 305 deletions(-) diff --git a/aws/configservice.go b/aws/configservice.go index 21431d24907..bfae8a9c56a 100644 --- a/aws/configservice.go +++ b/aws/configservice.go @@ -7,9 +7,78 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/configservice" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) +const ( + ConfigConformancePackCreateTimeout = 5 * time.Minute + ConfigConformancePackDeleteTimeout = 5 * time.Minute + + ConfigConformancePackStatusNotFound = "NotFound" + ConfigConformancePackStatusUnknown = "Unknown" +) + +func configDescribeConformancePack(conn *configservice.ConfigService, name string) (*configservice.ConformancePackDetail, error) { + input := &configservice.DescribeConformancePacksInput{ + ConformancePackNames: []*string{aws.String(name)}, + } + + for { + output, err := conn.DescribeConformancePacks(input) + + if err != nil { + return nil, err + } + + for _, pack := range output.ConformancePackDetails { + if pack == nil { + continue + } + + if aws.StringValue(pack.ConformancePackName) == name { + return pack, nil + } + } + + if aws.StringValue(output.NextToken) == "" { + break + } + + input.NextToken = output.NextToken + } + + return nil, nil +} + +func configDescribeConformancePackStatus(conn *configservice.ConfigService, name string) (*configservice.ConformancePackStatusDetail, error) { + input := &configservice.DescribeConformancePackStatusInput{ + ConformancePackNames: []*string{aws.String(name)}, + } + + for { + output, err := conn.DescribeConformancePackStatus(input) + + if err != nil { + return nil, err + } + + for _, status := range output.ConformancePackStatusDetails { + if aws.StringValue(status.ConformancePackName) == name { + return status, nil + } + } + + if aws.StringValue(output.NextToken) == "" { + break + } + + input.NextToken = output.NextToken + } + + return nil, nil +} + func configDescribeOrganizationConfigRule(conn *configservice.ConfigService, name string) (*configservice.OrganizationConfigRule, error) { input := &configservice.DescribeOrganizationConfigRulesInput{ OrganizationConfigRuleNames: []*string{aws.String(name)}, @@ -94,6 +163,26 @@ func configGetOrganizationConfigRuleDetailedStatus(conn *configservice.ConfigSer return statuses, nil } +func configRefreshConformancePackStatus(conn *configservice.ConfigService, name string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + status, err := configDescribeConformancePackStatus(conn, name) + + if err != nil { + return nil, ConfigConformancePackStatusUnknown, err + } + + if status == nil { + return nil, ConfigConformancePackStatusNotFound, nil + } + + if errMsg := aws.StringValue(status.ConformancePackStatusReason); errMsg != "" { + return status, aws.StringValue(status.ConformancePackState), fmt.Errorf(errMsg) + } + 
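+ // Otherwise report the pack's current state so the StateChangeConf waiters
+ // below can match it against their Pending and Target lists.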
+ return status, aws.StringValue(status.ConformancePackState), nil + } +} + func configRefreshOrganizationConfigRuleStatus(conn *configservice.ConfigService, name string) resource.StateRefreshFunc { return func() (interface{}, string, error) { status, err := configDescribeOrganizationConfigRuleStatus(conn, name) @@ -132,6 +221,41 @@ func configRefreshOrganizationConfigRuleStatus(conn *configservice.ConfigService } } +func configWaitForConformancePackStateCreateComplete(conn *configservice.ConfigService, name string) error { + stateChangeConf := resource.StateChangeConf{ + Pending: []string{configservice.ConformancePackStateCreateInProgress}, + Target: []string{configservice.ConformancePackStateCreateComplete}, + Timeout: ConfigConformancePackCreateTimeout, + Refresh: configRefreshConformancePackStatus(conn, name), + } + + _, err := stateChangeConf.WaitForState() + + if tfawserr.ErrCodeEquals(err, configservice.ErrCodeNoSuchConformancePackException) { + return nil + } + + return err + +} + +func configWaitForConformancePackStateDeleteComplete(conn *configservice.ConfigService, name string) error { + stateChangeConf := resource.StateChangeConf{ + Pending: []string{configservice.ConformancePackStateDeleteInProgress}, + Target: []string{}, + Timeout: ConfigConformancePackDeleteTimeout, + Refresh: configRefreshConformancePackStatus(conn, name), + } + + _, err := stateChangeConf.WaitForState() + + if tfawserr.ErrCodeEquals(err, configservice.ErrCodeNoSuchConformancePackException) { + return nil + } + + return err +} + func configWaitForOrganizationRuleStatusCreateSuccessful(conn *configservice.ConfigService, name string, timeout time.Duration) error { stateChangeConf := &resource.StateChangeConf{ Pending: []string{configservice.OrganizationRuleStatusCreateInProgress}, @@ -177,77 +301,3 @@ func configWaitForOrganizationRuleStatusUpdateSuccessful(conn *configservice.Con return err } - -func configDescribeConformancePack(conn *configservice.ConfigService, name string) (*configservice.ConformancePackDetail, error) { - input := &configservice.DescribeConformancePacksInput{ - ConformancePackNames: []*string{aws.String(name)}, - } - - for { - output, err := conn.DescribeConformancePacks(input) - - if err != nil { - return nil, err - } - - for _, pack := range output.ConformancePackDetails { - if aws.StringValue(pack.ConformancePackName) == name { - return pack, nil - } - } - - if aws.StringValue(output.NextToken) == "" { - break - } - - input.NextToken = output.NextToken - } - - return nil, nil -} - -func configDescribeConformancePackStatus(conn *configservice.ConfigService, name string) (*configservice.ConformancePackStatusDetail, error) { - input := &configservice.DescribeConformancePackStatusInput{ - ConformancePackNames: []*string{aws.String(name)}, - } - - for { - output, err := conn.DescribeConformancePackStatus(input) - - if err != nil { - return nil, err - } - - for _, status := range output.ConformancePackStatusDetails { - if aws.StringValue(status.ConformancePackName) == name { - return status, nil - } - } - - if aws.StringValue(output.NextToken) == "" { - break - } - - input.NextToken = output.NextToken - } - - return nil, nil -} - -func expandConfigConformancePackParameters(m map[string]interface{}) (params []*configservice.ConformancePackInputParameter) { - for k, v := range m { - params = append(params, &configservice.ConformancePackInputParameter{ - ParameterName: aws.String(k), - ParameterValue: aws.String(v.(string)), - }) - } - return -} - -func 
flattenConformancePackInputParameters(parameters []*configservice.ConformancePackInputParameter) (m map[string]string) { - m = make(map[string]string) - for _, p := range parameters { - m[*p.ParameterName] = *p.ParameterValue - } - return -} diff --git a/aws/resource_aws_config_conformance_pack.go b/aws/resource_aws_config_conformance_pack.go index e51a013b00b..07177c873a9 100644 --- a/aws/resource_aws_config_conformance_pack.go +++ b/aws/resource_aws_config_conformance_pack.go @@ -4,14 +4,14 @@ import ( "fmt" "log" "regexp" - "time" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/configservice" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" ) func resourceAwsConfigConformancePack() *schema.Resource { @@ -26,40 +26,15 @@ func resourceAwsConfigConformancePack() *schema.Resource { }, Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 51200), - validation.StringMatch(regexp.MustCompile(`^[a-zA-Z][-a-zA-Z0-9]*$`), "must be a valid conformance pack name"), - ), - }, "arn": { Type: schema.TypeString, Computed: true, }, - "template_s3_uri": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(1, 256), - }, - "template_body": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 51200), - validateStringIsJsonOrYaml), - StateFunc: func(v interface{}) string { - template, _ := normalizeJsonOrYamlString(v) - return template - }, - }, "delivery_s3_bucket": { Type: schema.TypeString, Optional: true, ValidateFunc: validation.All( validation.StringLenBetween(1, 63), - validation.StringMatch(regexp.MustCompile("awsconfigconforms.+"), "must start with 'awsconfigconforms'"), ), }, "delivery_s3_key_prefix": { @@ -67,10 +42,50 @@ func resourceAwsConfigConformancePack() *schema.Resource { Optional: true, ValidateFunc: validation.StringLenBetween(1, 1024), }, - "input_parameters": { - Type: schema.TypeMap, - Elem: &schema.Schema{Type: schema.TypeString}, + "input_parameter": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 60, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "parameter_name": { + Type: schema.TypeString, + Required: true, + }, + "parameter_value": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 256), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z]`), "must begin with alphabetic character"), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9-]+$`), "must contain only alphanumeric and hyphen characters")), + }, + "template_body": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: suppressEquivalentJsonOrYamlDiffs, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 51200), + validateStringIsJsonOrYaml, + ), + AtLeastOneOf: []string{"template_body", "template_s3_uri"}, + }, + "template_s3_uri": { + Type: schema.TypeString, Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 1024), + 
validation.StringMatch(regexp.MustCompile(`^s3://`), "must begin with s3://"), + ), + AtLeastOneOf: []string{"template_s3_uri", "template_body"}, }, }, } @@ -80,6 +95,7 @@ func resourceAwsConfigConformancePackPut(d *schema.ResourceData, meta interface{ conn := meta.(*AWSClient).configconn name := d.Get("name").(string) + input := configservice.PutConformancePackInput{ ConformancePackName: aws.String(name), } @@ -87,107 +103,69 @@ func resourceAwsConfigConformancePackPut(d *schema.ResourceData, meta interface{ if v, ok := d.GetOk("delivery_s3_bucket"); ok { input.DeliveryS3Bucket = aws.String(v.(string)) } + if v, ok := d.GetOk("delivery_s3_key_prefix"); ok { input.DeliveryS3KeyPrefix = aws.String(v.(string)) } - if v, ok := d.GetOk("input_parameters"); ok { - input.ConformancePackInputParameters = expandConfigConformancePackParameters(v.(map[string]interface{})) + + if v, ok := d.GetOk("input_parameter"); ok { + input.ConformancePackInputParameters = expandConfigConformancePackInputParameters(v.(*schema.Set).List()) } + if v, ok := d.GetOk("template_body"); ok { input.TemplateBody = aws.String(v.(string)) } + if v, ok := d.GetOk("template_s3_uri"); ok { input.TemplateS3Uri = aws.String(v.(string)) } _, err := conn.PutConformancePack(&input) if err != nil { - return fmt.Errorf("failed to put AWSConfig conformance pack %q: %s", name, err) + return fmt.Errorf("error creating Config Conformance Pack (%s): %w", name, err) } d.SetId(name) - conf := resource.StateChangeConf{ - Pending: []string{ - configservice.ConformancePackStateCreateInProgress, - }, - Target: []string{ - configservice.ConformancePackStateCreateComplete, - }, - Timeout: 30 * time.Minute, - Refresh: refreshConformancePackStatus(d, conn), - } - if _, err := conf.WaitForState(); err != nil { - return err - } - return resourceAwsConfigConformancePackRead(d, meta) -} -func refreshConformancePackStatus(d *schema.ResourceData, conn *configservice.ConfigService) func() (interface{}, string, error) { - return func() (interface{}, string, error) { - out, err := conn.DescribeConformancePackStatus(&configservice.DescribeConformancePackStatusInput{ - ConformancePackNames: []*string{aws.String(d.Id())}, - }) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && isAWSErr(awsErr, configservice.ErrCodeNoSuchConformancePackException, "") { - return 42, "", nil - } - return 42, "", fmt.Errorf("failed to describe conformance pack %q: %s", d.Id(), err) - } - if len(out.ConformancePackStatusDetails) < 1 { - return 42, "", nil - } - status := out.ConformancePackStatusDetails[0] - return out, *status.ConformancePackState, nil + if err := configWaitForConformancePackStateCreateComplete(conn, d.Id()); err != nil { + return fmt.Errorf("error waiting for Config Conformance Pack (%s) to be created: %w", d.Id(), err) } + + return resourceAwsConfigConformancePackRead(d, meta) } func resourceAwsConfigConformancePackRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).configconn - out, err := conn.DescribeConformancePacks(&configservice.DescribeConformancePacksInput{ - ConformancePackNames: []*string{aws.String(d.Id())}, - }) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok && isAWSErr(err, configservice.ErrCodeNoSuchConformancePackException, "") { - log.Printf("[WARN] Conformance Pack %q is gone (%s)", d.Id(), awsErr.Code()) - d.SetId("") - return nil - } - return err - } + pack, err := configDescribeConformancePack(conn, d.Id()) - numberOfPacks := len(out.ConformancePackDetails) - if numberOfPacks < 1 { - 
log.Printf("[WARN] Conformance Pack %q is gone (no packs found)", d.Id()) + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, configservice.ErrCodeNoSuchConformancePackException) { + log.Printf("[WARN] Config Conformance Pack (%s) not found, removing from state", d.Id()) d.SetId("") return nil } - if numberOfPacks > 1 { - return fmt.Errorf("expected exactly 1 conformance pack, received %d: %#v", - numberOfPacks, out.ConformancePackDetails) + if err != nil { + return fmt.Errorf("error describing Config Conformance Pack (%s): %w", d.Id(), err) } - log.Printf("[DEBUG] AWS Config conformance packs received: %s", out) + if pack == nil { + if d.IsNewResource() { + return fmt.Errorf("error describing Config Conformance Pack (%s): not found", d.Id()) + } - pack := out.ConformancePackDetails[0] - if err = d.Set("arn", pack.ConformancePackArn); err != nil { - return err - } - if err = d.Set("name", pack.ConformancePackName); err != nil { - return err - } - if err = d.Set("delivery_s3_bucket", pack.DeliveryS3Bucket); err != nil { - return err - } - if err = d.Set("delivery_s3_key_prefix", pack.DeliveryS3KeyPrefix); err != nil { - return err + log.Printf("[WARN] Config Conformance Pack (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil } - if pack.ConformancePackInputParameters != nil { - if err = d.Set("input_parameters", flattenConformancePackInputParameters(pack.ConformancePackInputParameters)); err != nil { - return err - } + d.Set("arn", pack.ConformancePackArn) + d.Set("delivery_s3_bucket", pack.DeliveryS3Bucket) + d.Set("delivery_s3_key_prefix", pack.DeliveryS3KeyPrefix) + d.Set("name", pack.ConformancePackName) + + if err = d.Set("input_parameter", flattenConfigConformancePackInputParameters(pack.ConformancePackInputParameters)); err != nil { + return fmt.Errorf("error setting input_parameter: %w", err) } return nil @@ -196,43 +174,91 @@ func resourceAwsConfigConformancePackRead(d *schema.ResourceData, meta interface func resourceAwsConfigConformancePackDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).configconn - name := d.Get("name").(string) - - log.Printf("[DEBUG] Deleting AWS Config conformance pack %q", name) input := &configservice.DeleteConformancePackInput{ - ConformancePackName: aws.String(name), + ConformancePackName: aws.String(d.Id()), } - err := resource.Retry(30*time.Minute, func() *resource.RetryError { + + err := resource.Retry(ConfigConformancePackDeleteTimeout, func() *resource.RetryError { _, err := conn.DeleteConformancePack(input) + if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceInUseException" { + if tfawserr.ErrCodeEquals(err, configservice.ErrCodeResourceInUseException) { return resource.RetryableError(err) } + return resource.NonRetryableError(err) } + return nil }) - if isResourceTimeoutError(err) { + + if tfresource.TimedOut(err) { _, err = conn.DeleteConformancePack(input) } + if err != nil { - return fmt.Errorf("deleting conformance pack failed: %s", err) + if tfawserr.ErrCodeEquals(err, configservice.ErrCodeNoSuchConformancePackException) { + return nil + } + + return fmt.Errorf("erorr deleting Config Conformance Pack (%s): %w", d.Id(), err) } - conf := resource.StateChangeConf{ - Pending: []string{ - configservice.ConformancePackStateDeleteInProgress, - }, - Target: []string{""}, - Timeout: 30 * time.Minute, - Refresh: refreshConformancePackStatus(d, conn), + if err := configWaitForConformancePackStateDeleteComplete(conn, d.Id()); err != nil { + return 
fmt.Errorf("error waiting for Config Conformance Pack (%s) to be deleted: %w", d.Id(), err) } - _, err = conf.WaitForState() - if err != nil { - return err + + return nil +} + +func expandConfigConformancePackInputParameters(l []interface{}) []*configservice.ConformancePackInputParameter { + if len(l) == 0 || l[0] == nil { + return nil } - log.Printf("[DEBUG] AWS conformance pack %q deleted", name) + params := make([]*configservice.ConformancePackInputParameter, 0, len(l)) - return nil + for _, v := range l { + tfMap, ok := v.(map[string]interface{}) + if !ok { + continue + } + + param := &configservice.ConformancePackInputParameter{} + + if name, ok := tfMap["parameter_name"].(string); ok && name != "" { + param.ParameterName = aws.String(name) + } + + if value, ok := tfMap["parameter_value"].(string); ok && value != "" { + param.ParameterValue = aws.String(value) + } + + params = append(params, param) + } + + return params +} + +func flattenConfigConformancePackInputParameters(parameters []*configservice.ConformancePackInputParameter) []interface{} { + if parameters == nil { + return nil + } + + params := make([]interface{}, 0, len(parameters)) + + for _, p := range parameters { + if p == nil { + continue + } + + param := map[string]interface{}{ + "parameter_name": aws.StringValue(p.ParameterName), + "parameter_value": aws.StringValue(p.ParameterValue), + } + + params = append(params, param) + } + + return params } diff --git a/aws/resource_aws_config_conformance_pack_test.go b/aws/resource_aws_config_conformance_pack_test.go index 63cecfb863e..e771be14bd0 100644 --- a/aws/resource_aws_config_conformance_pack_test.go +++ b/aws/resource_aws_config_conformance_pack_test.go @@ -5,7 +5,9 @@ import ( "regexp" "testing" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/configservice" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" @@ -14,7 +16,6 @@ import ( func testAccConfigConformancePack_basic(t *testing.T) { var pack configservice.ConformancePackDetail rName := acctest.RandomWithPrefix("tf-acc-test") - rId := "IAM_PASSWORD_POLICY" resourceName := "aws_config_conformance_pack.test" resource.Test(t, resource.TestCase{ @@ -23,22 +24,64 @@ func testAccConfigConformancePack_basic(t *testing.T) { CheckDestroy: testAccCheckConfigConformancePackDestroy, Steps: []resource.TestStep{ { - Config: testAccConfigConformancePackConfigRuleIdentifier(rName, rId), + Config: testAccConfigConformancePackBasicConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckConfigConformancePackExists(resourceName, &pack), testAccMatchResourceAttrRegionalARN(resourceName, "arn", "config", regexp.MustCompile(fmt.Sprintf("conformance-pack/%s/.+", rName))), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "delivery_s3_bucket", ""), resource.TestCheckResourceAttr(resourceName, "delivery_s3_key_prefix", ""), - resource.TestCheckNoResourceAttr(resourceName, "input_parameters"), - testAccCheckConfigConformancePackSuccessful(resourceName), + resource.TestCheckResourceAttr(resourceName, "input_parameter.#", "0"), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"template_body"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + 
"template_body", + }, + }, + }, + }) +} + +func testAccConfigConformancePack_forceNew(t *testing.T) { + var before, after configservice.ConformancePackDetail + rName := acctest.RandomWithPrefix("tf-acc-test") + rNameUpdated := acctest.RandomWithPrefix("tf-acc-test-update") + resourceName := "aws_config_conformance_pack.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckConfigConformancePackDestroy, + Steps: []resource.TestStep{ + { + Config: testAccConfigConformancePackBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckConfigConformancePackExists(resourceName, &before), + ), + }, + { + Config: testAccConfigConformancePackBasicConfig(rNameUpdated), + Check: resource.ComposeTestCheckFunc( + testAccCheckConfigConformancePackExists(resourceName, &after), + testAccCheckConfigConformancePackRecreated(&before, &after), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "config", regexp.MustCompile(fmt.Sprintf("conformance-pack/%s/.+", rNameUpdated))), + resource.TestCheckResourceAttr(resourceName, "name", rNameUpdated), + resource.TestCheckResourceAttr(resourceName, "delivery_s3_bucket", ""), + resource.TestCheckResourceAttr(resourceName, "delivery_s3_key_prefix", ""), + resource.TestCheckResourceAttr(resourceName, "input_parameter.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "template_body", + }, }, }, }) @@ -55,7 +98,7 @@ func testAccConfigConformancePack_disappears(t *testing.T) { CheckDestroy: testAccCheckConfigConformancePackDestroy, Steps: []resource.TestStep{ { - Config: testAccConfigConformancePackConfigRuleIdentifier(rName, "IAM_PASSWORD_POLICY"), + Config: testAccConfigConformancePackBasicConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckConfigConformancePackExists(resourceName, &pack), testAccCheckResourceDisappears(testAccProvider, resourceAwsConfigConformancePack(), resourceName), @@ -66,12 +109,9 @@ func testAccConfigConformancePack_disappears(t *testing.T) { }) } -func testAccConfigConformancePack_InputParameters(t *testing.T) { +func testAccConfigConformancePack_inputParameters(t *testing.T) { var pack configservice.ConformancePackDetail rName := acctest.RandomWithPrefix("tf-acc-test") - rId := "IAM_PASSWORD_POLICY" - pKey := "ParamKey" - pValue := "ParamValue" resourceName := "aws_config_conformance_pack.test" resource.Test(t, resource.TestCase{ @@ -80,15 +120,18 @@ func testAccConfigConformancePack_InputParameters(t *testing.T) { CheckDestroy: testAccCheckConfigConformancePackDestroy, Steps: []resource.TestStep{ { - Config: testAccConfigConformancePackConfigRuleIdentifierParameter(rName, rId, pKey, pValue), + Config: testAccConfigConformancePackInputParameterConfig(rName, "TestKey", "TestValue"), Check: resource.ComposeTestCheckFunc( testAccCheckConfigConformancePackExists(resourceName, &pack), testAccMatchResourceAttrRegionalARN(resourceName, "arn", "config", regexp.MustCompile(fmt.Sprintf("conformance-pack/%s/.+", rName))), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "delivery_s3_bucket", ""), resource.TestCheckResourceAttr(resourceName, "delivery_s3_key_prefix", ""), - resource.TestCheckResourceAttr(resourceName, "input_parameters."+pKey, pValue), - testAccCheckConfigConformancePackSuccessful(resourceName), + resource.TestCheckResourceAttr(resourceName, "input_parameter.#", "1"), 
+ resource.TestCheckTypeSetElemNestedAttrs(resourceName, "input_parameter.*", map[string]string{ + "parameter_name": "TestKey", + "parameter_value": "TestValue", + }), ), }, { @@ -104,8 +147,6 @@ func testAccConfigConformancePack_InputParameters(t *testing.T) { func testAccConfigConformancePack_S3Delivery(t *testing.T) { var pack configservice.ConformancePackDetail rName := acctest.RandomWithPrefix("tf-acc-test") - bName := "awsconfigconforms" + rName - rId := "IAM_PASSWORD_POLICY" resourceName := "aws_config_conformance_pack.test" resource.Test(t, resource.TestCase{ @@ -114,15 +155,14 @@ func testAccConfigConformancePack_S3Delivery(t *testing.T) { CheckDestroy: testAccCheckConfigConformancePackDestroy, Steps: []resource.TestStep{ { - Config: testAccConfigConformancePackConfigRuleIdentifierS3Delivery(rName, rId, bName), + Config: testAccConfigConformancePackS3DeliveryConfig(rName, rName), Check: resource.ComposeTestCheckFunc( testAccCheckConfigConformancePackExists(resourceName, &pack), testAccMatchResourceAttrRegionalARN(resourceName, "arn", "config", regexp.MustCompile(fmt.Sprintf("conformance-pack/%s/.+", rName))), resource.TestCheckResourceAttr(resourceName, "name", rName), - resource.TestCheckResourceAttr(resourceName, "delivery_s3_bucket", bName), - resource.TestCheckResourceAttr(resourceName, "delivery_s3_key_prefix", rId), - resource.TestCheckNoResourceAttr(resourceName, "input_parameters"), - testAccCheckConfigConformancePackSuccessful(resourceName), + resource.TestCheckResourceAttr(resourceName, "delivery_s3_bucket", rName), + resource.TestCheckResourceAttr(resourceName, "delivery_s3_key_prefix", rName), + resource.TestCheckResourceAttr(resourceName, "input_parameter.#", "0"), ), }, { @@ -138,9 +178,6 @@ func testAccConfigConformancePack_S3Delivery(t *testing.T) { func testAccConfigConformancePack_S3Template(t *testing.T) { var pack configservice.ConformancePackDetail rName := acctest.RandomWithPrefix("tf-acc-test") - bName := rName - kName := rName + ".yaml" - rId := "IAM_PASSWORD_POLICY" resourceName := "aws_config_conformance_pack.test" resource.Test(t, resource.TestCase{ @@ -149,15 +186,138 @@ func testAccConfigConformancePack_S3Template(t *testing.T) { CheckDestroy: testAccCheckConfigConformancePackDestroy, Steps: []resource.TestStep{ { - Config: testAccConfigConformancePackConfigRuleIdentifierS3Template(rName, rId, bName, kName), + Config: testAccConfigConformancePackS3TemplateConfig(rName, rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckConfigConformancePackExists(resourceName, &pack), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "config", regexp.MustCompile(fmt.Sprintf("conformance-pack/%s/.+", rName))), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "delivery_s3_bucket", ""), + resource.TestCheckResourceAttr(resourceName, "delivery_s3_key_prefix", ""), + resource.TestCheckResourceAttr(resourceName, "input_parameter.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"template_s3_uri"}, + }, + }, + }) +} + +func testAccConfigConformancePack_updateInputParameters(t *testing.T) { + var pack configservice.ConformancePackDetail + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_config_conformance_pack.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckConfigConformancePackDestroy, + 
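+ // The steps verify an in-place update: name is the only ForceNew argument,
+ // so changing input_parameter values re-runs PutConformancePack on the same
+ // pack, and reverting to the basic config clears the parameters again.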
Steps: []resource.TestStep{ + { + Config: testAccConfigConformancePackInputParameterConfig(rName, "TestKey", "TestValue"), + Check: resource.ComposeTestCheckFunc( + testAccCheckConfigConformancePackExists(resourceName, &pack), + ), + }, + { + Config: testAccConfigConformancePackUpdateInputParameterConfig(rName, "TestKey1", "TestKey2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckConfigConformancePackExists(resourceName, &pack), + resource.TestCheckResourceAttr(resourceName, "input_parameter.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "input_parameter.*", map[string]string{ + "parameter_name": "TestKey1", + "parameter_value": "TestValue1", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "input_parameter.*", map[string]string{ + "parameter_name": "TestKey2", + "parameter_value": "TestValue2", + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"template_body"}, + }, + { + Config: testAccConfigConformancePackBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckConfigConformancePackExists(resourceName, &pack), + resource.TestCheckResourceAttr(resourceName, "input_parameter.#", "0"), + ), + }, + }, + }) +} + +func testAccConfigConformancePack_updateS3Delivery(t *testing.T) { + var pack configservice.ConformancePackDetail + rName := acctest.RandomWithPrefix("tf-acc-test") + bucketName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_config_conformance_pack.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckConfigConformancePackDestroy, + Steps: []resource.TestStep{ + { + Config: testAccConfigConformancePackS3DeliveryConfig(rName, rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckConfigConformancePackExists(resourceName, &pack), + ), + }, + { + Config: testAccConfigConformancePackS3DeliveryConfig(rName, bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckConfigConformancePackExists(resourceName, &pack), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "config", regexp.MustCompile(fmt.Sprintf("conformance-pack/%s/.+", rName))), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "delivery_s3_bucket", bucketName), + resource.TestCheckResourceAttr(resourceName, "delivery_s3_key_prefix", bucketName), + resource.TestCheckResourceAttr(resourceName, "input_parameter.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"template_body"}, + }, + }, + }) +} + +func testAccConfigConformancePack_updateS3Template(t *testing.T) { + var pack configservice.ConformancePackDetail + rName := acctest.RandomWithPrefix("tf-acc-test") + bucketName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_config_conformance_pack.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckConfigConformancePackDestroy, + Steps: []resource.TestStep{ + { + Config: testAccConfigConformancePackS3TemplateConfig(rName, rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckConfigConformancePackExists(resourceName, &pack), + ), + }, + { + Config: testAccConfigConformancePackS3TemplateConfig(rName, bucketName), Check: resource.ComposeTestCheckFunc( 
testAccCheckConfigConformancePackExists(resourceName, &pack), testAccMatchResourceAttrRegionalARN(resourceName, "arn", "config", regexp.MustCompile(fmt.Sprintf("conformance-pack/%s/.+", rName))), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "delivery_s3_bucket", ""), resource.TestCheckResourceAttr(resourceName, "delivery_s3_key_prefix", ""), - resource.TestCheckNoResourceAttr(resourceName, "input_parameters"), - testAccCheckConfigConformancePackSuccessful(resourceName), + resource.TestCheckResourceAttr(resourceName, "input_parameter.#", "0"), ), }, { @@ -170,6 +330,79 @@ func testAccConfigConformancePack_S3Template(t *testing.T) { }) } +func testAccConfigConformancePack_updateTemplateBody(t *testing.T) { + var pack configservice.ConformancePackDetail + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_config_conformance_pack.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckConfigConformancePackDestroy, + Steps: []resource.TestStep{ + { + Config: testAccConfigConformancePackBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckConfigConformancePackExists(resourceName, &pack), + ), + }, + { + Config: testAccConfigConformancePackUpdateConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckConfigConformancePackExists(resourceName, &pack), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "config", regexp.MustCompile(fmt.Sprintf("conformance-pack/%s/.+", rName))), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "delivery_s3_bucket", ""), + resource.TestCheckResourceAttr(resourceName, "delivery_s3_key_prefix", ""), + resource.TestCheckResourceAttr(resourceName, "input_parameter.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "template_body", + }, + }, + }, + }) +} + +func testAccConfigConformancePack_S3TemplateAndTemplateBody(t *testing.T) { + var pack configservice.ConformancePackDetail + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_config_conformance_pack.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckConfigConformancePackDestroy, + Steps: []resource.TestStep{ + { + Config: testAccConfigConformancePackS3TemplateAndTemplateBodyConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckConfigConformancePackExists(resourceName, &pack), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "config", regexp.MustCompile(fmt.Sprintf("conformance-pack/%s/.+", rName))), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "delivery_s3_bucket", ""), + resource.TestCheckResourceAttr(resourceName, "delivery_s3_key_prefix", ""), + resource.TestCheckResourceAttr(resourceName, "input_parameter.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "template_body", + "template_s3_uri", + }, + }, + }, + }) +} + func testAccCheckConfigConformancePackDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).configconn @@ -178,25 +411,25 @@ func testAccCheckConfigConformancePackDestroy(s *terraform.State) error { continue } - rule, err := 
configDescribeConformancePack(conn, rs.Primary.ID) + pack, err := configDescribeConformancePack(conn, rs.Primary.ID) - if isAWSErr(err, configservice.ErrCodeNoSuchConformancePackException, "") { + if tfawserr.ErrCodeEquals(err, configservice.ErrCodeNoSuchConformancePackException) { continue } if err != nil { - return fmt.Errorf("error describing Config Managed Rule (%s): %s", rs.Primary.ID, err) + return fmt.Errorf("error describing Config Conformance Pack (%s): %w", rs.Primary.ID, err) } - if rule != nil { - return fmt.Errorf("Config Managed Rule (%s) still exists", rs.Primary.ID) + if pack != nil { + return fmt.Errorf("Config Conformance Pack (%s) still exists", rs.Primary.ID) } } return nil } -func testAccCheckConfigConformancePackExists(resourceName string, ocr *configservice.ConformancePackDetail) resource.TestCheckFunc { +func testAccCheckConfigConformancePackExists(resourceName string, detail *configservice.ConformancePackDetail) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] if !ok { @@ -208,47 +441,31 @@ func testAccCheckConfigConformancePackExists(resourceName string, ocr *configser pack, err := configDescribeConformancePack(conn, rs.Primary.ID) if err != nil { - return fmt.Errorf("error describing conformance pack (%s): %s", rs.Primary.ID, err) + return fmt.Errorf("error describing Config Conformance Pack (%s): %w", rs.Primary.ID, err) } if pack == nil { - return fmt.Errorf(" conformance pack (%s) not found", rs.Primary.ID) + return fmt.Errorf("Config Conformance Pack (%s) not found", rs.Primary.ID) } - *ocr = *pack + *detail = *pack return nil } } -func testAccCheckConfigConformancePackSuccessful(resourceName string) resource.TestCheckFunc { +func testAccCheckConfigConformancePackRecreated(before, after *configservice.ConformancePackDetail) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] - if !ok { - return fmt.Errorf("Not Found: %s", resourceName) + if aws.StringValue(before.ConformancePackArn) == aws.StringValue(after.ConformancePackArn) { + return fmt.Errorf("AWS Config Conformance Pack was not recreated") } - - conn := testAccProvider.Meta().(*AWSClient).configconn - - packStatus, err := configDescribeConformancePackStatus(conn, rs.Primary.ID) - if err != nil { - return fmt.Errorf("error describing conformance pack status (%s): %s", rs.Primary.ID, err) - } - if packStatus == nil { - return fmt.Errorf("conformance pack status (%s) not found", rs.Primary.ID) - } - if *packStatus.ConformancePackState != configservice.ConformancePackStateCreateComplete { - return fmt.Errorf("conformance pack (%s) returned %s status: %s", rs.Primary.ID, *packStatus.ConformancePackState, *packStatus.ConformancePackStatusReason) - } - return nil } } func testAccConfigConformancePackConfigBase(rName string) string { return fmt.Sprintf(` -data "aws_partition" "current" { -} +data "aws_partition" "current" {} resource "aws_config_configuration_recorder" "test" { depends_on = [aws_iam_role_policy_attachment.test] @@ -283,13 +500,12 @@ resource "aws_iam_role_policy_attachment" "test" { `, rName) } -func testAccConfigConformancePackConfigRuleIdentifier(rName, ruleIdentifier string) string { - return fmt.Sprintf(` -%[3]s - +func testAccConfigConformancePackBasicConfig(rName string) string { + return composeConfig(testAccConfigConformancePackConfigBase(rName), + fmt.Sprintf(` resource "aws_config_conformance_pack" "test" { depends_on = 
[aws_config_configuration_recorder.test] - name = %[1]q + name = %q template_body = < **NOTE:** The account must have a Configuration Recorder with proper IAM permissions before the conformance pack will +~> **NOTE:** The account must have a Configuration Recorder with proper IAM permissions before the Conformance Pack will successfully create or update. See also the [`aws_config_configuration_recorder` resource](/docs/providers/aws/r/config_configuration_recorder.html). -## Example Usage +## Example Usage with Template Body ```hcl -resource "aws_config_conformance_pack" "test" { - name = "example" +resource "aws_config_conformance_pack" "example" { + name = "example" + + input_parameter { + parameter_name = "AccessKeysRotatedParameterMaxAccessKeyAge" + parameter_value = "90" + } + template_body = < **Note:** If both `template_body` and `template_s3_uri` are specified, AWS Config uses the `template_s3_uri` and ignores the `template_body`. + The following arguments are supported: -* `name` - (Required) The name of the conformance pack -* `template_s3_uri` - (Optional, required if `template_body` is not provided) Where to load the template from in S3 (ex: `s3://my-conformance-pack-bucket/packs/example-conformance-pack-template.yaml`). This argument is not exported due to AWS API restrictions. -* `template_body` - (Optional, required if `template_s3_uri` is not provided) Body of the conformance pack template. This argument is not exported due to AWS API restrictions. -* `input_parameters` - (Optional) Map of input parameters that is passed to the conformance pack template -* `delivery_s3_bucket` - (Optional) Amazon S3 bucket where AWS Config stores conformance pack templates -* `delivery_s3_key_prefix` - (Optional) Prefix for the Amazon S3 bucket where AWS Config stores conformance pack templates +* `name` - (Required, Forces new resource) The name of the conformance pack. Must begin with a letter and contain from 1 to 256 alphanumeric characters and hyphens. +* `delivery_s3_bucket` - (Optional) Amazon S3 bucket where AWS Config stores conformance pack templates. Maximum length of 63. +* `delivery_s3_key_prefix` - (Optional) The prefix for the Amazon S3 bucket. Maximum length of 1024. +* `input_parameter` - (Optional) Set of configuration blocks describing input parameters passed to the conformance pack template. Documented below. When configured, the parameters must also be included in the `template_body` or in the template stored in Amazon S3 if using `template_s3_uri`. +* `template_body` - (Optional, required if `template_s3_uri` is not provided) A string containing full conformance pack template body. Maximum length of 51200. Drift detection is not possible with this argument. +* `template_s3_uri` - (Optional, required if `template_body` is not provided) Location of file, e.g. `s3://bucketname/prefix`, containing the template body. The uri must point to the conformance pack template that is located in an Amazon S3 bucket in the same region as the conformance pack. Maximum length of 1024. Drift detection is not possible with this argument. + +### input_parameter Argument Reference + +The `input_parameter` configuration block supports the following arguments: + +* `parameter_name` - (Required) The input key. +* `parameter_value` - (Required) The input value. 
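+
+When using `template_s3_uri` instead of `template_body`, the template object must already exist in Amazon S3. An illustrative sketch (the bucket and object names below are placeholders):
+
+```hcl
+resource "aws_s3_bucket" "example" {
+  bucket = "example-conformance-pack-templates"
+}
+
+resource "aws_s3_bucket_object" "example" {
+  bucket  = aws_s3_bucket.example.id
+  key     = "example-pack.yaml"
+  content = <<EOT
+Resources:
+  IAMPasswordPolicy:
+    Properties:
+      ConfigRuleName: IAMPasswordPolicy
+      SourceIdentifier: IAM_PASSWORD_POLICY
+    Type: AWS::Config::ConfigRule
+EOT
+}
+
+resource "aws_config_conformance_pack" "example" {
+  name            = "example"
+  template_s3_uri = "s3://${aws_s3_bucket.example.bucket}/${aws_s3_bucket_object.example.key}"
+}
+```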
## Attributes Reference -In addition to all arguments above (except `template_s3_uri` and `template_body`), the following attributes are exported: +In addition to all arguments above (except for `template_body` and `template_s3_uri`), the following attributes are exported: -* `arn` - Amazon Resource Name (ARN) of the conformance pack +* `arn` - Amazon Resource Name (ARN) of the conformance pack. ## Import -Config Managed Rules can be imported using the name, e.g. +Config Conformance Packs can be imported using the `name`, e.g. ``` $ terraform import aws_config_conformance_pack.example example -``` \ No newline at end of file +``` From 9e27396bb30d466b13c10aeaa4d60f7d053ebcf8 Mon Sep 17 00:00:00 2001 From: Marc Jay Date: Tue, 14 Jan 2020 23:58:03 +0000 Subject: [PATCH 1081/1212] Add delivery options to aws_ses_configuration_set resource to allow a TlsPolicy to be specified --- aws/resource_aws_ses_configuration_set.go | 56 ++++++++- ...resource_aws_ses_configuration_set_test.go | 119 +++++++++++++++++- 2 files changed, 171 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_ses_configuration_set.go b/aws/resource_aws_ses_configuration_set.go index 883a132335d..383190d6e2a 100644 --- a/aws/resource_aws_ses_configuration_set.go +++ b/aws/resource_aws_ses_configuration_set.go @@ -7,11 +7,13 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ses" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceAwsSesConfigurationSet() *schema.Resource { return &schema.Resource{ Create: resourceAwsSesConfigurationSetCreate, + Update: resourceAwsSesConfigurationSetUpdate, Read: resourceAwsSesConfigurationSetRead, Delete: resourceAwsSesConfigurationSetDelete, Importer: &schema.ResourceImporter{ @@ -24,6 +26,22 @@ func resourceAwsSesConfigurationSet() *schema.Resource { Required: true, ForceNew: true, }, + "delivery_options": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "tls_policy": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + ses.TlsPolicyRequire, + ses.TlsPolicyOptional, + }, false), + }, + }, + }, + }, }, } } @@ -46,6 +64,31 @@ func resourceAwsSesConfigurationSetCreate(d *schema.ResourceData, meta interface d.SetId(configurationSetName) + return resourceAwsSesConfigurationSetUpdate(d, meta) +} + +func resourceAwsSesConfigurationSetUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesconn + + configurationSetName := d.Get("name").(string) + + updateOpts := &ses.PutConfigurationSetDeliveryOptionsInput{ + ConfigurationSetName: aws.String(configurationSetName), + } + + if v, ok := d.GetOk("delivery_options"); ok { + options := v.(*schema.Set).List() + delivery := options[0].(map[string]interface{}) + updateOpts.DeliveryOptions = &ses.DeliveryOptions{ + TlsPolicy: aws.String(delivery["tls_policy"].(string)), + } + } + + _, err := conn.PutConfigurationSetDeliveryOptions(updateOpts) + if err != nil { + return fmt.Errorf("Error updating SES configuration set: %s", err) + } + return resourceAwsSesConfigurationSetRead(d, meta) } @@ -53,7 +96,8 @@ func resourceAwsSesConfigurationSetRead(d *schema.ResourceData, meta interface{} conn := meta.(*AWSClient).sesconn configSetInput := &ses.DescribeConfigurationSetInput{ - ConfigurationSetName: aws.String(d.Id()), + ConfigurationSetName: aws.String(d.Id()), + ConfigurationSetAttributeNames: 
aws.StringSlice([]string{ses.ConfigurationSetAttributeDeliveryOptions}), } response, err := conn.DescribeConfigurationSet(configSetInput) @@ -67,6 +111,16 @@ func resourceAwsSesConfigurationSetRead(d *schema.ResourceData, meta interface{} return err } + if response.DeliveryOptions != nil { + var deliveryOptions []map[string]interface{} + tlsPolicy := map[string]interface{}{ + "tls_policy": response.DeliveryOptions.TlsPolicy, + } + + deliveryOptions = append(deliveryOptions, tlsPolicy) + d.Set("delivery_options", deliveryOptions) + } + d.Set("name", aws.StringValue(response.ConfigurationSet.Name)) return nil diff --git a/aws/resource_aws_ses_configuration_set_test.go b/aws/resource_aws_ses_configuration_set_test.go index 454f7df9b0a..ef68609fd96 100644 --- a/aws/resource_aws_ses_configuration_set_test.go +++ b/aws/resource_aws_ses_configuration_set_test.go @@ -79,7 +79,7 @@ func TestAccAWSSESConfigurationSet_basic(t *testing.T) { CheckDestroy: testAccCheckSESConfigurationSetDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSSESConfigurationSetConfig(escRandomInteger), + Config: testAccAWSSESConfigurationSetBasicConfig(escRandomInteger), Check: resource.ComposeTestCheckFunc( testAccCheckAwsSESConfigurationSetExists("aws_ses_configuration_set.test"), ), @@ -93,6 +93,77 @@ func TestAccAWSSESConfigurationSet_basic(t *testing.T) { }) } +func TestAccAWSSESConfigurationSet_deliveryOptions(t *testing.T) { + var escRandomInteger = acctest.RandInt() + resourceName := "aws_ses_configuration_set.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckAWSSES(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSESConfigurationSetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSESConfigurationSetDeliveryOptionsConfig(escRandomInteger), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsSESConfigurationSetExists(resourceName), + testAccCheckAwsSESConfigurationSetRequiresTLS(resourceName), + resource.TestCheckResourceAttr(resourceName, "delivery_options.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "delivery_options.*", map[string]string{ + "tls_policy": "Require", + }), + ), + }, + { + ResourceName: "aws_ses_configuration_set.test", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSESConfigurationSet_deliveryOptionsUpdate(t *testing.T) { + var escRandomInteger = acctest.RandInt() + resourceName := "aws_ses_configuration_set.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckAWSSES(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSESConfigurationSetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSESConfigurationSetBasicConfig(escRandomInteger), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsSESConfigurationSetExists("aws_ses_configuration_set.test"), + resource.TestCheckNoResourceAttr(resourceName, "delivery_options.#"), + ), + }, + { + Config: testAccAWSSESConfigurationSetDeliveryOptionsConfig(escRandomInteger), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsSESConfigurationSetExists("aws_ses_configuration_set.test"), + testAccCheckAwsSESConfigurationSetRequiresTLS("aws_ses_configuration_set.test"), + resource.TestCheckResourceAttr(resourceName, "delivery_options.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "delivery_options.*", map[string]string{ + "tls_policy": "Require", + }), + ), + }, + { + ResourceName: 
"aws_ses_configuration_set.test", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckAwsSESConfigurationSetExists(n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -122,6 +193,37 @@ func testAccCheckAwsSESConfigurationSetExists(n string) resource.TestCheckFunc { } } +func testAccCheckAwsSESConfigurationSetRequiresTLS(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("SES configuration set not found: %s", n) + } + + conn := testAccProvider.Meta().(*AWSClient).sesconn + + describeOpts := &ses.DescribeConfigurationSetInput{ + ConfigurationSetName: aws.String(rs.Primary.ID), + ConfigurationSetAttributeNames: aws.StringSlice([]string{ses.ConfigurationSetAttributeDeliveryOptions}), + } + + response, err := conn.DescribeConfigurationSet(describeOpts) + if err != nil { + return err + } + + if response.DeliveryOptions == nil { + return fmt.Errorf("The configuration set did not have DeliveryOptions set") + } + + if aws.StringValue(response.DeliveryOptions.TlsPolicy) != ses.TlsPolicyRequire { + return fmt.Errorf("The configuration set did not have DeliveryOptions with a TlsPolicy setting set to Require") + } + + return nil + } +} + func testAccCheckSESConfigurationSetDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).sesconn @@ -144,10 +246,21 @@ func testAccCheckSESConfigurationSetDestroy(s *terraform.State) error { return nil } -func testAccAWSSESConfigurationSetConfig(escRandomInteger int) string { +func testAccAWSSESConfigurationSetBasicConfig(escRandomInteger int) string { + return fmt.Sprintf(` +resource "aws_ses_configuration_set" "test" { + name = "some-configuration-set-%d" +} +`, escRandomInteger) +} + +func testAccAWSSESConfigurationSetDeliveryOptionsConfig(escRandomInteger int) string { return fmt.Sprintf(` resource "aws_ses_configuration_set" "test" { - name = "some-configuration-set-%d" + name = "some-configuration-set-%d" + delivery_options { + tls_policy = "Require" + } } `, escRandomInteger) } From b4983adfe8120c2b3a327505fddde73e4c58bef3 Mon Sep 17 00:00:00 2001 From: Marc Jay Date: Wed, 15 Jan 2020 02:53:40 +0000 Subject: [PATCH 1082/1212] Add acceptance test for aws_ses_configuration_set resource updates Update r/aws_ses_configuration_set documentation for delivery_options/tls_policy References #11197 --- aws/resource_aws_ses_configuration_set.go | 9 ++++++--- website/docs/r/ses_configuration_set.markdown | 16 ++++++++++++++++ 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_ses_configuration_set.go b/aws/resource_aws_ses_configuration_set.go index 383190d6e2a..572d6da69a1 100644 --- a/aws/resource_aws_ses_configuration_set.go +++ b/aws/resource_aws_ses_configuration_set.go @@ -33,7 +33,8 @@ func resourceAwsSesConfigurationSet() *schema.Resource { Schema: map[string]*schema.Schema{ "tls_policy": { Type: schema.TypeString, - Required: true, + Optional: true, + Default: ses.TlsPolicyOptional, ValidateFunc: validation.StringInSlice([]string{ ses.TlsPolicyRequire, ses.TlsPolicyOptional, @@ -116,9 +117,11 @@ func resourceAwsSesConfigurationSetRead(d *schema.ResourceData, meta interface{} tlsPolicy := map[string]interface{}{ "tls_policy": response.DeliveryOptions.TlsPolicy, } - deliveryOptions = append(deliveryOptions, tlsPolicy) - d.Set("delivery_options", deliveryOptions) + + if err := d.Set("delivery_options", deliveryOptions); 
err != nil { + return fmt.Errorf("Error setting delivery_options for SES configuration set %s: %s", d.Id(), err) + } } d.Set("name", aws.StringValue(response.ConfigurationSet.Name)) diff --git a/website/docs/r/ses_configuration_set.markdown b/website/docs/r/ses_configuration_set.markdown index 98b2c0beef2..f3b9be2b26f 100644 --- a/website/docs/r/ses_configuration_set.markdown +++ b/website/docs/r/ses_configuration_set.markdown @@ -18,12 +18,28 @@ resource "aws_ses_configuration_set" "test" { } ``` +### Require TLS Connections + +```hcl +resource "aws_ses_configuration_set" "test" { + name = "some-configuration-set-test" + + delivery_options { + tls_policy = "Require" + } +} +``` + ## Argument Reference The following arguments are supported: * `name` - (Required) The name of the configuration set +Delivery Options (`delivery_options`) support the following: + +* `tls_policy` - (Optional) Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). Valid values: `Require` and `Optional`. If the value is `Require`, messages are only delivered if a TLS connection can be established. If the value is `Optional`, messages can be delivered in plain text if a TLS connection can't be established. Defaults to `Optional`. + ## Import SES Configuration Sets can be imported using their `name`, e.g. From aefcf5574f807939e2916412a9933eee65501183 Mon Sep 17 00:00:00 2001 From: Pradeep Bhadani Date: Thu, 11 Feb 2021 09:51:12 +0000 Subject: [PATCH 1083/1212] docs: Update bucket name to valid bucket name --- website/docs/r/s3_bucket_notification.html.markdown | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/website/docs/r/s3_bucket_notification.html.markdown b/website/docs/r/s3_bucket_notification.html.markdown index 290406d621b..06e358475ef 100644 --- a/website/docs/r/s3_bucket_notification.html.markdown +++ b/website/docs/r/s3_bucket_notification.html.markdown @@ -37,7 +37,7 @@ POLICY } resource "aws_s3_bucket" "bucket" { - bucket = "your_bucket_name" + bucket = "your-bucket-name" } resource "aws_s3_bucket_notification" "bucket_notification" { @@ -76,7 +76,7 @@ POLICY } resource "aws_s3_bucket" "bucket" { - bucket = "your_bucket_name" + bucket = "your-bucket-name" } resource "aws_s3_bucket_notification" "bucket_notification" { @@ -129,7 +129,7 @@ resource "aws_lambda_function" "func" { } resource "aws_s3_bucket" "bucket" { - bucket = "your_bucket_name" + bucket = "your-bucket-name" } resource "aws_s3_bucket_notification" "bucket_notification" { @@ -200,7 +200,7 @@ resource "aws_lambda_function" "func2" { } resource "aws_s3_bucket" "bucket" { - bucket = "your_bucket_name" + bucket = "your-bucket-name" } resource "aws_s3_bucket_notification" "bucket_notification" { @@ -252,7 +252,7 @@ POLICY } resource "aws_s3_bucket" "bucket" { - bucket = "your_bucket_name" + bucket = "your-bucket-name" } resource "aws_s3_bucket_notification" "bucket_notification" { From f9fb4ee87ef62052e71f8aaf074d41d865fc957d Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Thu, 25 Jun 2020 23:50:28 +0300 Subject: [PATCH 1084/1212] add arn attribute + disappears --- ...urce_aws_ec2_traffic_mirror_filter_rule.go | 35 +++++++--- ...aws_ec2_traffic_mirror_filter_rule_test.go | 70 +++++++++++++++++-- website/docs/index.html.markdown | 1 + ...2_traffic_mirror_filter_rule.html.markdown | 1 + 4 files changed, 95 insertions(+), 12 deletions(-) diff --git a/aws/resource_aws_ec2_traffic_mirror_filter_rule.go b/aws/resource_aws_ec2_traffic_mirror_filter_rule.go index 
992391d9869..2a527029b31 100644 --- a/aws/resource_aws_ec2_traffic_mirror_filter_rule.go +++ b/aws/resource_aws_ec2_traffic_mirror_filter_rule.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -21,6 +22,10 @@ func resourceAwsEc2TrafficMirrorFilterRule() *schema.Resource { State: resourceAwsEc2TrafficMirrorFilterRuleImport, }, Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, "description": { Type: schema.TypeString, Optional: true, @@ -42,12 +47,14 @@ func resourceAwsEc2TrafficMirrorFilterRule() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "from_port": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IsPortNumber, }, "to_port": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IsPortNumber, }, }, }, @@ -80,12 +87,14 @@ func resourceAwsEc2TrafficMirrorFilterRule() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "from_port": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IsPortNumber, }, "to_port": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IsPortNumber, }, }, }, @@ -185,6 +194,16 @@ func resourceAwsEc2TrafficMirrorFilterRuleRead(d *schema.ResourceData, meta inte return fmt.Errorf("error setting source_port_range: %s", err) } + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "ec2", + Region: meta.(*AWSClient).region, + AccountID: meta.(*AWSClient).accountid, + Resource: fmt.Sprintf("traffic-mirror-filter-rule/%s", d.Id()), + }.String() + + d.Set("arn", arn) + return nil } diff --git a/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go b/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go index 7f0d17d428b..b18340ffca2 100644 --- a/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go +++ b/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go @@ -13,7 +13,7 @@ import ( ) func TestAccAWSEc2TrafficMirrorFilterRule_basic(t *testing.T) { - resourceName := "aws_ec2_traffic_mirror_filter_rule.rule" + resourceName := "aws_ec2_traffic_mirror_filter_rule.test" dstCidr := "10.0.0.0/8" srcCidr := "0.0.0.0/0" ruleNum := 1 @@ -39,6 +39,7 @@ func TestAccAWSEc2TrafficMirrorFilterRule_basic(t *testing.T) { Config: testAccEc2TrafficMirrorFilterRuleConfig(dstCidr, srcCidr, action, direction, ruleNum), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEc2TrafficMirrorFilterRuleExists(resourceName), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "ec2", regexp.MustCompile(`traffic-mirror-filter-rule/tmfr-.+`)), resource.TestMatchResourceAttr(resourceName, "traffic_mirror_filter_id", regexp.MustCompile("tmf-.*")), resource.TestCheckResourceAttr(resourceName, "destination_cidr_block", dstCidr), resource.TestCheckResourceAttr(resourceName, "rule_action", action), @@ -99,6 +100,34 @@ func TestAccAWSEc2TrafficMirrorFilterRule_basic(t *testing.T) { }) } +func TestAccAWSEc2TrafficMirrorFilterRule_disappears(t *testing.T) { + resourceName := "aws_ec2_traffic_mirror_session_rule.test" + dstCidr := "10.0.0.0/8" + srcCidr := "0.0.0.0/0" + ruleNum := 1 + action := "accept" + direction := 
"ingress" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckAWSEc2TrafficMirrorFilterRule(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSEc2TrafficMirrorFilterRuleDestroy, + Steps: []resource.TestStep{ + { + Config: testAccEc2TrafficMirrorFilterRuleConfig(dstCidr, srcCidr, action, direction, ruleNum), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSEc2TrafficMirrorFilterRuleExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsEc2TrafficMirrorFilterRule(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func testAccCheckAWSEc2TrafficMirrorFilterRuleExists(name string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[name] @@ -150,6 +179,7 @@ func testAccCheckAWSEc2TrafficMirrorFilterRuleExists(name string) resource.TestC func testAccEc2TrafficMirrorFilterRuleConfig(dstCidr, srcCidr, action, dir string, num int) string { return fmt.Sprintf(` +<<<<<<< HEAD resource "aws_ec2_traffic_mirror_filter" "filter" { } @@ -160,15 +190,26 @@ resource "aws_ec2_traffic_mirror_filter_rule" "rule" { rule_number = %d source_cidr_block = "%s" traffic_direction = "%s" +======= +resource "aws_ec2_traffic_mirror_filter" "test" {} + +resource "aws_ec2_traffic_mirror_filter_rule" "test" { + traffic_mirror_filter_id = "${aws_ec2_traffic_mirror_filter.test.id}" + destination_cidr_block = "%s" + rule_action = "%s" + rule_number = %d + source_cidr_block = "%s" + traffic_direction = "%s" +>>>>>>> fd1c1723b... add arn attribute + disappears } `, dstCidr, action, num, srcCidr, dir) } func testAccEc2TrafficMirrorFilterRuleConfigFull(dstCidr, srcCidr, action, dir, description string, ruleNum, srcPortFrom, srcPortTo, dstPortFrom, dstPortTo, protocol int) string { return fmt.Sprintf(` -resource "aws_ec2_traffic_mirror_filter" "filter" { -} +resource "aws_ec2_traffic_mirror_filter" "test" {} +<<<<<<< HEAD resource "aws_ec2_traffic_mirror_filter_rule" "rule" { traffic_mirror_filter_id = aws_ec2_traffic_mirror_filter.filter.id destination_cidr_block = "%s" @@ -186,6 +227,27 @@ resource "aws_ec2_traffic_mirror_filter_rule" "rule" { from_port = %d to_port = %d } +======= +resource "aws_ec2_traffic_mirror_filter_rule" "test" { + traffic_mirror_filter_id = "${aws_ec2_traffic_mirror_filter.test.id}" + destination_cidr_block = "%s" + rule_action = "%s" + rule_number = %d + source_cidr_block = "%s" + traffic_direction = "%s" + description = "%s" + protocol = %d + + source_port_range { + from_port = %d + to_port = %d + } + + destination_port_range { + from_port = %d + to_port = %d + } +>>>>>>> fd1c1723b... add arn attribute + disappears } `, dstCidr, action, ruleNum, srcCidr, dir, description, protocol, srcPortFrom, srcPortTo, dstPortFrom, dstPortTo) } @@ -239,7 +301,7 @@ func testAccCheckAWSEc2TrafficMirrorFilterRuleDestroy(s *terraform.State) error ruleList = append(ruleList, filter.EgressFilterRules...) 
for _, rule := range ruleList { - if *rule.TrafficMirrorFilterRuleId == ruleId { + if aws.StringValue(rule.TrafficMirrorFilterRuleId) == ruleId { return fmt.Errorf("Rule %s still exists in filter %s", ruleId, filterId) } } diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index a40bde3c5a7..894cbc5345a 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -265,6 +265,7 @@ for more information about connecting to alternate AWS endpoints or AWS compatib - [`aws_ebs_volume` data source](/docs/providers/aws/d/ebs_volume.html) - [`aws_ec2_capacity_reservation` resource](/docs/providers/aws/r/ec2_capacity_reservation.html) - [`aws_ec2_client_vpn_endpoint` resource](/docs/providers/aws/r/ec2_client_vpn_endpoint.html) + - [`aws_ec2_traffic_mirror_filter_rule` resource](/docs/providers/aws/r/ec2_traffic_mirror_filter_rule.html) - [`aws_ec2_traffic_mirror_session` resource](/docs/providers/aws/r/ec2_traffic_mirror_session.html) - [`aws_ec2_traffic_mirror_target` resource](/docs/providers/aws/r/ec2_traffic_mirror_target.html) - [`aws_ec2_transit_gateway_route_table` data source](/docs/providers/aws/d/ec2_transit_gateway_route_table.html) diff --git a/website/docs/r/ec2_traffic_mirror_filter_rule.html.markdown b/website/docs/r/ec2_traffic_mirror_filter_rule.html.markdown index 2c2d5230055..0adce505e0c 100644 --- a/website/docs/r/ec2_traffic_mirror_filter_rule.html.markdown +++ b/website/docs/r/ec2_traffic_mirror_filter_rule.html.markdown @@ -77,6 +77,7 @@ Traffic mirror port range support following attributes: In addition to all arguments above, the following attributes are exported: +* `arn` - The ARN of the traffic mirror filter rule. * `id` - The name of the traffic mirror filter rule. ## Import From 7828592b06488dd4a5b3e291a46cf840900105b8 Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Fri, 26 Jun 2020 00:00:25 +0300 Subject: [PATCH 1085/1212] fix test --- aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go b/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go index b18340ffca2..0f2def56b59 100644 --- a/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go +++ b/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go @@ -101,7 +101,7 @@ func TestAccAWSEc2TrafficMirrorFilterRule_basic(t *testing.T) { } func TestAccAWSEc2TrafficMirrorFilterRule_disappears(t *testing.T) { - resourceName := "aws_ec2_traffic_mirror_session_rule.test" + resourceName := "aws_ec2_traffic_mirror_filter_rule.test" dstCidr := "10.0.0.0/8" srcCidr := "0.0.0.0/0" ruleNum := 1 From 765204c12e9fcf203e8d808f8ff041dade43b444 Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Fri, 26 Jun 2020 00:09:28 +0300 Subject: [PATCH 1086/1212] fix delete from state message sdk wrappers --- aws/resource_aws_ec2_traffic_mirror_filter_rule.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/aws/resource_aws_ec2_traffic_mirror_filter_rule.go b/aws/resource_aws_ec2_traffic_mirror_filter_rule.go index 2a527029b31..19a9abe1adb 100644 --- a/aws/resource_aws_ec2_traffic_mirror_filter_rule.go +++ b/aws/resource_aws_ec2_traffic_mirror_filter_rule.go @@ -171,7 +171,7 @@ func resourceAwsEc2TrafficMirrorFilterRuleRead(d *schema.ResourceData, meta inte } if nil == rule { - log.Printf("[WARN] EC2 Traffic Mirror Filter (%s) not found, removing from state", d.Id()) + log.Printf("[WARN] EC2 Traffic Mirror Filter Rule (%s) not found, removing 
from state", d.Id()) d.SetId("") return nil } @@ -210,15 +210,16 @@ func resourceAwsEc2TrafficMirrorFilterRuleRead(d *schema.ResourceData, meta inte func findEc2TrafficMirrorFilterRule(ruleId string, filters []*ec2.TrafficMirrorFilter) (rule *ec2.TrafficMirrorFilterRule) { log.Printf("[DEBUG] searching %s in %d filters", ruleId, len(filters)) for _, v := range filters { - log.Printf("[DEBUG]: searching filter %s, ingress rule count = %d, egress rule count = %d", *v.TrafficMirrorFilterId, len(v.IngressFilterRules), len(v.EgressFilterRules)) + log.Printf("[DEBUG]: searching filter %s, ingress rule count = %d, egress rule count = %d", + aws.StringValue(v.TrafficMirrorFilterId), len(v.IngressFilterRules), len(v.EgressFilterRules)) for _, r := range v.IngressFilterRules { - if *r.TrafficMirrorFilterRuleId == ruleId { + if aws.StringValue(r.TrafficMirrorFilterRuleId) == ruleId { rule = r break } } for _, r := range v.EgressFilterRules { - if *r.TrafficMirrorFilterRuleId == ruleId { + if aws.StringValue(r.TrafficMirrorFilterRuleId) == ruleId { rule = r break } @@ -226,7 +227,7 @@ func findEc2TrafficMirrorFilterRule(ruleId string, filters []*ec2.TrafficMirrorF } if nil != rule { - log.Printf("[DEBUG]: Found %s in %s", ruleId, *rule.TrafficDirection) + log.Printf("[DEBUG]: Found %s in %s", ruleId, aws.StringValue(rule.TrafficDirection)) } return rule From 886a0607af828f14c2da2aaffd9caa9059f8d4b6 Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Fri, 26 Jun 2020 00:12:44 +0300 Subject: [PATCH 1087/1212] fix port validation + more sdk wrappers --- aws/resource_aws_ec2_traffic_mirror_filter_rule.go | 8 ++++---- aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/aws/resource_aws_ec2_traffic_mirror_filter_rule.go b/aws/resource_aws_ec2_traffic_mirror_filter_rule.go index 19a9abe1adb..05f56ff78d4 100644 --- a/aws/resource_aws_ec2_traffic_mirror_filter_rule.go +++ b/aws/resource_aws_ec2_traffic_mirror_filter_rule.go @@ -49,12 +49,12 @@ func resourceAwsEc2TrafficMirrorFilterRule() *schema.Resource { "from_port": { Type: schema.TypeInt, Optional: true, - ValidateFunc: validation.IsPortNumber, + ValidateFunc: validation.IsPortNumberOrZero, }, "to_port": { Type: schema.TypeInt, Optional: true, - ValidateFunc: validation.IsPortNumber, + ValidateFunc: validation.IsPortNumberOrZero, }, }, }, @@ -89,12 +89,12 @@ func resourceAwsEc2TrafficMirrorFilterRule() *schema.Resource { "from_port": { Type: schema.TypeInt, Optional: true, - ValidateFunc: validation.IsPortNumber, + ValidateFunc: validation.IsPortNumberOrZero, }, "to_port": { Type: schema.TypeInt, Optional: true, - ValidateFunc: validation.IsPortNumber, + ValidateFunc: validation.IsPortNumberOrZero, }, }, }, diff --git a/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go b/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go index 0f2def56b59..8fa9600fa7d 100644 --- a/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go +++ b/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go @@ -163,7 +163,7 @@ func testAccCheckAWSEc2TrafficMirrorFilterRuleExists(name string) resource.TestC var exists bool for _, rule := range ruleList { - if *rule.TrafficMirrorFilterRuleId == ruleId { + if aws.StringValue(rule.TrafficMirrorFilterRuleId) == ruleId { exists = true break } From ac09f4cb3ef2dcfe420f163824920e01b89f92d2 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Wed, 21 Oct 2020 21:49:04 +0300 Subject: [PATCH 1088/1212] fix test --- ...aws_ec2_traffic_mirror_filter_rule_test.go | 34 
------------------- 1 file changed, 34 deletions(-) diff --git a/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go b/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go index 8fa9600fa7d..a0b0d2a5c38 100644 --- a/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go +++ b/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go @@ -179,7 +179,6 @@ func testAccCheckAWSEc2TrafficMirrorFilterRuleExists(name string) resource.TestC func testAccEc2TrafficMirrorFilterRuleConfig(dstCidr, srcCidr, action, dir string, num int) string { return fmt.Sprintf(` -<<<<<<< HEAD resource "aws_ec2_traffic_mirror_filter" "filter" { } @@ -190,17 +189,6 @@ resource "aws_ec2_traffic_mirror_filter_rule" "rule" { rule_number = %d source_cidr_block = "%s" traffic_direction = "%s" -======= -resource "aws_ec2_traffic_mirror_filter" "test" {} - -resource "aws_ec2_traffic_mirror_filter_rule" "test" { - traffic_mirror_filter_id = "${aws_ec2_traffic_mirror_filter.test.id}" - destination_cidr_block = "%s" - rule_action = "%s" - rule_number = %d - source_cidr_block = "%s" - traffic_direction = "%s" ->>>>>>> fd1c1723b... add arn attribute + disappears } `, dstCidr, action, num, srcCidr, dir) } @@ -209,7 +197,6 @@ func testAccEc2TrafficMirrorFilterRuleConfigFull(dstCidr, srcCidr, action, dir, return fmt.Sprintf(` resource "aws_ec2_traffic_mirror_filter" "test" {} -<<<<<<< HEAD resource "aws_ec2_traffic_mirror_filter_rule" "rule" { traffic_mirror_filter_id = aws_ec2_traffic_mirror_filter.filter.id destination_cidr_block = "%s" @@ -227,27 +214,6 @@ resource "aws_ec2_traffic_mirror_filter_rule" "rule" { from_port = %d to_port = %d } -======= -resource "aws_ec2_traffic_mirror_filter_rule" "test" { - traffic_mirror_filter_id = "${aws_ec2_traffic_mirror_filter.test.id}" - destination_cidr_block = "%s" - rule_action = "%s" - rule_number = %d - source_cidr_block = "%s" - traffic_direction = "%s" - description = "%s" - protocol = %d - - source_port_range { - from_port = %d - to_port = %d - } - - destination_port_range { - from_port = %d - to_port = %d - } ->>>>>>> fd1c1723b... 
add arn attribute + disappears } `, dstCidr, action, ruleNum, srcCidr, dir, description, protocol, srcPortFrom, srcPortTo, dstPortFrom, dstPortTo) } From 86e03683c96edb6e214929b716dea18db38480f2 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Wed, 21 Oct 2020 21:50:49 +0300 Subject: [PATCH 1089/1212] lint docs --- website/docs/index.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index 894cbc5345a..5a6d9fd1726 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -265,7 +265,7 @@ for more information about connecting to alternate AWS endpoints or AWS compatib - [`aws_ebs_volume` data source](/docs/providers/aws/d/ebs_volume.html) - [`aws_ec2_capacity_reservation` resource](/docs/providers/aws/r/ec2_capacity_reservation.html) - [`aws_ec2_client_vpn_endpoint` resource](/docs/providers/aws/r/ec2_client_vpn_endpoint.html) - - [`aws_ec2_traffic_mirror_filter_rule` resource](/docs/providers/aws/r/ec2_traffic_mirror_filter_rule.html) + - [`aws_ec2_traffic_mirror_filter_rule` resource](/docs/providers/aws/r/ec2_traffic_mirror_filter_rule.html) - [`aws_ec2_traffic_mirror_session` resource](/docs/providers/aws/r/ec2_traffic_mirror_session.html) - [`aws_ec2_traffic_mirror_target` resource](/docs/providers/aws/r/ec2_traffic_mirror_target.html) - [`aws_ec2_transit_gateway_route_table` data source](/docs/providers/aws/d/ec2_transit_gateway_route_table.html) From becfb165a8be9238f68fae2d7cd7c2e479afc94b Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Wed, 21 Oct 2020 21:53:06 +0300 Subject: [PATCH 1090/1212] test --- aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go b/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go index a0b0d2a5c38..d3421ee35a3 100644 --- a/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go +++ b/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go @@ -179,7 +179,7 @@ func testAccCheckAWSEc2TrafficMirrorFilterRuleExists(name string) resource.TestC func testAccEc2TrafficMirrorFilterRuleConfig(dstCidr, srcCidr, action, dir string, num int) string { return fmt.Sprintf(` -resource "aws_ec2_traffic_mirror_filter" "filter" { +resource "aws_ec2_traffic_mirror_filter" "test" { } resource "aws_ec2_traffic_mirror_filter_rule" "rule" { @@ -197,7 +197,7 @@ func testAccEc2TrafficMirrorFilterRuleConfigFull(dstCidr, srcCidr, action, dir, return fmt.Sprintf(` resource "aws_ec2_traffic_mirror_filter" "test" {} -resource "aws_ec2_traffic_mirror_filter_rule" "rule" { +resource "aws_ec2_traffic_mirror_filter_rule" "test" { traffic_mirror_filter_id = aws_ec2_traffic_mirror_filter.filter.id destination_cidr_block = "%s" rule_action = "%s" From 05b3f1d03af06e4628c422fed5e9cda6fd6dde35 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Wed, 21 Oct 2020 21:53:55 +0300 Subject: [PATCH 1091/1212] test --- aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go b/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go index d3421ee35a3..d71ac521f70 100644 --- a/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go +++ b/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go @@ -182,8 +182,8 @@ func testAccEc2TrafficMirrorFilterRuleConfig(dstCidr, srcCidr, action, dir strin resource "aws_ec2_traffic_mirror_filter" 
"test" { } -resource "aws_ec2_traffic_mirror_filter_rule" "rule" { - traffic_mirror_filter_id = aws_ec2_traffic_mirror_filter.filter.id +resource "aws_ec2_traffic_mirror_filter_rule" "test" { + traffic_mirror_filter_id = aws_ec2_traffic_mirror_filter.test.id destination_cidr_block = "%s" rule_action = "%s" rule_number = %d @@ -198,7 +198,7 @@ func testAccEc2TrafficMirrorFilterRuleConfigFull(dstCidr, srcCidr, action, dir, resource "aws_ec2_traffic_mirror_filter" "test" {} resource "aws_ec2_traffic_mirror_filter_rule" "test" { - traffic_mirror_filter_id = aws_ec2_traffic_mirror_filter.filter.id + traffic_mirror_filter_id = aws_ec2_traffic_mirror_filter.test.id destination_cidr_block = "%s" rule_action = "%s" rule_number = %d From 4ecdedb993cf013e79462ebb3ef1ae16aba54c92 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 11 Feb 2021 15:22:29 +0200 Subject: [PATCH 1092/1212] add changelog --- .changelog/13949.txt | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 .changelog/13949.txt diff --git a/.changelog/13949.txt b/.changelog/13949.txt new file mode 100644 index 00000000000..cf3f1b591e3 --- /dev/null +++ b/.changelog/13949.txt @@ -0,0 +1,8 @@ +```release-note:enhancement +resource/aws_ec2_traffic_mirror_filter_rule: Add arn attribute. +``` + +```release-note:enhancement +resource/aws_ec2_traffic_mirror_filter_rule: Add plan time validation to `destination_port_range.from_port`, +`destination_port_range.to_port`, `source_port_range.from_port`, and `source_port_range.to_port`. +``` From f576ad2c863e9ec462f72d9c2f7896a44353f85e Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 11 Feb 2021 09:07:01 -0500 Subject: [PATCH 1093/1212] resource/ec2_traffic_mirror_filter_rule: Minor cleanup --- ...urce_aws_ec2_traffic_mirror_filter_rule.go | 2 +- ...aws_ec2_traffic_mirror_filter_rule_test.go | 2 +- ...2_traffic_mirror_filter_rule.html.markdown | 22 +++++++++---------- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/aws/resource_aws_ec2_traffic_mirror_filter_rule.go b/aws/resource_aws_ec2_traffic_mirror_filter_rule.go index 05f56ff78d4..9ba31f5664d 100644 --- a/aws/resource_aws_ec2_traffic_mirror_filter_rule.go +++ b/aws/resource_aws_ec2_traffic_mirror_filter_rule.go @@ -196,7 +196,7 @@ func resourceAwsEc2TrafficMirrorFilterRuleRead(d *schema.ResourceData, meta inte arn := arn.ARN{ Partition: meta.(*AWSClient).partition, - Service: "ec2", + Service: ec2.ServiceName, Region: meta.(*AWSClient).region, AccountID: meta.(*AWSClient).accountid, Resource: fmt.Sprintf("traffic-mirror-filter-rule/%s", d.Id()), diff --git a/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go b/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go index d71ac521f70..274983a3192 100644 --- a/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go +++ b/aws/resource_aws_ec2_traffic_mirror_filter_rule_test.go @@ -39,7 +39,7 @@ func TestAccAWSEc2TrafficMirrorFilterRule_basic(t *testing.T) { Config: testAccEc2TrafficMirrorFilterRuleConfig(dstCidr, srcCidr, action, direction, ruleNum), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEc2TrafficMirrorFilterRuleExists(resourceName), - testAccMatchResourceAttrRegionalARN(resourceName, "arn", "ec2", regexp.MustCompile(`traffic-mirror-filter-rule/tmfr-.+`)), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", ec2.ServiceName, regexp.MustCompile(`traffic-mirror-filter-rule/tmfr-.+`)), resource.TestMatchResourceAttr(resourceName, "traffic_mirror_filter_id", regexp.MustCompile("tmf-.*")), resource.TestCheckResourceAttr(resourceName, 
"destination_cidr_block", dstCidr), resource.TestCheckResourceAttr(resourceName, "rule_action", action), diff --git a/website/docs/r/ec2_traffic_mirror_filter_rule.html.markdown b/website/docs/r/ec2_traffic_mirror_filter_rule.html.markdown index 0adce505e0c..f5fd1340ac9 100644 --- a/website/docs/r/ec2_traffic_mirror_filter_rule.html.markdown +++ b/website/docs/r/ec2_traffic_mirror_filter_rule.html.markdown @@ -57,16 +57,16 @@ resource "aws_ec2_traffic_mirror_filter_rule" "rulein" { The following arguments are supported: -* `description` - (Optional) A description of the traffic mirror filter rule. +* `description` - (Optional) Description of the traffic mirror filter rule. * `traffic_mirror_filter_id` - (Required) ID of the traffic mirror filter to which this rule should be added -* `destination_cidr_block` - (Required) The destination CIDR block to assign to the Traffic Mirror rule. -* `destination_port_range` - (Optional) The destination port range. Supported only when the protocol is set to TCP(6) or UDP(17). See Traffic mirror port range documented below -* `protocol` - (Optional) The protocol number, for example 17 (UDP), to assign to the Traffic Mirror rule. For information about the protocol value, see [Protocol Numbers](https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) on the Internet Assigned Numbers Authority (IANA) website. -* `rule_action` - (Required) The action to take (accept | reject) on the filtered traffic. Valid values are `accept` and `reject` -* `rule_number` - (Required) The number of the Traffic Mirror rule. This number must be unique for each Traffic Mirror rule in a given direction. The rules are processed in ascending order by rule number. -* `source_cidr_block` - (Required) The source CIDR block to assign to the Traffic Mirror rule. -* `source_port_range` - (Optional) The source port range. Supported only when the protocol is set to TCP(6) or UDP(17). See Traffic mirror port range documented below -* `traffic_direction` - (Required) The direction of traffic to be captured. Valid values are `ingress` and `egress` +* `destination_cidr_block` - (Required) Destination CIDR block to assign to the Traffic Mirror rule. +* `destination_port_range` - (Optional) Destination port range. Supported only when the protocol is set to TCP(6) or UDP(17). See Traffic mirror port range documented below +* `protocol` - (Optional) Protocol number, for example 17 (UDP), to assign to the Traffic Mirror rule. For information about the protocol value, see [Protocol Numbers](https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) on the Internet Assigned Numbers Authority (IANA) website. +* `rule_action` - (Required) Action to take (accept | reject) on the filtered traffic. Valid values are `accept` and `reject` +* `rule_number` - (Required) Number of the Traffic Mirror rule. This number must be unique for each Traffic Mirror rule in a given direction. The rules are processed in ascending order by rule number. +* `source_cidr_block` - (Required) Source CIDR block to assign to the Traffic Mirror rule. +* `source_port_range` - (Optional) Source port range. Supported only when the protocol is set to TCP(6) or UDP(17). See Traffic mirror port range documented below +* `traffic_direction` - (Required) Direction of traffic to be captured. 
Valid values are `ingress` and `egress` Traffic mirror port range support following attributes: @@ -77,8 +77,8 @@ Traffic mirror port range support following attributes: In addition to all arguments above, the following attributes are exported: -* `arn` - The ARN of the traffic mirror filter rule. -* `id` - The name of the traffic mirror filter rule. +* `arn` - ARN of the traffic mirror filter rule. +* `id` - Name of the traffic mirror filter rule. ## Import From ed9364e8b26e7f8b0e44afa262bc3bae4bb9f3bc Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 11 Feb 2021 14:40:50 +0000 Subject: [PATCH 1094/1212] Update CHANGELOG.md for #17501 --- CHANGELOG.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f07198be927..b26919fa3da 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,10 +1,24 @@ ## 3.28.0 (Unreleased) +FEATURES: + +* **New Resource:** `aws_sagemaker_model_package_group` ([#17366](https://github.com/hashicorp/terraform-provider-aws/issues/17366)) +* **New Resource:** `aws_securityhub_organization_admin_account` ([#17501](https://github.com/hashicorp/terraform-provider-aws/issues/17501)) + ENHANCEMENTS: +* data-source/aws_iam_policy_document: Support merging policy documents by adding `source_policy_documents` and `override_policy_documents` arguments ([#12055](https://github.com/hashicorp/terraform-provider-aws/issues/12055)) * provider: Add terraform-provider-aws version to HTTP User-Agent header ([#17486](https://github.com/hashicorp/terraform-provider-aws/issues/17486)) +* resource/aws_ec2_traffic_mirror_filter_rule: Add arn attribute. ([#13949](https://github.com/hashicorp/terraform-provider-aws/issues/13949)) +* resource/aws_ec2_traffic_mirror_filter_rule: Add plan time validation to `destination_port_range.from_port`, +`destination_port_range.to_port`, `source_port_range.from_port`, and `source_port_range.to_port`. ([#13949](https://github.com/hashicorp/terraform-provider-aws/issues/13949)) * resource/aws_sns_topic_subscription: Add `redrive_policy` argument ([#11770](https://github.com/hashicorp/terraform-provider-aws/issues/11770)) +BUG FIXES: + +* resource/aws_glue_catalog_database: Use Catalog Id when deleting Databases. ([#17489](https://github.com/hashicorp/terraform-provider-aws/issues/17489)) +* resource/aws_instance: Fix use of `throughput` and `iops` for `gp3` volumes at the same time ([#17380](https://github.com/hashicorp/terraform-provider-aws/issues/17380)) + ## 3.27.0 (February 05, 2021) FEATURES: From 52d6b7ee28ad7b3876f6e9dad76ad14bfeb89a96 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 11 Feb 2021 09:42:54 -0500 Subject: [PATCH 1095/1212] tests/s3_bucket: Add distinct destination error check --- aws/resource_aws_s3_bucket_test.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/aws/resource_aws_s3_bucket_test.go b/aws/resource_aws_s3_bucket_test.go index 81e49aa8669..4da47108025 100644 --- a/aws/resource_aws_s3_bucket_test.go +++ b/aws/resource_aws_s3_bucket_test.go @@ -1562,6 +1562,7 @@ func TestAccAWSS3Bucket_Replication_MultipleDestinations_EmptyFilter(t *testing. 
testAccPreCheck(t) testAccMultipleRegionPreCheck(t, 2) }, + ErrorCheck: testAccErrorCheckSkipS3(t), ProviderFactories: testAccProviderFactoriesAlternate(&providers), CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ @@ -1628,6 +1629,7 @@ func TestAccAWSS3Bucket_Replication_MultipleDestinations_NonEmptyFilter(t *testi testAccPreCheck(t) testAccMultipleRegionPreCheck(t, 2) }, + ErrorCheck: testAccErrorCheckSkipS3(t), ProviderFactories: testAccProviderFactoriesAlternate(&providers), CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), Steps: []resource.TestStep{ @@ -2569,6 +2571,13 @@ func TestWebsiteEndpoint(t *testing.T) { } } +// testAccErrorCheckSkipS3 skips tests that have error messages indicating unsupported features +func testAccErrorCheckSkipS3(t *testing.T) resource.ErrorCheckFunc { + return testAccErrorCheckSkipMessagesContaining(t, + "Number of distinct destination bucket ARNs cannot exceed", + ) +} + func testAccCheckAWSS3BucketDestroy(s *terraform.State) error { return testAccCheckAWSS3BucketDestroyWithProvider(s, testAccProvider) } From b9061447986565351f8afbdd8895d2b06af25612 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 11 Feb 2021 09:51:32 -0500 Subject: [PATCH 1096/1212] CR updates including documentation, acctest coverage, create->read flow --- aws/resource_aws_ses_configuration_set.go | 118 +++++++----- ...resource_aws_ses_configuration_set_test.go | 180 ++++++++++++------ website/docs/r/ses_configuration_set.markdown | 7 +- 3 files changed, 202 insertions(+), 103 deletions(-) diff --git a/aws/resource_aws_ses_configuration_set.go b/aws/resource_aws_ses_configuration_set.go index 572d6da69a1..3af7f7ddae3 100644 --- a/aws/resource_aws_ses_configuration_set.go +++ b/aws/resource_aws_ses_configuration_set.go @@ -13,36 +13,34 @@ import ( func resourceAwsSesConfigurationSet() *schema.Resource { return &schema.Resource{ Create: resourceAwsSesConfigurationSetCreate, - Update: resourceAwsSesConfigurationSetUpdate, Read: resourceAwsSesConfigurationSetRead, + Update: resourceAwsSesConfigurationSetUpdate, Delete: resourceAwsSesConfigurationSetDelete, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, }, Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, "delivery_options": { - Type: schema.TypeSet, + Type: schema.TypeList, Optional: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "tls_policy": { - Type: schema.TypeString, - Optional: true, - Default: ses.TlsPolicyOptional, - ValidateFunc: validation.StringInSlice([]string{ - ses.TlsPolicyRequire, - ses.TlsPolicyOptional, - }, false), + Type: schema.TypeString, + Optional: true, + Default: ses.TlsPolicyOptional, + ValidateFunc: validation.StringInSlice(ses.TlsPolicy_Values(), false), }, }, }, }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, }, } } @@ -60,34 +58,21 @@ func resourceAwsSesConfigurationSetCreate(d *schema.ResourceData, meta interface _, err := conn.CreateConfigurationSet(createOpts) if err != nil { - return fmt.Errorf("Error creating SES configuration set: %s", err) + return fmt.Errorf("error creating SES configuration set (%s): %w", configurationSetName, err) } d.SetId(configurationSetName) - return resourceAwsSesConfigurationSetUpdate(d, meta) -} - -func resourceAwsSesConfigurationSetUpdate(d *schema.ResourceData, meta interface{}) error { - 
conn := meta.(*AWSClient).sesconn - - configurationSetName := d.Get("name").(string) - - updateOpts := &ses.PutConfigurationSetDeliveryOptionsInput{ - ConfigurationSetName: aws.String(configurationSetName), - } - - if v, ok := d.GetOk("delivery_options"); ok { - options := v.(*schema.Set).List() - delivery := options[0].(map[string]interface{}) - updateOpts.DeliveryOptions = &ses.DeliveryOptions{ - TlsPolicy: aws.String(delivery["tls_policy"].(string)), + if v, ok := d.GetOk("delivery_options"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input := &ses.PutConfigurationSetDeliveryOptionsInput{ + ConfigurationSetName: aws.String(configurationSetName), + DeliveryOptions: expandSesConfigurationSetDeliveryOptions(v.([]interface{})), } - } - _, err := conn.PutConfigurationSetDeliveryOptions(updateOpts) - if err != nil { - return fmt.Errorf("Error updating SES configuration set: %s", err) + _, err := conn.PutConfigurationSetDeliveryOptions(input) + if err != nil { + return fmt.Errorf("error adding SES configuration set (%s) delivery options: %w", configurationSetName, err) + } } return resourceAwsSesConfigurationSetRead(d, meta) @@ -112,16 +97,8 @@ func resourceAwsSesConfigurationSetRead(d *schema.ResourceData, meta interface{} return err } - if response.DeliveryOptions != nil { - var deliveryOptions []map[string]interface{} - tlsPolicy := map[string]interface{}{ - "tls_policy": response.DeliveryOptions.TlsPolicy, - } - deliveryOptions = append(deliveryOptions, tlsPolicy) - - if err := d.Set("delivery_options", deliveryOptions); err != nil { - return fmt.Errorf("Error setting delivery_options for SES configuration set %s: %s", d.Id(), err) - } + if err := d.Set("delivery_options", flattenSesConfigurationSetDeliveryOptions(response.DeliveryOptions)); err != nil { + return fmt.Errorf("error setting delivery_options: %w", err) } d.Set("name", aws.StringValue(response.ConfigurationSet.Name)) @@ -129,6 +106,24 @@ func resourceAwsSesConfigurationSetRead(d *schema.ResourceData, meta interface{} return nil } +func resourceAwsSesConfigurationSetUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesconn + + if d.HasChange("delivery_options") { + input := &ses.PutConfigurationSetDeliveryOptionsInput{ + ConfigurationSetName: aws.String(d.Id()), + DeliveryOptions: expandSesConfigurationSetDeliveryOptions(d.Get("delivery_options").([]interface{})), + } + + _, err := conn.PutConfigurationSetDeliveryOptions(input) + if err != nil { + return fmt.Errorf("error updating SES configuration set (%s) delivery options: %w", d.Id(), err) + } + } + + return resourceAwsSesConfigurationSetRead(d, meta) +} + func resourceAwsSesConfigurationSetDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).sesconn @@ -139,3 +134,34 @@ func resourceAwsSesConfigurationSetDelete(d *schema.ResourceData, meta interface return err } + +func expandSesConfigurationSetDeliveryOptions(l []interface{}) *ses.DeliveryOptions { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + if !ok { + return nil + } + + options := &ses.DeliveryOptions{} + + if v, ok := tfMap["tls_policy"].(string); ok && v != "" { + options.TlsPolicy = aws.String(v) + } + + return options +} + +func flattenSesConfigurationSetDeliveryOptions(options *ses.DeliveryOptions) []interface{} { + if options == nil { + return nil + } + + m := map[string]interface{}{ + "tls_policy": aws.StringValue(options.TlsPolicy), + } + + return []interface{}{m} +} 
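The expand/flatten pair above is the provider's usual pattern for single-item configuration blocks: expand turns the Terraform list into the SDK struct, and flatten does the reverse. A minimal round-trip unit test, hypothetical and not part of this patch, assuming it sits next to the helpers in package aws, might look like:

```go
package aws

import (
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ses"
)

// Hypothetical round-trip check for the helpers above; not part of this patch.
func TestSesConfigurationSetDeliveryOptionsRoundTrip(t *testing.T) {
	in := []interface{}{map[string]interface{}{"tls_policy": ses.TlsPolicyRequire}}

	opts := expandSesConfigurationSetDeliveryOptions(in)
	if got, want := aws.StringValue(opts.TlsPolicy), ses.TlsPolicyRequire; got != want {
		t.Fatalf("expand: got TlsPolicy %q, want %q", got, want)
	}

	out := flattenSesConfigurationSetDeliveryOptions(opts)
	if len(out) != 1 || out[0].(map[string]interface{})["tls_policy"] != ses.TlsPolicyRequire {
		t.Fatalf("flatten: unexpected result %v", out)
	}

	// both helpers treat missing input as "no delivery options"
	if expandSesConfigurationSetDeliveryOptions(nil) != nil || flattenSesConfigurationSetDeliveryOptions(nil) != nil {
		t.Fatal("expected nil for empty inputs")
	}
}
```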
diff --git a/aws/resource_aws_ses_configuration_set_test.go b/aws/resource_aws_ses_configuration_set_test.go index ef68609fd96..d2c0611797c 100644 --- a/aws/resource_aws_ses_configuration_set_test.go +++ b/aws/resource_aws_ses_configuration_set_test.go @@ -69,6 +69,7 @@ func testSweepSesConfigurationSets(region string) error { func TestAccAWSSESConfigurationSet_basic(t *testing.T) { var escRandomInteger = acctest.RandInt() + resourceName := "aws_ses_configuration_set.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { @@ -81,11 +82,12 @@ func TestAccAWSSESConfigurationSet_basic(t *testing.T) { { Config: testAccAWSSESConfigurationSetBasicConfig(escRandomInteger), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsSESConfigurationSetExists("aws_ses_configuration_set.test"), + testAccCheckAwsSESConfigurationSetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "delivery_options.#", "0"), ), }, { - ResourceName: "aws_ses_configuration_set.test", + ResourceName: resourceName, ImportState: true, ImportStateVerify: true, }, @@ -106,18 +108,99 @@ func TestAccAWSSESConfigurationSet_deliveryOptions(t *testing.T) { CheckDestroy: testAccCheckSESConfigurationSetDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSSESConfigurationSetDeliveryOptionsConfig(escRandomInteger), + Config: testAccAWSSESConfigurationSetDeliveryOptionsConfig(escRandomInteger, ses.TlsPolicyRequire), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsSESConfigurationSetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "delivery_options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "delivery_options.0.tls_policy", ses.TlsPolicyRequire), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSESConfigurationSet_update_deliveryOptions(t *testing.T) { + var escRandomInteger = acctest.RandInt() + resourceName := "aws_ses_configuration_set.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckAWSSES(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSESConfigurationSetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSESConfigurationSetBasicConfig(escRandomInteger), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsSESConfigurationSetExists(resourceName), + ), + }, + { + Config: testAccAWSSESConfigurationSetDeliveryOptionsConfig(escRandomInteger, ses.TlsPolicyRequire), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsSESConfigurationSetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "delivery_options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "delivery_options.0.tls_policy", ses.TlsPolicyRequire), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSSESConfigurationSetDeliveryOptionsConfig(escRandomInteger, ses.TlsPolicyOptional), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsSESConfigurationSetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "delivery_options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "delivery_options.0.tls_policy", ses.TlsPolicyOptional), + ), + }, + { + Config: testAccAWSSESConfigurationSetBasicConfig(escRandomInteger), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsSESConfigurationSetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "delivery_options.#", "0"), + ), + }, + { + ResourceName: 
resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSESConfigurationSet_emptyDeliveryOptions(t *testing.T) { + var escRandomInteger = acctest.RandInt() + resourceName := "aws_ses_configuration_set.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckAWSSES(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSESConfigurationSetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSESConfigurationSetEmptyDeliveryOptionsConfig(escRandomInteger), Check: resource.ComposeTestCheckFunc( testAccCheckAwsSESConfigurationSetExists(resourceName), - testAccCheckAwsSESConfigurationSetRequiresTLS(resourceName), resource.TestCheckResourceAttr(resourceName, "delivery_options.#", "1"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "delivery_options.*", map[string]string{ - "tls_policy": "Require", - }), + resource.TestCheckResourceAttr(resourceName, "delivery_options.0.tls_policy", ses.TlsPolicyOptional), ), }, { - ResourceName: "aws_ses_configuration_set.test", + ResourceName: resourceName, ImportState: true, ImportStateVerify: true, }, @@ -125,7 +208,7 @@ func TestAccAWSSESConfigurationSet_deliveryOptions(t *testing.T) { }) } -func TestAccAWSSESConfigurationSet_deliveryOptionsUpdate(t *testing.T) { +func TestAccAWSSESConfigurationSet_update_emptyDeliveryOptions(t *testing.T) { var escRandomInteger = acctest.RandInt() resourceName := "aws_ses_configuration_set.test" @@ -140,23 +223,32 @@ func TestAccAWSSESConfigurationSet_deliveryOptionsUpdate(t *testing.T) { { Config: testAccAWSSESConfigurationSetBasicConfig(escRandomInteger), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsSESConfigurationSetExists("aws_ses_configuration_set.test"), - resource.TestCheckNoResourceAttr(resourceName, "delivery_options.#"), + testAccCheckAwsSESConfigurationSetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "delivery_options.#", "0"), ), }, { - Config: testAccAWSSESConfigurationSetDeliveryOptionsConfig(escRandomInteger), + Config: testAccAWSSESConfigurationSetEmptyDeliveryOptionsConfig(escRandomInteger), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsSESConfigurationSetExists("aws_ses_configuration_set.test"), - testAccCheckAwsSESConfigurationSetRequiresTLS("aws_ses_configuration_set.test"), + testAccCheckAwsSESConfigurationSetExists(resourceName), resource.TestCheckResourceAttr(resourceName, "delivery_options.#", "1"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "delivery_options.*", map[string]string{ - "tls_policy": "Require", - }), + resource.TestCheckResourceAttr(resourceName, "delivery_options.0.tls_policy", ses.TlsPolicyOptional), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSSESConfigurationSetBasicConfig(escRandomInteger), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsSESConfigurationSetExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "delivery_options.#", "0"), ), }, { - ResourceName: "aws_ses_configuration_set.test", + ResourceName: resourceName, ImportState: true, ImportStateVerify: true, }, @@ -193,37 +285,6 @@ func testAccCheckAwsSESConfigurationSetExists(n string) resource.TestCheckFunc { } } -func testAccCheckAwsSESConfigurationSetRequiresTLS(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("SES configuration set not 
found: %s", n) - } - - conn := testAccProvider.Meta().(*AWSClient).sesconn - - describeOpts := &ses.DescribeConfigurationSetInput{ - ConfigurationSetName: aws.String(rs.Primary.ID), - ConfigurationSetAttributeNames: aws.StringSlice([]string{ses.ConfigurationSetAttributeDeliveryOptions}), - } - - response, err := conn.DescribeConfigurationSet(describeOpts) - if err != nil { - return err - } - - if response.DeliveryOptions == nil { - return fmt.Errorf("The configuration set did not have DeliveryOptions set") - } - - if aws.StringValue(response.DeliveryOptions.TlsPolicy) != ses.TlsPolicyRequire { - return fmt.Errorf("The configuration set did not have DeliveryOptions with a TlsPolicy setting set to Require") - } - - return nil - } -} - func testAccCheckSESConfigurationSetDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).sesconn @@ -249,18 +310,27 @@ func testAccCheckSESConfigurationSetDestroy(s *terraform.State) error { func testAccAWSSESConfigurationSetBasicConfig(escRandomInteger int) string { return fmt.Sprintf(` resource "aws_ses_configuration_set" "test" { - name = "some-configuration-set-%d" + name = "some-configuration-set-%d" } `, escRandomInteger) } -func testAccAWSSESConfigurationSetDeliveryOptionsConfig(escRandomInteger int) string { +func testAccAWSSESConfigurationSetDeliveryOptionsConfig(escRandomInteger int, tlsPolicy string) string { return fmt.Sprintf(` resource "aws_ses_configuration_set" "test" { - name = "some-configuration-set-%d" - delivery_options { - tls_policy = "Require" - } + name = "some-configuration-set-%d" + delivery_options { + tls_policy = %q + } +} +`, escRandomInteger, tlsPolicy) +} + +func testAccAWSSESConfigurationSetEmptyDeliveryOptionsConfig(escRandomInteger int) string { + return fmt.Sprintf(` +resource "aws_ses_configuration_set" "test" { + name = "some-configuration-set-%d" + delivery_options {} } `, escRandomInteger) } diff --git a/website/docs/r/ses_configuration_set.markdown b/website/docs/r/ses_configuration_set.markdown index f3b9be2b26f..1565dccbb54 100644 --- a/website/docs/r/ses_configuration_set.markdown +++ b/website/docs/r/ses_configuration_set.markdown @@ -34,11 +34,14 @@ resource "aws_ses_configuration_set" "test" { The following arguments are supported: +* `delivery_options` - (Optional) A configuration block that specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). Detailed below. * `name` - (Required) The name of the configuration set -Delivery Options (`delivery_options`) support the following: +### delivery_options Argument Reference -* `tls_policy` - (Optional) Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). Valid values: `Require` and `Optional`. If the value is `Require`, messages are only delivered if a TLS connection can be established. If the value is `Optional`, messages can be delivered in plain text if a TLS connection can't be established. Defaults to `Optional`. +The `delivery_options` configuration block supports the following argument: + +* `tls_policy` - (Optional) Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). If the value is `Require`, messages are only delivered if a TLS connection can be established. If the value is `Optional`, messages can be delivered in plain text if a TLS connection can't be established. Valid values: `Require` or `Optional`. Defaults to `Optional`. 
## Import From e6963c07e32eb7829e45ae7279ae8a8d8a1a54f2 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 11 Feb 2021 09:55:59 -0500 Subject: [PATCH 1097/1212] tests/s3_bucket: Add acc test for 2 destinations --- aws/resource_aws_s3_bucket_test.go | 119 +++++++++++++++++++++++++++++ 1 file changed, 119 insertions(+) diff --git a/aws/resource_aws_s3_bucket_test.go b/aws/resource_aws_s3_bucket_test.go index 4da47108025..9c2ce501a5b 100644 --- a/aws/resource_aws_s3_bucket_test.go +++ b/aws/resource_aws_s3_bucket_test.go @@ -1685,6 +1685,65 @@ func TestAccAWSS3Bucket_Replication_MultipleDestinations_NonEmptyFilter(t *testi }) } +func TestAccAWSS3Bucket_Replication_MultipleDestinations_TwoDestination(t *testing.T) { + // This tests 2 destinations since GovCloud and possibly other non-standard partitions allow a max of 2 + rInt := acctest.RandInt() + alternateRegion := testAccGetAlternateRegion() + region := testAccGetRegion() + resourceName := "aws_s3_bucket.bucket" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + }, + ErrorCheck: testAccErrorCheckSkipS3(t), + ProviderFactories: testAccProviderFactoriesAlternate(&providers), + CheckDestroy: testAccCheckWithProviders(testAccCheckAWSS3BucketDestroyWithProvider, &providers), + Steps: []resource.TestStep{ + { + Config: testAccAWSS3BucketConfigReplicationWithMultipleDestinationsTwoDestination(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExistsWithProvider(resourceName, testAccAwsRegionProviderFunc(region, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + testAccCheckAWSS3BucketExistsWithProvider("aws_s3_bucket.destination2", testAccAwsRegionProviderFunc(alternateRegion, &providers)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.0.rules.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule1", + "priority": "1", + "status": "Enabled", + "filter.#": "1", + "filter.0.prefix": "prefix1", + "destination.#": "1", + "destination.0.storage_class": "STANDARD", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "replication_configuration.0.rules.*", map[string]string{ + "id": "rule2", + "priority": "2", + "status": "Enabled", + "filter.#": "1", + "filter.0.tags.%": "1", + "filter.0.tags.Key2": "Value2", + "destination.#": "1", + "destination.0.storage_class": "STANDARD_IA", + }), + ), + }, + { + Config: testAccAWSS3BucketConfigReplicationWithMultipleDestinationsTwoDestination(rInt), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + func TestAccAWSS3Bucket_ReplicationConfiguration_Rule_Destination_AccessControlTranslation(t *testing.T) { rInt := acctest.RandInt() region := testAccGetRegion() @@ -4073,6 +4132,66 @@ resource "aws_s3_bucket" "bucket" { `, randInt)) } +func testAccAWSS3BucketConfigReplicationWithMultipleDestinationsTwoDestination(randInt int) string { + return composeConfig( + testAccAWSS3BucketConfigReplicationBasic(randInt), + fmt.Sprintf(` +resource "aws_s3_bucket" "destination2" { + 
provider = "awsalternate" + bucket = "tf-test-bucket-destination2-%[1]d" + + versioning { + enabled = true + } +} + +resource "aws_s3_bucket" "bucket" { + bucket = "tf-test-bucket-%[1]d" + acl = "private" + + versioning { + enabled = true + } + + replication_configuration { + role = aws_iam_role.role.arn + + rules { + id = "rule1" + priority = 1 + status = "Enabled" + + filter { + prefix = "prefix1" + } + + destination { + bucket = aws_s3_bucket.destination.arn + storage_class = "STANDARD" + } + } + + rules { + id = "rule2" + priority = 2 + status = "Enabled" + + filter { + tags = { + Key2 = "Value2" + } + } + + destination { + bucket = aws_s3_bucket.destination2.arn + storage_class = "STANDARD_IA" + } + } + } +} +`, randInt)) +} + func testAccAWSS3BucketConfigReplicationWithSseKmsEncryptedObjects(randInt int) string { return testAccAWSS3BucketConfigReplicationBasic(randInt) + fmt.Sprintf(` resource "aws_kms_key" "replica" { From d0a279122a4dbffdde4e3c5ea952ee6ea0785a6e Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 11 Feb 2021 09:59:16 -0500 Subject: [PATCH 1098/1212] Update CHANGELOG for #11600 --- .changelog/11600.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/11600.txt diff --git a/.changelog/11600.txt b/.changelog/11600.txt new file mode 100644 index 00000000000..c2b2bbf4dfd --- /dev/null +++ b/.changelog/11600.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_ses_configuration_set: Add `delivery_options` argument +``` \ No newline at end of file From 40e2a371f1ebef3795cbce22ea3921721c722a88 Mon Sep 17 00:00:00 2001 From: angie pinilla Date: Thu, 11 Feb 2021 10:09:26 -0500 Subject: [PATCH 1099/1212] Update CHANGELOG for #17313 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b26919fa3da..8e52edc6b92 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ FEATURES: +* **New Resource:** `aws_config_conformance_pack` [GH-17313] * **New Resource:** `aws_sagemaker_model_package_group` ([#17366](https://github.com/hashicorp/terraform-provider-aws/issues/17366)) * **New Resource:** `aws_securityhub_organization_admin_account` ([#17501](https://github.com/hashicorp/terraform-provider-aws/issues/17501)) From ef17ab43221ebe16f1525f4be225d4e16c62be7e Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Thu, 11 Feb 2021 17:22:11 +0200 Subject: [PATCH 1100/1212] resource/aws_ec2_traffic_mirror_filter: Add arn attribute (#13948) References: - https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonec2.html#amazonec2-resources-for-iam-policies - https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_TrafficMirrorFilter.html (no arn or owner id field) Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAWSEc2TrafficMirrorFilter_disappears (12.34s) --- PASS: TestAccAWSEc2TrafficMirrorFilter_tags (35.38s) --- PASS: TestAccAWSEc2TrafficMirrorFilter_basic (35.84s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- PASS: TestAccAWSEc2TrafficMirrorFilter_disappears (16.95s) --- PASS: TestAccAWSEc2TrafficMirrorFilter_basic (44.16s) --- PASS: TestAccAWSEc2TrafficMirrorFilter_tags (44.43s) ``` --- .changelog/13948.txt | 3 +++ aws/resource_aws_ec2_traffic_mirror_filter.go | 15 +++++++++++++++ ...resource_aws_ec2_traffic_mirror_filter_test.go | 15 +++------------ website/docs/index.html.markdown | 1 + .../r/ec2_traffic_mirror_filter.html.markdown | 1 + 5 files changed, 23 insertions(+), 12 deletions(-) create mode 100644 .changelog/13948.txt diff 
--git a/.changelog/13948.txt b/.changelog/13948.txt new file mode 100644 index 00000000000..71f2f4e7481 --- /dev/null +++ b/.changelog/13948.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_ec2_traffic_mirror_filter: Add `arn` attribute. +``` diff --git a/aws/resource_aws_ec2_traffic_mirror_filter.go b/aws/resource_aws_ec2_traffic_mirror_filter.go index fcf9a485924..4541a94dce1 100644 --- a/aws/resource_aws_ec2_traffic_mirror_filter.go +++ b/aws/resource_aws_ec2_traffic_mirror_filter.go @@ -5,6 +5,7 @@ import ( "log" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -21,6 +22,10 @@ func resourceAwsEc2TrafficMirrorFilter() *schema.Resource { State: schema.ImportStatePassthrough, }, Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, "description": { Type: schema.TypeString, Optional: true, @@ -150,6 +155,16 @@ func resourceAwsEc2TrafficMirrorFilterRead(d *schema.ResourceData, meta interfac return fmt.Errorf("error setting network_services for filter %v: %s", d.Id(), err) } + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "ec2", + Region: meta.(*AWSClient).region, + AccountID: meta.(*AWSClient).accountid, + Resource: fmt.Sprintf("traffic-mirror-filter/%s", d.Id()), + }.String() + + d.Set("arn", arn) + return nil } diff --git a/aws/resource_aws_ec2_traffic_mirror_filter_test.go b/aws/resource_aws_ec2_traffic_mirror_filter_test.go index 04042a270d4..911e619eb87 100644 --- a/aws/resource_aws_ec2_traffic_mirror_filter_test.go +++ b/aws/resource_aws_ec2_traffic_mirror_filter_test.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "regexp" "testing" "github.com/aws/aws-sdk-go/aws" @@ -28,6 +29,7 @@ func TestAccAWSEc2TrafficMirrorFilter_basic(t *testing.T) { Config: testAccTrafficMirrorFilterConfig(description), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEc2TrafficMirrorFilterExists(resourceName, &v), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "ec2", regexp.MustCompile(`traffic-mirror-filter/tmf-.+`)), resource.TestCheckResourceAttr(resourceName, "description", description), resource.TestCheckResourceAttr(resourceName, "network_services.#", "1"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), @@ -122,7 +124,7 @@ func TestAccAWSEc2TrafficMirrorFilter_disappears(t *testing.T) { Config: testAccTrafficMirrorFilterConfig(description), Check: resource.ComposeTestCheckFunc( testAccCheckAWSEc2TrafficMirrorFilterExists(resourceName, &v), - testAccCheckAWSEc2TrafficMirrorFilterDisappears(&v), + testAccCheckResourceDisappears(testAccProvider, resourceAwsEc2TrafficMirrorFilter(), resourceName), ), ExpectNonEmptyPlan: true, }, @@ -161,17 +163,6 @@ func testAccCheckAWSEc2TrafficMirrorFilterExists(name string, traffic *ec2.Traff } } -func testAccCheckAWSEc2TrafficMirrorFilterDisappears(traffic *ec2.TrafficMirrorFilter) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ec2conn - _, err := conn.DeleteTrafficMirrorFilter(&ec2.DeleteTrafficMirrorFilterInput{ - TrafficMirrorFilterId: traffic.TrafficMirrorFilterId, - }) - - return err - } -} - func testAccTrafficMirrorFilterConfig(description string) string { return fmt.Sprintf(` resource "aws_ec2_traffic_mirror_filter" "test" { diff --git a/website/docs/index.html.markdown 
b/website/docs/index.html.markdown index 5a6d9fd1726..8266d49b471 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -265,6 +265,7 @@ for more information about connecting to alternate AWS endpoints or AWS compatib - [`aws_ebs_volume` data source](/docs/providers/aws/d/ebs_volume.html) - [`aws_ec2_capacity_reservation` resource](/docs/providers/aws/r/ec2_capacity_reservation.html) - [`aws_ec2_client_vpn_endpoint` resource](/docs/providers/aws/r/ec2_client_vpn_endpoint.html) + - [`aws_ec2_traffic_mirror_filter` resource](/docs/providers/aws/r/ec2_traffic_mirror_filter.html) - [`aws_ec2_traffic_mirror_filter_rule` resource](/docs/providers/aws/r/ec2_traffic_mirror_filter_rule.html) - [`aws_ec2_traffic_mirror_session` resource](/docs/providers/aws/r/ec2_traffic_mirror_session.html) - [`aws_ec2_traffic_mirror_target` resource](/docs/providers/aws/r/ec2_traffic_mirror_target.html) diff --git a/website/docs/r/ec2_traffic_mirror_filter.html.markdown b/website/docs/r/ec2_traffic_mirror_filter.html.markdown index 0bf7ca21b10..d4fb95cb685 100644 --- a/website/docs/r/ec2_traffic_mirror_filter.html.markdown +++ b/website/docs/r/ec2_traffic_mirror_filter.html.markdown @@ -35,6 +35,7 @@ The following arguments are supported: In addition to all arguments above, the following attributes are exported: +* `arn` - The ARN of the traffic mirror filter. * `id` - The name of the filter. ## Import From 21520f5230ff0e79d6c7e1a8f035d67d399f1316 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 11 Feb 2021 10:34:10 -0500 Subject: [PATCH 1101/1212] resource/aws_lambda_event_source_mapping: Add topics argument (#14746) Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAWSLambdaEventSourceMapping_sqs_withFunctionName (55.29s) --- FAIL: TestAccAWSLambdaEventSourceMapping_sqs_basic (64.70s) # https://github.com/hashicorp/terraform-provider-aws/pull/14765 --- PASS: TestAccAWSLambdaEventSourceMapping_SQSBatchWindow (77.70s) --- PASS: TestAccAWSLambdaEventSourceMapping_StartingPositionTimestamp (83.78s) --- PASS: TestAccAWSLambdaEventSourceMapping_MaximumRecordAgeInSeconds (106.53s) --- PASS: TestAccAWSLambdaEventSourceMapping_changesInEnabledAreDetected (107.34s) --- PASS: TestAccAWSLambdaEventSourceMapping_BisectBatch (107.85s) --- PASS: TestAccAWSLambdaEventSourceMapping_KinesisDestinationConfig (108.87s) --- PASS: TestAccAWSLambdaEventSourceMapping_kinesis_removeBatchSize (109.37s) --- PASS: TestAccAWSLambdaEventSourceMapping_disappears (112.05s) --- PASS: TestAccAWSLambdaEventSourceMapping_KinesisBatchWindow (115.26s) --- PASS: TestAccAWSLambdaEventSourceMapping_MaximumRetryAttempts (116.99s) --- PASS: TestAccAWSLambdaEventSourceMapping_ParallelizationFactor (117.33s) --- PASS: TestAccAWSLambdaEventSourceMapping_kinesis_basic (118.04s) --- PASS: TestAccAWSLambdaEventSourceMapping_MaximumRetryAttemptsZero (120.57s) --- PASS: TestAccAWSLambdaEventSourceMapping_MSK (1684.38s) ``` --- .changelog/14746.txt | 3 + ...esource_aws_lambda_event_source_mapping.go | 18 ++- ...ce_aws_lambda_event_source_mapping_test.go | 153 ++++++++++++++++++ .../lambda_event_source_mapping.html.markdown | 18 ++- 4 files changed, 187 insertions(+), 5 deletions(-) create mode 100644 .changelog/14746.txt diff --git a/.changelog/14746.txt b/.changelog/14746.txt new file mode 100644 index 00000000000..02fdfe83106 --- /dev/null +++ b/.changelog/14746.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_lambda_event_source_mapping: Add `topics` attribute to support Amazon MSK as an 
event source +``` diff --git a/aws/resource_aws_lambda_event_source_mapping.go b/aws/resource_aws_lambda_event_source_mapping.go index 6ccf481b20e..6464f5f1dfa 100644 --- a/aws/resource_aws_lambda_event_source_mapping.go +++ b/aws/resource_aws_lambda_event_source_mapping.go @@ -60,6 +60,13 @@ func resourceAwsLambdaEventSourceMapping() *schema.Resource { ForceNew: true, ValidateFunc: validation.IsRFC3339Time, }, + "topics": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, "batch_size": { Type: schema.TypeInt, Optional: true, @@ -205,6 +212,11 @@ func resourceAwsLambdaEventSourceMappingCreate(d *schema.ResourceData, meta inte t, _ := time.Parse(time.RFC3339, startingPositionTimestamp.(string)) params.StartingPositionTimestamp = aws.Time(t) } + + if topics, ok := d.GetOk("topics"); ok && topics.(*schema.Set).Len() > 0 { + params.Topics = expandStringSet(topics.(*schema.Set)) + } + if parallelizationFactor, ok := d.GetOk("parallelization_factor"); ok { params.ParallelizationFactor = aws.Int64(int64(parallelizationFactor.(int))) } @@ -253,9 +265,8 @@ func resourceAwsLambdaEventSourceMappingCreate(d *schema.ResourceData, meta inte return fmt.Errorf("Error creating Lambda event source mapping: %s", err) } - // No error - d.Set("uuid", eventSourceMappingConfiguration.UUID) d.SetId(aws.StringValue(eventSourceMappingConfiguration.UUID)) + return resourceAwsLambdaEventSourceMappingRead(d, meta) } @@ -298,6 +309,9 @@ func resourceAwsLambdaEventSourceMappingRead(d *schema.ResourceData, meta interf if err := d.Set("destination_config", flattenLambdaEventSourceMappingDestinationConfig(eventSourceMappingConfiguration.DestinationConfig)); err != nil { return fmt.Errorf("error setting destination_config: %s", err) } + if err := d.Set("topics", flattenStringSet(eventSourceMappingConfiguration.Topics)); err != nil { + return fmt.Errorf("error setting topics: %s", err) + } state := aws.StringValue(eventSourceMappingConfiguration.State) diff --git a/aws/resource_aws_lambda_event_source_mapping_test.go b/aws/resource_aws_lambda_event_source_mapping_test.go index f4e7c7d65af..64bfdaccbe5 100644 --- a/aws/resource_aws_lambda_event_source_mapping_test.go +++ b/aws/resource_aws_lambda_event_source_mapping_test.go @@ -62,6 +62,7 @@ func TestAccAWSLambdaEventSourceMapping_kinesis_basic(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "function_arn", functionResourceNameUpdated, "arn"), resource.TestCheckResourceAttrPair(resourceName, "event_source_arn", eventSourceResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "starting_position", "TRIM_HORIZON"), + resource.TestCheckResourceAttr(resourceName, "topics.#", "0"), ), }, }, @@ -621,6 +622,39 @@ func TestAccAWSLambdaEventSourceMapping_KinesisDestinationConfig(t *testing.T) { }) } +func TestAccAWSLambdaEventSourceMapping_MSK(t *testing.T) { + var v lambda.EventSourceMappingConfiguration + resourceName := "aws_lambda_event_source_mapping.test" + eventSourceResourceName := "aws_msk_cluster.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckLambdaEventSourceMappingDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLambdaEventSourceMappingConfigMsk(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsLambdaEventSourceMappingExists(resourceName, &v), + 
testAccCheckAWSLambdaEventSourceMappingAttributes(&v), + resource.TestCheckResourceAttrPair(resourceName, "event_source_arn", eventSourceResourceName, "arn"), + testAccCheckResourceAttrRfc3339(resourceName, "last_modified"), + resource.TestCheckNoResourceAttr(resourceName, "starting_position"), + resource.TestCheckResourceAttr(resourceName, "topics.#", "1"), + resource.TestCheckTypeSetElemAttr(resourceName, "topics.*", "test"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"enabled", "starting_position"}, + }, + }, + }) +} + func testAccCheckAWSLambdaEventSourceMappingIsBeingDisabled(conf *lambda.EventSourceMappingConfiguration) resource.TestCheckFunc { return func(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).lambdaconn @@ -1527,3 +1561,122 @@ resource "aws_lambda_event_source_mapping" "test" { } `, batchWindow)) } + +func testAccAWSLambdaEventSourceMappingConfigMsk(rName string) string { + return composeConfig(testAccAvailableAZsNoOptInConfig(), fmt.Sprintf(` +resource "aws_iam_role" "test" { + name = %[1]q + + assume_role_policy = < Date: Fri, 2 Oct 2020 10:23:26 -0700 Subject: [PATCH 1102/1212] resource/lambda_function: fix incorrect description of IAM role attribute. --- website/docs/r/lambda_function.html.markdown | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/website/docs/r/lambda_function.html.markdown b/website/docs/r/lambda_function.html.markdown index f900acd25b1..200d7f2a2c6 100644 --- a/website/docs/r/lambda_function.html.markdown +++ b/website/docs/r/lambda_function.html.markdown @@ -208,6 +208,10 @@ package via S3 it may be useful to use [the `aws_s3_bucket_object` resource](s3_ For larger deployment packages it is recommended by Amazon to upload via S3, since the S3 API has better support for uploading large files efficiently. +## Invoking Functions + +To give an external source (like a CloudWatch Event Rule, SNS, or S3) permission to access the Lambda function, use the [`aws_lambda_permission`](lambda_permission.html) resource. See [Lambda Permission Model][4] for more details. + ## Argument Reference * `filename` - (Optional) The path to the function's deployment package within the local filesystem. If defined, The `s3_`-prefixed options and `image_uri` cannot be used. @@ -219,7 +223,7 @@ large files efficiently. * `function_name` - (Required) A unique name for your Lambda Function. * `dead_letter_config` - (Optional) Nested block to configure the function's *dead letter queue*. See details below. * `handler` - (Required) The function [entrypoint][3] in your code. -* `role` - (Required) IAM role attached to the Lambda Function. This governs both who / what can invoke your Lambda Function, as well as what resources our Lambda Function has access to. See [Lambda Permission Model][4] for more details. +* `role` - (Required) The ARN of the function's execution role that grants it permission to access AWS services and resources. * `description` - (Optional) Description of what your Lambda Function does. * `layers` - (Optional) List of Lambda Layer Version ARNs (maximum of 5) to attach to your Lambda Function. See [Lambda Layers][10] * `memory_size` - (Optional) Amount of memory in MB your Lambda Function can use at runtime. Defaults to `128`. 
See [Limits][5] From 406615ceaaa08e5ced3d64ff0b0c880f499ab9e8 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 11 Feb 2021 10:50:38 -0500 Subject: [PATCH 1103/1212] docs/lambda_function: Clean up docs --- website/docs/r/lambda_function.html.markdown | 118 ++++++++----------- 1 file changed, 51 insertions(+), 67 deletions(-) diff --git a/website/docs/r/lambda_function.html.markdown b/website/docs/r/lambda_function.html.markdown index 200d7f2a2c6..2f7b88bec04 100644 --- a/website/docs/r/lambda_function.html.markdown +++ b/website/docs/r/lambda_function.html.markdown @@ -16,6 +16,8 @@ For a detailed example of setting up Lambda and API Gateway, see [Serverless App ~> **NOTE:** Due to [AWS Lambda improved VPC networking changes that began deploying in September 2019](https://aws.amazon.com/blogs/compute/announcing-improved-vpc-networking-for-aws-lambda-functions/), EC2 subnets and security groups associated with Lambda Functions can take up to 45 minutes to successfully delete. Terraform AWS Provider version 2.31.0 and later automatically handles this increased timeout, however prior versions require setting the customizable deletion timeouts of those Terraform resources to 45 minutes (`delete = "45m"`). AWS and HashiCorp are working together to reduce the amount of time required for resource deletion and updates can be tracked in this [GitHub issue](https://github.com/hashicorp/terraform-provider-aws/issues/10329). +-> To give an external source (like a CloudWatch Event Rule, SNS, or S3) permission to access the Lambda function, use the [`aws_lambda_permission`](lambda_permission.html) resource. See [Lambda Permission Model][4] for more details. On the other hand, the `role` argument of this resource is the function's execution role for identity and access to AWS services and resources. + ## Example Usage ### Basic Example @@ -197,101 +199,83 @@ resource "aws_iam_role_policy_attachment" "lambda_logs" { ## Specifying the Deployment Package -AWS Lambda expects source code to be provided as a deployment package whose structure varies depending on which `runtime` is in use. -See [Runtimes][6] for the valid values of `runtime`. The expected structure of the deployment package can be found in -[the AWS Lambda documentation for each runtime][8]. +AWS Lambda expects source code to be provided as a deployment package whose structure varies depending on which `runtime` is in use. See [Runtimes][6] for the valid values of `runtime`. The expected structure of the deployment package can be found in [the AWS Lambda documentation for each runtime][8]. -Once you have created your deployment package you can specify it either directly as a local file (using the `filename` argument) or -indirectly via Amazon S3 (using the `s3_bucket`, `s3_key` and `s3_object_version` arguments). When providing the deployment -package via S3 it may be useful to use [the `aws_s3_bucket_object` resource](s3_bucket_object.html) to upload it. +Once you have created your deployment package you can specify it either directly as a local file (using the `filename` argument) or indirectly via Amazon S3 (using the `s3_bucket`, `s3_key` and `s3_object_version` arguments). When providing the deployment package via S3 it may be useful to use [the `aws_s3_bucket_object` resource](s3_bucket_object.html) to upload it. -For larger deployment packages it is recommended by Amazon to upload via S3, since the S3 API has better support for uploading -large files efficiently. 
+For larger deployment packages it is recommended by Amazon to upload via S3, since the S3 API has better support for uploading large files efficiently. -## Invoking Functions +## Argument Reference -To give an external source (like a CloudWatch Event Rule, SNS, or S3) permission to access the Lambda function, use the [`aws_lambda_permission`](lambda_permission.html) resource. See [Lambda Permission Model][4] for more details. +The following arguments are required: -## Argument Reference +* `function_name` - (Required) Unique name for your Lambda Function. +* `handler` - (Required) Function [entrypoint][3] in your code. +* `role` - (Required) Amazon Resource Name (ARN) of the function's execution role. The role provides the function's identity and access to AWS services and resources. +* `runtime` - (Required) Identifier of the function's runtime. See [Runtimes][6] for valid values. -* `filename` - (Optional) The path to the function's deployment package within the local filesystem. If defined, The `s3_`-prefixed options and `image_uri` cannot be used. -* `s3_bucket` - (Optional) The S3 bucket location containing the function's deployment package. Conflicts with `filename` and `image_uri`. This bucket must reside in the same AWS region where you are creating the Lambda function. -* `s3_key` - (Optional) The S3 key of an object containing the function's deployment package. Conflicts with `filename` and `image_uri`. -* `s3_object_version` - (Optional) The object version containing the function's deployment package. Conflicts with `filename` and `image_uri`. -* `image_uri` - (Optional) The ECR image URI containing the function's deployment package. Conflicts with `filename`, `s3_bucket`, `s3_key`, and `s3_object_version`. -* `package_type` - (Optional) The Lambda deployment package type. Valid values are `Zip` and `Image`. Defaults to `Zip`. -* `function_name` - (Required) A unique name for your Lambda Function. -* `dead_letter_config` - (Optional) Nested block to configure the function's *dead letter queue*. See details below. -* `handler` - (Required) The function [entrypoint][3] in your code. -* `role` - (Required) The ARN of the function's execution role that grants it permission to access AWS services and resources. +The following arguments are optional: + +* `dead_letter_config` - (Optional) Configuration block. Detailed below. * `description` - (Optional) Description of what your Lambda Function does. +* `environment` - (Optional) Configuration block. Detailed below. +* `file_system_config` - (Optional) Configuration block. Detailed below. +* `filename` - (Optional) Path to the function's deployment package within the local filesystem. If defined, The `s3_`-prefixed options cannot be used. +* `kms_key_arn` - (Optional) Amazon Resource Name (ARN) of the AWS Key Management Service (KMS) key that is used to encrypt environment variables. If this configuration is not provided when environment variables are in use, AWS Lambda uses a default service key. If this configuration is provided when environment variables are not in use, the AWS Lambda API does not save this configuration and Terraform will show a perpetual difference of adding the key. To fix the perpetual difference, remove this configuration. * `layers` - (Optional) List of Lambda Layer Version ARNs (maximum of 5) to attach to your Lambda Function. See [Lambda Layers][10] * `memory_size` - (Optional) Amount of memory in MB your Lambda Function can use at runtime. Defaults to `128`. 
See [Limits][5] -* `runtime` - (Optional) See [Runtimes][6] for valid values. -* `timeout` - (Optional) The amount of time your Lambda Function has to run in seconds. Defaults to `3`. See [Limits][5] -* `reserved_concurrent_executions` - (Optional) The amount of reserved concurrent executions for this lambda function. A value of `0` disables lambda from being triggered and `-1` removes any concurrency limitations. Defaults to Unreserved Concurrency Limits `-1`. See [Managing Concurrency][9] * `publish` - (Optional) Whether to publish creation/change as new Lambda Function Version. Defaults to `false`. -* `vpc_config` - (Optional) Provide this to allow your function to access your VPC. Fields documented below. See [Lambda in VPC][7] -* `environment` - (Optional) The Lambda environment's configuration settings. Fields documented below. -* `kms_key_arn` - (Optional) Amazon Resource Name (ARN) of the AWS Key Management Service (KMS) key that is used to encrypt environment variables. If this configuration is not provided when environment variables are in use, AWS Lambda uses a default service key. If this configuration is provided when environment variables are not in use, the AWS Lambda API does not save this configuration and Terraform will show a perpetual difference of adding the key. To fix the perpetual difference, remove this configuration. +* `reserved_concurrent_executions` - (Optional) Amount of reserved concurrent executions for this lambda function. A value of `0` disables lambda from being triggered and `-1` removes any concurrency limitations. Defaults to Unreserved Concurrency Limits `-1`. See [Managing Concurrency][9] +* `s3_bucket` - (Optional) S3 bucket location containing the function's deployment package. Conflicts with `filename`. This bucket must reside in the same AWS region where you are creating the Lambda function. +* `s3_key` - (Optional) S3 key of an object containing the function's deployment package. Conflicts with `filename`. +* `s3_object_version` - (Optional) Object version containing the function's deployment package. Conflicts with `filename`. * `source_code_hash` - (Optional) Used to trigger updates. Must be set to a base64-encoded SHA256 hash of the package file specified with either `filename` or `s3_key`. The usual way to set this is `filebase64sha256("file.zip")` (Terraform 0.11.12 and later) or `base64sha256(file("file.zip"))` (Terraform 0.11.11 and earlier), where "file.zip" is the local filename of the lambda function source archive. -* `tags` - (Optional) A map of tags to assign to the object. -* `file_system_config` - (Optional) The connection settings for an EFS file system. Fields documented below. Before creating or updating Lambda functions with `file_system_config`, EFS mount targets much be in available lifecycle state. Use `depends_on` to explicitly declare this dependency. See [Using Amazon EFS with Lambda][12]. -* `code_signing_config_arn` - (Optional) Amazon Resource Name (ARN) for a Code Signing Configuration. -* `image_config` - (Optional) The Lambda OCI image configurations. Fields documented below. See [Using container images with Lambda][13] +* `tags` - (Optional) Map of tags to assign to the object. +* `tracing_config` - (Optional) Configuration block. Detailed below. +* `timeout` - (Optional) Amount of time your Lambda Function has to run in seconds. Defaults to `3`. See [Limits][5] +* `vpc_config` - (Optional) Configuration block. Detailed below. 
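On the `source_code_hash` argument documented above: `filebase64sha256` is the base64 encoding of the raw (not hex) SHA-256 digest of the package file. The same value can be computed outside Terraform, for example in a CI step that uploads the package. A minimal sketch, with a hypothetical `function.zip` file name:

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"io"
	"log"
	"os"
)

// Prints the same value as Terraform's filebase64sha256("function.zip"):
// the raw SHA-256 digest of the file, base64-encoded.
func main() {
	f, err := os.Open("function.zip") // hypothetical deployment package
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		log.Fatal(err)
	}

	fmt.Println(base64.StdEncoding.EncodeToString(h.Sum(nil)))
}
```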
+
+### dead_letter_config

-**dead_letter_config** is a child block with a single argument:
+Dead letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events when they fail processing. For more information, see [Dead Letter Queues](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#dlq).

-* `target_arn` - (Required) The ARN of an SNS topic or SQS queue to notify when an invocation fails. If this
-  option is used, the function's IAM role must be granted suitable access to write to the target object,
-  which means allowing either the `sns:Publish` or `sqs:SendMessage` action on this ARN, depending on
-  which service is targeted.
+* `target_arn` - (Required) ARN of an SNS topic or SQS queue to notify when an invocation fails. If this option is used, the function's IAM role must be granted suitable access to write to the target object, which means allowing either the `sns:Publish` or `sqs:SendMessage` action on this ARN, depending on which service is targeted.

-**tracing_config** is a child block with a single argument:
+### environment

-* `mode` - (Required) Can be either `PassThrough` or `Active`. If PassThrough, Lambda will only trace
-  the request from an upstream service if it contains a tracing header with
-  "sampled=1". If Active, Lambda will respect any tracing header it receives
-  from an upstream service. If no tracing header is received, Lambda will call
-  X-Ray for a tracing decision.
+* `variables` - (Optional) Map of environment variables that are accessible from the function code during execution.

-**vpc_config** requires the following:
+### file_system_config

-* `subnet_ids` - (Required) A list of subnet IDs associated with the Lambda function.
-* `security_group_ids` - (Required) A list of security group IDs associated with the Lambda function.
+Connection settings for an EFS file system. Before creating or updating Lambda functions with `file_system_config`, EFS mount targets must be in available lifecycle state. Use `depends_on` to explicitly declare this dependency. See [Using Amazon EFS with Lambda][12].

-~> **NOTE:** if both `subnet_ids` and `security_group_ids` are empty then vpc_config is considered to be empty or unset.
+* `arn` - (Required) Amazon Resource Name (ARN) of the Amazon EFS Access Point that provides access to the file system.
+* `local_mount_path` - (Required) Path where the function can access the file system, starting with /mnt/.

-For **environment** the following attributes are supported:
+### tracing_config

-* `variables` - (Optional) A map that defines environment variables for the Lambda function.
+* `mode` - (Required) Whether to sample and trace a subset of incoming requests with AWS X-Ray. Valid values are `PassThrough` and `Active`. If `PassThrough`, Lambda will only trace the request from an upstream service if it contains a tracing header with "sampled=1". If `Active`, Lambda will respect any tracing header it receives from an upstream service. If no tracing header is received, Lambda will call X-Ray for a tracing decision.

-**file_system_config** is a child block with two arguments:
+### vpc_config

-* `arn` - (Required) The Amazon Resource Name (ARN) of the Amazon EFS Access Point that provides access to the file system.
-* `local_mount_path` - (Required) The path where the function can access the file system, starting with /mnt/.
+For network connectivity to AWS resources in a VPC, specify a list of security groups and subnets in the VPC.
When you connect a function to a VPC, it can only access resources and the internet through that VPC. See [VPC Settings][7]. -**image_config** is a child block with three arguments: +~> **NOTE:** If both `subnet_ids` and `security_group_ids` are empty then `vpc_config` is considered to be empty or unset. -* `entry_point` - (Optional) The ENTRYPOINT for the docker image. -* `command` - (Optional) The CMD for the docker image. -* `working_directory` - (Optional) The working directory for the docker image. +* `security_group_ids` - (Required) List of security group IDs associated with the Lambda function. +* `subnet_ids` - (Required) List of subnet IDs associated with the Lambda function. ## Attributes Reference -In addition to all arguments above, the following attributes are exported: +In addition to arguments above, the following attributes are exported: -* `arn` - The Amazon Resource Name (ARN) identifying your Lambda Function. -* `qualified_arn` - The Amazon Resource Name (ARN) identifying your Lambda Function Version - (if versioning is enabled via `publish = true`). -* `invoke_arn` - The ARN to be used for invoking Lambda Function from API Gateway - to be used in [`aws_api_gateway_integration`](/docs/providers/aws/r/api_gateway_integration.html)'s `uri` -* `version` - Latest published version of your Lambda Function. -* `last_modified` - The date this resource was last modified. -* `kms_key_arn` - (Optional) The ARN for the KMS encryption key. -* `signing_job_arn` - The Amazon Resource Name (ARN) of a signing job. -* `signing_profile_version_arn` - The Amazon Resource Name (ARN) for a signing profile version. +* `arn` - Amazon Resource Name (ARN) identifying your Lambda Function. +* `invoke_arn` - ARN to be used for invoking Lambda Function from API Gateway - to be used in [`aws_api_gateway_integration`](/docs/providers/aws/r/api_gateway_integration.html)'s `uri`. +* `last_modified` - Date this resource was last modified. +* `qualified_arn` - ARN identifying your Lambda Function Version (if versioning is enabled via `publish = true`). * `source_code_hash` - Base64-encoded representation of raw SHA-256 sum of the zip file, provided either via `filename` or `s3_*` parameters. -* `source_code_size` - The size in bytes of the function .zip file. +* `source_code_size` - Size in bytes of the function .zip file. +* `version` - Latest published version of your Lambda Function. 
[1]: https://docs.aws.amazon.com/lambda/latest/dg/welcome.html [2]: https://docs.aws.amazon.com/lambda/latest/dg/walkthrough-s3-events-adminuser-create-test-function-create-function.html @@ -299,7 +283,7 @@ In addition to all arguments above, the following attributes are exported: [4]: https://docs.aws.amazon.com/lambda/latest/dg/intro-permission-model.html [5]: https://docs.aws.amazon.com/lambda/latest/dg/limits.html [6]: https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html#SSS-CreateFunction-request-Runtime -[7]: http://docs.aws.amazon.com/lambda/latest/dg/vpc.html +[7]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-vpc.html [8]: https://docs.aws.amazon.com/lambda/latest/dg/deployment-package-v2.html [9]: https://docs.aws.amazon.com/lambda/latest/dg/concurrent-executions.html [10]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html From b484d38dd03b942dd6b731014cdd47b7b4b4dd38 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 11 Feb 2021 11:05:00 -0500 Subject: [PATCH 1104/1212] resource/aws_lambda_event_source_mapping: Refactor waiters to service package and add CHANGELOG Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAWSLambdaEventSourceMapping_BisectBatch (84.88s) --- PASS: TestAccAWSLambdaEventSourceMapping_KinesisBatchWindow (91.98s) --- PASS: TestAccAWSLambdaEventSourceMapping_StartingPositionTimestamp (93.96s) --- PASS: TestAccAWSLambdaEventSourceMapping_MaximumRetryAttempts (99.00s) --- PASS: TestAccAWSLambdaEventSourceMapping_MaximumRecordAgeInSeconds (119.52s) --- PASS: TestAccAWSLambdaEventSourceMapping_sqs_basic (122.45s) --- PASS: TestAccAWSLambdaEventSourceMapping_MaximumRetryAttemptsZero (123.92s) --- PASS: TestAccAWSLambdaEventSourceMapping_disappears (140.88s) --- PASS: TestAccAWSLambdaEventSourceMapping_sqs_withFunctionName (148.30s) --- PASS: TestAccAWSLambdaEventSourceMapping_changesInEnabledAreDetected (150.96s) --- PASS: TestAccAWSLambdaEventSourceMapping_SQSBatchWindow (151.81s) --- PASS: TestAccAWSLambdaEventSourceMapping_ParallelizationFactor (154.00s) --- PASS: TestAccAWSLambdaEventSourceMapping_kinesis_removeBatchSize (179.49s) --- PASS: TestAccAWSLambdaEventSourceMapping_kinesis_basic (194.97s) --- PASS: TestAccAWSLambdaEventSourceMapping_KinesisDestinationConfig (203.61s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- PASS: TestAccAWSLambdaEventSourceMapping_sqs_withFunctionName (51.56s) --- PASS: TestAccAWSLambdaEventSourceMapping_SQSBatchWindow (79.77s) --- PASS: TestAccAWSLambdaEventSourceMapping_MaximumRecordAgeInSeconds (87.12s) --- PASS: TestAccAWSLambdaEventSourceMapping_ParallelizationFactor (87.57s) --- PASS: TestAccAWSLambdaEventSourceMapping_KinesisBatchWindow (108.33s) --- PASS: TestAccAWSLambdaEventSourceMapping_BisectBatch (113.78s) --- PASS: TestAccAWSLambdaEventSourceMapping_disappears (118.66s) --- PASS: TestAccAWSLambdaEventSourceMapping_kinesis_removeBatchSize (126.46s) --- PASS: TestAccAWSLambdaEventSourceMapping_StartingPositionTimestamp (138.00s) --- PASS: TestAccAWSLambdaEventSourceMapping_changesInEnabledAreDetected (148.92s) --- PASS: TestAccAWSLambdaEventSourceMapping_MaximumRetryAttemptsZero (161.04s) --- PASS: TestAccAWSLambdaEventSourceMapping_KinesisDestinationConfig (161.87s) --- PASS: TestAccAWSLambdaEventSourceMapping_MaximumRetryAttempts (166.13s) --- PASS: TestAccAWSLambdaEventSourceMapping_kinesis_basic (193.48s) --- PASS: TestAccAWSLambdaEventSourceMapping_sqs_basic (226.55s) ``` --- .changelog/14765.txt | 3 + 
aws/internal/service/lambda/waiter/status.go | 41 +++++++++++++ aws/internal/service/lambda/waiter/waiter.go | 61 +++++++++++++++++++ ...esource_aws_lambda_event_source_mapping.go | 54 +++------------- 4 files changed, 115 insertions(+), 44 deletions(-) create mode 100644 .changelog/14765.txt create mode 100644 aws/internal/service/lambda/waiter/status.go create mode 100644 aws/internal/service/lambda/waiter/waiter.go diff --git a/.changelog/14765.txt b/.changelog/14765.txt new file mode 100644 index 00000000000..3825f937811 --- /dev/null +++ b/.changelog/14765.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_lambda_event_source_mapping: Wait for create and update operations to complete +``` diff --git a/aws/internal/service/lambda/waiter/status.go b/aws/internal/service/lambda/waiter/status.go new file mode 100644 index 00000000000..ed6de2e71b0 --- /dev/null +++ b/aws/internal/service/lambda/waiter/status.go @@ -0,0 +1,41 @@ +package waiter + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/lambda" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const ( + EventSourceMappingStateCreating = "Creating" + EventSourceMappingStateDisabled = "Disabled" + EventSourceMappingStateDisabling = "Disabling" + EventSourceMappingStateEnabled = "Enabled" + EventSourceMappingStateEnabling = "Enabling" + EventSourceMappingStateUpdating = "Updating" +) + +func EventSourceMappingState(conn *lambda.Lambda, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + input := &lambda.GetEventSourceMappingInput{ + UUID: aws.String(id), + } + + output, err := conn.GetEventSourceMapping(input) + + if tfawserr.ErrCodeEquals(err, lambda.ErrCodeResourceNotFoundException) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + if output == nil { + return nil, "", nil + } + + return output, aws.StringValue(output.State), nil + } +} diff --git a/aws/internal/service/lambda/waiter/waiter.go b/aws/internal/service/lambda/waiter/waiter.go new file mode 100644 index 00000000000..dfc3ee75f3c --- /dev/null +++ b/aws/internal/service/lambda/waiter/waiter.go @@ -0,0 +1,61 @@ +package waiter + +import ( + "time" + + "github.com/aws/aws-sdk-go/service/lambda" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const ( + EventSourceMappingCreateTimeout = 10 * time.Minute + EventSourceMappingUpdateTimeout = 10 * time.Minute +) + +func EventSourceMappingCreate(conn *lambda.Lambda, id string) (*lambda.EventSourceMappingConfiguration, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + EventSourceMappingStateCreating, + EventSourceMappingStateDisabling, + EventSourceMappingStateEnabling, + }, + Target: []string{ + EventSourceMappingStateDisabled, + EventSourceMappingStateEnabled, + }, + Refresh: EventSourceMappingState(conn, id), + Timeout: EventSourceMappingCreateTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*lambda.EventSourceMappingConfiguration); ok { + return output, err + } + + return nil, err +} + +func EventSourceMappingUpdate(conn *lambda.Lambda, id string) (*lambda.EventSourceMappingConfiguration, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + EventSourceMappingStateDisabling, + EventSourceMappingStateEnabling, + EventSourceMappingStateUpdating, + }, + Target: []string{ + EventSourceMappingStateDisabled, + EventSourceMappingStateEnabled, + }, + Refresh: 
EventSourceMappingState(conn, id), + Timeout: EventSourceMappingUpdateTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*lambda.EventSourceMappingConfiguration); ok { + return output, err + } + + return nil, err +} diff --git a/aws/resource_aws_lambda_event_source_mapping.go b/aws/resource_aws_lambda_event_source_mapping.go index 20339cb6e46..4115b1ae9a9 100644 --- a/aws/resource_aws_lambda_event_source_mapping.go +++ b/aws/resource_aws_lambda_event_source_mapping.go @@ -15,15 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -const ( - LambdaEventSourceMappingCreating = "Creating" - LambdaEventSourceMappingEnabling = "Enabling" - LambdaEventSourceMappingUpdating = "Updating" - LambdaEventSourceMappingDisabling = "Disabling" - LambdaEventSourceMappingEnabled = "Enabled" - LambdaEventSourceMappingDisabled = "Disabled" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/lambda/waiter" ) func resourceAwsLambdaEventSourceMapping() *schema.Resource { @@ -259,15 +251,15 @@ func resourceAwsLambdaEventSourceMappingCreate(d *schema.ResourceData, meta inte eventSourceMappingConfiguration, err = conn.CreateEventSourceMapping(params) } if err != nil { - return fmt.Errorf("Error creating Lambda event source mapping: %s", err) + return fmt.Errorf("error creating Lambda Event Source Mapping (%s): %w", d.Id(), err) } // No error d.Set("uuid", eventSourceMappingConfiguration.UUID) d.SetId(aws.StringValue(eventSourceMappingConfiguration.UUID)) - if err := waitForLambdaEventSourceMapping(conn, *eventSourceMappingConfiguration.UUID); err != nil { - return err + if _, err := waiter.EventSourceMappingCreate(conn, d.Id()); err != nil { + return fmt.Errorf("error waiting for Lambda Event Source Mapping (%s) to create: %w", d.Id(), err) } return resourceAwsLambdaEventSourceMappingRead(d, meta) @@ -316,12 +308,12 @@ func resourceAwsLambdaEventSourceMappingRead(d *schema.ResourceData, meta interf state := aws.StringValue(eventSourceMappingConfiguration.State) switch state { - case LambdaEventSourceMappingEnabled: + case waiter.EventSourceMappingStateEnabled, waiter.EventSourceMappingStateEnabling: d.Set("enabled", true) - case LambdaEventSourceMappingDisabled: + case waiter.EventSourceMappingStateDisabled, waiter.EventSourceMappingStateDisabling: d.Set("enabled", false) default: - return fmt.Errorf("state is neither enabled nor disabled but %s", *eventSourceMappingConfiguration.State) + log.Printf("[WARN] Lambda event source mapping is neither enabled nor disabled but %s", state) } return nil @@ -415,38 +407,12 @@ func resourceAwsLambdaEventSourceMappingUpdate(d *schema.ResourceData, meta inte _, err = conn.UpdateEventSourceMapping(params) } if err != nil { - return fmt.Errorf("Error updating Lambda event source mapping: %s", err) + return fmt.Errorf("error updating Lambda Event Source Mapping (%s): %w", d.Id(), err) } - if err := waitForLambdaEventSourceMapping(conn, d.Id()); err != nil { - return err + if _, err := waiter.EventSourceMappingUpdate(conn, d.Id()); err != nil { + return fmt.Errorf("error waiting for Lambda Event Source Mapping (%s) to update: %w", d.Id(), err) } return resourceAwsLambdaEventSourceMappingRead(d, meta) } - -func waitForLambdaEventSourceMapping(conn *lambda.Lambda, id string) error { - stateConf := &resource.StateChangeConf{ - Pending: 
[]string{LambdaEventSourceMappingCreating, LambdaEventSourceMappingEnabling, LambdaEventSourceMappingUpdating, LambdaEventSourceMappingDisabling}, - Target: []string{LambdaEventSourceMappingEnabled, LambdaEventSourceMappingDisabled}, - Refresh: func() (interface{}, string, error) { - params := &lambda.GetEventSourceMappingInput{ - UUID: aws.String(id), - } - - res, err := conn.GetEventSourceMapping(params) - if err != nil { - return nil, "", err - } - - return res, aws.StringValue(res.State), err - }, - Timeout: 10 * time.Minute, - Delay: 5 * time.Second, - } - - log.Printf("[DEBUG] Waiting for LambdaEventSourceMapping state update: %s", id) - _, err := stateConf.WaitForState() - - return err -} From f34a9dbdaa99d12edd162d91eabc1be752f1d18f Mon Sep 17 00:00:00 2001 From: angie pinilla Date: Thu, 11 Feb 2021 11:17:02 -0500 Subject: [PATCH 1105/1212] Update CHANGELOG for #11600 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8e52edc6b92..36eab045928 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ ENHANCEMENTS: * resource/aws_ec2_traffic_mirror_filter_rule: Add arn attribute. ([#13949](https://github.com/hashicorp/terraform-provider-aws/issues/13949)) * resource/aws_ec2_traffic_mirror_filter_rule: Add plan time validation to `destination_port_range.from_port`, `destination_port_range.to_port`, `source_port_range.from_port`, and `source_port_range.to_port`. ([#13949](https://github.com/hashicorp/terraform-provider-aws/issues/13949)) +* resource/aws_ses_configuration_set: Add `delivery_options` argument [GH-11600] * resource/aws_sns_topic_subscription: Add `redrive_policy` argument ([#11770](https://github.com/hashicorp/terraform-provider-aws/issues/11770)) BUG FIXES: From b41d5538a4614feca96aabb8dd7866b8de40a99a Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Thu, 11 Feb 2021 18:24:26 +0200 Subject: [PATCH 1106/1212] resource/aws_ses_event_destination: Add arn attribute and validations (#13964) References: - https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonses.html#amazonses-resources-for-iam-policies - https://docs.aws.amazon.com/ses/latest/APIReference/API_EventDestination.html Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAWSSESEventDestination_disappears (85.94s) --- PASS: TestAccAWSSESEventDestination_basic (98.02s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- PASS: TestAccAWSSESEventDestination_disappears (75.67s) --- PASS: TestAccAWSSESEventDestination_basic (78.60s) ``` --- .changelog/13964.txt | 7 ++++ aws/resource_aws_ses_event_destination.go | 42 ++++++++++++++++--- ...resource_aws_ses_event_destination_test.go | 13 +++--- website/docs/index.html.markdown | 1 + ...wn => ses_event_destination.html.markdown} | 7 ++++ 5 files changed, 59 insertions(+), 11 deletions(-) create mode 100644 .changelog/13964.txt rename website/docs/r/{ses_event_destination.markdown => ses_event_destination.html.markdown} (93%) diff --git a/.changelog/13964.txt b/.changelog/13964.txt new file mode 100644 index 00000000000..84c79e3f464 --- /dev/null +++ b/.changelog/13964.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_ses_event_destination: Add `arn` attribute +``` + +```release-note:enhancement +resource/aws_ses_event_destination: Add plan time validation for `name`, `cloudwatch_destination.default_value`, `cloudwatch_destination.default_name`, `kinesis_destination.role_arn`, `kinesis_destination.stream_arn`, and `sns_destination.topic_arn` 
attributes +``` diff --git a/aws/resource_aws_ses_event_destination.go b/aws/resource_aws_ses_event_destination.go index 619d6089c8a..b364be19659 100644 --- a/aws/resource_aws_ses_event_destination.go +++ b/aws/resource_aws_ses_event_destination.go @@ -3,9 +3,11 @@ package aws import ( "fmt" "log" + "regexp" "strings" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ses" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -21,10 +23,18 @@ func resourceAwsSesEventDestination() *schema.Resource { }, Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, "name": { Type: schema.TypeString, Required: true, ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 64), + validation.StringMatch(regexp.MustCompile(`^[0-9a-zA-Z_-]+$`), "must contain only alphanumeric, underscore, and hyphen characters"), + ), }, "configuration_set_name": { @@ -70,11 +80,19 @@ func resourceAwsSesEventDestination() *schema.Resource { "default_value": { Type: schema.TypeString, Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 256), + validation.StringMatch(regexp.MustCompile(`^[0-9a-zA-Z_-]+$`), "must contain only alphanumeric, underscore, and hyphen characters"), + ), }, "dimension_name": { Type: schema.TypeString, Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 256), + validation.StringMatch(regexp.MustCompile(`^[0-9a-zA-Z_:-]+$`), "must contain only alphanumeric, underscore, and hyphen characters"), + ), }, "value_source": { @@ -99,13 +117,15 @@ func resourceAwsSesEventDestination() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "stream_arn": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, }, "role_arn": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, }, }, }, @@ -120,8 +140,9 @@ func resourceAwsSesEventDestination() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "topic_arn": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, }, }, }, @@ -234,6 +255,15 @@ func resourceAwsSesEventDestinationRead(d *schema.ResourceData, meta interface{} return fmt.Errorf("error setting sns_destination: %w", err) } + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "ses", + Region: meta.(*AWSClient).region, + AccountID: meta.(*AWSClient).accountid, + Resource: fmt.Sprintf("configuration-set/%s:event-destination/%s", configurationSetName, d.Id()), + }.String() + d.Set("arn", arn) + return nil } diff --git a/aws/resource_aws_ses_event_destination_test.go b/aws/resource_aws_ses_event_destination_test.go index b7a48af6053..0890dcc23bd 100644 --- a/aws/resource_aws_ses_event_destination_test.go +++ b/aws/resource_aws_ses_event_destination_test.go @@ -13,8 +13,8 @@ import ( func TestAccAWSSESEventDestination_basic(t *testing.T) { rName1 := acctest.RandomWithPrefix("tf-acc-test") - rName2 := acctest.RandomWithPrefix("tf-acc-test") - rName3 := acctest.RandomWithPrefix("tf-acc-test") + rName2 := acctest.RandomWithPrefix("tf-acc-test-kinesis") + rName3 := acctest.RandomWithPrefix("tf-acc-test-sns") cloudwatchDestinationResourceName := "aws_ses_event_destination.cloudwatch" 
kinesisDestinationResourceName := "aws_ses_event_destination.kinesis" snsDestinationResourceName := "aws_ses_event_destination.sns" @@ -34,6 +34,9 @@ func TestAccAWSSESEventDestination_basic(t *testing.T) { testAccCheckAwsSESEventDestinationExists(cloudwatchDestinationResourceName, &v1), testAccCheckAwsSESEventDestinationExists(kinesisDestinationResourceName, &v2), testAccCheckAwsSESEventDestinationExists(snsDestinationResourceName, &v3), + testAccCheckResourceAttrRegionalARN(cloudwatchDestinationResourceName, "arn", "ses", fmt.Sprintf("configuration-set/%s:event-destination/%s", rName1, rName1)), + testAccCheckResourceAttrRegionalARN(kinesisDestinationResourceName, "arn", "ses", fmt.Sprintf("configuration-set/%s:event-destination/%s", rName1, rName2)), + testAccCheckResourceAttrRegionalARN(snsDestinationResourceName, "arn", "ses", fmt.Sprintf("configuration-set/%s:event-destination/%s", rName1, rName3)), resource.TestCheckResourceAttr(cloudwatchDestinationResourceName, "name", rName1), resource.TestCheckResourceAttr(kinesisDestinationResourceName, "name", rName2), resource.TestCheckResourceAttr(snsDestinationResourceName, "name", rName3), @@ -63,8 +66,8 @@ func TestAccAWSSESEventDestination_basic(t *testing.T) { func TestAccAWSSESEventDestination_disappears(t *testing.T) { rName1 := acctest.RandomWithPrefix("tf-acc-test") - rName2 := acctest.RandomWithPrefix("tf-acc-test") - rName3 := acctest.RandomWithPrefix("tf-acc-test") + rName2 := acctest.RandomWithPrefix("tf-acc-test-kinesis") + rName3 := acctest.RandomWithPrefix("tf-acc-test-sns") cloudwatchDestinationResourceName := "aws_ses_event_destination.cloudwatch" kinesisDestinationResourceName := "aws_ses_event_destination.kinesis" snsDestinationResourceName := "aws_ses_event_destination.sns" @@ -109,7 +112,7 @@ func testAccCheckSESEventDestinationDestroy(s *terraform.State) error { found := false for _, element := range response.ConfigurationSets { - if *element.Name == rs.Primary.ID { + if aws.StringValue(element.Name) == rs.Primary.ID { found = true } } diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index 8266d49b471..9dac41fba13 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -318,6 +318,7 @@ for more information about connecting to alternate AWS endpoints or AWS compatib - [`aws_ses_domain_identity` resource](/docs/providers/aws/r/ses_domain_identity.html) - [`aws_ses_domain_identity_verification` resource](/docs/providers/aws/r/ses_domain_identity_verification.html) - [`aws_ses_email_identity` resource](/docs/providers/aws/r/ses_email_identity.html) + - [`aws_ses_event_destination` resource](/docs/providers/aws/r/ses_event_destination.html) - [`aws_ses_receipt_filter` resource](/docs/providers/aws/r/ses_receipt_filter.html) - [`aws_ssm_document` data source](/docs/providers/aws/d/ssm_document.html) - [`aws_ssm_document` resource](/docs/providers/aws/r/ssm_document.html) diff --git a/website/docs/r/ses_event_destination.markdown b/website/docs/r/ses_event_destination.html.markdown similarity index 93% rename from website/docs/r/ses_event_destination.markdown rename to website/docs/r/ses_event_destination.html.markdown index ef701579f94..82c5f76d52e 100644 --- a/website/docs/r/ses_event_destination.markdown +++ b/website/docs/r/ses_event_destination.html.markdown @@ -89,6 +89,13 @@ The following arguments are supported: * `topic_arn` - (Required) The ARN of the SNS topic +## Attributes Reference + +In addition to all arguments above, the following 
attributes are exported: + +* `id` - The SES event destination name. +* `arn` - The SES event destination ARN. + ## Import SES event destinations can be imported using `configuration_set_name` together with the event destination's `name`, From 1564047d39e00767b1deb0ffe3ebc9b7a9d96855 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 11 Feb 2021 11:27:33 -0500 Subject: [PATCH 1107/1212] docs/lambda_function: Clean up docs after rebase --- website/docs/r/lambda_function.html.markdown | 26 +++++++++++++++----- 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/website/docs/r/lambda_function.html.markdown b/website/docs/r/lambda_function.html.markdown index 2f7b88bec04..8a2a7de87a4 100644 --- a/website/docs/r/lambda_function.html.markdown +++ b/website/docs/r/lambda_function.html.markdown @@ -216,23 +216,27 @@ The following arguments are required: The following arguments are optional: +* `code_signing_config_arn` - (Optional) To enable code signing for this function, specify the ARN of a code-signing configuration. A code-signing configuration includes a set of signing profiles, which define the trusted publishers for this function. * `dead_letter_config` - (Optional) Configuration block. Detailed below. * `description` - (Optional) Description of what your Lambda Function does. * `environment` - (Optional) Configuration block. Detailed below. * `file_system_config` - (Optional) Configuration block. Detailed below. -* `filename` - (Optional) Path to the function's deployment package within the local filesystem. If defined, The `s3_`-prefixed options cannot be used. +* `filename` - (Optional) Path to the function's deployment package within the local filesystem. Conflicts with `image_uri`, `s3_bucket`, `s3_key`, and `s3_object_version`. +* `image_config` - (Optional) Configuration block. Detailed below. +* `image_uri` - (Optional) ECR image URI containing the function's deployment package. Conflicts with `filename`, `s3_bucket`, `s3_key`, and `s3_object_version`. * `kms_key_arn` - (Optional) Amazon Resource Name (ARN) of the AWS Key Management Service (KMS) key that is used to encrypt environment variables. If this configuration is not provided when environment variables are in use, AWS Lambda uses a default service key. If this configuration is provided when environment variables are not in use, the AWS Lambda API does not save this configuration and Terraform will show a perpetual difference of adding the key. To fix the perpetual difference, remove this configuration. * `layers` - (Optional) List of Lambda Layer Version ARNs (maximum of 5) to attach to your Lambda Function. See [Lambda Layers][10] * `memory_size` - (Optional) Amount of memory in MB your Lambda Function can use at runtime. Defaults to `128`. See [Limits][5] +* `package_type` - (Optional) Lambda deployment package type. Valid values are `Zip` and `Image`. Defaults to `Zip`. * `publish` - (Optional) Whether to publish creation/change as new Lambda Function Version. Defaults to `false`. * `reserved_concurrent_executions` - (Optional) Amount of reserved concurrent executions for this lambda function. A value of `0` disables lambda from being triggered and `-1` removes any concurrency limitations. Defaults to Unreserved Concurrency Limits `-1`. See [Managing Concurrency][9] -* `s3_bucket` - (Optional) S3 bucket location containing the function's deployment package. Conflicts with `filename`. This bucket must reside in the same AWS region where you are creating the Lambda function. 
-* `s3_key` - (Optional) S3 key of an object containing the function's deployment package. Conflicts with `filename`. -* `s3_object_version` - (Optional) Object version containing the function's deployment package. Conflicts with `filename`. +* `s3_bucket` - (Optional) S3 bucket location containing the function's deployment package. Conflicts with `filename` and `image_uri`. This bucket must reside in the same AWS region where you are creating the Lambda function. +* `s3_key` - (Optional) S3 key of an object containing the function's deployment package. Conflicts with `filename` and `image_uri`. +* `s3_object_version` - (Optional) Object version containing the function's deployment package. Conflicts with `filename` and `image_uri`. * `source_code_hash` - (Optional) Used to trigger updates. Must be set to a base64-encoded SHA256 hash of the package file specified with either `filename` or `s3_key`. The usual way to set this is `filebase64sha256("file.zip")` (Terraform 0.11.12 and later) or `base64sha256(file("file.zip"))` (Terraform 0.11.11 and earlier), where "file.zip" is the local filename of the lambda function source archive. * `tags` - (Optional) Map of tags to assign to the object. +* `timeout` - (Optional) Amount of time your Lambda Function has to run in seconds. Defaults to `3`. See [Limits][5]. * `tracing_config` - (Optional) Configuration block. Detailed below. -* `timeout` - (Optional) Amount of time your Lambda Function has to run in seconds. Defaults to `3`. See [Limits][5] * `vpc_config` - (Optional) Configuration block. Detailed below. ### dead_letter_config @@ -252,6 +256,14 @@ Connection settings for an EFS file system. Before creating or updating Lambda f * `arn` - (Required) Amazon Resource Name (ARN) of the Amazon EFS Access Point that provides access to the file system. * `local_mount_path` - (Required) Path where the function can access the file system, starting with /mnt/. +### image_config + +Container image configuration values that override the values in the container image Dockerfile. + +* `command` - (Optional) Parameters that you want to pass in with `entry_point`. +* `entry_point` - (Optional) Entry point to your application, which is typically the location of the runtime executable. +* `working_directory` - (Optional) Working directory. + ### tracing_config * `mode` - (Required) Whether to sample and trace a subset of incoming requests with AWS X-Ray. Valid values are `PassThrough` and `Active`. If `PassThrough`, Lambda will only trace the request from an upstream service if it contains a tracing header with "sampled=1". If `Active`, Lambda will respect any tracing header it receives from an upstream service. If no tracing header is received, Lambda will call X-Ray for a tracing decision. @@ -273,9 +285,11 @@ In addition to arguments above, the following attributes are exported: * `invoke_arn` - ARN to be used for invoking Lambda Function from API Gateway - to be used in [`aws_api_gateway_integration`](/docs/providers/aws/r/api_gateway_integration.html)'s `uri`. * `last_modified` - Date this resource was last modified. * `qualified_arn` - ARN identifying your Lambda Function Version (if versioning is enabled via `publish = true`). -* `source_code_hash` - Base64-encoded representation of raw SHA-256 sum of the zip file, provided either via `filename` or `s3_*` parameters. +* `signing_job_arn` - ARN of the signing job. +* `signing_profile_version_arn` - ARN of the signing profile version. * `source_code_size` - Size in bytes of the function .zip file. 
* `version` - Latest published version of your Lambda Function. +* `vpc_config.vpc_id` - ID of the VPC. [1]: https://docs.aws.amazon.com/lambda/latest/dg/welcome.html [2]: https://docs.aws.amazon.com/lambda/latest/dg/walkthrough-s3-events-adminuser-create-test-function-create-function.html From c8ef4c0750ca4faabbf2a3ea602cbe598cd0b108 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 11 Feb 2021 11:30:26 -0500 Subject: [PATCH 1108/1212] docs/contributing: Add Data Sources section to Provider Design page (#17546) This quick section is designed to explain the "in words" design methodology behind singular versus plural data sources. --- docs/contributing/provider-design.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/docs/contributing/provider-design.md b/docs/contributing/provider-design.md index 9585d265768..17d62a6a31e 100644 --- a/docs/contributing/provider-design.md +++ b/docs/contributing/provider-design.md @@ -9,6 +9,9 @@ The Terraform AWS Provider follows the guidelines established in the [HashiCorp - [Resource Type Considerations](#resource-type-considerations) - [Authorization and Acceptance Resources](#authorization-and-acceptance-resources) - [Cross-Service Functionality](#cross-service-functionality) + - [Data Sources](#data-sources) + - [Plural Data Sources](#plural-data-sources) + - [Singular Data Sources](#singular-data-sources) - [IAM Resource-Based Policy Resources](#iam-resource-based-policy-resources) - [Managing Resource Running State](#managing-resource-running-state) - [Task Execution and Waiter Resources](#task-execution-and-waiter-resources) @@ -81,6 +84,20 @@ The rationale behind this design decision includes the following: A poignant real-world example of the last point involved a Lambda resource. The resource helped clean up extra resources (ENIs) due to a common misconfiguration. Practitioners found the functionality helpful since the issue was hard to diagnose. Years later, AWS updated the Lambda API. Immediately, practitioners reported that Terraform executions were failing. Downgrading the provider was not possible since many configurations depended on recent releases. For environments running many versions behind, forcing an upgrade with the fix would likely cause unrelated and unexpected changes. In the end, HashiCorp and AWS performed a large-scale outreach to help upgrade and fix the misconfigurations. Provider maintainers and practitioners lost considerable time. +### Data Sources + +A separate class of Terraform resource types is [data sources](https://www.terraform.io/docs/language/data-sources/). These are typically intended as a configuration method to look up or fetch data in a read-only manner. Data sources should not have side effects on the remote system. + +When discussing data sources, they are typically classified by the intended number of return objects or data. Singular data sources represent a one-to-one lookup or data operation. Plural data sources represent a one-to-many operation. + +#### Plural Data Sources + +These data sources are intended to return zero, one, or many results, usually associated with a managed resource type. Typically results are a set unless ordering guarantees are provided by the remote system. These should be named with a plural suffix (e.g. `s` or `es`) and should not include any specific attribute in the naming (e.g. prefer `aws_ec2_transit_gateways` instead of `aws_ec2_transit_gateway_ids`). 
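To make the naming convention above concrete, a plural data source reads naturally as a collection lookup. The following is a hypothetical configuration sketch only: the `filter` block and `ids` attribute are assumed shapes for illustration, not a confirmed schema.

```hcl
# Illustrative sketch: a plural data source returns zero or more results
# (here assumed to be a set of IDs); an empty result is valid, not an error.
data "aws_ec2_transit_gateways" "available" {
  filter {
    name   = "state"
    values = ["available"]
  }
}

output "transit_gateway_ids" {
  value = data.aws_ec2_transit_gateways.available.ids
}
```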
+ +#### Singular Data Sources + +These data sources are intended to return one result or an error. These should not include any specific attribute in the naming (e.g. prefer `aws_ec2_transit_gateway` instead of `aws_ec2_transit_gateway_id`). + ### IAM Resource-Based Policy Resources For some AWS components, the AWS API allows specifying an [IAM resource-based policy](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_identity-vs-resource.html), the IAM policy to associate with a component. Some examples include: From ebd8086b934f12fd0bdadfd60f9ab7b407ff2694 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 11 Feb 2021 16:32:13 +0000 Subject: [PATCH 1109/1212] Update CHANGELOG.md for #17546 --- CHANGELOG.md | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 36eab045928..e345dfa7435 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ FEATURES: -* **New Resource:** `aws_config_conformance_pack` [GH-17313] +* **New Resource:** `aws_config_conformance_pack` ([#17313](https://github.com/hashicorp/terraform-provider-aws/issues/17313)) * **New Resource:** `aws_sagemaker_model_package_group` ([#17366](https://github.com/hashicorp/terraform-provider-aws/issues/17366)) * **New Resource:** `aws_securityhub_organization_admin_account` ([#17501](https://github.com/hashicorp/terraform-provider-aws/issues/17501)) @@ -10,16 +10,21 @@ ENHANCEMENTS: * data-source/aws_iam_policy_document: Support merging policy documents by adding `source_policy_documents` and `override_policy_documents` arguments ([#12055](https://github.com/hashicorp/terraform-provider-aws/issues/12055)) * provider: Add terraform-provider-aws version to HTTP User-Agent header ([#17486](https://github.com/hashicorp/terraform-provider-aws/issues/17486)) +* resource/aws_ec2_traffic_mirror_filter: Add `arn` attribute. ([#13948](https://github.com/hashicorp/terraform-provider-aws/issues/13948)) * resource/aws_ec2_traffic_mirror_filter_rule: Add arn attribute. ([#13949](https://github.com/hashicorp/terraform-provider-aws/issues/13949)) * resource/aws_ec2_traffic_mirror_filter_rule: Add plan time validation to `destination_port_range.from_port`, `destination_port_range.to_port`, `source_port_range.from_port`, and `source_port_range.to_port`. ([#13949](https://github.com/hashicorp/terraform-provider-aws/issues/13949)) -* resource/aws_ses_configuration_set: Add `delivery_options` argument [GH-11600] +* resource/aws_lambda_event_source_mapping: Add `topics` attribute to support Amazon MSK as an event source ([#14746](https://github.com/hashicorp/terraform-provider-aws/issues/14746)) +* resource/aws_ses_configuration_set: Add `delivery_options` argument ([#11600](https://github.com/hashicorp/terraform-provider-aws/issues/11600)) +* resource/aws_ses_event_destination: Add `arn` attribute ([#13964](https://github.com/hashicorp/terraform-provider-aws/issues/13964)) +* resource/aws_ses_event_destination: Add plan time validation for `name`, `cloudwatch_destination.default_value`, `cloudwatch_destination.default_name`, `kinesis_destination.role_arn`, `kinesis_destination.stream_arn`, and `sns_destination.topic_arn` attributes ([#13964](https://github.com/hashicorp/terraform-provider-aws/issues/13964)) * resource/aws_sns_topic_subscription: Add `redrive_policy` argument ([#11770](https://github.com/hashicorp/terraform-provider-aws/issues/11770)) BUG FIXES: * resource/aws_glue_catalog_database: Use Catalog Id when deleting Databases. 
([#17489](https://github.com/hashicorp/terraform-provider-aws/issues/17489)) * resource/aws_instance: Fix use of `throughput` and `iops` for `gp3` volumes at the same time ([#17380](https://github.com/hashicorp/terraform-provider-aws/issues/17380)) * resource/aws_lambda_event_source_mapping: Wait for create and update operations to complete ([#14765](https://github.com/hashicorp/terraform-provider-aws/issues/14765)) ## 3.27.0 (February 05, 2021) From b000220a28ee8d1e0acf356c1fc85e16370844d5 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 16 Dec 2020 09:34:51 -0800 Subject: [PATCH 1110/1212] website: Show using jsonencode to generate IAM policy documents The existing examples of inline IAM policy documents tend to show them as "heredoc" strings, which was the required style in Terraform v0.11 and earlier. However, users seem to frequently start from these simple examples and then later need to insert dynamic elements such as ARNs or lists of ARNs from other resources, and because they started with a multi-line string template they then understandably start experimenting with string templating to produce the dynamic JSON, and run into various issues with incorrect quoting and generation of commas. When helping users in that case we typically suggest that they switch to using jsonencode for generating that dynamic JSON, because then they can just use normal Terraform language expression features and let Terraform itself worry about making the result valid JSON syntax. In an attempt to help users discover that solution themselves, rather than fighting with string templates first, I've changed the examples for the resource types I've most commonly seen questions about in order to show generating the JSON string programmatically using the jsonencode function. Not everyone will necessarily realise right away that they can use dynamic expressions in there, but I hope that this will at least set folks on a better path when they start evaluating possible solutions by making it less likely that string templates will be seen as a viable option. There are many more heredoc-based IAM policy examples in other resource types -- particularly in various examples that include aws_iam_role helper resources with assume_role_policy arguments -- but I wanted to keep this confined to a common set to start just to avoid this being a huge diff that could be disruptive to merge. Hopefully over time we can update more of these, and also write any new examples in this jsonencode style. The jsonencode style is also typically how Terraform CLI will present the value in the plan diff, in order to potentially produce a structural diff _within_ the JSON data structure, so hopefully this will also help set users up to be less surprised when they encounter that for the first time. 
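As a hypothetical before-and-after sketch of the conversion this commit describes (the resource names, bucket reference, and policy statement below are placeholders, not taken from the changed pages):

```hcl
# Before: a heredoc string; inserting a dynamic ARN here invites quoting
# and trailing-comma mistakes.
resource "aws_iam_user_policy" "heredoc_style" {
  name = "example"
  user = aws_iam_user.example.name

  policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Action": ["s3:GetObject"],
      "Effect": "Allow",
      "Resource": "arn:aws:s3:::example-bucket/*"
    }
  ]
}
EOF
}

# After: jsonencode builds the same document from a normal Terraform
# expression, so a reference such as aws_s3_bucket.example.arn (assumed
# to be defined elsewhere) interpolates without hand-written JSON.
resource "aws_iam_user_policy" "jsonencode_style" {
  name = "example"
  user = aws_iam_user.example.name

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action   = ["s3:GetObject"]
        Effect   = "Allow"
        Resource = "${aws_s3_bucket.example.arn}/*"
      }
    ]
  })
}
```

Terraform guarantees that `jsonencode` emits valid JSON, which is the point the commit message makes about quoting and comma generation.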
--- website/docs/r/iam_group_policy.html.markdown | 28 +++++----- website/docs/r/iam_policy.html.markdown | 28 +++++----- website/docs/r/iam_role.html.markdown | 28 +++++----- website/docs/r/iam_role_policy.html.markdown | 48 ++++++++-------- website/docs/r/iam_user_policy.html.markdown | 28 +++++----- website/docs/r/iot_policy.html.markdown | 28 +++++----- .../docs/r/lambda_permission.html.markdown | 56 +++++++++---------- website/docs/r/s3_bucket_policy.html.markdown | 41 ++++++++------ 8 files changed, 144 insertions(+), 141 deletions(-) diff --git a/website/docs/r/iam_group_policy.html.markdown b/website/docs/r/iam_group_policy.html.markdown index 7555851a188..b5bd77a1c16 100644 --- a/website/docs/r/iam_group_policy.html.markdown +++ b/website/docs/r/iam_group_policy.html.markdown @@ -17,20 +17,20 @@ resource "aws_iam_group_policy" "my_developer_policy" { name = "my_developer_policy" group = aws_iam_group.my_developers.name - policy = < Date: Thu, 11 Feb 2021 11:56:40 -0500 Subject: [PATCH 1111/1212] docs/contributing: Initial acceptance test concurrency section (#17515) * docs/contributing: Initial acceptance test concurrency section Documents the current practices to convert parallel acceptance testing to serialized. Future work with configurable concurrency and testing semaphores will augment this section. * Update docs/contributing/running-and-writing-acceptance-tests.md Co-authored-by: Simon Davis Co-authored-by: Simon Davis --- .../running-and-writing-acceptance-tests.md | 40 +++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/docs/contributing/running-and-writing-acceptance-tests.md b/docs/contributing/running-and-writing-acceptance-tests.md index fd5c17988ac..72208fda706 100644 --- a/docs/contributing/running-and-writing-acceptance-tests.md +++ b/docs/contributing/running-and-writing-acceptance-tests.md @@ -22,6 +22,7 @@ - [Cross-Account Acceptance Tests](#cross-account-acceptance-tests) - [Cross-Region Acceptance Tests](#cross-region-acceptance-tests) - [Service-Specific Region Acceptance Tests](#service-specific-region-acceptance-tests) + - [Acceptance Test Concurrency](#acceptance-test-concurrency) - [Data Source Acceptance Testing](#data-source-acceptance-testing) - [Acceptance Test Sweepers](#acceptance-test-sweepers) - [Running Test Sweepers](#running-test-sweepers) @@ -954,6 +955,45 @@ func testAccDataSourceAwsPricingProductConfigRedshift() string { If the testing configurations require more than one region, reach out to the maintainers for further assistance. +#### Acceptance Test Concurrency + +Certain AWS service APIs allow a limited number of a certain component, while the acceptance testing runs at a default concurrency of twenty tests at a time. For example, as of this writing, the SageMaker service only allows one SageMaker Domain per AWS Region. Running the tests with the default concurrency will fail with API errors relating to the component quota being exceeded. + +When encountering these types of components, the acceptance testing can be set up to limit the available concurrency of that particular component. When limited to one component at a time, this may also be referred to as serializing the acceptance tests. + +To convert to serialized (one test at a time) acceptance testing: + +- Convert all existing capital `T` test functions with the limited component to begin with a lowercase `t`, e.g. `TestAccSagemakerDomain_basic` becomes `testAccSagemakerDomain_basic`. 
This will prevent the test framework from executing these tests directly as the prefix `Test` is required. + - In each of these test functions, convert `resource.ParallelTest` to `resource.Test` +- Create a capital `T` `TestAcc{Service}{Thing}_serial` test function that then references all the lowercase `t` test functions. If multiple test files are referenced, this new test should be created in a new shared file such as `aws/{Service}_test.go`. The contents of this test can be set up like the following: + +```go +func TestAccAwsExampleThing_serial(t *testing.T) { + testCases := map[string]map[string]func(t *testing.T){ + "Thing": { + "basic": testAccAWSExampleThing_basic, + "disappears": testAccAWSExampleThing_disappears, + // ... potentially other resource tests ... + }, + // ... potentially other top level resource test groups ... + } + + for group, m := range testCases { + m := m + t.Run(group, func(t *testing.T) { + for name, tc := range m { + tc := tc + t.Run(name, func(t *testing.T) { + tc(t) + }) + } + }) + } +} +``` + +_NOTE: Future iterations of these acceptance testing concurrency instructions will include the ability to handle more than one component at a time including service quota lookup, if supported by the service API._ + ### Data Source Acceptance Testing Writing acceptance testing for data sources is similar to resources, with the biggest changes being: From f735f5e169c50b5b5991f25741f799f89342af98 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 11 Feb 2021 12:00:11 -0500 Subject: [PATCH 1112/1212] build(deps): bump github.com/hashicorp/terraform-plugin-sdk/v2 (#17558) Bumps [github.com/hashicorp/terraform-plugin-sdk/v2](https://github.com/hashicorp/terraform-plugin-sdk) from 2.4.2 to 2.4.3. 
- [Release notes](https://github.com/hashicorp/terraform-plugin-sdk/releases) - [Changelog](https://github.com/hashicorp/terraform-plugin-sdk/blob/master/CHANGELOG.md) - [Commits](https://github.com/hashicorp/terraform-plugin-sdk/compare/v2.4.2...v2.4.3) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index dc933bb76ed..0c52f8868aa 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/go-multierror v1.1.0 github.com/hashicorp/go-version v1.2.1 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.2 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.3 github.com/jen20/awspolicyequivalence v1.1.0 github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba github.com/mattn/go-colorable v0.1.7 // indirect diff --git a/go.sum b/go.sum index 03b9dbf4d66..b5e1af8cfb0 100644 --- a/go.sum +++ b/go.sum @@ -207,14 +207,14 @@ github.com/hashicorp/hcl/v2 v2.3.0 h1:iRly8YaMwTBAKhn1Ybk7VSdzbnopghktCD031P8ggU github.com/hashicorp/hcl/v2 v2.3.0/go.mod h1:d+FwDBbOLvpAM3Z6J7gPj/VoAGkNe/gm352ZhjJ/Zv8= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/terraform-exec v0.12.0 h1:Tb1VC2gqArl9EJziJjoazep2MyxMk00tnNKV/rgMba0= -github.com/hashicorp/terraform-exec v0.12.0/go.mod h1:SGhto91bVRlgXQWcJ5znSz+29UZIa8kpBbkGwQ+g9E8= +github.com/hashicorp/terraform-exec v0.13.0 h1:1Pth+pdWJAufJuWWjaVOVNEkoRTOjGn3hQpAqj4aPdg= +github.com/hashicorp/terraform-exec v0.13.0/go.mod h1:SGhto91bVRlgXQWcJ5znSz+29UZIa8kpBbkGwQ+g9E8= github.com/hashicorp/terraform-json v0.8.0 h1:XObQ3PgqU52YLQKEaJ08QtUshAfN3yu4u8ebSW0vztc= github.com/hashicorp/terraform-json v0.8.0/go.mod h1:3defM4kkMfttwiE7VakJDwCd4R+umhSQnvJwORXbprE= github.com/hashicorp/terraform-plugin-go v0.2.1 h1:EW/R8bB2Zbkjmugzsy1d27yS8/0454b3MtYHkzOknqA= github.com/hashicorp/terraform-plugin-go v0.2.1/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.2 h1:8oo4eMtv3nEZGqe8W0UzMxKnKWuwS/Tb2YyIFJkL59g= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.2/go.mod h1:jgCWyjKf1BRqzuA3IPJb6PJ2YY86ePJurX9xfJtuYNU= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.3 h1:DGnxpIYRHXQZb2TOlQ1OCEYxoRQrAcbLIcYm8kvbFuU= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.3/go.mod h1:5wrrTcxbSaQXamCDbHZTHk6yTF9OEZaOvQ9fvLXBE3o= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= From d1e29d1a6ac22dffcabd30821f447886d44a4f7c Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Thu, 11 Feb 2021 19:24:19 +0200 Subject: [PATCH 1113/1212] resource/aws_ses_template: Add arn attribute (#13963) References: - https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonses.html#amazonses-resources-for-iam-policies - https://docs.aws.amazon.com/ses/latest/APIReference/API_Template.html (no arn or owner id fields) Output from acceptance testing: ``` --- PASS: TestAccAWSSesTemplate_disappears (10.40s) --- PASS: 
TestAccAWSSesTemplate_basic (12.74s) --- SKIP: TestAccAWSSesTemplate_Update (0.00s) ``` --- .changelog/13963.txt | 3 +++ aws/resource_aws_ses_template.go | 14 ++++++++++++++ aws/resource_aws_ses_template_test.go | 4 ++-- website/docs/index.html.markdown | 1 + website/docs/r/ses_template.html.markdown | 1 + 5 files changed, 21 insertions(+), 2 deletions(-) create mode 100644 .changelog/13963.txt diff --git a/.changelog/13963.txt b/.changelog/13963.txt new file mode 100644 index 00000000000..adbc0733b87 --- /dev/null +++ b/.changelog/13963.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_ses_template: Add `arn` attribute +``` diff --git a/aws/resource_aws_ses_template.go b/aws/resource_aws_ses_template.go index 7700af8a8bf..771c560be48 100644 --- a/aws/resource_aws_ses_template.go +++ b/aws/resource_aws_ses_template.go @@ -5,6 +5,7 @@ import ( "log" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ses" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -22,6 +23,10 @@ func resourceAwsSesTemplate() *schema.Resource { }, Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, "name": { Type: schema.TypeString, Required: true, @@ -102,6 +107,15 @@ func resourceAwsSesTemplateRead(d *schema.ResourceData, meta interface{}) error d.Set("subject", gto.Template.SubjectPart) d.Set("text", gto.Template.TextPart) + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "ses", + Region: meta.(*AWSClient).region, + AccountID: meta.(*AWSClient).accountid, + Resource: fmt.Sprintf("template/%s", d.Id()), + }.String() + d.Set("arn", arn) + return nil } diff --git a/aws/resource_aws_ses_template_test.go b/aws/resource_aws_ses_template_test.go index 8789cc42009..d4c8f9ab94d 100644 --- a/aws/resource_aws_ses_template_test.go +++ b/aws/resource_aws_ses_template_test.go @@ -6,7 +6,6 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ses" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -57,6 +56,7 @@ func TestAccAWSSesTemplate_Update(t *testing.T) { Config: testAccCheckAwsSesTemplateResourceConfigBasic1(rName), Check: resource.ComposeTestCheckFunc( testAccCheckSesTemplateExists(resourceName, &template), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "ses", fmt.Sprintf("template/%s", rName)), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "html", "html"), resource.TestCheckResourceAttr(resourceName, "subject", "subject"), @@ -159,7 +159,7 @@ func testAccCheckSesTemplateDestroy(s *terraform.State) error { gto, err := conn.GetTemplate(&input) if err != nil { - if awsErr, ok := err.(awserr.Error); ok && (awsErr.Code() == "TemplateDoesNotExist") { + if isAWSErr(err, ses.ErrCodeTemplateDoesNotExistException, "") { return nil } return resource.NonRetryableError(err) diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index 9dac41fba13..0d0039d87f8 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -320,6 +320,7 @@ for more information about connecting to alternate AWS endpoints or AWS compatib - [`aws_ses_email_identity` resource](/docs/providers/aws/r/ses_email_identity.html) - [`aws_ses_event_destination` 
resource](/docs/providers/aws/r/ses_event_destination.html) - [`aws_ses_receipt_filter` resource](/docs/providers/aws/r/ses_receipt_filter.html) + - [`aws_ses_template` resource](/docs/providers/aws/r/ses_template.html) - [`aws_ssm_document` data source](/docs/providers/aws/d/ssm_document.html) - [`aws_ssm_document` resource](/docs/providers/aws/r/ssm_document.html) - [`aws_ssm_parameter` data source](/docs/providers/aws/d/ssm_parameter.html) diff --git a/website/docs/r/ses_template.html.markdown b/website/docs/r/ses_template.html.markdown index 2c0d9b9b8bb..fda424bb7a5 100644 --- a/website/docs/r/ses_template.html.markdown +++ b/website/docs/r/ses_template.html.markdown @@ -34,6 +34,7 @@ The following arguments are supported: In addition to all arguments above, the following attributes are exported: +* `arn` - The ARN of the SES template * `id` - The name of the SES template ## Import From b98bbb5ea7c943b622709dc08f593b51390a0d10 Mon Sep 17 00:00:00 2001 From: Luciano Mammino Date: Thu, 11 Feb 2021 17:31:17 +0000 Subject: [PATCH 1114/1212] Update website/docs/r/rds_cluster.html.markdown Co-authored-by: Simon Davis --- website/docs/r/rds_cluster.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/rds_cluster.html.markdown b/website/docs/r/rds_cluster.html.markdown index 80b267c70aa..76698100e76 100644 --- a/website/docs/r/rds_cluster.html.markdown +++ b/website/docs/r/rds_cluster.html.markdown @@ -209,7 +209,7 @@ resource "aws_rds_cluster" "example" { ``` * `auto_pause` - (Optional) Whether to enable automatic pause. A DB cluster can be paused only when it's idle (it has no connections). If a DB cluster is paused for more than seven days, the DB cluster might be backed up with a snapshot. In this case, the DB cluster is restored when there is a request to connect to it. Defaults to `true`. -* `max_capacity` - (Optional) The maximum capacity. The maximum capacity must be greater than or equal to the minimum capacity. Valid capacity values are `1`, `2`, `4`, `8`, `16`, `32`, `64`, `128`, and `256` (or `1`, `2`, `4`, `8`, `16`, `32`, `64`, `192`, and `384` for PostgreSQL). Defaults to `16`. +* `max_capacity` - (Optional) The maximum capacity for an Aurora DB cluster in `serverless` DB engine mode. The maximum capacity must be greater than or equal to the minimum capacity. Valid Aurora MySQL capacity values are `1`, `2`, `4`, `8`, `16`, `32`, `64`, `128`, `256`. Valid Aurora PostgreSQL capacity values are (`2`, `4`, `8`, `16`, `32`, `64`, `192`, and `384`). Defaults to `16`. * `min_capacity` - (Optional) The minimum capacity. The minimum capacity must be lesser than or equal to the maximum capacity. Valid capacity values are `1`, `2`, `4`, `8`, `16`, `32`, `64`, `128`, and `256` (or `1`, `2`, `4`, `8`, `16`, `32`, `64`, `192`, and `384` for PostgreSQL). Defaults to `1`. * `seconds_until_auto_pause` - (Optional) The time, in seconds, before an Aurora DB cluster in serverless mode is paused. Valid values are `300` through `86400`. Defaults to `300`. * `timeout_action` - (Optional) The action to take when the timeout is reached. Valid values: `ForceApplyCapacityChange`, `RollbackCapacityChange`. Defaults to `RollbackCapacityChange`. See [documentation](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.how-it-works.html#aurora-serverless.how-it-works.timeout-action). 
From 3ba6c9206e8ab3218e698ffe40b1690b28299d41 Mon Sep 17 00:00:00 2001 From: Luciano Mammino Date: Thu, 11 Feb 2021 17:31:23 +0000 Subject: [PATCH 1115/1212] Update website/docs/r/rds_cluster.html.markdown Co-authored-by: Simon Davis --- website/docs/r/rds_cluster.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/rds_cluster.html.markdown b/website/docs/r/rds_cluster.html.markdown index 76698100e76..165097e739e 100644 --- a/website/docs/r/rds_cluster.html.markdown +++ b/website/docs/r/rds_cluster.html.markdown @@ -210,7 +210,7 @@ resource "aws_rds_cluster" "example" { * `auto_pause` - (Optional) Whether to enable automatic pause. A DB cluster can be paused only when it's idle (it has no connections). If a DB cluster is paused for more than seven days, the DB cluster might be backed up with a snapshot. In this case, the DB cluster is restored when there is a request to connect to it. Defaults to `true`. * `max_capacity` - (Optional) The maximum capacity for an Aurora DB cluster in `serverless` DB engine mode. The maximum capacity must be greater than or equal to the minimum capacity. Valid Aurora MySQL capacity values are `1`, `2`, `4`, `8`, `16`, `32`, `64`, `128`, `256`. Valid Aurora PostgreSQL capacity values are (`2`, `4`, `8`, `16`, `32`, `64`, `192`, and `384`). Defaults to `16`. -* `min_capacity` - (Optional) The minimum capacity. The minimum capacity must be lesser than or equal to the maximum capacity. Valid capacity values are `1`, `2`, `4`, `8`, `16`, `32`, `64`, `128`, and `256` (or `1`, `2`, `4`, `8`, `16`, `32`, `64`, `192`, and `384` for PostgreSQL). Defaults to `1`. +* `min_capacity` - (Optional) The minimum capacity for an Aurora DB cluster in `serverless` DB engine mode. The minimum capacity must be lesser than or equal to the maximum capacity. Valid Aurora MySQL capacity values are `1`, `2`, `4`, `8`, `16`, `32`, `64`, `128`, `256`. Valid Aurora PostgreSQL capacity values are (`2`, `4`, `8`, `16`, `32`, `64`, `192`, and `384`). Defaults to `1`. * `seconds_until_auto_pause` - (Optional) The time, in seconds, before an Aurora DB cluster in serverless mode is paused. Valid values are `300` through `86400`. Defaults to `300`. * `timeout_action` - (Optional) The action to take when the timeout is reached. Valid values: `ForceApplyCapacityChange`, `RollbackCapacityChange`. Defaults to `RollbackCapacityChange`. See [documentation](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.how-it-works.html#aurora-serverless.how-it-works.timeout-action). 
From b99cd895aa148e0af1493d610622152f12013df3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 11 Feb 2021 17:35:00 +0000 Subject: [PATCH 1116/1212] build(deps): bump github.com/hashicorp/terraform-plugin-sdk/v2 (#17557) --- awsproviderlint/go.mod | 2 +- awsproviderlint/go.sum | 8 +- .../internal/version/version.go | 2 +- .../hashicorp/terraform-exec/tfexec/apply.go | 2 +- .../hashicorp/terraform-exec/tfexec/cmd.go | 20 +- .../terraform-exec/tfexec/cmd_default.go | 45 ++++ .../terraform-exec/tfexec/cmd_linux.go | 54 ++++ .../terraform-exec/tfexec/destroy.go | 2 +- .../hashicorp/terraform-exec/tfexec/errors.go | 181 +------------ .../terraform-exec/tfexec/exit_errors.go | 247 ++++++++++++++++++ .../hashicorp/terraform-exec/tfexec/fmt.go | 6 +- .../hashicorp/terraform-exec/tfexec/import.go | 2 +- .../hashicorp/terraform-exec/tfexec/init.go | 30 ++- .../hashicorp/terraform-exec/tfexec/output.go | 2 +- .../hashicorp/terraform-exec/tfexec/plan.go | 2 +- .../terraform-exec/tfexec/providers_schema.go | 2 +- .../terraform-exec/tfexec/refresh.go | 2 +- .../hashicorp/terraform-exec/tfexec/show.go | 8 +- .../terraform-exec/tfexec/state_mv.go | 2 +- .../terraform-exec/tfexec/state_rm.go | 104 ++++++++ .../terraform-exec/tfexec/terraform.go | 11 +- .../terraform-exec/tfexec/upgrade012.go | 2 +- .../terraform-exec/tfexec/validate.go | 9 +- .../terraform-exec/tfexec/version.go | 3 +- .../terraform-exec/tfexec/workspace_list.go | 2 +- .../terraform-exec/tfexec/workspace_new.go | 2 +- .../terraform-exec/tfexec/workspace_select.go | 2 +- .../v2/internal/plugintest/helper.go | 11 +- .../v2/internal/plugintest/working_dir.go | 74 ++---- .../terraform-plugin-sdk/v2/meta/meta.go | 2 +- awsproviderlint/vendor/modules.txt | 4 +- 31 files changed, 562 insertions(+), 283 deletions(-) create mode 100644 awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go create mode 100644 awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go create mode 100644 awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/exit_errors.go create mode 100644 awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/state_rm.go diff --git a/awsproviderlint/go.mod b/awsproviderlint/go.mod index 9b820c021a0..52e0aa45508 100644 --- a/awsproviderlint/go.mod +++ b/awsproviderlint/go.mod @@ -5,6 +5,6 @@ go 1.15 require ( github.com/aws/aws-sdk-go v1.37.4 github.com/bflad/tfproviderlint v0.21.0 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.2 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.3 golang.org/x/tools v0.0.0-20200928201943-a0ef9b62deab ) diff --git a/awsproviderlint/go.sum b/awsproviderlint/go.sum index eff5dc75ac7..84ff112b14d 100644 --- a/awsproviderlint/go.sum +++ b/awsproviderlint/go.sum @@ -204,8 +204,8 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/terraform-config-inspect v0.0.0-20191115094559-17f92b0546e8/go.mod h1:p+ivJws3dpqbp1iP84+npOyAmTTOLMgCzrXd3GSdn/A= github.com/hashicorp/terraform-exec v0.3.0 h1:5WLBsnv9BoEUGlHJZETROZZxw+qO3/TFQEh6JMP2uaY= github.com/hashicorp/terraform-exec v0.3.0/go.mod h1:yKWvMPtkTaHpeAmllw+1qdHZ7E5u+pAZ+x8e2jQF6gM= -github.com/hashicorp/terraform-exec v0.12.0 h1:Tb1VC2gqArl9EJziJjoazep2MyxMk00tnNKV/rgMba0= -github.com/hashicorp/terraform-exec v0.12.0/go.mod h1:SGhto91bVRlgXQWcJ5znSz+29UZIa8kpBbkGwQ+g9E8= +github.com/hashicorp/terraform-exec v0.13.0 h1:1Pth+pdWJAufJuWWjaVOVNEkoRTOjGn3hQpAqj4aPdg= 
+github.com/hashicorp/terraform-exec v0.13.0/go.mod h1:SGhto91bVRlgXQWcJ5znSz+29UZIa8kpBbkGwQ+g9E8= github.com/hashicorp/terraform-json v0.4.0/go.mod h1:eAbqb4w0pSlRmdvl8fOyHAi/+8jnkVYN28gJkSJrLhU= github.com/hashicorp/terraform-json v0.5.0 h1:7TV3/F3y7QVSuN4r9BEXqnWqrAyeOtON8f0wvREtyzs= github.com/hashicorp/terraform-json v0.5.0/go.mod h1:eAbqb4w0pSlRmdvl8fOyHAi/+8jnkVYN28gJkSJrLhU= @@ -217,8 +217,8 @@ github.com/hashicorp/terraform-plugin-sdk v1.9.0 h1:WBHHIX/RgF6/lbfMCzx0qKl96BbQ github.com/hashicorp/terraform-plugin-sdk v1.9.0/go.mod h1:C/AXwmDHqbc3h6URiHpIsVKrwV4PS0Sh0+VTaeEkShw= github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.0 h1:jPPqctLDg75CilV3IpypAz6on3MSMOiUMzXNz+Xex6E= github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.0/go.mod h1:xOf85UtHJ0/9/EF3eKgZFlJ6feN8sDtjQRWRHhimCUw= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.2 h1:8oo4eMtv3nEZGqe8W0UzMxKnKWuwS/Tb2YyIFJkL59g= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.2/go.mod h1:jgCWyjKf1BRqzuA3IPJb6PJ2YY86ePJurX9xfJtuYNU= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.3 h1:DGnxpIYRHXQZb2TOlQ1OCEYxoRQrAcbLIcYm8kvbFuU= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.3/go.mod h1:5wrrTcxbSaQXamCDbHZTHk6yTF9OEZaOvQ9fvLXBE3o= github.com/hashicorp/terraform-plugin-test v1.2.0 h1:AWFdqyfnOj04sxTdaAF57QqvW7XXrT8PseUHkbKsE8I= github.com/hashicorp/terraform-plugin-test v1.2.0/go.mod h1:QIJHYz8j+xJtdtLrFTlzQVC0ocr3rf/OjIpgZLK56Hs= github.com/hashicorp/terraform-plugin-test/v2 v2.0.0-20200724200815-faa9931ac59e h1:Q8lNGrk3SVdXEbLuUJD03jghIjykJT9pu1aReKgb858= diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go index c528de8387f..4e567eeaf12 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go @@ -1,6 +1,6 @@ package version -const version = "0.12.0" +const version = "0.13.0" // ModuleVersion returns the current version of the github.com/hashicorp/terraform-exec Go module. // This is a function to allow for future possible enhancement using debug.BuildInfo. diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go index b055f418dc9..82d09d5fc75 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go @@ -91,7 +91,7 @@ func (tf *Terraform) Apply(ctx context.Context, opts ...ApplyOption) error { if err != nil { return err } - return tf.runTerraformCmd(cmd) + return tf.runTerraformCmd(ctx, cmd) } func (tf *Terraform) applyCmd(ctx context.Context, opts ...ApplyOption) (*exec.Cmd, error) { diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go index 3b9005a6430..e792dc9ce73 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go @@ -171,7 +171,8 @@ func (tf *Terraform) buildEnv(mergeEnv map[string]string) []string { } func (tf *Terraform) buildTerraformCmd(ctx context.Context, mergeEnv map[string]string, args ...string) *exec.Cmd { - cmd := exec.CommandContext(ctx, tf.execPath, args...) + cmd := exec.Command(tf.execPath, args...) 
+ cmd.Env = tf.buildEnv(mergeEnv) cmd.Dir = tf.workingDir @@ -180,11 +181,11 @@ func (tf *Terraform) buildTerraformCmd(ctx context.Context, mergeEnv map[string] return cmd } -func (tf *Terraform) runTerraformCmdJSON(cmd *exec.Cmd, v interface{}) error { +func (tf *Terraform) runTerraformCmdJSON(ctx context.Context, cmd *exec.Cmd, v interface{}) error { var outbuf = bytes.Buffer{} cmd.Stdout = mergeWriters(cmd.Stdout, &outbuf) - err := tf.runTerraformCmd(cmd) + err := tf.runTerraformCmd(ctx, cmd) if err != nil { return err } @@ -194,19 +195,6 @@ func (tf *Terraform) runTerraformCmdJSON(cmd *exec.Cmd, v interface{}) error { return dec.Decode(v) } -func (tf *Terraform) runTerraformCmd(cmd *exec.Cmd) error { - var errBuf strings.Builder - - cmd.Stdout = mergeWriters(cmd.Stdout, tf.stdout) - cmd.Stderr = mergeWriters(cmd.Stderr, tf.stderr, &errBuf) - - err := cmd.Run() - if err != nil { - return tf.parseError(err, errBuf.String()) - } - return nil -} - // mergeUserAgent does some minor deduplication to ensure we aren't // just using the same append string over and over. func mergeUserAgent(uas ...string) string { diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go new file mode 100644 index 00000000000..2e88dd3e6ac --- /dev/null +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go @@ -0,0 +1,45 @@ +// +build !linux + +package tfexec + +import ( + "context" + "os/exec" + "strings" +) + +func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { + var errBuf strings.Builder + + cmd.Stdout = mergeWriters(cmd.Stdout, tf.stdout) + cmd.Stderr = mergeWriters(cmd.Stderr, tf.stderr, &errBuf) + + go func() { + <-ctx.Done() + if ctx.Err() == context.DeadlineExceeded || ctx.Err() == context.Canceled { + if cmd != nil && cmd.Process != nil && cmd.ProcessState != nil { + err := cmd.Process.Kill() + if err != nil { + tf.logger.Printf("error from kill: %s", err) + } + } + } + }() + + // check for early cancellation + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + err := cmd.Run() + if err == nil && ctx.Err() != nil { + err = ctx.Err() + } + if err != nil { + return tf.wrapExitError(ctx, err, errBuf.String()) + } + + return nil +} diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go new file mode 100644 index 00000000000..7cbdcb96f12 --- /dev/null +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go @@ -0,0 +1,54 @@ +package tfexec + +import ( + "context" + "os/exec" + "strings" + "syscall" +) + +func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { + var errBuf strings.Builder + + cmd.Stdout = mergeWriters(cmd.Stdout, tf.stdout) + cmd.Stderr = mergeWriters(cmd.Stderr, tf.stderr, &errBuf) + + cmd.SysProcAttr = &syscall.SysProcAttr{ + // kill children if parent is dead + Pdeathsig: syscall.SIGKILL, + // set process group ID + Setpgid: true, + } + + go func() { + <-ctx.Done() + if ctx.Err() == context.DeadlineExceeded || ctx.Err() == context.Canceled { + if cmd != nil && cmd.Process != nil && cmd.ProcessState != nil { + // send SIGINT to process group + err := syscall.Kill(-cmd.Process.Pid, syscall.SIGINT) + if err != nil { + tf.logger.Printf("error from SIGINT: %s", err) + } + } + + // TODO: send a kill if it doesn't respond for a bit? 
+ } + }() + + // check for early cancellation + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + err := cmd.Run() + if err == nil && ctx.Err() != nil { + err = ctx.Err() + } + if err != nil { + return tf.wrapExitError(ctx, err, errBuf.String()) + } + + return nil +} diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/destroy.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/destroy.go index 61305580ea7..8011c0ba865 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/destroy.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/destroy.go @@ -92,7 +92,7 @@ func (tf *Terraform) Destroy(ctx context.Context, opts ...DestroyOption) error { if err != nil { return err } - return tf.runTerraformCmd(cmd) + return tf.runTerraformCmd(ctx, cmd) } func (tf *Terraform) destroyCmd(ctx context.Context, opts ...DestroyOption) (*exec.Cmd, error) { diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go index 241e6517bd9..7a32ef2f1f7 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go @@ -1,111 +1,8 @@ package tfexec -import ( - "errors" - "fmt" - "os/exec" - "regexp" - "strings" -) +import "fmt" -var ( - // The "Required variable not set:" case is for 0.11 - missingVarErrRegexp = regexp.MustCompile(`Error: No value for required variable|Error: Required variable not set:`) - missingVarNameRegexp = regexp.MustCompile(`The root module input variable\s"(.+)"\sis\snot\sset,\sand\shas\sno\sdefault|Error: Required variable not set: (.+)`) - - usageRegexp = regexp.MustCompile(`Too many command line arguments|^Usage: .*Options:.*|Error: Invalid -\d+ option`) - - // "Could not load plugin" is present in 0.13 - noInitErrRegexp = regexp.MustCompile(`Error: Could not satisfy plugin requirements|Error: Could not load plugin`) - - noConfigErrRegexp = regexp.MustCompile(`Error: No configuration files`) - - workspaceDoesNotExistRegexp = regexp.MustCompile(`Workspace "(.+)" doesn't exist.`) - - workspaceAlreadyExistsRegexp = regexp.MustCompile(`Workspace "(.+)" already exists`) - - tfVersionMismatchErrRegexp = regexp.MustCompile(`Error: The currently running version of Terraform doesn't meet the|Error: Unsupported Terraform Core version`) - tfVersionMismatchConstraintRegexp = regexp.MustCompile(`required_version = "(.+)"|Required version: (.+)\b`) - configInvalidErrRegexp = regexp.MustCompile(`There are some problems with the configuration, described below.`) -) - -func (tf *Terraform) parseError(err error, stderr string) error { - ee, ok := err.(*exec.ExitError) - if !ok { - return err - } - - switch { - case tfVersionMismatchErrRegexp.MatchString(stderr): - constraint := "" - constraints := tfVersionMismatchConstraintRegexp.FindStringSubmatch(stderr) - for i := 1; i < len(constraints); i++ { - constraint = strings.TrimSpace(constraints[i]) - if constraint != "" { - break - } - } - - if constraint == "" { - // hardcode a value here for weird cases (incl. 
0.12) - constraint = "unknown" - } - - // only set this if it happened to be cached already - ver := "" - if tf != nil && tf.execVersion != nil { - ver = tf.execVersion.String() - } - - return &ErrTFVersionMismatch{ - Constraint: constraint, - TFVersion: ver, - } - case missingVarErrRegexp.MatchString(stderr): - name := "" - names := missingVarNameRegexp.FindStringSubmatch(stderr) - for i := 1; i < len(names); i++ { - name = strings.TrimSpace(names[i]) - if name != "" { - break - } - } - - return &ErrMissingVar{name} - case usageRegexp.MatchString(stderr): - return &ErrCLIUsage{stderr: stderr} - case noInitErrRegexp.MatchString(stderr): - return &ErrNoInit{stderr: stderr} - case noConfigErrRegexp.MatchString(stderr): - return &ErrNoConfig{stderr: stderr} - case workspaceDoesNotExistRegexp.MatchString(stderr): - submatches := workspaceDoesNotExistRegexp.FindStringSubmatch(stderr) - if len(submatches) == 2 { - return &ErrNoWorkspace{submatches[1]} - } - case workspaceAlreadyExistsRegexp.MatchString(stderr): - submatches := workspaceAlreadyExistsRegexp.FindStringSubmatch(stderr) - if len(submatches) == 2 { - return &ErrWorkspaceExists{submatches[1]} - } - case configInvalidErrRegexp.MatchString(stderr): - return &ErrConfigInvalid{stderr: stderr} - } - errString := strings.TrimSpace(stderr) - if errString == "" { - // if stderr is empty, return the ExitError directly, as it will have a better message - return ee - } - return errors.New(stderr) -} - -type ErrConfigInvalid struct { - stderr string -} - -func (e *ErrConfigInvalid) Error() string { - return "configuration is invalid" -} +// this file contains non-parsed exported errors type ErrNoSuitableBinary struct { err error @@ -115,17 +12,8 @@ func (e *ErrNoSuitableBinary) Error() string { return fmt.Sprintf("no suitable terraform binary could be found: %s", e.err.Error()) } -// ErrTFVersionMismatch is returned when the running Terraform version is not compatible with the -// value specified for required_version in the terraform block. -type ErrTFVersionMismatch struct { - TFVersion string - - // Constraint is not returned in the error messaging on 0.12 - Constraint string -} - -func (e *ErrTFVersionMismatch) Error() string { - return "terraform core version not supported by configuration" +func (e *ErrNoSuitableBinary) Unwrap() error { + return e.err } // ErrVersionMismatch is returned when the detected Terraform version is not compatible with the @@ -140,69 +28,12 @@ func (e *ErrVersionMismatch) Error() string { return fmt.Sprintf("unexpected version %s (min: %s, max: %s)", e.Actual, e.MinInclusive, e.MaxExclusive) } -type ErrNoInit struct { - stderr string -} - -func (e *ErrNoInit) Error() string { - return e.stderr -} - -type ErrNoConfig struct { - stderr string -} - -func (e *ErrNoConfig) Error() string { - return e.stderr -} - -// ErrCLIUsage is returned when the combination of flags or arguments is incorrect. -// -// CLI indicates usage errors in three different ways: either -// 1. Exit 1, with a custom error message on stderr. -// 2. Exit 1, with command usage logged to stderr. -// 3. Exit 127, with command usage logged to stdout. -// Currently cases 1 and 2 are handled. -// TODO KEM: Handle exit 127 case. How does this work on non-Unix platforms? -type ErrCLIUsage struct { - stderr string -} - -func (e *ErrCLIUsage) Error() string { - return e.stderr -} - // ErrManualEnvVar is returned when an env var that should be set programatically via an option or method // is set via the manual environment passing functions. 
type ErrManualEnvVar struct { - name string -} - -func (err *ErrManualEnvVar) Error() string { - return fmt.Sprintf("manual setting of env var %q detected", err.name) -} - -type ErrMissingVar struct { - VariableName string -} - -func (err *ErrMissingVar) Error() string { - return fmt.Sprintf("variable %q was required but not supplied", err.VariableName) -} - -type ErrNoWorkspace struct { Name string } -func (err *ErrNoWorkspace) Error() string { - return fmt.Sprintf("workspace %q does not exist", err.Name) -} - -// ErrWorkspaceExists is returned when creating a workspace that already exists -type ErrWorkspaceExists struct { - Name string -} - -func (err *ErrWorkspaceExists) Error() string { - return fmt.Sprintf("workspace %q already exists", err.Name) +func (err *ErrManualEnvVar) Error() string { + return fmt.Sprintf("manual setting of env var %q detected", err.Name) } diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/exit_errors.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/exit_errors.go new file mode 100644 index 00000000000..5596fa2a136 --- /dev/null +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/exit_errors.go @@ -0,0 +1,247 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" + "regexp" + "strings" +) + +// this file contains errors parsed from stderr + +var ( + // The "Required variable not set:" case is for 0.11 + missingVarErrRegexp = regexp.MustCompile(`Error: No value for required variable|Error: Required variable not set:`) + missingVarNameRegexp = regexp.MustCompile(`The root module input variable\s"(.+)"\sis\snot\sset,\sand\shas\sno\sdefault|Error: Required variable not set: (.+)`) + + usageRegexp = regexp.MustCompile(`Too many command line arguments|^Usage: .*Options:.*|Error: Invalid -\d+ option`) + + // "Could not load plugin" is present in 0.13 + noInitErrRegexp = regexp.MustCompile(`Error: Could not satisfy plugin requirements|Error: Could not load plugin`) + + noConfigErrRegexp = regexp.MustCompile(`Error: No configuration files`) + + workspaceDoesNotExistRegexp = regexp.MustCompile(`Workspace "(.+)" doesn't exist.`) + + workspaceAlreadyExistsRegexp = regexp.MustCompile(`Workspace "(.+)" already exists`) + + tfVersionMismatchErrRegexp = regexp.MustCompile(`Error: The currently running version of Terraform doesn't meet the|Error: Unsupported Terraform Core version`) + tfVersionMismatchConstraintRegexp = regexp.MustCompile(`required_version = "(.+)"|Required version: (.+)\b`) + configInvalidErrRegexp = regexp.MustCompile(`There are some problems with the configuration, described below.`) +) + +func (tf *Terraform) wrapExitError(ctx context.Context, err error, stderr string) error { + exitErr, ok := err.(*exec.ExitError) + if !ok { + // not an exit error, short circuit, nothing to wrap + return err + } + + ctxErr := ctx.Err() + + // nothing to parse, return early + errString := strings.TrimSpace(stderr) + if errString == "" { + return &unwrapper{exitErr, ctxErr} + } + + switch { + case tfVersionMismatchErrRegexp.MatchString(stderr): + constraint := "" + constraints := tfVersionMismatchConstraintRegexp.FindStringSubmatch(stderr) + for i := 1; i < len(constraints); i++ { + constraint = strings.TrimSpace(constraints[i]) + if constraint != "" { + break + } + } + + if constraint == "" { + // hardcode a value here for weird cases (incl. 
0.12) + constraint = "unknown" + } + + // only set this if it happened to be cached already + ver := "" + if tf != nil && tf.execVersion != nil { + ver = tf.execVersion.String() + } + + return &ErrTFVersionMismatch{ + unwrapper: unwrapper{exitErr, ctxErr}, + + Constraint: constraint, + TFVersion: ver, + } + case missingVarErrRegexp.MatchString(stderr): + name := "" + names := missingVarNameRegexp.FindStringSubmatch(stderr) + for i := 1; i < len(names); i++ { + name = strings.TrimSpace(names[i]) + if name != "" { + break + } + } + + return &ErrMissingVar{ + unwrapper: unwrapper{exitErr, ctxErr}, + + VariableName: name, + } + case usageRegexp.MatchString(stderr): + return &ErrCLIUsage{ + unwrapper: unwrapper{exitErr, ctxErr}, + + stderr: stderr, + } + case noInitErrRegexp.MatchString(stderr): + return &ErrNoInit{ + unwrapper: unwrapper{exitErr, ctxErr}, + + stderr: stderr, + } + case noConfigErrRegexp.MatchString(stderr): + return &ErrNoConfig{ + unwrapper: unwrapper{exitErr, ctxErr}, + + stderr: stderr, + } + case workspaceDoesNotExistRegexp.MatchString(stderr): + submatches := workspaceDoesNotExistRegexp.FindStringSubmatch(stderr) + if len(submatches) == 2 { + return &ErrNoWorkspace{ + unwrapper: unwrapper{exitErr, ctxErr}, + + Name: submatches[1], + } + } + case workspaceAlreadyExistsRegexp.MatchString(stderr): + submatches := workspaceAlreadyExistsRegexp.FindStringSubmatch(stderr) + if len(submatches) == 2 { + return &ErrWorkspaceExists{ + unwrapper: unwrapper{exitErr, ctxErr}, + + Name: submatches[1], + } + } + case configInvalidErrRegexp.MatchString(stderr): + return &ErrConfigInvalid{stderr: stderr} + } + + return fmt.Errorf("%w\n%s", &unwrapper{exitErr, ctxErr}, stderr) +} + +type unwrapper struct { + err error + ctxErr error +} + +func (u *unwrapper) Unwrap() error { + return u.err +} + +func (u *unwrapper) Is(target error) bool { + switch target { + case context.DeadlineExceeded, context.Canceled: + return u.ctxErr == context.DeadlineExceeded || + u.ctxErr == context.Canceled + } + return false +} + +func (u *unwrapper) Error() string { + return u.err.Error() +} + +type ErrConfigInvalid struct { + stderr string +} + +func (e *ErrConfigInvalid) Error() string { + return "configuration is invalid" +} + +type ErrMissingVar struct { + unwrapper + + VariableName string +} + +func (err *ErrMissingVar) Error() string { + return fmt.Sprintf("variable %q was required but not supplied", err.VariableName) +} + +type ErrNoWorkspace struct { + unwrapper + + Name string +} + +func (err *ErrNoWorkspace) Error() string { + return fmt.Sprintf("workspace %q does not exist", err.Name) +} + +// ErrWorkspaceExists is returned when creating a workspace that already exists +type ErrWorkspaceExists struct { + unwrapper + + Name string +} + +func (err *ErrWorkspaceExists) Error() string { + return fmt.Sprintf("workspace %q already exists", err.Name) +} + +type ErrNoInit struct { + unwrapper + + stderr string +} + +func (e *ErrNoInit) Error() string { + return e.stderr +} + +type ErrNoConfig struct { + unwrapper + + stderr string +} + +func (e *ErrNoConfig) Error() string { + return e.stderr +} + +// ErrCLIUsage is returned when the combination of flags or arguments is incorrect. +// +// CLI indicates usage errors in three different ways: either +// 1. Exit 1, with a custom error message on stderr. +// 2. Exit 1, with command usage logged to stderr. +// 3. Exit 127, with command usage logged to stdout. +// Currently cases 1 and 2 are handled. +// TODO KEM: Handle exit 127 case. 
How does this work on non-Unix platforms? +type ErrCLIUsage struct { + unwrapper + + stderr string +} + +func (e *ErrCLIUsage) Error() string { + return e.stderr +} + +// ErrTFVersionMismatch is returned when the running Terraform version is not compatible with the +// value specified for required_version in the terraform block. +type ErrTFVersionMismatch struct { + unwrapper + + TFVersion string + + // Constraint is not returned in the error messaging on 0.12 + Constraint string +} + +func (e *ErrTFVersionMismatch) Error() string { + return "terraform core version not supported by configuration" +} diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/fmt.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/fmt.go index de30890a697..10f6cb4cf42 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/fmt.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/fmt.go @@ -63,7 +63,7 @@ func (tf *Terraform) Format(ctx context.Context, unformatted io.Reader, formatte cmd.Stdin = unformatted cmd.Stdout = mergeWriters(cmd.Stdout, formatted) - return tf.runTerraformCmd(cmd) + return tf.runTerraformCmd(ctx, cmd) } // FormatWrite attempts to format and modify all config files in the working or selected (via DirOption) directory. @@ -82,7 +82,7 @@ func (tf *Terraform) FormatWrite(ctx context.Context, opts ...FormatOption) erro return err } - return tf.runTerraformCmd(cmd) + return tf.runTerraformCmd(ctx, cmd) } // FormatCheck returns true if the config files in the working or selected (via DirOption) directory are already formatted. @@ -104,7 +104,7 @@ func (tf *Terraform) FormatCheck(ctx context.Context, opts ...FormatOption) (boo var outBuf bytes.Buffer cmd.Stdout = mergeWriters(cmd.Stdout, &outBuf) - err = tf.runTerraformCmd(cmd) + err = tf.runTerraformCmd(ctx, cmd) if err == nil { return true, nil, nil } diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/import.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/import.go index cffb4e92583..e243d728177 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/import.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/import.go @@ -78,7 +78,7 @@ func (tf *Terraform) Import(ctx context.Context, address, id string, opts ...Imp if err != nil { return err } - return tf.runTerraformCmd(cmd) + return tf.runTerraformCmd(ctx, cmd) } func (tf *Terraform) importCmd(ctx context.Context, address, id string, opts ...ImportOption) (*exec.Cmd, error) { diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/init.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/init.go index 7d2a6bceb80..bff9ecd3ed5 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/init.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/init.go @@ -98,13 +98,21 @@ func (tf *Terraform) Init(ctx context.Context, opts ...InitOption) error { if err != nil { return err } - return tf.runTerraformCmd(cmd) + return tf.runTerraformCmd(ctx, cmd) } func (tf *Terraform) initCmd(ctx context.Context, opts ...InitOption) (*exec.Cmd, error) { c := defaultInitOptions for _, o := range opts { + switch o.(type) { + case *LockOption, *LockTimeoutOption, *VerifyPluginsOption, *GetPluginsOption: + err := tf.compatible(ctx, nil, tf0_15_0) + if err != nil { + return nil, fmt.Errorf("-lock, -lock-timeout, -verify-plugins, and -get-plugins options are 
no longer available as of Terraform 0.15: %w", err) + } + } + o.configureInit(&c) } @@ -114,17 +122,27 @@ func (tf *Terraform) initCmd(ctx context.Context, opts ...InitOption) (*exec.Cmd if c.fromModule != "" { args = append(args, "-from-module="+c.fromModule) } - if c.lockTimeout != "" { - args = append(args, "-lock-timeout="+c.lockTimeout) + + // string opts removed in 0.15: pass if set and <0.15 + err := tf.compatible(ctx, nil, tf0_15_0) + if err == nil { + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } } // boolean opts: always pass args = append(args, "-backend="+fmt.Sprint(c.backend)) args = append(args, "-get="+fmt.Sprint(c.get)) - args = append(args, "-get-plugins="+fmt.Sprint(c.getPlugins)) - args = append(args, "-lock="+fmt.Sprint(c.lock)) args = append(args, "-upgrade="+fmt.Sprint(c.upgrade)) - args = append(args, "-verify-plugins="+fmt.Sprint(c.verifyPlugins)) + + // boolean opts removed in 0.15: pass if <0.15 + err = tf.compatible(ctx, nil, tf0_15_0) + if err == nil { + args = append(args, "-lock="+fmt.Sprint(c.lock)) + args = append(args, "-get-plugins="+fmt.Sprint(c.getPlugins)) + args = append(args, "-verify-plugins="+fmt.Sprint(c.verifyPlugins)) + } // unary flags: pass if true if c.reconfigure { diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/output.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/output.go index 4c20402532b..b16b8b72890 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/output.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/output.go @@ -37,7 +37,7 @@ func (tf *Terraform) Output(ctx context.Context, opts ...OutputOption) (map[stri outputCmd := tf.outputCmd(ctx, opts...) outputs := map[string]OutputMeta{} - err := tf.runTerraformCmdJSON(outputCmd, &outputs) + err := tf.runTerraformCmdJSON(ctx, outputCmd, &outputs) if err != nil { return nil, err } diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go index 07541acd5dc..bfe77db73ff 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go @@ -96,7 +96,7 @@ func (tf *Terraform) Plan(ctx context.Context, opts ...PlanOption) (bool, error) if err != nil { return false, err } - err = tf.runTerraformCmd(cmd) + err = tf.runTerraformCmd(ctx, cmd) if err != nil && cmd.ProcessState.ExitCode() == 2 { return true, nil } diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_schema.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_schema.go index 75e593a9a2d..52efc5db606 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_schema.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_schema.go @@ -12,7 +12,7 @@ func (tf *Terraform) ProvidersSchema(ctx context.Context) (*tfjson.ProviderSchem schemaCmd := tf.providersSchemaCmd(ctx) var ret tfjson.ProviderSchemas - err := tf.runTerraformCmdJSON(schemaCmd, &ret) + err := tf.runTerraformCmdJSON(ctx, schemaCmd, &ret) if err != nil { return nil, err } diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/refresh.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/refresh.go index 5839d2df004..78f6b4b5014 100644 --- 
a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/refresh.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/refresh.go @@ -75,7 +75,7 @@ func (tf *Terraform) Refresh(ctx context.Context, opts ...RefreshCmdOption) erro if err != nil { return err } - return tf.runTerraformCmd(cmd) + return tf.runTerraformCmd(ctx, cmd) } func (tf *Terraform) refreshCmd(ctx context.Context, opts ...RefreshCmdOption) (*exec.Cmd, error) { diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/show.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/show.go index dbb58f04022..a8d67f1a4cf 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/show.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/show.go @@ -50,7 +50,7 @@ func (tf *Terraform) Show(ctx context.Context, opts ...ShowOption) (*tfjson.Stat var ret tfjson.State ret.UseJSONNumber(true) - err = tf.runTerraformCmdJSON(showCmd, &ret) + err = tf.runTerraformCmdJSON(ctx, showCmd, &ret) if err != nil { return nil, err } @@ -93,7 +93,7 @@ func (tf *Terraform) ShowStateFile(ctx context.Context, statePath string, opts . var ret tfjson.State ret.UseJSONNumber(true) - err = tf.runTerraformCmdJSON(showCmd, &ret) + err = tf.runTerraformCmdJSON(ctx, showCmd, &ret) if err != nil { return nil, err } @@ -135,7 +135,7 @@ func (tf *Terraform) ShowPlanFile(ctx context.Context, planPath string, opts ... showCmd := tf.showCmd(ctx, true, mergeEnv, planPath) var ret tfjson.Plan - err = tf.runTerraformCmdJSON(showCmd, &ret) + err = tf.runTerraformCmdJSON(ctx, showCmd, &ret) if err != nil { return nil, err } @@ -175,7 +175,7 @@ func (tf *Terraform) ShowPlanFileRaw(ctx context.Context, planPath string, opts var ret bytes.Buffer showCmd.Stdout = &ret - err := tf.runTerraformCmd(showCmd) + err := tf.runTerraformCmd(ctx, showCmd) if err != nil { return "", err } diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/state_mv.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/state_mv.go index 1646e52cd80..fc7eecf86ce 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/state_mv.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/state_mv.go @@ -60,7 +60,7 @@ func (tf *Terraform) StateMv(ctx context.Context, source string, destination str if err != nil { return err } - return tf.runTerraformCmd(cmd) + return tf.runTerraformCmd(ctx, cmd) } func (tf *Terraform) stateMvCmd(ctx context.Context, source string, destination string, opts ...StateMvCmdOption) (*exec.Cmd, error) { diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/state_rm.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/state_rm.go new file mode 100644 index 00000000000..0c5dd66676e --- /dev/null +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/state_rm.go @@ -0,0 +1,104 @@ +package tfexec + +import ( + "context" + "os/exec" + "strconv" +) + +type stateRmConfig struct { + backup string + backupOut string + dryRun bool + lock bool + lockTimeout string + state string + stateOut string +} + +var defaultStateRmOptions = stateRmConfig{ + lock: true, + lockTimeout: "0s", +} + +// StateRmCmdOption represents options used in the Refresh method. 
+type StateRmCmdOption interface { + configureStateRm(*stateRmConfig) +} + +func (opt *BackupOption) configureStateRm(conf *stateRmConfig) { + conf.backup = opt.path +} + +func (opt *BackupOutOption) configureStateRm(conf *stateRmConfig) { + conf.backupOut = opt.path +} + +func (opt *DryRunOption) configureStateRm(conf *stateRmConfig) { + conf.dryRun = opt.dryRun +} + +func (opt *LockOption) configureStateRm(conf *stateRmConfig) { + conf.lock = opt.lock +} + +func (opt *LockTimeoutOption) configureStateRm(conf *stateRmConfig) { + conf.lockTimeout = opt.timeout +} + +func (opt *StateOption) configureStateRm(conf *stateRmConfig) { + conf.state = opt.path +} + +func (opt *StateOutOption) configureStateRm(conf *stateRmConfig) { + conf.stateOut = opt.path +} + +// StateRm represents the terraform state rm subcommand. +func (tf *Terraform) StateRm(ctx context.Context, address string, opts ...StateRmCmdOption) error { + cmd, err := tf.stateRmCmd(ctx, address, opts...) + if err != nil { + return err + } + return tf.runTerraformCmd(ctx, cmd) +} + +func (tf *Terraform) stateRmCmd(ctx context.Context, address string, opts ...StateRmCmdOption) (*exec.Cmd, error) { + c := defaultStateRmOptions + + for _, o := range opts { + o.configureStateRm(&c) + } + + args := []string{"state", "rm", "-no-color"} + + // string opts: only pass if set + if c.backup != "" { + args = append(args, "-backup="+c.backup) + } + if c.backupOut != "" { + args = append(args, "-backup-out="+c.backupOut) + } + if c.lockTimeout != "" { + args = append(args, "-lock-timeout="+c.lockTimeout) + } + if c.state != "" { + args = append(args, "-state="+c.state) + } + if c.stateOut != "" { + args = append(args, "-state-out="+c.stateOut) + } + + // boolean and numerical opts: always pass + args = append(args, "-lock="+strconv.FormatBool(c.lock)) + + // unary flags: pass if true + if c.dryRun { + args = append(args, "-dry-run") + } + + // positional arguments + args = append(args, address) + + return tf.buildTerraformCmd(ctx, nil, args...), nil +} diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go index 804402b47f4..74787af0c25 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go @@ -22,6 +22,12 @@ type printfer interface { // but you can override paths used in some commands depending on the available // options. // +// All functions that execute CLI commands take a context.Context. It should be noted that +// exec.Cmd.Run will not return context.DeadlineExceeded or context.Canceled by default, we +// have augmented our wrapped errors to respond true to errors.Is for context.DeadlineExceeded +// and context.Canceled if those are present on the context when the error is parsed. See +// https://github.com/golang/go/issues/21880 for more about the Go limitations. +// // By default, the instance inherits the environment from the calling code (using os.Environ) // but it ignores certain environment variables that are managed within the code and prohibits // setting them through SetEnv: @@ -66,8 +72,9 @@ func NewTerraform(workingDir string, execPath string) (*Terraform, error) { if execPath == "" { err := fmt.Errorf("NewTerraform: please supply the path to a Terraform executable using execPath, e.g. 
using the tfinstall package.") - return nil, &ErrNoSuitableBinary{err: err} - + return nil, &ErrNoSuitableBinary{ + err: err, + } } tf := Terraform{ execPath: execPath, diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade012.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade012.go index 5af05b7e623..e55237a7b6e 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade012.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade012.go @@ -40,7 +40,7 @@ func (tf *Terraform) Upgrade012(ctx context.Context, opts ...Upgrade012Option) e if err != nil { return err } - return tf.runTerraformCmd(cmd) + return tf.runTerraformCmd(ctx, cmd) } func (tf *Terraform) upgrade012Cmd(ctx context.Context, opts ...Upgrade012Option) (*exec.Cmd, error) { diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/validate.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/validate.go index cac9d6b8479..756eccd75c6 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/validate.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/validate.go @@ -22,14 +22,15 @@ func (tf *Terraform) Validate(ctx context.Context) (*tfjson.ValidateOutput, erro var outbuf = bytes.Buffer{} cmd.Stdout = &outbuf - err = tf.runTerraformCmd(cmd) + err = tf.runTerraformCmd(ctx, cmd) // TODO: this command should not exit 1 if you pass -json as its hard to differentiate other errors if err != nil && cmd.ProcessState.ExitCode() != 1 { return nil, err } - var out tfjson.ValidateOutput - jsonErr := json.Unmarshal(outbuf.Bytes(), &out) + var ret tfjson.ValidateOutput + // TODO: ret.UseJSONNumber(true) validate output should support JSON numbers + jsonErr := json.Unmarshal(outbuf.Bytes(), &ret) if jsonErr != nil { // the original call was possibly bad, if it has an error, actually just return that if err != nil { @@ -39,5 +40,5 @@ func (tf *Terraform) Validate(ctx context.Context) (*tfjson.ValidateOutput, erro return nil, jsonErr } - return &out, nil + return &ret, nil } diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go index 6f3f13959da..9e05b056165 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go @@ -15,6 +15,7 @@ var ( tf0_7_7 = version.Must(version.NewVersion("0.7.7")) tf0_12_0 = version.Must(version.NewVersion("0.12.0")) tf0_13_0 = version.Must(version.NewVersion("0.13.0")) + tf0_15_0 = version.Must(version.NewVersion("0.15.0")) ) // Version returns structured output from the terraform version command including both the Terraform CLI version @@ -44,7 +45,7 @@ func (tf *Terraform) version(ctx context.Context) (*version.Version, map[string] var outBuf bytes.Buffer versionCmd.Stdout = &outBuf - err := tf.runTerraformCmd(versionCmd) + err := tf.runTerraformCmd(ctx, versionCmd) if err != nil { return nil, nil, err } diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_list.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_list.go index edf9adab06d..b8d0309454d 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_list.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_list.go 
@@ -14,7 +14,7 @@ func (tf *Terraform) WorkspaceList(ctx context.Context) ([]string, string, error var outBuf bytes.Buffer wlCmd.Stdout = &outBuf - err := tf.runTerraformCmd(wlCmd) + err := tf.runTerraformCmd(ctx, wlCmd) if err != nil { return nil, "", err } diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_new.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_new.go index 1925c286b68..2e05ffdb764 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_new.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_new.go @@ -41,7 +41,7 @@ func (tf *Terraform) WorkspaceNew(ctx context.Context, workspace string, opts .. if err != nil { return err } - return tf.runTerraformCmd(cmd) + return tf.runTerraformCmd(ctx, cmd) } func (tf *Terraform) workspaceNewCmd(ctx context.Context, workspace string, opts ...WorkspaceNewCmdOption) (*exec.Cmd, error) { diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_select.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_select.go index 87f5301ec0c..5a51330f6f5 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_select.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_select.go @@ -6,5 +6,5 @@ import "context" func (tf *Terraform) WorkspaceSelect(ctx context.Context, workspace string) error { // TODO: [DIR] param option - return tf.runTerraformCmd(tf.buildTerraformCmd(ctx, nil, "workspace", "select", "-no-color", workspace)) + return tf.runTerraformCmd(ctx, tf.buildTerraformCmd(ctx, nil, "workspace", "select", "-no-color", workspace)) } diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go index d7fc0c6ff6a..4b130b499dd 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/helper.go @@ -4,6 +4,8 @@ import ( "fmt" "io/ioutil" "os" + + "github.com/hashicorp/terraform-exec/tfexec" ) const subprocessCurrentSigil = "4acd63807899403ca4859f5bb948d2c6" @@ -107,14 +109,21 @@ func (h *Helper) NewWorkingDir() (*WorkingDir, error) { return nil, err } - // symlink the provider source files into the base directory + // symlink the provider source files into the config directory + // e.g. 
testdata err = symlinkDirectoriesOnly(h.sourceDir, dir) if err != nil { return nil, err } + tf, err := tfexec.NewTerraform(dir, h.terraformExec) + if err != nil { + return nil, err + } + return &WorkingDir{ h: h, + tf: tf, baseDir: dir, terraformExec: h.terraformExec, }, nil diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go index 81b8e1ebc75..1faeb5c7a71 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go @@ -13,6 +13,11 @@ import ( tfjson "github.com/hashicorp/terraform-json" ) +const ( + ConfigFileName = "terraform_plugin_test.tf" + PlanFileName = "tfplan" +) + // WorkingDir represents a distinct working directory that can be used for // running tests. Each test should construct its own WorkingDir by calling // NewWorkingDir or RequireNewWorkingDir on its package's singleton @@ -26,9 +31,6 @@ type WorkingDir struct { // baseArgs is arguments that should be appended to all commands baseArgs []string - // configDir contains the singular config file generated for each test - configDir string - // tf is the instance of tfexec.Terraform used for running Terraform commands tf *tfexec.Terraform @@ -75,55 +77,32 @@ func (wd *WorkingDir) GetHelper() *Helper { return wd.h } -func (wd *WorkingDir) relativeConfigDir() (string, error) { - relPath, err := filepath.Rel(wd.baseDir, wd.configDir) - if err != nil { - return "", fmt.Errorf("Error determining relative path of configuration directory: %w", err) - } - return relPath, nil -} - // SetConfig sets a new configuration for the working directory. // // This must be called at least once before any call to Init, Plan, Apply, or // Destroy to establish the configuration. Any previously-set configuration is // discarded and any saved plan is cleared. func (wd *WorkingDir) SetConfig(cfg string) error { - // Each call to SetConfig creates a new directory under our baseDir. - // We create them within so that our final cleanup step will delete them - // automatically without any additional tracking. - configDir, err := ioutil.TempDir(wd.baseDir, "config") - if err != nil { - return err - } - configFilename := filepath.Join(configDir, "terraform_plugin_test.tf") - err = ioutil.WriteFile(configFilename, []byte(cfg), 0700) - if err != nil { - return err - } - - tf, err := tfexec.NewTerraform(wd.baseDir, wd.terraformExec) + configFilename := filepath.Join(wd.baseDir, ConfigFileName) + err := ioutil.WriteFile(configFilename, []byte(cfg), 0700) if err != nil { return err } var mismatch *tfexec.ErrVersionMismatch - err = tf.SetDisablePluginTLS(true) + err = wd.tf.SetDisablePluginTLS(true) if err != nil && !errors.As(err, &mismatch) { return err } - err = tf.SetSkipProviderVerify(true) + err = wd.tf.SetSkipProviderVerify(true) if err != nil && !errors.As(err, &mismatch) { return err } if p := os.Getenv("TF_ACC_LOG_PATH"); p != "" { - tf.SetLogPath(p) + wd.tf.SetLogPath(p) } - wd.configDir = configDir - wd.tf = tf - // Changing configuration invalidates any saved plan. err = wd.ClearPlan() if err != nil { @@ -156,28 +135,32 @@ func (wd *WorkingDir) ClearPlan() error { // Init runs "terraform init" for the given working directory, forcing Terraform // to use the current version of the plugin under test. 
func (wd *WorkingDir) Init() error { - if wd.configDir == "" { + if _, err := os.Stat(wd.configFilename()); err != nil { return fmt.Errorf("must call SetConfig before Init") } - return wd.tf.Init(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Dir(wd.configDir)) + return wd.tf.Init(context.Background(), tfexec.Reattach(wd.reattachInfo)) +} + +func (wd *WorkingDir) configFilename() string { + return filepath.Join(wd.baseDir, ConfigFileName) } func (wd *WorkingDir) planFilename() string { - return filepath.Join(wd.baseDir, "tfplan") + return filepath.Join(wd.baseDir, PlanFileName) } // CreatePlan runs "terraform plan" to create a saved plan file, which if successful // will then be used for the next call to Apply. func (wd *WorkingDir) CreatePlan() error { - _, err := wd.tf.Plan(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false), tfexec.Out("tfplan"), tfexec.Dir(wd.configDir)) + _, err := wd.tf.Plan(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false), tfexec.Out(PlanFileName)) return err } // CreateDestroyPlan runs "terraform plan -destroy" to create a saved plan // file, which if successful will then be used for the next call to Apply. func (wd *WorkingDir) CreateDestroyPlan() error { - _, err := wd.tf.Plan(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false), tfexec.Out("tfplan"), tfexec.Destroy(true), tfexec.Dir(wd.configDir)) + _, err := wd.tf.Plan(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false), tfexec.Out(PlanFileName), tfexec.Destroy(true)) return err } @@ -188,18 +171,9 @@ func (wd *WorkingDir) CreateDestroyPlan() error { func (wd *WorkingDir) Apply() error { args := []tfexec.ApplyOption{tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false)} if wd.HasSavedPlan() { - args = append(args, tfexec.DirOrPlan("tfplan")) - } else { - // we need to use a relative config dir here or we get an - // error about Terraform not having any configuration. See - // https://github.com/hashicorp/terraform-plugin-sdk/issues/495 - // for more info. - configDir, err := wd.relativeConfigDir() - if err != nil { - return err - } - args = append(args, tfexec.DirOrPlan(configDir)) + args = append(args, tfexec.DirOrPlan(PlanFileName)) } + return wd.tf.Apply(context.Background(), args...) } @@ -209,7 +183,7 @@ func (wd *WorkingDir) Apply() error { // If destroy fails then remote objects might still exist, and continue to // exist after a particular test is concluded. func (wd *WorkingDir) Destroy() error { - return wd.tf.Destroy(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false), tfexec.Dir(wd.configDir)) + return wd.tf.Destroy(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.Refresh(false)) } // HasSavedPlan returns true if there is a saved plan in the working directory. 
If @@ -262,12 +236,12 @@ func (wd *WorkingDir) State() (*tfjson.State, error) { // Import runs terraform import func (wd *WorkingDir) Import(resource, id string) error { - return wd.tf.Import(context.Background(), resource, id, tfexec.Config(wd.configDir), tfexec.Reattach(wd.reattachInfo)) + return wd.tf.Import(context.Background(), resource, id, tfexec.Config(wd.baseDir), tfexec.Reattach(wd.reattachInfo)) } // Refresh runs terraform refresh func (wd *WorkingDir) Refresh() error { - return wd.tf.Refresh(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.State(filepath.Join(wd.baseDir, "terraform.tfstate")), tfexec.Dir(wd.configDir)) + return wd.tf.Refresh(context.Background(), tfexec.Reattach(wd.reattachInfo), tfexec.State(filepath.Join(wd.baseDir, "terraform.tfstate"))) } // Schemas returns an object describing the provider schemas. diff --git a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go index a36cabe9049..eb7b3f08e41 100644 --- a/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go +++ b/awsproviderlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go @@ -11,7 +11,7 @@ import ( ) // The main version number that is being run at the moment. -var SDKVersion = "2.4.2" +var SDKVersion = "2.4.3" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release diff --git a/awsproviderlint/vendor/modules.txt b/awsproviderlint/vendor/modules.txt index ebe9107044f..847ac7d9968 100644 --- a/awsproviderlint/vendor/modules.txt +++ b/awsproviderlint/vendor/modules.txt @@ -262,7 +262,7 @@ github.com/hashicorp/hcl/v2/ext/customdecode github.com/hashicorp/hcl/v2/hclsyntax # github.com/hashicorp/logutils v1.0.0 github.com/hashicorp/logutils -# github.com/hashicorp/terraform-exec v0.12.0 +# github.com/hashicorp/terraform-exec v0.13.0 github.com/hashicorp/terraform-exec/internal/version github.com/hashicorp/terraform-exec/tfexec github.com/hashicorp/terraform-exec/tfinstall @@ -275,7 +275,7 @@ github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5 github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto github.com/hashicorp/terraform-plugin-go/tfprotov5/server github.com/hashicorp/terraform-plugin-go/tfprotov5/tftypes -# github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.2 +# github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.3 ## explicit github.com/hashicorp/terraform-plugin-sdk/v2/diag github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging From 7057a296eeaec385c9501e9885ce186bf03030d8 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 11 Feb 2021 17:36:41 +0000 Subject: [PATCH 1117/1212] Update CHANGELOG.md for #17557 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e345dfa7435..be0a0b19ebc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ ENHANCEMENTS: * resource/aws_ses_configuration_set: Add `delivery_options` argument ([#11600](https://github.com/hashicorp/terraform-provider-aws/issues/11600)) * resource/aws_ses_event_destination: Add `arn` attribute ([#13964](https://github.com/hashicorp/terraform-provider-aws/issues/13964)) * resource/aws_ses_event_destination: Add plan time validation for `name`, `cloudwatch_destination.default_value`, `cloudwatch_destination.default_name`, `kinesis_destination.role_arn`, `kinesis_destination.stream_arn`, and 
`sns_destination.topic_arn` attributes ([#13964](https://github.com/hashicorp/terraform-provider-aws/issues/13964)) +* resource/aws_ses_template: Add `arn` attribute ([#13963](https://github.com/hashicorp/terraform-provider-aws/issues/13963)) * resource/aws_sns_topic_subscription: Add `redrive_policy` argument ([#11770](https://github.com/hashicorp/terraform-provider-aws/issues/11770)) BUG FIXES: From 7864e613ebcaa3227a9fc835f1db4ba3acfcb4bb Mon Sep 17 00:00:00 2001 From: bill-rich Date: Fri, 29 Jan 2021 09:32:04 -0800 Subject: [PATCH 1118/1212] Allow use of resource_type and resource_type_list --- aws/resource_aws_fms_policy.go | 76 +++++++++++++------------ aws/resource_aws_fms_policy_test.go | 50 ++++++++++++++++ website/docs/r/fms_policy.html.markdown | 3 +- 3 files changed, 91 insertions(+), 38 deletions(-) diff --git a/aws/resource_aws_fms_policy.go b/aws/resource_aws_fms_policy.go index 5c90292af8e..2d7f83cc343 100644 --- a/aws/resource_aws_fms_policy.go +++ b/aws/resource_aws_fms_policy.go @@ -93,13 +93,20 @@ func resourceAwsFmsPolicy() *schema.Resource { }, "resource_type_list": { - Type: schema.TypeSet, - Required: true, + Type: schema.TypeSet, + Optional: true, + Set: schema.HashString, + ConflictsWith: []string{"resource_type"}, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"AWS::ApiGateway::Stage", "AWS::ElasticLoadBalancingV2::LoadBalancer", "AWS::CloudFront::Distribution", "AWS::EC2::NetworkInterface", "AWS::EC2::Instance", "AWS::EC2::SecurityGroup"}, false), }, - Set: schema.HashString, + }, + + "resource_type": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"resource_type_list"}, }, "policy_update_token": { @@ -138,31 +145,7 @@ func resourceAwsFmsPolicy() *schema.Resource { func resourceAwsFmsPolicyCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).fmsconn - fmsPolicy := &fms.Policy{ - PolicyName: aws.String(d.Get("name").(string)), - RemediationEnabled: aws.Bool(d.Get("remediation_enabled").(bool)), - ResourceType: aws.String("ResourceTypeList"), - ResourceTypeList: expandStringSet(d.Get("resource_type_list").(*schema.Set)), - ExcludeResourceTags: aws.Bool(d.Get("exclude_resource_tags").(bool)), - } - - securityServicePolicy := d.Get("security_service_policy_data").([]interface{})[0].(map[string]interface{}) - fmsPolicy.SecurityServicePolicyData = &fms.SecurityServicePolicyData{ - ManagedServiceData: aws.String(securityServicePolicy["managed_service_data"].(string)), - Type: aws.String(securityServicePolicy["type"].(string)), - } - - if rTags, tagsOk := d.GetOk("resource_tags"); tagsOk { - fmsPolicy.ResourceTags = constructResourceTags(rTags) - } - - if v, ok := d.GetOk("include_map"); ok { - fmsPolicy.IncludeMap = expandFMSPolicyMap(v.([]interface{})) - } - - if v, ok := d.GetOk("exclude_map"); ok { - fmsPolicy.ExcludeMap = expandFMSPolicyMap(v.([]interface{})) - } + fmsPolicy := resourceAwsFmsPolicyExpandPolicy(d) params := &fms.PutPolicyInput{ Policy: fmsPolicy, @@ -201,22 +184,29 @@ func resourceAwsFmsPolicyRead(d *schema.ResourceData, meta interface{}) error { return err } + return resourceAwsFmsPolicyFlattenPolicy(d, resp) +} + +func resourceAwsFmsPolicyFlattenPolicy(d *schema.ResourceData, resp *fms.GetPolicyOutput) error { d.Set("arn", aws.StringValue(resp.PolicyArn)) d.Set("name", aws.StringValue(resp.Policy.PolicyName)) d.Set("exclude_resource_tags", aws.BoolValue(resp.Policy.ExcludeResourceTags)) - if err = d.Set("exclude_map", 
flattenFMSPolicyMap(resp.Policy.ExcludeMap)); err != nil { + if err := d.Set("exclude_map", flattenFMSPolicyMap(resp.Policy.ExcludeMap)); err != nil { return err } - if err = d.Set("include_map", flattenFMSPolicyMap(resp.Policy.IncludeMap)); err != nil { + if err := d.Set("include_map", flattenFMSPolicyMap(resp.Policy.IncludeMap)); err != nil { return err } d.Set("remediation_enabled", aws.BoolValue(resp.Policy.RemediationEnabled)) - if err = d.Set("resource_type_list", resp.Policy.ResourceTypeList); err != nil { + if err := d.Set("resource_type_list", resp.Policy.ResourceTypeList); err != nil { return err } + if aws.StringValue(resp.Policy.ResourceType) != "ResourceTypeList" { + d.Set("resource_type", aws.StringValue(resp.Policy.ResourceType)) + } d.Set("policy_update_token", aws.StringValue(resp.Policy.PolicyUpdateToken)) - if err = d.Set("resource_tags", flattenFMSResourceTags(resp.Policy.ResourceTags)); err != nil { + if err := d.Set("resource_tags", flattenFMSResourceTags(resp.Policy.ResourceTags)); err != nil { return err } @@ -224,23 +214,27 @@ func resourceAwsFmsPolicyRead(d *schema.ResourceData, meta interface{}) error { "type": *resp.Policy.SecurityServicePolicyData.Type, "managed_service_data": *resp.Policy.SecurityServicePolicyData.ManagedServiceData, }} - if err = d.Set("security_service_policy_data", securityServicePolicy); err != nil { + if err := d.Set("security_service_policy_data", securityServicePolicy); err != nil { return err } return nil } -func resourceAwsFmsPolicyUpdate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).fmsconn +func resourceAwsFmsPolicyExpandPolicy(d *schema.ResourceData) *fms.Policy { + resourceType := aws.String("ResourceTypeList") + resourceTypeList := expandStringSet(d.Get("resource_type_list").(*schema.Set)) + if t, ok := d.GetOk("resource_type"); ok { + resourceType = aws.String(t.(string)) + } fmsPolicy := &fms.Policy{ PolicyName: aws.String(d.Get("name").(string)), PolicyId: aws.String(d.Id()), PolicyUpdateToken: aws.String(d.Get("policy_update_token").(string)), RemediationEnabled: aws.Bool(d.Get("remediation_enabled").(bool)), - ResourceType: aws.String("ResourceTypeList"), - ResourceTypeList: expandStringSet(d.Get("resource_type_list").(*schema.Set)), + ResourceType: resourceType, + ResourceTypeList: resourceTypeList, ExcludeResourceTags: aws.Bool(d.Get("exclude_resource_tags").(bool)), } @@ -256,6 +250,14 @@ func resourceAwsFmsPolicyUpdate(d *schema.ResourceData, meta interface{}) error Type: aws.String(securityServicePolicy["type"].(string)), } + return fmsPolicy +} + +func resourceAwsFmsPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fmsconn + + fmsPolicy := resourceAwsFmsPolicyExpandPolicy(d) + params := &fms.PutPolicyInput{Policy: fmsPolicy} _, err := conn.PutPolicy(params) diff --git a/aws/resource_aws_fms_policy_test.go b/aws/resource_aws_fms_policy_test.go index 802aab68931..a2b93ac1b52 100644 --- a/aws/resource_aws_fms_policy_test.go +++ b/aws/resource_aws_fms_policy_test.go @@ -40,6 +40,34 @@ func TestAccAWSFmsPolicy_basic(t *testing.T) { }) } +func TestAccAWSFmsPolicy_cloudfrontDistribution(t *testing.T) { + fmsPolicyName := fmt.Sprintf("tf-fms-%s", acctest.RandString(5)) + wafRuleGroupName := fmt.Sprintf("tf-waf-rg-%s", acctest.RandString(5)) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsFmsPolicyDestroy, + Steps: []resource.TestStep{ + { + Config: 
testAccFmsPolicyConfig_cloudfrontDistribution(fmsPolicyName, wafRuleGroupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsFmsPolicyExists("aws_fms_policy.test"), + testAccMatchResourceAttrRegionalARN("aws_fms_policy.test", "arn", "fms", regexp.MustCompile(`policy/`)), + resource.TestCheckResourceAttr("aws_fms_policy.test", "name", fmsPolicyName), + resource.TestCheckResourceAttr("aws_fms_policy.test", "security_service_policy_data.#", "1"), + ), + }, + { + ResourceName: "aws_fms_policy.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"policy_update_token", "delete_all_policy_resources"}, + }, + }, + }) +} + func TestAccAWSFmsPolicy_includeMap(t *testing.T) { fmsPolicyName := fmt.Sprintf("tf-fms-%s", acctest.RandString(5)) wafRuleGroupName := fmt.Sprintf("tf-waf-rg-%s", acctest.RandString(5)) @@ -187,6 +215,28 @@ resource "aws_fms_policy" "test" { data "aws_organizations_organization" "example" {} +resource "aws_wafregional_rule_group" "test" { + metric_name = "MyTest" + name = "%[2]s" +} +`, name, group) +} + +func testAccFmsPolicyConfig_cloudfrontDistribution(name string, group string) string { + return fmt.Sprintf(` +resource "aws_fms_policy" "test" { + exclude_resource_tags = false + name = "%[1]s" + remediation_enabled = false + resource_type = "AWS::CloudFront::Distribution" + + security_service_policy_data { + type = "WAFV2" + managed_service_data = "{\"type\": \"WAF\", \"ruleGroups\": [{\"id\":\"${aws_wafregional_rule_group.test.id}\", \"overrideAction\" : {\"type\": \"COUNT\"}}],\"defaultAction\": {\"type\": \"BLOCK\"}, \"overrideCustomerWebACLAssociation\": false}" + } +} + + resource "aws_wafregional_rule_group" "test" { metric_name = "MyTest" name = "%[2]s" diff --git a/website/docs/r/fms_policy.html.markdown b/website/docs/r/fms_policy.html.markdown index 45725f6d25e..803ac1f3057 100644 --- a/website/docs/r/fms_policy.html.markdown +++ b/website/docs/r/fms_policy.html.markdown @@ -59,7 +59,8 @@ The following arguments are supported: * `include_map` - (Optional) A map of lists, with a single key named 'account' with a list of AWS Account IDs to include for this policy. * `remediation_enabled` - (Required) A boolean value, indicates if the policy should automatically applied to resources that already exist in the account. * `resource_tags` - (Optional) A map of resource tags, that if present will filter protections on resources based on the exclude_resource_tags. -* `resource_type_list` - (Required, Forces new resource) A list of resource types to protect, valid values are: `AWS::ElasticLoadBalancingV2::LoadBalancer`, `AWS::ApiGateway::Stage`, `AWS::CloudFront::Distribution`. +* `resource_type` - (Optional) A resource type to protect, valid values are: `AWS::ElasticLoadBalancingV2::LoadBalancer`, `AWS::ApiGateway::Stage`, `AWS::CloudFront::Distribution`. Conflicts with `resource_type_list`. +* `resource_type_list` - (Optional) A list of resource types to protect, valid values are: `AWS::ElasticLoadBalancingV2::LoadBalancer`, `AWS::ApiGateway::Stage`, `AWS::CloudFront::Distribution`. Conflicts with `resource_type`. * `security_service_policy_data` - (Required) The objects to include in Security Service Policy Data. Documented below. 
## `exclude_map` Configuration Block From dce8bef122845653abbffd6f82f14e7c539b3f96 Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Thu, 11 Feb 2021 19:50:17 +0200 Subject: [PATCH 1119/1212] resource/aws_ses_active_receipt_rule_set: Add arn attribute (#13962) References: - https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonses.html#amazonses-resources-for-iam-policies - https://docs.aws.amazon.com/ses/latest/APIReference/API_ReceiptRuleSetMetadata.html (no arn or owner id fields) Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAWSSESActiveReceiptRuleSet_serial (24.89s) --- PASS: TestAccAWSSESActiveReceiptRuleSet_serial/basic (13.02s) --- PASS: TestAccAWSSESActiveReceiptRuleSet_serial/disappears (11.87s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- PASS: TestAccAWSSESActiveReceiptRuleSet_serial (3.74s) --- SKIP: TestAccAWSSESActiveReceiptRuleSet_serial/basic (1.95s) --- SKIP: TestAccAWSSESActiveReceiptRuleSet_serial/disappears (1.79s) ``` --- .changelog/13962.txt | 7 +++++ ...esource_aws_ses_active_receipt_rule_set.go | 27 +++++++++++++++---- ...ce_aws_ses_active_receipt_rule_set_test.go | 3 ++- website/docs/index.html.markdown | 5 ++-- .../ses_active_receipt_rule_set.html.markdown | 7 +++++ 5 files changed, 41 insertions(+), 8 deletions(-) create mode 100644 .changelog/13962.txt diff --git a/.changelog/13962.txt b/.changelog/13962.txt new file mode 100644 index 00000000000..ce98a3fd700 --- /dev/null +++ b/.changelog/13962.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_ses_active_receipt_rule_set: Add `arn` attribute +``` + +```release-note:enhancement +resource/aws_ses_active_receipt_rule_set: Add plan time validation for `rule_set_name` argument +``` diff --git a/aws/resource_aws_ses_active_receipt_rule_set.go b/aws/resource_aws_ses_active_receipt_rule_set.go index ccdd8ff2743..b506278ee56 100644 --- a/aws/resource_aws_ses_active_receipt_rule_set.go +++ b/aws/resource_aws_ses_active_receipt_rule_set.go @@ -5,8 +5,10 @@ import ( "log" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ses" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceAwsSesActiveReceiptRuleSet() *schema.Resource { @@ -17,9 +19,14 @@ func resourceAwsSesActiveReceiptRuleSet() *schema.Resource { Delete: resourceAwsSesActiveReceiptRuleSetDelete, Schema: map[string]*schema.Schema{ - "rule_set_name": { + "arn": { Type: schema.TypeString, - Required: true, + Computed: true, + }, + "rule_set_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 64), }, }, } @@ -59,13 +66,23 @@ func resourceAwsSesActiveReceiptRuleSetRead(d *schema.ResourceData, meta interfa return err } - if response.Metadata != nil { - d.Set("rule_set_name", response.Metadata.Name) - } else { + if response.Metadata == nil { log.Print("[WARN] No active Receipt Rule Set found") d.SetId("") + return nil } + d.Set("rule_set_name", response.Metadata.Name) + + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "ses", + Region: meta.(*AWSClient).region, + AccountID: meta.(*AWSClient).accountid, + Resource: fmt.Sprintf("receipt-rule-set/%s", d.Id()), + }.String() + d.Set("arn", arn) + return nil } diff --git a/aws/resource_aws_ses_active_receipt_rule_set_test.go b/aws/resource_aws_ses_active_receipt_rule_set_test.go index 3b2c9d239ff..3bc025bf64c 100644 --- 
a/aws/resource_aws_ses_active_receipt_rule_set_test.go
+++ b/aws/resource_aws_ses_active_receipt_rule_set_test.go
@@ -45,6 +45,7 @@ func testAccAWSSESActiveReceiptRuleSet_basic(t *testing.T) {
 				Config: testAccAWSSESActiveReceiptRuleSetConfig(rName),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAwsSESActiveReceiptRuleSetExists(resourceName),
+					testAccCheckResourceAttrRegionalARN(resourceName, "arn", "ses", fmt.Sprintf("receipt-rule-set/%s", rName)),
 				),
 			},
 		},
@@ -128,7 +129,7 @@ func testAccCheckAwsSESActiveReceiptRuleSetExists(n string) resource.TestCheckFu
 func testAccAWSSESActiveReceiptRuleSetConfig(name string) string {
 	return fmt.Sprintf(`
 resource "aws_ses_receipt_rule_set" "test" {
-  rule_set_name = "%s"
+  rule_set_name = %[1]q
 }
 
 resource "aws_ses_active_receipt_rule_set" "test" {
diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown
index 0d0039d87f8..de3ede981d2 100644
--- a/website/docs/index.html.markdown
+++ b/website/docs/index.html.markdown
@@ -315,12 +315,13 @@ for more information about connecting to alternate AWS endpoints or AWS compatib
   - [`aws_redshift_snapshot_schedule` resource](/docs/providers/aws/r/redshift_snapshot_schedule.html)
   - [`aws_redshift_subnet_group` resource](/docs/providers/aws/r/redshift_subnet_group.html)
   - [`aws_s3_account_public_access_block` resource](/docs/providers/aws/r/s3_account_public_access_block.html)
+  - [`aws_ses_active_receipt_rule_set` resource](/docs/providers/aws/r/ses_active_receipt_rule_set.html)
   - [`aws_ses_domain_identity` resource](/docs/providers/aws/r/ses_domain_identity.html)
   - [`aws_ses_domain_identity_verification` resource](/docs/providers/aws/r/ses_domain_identity_verification.html)
   - [`aws_ses_email_identity` resource](/docs/providers/aws/r/ses_email_identity.html)
-  - [`aws_ses_event_destination` resource](/docs/providers/aws/r/ses_event_destination.html)
+   - [`aws_ses_event_destination` resource](/docs/providers/aws/r/ses_event_destination.html)
   - [`aws_ses_receipt_filter` resource](/docs/providers/aws/r/ses_receipt_filter.html)
-  - [`aws_ses_template` resource](/docs/providers/aws/r/ses_template.html)
+   - [`aws_ses_template` resource](/docs/providers/aws/r/ses_template.html)
   - [`aws_ssm_document` data source](/docs/providers/aws/d/ssm_document.html)
   - [`aws_ssm_document` resource](/docs/providers/aws/r/ssm_document.html)
   - [`aws_ssm_parameter` data source](/docs/providers/aws/d/ssm_parameter.html)
diff --git a/website/docs/r/ses_active_receipt_rule_set.html.markdown b/website/docs/r/ses_active_receipt_rule_set.html.markdown
index 25db2e57033..23fb50b536c 100644
--- a/website/docs/r/ses_active_receipt_rule_set.html.markdown
+++ b/website/docs/r/ses_active_receipt_rule_set.html.markdown
@@ -23,3 +23,10 @@ The following arguments are supported:
 
 * `rule_set_name` - (Required) The name of the rule set.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - The SES receipt rule set name.
+* `arn` - The SES receipt rule set ARN.
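As a usage sketch for the newly exported `arn` (the rule set name below is hypothetical; only `rule_set_name` is an input, the ARN is computed):

```hcl
resource "aws_ses_receipt_rule_set" "main" {
  rule_set_name = "primary-rules" # hypothetical name
}

resource "aws_ses_active_receipt_rule_set" "main" {
  rule_set_name = aws_ses_receipt_rule_set.main.rule_set_name
}

# The computed ARN can now be referenced elsewhere, e.g. to scope IAM policies.
output "active_rule_set_arn" {
  value = aws_ses_active_receipt_rule_set.main.arn
}
```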
From 167df860bdd7fcaa648f29a85b0e61a51b65f4b0 Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Sat, 27 Jun 2020 22:35:45 +0300 Subject: [PATCH 1120/1212] add arn --- aws/resource_aws_ses_configuration_set.go | 21 ++++++-- ...resource_aws_ses_configuration_set_test.go | 54 ++++++++++--------- website/docs/r/ses_configuration_set.markdown | 7 +++ 3 files changed, 54 insertions(+), 28 deletions(-) diff --git a/aws/resource_aws_ses_configuration_set.go b/aws/resource_aws_ses_configuration_set.go index 3af7f7ddae3..b9702ea7216 100644 --- a/aws/resource_aws_ses_configuration_set.go +++ b/aws/resource_aws_ses_configuration_set.go @@ -5,6 +5,7 @@ import ( "log" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ses" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -21,6 +22,10 @@ func resourceAwsSesConfigurationSet() *schema.Resource { }, Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, "delivery_options": { Type: schema.TypeList, Optional: true, @@ -37,9 +42,10 @@ func resourceAwsSesConfigurationSet() *schema.Resource { }, }, "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 64), }, }, } @@ -103,6 +109,15 @@ func resourceAwsSesConfigurationSetRead(d *schema.ResourceData, meta interface{} d.Set("name", aws.StringValue(response.ConfigurationSet.Name)) + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "ses", + Region: meta.(*AWSClient).region, + AccountID: meta.(*AWSClient).accountid, + Resource: fmt.Sprintf("configuration-set/%s", d.Id()), + }.String() + d.Set("arn", arn) + return nil } diff --git a/aws/resource_aws_ses_configuration_set_test.go b/aws/resource_aws_ses_configuration_set_test.go index d2c0611797c..e8979578fcf 100644 --- a/aws/resource_aws_ses_configuration_set_test.go +++ b/aws/resource_aws_ses_configuration_set_test.go @@ -68,7 +68,7 @@ func testSweepSesConfigurationSets(region string) error { } func TestAccAWSSESConfigurationSet_basic(t *testing.T) { - var escRandomInteger = acctest.RandInt() + rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_ses_configuration_set.test" resource.ParallelTest(t, resource.TestCase{ @@ -80,9 +80,11 @@ func TestAccAWSSESConfigurationSet_basic(t *testing.T) { CheckDestroy: testAccCheckSESConfigurationSetDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSSESConfigurationSetBasicConfig(escRandomInteger), + Config: testAccAWSSESConfigurationSetBasicConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAwsSESConfigurationSetExists(resourceName), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "ses", fmt.Sprintf("configuration-set/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "delivery_options.#", "0"), ), }, @@ -96,7 +98,7 @@ func TestAccAWSSESConfigurationSet_basic(t *testing.T) { } func TestAccAWSSESConfigurationSet_deliveryOptions(t *testing.T) { - var escRandomInteger = acctest.RandInt() + rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_ses_configuration_set.test" resource.ParallelTest(t, resource.TestCase{ @@ -108,7 +110,7 @@ func TestAccAWSSESConfigurationSet_deliveryOptions(t *testing.T) { CheckDestroy: testAccCheckSESConfigurationSetDestroy, Steps: []resource.TestStep{ { - 
Config: testAccAWSSESConfigurationSetDeliveryOptionsConfig(escRandomInteger, ses.TlsPolicyRequire), + Config: testAccAWSSESConfigurationSetDeliveryOptionsConfig(rName, ses.TlsPolicyRequire), Check: resource.ComposeTestCheckFunc( testAccCheckAwsSESConfigurationSetExists(resourceName), resource.TestCheckResourceAttr(resourceName, "delivery_options.#", "1"), @@ -125,7 +127,7 @@ func TestAccAWSSESConfigurationSet_deliveryOptions(t *testing.T) { } func TestAccAWSSESConfigurationSet_update_deliveryOptions(t *testing.T) { - var escRandomInteger = acctest.RandInt() + rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_ses_configuration_set.test" resource.ParallelTest(t, resource.TestCase{ @@ -137,13 +139,13 @@ func TestAccAWSSESConfigurationSet_update_deliveryOptions(t *testing.T) { CheckDestroy: testAccCheckSESConfigurationSetDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSSESConfigurationSetBasicConfig(escRandomInteger), + Config: testAccAWSSESConfigurationSetBasicConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAwsSESConfigurationSetExists(resourceName), ), }, { - Config: testAccAWSSESConfigurationSetDeliveryOptionsConfig(escRandomInteger, ses.TlsPolicyRequire), + Config: testAccAWSSESConfigurationSetDeliveryOptionsConfig(rName, ses.TlsPolicyRequire), Check: resource.ComposeTestCheckFunc( testAccCheckAwsSESConfigurationSetExists(resourceName), resource.TestCheckResourceAttr(resourceName, "delivery_options.#", "1"), @@ -156,7 +158,7 @@ func TestAccAWSSESConfigurationSet_update_deliveryOptions(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccAWSSESConfigurationSetDeliveryOptionsConfig(escRandomInteger, ses.TlsPolicyOptional), + Config: testAccAWSSESConfigurationSetDeliveryOptionsConfig(rName, ses.TlsPolicyOptional), Check: resource.ComposeTestCheckFunc( testAccCheckAwsSESConfigurationSetExists(resourceName), resource.TestCheckResourceAttr(resourceName, "delivery_options.#", "1"), @@ -164,7 +166,7 @@ func TestAccAWSSESConfigurationSet_update_deliveryOptions(t *testing.T) { ), }, { - Config: testAccAWSSESConfigurationSetBasicConfig(escRandomInteger), + Config: testAccAWSSESConfigurationSetBasicConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAwsSESConfigurationSetExists(resourceName), resource.TestCheckResourceAttr(resourceName, "delivery_options.#", "0"), @@ -180,7 +182,7 @@ func TestAccAWSSESConfigurationSet_update_deliveryOptions(t *testing.T) { } func TestAccAWSSESConfigurationSet_emptyDeliveryOptions(t *testing.T) { - var escRandomInteger = acctest.RandInt() + rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_ses_configuration_set.test" resource.ParallelTest(t, resource.TestCase{ @@ -192,7 +194,7 @@ func TestAccAWSSESConfigurationSet_emptyDeliveryOptions(t *testing.T) { CheckDestroy: testAccCheckSESConfigurationSetDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSSESConfigurationSetEmptyDeliveryOptionsConfig(escRandomInteger), + Config: testAccAWSSESConfigurationSetEmptyDeliveryOptionsConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAwsSESConfigurationSetExists(resourceName), resource.TestCheckResourceAttr(resourceName, "delivery_options.#", "1"), @@ -209,7 +211,7 @@ func TestAccAWSSESConfigurationSet_emptyDeliveryOptions(t *testing.T) { } func TestAccAWSSESConfigurationSet_update_emptyDeliveryOptions(t *testing.T) { - var escRandomInteger = acctest.RandInt() + rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_ses_configuration_set.test" 
resource.ParallelTest(t, resource.TestCase{
@@ -221,14 +223,14 @@ func TestAccAWSSESConfigurationSet_update_emptyDeliveryOptions(t *testing.T) {
 		CheckDestroy: testAccCheckSESConfigurationSetDestroy,
 		Steps: []resource.TestStep{
 			{
-				Config: testAccAWSSESConfigurationSetBasicConfig(escRandomInteger),
+				Config: testAccAWSSESConfigurationSetBasicConfig(rName),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAwsSESConfigurationSetExists(resourceName),
 					resource.TestCheckResourceAttr(resourceName, "delivery_options.#", "0"),
 				),
 			},
 			{
-				Config: testAccAWSSESConfigurationSetEmptyDeliveryOptionsConfig(escRandomInteger),
+				Config: testAccAWSSESConfigurationSetEmptyDeliveryOptionsConfig(rName),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAwsSESConfigurationSetExists(resourceName),
 					resource.TestCheckResourceAttr(resourceName, "delivery_options.#", "1"),
@@ -241,7 +243,7 @@ func TestAccAWSSESConfigurationSet_update_emptyDeliveryOptions(t *testing.T) {
 				ImportStateVerify: true,
 			},
 			{
-				Config: testAccAWSSESConfigurationSetBasicConfig(escRandomInteger),
+				Config: testAccAWSSESConfigurationSetBasicConfig(rName),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAwsSESConfigurationSetExists(resourceName),
 					resource.TestCheckResourceAttr(resourceName, "delivery_options.#", "0"),
@@ -307,30 +309,32 @@ func testAccCheckSESConfigurationSetDestroy(s *terraform.State) error {
 	return nil
 }
 
-func testAccAWSSESConfigurationSetBasicConfig(escRandomInteger int) string {
+func testAccAWSSESConfigurationSetBasicConfig(rName string) string {
 	return fmt.Sprintf(`
 resource "aws_ses_configuration_set" "test" {
-  name = "some-configuration-set-%d"
+  name = %[1]q
 }
-`, escRandomInteger)
+`, rName)
 }
 
-func testAccAWSSESConfigurationSetDeliveryOptionsConfig(escRandomInteger int, tlsPolicy string) string {
+func testAccAWSSESConfigurationSetDeliveryOptionsConfig(rName, tlsPolicy string) string {
 	return fmt.Sprintf(`
 resource "aws_ses_configuration_set" "test" {
-  name = "some-configuration-set-%d"
+  name = %[1]q
+
   delivery_options {
-    tls_policy = %q
+    tls_policy = %[2]q
   }
 }
-`, escRandomInteger, tlsPolicy)
+`, rName, tlsPolicy)
 }
 
-func testAccAWSSESConfigurationSetEmptyDeliveryOptionsConfig(escRandomInteger int) string {
+func testAccAWSSESConfigurationSetEmptyDeliveryOptionsConfig(rName string) string {
 	return fmt.Sprintf(`
 resource "aws_ses_configuration_set" "test" {
-  name = "some-configuration-set-%d"
+  name = %[1]q
+
   delivery_options {}
 }
-`, escRandomInteger)
+`, rName)
 }
diff --git a/website/docs/r/ses_configuration_set.markdown b/website/docs/r/ses_configuration_set.markdown
index 1565dccbb54..3648b2943cf 100644
--- a/website/docs/r/ses_configuration_set.markdown
+++ b/website/docs/r/ses_configuration_set.markdown
@@ -43,6 +43,13 @@ The `delivery_options` configuration block supports the following argument:
 
 * `tls_policy` - (Optional) Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). If the value is `Require`, messages are only delivered if a TLS connection can be established. If the value is `Optional`, messages can be delivered in plain text if a TLS connection can't be established. Valid values: `Require` or `Optional`. Defaults to `Optional`.
 
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - The SES configuration set name.
+* `arn` - The SES configuration set ARN.
+
 ## Import
 
 SES Configuration Sets can be imported using their `name`, e.g.
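Separately from the import note above, a minimal usage sketch tying the refactored configuration to the new `arn` attribute (the set name is hypothetical):

```hcl
resource "aws_ses_configuration_set" "example" {
  name = "tls-required-set" # hypothetical name

  delivery_options {
    tls_policy = "Require" # deliver only over TLS
  }
}

# The computed ARN added by this change can be exported or referenced.
output "configuration_set_arn" {
  value = aws_ses_configuration_set.example.arn
}
```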
From 1a3d6b1e0d33dfd4685b6eed32185f75e2886bd8 Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Sat, 25 Jul 2020 15:31:19 +0300 Subject: [PATCH 1121/1212] add docs --- website/docs/index.html.markdown | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index de3ede981d2..822968395ea 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -316,8 +316,9 @@ for more information about connecting to alternate AWS endpoints or AWS compatib - [`aws_redshift_subnet_group` resource](/docs/providers/aws/r/redshift_subnet_group.html) - [`aws_s3_account_public_access_block` resource](/docs/providers/aws/r/s3_account_public_access_block.html) - [`aws_ses_active_receipt_rule_set` resource](/docs/providers/aws/r/ses_active_receipt_rule_set.html) - - [`aws_ses_domain_identity` resource](/docs/providers/aws/r/ses_domain_identity.html) + - [`aws_ses_configuration_set` resource](/docs/providers/aws/r/ses_configuration_set.html) - [`aws_ses_domain_identity_verification` resource](/docs/providers/aws/r/ses_domain_identity_verification.html) + - [`aws_ses_domain_identity` resource](/docs/providers/aws/r/ses_domain_identity.html) - [`aws_ses_email_identity` resource](/docs/providers/aws/r/ses_email_identity.html) - [`aws_ses_event_destination` resource](/docs/providers/aws/r/ses_event_destination.html) - [`aws_ses_receipt_filter` resource](/docs/providers/aws/r/ses_receipt_filter.html) From 85d8640a857e3848748256d088acdbfcd0d46735 Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Sat, 25 Jul 2020 16:03:13 +0300 Subject: [PATCH 1122/1212] fix file ending --- ...iguration_set.markdown => ses_configuration_set.html.markdown} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename website/docs/r/{ses_configuration_set.markdown => ses_configuration_set.html.markdown} (100%) diff --git a/website/docs/r/ses_configuration_set.markdown b/website/docs/r/ses_configuration_set.html.markdown similarity index 100% rename from website/docs/r/ses_configuration_set.markdown rename to website/docs/r/ses_configuration_set.html.markdown From 145ed11baae7400b9b2feb2ed8e6ca6895af8f64 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 12 Nov 2020 20:47:50 +0200 Subject: [PATCH 1123/1212] lint --- website/docs/index.html.markdown | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index 822968395ea..3ccf531e0a4 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -269,13 +269,13 @@ for more information about connecting to alternate AWS endpoints or AWS compatib - [`aws_ec2_traffic_mirror_filter_rule` resource](/docs/providers/aws/r/ec2_traffic_mirror_filter_rule.html) - [`aws_ec2_traffic_mirror_session` resource](/docs/providers/aws/r/ec2_traffic_mirror_session.html) - [`aws_ec2_traffic_mirror_target` resource](/docs/providers/aws/r/ec2_traffic_mirror_target.html) - - [`aws_ec2_transit_gateway_route_table` data source](/docs/providers/aws/d/ec2_transit_gateway_route_table.html) - - [`aws_ec2_transit_gateway_route_table` resource](/docs/providers/aws/r/ec2_transit_gateway_route_table.html) + - [`aws_ec2_transit_gateway_route_table` data source](/docs/providers/aws/d/ec2_transit_gateway_route_table.html) + - [`aws_ec2_transit_gateway_route_table` resource](/docs/providers/aws/r/ec2_transit_gateway_route_table.html) - [`aws_ecs_capacity_provider` resource 
(import)](/docs/providers/aws/r/ecs_capacity_provider.html) - [`aws_ecs_cluster` resource (import)](/docs/providers/aws/r/ecs_cluster.html) - [`aws_ecs_service` resource (import)](/docs/providers/aws/r/ecs_service.html) - [`aws_customer_gateway` data source](/docs/providers/aws/d/customer_gateway.html) - - [`aws_customer_gateway` resource](/docs/providers/aws/r/customer_gateway.html) + - [`aws_customer_gateway` resource](/docs/providers/aws/r/customer_gateway.html) - [`aws_efs_access_point` data source](/docs/providers/aws/d/efs_access_point.html) - [`aws_efs_access_point` resource](/docs/providers/aws/r/efs_access_point.html) - [`aws_efs_file_system` data source](/docs/providers/aws/d/efs_file_system.html) @@ -316,7 +316,8 @@ for more information about connecting to alternate AWS endpoints or AWS compatib - [`aws_redshift_subnet_group` resource](/docs/providers/aws/r/redshift_subnet_group.html) - [`aws_s3_account_public_access_block` resource](/docs/providers/aws/r/s3_account_public_access_block.html) - [`aws_ses_active_receipt_rule_set` resource](/docs/providers/aws/r/ses_active_receipt_rule_set.html) - - [`aws_ses_configuration_set` resource](/docs/providers/aws/r/ses_configuration_set.html) + - [`aws_ses_configuration_set` resource](/docs/providers/aws/r/ses_configuration_set.html) + - [`aws_ses_configuration_set` resource](/docs/providers/aws/r/ses_configuration_set.html) - [`aws_ses_domain_identity_verification` resource](/docs/providers/aws/r/ses_domain_identity_verification.html) - [`aws_ses_domain_identity` resource](/docs/providers/aws/r/ses_domain_identity.html) - [`aws_ses_email_identity` resource](/docs/providers/aws/r/ses_email_identity.html) From 6e2167a5dfbf8837bb39469ba18c23a07f33897c Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 11 Feb 2021 16:59:35 +0200 Subject: [PATCH 1124/1212] add changelog --- .changelog/13972.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/13972.txt diff --git a/.changelog/13972.txt b/.changelog/13972.txt new file mode 100644 index 00000000000..17989b13b3d --- /dev/null +++ b/.changelog/13972.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_ses_configuration_set: Add `arn` attribute. 
+``` From e18d5d8d96eb628f60f71a8f538287bdeace875c Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 11 Feb 2021 17:09:03 +0200 Subject: [PATCH 1125/1212] add disappears + validation --- ...resource_aws_ses_configuration_set_test.go | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/aws/resource_aws_ses_configuration_set_test.go b/aws/resource_aws_ses_configuration_set_test.go index e8979578fcf..0917a41e965 100644 --- a/aws/resource_aws_ses_configuration_set_test.go +++ b/aws/resource_aws_ses_configuration_set_test.go @@ -258,6 +258,30 @@ func TestAccAWSSESConfigurationSet_update_emptyDeliveryOptions(t *testing.T) { }) } +func TestAccAWSSESConfigurationSet_disappears(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_ses_configuration_set.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccPreCheckAWSSES(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSESConfigurationSetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSESConfigurationSetConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsSESConfigurationSetExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsSesConfigurationSet(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func testAccCheckAwsSESConfigurationSetExists(n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] From ca305c859d894554be868f5b89800aaac4bc8571 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 11 Feb 2021 17:12:47 +0200 Subject: [PATCH 1126/1212] changelog --- .changelog/13972.txt | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.changelog/13972.txt b/.changelog/13972.txt index 17989b13b3d..e9d792a5282 100644 --- a/.changelog/13972.txt +++ b/.changelog/13972.txt @@ -1,3 +1,8 @@ ```release-note:enhancement resource/aws_ses_configuration_set: Add `arn` attribute. ``` + +```release-note:enhancement +resource/aws_ses_configuration_set: Add plan time validation to `name`. 
+```
+

From a138cd027d9d96663ab60b806a1f1e8a6ed49ecc Mon Sep 17 00:00:00 2001
From: Dirk Avery
Date: Thu, 11 Feb 2021 12:39:49 -0500
Subject: [PATCH 1127/1212] resource/ses_configuration_set: Cleanup after rebase

---
 aws/resource_aws_ses_configuration_set.go | 2 +-
 aws/resource_aws_ses_configuration_set_test.go | 4 ++--
 .../docs/r/ses_configuration_set.html.markdown | 17 +++++++++--------
 3 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/aws/resource_aws_ses_configuration_set.go b/aws/resource_aws_ses_configuration_set.go
index b9702ea7216..ac1e080d0fd 100644
--- a/aws/resource_aws_ses_configuration_set.go
+++ b/aws/resource_aws_ses_configuration_set.go
@@ -111,7 +111,7 @@ func resourceAwsSesConfigurationSetRead(d *schema.ResourceData, meta interface{}
 	arn := arn.ARN{
 		Partition: meta.(*AWSClient).partition,
-		Service:   "ses",
+		Service:   ses.ServiceName,
 		Region:    meta.(*AWSClient).region,
 		AccountID: meta.(*AWSClient).accountid,
 		Resource:  fmt.Sprintf("configuration-set/%s", d.Id()),
diff --git a/aws/resource_aws_ses_configuration_set_test.go b/aws/resource_aws_ses_configuration_set_test.go
index 0917a41e965..6279ff066be 100644
--- a/aws/resource_aws_ses_configuration_set_test.go
+++ b/aws/resource_aws_ses_configuration_set_test.go
@@ -83,7 +83,7 @@ func TestAccAWSSESConfigurationSet_basic(t *testing.T) {
 				Config: testAccAWSSESConfigurationSetBasicConfig(rName),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAwsSESConfigurationSetExists(resourceName),
-					testAccCheckResourceAttrRegionalARN(resourceName, "arn", "ses", fmt.Sprintf("configuration-set/%s", rName)),
+					testAccCheckResourceAttrRegionalARN(resourceName, "arn", ses.ServiceName, fmt.Sprintf("configuration-set/%s", rName)),
 					resource.TestCheckResourceAttr(resourceName, "name", rName),
 					resource.TestCheckResourceAttr(resourceName, "delivery_options.#", "0"),
 				),
@@ -271,7 +271,7 @@ func TestAccAWSSESConfigurationSet_disappears(t *testing.T) {
 		CheckDestroy: testAccCheckSESConfigurationSetDestroy,
 		Steps: []resource.TestStep{
 			{
-				Config: testAccAWSSESConfigurationSetConfig(rName),
+				Config: testAccAWSSESConfigurationSetBasicConfig(rName),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAwsSESConfigurationSetExists(resourceName),
 					testAccCheckResourceDisappears(testAccProvider, resourceAwsSesConfigurationSet(), resourceName),
diff --git a/website/docs/r/ses_configuration_set.html.markdown b/website/docs/r/ses_configuration_set.html.markdown
index 3648b2943cf..a45c051a980 100644
--- a/website/docs/r/ses_configuration_set.html.markdown
+++ b/website/docs/r/ses_configuration_set.html.markdown
@@ -8,7 +8,7 @@ description: |-
 
 # Resource: aws_ses_configuration_set
 
-Provides an SES configuration set resource
+Provides an SES configuration set resource.
 
 ## Example Usage
 
@@ -32,14 +32,15 @@ resource "aws_ses_configuration_set" "test" {
 
 ## Argument Reference
 
-The following arguments are supported:
+The following argument is required:
 
-* `delivery_options` - (Optional) A configuration block that specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). Detailed below.
-* `name` - (Required) The name of the configuration set
+* `name` - (Required) Name of the configuration set.
+
+The following argument is optional:
+
+* `delivery_options` - (Optional) Configuration block. Detailed below.
+
+### delivery_options
 
 * `tls_policy` - (Optional) Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). If the value is `Require`, messages are only delivered if a TLS connection can be established. If the value is `Optional`, messages can be delivered in plain text if a TLS connection can't be established. Valid values: `Require` or `Optional`. Defaults to `Optional`.
 
@@ -47,8 +48,8 @@ The `delivery_options` configuration block supports the following argument:
 
 In addition to all arguments above, the following attributes are exported:
 
-* `id` - The SES configuration set name.
-* `arn` - The SES configuration set ARN.
+* `arn` - SES configuration set ARN.
+* `id` - SES configuration set name.
 
 ## Import

From 0fee10d945f9fb50e1a7c39bcbda35299d4262dd Mon Sep 17 00:00:00 2001
From: Dirk Avery
Date: Thu, 11 Feb 2021 12:55:41 -0500
Subject: [PATCH 1128/1212] website/index: Remove duplicate

---
 website/docs/index.html.markdown | 1 -
 1 file changed, 1 deletion(-)

diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown
index 3ccf531e0a4..a75a97f3990 100644
--- a/website/docs/index.html.markdown
+++ b/website/docs/index.html.markdown
@@ -317,7 +317,6 @@ for more information about connecting to alternate AWS endpoints or AWS compatib
   - [`aws_s3_account_public_access_block` resource](/docs/providers/aws/r/s3_account_public_access_block.html)
   - [`aws_ses_active_receipt_rule_set` resource](/docs/providers/aws/r/ses_active_receipt_rule_set.html)
   - [`aws_ses_configuration_set` resource](/docs/providers/aws/r/ses_configuration_set.html)
-  - [`aws_ses_configuration_set` resource](/docs/providers/aws/r/ses_configuration_set.html)
   - [`aws_ses_domain_identity_verification` resource](/docs/providers/aws/r/ses_domain_identity_verification.html)
   - [`aws_ses_domain_identity` resource](/docs/providers/aws/r/ses_domain_identity.html)
   - [`aws_ses_email_identity` resource](/docs/providers/aws/r/ses_email_identity.html)

From 66647bf4524432acf6130c775f8354c1f293fc44 Mon Sep 17 00:00:00 2001
From: DrFaust92
Date: Tue, 21 Jan 2020 15:06:20 +0200
Subject: [PATCH 1129/1212] add plan time validation to `role_arn` and `target_arn`; refactor to read after create; add disappearing test case

---
 ...resource_aws_cloudwatch_log_destination.go | 40 +++++++--------
 ...rce_aws_cloudwatch_log_destination_test.go | 51 +++++++++++++++++--
 2 files changed, 65 insertions(+), 26 deletions(-)

diff --git a/aws/resource_aws_cloudwatch_log_destination.go b/aws/resource_aws_cloudwatch_log_destination.go
index f1433e2561c..310b5242a9b 100644
--- a/aws/resource_aws_cloudwatch_log_destination.go
+++ b/aws/resource_aws_cloudwatch_log_destination.go
@@ -32,13 +32,15 @@ func resourceAwsCloudWatchLogDestination() *schema.Resource {
 			},
 
 			"role_arn": {
-				Type:     schema.TypeString,
-				Required: true,
+				Type:         schema.TypeString,
+				Required:     true,
+				ValidateFunc: validateArn,
 			},
 
 			"target_arn": {
-				Type:     schema.TypeString,
-				Required: true,
+				Type:         schema.TypeString,
+				Required:     true,
+				ValidateFunc: validateArn,
 			},
 
 			"arn": {
@@ -53,21 +55,20 @@ func resourceAwsCloudWatchLogDestinationPut(d *schema.ResourceData, meta interfa
 	conn := meta.(*AWSClient).cloudwatchlogsconn
 
 	name := d.Get("name").(string)
-	role_arn := d.Get("role_arn").(string)
-	target_arn := d.Get("target_arn").(string)
+	roleArn := d.Get("role_arn").(string)
+	targetArn := d.Get("target_arn").(string)
 
 	params := &cloudwatchlogs.PutDestinationInput{
 		DestinationName: aws.String(name),
-		RoleArn:
aws.String(role_arn), - TargetArn: aws.String(target_arn), + RoleArn: aws.String(roleArn), + TargetArn: aws.String(targetArn), } - var resp *cloudwatchlogs.PutDestinationOutput var err error err = resource.Retry(3*time.Minute, func() *resource.RetryError { - resp, err = conn.PutDestination(params) + _, err = conn.PutDestination(params) - if isAWSErr(err, cloudwatchlogs.ErrCodeInvalidParameterException, "Could not deliver test message to specified") { + if isAWSErr(err, cloudwatchlogs.ErrCodeInvalidParameterException, "") { return resource.RetryableError(err) } if err != nil { @@ -76,20 +77,20 @@ func resourceAwsCloudWatchLogDestinationPut(d *schema.ResourceData, meta interfa return nil }) if isResourceTimeoutError(err) { - resp, err = conn.PutDestination(params) + _, err = conn.PutDestination(params) } if err != nil { return fmt.Errorf("Error putting cloudwatch log destination: %s", err) } d.SetId(name) - d.Set("arn", resp.Destination.Arn) - return nil + + return resourceAwsCloudWatchLogDestinationRead(d, meta) } func resourceAwsCloudWatchLogDestinationRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).cloudwatchlogsconn - name := d.Get("name").(string) - destination, exists, err := lookupCloudWatchLogDestination(conn, name, nil) + + destination, exists, err := lookupCloudWatchLogDestination(conn, d.Id(), nil) if err != nil { return err } @@ -99,7 +100,6 @@ func resourceAwsCloudWatchLogDestinationRead(d *schema.ResourceData, meta interf return nil } - d.SetId(name) d.Set("arn", destination.Arn) d.Set("role_arn", destination.RoleArn) d.Set("target_arn", destination.TargetArn) @@ -110,14 +110,12 @@ func resourceAwsCloudWatchLogDestinationRead(d *schema.ResourceData, meta interf func resourceAwsCloudWatchLogDestinationDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).cloudwatchlogsconn - name := d.Get("name").(string) - params := &cloudwatchlogs.DeleteDestinationInput{ - DestinationName: aws.String(name), + DestinationName: aws.String(d.Id()), } _, err := conn.DeleteDestination(params) if err != nil { - return fmt.Errorf("Error deleting Destination with name %s", name) + return fmt.Errorf("Error deleting Destination with name %s", d.Id()) } return nil diff --git a/aws/resource_aws_cloudwatch_log_destination_test.go b/aws/resource_aws_cloudwatch_log_destination_test.go index 030ff40670f..859c3fc546a 100644 --- a/aws/resource_aws_cloudwatch_log_destination_test.go +++ b/aws/resource_aws_cloudwatch_log_destination_test.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "regexp" "testing" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" @@ -13,6 +14,8 @@ import ( func TestAccAWSCloudwatchLogDestination_basic(t *testing.T) { var destination cloudwatchlogs.Destination resourceName := "aws_cloudwatch_log_destination.test" + streamResourceName := "aws_kinesis_stream.test" + roleResourceName := "aws_iam_role.test" rstring := acctest.RandString(5) resource.ParallelTest(t, resource.TestCase{ @@ -24,6 +27,9 @@ func TestAccAWSCloudwatchLogDestination_basic(t *testing.T) { Config: testAccAWSCloudwatchLogDestinationConfig(rstring), Check: resource.ComposeTestCheckFunc( testAccCheckAWSCloudwatchLogDestinationExists(resourceName, &destination), + resource.TestCheckResourceAttrPair(resourceName, "target_arn", streamResourceName, "arn"), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", roleResourceName, "arn"), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "logs", regexp.MustCompile(`destination:.+`)), ), }, { @@ -35,6 
+41,29 @@ func TestAccAWSCloudwatchLogDestination_basic(t *testing.T) { }) } +func TestAccAWSCloudwatchLogDestination_disappears(t *testing.T) { + var destination cloudwatchlogs.Destination + resourceName := "aws_cloudwatch_log_destination.test" + + rstring := acctest.RandString(5) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCloudwatchLogDestinationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCloudwatchLogDestinationConfig(rstring), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSCloudwatchLogDestinationExists(resourceName, &destination), + testAccCheckAWSCloudwatchLogDestinationDisappears(&destination), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func testAccCheckAWSCloudwatchLogDestinationDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).cloudwatchlogsconn @@ -78,10 +107,22 @@ func testAccCheckAWSCloudwatchLogDestinationExists(n string, d *cloudwatchlogs.D } } +func testAccCheckAWSCloudwatchLogDestinationDisappears(d *cloudwatchlogs.Destination) resource.TestCheckFunc { + return func(s *terraform.State) error { + + conn := testAccProvider.Meta().(*AWSClient).cloudwatchlogsconn + input := &cloudwatchlogs.DeleteDestinationInput{DestinationName: d.DestinationName} + + _, err := conn.DeleteDestination(input) + + return err + } +} + func testAccAWSCloudwatchLogDestinationConfig(rstring string) string { return fmt.Sprintf(` resource "aws_kinesis_stream" "test" { - name = "RootAccess_%s" + name = "RootAccess_%[1]s" shard_count = 1 } @@ -107,7 +148,7 @@ data "aws_iam_policy_document" "role" { } resource "aws_iam_role" "test" { - name = "CWLtoKinesisRole_%s" + name = "CWLtoKinesisRole_%[1]s" assume_role_policy = data.aws_iam_policy_document.role.json } @@ -138,13 +179,13 @@ data "aws_iam_policy_document" "policy" { } resource "aws_iam_role_policy" "test" { - name = "Permissions-Policy-For-CWL_%s" + name = "Permissions-Policy-For-CWL_%[1]s" role = aws_iam_role.test.id policy = data.aws_iam_policy_document.policy.json } resource "aws_cloudwatch_log_destination" "test" { - name = "testDestination_%s" + name = "testDestination_%[1]s" target_arn = aws_kinesis_stream.test.arn role_arn = aws_iam_role.test.arn depends_on = [aws_iam_role_policy.test] @@ -176,5 +217,5 @@ resource "aws_cloudwatch_log_destination_policy" "test" { destination_name = aws_cloudwatch_log_destination.test.name access_policy = data.aws_iam_policy_document.access.json } -`, rstring, rstring, rstring, rstring) +`, rstring) } From 07edef39cb99aafa4e5b45333ecbd2cf4df5b671 Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Fri, 8 May 2020 09:02:18 +0300 Subject: [PATCH 1130/1212] re use Disappears func --- ...resource_aws_cloudwatch_log_destination_test.go | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/aws/resource_aws_cloudwatch_log_destination_test.go b/aws/resource_aws_cloudwatch_log_destination_test.go index 859c3fc546a..5b8348a2179 100644 --- a/aws/resource_aws_cloudwatch_log_destination_test.go +++ b/aws/resource_aws_cloudwatch_log_destination_test.go @@ -56,7 +56,7 @@ func TestAccAWSCloudwatchLogDestination_disappears(t *testing.T) { Config: testAccAWSCloudwatchLogDestinationConfig(rstring), Check: resource.ComposeTestCheckFunc( testAccCheckAWSCloudwatchLogDestinationExists(resourceName, &destination), - testAccCheckAWSCloudwatchLogDestinationDisappears(&destination), + testAccCheckResourceDisappears(testAccProvider, 
resourceAwsCloudWatchLogDestination(), resourceName),
 				),
 				ExpectNonEmptyPlan: true,
 			},
@@ -107,18 +107,6 @@ func testAccCheckAWSCloudwatchLogDestinationExists(n string, d *cloudwatchlogs.D
 	}
 }
 
-func testAccCheckAWSCloudwatchLogDestinationDisappears(d *cloudwatchlogs.Destination) resource.TestCheckFunc {
-	return func(s *terraform.State) error {
-
-		conn := testAccProvider.Meta().(*AWSClient).cloudwatchlogsconn
-		input := &cloudwatchlogs.DeleteDestinationInput{DestinationName: d.DestinationName}
-
-		_, err := conn.DeleteDestination(input)
-
-		return err
-	}
-}
-
 func testAccAWSCloudwatchLogDestinationConfig(rstring string) string {
 	return fmt.Sprintf(`
 resource "aws_kinesis_stream" "test" {

From b83043398e098e69699158c64585b3e5df3031ee Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Thu, 11 Feb 2021 18:15:35 +0200
Subject: [PATCH 1131/1212] validation + changelog

---
 .changelog/11687.txt | 3 +++
 aws/resource_aws_cloudwatch_log_destination.go | 6 ++++++
 2 files changed, 9 insertions(+)
 create mode 100644 .changelog/11687.txt

diff --git a/.changelog/11687.txt b/.changelog/11687.txt
new file mode 100644
index 00000000000..4c9095f5bf1
--- /dev/null
+++ b/.changelog/11687.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+resource/aws_cloudwatch_log_destination: Add plan time validation to `role_arn`, `name`, and `target_arn`.
+```
diff --git a/aws/resource_aws_cloudwatch_log_destination.go b/aws/resource_aws_cloudwatch_log_destination.go
index 310b5242a9b..6f32e9b10f2 100644
--- a/aws/resource_aws_cloudwatch_log_destination.go
+++ b/aws/resource_aws_cloudwatch_log_destination.go
@@ -2,12 +2,14 @@ package aws
 
 import (
 	"fmt"
+	"regexp"
 	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
 )
 
 func resourceAwsCloudWatchLogDestination() *schema.Resource {
@@ -29,6 +31,10 @@ func resourceAwsCloudWatchLogDestination() *schema.Resource {
 				Type:     schema.TypeString,
 				Required: true,
 				ForceNew: true,
+				ValidateFunc: validation.Any(
+					validation.StringLenBetween(1, 512),
+					validation.StringMatch(regexp.MustCompile(`[^:*]*`), ""),
+				),
 			},
 
 			"role_arn": {

From 587ff20102b91f59874839c6374198ab752a4995 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Wed, 1 Jul 2020 12:07:35 -0400
Subject: [PATCH 1132/1212] d/aws_route_table: Rework 'TestAccDataSourceAwsRouteTable_basic'.

Acceptance test output:

$ make testacc TEST=./aws/ TESTARGS='-run=TestAccDataSourceAwsRouteTable_basic'
==> Checking that code complies with gofmt requirements...
TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccDataSourceAwsRouteTable_basic -timeout 120m === RUN TestAccDataSourceAwsRouteTable_basic === PAUSE TestAccDataSourceAwsRouteTable_basic === CONT TestAccDataSourceAwsRouteTable_basic --- PASS: TestAccDataSourceAwsRouteTable_basic (52.54s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 52.620s --- aws/data_source_aws_route_table_test.go | 223 +++++++++--------------- 1 file changed, 83 insertions(+), 140 deletions(-) diff --git a/aws/data_source_aws_route_table_test.go b/aws/data_source_aws_route_table_test.go index 05faae829c5..1e674a57dfc 100644 --- a/aws/data_source_aws_route_table_test.go +++ b/aws/data_source_aws_route_table_test.go @@ -1,134 +1,87 @@ package aws import ( + "fmt" "testing" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccDataSourceAwsRouteTable_basic(t *testing.T) { rtResourceName := "aws_route_table.test" snResourceName := "aws_subnet.test" vpcResourceName := "aws_vpc.test" - gwResourceName := "aws_internet_gateway.test" - ds1ResourceName := "data.aws_route_table.by_tag" - ds2ResourceName := "data.aws_route_table.by_filter" - ds3ResourceName := "data.aws_route_table.by_subnet" - ds4ResourceName := "data.aws_route_table.by_id" - ds5ResourceName := "data.aws_route_table.by_gateway" - tagValue := "terraform-testacc-routetable-data-source" + igwResourceName := "aws_internet_gateway.test" + datasource1Name := "data.aws_route_table.by_tag" + datasource2Name := "data.aws_route_table.by_filter" + datasource3Name := "data.aws_route_table.by_subnet" + datasource4Name := "data.aws_route_table.by_id" + datasource5Name := "data.aws_route_table.by_gateway" + rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccDataSourceAwsRouteTableGroupConfig, + Config: testAccDataSourceAwsRouteTableConfigBasic(rName), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrPair( - ds1ResourceName, "id", rtResourceName, "id"), - resource.TestCheckResourceAttrPair( - ds1ResourceName, "route_table_id", rtResourceName, "id"), - resource.TestCheckResourceAttrPair( - ds1ResourceName, "owner_id", rtResourceName, "owner_id"), - resource.TestCheckResourceAttrPair( - ds1ResourceName, "vpc_id", vpcResourceName, "id"), - resource.TestCheckNoResourceAttr( - ds1ResourceName, "subnet_id"), - resource.TestCheckNoResourceAttr( - ds1ResourceName, "gateway_id"), - resource.TestCheckResourceAttr( - ds1ResourceName, "associations.#", "2"), - testAccCheckListHasSomeElementAttrPair( - ds1ResourceName, "associations", "subnet_id", snResourceName, "id"), - testAccCheckListHasSomeElementAttrPair( - ds1ResourceName, "associations", "gateway_id", gwResourceName, "id"), - resource.TestCheckResourceAttr( - ds1ResourceName, "tags.Name", tagValue), - - resource.TestCheckResourceAttrPair( - ds2ResourceName, "id", rtResourceName, "id"), - resource.TestCheckResourceAttrPair( - ds2ResourceName, "route_table_id", rtResourceName, "id"), - resource.TestCheckResourceAttrPair( - ds2ResourceName, "owner_id", rtResourceName, "owner_id"), - resource.TestCheckResourceAttrPair( - ds2ResourceName, "vpc_id", vpcResourceName, "id"), - resource.TestCheckNoResourceAttr( - ds2ResourceName, "subnet_id"), - resource.TestCheckNoResourceAttr( - 
ds2ResourceName, "gateway_id"), - resource.TestCheckResourceAttr( - ds2ResourceName, "associations.#", "2"), - testAccCheckListHasSomeElementAttrPair( - ds2ResourceName, "associations", "subnet_id", snResourceName, "id"), - testAccCheckListHasSomeElementAttrPair( - ds2ResourceName, "associations", "gateway_id", gwResourceName, "id"), - resource.TestCheckResourceAttr( - ds2ResourceName, "tags.Name", tagValue), - - resource.TestCheckResourceAttrPair( - ds3ResourceName, "id", rtResourceName, "id"), - resource.TestCheckResourceAttrPair( - ds3ResourceName, "route_table_id", rtResourceName, "id"), - resource.TestCheckResourceAttrPair( - ds3ResourceName, "owner_id", rtResourceName, "owner_id"), - resource.TestCheckResourceAttrPair( - ds3ResourceName, "vpc_id", vpcResourceName, "id"), - resource.TestCheckResourceAttrPair( - ds3ResourceName, "subnet_id", snResourceName, "id"), - resource.TestCheckNoResourceAttr( - ds3ResourceName, "gateway_id"), - resource.TestCheckResourceAttr( - ds3ResourceName, "associations.#", "2"), - testAccCheckListHasSomeElementAttrPair( - ds3ResourceName, "associations", "subnet_id", snResourceName, "id"), - testAccCheckListHasSomeElementAttrPair( - ds3ResourceName, "associations", "gateway_id", gwResourceName, "id"), - resource.TestCheckResourceAttr( - ds3ResourceName, "tags.Name", tagValue), - - resource.TestCheckResourceAttrPair( - ds4ResourceName, "id", rtResourceName, "id"), - resource.TestCheckResourceAttrPair( - ds4ResourceName, "route_table_id", rtResourceName, "id"), - resource.TestCheckResourceAttrPair( - ds4ResourceName, "owner_id", rtResourceName, "owner_id"), - resource.TestCheckResourceAttrPair( - ds4ResourceName, "vpc_id", vpcResourceName, "id"), - resource.TestCheckNoResourceAttr( - ds4ResourceName, "subnet_id"), - resource.TestCheckNoResourceAttr( - ds4ResourceName, "gateway_id"), - resource.TestCheckResourceAttr( - ds4ResourceName, "associations.#", "2"), - testAccCheckListHasSomeElementAttrPair( - ds4ResourceName, "associations", "subnet_id", snResourceName, "id"), - testAccCheckListHasSomeElementAttrPair( - ds4ResourceName, "associations", "gateway_id", gwResourceName, "id"), - resource.TestCheckResourceAttr( - ds4ResourceName, "tags.Name", tagValue), - - resource.TestCheckResourceAttrPair( - ds5ResourceName, "id", rtResourceName, "id"), - resource.TestCheckResourceAttrPair( - ds5ResourceName, "route_table_id", rtResourceName, "id"), - resource.TestCheckResourceAttrPair( - ds5ResourceName, "owner_id", rtResourceName, "owner_id"), - resource.TestCheckResourceAttrPair( - ds5ResourceName, "vpc_id", vpcResourceName, "id"), - resource.TestCheckNoResourceAttr( - ds5ResourceName, "subnet_id"), - resource.TestCheckResourceAttrPair( - ds5ResourceName, "gateway_id", gwResourceName, "id"), - resource.TestCheckResourceAttr( - ds5ResourceName, "associations.#", "2"), - testAccCheckListHasSomeElementAttrPair( - ds5ResourceName, "associations", "subnet_id", snResourceName, "id"), - testAccCheckListHasSomeElementAttrPair( - ds5ResourceName, "associations", "gateway_id", gwResourceName, "id"), - resource.TestCheckResourceAttr( - ds5ResourceName, "tags.Name", tagValue), + // By tags. 
+ resource.TestCheckResourceAttrPair(datasource1Name, "id", rtResourceName, "id"), + resource.TestCheckResourceAttrPair(datasource1Name, "route_table_id", rtResourceName, "id"), + resource.TestCheckResourceAttrPair(datasource1Name, "owner_id", rtResourceName, "owner_id"), + resource.TestCheckResourceAttrPair(datasource1Name, "vpc_id", vpcResourceName, "id"), + resource.TestCheckNoResourceAttr(datasource1Name, "subnet_id"), + resource.TestCheckNoResourceAttr(datasource1Name, "gateway_id"), + resource.TestCheckResourceAttr(datasource1Name, "associations.#", "2"), + testAccCheckListHasSomeElementAttrPair(datasource1Name, "associations", "subnet_id", snResourceName, "id"), + testAccCheckListHasSomeElementAttrPair(datasource1Name, "associations", "gateway_id", igwResourceName, "id"), + resource.TestCheckResourceAttr(datasource1Name, "tags.Name", rName), + // By filter. + resource.TestCheckResourceAttrPair(datasource2Name, "id", rtResourceName, "id"), + resource.TestCheckResourceAttrPair(datasource2Name, "route_table_id", rtResourceName, "id"), + resource.TestCheckResourceAttrPair(datasource2Name, "owner_id", rtResourceName, "owner_id"), + resource.TestCheckResourceAttrPair(datasource2Name, "vpc_id", vpcResourceName, "id"), + resource.TestCheckNoResourceAttr(datasource2Name, "subnet_id"), + resource.TestCheckNoResourceAttr(datasource2Name, "gateway_id"), + resource.TestCheckResourceAttr(datasource2Name, "associations.#", "2"), + testAccCheckListHasSomeElementAttrPair(datasource2Name, "associations", "subnet_id", snResourceName, "id"), + testAccCheckListHasSomeElementAttrPair(datasource2Name, "associations", "gateway_id", igwResourceName, "id"), + resource.TestCheckResourceAttr(datasource2Name, "tags.Name", rName), + // By subnet ID. + resource.TestCheckResourceAttrPair(datasource3Name, "id", rtResourceName, "id"), + resource.TestCheckResourceAttrPair(datasource3Name, "route_table_id", rtResourceName, "id"), + resource.TestCheckResourceAttrPair(datasource3Name, "owner_id", rtResourceName, "owner_id"), + resource.TestCheckResourceAttrPair(datasource3Name, "vpc_id", vpcResourceName, "id"), + resource.TestCheckResourceAttrPair(datasource3Name, "subnet_id", snResourceName, "id"), + resource.TestCheckNoResourceAttr(datasource3Name, "gateway_id"), + resource.TestCheckResourceAttr(datasource3Name, "associations.#", "2"), + testAccCheckListHasSomeElementAttrPair(datasource3Name, "associations", "subnet_id", snResourceName, "id"), + testAccCheckListHasSomeElementAttrPair(datasource3Name, "associations", "gateway_id", igwResourceName, "id"), + resource.TestCheckResourceAttr(datasource3Name, "tags.Name", rName), + // By route table ID. + resource.TestCheckResourceAttrPair(datasource4Name, "id", rtResourceName, "id"), + resource.TestCheckResourceAttrPair(datasource4Name, "route_table_id", rtResourceName, "id"), + resource.TestCheckResourceAttrPair(datasource4Name, "owner_id", rtResourceName, "owner_id"), + resource.TestCheckResourceAttrPair(datasource4Name, "vpc_id", vpcResourceName, "id"), + resource.TestCheckNoResourceAttr(datasource4Name, "subnet_id"), + resource.TestCheckNoResourceAttr(datasource4Name, "gateway_id"), + resource.TestCheckResourceAttr(datasource4Name, "associations.#", "2"), + testAccCheckListHasSomeElementAttrPair(datasource4Name, "associations", "subnet_id", snResourceName, "id"), + testAccCheckListHasSomeElementAttrPair(datasource4Name, "associations", "gateway_id", igwResourceName, "id"), + resource.TestCheckResourceAttr(datasource4Name, "tags.Name", rName), + // By gateway ID. 
+ resource.TestCheckResourceAttrPair(datasource5Name, "id", rtResourceName, "id"), + resource.TestCheckResourceAttrPair(datasource5Name, "route_table_id", rtResourceName, "id"), + resource.TestCheckResourceAttrPair(datasource5Name, "owner_id", rtResourceName, "owner_id"), + resource.TestCheckResourceAttrPair(datasource5Name, "vpc_id", vpcResourceName, "id"), + resource.TestCheckNoResourceAttr(datasource5Name, "subnet_id"), + resource.TestCheckResourceAttrPair(datasource5Name, "gateway_id", igwResourceName, "id"), + resource.TestCheckResourceAttr(datasource5Name, "associations.#", "2"), + testAccCheckListHasSomeElementAttrPair(datasource5Name, "associations", "subnet_id", snResourceName, "id"), + testAccCheckListHasSomeElementAttrPair(datasource5Name, "associations", "gateway_id", igwResourceName, "id"), + resource.TestCheckResourceAttr(datasource5Name, "tags.Name", rName), ), ExpectNonEmptyPlan: true, }, @@ -158,12 +111,13 @@ func TestAccDataSourceAwsRouteTable_main(t *testing.T) { }) } -const testAccDataSourceAwsRouteTableGroupConfig = ` +func testAccDataSourceAwsRouteTableConfigBasic(rName string) string { + return fmt.Sprintf(` resource "aws_vpc" "test" { cidr_block = "172.16.0.0/16" tags = { - Name = "terraform-testacc-route-table-data-source" + Name = %[1]q } } @@ -172,7 +126,7 @@ resource "aws_subnet" "test" { vpc_id = aws_vpc.test.id tags = { - Name = "tf-acc-route-table-data-source" + Name = %[1]q } } @@ -180,7 +134,7 @@ resource "aws_route_table" "test" { vpc_id = aws_vpc.test.id tags = { - Name = "terraform-testacc-routetable-data-source" + Name = %[1]q } } @@ -193,7 +147,7 @@ resource "aws_internet_gateway" "test" { vpc_id = aws_vpc.test.id tags = { - Name = "terraform-testacc-routetable-data-source" + Name = %[1]q } } @@ -204,14 +158,11 @@ resource "aws_route_table_association" "b" { data "aws_route_table" "by_filter" { filter { - name = "association.route-table-association-id" + name = "association.route-table-association-id" values = [aws_route_table_association.a.id] } - depends_on = [ - "aws_route_table_association.a", - "aws_route_table_association.b" - ] + depends_on = [aws_route_table_association.a, aws_route_table_association.b] } data "aws_route_table" "by_tag" { @@ -219,36 +170,28 @@ data "aws_route_table" "by_tag" { Name = aws_route_table.test.tags["Name"] } - depends_on = [ - "aws_route_table_association.a", - "aws_route_table_association.b" - ] + depends_on = [aws_route_table_association.a, aws_route_table_association.b] } data "aws_route_table" "by_subnet" { subnet_id = aws_subnet.test.id - depends_on = [ - "aws_route_table_association.a", - "aws_route_table_association.b" - ] + + depends_on = [aws_route_table_association.a, aws_route_table_association.b] } data "aws_route_table" "by_gateway" { gateway_id = aws_internet_gateway.test.id - depends_on = [ - "aws_route_table_association.a", - "aws_route_table_association.b" - ] + + depends_on = [aws_route_table_association.a, aws_route_table_association.b] } data "aws_route_table" "by_id" { route_table_id = aws_route_table.test.id - depends_on = [ - "aws_route_table_association.a", - "aws_route_table_association.b" - ] + + depends_on = [aws_route_table_association.a, aws_route_table_association.b] +} +`, rName) } -` const testAccDataSourceAwsRouteTableMainRoute = ` resource "aws_vpc" "test" { From 8869beb7e4aa7aaee5c3eb56cdd57566d219ab6d Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 1 Jul 2020 12:12:57 -0400 Subject: [PATCH 1133/1212] d/aws_route_table: Rework 'TestAccDataSourceAwsRouteTable_main'. 
Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccDataSourceAwsRouteTable_main' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccDataSourceAwsRouteTable_main -timeout 120m === RUN TestAccDataSourceAwsRouteTable_main === PAUSE TestAccDataSourceAwsRouteTable_main === CONT TestAccDataSourceAwsRouteTable_main --- PASS: TestAccDataSourceAwsRouteTable_main (37.06s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 37.168s --- aws/data_source_aws_route_table_test.go | 26 ++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/aws/data_source_aws_route_table_test.go b/aws/data_source_aws_route_table_test.go index 1e674a57dfc..44be08279a9 100644 --- a/aws/data_source_aws_route_table_test.go +++ b/aws/data_source_aws_route_table_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccDataSourceAwsRouteTable_basic(t *testing.T) { @@ -90,21 +90,19 @@ func TestAccDataSourceAwsRouteTable_basic(t *testing.T) { } func TestAccDataSourceAwsRouteTable_main(t *testing.T) { - dsResourceName := "data.aws_route_table.by_filter" + datasourceName := "data.aws_route_table.test" + rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccDataSourceAwsRouteTableMainRoute, + Config: testAccDataSourceAwsRouteTableConfigMain(rName), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet( - dsResourceName, "id"), - resource.TestCheckResourceAttrSet( - dsResourceName, "vpc_id"), - resource.TestCheckResourceAttr( - dsResourceName, "associations.0.main", "true"), + resource.TestCheckResourceAttrSet(datasourceName, "id"), + resource.TestCheckResourceAttrSet(datasourceName, "vpc_id"), + resource.TestCheckResourceAttr(datasourceName, "associations.0.main", "true"), ), }, }, @@ -193,16 +191,17 @@ data "aws_route_table" "by_id" { `, rName) } -const testAccDataSourceAwsRouteTableMainRoute = ` +func testAccDataSourceAwsRouteTableConfigMain(rName string) string { + return fmt.Sprintf(` resource "aws_vpc" "test" { cidr_block = "172.16.0.0/16" tags = { - Name = "terraform-testacc-route-table-data-source-main-route" + Name = %[1]q } } -data "aws_route_table" "by_filter" { +data "aws_route_table" "test" { filter { name = "association.main" values = ["true"] @@ -213,4 +212,5 @@ data "aws_route_table" "by_filter" { values = [aws_vpc.test.id] } } -` +`, rName) +} From 0ccd54c880ab9835df6e606a338b7ad76b6b638f Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 23 Sep 2020 08:54:16 -0400 Subject: [PATCH 1134/1212] Fix Acceptance Test Linting / terrafmt error. 
---
 aws/data_source_aws_route_table_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/aws/data_source_aws_route_table_test.go b/aws/data_source_aws_route_table_test.go
index 44be08279a9..b11309a9b05 100644
--- a/aws/data_source_aws_route_table_test.go
+++ b/aws/data_source_aws_route_table_test.go
@@ -156,7 +156,7 @@ resource "aws_route_table_association" "b" {
 
 data "aws_route_table" "by_filter" {
   filter {
-    name = "association.route-table-association-id"
+    name   = "association.route-table-association-id"
     values = [aws_route_table_association.a.id]
   }

From 92f2f199cae37b19dc530d8aea8ccdc9dc753ba5 Mon Sep 17 00:00:00 2001
From: Ilia Lazebnik
Date: Thu, 11 Feb 2021 20:17:36 +0200
Subject: [PATCH 1135/1212] resource/aws_ses_receipt_rule: Add arn attribute and validations (#13960)

References:

- https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonses.html#amazonses-resources-for-iam-policies
- https://docs.aws.amazon.com/ses/latest/APIReference/API_ReceiptRule.html
- https://docs.aws.amazon.com/sdk-for-go/api/service/ses/#pkg-constants

Output from acceptance testing in AWS Commercial:

```
--- PASS: TestAccAWSSESReceiptRule_actions (16.74s)
--- PASS: TestAccAWSSESReceiptRule_basic (17.25s)
--- PASS: TestAccAWSSESReceiptRule_order (19.23s)
--- PASS: TestAccAWSSESReceiptRule_disappears (24.55s)
--- PASS: TestAccAWSSESReceiptRule_s3Action (29.53s)
```

Output from acceptance testing in AWS GovCloud (US):

```
--- SKIP: TestAccAWSSESReceiptRule_disappears (2.97s)
--- SKIP: TestAccAWSSESReceiptRule_order (2.97s)
--- SKIP: TestAccAWSSESReceiptRule_s3Action (2.97s)
--- SKIP: TestAccAWSSESReceiptRule_actions (3.01s)
--- SKIP: TestAccAWSSESReceiptRule_basic (3.25s)
```
---
 .changelog/13960.txt | 7 +
 aws/resource_aws_ses_receipt_rule.go | 145 +++++++++++-------
 aws/resource_aws_ses_receipt_rule_test.go | 125 ++++++++-------
 website/docs/index.html.markdown | 1 +
 website/docs/r/ses_receipt_rule.html.markdown | 7 +
 5 files changed, 176 insertions(+), 109 deletions(-)
 create mode 100644 .changelog/13960.txt

diff --git a/.changelog/13960.txt b/.changelog/13960.txt
new file mode 100644
index 00000000000..5ec32e64167
--- /dev/null
+++ b/.changelog/13960.txt
@@ -0,0 +1,7 @@
+```release-note:enhancement
+resource/aws_ses_receipt_rule: Add `arn` attribute
+```
+
+```release-note:enhancement
+resource/aws_ses_receipt_rule: Add plan time validations for `name`, `tls_policy`, `add_header_action.header_name`, `add_header_action.header_value`, `bounce_action.topic_arn`, `lambda_action.function_arn`, `lambda_action.topic_arn`, `lambda_action.invocation_type`, `s3_action.topic_arn`, `sns_action.topic_arn`, `stop_action.scope`, `stop_action.topic_arn`, `workmail_action.topic_arn`, and `workmail_action.organization_arn` attributes
+```
diff --git a/aws/resource_aws_ses_receipt_rule.go b/aws/resource_aws_ses_receipt_rule.go
index 653c403699f..d8a1dfce9b9 100644
--- a/aws/resource_aws_ses_receipt_rule.go
+++ b/aws/resource_aws_ses_receipt_rule.go
@@ -4,10 +4,12 @@ import (
 	"bytes"
 	"fmt"
 	"log"
+	"regexp"
 	"sort"
 	"strings"
 
 	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/arn"
 	"github.com/aws/aws-sdk-go/service/ses"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
 )
@@ -25,10 +27,20 @@ func resourceAwsSesReceiptRule() *schema.Resource {
 		},
 
 		Schema: map[string]*schema.Schema{
+			"arn": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
 			"name": {
 				Type:     schema.TypeString,
 				Required: true,
 				ForceNew:
true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 64), + validation.StringMatch(regexp.MustCompile(`^[0-9a-zA-Z_-]+$`), "must contain only alphanumeric, underscore, and hyphen characters"), + validation.StringMatch(regexp.MustCompile(`^[0-9a-zA-Z]`), "must begin with an alphanumeric character"), + validation.StringMatch(regexp.MustCompile(`[0-9a-zA-Z]$`), "must end with an alphanumeric character"), + ), }, "rule_set_name": { @@ -45,26 +57,26 @@ func resourceAwsSesReceiptRule() *schema.Resource { "enabled": { Type: schema.TypeBool, Optional: true, - Computed: true, + Default: false, }, "recipients": { Type: schema.TypeSet, Elem: &schema.Schema{Type: schema.TypeString}, Optional: true, - Set: schema.HashString, }, "scan_enabled": { Type: schema.TypeBool, Optional: true, - Computed: true, + Default: false, }, "tls_policy": { - Type: schema.TypeString, - Optional: true, - Computed: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(ses.TlsPolicy_Values(), false), }, "add_header_action": { @@ -75,11 +87,16 @@ func resourceAwsSesReceiptRule() *schema.Resource { "header_name": { Type: schema.TypeString, Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 50), + validation.StringMatch(regexp.MustCompile(`^[0-9a-zA-Z-]+$`), "must contain only alphanumeric and dash characters"), + ), }, "header_value": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 2048), }, "position": { @@ -125,8 +142,9 @@ func resourceAwsSesReceiptRule() *schema.Resource { }, "topic_arn": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, }, "position": { @@ -162,19 +180,22 @@ func resourceAwsSesReceiptRule() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "function_arn": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, }, "invocation_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(ses.InvocationType_Values(), false), }, "topic_arn": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, }, "position": { @@ -224,8 +245,9 @@ func resourceAwsSesReceiptRule() *schema.Resource { }, "topic_arn": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, }, "position": { @@ -264,8 +286,9 @@ func resourceAwsSesReceiptRule() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "topic_arn": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, }, "position": { @@ -290,13 +313,15 @@ func resourceAwsSesReceiptRule() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "scope": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(ses.StopScope_Values(), false), }, "topic_arn": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, }, "position": { @@ -326,13 +351,15 @@ func resourceAwsSesReceiptRule() *schema.Resource { Elem: &schema.Resource{ Schema:
map[string]*schema.Schema{ "organization_arn": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, }, "topic_arn": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, }, "position": { @@ -394,7 +421,7 @@ func resourceAwsSesReceiptRuleCreate(d *schema.ResourceData, meta interface{}) e d.SetId(d.Get("name").(string)) - return resourceAwsSesReceiptRuleUpdate(d, meta) + return resourceAwsSesReceiptRuleRead(d, meta) } func resourceAwsSesReceiptRuleUpdate(d *schema.ResourceData, meta interface{}) error { @@ -429,9 +456,10 @@ func resourceAwsSesReceiptRuleUpdate(d *schema.ResourceData, meta interface{}) e func resourceAwsSesReceiptRuleRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).sesconn + ruleSetName := d.Get("rule_set_name").(string) describeOpts := &ses.DescribeReceiptRuleInput{ RuleName: aws.String(d.Id()), - RuleSetName: aws.String(d.Get("rule_set_name").(string)), + RuleSetName: aws.String(ruleSetName), } response, err := conn.DescribeReceiptRule(describeOpts) @@ -450,7 +478,7 @@ func resourceAwsSesReceiptRuleRead(d *schema.ResourceData, meta interface{}) err } d.Set("enabled", response.Rule.Enabled) - d.Set("recipients", flattenStringList(response.Rule.Recipients)) + d.Set("recipients", flattenStringSet(response.Rule.Recipients)) d.Set("scan_enabled", response.Rule.ScanEnabled) d.Set("tls_policy", response.Rule.TlsPolicy) @@ -465,8 +493,8 @@ func resourceAwsSesReceiptRuleRead(d *schema.ResourceData, meta interface{}) err for i, element := range response.Rule.Actions { if element.AddHeaderAction != nil { addHeaderAction := map[string]interface{}{ - "header_name": *element.AddHeaderAction.HeaderName, - "header_value": *element.AddHeaderAction.HeaderValue, + "header_name": aws.StringValue(element.AddHeaderAction.HeaderName), + "header_value": aws.StringValue(element.AddHeaderAction.HeaderValue), "position": i + 1, } addHeaderActionList = append(addHeaderActionList, addHeaderAction) @@ -474,18 +502,18 @@ func resourceAwsSesReceiptRuleRead(d *schema.ResourceData, meta interface{}) err if element.BounceAction != nil { bounceAction := map[string]interface{}{ - "message": *element.BounceAction.Message, - "sender": *element.BounceAction.Sender, - "smtp_reply_code": *element.BounceAction.SmtpReplyCode, + "message": aws.StringValue(element.BounceAction.Message), + "sender": aws.StringValue(element.BounceAction.Sender), + "smtp_reply_code": aws.StringValue(element.BounceAction.SmtpReplyCode), "position": i + 1, } if element.BounceAction.StatusCode != nil { - bounceAction["status_code"] = *element.BounceAction.StatusCode + bounceAction["status_code"] = aws.StringValue(element.BounceAction.StatusCode) } if element.BounceAction.TopicArn != nil { - bounceAction["topic_arn"] = *element.BounceAction.TopicArn + bounceAction["topic_arn"] = aws.StringValue(element.BounceAction.TopicArn) } bounceActionList = append(bounceActionList, bounceAction) @@ -493,16 +521,16 @@ func resourceAwsSesReceiptRuleRead(d *schema.ResourceData, meta interface{}) err if element.LambdaAction != nil { lambdaAction := map[string]interface{}{ - "function_arn": *element.LambdaAction.FunctionArn, + "function_arn": aws.StringValue(element.LambdaAction.FunctionArn), "position": i + 1, } if element.LambdaAction.InvocationType != nil { - lambdaAction["invocation_type"] = *element.LambdaAction.InvocationType + lambdaAction["invocation_type"] = 
aws.StringValue(element.LambdaAction.InvocationType) } if element.LambdaAction.TopicArn != nil { - lambdaAction["topic_arn"] = *element.LambdaAction.TopicArn + lambdaAction["topic_arn"] = aws.StringValue(element.LambdaAction.TopicArn) } lambdaActionList = append(lambdaActionList, lambdaAction) @@ -510,20 +538,20 @@ func resourceAwsSesReceiptRuleRead(d *schema.ResourceData, meta interface{}) err if element.S3Action != nil { s3Action := map[string]interface{}{ - "bucket_name": *element.S3Action.BucketName, + "bucket_name": aws.StringValue(element.S3Action.BucketName), "position": i + 1, } if element.S3Action.KmsKeyArn != nil { - s3Action["kms_key_arn"] = *element.S3Action.KmsKeyArn + s3Action["kms_key_arn"] = aws.StringValue(element.S3Action.KmsKeyArn) } if element.S3Action.ObjectKeyPrefix != nil { - s3Action["object_key_prefix"] = *element.S3Action.ObjectKeyPrefix + s3Action["object_key_prefix"] = aws.StringValue(element.S3Action.ObjectKeyPrefix) } if element.S3Action.TopicArn != nil { - s3Action["topic_arn"] = *element.S3Action.TopicArn + s3Action["topic_arn"] = aws.StringValue(element.S3Action.TopicArn) } s3ActionList = append(s3ActionList, s3Action) @@ -531,7 +559,7 @@ func resourceAwsSesReceiptRuleRead(d *schema.ResourceData, meta interface{}) err if element.SNSAction != nil { snsAction := map[string]interface{}{ - "topic_arn": *element.SNSAction.TopicArn, + "topic_arn": aws.StringValue(element.SNSAction.TopicArn), "position": i + 1, } @@ -540,12 +568,12 @@ func resourceAwsSesReceiptRuleRead(d *schema.ResourceData, meta interface{}) err if element.StopAction != nil { stopAction := map[string]interface{}{ - "scope": *element.StopAction.Scope, + "scope": aws.StringValue(element.StopAction.Scope), "position": i + 1, } if element.StopAction.TopicArn != nil { - stopAction["topic_arn"] = *element.StopAction.TopicArn + stopAction["topic_arn"] = aws.StringValue(element.StopAction.TopicArn) } stopActionList = append(stopActionList, stopAction) @@ -553,12 +581,12 @@ func resourceAwsSesReceiptRuleRead(d *schema.ResourceData, meta interface{}) err if element.WorkmailAction != nil { workmailAction := map[string]interface{}{ - "organization_arn": *element.WorkmailAction.OrganizationArn, + "organization_arn": aws.StringValue(element.WorkmailAction.OrganizationArn), "position": i + 1, } if element.WorkmailAction.TopicArn != nil { - workmailAction["topic_arn"] = *element.WorkmailAction.TopicArn + workmailAction["topic_arn"] = aws.StringValue(element.WorkmailAction.TopicArn) } workmailActionList = append(workmailActionList, workmailAction) @@ -597,7 +625,20 @@ func resourceAwsSesReceiptRuleRead(d *schema.ResourceData, meta interface{}) err } err = d.Set("workmail_action", workmailActionList) - return err + if err != nil { + return err + } + + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "ses", + Region: meta.(*AWSClient).region, + AccountID: meta.(*AWSClient).accountid, + Resource: fmt.Sprintf("receipt-rule-set/%s:receipt-rule/%s", ruleSetName, d.Id()), + }.String() + d.Set("arn", arn) + + return nil } func resourceAwsSesReceiptRuleDelete(d *schema.ResourceData, meta interface{}) error { diff --git a/aws/resource_aws_ses_receipt_rule_test.go b/aws/resource_aws_ses_receipt_rule_test.go index 25968b86788..9e5397b4713 100644 --- a/aws/resource_aws_ses_receipt_rule_test.go +++ b/aws/resource_aws_ses_receipt_rule_test.go @@ -14,7 +14,9 @@ import ( ) func TestAccAWSSESReceiptRule_basic(t *testing.T) { - rInt := acctest.RandInt() + rName := acctest.RandomWithPrefix("tf-acc-test") + 
resourceName := "aws_ses_receipt_rule.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) @@ -25,22 +27,25 @@ func TestAccAWSSESReceiptRule_basic(t *testing.T) { CheckDestroy: testAccCheckSESReceiptRuleDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSSESReceiptRuleBasicConfig(rInt), + Config: testAccAWSSESReceiptRuleBasicConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsSESReceiptRuleExists("aws_ses_receipt_rule.basic"), + testAccCheckAwsSESReceiptRuleExists(resourceName), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "ses", fmt.Sprintf("receipt-rule-set/%s:receipt-rule/%s", rName, rName)), ), }, { - ResourceName: "aws_ses_receipt_rule.basic", + ResourceName: resourceName, ImportState: true, - ImportStateIdFunc: testAccAwsSesReceiptRuleImportStateIdFunc("aws_ses_receipt_rule.basic"), + ImportStateIdFunc: testAccAwsSesReceiptRuleImportStateIdFunc(resourceName), }, }, }) } func TestAccAWSSESReceiptRule_s3Action(t *testing.T) { - rInt := acctest.RandInt() + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_ses_receipt_rule.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) @@ -51,22 +56,24 @@ func TestAccAWSSESReceiptRule_s3Action(t *testing.T) { CheckDestroy: testAccCheckSESReceiptRuleDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSSESReceiptRuleS3ActionConfig(rInt), + Config: testAccAWSSESReceiptRuleS3ActionConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsSESReceiptRuleExists("aws_ses_receipt_rule.basic"), + testAccCheckAwsSESReceiptRuleExists(resourceName), ), }, { - ResourceName: "aws_ses_receipt_rule.basic", + ResourceName: resourceName, ImportState: true, - ImportStateIdFunc: testAccAwsSesReceiptRuleImportStateIdFunc("aws_ses_receipt_rule.basic"), + ImportStateIdFunc: testAccAwsSesReceiptRuleImportStateIdFunc(resourceName), }, }, }) } func TestAccAWSSESReceiptRule_order(t *testing.T) { - rInt := acctest.RandInt() + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_ses_receipt_rule.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) @@ -77,22 +84,24 @@ func TestAccAWSSESReceiptRule_order(t *testing.T) { CheckDestroy: testAccCheckSESReceiptRuleDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSSESReceiptRuleOrderConfig(rInt), + Config: testAccAWSSESReceiptRuleOrderConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsSESReceiptRuleOrder("aws_ses_receipt_rule.second"), + testAccCheckAwsSESReceiptRuleOrder(resourceName), ), }, { - ResourceName: "aws_ses_receipt_rule.second", + ResourceName: resourceName, ImportState: true, - ImportStateIdFunc: testAccAwsSesReceiptRuleImportStateIdFunc("aws_ses_receipt_rule.second"), + ImportStateIdFunc: testAccAwsSesReceiptRuleImportStateIdFunc(resourceName), }, }, }) } func TestAccAWSSESReceiptRule_actions(t *testing.T) { - rInt := acctest.RandInt() + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_ses_receipt_rule.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) @@ -103,23 +112,24 @@ func TestAccAWSSESReceiptRule_actions(t *testing.T) { CheckDestroy: testAccCheckSESReceiptRuleDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSSESReceiptRuleActionsConfig(rInt), + Config: testAccAWSSESReceiptRuleActionsConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsSESReceiptRuleActions("aws_ses_receipt_rule.actions"), + 
testAccCheckAwsSESReceiptRuleActions(resourceName), ), }, { - ResourceName: "aws_ses_receipt_rule.actions", + ResourceName: resourceName, ImportState: true, - ImportStateIdFunc: testAccAwsSesReceiptRuleImportStateIdFunc("aws_ses_receipt_rule.actions"), + ImportStateIdFunc: testAccAwsSesReceiptRuleImportStateIdFunc(resourceName), }, }, }) } func TestAccAWSSESReceiptRule_disappears(t *testing.T) { - rInt := acctest.RandInt() - resourceName := "aws_ses_receipt_rule.basic" + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_ses_receipt_rule.test" + ruleSetResourceName := "aws_ses_receipt_rule_set.test" resource.ParallelTest(t, resource.TestCase{ @@ -132,7 +142,7 @@ func TestAccAWSSESReceiptRule_disappears(t *testing.T) { CheckDestroy: testAccCheckSESReceiptRuleDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSSESReceiptRuleBasicConfig(rInt), + Config: testAccAWSSESReceiptRuleBasicConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAwsSESReceiptRuleExists(resourceName), testAccCheckResourceDisappears(testAccProvider, resourceAwsSesReceiptRuleSet(), ruleSetResourceName), @@ -140,7 +150,7 @@ func TestAccAWSSESReceiptRule_disappears(t *testing.T) { ExpectNonEmptyPlan: true, }, { - Config: testAccAWSSESReceiptRuleBasicConfig(rInt), + Config: testAccAWSSESReceiptRuleBasicConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAwsSESReceiptRuleExists(resourceName), testAccCheckResourceDisappears(testAccProvider, resourceAwsSesReceiptRule(), resourceName), @@ -204,7 +214,7 @@ func testAccCheckAwsSESReceiptRuleExists(n string) resource.TestCheckFunc { return err } - if !*response.Rule.Enabled { + if !aws.BoolValue(response.Rule.Enabled) { return fmt.Errorf("Enabled (%v) was not set to true", *response.Rule.Enabled) } @@ -212,11 +222,11 @@ func testAccCheckAwsSESReceiptRuleExists(n string) resource.TestCheckFunc { return fmt.Errorf("Recipients (%v) was not set to [test@example.com]", response.Rule.Recipients) } - if !*response.Rule.ScanEnabled { + if !aws.BoolValue(response.Rule.ScanEnabled) { return fmt.Errorf("ScanEnabled (%v) was not set to true", *response.Rule.ScanEnabled) } - if *response.Rule.TlsPolicy != "Require" { + if aws.StringValue(response.Rule.TlsPolicy) != ses.TlsPolicyRequire { return fmt.Errorf("TLS Policy (%s) was not set to Require", *response.Rule.TlsPolicy) } @@ -259,7 +269,8 @@ func testAccCheckAwsSESReceiptRuleOrder(n string) resource.TestCheckFunc { if len(response.Rules) != 2 { return fmt.Errorf("Number of rules (%d) was not equal to 2", len(response.Rules)) - } else if *response.Rules[0].Name != "first" || *response.Rules[1].Name != "second" { + } else if aws.StringValue(response.Rules[0].Name) != "first" || + aws.StringValue(response.Rules[1].Name) != "second" { return fmt.Errorf("Order of rules (%v) was incorrect", response.Rules) } @@ -297,25 +308,25 @@ func testAccCheckAwsSESReceiptRuleActions(n string) resource.TestCheckFunc { } addHeaderAction := actions[0].AddHeaderAction - if *addHeaderAction.HeaderName != "Another-Header" { + if aws.StringValue(addHeaderAction.HeaderName) != "Another-Header" { return fmt.Errorf("Header Name (%s) was not equal to Another-Header", *addHeaderAction.HeaderName) } - if *addHeaderAction.HeaderValue != "First" { + if aws.StringValue(addHeaderAction.HeaderValue) != "First" { return fmt.Errorf("Header Value (%s) was not equal to First", *addHeaderAction.HeaderValue) } secondAddHeaderAction := actions[1].AddHeaderAction - if *secondAddHeaderAction.HeaderName != "Added-By" { + if 
aws.StringValue(secondAddHeaderAction.HeaderName) != "Added-By" { return fmt.Errorf("Header Name (%s) was not equal to Added-By", *secondAddHeaderAction.HeaderName) } - if *secondAddHeaderAction.HeaderValue != "Terraform" { + if aws.StringValue(secondAddHeaderAction.HeaderValue) != "Terraform" { return fmt.Errorf("Header Value (%s) was not equal to Terraform", *secondAddHeaderAction.HeaderValue) } stopAction := actions[2].StopAction - if *stopAction.Scope != "RuleSet" { + if aws.StringValue(stopAction.Scope) != ses.StopScopeRuleSet { return fmt.Errorf("Scope (%s) was not equal to RuleSet", *stopAction.Scope) } @@ -346,37 +357,37 @@ func testAccPreCheckSESReceiptRule(t *testing.T) { } } -func testAccAWSSESReceiptRuleBasicConfig(rInt int) string { +func testAccAWSSESReceiptRuleBasicConfig(rName string) string { return fmt.Sprintf(` resource "aws_ses_receipt_rule_set" "test" { - rule_set_name = "test-me-%d" + rule_set_name = %[1]q } -resource "aws_ses_receipt_rule" "basic" { - name = "basic" +resource "aws_ses_receipt_rule" "test" { + name = %[1]q rule_set_name = aws_ses_receipt_rule_set.test.rule_set_name recipients = ["test@example.com"] enabled = true scan_enabled = true tls_policy = "Require" } -`, rInt) +`, rName) } -func testAccAWSSESReceiptRuleS3ActionConfig(rInt int) string { +func testAccAWSSESReceiptRuleS3ActionConfig(rName string) string { return fmt.Sprintf(` resource "aws_ses_receipt_rule_set" "test" { - rule_set_name = "test-me-%d" + rule_set_name = %[1]q } -resource "aws_s3_bucket" "emails" { - bucket = "ses-terraform-emails-%d" +resource "aws_s3_bucket" "test" { + bucket = %[1]q acl = "public-read-write" force_destroy = "true" } -resource "aws_ses_receipt_rule" "basic" { - name = "basic" +resource "aws_ses_receipt_rule" "test" { + name = %[1]q rule_set_name = aws_ses_receipt_rule_set.test.rule_set_name recipients = ["test@example.com"] enabled = true @@ -384,40 +395,40 @@ resource "aws_ses_receipt_rule" "basic" { tls_policy = "Require" s3_action { - bucket_name = aws_s3_bucket.emails.id + bucket_name = aws_s3_bucket.test.id position = 1 } } -`, rInt, rInt) +`, rName) } -func testAccAWSSESReceiptRuleOrderConfig(rInt int) string { +func testAccAWSSESReceiptRuleOrderConfig(rName string) string { return fmt.Sprintf(` resource "aws_ses_receipt_rule_set" "test" { - rule_set_name = "test-me-%d" + rule_set_name = %[1]q } -resource "aws_ses_receipt_rule" "second" { +resource "aws_ses_receipt_rule" "test" { name = "second" rule_set_name = aws_ses_receipt_rule_set.test.rule_set_name - after = aws_ses_receipt_rule.first.name + after = aws_ses_receipt_rule.test1.name } -resource "aws_ses_receipt_rule" "first" { +resource "aws_ses_receipt_rule" "test1" { name = "first" rule_set_name = aws_ses_receipt_rule_set.test.rule_set_name } -`, rInt) +`, rName) } -func testAccAWSSESReceiptRuleActionsConfig(rInt int) string { +func testAccAWSSESReceiptRuleActionsConfig(rName string) string { return fmt.Sprintf(` resource "aws_ses_receipt_rule_set" "test" { - rule_set_name = "test-me-%d" + rule_set_name = %[1]q } -resource "aws_ses_receipt_rule" "actions" { - name = "actions4" +resource "aws_ses_receipt_rule" "test" { + name = %[1]q rule_set_name = aws_ses_receipt_rule_set.test.rule_set_name add_header_action { @@ -437,5 +448,5 @@ resource "aws_ses_receipt_rule" "actions" { position = 3 } } -`, rInt) +`, rName) } diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index a75a97f3990..0805755aec2 100644 --- a/website/docs/index.html.markdown +++ 
b/website/docs/index.html.markdown @@ -322,6 +322,7 @@ for more information about connecting to alternate AWS endpoints or AWS compatib - [`aws_ses_email_identity` resource](/docs/providers/aws/r/ses_email_identity.html) - [`aws_ses_event_destination` resource](/docs/providers/aws/r/ses_event_destination.html) - [`aws_ses_receipt_filter` resource](/docs/providers/aws/r/ses_receipt_filter.html) + - [`aws_ses_receipt_rule` resource](/docs/providers/aws/r/ses_receipt_rule.html) - [`aws_ses_template` resource](/docs/providers/aws/r/ses_template.html) - [`aws_ssm_document` data source](/docs/providers/aws/d/ssm_document.html) - [`aws_ssm_document` resource](/docs/providers/aws/r/ssm_document.html) diff --git a/website/docs/r/ses_receipt_rule.html.markdown b/website/docs/r/ses_receipt_rule.html.markdown index ca5f459c577..62257fcd93b 100644 --- a/website/docs/r/ses_receipt_rule.html.markdown +++ b/website/docs/r/ses_receipt_rule.html.markdown @@ -100,6 +100,13 @@ WorkMail actions support the following: * `topic_arn` - (Optional) The ARN of an SNS topic to notify * `position` - (Required) The position of the action in the receipt rule +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - The SES receipt rule name. +* `arn` - The SES receipt rule ARN. + ## Import SES receipt rules can be imported using the ruleset name and rule name separated by `:`. From 1e557824a104d41bf43f4ce68ffc6f15282f8e37 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 11 Feb 2021 13:19:38 -0500 Subject: [PATCH 1136/1212] Update website/docs/r/cloudfront_realtime_log_config.html.markdown Co-authored-by: angie pinilla --- website/docs/r/cloudfront_realtime_log_config.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/cloudfront_realtime_log_config.html.markdown b/website/docs/r/cloudfront_realtime_log_config.html.markdown index 90824ec49e6..12b6528554c 100644 --- a/website/docs/r/cloudfront_realtime_log_config.html.markdown +++ b/website/docs/r/cloudfront_realtime_log_config.html.markdown @@ -105,5 +105,5 @@ In addition to all arguments above, the following attributes are exported: CloudFront real-time log configurations can be imported using the ARN, e.g.
``` -$ terraform import aws_cloudfront_realtime_log_config.example TODO +$ terraform import aws_cloudfront_realtime_log_config.example arn:aws:cloudfront::111122223333:realtime-log-config/ExampleNameForRealtimeLogConfig ``` From 5a7afcdff18c522f5ad68dd511176ea3d97c08cc Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 11 Feb 2021 13:20:56 -0500 Subject: [PATCH 1137/1212] tests/data-source/route_table: Remove empty expectation --- aws/data_source_aws_route_table_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/aws/data_source_aws_route_table_test.go b/aws/data_source_aws_route_table_test.go index b11309a9b05..7c909403797 100644 --- a/aws/data_source_aws_route_table_test.go +++ b/aws/data_source_aws_route_table_test.go @@ -83,7 +83,6 @@ func TestAccDataSourceAwsRouteTable_basic(t *testing.T) { testAccCheckListHasSomeElementAttrPair(datasource5Name, "associations", "gateway_id", igwResourceName, "id"), resource.TestCheckResourceAttr(datasource5Name, "tags.Name", rName), ), - ExpectNonEmptyPlan: true, }, }, }) From ec6a77aceec4001f1583f806eb380eb610bdb409 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 13 Nov 2020 23:53:43 +0200 Subject: [PATCH 1138/1212] recreate when role is recreated --- aws/resource_aws_iam_instance_profile.go | 27 +++++++++-- aws/resource_aws_iam_instance_profile_test.go | 46 ++++++++++++++++++- 2 files changed, 68 insertions(+), 5 deletions(-) diff --git a/aws/resource_aws_iam_instance_profile.go b/aws/resource_aws_iam_instance_profile.go index ca4a422181e..cc3b2b6e01f 100644 --- a/aws/resource_aws_iam_instance_profile.go +++ b/aws/resource_aws_iam_instance_profile.go @@ -129,7 +129,7 @@ func instanceProfileAddRole(iamconn *iam.IAM, profileName, roleName string) erro // IAM unfortunately does not provide a better error code or message for eventual consistency // InvalidParameterValue: Value (XXX) for parameter iamInstanceProfile.name is invalid. Invalid IAM Instance Profile name // NoSuchEntity: The request was rejected because it referenced an entity that does not exist. The error message describes the entity. 
HTTP Status Code: 404 - if isAWSErr(err, "InvalidParameterValue", "Invalid IAM Instance Profile name") || isAWSErr(err, "NoSuchEntity", "The role with name") { + if isAWSErr(err, "InvalidParameterValue", "Invalid IAM Instance Profile name") || isAWSErr(err, iam.ErrCodeNoSuchEntityException, "The role with name") { return resource.RetryableError(err) } if err != nil { @@ -154,7 +154,7 @@ func instanceProfileRemoveRole(iamconn *iam.IAM, profileName, roleName string) e } _, err := iamconn.RemoveRoleFromInstanceProfile(request) - if isAWSErr(err, "NoSuchEntity", "") { + if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") { return nil } return err @@ -203,7 +203,7 @@ func resourceAwsIamInstanceProfileRead(d *schema.ResourceData, meta interface{}) } result, err := iamconn.GetInstanceProfile(request) - if isAWSErr(err, "NoSuchEntity", "") { + if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") { log.Printf("[WARN] IAM Instance Profile %s is already gone", d.Id()) d.SetId("") return nil @@ -212,7 +212,26 @@ func resourceAwsIamInstanceProfileRead(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error reading IAM instance profile %s: %s", d.Id(), err) } - return instanceProfileReadResult(d, result.InstanceProfile) + instanceProfile := result.InstanceProfile + if instanceProfile.Roles != nil && len(instanceProfile.Roles) > 0 { + + roleName := aws.StringValue(instanceProfile.Roles[0].RoleName) + input := &iam.GetRoleInput{ + RoleName: aws.String(roleName), + } + + _, err := iamconn.GetRole(input) + if err != nil { + if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") { + log.Printf("[WARN] IAM Role %q attached to IAM Instance Profile %q not found, removing from state", roleName, d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("Error reading IAM Role %s attached to IAM Instance Profile %s: %w", roleName, d.Id(), err) + } + } + + return instanceProfileReadResult(d, instanceProfile) } func resourceAwsIamInstanceProfileDelete(d *schema.ResourceData, meta interface{}) error { diff --git a/aws/resource_aws_iam_instance_profile_test.go b/aws/resource_aws_iam_instance_profile_test.go index 2ec2fc4be5e..a5891a10c68 100644 --- a/aws/resource_aws_iam_instance_profile_test.go +++ b/aws/resource_aws_iam_instance_profile_test.go @@ -92,6 +92,50 @@ func TestAccAWSIAMInstanceProfile_namePrefix(t *testing.T) { }) } +func TestAccAWSIAMInstanceProfile_disappears(t *testing.T) { + var conf iam.GetInstanceProfileOutput + resourceName := "aws_iam_instance_profile.test" + rName := acctest.RandString(5) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSInstanceProfileDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsIamInstanceProfileConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSInstanceProfileExists(resourceName, &conf), + testAccCheckResourceDisappears(testAccProvider, resourceAwsIamInstanceProfile(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccAWSIAMInstanceProfile_disappears_role(t *testing.T) { + var conf iam.GetInstanceProfileOutput + resourceName := "aws_iam_instance_profile.test" + rName := acctest.RandString(5) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSInstanceProfileDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsIamInstanceProfileConfig(rName), + Check: resource.ComposeTestCheckFunc(
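+ // Delete the attached role out-of-band; the follow-up plan should report the drift (ExpectNonEmptyPlan).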
+ testAccCheckAWSInstanceProfileExists(resourceName, &conf), + testAccCheckResourceDisappears(testAccProvider, resourceAwsIamRole(), "aws_iam_role.test"), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func testAccCheckAWSInstanceProfileGeneratedNamePrefix(resource, prefix string) resource.TestCheckFunc { return func(s *terraform.State) error { r, ok := s.RootModule().Resources[resource] @@ -125,7 +169,7 @@ func testAccCheckAWSInstanceProfileDestroy(s *terraform.State) error { return fmt.Errorf("still exist.") } - if isAWSErr(err, "NoSuchEntity", "") { + if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") { continue } From 2d5c15ff9df802f579fe92d709cc9b531fa77477 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 14 Nov 2020 00:09:29 +0200 Subject: [PATCH 1139/1212] detach role instead of deleting instance profile --- aws/resource_aws_iam_instance_profile.go | 62 ++++++++++++------------ 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/aws/resource_aws_iam_instance_profile.go b/aws/resource_aws_iam_instance_profile.go index cc3b2b6e01f..450a6bc4110 100644 --- a/aws/resource_aws_iam_instance_profile.go +++ b/aws/resource_aws_iam_instance_profile.go @@ -78,7 +78,7 @@ func resourceAwsIamInstanceProfile() *schema.Resource { } func resourceAwsIamInstanceProfileCreate(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn + conn := meta.(*AWSClient).iamconn var name string if v, ok := d.GetOk("name"); ok { @@ -95,12 +95,12 @@ func resourceAwsIamInstanceProfileCreate(d *schema.ResourceData, meta interface{ } var err error - response, err := iamconn.CreateInstanceProfile(request) + response, err := conn.CreateInstanceProfile(request) if err == nil { err = instanceProfileReadResult(d, response.InstanceProfile) } if err != nil { - return fmt.Errorf("Error creating IAM instance profile %s: %s", name, err) + return fmt.Errorf("Error creating IAM instance profile %s: %w", name, err) } waiterRequest := &iam.GetInstanceProfileInput{ @@ -109,15 +109,15 @@ func resourceAwsIamInstanceProfileCreate(d *schema.ResourceData, meta interface{ // don't return until the IAM service reports that the instance profile is ready. // this ensures that terraform resources which rely on the instance profile will 'see' // that the instance profile exists. - err = iamconn.WaitUntilInstanceProfileExists(waiterRequest) + err = conn.WaitUntilInstanceProfileExists(waiterRequest) if err != nil { - return fmt.Errorf("Timed out while waiting for instance profile %s: %s", name, err) + return fmt.Errorf("Timed out while waiting for instance profile %s: %w", name, err) } return resourceAwsIamInstanceProfileUpdate(d, meta) } -func instanceProfileAddRole(iamconn *iam.IAM, profileName, roleName string) error { +func instanceProfileAddRole(conn *iam.IAM, profileName, roleName string) error { request := &iam.AddRoleToInstanceProfileInput{ InstanceProfileName: aws.String(profileName), RoleName: aws.String(roleName), @@ -125,7 +125,7 @@ func instanceProfileAddRole(iamconn *iam.IAM, profileName, roleName string) erro err := resource.Retry(30*time.Second, func() *resource.RetryError { var err error - _, err = iamconn.AddRoleToInstanceProfile(request) + _, err = conn.AddRoleToInstanceProfile(request) // IAM unfortunately does not provide a better error code or message for eventual consistency // InvalidParameterValue: Value (XXX) for parameter iamInstanceProfile.name is invalid.
Invalid IAM Instance Profile name // NoSuchEntity: The request was rejected because it referenced an entity that does not exist. The error message describes the entity. HTTP Status Code: 404 @@ -138,33 +138,33 @@ func instanceProfileAddRole(iamconn *iam.IAM, profileName, roleName string) erro return nil }) if isResourceTimeoutError(err) { - _, err = iamconn.AddRoleToInstanceProfile(request) + _, err = conn.AddRoleToInstanceProfile(request) } if err != nil { - return fmt.Errorf("Error adding IAM Role %s to Instance Profile %s: %s", roleName, profileName, err) + return fmt.Errorf("Error adding IAM Role %s to Instance Profile %s: %w", roleName, profileName, err) } return err } -func instanceProfileRemoveRole(iamconn *iam.IAM, profileName, roleName string) error { +func instanceProfileRemoveRole(conn *iam.IAM, profileName, roleName string) error { request := &iam.RemoveRoleFromInstanceProfileInput{ InstanceProfileName: aws.String(profileName), RoleName: aws.String(roleName), } - _, err := iamconn.RemoveRoleFromInstanceProfile(request) + _, err := conn.RemoveRoleFromInstanceProfile(request) if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") { return nil } return err } -func instanceProfileRemoveAllRoles(d *schema.ResourceData, iamconn *iam.IAM) error { +func instanceProfileRemoveAllRoles(d *schema.ResourceData, conn *iam.IAM) error { if role, ok := d.GetOk("role"); ok { - err := instanceProfileRemoveRole(iamconn, d.Id(), role.(string)) + err := instanceProfileRemoveRole(conn, d.Id(), role.(string)) if err != nil { - return fmt.Errorf("Error removing role %s from IAM instance profile %s: %s", role, d.Id(), err) + return fmt.Errorf("Error removing role %s from IAM instance profile %s: %w", role, d.Id(), err) } } @@ -172,22 +172,22 @@ func instanceProfileRemoveAllRoles(d *schema.ResourceData, iamconn *iam.IAM) err } func resourceAwsIamInstanceProfileUpdate(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn + conn := meta.(*AWSClient).iamconn if d.HasChange("role") { oldRole, newRole := d.GetChange("role") if oldRole.(string) != "" { - err := instanceProfileRemoveRole(iamconn, d.Id(), oldRole.(string)) + err := instanceProfileRemoveRole(conn, d.Id(), oldRole.(string)) if err != nil { - return fmt.Errorf("Error adding role %s to IAM instance profile %s: %s", oldRole.(string), d.Id(), err) + return fmt.Errorf("Error removing role %s from IAM instance profile %s: %w", oldRole.(string), d.Id(), err) } } if newRole.(string) != "" { - err := instanceProfileAddRole(iamconn, d.Id(), newRole.(string)) + err := instanceProfileAddRole(conn, d.Id(), newRole.(string)) if err != nil { - return fmt.Errorf("Error adding role %s to IAM instance profile %s: %s", newRole.(string), d.Id(), err) + return fmt.Errorf("Error adding role %s to IAM instance profile %s: %w", newRole.(string), d.Id(), err) } } } @@ -196,36 +196,36 @@ func resourceAwsIamInstanceProfileUpdate(d *schema.ResourceData, meta interface{ } func resourceAwsIamInstanceProfileRead(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn + conn := meta.(*AWSClient).iamconn request := &iam.GetInstanceProfileInput{ InstanceProfileName: aws.String(d.Id()), } - result, err := iamconn.GetInstanceProfile(request) + result, err := conn.GetInstanceProfile(request) if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") { log.Printf("[WARN] IAM Instance Profile %s is already gone", d.Id()) d.SetId("") return nil } if err != nil { - return fmt.Errorf("Error reading IAM instance profile %s: %s",
d.Id(), err) + return fmt.Errorf("Error reading IAM instance profile %s: %w", d.Id(), err) } instanceProfile := result.InstanceProfile if instanceProfile.Roles != nil && len(instanceProfile.Roles) > 0 { - roleName := aws.StringValue(instanceProfile.Roles[0].RoleName) input := &iam.GetRoleInput{ RoleName: aws.String(roleName), } - _, err := iamconn.GetRole(input) + _, err := conn.GetRole(input) if err != nil { if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") { - log.Printf("[WARN] IAM Role %q attached to IAM Instance Profile %q not found, removing from state", roleName, d.Id()) - d.SetId("") - return nil + err := instanceProfileRemoveRole(conn, d.Id(), roleName) + if err != nil { + return fmt.Errorf("Error removing role %s from IAM instance profile %s: %w", roleName, d.Id(), err) + } } return fmt.Errorf("Error reading IAM Role %s attached to IAM Instance Profile %s: %w", roleName, d.Id(), err) } @@ -235,18 +235,18 @@ func resourceAwsIamInstanceProfileRead(d *schema.ResourceData, meta interface{}) } func resourceAwsIamInstanceProfileDelete(d *schema.ResourceData, meta interface{}) error { - iamconn := meta.(*AWSClient).iamconn + conn := meta.(*AWSClient).iamconn - if err := instanceProfileRemoveAllRoles(d, iamconn); err != nil { + if err := instanceProfileRemoveAllRoles(d, conn); err != nil { return err } request := &iam.DeleteInstanceProfileInput{ InstanceProfileName: aws.String(d.Id()), } - _, err := iamconn.DeleteInstanceProfile(request) + _, err := conn.DeleteInstanceProfile(request) if err != nil { - return fmt.Errorf("Error deleting IAM instance profile %s: %s", d.Id(), err) + return fmt.Errorf("Error deleting IAM instance profile %s: %w", d.Id(), err) } return nil From 362ada3d56e6596921636b45be897d03780f2869 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 14 Nov 2020 00:10:09 +0200 Subject: [PATCH 1140/1212] conn --- aws/resource_aws_iam_instance_profile_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_iam_instance_profile_test.go b/aws/resource_aws_iam_instance_profile_test.go index a5891a10c68..8b1836ec1a0 100644 --- a/aws/resource_aws_iam_instance_profile_test.go +++ b/aws/resource_aws_iam_instance_profile_test.go @@ -154,7 +154,7 @@ func testAccCheckAWSInstanceProfileGeneratedNamePrefix(resource, prefix string) } func testAccCheckAWSInstanceProfileDestroy(s *terraform.State) error { - iamconn := testAccProvider.Meta().(*AWSClient).iamconn + conn := testAccProvider.Meta().(*AWSClient).iamconn for _, rs := range s.RootModule().Resources { if rs.Type != "aws_iam_instance_profile" { continue } // Try to get role - _, err := iamconn.GetInstanceProfile(&iam.GetInstanceProfileInput{ + _, err := conn.GetInstanceProfile(&iam.GetInstanceProfileInput{ InstanceProfileName: aws.String(rs.Primary.ID), }) if err == nil { @@ -190,9 +190,9 @@ func testAccCheckAWSInstanceProfileExists(n string, res *iam.GetInstanceProfileO return fmt.Errorf("No Instance Profile name is set") } - iamconn := testAccProvider.Meta().(*AWSClient).iamconn + conn := testAccProvider.Meta().(*AWSClient).iamconn - resp, err := iamconn.GetInstanceProfile(&iam.GetInstanceProfileInput{ + resp, err := conn.GetInstanceProfile(&iam.GetInstanceProfileInput{ InstanceProfileName: aws.String(rs.Primary.ID), }) if err != nil { From 8bb541ead96e0f2f3782fb7b52ce29a1abd8ff08 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 14 Nov 2020 00:10:45 +0200 Subject: [PATCH 1141/1212] ignore error on
delete --- aws/resource_aws_iam_instance_profile.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/aws/resource_aws_iam_instance_profile.go b/aws/resource_aws_iam_instance_profile.go index 450a6bc4110..9acb181274c 100644 --- a/aws/resource_aws_iam_instance_profile.go +++ b/aws/resource_aws_iam_instance_profile.go @@ -246,6 +246,9 @@ func resourceAwsIamInstanceProfileDelete(d *schema.ResourceData, meta interface{ } _, err := conn.DeleteInstanceProfile(request) if err != nil { + if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") { + return nil + } return fmt.Errorf("Error deleting IAM instance profile %s: %w", d.Id(), err) } From 79370d9b2a70d018cd367a5e243116c23969a135 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Sat, 14 Nov 2020 00:19:02 +0200 Subject: [PATCH 1142/1212] more checks --- aws/resource_aws_iam_instance_profile_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/aws/resource_aws_iam_instance_profile_test.go b/aws/resource_aws_iam_instance_profile_test.go index 8b1836ec1a0..0bc63b918c5 100644 --- a/aws/resource_aws_iam_instance_profile_test.go +++ b/aws/resource_aws_iam_instance_profile_test.go @@ -26,6 +26,9 @@ func TestAccAWSIAMInstanceProfile_basic(t *testing.T) { Config: testAccAwsIamInstanceProfileConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSInstanceProfileExists(resourceName, &conf), + testAccCheckResourceAttrGlobalARN(resourceName, "arn", "iam", fmt.Sprintf("instance-profile/test-%s", rName)), + resource.TestCheckResourceAttrPair(resourceName, "role", "aws_iam_role.test", "name"), + resource.TestCheckResourceAttr(resourceName, "name", fmt.Sprintf("test-%s", rName)), ), }, { From 5bf1a3a2c218fad29a8bf5b61ca6a1022986f494 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 11 Feb 2021 17:53:30 +0200 Subject: [PATCH 1143/1212] changelog --- .changelog/16188.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/16188.txt diff --git a/.changelog/16188.txt b/.changelog/16188.txt new file mode 100644 index 00000000000..23f2b79e8b0 --- /dev/null +++ b/.changelog/16188.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_iam_instance_profile: Detach the role when it no longer exists and remove the instance profile from state when it has been deleted. +``` From c9c74ea49d8bf091231cb2fb4082c38cc104484a Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 11 Feb 2021 13:27:41 -0500 Subject: [PATCH 1144/1212] Update aws/resource_aws_cloudfront_realtime_log_config.go Co-authored-by: angie pinilla --- aws/resource_aws_cloudfront_realtime_log_config.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/aws/resource_aws_cloudfront_realtime_log_config.go b/aws/resource_aws_cloudfront_realtime_log_config.go index 4159d293e93..70c4de111b1 100644 --- a/aws/resource_aws_cloudfront_realtime_log_config.go +++ b/aws/resource_aws_cloudfront_realtime_log_config.go @@ -124,6 +124,9 @@ func resourceAwsCloudFrontRealtimeLogConfigRead(d *schema.ResourceData, meta int } if logConfig == nil { + if d.IsNewResource() { + return fmt.Errorf("error reading CloudFront Real-time Log Config (%s): not found", d.Id()) + } log.Printf("[WARN] CloudFront Real-time Log Config (%s) not found, removing from state", d.Id()) d.SetId("") return nil From a8858061689bde5d0ac87bf3440d19496c0c18ee Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 11 Feb 2021 13:28:58 -0500 Subject: [PATCH 1145/1212] Use 'tfawserr' package.
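`tfawserr.ErrCodeEquals` from hashicorp/aws-sdk-go-base replaces the provider-local `isAWSErr` helper wherever no error-message substring needs to be matched. A minimal sketch of the pattern, assuming a NotFound error that should be swallowed (the function name is illustrative):

```go
package example

import (
	"github.com/aws/aws-sdk-go/service/cloudfront"
	"github.com/hashicorp/aws-sdk-go-base/tfawserr"
)

// ignoreNotFound treats a missing real-time log config as already deleted.
func ignoreNotFound(err error) error {
	if tfawserr.ErrCodeEquals(err, cloudfront.ErrCodeNoSuchRealtimeLogConfig) {
		return nil
	}
	return err
}
```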
--- aws/resource_aws_cloudfront_realtime_log_config.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_cloudfront_realtime_log_config.go b/aws/resource_aws_cloudfront_realtime_log_config.go index 4159d293e93..a8088d34658 100644 --- a/aws/resource_aws_cloudfront_realtime_log_config.go +++ b/aws/resource_aws_cloudfront_realtime_log_config.go @@ -6,6 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudfront" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/cloudfront/finder" @@ -113,7 +114,7 @@ func resourceAwsCloudFrontRealtimeLogConfigRead(d *schema.ResourceData, meta int logConfig, err := finder.RealtimeLogConfigByARN(conn, d.Id()) - if isAWSErr(err, cloudfront.ErrCodeNoSuchRealtimeLogConfig, "") { + if tfawserr.ErrCodeEquals(err, cloudfront.ErrCodeNoSuchRealtimeLogConfig) { log.Printf("[WARN] CloudFront Real-time Log Config (%s) not found, removing from state", d.Id()) d.SetId("") return nil @@ -170,7 +171,7 @@ func resourceAwsCloudFrontRealtimeLogConfigDelete(d *schema.ResourceData, meta i ARN: aws.String(d.Id()), }) - if isAWSErr(err, cloudfront.ErrCodeNoSuchRealtimeLogConfig, "") { + if tfawserr.ErrCodeEquals(err, cloudfront.ErrCodeNoSuchRealtimeLogConfig) { return nil } From 18ece9feba345b7267f22acfc436315276a73563 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 11 Feb 2021 13:29:38 -0500 Subject: [PATCH 1146/1212] Formatting. --- aws/resource_aws_cloudfront_realtime_log_config.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_cloudfront_realtime_log_config.go b/aws/resource_aws_cloudfront_realtime_log_config.go index 91696805064..9e588aa42ca 100644 --- a/aws/resource_aws_cloudfront_realtime_log_config.go +++ b/aws/resource_aws_cloudfront_realtime_log_config.go @@ -125,9 +125,9 @@ func resourceAwsCloudFrontRealtimeLogConfigRead(d *schema.ResourceData, meta int } if logConfig == nil { - if d.IsNewResource() { - return fmt.Errorf("error reading CloudFront Real-time Log Config (%s): not found", d.Id()) - } + if d.IsNewResource() { + return fmt.Errorf("error reading CloudFront Real-time Log Config (%s): not found", d.Id()) + } log.Printf("[WARN] CloudFront Real-time Log Config (%s) not found, removing from state", d.Id()) d.SetId("") return nil From d590aeaf170980d44587206871949a511f2b7eb5 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 1 Jul 2020 13:28:39 -0400 Subject: [PATCH 1147/1212] d/aws_route_table: Rework 'TestAccAWSRouteDataSource_basic'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteDataSource_basic' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteDataSource_basic -timeout 120m === RUN TestAccAWSRouteDataSource_basic === PAUSE TestAccAWSRouteDataSource_basic === CONT TestAccAWSRouteDataSource_basic --- PASS: TestAccAWSRouteDataSource_basic (141.12s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 141.166s d/aws_route_table: Rework 'TestAccAWSRouteDataSource_TransitGatewayID'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteDataSource_TransitGatewayID' ==> Checking that code complies with gofmt requirements... 
TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteDataSource_TransitGatewayID -timeout 120m === RUN TestAccAWSRouteDataSource_TransitGatewayID === PAUSE TestAccAWSRouteDataSource_TransitGatewayID === CONT TestAccAWSRouteDataSource_TransitGatewayID --- PASS: TestAccAWSRouteDataSource_TransitGatewayID (296.93s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 296.976s d/aws_route_table: Add 'TestAccAWSRouteDataSource_IPv6DestinationCidr'. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSRouteDataSource_IPv6DestinationCidr' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws/ -v -count 1 -parallel 20 -run=TestAccAWSRouteDataSource_IPv6DestinationCidr -timeout 120m === RUN TestAccAWSRouteDataSource_IPv6DestinationCidr === PAUSE TestAccAWSRouteDataSource_IPv6DestinationCidr === CONT TestAccAWSRouteDataSource_IPv6DestinationCidr --- PASS: TestAccAWSRouteDataSource_IPv6DestinationCidr (43.81s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 43.869s Fix Acceptance Test Linting / terrafmt error. --- aws/data_source_aws_route_test.go | 228 +++++++++++++++++++----------- 1 file changed, 146 insertions(+), 82 deletions(-) diff --git a/aws/data_source_aws_route_test.go b/aws/data_source_aws_route_test.go index 6772660cdca..f1a97b41cb3 100644 --- a/aws/data_source_aws_route_test.go +++ b/aws/data_source_aws_route_test.go @@ -5,23 +5,42 @@ import ( "testing" "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func TestAccAWSRouteDataSource_basic(t *testing.T) { + instanceRouteResourceName := "aws_route.instance" + pcxRouteResourceName := "aws_route.vpc_peering_connection" + rtResourceName := "aws_route_table.test" + instanceResourceName := "aws_instance.test" + pcxResourceName := "aws_vpc_peering_connection.test" + datasource1Name := "data.aws_route.by_destination_cidr_block" + datasource2Name := "data.aws_route.by_instance_id" + datasource3Name := "data.aws_route.by_peering_connection_id" + rName := acctest.RandomWithPrefix("tf-acc-test") + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccDataSourceAwsRouteGroupConfig(), + Config: testAccDataSourceAwsRouteConfigBasic(rName), Check: resource.ComposeTestCheckFunc( - testAccDataSourceAwsRouteCheck("data.aws_route.by_destination_cidr_block"), - testAccDataSourceAwsRouteCheck("data.aws_route.by_instance_id"), - testAccDataSourceAwsRouteCheck("data.aws_route.by_peering_connection_id"), + // By destination CIDR. + resource.TestCheckResourceAttrPair(datasource1Name, "destination_cidr_block", instanceRouteResourceName, "destination_cidr_block"), + resource.TestCheckResourceAttrPair(datasource1Name, "route_table_id", rtResourceName, "id"), + + // By instance ID. + resource.TestCheckResourceAttrPair(datasource2Name, "destination_cidr_block", instanceRouteResourceName, "destination_cidr_block"), + resource.TestCheckResourceAttrPair(datasource2Name, "instance_id", instanceResourceName, "id"), + resource.TestCheckResourceAttrPair(datasource2Name, "route_table_id", rtResourceName, "id"), + + // By VPC peering connection ID. 
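+ // The lookup by vpc_peering_connection_id should resolve to the same route table and peering connection as the resources above.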
+ resource.TestCheckResourceAttrPair(datasource3Name, "destination_cidr_block", pcxRouteResourceName, "destination_cidr_block"), + resource.TestCheckResourceAttrPair(datasource3Name, "route_table_id", rtResourceName, "id"), + resource.TestCheckResourceAttrPair(datasource3Name, "vpc_peering_connection_id", pcxResourceName, "id"), ), - ExpectNonEmptyPlan: true, }, }, }) @@ -31,6 +50,7 @@ func TestAccAWSRouteDataSource_TransitGatewayID(t *testing.T) { var route ec2.Route dataSourceName := "data.aws_route.test" resourceName := "aws_route.test" + rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -38,7 +58,7 @@ func TestAccAWSRouteDataSource_TransitGatewayID(t *testing.T) { CheckDestroy: testAccCheckAWSRouteDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSRouteDataSourceConfigTransitGatewayID(), + Config: testAccAWSRouteDataSourceConfigIpv4TransitGateway(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSRouteExists(resourceName, &route), resource.TestCheckResourceAttrPair(resourceName, "destination_cidr_block", dataSourceName, "destination_cidr_block"), @@ -50,10 +70,32 @@ func TestAccAWSRouteDataSource_TransitGatewayID(t *testing.T) { }) } +func TestAccAWSRouteDataSource_IPv6DestinationCidr(t *testing.T) { + dataSourceName := "data.aws_route.test" + resourceName := "aws_route.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSRouteDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSRouteDataSourceConfigIpv6EgressOnlyInternetGateway(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(resourceName, "destination_ipv6_cidr_block", dataSourceName, "destination_ipv6_cidr_block"), + resource.TestCheckResourceAttrPair(resourceName, "route_table_id", dataSourceName, "route_table_id"), + ), + }, + }, + }) +} + func TestAccAWSRouteDataSource_LocalGatewayID(t *testing.T) { var route ec2.Route dataSourceName := "data.aws_route.by_local_gateway_id" resourceName := "aws_route.test" + rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSOutpostsOutposts(t) }, @@ -61,7 +103,7 @@ func TestAccAWSRouteDataSource_LocalGatewayID(t *testing.T) { CheckDestroy: testAccCheckAWSRouteDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSRouteDataSourceConfigLocalGatewayID(), + Config: testAccAWSRouteDataSourceConfigIpv4LocalGateway(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSRouteExists(resourceName, &route), resource.TestCheckResourceAttrPair(resourceName, "destination_cidr_block", dataSourceName, "destination_cidr_block"), @@ -73,69 +115,36 @@ func TestAccAWSRouteDataSource_LocalGatewayID(t *testing.T) { }) } -func testAccDataSourceAwsRouteCheck(name string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - - if !ok { - return fmt.Errorf("root module has no resource called %s", name) - } - - r, ok := s.RootModule().Resources["aws_route.test"] - if !ok { - return fmt.Errorf("can't find aws_route.test in state") - } - rts, ok := s.RootModule().Resources["aws_route_table.test"] - if !ok { - return fmt.Errorf("can't find aws_route_table.test in state") - } - - attr := rs.Primary.Attributes - - if attr["route_table_id"] != 
r.Primary.Attributes["route_table_id"] { - return fmt.Errorf( - "route_table_id is %s; want %s", - attr["route_table_id"], - r.Primary.Attributes["route_table_id"], - ) - } - - if attr["route_table_id"] != rts.Primary.Attributes["id"] { - return fmt.Errorf( - "route_table_id is %s; want %s", - attr["route_table_id"], - rts.Primary.Attributes["id"], - ) - } - - return nil - } -} - -func testAccDataSourceAwsRouteGroupConfig() string { +func testAccDataSourceAwsRouteConfigBasic(rName string) string { return composeConfig( testAccLatestAmazonLinuxHvmEbsAmiConfig(), - testAccAvailableAZsNoOptInDefaultExcludeConfig(), ` + testAccAvailableAZsNoOptInDefaultExcludeConfig(), + testAccAvailableEc2InstanceTypeForAvailabilityZone("data.aws_availability_zones.available.names[0]", "t3.micro", "t2.micro"), + fmt.Sprintf(` resource "aws_vpc" "test" { cidr_block = "172.16.0.0/16" tags = { - Name = "terraform-testacc-route-table-data-source" + Name = %[1]q } } -resource "aws_vpc" "dest" { +resource "aws_vpc" "target" { cidr_block = "172.17.0.0/16" tags = { - Name = "terraform-testacc-route-table-data-source" + Name = %[1]q } } resource "aws_vpc_peering_connection" "test" { - peer_vpc_id = aws_vpc.dest.id + peer_vpc_id = aws_vpc.target.id vpc_id = aws_vpc.test.id auto_accept = true + + tags = { + Name = %[1]q + } } resource "aws_subnet" "test" { @@ -144,7 +153,7 @@ resource "aws_subnet" "test" { availability_zone = data.aws_availability_zones.available.names[0] tags = { - Name = "tf-acc-route-table-data-source" + Name = %[1]q } } @@ -152,11 +161,11 @@ resource "aws_route_table" "test" { vpc_id = aws_vpc.test.id tags = { - Name = "terraform-testacc-routetable-data-source" + Name = %[1]q } } -resource "aws_route" "pcx" { +resource "aws_route" "vpc_peering_connection" { route_table_id = aws_route_table.test.id vpc_peering_connection_id = aws_vpc_peering_connection.test.id destination_cidr_block = "10.0.2.0/24" @@ -167,54 +176,48 @@ resource "aws_route_table_association" "a" { route_table_id = aws_route_table.test.id } -resource "aws_instance" "web" { +resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id - instance_type = "t2.micro" + instance_type = data.aws_ec2_instance_type_offering.available.instance_type subnet_id = aws_subnet.test.id tags = { - Name = "HelloWorld" + Name = %[1]q } } -resource "aws_route" "test" { +resource "aws_route" "instance" { route_table_id = aws_route_table.test.id destination_cidr_block = "10.0.1.0/24" - instance_id = aws_instance.web.id - - timeouts { - create = "5m" - } + instance_id = aws_instance.test.id } data "aws_route" "by_peering_connection_id" { route_table_id = aws_route_table.test.id - vpc_peering_connection_id = aws_route.pcx.vpc_peering_connection_id + vpc_peering_connection_id = aws_route.vpc_peering_connection.vpc_peering_connection_id } data "aws_route" "by_destination_cidr_block" { route_table_id = aws_route_table.test.id - destination_cidr_block = "10.0.1.0/24" - depends_on = [aws_route.test] + destination_cidr_block = aws_route.instance.destination_cidr_block } data "aws_route" "by_instance_id" { route_table_id = aws_route_table.test.id - instance_id = aws_instance.web.id - depends_on = [aws_route.test] + instance_id = aws_route.instance.instance_id } -`) +`, rName)) } -func testAccAWSRouteDataSourceConfigTransitGatewayID() string { - return testAccAvailableAZsNoOptInDefaultExcludeConfig() + ` -# IncorrectState: Transit Gateway is not available in some availability zones - +func testAccAWSRouteDataSourceConfigIpv4TransitGateway(rName 
string) string { + return composeConfig( + testAccAvailableAZsNoOptInDefaultExcludeConfig(), + fmt.Sprintf(` resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" tags = { - Name = "tf-acc-test-ec2-route-datasource-transit-gateway-id" + Name = %[1]q } } @@ -224,16 +227,24 @@ resource "aws_subnet" "test" { vpc_id = aws_vpc.test.id tags = { - Name = "tf-acc-test-ec2-route-datasource-transit-gateway-id" + Name = %[1]q } } -resource "aws_ec2_transit_gateway" "test" {} +resource "aws_ec2_transit_gateway" "test" { + tags = { + Name = %[1]q + } +} resource "aws_ec2_transit_gateway_vpc_attachment" "test" { subnet_ids = [aws_subnet.test.id] transit_gateway_id = aws_ec2_transit_gateway.test.id vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } } resource "aws_route" "test" { @@ -246,32 +257,85 @@ data "aws_route" "test" { route_table_id = aws_route.test.route_table_id transit_gateway_id = aws_route.test.transit_gateway_id } -` +`, rName)) +} + +func testAccAWSRouteDataSourceConfigIpv6EgressOnlyInternetGateway(rName string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = %[1]q + } +} + +resource "aws_egress_only_internet_gateway" "test" { + vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } } -func testAccAWSRouteDataSourceConfigLocalGatewayID() string { - return ` +resource "aws_route_table" "test" { + vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } +} + +resource "aws_route" "test" { + route_table_id = aws_route_table.test.id + destination_ipv6_cidr_block = "::/0" + egress_only_gateway_id = aws_egress_only_internet_gateway.test.id +} + +data "aws_route" "test" { + route_table_id = aws_route.test.route_table_id + destination_ipv6_cidr_block = aws_route.test.destination_ipv6_cidr_block +} +`, rName) +} + +func testAccAWSRouteDataSourceConfigIpv4LocalGateway(rName string) string { + return fmt.Sprintf(` data "aws_ec2_local_gateways" "all" {} + data "aws_ec2_local_gateway" "first" { id = tolist(data.aws_ec2_local_gateways.all.ids)[0] } data "aws_ec2_local_gateway_route_tables" "all" {} + data "aws_ec2_local_gateway_route_table" "first" { local_gateway_route_table_id = tolist(data.aws_ec2_local_gateway_route_tables.all.ids)[0] } resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" + + tags = { + Name = %[1]q + } } resource "aws_ec2_local_gateway_route_table_vpc_association" "example" { local_gateway_route_table_id = data.aws_ec2_local_gateway_route_table.first.id vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } } resource "aws_route_table" "test" { vpc_id = aws_vpc.test.id + + tags = { + Name = %[1]q + } } resource "aws_route" "test" { @@ -286,5 +350,5 @@ data "aws_route" "by_local_gateway_id" { local_gateway_id = data.aws_ec2_local_gateway.first.id depends_on = [aws_route.test] } -` +`, rName) } From 6e9f21cf7cb2151d021658503314b45b9bafdcd6 Mon Sep 17 00:00:00 2001 From: bill-rich Date: Thu, 11 Feb 2021 10:33:31 -0800 Subject: [PATCH 1148/1212] Fix shared expand function and use WAFv2 policy --- aws/resource_aws_fms_policy.go | 13 ++++++++----- aws/resource_aws_fms_policy_test.go | 10 +++++++--- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/aws/resource_aws_fms_policy.go b/aws/resource_aws_fms_policy.go index 2d7f83cc343..d9b240b64a7 100644 --- a/aws/resource_aws_fms_policy.go +++ b/aws/resource_aws_fms_policy.go @@ -95,6 +95,7 @@ func resourceAwsFmsPolicy() *schema.Resource { "resource_type_list": { Type: schema.TypeSet, Optional: true, + Computed: true, Set: schema.HashString, 
ConflictsWith: []string{"resource_type"}, Elem: &schema.Schema{ @@ -106,6 +107,7 @@ func resourceAwsFmsPolicy() *schema.Resource { "resource_type": { Type: schema.TypeString, Optional: true, + Computed: true, ConflictsWith: []string{"resource_type_list"}, }, @@ -202,9 +204,7 @@ func resourceAwsFmsPolicyFlattenPolicy(d *schema.ResourceData, resp *fms.GetPoli if err := d.Set("resource_type_list", resp.Policy.ResourceTypeList); err != nil { return err } - if aws.StringValue(resp.Policy.ResourceType) != "ResourceTypeList" { - d.Set("resource_type", aws.StringValue(resp.Policy.ResourceType)) - } + d.Set("resource_type", aws.StringValue(resp.Policy.ResourceType)) d.Set("policy_update_token", aws.StringValue(resp.Policy.PolicyUpdateToken)) if err := d.Set("resource_tags", flattenFMSResourceTags(resp.Policy.ResourceTags)); err != nil { return err @@ -230,14 +230,17 @@ func resourceAwsFmsPolicyExpandPolicy(d *schema.ResourceData) *fms.Policy { fmsPolicy := &fms.Policy{ PolicyName: aws.String(d.Get("name").(string)), - PolicyId: aws.String(d.Id()), - PolicyUpdateToken: aws.String(d.Get("policy_update_token").(string)), RemediationEnabled: aws.Bool(d.Get("remediation_enabled").(bool)), ResourceType: resourceType, ResourceTypeList: resourceTypeList, ExcludeResourceTags: aws.Bool(d.Get("exclude_resource_tags").(bool)), } + if d.Id() != "" { + fmsPolicy.PolicyId = aws.String(d.Id()) + fmsPolicy.PolicyUpdateToken = aws.String(d.Get("policy_update_token").(string)) + } + fmsPolicy.ExcludeMap = expandFMSPolicyMap(d.Get("exclude_map").([]interface{})) fmsPolicy.IncludeMap = expandFMSPolicyMap(d.Get("include_map").([]interface{})) diff --git a/aws/resource_aws_fms_policy_test.go b/aws/resource_aws_fms_policy_test.go index a2b93ac1b52..6fc08067a5b 100644 --- a/aws/resource_aws_fms_policy_test.go +++ b/aws/resource_aws_fms_policy_test.go @@ -223,7 +223,10 @@ resource "aws_wafregional_rule_group" "test" { } func testAccFmsPolicyConfig_cloudfrontDistribution(name string, group string) string { - return fmt.Sprintf(` + return composeConfig( + testAccWebACLLoggingConfigurationDependenciesConfig(name), + testAccWebACLLoggingConfigurationKinesisDependencyConfig(name), + fmt.Sprintf(` resource "aws_fms_policy" "test" { exclude_resource_tags = false name = "%[1]s" @@ -232,7 +235,7 @@ resource "aws_fms_policy" "test" { security_service_policy_data { type = "WAFV2" - managed_service_data = "{\"type\": \"WAF\", \"ruleGroups\": [{\"id\":\"${aws_wafregional_rule_group.test.id}\", \"overrideAction\" : {\"type\": \"COUNT\"}}],\"defaultAction\": {\"type\": \"BLOCK\"}, \"overrideCustomerWebACLAssociation\": false}" + managed_service_data = "{\"type\":\"WAFV2\",\"preProcessRuleGroups\":[{\"ruleGroupArn\":null,\"overrideAction\":{\"type\":\"NONE\"},\"managedRuleGroupIdentifier\":{\"version\":null,\"vendorName\":\"AWS\",\"managedRuleGroupName\":\"AWSManagedRulesAmazonIpReputationList\"},\"ruleGroupType\":\"ManagedRuleGroup\",\"excludeRules\":[]}],\"postProcessRuleGroups\":[],\"defaultAction\":{\"type\":\"ALLOW\"},\"overrideCustomerWebACLAssociation\":false,\"loggingConfiguration\":{\"logDestinationConfigs\":[\"${aws_kinesis_firehose_delivery_stream.test.arn}\"],\"redactedFields\":[{\"redactedFieldType\":\"SingleHeader\",\"redactedFieldValue\":\"Cookies\"}]}}" } } @@ -241,7 +244,8 @@ resource "aws_wafregional_rule_group" "test" { metric_name = "MyTest" name = "%[2]s" } -`, name, group) +`, name, group), + ) } func testAccFmsPolicyConfig_updated(name string, group string) string { From 7424bc16645a0672746c1022dd7c808cb00d1232 Mon 
Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 11 Feb 2021 10:36:29 -0800 Subject: [PATCH 1149/1212] Adds unit test from #17359 --- aws/resource_aws_lambda_function_test.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/aws/resource_aws_lambda_function_test.go b/aws/resource_aws_lambda_function_test.go index a2c9e2c0936..b6ae44ce0ef 100644 --- a/aws/resource_aws_lambda_function_test.go +++ b/aws/resource_aws_lambda_function_test.go @@ -3093,3 +3093,9 @@ resource "aws_lambda_function" "test" { } `, rName, runtime)) } + +func TestFlattenLambdaImageConfigShouldNotFailWithEmptyImageConfig(t *testing.T) { + t.Parallel() + response := lambda.ImageConfigResponse{} + flattenLambdaImageConfig(&response) +} From 246b508541b35c72d1e47a596ca140e4d84ed3ec Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 11 Feb 2021 13:38:32 -0500 Subject: [PATCH 1150/1212] r/aws_cloudfront_realtime_log_config: Additional context for creation failure. Acceptance test output: $ make testacc TEST=./aws/ TESTARGS='-run=TestAccAWSCloudFrontRealtimeLogConfig_basic' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccAWSCloudFrontRealtimeLogConfig_basic -timeout 120m === RUN TestAccAWSCloudFrontRealtimeLogConfig_basic === PAUSE TestAccAWSCloudFrontRealtimeLogConfig_basic === CONT TestAccAWSCloudFrontRealtimeLogConfig_basic --- PASS: TestAccAWSCloudFrontRealtimeLogConfig_basic (55.83s) PASS ok github.com/terraform-providers/terraform-provider-aws/aws 55.936s --- aws/resource_aws_cloudfront_realtime_log_config.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_cloudfront_realtime_log_config.go b/aws/resource_aws_cloudfront_realtime_log_config.go index 9e588aa42ca..ca7336fc5f1 100644 --- a/aws/resource_aws_cloudfront_realtime_log_config.go +++ b/aws/resource_aws_cloudfront_realtime_log_config.go @@ -90,10 +90,11 @@ func resourceAwsCloudFrontRealtimeLogConfig() *schema.Resource { func resourceAwsCloudFrontRealtimeLogConfigCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).cloudfrontconn + name := d.Get("name").(string) input := &cloudfront.CreateRealtimeLogConfigInput{ EndPoints: expandCloudFrontEndPoints(d.Get("endpoint").([]interface{})), Fields: expandStringSet(d.Get("fields").(*schema.Set)), - Name: aws.String(d.Get("name").(string)), + Name: aws.String(name), SamplingRate: aws.Int64(int64(d.Get("sampling_rate").(int))), } @@ -101,7 +102,7 @@ func resourceAwsCloudFrontRealtimeLogConfigCreate(d *schema.ResourceData, meta i output, err := conn.CreateRealtimeLogConfig(input) if err != nil { - return fmt.Errorf("error creating CloudFront Real-time Log Config: %w", err) + return fmt.Errorf("error creating CloudFront Real-time Log Config (%s): %w", name, err) } d.SetId(aws.StringValue(output.RealtimeLogConfig.ARN)) From 48f807f86db077f1260d0c818ea1e51867e673fc Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 11 Feb 2021 10:44:04 -0800 Subject: [PATCH 1151/1212] Adds changelog --- .changelog/17082.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/17082.txt diff --git a/.changelog/17082.txt b/.changelog/17082.txt new file mode 100644 index 00000000000..a728922d4a2 --- /dev/null +++ b/.changelog/17082.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_lambda_function: Prevent crash when using `Image` package type +``` From c095947f886f40f1709553b108069f074b2c0692 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 11 Feb 2021 18:57:28 +0000 
Subject: [PATCH 1152/1212] Update CHANGELOG.md for #17522 --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index be0a0b19ebc..4cc5774a6d1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,14 +10,21 @@ ENHANCEMENTS: * data-source/aws_iam_policy_document: Support merging policy documents by adding `source_policy_documents` and `override_policy_documents` arguments ([#12055](https://github.com/hashicorp/terraform-provider-aws/issues/12055)) * provider: Add terraform-provider-aws version to HTTP User-Agent header ([#17486](https://github.com/hashicorp/terraform-provider-aws/issues/17486)) +* resource/aws_cloudwatch_log_destination: Add plan time validation to `role_arn`, `name` and `target_arn`. ([#11687](https://github.com/hashicorp/terraform-provider-aws/issues/11687)) * resource/aws_ec2_traffic_mirror_filter: Add `arn` attribute. ([#13948](https://github.com/hashicorp/terraform-provider-aws/issues/13948)) * resource/aws_ec2_traffic_mirror_filter_rule: Add arn attribute. ([#13949](https://github.com/hashicorp/terraform-provider-aws/issues/13949)) * resource/aws_ec2_traffic_mirror_filter_rule: Add plan time validation to `destination_port_range.from_port`, `destination_port_range.to_port`, `source_port_range.from_port`, and `source_port_range.to_port`. ([#13949](https://github.com/hashicorp/terraform-provider-aws/issues/13949)) * resource/aws_lambda_event_source_mapping: Add `topics` attribute to support Amazon MSK as an event source ([#14746](https://github.com/hashicorp/terraform-provider-aws/issues/14746)) +* resource/aws_ses_active_receipt_rule_set: Add `arn` attribute ([#13962](https://github.com/hashicorp/terraform-provider-aws/issues/13962)) +* resource/aws_ses_active_receipt_rule_set: Add plan time validation for `rule_set_name` argument ([#13962](https://github.com/hashicorp/terraform-provider-aws/issues/13962)) +* resource/aws_ses_configuration_set: Add `arn` attribute. ([#13972](https://github.com/hashicorp/terraform-provider-aws/issues/13972)) * resource/aws_ses_configuration_set: Add `delivery_options` argument ([#11600](https://github.com/hashicorp/terraform-provider-aws/issues/11600)) +* resource/aws_ses_configuration_set: Add plan time validation to `name`. 
([#13972](https://github.com/hashicorp/terraform-provider-aws/issues/13972))
* resource/aws_ses_event_destination: Add `arn` attribute ([#13964](https://github.com/hashicorp/terraform-provider-aws/issues/13964))
* resource/aws_ses_event_destination: Add plan time validation for `name`, `cloudwatch_destination.default_value`, `cloudwatch_destination.default_name`, `kinesis_destination.role_arn`, `kinesis_destination.stream_arn`, and `sns_destination.topic_arn` attributes ([#13964](https://github.com/hashicorp/terraform-provider-aws/issues/13964))
* resource/aws_ses_receipt_rule: Add `arn` attribute ([#13960](https://github.com/hashicorp/terraform-provider-aws/issues/13960))
* resource/aws_ses_receipt_rule: Add plan time validations for `name`, `tls_policy`, `add_header_action.header_name`, `add_header_action.header_value`, `bounce_action.topic_arn`, `lambda_action.function_arn`, `lambda_action.topic_arn`, `lambda_action.invocation_type`, `s3_action.topic_arn`, `sns_action.topic_arn`, `stop_action.scope`, `stop_action.topic_arn`, `workmail_action.topic_arn`, and `workmail_action.organization_arn` attributes ([#13960](https://github.com/hashicorp/terraform-provider-aws/issues/13960))
* resource/aws_ses_template: Add `arn` attribute ([#13963](https://github.com/hashicorp/terraform-provider-aws/issues/13963))
* resource/aws_sns_topic_subscription: Add `redrive_policy` argument ([#11770](https://github.com/hashicorp/terraform-provider-aws/issues/11770))

From ad997d921691091883579fb87d93ef4a94ecedd9 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Thu, 11 Feb 2021 14:00:20 -0500
Subject: [PATCH 1153/1212] Update
 aws/resource_aws_cloudfront_realtime_log_config.go

Co-authored-by: angie pinilla
---
 aws/resource_aws_cloudfront_realtime_log_config.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/aws/resource_aws_cloudfront_realtime_log_config.go b/aws/resource_aws_cloudfront_realtime_log_config.go
index ca7336fc5f1..6a9f3bbde6d 100644
--- a/aws/resource_aws_cloudfront_realtime_log_config.go
+++ b/aws/resource_aws_cloudfront_realtime_log_config.go
@@ -115,7 +115,7 @@ func resourceAwsCloudFrontRealtimeLogConfigRead(d *schema.ResourceData, meta int

 	logConfig, err := finder.RealtimeLogConfigByARN(conn, d.Id())

-	if tfawserr.ErrCodeEquals(err, cloudfront.ErrCodeNoSuchRealtimeLogConfig) {
+	if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, cloudfront.ErrCodeNoSuchRealtimeLogConfig) {
 		log.Printf("[WARN] CloudFront Real-time Log Config (%s) not found, removing from state", d.Id())
 		d.SetId("")
 		return nil

From 4456c98247a67bf94a4a0e83bf8bdb8358674915 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Thu, 11 Feb 2021 14:00:34 -0500
Subject: [PATCH 1154/1212] Update
 aws/resource_aws_cloudfront_realtime_log_config.go

Co-authored-by: angie pinilla
---
 aws/resource_aws_cloudfront_realtime_log_config.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/aws/resource_aws_cloudfront_realtime_log_config.go b/aws/resource_aws_cloudfront_realtime_log_config.go
index 6a9f3bbde6d..51c4b0a5c3d 100644
--- a/aws/resource_aws_cloudfront_realtime_log_config.go
+++ b/aws/resource_aws_cloudfront_realtime_log_config.go
@@ -180,7 +180,7 @@ func resourceAwsCloudFrontRealtimeLogConfigDelete(d *schema.ResourceData, meta i
 	}

 	if err != nil {
-		return fmt.Errorf("error deleting Route53 Resolver Query Log Config (%s): %w", d.Id(), err)
+		return fmt.Errorf("error deleting CloudFront Real-time Log Config (%s): %w", d.Id(), err)
 	}

 	return nil

From af77d84ce67a301178b55fa8aa994c44e0ec738b Mon Sep 17 00:00:00
2001 From: Ilia Lazebnik Date: Thu, 11 Feb 2021 21:12:30 +0200 Subject: [PATCH 1155/1212] resource/aws_codebuild_report_group: Add delete_reports argument and service package refactoring (#17338) Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAWSCodeBuildReportGroup_disappears (10.64s) --- PASS: TestAccAWSCodeBuildReportGroup_deleteReports (13.91s) --- PASS: TestAccAWSCodeBuildReportGroup_basic (14.42s) --- PASS: TestAccAWSCodeBuildReportGroup_tags (30.94s) --- PASS: TestAccAWSCodeBuildReportGroup_export_s3 (46.32s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- PASS: TestAccAWSCodeBuildReportGroup_disappears (14.58s) --- PASS: TestAccAWSCodeBuildReportGroup_basic (19.24s) --- PASS: TestAccAWSCodeBuildReportGroup_deleteReports (19.27s) --- PASS: TestAccAWSCodeBuildReportGroup_tags (44.22s) --- PASS: TestAccAWSCodeBuildReportGroup_export_s3 (50.66s) ``` --- .changelog/17338.txt | 3 + .../service/codebuild/finder/finder.go | 32 ++++ .../service/codebuild/waiter/status.go | 29 ++++ .../service/codebuild/waiter/waiter.go | 31 ++++ aws/resource_aws_codebuild_report_group.go | 34 +++-- ...esource_aws_codebuild_report_group_test.go | 141 ++++++++++++++---- .../r/codebuild_report_group.html.markdown | 1 + 7 files changed, 230 insertions(+), 41 deletions(-) create mode 100644 .changelog/17338.txt create mode 100644 aws/internal/service/codebuild/finder/finder.go create mode 100644 aws/internal/service/codebuild/waiter/status.go create mode 100644 aws/internal/service/codebuild/waiter/waiter.go diff --git a/.changelog/17338.txt b/.changelog/17338.txt new file mode 100644 index 00000000000..6d07bb41a4d --- /dev/null +++ b/.changelog/17338.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_codebuild_report_group: Add `delete_reports` argument +``` diff --git a/aws/internal/service/codebuild/finder/finder.go b/aws/internal/service/codebuild/finder/finder.go new file mode 100644 index 00000000000..7c8c4cd8ec7 --- /dev/null +++ b/aws/internal/service/codebuild/finder/finder.go @@ -0,0 +1,32 @@ +package finder + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/codebuild" +) + +// ReportGroupByArn returns the Report Group corresponding to the specified Arn. 
+func ReportGroupByArn(conn *codebuild.CodeBuild, arn string) (*codebuild.ReportGroup, error) { + + output, err := conn.BatchGetReportGroups(&codebuild.BatchGetReportGroupsInput{ + ReportGroupArns: aws.StringSlice([]string{arn}), + }) + if err != nil { + return nil, err + } + + if output == nil { + return nil, nil + } + + if len(output.ReportGroups) == 0 { + return nil, nil + } + + reportGroup := output.ReportGroups[0] + if reportGroup == nil { + return nil, nil + } + + return reportGroup, nil +} diff --git a/aws/internal/service/codebuild/waiter/status.go b/aws/internal/service/codebuild/waiter/status.go new file mode 100644 index 00000000000..041e9a31c80 --- /dev/null +++ b/aws/internal/service/codebuild/waiter/status.go @@ -0,0 +1,29 @@ +package waiter + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/codebuild" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/codebuild/finder" +) + +const ( + ReportGroupStatusUnknown = "Unknown" + ReportGroupStatusNotFound = "NotFound" +) + +// ReportGroupStatus fetches the Report Group and its Status +func ReportGroupStatus(conn *codebuild.CodeBuild, arn string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := finder.ReportGroupByArn(conn, arn) + if err != nil { + return nil, ReportGroupStatusUnknown, err + } + + if output == nil { + return nil, ReportGroupStatusNotFound, nil + } + + return output, aws.StringValue(output.Status), nil + } +} diff --git a/aws/internal/service/codebuild/waiter/waiter.go b/aws/internal/service/codebuild/waiter/waiter.go new file mode 100644 index 00000000000..29a2f7c9c90 --- /dev/null +++ b/aws/internal/service/codebuild/waiter/waiter.go @@ -0,0 +1,31 @@ +package waiter + +import ( + "time" + + "github.com/aws/aws-sdk-go/service/codebuild" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const ( + // Maximum amount of time to wait for an Operation to return Deleted + ReportGroupDeleteTimeout = 2 * time.Minute +) + +// ReportGroupDeleted waits for an ReportGroup to return Deleted +func ReportGroupDeleted(conn *codebuild.CodeBuild, arn string) (*codebuild.ReportGroup, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{codebuild.ReportGroupStatusTypeDeleting}, + Target: []string{}, + Refresh: ReportGroupStatus(conn, arn), + Timeout: ReportGroupDeleteTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*codebuild.ReportGroup); ok { + return output, err + } + + return nil, err +} diff --git a/aws/resource_aws_codebuild_report_group.go b/aws/resource_aws_codebuild_report_group.go index 0d86b56170c..7007196927b 100644 --- a/aws/resource_aws_codebuild_report_group.go +++ b/aws/resource_aws_codebuild_report_group.go @@ -10,6 +10,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/codebuild/finder" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/codebuild/waiter" ) func resourceAwsCodeBuildReportGroup() *schema.Resource { @@ -90,6 +92,11 @@ func resourceAwsCodeBuildReportGroup() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "delete_reports": { + Type: schema.TypeBool, + Optional: true, + 
Default: false, + }, "tags": tagsSchema(), }, } @@ -106,7 +113,7 @@ func resourceAwsCodeBuildReportGroupCreate(d *schema.ResourceData, meta interfac resp, err := conn.CreateReportGroup(createOpts) if err != nil { - return fmt.Errorf("error creating CodeBuild Report Groups: %w", err) + return fmt.Errorf("error creating CodeBuild Report Group: %w", err) } d.SetId(aws.StringValue(resp.ReportGroup.Arn)) @@ -118,23 +125,13 @@ func resourceAwsCodeBuildReportGroupRead(d *schema.ResourceData, meta interface{ conn := meta.(*AWSClient).codebuildconn ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig - resp, err := conn.BatchGetReportGroups(&codebuild.BatchGetReportGroupsInput{ - ReportGroupArns: aws.StringSlice([]string{d.Id()}), - }) + reportGroup, err := finder.ReportGroupByArn(conn, d.Id()) if err != nil { return fmt.Errorf("error Listing CodeBuild Report Groups: %w", err) } - if len(resp.ReportGroups) == 0 { - log.Printf("[WARN] CodeBuild Report Groups (%s) not found, removing from state", d.Id()) - d.SetId("") - return nil - } - - reportGroup := resp.ReportGroups[0] - if reportGroup == nil { - log.Printf("[WARN] CodeBuild Report Groups (%s) not found, removing from state", d.Id()) + log.Printf("[WARN] CodeBuild Report Group (%s) not found, removing from state", d.Id()) d.SetId("") return nil } @@ -175,7 +172,7 @@ func resourceAwsCodeBuildReportGroupUpdate(d *schema.ResourceData, meta interfac _, err := conn.UpdateReportGroup(input) if err != nil { - return fmt.Errorf("error updating CodeBuild Report Groups: %w", err) + return fmt.Errorf("error updating CodeBuild Report Group: %w", err) } return resourceAwsCodeBuildReportGroupRead(d, meta) @@ -185,11 +182,16 @@ func resourceAwsCodeBuildReportGroupDelete(d *schema.ResourceData, meta interfac conn := meta.(*AWSClient).codebuildconn deleteOpts := &codebuild.DeleteReportGroupInput{ - Arn: aws.String(d.Id()), + Arn: aws.String(d.Id()), + DeleteReports: aws.Bool(d.Get("delete_reports").(bool)), } if _, err := conn.DeleteReportGroup(deleteOpts); err != nil { - return fmt.Errorf("error deleting CodeBuild Report Groups(%s): %w", d.Id(), err) + return fmt.Errorf("error deleting CodeBuild Report Group (%s): %w", d.Id(), err) + } + + if _, err := waiter.ReportGroupDeleted(conn, d.Id()); err != nil { + return fmt.Errorf("error while waiting for CodeBuild Report Group (%s) to become deleted: %w", d.Id(), err) } return nil diff --git a/aws/resource_aws_codebuild_report_group_test.go b/aws/resource_aws_codebuild_report_group_test.go index a42b6cc4758..625eb14ab4c 100644 --- a/aws/resource_aws_codebuild_report_group_test.go +++ b/aws/resource_aws_codebuild_report_group_test.go @@ -2,15 +2,72 @@ package aws import ( "fmt" + "log" "testing" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/codebuild" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/codebuild/finder" ) +func init() { + resource.AddTestSweepers("aws_codebuild_report_group", &resource.Sweeper{ + Name: "aws_codebuild_report_group", + F: testSweepCodeBuildReportGroups, + }) +} + +func testSweepCodeBuildReportGroups(region string) error { + client, err := sharedClientForRegion(region) + + if err != nil { + return fmt.Errorf("error getting client: %w", err) + } + + conn := client.(*AWSClient).codebuildconn + input := 
&codebuild.ListReportGroupsInput{} + var sweeperErrs *multierror.Error + + err = conn.ListReportGroupsPages(input, func(page *codebuild.ListReportGroupsOutput, isLast bool) bool { + if page == nil { + return !isLast + } + + for _, arn := range page.ReportGroups { + id := aws.StringValue(arn) + r := resourceAwsCodeBuildReportGroup() + d := r.Data(nil) + d.SetId(id) + d.Set("delete_reports", true) + + err := r.Delete(d, client) + if err != nil { + sweeperErr := fmt.Errorf("error deleting CodeBuild Report Group (%s): %w", id, err) + log.Printf("[ERROR] %s", sweeperErr) + sweeperErrs = multierror.Append(sweeperErrs, sweeperErr) + continue + } + } + + return !isLast + }) + + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping CodeBuild Report Group sweep for %s: %s", region, err) + return sweeperErrs.ErrorOrNil() + } + + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error retrieving CodeBuild ReportGroups: %w", err)) + } + + return sweeperErrs.ErrorOrNil() +} + func TestAccAWSCodeBuildReportGroup_basic(t *testing.T) { var reportGroup codebuild.ReportGroup rName := acctest.RandomWithPrefix("tf-acc-test") @@ -33,9 +90,10 @@ func TestAccAWSCodeBuildReportGroup_basic(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"delete_reports"}, }, }, }) @@ -67,9 +125,10 @@ func TestAccAWSCodeBuildReportGroup_export_s3(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"delete_reports"}, }, { Config: testAccAWSCodeBuildReportGroupS3ExportUpdatedConfig(rName), @@ -108,9 +167,10 @@ func TestAccAWSCodeBuildReportGroup_tags(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"delete_reports"}, }, { Config: testAccAWSCodeBuildReportGroupConfigTags2(rName, "key1", "value1updated", "key2", "value2"), @@ -133,6 +193,33 @@ func TestAccAWSCodeBuildReportGroup_tags(t *testing.T) { }) } +func TestAccAWSCodeBuildReportGroup_deleteReports(t *testing.T) { + var reportGroup codebuild.ReportGroup + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_codebuild_report_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCodeBuildReportGroup(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCodeBuildReportGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCodeBuildReportGroupDeleteReportsConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSCodeBuildReportGroupExists(resourceName, &reportGroup), + resource.TestCheckResourceAttr(resourceName, "name", rName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"delete_reports"}, + }, + }, + }) +} + func TestAccAWSCodeBuildReportGroup_disappears(t *testing.T) { var reportGroup codebuild.ReportGroup rName := acctest.RandomWithPrefix("tf-acc-test") @@ -179,22 +266,15 @@ func testAccCheckAWSCodeBuildReportGroupDestroy(s *terraform.State) error { continue } - resp, err := conn.BatchGetReportGroups(&codebuild.BatchGetReportGroupsInput{ - 
ReportGroupArns: aws.StringSlice([]string{rs.Primary.ID}), - }) + resp, err := finder.ReportGroupByArn(conn, rs.Primary.ID) if err != nil { return err } - if len(resp.ReportGroups) == 0 { - return nil + if resp != nil { + return fmt.Errorf("Found Report Group %s", rs.Primary.ID) } - for _, reportGroup := range resp.ReportGroups { - if rs.Primary.ID == aws.StringValue(reportGroup.Arn) { - return fmt.Errorf("Found Report Groups %s", rs.Primary.ID) - } - } } return nil } @@ -208,19 +288,16 @@ func testAccCheckAWSCodeBuildReportGroupExists(name string, reportGroup *codebui conn := testAccProvider.Meta().(*AWSClient).codebuildconn - resp, err := conn.BatchGetReportGroups(&codebuild.BatchGetReportGroupsInput{ - ReportGroupArns: aws.StringSlice([]string{rs.Primary.ID}), - }) + resp, err := finder.ReportGroupByArn(conn, rs.Primary.ID) if err != nil { return err } - if len(resp.ReportGroups) != 1 || - aws.StringValue(resp.ReportGroups[0].Arn) != rs.Primary.ID { + if resp == nil { return fmt.Errorf("Report Group %s not found", rs.Primary.ID) } - *reportGroup = *resp.ReportGroups[0] + *reportGroup = *resp return nil } @@ -348,3 +425,17 @@ resource "aws_codebuild_report_group" "test" { } `, rName, tagKey1, tagValue1, tagKey2, tagValue2) } + +func testAccAWSCodeBuildReportGroupDeleteReportsConfig(rName string) string { + return fmt.Sprintf(` +resource "aws_codebuild_report_group" "test" { + name = %[1]q + type = "TEST" + delete_reports = true + + export_config { + type = "NO_EXPORT" + } +} +`, rName) +} diff --git a/website/docs/r/codebuild_report_group.html.markdown b/website/docs/r/codebuild_report_group.html.markdown index 6c120fc5410..952471015bf 100644 --- a/website/docs/r/codebuild_report_group.html.markdown +++ b/website/docs/r/codebuild_report_group.html.markdown @@ -65,6 +65,7 @@ The following arguments are supported: * `name` - (Required) The name of a Report Group. * `type` - (Required) The type of the Report Group. Valid value are `TEST` and `CODE_COVERAGE`. * `export_config` - (Required) Information about the destination where the raw data of this Report Group is exported. see [Export Config](#export-config) documented below. +* `delete_reports` - (Optional) If `true`, deletes any reports that belong to a report group before deleting the report group. If `false`, you must delete any reports in the report group before deleting it. Default value is `false`. 
* `tags` - (Optional) Key-value mapping of resource tags ### Export Config From a5544f295a7c6bcd40d7a7d36a29a37a9b1e5814 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 11 Feb 2021 14:15:03 -0500 Subject: [PATCH 1156/1212] resource/iam_instance_profile: Clean up --- aws/resource_aws_iam_instance_profile.go | 34 ++++++++----------- .../docs/r/iam_instance_profile.html.markdown | 21 +++++------- 2 files changed, 23 insertions(+), 32 deletions(-) diff --git a/aws/resource_aws_iam_instance_profile.go b/aws/resource_aws_iam_instance_profile.go index 9acb181274c..33e15b2a8bc 100644 --- a/aws/resource_aws_iam_instance_profile.go +++ b/aws/resource_aws_iam_instance_profile.go @@ -28,17 +28,10 @@ func resourceAwsIamInstanceProfile() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "create_date": { Type: schema.TypeString, Computed: true, }, - - "unique_id": { - Type: schema.TypeString, - Computed: true, - }, - "name": { Type: schema.TypeString, Optional: true, @@ -50,7 +43,6 @@ func resourceAwsIamInstanceProfile() *schema.Resource { validation.StringMatch(regexp.MustCompile(`^[\w+=,.@-]*$`), "must match [\\w+=,.@-]"), ), }, - "name_prefix": { Type: schema.TypeString, Optional: true, @@ -61,18 +53,20 @@ func resourceAwsIamInstanceProfile() *schema.Resource { validation.StringMatch(regexp.MustCompile(`^[\w+=,.@-]*$`), "must match [\\w+=,.@-]"), ), }, - "path": { Type: schema.TypeString, Optional: true, Default: "/", ForceNew: true, }, - "role": { Type: schema.TypeString, Optional: true, }, + "unique_id": { + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -100,7 +94,7 @@ func resourceAwsIamInstanceProfileCreate(d *schema.ResourceData, meta interface{ err = instanceProfileReadResult(d, response.InstanceProfile) } if err != nil { - return fmt.Errorf("Error creating IAM instance profile %s: %w", name, err) + return fmt.Errorf("creating IAM instance profile %s: %w", name, err) } waiterRequest := &iam.GetInstanceProfileInput{ @@ -111,7 +105,7 @@ func resourceAwsIamInstanceProfileCreate(d *schema.ResourceData, meta interface{ // that the instance profile exists. 
	err = conn.WaitUntilInstanceProfileExists(waiterRequest)
 	if err != nil {
-		return fmt.Errorf("Timed out while waiting for instance profile %s: %w", name, err)
+		return fmt.Errorf("timed out while waiting for instance profile %s: %w", name, err)
 	}

 	return resourceAwsIamInstanceProfileUpdate(d, meta)
@@ -141,7 +135,7 @@ func instanceProfileAddRole(conn *iam.IAM, profileName, roleName string) error {
 		_, err = conn.AddRoleToInstanceProfile(request)
 	}
 	if err != nil {
-		return fmt.Errorf("Error adding IAM Role %s to Instance Profile %s: %w", roleName, profileName, err)
+		return fmt.Errorf("adding IAM Role %s to Instance Profile %s: %w", roleName, profileName, err)
 	}

 	return err
@@ -164,7 +158,7 @@ func instanceProfileRemoveAllRoles(d *schema.ResourceData, conn *iam.IAM) error
 	if role, ok := d.GetOk("role"); ok {
 		err := instanceProfileRemoveRole(conn, d.Id(), role.(string))
 		if err != nil {
-			return fmt.Errorf("Error removing role %s from IAM instance profile %s: %w", role, d.Id(), err)
+			return fmt.Errorf("removing role %s from IAM instance profile %s: %w", role, d.Id(), err)
 		}
 	}

@@ -180,14 +174,14 @@ func resourceAwsIamInstanceProfileUpdate(d *schema.ResourceData, meta interface{
 		if oldRole.(string) != "" {
 			err := instanceProfileRemoveRole(conn, d.Id(), oldRole.(string))
 			if err != nil {
-				return fmt.Errorf("Error removing role %s to IAM instance profile %s: %w", oldRole.(string), d.Id(), err)
+				return fmt.Errorf("removing role %s from IAM instance profile %s: %w", oldRole.(string), d.Id(), err)
 			}
 		}

 		if newRole.(string) != "" {
 			err := instanceProfileAddRole(conn, d.Id(), newRole.(string))
 			if err != nil {
-				return fmt.Errorf("Error adding role %s to IAM instance profile %s: %w", newRole.(string), d.Id(), err)
+				return fmt.Errorf("adding role %s to IAM instance profile %s: %w", newRole.(string), d.Id(), err)
 			}
 		}
 	}
@@ -209,7 +203,7 @@ func resourceAwsIamInstanceProfileRead(d *schema.ResourceData, meta interface{})
 		return nil
 	}
 	if err != nil {
-		return fmt.Errorf("Error reading IAM instance profile %s: %w", d.Id(), err)
+		return fmt.Errorf("reading IAM instance profile %s: %w", d.Id(), err)
 	}

 	instanceProfile := result.InstanceProfile
@@ -224,10 +218,10 @@ func resourceAwsIamInstanceProfileRead(d *schema.ResourceData, meta interface{})
 			if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") {
 				err := instanceProfileRemoveRole(conn, d.Id(), roleName)
 				if err != nil {
-					return fmt.Errorf("Error removing role %s to IAM instance profile %s: %w", roleName, d.Id(), err)
+					return fmt.Errorf("removing role %s from IAM instance profile %s: %w", roleName, d.Id(), err)
 				}
 			}
-			return fmt.Errorf("Error reading IAM Role %s attcahed to IAM Instance Profile %s: %w", roleName, d.Id(), err)
+			return fmt.Errorf("reading IAM Role %s attached to IAM Instance Profile %s: %w", roleName, d.Id(), err)
 		}
 	}
@@ -249,7 +243,7 @@ func resourceAwsIamInstanceProfileDelete(d *schema.ResourceData, meta interface{
 		if isAWSErr(err, iam.ErrCodeNoSuchEntityException, "") {
 			return nil
 		}
-		return fmt.Errorf("Error deleting IAM instance profile %s: %w", d.Id(), err)
+		return fmt.Errorf("deleting IAM instance profile %s: %w", d.Id(), err)
 	}

 	return nil
diff --git a/website/docs/r/iam_instance_profile.html.markdown b/website/docs/r/iam_instance_profile.html.markdown
index b1fc14ef3cf..5a97590858b 100644
--- a/website/docs/r/iam_instance_profile.html.markdown
+++ b/website/docs/r/iam_instance_profile.html.markdown
@@ -42,24 +42,21 @@ EOF

 ## Argument Reference

-The following arguments are supported:
+The following arguments are optional:

-* `name` - 
(Optional, Forces new resource) The profile's name. If omitted, Terraform will assign a random, unique name. +* `name` - (Optional, Forces new resource) Name of the instance profile. If omitted, Terraform will assign a random, unique name. Conflicts with `name_prefix`. Can be a string of characters consisting of upper and lowercase alphanumeric characters and these special characters: `_`, `+`, `=`, `,`, `.`, `@`, `-`. Spaces are not allowed. * `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. -* `path` - (Optional, default "/") Path in which to create the profile. -* `role` - (Optional) The role name to include in the profile. +* `path` - (Optional, default "/") Path to the instance profile. For more information about paths, see [IAM Identifiers](https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) in the IAM User Guide. Can be a string of characters consisting of either a forward slash (`/`) by itself or a string that must begin and end with forward slashes. Can include any ASCII character from the ! (\u0021) through the DEL character (\u007F), including most punctuation characters, digits, and upper and lowercase letters. +* `role` - (Optional) Name of the role to add to the profile. ## Attributes Reference -In addition to all arguments above, the following attributes are exported: +In addition to the arguments above, the following attributes are exported: -* `id` - The instance profile's ID. -* `arn` - The ARN assigned by AWS to the instance profile. -* `create_date` - The creation timestamp of the instance profile. -* `name` - The instance profile's name. -* `path` - The path of the instance profile in IAM. -* `role` - The role assigned to the instance profile. -* `unique_id` - The [unique ID][1] assigned by AWS. +* `arn` - ARN assigned by AWS to the instance profile. +* `create_date` - Creation timestamp of the instance profile. +* `id` - Instance profile's ID. +* `unique_id` - [Unique ID][1] assigned by AWS. 
[1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#GUIDs From 8cb94555763099278654b6a554e231d00aed0130 Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Thu, 11 Feb 2021 21:42:01 +0200 Subject: [PATCH 1157/1212] resource/aws_lb_listener_certificate: Add import support (#16474) Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAwsLbListenerCertificate_disappears (170.83s) --- PASS: TestAccAwsLbListenerCertificate_basic (176.25s) --- PASS: TestAccAwsLbListenerCertificate_multiple (234.15s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- PASS: TestAccAwsLbListenerCertificate_disappears (155.87s) --- PASS: TestAccAwsLbListenerCertificate_basic (159.04s) --- PASS: TestAccAwsLbListenerCertificate_multiple (251.86s) ``` --- .changelog/16474.txt | 3 ++ aws/internal/service/elbv2/id.go | 23 ++++++++ aws/resource_aws_lb_listener_certificate.go | 54 ++++++++++++------- ...source_aws_lb_listener_certificate_test.go | 34 ++++++++++++ .../r/lb_listener_certificate.html.markdown | 14 +++++ 5 files changed, 110 insertions(+), 18 deletions(-) create mode 100644 .changelog/16474.txt create mode 100644 aws/internal/service/elbv2/id.go diff --git a/.changelog/16474.txt b/.changelog/16474.txt new file mode 100644 index 00000000000..1b05df7f8d9 --- /dev/null +++ b/.changelog/16474.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_lb_listener_certificate: Add import support +``` diff --git a/aws/internal/service/elbv2/id.go b/aws/internal/service/elbv2/id.go new file mode 100644 index 00000000000..34ff840e676 --- /dev/null +++ b/aws/internal/service/elbv2/id.go @@ -0,0 +1,23 @@ +package elbv2 + +import ( + "fmt" + "strings" +) + +const listenerCertificateIDSeparator = "_" + +func ListenerCertificateParseID(id string) (string, string, error) { + parts := strings.Split(id, listenerCertificateIDSeparator) + if len(parts) == 2 && parts[0] != "" && parts[1] != "" { + return parts[0], parts[1], nil + } + + return "", "", + fmt.Errorf("unexpected format for ID (%q), expected listener-arn"+listenerCertificateIDSeparator+ + "certificate-arn", id) +} + +func ListenerCertificateCreateID(listenerArn, certificateArn string) string { + return strings.Join([]string{listenerArn, listenerCertificateIDSeparator, certificateArn}, "") +} diff --git a/aws/resource_aws_lb_listener_certificate.go b/aws/resource_aws_lb_listener_certificate.go index bb567f85e08..03c13532406 100644 --- a/aws/resource_aws_lb_listener_certificate.go +++ b/aws/resource_aws_lb_listener_certificate.go @@ -9,6 +9,7 @@ import ( "github.com/aws/aws-sdk-go/service/elbv2" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + tfelbv2 "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/elbv2" ) func resourceAwsLbListenerCertificate() *schema.Resource { @@ -16,17 +17,22 @@ func resourceAwsLbListenerCertificate() *schema.Resource { Create: resourceAwsLbListenerCertificateCreate, Read: resourceAwsLbListenerCertificateRead, Delete: resourceAwsLbListenerCertificateDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "listener_arn": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateArn, }, "certificate_arn": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: 
true, + ValidateFunc: validateArn, }, }, } @@ -35,16 +41,19 @@ func resourceAwsLbListenerCertificate() *schema.Resource { func resourceAwsLbListenerCertificateCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).elbv2conn + listenerArn := d.Get("listener_arn").(string) + certificateArn := d.Get("certificate_arn").(string) + params := &elbv2.AddListenerCertificatesInput{ - ListenerArn: aws.String(d.Get("listener_arn").(string)), + ListenerArn: aws.String(listenerArn), Certificates: []*elbv2.Certificate{ { - CertificateArn: aws.String(d.Get("certificate_arn").(string)), + CertificateArn: aws.String(certificateArn), }, }, } - log.Printf("[DEBUG] Adding certificate: %s of listener: %s", d.Get("certificate_arn").(string), d.Get("listener_arn").(string)) + log.Printf("[DEBUG] Adding certificate: %s of listener: %s", certificateArn, listenerArn) err := resource.Retry(1*time.Minute, func() *resource.RetryError { _, err := conn.AddListenerCertificates(params) @@ -66,10 +75,10 @@ func resourceAwsLbListenerCertificateCreate(d *schema.ResourceData, meta interfa } if err != nil { - return fmt.Errorf("error adding LB Listener Certificate: %s", err) + return fmt.Errorf("error adding LB Listener Certificate: %w", err) } - d.SetId(d.Get("listener_arn").(string) + "_" + d.Get("certificate_arn").(string)) + d.SetId(tfelbv2.ListenerCertificateCreateID(listenerArn, certificateArn)) return resourceAwsLbListenerCertificateRead(d, meta) } @@ -77,13 +86,15 @@ func resourceAwsLbListenerCertificateCreate(d *schema.ResourceData, meta interfa func resourceAwsLbListenerCertificateRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).elbv2conn - certificateArn := d.Get("certificate_arn").(string) - listenerArn := d.Get("listener_arn").(string) + listenerArn, certificateArn, err := tfelbv2.ListenerCertificateParseID(d.Id()) + if err != nil { + return fmt.Errorf("error parsing ELBv2 Listener Certificate ID (%s): %w", d.Id(), err) + } log.Printf("[DEBUG] Reading certificate: %s of listener: %s", certificateArn, listenerArn) var certificate *elbv2.Certificate - err := resource.Retry(1*time.Minute, func() *resource.RetryError { + err = resource.Retry(1*time.Minute, func() *resource.RetryError { var err error certificate, err = findAwsLbListenerCertificate(certificateArn, listenerArn, true, nil, conn) if err != nil { @@ -112,18 +123,25 @@ func resourceAwsLbListenerCertificateRead(d *schema.ResourceData, meta interface return err } + d.Set("certificate_arn", certificateArn) + d.Set("listener_arn", listenerArn) + return nil } func resourceAwsLbListenerCertificateDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).elbv2conn - log.Printf("[DEBUG] Deleting certificate: %s of listener: %s", d.Get("certificate_arn").(string), d.Get("listener_arn").(string)) + + certificateArn := d.Get("certificate_arn").(string) + listenerArn := d.Get("listener_arn").(string) + + log.Printf("[DEBUG] Deleting certificate: %s of listener: %s", certificateArn, listenerArn) params := &elbv2.RemoveListenerCertificatesInput{ - ListenerArn: aws.String(d.Get("listener_arn").(string)), + ListenerArn: aws.String(listenerArn), Certificates: []*elbv2.Certificate{ { - CertificateArn: aws.String(d.Get("certificate_arn").(string)), + CertificateArn: aws.String(certificateArn), }, }, } @@ -136,7 +154,7 @@ func resourceAwsLbListenerCertificateDelete(d *schema.ResourceData, meta interfa if isAWSErr(err, elbv2.ErrCodeListenerNotFoundException, "") { return nil } - return 
fmt.Errorf("Error removing LB Listener Certificate: %s", err) + return fmt.Errorf("Error removing LB Listener Certificate: %w", err) } return nil diff --git a/aws/resource_aws_lb_listener_certificate_test.go b/aws/resource_aws_lb_listener_certificate_test.go index 330c93f20ec..ff6a71320e6 100644 --- a/aws/resource_aws_lb_listener_certificate_test.go +++ b/aws/resource_aws_lb_listener_certificate_test.go @@ -33,6 +33,11 @@ func TestAccAwsLbListenerCertificate_basic(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "listener_arn", lbListenerResourceName, "arn"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -46,6 +51,7 @@ func TestAccAwsLbListenerCertificate_multiple(t *testing.T) { } rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_lb_listener_certificate.default" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -66,6 +72,11 @@ func TestAccAwsLbListenerCertificate_multiple(t *testing.T) { resource.TestCheckResourceAttrSet("aws_lb_listener_certificate.additional_2", "certificate_arn"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccLbListenerCertificateConfigMultipleAddNew(rName, keys, certificates), Check: resource.ComposeTestCheckFunc( @@ -102,6 +113,29 @@ func TestAccAwsLbListenerCertificate_multiple(t *testing.T) { }) } +func TestAccAwsLbListenerCertificate_disappears(t *testing.T) { + key := tlsRsaPrivateKeyPem(2048) + certificate := tlsRsaX509SelfSignedCertificatePem(key, "example.com") + resourceName := "aws_lb_listener_certificate.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsLbListenerCertificateDestroy, + Steps: []resource.TestStep{ + { + Config: testAccLbListenerCertificateConfig(rName, key, certificate), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsLbListenerCertificateExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsLbListenerCertificate(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func testAccCheckAwsLbListenerCertificateDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).elbv2conn diff --git a/website/docs/r/lb_listener_certificate.html.markdown b/website/docs/r/lb_listener_certificate.html.markdown index f0d5225414e..e3c9babf459 100644 --- a/website/docs/r/lb_listener_certificate.html.markdown +++ b/website/docs/r/lb_listener_certificate.html.markdown @@ -41,3 +41,17 @@ The following arguments are supported: * `listener_arn` - (Required, Forces New Resource) The ARN of the listener to which to attach the certificate. * `certificate_arn` - (Required, Forces New Resource) The ARN of the certificate to attach to the listener. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - The `listener_arn` and `certificate_arn` separated by a `_`. + +## Import + +Listener Certificates can be imported using their id, e.g. 
+ +``` +$ terraform import aws_lb_listener_certificate.example arn:aws:elasticloadbalancing:us-west-2:123456789012:listener/app/test/8e4497da625e2d8a/9ab28ade35828f96/67b3d2d36dd7c26b_arn:aws:iam::123456789012:server-certificate/tf-acc-test-6453083910015726063 +``` From 10954b321d829f2fae781604a0b09701e94d9ccb Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Thu, 11 Feb 2021 21:48:03 +0200 Subject: [PATCH 1158/1212] resource/aws_efs_access_point: Refactor waiter to separate package (#15265) Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAWSEFSAccessPoint_basic (37.59s) --- PASS: TestAccAWSEFSAccessPoint_root_directory (47.35s) --- PASS: TestAccAWSEFSAccessPoint_posix_user_secondary_gids (51.50s) --- PASS: TestAccAWSEFSAccessPoint_tags (56.90s) --- PASS: TestAccAWSEFSAccessPoint_disappears (70.55s) --- PASS: TestAccAWSEFSAccessPoint_posix_user (73.11s) --- PASS: TestAccAWSEFSAccessPoint_root_directory_creation_info (80.01s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- PASS: TestAccAWSEFSAccessPoint_basic (41.39s) --- PASS: TestAccAWSEFSAccessPoint_posix_user (45.24s) --- PASS: TestAccAWSEFSAccessPoint_root_directory_creation_info (50.31s) --- PASS: TestAccAWSEFSAccessPoint_posix_user_secondary_gids (52.55s) --- PASS: TestAccAWSEFSAccessPoint_tags (83.77s) --- PASS: TestAccAWSEFSAccessPoint_disappears (84.26s) --- PASS: TestAccAWSEFSAccessPoint_root_directory (92.01s) ``` --- aws/internal/service/efs/waiter/status.go | 30 ++++++++ aws/internal/service/efs/waiter/waiter.go | 50 ++++++++++++ aws/resource_aws_efs_access_point.go | 92 ++++------------------- aws/resource_aws_efs_access_point_test.go | 28 +++---- 4 files changed, 108 insertions(+), 92 deletions(-) create mode 100644 aws/internal/service/efs/waiter/status.go create mode 100644 aws/internal/service/efs/waiter/waiter.go diff --git a/aws/internal/service/efs/waiter/status.go b/aws/internal/service/efs/waiter/status.go new file mode 100644 index 00000000000..d48fb58c92b --- /dev/null +++ b/aws/internal/service/efs/waiter/status.go @@ -0,0 +1,30 @@ +package waiter + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/efs" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +// AccessPointLifeCycleState fetches the Access Point and its LifecycleState +func AccessPointLifeCycleState(conn *efs.EFS, accessPointId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + input := &efs.DescribeAccessPointsInput{ + AccessPointId: aws.String(accessPointId), + } + + output, err := conn.DescribeAccessPoints(input) + + if err != nil { + return nil, "", err + } + + if output == nil || len(output.AccessPoints) == 0 || output.AccessPoints[0] == nil { + return nil, "", nil + } + + mt := output.AccessPoints[0] + + return mt, aws.StringValue(mt.LifeCycleState), nil + } +} diff --git a/aws/internal/service/efs/waiter/waiter.go b/aws/internal/service/efs/waiter/waiter.go new file mode 100644 index 00000000000..71db067975a --- /dev/null +++ b/aws/internal/service/efs/waiter/waiter.go @@ -0,0 +1,50 @@ +package waiter + +import ( + "time" + + "github.com/aws/aws-sdk-go/service/efs" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const ( + // Maximum amount of time to wait for an Operation to return Success + AccessPointCreatedTimeout = 10 * time.Minute + AccessPointDeletedTimeout = 10 * time.Minute +) + +// AccessPointCreated waits for an Operation to return Success +func AccessPointCreated(conn *efs.EFS, 
accessPointId string) (*efs.AccessPointDescription, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{efs.LifeCycleStateCreating}, + Target: []string{efs.LifeCycleStateAvailable}, + Refresh: AccessPointLifeCycleState(conn, accessPointId), + Timeout: AccessPointCreatedTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*efs.AccessPointDescription); ok { + return output, err + } + + return nil, err +} + +// AccessPointDelete waits for an Access Point to return Deleted +func AccessPointDeleted(conn *efs.EFS, accessPointId string) (*efs.AccessPointDescription, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{efs.LifeCycleStateAvailable, efs.LifeCycleStateDeleting, efs.LifeCycleStateDeleted}, + Target: []string{}, + Refresh: AccessPointLifeCycleState(conn, accessPointId), + Timeout: AccessPointDeletedTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*efs.AccessPointDescription); ok { + return output, err + } + + return nil, err +} diff --git a/aws/resource_aws_efs_access_point.go b/aws/resource_aws_efs_access_point.go index b392192db4e..7776dd71e69 100644 --- a/aws/resource_aws_efs_access_point.go +++ b/aws/resource_aws_efs_access_point.go @@ -3,14 +3,13 @@ package aws import ( "fmt" "log" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/efs" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/efs/waiter" ) func resourceAwsEfsAccessPoint() *schema.Resource { @@ -143,40 +142,11 @@ func resourceAwsEfsAccessPointCreate(d *schema.ResourceData, meta interface{}) e } d.SetId(aws.StringValue(ap.AccessPointId)) - log.Printf("[INFO] EFS access point ID: %s", d.Id()) - - stateConf := &resource.StateChangeConf{ - Pending: []string{efs.LifeCycleStateCreating}, - Target: []string{efs.LifeCycleStateAvailable}, - Refresh: func() (interface{}, string, error) { - resp, err := conn.DescribeAccessPoints(&efs.DescribeAccessPointsInput{ - AccessPointId: aws.String(d.Id()), - }) - if err != nil { - return nil, "error", err - } - - if hasEmptyAccessPoints(resp) { - return nil, "error", fmt.Errorf("EFS access point %q could not be found.", d.Id()) - } - - mt := resp.AccessPoints[0] - - log.Printf("[DEBUG] Current status of %q: %q", aws.StringValue(mt.AccessPointId), aws.StringValue(mt.LifeCycleState)) - return mt, aws.StringValue(mt.LifeCycleState), nil - }, - Timeout: 10 * time.Minute, - Delay: 2 * time.Second, - MinTimeout: 3 * time.Second, - } - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for EFS access point (%s) to create: %s", d.Id(), err) + if _, err := waiter.AccessPointCreated(conn, d.Id()); err != nil { + return fmt.Errorf("error waiting for EFS access point (%s) to be available: %w", d.Id(), err) } - log.Printf("[DEBUG] EFS access point created: %s", *ap.AccessPointId) - return resourceAwsEfsAccessPointRead(d, meta) } @@ -187,7 +157,7 @@ func resourceAwsEfsAccessPointUpdate(d *schema.ResourceData, meta interface{}) e o, n := d.GetChange("tags") if err := keyvaluetags.EfsUpdateTags(conn, d.Id(), o, n); err != nil { - return fmt.Errorf("error updating EFS file system (%s) tags: %s", d.Id(), err) + return fmt.Errorf("error updating EFS file system (%s) 
tags: %w", d.Id(), err) } } @@ -207,7 +177,7 @@ func resourceAwsEfsAccessPointRead(d *schema.ResourceData, meta interface{}) err d.SetId("") return nil } - return fmt.Errorf("Error reading EFS access point %s: %s", d.Id(), err) + return fmt.Errorf("Error reading EFS access point %s: %w", d.Id(), err) } if hasEmptyAccessPoints(resp) { @@ -218,8 +188,6 @@ func resourceAwsEfsAccessPointRead(d *schema.ResourceData, meta interface{}) err log.Printf("[DEBUG] Found EFS access point: %#v", ap) - d.SetId(aws.StringValue(ap.AccessPointId)) - fsARN := arn.ARN{ AccountID: meta.(*AWSClient).accountid, Partition: meta.(*AWSClient).partition, @@ -234,15 +202,15 @@ func resourceAwsEfsAccessPointRead(d *schema.ResourceData, meta interface{}) err d.Set("owner_id", ap.OwnerId) if err := d.Set("posix_user", flattenEfsAccessPointPosixUser(ap.PosixUser)); err != nil { - return fmt.Errorf("error setting posix user: %s", err) + return fmt.Errorf("error setting posix user: %w", err) } if err := d.Set("root_directory", flattenEfsAccessPointRootDirectory(ap.RootDirectory)); err != nil { - return fmt.Errorf("error setting root directory: %s", err) + return fmt.Errorf("error setting root directory: %w", err) } if err := d.Set("tags", keyvaluetags.EfsKeyValueTags(ap.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %s", err) + return fmt.Errorf("error setting tags: %w", err) } return nil @@ -256,12 +224,17 @@ func resourceAwsEfsAccessPointDelete(d *schema.ResourceData, meta interface{}) e AccessPointId: aws.String(d.Id()), }) if err != nil { + if isAWSErr(err, efs.ErrCodeAccessPointNotFound, "") { + return nil + } return fmt.Errorf("error deleting EFS Access Point (%s): %w", d.Id(), err) } - err = waitForDeleteEfsAccessPoint(conn, d.Id(), 10*time.Minute) - if err != nil { - return fmt.Errorf("Error waiting for EFS access point (%q) to delete: %s", d.Id(), err.Error()) + if _, err := waiter.AccessPointDeleted(conn, d.Id()); err != nil { + if isAWSErr(err, efs.ErrCodeAccessPointNotFound, "") { + return nil + } + return fmt.Errorf("error waiting for EFS access point (%s) deletion: %w", d.Id(), err) } log.Printf("[DEBUG] EFS access point %q deleted.", d.Id()) @@ -269,39 +242,6 @@ func resourceAwsEfsAccessPointDelete(d *schema.ResourceData, meta interface{}) e return nil } -func waitForDeleteEfsAccessPoint(conn *efs.EFS, id string, timeout time.Duration) error { - stateConf := &resource.StateChangeConf{ - Pending: []string{efs.LifeCycleStateAvailable, efs.LifeCycleStateDeleting, efs.LifeCycleStateDeleted}, - Target: []string{}, - Refresh: func() (interface{}, string, error) { - resp, err := conn.DescribeAccessPoints(&efs.DescribeAccessPointsInput{ - AccessPointId: aws.String(id), - }) - if err != nil { - if isAWSErr(err, efs.ErrCodeAccessPointNotFound, "") { - return nil, "", nil - } - - return nil, "error", err - } - - if hasEmptyAccessPoints(resp) { - return nil, "", nil - } - - mt := resp.AccessPoints[0] - - log.Printf("[DEBUG] Current status of %q: %q", aws.StringValue(mt.AccessPointId), aws.StringValue(mt.LifeCycleState)) - return mt, aws.StringValue(mt.LifeCycleState), nil - }, - Timeout: timeout, - Delay: 2 * time.Second, - MinTimeout: 3 * time.Second, - } - _, err := stateConf.WaitForState() - return err -} - func hasEmptyAccessPoints(aps *efs.DescribeAccessPointsOutput) bool { if aps != nil && len(aps.AccessPoints) > 0 { return false diff --git a/aws/resource_aws_efs_access_point_test.go b/aws/resource_aws_efs_access_point_test.go index 
8588a5ff93c..878b36a1154 100644 --- a/aws/resource_aws_efs_access_point_test.go +++ b/aws/resource_aws_efs_access_point_test.go @@ -5,7 +5,6 @@ import ( "log" "regexp" "testing" - "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/efs" @@ -25,9 +24,10 @@ func init() { func testSweepEfsAccessPoints(region string) error { client, err := sharedClientForRegion(region) if err != nil { - return fmt.Errorf("error getting client: %s", err) + return fmt.Errorf("error getting client: %w", err) } conn := client.(*AWSClient).efsconn + var sweeperErrs *multierror.Error var errors error input := &efs.DescribeFileSystemsInput{} @@ -36,7 +36,6 @@ func testSweepEfsAccessPoints(region string) error { id := aws.StringValue(filesystem.FileSystemId) log.Printf("[INFO] Deleting access points for EFS File System: %s", id) - var errors error input := &efs.DescribeAccessPointsInput{ FileSystemId: filesystem.FileSystemId, } @@ -56,17 +55,14 @@ func testSweepEfsAccessPoints(region string) error { id := aws.StringValue(AccessPoint.AccessPointId) log.Printf("[INFO] Deleting EFS access point: %s", id) - _, err := conn.DeleteAccessPoint(&efs.DeleteAccessPointInput{ - AccessPointId: AccessPoint.AccessPointId, - }) - if err != nil { - errors = multierror.Append(errors, fmt.Errorf("error deleting EFS access point %q: %w", id, err)) - continue - } + r := resourceAwsEfsAccessPoint() + d := r.Data(nil) + d.SetId(id) + err := r.Delete(d, client) - err = waitForDeleteEfsAccessPoint(conn, id, 10*time.Minute) if err != nil { - errors = multierror.Append(errors, fmt.Errorf("error waiting for EFS access point %q to delete: %w", id, err)) + log.Printf("[ERROR] %s", err) + sweeperErrs = multierror.Append(sweeperErrs, err) continue } } @@ -83,7 +79,7 @@ func testSweepEfsAccessPoints(region string) error { errors = multierror.Append(errors, fmt.Errorf("error retrieving EFS File Systems: %w", err)) } - return errors + return sweeperErrs.ErrorOrNil() } func TestAccAWSEFSAccessPoint_basic(t *testing.T) { @@ -350,9 +346,9 @@ func testAccCheckEfsAccessPointExists(resourceID string, mount *efs.AccessPointD return err } - if *mt.AccessPoints[0].AccessPointId != fs.Primary.ID { - return fmt.Errorf("access point ID mismatch: %q != %q", - *mt.AccessPoints[0].AccessPointId, fs.Primary.ID) + apId := aws.StringValue(mt.AccessPoints[0].AccessPointId) + if apId != fs.Primary.ID { + return fmt.Errorf("access point ID mismatch: %q != %q", apId, fs.Primary.ID) } *mount = *mt.AccessPoints[0] From 5191dcfc937f3261d11d693a481496b8ffda495b Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 11 Feb 2021 15:00:19 -0500 Subject: [PATCH 1159/1212] Update aws/resource_aws_cloudwatch_log_group.go --- aws/resource_aws_cloudwatch_log_group.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_cloudwatch_log_group.go b/aws/resource_aws_cloudwatch_log_group.go index 21d85e617be..83794d87e22 100644 --- a/aws/resource_aws_cloudwatch_log_group.go +++ b/aws/resource_aws_cloudwatch_log_group.go @@ -44,7 +44,7 @@ func resourceAwsCloudWatchLogGroup() *schema.Resource { Type: schema.TypeInt, Optional: true, Default: 0, - ValidateFunc: validation.IntInSlice([]int{1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653}), + ValidateFunc: validation.IntInSlice([]int{0, 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653}), }, "kms_key_id": { From 59939faeba6e00c99ef1b8fff763bb1f4ff5084b Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 11 Feb 2021 15:05:13 -0500 Subject: [PATCH 
1160/1212] Add CHANGELOG for #14673 --- .changelog/14673.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/14673.txt diff --git a/.changelog/14673.txt b/.changelog/14673.txt new file mode 100644 index 00000000000..ead81998a18 --- /dev/null +++ b/.changelog/14673.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_cloudwatch_log_group: Add plan time validation for `retention_in_days` argument +``` From 84036357f9977900997f0b84263ce227d7f95688 Mon Sep 17 00:00:00 2001 From: 178inaba Date: Tue, 6 Nov 2018 02:24:30 +0900 Subject: [PATCH 1161/1212] Add validation for DynamoDB indexes --- aws/validators.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/aws/validators.go b/aws/validators.go index 14f80bb226b..5825cd6216e 100644 --- a/aws/validators.go +++ b/aws/validators.go @@ -2248,6 +2248,8 @@ func validateDynamoDbTableAttributes(d *schema.ResourceDiff) error { if _, ok := indexedAttributes[attrName]; !ok { missingAttrDefs = append(missingAttrDefs, attrName) + } else { + delete(indexedAttributes, attrName) } } @@ -2255,6 +2257,15 @@ func validateDynamoDbTableAttributes(d *schema.ResourceDiff) error { return fmt.Errorf("All attributes must be indexed. Unused attributes: %q", missingAttrDefs) } + if len(indexedAttributes) > 0 { + missingIndexes := []string{} + for index := range indexedAttributes { + missingIndexes = append(missingIndexes, index) + } + + return fmt.Errorf("All indexes must match a defined attribute. Unmatched indexes: %q", missingIndexes) + } + return nil } From 61e9b5afd411cbc22226ab4c0720784d57237a5e Mon Sep 17 00:00:00 2001 From: John Robison <39737211+jrobison-sb@users.noreply.github.com> Date: Thu, 11 Feb 2021 15:27:13 -0500 Subject: [PATCH 1162/1212] use dot-internal instead of dot-local --- website/docs/r/service_discovery_service.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/service_discovery_service.html.markdown b/website/docs/r/service_discovery_service.html.markdown index 6b691cde263..d1774671cff 100644 --- a/website/docs/r/service_discovery_service.html.markdown +++ b/website/docs/r/service_discovery_service.html.markdown @@ -20,7 +20,7 @@ resource "aws_vpc" "example" { } resource "aws_service_discovery_private_dns_namespace" "example" { - name = "example.terraform.local" + name = "example.terraform.internal" description = "example" vpc = aws_vpc.example.id } From 35cfe2c1da7a41d6dfd0900cf60c44cd05e8d413 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 11 Feb 2021 15:28:47 -0500 Subject: [PATCH 1163/1212] Add CHANGELOG for #14786 --- .changelog/14786.txt | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .changelog/14786.txt diff --git a/.changelog/14786.txt b/.changelog/14786.txt new file mode 100644 index 00000000000..1b631c1526f --- /dev/null +++ b/.changelog/14786.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +data-source/aws_customer_gateway: Add `device_name` attribute +``` + +```release-note:enhancement +resource/aws_customer_gateway: Add `device_name` argument +``` From 9e358da8711a49994885ba1ae63e0ecff392785e Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 11 Feb 2021 15:26:47 -0500 Subject: [PATCH 1164/1212] docs/codebuild_project: Clean up documentation --- .../docs/r/codebuild_project.html.markdown | 233 ++++++++++-------- 1 file changed, 125 insertions(+), 108 deletions(-) diff --git a/website/docs/r/codebuild_project.html.markdown b/website/docs/r/codebuild_project.html.markdown index 8de5b60e7a1..29a631841f5 100755 --- 
a/website/docs/r/codebuild_project.html.markdown +++ b/website/docs/r/codebuild_project.html.markdown @@ -220,141 +220,158 @@ resource "aws_codebuild_project" "project-with-cache" { ## Argument Reference -The following arguments are supported: +The following arguments are required: + +* `artifacts` - (Required) Configuration block. Detailed below. +* `environment` - (Required) Configuration block. Detailed below. +* `name` - (Required) Project's name. +* `source` - (Required) Configuration block. Detailed below. + +The following arguments are optional: -* `artifacts` - (Required) Information about the project's build output artifacts. Artifact blocks are documented below. -* `environment` - (Required) Information about the project's build environment. Environment blocks are documented below. -* `name` - (Required) The projects name. -* `source` - (Required) Information about the project's input source code. Source blocks are documented below. * `badge_enabled` - (Optional) Generates a publicly-accessible URL for the projects build badge. Available as `badge_url` attribute when enabled. -* `build_timeout` - (Optional) How long in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait until timing out any related build that does not get marked as completed. The default is 60 minutes. -* `queued_timeout` - (Optional) How long in minutes, from 5 to 480 (8 hours), a build is allowed to be queued before it times out. The default is 8 hours. -* `cache` - (Optional) Information about the cache storage for the project. Cache blocks are documented below. -* `description` - (Optional) A short description of the project. -* `encryption_key` - (Optional) The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build project's build output artifacts. -* `logs_config` - (Optional) Configuration for the builds to store log data to CloudWatch or S3. -* `service_role` - (Required) The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account. -* `source_version` - (Optional) A version of the build input to be built for this project. If not specified, the latest version is used. -* `tags` - (Optional) A map of tags to assign to the resource. -* `vpc_config` - (Optional) Configuration for the builds to run inside a VPC. VPC config blocks are documented below. -* `secondary_artifacts` - (Optional) A set of secondary artifacts to be used inside the build. Secondary artifacts blocks are documented below. -* `secondary_sources` - (Optional) A set of secondary sources to be used inside the build. Secondary sources blocks are documented below. - -`artifacts` supports the following: - -* `type` - (Required) The build output artifact's type. Valid values for this parameter are: `CODEPIPELINE`, `NO_ARTIFACTS` or `S3`. -* `artifact_identifier` - (Optional) The artifact identifier. Must be the same specified inside AWS CodeBuild buildspec. -* `encryption_disabled` - (Optional) If set to true, output artifacts will not be encrypted. If `type` is set to `NO_ARTIFACTS` then this value will be ignored. Defaults to `false`. -* `override_artifact_name` (Optional) If set to true, a name specified in the build spec file overrides the artifact name. -* `location` - (Optional) Information about the build output artifact location. If `type` is set to `CODEPIPELINE` or `NO_ARTIFACTS` then this value will be ignored. 
If `type` is set to `S3`, this is the name of the output bucket. -* `name` - (Optional) The name of the project. If `type` is set to `S3`, this is the name of the output artifact object -* `namespace_type` - (Optional) The namespace to use in storing build artifacts. If `type` is set to `S3`, then valid values for this parameter are: `BUILD_ID` or `NONE`. -* `packaging` - (Optional) The type of build output artifact to create. If `type` is set to `S3`, valid values for this parameter are: `NONE` or `ZIP` -* `path` - (Optional) If `type` is set to `S3`, this is the path to the output artifact - -`cache` supports the following: - -* `type` - (Optional) The type of storage that will be used for the AWS CodeBuild project cache. Valid values: `NO_CACHE`, `LOCAL`, and `S3`. Defaults to `NO_CACHE`. -* `location` - (Required when cache type is `S3`) The location where the AWS CodeBuild project stores cached resources. For type `S3` the value must be a valid S3 bucket name/prefix. -* `modes` - (Required when cache type is `LOCAL`) Specifies settings that AWS CodeBuild uses to store and reuse build dependencies. Valid values: `LOCAL_SOURCE_CACHE`, `LOCAL_DOCKER_LAYER_CACHE`, and `LOCAL_CUSTOM_CACHE` - -`environment` supports the following: - -* `compute_type` - (Required) Information about the compute resources the build project will use. Available values for this parameter are: `BUILD_GENERAL1_SMALL`, `BUILD_GENERAL1_MEDIUM`, `BUILD_GENERAL1_LARGE` or `BUILD_GENERAL1_2XLARGE`. `BUILD_GENERAL1_SMALL` is only valid if `type` is set to `LINUX_CONTAINER`. When `type` is set to `LINUX_GPU_CONTAINER`, `compute_type` need to be `BUILD_GENERAL1_LARGE`. -* `image` - (Required) The Docker image to use for this build project. Valid values include [Docker images provided by CodeBuild](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-available.html) (e.g `aws/codebuild/standard:2.0`), [Docker Hub images](https://hub.docker.com/) (e.g. `hashicorp/terraform:latest`), and full Docker repository URIs such as those for ECR (e.g. `137112412989.dkr.ecr.us-west-2.amazonaws.com/amazonlinux:latest`). -* `type` - (Required) The type of build environment to use for related builds. Available values are: `LINUX_CONTAINER`, `LINUX_GPU_CONTAINER`, `WINDOWS_CONTAINER` (deprecated), `WINDOWS_SERVER_2019_CONTAINER` or `ARM_CONTAINER`. For additional information, see the [CodeBuild User Guide](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html). -* `image_pull_credentials_type` - (Optional) The type of credentials AWS CodeBuild uses to pull images in your build. Available values for this parameter are `CODEBUILD` or `SERVICE_ROLE`. When you use a cross-account or private registry image, you must use SERVICE_ROLE credentials. When you use an AWS CodeBuild curated image, you must use CODEBUILD credentials. Default to `CODEBUILD` -* `environment_variable` - (Optional) A set of environment variables to make available to builds for this build project. -* `privileged_mode` - (Optional) If set to true, enables running the Docker daemon inside a Docker container. Defaults to `false`. -* `certificate` - (Optional) The ARN of the S3 bucket, path prefix and object key that contains the PEM-encoded certificate. -* `registry_credential` - (Optional) Information about credentials for access to a private Docker registry. Registry Credential config blocks are documented below. - -`environment_variable` supports the following: - -* `name` - (Required) The environment variable's name or key. 
-* `value` - (Required) The environment variable's value. -* `type` - (Optional) The type of environment variable. Valid values: `PARAMETER_STORE`, `PLAINTEXT`, and `SECRETS_MANAGER`. - -`logs_config` supports the following: - -* `cloudwatch_logs` - (Optional) Configuration for the builds to store logs to CloudWatch -* `s3_logs` - (Optional) Configuration for the builds to store logs to S3. - -`cloudwatch_logs` supports the following: +* `build_timeout` - (Optional) Number of minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait until timing out any related build that does not get marked as completed. The default is 60 minutes. +* `cache` - (Optional) Configuration block. Detailed below. +* `description` - (Optional) Short description of the project. +* `encryption_key` - (Optional) AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build project's build output artifacts. +* `logs_config` - (Optional) Configuration block. Detailed below. +* `queued_timeout` - (Optional) Number of minutes, from 5 to 480 (8 hours), a build is allowed to be queued before it times out. The default is 8 hours. +* `secondary_artifacts` - (Optional) Configuration block. Detailed below. +* `secondary_sources` - (Optional) Configuration block. Detailed below. +* `service_role` - (Required) Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account. +* `source_version` - (Optional) Version of the build input to be built for this project. If not specified, the latest version is used. +* `tags` - (Optional) Map of tags to assign to the resource. +* `vpc_config` - (Optional) Configuration block. Detailed below. + +### artifacts + +* `artifact_identifier` - (Optional) Artifact identifier. Must be the same specified inside the AWS CodeBuild build specification. +* `encryption_disabled` - (Optional) Whether to disable encrypting output artifacts. If `type` is set to `NO_ARTIFACTS`, this value is ignored. Defaults to `false`. +* `location` - (Optional) Information about the build output artifact location. If `type` is set to `CODEPIPELINE` or `NO_ARTIFACTS`, this value is ignored. If `type` is set to `S3`, this is the name of the output bucket. +* `name` - (Optional) Name of the project. If `type` is set to `S3`, this is the name of the output artifact object +* `namespace_type` - (Optional) Namespace to use in storing build artifacts. If `type` is set to `S3`, then valid values are `BUILD_ID`, `NONE`. +* `override_artifact_name` (Optional) Whether a name specified in the build specification overrides the artifact name. +* `packaging` - (Optional) Type of build output artifact to create. If `type` is set to `S3`, valid values are `NONE`, `ZIP` +* `path` - (Optional) If `type` is set to `S3`, this is the path to the output artifact. +* `type` - (Required) Build output artifact's type. Valid values: `CODEPIPELINE`, `NO_ARTIFACTS`, `S3`. + +### cache + +* `location` - (Required when cache type is `S3`) Location where the AWS CodeBuild project stores cached resources. For type `S3`, the value must be a valid S3 bucket name/prefix. +* `modes` - (Required when cache type is `LOCAL`) Specifies settings that AWS CodeBuild uses to store and reuse build dependencies. Valid values: `LOCAL_SOURCE_CACHE`, `LOCAL_DOCKER_LAYER_CACHE`, `LOCAL_CUSTOM_CACHE`. +* `type` - (Optional) Type of storage that will be used for the AWS CodeBuild project cache. 
Valid values: `NO_CACHE`, `LOCAL`, `S3`. Defaults to `NO_CACHE`. + +### environment + +* `certificate` - (Optional) ARN of the S3 bucket, path prefix and object key that contains the PEM-encoded certificate. +* `compute_type` - (Required) Information about the compute resources the build project will use. Valid values: `BUILD_GENERAL1_SMALL`, `BUILD_GENERAL1_MEDIUM`, `BUILD_GENERAL1_LARGE`, `BUILD_GENERAL1_2XLARGE`. `BUILD_GENERAL1_SMALL` is only valid if `type` is set to `LINUX_CONTAINER`. When `type` is set to `LINUX_GPU_CONTAINER`, `compute_type` must be `BUILD_GENERAL1_LARGE`. +* `environment_variable` - (Optional) Configuration block. Detailed below. +* `image_pull_credentials_type` - (Optional) Type of credentials AWS CodeBuild uses to pull images in your build. Valid values: `CODEBUILD`, `SERVICE_ROLE`. When you use a cross-account or private registry image, you must use SERVICE_ROLE credentials. When you use an AWS CodeBuild curated image, you must use CodeBuild credentials. Defaults to `CODEBUILD`. +* `image` - (Required) Docker image to use for this build project. Valid values include [Docker images provided by CodeBuild](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-available.html) (e.g `aws/codebuild/standard:2.0`), [Docker Hub images](https://hub.docker.com/) (e.g. `hashicorp/terraform:latest`), and full Docker repository URIs such as those for ECR (e.g. `137112412989.dkr.ecr.us-west-2.amazonaws.com/amazonlinux:latest`). +* `privileged_mode` - (Optional) Whether to enable running the Docker daemon inside a Docker container. Defaults to `false`. +* `registry_credential` - (Optional) Configuration block. Detailed below. +* `type` - (Required) Type of build environment to use for related builds. Valid values: `LINUX_CONTAINER`, `LINUX_GPU_CONTAINER`, `WINDOWS_CONTAINER` (deprecated), `WINDOWS_SERVER_2019_CONTAINER`, `ARM_CONTAINER`. For additional information, see the [CodeBuild User Guide](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html). + +#### environment: environment_variable + +* `name` - (Required) Environment variable's name or key. +* `type` - (Optional) Type of environment variable. Valid values: `PARAMETER_STORE`, `PLAINTEXT`, `SECRETS_MANAGER`. +* `value` - (Required) Environment variable's value. + +#### environment: registry_credential +Credentials for access to a private Docker registry. + +* `credential` - (Required) ARN or name of credentials created using AWS Secrets Manager. +* `credential_provider` - (Required) Service that created the credentials to access a private Docker registry. Valid value: `SECRETS_MANAGER` (AWS Secrets Manager). + +### logs_config + +* `cloudwatch_logs` - (Optional) Configuration block. Detailed below. +* `s3_logs` - (Optional) Configuration block. Detailed below. + +#### logs_config: cloudwatch_logs + +* `group_name` - (Optional) Group name of the logs in CloudWatch Logs. * `status` - (Optional) Current status of logs in CloudWatch Logs for a build project. Valid values: `ENABLED`, `DISABLED`. Defaults to `ENABLED`. -* `group_name` - (Optional) The group name of the logs in CloudWatch Logs. -* `stream_name` - (Optional) The stream name of the logs in CloudWatch Logs. +* `stream_name` - (Optional) Stream name of the logs in CloudWatch Logs. -`s3_logs` supports the following: +#### logs_config: s3_logs +* `encryption_disabled` - (Optional) Whether to disable encrypting S3 logs. Defaults to `false`. 
+* `location` - (Optional) Name of the S3 bucket and the path prefix for S3 logs. Must be set if status is `ENABLED`, otherwise it must be empty. * `status` - (Optional) Current status of logs in S3 for a build project. Valid values: `ENABLED`, `DISABLED`. Defaults to `DISABLED`. -* `location` - (Optional) The name of the S3 bucket and the path prefix for S3 logs. Must be set if status is `ENABLED`, otherwise it must be empty. -* `encryption_disabled` - (Optional) Set to `true` if you do not want S3 logs encrypted. Defaults to `false`. -`source` supports the following: +### secondary_artifacts -* `type` - (Required) The type of repository that contains the source code to be built. Valid values for this parameter are: `CODECOMMIT`, `CODEPIPELINE`, `GITHUB`, `GITHUB_ENTERPRISE`, `BITBUCKET`, `S3` or `NO_SOURCE`. -* `auth` - (Optional) Information about the authorization settings for AWS CodeBuild to access the source code to be built. Auth blocks are documented below. -* `buildspec` - (Optional) The build spec declaration to use for this build project's related builds. This must be set when `type` is `NO_SOURCE`. -* `git_clone_depth` - (Optional) Truncate git history to this many commits. Use `0` for a `Full` checkout which you need to run commands like `git branch --show-current`. See [here](https://docs.aws.amazon.com/codepipeline/latest/userguide/tutorials-github-gitclone.html) for details. -* `git_submodules_config` - (Optional) Information about the Git submodules configuration for an AWS CodeBuild build project. Git submodules config blocks are documented below. This option is only valid when the `type` is `CODECOMMIT`, `GITHUB` or `GITHUB_ENTERPRISE`. +* `artifact_identifier` - (Required) Artifact identifier. Must be the same specified inside the AWS CodeBuild build specification. +* `encryption_disabled` - (Optional) Whether to disable encrypting output artifacts. If `type` is set to `NO_ARTIFACTS`, this value is ignored. Defaults to `false`. +* `location` - (Optional) Information about the build output artifact location. If `type` is set to `CODEPIPELINE` or `NO_ARTIFACTS`, this value is ignored. If `type` is set to `S3`, this is the name of the output bucket. If `path` is not also specified, then `location` can also specify the path of the output artifact in the output bucket. +* `name` - (Optional) Name of the project. If `type` is set to `S3`, this is the name of the output artifact object +* `namespace_type` - (Optional) Namespace to use in storing build artifacts. If `type` is set to `S3`, then valid values are `BUILD_ID` or `NONE`. +* `override_artifact_name` (Optional) Whether a name specified in the build specification overrides the artifact name. +* `packaging` - (Optional) Type of build output artifact to create. If `type` is set to `S3`, valid values are `NONE`, `ZIP` +* `path` - (Optional) If `type` is set to `S3`, this is the path to the output artifact. +* `type` - (Required) Build output artifact's type. The only valid value is `S3`. + +### secondary_sources + +* `auth` - (Optional) Configuration block. Detailed below. +* `buildspec` - (Optional) Build specification to use for this build project's related builds. +* `git_clone_depth` - (Optional) Truncate git history to this many commits. Use `0` for a `Full` checkout which you need to run commands like `git branch --show-current`. 
See [AWS CodePipeline User Guide: Tutorial: Use full clone with a GitHub pipeline source](https://docs.aws.amazon.com/codepipeline/latest/userguide/tutorials-github-gitclone.html) for details. +* `git_submodules_config` - (Optional) Configuration block. Detailed below. * `insecure_ssl` - (Optional) Ignore SSL warnings when connecting to source control. -* `location` - (Optional) The location of the source code from git or s3. -* `report_build_status` - (Optional) Set to `true` to report the status of a build's start and finish to your source provider. This option is only valid when the `type` is `BITBUCKET` or `GITHUB`. +* `location` - (Optional) Location of the source code from git or s3. +* `report_build_status` - (Optional) Whether to report the status of a build's start and finish to your source provider. This option is only valid when your source provider is `GITHUB`, `BITBUCKET`, or `GITHUB_ENTERPRISE`. +* `source_identifier` - (Required) Source identifier. Source data will be put inside a folder named as this parameter inside AWS CodeBuild source directory +* `type` - (Required) Type of repository that contains the source code to be built. Valid values: `CODECOMMIT`, `CODEPIPELINE`, `GITHUB`, `GITHUB_ENTERPRISE`, `BITBUCKET` or `S3`. +#### secondary_sources: auth -`auth` supports the following: +* `resource` - (Optional) Resource value that applies to the specified authorization type. +* `type` - (Required) Authorization type to use. The only valid value is `OAUTH`. -* `type` - (Required) The authorization type to use. The only valid value is `OAUTH` -* `resource` - (Optional) The resource value that applies to the specified authorization type. +#### secondary_sources: git_submodules_config -`git_submodules_config` supports the following: +This block is only valid when the `type` is `CODECOMMIT`, `GITHUB` or `GITHUB_ENTERPRISE`. -* `fetch_submodules` - (Required) If set to true, fetches Git submodules for the AWS CodeBuild build project. +* `fetch_submodules` - (Required) Whether to fetch Git submodules for the AWS CodeBuild build project. -`vpc_config` supports the following: +### source -* `security_group_ids` - (Required) The security group IDs to assign to running builds. -* `subnets` - (Required) The subnet IDs within which to run builds. -* `vpc_id` - (Required) The ID of the VPC within which to run builds. +* `auth` - (Optional) Configuration block. Detailed below. +* `buildspec` - (Optional) Build specification to use for this build project's related builds. This must be set when `type` is `NO_SOURCE`. +* `git_clone_depth` - (Optional) Truncate git history to this many commits. Use `0` for a `Full` checkout which you need to run commands like `git branch --show-current`. See [AWS CodePipeline User Guide: Tutorial: Use full clone with a GitHub pipeline source](https://docs.aws.amazon.com/codepipeline/latest/userguide/tutorials-github-gitclone.html) for details. +* `git_submodules_config` - (Optional) Configuration block. Detailed below. +* `insecure_ssl` - (Optional) Ignore SSL warnings when connecting to source control. +* `location` - (Optional) Location of the source code from git or s3. +* `report_build_status` - (Optional) Whether to report the status of a build's start and finish to your source provider. This option is only valid when the `type` is `BITBUCKET` or `GITHUB`. +* `type` - (Required) Type of repository that contains the source code to be built. Valid values: `CODECOMMIT`, `CODEPIPELINE`, `GITHUB`, `GITHUB_ENTERPRISE`, `BITBUCKET`, `S3`, `NO_SOURCE`. 
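To make the `source` arguments above concrete, here is a minimal sketch of a project wiring them together (the project name, IAM role reference, and repository URL are illustrative assumptions, not part of this patch):

```hcl
resource "aws_codebuild_project" "example" {
  name         = "example-project"        # illustrative name
  service_role = aws_iam_role.example.arn # assumes an IAM role defined elsewhere

  artifacts {
    type = "NO_ARTIFACTS"
  }

  environment {
    compute_type = "BUILD_GENERAL1_SMALL"
    image        = "aws/codebuild/standard:2.0"
    type         = "LINUX_CONTAINER"
  }

  source {
    type            = "GITHUB"
    location        = "https://github.com/example/example.git" # illustrative URL
    git_clone_depth = 1

    git_submodules_config {
      fetch_submodules = true
    }
  }
}
```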
-`registry_credential` supports the following: +#### source: auth -* `credential` - (Required) The Amazon Resource Name (ARN) or name of credentials created using AWS Secrets Manager. -* `credential_provider` - (Required) The service that created the credentials to access a private Docker registry. The valid value, SECRETS_MANAGER, is for AWS Secrets Manager. +* `resource` - (Optional) Resource value that applies to the specified authorization type. +* `type` - (Required) Authorization type to use. The only valid value is `OAUTH`. -`secondary_artifacts` supports the following: +#### source: git_submodules_config -* `type` - (Required) The build output artifact's type. The only valid value is `S3`. -* `artifact_identifier` - (Required) The artifact identifier. Must be the same specified inside AWS CodeBuild buildspec. -* `encryption_disabled` - (Optional) If set to true, output artifacts will not be encrypted. If `type` is set to `NO_ARTIFACTS` then this value will be ignored. Defaults to `false`. -* `override_artifact_name` (Optional) If set to true, a name specified in the build spec file overrides the artifact name. -* `location` - (Optional) Information about the build output artifact location. If `type` is set to `CODEPIPELINE` or `NO_ARTIFACTS` then this value will be ignored. If `type` is set to `S3`, this is the name of the output bucket. If `path` is not also specified, then `location` can also specify the path of the output artifact in the output bucket. -* `name` - (Optional) The name of the project. If `type` is set to `S3`, this is the name of the output artifact object -* `namespace_type` - (Optional) The namespace to use in storing build artifacts. If `type` is set to `S3`, then valid values for this parameter are: `BUILD_ID` or `NONE`. -* `packaging` - (Optional) The type of build output artifact to create. If `type` is set to `S3`, valid values for this parameter are: `NONE` or `ZIP` -* `path` - (Optional) If `type` is set to `S3`, this is the path to the output artifact +This block is only valid when the `type` is `CODECOMMIT`, `GITHUB` or `GITHUB_ENTERPRISE`. -`secondary_sources` supports the following: +* `fetch_submodules` - (Required) Whether to fetch Git submodules for the AWS CodeBuild build project. -* `type` - (Required) The type of repository that contains the source code to be built. Valid values for this parameter are: `CODECOMMIT`, `CODEPIPELINE`, `GITHUB`, `GITHUB_ENTERPRISE`, `BITBUCKET` or `S3`. -* `source_identifier` - (Required) The source identifier. Source data will be put inside a folder named as this parameter inside AWS CodeBuild source directory -* `auth` - (Optional) Information about the authorization settings for AWS CodeBuild to access the source code to be built. Auth blocks are documented below. -* `buildspec` - (Optional) The build spec declaration to use for this build project's related builds. -* `git_clone_depth` - (Optional) Truncate git history to this many commits. -* `git_submodules_config` - (Optional) Information about the Git submodules configuration for an AWS CodeBuild build project. Git submodules config blocks are documented below. This option is only valid when the `type` is `CODECOMMIT`, `GITHUB` or `GITHUB_ENTERPRISE`. -* `insecure_ssl` - (Optional) Ignore SSL warnings when connecting to source control. -* `location` - (Optional) The location of the source code from git or s3. -* `report_build_status` - (Optional) Set to `true` to report the status of a build's start and finish to your source provider. 
This option is only valid when your source provider is `GITHUB`, `BITBUCKET`, or `GITHUB_ENTERPRISE`. +### vpc_config + +* `security_group_ids` - (Required) Security group IDs to assign to running builds. +* `subnets` - (Required) Subnet IDs within which to run builds. +* `vpc_id` - (Required) ID of the VPC within which to run builds. ## Attributes Reference -In addition to all arguments above, the following attributes are exported: +In addition to the arguments above, the following attributes are exported: -* `id` - The name (if imported via `name`) or ARN (if created via Terraform or imported via ARN) of the CodeBuild project. -* `arn` - The ARN of the CodeBuild project. -* `badge_url` - The URL of the build badge when `badge_enabled` is enabled. +* `arn` - ARN of the CodeBuild project. +* `badge_url` - URL of the build badge when `badge_enabled` is enabled. +* `id` - Name (if imported via `name`) or ARN (if created via Terraform or imported via ARN) of the CodeBuild project. ## Import From 300f64e920b0f4a4c431697ec5315889d4e7baa3 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 11 Feb 2021 15:42:25 -0500 Subject: [PATCH 1165/1212] CR updates; service and method naming --- ...urce_aws_route53_resolver_dnssec_config.go | 34 +++++++++---------- ...aws_route53_resolver_dnssec_config_test.go | 12 +++---- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/aws/resource_aws_route53_resolver_dnssec_config.go b/aws/resource_aws_route53_resolver_dnssec_config.go index 07ada395cb9..70289a1d62b 100644 --- a/aws/resource_aws_route53_resolver_dnssec_config.go +++ b/aws/resource_aws_route53_resolver_dnssec_config.go @@ -60,10 +60,10 @@ func resourceAwsRoute53ResolverDnssecConfigCreate(d *schema.ResourceData, meta i Validation: aws.String(route53resolver.ValidationEnable), } - log.Printf("[DEBUG] Creating Route53 Resolver DNSSEC config: %#v", req) + log.Printf("[DEBUG] Creating Route 53 Resolver DNSSEC config: %#v", req) resp, err := conn.UpdateResolverDnssecConfig(req) if err != nil { - return fmt.Errorf("error creating Route53 Resolver DNSSEC config: %w", err) + return fmt.Errorf("error creating Route 53 Resolver DNSSEC config: %w", err) } d.SetId(aws.StringValue(resp.ResolverDNSSECConfig.Id)) @@ -82,11 +82,11 @@ func resourceAwsRoute53ResolverDnssecConfigRead(d *schema.ResourceData, meta int config, err := finder.ResolverDnssecConfigByID(conn, d.Id()) if err != nil { - return fmt.Errorf("error getting Route53 Resolver DNSSEC config (%s): %w", d.Id(), err) + return fmt.Errorf("error getting Route 53 Resolver DNSSEC config (%s): %w", d.Id(), err) } if config == nil || aws.StringValue(config.ValidationStatus) == route53resolver.ResolverDNSSECValidationStatusDisabled { - log.Printf("[WARN] Route53 Resolver DNSSEC config (%s) not found, removing from state", d.Id()) + log.Printf("[WARN] Route 53 Resolver DNSSEC config (%s) not found, removing from state", d.Id()) d.SetId("") return nil } @@ -111,7 +111,7 @@ func resourceAwsRoute53ResolverDnssecConfigRead(d *schema.ResourceData, meta int func resourceAwsRoute53ResolverDnssecConfigDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).route53resolverconn - // To delete a Route53 ResolverDnssecConfig, it must be: + // To delete a Route 53 ResolverDnssecConfig, it must be: // (1) updated to a "DISABLED" state // (2) updated again to be permanently removed // @@ -125,53 +125,53 @@ func resourceAwsRoute53ResolverDnssecConfigDelete(d *schema.ResourceData, meta i } if err != nil { - return fmt.Errorf("error deleting 
Route53 Resolver DNSSEC config (%s): %w", d.Id(), err) + return fmt.Errorf("error deleting Route 53 Resolver DNSSEC config (%s): %w", d.Id(), err) } if config == nil { return nil } - // (1) Update Route53 ResolverDnssecConfig to "DISABLED" state, if necessary + // (1) Update Route 53 ResolverDnssecConfig to "DISABLED" state, if necessary if aws.StringValue(config.ValidationStatus) == route53resolver.ResolverDNSSECValidationStatusEnabled { - config, err = updateResolverDnsSecConfig(conn, config.ResourceId) + config, err = updateResolverDnsSecConfigValidation(conn, aws.StringValue(config.ResourceId), route53resolver.ValidationDisable) if err != nil { - return fmt.Errorf("error deleting Route53 Resolver DNSSEC config (%s): %w", d.Id(), err) + return fmt.Errorf("error deleting Route 53 Resolver DNSSEC config (%s): %w", d.Id(), err) } if config == nil { return nil } } - // (1.a) Wait for Route53 ResolverDnssecConfig to reach "DISABLED" state, if necessary + // (1.a) Wait for Route 53 ResolverDnssecConfig to reach "DISABLED" state, if necessary if aws.StringValue(config.ValidationStatus) != route53resolver.ResolverDNSSECValidationStatusDisabled { if _, err = waiter.DnssecConfigDisabled(conn, d.Id()); err != nil { if tfawserr.ErrCodeEquals(err, route53resolver.ErrCodeResourceNotFoundException) { return nil } - return fmt.Errorf("error waiting for Route53 Resolver DNSSEC config (%s) to be disabled: %w", d.Id(), err) + return fmt.Errorf("error waiting for Route 53 Resolver DNSSEC config (%s) to be disabled: %w", d.Id(), err) } } - // (2) Update Route53 ResolverDnssecConfig again, effectively deleting the resource - _, err = updateResolverDnsSecConfig(conn, config.ResourceId) + // (2) Update Route 53 ResolverDnssecConfig again, effectively deleting the resource + _, err = updateResolverDnsSecConfigValidation(conn, aws.StringValue(config.ResourceId), route53resolver.ValidationDisable) if tfawserr.ErrCodeEquals(err, route53resolver.ErrCodeResourceNotFoundException) { return nil } if err != nil { - return fmt.Errorf("error deleting Route53 Resolver DNSSEC config (%s): %w", d.Id(), err) + return fmt.Errorf("error deleting Route 53 Resolver DNSSEC config (%s): %w", d.Id(), err) } return nil } -func updateResolverDnsSecConfig(conn *route53resolver.Route53Resolver, resourceId *string) (*route53resolver.ResolverDnssecConfig, error) { +func updateResolverDnsSecConfigValidation(conn *route53resolver.Route53Resolver, resourceId, validation string) (*route53resolver.ResolverDnssecConfig, error) { output, err := conn.UpdateResolverDnssecConfig(&route53resolver.UpdateResolverDnssecConfigInput{ - ResourceId: resourceId, - Validation: aws.String(route53resolver.ValidationDisable), + ResourceId: aws.String(resourceId), + Validation: aws.String(validation), }) if tfawserr.ErrCodeEquals(err, route53resolver.ErrCodeResourceNotFoundException) { diff --git a/aws/resource_aws_route53_resolver_dnssec_config_test.go b/aws/resource_aws_route53_resolver_dnssec_config_test.go index 7be983a453c..2ba046421ff 100644 --- a/aws/resource_aws_route53_resolver_dnssec_config_test.go +++ b/aws/resource_aws_route53_resolver_dnssec_config_test.go @@ -44,7 +44,7 @@ func testSweepRoute53ResolverDnssecConfig(region string) error { id := aws.StringValue(resolverDnssecConfig.Id) resourceId := aws.StringValue(resolverDnssecConfig.ResourceId) - log.Printf("[INFO] Deleting Route53 Resolver Dnssec config: %s", id) + log.Printf("[INFO] Deleting Route 53 Resolver Dnssec config: %s", id) r := resourceAwsRoute53ResolverDnssecConfig() d := r.Data(nil) 
@@ -54,7 +54,7 @@ func testSweepRoute53ResolverDnssecConfig(region string) error { err := r.Delete(d, client) if err != nil { - sweeperErr := fmt.Errorf("error deleting Route53 Resolver Resolver Dnssec config (%s): %w", id, err) + sweeperErr := fmt.Errorf("error deleting Route 53 Resolver Dnssec config (%s): %w", id, err) log.Printf("[ERROR] %s", sweeperErr) sweeperErrs = multierror.Append(sweeperErrs, sweeperErr) continue @@ -65,12 +65,12 @@ func testSweepRoute53ResolverDnssecConfig(region string) error { }) if testSweepSkipSweepError(err) { - log.Printf("[WARN] Skipping Route53 Resolver Resolver Dnssec config sweep for %s: %s", region, err) + log.Printf("[WARN] Skipping Route 53 Resolver Dnssec config sweep for %s: %s", region, err) return sweeperErrs.ErrorOrNil() } if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error retrieving Route53 Resolver Resolver Dnssec config: %w", err)) + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error retrieving Route 53 Resolver Dnssec config: %w", err)) } return sweeperErrs.ErrorOrNil() @@ -177,11 +177,11 @@ func testAccCheckRoute53ResolverDnssecConfigExists(n string) resource.TestCheckF } if config == nil { - return fmt.Errorf("Route53 Resolver Dnssec config (%s) not found", id) + return fmt.Errorf("Route 53 Resolver Dnssec config (%s) not found", id) } if aws.StringValue(config.ValidationStatus) != route53resolver.ResolverDNSSECValidationStatusEnabled { - return fmt.Errorf("Route53 Resolver Dnssec config (%s) is not enabled", aws.StringValue(config.Id)) + return fmt.Errorf("Route 53 Resolver Dnssec config (%s) is not enabled", aws.StringValue(config.Id)) } return nil From 24428b4236fb43b927abad9baf3f3d684ecdfd96 Mon Sep 17 00:00:00 2001 From: koooge Date: Thu, 11 Feb 2021 17:24:06 +0100 Subject: [PATCH 1166/1212] docs/r: Describe import Signed-off-by: koooge --- website/docs/r/default_security_group.html.markdown | 8 ++++++++ website/docs/r/default_subnet.html.markdown | 8 ++++++++ website/docs/r/default_vpc_dhcp_options.html.markdown | 8 ++++++++ 3 files changed, 24 insertions(+) diff --git a/website/docs/r/default_security_group.html.markdown b/website/docs/r/default_security_group.html.markdown index feb4903d679..b7b177b144d 100644 --- a/website/docs/r/default_security_group.html.markdown +++ b/website/docs/r/default_security_group.html.markdown @@ -149,3 +149,11 @@ In addition to all arguments above, the following attributes are exported: * `description` - The description of the security group [aws-default-security-groups]: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html#default-security-group + +## Import + +Security Groups can be imported using the `security group id`, e.g. + +``` +$ terraform import aws_default_security_group.default_sg sg-903004f8 +``` diff --git a/website/docs/r/default_subnet.html.markdown b/website/docs/r/default_subnet.html.markdown index c59df217e28..c4a6632582e 100644 --- a/website/docs/r/default_subnet.html.markdown +++ b/website/docs/r/default_subnet.html.markdown @@ -60,3 +60,11 @@ In addition to all arguments above, the following attributes are exported: * `ipv6_association_id` - The association ID for the IPv6 CIDR block. * `ipv6_cidr_block` - The IPv6 CIDR block. * `owner_id` - The ID of the AWS account that owns the subnet. + +## Import + +Subnets can be imported using the `subnet id`, e.g. 
+ +``` +$ terraform import aws_default_subnet.public_subnet subnet-9d4a7b6c +``` diff --git a/website/docs/r/default_vpc_dhcp_options.html.markdown b/website/docs/r/default_vpc_dhcp_options.html.markdown index fa177113a35..826b5ee26e1 100644 --- a/website/docs/r/default_vpc_dhcp_options.html.markdown +++ b/website/docs/r/default_vpc_dhcp_options.html.markdown @@ -55,3 +55,11 @@ In addition to all arguments above, the following attributes are exported: * `id` - The ID of the DHCP Options Set. * `arn` - The ARN of the DHCP Options Set. * `owner_id` - The ID of the AWS account that owns the DHCP options set. + +## Import + +VPC DHCP Options can be imported using the `dhcp options id`, e.g. + +``` +$ terraform import aws_default_vpc_dhcp_options.default_options dopt-d9070ebb +``` From f0b4eacffb3d6f6630387f0e3551748fdc05f779 Mon Sep 17 00:00:00 2001 From: koooge Date: Thu, 11 Feb 2021 17:30:52 +0100 Subject: [PATCH 1167/1212] docs/r: Modify aws_default Signed-off-by: koooge --- website/docs/r/default_network_acl.html.markdown | 10 +++++----- website/docs/r/default_route_table.html.markdown | 2 +- website/docs/r/default_security_group.html.markdown | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/website/docs/r/default_network_acl.html.markdown b/website/docs/r/default_network_acl.html.markdown index 6c287d7c2ec..44affd43690 100644 --- a/website/docs/r/default_network_acl.html.markdown +++ b/website/docs/r/default_network_acl.html.markdown @@ -45,13 +45,13 @@ resource "aws_vpc" "mainvpc" { } resource "aws_default_network_acl" "default" { - default_network_acl_id = aws_vpc.mainvpc.default_network_acl_id + default_network_acl_id = aws_default_vpc.mainvpc.default_network_acl_id ingress { protocol = -1 rule_no = 100 action = "allow" - cidr_block = aws_vpc.mainvpc.cidr_block + cidr_block = aws_default_vpc.mainvpc.cidr_block from_port = 0 to_port = 0 } @@ -78,13 +78,13 @@ resource "aws_vpc" "mainvpc" { } resource "aws_default_network_acl" "default" { - default_network_acl_id = aws_vpc.mainvpc.default_network_acl_id + default_network_acl_id = aws_default_vpc.mainvpc.default_network_acl_id ingress { protocol = -1 rule_no = 100 action = "allow" - cidr_block = aws_vpc.mainvpc.cidr_block + cidr_block = aws_default_vpc.mainvpc.cidr_block from_port = 0 to_port = 0 } @@ -103,7 +103,7 @@ resource "aws_vpc" "mainvpc" { } resource "aws_default_network_acl" "default" { - default_network_acl_id = aws_vpc.mainvpc.default_network_acl_id + default_network_acl_id = aws_default_vpc.mainvpc.default_network_acl_id # no rules defined, deny all traffic in this ACL } diff --git a/website/docs/r/default_route_table.html.markdown b/website/docs/r/default_route_table.html.markdown index 57ab6f7ed8d..463d8cba73c 100644 --- a/website/docs/r/default_route_table.html.markdown +++ b/website/docs/r/default_route_table.html.markdown @@ -44,7 +44,7 @@ a conflict of rule settings and will overwrite routes. ```hcl resource "aws_default_route_table" "r" { - default_route_table_id = aws_vpc.foo.default_route_table_id + default_route_table_id = aws_default_vpc.foo.default_route_table_id route { # ... 
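The reference changes in this patch read most naturally if the configuration declares the default VPC itself through the `aws_default_vpc` resource, which adopts the account's existing default VPC rather than creating a new one. A minimal sketch of that pairing, assuming illustrative resource names:

```hcl
# Adopt the account's existing default VPC instead of creating a new one
resource "aws_default_vpc" "foo" {
  tags = {
    Name = "Default VPC"
  }
}

# Manage the default route table exported by the adopted VPC
resource "aws_default_route_table" "r" {
  default_route_table_id = aws_default_vpc.foo.default_route_table_id

  tags = {
    Name = "default route table"
  }
}
```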
diff --git a/website/docs/r/default_security_group.html.markdown b/website/docs/r/default_security_group.html.markdown index b7b177b144d..bcd25bb1252 100644 --- a/website/docs/r/default_security_group.html.markdown +++ b/website/docs/r/default_security_group.html.markdown @@ -46,7 +46,7 @@ resource "aws_vpc" "mainvpc" { } resource "aws_default_security_group" "default" { - vpc_id = aws_vpc.mainvpc.id + vpc_id = aws_default_vpc.mainvpc.id ingress { protocol = -1 @@ -75,7 +75,7 @@ resource "aws_vpc" "mainvpc" { } resource "aws_default_security_group" "default" { - vpc_id = aws_vpc.mainvpc.id + vpc_id = aws_default_vpc.mainvpc.id ingress { protocol = -1 From 503970e57194a5c70283c12f0b4a5f67e5dd3824 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 11 Feb 2021 21:04:01 +0000 Subject: [PATCH 1168/1212] Update CHANGELOG.md for #17503 --- CHANGELOG.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4cc5774a6d1..07bf24afd9d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,20 +2,27 @@ FEATURES: +* **New Resource:** `aws_cloudfront_realtime_log_config` ([#14974](https://github.com/hashicorp/terraform-provider-aws/issues/14974)) * **New Resource:** `aws_config_conformance_pack` ([#17313](https://github.com/hashicorp/terraform-provider-aws/issues/17313)) * **New Resource:** `aws_sagemaker_model_package_group` ([#17366](https://github.com/hashicorp/terraform-provider-aws/issues/17366)) * **New Resource:** `aws_securityhub_organization_admin_account` ([#17501](https://github.com/hashicorp/terraform-provider-aws/issues/17501)) ENHANCEMENTS: +* data-source/aws_customer_gateway: Add `device_name` attribute ([#14786](https://github.com/hashicorp/terraform-provider-aws/issues/14786)) * data-source/aws_iam_policy_document: Support merging policy documents by adding `source_policy_documents` and `override_policy_documents` arguments ([#12055](https://github.com/hashicorp/terraform-provider-aws/issues/12055)) * provider: Add terraform-provider-aws version to HTTP User-Agent header ([#17486](https://github.com/hashicorp/terraform-provider-aws/issues/17486)) +* resource/aws_cloudfront_distribution: Add `realtime_log_config_arn` attribute to `default_cache_behavior` and `ordered_cache_behavior` configuration blocks ([#14974](https://github.com/hashicorp/terraform-provider-aws/issues/14974)) * resource/aws_cloudwatch_log_destination: Add plan time validation to `role_arn`, `name` and `target_arn`. ([#11687](https://github.com/hashicorp/terraform-provider-aws/issues/11687)) +* resource/aws_cloudwatch_log_group: Add plan time validation for `retention_in_days` argument ([#14673](https://github.com/hashicorp/terraform-provider-aws/issues/14673)) +* resource/aws_codebuild_report_group: Add `delete_reports` argument ([#17338](https://github.com/hashicorp/terraform-provider-aws/issues/17338)) +* resource/aws_customer_gateway: Add `device_name` argument ([#14786](https://github.com/hashicorp/terraform-provider-aws/issues/14786)) * resource/aws_ec2_traffic_mirror_filter: Add `arn` attribute. ([#13948](https://github.com/hashicorp/terraform-provider-aws/issues/13948)) * resource/aws_ec2_traffic_mirror_filter_rule: Add arn attribute. ([#13949](https://github.com/hashicorp/terraform-provider-aws/issues/13949)) * resource/aws_ec2_traffic_mirror_filter_rule: Add plan time validation to `destination_port_range.from_port`, `destination_port_range.to_port`, `source_port_range.from_port`, and `source_port_range.to_port`. 
([#13949](https://github.com/hashicorp/terraform-provider-aws/issues/13949)) * resource/aws_lambda_event_source_mapping: Add `topics` attribute to support Amazon MSK as an event source ([#14746](https://github.com/hashicorp/terraform-provider-aws/issues/14746)) +* resource/aws_lb_listener_certificate: Add import support ([#16474](https://github.com/hashicorp/terraform-provider-aws/issues/16474)) * resource/aws_ses_active_receipt_rule_set: Add `arn` attribute ([#13962](https://github.com/hashicorp/terraform-provider-aws/issues/13962)) * resource/aws_ses_active_receipt_rule_set: Add plan time validation for `rule_set_name` argument ([#13962](https://github.com/hashicorp/terraform-provider-aws/issues/13962)) * resource/aws_ses_configuration_set: Add `arn` attribute. ([#13972](https://github.com/hashicorp/terraform-provider-aws/issues/13972)) @@ -31,8 +38,11 @@ ENHANCEMENTS: BUG FIXES: * resource/aws_glue_catalog_database: Use Catalog Id when deleting Databases. ([#17489](https://github.com/hashicorp/terraform-provider-aws/issues/17489)) +* resource/aws_iam_instance_profile: Detach role when role doesn't exist + remove when deleted from state. ([#16188](https://github.com/hashicorp/terraform-provider-aws/issues/16188)) * resource/aws_instance: Fix use of `throughput` and `iops` for `gp3` volumes at the same time ([#17380](https://github.com/hashicorp/terraform-provider-aws/issues/17380)) * resource/aws_lambda_event_source_mapping: Wait for create and update operations to complete ([#14765](https://github.com/hashicorp/terraform-provider-aws/issues/14765)) +* resource/aws_lambda_function: Prevent crash when using `Image` package type ([#17082](https://github.com/hashicorp/terraform-provider-aws/issues/17082)) +* resource/aws_wafv2_web_acl_association: Increase creation timeout value from 2 to 5 minutes to prevent WAFUnavailableEntityException ([#17545](https://github.com/hashicorp/terraform-provider-aws/issues/17545)) ## 3.27.0 (February 05, 2021) From 69b47c05b8d7c9aa3321486751c25cedd3ef16b6 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 11 Feb 2021 16:06:19 -0500 Subject: [PATCH 1169/1212] resource/aws_codestar_connections: Add tag update support and CHANGELOG --- .changelog/16835.txt | 3 ++ ...urce_aws_codestarconnections_connection.go | 29 ++++++++---- ...aws_codestarconnections_connection_test.go | 46 +++++++++++++++---- .../r/codestarconnections_connection.markdown | 2 +- 4 files changed, 62 insertions(+), 18 deletions(-) create mode 100644 .changelog/16835.txt diff --git a/.changelog/16835.txt b/.changelog/16835.txt new file mode 100644 index 00000000000..cef858171af --- /dev/null +++ b/.changelog/16835.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_codestarconnections_connection: Add `tags` argument +``` diff --git a/aws/resource_aws_codestarconnections_connection.go b/aws/resource_aws_codestarconnections_connection.go index df44221cb09..ce2fe09db25 100644 --- a/aws/resource_aws_codestarconnections_connection.go +++ b/aws/resource_aws_codestarconnections_connection.go @@ -16,6 +16,7 @@ func resourceAwsCodeStarConnectionsConnection() *schema.Resource { return &schema.Resource{ Create: resourceAwsCodeStarConnectionsConnectionCreate, Read: resourceAwsCodeStarConnectionsConnectionRead, + Update: resourceAwsCodeStarConnectionsConnectionUpdate, Delete: resourceAwsCodeStarConnectionsConnectionDelete, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, @@ -45,12 +46,7 @@ func resourceAwsCodeStarConnectionsConnection() *schema.Resource { ValidateFunc: 
validation.StringInSlice(codestarconnections.ProviderType_Values(), false), }, - "tags": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, + "tags": tagsSchema(), }, } } @@ -63,7 +59,7 @@ func resourceAwsCodeStarConnectionsConnectionCreate(d *schema.ResourceData, meta ProviderType: aws.String(d.Get("provider_type").(string)), } - if v, ok := d.GetOk("tags"); ok { + if v, ok := d.GetOk("tags"); ok && len(v.(map[string]interface{})) > 0 { params.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().CodestarconnectionsTags() } @@ -105,12 +101,27 @@ func resourceAwsCodeStarConnectionsConnectionRead(d *schema.ResourceData, meta i d.Set("provider_type", resp.Connection.ProviderType) tags, err := keyvaluetags.CodestarconnectionsListTags(conn, arn) + if err != nil { - return fmt.Errorf("error listing tags for CodeStar connection (%s): %w", arn, err) + return fmt.Errorf("error listing tags for CodeStar Connection (%s): %w", arn, err) } if err := d.Set("tags", tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags for CodeStar connection (%s): %w", arn, err) + return fmt.Errorf("error setting tags for CodeStar Connection (%s): %w", arn, err) + } + + return nil +} + +func resourceAwsCodeStarConnectionsConnectionUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).codestarconnectionsconn + + if d.HasChange("tags") { + o, n := d.GetChange("tags") + + if err := keyvaluetags.CodestarconnectionsUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating CodeStar Connection (%s) tags: %w", d.Id(), err) + } } return nil diff --git a/aws/resource_aws_codestarconnections_connection_test.go b/aws/resource_aws_codestarconnections_connection_test.go index ba8b6ecae3e..4d2826584fd 100644 --- a/aws/resource_aws_codestarconnections_connection_test.go +++ b/aws/resource_aws_codestarconnections_connection_test.go @@ -65,7 +65,7 @@ func TestAccAWSCodeStarConnectionsConnection_disappears(t *testing.T) { }) } -func TestAccAWSCodeStarConnectionsConnection_tags(t *testing.T) { +func TestAccAWSCodeStarConnectionsConnection_Tags(t *testing.T) { var v codestarconnections.Connection resourceName := "aws_codestarconnections_connection.test" rName := acctest.RandomWithPrefix("tf-acc-test") @@ -76,11 +76,11 @@ func TestAccAWSCodeStarConnectionsConnection_tags(t *testing.T) { CheckDestroy: testAccCheckAWSCodeStarConnectionsConnectionDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSCodeStarConnectionsConnectionConfigTags(rName), + Config: testAccAWSCodeStarConnectionsConnectionConfigTags1(rName, "key1", "value1"), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckAWSCodeStarConnectionsConnectionExists(resourceName, &v), - resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), - resource.TestCheckResourceAttr(resourceName, "tags.Environment", "production"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), ), }, { @@ -88,6 +88,23 @@ func TestAccAWSCodeStarConnectionsConnection_tags(t *testing.T) { ImportState: true, ImportStateVerify: true, }, + { + Config: testAccAWSCodeStarConnectionsConnectionConfigTags2(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSCodeStarConnectionsConnectionExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "tags.%", 
"2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccAWSCodeStarConnectionsConnectionConfigTags1(rName, "key2", "value2"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSCodeStarConnectionsConnectionExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, }, }) } @@ -146,16 +163,29 @@ resource "aws_codestarconnections_connection" "test" { `, rName) } -func testAccAWSCodeStarConnectionsConnectionConfigTags(rName string) string { +func testAccAWSCodeStarConnectionsConnectionConfigTags1(rName string, tagKey1 string, tagValue1 string) string { return fmt.Sprintf(` resource "aws_codestarconnections_connection" "test" { name = %[1]q provider_type = "Bitbucket" tags = { - Name = %[1]q - Environment = "production" + %[2]q = %[3]q } } -`, rName) +`, rName, tagKey1, tagValue1) +} + +func testAccAWSCodeStarConnectionsConnectionConfigTags2(rName string, tagKey1 string, tagValue1 string, tagKey2 string, tagValue2 string) string { + return fmt.Sprintf(` +resource "aws_codestarconnections_connection" "test" { + name = %[1]q + provider_type = "Bitbucket" + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2) } diff --git a/website/docs/r/codestarconnections_connection.markdown b/website/docs/r/codestarconnections_connection.markdown index 51f9b60fb50..a64de09c2e3 100644 --- a/website/docs/r/codestarconnections_connection.markdown +++ b/website/docs/r/codestarconnections_connection.markdown @@ -67,7 +67,7 @@ The following arguments are supported: * `name` - (Required) The name of the connection to be created. The name must be unique in the calling AWS account. Changing `name` will create a new resource. * `provider_type` - (Required) The name of the external provider where your third-party code repository is configured. Valid values are `Bitbucket`, `GitHub`, or `GitHubEnterpriseServer`. Changing `provider_type` will create a new resource. -* `tags` - (Optional) An array of key:value pairs to associate with the resource. +* `tags` - (Optional) Map of key-value resource tags to associate with the resource. 
## Attributes Reference From f7ff448ff61d931c13a83ea9bac6d00f838830a8 Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Sat, 27 Jun 2020 23:17:18 +0300 Subject: [PATCH 1170/1212] add validations + disappears test --- ...esource_aws_elastic_transcoder_pipeline.go | 88 ++++++++++++++----- ...ce_aws_elastic_transcoder_pipeline_test.go | 35 ++------ 2 files changed, 74 insertions(+), 49 deletions(-) diff --git a/aws/resource_aws_elastic_transcoder_pipeline.go b/aws/resource_aws_elastic_transcoder_pipeline.go index 64df5e9d8a9..4c1154b679a 100644 --- a/aws/resource_aws_elastic_transcoder_pipeline.go +++ b/aws/resource_aws_elastic_transcoder_pipeline.go @@ -9,6 +9,7 @@ import ( "github.com/aws/aws-sdk-go/service/elastictranscoder" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func resourceAwsElasticTranscoderPipeline() *schema.Resource { @@ -51,6 +52,10 @@ func resourceAwsElasticTranscoderPipeline() *schema.Resource { "storage_class": { Type: schema.TypeString, Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "Standard", + "ReducedRedundancy", + }, false), }, }, }, @@ -64,15 +69,33 @@ func resourceAwsElasticTranscoderPipeline() *schema.Resource { "access": { Type: schema.TypeList, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{ + "Read", + "ReadAcp", + "WriteAcp", + "FullControl", + }, false), + }, }, "grantee": { Type: schema.TypeString, Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "AllUsers", + "AuthenticatedUsers", + "LogDelivery", + }, false), }, "grantee_type": { Type: schema.TypeString, Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "Canonical", + "Email", + "Group", + }, false), }, }, }, @@ -88,17 +111,11 @@ func resourceAwsElasticTranscoderPipeline() *schema.Resource { Optional: true, Computed: true, ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexp.MustCompile(`^[.0-9A-Za-z-_]+$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "only alphanumeric characters, hyphens, underscores, and periods allowed in %q", k)) - } - if len(value) > 40 { - errors = append(errors, fmt.Errorf("%q cannot be longer than 40 characters", k)) - } - return - }, + ValidateFunc: validation.All( + validation.StringMatch(regexp.MustCompile(`^[.0-9A-Za-z-_]+$`), + "only alphanumeric characters, hyphens, underscores, and periods allowed"), + validation.StringLenBetween(1, 40), + ), }, "notifications": { @@ -108,20 +125,24 @@ func resourceAwsElasticTranscoderPipeline() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "completed": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, }, "error": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, }, "progressing": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, }, "warning": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, }, }, }, @@ -160,6 +181,10 @@ func resourceAwsElasticTranscoderPipeline() *schema.Resource { "storage_class": { Type: 
schema.TypeString, Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "Standard", + "ReducedRedundancy", + }, false), }, }, }, @@ -173,15 +198,33 @@ func resourceAwsElasticTranscoderPipeline() *schema.Resource { "access": { Type: schema.TypeList, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{ + "Read", + "ReadAcp", + "WriteAcp", + "FullControl", + }, false), + }, }, "grantee": { Type: schema.TypeString, Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "AllUsers", + "AuthenticatedUsers", + "LogDelivery", + }, false), }, "grantee_type": { Type: schema.TypeString, Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "Canonical", + "Email", + "Group", + }, false), }, }, }, @@ -408,7 +451,8 @@ func resourceAwsElasticTranscoderPipelineUpdate(d *schema.ResourceData, meta int } for _, w := range output.Warnings { - log.Printf("[WARN] Elastic Transcoder Pipeline %v: %v", *w.Code, *w.Message) + log.Printf("[WARN] Elastic Transcoder Pipeline %v: %v", aws.StringValue(w.Code), + aws.StringValue(w.Message)) } return resourceAwsElasticTranscoderPipelineRead(d, meta) diff --git a/aws/resource_aws_elastic_transcoder_pipeline_test.go b/aws/resource_aws_elastic_transcoder_pipeline_test.go index 95d4daa1510..cedb38b5697 100644 --- a/aws/resource_aws_elastic_transcoder_pipeline_test.go +++ b/aws/resource_aws_elastic_transcoder_pipeline_test.go @@ -7,7 +7,6 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/elastictranscoder" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -110,16 +109,16 @@ func testAccCheckAWSElasticTranscoderPipeline_notifications( return func(s *terraform.State) error { var notes []string - if p.Notifications.Completed != nil && *p.Notifications.Completed != "" { + if p.Notifications.Completed != nil && aws.StringValue(p.Notifications.Completed) != "" { notes = append(notes, "completed") } - if p.Notifications.Error != nil && *p.Notifications.Error != "" { + if p.Notifications.Error != nil && aws.StringValue(p.Notifications.Error) != "" { notes = append(notes, "error") } - if p.Notifications.Progressing != nil && *p.Notifications.Progressing != "" { + if p.Notifications.Progressing != nil && aws.StringValue(p.Notifications.Progressing) != "" { notes = append(notes, "progressing") } - if p.Notifications.Warning != nil && *p.Notifications.Warning != "" { + if p.Notifications.Warning != nil && aws.StringValue(p.Notifications.Warning) != "" { notes = append(notes, "warning") } @@ -212,7 +211,7 @@ func TestAccAWSElasticTranscoderPipeline_disappears(t *testing.T) { Config: awsElasticTranscoderPipelineConfigBasic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticTranscoderPipelineExists(resourceName, pipeline), - testAccCheckAWSElasticTranscoderPipelineDisappears(pipeline), + testAccCheckResourceDisappears(testAccProvider, resourceAwsElasticTranscoderPipeline(), resourceName), ), ExpectNonEmptyPlan: true, }, @@ -247,18 +246,6 @@ func testAccCheckAWSElasticTranscoderPipelineExists(n string, res *elastictransc } } -func testAccCheckAWSElasticTranscoderPipelineDisappears(res *elastictranscoder.Pipeline) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).elastictranscoderconn - - _, err := 
conn.DeletePipeline(&elastictranscoder.DeletePipelineInput{ - Id: res.Id, - }) - - return err - } -} - func testAccCheckElasticTranscoderPipelineDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).elastictranscoderconn @@ -272,20 +259,14 @@ func testAccCheckElasticTranscoderPipelineDestroy(s *terraform.State) error { }) if err == nil { - if out.Pipeline != nil && *out.Pipeline.Id == rs.Primary.ID { + if out.Pipeline != nil && aws.StringValue(out.Pipeline.Id) == rs.Primary.ID { return fmt.Errorf("Elastic Transcoder Pipeline still exists") } } - awsErr, ok := err.(awserr.Error) - if !ok { - return err - } - - if awsErr.Code() != "ResourceNotFoundException" { - return fmt.Errorf("unexpected error: %s", awsErr) + if !isAWSErr(err, elastictranscoder.ErrCodeResourceNotFoundException, "") { + return fmt.Errorf("unexpected error: %s", err) } - } return nil } From e0c3808117a2653a55f8fab4fd83c887dc8a014d Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Sun, 28 Jun 2020 08:51:11 +0300 Subject: [PATCH 1171/1212] add arn check --- aws/resource_aws_elastic_transcoder_pipeline_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/aws/resource_aws_elastic_transcoder_pipeline_test.go b/aws/resource_aws_elastic_transcoder_pipeline_test.go index cedb38b5697..5bef9559b34 100644 --- a/aws/resource_aws_elastic_transcoder_pipeline_test.go +++ b/aws/resource_aws_elastic_transcoder_pipeline_test.go @@ -3,6 +3,7 @@ package aws import ( "fmt" "reflect" + "regexp" "sort" "testing" @@ -28,6 +29,7 @@ func TestAccAWSElasticTranscoderPipeline_basic(t *testing.T) { Config: awsElasticTranscoderPipelineConfigBasic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSElasticTranscoderPipelineExists(resourceName, pipeline), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "elastictranscoder", regexp.MustCompile(`pipeline/.+`)), ), }, { From e2b3260fb3e7d086c7bf76eeb14695510cad50eb Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Thu, 11 Feb 2021 23:11:02 +0200 Subject: [PATCH 1172/1212] add changelog --- .changelog/13973.txt | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changelog/13973.txt diff --git a/.changelog/13973.txt b/.changelog/13973.txt new file mode 100644 index 00000000000..49919be9dd9 --- /dev/null +++ b/.changelog/13973.txt @@ -0,0 +1,5 @@ +```release-note:enhancement +resource/aws_elastictranscoder_pipeline: Add plan time validations `content_config.storage_class`, `content_config_permissions.access`, `content_config_permissions.grantee`, `content_config_permissions.grantee_type`, +`notifications.completed`, `notifications.error`, `notifications.progressing`, `notifications.warning`, +`thumbnail_config.storage_class`, `thumbnail_config_permissions.access`, `thumbnail_config_permissions.grantee`, `thumbnail_config_permissions.grantee_type` +``` From 552fcd4760d671692c57a448719d2318fd206395 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 11 Feb 2021 16:20:46 -0500 Subject: [PATCH 1173/1212] Add CHANGELOG for #17044 --- .changelog/17044.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/17044.txt diff --git a/.changelog/17044.txt b/.changelog/17044.txt new file mode 100644 index 00000000000..615ec2cf96e --- /dev/null +++ b/.changelog/17044.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_cloudfront_public_key: Add import support +``` From bba8526eae30451aeaf329606d7e8b91e6f0ec4b Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 11 Feb 2021 16:34:22 -0500 Subject: [PATCH 1174/1212] 
resource/aws_ec2_capacity_reservation: Update skip_requesting_account_id documentation and CHANGELOG for #17129 --- .changelog/17129.txt | 0 website/docs/index.html.markdown | 1 - 2 files changed, 1 deletion(-) create mode 100644 .changelog/17129.txt diff --git a/.changelog/17129.txt b/.changelog/17129.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index 0805755aec2..dad41e5b6e7 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -263,7 +263,6 @@ for more information about connecting to alternate AWS endpoints or AWS compatib - [`aws_dx_public_virtual_interface` resource](/docs/providers/aws/r/dx_public_virtual_interface.html) - [`aws_dx_transit_virtual_interface` resource](/docs/providers/aws/r/dx_transit_virtual_interface.html) - [`aws_ebs_volume` data source](/docs/providers/aws/d/ebs_volume.html) - - [`aws_ec2_capacity_reservation` resource](/docs/providers/aws/r/ec2_capacity_reservation.html) - [`aws_ec2_client_vpn_endpoint` resource](/docs/providers/aws/r/ec2_client_vpn_endpoint.html) - [`aws_ec2_traffic_mirror_filter` resource](/docs/providers/aws/r/ec2_traffic_mirror_filter.html) - [`aws_ec2_traffic_mirror_filter_rule` resource](/docs/providers/aws/r/ec2_traffic_mirror_filter_rule.html) From df2b927733a811ab53d7daf8347057f8aea4fcc5 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 11 Feb 2021 16:35:23 -0500 Subject: [PATCH 1175/1212] Fill in #17129 CHANGELOG --- .changelog/17129.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.changelog/17129.txt b/.changelog/17129.txt index e69de29bb2d..a1b4bcd60f1 100644 --- a/.changelog/17129.txt +++ b/.changelog/17129.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_ec2_capacity_reservation: Add `owner_id` attribute +``` From 30f2c1e9d94ab2dfb584c72e789832c32a48242a Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 11 Feb 2021 16:41:08 -0500 Subject: [PATCH 1176/1212] Add CHANGELOG entry for #17160 --- .changelog/17160.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/17160.txt diff --git a/.changelog/17160.txt b/.changelog/17160.txt new file mode 100644 index 00000000000..9b720954eac --- /dev/null +++ b/.changelog/17160.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_licensemanager_license_configuration: Add `arn` and `owner_account_id` attributes +``` From 3ce92252e94f825e34de399a1295a1eeaae2c8c3 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 11 Feb 2021 17:00:08 -0500 Subject: [PATCH 1177/1212] resource/aws_budgets_budget: Fix disappears test and add CHANGELOG for #13139 Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAWSBudgetsBudget_disappears (8.81s) --- PASS: TestAccAWSBudgetsBudget_prefix (18.83s) --- PASS: TestAccAWSBudgetsBudget_basic (22.27s) --- PASS: TestAccAWSBudgetsBudget_notification (64.76s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- SKIP: TestAccAWSBudgetsBudget_basic (1.38s) --- SKIP: TestAccAWSBudgetsBudget_prefix (1.38s) --- SKIP: TestAccAWSBudgetsBudget_disappears (1.38s) --- SKIP: TestAccAWSBudgetsBudget_notification (1.38s) ``` --- .changelog/13139.txt | 7 +++++++ aws/resource_aws_budgets_budget_test.go | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 .changelog/13139.txt diff --git a/.changelog/13139.txt b/.changelog/13139.txt new file mode 100644 index 00000000000..3d783567d31 --- /dev/null +++ b/.changelog/13139.txt @@ -0,0 +1,7 @@ 
+```release-note:enhancement +resource/aws_budgets_budget: Add `arn` attribute +``` + +```release-note:enhancement +resource/aws_budgets_budget: Add plan time validation for `budget_type`, `time_unit`, and `subscriber_sns_topic_arns` arguments +``` diff --git a/aws/resource_aws_budgets_budget_test.go b/aws/resource_aws_budgets_budget_test.go index 670cf66b626..98de8e4de9b 100644 --- a/aws/resource_aws_budgets_budget_test.go +++ b/aws/resource_aws_budgets_budget_test.go @@ -284,7 +284,7 @@ func TestAccAWSBudgetsBudget_disappears(t *testing.T) { resourceName := "aws_budgets_budget.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBudgets(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(budgets.EndpointsID, t) }, Providers: testAccProviders, CheckDestroy: testAccAWSBudgetsBudgetDestroy, Steps: []resource.TestStep{ From 046ed95854674eb89ead53d4c7be3c9a0e731df7 Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Fri, 12 Feb 2021 00:08:56 +0200 Subject: [PATCH 1178/1212] Update .changelog/13973.txt Co-authored-by: angie pinilla --- .changelog/13973.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changelog/13973.txt b/.changelog/13973.txt index 49919be9dd9..e79781655c0 100644 --- a/.changelog/13973.txt +++ b/.changelog/13973.txt @@ -1,5 +1,5 @@ ```release-note:enhancement -resource/aws_elastictranscoder_pipeline: Add plan time validations `content_config.storage_class`, `content_config_permissions.access`, `content_config_permissions.grantee`, `content_config_permissions.grantee_type`, +resource/aws_elastictranscoder_pipeline: Add plan time validations to `content_config.storage_class`, `content_config_permissions.access`, `content_config_permissions.grantee`, `content_config_permissions.grantee_type`, `notifications.completed`, `notifications.error`, `notifications.progressing`, `notifications.warning`, `thumbnail_config.storage_class`, `thumbnail_config_permissions.access`, `thumbnail_config_permissions.grantee`, `thumbnail_config_permissions.grantee_type` ``` From 1225f59da15300ce8197ca2721e8f7c70fd9ec7b Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Fri, 12 Feb 2021 00:09:26 +0200 Subject: [PATCH 1179/1212] Update aws/resource_aws_elastic_transcoder_pipeline_test.go Co-authored-by: angie pinilla --- aws/resource_aws_elastic_transcoder_pipeline_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_elastic_transcoder_pipeline_test.go b/aws/resource_aws_elastic_transcoder_pipeline_test.go index 5bef9559b34..09b12cc2f9d 100644 --- a/aws/resource_aws_elastic_transcoder_pipeline_test.go +++ b/aws/resource_aws_elastic_transcoder_pipeline_test.go @@ -111,7 +111,7 @@ func testAccCheckAWSElasticTranscoderPipeline_notifications( return func(s *terraform.State) error { var notes []string - if p.Notifications.Completed != nil && aws.StringValue(p.Notifications.Completed) != "" { + if aws.StringValue(p.Notifications.Completed) != "" { notes = append(notes, "completed") } if p.Notifications.Error != nil && aws.StringValue(p.Notifications.Error) != "" { From 8ce25810c7c93c8612d043384c311ac2b6c7a8a2 Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Fri, 12 Feb 2021 00:09:51 +0200 Subject: [PATCH 1180/1212] Apply suggestions from code review Co-authored-by: angie pinilla --- aws/resource_aws_elastic_transcoder_pipeline_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_elastic_transcoder_pipeline_test.go 
b/aws/resource_aws_elastic_transcoder_pipeline_test.go index 09b12cc2f9d..742b0b07e01 100644 --- a/aws/resource_aws_elastic_transcoder_pipeline_test.go +++ b/aws/resource_aws_elastic_transcoder_pipeline_test.go @@ -114,13 +114,13 @@ func testAccCheckAWSElasticTranscoderPipeline_notifications( if aws.StringValue(p.Notifications.Completed) != "" { notes = append(notes, "completed") } - if p.Notifications.Error != nil && aws.StringValue(p.Notifications.Error) != "" { + if aws.StringValue(p.Notifications.Error) != "" { notes = append(notes, "error") } - if p.Notifications.Progressing != nil && aws.StringValue(p.Notifications.Progressing) != "" { + if aws.StringValue(p.Notifications.Progressing) != "" { notes = append(notes, "progressing") } - if p.Notifications.Warning != nil && aws.StringValue(p.Notifications.Warning) != "" { + if aws.StringValue(p.Notifications.Warning) != "" { notes = append(notes, "warning") } From ff91985859dec51214d6230658b1847c8451587e Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 12 Feb 2021 00:12:09 +0200 Subject: [PATCH 1181/1212] remove validation from grantee --- aws/resource_aws_elastic_transcoder_pipeline.go | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/aws/resource_aws_elastic_transcoder_pipeline.go b/aws/resource_aws_elastic_transcoder_pipeline.go index 4c1154b679a..0c998576cb3 100644 --- a/aws/resource_aws_elastic_transcoder_pipeline.go +++ b/aws/resource_aws_elastic_transcoder_pipeline.go @@ -82,11 +82,6 @@ func resourceAwsElasticTranscoderPipeline() *schema.Resource { "grantee": { Type: schema.TypeString, Optional: true, - ValidateFunc: validation.StringInSlice([]string{ - "AllUsers", - "AuthenticatedUsers", - "LogDelivery", - }, false), }, "grantee_type": { Type: schema.TypeString, @@ -211,11 +206,6 @@ func resourceAwsElasticTranscoderPipeline() *schema.Resource { "grantee": { Type: schema.TypeString, Optional: true, - ValidateFunc: validation.StringInSlice([]string{ - "AllUsers", - "AuthenticatedUsers", - "LogDelivery", - }, false), }, "grantee_type": { Type: schema.TypeString, From fb4d1e92fb6aee09ed8aee5a6302610a3d514c93 Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 12 Feb 2021 00:13:56 +0200 Subject: [PATCH 1182/1212] changelog --- .changelog/13973.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.changelog/13973.txt b/.changelog/13973.txt index e79781655c0..d36e47fcfeb 100644 --- a/.changelog/13973.txt +++ b/.changelog/13973.txt @@ -1,5 +1,5 @@ ```release-note:enhancement -resource/aws_elastictranscoder_pipeline: Add plan time validations to `content_config.storage_class`, `content_config_permissions.access`, `content_config_permissions.grantee`, `content_config_permissions.grantee_type`, +resource/aws_elastictranscoder_pipeline: Add plan time validations to `content_config.storage_class`, `content_config_permissions.access`, `content_config_permissions.grantee_type`, `notifications.completed`, `notifications.error`, `notifications.progressing`, `notifications.warning`, -`thumbnail_config.storage_class`, `thumbnail_config_permissions.access`, `thumbnail_config_permissions.grantee`, `thumbnail_config_permissions.grantee_type` -``` +`thumbnail_config.storage_class`, `thumbnail_config_permissions.access`, `thumbnail_config_permissions.grantee_type` +``` \ No newline at end of file From 5c87f23da8e2049c545975aefba815449f7b09fd Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 12 Feb 2021 00:15:57 +0200 Subject: [PATCH 1183/1212] handle error --- ...esource_aws_elastic_transcoder_pipeline_test.go | 14 
+++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/aws/resource_aws_elastic_transcoder_pipeline_test.go b/aws/resource_aws_elastic_transcoder_pipeline_test.go index 742b0b07e01..3e4991912b8 100644 --- a/aws/resource_aws_elastic_transcoder_pipeline_test.go +++ b/aws/resource_aws_elastic_transcoder_pipeline_test.go @@ -259,15 +259,15 @@ func testAccCheckElasticTranscoderPipelineDestroy(s *terraform.State) error { out, err := conn.ReadPipeline(&elastictranscoder.ReadPipelineInput{ Id: aws.String(rs.Primary.ID), }) - - if err == nil { - if out.Pipeline != nil && aws.StringValue(out.Pipeline.Id) == rs.Primary.ID { - return fmt.Errorf("Elastic Transcoder Pipeline still exists") - } + if isAWSErr(err, elastictranscoder.ErrCodeResourceNotFoundException, "") { + continue + } + if err != nil { + return fmt.Errorf("unexpected error: %w", err) } - if !isAWSErr(err, elastictranscoder.ErrCodeResourceNotFoundException, "") { - return fmt.Errorf("unexpected error: %s", err) + if out.Pipeline != nil && aws.StringValue(out.Pipeline.Id) == rs.Primary.ID { + return fmt.Errorf("Elastic Transcoder Pipeline still exists") } } return nil From d132078c2fa1adedd319d46a8d2473280fa33cce Mon Sep 17 00:00:00 2001 From: drfaust92 Date: Fri, 12 Feb 2021 00:21:07 +0200 Subject: [PATCH 1184/1212] docs --- .../r/elastictranscoder_pipeline.html.markdown | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/website/docs/r/elastictranscoder_pipeline.html.markdown b/website/docs/r/elastictranscoder_pipeline.html.markdown index 9d0fb4a8b5c..95df95f1169 100644 --- a/website/docs/r/elastictranscoder_pipeline.html.markdown +++ b/website/docs/r/elastictranscoder_pipeline.html.markdown @@ -57,11 +57,11 @@ you specify values for `content_config`, you must also specify values for The `content_config` object supports the following: * `bucket` - The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. -* `storage_class` - The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the files and playlists that it stores in your Amazon S3 bucket. +* `storage_class` - The Amazon S3 storage class, `Standard` or `ReducedRedundancy`, that you want Elastic Transcoder to assign to the files and playlists that it stores in your Amazon S3 bucket. The `content_config_permissions` object supports the following: -* `access` - The permission that you want to give to the AWS user that you specified in `content_config_permissions.grantee` +* `access` - The permission that you want to give to the AWS user that you specified in `content_config_permissions.grantee`. Valid values are `Read`, `ReadAcp`, `WriteAcp` or `FullControl`. * `grantee` - The AWS user or group that you want to have access to transcoded files and playlists. * `grantee_type` - Specify the type of value that appears in the `content_config_permissions.grantee` object. Valid values are `Canonical`, `Email` or `Group`. @@ -90,10 +90,16 @@ The `thumbnail_config` object supports the following: The `thumbnail_config_permissions` object supports the following: -* `access` - The permission that you want to give to the AWS user that you specified in `thumbnail_config_permissions.grantee`. +* `access` - The permission that you want to give to the AWS user that you specified in `thumbnail_config_permissions.grantee`. Valid values are `Read`, `ReadAcp`, `WriteAcp` or `FullControl`. 
* `grantee` - The AWS user or group that you want to have access to thumbnail files.
-* `grantee_type` - Specify the type of value that appears in the `thumbnail_config_permissions.grantee` object.
+* `grantee_type` - Specify the type of value that appears in the `thumbnail_config_permissions.grantee` object. Valid values are `Canonical`, `Email` or `Group`.
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - The ID of the Elastic Transcoder pipeline.
+* `arn` - The ARN of the Elastic Transcoder pipeline.
 ## Import
 Elastic Transcoder pipelines can be imported using the `id`, e.g.

From 683c0cf6238df9b3225f7575e2c2db253bb11f2f Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Fri, 12 Feb 2021 00:22:48 +0200
Subject: [PATCH 1185/1212] space

---
 website/docs/r/elastictranscoder_pipeline.html.markdown | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/docs/r/elastictranscoder_pipeline.html.markdown b/website/docs/r/elastictranscoder_pipeline.html.markdown
index 95df95f1169..ab5238b5a3d 100644
--- a/website/docs/r/elastictranscoder_pipeline.html.markdown
+++ b/website/docs/r/elastictranscoder_pipeline.html.markdown
@@ -100,6 +100,7 @@ In addition to all arguments above, the following attributes are exported:
 * `id` - The ID of the Elastic Transcoder pipeline.
 * `arn` - The ARN of the Elastic Transcoder pipeline.
+
 ## Import
 Elastic Transcoder pipelines can be imported using the `id`, e.g.

From 7877fd264907ca869aeb06c2922519c9fbac0879 Mon Sep 17 00:00:00 2001
From: Graham Davison
Date: Thu, 11 Feb 2021 14:28:31 -0800
Subject: [PATCH 1186/1212] Adds validation to projection_type parameters

---
 aws/resource_aws_dynamodb_table.go | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/aws/resource_aws_dynamodb_table.go b/aws/resource_aws_dynamodb_table.go
index 4c5587af2da..1d4ad4bc6e1 100644
--- a/aws/resource_aws_dynamodb_table.go
+++ b/aws/resource_aws_dynamodb_table.go
@@ -166,9 +166,10 @@ func resourceAwsDynamoDbTable() *schema.Resource {
 ForceNew: true,
 },
 "projection_type": {
- Type: schema.TypeString,
- Required: true,
- ForceNew: true,
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ ValidateFunc: validation.StringInSlice(dynamodb.ProjectionType_Values(), false),
 },
 "non_key_attributes": {
 Type: schema.TypeList,
@@ -211,8 +212,9 @@ func resourceAwsDynamoDbTable() *schema.Resource {
 Optional: true,
 },
 "projection_type": {
- Type: schema.TypeString,
- Required: true,
+ Type: schema.TypeString,
+ Required: true,
+ ValidateFunc: validation.StringInSlice(dynamodb.ProjectionType_Values(), false),
 },
 "non_key_attributes": {
 Type: schema.TypeSet,

From 60c45fd4b0eccc65677864c7f5d954ebf84e937a Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Fri, 15 Feb 2019 13:54:23 -0500
Subject: [PATCH 1187/1212] Remove ALL leading '/'s when destroying aws_s3_bucket_object resource.
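A minimal, standalone sketch of the key cleanup this change applies before issuing the delete call (the `normalizeKey` helper and the `main` driver are invented for illustration and are not part of the patch):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// normalizeKey drops every leading '/' and collapses runs of '/' into a
// single '/', matching how S3 resolves the key when
// aws.Config.DisableRestProtocolURICleaning is left at its default (false).
func normalizeKey(key string) string {
	key = strings.TrimLeft(key, "/")
	return regexp.MustCompile(`/+`).ReplaceAllString(key, "/")
}

func main() {
	fmt.Println(normalizeKey("/////test-key"))           // test-key
	fmt.Println(normalizeKey("first//second///third//")) // first/second/third/
}
```

The acceptance tests added below exercise exactly these two key shapes.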
--- aws/resource_aws_s3_bucket_object.go | 7 +- aws/resource_aws_s3_bucket_object_test.go | 130 +++++++++++++++++++++- 2 files changed, 134 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_s3_bucket_object.go b/aws/resource_aws_s3_bucket_object.go index 76c63be4ed6..fae139d9502 100644 --- a/aws/resource_aws_s3_bucket_object.go +++ b/aws/resource_aws_s3_bucket_object.go @@ -8,6 +8,7 @@ import ( "io" "log" "os" + "regexp" "strings" "time" @@ -462,8 +463,10 @@ func resourceAwsS3BucketObjectDelete(d *schema.ResourceData, meta interface{}) e bucket := d.Get("bucket").(string) key := d.Get("key").(string) - // We are effectively ignoring any leading '/' in the key name as aws.Config.DisableRestProtocolURICleaning is false - key = strings.TrimPrefix(key, "/") + // We are effectively ignoring all leading '/'s in the key name and + // treating multiple '/'s as a single '/' as aws.Config.DisableRestProtocolURICleaning is false + key = strings.TrimLeft(key, "/") + key = regexp.MustCompile(`/+`).ReplaceAllString(key, "/") var err error if _, ok := d.GetOk("version_id"); ok { diff --git a/aws/resource_aws_s3_bucket_object_test.go b/aws/resource_aws_s3_bucket_object_test.go index 79c7d31c28e..847a6123849 100644 --- a/aws/resource_aws_s3_bucket_object_test.go +++ b/aws/resource_aws_s3_bucket_object_test.go @@ -702,7 +702,7 @@ func TestAccAWSS3BucketObject_tags(t *testing.T) { }) } -func TestAccAWSS3BucketObject_tagsLeadingSlash(t *testing.T) { +func TestAccAWSS3BucketObject_tagsLeadingSingleSlash(t *testing.T) { var obj1, obj2, obj3, obj4 s3.GetObjectOutput resourceName := "aws_s3_bucket_object.object" rInt := acctest.RandInt() @@ -766,6 +766,134 @@ func TestAccAWSS3BucketObject_tagsLeadingSlash(t *testing.T) { }) } +func TestAccAWSS3BucketObject_tagsLeadingMultipleSlashes(t *testing.T) { + var obj1, obj2, obj3, obj4 s3.GetObjectOutput + resourceName := "aws_s3_bucket_object.object" + rInt := acctest.RandInt() + key := "/////test-key" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketObjectDestroy, + Steps: []resource.TestStep{ + { + PreConfig: func() {}, + Config: testAccAWSS3BucketObjectConfig_withTags(rInt, key, "stuff"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj1), + testAccCheckAWSS3BucketObjectBody(&obj1, "stuff"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "3"), + resource.TestCheckResourceAttr(resourceName, "tags.Key1", "AAA"), + resource.TestCheckResourceAttr(resourceName, "tags.Key2", "BBB"), + resource.TestCheckResourceAttr(resourceName, "tags.Key3", "CCC"), + ), + }, + { + PreConfig: func() {}, + Config: testAccAWSS3BucketObjectConfig_withUpdatedTags(rInt, key, "stuff"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj2), + testAccCheckAWSS3BucketObjectVersionIdEquals(&obj2, &obj1), + testAccCheckAWSS3BucketObjectBody(&obj2, "stuff"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "4"), + resource.TestCheckResourceAttr(resourceName, "tags.Key2", "BBB"), + resource.TestCheckResourceAttr(resourceName, "tags.Key3", "XXX"), + resource.TestCheckResourceAttr(resourceName, "tags.Key4", "DDD"), + resource.TestCheckResourceAttr(resourceName, "tags.Key5", "EEE"), + ), + }, + { + PreConfig: func() {}, + Config: testAccAWSS3BucketObjectConfig_withNoTags(rInt, key, "stuff"), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckAWSS3BucketObjectExists(resourceName, &obj3), + testAccCheckAWSS3BucketObjectVersionIdEquals(&obj3, &obj2), + testAccCheckAWSS3BucketObjectBody(&obj3, "stuff"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + PreConfig: func() {}, + Config: testAccAWSS3BucketObjectConfig_withTags(rInt, key, "changed stuff"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj4), + testAccCheckAWSS3BucketObjectVersionIdDiffers(&obj4, &obj3), + testAccCheckAWSS3BucketObjectBody(&obj4, "changed stuff"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "3"), + resource.TestCheckResourceAttr(resourceName, "tags.Key1", "AAA"), + resource.TestCheckResourceAttr(resourceName, "tags.Key2", "BBB"), + resource.TestCheckResourceAttr(resourceName, "tags.Key3", "CCC"), + ), + }, + }, + }) +} + +func TestAccAWSS3BucketObject_tagsMultipleSlashes(t *testing.T) { + var obj1, obj2, obj3, obj4 s3.GetObjectOutput + resourceName := "aws_s3_bucket_object.object" + rInt := acctest.RandInt() + key := "first//second///third//" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketObjectDestroy, + Steps: []resource.TestStep{ + { + PreConfig: func() {}, + Config: testAccAWSS3BucketObjectConfig_withTags(rInt, key, "stuff"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj1), + testAccCheckAWSS3BucketObjectBody(&obj1, "stuff"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "3"), + resource.TestCheckResourceAttr(resourceName, "tags.Key1", "AAA"), + resource.TestCheckResourceAttr(resourceName, "tags.Key2", "BBB"), + resource.TestCheckResourceAttr(resourceName, "tags.Key3", "CCC"), + ), + }, + { + PreConfig: func() {}, + Config: testAccAWSS3BucketObjectConfig_withUpdatedTags(rInt, key, "stuff"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj2), + testAccCheckAWSS3BucketObjectVersionIdEquals(&obj2, &obj1), + testAccCheckAWSS3BucketObjectBody(&obj2, "stuff"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "4"), + resource.TestCheckResourceAttr(resourceName, "tags.Key2", "BBB"), + resource.TestCheckResourceAttr(resourceName, "tags.Key3", "XXX"), + resource.TestCheckResourceAttr(resourceName, "tags.Key4", "DDD"), + resource.TestCheckResourceAttr(resourceName, "tags.Key5", "EEE"), + ), + }, + { + PreConfig: func() {}, + Config: testAccAWSS3BucketObjectConfig_withNoTags(rInt, key, "stuff"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj3), + testAccCheckAWSS3BucketObjectVersionIdEquals(&obj3, &obj2), + testAccCheckAWSS3BucketObjectBody(&obj3, "stuff"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + PreConfig: func() {}, + Config: testAccAWSS3BucketObjectConfig_withTags(rInt, key, "changed stuff"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists(resourceName, &obj4), + testAccCheckAWSS3BucketObjectVersionIdDiffers(&obj4, &obj3), + testAccCheckAWSS3BucketObjectBody(&obj4, "changed stuff"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "3"), + resource.TestCheckResourceAttr(resourceName, "tags.Key1", "AAA"), + resource.TestCheckResourceAttr(resourceName, "tags.Key2", "BBB"), + resource.TestCheckResourceAttr(resourceName, "tags.Key3", "CCC"), + ), + }, + }, + }) +} + func 
TestAccAWSS3BucketObject_ObjectLockLegalHoldStartWithNone(t *testing.T) { var obj1, obj2, obj3 s3.GetObjectOutput resourceName := "aws_s3_bucket_object.object" From 693d594a2b87a7497856dacad45a54d073f30f8a Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 11 Feb 2021 16:15:41 -0500 Subject: [PATCH 1188/1212] Fix 'TestAccAWSS3BucketObject_tagsLeadingMultipleSlashes' and 'TestAccAWSS3BucketObject_tagsMultipleSlashes'. --- aws/resource_aws_s3_bucket_object_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/aws/resource_aws_s3_bucket_object_test.go b/aws/resource_aws_s3_bucket_object_test.go index 847a6123849..6af10d88478 100644 --- a/aws/resource_aws_s3_bucket_object_test.go +++ b/aws/resource_aws_s3_bucket_object_test.go @@ -798,9 +798,9 @@ func TestAccAWSS3BucketObject_tagsLeadingMultipleSlashes(t *testing.T) { testAccCheckAWSS3BucketObjectBody(&obj2, "stuff"), resource.TestCheckResourceAttr(resourceName, "tags.%", "4"), resource.TestCheckResourceAttr(resourceName, "tags.Key2", "BBB"), - resource.TestCheckResourceAttr(resourceName, "tags.Key3", "XXX"), + resource.TestCheckResourceAttr(resourceName, "tags.Key3", "X X"), resource.TestCheckResourceAttr(resourceName, "tags.Key4", "DDD"), - resource.TestCheckResourceAttr(resourceName, "tags.Key5", "EEE"), + resource.TestCheckResourceAttr(resourceName, "tags.Key5", "E:/"), ), }, { @@ -862,9 +862,9 @@ func TestAccAWSS3BucketObject_tagsMultipleSlashes(t *testing.T) { testAccCheckAWSS3BucketObjectBody(&obj2, "stuff"), resource.TestCheckResourceAttr(resourceName, "tags.%", "4"), resource.TestCheckResourceAttr(resourceName, "tags.Key2", "BBB"), - resource.TestCheckResourceAttr(resourceName, "tags.Key3", "XXX"), + resource.TestCheckResourceAttr(resourceName, "tags.Key3", "X X"), resource.TestCheckResourceAttr(resourceName, "tags.Key4", "DDD"), - resource.TestCheckResourceAttr(resourceName, "tags.Key5", "EEE"), + resource.TestCheckResourceAttr(resourceName, "tags.Key5", "E:/"), ), }, { From 80e5982d448a4a6258aa4855a6154351a212415c Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 11 Feb 2021 17:26:55 -0500 Subject: [PATCH 1189/1212] docs/default network: Clean up older docs --- .../docs/r/default_network_acl.html.markdown | 157 +++++++----------- .../docs/r/default_route_table.html.markdown | 63 +++---- .../r/default_security_group.html.markdown | 110 ++++-------- website/docs/r/default_subnet.html.markdown | 48 +++--- 4 files changed, 136 insertions(+), 242 deletions(-) diff --git a/website/docs/r/default_network_acl.html.markdown b/website/docs/r/default_network_acl.html.markdown index 44affd43690..b164d40d208 100644 --- a/website/docs/r/default_network_acl.html.markdown +++ b/website/docs/r/default_network_acl.html.markdown @@ -3,41 +3,26 @@ subcategory: "VPC" layout: "aws" page_title: "AWS: aws_default_network_acl" description: |- - Manage the default Network ACL resource. + Manage a default network ACL. --- # Resource: aws_default_network_acl -Provides a resource to manage the default AWS Network ACL. VPC Only. +Provides a resource to manage a VPC's default network ACL. This resource can manage the default network ACL of the default or a non-default VPC. -Each VPC created in AWS comes with a Default Network ACL that can be managed, but not -destroyed. **This is an advanced resource**, and has special caveats to be aware -of when using it. Please read this document in its entirety before using this -resource. +~> **NOTE:** This is an advanced resource with special caveats. 
Please read this document in its entirety before using this resource. The `aws_default_network_acl` behaves differently from normal resources. Terraform does not _create_ this resource but instead attempts to "adopt" it into management. -The `aws_default_network_acl` behaves differently from normal resources, in that -Terraform does not _create_ this resource, but instead attempts to "adopt" it -into management. We can do this because each VPC created has a Default Network -ACL that cannot be destroyed, and is created with a known set of default rules. +Every VPC has a default network ACL that can be managed but not destroyed. When Terraform first adopts the Default Network ACL, it **immediately removes all rules in the ACL**. It then proceeds to create any rules specified in the configuration. This step is required so that only the rules specified in the configuration are created. -When Terraform first adopts the Default Network ACL, it **immediately removes all -rules in the ACL**. It then proceeds to create any rules specified in the -configuration. This step is required so that only the rules specified in the -configuration are created. +This resource treats its inline rules as absolute; only the rules defined inline are created, and any additions/removals external to this resource will result in diffs being shown. For these reasons, this resource is incompatible with the `aws_network_acl_rule` resource. -This resource treats its inline rules as absolute; only the rules defined -inline are created, and any additions/removals external to this resource will -result in diffs being shown. For these reasons, this resource is incompatible with the -`aws_network_acl_rule` resource. +For more information about Network ACLs, see the AWS Documentation on [Network ACLs][aws-network-acls]. -For more information about Network ACLs, see the AWS Documentation on -[Network ACLs][aws-network-acls]. +## Example Usage -## Basic Example Usage, with default rules +### Basic Example -The following config gives the Default Network ACL the same rules that AWS -includes, but pulls the resource under management by Terraform. This means that -any ACL rules added or changed will be detected as drift. +The following config gives the Default Network ACL the same rules that AWS includes but pulls the resource under management by Terraform. This means that any ACL rules added or changed will be detected as drift. ```hcl resource "aws_vpc" "mainvpc" { @@ -45,13 +30,13 @@ resource "aws_vpc" "mainvpc" { } resource "aws_default_network_acl" "default" { - default_network_acl_id = aws_default_vpc.mainvpc.default_network_acl_id + default_network_acl_id = aws_vpc.mainvpc.default_network_acl_id ingress { protocol = -1 rule_no = 100 action = "allow" - cidr_block = aws_default_vpc.mainvpc.cidr_block + cidr_block = aws_vpc.mainvpc.cidr_block from_port = 0 to_port = 0 } @@ -67,10 +52,9 @@ resource "aws_default_network_acl" "default" { } ``` -## Example config to deny all Egress traffic, allowing Ingress +### Example: Deny All Egress Traffic, Allow Ingress -The following denies all Egress traffic by omitting any `egress` rules, while -including the default `ingress` rule to allow all traffic. +The following denies all Egress traffic by omitting any `egress` rules, while including the default `ingress` rule to allow all traffic. 
```hcl resource "aws_vpc" "mainvpc" { @@ -78,7 +62,7 @@ resource "aws_vpc" "mainvpc" { } resource "aws_default_network_acl" "default" { - default_network_acl_id = aws_default_vpc.mainvpc.default_network_acl_id + default_network_acl_id = aws_vpc.mainvpc.default_network_acl_id ingress { protocol = -1 @@ -91,11 +75,9 @@ resource "aws_default_network_acl" "default" { } ``` -## Example config to deny all traffic to any Subnet in the Default Network ACL +### Example: Deny All Traffic To Any Subnet In The Default Network ACL -This config denies all traffic in the Default ACL. This can be useful if you -want a locked down default to force all resources in the VPC to assign a -non-default ACL. +This config denies all traffic in the Default ACL. This can be useful if you want to lock down the VPC to force all resources to assign a non-default ACL. ```hcl resource "aws_vpc" "mainvpc" { @@ -103,62 +85,19 @@ resource "aws_vpc" "mainvpc" { } resource "aws_default_network_acl" "default" { - default_network_acl_id = aws_default_vpc.mainvpc.default_network_acl_id + default_network_acl_id = aws_vpc.mainvpc.default_network_acl_id # no rules defined, deny all traffic in this ACL } ``` -## Argument Reference - -The following arguments are supported: - -* `default_network_acl_id` - (Required) The Network ACL ID to manage. This -attribute is exported from `aws_vpc`, or manually found via the AWS Console. -* `subnet_ids` - (Optional) A list of Subnet IDs to apply the ACL to. See the -notes below on managing Subnets in the Default Network ACL -* `ingress` - (Optional) Specifies an ingress rule. Parameters defined below. -* `egress` - (Optional) Specifies an egress rule. Parameters defined below. -* `tags` - (Optional) A map of tags to assign to the resource. - -Both `egress` and `ingress` support the following keys: - -* `from_port` - (Required) The from port to match. -* `to_port` - (Required) The to port to match. -* `rule_no` - (Required) The rule number. Used for ordering. -* `action` - (Required) The action to take. -* `protocol` - (Required) The protocol to match. If using the -1 'all' -protocol, you must specify a from and to port of 0. -* `cidr_block` - (Optional) The CIDR block to match. This must be a -valid network mask. -* `ipv6_cidr_block` - (Optional) The IPv6 CIDR block. -* `icmp_type` - (Optional) The ICMP type to be used. Default 0. -* `icmp_code` - (Optional) The ICMP type code to be used. Default 0. - -~> Note: For more information on ICMP types and codes, see here: https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml +### Managing Subnets In A Default Network ACL -### Managing Subnets in the Default Network ACL +Within a VPC, all Subnets must be associated with a Network ACL. In order to "delete" the association between a Subnet and a non-default Network ACL, the association is destroyed by replacing it with an association between the Subnet and the Default ACL instead. -Within a VPC, all Subnets must be associated with a Network ACL. In order to -"delete" the association between a Subnet and a non-default Network ACL, the -association is destroyed by replacing it with an association between the Subnet -and the Default ACL instead. +When managing the Default Network ACL, you cannot "remove" Subnets. Instead, they must be reassigned to another Network ACL, or the Subnet itself must be destroyed. 
Because of these requirements, removing the `subnet_ids` attribute from the configuration of an `aws_default_network_acl` resource may result in a recurring plan, until the Subnets are reassigned to another Network ACL or are destroyed.

-When managing the Default Network ACL, you cannot "remove" Subnets.
-Instead, they must be reassigned to another Network ACL, or the Subnet itself must be
-destroyed. Because of these requirements, removing the `subnet_ids` attribute from the
-configuration of a `aws_default_network_acl` resource may result in a reoccurring
-plan, until the Subnets are reassigned to another Network ACL or are destroyed.
-
-Because Subnets are by default associated with the Default Network ACL, any
-non-explicit association will show up as a plan to remove the Subnet. For
-example: if you have a custom `aws_network_acl` with two subnets attached, and
-you remove the `aws_network_acl` resource, after successfully destroying this
-resource future plans will show a diff on the managed `aws_default_network_acl`,
-as those two Subnets have been orphaned by the now destroyed network acl and thus
-adopted by the Default Network ACL. In order to avoid a reoccurring plan, they
-will need to be reassigned, destroyed, or added to the `subnet_ids` attribute of
-the `aws_default_network_acl` entry.
+Because Subnets are by default associated with the Default Network ACL, any non-explicit association will show up as a plan to remove the Subnet. For example: if you have a custom `aws_network_acl` with two subnets attached, and you remove the `aws_network_acl` resource, after successfully destroying this resource future plans will show a diff on the managed `aws_default_network_acl`, as those two Subnets have been orphaned by the now destroyed network ACL and thus adopted by the Default Network ACL. In order to avoid a recurring plan, they will need to be reassigned, destroyed, or added to the `subnet_ids` attribute of the `aws_default_network_acl` entry.

As an alternative to the above, you can also specify the following lifecycle configuration in your `aws_default_network_acl` resource:
@@ -172,26 +111,52 @@ resource "aws_default_network_acl" "default" {
 }
 ```
-### Removing `aws_default_network_acl` from your configuration
+### Removing `aws_default_network_acl` From Your Configuration
+
+Each AWS VPC comes with a Default Network ACL that cannot be deleted. The `aws_default_network_acl` allows you to manage this Network ACL, but Terraform cannot destroy it. Removing this resource from your configuration will remove it from your statefile and management, **but will not destroy the Network ACL.** All Subnet associations and ingress or egress rules will be left as they are at the time of removal. You can resume managing them via the AWS Console.
+
+## Argument Reference
+
+The following arguments are required:
+
+* `default_network_acl_id` - (Required) Network ACL ID to manage. This attribute is exported from `aws_vpc`, or manually found via the AWS Console.
+
+The following arguments are optional:
+
+* `egress` - (Optional) Configuration block for an egress rule. Detailed below.
+* `ingress` - (Optional) Configuration block for an ingress rule. Detailed below.
+* `subnet_ids` - (Optional) List of Subnet IDs to apply the ACL to. See the notes below on managing Subnets in the Default Network ACL.
+* `tags` - (Optional) Map of tags to assign to the resource.
+
+### egress and ingress
+
+Both the `egress` and `ingress` configuration blocks have the same arguments.
+ +The following arguments are required: + +* `action` - (Required) The action to take. +* `from_port` - (Required) The from port to match. +* `protocol` - (Required) The protocol to match. If using the -1 'all' protocol, you must specify a from and to port of 0. +* `rule_no` - (Required) The rule number. Used for ordering. +* `to_port` - (Required) The to port to match. + +The following arguments are optional: + +* `cidr_block` - (Optional) The CIDR block to match. This must be a valid network mask. +* `icmp_code` - (Optional) The ICMP type code to be used. Default 0. +* `icmp_type` - (Optional) The ICMP type to be used. Default 0. +* `ipv6_cidr_block` - (Optional) The IPv6 CIDR block. -Each AWS VPC comes with a Default Network ACL that cannot be deleted. The `aws_default_network_acl` -allows you to manage this Network ACL, but Terraform cannot destroy it. Removing -this resource from your configuration will remove it from your statefile and -management, **but will not destroy the Network ACL.** All Subnets associations -and ingress or egress rules will be left as they are at the time of removal. You -can resume managing them via the AWS Console. +-> For more information on ICMP types and codes, see [Internet Control Message Protocol (ICMP) Parameters](https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml). ## Attributes Reference In addition to all arguments above, the following attributes are exported: -* `id` - The ID of the Default Network ACL -* `arn` - The ARN of the Default Network ACL -* `vpc_id` - The ID of the associated VPC -* `ingress` - Set of ingress rules -* `egress` - Set of egress rules -* `subnet_ids` – IDs of associated Subnets -* `owner_id` - The ID of the AWS account that owns the Default Network ACL +* `arn` - ARN of the Default Network ACL +* `id` - ID of the Default Network ACL +* `owner_id` - ID of the AWS account that owns the Default Network ACL +* `vpc_id` - ID of the associated VPC [aws-network-acls]: http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html diff --git a/website/docs/r/default_route_table.html.markdown b/website/docs/r/default_route_table.html.markdown index 463d8cba73c..003d7ad777a 100644 --- a/website/docs/r/default_route_table.html.markdown +++ b/website/docs/r/default_route_table.html.markdown @@ -3,48 +3,24 @@ subcategory: "VPC" layout: "aws" page_title: "AWS: aws_default_route_table" description: |- - Provides a resource to manage a Default VPC Routing Table. + Provides a resource to manage a default route table of a VPC. --- # Resource: aws_default_route_table -Provides a resource to manage a Default VPC Routing Table. +Provides a resource to manage a default route table of a VPC. This resource can manage the default route table of the default or a non-default VPC. -Each VPC created in AWS comes with a Default Route Table that can be managed, but not -destroyed. **This is an advanced resource**, and has special caveats to be aware -of when using it. Please read this document in its entirety before using this -resource. It is recommended you **do not** use both `aws_default_route_table` to -manage the default route table **and** use the `aws_main_route_table_association`, -due to possible conflict in routes. +~> **NOTE:** This is an advanced resource with special caveats. Please read this document in its entirety before using this resource. The `aws_default_route_table` resource behaves differently from normal resources. 
Terraform does not _create_ this resource but instead attempts to "adopt" it into management. **Do not** use both `aws_default_route_table` to manage a default route table **and** `aws_main_route_table_association` with the same VPC due to possible route conflicts. -The `aws_default_route_table` behaves differently from normal resources, in that -Terraform does not _create_ this resource, but instead attempts to "adopt" it -into management. We can do this because each VPC created has a Default Route -Table that cannot be destroyed, and is created with a single route. +Every VPC has a default route table that can be managed but not destroyed. When Terraform first adopts a default route table, it **immediately removes all defined routes**. It then proceeds to create any routes specified in the configuration. This step is required so that only the routes specified in the configuration exist in the default route table. -When Terraform first adopts the Default Route Table, it **immediately removes all -defined routes**. It then proceeds to create any routes specified in the -configuration. This step is required so that only the routes specified in the -configuration present in the Default Route Table. +For more information, see the Amazon VPC User Guide on [Route Tables](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html). For information about managing normal route tables in Terraform, see [`aws_route_table`](/docs/providers/aws/r/route_table.html). -For more information about Route Tables, see the AWS Documentation on -[Route Tables][aws-route-tables]. - -For more information about managing normal Route Tables in Terraform, see our -documentation on [aws_route_table][tf-route-tables]. - -~> **NOTE on Route Tables and Routes:** Terraform currently -provides both a standalone [Route resource](route.html) and a Route Table resource with routes -defined in-line. At this time you cannot use a Route Table with in-line routes -in conjunction with any Route resources. Doing so will cause -a conflict of rule settings and will overwrite routes. - - -## Example usage with tags +## Example Usage ```hcl resource "aws_default_route_table" "r" { - default_route_table_id = aws_default_vpc.foo.default_route_table_id + default_route_table_id = aws_vpc.foo.default_route_table_id route { # ... @@ -58,15 +34,19 @@ resource "aws_default_route_table" "r" { ## Argument Reference -The following arguments are supported: +The following arguments are required: + +* `default_route_table_id` - (Required) ID of the default route table. + +The following arguments are optional: + +* `propagating_vgws` - (Optional) List of virtual gateways for propagation. +* `route` - (Optional) Configuration block of routes. Detailed below. +* `tags` - (Optional) Map of tags to assign to the resource. -* `default_route_table_id` - (Required) The ID of the Default Routing Table. -* `route` - (Optional) A list of route objects. Their keys are documented below. - This argument is processed in [attribute-as-blocks mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html). -* `tags` - (Optional) A map of tags to assign to the resource. -* `propagating_vgws` - (Optional) A list of virtual gateways for propagation. +### route -### route Argument Reference +This argument is processed in [attribute-as-blocks mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html). 
One of the following destination arguments must be supplied: @@ -90,12 +70,13 @@ Note that the default route, mapping the VPC's CIDR block to "local", is created In addition to all arguments above, the following attributes are exported: -* `id` - The ID of the routing table -* `owner_id` - The ID of the AWS account that owns the route table +* `id` - ID of the route table. +* `owner_id` - ID of the AWS account that owns the route table. +* `vpc_id` - ID of the VPC. ## Import -Default VPC Routing tables can be imported using the `vpc_id`, e.g. +Default VPC route tables can be imported using the `vpc_id`, e.g. ``` $ terraform import aws_default_route_table.example vpc-33cc44dd diff --git a/website/docs/r/default_security_group.html.markdown b/website/docs/r/default_security_group.html.markdown index bcd25bb1252..a9a6b620b00 100644 --- a/website/docs/r/default_security_group.html.markdown +++ b/website/docs/r/default_security_group.html.markdown @@ -3,42 +3,26 @@ subcategory: "VPC" layout: "aws" page_title: "AWS: aws_default_security_group" description: |- - Manage the default Security Group resource. + Manage a default security group resource. --- # Resource: aws_default_security_group -Provides a resource to manage the default AWS Security Group. +Provides a resource to manage a default security group. This resource can manage the default security group of the default or a non-default VPC. -For EC2 Classic accounts, each region comes with a Default Security Group. -Additionally, each VPC created in AWS comes with a Default Security Group that can be managed, but not -destroyed. **This is an advanced resource**, and has special caveats to be aware -of when using it. Please read this document in its entirety before using this -resource. +~> **NOTE:** This is an advanced resource with special caveats. Please read this document in its entirety before using this resource. The `aws_default_security_group` resource behaves differently from normal resources. Terraform does not _create_ this resource but instead attempts to "adopt" it into management. -The `aws_default_security_group` behaves differently from normal resources, in that -Terraform does not _create_ this resource, but instead "adopts" it -into management. We can do this because these default security groups cannot be -destroyed, and are created with a known set of default ingress/egress rules. +For EC2 Classic accounts, each region comes with a default security group. Additionally, each VPC created in AWS comes with a default security group that can be managed but not destroyed. -When Terraform first adopts the Default Security Group, it **immediately removes all -ingress and egress rules in the Security Group**. It then proceeds to create any rules specified in the -configuration. This step is required so that only the rules specified in the -configuration are created. +When Terraform first adopts the default security group, it **immediately removes all ingress and egress rules in the Security Group**. It then creates any rules specified in the configuration. This way only the rules specified in the configuration are created. -This resource treats its inline rules as absolute; only the rules defined -inline are created, and any additions/removals external to this resource will -result in diff shown. For these reasons, this resource is incompatible with the -`aws_security_group_rule` resource. 
+This resource treats its inline rules as absolute; only the rules defined inline are created, and any additions/removals external to this resource will result in a diff being shown. For these reasons, this resource is incompatible with the `aws_security_group_rule` resource.
 
-For more information about Default Security Groups, see the AWS Documentation on
-[Default Security Groups][aws-default-security-groups].
+For more information about default security groups, see the AWS documentation on [Default Security Groups][aws-default-security-groups]. To manage normal security groups, see the [`aws_security_group`](/docs/providers/aws/r/security_group.html) resource.
 
-## Basic Example Usage, with default rules
+## Example Usage
 
-The following config gives the Default Security Group the same rules that AWS
-provides by default, but pulls the resource under management by Terraform. This means that
-any ingress or egress rules added or changed will be detected as drift.
+The following config gives the default security group the same rules that AWS provides by default, but brings the resource under Terraform management. This means that any ingress or egress rules added or changed will be detected as drift.
 
 ```hcl
 resource "aws_vpc" "mainvpc" {
@@ -46,7 +30,7 @@ resource "aws_vpc" "mainvpc" {
 }
 
 resource "aws_default_security_group" "default" {
-  vpc_id = aws_default_vpc.mainvpc.id
+  vpc_id = aws_vpc.mainvpc.id
 
   ingress {
     protocol  = -1
@@ -64,10 +48,9 @@ resource "aws_vpc" "mainvpc" {
 }
 ```
 
-## Example config to deny all Egress traffic, allowing Ingress
+### Example Config To Deny All Egress Traffic, Allowing Ingress
 
-The following denies all Egress traffic by omitting any `egress` rules, while
-including the default `ingress` rule to allow all traffic.
+The following denies all Egress traffic by omitting any `egress` rules, while including the default `ingress` rule to allow all traffic.
 
 ```hcl
 resource "aws_vpc" "mainvpc" {
@@ -75,7 +58,7 @@ resource "aws_vpc" "mainvpc" {
 }
 
 resource "aws_default_security_group" "default" {
-  vpc_id = aws_default_vpc.mainvpc.id
+  vpc_id = aws_vpc.mainvpc.id
 
   ingress {
     protocol  = -1
@@ -86,67 +69,42 @@ resource "aws_vpc" "mainvpc" {
 }
 ```
 
-## Argument Reference
+### Removing `aws_default_security_group` From Your Configuration
 
-The arguments of an `aws_default_security_group` differ slightly from `aws_security_group`
-resources. Namely, the `name` argument is computed, and the `name_prefix` attribute
-removed. The following arguments are still supported:
+Removing this resource from your configuration will remove it from your statefile and management, but will not destroy the Security Group. All ingress or egress rules will be left as they are at the time of removal. You can resume managing them via the AWS Console.
 
-* `ingress` - (Optional) Can be specified multiple times for each ingress rule. Each ingress block supports fields documented [below](#ingress-blocks).
-* `egress` - (Optional, VPC only) Can be specified multiple times for each egress rule. Each egress block supports fields documented [below](#egress-blocks).
-* `vpc_id` - (Optional, Forces new resource) The VPC ID. **Note that changing the `vpc_id` will _not_ restore any default security group rules that were modified, added, or removed.** It will be left in its current state
-* `tags` - (Optional) A map of tags to assign to the resource.
+## Argument Reference
 
-### `ingress` Block
+The following arguments are optional:
 
-* `cidr_blocks` - (Optional) List of CIDR blocks.
-* `description` - (Optional) Description of this ingress rule. -* `from_port` - (Required) The start port (or ICMP type number if protocol is "icmp" or "icmpv6") -* `ipv6_cidr_blocks` - (Optional) List of IPv6 CIDR blocks. -* `prefix_list_ids` - (Optional) List of prefix list IDs. -* `protocol` - (Required) The protocol. If you select a protocol of "-1" (semantically equivalent to `"all"`, which is not a valid value here), you must specify a "from_port" and "to_port" equal to 0. If not icmp, icmpv6, tcp, udp, or "-1" use the [protocol number](https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) -* `security_groups` - (Optional) List of security group Group Names if using EC2-Classic, or Group IDs if using a VPC. -* `self` - (Optional) If true, the security group itself will be added as a source to this ingress rule. -* `to_port` - (Required) The end range port (or ICMP code if protocol is "icmp"). +* `egress` - (Optional, VPC only) Configuration block. Detailed below. +* `ingress` - (Optional) Configuration block. Detailed below. +* `tags` - (Optional) Map of tags to assign to the resource. +* `vpc_id` - (Optional, Forces new resource) VPC ID. **Note that changing the `vpc_id` will _not_ restore any default security group rules that were modified, added, or removed.** It will be left in its current state. + +### egress and ingress -### `egress` Block +Both the `egress` and `ingress` configuration blocks have the same arguments. * `cidr_blocks` - (Optional) List of CIDR blocks. -* `description` - (Optional) Description of this egress rule. -* `from_port` - (Required) The start port (or ICMP type number if protocol is "icmp") +* `description` - (Optional) Description of this rule. +* `from_port` - (Required) Start port (or ICMP type number if protocol is `icmp`) * `ipv6_cidr_blocks` - (Optional) List of IPv6 CIDR blocks. * `prefix_list_ids` - (Optional) List of prefix list IDs (for allowing access to VPC endpoints) -* `protocol` - (Required) The protocol. If you select a protocol of "-1" (semantically equivalent to `"all"`, which is not a valid value here), you must specify a "from_port" and "to_port" equal to 0. If not icmp, tcp, udp, or "-1" use the [protocol number](https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) +* `protocol` - (Required) Protocol. If you select a protocol of "-1" (semantically equivalent to `all`, which is not a valid value here), you must specify a `from_port` and `to_port` equal to `0`. If not `icmp`, `tcp`, `udp`, or `-1` use the [protocol number](https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml). * `security_groups` - (Optional) List of security group Group Names if using EC2-Classic, or Group IDs if using a VPC. -* `self` - (Optional) If true, the security group itself will be added as a source to this egress rule. -* `to_port` - (Required) The end range port (or ICMP code if protocol is "icmp"). - - -## Usage - -With the exceptions mentioned above, `aws_default_security_group` should -identical behavior to `aws_security_group`. Please consult [AWS_SECURITY_GROUP](/docs/providers/aws/r/security_group.html) -for further usage documentation. - -### Removing `aws_default_security_group` from your configuration - -Each AWS VPC (or region, if using EC2 Classic) comes with a Default Security -Group that cannot be deleted. The `aws_default_security_group` allows you to -manage this Security Group, but Terraform cannot destroy it. 
Removing this resource
-from your configuration will remove it from your statefile and management, but
-will not destroy the Security Group. All ingress or egress rules will be left as
-they are at the time of removal. You can resume managing them via the AWS Console.
+* `self` - (Optional) Whether the security group itself will be added as a source to this rule.
+* `to_port` - (Required) End range port (or ICMP code if protocol is `icmp`).
 
 ## Attributes Reference
 
 In addition to all arguments above, the following attributes are exported:
 
-* `id` - The ID of the security group
-* `arn` - The ARN of the security group
-* `vpc_id` - The VPC ID.
-* `owner_id` - The owner ID.
-* `name` - The name of the security group
-* `description` - The description of the security group
+* `arn` - ARN of the security group.
+* `description` - Description of the security group.
+* `id` - ID of the security group.
+* `name` - Name of the security group.
+* `owner_id` - Owner ID.
 
 [aws-default-security-groups]: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html#default-security-group
 
diff --git a/website/docs/r/default_subnet.html.markdown b/website/docs/r/default_subnet.html.markdown
index c4a6632582e..eaf0fe853a3 100644
--- a/website/docs/r/default_subnet.html.markdown
+++ b/website/docs/r/default_subnet.html.markdown
@@ -8,16 +8,13 @@ description: |-
 
 # Resource: aws_default_subnet
 
-Provides a resource to manage a [default AWS VPC subnet](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/default-vpc.html#default-vpc-basics)
-in the current region.
+Provides a resource to manage a [default AWS VPC subnet](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/default-vpc.html#default-vpc-basics) in the current region.
 
-The `aws_default_subnet` behaves differently from normal resources, in that
-Terraform does not _create_ this resource, but instead "adopts" it
-into management.
+The `aws_default_subnet` behaves differently from normal resources, in that Terraform does not _create_ this resource but instead "adopts" it into management.
 
-## Example Usage
+The `aws_default_subnet` resource allows you to manage a region's default VPC subnet, but Terraform cannot destroy it. Removing this resource from your configuration will remove it from your statefile and Terraform management.
 
-Basic usage with tags:
+## Example Usage
 
 ```hcl
 resource "aws_default_subnet" "default_az1" {
@@ -31,35 +28,28 @@ resource "aws_default_subnet" "default_az1" {
 
 ## Argument Reference
 
-The arguments of an `aws_default_subnet` differ from `aws_subnet` resources.
-Namely, the `availability_zone` argument is required and the `availability_zone_id`, `vpc_id`, `cidr_block`, `ipv6_cidr_block`,
-and `assign_ipv6_address_on_creation` arguments are computed.
-The following arguments are still supported:
+The following argument is required:
 
-* `map_public_ip_on_launch` - (Optional) Specify true to indicate
-  that instances launched into the subnet should be assigned
-  a public IP address.
-* `tags` - (Optional) A map of tags to assign to the resource.
+* `availability_zone` - (Required) AZ for the subnet.
 
-### Removing `aws_default_subnet` from your configuration
+The following arguments are optional:
 
-The `aws_default_subnet` resource allows you to manage a region's default VPC subnet,
-but Terraform cannot destroy it. Removing this resource from your configuration
-will remove it from your statefile and management, but will not destroy the subnet.
-You can resume managing the subnet via the AWS Console.
+* `map_public_ip_on_launch` - (Optional) Whether instances launched into the subnet should be assigned a public IP address. +* `tags` - (Optional) Map of tags to assign to the resource. ## Attributes Reference In addition to all arguments above, the following attributes are exported: -* `id` - The ID of the subnet -* `availability_zone`- The AZ for the subnet. -* `availability_zone_id`- The AZ ID of the subnet. -* `cidr_block` - The CIDR block for the subnet. -* `vpc_id` - The VPC ID. -* `ipv6_association_id` - The association ID for the IPv6 CIDR block. -* `ipv6_cidr_block` - The IPv6 CIDR block. -* `owner_id` - The ID of the AWS account that owns the subnet. +* `arn` - ARN for the subnet. +* `assign_ipv6_address_on_creation` - Whether IPv6 addresses are assigned on creation. +* `availability_zone_id`- AZ ID of the subnet. +* `cidr_block` - CIDR block for the subnet. +* `id` - ID of the subnet +* `ipv6_association_id` - Association ID for the IPv6 CIDR block. +* `ipv6_cidr_block` - IPv6 CIDR block. +* `owner_id` - ID of the AWS account that owns the subnet. +* `vpc_id` - VPC ID. ## Import @@ -67,4 +57,4 @@ Subnets can be imported using the `subnet id`, e.g. ``` $ terraform import aws_default_subnet.public_subnet subnet-9d4a7b6c -``` +``` \ No newline at end of file From beb9f06241e89acad9e6dd3267fd8ccbb2a0f953 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 11 Feb 2021 18:22:50 -0500 Subject: [PATCH 1190/1212] Ensure example resource lifecycle waiter code compiles (#17578) --- docs/contributing/retries-and-waiters.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/contributing/retries-and-waiters.md b/docs/contributing/retries-and-waiters.md index 7ced24fd694..86fbc7dacf1 100644 --- a/docs/contributing/retries-and-waiters.md +++ b/docs/contributing/retries-and-waiters.md @@ -359,7 +359,7 @@ function ExampleThingUpdate(d *schema.ResourceData, meta interface{}) error { d.HasChange("attribute") { // ... AWS Go SDK logic to update attribute ... - if err := waiter.ThingAttributeUpdated(conn, d.Id(), d.Get("attribute").(string)); err != nil { + if _, err := waiter.ThingAttributeUpdated(conn, d.Id(), d.Get("attribute").(string)); err != nil { return fmt.Errorf("error waiting for Example Thing (%s) attribute update: %w", d.Id(), err) } } @@ -466,7 +466,7 @@ func ThingDeleted(conn *example.Example, id string) (*example.Thing, error) { function ExampleThingCreate(d *schema.ResourceData, meta interface{}) error { // ... AWS Go SDK logic to create resource ... - if err := waiter.ThingCreated(conn, d.Id()); err != nil { + if _, err := waiter.ThingCreated(conn, d.Id()); err != nil { return fmt.Errorf("error waiting for Example Thing (%s) creation: %w", d.Id(), err) } @@ -476,7 +476,7 @@ function ExampleThingCreate(d *schema.ResourceData, meta interface{}) error { function ExampleThingDelete(d *schema.ResourceData, meta interface{}) error { // ... AWS Go SDK logic to delete resource ... 
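	// Illustrative sketch, not part of this patch: waiter functions like
	// waiter.ThingDeleted return (*example.Thing, error) because they are
	// typically built on resource.StateChangeConf, which yields the final
	// object alongside any error. A caller that only needs the error must
	// therefore discard the first return value, which is what the change
	// below does. A minimal waiter body, with hypothetical status helpers,
	// might look like:
	//
	//	stateConf := &resource.StateChangeConf{
	//		Pending: []string{ThingStatusDeleting}, // assumed status constant
	//		Target:  []string{},
	//		Refresh: ThingStatus(conn, id), // hypothetical status refresh function
	//		Timeout: ThingDeletionTimeout,  // hypothetical timeout constant
	//	}
	//
	//	outputRaw, err := stateConf.WaitForState()
	//
	//	if output, ok := outputRaw.(*example.Thing); ok {
	//		return output, err
	//	}
	//
	//	return nil, err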
- if err := waiter.ThingDeleted(conn, d.Id()); err != nil { + if _, err := waiter.ThingDeleted(conn, d.Id()); err != nil { return fmt.Errorf("error waiting for Example Thing (%s) deletion: %w", d.Id(), err) } From 976ec8e85a9c90c14d9d42431316d953a3097b4a Mon Sep 17 00:00:00 2001 From: bill-rich Date: Thu, 11 Feb 2021 15:24:58 -0800 Subject: [PATCH 1191/1212] Add CHANGELOG entry --- .changelog/17336.txt | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 .changelog/17336.txt diff --git a/.changelog/17336.txt b/.changelog/17336.txt new file mode 100644 index 00000000000..b7df7acc0c2 --- /dev/null +++ b/.changelog/17336.txt @@ -0,0 +1,11 @@ +```release-note:new-data-source +aws_cloudfront_cache_policy +``` + +```release-note:new-resource +aws_cloudfront_cache_policy +``` + +```release-note:enhancement +resource/aws_cloudfront_distribution: Add `cache_policy_id` attribute +``` From 9d94f78c737efb710836713fcf085496995c898c Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 11 Feb 2021 23:47:57 +0000 Subject: [PATCH 1192/1212] Update CHANGELOG.md for #17556 --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 07bf24afd9d..67593ac7024 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,17 +12,24 @@ ENHANCEMENTS: * data-source/aws_customer_gateway: Add `device_name` attribute ([#14786](https://github.com/hashicorp/terraform-provider-aws/issues/14786)) * data-source/aws_iam_policy_document: Support merging policy documents by adding `source_policy_documents` and `override_policy_documents` arguments ([#12055](https://github.com/hashicorp/terraform-provider-aws/issues/12055)) * provider: Add terraform-provider-aws version to HTTP User-Agent header ([#17486](https://github.com/hashicorp/terraform-provider-aws/issues/17486)) +* resource/aws_budgets_budget: Add `arn` attribute ([#13139](https://github.com/hashicorp/terraform-provider-aws/issues/13139)) +* resource/aws_budgets_budget: Add plan time validation for `budget_type`, `time_unit`, and `subscriber_sns_topic_arns` arguments ([#13139](https://github.com/hashicorp/terraform-provider-aws/issues/13139)) * resource/aws_cloudfront_distribution: Add `realtime_log_config_arn` attribute to `default_cache_behavior` and `ordered_cache_behavior` configuration blocks ([#14974](https://github.com/hashicorp/terraform-provider-aws/issues/14974)) +* resource/aws_cloudfront_public_key: Add import support ([#17044](https://github.com/hashicorp/terraform-provider-aws/issues/17044)) * resource/aws_cloudwatch_log_destination: Add plan time validation to `role_arn`, `name` and `target_arn`. ([#11687](https://github.com/hashicorp/terraform-provider-aws/issues/11687)) * resource/aws_cloudwatch_log_group: Add plan time validation for `retention_in_days` argument ([#14673](https://github.com/hashicorp/terraform-provider-aws/issues/14673)) * resource/aws_codebuild_report_group: Add `delete_reports` argument ([#17338](https://github.com/hashicorp/terraform-provider-aws/issues/17338)) +* resource/aws_codestarconnections_connection: Add `tags` argument ([#16835](https://github.com/hashicorp/terraform-provider-aws/issues/16835)) * resource/aws_customer_gateway: Add `device_name` argument ([#14786](https://github.com/hashicorp/terraform-provider-aws/issues/14786)) +* resource/aws_ec2_capacity_reservation: Add `owner_id` attribute ([#17129](https://github.com/hashicorp/terraform-provider-aws/issues/17129)) * resource/aws_ec2_traffic_mirror_filter: Add `arn` attribute. 
([#13948](https://github.com/hashicorp/terraform-provider-aws/issues/13948)) * resource/aws_ec2_traffic_mirror_filter_rule: Add arn attribute. ([#13949](https://github.com/hashicorp/terraform-provider-aws/issues/13949)) * resource/aws_ec2_traffic_mirror_filter_rule: Add plan time validation to `destination_port_range.from_port`, `destination_port_range.to_port`, `source_port_range.from_port`, and `source_port_range.to_port`. ([#13949](https://github.com/hashicorp/terraform-provider-aws/issues/13949)) +* resource/aws_imagebuilder_image_recipe: Add `gp3` as a valid value for the `volume_type` attribute ([#17286](https://github.com/hashicorp/terraform-provider-aws/issues/17286)) * resource/aws_lambda_event_source_mapping: Add `topics` attribute to support Amazon MSK as an event source ([#14746](https://github.com/hashicorp/terraform-provider-aws/issues/14746)) * resource/aws_lb_listener_certificate: Add import support ([#16474](https://github.com/hashicorp/terraform-provider-aws/issues/16474)) +* resource/aws_licensemanager_license_configuration: Add `arn` and `owner_account_id` attributes ([#17160](https://github.com/hashicorp/terraform-provider-aws/issues/17160)) * resource/aws_ses_active_receipt_rule_set: Add `arn` attribute ([#13962](https://github.com/hashicorp/terraform-provider-aws/issues/13962)) * resource/aws_ses_active_receipt_rule_set: Add plan time validation for `rule_set_name` argument ([#13962](https://github.com/hashicorp/terraform-provider-aws/issues/13962)) * resource/aws_ses_configuration_set: Add `arn` attribute. ([#13972](https://github.com/hashicorp/terraform-provider-aws/issues/13972)) From e04e60924d3d0d9522cb2a60f11bd9f30d8f6156 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 11 Feb 2021 23:58:36 +0000 Subject: [PATCH 1193/1212] Update CHANGELOG.md for #17336 --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 67593ac7024..b5f61474d63 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ FEATURES: +* **New Data Source:** `aws_cloudfront_cache_policy` ([#17336](https://github.com/hashicorp/terraform-provider-aws/issues/17336)) +* **New Resource:** `aws_cloudfront_cache_policy` ([#17336](https://github.com/hashicorp/terraform-provider-aws/issues/17336)) * **New Resource:** `aws_cloudfront_realtime_log_config` ([#14974](https://github.com/hashicorp/terraform-provider-aws/issues/14974)) * **New Resource:** `aws_config_conformance_pack` ([#17313](https://github.com/hashicorp/terraform-provider-aws/issues/17313)) * **New Resource:** `aws_sagemaker_model_package_group` ([#17366](https://github.com/hashicorp/terraform-provider-aws/issues/17366)) @@ -14,6 +16,7 @@ ENHANCEMENTS: * provider: Add terraform-provider-aws version to HTTP User-Agent header ([#17486](https://github.com/hashicorp/terraform-provider-aws/issues/17486)) * resource/aws_budgets_budget: Add `arn` attribute ([#13139](https://github.com/hashicorp/terraform-provider-aws/issues/13139)) * resource/aws_budgets_budget: Add plan time validation for `budget_type`, `time_unit`, and `subscriber_sns_topic_arns` arguments ([#13139](https://github.com/hashicorp/terraform-provider-aws/issues/13139)) +* resource/aws_cloudfront_distribution: Add `cache_policy_id` attribute ([#17336](https://github.com/hashicorp/terraform-provider-aws/issues/17336)) * resource/aws_cloudfront_distribution: Add `realtime_log_config_arn` attribute to `default_cache_behavior` and `ordered_cache_behavior` configuration blocks 
([#14974](https://github.com/hashicorp/terraform-provider-aws/issues/14974)) * resource/aws_cloudfront_public_key: Add import support ([#17044](https://github.com/hashicorp/terraform-provider-aws/issues/17044)) * resource/aws_cloudwatch_log_destination: Add plan time validation to `role_arn`, `name` and `target_arn`. ([#11687](https://github.com/hashicorp/terraform-provider-aws/issues/11687)) From 596aa3dfc3867a880dea48d52e8802e3a3d11164 Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Fri, 12 Feb 2021 02:07:41 +0200 Subject: [PATCH 1194/1212] resource/aws_ssm_maint_windows_task: Add CloudWatch configuration and plan time validations (#11774) Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAWSSSMMaintenanceWindowTask_disappears (19.58s) --- PASS: TestAccAWSSSMMaintenanceWindowTask_emptyNotificationConfig (19.85s) --- PASS: TestAccAWSSSMMaintenanceWindowTask_TaskInvocationStepFunctionParameters (22.39s) --- PASS: TestAccAWSSSMMaintenanceWindowTask_updateForcesNewResource (34.36s) --- PASS: TestAccAWSSSMMaintenanceWindowTask_basic (35.48s) --- PASS: TestAccAWSSSMMaintenanceWindowTask_TaskInvocationAutomationParameters (42.68s) --- PASS: TestAccAWSSSMMaintenanceWindowTask_TaskInvocationRunCommandParametersCloudWatch (44.70s) --- PASS: TestAccAWSSSMMaintenanceWindowTask_TaskInvocationRunCommandParameters (45.14s) --- PASS: TestAccAWSSSMMaintenanceWindowTask_TaskInvocationLambdaParameters (46.92s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- PASS: TestAccAWSSSMMaintenanceWindowTask_disappears (24.63s) --- PASS: TestAccAWSSSMMaintenanceWindowTask_emptyNotificationConfig (24.80s) --- PASS: TestAccAWSSSMMaintenanceWindowTask_TaskInvocationStepFunctionParameters (27.60s) --- PASS: TestAccAWSSSMMaintenanceWindowTask_TaskInvocationLambdaParameters (39.96s) --- PASS: TestAccAWSSSMMaintenanceWindowTask_basic (41.36s) --- PASS: TestAccAWSSSMMaintenanceWindowTask_updateForcesNewResource (41.58s) --- PASS: TestAccAWSSSMMaintenanceWindowTask_TaskInvocationAutomationParameters (48.45s) --- PASS: TestAccAWSSSMMaintenanceWindowTask_TaskInvocationRunCommandParameters (49.11s) --- PASS: TestAccAWSSSMMaintenanceWindowTask_TaskInvocationRunCommandParametersCloudWatch (54.92s) ``` --- .changelog/11774.txt | 7 + ...esource_aws_ssm_maintenance_window_task.go | 172 ++++++++++++----- ...ce_aws_ssm_maintenance_window_task_test.go | 173 +++++++++++++++--- aws/validators.go | 13 -- .../ssm_maintenance_window_task.html.markdown | 6 + 5 files changed, 282 insertions(+), 89 deletions(-) create mode 100644 .changelog/11774.txt diff --git a/.changelog/11774.txt b/.changelog/11774.txt new file mode 100644 index 00000000000..82746db52db --- /dev/null +++ b/.changelog/11774.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_ssm_maintenance_window_task: Add `task_invocation_parameters` `run_command_parameters` block `cloudwatch_config` and `document_version` arguments +``` + +```release-note:enhancement +resource/aws_ssm_maintenance_window_task: Add plan time validation to `max_concurrency`, `max_errors`, `priority`, `service_role_arn`, `targets`, `targets.notification_arn`, `targets.service_role_arn`, `task_type`, `task_invocation_parameters.run_command_parameters.comment`, `task_invocation_parameters.run_command_parameters.document_hash`, `task_invocation_parameters.run_command_parameters.timeout_seconds`, and `task_invocation_parameters.run_command_parameters.notification_config.notification_events` arguments +``` diff --git 
a/aws/resource_aws_ssm_maintenance_window_task.go b/aws/resource_aws_ssm_maintenance_window_task.go index c4631d0f87b..e79b9d3df86 100644 --- a/aws/resource_aws_ssm_maintenance_window_task.go +++ b/aws/resource_aws_ssm_maintenance_window_task.go @@ -31,19 +31,22 @@ func resourceAwsSsmMaintenanceWindowTask() *schema.Resource { }, "max_concurrency": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringMatch(regexp.MustCompile(`^([1-9][0-9]*|[1-9][0-9]%|[1-9]%|100%)$`), "must be a number without leading zeros or a percentage between 1% and 100% without leading zeros and ending with the percentage symbol"), }, "max_errors": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringMatch(regexp.MustCompile(`^([1-9][0-9]*|[0]|[1-9][0-9]%|[0-9]%|100%)$`), "must be zero, a number without leading zeros, or a percentage between 1% and 100% without leading zeros and ending with the percentage symbol"), }, "task_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice(ssm.MaintenanceWindowTaskType_Values(), false), }, "task_arn": { @@ -52,13 +55,15 @@ func resourceAwsSsmMaintenanceWindowTask() *schema.Resource { }, "service_role_arn": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, }, "targets": { Type: schema.TypeList, - Required: true, + Optional: true, + MaxItems: 5, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "key": { @@ -68,6 +73,7 @@ func resourceAwsSsmMaintenanceWindowTask() *schema.Resource { "values": { Type: schema.TypeList, Required: true, + MaxItems: 50, Elem: &schema.Schema{Type: schema.TypeString}, }, }, @@ -75,9 +81,10 @@ func resourceAwsSsmMaintenanceWindowTask() *schema.Resource { }, "name": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validateAwsSSMMaintenanceWindowTaskName, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9_\-.]{3,128}$`), + "Only alphanumeric characters, hyphens, dots & underscores allowed."), }, "description": { @@ -87,8 +94,9 @@ func resourceAwsSsmMaintenanceWindowTask() *schema.Resource { }, "priority": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), }, "task_invocation_parameters": { @@ -165,22 +173,26 @@ func resourceAwsSsmMaintenanceWindowTask() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "comment": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 100), }, "document_hash": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 256), }, "document_hash_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{ - ssm.DocumentHashTypeSha256, - ssm.DocumentHashTypeSha1, - }, false), + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(ssm.DocumentHashType_Values(), false), + }, + "document_version": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringMatch(regexp.MustCompile(`([$]LATEST|[$]DEFAULT|^[1-9][0-9]*$)`), "must be $DEFAULT, $LATEST, or a version 
number"), }, "notification_config": { @@ -190,23 +202,24 @@ func resourceAwsSsmMaintenanceWindowTask() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "notification_arn": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, }, "notification_events": { Type: schema.TypeList, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice(ssm.NotificationEvent_Values(), false), + }, }, "notification_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{ - ssm.NotificationTypeCommand, - ssm.NotificationTypeInvocation, - }, false), + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(ssm.NotificationType_Values(), false), }, }, }, @@ -242,13 +255,33 @@ func resourceAwsSsmMaintenanceWindowTask() *schema.Resource { }, "service_role_arn": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateArn, }, "timeout_seconds": { - Type: schema.TypeInt, + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(30, 2592000), + }, + "cloudwatch_config": { + Type: schema.TypeList, Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cloudwatch_log_group_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "cloudwatch_output_enabled": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, }, }, }, @@ -407,6 +440,9 @@ func expandAwsSsmTaskInvocationRunCommandParameters(config []interface{}) *ssm.M if attr, ok := configParam["document_hash_type"]; ok && len(attr.(string)) != 0 { params.DocumentHashType = aws.String(attr.(string)) } + if attr, ok := configParam["document_version"]; ok && len(attr.(string)) != 0 { + params.DocumentVersion = aws.String(attr.(string)) + } if attr, ok := configParam["notification_config"]; ok && len(attr.([]interface{})) > 0 { params.NotificationConfig = expandAwsSsmTaskInvocationRunCommandParametersNotificationConfig(attr.([]interface{})) } @@ -425,6 +461,10 @@ func expandAwsSsmTaskInvocationRunCommandParameters(config []interface{}) *ssm.M if attr, ok := configParam["timeout_seconds"]; ok && attr.(int) != 0 { params.TimeoutSeconds = aws.Int64(int64(attr.(int))) } + + if attr, ok := configParam["cloudwatch_config"]; ok && len(attr.([]interface{})) > 0 { + params.CloudWatchOutputConfig = expandAwsSsmTaskInvocationRunCommandParametersCloudWatchConfig(attr.([]interface{})) + } return params } @@ -440,6 +480,9 @@ func flattenAwsSsmTaskInvocationRunCommandParameters(parameters *ssm.Maintenance if parameters.DocumentHashType != nil { result["document_hash_type"] = aws.StringValue(parameters.DocumentHashType) } + if parameters.DocumentVersion != nil { + result["document_version"] = aws.StringValue(parameters.DocumentVersion) + } if parameters.NotificationConfig != nil { result["notification_config"] = flattenAwsSsmTaskInvocationRunCommandParametersNotificationConfig(parameters.NotificationConfig) } @@ -458,6 +501,9 @@ func flattenAwsSsmTaskInvocationRunCommandParameters(parameters *ssm.Maintenance if parameters.TimeoutSeconds != nil { result["timeout_seconds"] = aws.Int64Value(parameters.TimeoutSeconds) } + if parameters.CloudWatchOutputConfig != nil { + result["cloudwatch_config"] = 
flattenAwsSsmTaskInvocationRunCommandParametersCloudWatchConfig(parameters.CloudWatchOutputConfig) + } return []interface{}{result} } @@ -529,6 +575,37 @@ func flattenAwsSsmTaskInvocationRunCommandParametersNotificationConfig(config *s return []interface{}{result} } +func expandAwsSsmTaskInvocationRunCommandParametersCloudWatchConfig(config []interface{}) *ssm.CloudWatchOutputConfig { + if len(config) == 0 || config[0] == nil { + return nil + } + + params := &ssm.CloudWatchOutputConfig{} + configParam := config[0].(map[string]interface{}) + + if attr, ok := configParam["cloudwatch_log_group_name"]; ok && len(attr.(string)) != 0 { + params.CloudWatchLogGroupName = aws.String(attr.(string)) + } + if attr, ok := configParam["cloudwatch_output_enabled"]; ok { + params.CloudWatchOutputEnabled = aws.Bool(attr.(bool)) + } + + return params +} + +func flattenAwsSsmTaskInvocationRunCommandParametersCloudWatchConfig(config *ssm.CloudWatchOutputConfig) []interface{} { + result := make(map[string]interface{}) + + if config.CloudWatchLogGroupName != nil { + result["cloudwatch_log_group_name"] = aws.StringValue(config.CloudWatchLogGroupName) + } + if config.CloudWatchOutputEnabled != nil { + result["cloudwatch_output_enabled"] = aws.BoolValue(config.CloudWatchOutputEnabled) + } + + return []interface{}{result} +} + func expandAwsSsmTaskInvocationCommonParameters(config []interface{}) map[string][]*string { if len(config) == 0 || config[0] == nil { return nil @@ -569,7 +646,7 @@ func flattenAwsSsmTaskInvocationCommonParameters(parameters map[string][]*string } func resourceAwsSsmMaintenanceWindowTaskCreate(d *schema.ResourceData, meta interface{}) error { - ssmconn := meta.(*AWSClient).ssmconn + conn := meta.(*AWSClient).ssmconn log.Printf("[INFO] Registering SSM Maintenance Window Task") @@ -580,7 +657,10 @@ func resourceAwsSsmMaintenanceWindowTaskCreate(d *schema.ResourceData, meta inte TaskType: aws.String(d.Get("task_type").(string)), ServiceRoleArn: aws.String(d.Get("service_role_arn").(string)), TaskArn: aws.String(d.Get("task_arn").(string)), - Targets: expandAwsSsmTargets(d.Get("targets").([]interface{})), + } + + if v, ok := d.GetOk("targets"); ok { + params.Targets = expandAwsSsmTargets(v.([]interface{})) } if v, ok := d.GetOk("name"); ok { @@ -599,7 +679,7 @@ func resourceAwsSsmMaintenanceWindowTaskCreate(d *schema.ResourceData, meta inte params.TaskInvocationParameters = expandAwsSsmTaskInvocationParameters(v.([]interface{})) } - resp, err := ssmconn.RegisterTaskWithMaintenanceWindow(params) + resp, err := conn.RegisterTaskWithMaintenanceWindow(params) if err != nil { return err } @@ -610,14 +690,14 @@ func resourceAwsSsmMaintenanceWindowTaskCreate(d *schema.ResourceData, meta inte } func resourceAwsSsmMaintenanceWindowTaskRead(d *schema.ResourceData, meta interface{}) error { - ssmconn := meta.(*AWSClient).ssmconn + conn := meta.(*AWSClient).ssmconn windowID := d.Get("window_id").(string) params := &ssm.GetMaintenanceWindowTaskInput{ WindowId: aws.String(windowID), WindowTaskId: aws.String(d.Id()), } - resp, err := ssmconn.GetMaintenanceWindowTask(params) + resp, err := conn.GetMaintenanceWindowTask(params) if isAWSErr(err, ssm.ErrCodeDoesNotExistException, "") { log.Printf("[WARN] Maintenance Window (%s) Task (%s) not found, removing from state", windowID, d.Id()) d.SetId("") @@ -651,7 +731,7 @@ func resourceAwsSsmMaintenanceWindowTaskRead(d *schema.ResourceData, meta interf } func resourceAwsSsmMaintenanceWindowTaskUpdate(d *schema.ResourceData, meta interface{}) error { - ssmconn := 
meta.(*AWSClient).ssmconn + conn := meta.(*AWSClient).ssmconn windowID := d.Get("window_id").(string) params := &ssm.UpdateMaintenanceWindowTaskInput{ @@ -681,7 +761,7 @@ func resourceAwsSsmMaintenanceWindowTaskUpdate(d *schema.ResourceData, meta inte params.TaskInvocationParameters = expandAwsSsmTaskInvocationParameters(v.([]interface{})) } - _, err := ssmconn.UpdateMaintenanceWindowTask(params) + _, err := conn.UpdateMaintenanceWindowTask(params) if isAWSErr(err, ssm.ErrCodeDoesNotExistException, "") { log.Printf("[WARN] Maintenance Window (%s) Task (%s) not found, removing from state", windowID, d.Id()) d.SetId("") @@ -696,7 +776,7 @@ func resourceAwsSsmMaintenanceWindowTaskUpdate(d *schema.ResourceData, meta inte } func resourceAwsSsmMaintenanceWindowTaskDelete(d *schema.ResourceData, meta interface{}) error { - ssmconn := meta.(*AWSClient).ssmconn + conn := meta.(*AWSClient).ssmconn log.Printf("[INFO] Deregistering SSM Maintenance Window Task: %s", d.Id()) @@ -705,7 +785,7 @@ func resourceAwsSsmMaintenanceWindowTaskDelete(d *schema.ResourceData, meta inte WindowTaskId: aws.String(d.Id()), } - _, err := ssmconn.DeregisterTaskFromMaintenanceWindow(params) + _, err := conn.DeregisterTaskFromMaintenanceWindow(params) if isAWSErr(err, ssm.ErrCodeDoesNotExistException, "") { return nil } diff --git a/aws/resource_aws_ssm_maintenance_window_task_test.go b/aws/resource_aws_ssm_maintenance_window_task_test.go index 989a119c636..c28ece1aa32 100644 --- a/aws/resource_aws_ssm_maintenance_window_task_test.go +++ b/aws/resource_aws_ssm_maintenance_window_task_test.go @@ -5,7 +5,6 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ssm" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -16,23 +15,23 @@ func TestAccAWSSSMMaintenanceWindowTask_basic(t *testing.T) { var before, after ssm.MaintenanceWindowTask resourceName := "aws_ssm_maintenance_window_task.test" - name := acctest.RandString(10) + rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSSSMMaintenanceWindowTaskDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSSSMMaintenanceWindowTaskBasicConfig(name), + Config: testAccAWSSSMMaintenanceWindowTaskBasicConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSSMMaintenanceWindowTaskExists(resourceName, &before), ), }, { - Config: testAccAWSSSMMaintenanceWindowTaskBasicConfigUpdate(name, "test description", "RUN_COMMAND", "AWS-InstallPowerShellModule", 3, 3, 2), + Config: testAccAWSSSMMaintenanceWindowTaskBasicConfigUpdate(rName, "test description", "RUN_COMMAND", "AWS-InstallPowerShellModule", 3, 3, 2), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSSMMaintenanceWindowTaskExists(resourceName, &after), - resource.TestCheckResourceAttr(resourceName, "name", fmt.Sprintf("maintenance-window-task-%s", name)), + resource.TestCheckResourceAttr(resourceName, "name", fmt.Sprintf("maintenance-window-task-%s", rName)), resource.TestCheckResourceAttr(resourceName, "description", "test description"), resource.TestCheckResourceAttr(resourceName, "task_type", "RUN_COMMAND"), resource.TestCheckResourceAttr(resourceName, "task_arn", "AWS-InstallPowerShellModule"), @@ -54,7 +53,7 @@ func TestAccAWSSSMMaintenanceWindowTask_basic(t *testing.T) { func 
TestAccAWSSSMMaintenanceWindowTask_updateForcesNewResource(t *testing.T) { var before, after ssm.MaintenanceWindowTask - name := acctest.RandString(10) + rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_ssm_maintenance_window_task.test" resource.ParallelTest(t, resource.TestCase{ @@ -63,13 +62,13 @@ func TestAccAWSSSMMaintenanceWindowTask_updateForcesNewResource(t *testing.T) { CheckDestroy: testAccCheckAWSSSMMaintenanceWindowTaskDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSSSMMaintenanceWindowTaskBasicConfig(name), + Config: testAccAWSSSMMaintenanceWindowTaskBasicConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSSMMaintenanceWindowTaskExists(resourceName, &before), ), }, { - Config: testAccAWSSSMMaintenanceWindowTaskBasicConfigUpdated(name), + Config: testAccAWSSSMMaintenanceWindowTaskBasicConfigUpdated(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSSMMaintenanceWindowTaskExists(resourceName, &after), resource.TestCheckResourceAttr(resourceName, "name", "TestMaintenanceWindowTask"), @@ -91,21 +90,21 @@ func TestAccAWSSSMMaintenanceWindowTask_TaskInvocationAutomationParameters(t *te var task ssm.MaintenanceWindowTask resourceName := "aws_ssm_maintenance_window_task.test" - name := acctest.RandString(10) + rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSSSMMaintenanceWindowTaskDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSSSMMaintenanceWindowTaskAutomationConfig(name, "$DEFAULT"), + Config: testAccAWSSSMMaintenanceWindowTaskAutomationConfig(rName, "$DEFAULT"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSSMMaintenanceWindowTaskExists(resourceName, &task), resource.TestCheckResourceAttr(resourceName, "task_invocation_parameters.0.automation_parameters.0.document_version", "$DEFAULT"), ), }, { - Config: testAccAWSSSMMaintenanceWindowTaskAutomationConfigUpdate(name, "$LATEST"), + Config: testAccAWSSSMMaintenanceWindowTaskAutomationConfigUpdate(rName, "$LATEST"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSSMMaintenanceWindowTaskExists(resourceName, &task), resource.TestCheckResourceAttr(resourceName, "task_invocation_parameters.0.automation_parameters.0.document_version", "$LATEST"), @@ -157,16 +156,16 @@ func TestAccAWSSSMMaintenanceWindowTask_TaskInvocationRunCommandParameters(t *te var task ssm.MaintenanceWindowTask resourceName := "aws_ssm_maintenance_window_task.test" serviceRoleResourceName := "aws_iam_role.test" - s3BucketResourceName := "aws_s3_bucket.foo" + s3BucketResourceName := "aws_s3_bucket.test" - name := acctest.RandString(10) + rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSSSMMaintenanceWindowTaskDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSSSMMaintenanceWindowTaskRunCommandConfig(name, "test comment", 30), + Config: testAccAWSSSMMaintenanceWindowTaskRunCommandConfig(rName, "test comment", 30), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSSMMaintenanceWindowTaskExists(resourceName, &task), resource.TestCheckResourceAttrPair(resourceName, "service_role_arn", serviceRoleResourceName, "arn"), @@ -176,7 +175,7 @@ func TestAccAWSSSMMaintenanceWindowTask_TaskInvocationRunCommandParameters(t *te ), }, { - Config: testAccAWSSSMMaintenanceWindowTaskRunCommandConfigUpdate(name, 
"test comment update", 60), + Config: testAccAWSSSMMaintenanceWindowTaskRunCommandConfigUpdate(rName, "test comment update", 60), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSSMMaintenanceWindowTaskExists(resourceName, &task), resource.TestCheckResourceAttr(resourceName, "task_invocation_parameters.0.run_command_parameters.0.comment", "test comment update"), @@ -194,6 +193,57 @@ func TestAccAWSSSMMaintenanceWindowTask_TaskInvocationRunCommandParameters(t *te }) } +func TestAccAWSSSMMaintenanceWindowTask_TaskInvocationRunCommandParametersCloudWatch(t *testing.T) { + var task ssm.MaintenanceWindowTask + resourceName := "aws_ssm_maintenance_window_task.test" + serviceRoleResourceName := "aws_iam_role.test" + cwResourceName := "aws_cloudwatch_log_group.test" + + name := acctest.RandString(10) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSMMaintenanceWindowTaskDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSSMMaintenanceWindowTaskRunCommandCloudWatchConfig(name, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSMMaintenanceWindowTaskExists(resourceName, &task), + resource.TestCheckResourceAttrPair(resourceName, "service_role_arn", serviceRoleResourceName, "arn"), + resource.TestCheckResourceAttrPair(resourceName, "task_invocation_parameters.0.run_command_parameters.0.service_role_arn", serviceRoleResourceName, "arn"), + resource.TestCheckResourceAttrPair(resourceName, "task_invocation_parameters.0.run_command_parameters.0.cloudwatch_config.0.cloudwatch_log_group_name", cwResourceName, "name"), + resource.TestCheckResourceAttr(resourceName, "task_invocation_parameters.0.run_command_parameters.0.cloudwatch_config.0.cloudwatch_output_enabled", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccAWSSSMMaintenanceWindowTaskImportStateIdFunc(resourceName), + ImportStateVerify: true, + }, + { + Config: testAccAWSSSMMaintenanceWindowTaskRunCommandCloudWatchConfig(name, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSMMaintenanceWindowTaskExists(resourceName, &task), + resource.TestCheckResourceAttrPair(resourceName, "service_role_arn", serviceRoleResourceName, "arn"), + resource.TestCheckResourceAttrPair(resourceName, "task_invocation_parameters.0.run_command_parameters.0.service_role_arn", serviceRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "task_invocation_parameters.0.run_command_parameters.0.cloudwatch_config.0.cloudwatch_output_enabled", "false"), + ), + }, + { + Config: testAccAWSSSMMaintenanceWindowTaskRunCommandCloudWatchConfig(name, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSMMaintenanceWindowTaskExists(resourceName, &task), + resource.TestCheckResourceAttrPair(resourceName, "service_role_arn", serviceRoleResourceName, "arn"), + resource.TestCheckResourceAttrPair(resourceName, "task_invocation_parameters.0.run_command_parameters.0.service_role_arn", serviceRoleResourceName, "arn"), + resource.TestCheckResourceAttrPair(resourceName, "task_invocation_parameters.0.run_command_parameters.0.cloudwatch_config.0.cloudwatch_log_group_name", cwResourceName, "name"), + resource.TestCheckResourceAttr(resourceName, "task_invocation_parameters.0.run_command_parameters.0.cloudwatch_config.0.cloudwatch_output_enabled", "true"), + ), + }, + }, + }) +} + func TestAccAWSSSMMaintenanceWindowTask_TaskInvocationStepFunctionParameters(t *testing.T) { 
var task ssm.MaintenanceWindowTask resourceName := "aws_ssm_maintenance_window_task.test" @@ -241,6 +291,28 @@ func TestAccAWSSSMMaintenanceWindowTask_emptyNotificationConfig(t *testing.T) { }) } +func TestAccAWSSSMMaintenanceWindowTask_disappears(t *testing.T) { + var before ssm.MaintenanceWindowTask + resourceName := "aws_ssm_maintenance_window_task.test" + + name := acctest.RandString(10) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSMMaintenanceWindowTaskDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSSMMaintenanceWindowTaskBasicConfig(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSMMaintenanceWindowTaskExists(resourceName, &before), + testAccCheckResourceDisappears(testAccProvider, resourceAwsSsmMaintenanceWindowTask(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func testAccCheckAwsSsmWindowsTaskNotRecreated(t *testing.T, before, after *ssm.MaintenanceWindowTask) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -282,7 +354,7 @@ func testAccCheckAWSSSMMaintenanceWindowTaskExists(n string, task *ssm.Maintenan } for _, i := range resp.Tasks { - if *i.WindowTaskId == rs.Primary.ID { + if aws.StringValue(i.WindowTaskId) == rs.Primary.ID { *task = *i return nil } @@ -296,7 +368,7 @@ func testAccCheckAWSSSMMaintenanceWindowTaskDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).ssmconn for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_ssm_maintenance_window_target" { + if rs.Type != "aws_ssm_maintenance_window_task" { continue } @@ -306,7 +378,7 @@ func testAccCheckAWSSSMMaintenanceWindowTaskDestroy(s *terraform.State) error { if err != nil { // Verify the error is what we want - if ae, ok := err.(awserr.Error); ok && ae.Code() == "DoesNotExistException" { + if isAWSErr(err, ssm.ErrCodeDoesNotExistException, "") { continue } return err @@ -427,10 +499,10 @@ func testAccAWSSSMMaintenanceWindowTaskBasicConfigUpdate(rName, description, tas resource "aws_ssm_maintenance_window_task" "test" { window_id = aws_ssm_maintenance_window.test.id - task_type = "%[2]s" - task_arn = "%[3]s" + task_type = %[2]q + task_arn = %[3]q name = "maintenance-window-task-%[1]s" - description = "%[4]s" + description = %[4]q priority = %[5]d service_role_arn = aws_iam_role.ssm_role_update.arn max_concurrency = %[6]d @@ -574,7 +646,7 @@ resource "aws_ssm_maintenance_window_task" "test" { task_invocation_parameters { automation_parameters { - document_version = "%[2]s" + document_version = %[2]q parameter { name = "InstanceId" @@ -593,8 +665,8 @@ resource "aws_ssm_maintenance_window_task" "test" { func testAccAWSSSMMaintenanceWindowTaskAutomationConfigUpdate(rName, version string) string { return fmt.Sprintf(testAccAWSSSMMaintenanceWindowTaskConfigBase(rName)+` -resource "aws_s3_bucket" "foo" { - bucket = "tf-s3-%[1]s" +resource "aws_s3_bucket" "test" { + bucket = %[1]q acl = "private" force_destroy = true } @@ -615,7 +687,7 @@ resource "aws_ssm_maintenance_window_task" "test" { task_invocation_parameters { automation_parameters { - document_version = "%[2]s" + document_version = %[2]q parameter { name = "InstanceId" @@ -685,7 +757,7 @@ resource "aws_ssm_maintenance_window_task" "test" { task_invocation_parameters { run_command_parameters { - comment = "%[2]s" + comment = %[2]q document_hash = sha256("COMMAND") document_hash_type = "Sha256" service_role_arn = aws_iam_role.test.arn @@ -703,8 
+775,8 @@ resource "aws_ssm_maintenance_window_task" "test" { func testAccAWSSSMMaintenanceWindowTaskRunCommandConfigUpdate(rName, comment string, timeoutSeconds int) string { return fmt.Sprintf(testAccAWSSSMMaintenanceWindowTaskConfigBase(rName)+` -resource "aws_s3_bucket" "foo" { - bucket = "tf-s3-%[1]s" +resource "aws_s3_bucket" "test" { + bucket = %[1]q acl = "private" force_destroy = true } @@ -725,12 +797,12 @@ resource "aws_ssm_maintenance_window_task" "test" { task_invocation_parameters { run_command_parameters { - comment = "%[2]s" + comment = %[2]q document_hash = sha256("COMMAND") document_hash_type = "Sha256" service_role_arn = aws_iam_role.test.arn timeout_seconds = %[3]d - output_s3_bucket = aws_s3_bucket.foo.id + output_s3_bucket = aws_s3_bucket.test.id output_s3_key_prefix = "foo" parameter { @@ -743,6 +815,47 @@ resource "aws_ssm_maintenance_window_task" "test" { `, rName, comment, timeoutSeconds) } +func testAccAWSSSMMaintenanceWindowTaskRunCommandCloudWatchConfig(rName string, enabled bool) string { + return fmt.Sprintf(testAccAWSSSMMaintenanceWindowTaskConfigBase(rName)+` +resource "aws_cloudwatch_log_group" "test" { + name = %[1]q +} + +resource "aws_ssm_maintenance_window_task" "test" { + window_id = aws_ssm_maintenance_window.test.id + task_type = "RUN_COMMAND" + task_arn = "AWS-RunShellScript" + priority = 1 + service_role_arn = aws_iam_role.test.arn + max_concurrency = "2" + max_errors = "1" + + targets { + key = "WindowTargetIds" + values = [aws_ssm_maintenance_window_target.test.id] + } + + task_invocation_parameters { + run_command_parameters { + document_hash = sha256("COMMAND") + document_hash_type = "Sha256" + service_role_arn = aws_iam_role.test.arn + + parameter { + name = "commands" + values = ["date"] + } + + cloudwatch_config { + cloudwatch_log_group_name = aws_cloudwatch_log_group.test.name + cloudwatch_output_enabled = %[2]t + } + } + } +} +`, rName, enabled) +} + func testAccAWSSSMMaintenanceWindowTaskStepFunctionConfig(rName string) string { return testAccAWSSSMMaintenanceWindowTaskConfigBase(rName) + fmt.Sprintf(` resource "aws_sfn_activity" "test" { diff --git a/aws/validators.go b/aws/validators.go index 14f80bb226b..61cd136d114 100644 --- a/aws/validators.go +++ b/aws/validators.go @@ -1915,19 +1915,6 @@ func validateAwsSSMName(v interface{}, k string) (ws []string, errors []error) { return } -func validateAwsSSMMaintenanceWindowTaskName(v interface{}, k string) (ws []string, errors []error) { - // https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_RegisterTaskWithMaintenanceWindow.html#systemsmanager-RegisterTaskWithMaintenanceWindow-request-Name - value := v.(string) - - if !regexp.MustCompile(`^[a-zA-Z0-9_\-.]{3,128}$`).MatchString(value) { - errors = append(errors, fmt.Errorf( - "Only alphanumeric characters, hyphens, dots & underscores allowed in %q: %q (Must satisfy regular expression pattern: ^[a-zA-Z0-9_\\-.]{3,128}$)", - k, value)) - } - - return -} - func validateBatchName(v interface{}, k string) (ws []string, errors []error) { value := v.(string) if !regexp.MustCompile(`^[0-9a-zA-Z]{1}[0-9a-zA-Z_\-]{0,127}$`).MatchString(value) { diff --git a/website/docs/r/ssm_maintenance_window_task.html.markdown b/website/docs/r/ssm_maintenance_window_task.html.markdown index 4978332f6e9..e521c0a1a55 100644 --- a/website/docs/r/ssm_maintenance_window_task.html.markdown +++ b/website/docs/r/ssm_maintenance_window_task.html.markdown @@ -178,6 +178,7 @@ The following arguments are supported: * `parameter` - (Optional) The parameters 
for the RUN_COMMAND task execution. Documented below. * `service_role_arn` - (Optional) The IAM service role to assume during task execution. * `timeout_seconds` - (Optional) If this time is reached and the command has not already started executing, it doesn't run. +* `cloudwatch_config` - (Optional) Configuration options for sending command output to CloudWatch Logs. Documented below. `step_functions_parameters` supports the following: @@ -190,6 +191,11 @@ The following arguments are supported: * `notification_events` - (Optional) The different events for which you can receive notifications. Valid values: `All`, `InProgress`, `Success`, `TimedOut`, `Cancelled`, and `Failed` * `notification_type` - (Optional) When specified with `Command`, receive notification when the status of a command changes. When specified with `Invocation`, for commands sent to multiple instances, receive notification on a per-instance basis when the status of a command changes. Valid values: `Command` and `Invocation` +`cloudwatch_config` supports the following: + +* `cloudwatch_log_group_name` - (Optional) The name of the CloudWatch log group where you want to send command output. If you don't specify a group name, Systems Manager automatically creates a log group for you. The log group uses the following naming format: aws/ssm/SystemsManagerDocumentName. +* `cloudwatch_output_enabled` - (Optional) Enables Systems Manager to send command output to CloudWatch Logs. + `parameter` supports the following: * `name` - (Required) The parameter name. From 893ba66b3a29378515e23efb3eb47229b341145d Mon Sep 17 00:00:00 2001 From: bill-rich Date: Thu, 11 Feb 2021 16:15:51 -0800 Subject: [PATCH 1195/1212] Fix docs and add changelog entry --- .changelog/17418.txt | 3 +++ website/docs/r/fms_policy.html.markdown | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 .changelog/17418.txt diff --git a/.changelog/17418.txt b/.changelog/17418.txt new file mode 100644 index 00000000000..d505f8eb1f3 --- /dev/null +++ b/.changelog/17418.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_fms_policy: Allow use of `resource_type` or `resource_type_list` attributes +``` diff --git a/website/docs/r/fms_policy.html.markdown b/website/docs/r/fms_policy.html.markdown index 803ac1f3057..a3ccf82a3a0 100644 --- a/website/docs/r/fms_policy.html.markdown +++ b/website/docs/r/fms_policy.html.markdown @@ -59,8 +59,8 @@ The following arguments are supported: * `include_map` - (Optional) A map of lists, with a single key named 'account' with a list of AWS Account IDs to include for this policy. * `remediation_enabled` - (Required) A boolean value, indicates if the policy should automatically applied to resources that already exist in the account. * `resource_tags` - (Optional) A map of resource tags, that if present will filter protections on resources based on the exclude_resource_tags. -* `resource_type` - (Optional) A resource type to protect, valid values are: `AWS::ElasticLoadBalancingV2::LoadBalancer`, `AWS::ApiGateway::Stage`, `AWS::CloudFront::Distribution`. Conflicts with `resource_type_list`. -* `resource_type_list` - (Optional) A list of resource types to protect, valid values are: `AWS::ElasticLoadBalancingV2::LoadBalancer`, `AWS::ApiGateway::Stage`, `AWS::CloudFront::Distribution`. Conflicts with `resource_type`. 
+* `resource_type` - (Optional) A resource type to protect, valid values are: `AWS::ElasticLoadBalancingV2::LoadBalancer`, `AWS::ApiGateway::Stage`, `AWS::CloudFront::Distribution`, `AWS::EC2::Instance`, `AWS::EC2::NetworkInterface`, `AWS::EC2::SecurityGroup`. Conflicts with `resource_type_list`. +* `resource_type_list` - (Optional) A list of resource types to protect, valid values are: `AWS::ElasticLoadBalancingV2::LoadBalancer`, `AWS::ApiGateway::Stage`, `AWS::CloudFront::Distribution`, `AWS::EC2::Instance`, `AWS::EC2::NetworkInterface`, `AWS::EC2::SecurityGroup`. Conflicts with `resource_type`. * `security_service_policy_data` - (Required) The objects to include in Security Service Policy Data. Documented below. ## `exclude_map` Configuration Block From 9345038049a0a21bcd244c0df9ff4c33fe293f17 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 11 Feb 2021 16:27:19 -0800 Subject: [PATCH 1196/1212] Adds test for unmatched index --- aws/resource_aws_dynamodb_table_test.go | 53 +++++++++++++++++++------ aws/validators.go | 13 +++--- 2 files changed, 48 insertions(+), 18 deletions(-) diff --git a/aws/resource_aws_dynamodb_table_test.go b/aws/resource_aws_dynamodb_table_test.go index 66eb51140d5..18999a85354 100644 --- a/aws/resource_aws_dynamodb_table_test.go +++ b/aws/resource_aws_dynamodb_table_test.go @@ -1225,6 +1225,10 @@ func TestAccAWSDynamoDbTable_attributeUpdateValidation(t *testing.T) { Config: testAccAWSDynamoDbConfigTwoAttributes(rName, "firstKey", "secondKey", "firstUnused", "N", "secondUnused", "S"), ExpectError: regexp.MustCompile(`All attributes must be indexed. Unused attributes: \["firstUnused"\ \"secondUnused\"]`), }, + { + Config: testAccAWSDynamoDbConfigUnmatchedIndexes(rName, "firstUnused", "secondUnused"), + ExpectError: regexp.MustCompile(`All indexes must match a defined attribute. 
Unmatched indexes: \["firstUnused"\ \"secondUnused\"]`), + }, }, }) } @@ -2175,7 +2179,7 @@ resource "aws_dynamodb_table" "test" { func testAccAWSDynamoDbConfigOneAttribute(rName, hashKey, attrName, attrType string) string { return fmt.Sprintf(` resource "aws_dynamodb_table" "test" { - name = "%s" + name = "%[1]s" read_capacity = 10 write_capacity = 10 hash_key = "staticHashKey" @@ -2186,25 +2190,25 @@ resource "aws_dynamodb_table" "test" { } attribute { - name = "%s" - type = "%s" + name = "%[3]s" + type = "%[4]s" } global_secondary_index { name = "gsiName" - hash_key = "%s" + hash_key = "%[2]s" write_capacity = 10 read_capacity = 10 projection_type = "KEYS_ONLY" } } -`, rName, attrName, attrType, hashKey) +`, rName, hashKey, attrName, attrType) } func testAccAWSDynamoDbConfigTwoAttributes(rName, hashKey, rangeKey, attrName1, attrType1, attrName2, attrType2 string) string { return fmt.Sprintf(` resource "aws_dynamodb_table" "test" { - name = "%s" + name = "%[1]s" read_capacity = 10 write_capacity = 10 hash_key = "staticHashKey" @@ -2215,25 +2219,48 @@ resource "aws_dynamodb_table" "test" { } attribute { - name = "%s" - type = "%s" + name = "%[4]s" + type = "%[5]s" } attribute { - name = "%s" - type = "%s" + name = "%[6]s" + type = "%[7]s" } global_secondary_index { name = "gsiName" - hash_key = "%s" - range_key = "%s" + hash_key = "%[2]s" + range_key = "%[3]s" write_capacity = 10 read_capacity = 10 projection_type = "KEYS_ONLY" } } -`, rName, attrName1, attrType1, attrName2, attrType2, hashKey, rangeKey) +`, rName, hashKey, rangeKey, attrName1, attrType1, attrName2, attrType2) +} + +func testAccAWSDynamoDbConfigUnmatchedIndexes(rName, attr1, attr2 string) string { + return fmt.Sprintf(` +resource "aws_dynamodb_table" "test" { + name = %[1]q + read_capacity = 10 + write_capacity = 10 + hash_key = "staticHashKey" + range_key = %[2]q + + attribute { + name = "staticHashKey" + type = "S" + } + + local_secondary_index { + name = "lsiName" + range_key = %[3]q + projection_type = "KEYS_ONLY" + } +} +`, rName, attr1, attr2) } func testAccAWSDynamoDbTableConfigReplica0(rName string) string { diff --git a/aws/validators.go b/aws/validators.go index 5825cd6216e..636b41d55c3 100644 --- a/aws/validators.go +++ b/aws/validators.go @@ -16,6 +16,7 @@ import ( "github.com/aws/aws-sdk-go/service/configservice" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/waf" + multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" @@ -2233,8 +2234,8 @@ func validateDynamoDbTableAttributes(d *schema.ResourceDiff) error { hashKey := index["hash_key"].(string) indexedAttributes[hashKey] = true - if rk, ok := index["range_key"]; ok { - indexedAttributes[rk.(string)] = true + if rk, ok := index["range_key"].(string); ok && rk != "" { + indexedAttributes[rk] = true } } } @@ -2253,8 +2254,10 @@ func validateDynamoDbTableAttributes(d *schema.ResourceDiff) error { } } + var err *multierror.Error + if len(missingAttrDefs) > 0 { - return fmt.Errorf("All attributes must be indexed. Unused attributes: %q", missingAttrDefs) + err = multierror.Append(err, fmt.Errorf("All attributes must be indexed. 
Unused attributes: %q", missingAttrDefs)) } if len(indexedAttributes) > 0 { @@ -2263,10 +2266,10 @@ func validateDynamoDbTableAttributes(d *schema.ResourceDiff) error { missingIndexes = append(missingIndexes, index) } - return fmt.Errorf("All indexes must be attribute. Unused indexes: %q", missingIndexes) + err = multierror.Append(err, fmt.Errorf("All indexes must match a defined attribute. Unmatched indexes: %q", missingIndexes)) } - return nil + return err.ErrorOrNil() } func validateLaunchTemplateName(v interface{}, k string) (ws []string, errors []error) { From 39a98e033eb8468369171b02c6e3478dad88754b Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 11 Feb 2021 19:27:26 -0500 Subject: [PATCH 1197/1212] resource/s3_bucket_object: Add retry --- aws/internal/keyvaluetags/s3_tags.go | 22 ++++- aws/resource_aws_s3_bucket_object_test.go | 97 ++++++++++++++--------- 2 files changed, 80 insertions(+), 39 deletions(-) diff --git a/aws/internal/keyvaluetags/s3_tags.go b/aws/internal/keyvaluetags/s3_tags.go index 90182d759f0..995547b1c29 100644 --- a/aws/internal/keyvaluetags/s3_tags.go +++ b/aws/internal/keyvaluetags/s3_tags.go @@ -4,10 +4,13 @@ package keyvaluetags import ( "fmt" + "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/s3" "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" tfs3 "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/s3" ) @@ -86,7 +89,24 @@ func S3ObjectListTags(conn *s3.S3, bucket, key string) (KeyValueTags, error) { Key: aws.String(key), } - output, err := conn.GetObjectTagging(input) + var output *s3.GetObjectTaggingOutput + + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + var err error + output, err = conn.GetObjectTagging(input) + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "NoSuchKey" { + return resource.RetryableError( + fmt.Errorf("getting object tagging %s, retrying: %w", bucket, err), + ) + } + } + if err != nil { + return resource.NonRetryableError(err) + } + + return nil + }) if tfawserr.ErrCodeEquals(err, tfs3.ErrCodeNoSuchTagSet) { return New(nil), nil diff --git a/aws/resource_aws_s3_bucket_object_test.go b/aws/resource_aws_s3_bucket_object_test.go index 6af10d88478..de759a0d1d7 100644 --- a/aws/resource_aws_s3_bucket_object_test.go +++ b/aws/resource_aws_s3_bucket_object_test.go @@ -14,6 +14,7 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/s3" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -640,8 +641,8 @@ func TestAccAWSS3BucketObject_storageClass(t *testing.T) { func TestAccAWSS3BucketObject_tags(t *testing.T) { var obj1, obj2, obj3, obj4 s3.GetObjectOutput + rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_s3_bucket_object.object" - rInt := acctest.RandInt() key := "test-key" resource.ParallelTest(t, resource.TestCase{ @@ -651,7 +652,7 @@ func TestAccAWSS3BucketObject_tags(t *testing.T) { Steps: []resource.TestStep{ { PreConfig: func() {}, - Config: testAccAWSS3BucketObjectConfig_withTags(rInt, key, "stuff"), + Config: testAccAWSS3BucketObjectConfig_withTags(rName, key, "stuff"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketObjectExists(resourceName, &obj1), testAccCheckAWSS3BucketObjectBody(&obj1, "stuff"), @@ -663,7 +664,7 @@ func TestAccAWSS3BucketObject_tags(t 
*testing.T) { }, { PreConfig: func() {}, - Config: testAccAWSS3BucketObjectConfig_withUpdatedTags(rInt, key, "stuff"), + Config: testAccAWSS3BucketObjectConfig_withUpdatedTags(rName, key, "stuff"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketObjectExists(resourceName, &obj2), testAccCheckAWSS3BucketObjectVersionIdEquals(&obj2, &obj1), @@ -677,7 +678,7 @@ func TestAccAWSS3BucketObject_tags(t *testing.T) { }, { PreConfig: func() {}, - Config: testAccAWSS3BucketObjectConfig_withNoTags(rInt, key, "stuff"), + Config: testAccAWSS3BucketObjectConfig_withNoTags(rName, key, "stuff"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketObjectExists(resourceName, &obj3), testAccCheckAWSS3BucketObjectVersionIdEquals(&obj3, &obj2), @@ -687,7 +688,7 @@ func TestAccAWSS3BucketObject_tags(t *testing.T) { }, { PreConfig: func() {}, - Config: testAccAWSS3BucketObjectConfig_withTags(rInt, key, "changed stuff"), + Config: testAccAWSS3BucketObjectConfig_withTags(rName, key, "changed stuff"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketObjectExists(resourceName, &obj4), testAccCheckAWSS3BucketObjectVersionIdDiffers(&obj4, &obj3), @@ -704,8 +705,8 @@ func TestAccAWSS3BucketObject_tags(t *testing.T) { func TestAccAWSS3BucketObject_tagsLeadingSingleSlash(t *testing.T) { var obj1, obj2, obj3, obj4 s3.GetObjectOutput + rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_s3_bucket_object.object" - rInt := acctest.RandInt() key := "/test-key" resource.ParallelTest(t, resource.TestCase{ @@ -715,7 +716,7 @@ func TestAccAWSS3BucketObject_tagsLeadingSingleSlash(t *testing.T) { Steps: []resource.TestStep{ { PreConfig: func() {}, - Config: testAccAWSS3BucketObjectConfig_withTags(rInt, key, "stuff"), + Config: testAccAWSS3BucketObjectConfig_withTags(rName, key, "stuff"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketObjectExists(resourceName, &obj1), testAccCheckAWSS3BucketObjectBody(&obj1, "stuff"), @@ -727,7 +728,7 @@ func TestAccAWSS3BucketObject_tagsLeadingSingleSlash(t *testing.T) { }, { PreConfig: func() {}, - Config: testAccAWSS3BucketObjectConfig_withUpdatedTags(rInt, key, "stuff"), + Config: testAccAWSS3BucketObjectConfig_withUpdatedTags(rName, key, "stuff"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketObjectExists(resourceName, &obj2), testAccCheckAWSS3BucketObjectVersionIdEquals(&obj2, &obj1), @@ -741,7 +742,7 @@ func TestAccAWSS3BucketObject_tagsLeadingSingleSlash(t *testing.T) { }, { PreConfig: func() {}, - Config: testAccAWSS3BucketObjectConfig_withNoTags(rInt, key, "stuff"), + Config: testAccAWSS3BucketObjectConfig_withNoTags(rName, key, "stuff"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketObjectExists(resourceName, &obj3), testAccCheckAWSS3BucketObjectVersionIdEquals(&obj3, &obj2), @@ -751,7 +752,7 @@ func TestAccAWSS3BucketObject_tagsLeadingSingleSlash(t *testing.T) { }, { PreConfig: func() {}, - Config: testAccAWSS3BucketObjectConfig_withTags(rInt, key, "changed stuff"), + Config: testAccAWSS3BucketObjectConfig_withTags(rName, key, "changed stuff"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketObjectExists(resourceName, &obj4), testAccCheckAWSS3BucketObjectVersionIdDiffers(&obj4, &obj3), @@ -768,8 +769,8 @@ func TestAccAWSS3BucketObject_tagsLeadingSingleSlash(t *testing.T) { func TestAccAWSS3BucketObject_tagsLeadingMultipleSlashes(t *testing.T) { var obj1, obj2, obj3, obj4 s3.GetObjectOutput + rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := 
"aws_s3_bucket_object.object" - rInt := acctest.RandInt() key := "/////test-key" resource.ParallelTest(t, resource.TestCase{ @@ -779,7 +780,7 @@ func TestAccAWSS3BucketObject_tagsLeadingMultipleSlashes(t *testing.T) { Steps: []resource.TestStep{ { PreConfig: func() {}, - Config: testAccAWSS3BucketObjectConfig_withTags(rInt, key, "stuff"), + Config: testAccAWSS3BucketObjectConfig_withTags(rName, key, "stuff"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketObjectExists(resourceName, &obj1), testAccCheckAWSS3BucketObjectBody(&obj1, "stuff"), @@ -791,7 +792,7 @@ func TestAccAWSS3BucketObject_tagsLeadingMultipleSlashes(t *testing.T) { }, { PreConfig: func() {}, - Config: testAccAWSS3BucketObjectConfig_withUpdatedTags(rInt, key, "stuff"), + Config: testAccAWSS3BucketObjectConfig_withUpdatedTags(rName, key, "stuff"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketObjectExists(resourceName, &obj2), testAccCheckAWSS3BucketObjectVersionIdEquals(&obj2, &obj1), @@ -805,7 +806,7 @@ func TestAccAWSS3BucketObject_tagsLeadingMultipleSlashes(t *testing.T) { }, { PreConfig: func() {}, - Config: testAccAWSS3BucketObjectConfig_withNoTags(rInt, key, "stuff"), + Config: testAccAWSS3BucketObjectConfig_withNoTags(rName, key, "stuff"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketObjectExists(resourceName, &obj3), testAccCheckAWSS3BucketObjectVersionIdEquals(&obj3, &obj2), @@ -815,7 +816,7 @@ func TestAccAWSS3BucketObject_tagsLeadingMultipleSlashes(t *testing.T) { }, { PreConfig: func() {}, - Config: testAccAWSS3BucketObjectConfig_withTags(rInt, key, "changed stuff"), + Config: testAccAWSS3BucketObjectConfig_withTags(rName, key, "changed stuff"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketObjectExists(resourceName, &obj4), testAccCheckAWSS3BucketObjectVersionIdDiffers(&obj4, &obj3), @@ -832,8 +833,8 @@ func TestAccAWSS3BucketObject_tagsLeadingMultipleSlashes(t *testing.T) { func TestAccAWSS3BucketObject_tagsMultipleSlashes(t *testing.T) { var obj1, obj2, obj3, obj4 s3.GetObjectOutput + rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_s3_bucket_object.object" - rInt := acctest.RandInt() key := "first//second///third//" resource.ParallelTest(t, resource.TestCase{ @@ -843,7 +844,7 @@ func TestAccAWSS3BucketObject_tagsMultipleSlashes(t *testing.T) { Steps: []resource.TestStep{ { PreConfig: func() {}, - Config: testAccAWSS3BucketObjectConfig_withTags(rInt, key, "stuff"), + Config: testAccAWSS3BucketObjectConfig_withTags(rName, key, "stuff"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketObjectExists(resourceName, &obj1), testAccCheckAWSS3BucketObjectBody(&obj1, "stuff"), @@ -855,7 +856,7 @@ func TestAccAWSS3BucketObject_tagsMultipleSlashes(t *testing.T) { }, { PreConfig: func() {}, - Config: testAccAWSS3BucketObjectConfig_withUpdatedTags(rInt, key, "stuff"), + Config: testAccAWSS3BucketObjectConfig_withUpdatedTags(rName, key, "stuff"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketObjectExists(resourceName, &obj2), testAccCheckAWSS3BucketObjectVersionIdEquals(&obj2, &obj1), @@ -869,7 +870,7 @@ func TestAccAWSS3BucketObject_tagsMultipleSlashes(t *testing.T) { }, { PreConfig: func() {}, - Config: testAccAWSS3BucketObjectConfig_withNoTags(rInt, key, "stuff"), + Config: testAccAWSS3BucketObjectConfig_withNoTags(rName, key, "stuff"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketObjectExists(resourceName, &obj3), testAccCheckAWSS3BucketObjectVersionIdEquals(&obj3, &obj2), @@ -879,7 +880,7 @@ func 
TestAccAWSS3BucketObject_tagsMultipleSlashes(t *testing.T) { }, { PreConfig: func() {}, - Config: testAccAWSS3BucketObjectConfig_withTags(rInt, key, "changed stuff"), + Config: testAccAWSS3BucketObjectConfig_withTags(rName, key, "changed stuff"), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketObjectExists(resourceName, &obj4), testAccCheckAWSS3BucketObjectVersionIdDiffers(&obj4, &obj3), @@ -1107,8 +1108,8 @@ func TestAccAWSS3BucketObject_defaultBucketSSE(t *testing.T) { func TestAccAWSS3BucketObject_ignoreTags(t *testing.T) { var obj s3.GetObjectOutput + rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_s3_bucket_object.object" - rInt := acctest.RandInt() key := "test-key" resource.ParallelTest(t, resource.TestCase{ @@ -1120,7 +1121,7 @@ func TestAccAWSS3BucketObject_ignoreTags(t *testing.T) { PreConfig: func() {}, Config: composeConfig( testAccProviderConfigIgnoreTagsKeyPrefixes1("ignorekey"), - testAccAWSS3BucketObjectConfig_withNoTags(rInt, key, "stuff")), + testAccAWSS3BucketObjectConfig_withNoTags(rName, key, "stuff")), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketObjectExists(resourceName, &obj), testAccCheckAWSS3BucketObjectBody(&obj, "stuff"), @@ -1135,7 +1136,7 @@ func TestAccAWSS3BucketObject_ignoreTags(t *testing.T) { PreConfig: func() {}, Config: composeConfig( testAccProviderConfigIgnoreTagsKeyPrefixes1("ignorekey"), - testAccAWSS3BucketObjectConfig_withTags(rInt, key, "stuff")), + testAccAWSS3BucketObjectConfig_withTags(rName, key, "stuff")), Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketObjectExists(resourceName, &obj), testAccCheckAWSS3BucketObjectBody(&obj, "stuff"), @@ -1222,12 +1223,32 @@ func testAccCheckAWSS3BucketObjectExists(n string, obj *s3.GetObjectOutput) reso } s3conn := testAccProvider.Meta().(*AWSClient).s3conn - out, err := s3conn.GetObject( - &s3.GetObjectInput{ - Bucket: aws.String(rs.Primary.Attributes["bucket"]), - Key: aws.String(rs.Primary.Attributes["key"]), - IfMatch: aws.String(rs.Primary.Attributes["etag"]), - }) + + input := &s3.GetObjectInput{ + Bucket: aws.String(rs.Primary.Attributes["bucket"]), + Key: aws.String(rs.Primary.Attributes["key"]), + IfMatch: aws.String(rs.Primary.Attributes["etag"]), + } + + var out *s3.GetObjectOutput + + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + var err error + out, err = s3conn.GetObject(input) + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "NoSuchKey" { + return resource.RetryableError( + fmt.Errorf("getting object %s, retrying: %w", rs.Primary.Attributes["bucket"], err), + ) + } + } + if err != nil { + return resource.NonRetryableError(err) + } + + return nil + }) + if err != nil { return fmt.Errorf("S3Bucket Object error: %s", err) } @@ -1591,10 +1612,10 @@ resource "aws_s3_bucket_object" "object" { `, randInt, storage_class) } -func testAccAWSS3BucketObjectConfig_withTags(randInt int, key, content string) string { +func testAccAWSS3BucketObjectConfig_withTags(rName, key, content string) string { return fmt.Sprintf(` resource "aws_s3_bucket" "object_bucket" { - bucket = "tf-object-test-bucket-%[1]d" + bucket = %[1]q versioning { enabled = true @@ -1612,13 +1633,13 @@ resource "aws_s3_bucket_object" "object" { Key3 = "CCC" } } -`, randInt, key, content) +`, rName, key, content) } -func testAccAWSS3BucketObjectConfig_withUpdatedTags(randInt int, key, content string) string { +func testAccAWSS3BucketObjectConfig_withUpdatedTags(rName, key, content string) string { return fmt.Sprintf(` resource 
"aws_s3_bucket" "object_bucket" { - bucket = "tf-object-test-bucket-%[1]d" + bucket = %[1]q versioning { enabled = true @@ -1637,13 +1658,13 @@ resource "aws_s3_bucket_object" "object" { Key5 = "E:/" } } -`, randInt, key, content) +`, rName, key, content) } -func testAccAWSS3BucketObjectConfig_withNoTags(randInt int, key, content string) string { +func testAccAWSS3BucketObjectConfig_withNoTags(rName, key, content string) string { return fmt.Sprintf(` resource "aws_s3_bucket" "object_bucket" { - bucket = "tf-object-test-bucket-%[1]d" + bucket = %[1]q versioning { enabled = true @@ -1655,7 +1676,7 @@ resource "aws_s3_bucket_object" "object" { key = %[2]q content = %[3]q } -`, randInt, key, content) +`, rName, key, content) } func testAccAWSS3BucketObjectConfig_withMetadata(randInt int, metadataKey1, metadataValue1, metadataKey2, metadataValue2 string) string { From ef28f92dd34ce3fdbf62b6330d9ae6b4dd816316 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 11 Feb 2021 19:38:06 -0500 Subject: [PATCH 1198/1212] resource/s3_bucket_object: Check for timeout --- aws/internal/keyvaluetags/s3_tags.go | 4 ++++ aws/resource_aws_s3_bucket_object_test.go | 3 +++ 2 files changed, 7 insertions(+) diff --git a/aws/internal/keyvaluetags/s3_tags.go b/aws/internal/keyvaluetags/s3_tags.go index 995547b1c29..a858b48e51c 100644 --- a/aws/internal/keyvaluetags/s3_tags.go +++ b/aws/internal/keyvaluetags/s3_tags.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/aws-sdk-go-base/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" tfs3 "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/s3" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/tfresource" ) // Custom S3 tag service update functions using the same format as generated code. 
@@ -107,6 +108,9 @@ func S3ObjectListTags(conn *s3.S3, bucket, key string) (KeyValueTags, error) { return nil }) + if tfresource.TimedOut(err) { + output, err = conn.GetObjectTagging(input) + } if tfawserr.ErrCodeEquals(err, tfs3.ErrCodeNoSuchTagSet) { return New(nil), nil diff --git a/aws/resource_aws_s3_bucket_object_test.go b/aws/resource_aws_s3_bucket_object_test.go index de759a0d1d7..8201af3f769 100644 --- a/aws/resource_aws_s3_bucket_object_test.go +++ b/aws/resource_aws_s3_bucket_object_test.go @@ -1248,6 +1248,9 @@ func testAccCheckAWSS3BucketObjectExists(n string, obj *s3.GetObjectOutput) reso return nil }) + if isResourceTimeoutError(err) { + out, err = s3conn.GetObject(input) + } if err != nil { return fmt.Errorf("S3Bucket Object error: %s", err) From fc1c88382f8266c4e0f8601969a5ac73dc6f019b Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Fri, 12 Feb 2021 02:44:27 +0200 Subject: [PATCH 1199/1212] resource/aws_ssm_patch_baseline: Approved/Rejected patch enhancements, arn attribute, and validations (#11772) Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAWSSSMPatchBaseline_disappears (11.66s) --- PASS: TestAccAWSSSMPatchBaseline_ApprovedPatchesNonSec (15.49s) --- PASS: TestAccAWSSSMPatchBaseline_RejectPatchesAction (15.51s) --- PASS: TestAccAWSSSMPatchBaseline_basic (24.91s) --- PASS: TestAccAWSSSMPatchBaseline_OperatingSystem (25.68s) --- PASS: TestAccAWSSSMPatchBaseline_tags (34.07s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- PASS: TestAccAWSSSMPatchBaseline_disappears (14.52s) --- PASS: TestAccAWSSSMPatchBaseline_RejectPatchesAction (19.65s) --- PASS: TestAccAWSSSMPatchBaseline_ApprovedPatchesNonSec (20.14s) --- PASS: TestAccAWSSSMPatchBaseline_basic (34.06s) --- PASS: TestAccAWSSSMPatchBaseline_OperatingSystem (34.41s) --- PASS: TestAccAWSSSMPatchBaseline_tags (46.50s) ``` --- .changelog/11772.txt | 12 ++ aws/resource_aws_ssm_patch_baseline.go | 133 +++++++++++++----- aws/resource_aws_ssm_patch_baseline_test.go | 121 ++++++++++++---- .../docs/r/ssm_patch_baseline.html.markdown | 3 + 4 files changed, 205 insertions(+), 64 deletions(-) create mode 100644 .changelog/11772.txt diff --git a/.changelog/11772.txt b/.changelog/11772.txt new file mode 100644 index 00000000000..af336b2664a --- /dev/null +++ b/.changelog/11772.txt @@ -0,0 +1,12 @@ +```release-note:enhancement +resource/aws_ssm_patch_baseline: Adds plan time validation for `name`, `description`, `global_filter.key`, `global_filter.values`, +`approved_patches`, `rejected_patches`, `approval_rule.approve_after_days`, `approval_rule.patch_filter.key`, and `approval_rule.patch_filter.values`. +``` + +```release-note:enhancement +resource/aws_ssm_patch_baseline: Add `approved_patches_enable_non_security` and `rejected_patches_action` arguments +``` + +```release-note:enhancement +resource/aws_ssm_patch_baseline: Adds `arn` attribute. 
+``` diff --git a/aws/resource_aws_ssm_patch_baseline.go b/aws/resource_aws_ssm_patch_baseline.go index fe64e3286df..70e29e3a081 100644 --- a/aws/resource_aws_ssm_patch_baseline.go +++ b/aws/resource_aws_ssm_patch_baseline.go @@ -3,8 +3,11 @@ package aws import ( "fmt" "log" + "regexp" + "strings" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ssm" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -22,14 +25,23 @@ func resourceAwsSsmPatchBaseline() *schema.Resource { }, Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, "name": { Type: schema.TypeString, Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 128), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9_\-.]{3,128}$`), "must contain only alphanumeric, underscore, hyphen, or period characters"), + ), }, "description": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), }, "global_filter": { @@ -39,13 +51,19 @@ func resourceAwsSsmPatchBaseline() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "key": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(ssm.PatchFilterKey_Values(), false), }, "values": { Type: schema.TypeList, Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, + MaxItems: 20, + MinItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringLenBetween(1, 64), + }, }, }, }, @@ -57,8 +75,9 @@ func resourceAwsSsmPatchBaseline() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "approve_after_days": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(0, 100), }, "compliance_level": { @@ -81,13 +100,19 @@ func resourceAwsSsmPatchBaseline() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "key": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(ssm.PatchFilterKey_Values(), false), }, "values": { Type: schema.TypeList, Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, + MaxItems: 20, + MinItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringLenBetween(1, 64), + }, }, }, }, @@ -99,15 +124,21 @@ func resourceAwsSsmPatchBaseline() *schema.Resource { "approved_patches": { Type: schema.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + MaxItems: 50, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringLenBetween(1, 100), + }, }, "rejected_patches": { Type: schema.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + MaxItems: 50, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringLenBetween(1, 100), + }, }, "operating_system": { @@ -124,13 +155,23 @@ func resourceAwsSsmPatchBaseline() *schema.Resource { Default: ssm.PatchComplianceLevelUnspecified, ValidateFunc: validation.StringInSlice(ssm.PatchComplianceLevel_Values(), false), }, + "rejected_patches_action": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: 
validation.StringInSlice(ssm.PatchAction_Values(), false), + }, + "approved_patches_enable_non_security": { + Type: schema.TypeBool, + Optional: true, + }, "tags": tagsSchema(), }, } } func resourceAwsSsmPatchBaselineCreate(d *schema.ResourceData, meta interface{}) error { - ssmconn := meta.(*AWSClient).ssmconn + conn := meta.(*AWSClient).ssmconn params := &ssm.CreatePatchBaselineInput{ Name: aws.String(d.Get("name").(string)), @@ -162,7 +203,15 @@ func resourceAwsSsmPatchBaselineCreate(d *schema.ResourceData, meta interface{}) params.ApprovalRules = expandAwsSsmPatchRuleGroup(d) } - resp, err := ssmconn.CreatePatchBaseline(params) + if v, ok := d.GetOk("approved_patches_enable_non_security"); ok { + params.ApprovedPatchesEnableNonSecurity = aws.Bool(v.(bool)) + } + + if v, ok := d.GetOk("rejected_patches_action"); ok { + params.RejectedPatchesAction = aws.String(v.(string)) + } + + resp, err := conn.CreatePatchBaseline(params) if err != nil { return err } @@ -172,7 +221,7 @@ func resourceAwsSsmPatchBaselineCreate(d *schema.ResourceData, meta interface{}) } func resourceAwsSsmPatchBaselineUpdate(d *schema.ResourceData, meta interface{}) error { - ssmconn := meta.(*AWSClient).ssmconn + conn := meta.(*AWSClient).ssmconn params := &ssm.UpdatePatchBaselineInput{ BaselineId: aws.String(d.Id()), @@ -206,20 +255,25 @@ func resourceAwsSsmPatchBaselineUpdate(d *schema.ResourceData, meta interface{}) params.GlobalFilters = expandAwsSsmPatchFilterGroup(d) } - _, err := ssmconn.UpdatePatchBaseline(params) - if err != nil { - if isAWSErr(err, ssm.ErrCodeDoesNotExistException, "") { - log.Printf("[WARN] Patch Baseline %s not found, removing from state", d.Id()) - d.SetId("") - return nil + if d.HasChange("approved_patches_enable_non_security") { + params.ApprovedPatchesEnableNonSecurity = aws.Bool(d.Get("approved_patches_enable_non_security").(bool)) + } + + if d.HasChange("rejected_patches_action") { + params.RejectedPatchesAction = aws.String(d.Get("rejected_patches_action").(string)) + } + + if d.HasChangesExcept("tags") { + _, err := conn.UpdatePatchBaseline(params) + if err != nil { + return fmt.Errorf("error updating SSM Patch Baseline (%s): %w", d.Id(), err) } - return err } if d.HasChange("tags") { o, n := d.GetChange("tags") - if err := keyvaluetags.SsmUpdateTags(ssmconn, d.Id(), ssm.ResourceTypeForTaggingPatchBaseline, o, n); err != nil { + if err := keyvaluetags.SsmUpdateTags(conn, d.Id(), ssm.ResourceTypeForTaggingPatchBaseline, o, n); err != nil { return fmt.Errorf("error updating SSM Patch Baseline (%s) tags: %s", d.Id(), err) } } @@ -227,14 +281,14 @@ func resourceAwsSsmPatchBaselineUpdate(d *schema.ResourceData, meta interface{}) return resourceAwsSsmPatchBaselineRead(d, meta) } func resourceAwsSsmPatchBaselineRead(d *schema.ResourceData, meta interface{}) error { - ssmconn := meta.(*AWSClient).ssmconn + conn := meta.(*AWSClient).ssmconn ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig params := &ssm.GetPatchBaselineInput{ BaselineId: aws.String(d.Id()), } - resp, err := ssmconn.GetPatchBaseline(params) + resp, err := conn.GetPatchBaseline(params) if err != nil { if isAWSErr(err, ssm.ErrCodeDoesNotExistException, "") { log.Printf("[WARN] Patch Baseline %s not found, removing from state", d.Id()) @@ -250,6 +304,8 @@ func resourceAwsSsmPatchBaselineRead(d *schema.ResourceData, meta interface{}) e d.Set("approved_patches_compliance_level", resp.ApprovedPatchesComplianceLevel) d.Set("approved_patches", flattenStringList(resp.ApprovedPatches)) d.Set("rejected_patches", 
flattenStringList(resp.RejectedPatches)) + d.Set("rejected_patches_action", resp.RejectedPatchesAction) + d.Set("approved_patches_enable_non_security", resp.ApprovedPatchesEnableNonSecurity) if err := d.Set("global_filter", flattenAwsSsmPatchFilterGroup(resp.GlobalFilters)); err != nil { return fmt.Errorf("Error setting global filters error: %#v", err) @@ -259,7 +315,16 @@ func resourceAwsSsmPatchBaselineRead(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error setting approval rules error: %#v", err) } - tags, err := keyvaluetags.SsmListTags(ssmconn, d.Id(), ssm.ResourceTypeForTaggingPatchBaseline) + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Region: meta.(*AWSClient).region, + Service: "ssm", + AccountID: meta.(*AWSClient).accountid, + Resource: fmt.Sprintf("patchbaseline/%s", strings.TrimPrefix(d.Id(), "/")), + } + d.Set("arn", arn.String()) + + tags, err := keyvaluetags.SsmListTags(conn, d.Id(), ssm.ResourceTypeForTaggingPatchBaseline) if err != nil { return fmt.Errorf("error listing tags for SSM Patch Baseline (%s): %s", d.Id(), err) @@ -273,7 +338,7 @@ func resourceAwsSsmPatchBaselineRead(d *schema.ResourceData, meta interface{}) e } func resourceAwsSsmPatchBaselineDelete(d *schema.ResourceData, meta interface{}) error { - ssmconn := meta.(*AWSClient).ssmconn + conn := meta.(*AWSClient).ssmconn log.Printf("[INFO] Deleting SSM Patch Baseline: %s", d.Id()) @@ -281,7 +346,7 @@ func resourceAwsSsmPatchBaselineDelete(d *schema.ResourceData, meta interface{}) BaselineId: aws.String(d.Id()), } - _, err := ssmconn.DeletePatchBaseline(params) + _, err := conn.DeletePatchBaseline(params) if err != nil { return fmt.Errorf("error deleting SSM Patch Baseline (%s): %s", d.Id(), err) } @@ -319,7 +384,7 @@ func flattenAwsSsmPatchFilterGroup(group *ssm.PatchFilterGroup) []map[string]int for _, filter := range group.PatchFilters { f := make(map[string]interface{}) - f["key"] = *filter.Key + f["key"] = aws.StringValue(filter.Key) f["values"] = flattenStringList(filter.Values) result = append(result, f) @@ -378,9 +443,9 @@ func flattenAwsSsmPatchRuleGroup(group *ssm.PatchRuleGroup) []map[string]interfa for _, rule := range group.PatchRules { r := make(map[string]interface{}) - r["approve_after_days"] = *rule.ApproveAfterDays - r["compliance_level"] = *rule.ComplianceLevel - r["enable_non_security"] = *rule.EnableNonSecurity + r["approve_after_days"] = aws.Int64Value(rule.ApproveAfterDays) + r["compliance_level"] = aws.StringValue(rule.ComplianceLevel) + r["enable_non_security"] = aws.BoolValue(rule.EnableNonSecurity) r["patch_filter"] = flattenAwsSsmPatchFilterGroup(rule.PatchFilterGroup) result = append(result, r) } diff --git a/aws/resource_aws_ssm_patch_baseline_test.go b/aws/resource_aws_ssm_patch_baseline_test.go index dc0881fdfb0..f9f40f2bda9 100644 --- a/aws/resource_aws_ssm_patch_baseline_test.go +++ b/aws/resource_aws_ssm_patch_baseline_test.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "regexp" "testing" "github.com/aws/aws-sdk-go/aws" @@ -14,7 +15,7 @@ import ( func TestAccAWSSSMPatchBaseline_basic(t *testing.T) { var before, after ssm.PatchBaselineIdentity name := acctest.RandString(10) - resourceName := "aws_ssm_patch_baseline.foo" + resourceName := "aws_ssm_patch_baseline.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -24,12 +25,14 @@ func TestAccAWSSSMPatchBaseline_basic(t *testing.T) { Config: testAccAWSSSMPatchBaselineBasicConfig(name), Check: resource.ComposeTestCheckFunc( 
testAccCheckAWSSSMPatchBaselineExists(resourceName, &before), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "ssm", regexp.MustCompile(`patchbaseline/pb-.+`)), resource.TestCheckResourceAttr(resourceName, "approved_patches.#", "1"), resource.TestCheckTypeSetElemAttr(resourceName, "approved_patches.*", "KB123456"), resource.TestCheckResourceAttr(resourceName, "name", fmt.Sprintf("patch-baseline-%s", name)), resource.TestCheckResourceAttr(resourceName, "approved_patches_compliance_level", ssm.PatchComplianceLevelCritical), resource.TestCheckResourceAttr(resourceName, "description", "Baseline containing all updates approved for production systems"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "approved_patches_enable_non_security", "false"), ), }, { @@ -41,6 +44,7 @@ func TestAccAWSSSMPatchBaseline_basic(t *testing.T) { Config: testAccAWSSSMPatchBaselineBasicConfigUpdated(name), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSSMPatchBaselineExists(resourceName, &after), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "ssm", regexp.MustCompile(`patchbaseline/pb-.+`)), resource.TestCheckResourceAttr(resourceName, "approved_patches.#", "2"), resource.TestCheckTypeSetElemAttr(resourceName, "approved_patches.*", "KB123456"), resource.TestCheckTypeSetElemAttr(resourceName, "approved_patches.*", "KB456789"), @@ -49,7 +53,7 @@ func TestAccAWSSSMPatchBaseline_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "description", "Baseline containing all updates approved for production systems - August 2017"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), func(*terraform.State) error { - if *before.BaselineId != *after.BaselineId { + if aws.StringValue(before.BaselineId) != aws.StringValue(after.BaselineId) { t.Fatal("Baseline IDs changed unexpectedly") } return nil @@ -63,7 +67,7 @@ func TestAccAWSSSMPatchBaseline_basic(t *testing.T) { func TestAccAWSSSMPatchBaseline_tags(t *testing.T) { var patch ssm.PatchBaselineIdentity name := acctest.RandString(10) - resourceName := "aws_ssm_patch_baseline.foo" + resourceName := "aws_ssm_patch_baseline.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -106,7 +110,7 @@ func TestAccAWSSSMPatchBaseline_tags(t *testing.T) { func TestAccAWSSSMPatchBaseline_disappears(t *testing.T) { var identity ssm.PatchBaselineIdentity name := acctest.RandString(10) - resourceName := "aws_ssm_patch_baseline.foo" + resourceName := "aws_ssm_patch_baseline.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -117,7 +121,7 @@ func TestAccAWSSSMPatchBaseline_disappears(t *testing.T) { Config: testAccAWSSSMPatchBaselineBasicConfig(name), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSSMPatchBaselineExists(resourceName, &identity), - testAccCheckAWSSSMPatchBaselineDisappears(&identity), + testAccCheckResourceDisappears(testAccProvider, resourceAwsSsmPatchBaseline(), resourceName), ), ExpectNonEmptyPlan: true, }, @@ -128,7 +132,7 @@ func TestAccAWSSSMPatchBaseline_disappears(t *testing.T) { func TestAccAWSSSMPatchBaseline_OperatingSystem(t *testing.T) { var before, after ssm.PatchBaselineIdentity name := acctest.RandString(10) - resourceName := "aws_ssm_patch_baseline.foo" + resourceName := "aws_ssm_patch_baseline.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -167,6 +171,56 @@ 
func TestAccAWSSSMPatchBaseline_OperatingSystem(t *testing.T) { }) } +func TestAccAWSSSMPatchBaseline_ApprovedPatchesNonSec(t *testing.T) { + var ssmPatch ssm.PatchBaselineIdentity + name := acctest.RandString(10) + resourceName := "aws_ssm_patch_baseline.test" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSMPatchBaselineDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSSMPatchBaselineBasicConfigApprovedPatchesNonSec(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSMPatchBaselineExists(resourceName, &ssmPatch), + resource.TestCheckResourceAttr(resourceName, "approved_patches_enable_non_security", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSSSMPatchBaseline_RejectPatchesAction(t *testing.T) { + var ssmPatch ssm.PatchBaselineIdentity + name := acctest.RandString(10) + resourceName := "aws_ssm_patch_baseline.test" + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSSMPatchBaselineDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSSMPatchBaselineBasicConfigRejectPatchesAction(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSSMPatchBaselineExists(resourceName, &ssmPatch), + resource.TestCheckResourceAttr(resourceName, "rejected_patches_action", "ALLOW_AS_DEPENDENCY"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckAwsSsmPatchBaselineRecreated(t *testing.T, before, after *ssm.PatchBaselineIdentity) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -213,24 +267,6 @@ func testAccCheckAWSSSMPatchBaselineExists(n string, patch *ssm.PatchBaselineIde } } -func testAccCheckAWSSSMPatchBaselineDisappears(patch *ssm.PatchBaselineIdentity) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := testAccProvider.Meta().(*AWSClient).ssmconn - - id := aws.StringValue(patch.BaselineId) - params := &ssm.DeletePatchBaselineInput{ - BaselineId: aws.String(id), - } - - _, err := conn.DeletePatchBaseline(params) - if err != nil { - return fmt.Errorf("error deleting Patch Baseline %s: %s", id, err) - } - - return nil - } -} - func testAccCheckAWSSSMPatchBaselineDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).ssmconn @@ -264,7 +300,7 @@ func testAccCheckAWSSSMPatchBaselineDestroy(s *terraform.State) error { func testAccAWSSSMPatchBaselineBasicConfig(rName string) string { return fmt.Sprintf(` -resource "aws_ssm_patch_baseline" "foo" { +resource "aws_ssm_patch_baseline" "test" { name = "patch-baseline-%s" description = "Baseline containing all updates approved for production systems" approved_patches = ["KB123456"] @@ -275,7 +311,7 @@ resource "aws_ssm_patch_baseline" "foo" { func testAccAWSSSMPatchBaselineBasicConfigTags1(rName, tagKey1, tagValue1 string) string { return fmt.Sprintf(` -resource "aws_ssm_patch_baseline" "foo" { +resource "aws_ssm_patch_baseline" "test" { name = %[1]q description = "Baseline containing all updates approved for production systems" approved_patches = ["KB123456"] @@ -290,7 +326,7 @@ resource "aws_ssm_patch_baseline" "foo" { func testAccAWSSSMPatchBaselineBasicConfigTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { return fmt.Sprintf(` -resource 
"aws_ssm_patch_baseline" "foo" { +resource "aws_ssm_patch_baseline" "test" { name = %[1]q description = "Baseline containing all updates approved for production systems" approved_patches = ["KB123456"] @@ -306,7 +342,7 @@ resource "aws_ssm_patch_baseline" "foo" { func testAccAWSSSMPatchBaselineBasicConfigUpdated(rName string) string { return fmt.Sprintf(` -resource "aws_ssm_patch_baseline" "foo" { +resource "aws_ssm_patch_baseline" "test" { name = "updated-patch-baseline-%s" description = "Baseline containing all updates approved for production systems - August 2017" approved_patches = ["KB123456", "KB456789"] @@ -317,7 +353,7 @@ resource "aws_ssm_patch_baseline" "foo" { func testAccAWSSSMPatchBaselineConfigWithOperatingSystem(rName string) string { return fmt.Sprintf(` -resource "aws_ssm_patch_baseline" "foo" { +resource "aws_ssm_patch_baseline" "test" { name = "patch-baseline-%s" operating_system = "AMAZON_LINUX" description = "Baseline containing all updates approved for production systems" @@ -347,7 +383,7 @@ resource "aws_ssm_patch_baseline" "foo" { func testAccAWSSSMPatchBaselineConfigWithOperatingSystemUpdated(rName string) string { return fmt.Sprintf(` -resource "aws_ssm_patch_baseline" "foo" { +resource "aws_ssm_patch_baseline" "test" { name = "patch-baseline-%s" operating_system = "WINDOWS" description = "Baseline containing all updates approved for production systems" @@ -373,3 +409,28 @@ resource "aws_ssm_patch_baseline" "foo" { } `, rName) } + +func testAccAWSSSMPatchBaselineBasicConfigApprovedPatchesNonSec(rName string) string { + return fmt.Sprintf(` +resource "aws_ssm_patch_baseline" "test" { + name = %q + operating_system = "AMAZON_LINUX" + description = "Baseline containing all updates approved for production systems" + approved_patches = ["KB123456"] + approved_patches_compliance_level = "CRITICAL" + approved_patches_enable_non_security = true +} +`, rName) +} + +func testAccAWSSSMPatchBaselineBasicConfigRejectPatchesAction(rName string) string { + return fmt.Sprintf(` +resource "aws_ssm_patch_baseline" "test" { + name = "patch-baseline-%s" + description = "Baseline containing all updates approved for production systems" + approved_patches = ["KB123456"] + approved_patches_compliance_level = "CRITICAL" + rejected_patches_action = "ALLOW_AS_DEPENDENCY" +} +`, rName) +} diff --git a/website/docs/r/ssm_patch_baseline.html.markdown b/website/docs/r/ssm_patch_baseline.html.markdown index 48a02bbd29d..2aa5a891132 100644 --- a/website/docs/r/ssm_patch_baseline.html.markdown +++ b/website/docs/r/ssm_patch_baseline.html.markdown @@ -132,6 +132,8 @@ The following arguments are supported: * `rejected_patches` - (Optional) A list of rejected patches. * `global_filter` - (Optional) A set of global filters used to exclude patches from the baseline. Up to 4 global filters can be specified using Key/Value pairs. Valid Keys are `PRODUCT | CLASSIFICATION | MSRC_SEVERITY | PATCH_ID`. * `approval_rule` - (Optional) A set of rules used to include patches in the baseline. up to 10 approval rules can be specified. Each approval_rule block requires the fields documented below. +* `rejected_patches_action` - (Optional) The action for Patch Manager to take on patches included in the `rejected_patches` list. Allow values are `ALLOW_AS_DEPENDENCY` and `BLOCK`. +* `approved_patches_enable_non_security` - (Optional) Indicates whether the list of approved patches includes non-security updates that should be applied to the instances. Applies to Linux instances only. 
The `approval_rule` block supports: @@ -147,6 +149,7 @@ The `approval_rule` block supports: In addition to all arguments above, the following attributes are exported: * `id` - The ID of the patch baseline. +* `arn` - The ARN of the patch baseline. ## Import From 9b3b583a210b99b38f38d8e016e812cad25ce29f Mon Sep 17 00:00:00 2001 From: changelogbot Date: Fri, 12 Feb 2021 00:51:29 +0000 Subject: [PATCH 1200/1212] Update CHANGELOG.md for #17418 --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b5f61474d63..fad34df742c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,7 @@ ENHANCEMENTS: * resource/aws_ec2_traffic_mirror_filter_rule: Add arn attribute. ([#13949](https://github.com/hashicorp/terraform-provider-aws/issues/13949)) * resource/aws_ec2_traffic_mirror_filter_rule: Add plan time validation to `destination_port_range.from_port`, `destination_port_range.to_port`, `source_port_range.from_port`, and `source_port_range.to_port`. ([#13949](https://github.com/hashicorp/terraform-provider-aws/issues/13949)) +* resource/aws_fms_policy: Allow use of `resource_type` or `resource_type_list` attributes ([#17418](https://github.com/hashicorp/terraform-provider-aws/issues/17418)) * resource/aws_imagebuilder_image_recipe: Add `gp3` as a valid value for the `volume_type` attribute ([#17286](https://github.com/hashicorp/terraform-provider-aws/issues/17286)) * resource/aws_lambda_event_source_mapping: Add `topics` attribute to support Amazon MSK as an event source ([#14746](https://github.com/hashicorp/terraform-provider-aws/issues/14746)) * resource/aws_lb_listener_certificate: Add import support ([#16474](https://github.com/hashicorp/terraform-provider-aws/issues/16474)) @@ -44,6 +45,12 @@ ENHANCEMENTS: * resource/aws_ses_receipt_rule: Add plan time validations for `name`, `tls_policy`, `add_header_action.header_name`, `add_header_action.header_value`, `bounce_action.topic_arn`, `lambda_action.function_arn`, `lambda_action.topic_arn`, `lambda_action.invocation_type`, `s3_action,topic_arn`, `sns_action.topic_arn`, `stop_action.scope`, `stop_action.topic_arn`, `workmail_action.topic_arn`, and `workmail_action.organization_arn` attributes ([#13960](https://github.com/hashicorp/terraform-provider-aws/issues/13960)) * resource/aws_ses_template: Add `arn` attribute ([#13963](https://github.com/hashicorp/terraform-provider-aws/issues/13963)) * resource/aws_sns_topic_subscription: Add `redrive_policy` argument ([#11770](https://github.com/hashicorp/terraform-provider-aws/issues/11770)) +* resource/aws_ssm_maintenance_window_task: Add `task_invocation_parameters` `run_command_parameters` block `cloudwatch_config` and `document_version` arguments ([#11774](https://github.com/hashicorp/terraform-provider-aws/issues/11774)) +* resource/aws_ssm_maintenance_window_task: Add plan time validation to `max_concurrency`, `max_errors`, `priority`, `service_role_arn`, `targets`, `targets.notification_arn`, `targets.service_role_arn`, `task_type`, `task_invocation_parameters.run_command_parameters.comment`, `task_invocation_parameters.run_command_parameters.document_hash`, `task_invocation_parameters.run_command_parameters.timeout_seconds`, and `task_invocation_parameters.run_command_parameters.notification_config.notification_events` arguments ([#11774](https://github.com/hashicorp/terraform-provider-aws/issues/11774)) +* resource/aws_ssm_patch_baseline: Add `approved_patches_enable_non_security` and `rejected_patches_action` arguments 
([#11772](https://github.com/hashicorp/terraform-provider-aws/issues/11772)) +* resource/aws_ssm_patch_baseline: Adds `arn` attribute. ([#11772](https://github.com/hashicorp/terraform-provider-aws/issues/11772)) +* resource/aws_ssm_patch_baseline: Adds plan time validation for `name`, `description`, `global_filter.key`, `global_filter.values`, +`approved_patches`, `rejected_patches`, `approval_rule.approve_after_days`, `approval_rule.patch_filter.key`, and `approval_rule.patch_filter.values`. ([#11772](https://github.com/hashicorp/terraform-provider-aws/issues/11772)) BUG FIXES: From 2f65ce8a91e58908f54ab2a2122b23fdb5b17829 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 11 Feb 2021 17:07:32 -0800 Subject: [PATCH 1201/1212] Moves DynamoDB Table attribute validator to resource file --- aws/resource_aws_dynamodb_table.go | 64 ++++++++++++++++++++++++++++++ aws/validators.go | 64 ------------------------------ 2 files changed, 64 insertions(+), 64 deletions(-) diff --git a/aws/resource_aws_dynamodb_table.go b/aws/resource_aws_dynamodb_table.go index 1d4ad4bc6e1..de260c282b6 100644 --- a/aws/resource_aws_dynamodb_table.go +++ b/aws/resource_aws_dynamodb_table.go @@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/dynamodb" + multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -1286,3 +1287,66 @@ func isDynamoDbTableOptionDisabled(v interface{}) bool { e := options[0].(map[string]interface{})["enabled"] return !e.(bool) } + +func validateDynamoDbTableAttributes(d *schema.ResourceDiff) error { + // Collect all indexed attributes + primaryHashKey := d.Get("hash_key").(string) + indexedAttributes := map[string]bool{ + primaryHashKey: true, + } + if v, ok := d.GetOk("range_key"); ok { + indexedAttributes[v.(string)] = true + } + if v, ok := d.GetOk("local_secondary_index"); ok { + indexes := v.(*schema.Set).List() + for _, idx := range indexes { + index := idx.(map[string]interface{}) + rangeKey := index["range_key"].(string) + indexedAttributes[rangeKey] = true + } + } + if v, ok := d.GetOk("global_secondary_index"); ok { + indexes := v.(*schema.Set).List() + for _, idx := range indexes { + index := idx.(map[string]interface{}) + + hashKey := index["hash_key"].(string) + indexedAttributes[hashKey] = true + + if rk, ok := index["range_key"].(string); ok && rk != "" { + indexedAttributes[rk] = true + } + } + } + + // Check if all indexed attributes have an attribute definition + attributes := d.Get("attribute").(*schema.Set).List() + unindexedAttributes := []string{} + for _, attr := range attributes { + attribute := attr.(map[string]interface{}) + attrName := attribute["name"].(string) + + if _, ok := indexedAttributes[attrName]; !ok { + unindexedAttributes = append(unindexedAttributes, attrName) + } else { + delete(indexedAttributes, attrName) + } + } + + var err *multierror.Error + + if len(unindexedAttributes) > 0 { + err = multierror.Append(err, fmt.Errorf("All attributes must be indexed. Unused attributes: %q", unindexedAttributes)) + } + + if len(indexedAttributes) > 0 { + missingIndexes := []string{} + for index := range indexedAttributes { + missingIndexes = append(missingIndexes, index) + } + + err = multierror.Append(err, fmt.Errorf("All indexes must match a defined attribute. 
Unmatched indexes: %q", missingIndexes)) + } + + return err.ErrorOrNil() +} diff --git a/aws/validators.go b/aws/validators.go index 636b41d55c3..2f47a5687de 100644 --- a/aws/validators.go +++ b/aws/validators.go @@ -16,7 +16,6 @@ import ( "github.com/aws/aws-sdk-go/service/configservice" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/waf" - multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" @@ -2209,69 +2208,6 @@ func validateIotThingTypeSearchableAttribute(v interface{}, k string) (ws []stri return } -func validateDynamoDbTableAttributes(d *schema.ResourceDiff) error { - // Collect all indexed attributes - primaryHashKey := d.Get("hash_key").(string) - indexedAttributes := map[string]bool{ - primaryHashKey: true, - } - if v, ok := d.GetOk("range_key"); ok { - indexedAttributes[v.(string)] = true - } - if v, ok := d.GetOk("local_secondary_index"); ok { - indexes := v.(*schema.Set).List() - for _, idx := range indexes { - index := idx.(map[string]interface{}) - rangeKey := index["range_key"].(string) - indexedAttributes[rangeKey] = true - } - } - if v, ok := d.GetOk("global_secondary_index"); ok { - indexes := v.(*schema.Set).List() - for _, idx := range indexes { - index := idx.(map[string]interface{}) - - hashKey := index["hash_key"].(string) - indexedAttributes[hashKey] = true - - if rk, ok := index["range_key"].(string); ok && rk != "" { - indexedAttributes[rk] = true - } - } - } - - // Check if all indexed attributes have an attribute definition - attributes := d.Get("attribute").(*schema.Set).List() - missingAttrDefs := []string{} - for _, attr := range attributes { - attribute := attr.(map[string]interface{}) - attrName := attribute["name"].(string) - - if _, ok := indexedAttributes[attrName]; !ok { - missingAttrDefs = append(missingAttrDefs, attrName) - } else { - delete(indexedAttributes, attrName) - } - } - - var err *multierror.Error - - if len(missingAttrDefs) > 0 { - err = multierror.Append(err, fmt.Errorf("All attributes must be indexed. Unused attributes: %q", missingAttrDefs)) - } - - if len(indexedAttributes) > 0 { - missingIndexes := []string{} - for index := range indexedAttributes { - missingIndexes = append(missingIndexes, index) - } - - err = multierror.Append(err, fmt.Errorf("All indexes must match a defined attribute. 
Unmatched indexes: %q", missingIndexes)) - } - - return err.ErrorOrNil() -} - func validateLaunchTemplateName(v interface{}, k string) (ws []string, errors []error) { value := v.(string) if len(value) < 3 { From 30b2de94e65e96246e18b208f7009ea00ae8c666 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 11 Feb 2021 17:10:15 -0800 Subject: [PATCH 1202/1212] Adds CHANGELOG --- .changelog/6364.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/6364.txt diff --git a/.changelog/6364.txt b/.changelog/6364.txt new file mode 100644 index 00000000000..7fd1a30b4b1 --- /dev/null +++ b/.changelog/6364.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_dynamodb_table: Add plan-time validation for indexes on undefined attributes +``` \ No newline at end of file From affdc3d21874fbab498e55a7a1fed449d0ecf1dc Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Thu, 11 Feb 2021 22:06:20 -0500 Subject: [PATCH 1203/1212] resource/aws_ssm_patch_baseline: Rename patch_source to source, add CHANGELOG entry Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAWSSSMPatchBaseline_disappears (13.63s) --- PASS: TestAccAWSSSMPatchBaseline_ApprovedPatchesNonSec (16.37s) --- PASS: TestAccAWSSSMPatchBaseline_RejectPatchesAction (17.55s) --- PASS: TestAccAWSSSMPatchBaseline_basic (26.22s) --- PASS: TestAccAWSSSMPatchBaseline_Sources (27.70s) --- PASS: TestAccAWSSSMPatchBaseline_OperatingSystem (28.85s) --- PASS: TestAccAWSSSMPatchBaseline_tags (35.86s) ``` Output from acceptance testing in AWS GovCloud (US): ``` --- PASS: TestAccAWSSSMPatchBaseline_disappears (16.44s) --- PASS: TestAccAWSSSMPatchBaseline_ApprovedPatchesNonSec (22.40s) --- PASS: TestAccAWSSSMPatchBaseline_RejectPatchesAction (22.56s) --- PASS: TestAccAWSSSMPatchBaseline_basic (33.18s) --- PASS: TestAccAWSSSMPatchBaseline_Sources (35.37s) --- PASS: TestAccAWSSSMPatchBaseline_OperatingSystem (35.76s) --- PASS: TestAccAWSSSMPatchBaseline_tags (47.99s) ``` --- .changelog/11879.txt | 3 ++ aws/resource_aws_ssm_patch_baseline.go | 23 +++++---- aws/resource_aws_ssm_patch_baseline_test.go | 50 +++++++++---------- .../docs/r/ssm_patch_baseline.html.markdown | 27 +++++++--- 4 files changed, 62 insertions(+), 41 deletions(-) create mode 100644 .changelog/11879.txt diff --git a/.changelog/11879.txt b/.changelog/11879.txt new file mode 100644 index 00000000000..1a1c5ab3353 --- /dev/null +++ b/.changelog/11879.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_ssm_patch_baseline: Add `source` configuration block +``` diff --git a/aws/resource_aws_ssm_patch_baseline.go b/aws/resource_aws_ssm_patch_baseline.go index 5a57a2fc3cd..d5f7f6eb6d5 100644 --- a/aws/resource_aws_ssm_patch_baseline.go +++ b/aws/resource_aws_ssm_patch_baseline.go @@ -166,16 +166,19 @@ func resourceAwsSsmPatchBaseline() *schema.Resource { Optional: true, }, - "patch_source": { + "source": { Type: schema.TypeList, Optional: true, MaxItems: 20, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9_\-.]{3,50}$`), "see https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_PatchSource.html"), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 50), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9_\-.]{3,50}$`), "must contain only alphanumeric, underscore, hyphen, or period characters"), + ), }, "configuration": { @@ -235,7 +238,7 @@ 
func resourceAwsSsmPatchBaselineCreate(d *schema.ResourceData, meta interface{})
 		params.ApprovalRules = expandAwsSsmPatchRuleGroup(d)
 	}
 
-	if _, ok := d.GetOk("patch_source"); ok {
+	if _, ok := d.GetOk("source"); ok {
 		params.Sources = expandAwsSsmPatchSource(d)
 	}
 
@@ -292,7 +295,7 @@ func resourceAwsSsmPatchBaselineUpdate(d *schema.ResourceData, meta interface{})
 		params.GlobalFilters = expandAwsSsmPatchFilterGroup(d)
 	}
 
-	if d.HasChange("patch_source") {
+	if d.HasChange("source") {
 		params.Sources = expandAwsSsmPatchSource(d)
 	}
 
@@ -356,7 +359,7 @@ func resourceAwsSsmPatchBaselineRead(d *schema.ResourceData, meta interface{}) e
 		return fmt.Errorf("Error setting approval rules error: %#v", err)
 	}
 
-	if err := d.Set("patch_source", flattenAwsSsmPatchSource(resp.Sources)); err != nil {
+	if err := d.Set("source", flattenAwsSsmPatchSource(resp.Sources)); err != nil {
 		return fmt.Errorf("Error setting patch sources error: %#v", err)
 	}
 
@@ -501,7 +504,7 @@ func flattenAwsSsmPatchRuleGroup(group *ssm.PatchRuleGroup) []map[string]interfa
 func expandAwsSsmPatchSource(d *schema.ResourceData) []*ssm.PatchSource {
 	var sources []*ssm.PatchSource
 
-	sourceConfigs := d.Get("patch_source").([]interface{})
+	sourceConfigs := d.Get("source").([]interface{})
 
 	for _, sConfig := range sourceConfigs {
 		config := sConfig.(map[string]interface{})
@@ -527,8 +530,8 @@ func flattenAwsSsmPatchSource(sources []*ssm.PatchSource) []map[string]interface
 	for _, source := range sources {
 		s := make(map[string]interface{})
 
-		s["name"] = *source.Name
-		s["configuration"] = *source.Configuration
+		s["name"] = aws.StringValue(source.Name)
+		s["configuration"] = aws.StringValue(source.Configuration)
 		s["products"] = flattenStringList(source.Products)
 		result = append(result, s)
 	}
diff --git a/aws/resource_aws_ssm_patch_baseline_test.go b/aws/resource_aws_ssm_patch_baseline_test.go
index 29805093090..8df9c566107 100644
--- a/aws/resource_aws_ssm_patch_baseline_test.go
+++ b/aws/resource_aws_ssm_patch_baseline_test.go
@@ -171,7 +171,7 @@ func TestAccAWSSSMPatchBaseline_OperatingSystem(t *testing.T) {
 	})
 }
 
-func TestAccAWSSSMPatchBaseline_PatchSources(t *testing.T) {
+func TestAccAWSSSMPatchBaseline_Sources(t *testing.T) {
 	var before, after ssm.PatchBaselineIdentity
 	name := acctest.RandString(10)
 	resourceName := "aws_ssm_patch_baseline.test"
@@ -182,14 +182,14 @@ func TestAccAWSSSMPatchBaseline_PatchSources(t *testing.T) {
 		CheckDestroy: testAccCheckAWSSSMPatchBaselineDestroy,
 		Steps: []resource.TestStep{
 			{
-				Config: testAccAWSSSMPatchBaselineConfigWithPatchSource(name),
+				Config: testAccAWSSSMPatchBaselineConfigWithSource(name),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSSMPatchBaselineExists(resourceName, &before),
-					resource.TestCheckResourceAttr(resourceName, "patch_source.#", "1"),
-					resource.TestCheckResourceAttr(resourceName, "patch_source.0.name", "My-AL2017.09"),
-					resource.TestCheckResourceAttr(resourceName, "patch_source.0.configuration", "[amzn-main] \nname=amzn-main-Base\nmirrorlist=http://repo./$awsregion./$awsdomain//$releasever/main/mirror.list //nmirrorlist_expire=300//nmetadata_expire=300 \npriority=10 \nfailovermethod=priority \nfastestmirror_enabled=0 \ngpgcheck=1 \ngpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-amazon-ga \nenabled=1 \nretries=3 \ntimeout=5\nreport_instanceid=yes"),
-					resource.TestCheckResourceAttr(resourceName, "patch_source.0.products.#", "1"),
-					resource.TestCheckResourceAttr(resourceName, "patch_source.0.products.0", "AmazonLinux2017.09"),
+					resource.TestCheckResourceAttr(resourceName, "source.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "source.0.name", "My-AL2017.09"),
+					resource.TestCheckResourceAttr(resourceName, "source.0.configuration", "[amzn-main] \nname=amzn-main-Base\nmirrorlist=http://repo./$awsregion./$awsdomain//$releasever/main/mirror.list //nmirrorlist_expire=300//nmetadata_expire=300 \npriority=10 \nfailovermethod=priority \nfastestmirror_enabled=0 \ngpgcheck=1 \ngpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-amazon-ga \nenabled=1 \nretries=3 \ntimeout=5\nreport_instanceid=yes"),
+					resource.TestCheckResourceAttr(resourceName, "source.0.products.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "source.0.products.0", "AmazonLinux2017.09"),
 				),
 			},
 			{
@@ -198,20 +198,20 @@ func TestAccAWSSSMPatchBaseline_PatchSources(t *testing.T) {
 				ImportStateVerify: true,
 			},
 			{
-				Config: testAccAWSSSMPatchBaselineConfigWithPatchSourceUpdated(name),
+				Config: testAccAWSSSMPatchBaselineConfigWithSourceUpdated(name),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSSMPatchBaselineExists(resourceName, &after),
-					resource.TestCheckResourceAttr(resourceName, "patch_source.#", "2"),
-					resource.TestCheckResourceAttr(resourceName, "patch_source.0.name", "My-AL2017.09"),
-					resource.TestCheckResourceAttr(resourceName, "patch_source.0.configuration", "[amzn-main] \nname=amzn-main-Base\nmirrorlist=http://repo./$awsregion./$awsdomain//$releasever/main/mirror.list //nmirrorlist_expire=300//nmetadata_expire=300 \npriority=10 \nfailovermethod=priority \nfastestmirror_enabled=0 \ngpgcheck=1 \ngpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-amazon-ga \nenabled=1 \nretries=3 \ntimeout=5\nreport_instanceid=yes"),
-					resource.TestCheckResourceAttr(resourceName, "patch_source.0.products.#", "1"),
-					resource.TestCheckResourceAttr(resourceName, "patch_source.0.products.0", "AmazonLinux2017.09"),
-					resource.TestCheckResourceAttr(resourceName, "patch_source.1.name", "My-AL2018.03"),
-					resource.TestCheckResourceAttr(resourceName, "patch_source.1.configuration", "[amzn-main] \nname=amzn-main-Base\nmirrorlist=http://repo./$awsregion./$awsdomain//$releasever/main/mirror.list //nmirrorlist_expire=300//nmetadata_expire=300 \npriority=10 \nfailovermethod=priority \nfastestmirror_enabled=0 \ngpgcheck=1 \ngpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-amazon-ga \nenabled=1 \nretries=3 \ntimeout=5\nreport_instanceid=yes"),
-					resource.TestCheckResourceAttr(resourceName, "patch_source.1.products.#", "1"),
-					resource.TestCheckResourceAttr(resourceName, "patch_source.1.products.0", "AmazonLinux2018.03"),
+					resource.TestCheckResourceAttr(resourceName, "source.#", "2"),
+					resource.TestCheckResourceAttr(resourceName, "source.0.name", "My-AL2017.09"),
+					resource.TestCheckResourceAttr(resourceName, "source.0.configuration", "[amzn-main] \nname=amzn-main-Base\nmirrorlist=http://repo./$awsregion./$awsdomain//$releasever/main/mirror.list //nmirrorlist_expire=300//nmetadata_expire=300 \npriority=10 \nfailovermethod=priority \nfastestmirror_enabled=0 \ngpgcheck=1 \ngpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-amazon-ga \nenabled=1 \nretries=3 \ntimeout=5\nreport_instanceid=yes"),
+					resource.TestCheckResourceAttr(resourceName, "source.0.products.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "source.0.products.0", "AmazonLinux2017.09"),
+					resource.TestCheckResourceAttr(resourceName, "source.1.name", "My-AL2018.03"),
+					resource.TestCheckResourceAttr(resourceName, "source.1.configuration", "[amzn-main] \nname=amzn-main-Base\nmirrorlist=http://repo./$awsregion./$awsdomain//$releasever/main/mirror.list //nmirrorlist_expire=300//nmetadata_expire=300 \npriority=10 \nfailovermethod=priority \nfastestmirror_enabled=0 \ngpgcheck=1 \ngpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-amazon-ga \nenabled=1 \nretries=3 \ntimeout=5\nreport_instanceid=yes"),
+					resource.TestCheckResourceAttr(resourceName, "source.1.products.#", "1"),
+					resource.TestCheckResourceAttr(resourceName, "source.1.products.0", "AmazonLinux2018.03"),
 					func(*terraform.State) error {
-						if *before.BaselineId != *after.BaselineId {
+						if aws.StringValue(before.BaselineId) != aws.StringValue(after.BaselineId) {
 							t.Fatal("Baseline IDs changed unexpectedly")
 						}
 						return nil
@@ -462,16 +462,16 @@ resource "aws_ssm_patch_baseline" "test" {
 `, rName)
 }
 
-func testAccAWSSSMPatchBaselineConfigWithPatchSource(rName string) string {
+func testAccAWSSSMPatchBaselineConfigWithSource(rName string) string {
 	return fmt.Sprintf(`
-resource "aws_ssm_patch_baseline" "foo" {
+resource "aws_ssm_patch_baseline" "test" {
   name = %[1]q
   description = "Baseline containing all updates approved for production systems"
  approved_patches_compliance_level = "CRITICAL"
   approved_patches = ["test123"]
   operating_system = "AMAZON_LINUX"
 
-  patch_source {
+  source {
     name          = "My-AL2017.09"
     configuration = "[amzn-main] \nname=amzn-main-Base\nmirrorlist=http://repo./$awsregion./$awsdomain//$releasever/main/mirror.list //nmirrorlist_expire=300//nmetadata_expire=300 \npriority=10 \nfailovermethod=priority \nfastestmirror_enabled=0 \ngpgcheck=1 \ngpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-amazon-ga \nenabled=1 \nretries=3 \ntimeout=5\nreport_instanceid=yes"
     products      = ["AmazonLinux2017.09"]
@@ -480,22 +480,22 @@ resource "aws_ssm_patch_baseline" "foo" {
 `, rName)
 }
 
-func testAccAWSSSMPatchBaselineConfigWithPatchSourceUpdated(rName string) string {
+func testAccAWSSSMPatchBaselineConfigWithSourceUpdated(rName string) string {
 	return fmt.Sprintf(`
-resource "aws_ssm_patch_baseline" "foo" {
+resource "aws_ssm_patch_baseline" "test" {
   name = %[1]q
   description = "Baseline containing all updates approved for production systems"
   approved_patches_compliance_level = "CRITICAL"
   approved_patches = ["test123"]
   operating_system = "AMAZON_LINUX"
 
-  patch_source {
+  source {
     name          = "My-AL2017.09"
     configuration = "[amzn-main] \nname=amzn-main-Base\nmirrorlist=http://repo./$awsregion./$awsdomain//$releasever/main/mirror.list //nmirrorlist_expire=300//nmetadata_expire=300 \npriority=10 \nfailovermethod=priority \nfastestmirror_enabled=0 \ngpgcheck=1 \ngpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-amazon-ga \nenabled=1 \nretries=3 \ntimeout=5\nreport_instanceid=yes"
     products      = ["AmazonLinux2017.09"]
   }
 
-  patch_source {
+  source {
     name          = "My-AL2018.03"
     configuration = "[amzn-main] \nname=amzn-main-Base\nmirrorlist=http://repo./$awsregion./$awsdomain//$releasever/main/mirror.list //nmirrorlist_expire=300//nmetadata_expire=300 \npriority=10 \nfailovermethod=priority \nfastestmirror_enabled=0 \ngpgcheck=1 \ngpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-amazon-ga \nenabled=1 \nretries=3 \ntimeout=5\nreport_instanceid=yes"
     products      = ["AmazonLinux2018.03"]
diff --git a/website/docs/r/ssm_patch_baseline.html.markdown b/website/docs/r/ssm_patch_baseline.html.markdown
index 39043e4e9c3..94b0b611543 100644
--- a/website/docs/r/ssm_patch_baseline.html.markdown
+++ b/website/docs/r/ssm_patch_baseline.html.markdown
@@ -128,13 +128,28 @@ resource "aws_ssm_patch_baseline" "al_2017_09" {
   operating_system = "AMAZON_LINUX"
 
   approval_rule {
-    ...
+    # ...
   }
 
-  patch_source {
+  source {
     name          = "My-AL2017.09"
-    configuration = "[amzn-main] \nname=amzn-main-Base\nmirrorlist=http://repo./$awsregion./$awsdomain//$releasever/main/mirror.list //nmirrorlist_expire=300//nmetadata_expire=300 \npriority=10 \nfailovermethod=priority \nfastestmirror_enabled=0 \ngpgcheck=1 \ngpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-amazon-ga \nenabled=1 \nretries=3 \ntimeout=5\nreport_instanceid=yes"
     products      = ["AmazonLinux2017.09"]
+    configuration = <
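Taken together, the hunks above rename the `patch_source` block to `source` across the resource, tests, and website docs (the heredoc body of the new docs example is truncated in this patch as captured). A minimal configuration using the renamed block might look like the following sketch; the baseline name is a placeholder and the yum repository `configuration` string is shortened for readability:

```hcl
resource "aws_ssm_patch_baseline" "example" {
  name             = "example-amazon-linux"     # placeholder
  operating_system = "AMAZON_LINUX"

  # Renamed from `patch_source` in earlier revisions of this resource.
  source {
    name          = "My-AL2017.09"
    configuration = "[amzn-main]\nname=amzn-main-Base\nenabled=1" # shortened placeholder repo config
    products      = ["AmazonLinux2017.09"]
  }
}
```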
From: Brian Flad
Date: Thu, 11 Feb 2021 22:37:12 -0500
Subject: [PATCH 1204/1212] Apply suggestions from code review

---
 ...urce_aws_ssm_maintenance_window_task_test.go | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/aws/resource_aws_ssm_maintenance_window_task_test.go b/aws/resource_aws_ssm_maintenance_window_task_test.go
index d3cd52d6e5f..54a43e83d97 100644
--- a/aws/resource_aws_ssm_maintenance_window_task_test.go
+++ b/aws/resource_aws_ssm_maintenance_window_task_test.go
@@ -466,24 +466,23 @@ resource "aws_iam_role" "test" {
 POLICY
 }
 
-
 resource "aws_iam_role_policy" "test" {
   name = %[1]q
   role = aws_iam_role.test.name
   policy = <

From: Brian Flad
Date: Thu, 11 Feb 2021 22:38:26 -0500
Subject: [PATCH 1205/1212] Update aws/resource_aws_ssm_maintenance_window_task_test.go

---
 aws/resource_aws_ssm_maintenance_window_task_test.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/aws/resource_aws_ssm_maintenance_window_task_test.go b/aws/resource_aws_ssm_maintenance_window_task_test.go
index 54a43e83d97..410038a9de2 100644
--- a/aws/resource_aws_ssm_maintenance_window_task_test.go
+++ b/aws/resource_aws_ssm_maintenance_window_task_test.go
@@ -466,6 +466,7 @@ resource "aws_iam_role" "test" {
 POLICY
 }
 
+
 resource "aws_iam_role_policy" "test" {
   name = %[1]q
   role = aws_iam_role.test.name

From e7b9b5d284e615df7dfc6137f37360a320c0acdc Mon Sep 17 00:00:00 2001
From: Brian Flad
Date: Thu, 11 Feb 2021 22:51:02 -0500
Subject: [PATCH 1206/1212] resource/aws_ssm_maintenance_window_task: Mark service_role_arn as Computed, add CHANGELOG for #12200

Output from acceptance testing in AWS Commercial:

```
--- PASS: TestAccAWSSSMMaintenanceWindowTask_disappears (21.36s)
--- PASS: TestAccAWSSSMMaintenanceWindowTask_emptyNotificationConfig (22.49s)
--- PASS: TestAccAWSSSMMaintenanceWindowTask_noRole (23.79s)
--- PASS: TestAccAWSSSMMaintenanceWindowTask_TaskInvocationStepFunctionParameters (25.18s)
--- PASS: TestAccAWSSSMMaintenanceWindowTask_updateForcesNewResource (37.93s)
--- PASS: TestAccAWSSSMMaintenanceWindowTask_basic (38.21s)
--- PASS: TestAccAWSSSMMaintenanceWindowTask_TaskInvocationAutomationParameters (46.55s)
--- PASS: TestAccAWSSSMMaintenanceWindowTask_TaskInvocationRunCommandParameters (48.17s)
--- PASS: TestAccAWSSSMMaintenanceWindowTask_TaskInvocationLambdaParameters (48.58s)
--- PASS: TestAccAWSSSMMaintenanceWindowTask_TaskInvocationRunCommandParametersCloudWatch (49.26s)
```

Output from acceptance testing in AWS GovCloud (US):

```
--- PASS: TestAccAWSSSMMaintenanceWindowTask_noRole (26.04s)
--- PASS: TestAccAWSSSMMaintenanceWindowTask_disappears (26.62s)
--- PASS: TestAccAWSSSMMaintenanceWindowTask_emptyNotificationConfig (27.01s)
--- PASS: TestAccAWSSSMMaintenanceWindowTask_TaskInvocationStepFunctionParameters (29.35s)
--- PASS: TestAccAWSSSMMaintenanceWindowTask_updateForcesNewResource (43.07s)
--- PASS: TestAccAWSSSMMaintenanceWindowTask_basic (43.41s)
--- PASS: TestAccAWSSSMMaintenanceWindowTask_TaskInvocationLambdaParameters (46.46s)
--- PASS: TestAccAWSSSMMaintenanceWindowTask_TaskInvocationAutomationParameters (51.63s)
--- PASS: TestAccAWSSSMMaintenanceWindowTask_TaskInvocationRunCommandParameters (51.98s)
--- PASS: TestAccAWSSSMMaintenanceWindowTask_TaskInvocationRunCommandParametersCloudWatch (59.12s)
```
---
 .changelog/12200.txt                            |  3 +++
 aws/resource_aws_ssm_maintenance_window_task.go |  1 +
 ...urce_aws_ssm_maintenance_window_task_test.go | 17 ++++++++---------
 3 files changed, 12 insertions(+), 9 deletions(-)
 create mode 100644 .changelog/12200.txt

diff --git a/.changelog/12200.txt b/.changelog/12200.txt
new file mode 100644
index 00000000000..980de930d0d
--- /dev/null
+++ b/.changelog/12200.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+resource/aws_ssm_maintenance_window_task: Make `service_role_arn` optional
+```
diff --git a/aws/resource_aws_ssm_maintenance_window_task.go b/aws/resource_aws_ssm_maintenance_window_task.go
index 683715ac35c..d22dd54461c 100644
--- a/aws/resource_aws_ssm_maintenance_window_task.go
+++ b/aws/resource_aws_ssm_maintenance_window_task.go
@@ -57,6 +57,7 @@ func resourceAwsSsmMaintenanceWindowTask() *schema.Resource {
 			"service_role_arn": {
 				Type:         schema.TypeString,
 				Optional:     true,
+				Computed:     true,
 				ValidateFunc: validateArn,
 			},
diff --git a/aws/resource_aws_ssm_maintenance_window_task_test.go b/aws/resource_aws_ssm_maintenance_window_task_test.go
index 410038a9de2..13548ee7011 100644
--- a/aws/resource_aws_ssm_maintenance_window_task_test.go
+++ b/aws/resource_aws_ssm_maintenance_window_task_test.go
@@ -66,7 +66,6 @@ func TestAccAWSSSMMaintenanceWindowTask_noRole(t *testing.T) {
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSSMMaintenanceWindowTaskExists(resourceName, &task),
 				),
-				ExpectNonEmptyPlan: true,
 			},
 		},
 	})
@@ -649,16 +648,16 @@ resource "aws_ssm_maintenance_window_task" "test" {
 }
 
 func testAccAWSSSMMaintenanceWindowTaskNoRoleConfig(rName string) string {
-	return fmt.Sprintf(testAccAWSSSMMaintenanceWindowTaskConfigBase(rName)+`
+	return fmt.Sprintf(testAccAWSSSMMaintenanceWindowTaskConfigBase(rName) + `
 resource "aws_ssm_maintenance_window_task" "test" {
-  window_id       = aws_ssm_maintenance_window.test.id
-  task_type       = "RUN_COMMAND"
-  task_arn        = "AWS-RunShellScript"
-  priority        = 1
-  max_concurrency = "2"
-  max_errors      = "1"
+  description     = "This resource is for test purpose only"
+  max_concurrency = 2
+  max_errors      = 1
   name            = "TestMaintenanceWindowTask"
-  description     = "This resource is for test purpose only"
+  priority        = 1
+  task_arn        = "AWS-RunShellScript"
+  task_type       = "RUN_COMMAND"
+  window_id       = aws_ssm_maintenance_window.test.id
 
   targets {
     key = "WindowTargetIds"
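Because `service_role_arn` is now both `Optional` and `Computed`, a configuration can omit the role entirely and the provider will read back whatever role the service reports, instead of planning a perpetual diff (which is why the `ExpectNonEmptyPlan` flag is removed above). A hedged sketch of such a task, with placeholder window and target references:

```hcl
resource "aws_ssm_maintenance_window_task" "example" {
  window_id       = aws_ssm_maintenance_window.example.id        # placeholder reference
  task_type       = "RUN_COMMAND"
  task_arn        = "AWS-RunShellScript"
  priority        = 1
  max_concurrency = 2
  max_errors      = 1

  # No service_role_arn: SSM falls back to its service-linked role, and the
  # Computed schema stores the resulting ARN in state without a diff.

  targets {
    key    = "WindowTargetIds"
    values = [aws_ssm_maintenance_window_target.example.id]      # placeholder reference
  }
}
```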
From 415c1acd88169df4a5f9a1bd3f99aab999caf8e7 Mon Sep 17 00:00:00 2001
From: Brian Flad
Date: Thu, 11 Feb 2021 23:21:16 -0500
Subject: [PATCH 1207/1212] Apply suggestions from code review

---
 aws/resource_aws_ssm_patch_baseline.go | 34 ++++++++------------------
 1 file changed, 10 insertions(+), 24 deletions(-)

diff --git a/aws/resource_aws_ssm_patch_baseline.go b/aws/resource_aws_ssm_patch_baseline.go
index 3fec11801c0..a6d47f44880 100644
--- a/aws/resource_aws_ssm_patch_baseline.go
+++ b/aws/resource_aws_ssm_patch_baseline.go
@@ -241,11 +241,7 @@ func resourceAwsSsmPatchBaselineCreate(d *schema.ResourceData, meta interface{})
 	}
 
 	if _, ok := d.GetOk("approval_rule"); ok {
-		rules, err := expandAwsSsmPatchRuleGroup(d)
-		if err != nil {
-			return err
-		}
-		params.ApprovalRules = rules
+		params.ApprovalRules = expandAwsSsmPatchRuleGroup(d)
 	}
 
 	if _, ok := d.GetOk("source"); ok {
@@ -298,11 +294,7 @@ func resourceAwsSsmPatchBaselineUpdate(d *schema.ResourceData, meta interface{})
 	}
 
 	if d.HasChange("approval_rule") {
-		rules, err := expandAwsSsmPatchRuleGroup(d)
-		if err != nil {
-			return err
-		}
-		params.ApprovalRules = rules
+		params.ApprovalRules = expandAwsSsmPatchRuleGroup(d)
 	}
 
 	if d.HasChange("global_filter") {
@@ -455,7 +447,7 @@ func flattenAwsSsmPatchFilterGroup(group *ssm.PatchFilterGroup) []map[string]int
 	return result
 }
 
-func expandAwsSsmPatchRuleGroup(d *schema.ResourceData) (*ssm.PatchRuleGroup, error) {
+func expandAwsSsmPatchRuleGroup(d *schema.ResourceData) *ssm.PatchRuleGroup {
 	var rules []*ssm.PatchRule
 
 	ruleConfig := d.Get("approval_rule").([]interface{})
@@ -487,18 +479,12 @@ func expandAwsSsmPatchRuleGroup(d *schema.ResourceData) (*ssm.PatchRuleGroup, er
 			EnableNonSecurity: aws.Bool(rCfg["enable_non_security"].(bool)),
 		}
 
-		// Verify that at least one of approve_after_days or approve_until_date is set
-		approveAfterDays, _ := rCfg["approve_after_days"].(int)
-		approveUntilDate, _ := rCfg["approve_until_date"].(string)
-
-		if approveAfterDays > 0 && len(approveUntilDate) > 0 {
-			return nil, fmt.Errorf("Only one of approve_after_days or approve_until_date must be configured")
+		if v, ok := rCfg["approve_after_days"].(int); ok && v != 0 {
+			rule.ApproveAfterDays = aws.Int64(int64(v))
 		}
 
-		if len(approveUntilDate) > 0 {
-			rule.ApproveUntilDate = aws.String(approveUntilDate)
-		} else {
-			rule.ApproveAfterDays = aws.Int64(int64(approveAfterDays))
+		if v, ok := rCfg["approve_until_date"].(string); ok && v != "" {
+			rule.ApproveUntilDate = aws.String(v)
 		}
 
 		rules = append(rules, rule)
@@ -506,7 +492,7 @@ func expandAwsSsmPatchRuleGroup(d *schema.ResourceData) (*ssm.PatchRuleGroup, er
 
 	return &ssm.PatchRuleGroup{
 		PatchRules: rules,
-	}, nil
+	}
 }
 
 func flattenAwsSsmPatchRuleGroup(group *ssm.PatchRuleGroup) []map[string]interface{} {
@@ -523,11 +509,11 @@ func flattenAwsSsmPatchRuleGroup(group *ssm.PatchRuleGroup) []map[string]interfa
 		r["patch_filter"] = flattenAwsSsmPatchFilterGroup(rule.PatchFilterGroup)
 
 		if rule.ApproveAfterDays != nil {
-			r["approve_after_days"] = aws.Int64Value(rule.ApproveAfterDays)
+			r["approve_after_days"] = aws.Int64Value(rule.ApproveAfterDays)
 		}
 
 		if rule.ApproveUntilDate != nil {
-			r["approve_until_date"] = aws.StringValue(rule.ApproveUntilDate)
+			r["approve_until_date"] = aws.StringValue(rule.ApproveUntilDate)
 		}
 
 		result = append(result, r)

From 975c8a7272e3e0dbdf49d7b55433d19557afbae0 Mon Sep 17 00:00:00 2001
From: Brian Flad
Date: Thu, 11 Feb 2021 23:25:18 -0500
Subject: [PATCH 1208/1212] Update aws/resource_aws_ssm_patch_baseline.go

---
 aws/resource_aws_ssm_patch_baseline.go | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/aws/resource_aws_ssm_patch_baseline.go b/aws/resource_aws_ssm_patch_baseline.go
index a6d47f44880..e7446cfd363 100644
--- a/aws/resource_aws_ssm_patch_baseline.go
+++ b/aws/resource_aws_ssm_patch_baseline.go
@@ -479,12 +479,10 @@ func expandAwsSsmPatchRuleGroup(d *schema.ResourceData) *ssm.PatchRuleGroup {
 			EnableNonSecurity: aws.Bool(rCfg["enable_non_security"].(bool)),
 		}
 
-		if v, ok := rCfg["approve_after_days"].(int); ok && v != 0 {
-			rule.ApproveAfterDays = aws.Int64(int64(v))
-		}
-
 		if v, ok := rCfg["approve_until_date"].(string); ok && v != "" {
 			rule.ApproveUntilDate = aws.String(v)
+		} else if v, ok := rCfg["approve_after_days"].(int); ok {
+			rule.ApproveAfterDays = aws.Int64(int64(v))
 		}
 
 		rules = append(rules, rule)
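After these two commits, `expandAwsSsmPatchRuleGroup` no longer returns an error for rules that set both fields; it simply prefers `approve_until_date` and otherwise falls back to `approve_after_days`. An `approval_rule` using the new cutoff-date form could look like this sketch (the name, date, and filter values are placeholders):

```hcl
resource "aws_ssm_patch_baseline" "example" {
  name             = "example-until-date"
  operating_system = "AMAZON_LINUX"

  approval_rule {
    # The expander above prefers approve_until_date when both this and
    # approve_after_days are set.
    approve_until_date = "2021-06-30"

    patch_filter {
      key    = "PRODUCT"
      values = ["AmazonLinux2017.09"]
    }
  }
}
```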
From cf21b1eb79dd28273facf60cc5ff1ac631f8b034 Mon Sep 17 00:00:00 2001
From: Brian Flad
Date: Thu, 11 Feb 2021 23:29:19 -0500
Subject: [PATCH 1209/1212] Add CHANGELOG entry for #13850

---
 .changelog/13850.txt | 3 +++
 1 file changed, 3 insertions(+)
 create mode 100644 .changelog/13850.txt

diff --git a/.changelog/13850.txt b/.changelog/13850.txt
new file mode 100644
index 00000000000..c3572bee4fc
--- /dev/null
+++ b/.changelog/13850.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+resource/aws_ssm_patch_baseline: Add `approval_rule` block `approve_until_date` argument
+```

From 27e2c67e4951238cb81021a3f741ae6c96a83be7 Mon Sep 17 00:00:00 2001
From: Brian Flad
Date: Thu, 11 Feb 2021 23:46:24 -0500
Subject: [PATCH 1210/1212] Apply suggestions from code review

---
 aws/resource_aws_ssm_document_test.go     | 2 +-
 website/docs/r/ssm_document.html.markdown | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/aws/resource_aws_ssm_document_test.go b/aws/resource_aws_ssm_document_test.go
index 501747cfa19..8974a967e5c 100644
--- a/aws/resource_aws_ssm_document_test.go
+++ b/aws/resource_aws_ssm_document_test.go
@@ -660,7 +660,7 @@ func testAccAWSSSMDocumentBasicConfigVersionName(rName, version string) string {
 resource "aws_ssm_document" "test" {
   name          = "%s"
   document_type = "Command"
-  version_name = "%s"
+  version_name  = "%s"
 
   content = <

From: Brian Flad
Date: Fri, 12 Feb 2021 00:05:54 -0500
Subject: [PATCH 1211/1212] resource/aws_ssm_document: Support in-place version_name updates, add CHANGELOG entry for #14128

Output from acceptance testing in AWS Commercial:

```
--- PASS: TestAccAWSSSMDocument_params (30.50s)
--- PASS: TestAccAWSSSMDocument_basic (32.24s)
--- PASS: TestAccAWSSSMDocument_session (32.29s)
--- PASS: TestAccAWSSSMDocument_permission_private (32.51s)
--- PASS: TestAccAWSSSMDocument_permission_batching (32.78s)
--- PASS: TestAccAWSSSMDocument_permission_public (32.80s)
--- PASS: TestAccAWSSSMDocument_automation (37.50s)
--- PASS: TestAccAWSSSMDocument_VersionName (46.97s)
--- PASS: TestAccAWSSSMDocument_DocumentFormat_YAML (46.99s)
--- PASS: TestAccAWSSSMDocument_target_type (48.06s)
--- PASS: TestAccAWSSSMDocument_update (48.17s)
--- PASS: TestAccAWSSSMDocument_SchemaVersion_1 (48.20s)
--- PASS: TestAccAWSSSMDocument_Tags (57.02s)
--- PASS: TestAccAWSSSMDocument_permission_change (57.33s)
--- PASS: TestAccAWSSSMDocument_package (73.74s)
```

Output from acceptance testing in AWS GovCloud (US):

```
--- PASS: TestAccAWSSSMDocument_basic (34.86s)
--- PASS: TestAccAWSSSMDocument_session (39.41s)
--- PASS: TestAccAWSSSMDocument_params (39.84s)
--- PASS: TestAccAWSSSMDocument_permission_private (40.26s)
--- PASS: TestAccAWSSSMDocument_permission_public (40.37s)
--- PASS: TestAccAWSSSMDocument_permission_batching (40.94s)
--- PASS: TestAccAWSSSMDocument_automation (52.65s)
--- PASS: TestAccAWSSSMDocument_target_type (54.33s)
--- PASS: TestAccAWSSSMDocument_update (55.65s)
--- PASS: TestAccAWSSSMDocument_DocumentFormat_YAML (55.72s)
--- PASS: TestAccAWSSSMDocument_VersionName (55.79s)
--- PASS: TestAccAWSSSMDocument_SchemaVersion_1 (55.96s)
--- PASS: TestAccAWSSSMDocument_Tags (66.39s)
--- PASS: TestAccAWSSSMDocument_permission_change (68.28s)
--- PASS: TestAccAWSSSMDocument_package (80.15s)
```
---
 .changelog/14128.txt                      | 3 +++
 aws/resource_aws_ssm_document.go          | 6 ++++--
 aws/resource_aws_ssm_document_test.go     | 9 +++++----
 website/docs/r/ssm_document.html.markdown | 2 +-
 4 files changed, 13 insertions(+), 7 deletions(-)
 create mode 100644 .changelog/14128.txt

diff --git a/.changelog/14128.txt b/.changelog/14128.txt
new file mode 100644
index 00000000000..dc09171f977
--- /dev/null
+++ b/.changelog/14128.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+resource/aws_ssm_document: Add `version_name` argument
+```
diff --git a/aws/resource_aws_ssm_document.go b/aws/resource_aws_ssm_document.go
index a218777208e..4d02a85d0c8 100644
--- a/aws/resource_aws_ssm_document.go
+++ b/aws/resource_aws_ssm_document.go
@@ -175,8 +175,6 @@ func resourceAwsSsmDocument() *schema.Resource {
 			"version_name": {
 				Type:     schema.TypeString,
 				Optional: true,
-				Computed: true,
-				ForceNew: true,
 			},
 		},
 	}
@@ -658,6 +656,10 @@ func updateAwsSSMDocument(d *schema.ResourceData, meta interface{}) error {
 		updateDocInput.TargetType = aws.String(v.(string))
 	}
 
+	if v, ok := d.GetOk("version_name"); ok {
+		updateDocInput.VersionName = aws.String(v.(string))
+	}
+
 	if d.HasChange("attachments_source") {
 		updateDocInput.Attachments = expandSsmAttachmentsSources(d.Get("attachments_source").([]interface{}))
 	}
diff --git a/aws/resource_aws_ssm_document_test.go b/aws/resource_aws_ssm_document_test.go
index c2739750356..f4410d0ca76 100644
--- a/aws/resource_aws_ssm_document_test.go
+++ b/aws/resource_aws_ssm_document_test.go
@@ -30,6 +30,7 @@ func TestAccAWSSSMDocument_basic(t *testing.T) {
 					testAccCheckResourceAttrRfc3339(resourceName, "created_date"),
 					resource.TestCheckResourceAttr(resourceName, "tags.%", "0"),
 					resource.TestCheckResourceAttrSet(resourceName, "document_version"),
+					resource.TestCheckResourceAttr(resourceName, "version_name", ""),
 				),
 			},
 			{
@@ -72,7 +73,7 @@ func TestAccAWSSSMDocument_target_type(t *testing.T) {
 	})
 }
 
-func TestAccAWSSSMDocument_version_name(t *testing.T) {
+func TestAccAWSSSMDocument_VersionName(t *testing.T) {
 	name := acctest.RandString(10)
 	resourceName := "aws_ssm_document.test"
 
 	resource.ParallelTest(t, resource.TestCase{
@@ -659,14 +660,14 @@ DOC
 
 func testAccAWSSSMDocumentBasicConfigVersionName(rName, version string) string {
 	return fmt.Sprintf(`
 resource "aws_ssm_document" "test" {
-  name          = "%s"
+  name          = %[1]q
   document_type = "Command"
-  version_name  = "%s"
+  version_name  = %[2]q
 
   content = <
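With `Computed` and `ForceNew` removed from the schema and `VersionName` passed through in `updateAwsSSMDocument`, changing `version_name` now updates the document in place instead of recreating it. A sketch of the argument in use; the document name, label, and content are illustrative placeholders:

```hcl
resource "aws_ssm_document" "example" {
  name          = "example-run-shell"
  document_type = "Command"

  # Changing version_name no longer forces a new resource; it is sent with
  # the update and labels the document version that update creates.
  version_name = "release-1.0.1"

  content = jsonencode({
    schemaVersion = "2.2"
    description   = "Example command document"
    mainSteps = [{
      action = "aws:runShellScript"
      name   = "example"
      inputs = { runCommand = ["echo hello"] }
    }]
  })
}
```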
From: Brian Flad
Date: Fri, 12 Feb 2021 00:30:37 -0500
Subject: [PATCH 1212/1212] resource/aws_ssm_association: Finalize apply_only_at_cron_interval and add CHANGELOG entry for #15038

Output from acceptance testing in AWS Commercial:

```
--- PASS: TestAccAWSSSMAssociation_withDocumentVersion (26.48s)
--- PASS: TestAccAWSSSMAssociation_rateControl (36.32s)
--- PASS: TestAccAWSSSMAssociation_withScheduleExpression (39.12s)
--- PASS: TestAccAWSSSMAssociation_withAssociationNameAndScheduleExpression (39.44s)
--- PASS: TestAccAWSSSMAssociation_withParameters (41.73s)
--- PASS: TestAccAWSSSMAssociation_withAssociationName (41.97s)
--- PASS: TestAccAWSSSMAssociation_withComplianceSeverity (49.30s)
--- PASS: TestAccAWSSSMAssociation_ApplyOnlyAtCronInterval (53.28s)
--- PASS: TestAccAWSSSMAssociation_withTargets (57.36s)
--- PASS: TestAccAWSSSMAssociation_withAutomationTargetParamName (57.49s)
--- PASS: TestAccAWSSSMAssociation_withOutputLocation (87.00s)
--- PASS: TestAccAWSSSMAssociation_basic (98.52s)
--- PASS: TestAccAWSSSMAssociation_disappears (116.82s)
```

Output from acceptance testing in AWS GovCloud (US):

```
--- PASS: TestAccAWSSSMAssociation_withDocumentVersion (38.37s)
--- PASS: TestAccAWSSSMAssociation_withParameters (49.05s)
--- PASS: TestAccAWSSSMAssociation_rateControl (49.73s)
--- PASS: TestAccAWSSSMAssociation_ApplyOnlyAtCronInterval (52.95s)
--- PASS: TestAccAWSSSMAssociation_withAssociationName (52.95s)
--- PASS: TestAccAWSSSMAssociation_withComplianceSeverity (56.16s)
--- PASS: TestAccAWSSSMAssociation_withAssociationNameAndScheduleExpression (63.14s)
--- PASS: TestAccAWSSSMAssociation_withScheduleExpression (63.99s)
--- PASS: TestAccAWSSSMAssociation_withAutomationTargetParamName (64.88s)
--- PASS: TestAccAWSSSMAssociation_withTargets (71.86s)
--- PASS: TestAccAWSSSMAssociation_withOutputLocation (87.33s)
--- PASS: TestAccAWSSSMAssociation_basic (112.52s)
--- PASS: TestAccAWSSSMAssociation_disappears (119.66s)
```
---
 .changelog/15038.txt                         |  3 +++
 aws/resource_aws_ssm_association_test.go     | 80 ++++++++++++++++++--
 website/docs/r/ssm_association.html.markdown |  2 +-
 3 files changed, 79 insertions(+), 6 deletions(-)
 create mode 100644 .changelog/15038.txt

diff --git a/.changelog/15038.txt b/.changelog/15038.txt
new file mode 100644
index 00000000000..ddc074336bb
--- /dev/null
+++ b/.changelog/15038.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+resource/aws_ssm_association: Add `apply_only_at_cron_interval` argument
+```
diff --git a/aws/resource_aws_ssm_association_test.go b/aws/resource_aws_ssm_association_test.go
index be255dd09e9..cbea001e4a0 100644
--- a/aws/resource_aws_ssm_association_test.go
+++ b/aws/resource_aws_ssm_association_test.go
@@ -24,6 +24,7 @@ func TestAccAWSSSMAssociation_basic(t *testing.T) {
 				Config: testAccAWSSSMAssociationBasicConfig(name),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSSMAssociationExists(resourceName),
+					resource.TestCheckResourceAttr(resourceName, "apply_only_at_cron_interval", "false"),
 				),
 			},
 			{
@@ -56,6 +57,38 @@ func TestAccAWSSSMAssociation_disappears(t *testing.T) {
 	})
 }
 
+func TestAccAWSSSMAssociation_ApplyOnlyAtCronInterval(t *testing.T) {
+	name := acctest.RandString(10)
+	resourceName := "aws_ssm_association.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSSSMAssociationDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAWSSSMAssociationBasicConfigWithApplyOnlyAtCronInterval(name, true),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSSSMAssociationExists(resourceName),
+					resource.TestCheckResourceAttr(resourceName, "apply_only_at_cron_interval", "true"),
+				),
+			},
+			{
+				ResourceName:      resourceName,
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+			{
+				Config: testAccAWSSSMAssociationBasicConfigWithApplyOnlyAtCronInterval(name, false),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSSSMAssociationExists(resourceName),
+					resource.TestCheckResourceAttr(resourceName, "apply_only_at_cron_interval", "false"),
+				),
+			},
+		},
+	})
+}
+
 func TestAccAWSSSMAssociation_withTargets(t *testing.T) {
 	name := acctest.RandString(10)
 	resourceName := "aws_ssm_association.test"
@@ -351,7 +384,7 @@ func TestAccAWSSSMAssociation_withAutomationTargetParamName(t *testing.T) {
 	})
 }
 
-func TestAccAWSSSMAssociation_withScheduleExpressionAndCronInterval(t *testing.T) {
+func TestAccAWSSSMAssociation_withScheduleExpression(t *testing.T) {
 	name := acctest.RandString(10)
 	resourceName := "aws_ssm_association.test"
 
@@ -365,7 +398,6 @@
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSSMAssociationExists(resourceName),
 					resource.TestCheckResourceAttr(resourceName, "schedule_expression", "cron(0 16 ? * TUE *)"),
-					resource.TestCheckResourceAttr(resourceName, "apply_only_at_cron_interval", "true"),
 				),
 			},
 			{
@@ -378,7 +410,6 @@ func TestAccAWSSSMAssociation_withScheduleExpressionAndCronInterval(t *testing.T
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckAWSSSMAssociationExists(resourceName),
 					resource.TestCheckResourceAttr(resourceName, "schedule_expression", "cron(0 16 ? * WED *)"),
-					resource.TestCheckResourceAttr(resourceName, "apply_only_at_cron_interval", "false"),
 				),
 			},
 		},
@@ -519,6 +550,47 @@ func testAccCheckAWSSSMAssociationDestroy(s *terraform.State) error {
 	return nil
 }
 
+func testAccAWSSSMAssociationBasicConfigWithApplyOnlyAtCronInterval(rName string, applyOnlyAtCronInterval bool) string {
+	return fmt.Sprintf(`
+resource "aws_ssm_document" "test" {
+  name          = "test_document_association-%s"
+  document_type = "Command"
+
+  content = <
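For reference, the `apply_only_at_cron_interval` argument this final patch finishes wiring up is used like so; a minimal sketch in which the document reference, schedule, and targets are placeholders:

```hcl
resource "aws_ssm_association" "example" {
  name                = aws_ssm_document.example.name  # placeholder reference
  schedule_expression = "cron(0 16 ? * TUE *)"

  # When true, the association runs only at the next cron tick instead of
  # also running immediately after it is created or updated.
  apply_only_at_cron_interval = true

  targets {
    key    = "InstanceIds"
    values = ["*"]
  }
}
```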